source stringlengths 3 92 | c stringlengths 26 2.25M |
|---|---|
val_omp.c | /* This file performs the following test: each OMP thread measures flops
for its provided tasks, and compares this to expected flop counts, each
thread having been provided with a random amount of work, such that the
time and order that they complete their measurements varies.
Specifically tested is the case where the value returned for some threads
actually corresponds to that for another thread reading its counter values
at the same time.
- It is based on zero_omp.c but ignores much of its functionality.
- It attempts to use the following two counters. It may use less
depending on hardware counter resource limitations. These are counted
in the default counting domain and default granularity, depending on
the platform. Usually this is the user domain (PAPI_DOM_USER) and
thread context (PAPI_GRN_THR).
+ PAPI_FP_INS
+ PAPI_TOT_CYC
Each thread inside the Thread routine:
- Do prework (MAX_FLOPS - flops)
- Get cyc.
- Get us.
- Start counters
- Do flops
- Stop and read counters
- Get us.
- Get cyc.
- Return flops
*/
#include "papi_test.h"
#ifdef _OPENMP
#include <omp.h>
#else
#error "This compiler does not understand OPENMP"
#endif
const int MAX_FLOPS = NUM_FLOPS;
extern int TESTS_QUIET; /* Declared in test_utils.c */
const PAPI_hw_info_t *hw_info = NULL;
/* Run one measurement on the calling OMP thread: set up two hardware
   counters, execute n units of floating-point work while counting, and
   return the measured flop count.  Pre-work of (MAX_FLOPS - n) is done
   first so every thread performs the same total work but finishes its
   measured section at a different time. */
long_long Thread(int n)
{
    int retval;
    int num_tests = 1;
    int EventSet1 = PAPI_NULL;
    int PAPI_event;
    int mask1;
    int num_events1 = 2;
    long_long measured_flops;
    long_long **values;
    long_long elapsed_us, elapsed_cyc;
    char event_name[PAPI_MAX_STR_LEN];

    /* Pick PAPI_TOT_CYC plus one of PAPI_FP_INS / PAPI_FP_OPS / PAPI_TOT_INS,
       whichever this platform supports. */
    EventSet1 = add_two_events(&num_events1, &PAPI_event, hw_info, &mask1);

    retval = PAPI_event_code_to_name(PAPI_event, event_name);
    if (retval != PAPI_OK)
        test_fail(__FILE__, __LINE__, "PAPI_event_code_to_name", retval);

    values = allocate_test_space(num_tests, num_events1);

    /* Pre-work: balance total work across threads so completion order of
       the measured sections varies. */
    do_flops(MAX_FLOPS - n);

    elapsed_us = PAPI_get_real_usec();
    elapsed_cyc = PAPI_get_real_cyc();

    retval = PAPI_start(EventSet1);
    if (retval != PAPI_OK)
        test_fail(__FILE__, __LINE__, "PAPI_start", retval);

    do_flops(n);                /* the measured work */

    retval = PAPI_stop(EventSet1, values[0]);
    if (retval != PAPI_OK)
        test_fail(__FILE__, __LINE__, "PAPI_stop", retval);

    measured_flops = (values[0])[0];

    elapsed_us = PAPI_get_real_usec() - elapsed_us;
    elapsed_cyc = PAPI_get_real_cyc() - elapsed_cyc;

    remove_test_events(&EventSet1, mask1);

    if (!TESTS_QUIET) {
        /* Per-thread diagnostics; disabled to keep test output quiet. */
#if 0
        printf("Thread 0x%x PAPI_TOT_CYC: \t%lld\n", omp_get_thread_num(), (values[0])[1]);
        printf("Thread 0x%x Real usec : \t%lld\n", omp_get_thread_num(), elapsed_us);
        printf("Thread 0x%x Real cycles : \t%lld\n", omp_get_thread_num(), elapsed_cyc);
#endif
    }

    /* Threads must not call exit()/test_pass() inside an OpenMP region. */
    free_test_space(values, num_tests);
    PAPI_unregister_thread();
    return measured_flops;
}
/* Test driver: calibrates a flops-per-work-unit ratio ("flopper") on the
   master thread, then repeatedly runs Thread() on all OMP threads with a
   random amount of work each.  The master compares every thread's measured
   flop count against its expectation; a shortfall (often paired with two
   threads reporting identical values) indicates a thread received another
   thread's counter values -- the bug this test validates against. */
int main(int argc, char **argv)
{
    int tid, retval;
    int maxthr = omp_get_max_threads();
    int flopper = 0;    /* measured flops per unit of requested work */
    long_long *flops = calloc(maxthr, sizeof(long_long));   /* measured, per thread */
    long_long *flopi = calloc(maxthr, sizeof(long_long));   /* expected, per thread */

    tests_quiet(argc, argv); /* Set TESTS_QUIET variable */

    if (maxthr < 2)
        test_skip(__FILE__, __LINE__, "omp_get_num_threads < 2", PAPI_EINVAL);
    if ((flops == NULL) || (flopi == NULL))
        test_fail(__FILE__, __LINE__, "calloc", PAPI_ENOMEM);

    retval = PAPI_library_init(PAPI_VER_CURRENT);
    if (retval != PAPI_VER_CURRENT)
        test_fail(__FILE__, __LINE__, "PAPI_library_init", retval);

    if (!TESTS_QUIET) {
        retval = PAPI_set_debug(PAPI_VERB_ECONT);
        if (retval != PAPI_OK)
            test_fail(__FILE__, __LINE__, "PAPI_set_debug", retval);
    }

    hw_info = PAPI_get_hardware_info();
    if (hw_info == NULL)
        test_fail(__FILE__, __LINE__, "PAPI_get_hardware_info", 2);

    /* Threads identify themselves to PAPI by their OMP thread number. */
    retval = PAPI_thread_init((unsigned long (*)(void)) (omp_get_thread_num));
    if (retval != PAPI_OK)
        /* note: the else pairs with the inner if (dangling-else rule) */
        if (retval == PAPI_ESBSTR)
            test_skip(__FILE__, __LINE__, "PAPI_thread_init", retval);
        else
            test_fail(__FILE__, __LINE__, "PAPI_thread_init", retval);

    /* Calibration run: flops actually counted per flop requested.
       NOTE(review): if this ever comes out as 0 the division below
       faults -- confirm counters always report >= 65536 here. */
    flopper = Thread(65536) / 65536;
    printf("flopper=%d\n", flopper);

    for (int i=0; i<100000; i++)
#pragma omp parallel private(tid)
    {
        tid = omp_get_thread_num();
        /* NOTE(review): rand() is called concurrently from all threads
           (not guaranteed thread-safe) and rand()*3 can overflow int
           (UB when RAND_MAX == INT_MAX) -- confirm intent. */
        flopi[tid] = rand()*3;
        flops[tid] = Thread((flopi[tid]/flopper)%MAX_FLOPS);
#pragma omp barrier
        /* Master alone validates this iteration, after the barrier
           guarantees all threads have stored their results. */
#pragma omp master
        if (flops[tid] < flopi[tid]) {
            printf("test iteration=%d\n", i);
            /* Dump every thread's measured vs. expected value and flag
               pairs of threads with suspiciously identical counts. */
            for (int j=0; j<omp_get_num_threads(); j++) {
                printf("Thread 0x%x Value %6lld %c %6lld", j, flops[j],
                       (flops[j]<flopi[j])?'<':'=', flopi[j]);
                for (int k=0; k<omp_get_num_threads(); k++)
                    if ((k != j) && (flops[k] == flops[j]))
                        printf(" == Thread 0x%x!", k);
                printf("\n");
            }
            test_fail(__FILE__, __LINE__, "value returned for thread", PAPI_EBUG);
        }
    }
    test_pass(__FILE__, NULL, 0);
    exit(0);
}
|
reorder.c | // -----------------------------------------------------------------------------
//
// "00_AccelGraph"
//
// -----------------------------------------------------------------------------
// Copyright (c) 2014-2019 All rights reserved
// -----------------------------------------------------------------------------
// Author : Abdullah Mughrabi
// Email : atmughra@ncsu.edu||atmughrabi@gmail.com
// File : reorder.c
// Create : 2019-06-21 17:15:17
// Revise : 2019-09-28 15:35:52
// Editor : Abdullah Mughrabi
// -----------------------------------------------------------------------------
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
#include <omp.h>
#include <stdint.h>
#include "timer.h"
#include "myMalloc.h"
#include "graphConfig.h"
#include "edgeList.h"
#include "fixedPoint.h"
#include "sortRun.h"
#include "quantization.h"
#include "mt19937.h"
#include "vc_vector.h"
#include "graphCSR.h"
#include "reorder.h"
/* Atomically decrement *region if (and only if) it is non-zero.
   Returns 1 when the decrement was applied, 0 when *region was already 0.
   Lock-free: retries the compare-and-swap until it wins or the value
   reaches zero. */
uint32_t RegionAtomicDecrement(uint32_t *region)
{
    for (;;)
    {
        uint32_t snapshot = *region;

        if (snapshot == 0)
            return 0;                       /* nothing left to take */

        if (__sync_bool_compare_and_swap(region, snapshot, snapshot - 1))
            return 1;                       /* we claimed one unit */

        /* CAS lost a race with another thread -- reload and retry. */
    }
}
/* One counting-sort pass of a parallel LSD radix sort over 8-bit digits.
   Reorders the (*pageRanksFP, *labels) pairs by byte `radix` (0 = least
   significant) of the keys into the *Temp arrays, then swaps the array
   pointers so the caller keeps using the same variables between passes.
   The pass is stable, so successive passes yield an ascending sort.

   pageRanksFP / pageRanksFPTemp : key array and scratch (swapped on exit)
   labels / labelsTemp           : payload array and scratch (swapped on exit)
   radix                         : which byte of the key to sort on (0..3)
   buckets                       : bucket count (256 for an 8-bit digit)
   buckets_count                 : caller-provided scratch, one `buckets`-sized
                                   slice per thread (size >= P * buckets)
   num_vertices                  : number of elements */
void radixSortCountSortEdgesByRanks (uint32_t **pageRanksFP, uint32_t **pageRanksFPTemp, uint32_t **labels, uint32_t **labelsTemp, uint32_t radix, uint32_t buckets, uint32_t *buckets_count, uint32_t num_vertices)
{
    uint32_t *tempPointer1 = NULL;
    uint32_t *tempPointer2 = NULL;
    uint32_t t = 0;
    uint32_t o = 0;
    uint32_t u = 0;
    uint32_t i = 0;
    uint32_t j = 0;
    uint32_t P = 1; // 32/8 8 bit radix needs 4 iterations
    uint32_t t_id = 0;
    uint32_t offset_start = 0;
    uint32_t offset_end = 0;
    uint32_t base = 0;
    #pragma omp parallel default(none) shared(P,pageRanksFP, pageRanksFPTemp,radix,labels,labelsTemp,buckets,buckets_count, num_vertices) firstprivate(t_id, offset_end,offset_start,base,i,j,t,u,o)
    {
        t_id = omp_get_thread_num();
        // P is shared; thread 0 publishes the team size, the barrier below
        // makes it visible to everyone before it is used for slicing.
        if(t_id == 0)
        {
            P = omp_get_num_threads();
        }
        #pragma omp barrier
        // Each thread owns the contiguous slice [offset_start, offset_end);
        // the last thread also takes the num_vertices % P remainder.
        offset_start = t_id * (num_vertices / P);
        if(t_id == (P - 1))
        {
            offset_end = offset_start + (num_vertices / P) + (num_vertices % P) ;
        }
        else
        {
            offset_end = offset_start + (num_vertices / P);
        }
        //HISTOGRAM-KEYS: per-thread digit histogram in this thread's
        //private slice of buckets_count (no synchronization needed).
        for(i = 0; i < buckets; i++)
        {
            buckets_count[(t_id * buckets) + i] = 0;
        }
        for (i = offset_start; i < offset_end; i++)
        {
            u = (*pageRanksFP)[i];
            t = (u >> (radix * 8)) & 0xff;
            buckets_count[(t_id * buckets) + t]++;
        }
        #pragma omp barrier
        // SCAN BUCKETS: serial exclusive prefix sum over (bucket, thread)
        // pairs, turning counts into stable global starting offsets.
        // base is firstprivate, so only thread 0's copy matters here.
        if(t_id == 0)
        {
            for(i = 0; i < buckets; i++)
            {
                for(j = 0 ; j < P; j++)
                {
                    t = buckets_count[(j * buckets) + i];
                    buckets_count[(j * buckets) + i] = base;
                    base += t;
                }
            }
        }
        #pragma omp barrier
        //RANK-AND-PERMUTE: each thread scatters its own slice to the
        //offsets computed above; offset ranges are disjoint, so no races.
        for (i = offset_start; i < offset_end; i++) /* radix sort */
        {
            u = (*pageRanksFP)[i];
            t = (u >> (radix * 8)) & 0xff;
            o = buckets_count[(t_id * buckets) + t];
            (*pageRanksFPTemp)[o] = (*pageRanksFP)[i];
            (*labelsTemp)[o] = (*labels)[i];
            buckets_count[(t_id * buckets) + t]++;
        }
    }
    // Swap primary and scratch pointers so the sorted data is visible
    // through the caller's original variables.
    tempPointer1 = *labels;
    *labels = *labelsTemp;
    *labelsTemp = tempPointer1;
    tempPointer2 = *pageRanksFP;
    *pageRanksFP = *pageRanksFPTemp;
    *pageRanksFPTemp = tempPointer2;
}
/* Sort the `labels` array by the vertices' page-rank values (ascending)
   using a 4-pass parallel LSD radix sort on 8-bit digits.  The float
   ranks are first converted to sortable 32-bit fixed-point keys.
   Returns the (in-place reordered) labels array. */
uint32_t *radixSortEdgesByPageRank (float *pageRanks, uint32_t *labels, uint32_t num_vertices)
{
    const uint32_t digit_passes = 4;    /* 32-bit key, 8 bits per pass */
    const uint32_t num_buckets = 256;   /* 2^8 buckets per pass */
    uint32_t num_threads = omp_get_max_threads();
    uint32_t pass;
    uint32_t v;

    /* One histogram slice per thread, plus key/payload scratch arrays. */
    uint32_t *bucket_counts = (uint32_t *) my_malloc(num_threads * num_buckets * sizeof(uint32_t));
    uint32_t *keysFixed = (uint32_t *) my_malloc(num_vertices * sizeof(uint32_t));
    uint32_t *keysScratch = (uint32_t *) my_malloc(num_vertices * sizeof(uint32_t));
    uint32_t *labelsScratch = (uint32_t *) my_malloc(num_vertices * sizeof(uint32_t));

    /* Convert float ranks to fixed-point sort keys and zero the scratch. */
    #pragma omp parallel for
    for (v = 0; v < num_vertices; v++)
    {
        keysFixed[v] = FloatToFixed32SORT(pageRanks[v]);
        keysScratch[v] = 0;
        labelsScratch[v] = 0;
    }

    /* One stable counting-sort pass per key byte, least significant first.
       The helper swaps the primary/scratch pointers after each pass. */
    for (pass = 0; pass < digit_passes; pass++)
    {
        radixSortCountSortEdgesByRanks(&keysFixed, &keysScratch, &labels, &labelsScratch, pass, num_buckets, bucket_counts, num_vertices);
    }

    free(bucket_counts);
    free(keysFixed);
    free(keysScratch);
    free(labelsScratch);

    return labels;
}
/* Sort the `labels` array by vertex degree (ascending, stable) with a
   4-pass parallel LSD radix sort over 8-bit digits of `degrees`.
   After the even number of pointer swaps done by the helper, `degrees`
   and `labels` point at their original storage again.
   Returns the (reordered) labels array. */
uint32_t *radixSortEdgesByDegree (uint32_t *degrees, uint32_t *labels, uint32_t num_vertices)
{
    uint32_t radix = 4; // 32/8 8 bit radix needs 4 iterations
    uint32_t P = omp_get_max_threads();
    uint32_t buckets = 256; // 2^8 = 256 buckets per digit
    uint32_t *buckets_count = NULL;
    uint32_t j = 0;
    uint32_t *degreesTemp = NULL;
    uint32_t *labelsTemp = NULL;

    buckets_count = (uint32_t *) my_malloc(P * buckets * sizeof(uint32_t));
    degreesTemp = (uint32_t *) my_malloc(num_vertices * sizeof(uint32_t));
    labelsTemp = (uint32_t *) my_malloc(num_vertices * sizeof(uint32_t));

    /* BUGFIX: this was `#pragma omp parallel` (without `for`), which made
       every thread execute the entire loop while racing on the shared
       loop variable `j` -- undefined behavior that could leave scratch
       entries unwritten.  `parallel for` divides the iterations and
       privatizes the loop variable, matching the equivalent loop in
       radixSortEdgesByPageRank. */
    #pragma omp parallel for
    for (j = 0; j < num_vertices; ++j)
    {
        labelsTemp[j] = 0;
        degreesTemp[j] = 0;
    }

    /* One stable counting-sort pass per key byte, least significant first. */
    for(j = 0 ; j < radix ; j++)
    {
        radixSortCountSortEdgesByRanks (&degrees, &degreesTemp, &labels, &labelsTemp, j, buckets, buckets_count, num_vertices);
    }

    free(buckets_count);
    free(degreesTemp);
    free(labelsTemp);

    return labels;
}
// ********************************************************************************************
// *************** Degree relabel **************
// ********************************************************************************************
/* Degree relabeling driver: allocates and zeroes the per-vertex degree
   array, fills it according to lmode (out/in/in+out/random -- see
   reorderGraphGenerateInOutDegrees), relabels the edge list by degree,
   and returns the relabeled list. */
struct EdgeList *reorderGraphProcessDegree( uint32_t sort, struct EdgeList *edgeList, uint32_t lmode)
{
    uint32_t i;
    uint32_t *degrees;

    degrees = (uint32_t *) my_malloc(edgeList->num_vertices * sizeof(uint32_t));

    /* BUGFIX: was `#pragma omp parallel` without `for`, so every thread ran
       the whole loop with a shared `i` -- a data race that could leave
       entries of `degrees` uninitialized.  `parallel for` splits the
       iterations and privatizes `i`, matching the equivalent loops in
       reorderGraphProcessHUBSort/HUBCluster. */
    #pragma omp parallel for
    for (i = 0; i < edgeList->num_vertices; ++i)
    {
        degrees[i] = 0;
    }

    degrees = reorderGraphGenerateInOutDegrees( degrees, edgeList, lmode);
    edgeList = reorderGraphListDegree( edgeList, degrees, lmode);

    free(degrees);
    return edgeList;
}
/* Relabel the graph so vertices are numbered by descending degree:
   new id 0 goes to the vertex with the largest value in degrees[].
   degrees[v] must already hold the metric selected by lmode (lmode is
   only used here to print which metric was chosen).  Applies the new
   labels to the edge list and to label_array / inverse_label_array.
   Returns the relabeled edge list. */
struct EdgeList *reorderGraphListDegree(struct EdgeList *edgeList, uint32_t *degrees, uint32_t lmode)
{
    uint32_t v;
    uint32_t *labelsInverse;    // labelsInverse[rank] = old vertex id, after sorting
    uint32_t *labels;           // labels[old id] = new id
    struct Timer *timer = (struct Timer *) malloc(sizeof(struct Timer));

    labels = (uint32_t *) my_malloc(edgeList->num_vertices * sizeof(uint32_t));
    labelsInverse = (uint32_t *) my_malloc(edgeList->num_vertices * sizeof(uint32_t));

    printf(" -----------------------------------------------------\n");
    printf("| %-51s | \n", "Starting Degree Reordering/Relabeling");
    printf(" -----------------------------------------------------\n");
    switch(lmode)
    {
    case 1 :
        printf("| %-51s | \n", "OUT-DEGREE");
        break;
    case 2 :
        printf("| %-51s | \n", "IN-DEGREE");
        break;
    case 3 :
        printf("| %-51s | \n", "(IN+OUT)-DEGREE");
        break;
    case 10 :
        printf("| %-51s | \n", "RANDOM-DEGREE");
        break;
    default :
        printf("| %-51s | \n", "OUT-DEGREE");
    }
    printf(" -----------------------------------------------------\n");

    Start(timer);
    // Start from the identity permutation ...
    #pragma omp parallel for
    for(v = 0; v < edgeList->num_vertices; v++)
    {
        labelsInverse[v] = v;
    }
    // ... sort it by degree ascending ...
    labelsInverse = radixSortEdgesByDegree(degrees, labelsInverse, edgeList->num_vertices);
    // ... then reverse the ranks so the highest-degree vertex gets id 0.
    #pragma omp parallel for
    for(v = 0; v < edgeList->num_vertices; v++)
    {
        labels[labelsInverse[v]] = edgeList->num_vertices - 1 - v;
    }
    Stop(timer);

    printf(" -----------------------------------------------------\n");
    printf("| %-51s | \n", "Reordering Complete");
    printf(" -----------------------------------------------------\n");
    printf("| %-51f | \n", Seconds(timer));
    printf(" -----------------------------------------------------\n");

    Start(timer);
    edgeList = relabelEdgeList(edgeList, labels);
    Stop(timer);

    printf(" -----------------------------------------------------\n");
    printf("| %-51s | \n", "Relabeling Complete");
    printf(" -----------------------------------------------------\n");
    printf("| %-51f | \n", Seconds(timer));
    printf(" -----------------------------------------------------\n");

    // Keep the label maps consistent with the new numbering.
    // NOTE(review): assumes label_array holds distinct vertex ids, so the
    // concurrent writes to inverse_label_array hit distinct slots -- confirm.
    #pragma omp parallel for
    for (v = 0; v < edgeList->num_vertices; ++v)
    {
        edgeList->label_array[v] = labels[edgeList->label_array[v]];
        edgeList->inverse_label_array[edgeList->label_array[v]] = v;
    }

    free(timer);
    free(labelsInverse);
    free(labels);
    return edgeList;
}
// ********************************************************************************************
// *************** DBG relabel **************
// ********************************************************************************************
/* Degree-Based-Grouping (DBG) relabeling driver: builds the degree array
   for the metric selected by lmode (4 = out-degree, 5 = in-degree) and a
   geometric threshold ladder (avg/2, then doubling, ending in UINT32_MAX),
   then delegates to reorderGraphListDBG.  Returns the relabeled list. */
struct EdgeList *reorderGraphProcessDBG( uint32_t sort, struct EdgeList *edgeList, uint32_t lmode)
{
    uint32_t i;
    uint32_t *degrees;
    uint32_t *thresholds;
    uint32_t num_buckets = 11;

    degrees = (uint32_t *) my_malloc(edgeList->num_vertices * sizeof(uint32_t));
    thresholds = (uint32_t *) my_malloc(num_buckets * sizeof(uint32_t));

    /* BUGFIX: was `#pragma omp parallel` without `for`, so every thread ran
       the whole loop with a shared `i` -- a data race that could leave
       entries of `degrees` uninitialized.  `parallel for` splits the
       iterations and privatizes `i`, matching the equivalent loops in
       reorderGraphProcessHUBSort/HUBCluster. */
    #pragma omp parallel for
    for (i = 0; i < edgeList->num_vertices; ++i)
    {
        degrees[i] = 0;
    }

    /* START initialize thresholds: avg/2 (at least 1), doubling each
       bucket, with a UINT32_MAX catch-all in the last slot. */
    if(edgeList->avg_degree <= 1)
        thresholds[0] = 1;
    else
        thresholds[0] = (edgeList->avg_degree / 2);
    for ( i = 1; i < (num_buckets - 1); ++i)
    {
        thresholds[i] = thresholds[i - 1] * 2;
    }
    thresholds[num_buckets - 1] = UINT32_MAX;
    /* END initialize thresholds */

    switch(lmode)
    {
    case 4 :
        printf("| %-51s | \n", "DBG OUT-DEGREE");
        break;
    case 5 :
        printf("| %-51s | \n", "DBG IN-DEGREE");
        break;
    default :
        printf("| %-51s | \n", "DBG OUT-DEGREE");
    }

    degrees = reorderGraphGenerateInOutDegrees(degrees, edgeList, lmode);
    edgeList = reorderGraphListDBG(edgeList, degrees, thresholds, num_buckets, lmode);

    free(thresholds);
    free(degrees);
    return edgeList;
}
/* Degree-Based Grouping (DBG) relabeling.
   Vertices are binned into num_buckets degree ranges (a vertex lands in
   the first bucket i with degrees[v] <= thresholds[i]; thresholds must be
   ascending, last entry UINT32_MAX).  New ids are handed out starting
   with the highest bucket (largest degrees), so hub vertices get the
   smallest new ids; the original relative order inside each (thread,
   bucket) vector is preserved.  lmode is unused here -- the caller
   encodes the metric choice in `degrees`.
   Returns the relabeled edge list. */
struct EdgeList *reorderGraphListDBG(struct EdgeList *edgeList, uint32_t *degrees, uint32_t *thresholds, uint32_t num_buckets, uint32_t lmode)
{
    uint32_t i = 0;
    int32_t j = 0;
    int32_t k = 0;
    void *iter = 0;
    uint32_t v = 0;
    uint32_t t = 0;
    uint32_t temp_idx = 0;
    uint32_t P = 1;
    uint32_t t_id = 0;
    uint32_t offset_start = 0;
    uint32_t offset_end = 0;
    uint32_t *start_idx = NULL;            // per (thread, bucket) starting label
    vc_vector **buckets = NULL;            // per (thread, bucket) vertex vectors
    uint32_t *labels = (uint32_t *) my_malloc(edgeList->num_vertices * sizeof(uint32_t));
    struct Timer *timer = (struct Timer *) malloc(sizeof(struct Timer));

    Start(timer);
    #pragma omp parallel default(none) shared(P,labels,buckets,edgeList,num_buckets,degrees,thresholds,start_idx) firstprivate(iter,temp_idx,k,offset_start,offset_end,t_id,i,j,v,t)
    {
        t_id = omp_get_thread_num();
        // Thread 0 sizes the shared scratch once the team size is known;
        // the barrier publishes P/start_idx/buckets to the other threads.
        if(t_id == 0)
        {
            P = omp_get_num_threads();
            start_idx = (uint32_t *) my_malloc(P * num_buckets * sizeof(uint32_t));
            buckets = (vc_vector **) malloc(P * num_buckets * sizeof(vc_vector *));
        }
        #pragma omp barrier
        // Per-thread bucket vectors: binning needs no synchronization.
        for (i = 0; i < num_buckets; ++i)
        {
            buckets[(t_id * num_buckets) + i] = vc_vector_create(0, sizeof(uint32_t), NULL);
        }
        // Contiguous vertex slice per thread; last thread takes the remainder.
        offset_start = t_id * (edgeList->num_vertices / P);
        if(t_id == (P - 1))
        {
            offset_end = offset_start + (edgeList->num_vertices / P) + (edgeList->num_vertices % P) ;
        }
        else
        {
            offset_end = offset_start + (edgeList->num_vertices / P);
        }
        // Bin each vertex into the first bucket covering its degree.
        for (v = offset_start; v < offset_end; ++v)
        {
            for ( i = 0; i < num_buckets; ++i)
            {
                if(degrees[v] <= thresholds[i])
                {
                    vc_vector_push_back(buckets[(t_id * num_buckets) + i], &v);
                    break;
                }
            }
        }
        #pragma omp barrier
        // Serial exclusive prefix sum over (bucket, thread) vector sizes,
        // walking buckets from highest degree range down so hubs get the
        // smallest new ids.  temp_idx is firstprivate; only thread 0's
        // copy is used here.
        if(t_id == 0)
        {
            for ( j = num_buckets - 1; j >= 0; --j)
            {
                for (t = 0; t < P; ++t)
                {
                    start_idx[(t * num_buckets) + j] = temp_idx;
                    temp_idx += vc_vector_count(buckets[(t * num_buckets) + j]);
                }
            }
        }
        #pragma omp barrier
        // Each thread writes new ids for its own vectors starting at the
        // offsets computed above; the ranges are disjoint, so no races.
        for ( j = num_buckets - 1 ; j >= 0 ; --j)
        {
            k = start_idx[(t_id * num_buckets) + j];
            for ( iter = vc_vector_begin(buckets[(t_id * num_buckets) + j]);
                    iter != vc_vector_end(buckets[(t_id * num_buckets) + j]);
                    iter = vc_vector_next(buckets[(t_id * num_buckets) + j], iter))
            {
                labels[(*(uint32_t *)iter)] = k++;
            }
        }
    }
    Stop(timer);

    printf(" -----------------------------------------------------\n");
    printf("| %-51s | \n", "Reordering Complete");
    printf(" -----------------------------------------------------\n");
    printf("| %-51f | \n", Seconds(timer));
    printf(" -----------------------------------------------------\n");

    Start(timer);
    edgeList = relabelEdgeList(edgeList, labels);
    Stop(timer);

    printf(" -----------------------------------------------------\n");
    printf("| %-51s | \n", "Relabeling Complete");
    printf(" -----------------------------------------------------\n");
    printf("| %-51f | \n", Seconds(timer));
    printf(" -----------------------------------------------------\n");

    // Keep the label maps consistent with the new numbering.
    // NOTE(review): assumes label_array holds distinct vertex ids, so the
    // concurrent writes to inverse_label_array hit distinct slots -- confirm.
    #pragma omp parallel for
    for (v = 0; v < edgeList->num_vertices; ++v)
    {
        edgeList->label_array[v] = labels[edgeList->label_array[v]];
        edgeList->inverse_label_array[edgeList->label_array[v]] = v;
    }

    // P retains the team size published inside the parallel region.
    for (i = 0; i < (P * num_buckets); ++i)
    {
        vc_vector_release(buckets[i]);
    }
    free(timer);
    free(buckets);
    free(start_idx);
    free(labels);
    return edgeList;
}
// ********************************************************************************************
// *************** Corder relabel **************
// ********************************************************************************************
/* Corder relabeling driver: builds the degree array for the metric
   selected by lmode (12 = out-degree, 13 = in-degree) and a geometric
   threshold ladder, then delegates to the Corder list routine.
   Returns the relabeled edge list. */
struct EdgeList *reorderGraphProcessCorder( uint32_t sort, struct EdgeList *edgeList, uint32_t lmode)
{
    uint32_t i;
    uint32_t *degrees;
    uint32_t *thresholds;
    uint32_t num_buckets = 11;

    degrees = (uint32_t *) my_malloc(edgeList->num_vertices * sizeof(uint32_t));
    thresholds = (uint32_t *) my_malloc(num_buckets * sizeof(uint32_t));

    /* BUGFIX: was `#pragma omp parallel` without `for`, so every thread ran
       the whole loop with a shared `i` -- a data race that could leave
       entries of `degrees` uninitialized.  `parallel for` splits the
       iterations and privatizes `i`, matching the equivalent loops in
       reorderGraphProcessHUBSort/HUBCluster. */
    #pragma omp parallel for
    for (i = 0; i < edgeList->num_vertices; ++i)
    {
        degrees[i] = 0;
    }

    /* START initialize thresholds: avg/2 (at least 1), doubling each
       bucket, with a UINT32_MAX catch-all in the last slot. */
    if(edgeList->avg_degree <= 1)
        thresholds[0] = 1;
    else
        thresholds[0] = (edgeList->avg_degree / 2);
    for ( i = 1; i < (num_buckets - 1); ++i)
    {
        thresholds[i] = thresholds[i - 1] * 2;
    }
    thresholds[num_buckets - 1] = UINT32_MAX;
    /* END initialize thresholds */

    switch(lmode)
    {
    case 12 :
        printf("| %-51s | \n", "Corder OUT-DEGREE");
        break;
    case 13 :
        printf("| %-51s | \n", "Corder IN-DEGREE");
        break;
    default :
        printf("| %-51s | \n", "Corder OUT-DEGREE");
    }

    degrees = reorderGraphGenerateInOutDegrees(degrees, edgeList, lmode);
    /* CONSISTENCY: call the Corder list routine instead of the DBG one
       (the original called reorderGraphListDBG, a likely copy-paste slip).
       The two functions are currently line-for-line identical, so behavior
       is unchanged, but future edits to reorderGraphListCorder now take
       effect for this mode as intended. */
    edgeList = reorderGraphListCorder(edgeList, degrees, thresholds, num_buckets, lmode);

    free(thresholds);
    free(degrees);
    return edgeList;
}
/* Corder relabeling (structurally the same algorithm as
   reorderGraphListDBG): bin vertices into num_buckets degree ranges
   (first bucket i with degrees[v] <= thresholds[i]; thresholds ascending,
   last entry UINT32_MAX), then hand out new ids starting with the highest
   bucket so large-degree vertices get the smallest new ids, preserving
   relative order within each (thread, bucket) vector.  lmode is unused
   here -- the caller encodes the metric choice in `degrees`.
   Returns the relabeled edge list. */
struct EdgeList *reorderGraphListCorder(struct EdgeList *edgeList, uint32_t *degrees, uint32_t *thresholds, uint32_t num_buckets, uint32_t lmode)
{
    uint32_t i = 0;
    int32_t j = 0;
    int32_t k = 0;
    void *iter = 0;
    uint32_t v = 0;
    uint32_t t = 0;
    uint32_t temp_idx = 0;
    uint32_t P = 1;
    uint32_t t_id = 0;
    uint32_t offset_start = 0;
    uint32_t offset_end = 0;
    uint32_t *start_idx = NULL;            // per (thread, bucket) starting label
    vc_vector **buckets = NULL;            // per (thread, bucket) vertex vectors
    uint32_t *labels = (uint32_t *) my_malloc(edgeList->num_vertices * sizeof(uint32_t));
    struct Timer *timer = (struct Timer *) malloc(sizeof(struct Timer));

    Start(timer);
    #pragma omp parallel default(none) shared(P,labels,buckets,edgeList,num_buckets,degrees,thresholds,start_idx) firstprivate(iter,temp_idx,k,offset_start,offset_end,t_id,i,j,v,t)
    {
        t_id = omp_get_thread_num();
        // Thread 0 sizes the shared scratch once the team size is known.
        if(t_id == 0)
        {
            P = omp_get_num_threads();
            start_idx = (uint32_t *) my_malloc(P * num_buckets * sizeof(uint32_t));
            buckets = (vc_vector **) malloc(P * num_buckets * sizeof(vc_vector *));
        }
        #pragma omp barrier
        // Per-thread bucket vectors: binning needs no synchronization.
        for (i = 0; i < num_buckets; ++i)
        {
            buckets[(t_id * num_buckets) + i] = vc_vector_create(0, sizeof(uint32_t), NULL);
        }
        // Contiguous vertex slice per thread; last thread takes the remainder.
        offset_start = t_id * (edgeList->num_vertices / P);
        if(t_id == (P - 1))
        {
            offset_end = offset_start + (edgeList->num_vertices / P) + (edgeList->num_vertices % P) ;
        }
        else
        {
            offset_end = offset_start + (edgeList->num_vertices / P);
        }
        // Bin each vertex into the first bucket covering its degree.
        for (v = offset_start; v < offset_end; ++v)
        {
            for ( i = 0; i < num_buckets; ++i)
            {
                if(degrees[v] <= thresholds[i])
                {
                    vc_vector_push_back(buckets[(t_id * num_buckets) + i], &v);
                    break;
                }
            }
        }
        #pragma omp barrier
        // Serial exclusive prefix sum over (bucket, thread) sizes, highest
        // bucket first.  temp_idx is firstprivate; only thread 0's copy counts.
        if(t_id == 0)
        {
            for ( j = num_buckets - 1; j >= 0; --j)
            {
                for (t = 0; t < P; ++t)
                {
                    start_idx[(t * num_buckets) + j] = temp_idx;
                    temp_idx += vc_vector_count(buckets[(t * num_buckets) + j]);
                }
            }
        }
        #pragma omp barrier
        // Each thread writes new ids for its own vectors; ranges are disjoint.
        for ( j = num_buckets - 1 ; j >= 0 ; --j)
        {
            k = start_idx[(t_id * num_buckets) + j];
            for ( iter = vc_vector_begin(buckets[(t_id * num_buckets) + j]);
                    iter != vc_vector_end(buckets[(t_id * num_buckets) + j]);
                    iter = vc_vector_next(buckets[(t_id * num_buckets) + j], iter))
            {
                labels[(*(uint32_t *)iter)] = k++;
            }
        }
    }
    Stop(timer);

    printf(" -----------------------------------------------------\n");
    printf("| %-51s | \n", "Reordering Complete");
    printf(" -----------------------------------------------------\n");
    printf("| %-51f | \n", Seconds(timer));
    printf(" -----------------------------------------------------\n");

    Start(timer);
    edgeList = relabelEdgeList(edgeList, labels);
    Stop(timer);

    printf(" -----------------------------------------------------\n");
    printf("| %-51s | \n", "Relabeling Complete");
    printf(" -----------------------------------------------------\n");
    printf("| %-51f | \n", Seconds(timer));
    printf(" -----------------------------------------------------\n");

    // Keep the label maps consistent with the new numbering.
    // NOTE(review): assumes label_array holds distinct vertex ids, so the
    // concurrent writes to inverse_label_array hit distinct slots -- confirm.
    #pragma omp parallel for
    for (v = 0; v < edgeList->num_vertices; ++v)
    {
        edgeList->label_array[v] = labels[edgeList->label_array[v]];
        edgeList->inverse_label_array[edgeList->label_array[v]] = v;
    }

    // P retains the team size published inside the parallel region.
    for (i = 0; i < (P * num_buckets); ++i)
    {
        vc_vector_release(buckets[i]);
    }
    free(timer);
    free(buckets);
    free(start_idx);
    free(labels);
    return edgeList;
}
// ********************************************************************************************
// *************** HUBSort relabel **************
// ********************************************************************************************
/* HUBSort relabeling driver: builds the degree array for the metric
   selected by lmode (6 = out-degree, 7 = in-degree) and a two-entry
   threshold split (cold <= max(1, avg/2) < hot), then delegates to
   reorderGraphListHUBSort.  Returns the relabeled edge list. */
struct EdgeList *reorderGraphProcessHUBSort( uint32_t sort, struct EdgeList *edgeList, uint32_t lmode)
{
    uint32_t idx;
    uint32_t num_buckets = 2;
    uint32_t *vertexDegrees = (uint32_t *) my_malloc(edgeList->num_vertices * sizeof(uint32_t));
    uint32_t *degreeThresholds = (uint32_t *) my_malloc(num_buckets * sizeof(uint32_t));

    #pragma omp parallel for
    for (idx = 0; idx < edgeList->num_vertices; ++idx)
    {
        vertexDegrees[idx] = 0;
    }

    /* Threshold ladder: first cut at max(1, avg/2); intermediate entries
       double the previous one (none exist for two buckets); the last slot
       is a UINT32_MAX catch-all. */
    degreeThresholds[0] = (edgeList->avg_degree <= 1) ? 1 : (edgeList->avg_degree / 2);
    for (idx = 1; idx < (num_buckets - 1); ++idx)
    {
        degreeThresholds[idx] = degreeThresholds[idx - 1] * 2;
    }
    degreeThresholds[num_buckets - 1] = UINT32_MAX;

    switch(lmode)
    {
    case 6 :
        printf("| %-51s | \n", "HUBSort OUT-DEGREE");
        break;
    case 7 :
        printf("| %-51s | \n", "HUBSort IN-DEGREE");
        break;
    default :
        printf("| %-51s | \n", "HUBSort OUT-DEGREE");
    }

    vertexDegrees = reorderGraphGenerateInOutDegrees(vertexDegrees, edgeList, lmode);
    edgeList = reorderGraphListHUBSort(edgeList, vertexDegrees, degreeThresholds, num_buckets, lmode);

    free(degreeThresholds);
    free(vertexDegrees);
    return edgeList;
}
/* HUBSort relabeling.
   Vertices are split into buckets by degree (bucket 0 = "cold" with
   degrees[v] <= thresholds[0], bucket 1 = "hot" rest).  Hot vertices are
   gathered, sorted by degree, and given the smallest new ids in
   descending-degree order; cold vertices follow, keeping their gathered
   order.  lmode is unused here -- the caller encodes the metric in
   `degrees`.  Returns the relabeled edge list.
   NOTE(review): the label-assignment code below hardcodes bucket indices
   [0] and [1] (sizeHot[1], verticesHot[0/1]), so it assumes
   num_buckets == 2 -- confirm callers never pass anything else. */
struct EdgeList *reorderGraphListHUBSort(struct EdgeList *edgeList, uint32_t *degrees, uint32_t *thresholds, uint32_t num_buckets, uint32_t lmode)
{
    uint32_t i = 0;
    int32_t j = 0;
    int32_t k = 0;
    void *iter = 0;
    uint32_t v = 0;
    uint32_t t = 0;
    uint32_t temp_idx = 0;
    uint32_t P = 1;
    uint32_t t_id = 0;
    uint32_t offset_start = 0;
    uint32_t offset_end = 0;
    uint32_t *start_idx = NULL;            // per (thread, bucket) gather offset
    vc_vector **buckets = NULL;            // per (thread, bucket) vertex vectors
    uint32_t *labels = (uint32_t *) my_malloc(edgeList->num_vertices * sizeof(uint32_t));
    struct Timer *timer = (struct Timer *) malloc(sizeof(struct Timer));
    uint32_t *sizeHot = (uint32_t *) my_malloc(num_buckets * sizeof(uint32_t));       // total size per bucket
    uint32_t **degreesHot = (uint32_t **) my_malloc(num_buckets * sizeof(uint32_t *)); // gathered degrees per bucket
    uint32_t **verticesHot = (uint32_t **) my_malloc(num_buckets * sizeof(uint32_t *)); // gathered vertices per bucket

    Start(timer);
    #pragma omp parallel default(none) shared(P,verticesHot,degreesHot,sizeHot,labels,buckets,edgeList,num_buckets,degrees,thresholds,start_idx) firstprivate(iter,temp_idx,k,offset_start,offset_end,t_id,i,j,v,t)
    {
        t_id = omp_get_thread_num();
        // Thread 0 sizes the shared scratch once the team size is known.
        if(t_id == 0)
        {
            P = omp_get_num_threads();
            start_idx = (uint32_t *) my_malloc(P * num_buckets * sizeof(uint32_t));
            buckets = (vc_vector **) malloc(P * num_buckets * sizeof(vc_vector *));
        }
        #pragma omp barrier
        // Per-thread bucket vectors: binning needs no synchronization.
        for (i = 0; i < num_buckets; ++i)
        {
            buckets[(t_id * num_buckets) + i] = vc_vector_create(0, sizeof(uint32_t), NULL);
        }
        // Contiguous vertex slice per thread; last thread takes the remainder.
        offset_start = t_id * (edgeList->num_vertices / P);
        if(t_id == (P - 1))
        {
            offset_end = offset_start + (edgeList->num_vertices / P) + (edgeList->num_vertices % P) ;
        }
        else
        {
            offset_end = offset_start + (edgeList->num_vertices / P);
        }
        // Bin each vertex into the first bucket covering its degree.
        for (v = offset_start; v < offset_end; ++v)
        {
            for ( i = 0; i < num_buckets; ++i)
            {
                if(degrees[v] <= thresholds[i])
                {
                    vc_vector_push_back(buckets[(t_id * num_buckets) + i], &v);
                    break;
                }
            }
        }
        #pragma omp barrier
        // Per-bucket exclusive prefix sum over thread vector sizes
        // (temp_idx restarts at 0 for each bucket, unlike the DBG variant,
        // because each bucket is gathered into its own array).  Thread 0
        // also allocates the gather arrays once the bucket sizes are known.
        if(t_id == 0)
        {
            for ( j = num_buckets - 1; j >= 0; --j)
            {
                temp_idx = 0;
                for (t = 0; t < P; ++t)
                {
                    start_idx[(t * num_buckets) + j] = temp_idx;
                    temp_idx += vc_vector_count(buckets[(t * num_buckets) + j]);
                }
                sizeHot[j] = temp_idx;
                degreesHot[j] = (uint32_t *) my_malloc(sizeHot[j] * sizeof(uint32_t));
                verticesHot[j] = (uint32_t *) my_malloc(sizeHot[j] * sizeof(uint32_t));
            }
        }
        #pragma omp barrier
        // Gather each thread's binned vertices (and their degrees) into the
        // per-bucket arrays at disjoint offsets -- no races.
        for ( j = num_buckets - 1 ; j >= 0 ; --j)
        {
            k = start_idx[(t_id * num_buckets) + j];
            for ( iter = vc_vector_begin(buckets[(t_id * num_buckets) + j]);
                    iter != vc_vector_end(buckets[(t_id * num_buckets) + j]);
                    iter = vc_vector_next(buckets[(t_id * num_buckets) + j], iter))
            {
                verticesHot[j][k] = (*(uint32_t *)iter);
                degreesHot[j][k] = degrees[(*(uint32_t *)iter)];
                k++;
            }
        }
    }
    // Sort only the hot bucket by degree (ascending, stable).
    verticesHot[num_buckets - 1] = radixSortEdgesByDegree(degreesHot[num_buckets - 1], verticesHot[num_buckets - 1], sizeHot[num_buckets - 1]);
    // Hot vertices: ids [0, sizeHot[1]) in descending-degree order.
    #pragma omp parallel for
    for(v = 0; v < sizeHot[1]; v++)
    {
        labels[verticesHot[1][v]] = sizeHot[1] - 1 - v;
    }
    // Cold vertices: ids [sizeHot[1], num_vertices) in gathered order.
    #pragma omp parallel for
    for(v = 0; v < sizeHot[0]; v++)
    {
        labels[verticesHot[0][v]] = sizeHot[1] + (v);
    }
    Stop(timer);

    printf(" -----------------------------------------------------\n");
    printf("| %-51s | \n", "Reordering Complete");
    printf(" -----------------------------------------------------\n");
    printf("| %-51f | \n", Seconds(timer));
    printf(" -----------------------------------------------------\n");

    Start(timer);
    edgeList = relabelEdgeList(edgeList, labels);
    Stop(timer);

    printf(" -----------------------------------------------------\n");
    printf("| %-51s | \n", "Relabeling Complete");
    printf(" -----------------------------------------------------\n");
    printf("| %-51f | \n", Seconds(timer));
    printf(" -----------------------------------------------------\n");

    // Keep the label maps consistent with the new numbering.
    // NOTE(review): assumes label_array holds distinct vertex ids, so the
    // concurrent writes to inverse_label_array hit distinct slots -- confirm.
    #pragma omp parallel for
    for (v = 0; v < edgeList->num_vertices; ++v)
    {
        edgeList->label_array[v] = labels[edgeList->label_array[v]];
        edgeList->inverse_label_array[edgeList->label_array[v]] = v;
    }

    // P retains the team size published inside the parallel region.
    for (i = 0; i < (P * num_buckets); ++i)
    {
        vc_vector_release(buckets[i]);
    }
    for (i = 0; i < num_buckets; ++i)
    {
        free(degreesHot[i]);
        free(verticesHot[i]);
    }
    free(degreesHot);
    free(verticesHot);
    free(sizeHot);
    free(timer);
    free(buckets);
    free(start_idx);
    free(labels);
    return edgeList;
}
// ********************************************************************************************
// *************** HUBCluster relabel **************
// ********************************************************************************************
/* HUBCluster relabeling driver: builds the degree array for the metric
   selected by lmode (8 = out-degree, 9 = in-degree) and a single
   threshold cut at the average degree, then delegates to
   reorderGraphListHUBCluster.  Returns the relabeled edge list. */
struct EdgeList *reorderGraphProcessHUBCluster( uint32_t sort, struct EdgeList *edgeList, uint32_t lmode)
{
    uint32_t idx;
    uint32_t num_buckets = 2;
    uint32_t *vertexDegrees = (uint32_t *) my_malloc(edgeList->num_vertices * sizeof(uint32_t));
    uint32_t *degreeThresholds = (uint32_t *) my_malloc(num_buckets * sizeof(uint32_t));

    #pragma omp parallel for
    for (idx = 0; idx < edgeList->num_vertices; ++idx)
    {
        vertexDegrees[idx] = 0;
    }

    /* Single cut at the average degree: bucket 0 holds degree <= avg,
       bucket 1 (UINT32_MAX catch-all) holds the rest. */
    degreeThresholds[0] = (edgeList->avg_degree);
    degreeThresholds[num_buckets - 1] = UINT32_MAX;

    switch(lmode)
    {
    case 8 :
        printf("| %-51s | \n", "HUBCluster OUT-DEGREE");
        break;
    case 9 :
        printf("| %-51s | \n", "HUBCluster IN-DEGREE");
        break;
    default :
        printf("| %-51s | \n", "HUBCluster OUT-DEGREE");
    }

    vertexDegrees = reorderGraphGenerateInOutDegrees(vertexDegrees, edgeList, lmode);
    edgeList = reorderGraphListHUBCluster(edgeList, vertexDegrees, degreeThresholds, num_buckets, lmode);

    free(degreeThresholds);
    free(vertexDegrees);
    return edgeList;
}
/* HUBCluster relabeling: bins vertices into num_buckets degree ranges
   (first bucket i with degrees[v] <= thresholds[i]; thresholds ascending,
   last entry UINT32_MAX) and hands out new ids starting with the highest
   bucket, so above-threshold ("hot") vertices get the smallest new ids.
   Unlike HUBSort, no sorting is done -- relative order inside each
   (thread, bucket) vector is preserved (pure clustering).  lmode is
   unused here -- the caller encodes the metric choice in `degrees`.
   Returns the relabeled edge list. */
struct EdgeList *reorderGraphListHUBCluster(struct EdgeList *edgeList, uint32_t *degrees, uint32_t *thresholds, uint32_t num_buckets, uint32_t lmode)
{
    uint32_t i = 0;
    int32_t j = 0;
    int32_t k = 0;
    void *iter = 0;
    uint32_t v = 0;
    uint32_t t = 0;
    uint32_t temp_idx = 0;
    uint32_t P = 1;
    uint32_t t_id = 0;
    uint32_t offset_start = 0;
    uint32_t offset_end = 0;
    uint32_t *start_idx = NULL;            // per (thread, bucket) starting label
    vc_vector **buckets = NULL;            // per (thread, bucket) vertex vectors
    uint32_t *labels = (uint32_t *) my_malloc(edgeList->num_vertices * sizeof(uint32_t));
    struct Timer *timer = (struct Timer *) malloc(sizeof(struct Timer));

    Start(timer);
    #pragma omp parallel default(none) shared(P,labels,buckets,edgeList,num_buckets,degrees,thresholds,start_idx) firstprivate(iter,temp_idx,k,offset_start,offset_end,t_id,i,j,v,t)
    {
        t_id = omp_get_thread_num();
        // Thread 0 sizes the shared scratch once the team size is known.
        if(t_id == 0)
        {
            P = omp_get_num_threads();
            start_idx = (uint32_t *) my_malloc(P * num_buckets * sizeof(uint32_t));
            buckets = (vc_vector **) malloc(P * num_buckets * sizeof(vc_vector *));
        }
        #pragma omp barrier
        // Per-thread bucket vectors: binning needs no synchronization.
        for (i = 0; i < num_buckets; ++i)
        {
            buckets[(t_id * num_buckets) + i] = vc_vector_create(0, sizeof(uint32_t), NULL);
        }
        // Contiguous vertex slice per thread; last thread takes the remainder.
        offset_start = t_id * (edgeList->num_vertices / P);
        if(t_id == (P - 1))
        {
            offset_end = offset_start + (edgeList->num_vertices / P) + (edgeList->num_vertices % P) ;
        }
        else
        {
            offset_end = offset_start + (edgeList->num_vertices / P);
        }
        // Bin each vertex into the first bucket covering its degree.
        for (v = offset_start; v < offset_end; ++v)
        {
            for ( i = 0; i < num_buckets; ++i)
            {
                if(degrees[v] <= thresholds[i])
                {
                    vc_vector_push_back(buckets[(t_id * num_buckets) + i], &v);
                    break;
                }
            }
        }
        #pragma omp barrier
        // Serial exclusive prefix sum over (bucket, thread) sizes, highest
        // bucket first so hot vertices get the smallest new ids.
        // temp_idx is firstprivate; only thread 0's copy is used here.
        if(t_id == 0)
        {
            for ( j = num_buckets - 1; j >= 0; --j)
            {
                for (t = 0; t < P; ++t)
                {
                    start_idx[(t * num_buckets) + j] = temp_idx;
                    temp_idx += vc_vector_count(buckets[(t * num_buckets) + j]);
                }
            }
        }
        #pragma omp barrier
        // Each thread writes new ids for its own vectors; ranges are disjoint.
        for ( j = num_buckets - 1 ; j >= 0 ; --j)
        {
            k = start_idx[(t_id * num_buckets) + j];
            for ( iter = vc_vector_begin(buckets[(t_id * num_buckets) + j]);
                    iter != vc_vector_end(buckets[(t_id * num_buckets) + j]);
                    iter = vc_vector_next(buckets[(t_id * num_buckets) + j], iter))
            {
                labels[(*(uint32_t *)iter)] = k++;
            }
        }
    }
    Stop(timer);

    printf(" -----------------------------------------------------\n");
    printf("| %-51s | \n", "Reordering Complete");
    printf(" -----------------------------------------------------\n");
    printf("| %-51f | \n", Seconds(timer));
    printf(" -----------------------------------------------------\n");

    Start(timer);
    edgeList = relabelEdgeList(edgeList, labels);
    Stop(timer);

    printf(" -----------------------------------------------------\n");
    printf("| %-51s | \n", "Relabeling Complete");
    printf(" -----------------------------------------------------\n");
    printf("| %-51f | \n", Seconds(timer));
    printf(" -----------------------------------------------------\n");

    // Keep the label maps consistent with the new numbering.
    // NOTE(review): assumes label_array holds distinct vertex ids, so the
    // concurrent writes to inverse_label_array hit distinct slots -- confirm.
    #pragma omp parallel for
    for (v = 0; v < edgeList->num_vertices; ++v)
    {
        edgeList->label_array[v] = labels[edgeList->label_array[v]];
        edgeList->inverse_label_array[edgeList->label_array[v]] = v;
    }

    // P retains the team size published inside the parallel region.
    for (i = 0; i < (P * num_buckets); ++i)
    {
        vc_vector_release(buckets[i]);
    }
    free(timer);
    free(buckets);
    free(start_idx);
    free(labels);
    return edgeList;
}
// ********************************************************************************************
// *************** AccelGraph label-Masking **************
// ********************************************************************************************
struct EdgeList *maskGraphProcess(struct EdgeList *edgeList, struct Arguments *arguments)
{
    // Driver for the masking pass: prints banners, times the step, and
    // dispatches on arguments->lmode.  Every lmode (1-4 and the fallback)
    // currently maps to the same degree-based masking routine, so the
    // dispatch collapses to a single call.
    struct Timer *timer = (struct Timer *) malloc(sizeof(struct Timer));
    const uint32_t cache_size = ((arguments->cache_size) >> 2); // quarter of the configured cache size

    printf(" *****************************************************\n");
    printf(" -----------------------------------------------------\n");
    printf("| %-51s | \n", "Mask Process");
    printf(" -----------------------------------------------------\n");

    Start(timer);
    edgeList = maskGraphProcessDegree(edgeList, arguments->mmode, cache_size); // degree / out-degree
    Stop(timer);

    printf(" -----------------------------------------------------\n");
    printf("| %-51s | \n", "Total Mask Complete");
    printf(" -----------------------------------------------------\n");
    printf("| %-51f | \n", Seconds(timer));
    printf(" -----------------------------------------------------\n");
    printf(" *****************************************************\n");

    free(timer);
    return edgeList;
}
struct EdgeList *maskGraphProcessDegree( struct EdgeList *edgeList, uint32_t mmode, uint32_t cache_size)
{
    // Degree-based masking driver: zeroes per-vertex degree counters,
    // builds logarithmic degree thresholds starting at half the average
    // degree (last bucket is a catch-all), fills the degrees according to
    // the mask mode, and delegates mask assignment to
    // maskGraphProcessGenerateMaskArray.
    uint32_t i;
    uint32_t *degrees;
    uint32_t *thresholds;
    uint32_t num_buckets = 11;
    degrees = (uint32_t *) my_malloc(edgeList->num_vertices * sizeof(uint32_t));
    thresholds = (uint32_t *) my_malloc(num_buckets * sizeof(uint32_t));
    // BUGFIX: this was "#pragma omp parallel" (without "for"), which made
    // EVERY thread run the entire loop with a shared index i -- a data race
    // on i plus redundant work.  "parallel for" splits the iterations.
    #pragma omp parallel for
    for (i = 0; i < edgeList->num_vertices; ++i)
    {
        degrees[i] = 0;
    }
    // START initialize thresholds: [avg/2, avg, 2*avg, ...], last is UINT32_MAX
    if(edgeList->avg_degree <= 1)
        thresholds[0] = 1;
    else
        thresholds[0] = (edgeList->avg_degree / 2);
    for ( i = 1; i < (num_buckets - 1); ++i)
    {
        thresholds[i] = thresholds[i - 1] * 2;
    }
    thresholds[num_buckets - 1] = UINT32_MAX;
    // END initialize thresholds
    switch(mmode)
    {
    case 1 :
        printf("| %-51s | \n", "Vertex Property OUT-DEGREE");
        break;
    case 2 :
        printf("| %-51s | \n", "Vertex Structure IN-DEGREE");
        break;
    case 3 :
        printf("| %-51s | \n", "Vertex Property OUT-DEGREE");
        break;
    case 4 :
        printf("| %-51s | \n", "Vertex Structure IN-DEGREE");
        break;
    default :
        printf("| %-51s | \n", "Vertex Property OUT-DEGREE");
    }
    degrees = maskGraphProcessGenerateInOutDegrees(degrees, edgeList, mmode);
    edgeList = maskGraphProcessGenerateMaskArray(edgeList, degrees, thresholds, num_buckets, mmode, cache_size);
    free(thresholds);
    free(degrees);
    return edgeList;
}
// Count per-vertex degrees from the edge list according to the mask mode:
//   mmode 1/3: out-degree (count src endpoints)
//   mmode 2/4: in-degree  (count dest endpoints)
//   mmode 5/6: total degree (count both endpoints)
//   default  : out-degree
// degrees must be pre-zeroed by the caller (maskGraphProcessDegree does this).
// Returns the same degrees buffer, filled in.
uint32_t *maskGraphProcessGenerateInOutDegrees(uint32_t *degrees, struct EdgeList *edgeList, uint32_t mmode)
{
    uint32_t i;
    uint32_t src;
    uint32_t dest;
    // Edges are scanned in parallel; the increments are atomic because many
    // edges can share an endpoint.
    #pragma omp parallel for default(none) private(i,src,dest) shared(edgeList,degrees,mmode)
    for(i = 0; i < edgeList->num_edges; i++)
    {
        src = edgeList->edges_array_src[i];
        dest = edgeList->edges_array_dest[i];
        switch(mmode)
        {
        case 1 :
        case 3 :
        {
            #pragma omp atomic update
            degrees[src]++;
        }
        break;
        case 2 :
        case 4 :
        {
            #pragma omp atomic update
            degrees[dest]++;
        }
        break;
        case 5 :
        case 6 :
        {
            #pragma omp atomic update
            degrees[dest]++;
            #pragma omp atomic update
            degrees[src]++;
        }
        break;
        default :
        {
            #pragma omp atomic update
            degrees[src]++;
        }// out-degree
        }
    }
    return degrees;
}
// Assign each vertex a cache-residency mask (HOT/WARM/LUKEWARM/COLD).
// Vertices are bucketed by degree per thread, then walked from the
// highest-degree bucket down; RegionAtomicDecrement presumably spends one
// unit of the per-region budget in cache_regions and reports whether a slot
// remained -- TODO confirm its contract.  When mmode is 1 or 2 the mask is
// also OR-ed into the edge endpoints via maskEdgeList; the mask is always
// copied into edgeList->mask_array.
// NOTE(review): "labels" is allocated and freed but never written or read
// here -- looks like a leftover from the reordering variant; confirm.
struct EdgeList *maskGraphProcessGenerateMaskArray(struct EdgeList *edgeList, uint32_t *degrees, uint32_t *thresholds, uint32_t num_buckets, uint32_t mmode, uint32_t cache_size)
{
    uint32_t i = 0;
    int32_t j = 0;                 // signed: bucket loops count down past 0
    void *iter = 0;
    uint32_t v = 0;
    uint32_t t = 0;
    uint32_t temp_idx = 0;
    uint32_t P = 1;                // number of OpenMP threads (set by thread 0)
    uint32_t t_id = 0;
    uint32_t offset_start = 0;
    uint32_t offset_end = 0;
    uint32_t num_masks = 4;        // HOT, WARM, LUKEWARM, COLD
    uint32_t *start_idx = NULL;    // P x num_buckets prefix-sum table (allocated by thread 0)
    vc_vector **buckets = NULL;    // P x num_buckets degree buckets, one row per thread
    uint32_t *labels = (uint32_t *) my_malloc(edgeList->num_vertices * sizeof(uint32_t));
    uint32_t *mask_array = (uint32_t *) my_malloc(edgeList->num_vertices * sizeof(uint32_t));
    struct Timer *timer = (struct Timer *) malloc(sizeof(struct Timer));
    uint32_t *cache_regions = (uint32_t *) my_malloc(num_masks * sizeof(uint32_t));
    // Size the three hot regions: if the graph barely exceeds the cache,
    // split the vertices in thirds; otherwise scale budgets from cache_size.
    int diff = (int)edgeList->num_vertices - (int)cache_size;
    if(diff < (2 * (int)cache_size))
    {
        cache_regions[0] = edgeList->num_vertices / 3; // VERTEX_VALUE_HOT_U32
        cache_regions[1] = edgeList->num_vertices / 3; // VERTEX_CACHE_WARM_U32
        cache_regions[2] = edgeList->num_vertices / 3; // VERTEX_VALUE_LUKEWARM_U32
    }
    else
    {
        cache_regions[0] = cache_size*8; // VERTEX_VALUE_HOT_U32
        cache_regions[1] = cache_regions[0]*4; // VERTEX_CACHE_WARM_U32
        cache_regions[2] = cache_regions[1]*4; // VERTEX_VALUE_LUKEWARM_U32
    }
    cache_regions[3] = UINT32_MAX; // VERTEX_CACHE_COLD_U32 (unbounded fallback)
    // Default every vertex to COLD; hotter masks are assigned below.
    #pragma omp parallel for
    for (i = 0; i < edgeList->num_vertices; ++i)
    {
        mask_array[i] = VERTEX_CACHE_COLD_U32;
    }
    Start(timer);
    #pragma omp parallel default(none) shared(P,mask_array,mmode,cache_regions,labels,buckets,edgeList,num_buckets,degrees,thresholds,start_idx) firstprivate(iter,temp_idx,offset_start,offset_end,t_id,i,j,v,t)
    {
        t_id = omp_get_thread_num();
        // Thread 0 allocates the shared tables; the barrier publishes them.
        if(t_id == 0)
        {
            P = omp_get_num_threads();
            start_idx = (uint32_t *) my_malloc(P * num_buckets * sizeof(uint32_t));
            buckets = (vc_vector **) malloc(P * num_buckets * sizeof(vc_vector *));
        }
        #pragma omp barrier
        // Each thread creates its own row of buckets.
        for (i = 0; i < num_buckets; ++i)
        {
            buckets[(t_id * num_buckets) + i] = vc_vector_create(0, sizeof(uint32_t), NULL);
        }
        // Static vertex partition; the last thread absorbs the remainder.
        offset_start = t_id * (edgeList->num_vertices / P);
        if(t_id == (P - 1))
        {
            offset_end = offset_start + (edgeList->num_vertices / P) + (edgeList->num_vertices % P) ;
        }
        else
        {
            offset_end = offset_start + (edgeList->num_vertices / P);
        }
        // Place each owned vertex into the first bucket whose threshold
        // covers its degree (thresholds ascend; last is UINT32_MAX).
        for (v = offset_start; v < offset_end; ++v)
        {
            for ( i = 0; i < num_buckets; ++i)
            {
                if(degrees[v] <= thresholds[i])
                {
                    vc_vector_push_back(buckets[(t_id * num_buckets) + i], &v);
                    break;
                }
            }
        }
        #pragma omp barrier
        // Thread 0 computes exclusive prefix sums over bucket sizes, walking
        // buckets from highest degree down so hot vertices come first.
        if(t_id == 0)
        {
            for ( j = num_buckets - 1; j >= 0; --j)
            {
                for (t = 0; t < P; ++t)
                {
                    start_idx[(t * num_buckets) + j] = temp_idx;
                    temp_idx += vc_vector_count(buckets[(t * num_buckets) + j]);
                }
            }
        }
        #pragma omp barrier
        // Spend the region budgets in hot-to-cold order; each vertex gets
        // the hottest mask whose budget still has room.
        for ( j = num_buckets - 1 ; j >= 0 ; --j)
        {
            for ( iter = vc_vector_begin(buckets[(t_id * num_buckets) + j]);
                    iter != vc_vector_end(buckets[(t_id * num_buckets) + j]);
                    iter = vc_vector_next(buckets[(t_id * num_buckets) + j], iter))
            {
                if(RegionAtomicDecrement(&(cache_regions[0])))
                {
                    mask_array[(*(uint32_t *)iter)] = VERTEX_VALUE_HOT_U32;
                }
                else if(RegionAtomicDecrement(&(cache_regions[1])))
                {
                    mask_array[(*(uint32_t *)iter)] = VERTEX_CACHE_WARM_U32;
                }
                else if(RegionAtomicDecrement(&(cache_regions[2])))
                {
                    mask_array[(*(uint32_t *)iter)] = VERTEX_VALUE_LUKEWARM_U32;
                }
                else
                {
                    mask_array[(*(uint32_t *)iter)] = VERTEX_CACHE_COLD_U32;
                }
            }
        }
    }
    Stop(timer);
    printf(" -----------------------------------------------------\n");
    printf("| %-51s | \n", "Mask Complete");
    printf(" -----------------------------------------------------\n");
    printf("| %-51f | \n", Seconds(timer));
    printf(" -----------------------------------------------------\n");
    // Modes 1 and 2 bake the mask bits directly into the edge endpoints.
    if(mmode == 1 || mmode == 2)
    {
        Start(timer);
        edgeList = maskEdgeList(edgeList, mask_array);
        Stop(timer);
        printf(" -----------------------------------------------------\n");
        printf("| %-51s | \n", "Relabeling Complete");
        printf(" -----------------------------------------------------\n");
        printf("| %-51f | \n", Seconds(timer));
        printf(" -----------------------------------------------------\n");
    }
    #pragma omp parallel for
    for (i = 0; i < edgeList->num_vertices; ++i)
    {
        edgeList->mask_array[i] = mask_array[i];
    }
    for (i = 0; i < (P * num_buckets); ++i)
    {
        vc_vector_release(buckets[i]);
    }
    free(mask_array);
    free(timer);
    free(buckets);
    free(start_idx);
    free(labels);
    free(cache_regions);
    return edgeList;
}
// ********************************************************************************************
// *************** generic functions **************
// ********************************************************************************************
// Fill per-vertex degree counters for the reordering pass.
//   lmode 1/4/6/8: out-degree     lmode 2/5/7/9: in-degree
//   lmode 3: total degree         lmode 10: pseudo-random "degrees"
//   default: out-degree
// degrees is assumed pre-zeroed by the caller for the counting modes.
// Returns the same degrees buffer, filled in.
uint32_t *reorderGraphGenerateInOutDegrees(uint32_t *degrees, struct EdgeList *edgeList, uint32_t lmode)
{
    uint32_t i;
    uint32_t src;
    uint32_t dest;
    if(lmode != 10)
    {
        #pragma omp parallel for default(none) private(i,src,dest) shared(edgeList,degrees,lmode)
        for(i = 0; i < edgeList->num_edges; i++)
        {
            src = edgeList->edges_array_src[i];
            dest = edgeList->edges_array_dest[i];
            switch(lmode)
            {
            case 1 :
            case 4 :
            case 6 :
            case 8 :
            {
                #pragma omp atomic update
                degrees[src]++;
            } // degree
            break;
            case 2 :
            case 5 :
            case 7 :
            case 9 :
            {
                #pragma omp atomic update
                degrees[dest]++;
            }
            break;
            case 3 :
            {
                #pragma omp atomic update
                degrees[dest]++;
                #pragma omp atomic update
                degrees[src]++;
            }
            break;
            default :
            {
                #pragma omp atomic update
                degrees[src]++;
            }// out-degree
            }
        }
    }
    if(lmode == 10)
    {
        // BUGFIX: the old code seeded ONE heap-allocated Mersenne state and
        // passed the POINTER firstprivate to every thread, so all threads
        // mutated the same generator state concurrently (a data race and
        // nondeterministic output).  Give each thread its own stack state,
        // seeded distinctly by thread id; output stays pseudo-random.
        #pragma omp parallel
        {
            mt19937state mt19937var;
            initializeMersenneState(&mt19937var, 27491095 + (uint32_t) omp_get_thread_num());
            #pragma omp for
            for (i = 0; i < edgeList->num_vertices; ++i)
            {
                degrees[i] = (generateRandInt(&mt19937var) % edgeList->num_vertices) + omp_get_thread_num();
            }
        }
    }
    return degrees;
}
struct EdgeList *reorderGraphProcess(struct EdgeList *edgeList, struct Arguments *arguments)
{
    // Dispatch to the reordering algorithm selected by arguments->lmode and
    // report wall-clock time for the whole step.
    struct Timer *timer = (struct Timer *) malloc(sizeof(struct Timer));
    const uint32_t lmode = arguments->lmode;

    // printf("Filename : %s \n",fnameb);
    printf(" *****************************************************\n");
    printf(" -----------------------------------------------------\n");
    printf("| %-51s | \n", "Reorder Process");
    printf(" -----------------------------------------------------\n");

    Start(timer);
    if ((lmode == 4) || (lmode == 5))
        edgeList = reorderGraphProcessDBG( arguments->sort, edgeList, arguments->lmode);// DBG
    else if ((lmode == 6) || (lmode == 7))
        edgeList = reorderGraphProcessHUBSort( arguments->sort, edgeList, arguments->lmode);// HUBSort
    else if ((lmode == 8) || (lmode == 9))
        edgeList = reorderGraphProcessHUBCluster( arguments->sort, edgeList, arguments->lmode);// HUBCluster
    else if (lmode == 11)
        edgeList = relabelEdgeListFromFile(edgeList, arguments->fnamel, edgeList->num_vertices);// load from file
    else if ((lmode == 12) || (lmode == 13))
        edgeList = reorderGraphProcessCorder( arguments->sort, edgeList, arguments->lmode);// Corder
    else
        edgeList = reorderGraphProcessDegree( arguments->sort, edgeList, arguments->lmode);// degree / out-degree (1,2,3,10,default)
    Stop(timer);

    printf(" -----------------------------------------------------\n");
    printf("| %-51s | \n", "Total Reorder Complete");
    printf(" -----------------------------------------------------\n");
    printf("| %-51f | \n", Seconds(timer));
    printf(" -----------------------------------------------------\n");
    printf(" *****************************************************\n");

    free(timer);
    return edgeList;
}
struct EdgeList *relabelEdgeList(struct EdgeList *edgeList, uint32_t *labels)
{
    // Rewrite both endpoints of every edge through the labels permutation
    // (labels[old_id] == new_id).  Edges are independent, so the loop is
    // embarrassingly parallel.
    uint32_t e;
    #pragma omp parallel for
    for(e = 0; e < edgeList->num_edges; e++)
    {
        edgeList->edges_array_src[e]  = labels[edgeList->edges_array_src[e]];
        edgeList->edges_array_dest[e] = labels[edgeList->edges_array_dest[e]];
    }
    return edgeList;
}
struct EdgeList *maskEdgeList(struct EdgeList *edgeList, uint32_t *mask_array)
{
    // OR each endpoint's cache mask bits into its vertex id, for both ends
    // of every edge.  Independent iterations run in parallel.
    uint32_t e;
    #pragma omp parallel for
    for(e = 0; e < edgeList->num_edges; e++)
    {
        const uint32_t s = edgeList->edges_array_src[e];
        const uint32_t d = edgeList->edges_array_dest[e];
        edgeList->edges_array_src[e] = s | mask_array[s];
        edgeList->edges_array_dest[e] = d | mask_array[d];
    }
    return edgeList;
}
// ********************************************************************************************
// *************** File relabel **************
// ********************************************************************************************
struct EdgeList *relabelEdgeListFromFile(struct EdgeList *edgeList, const char *fnameb, uint32_t size)
{
    // Load one label per line from the text file fnameb, then relabel the
    // edge list and its label/inverse-label arrays accordingly.
    // Returns NULL when the file cannot be opened; otherwise edgeList.
    // NOTE: size is kept for interface compatibility; the read is bounded
    // by edgeList->num_vertices.  If the file is shorter than that, the
    // remaining label entries stay uninitialized (same as before) -- the
    // caller must supply a complete label file.
    FILE *pText;
    int scanned;
    uint32_t v = 0;
    uint32_t dest = 0;
    uint32_t x = 0;
    uint32_t *labels = (uint32_t *) my_malloc(edgeList->num_vertices * sizeof(uint32_t));
    printf("%s\n", fnameb );
    pText = fopen(fnameb, "r");
    if (pText == NULL)
    {
        free(labels); // BUGFIX: labels was leaked on open failure
        return NULL;
    }
    while (x < edgeList->num_vertices)
    {
        // BUGFIX: the old loop stored dest BEFORE checking for EOF, so a
        // short file wrote one stale value; it also looped forever when
        // fscanf returned 0 (malformed line).  Stop on any non-match.
        scanned = fscanf(pText, "%u\n", &dest);
        if (scanned != 1)
            break;
        labels[x] = dest;
        x++;
    }
    fclose(pText);
    edgeList = relabelEdgeList(edgeList, labels);
    // Keep the label bookkeeping arrays consistent with the new numbering.
    #pragma omp parallel for
    for (v = 0; v < edgeList->num_vertices; ++v)
    {
        edgeList->label_array[v] = labels[edgeList->label_array[v]];
        edgeList->inverse_label_array[edgeList->label_array[v]] = v;
    }
    free(labels);
    return edgeList;
}
// Write "index label" pairs, one per line, to the text file fnameb.
// size is the number of entries in labels.  On open failure the function
// reports the error and returns without writing (previously it passed the
// NULL stream to fprintf/fclose -- undefined behavior).
void writeLabelsToFile(const char *fnameb, uint32_t *labels, uint32_t size)
{
    FILE *fptr;
    uint32_t x;
    fptr = fopen(fnameb, "w");
    if (fptr == NULL) // BUGFIX: missing fopen failure check
    {
        fprintf(stderr, "writeLabelsToFile: failed to open %s for writing\n", fnameb);
        return;
    }
    for(x = 0; x < size; x++)
    {
        fprintf(fptr, "%u %u\n", x, labels[x]);
    }
    fclose(fptr);
}
|
GB_unaryop__minv_fp64_uint32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__minv_fp64_uint32
// op(A') function: GB_tran__minv_fp64_uint32
// C type: double
// A type: uint32_t
// cast: double cij = (double) aij
// unaryop: cij = 1./aij
#define GB_ATYPE \
uint32_t
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = 1./x ;
// casting
#define GB_CASTING(z, x) \
double z = (double) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_FP64 || GxB_NO_UINT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = 1. / ((double) Ax [p]) for all p in [0, anz), parallelized with
// a static OpenMP schedule over nthreads.  Returns GrB_NO_VALUE when this
// kernel is compiled out via GB_DISABLE, otherwise GrB_SUCCESS.
// (Auto-generated SuiteSparse:GraphBLAS kernel -- behavior is fixed by the
// GB_* macros defined earlier in this file.)
GrB_Info GB_unop__minv_fp64_uint32
(
    double *restrict Cx,            // output array, length anz
    const uint32_t *restrict Ax,    // input array, length anz
    int64_t anz,                    // number of entries
    int nthreads                    // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        // expands to: Cx [p] = 1. / (double) Ax [p]
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A, typecast uint32->double, and apply the
// multiplicative-inverse operator.  The actual work is textually included
// from GB_unaryop_transpose.c, instantiated with the GB_* macros defined
// earlier in this file (this is phase 2 of 2 of the transpose template).
// Returns GrB_NO_VALUE when compiled out via GB_DISABLE, else GrB_SUCCESS.
GrB_Info GB_tran__minv_fp64_uint32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
fx.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% FFFFF X X %
% F X X %
% FFF X %
% F X X %
% F X X %
% %
% %
% MagickCore Image Special Effects Methods %
% %
% Software Design %
% Cristy %
% October 1996 %
% %
% %
% %
% Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/accelerate-private.h"
#include "MagickCore/annotate.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/decorate.h"
#include "MagickCore/distort.h"
#include "MagickCore/draw.h"
#include "MagickCore/effect.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/fx.h"
#include "MagickCore/fx-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/gem-private.h"
#include "MagickCore/geometry.h"
#include "MagickCore/layer.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/property.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/random_.h"
#include "MagickCore/random-private.h"
#include "MagickCore/resample.h"
#include "MagickCore/resample-private.h"
#include "MagickCore/resize.h"
#include "MagickCore/resource_.h"
#include "MagickCore/splay-tree.h"
#include "MagickCore/statistic.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/threshold.h"
#include "MagickCore/transform.h"
#include "MagickCore/transform-private.h"
#include "MagickCore/utility.h"
#include "ios_error.h"
/*
Typedef declarations.
*/
typedef enum
{
BitwiseAndAssignmentOperator = 0xd9U,
BitwiseOrAssignmentOperator,
LeftShiftAssignmentOperator,
RightShiftAssignmentOperator,
PowerAssignmentOperator,
ModuloAssignmentOperator,
PlusAssignmentOperator,
SubtractAssignmentOperator,
MultiplyAssignmentOperator,
DivideAssignmentOperator,
IncrementAssignmentOperator,
DecrementAssignmentOperator,
LeftShiftOperator,
RightShiftOperator,
LessThanEqualOperator,
GreaterThanEqualOperator,
EqualOperator,
NotEqualOperator,
LogicalAndOperator,
LogicalOrOperator,
ExponentialNotation
} FxOperator;
/*
  _FxInfo: evaluation context for an "fx" expression over an image list.
*/
struct _FxInfo
{
  const Image
    *images;         /* image sequence the expression is evaluated over */

  char
    *expression;     /* expression text, rewritten with FxOperator opcodes */

  FILE
    *file;           /* output stream (AcquireFxInfo sets thread_stderr) */

  SplayTreeInfo
    *colors,         /* keyed by string -- presumably caches color lookups; confirm */
    *symbols;        /* symbol-name -> double* cache (see Get/SetFxSymbolValue) */

  CacheView
    **view;          /* one virtual cache view per image in the list */

  RandomInfo
    *random_info;    /* random-number generator state */

  ExceptionInfo
    *exception;      /* internal exception sink used during evaluation */
};
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ A c q u i r e F x I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireFxInfo() allocates the FxInfo structure.
%
% The format of the AcquireFxInfo method is:
%
% FxInfo *AcquireFxInfo(Image *images,const char *expression,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o images: the image sequence.
%
% o expression: the expression.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate FxInfo *AcquireFxInfo(const Image *images,const char *expression,
  ExceptionInfo *exception)
{
  const Image
    *next;

  FxInfo
    *fx_info;

  register ssize_t
    i;

  unsigned char
    fx_op[2];

  /*
    Allocate and zero the context, then build its caches: one splay tree for
    colors, one for symbols, and one virtual cache view per image.
  */
  fx_info=(FxInfo *) AcquireCriticalMemory(sizeof(*fx_info));
  (void) memset(fx_info,0,sizeof(*fx_info));
  fx_info->exception=AcquireExceptionInfo();
  fx_info->images=images;
  fx_info->colors=NewSplayTree(CompareSplayTreeString,RelinquishMagickMemory,
    RelinquishMagickMemory);
  fx_info->symbols=NewSplayTree(CompareSplayTreeString,RelinquishMagickMemory,
    RelinquishMagickMemory);
  fx_info->view=(CacheView **) AcquireQuantumMemory(GetImageListLength(
    fx_info->images),sizeof(*fx_info->view));
  if (fx_info->view == (CacheView **) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  i=0;
  next=GetFirstImageInList(fx_info->images);
  for ( ; next != (Image *) NULL; next=next->next)
  {
    fx_info->view[i]=AcquireVirtualCacheView(next,exception);
    i++;
  }
  fx_info->random_info=AcquireRandomInfo();
  fx_info->expression=ConstantString(expression);
  fx_info->file=thread_stderr;
  /*
    Convert compound to simple operators.  Each multi-character operator in
    the expression text is replaced by its single-byte FxOperator opcode.
    The substitution ORDER is significant: longer operators must be rewritten
    before their prefixes (e.g. "<<=" before "<<", and "<<" before "<=" would
    otherwise never match).
  */
  fx_op[1]='\0';
  *fx_op=(unsigned char) BitwiseAndAssignmentOperator;
  (void) SubstituteString(&fx_info->expression,"&=",(char *) fx_op);
  *fx_op=(unsigned char) BitwiseOrAssignmentOperator;
  (void) SubstituteString(&fx_info->expression,"|=",(char *) fx_op);
  *fx_op=(unsigned char) LeftShiftAssignmentOperator;
  (void) SubstituteString(&fx_info->expression,"<<=",(char *) fx_op);
  *fx_op=(unsigned char) RightShiftAssignmentOperator;
  (void) SubstituteString(&fx_info->expression,">>=",(char *) fx_op);
  *fx_op=(unsigned char) PowerAssignmentOperator;
  (void) SubstituteString(&fx_info->expression,"^=",(char *) fx_op);
  *fx_op=(unsigned char) ModuloAssignmentOperator;
  (void) SubstituteString(&fx_info->expression,"%=",(char *) fx_op);
  *fx_op=(unsigned char) PlusAssignmentOperator;
  (void) SubstituteString(&fx_info->expression,"+=",(char *) fx_op);
  *fx_op=(unsigned char) SubtractAssignmentOperator;
  (void) SubstituteString(&fx_info->expression,"-=",(char *) fx_op);
  *fx_op=(unsigned char) MultiplyAssignmentOperator;
  (void) SubstituteString(&fx_info->expression,"*=",(char *) fx_op);
  *fx_op=(unsigned char) DivideAssignmentOperator;
  (void) SubstituteString(&fx_info->expression,"/=",(char *) fx_op);
  *fx_op=(unsigned char) IncrementAssignmentOperator;
  (void) SubstituteString(&fx_info->expression,"++",(char *) fx_op);
  *fx_op=(unsigned char) DecrementAssignmentOperator;
  (void) SubstituteString(&fx_info->expression,"--",(char *) fx_op);
  *fx_op=(unsigned char) LeftShiftOperator;
  (void) SubstituteString(&fx_info->expression,"<<",(char *) fx_op);
  *fx_op=(unsigned char) RightShiftOperator;
  (void) SubstituteString(&fx_info->expression,">>",(char *) fx_op);
  *fx_op=(unsigned char) LessThanEqualOperator;
  (void) SubstituteString(&fx_info->expression,"<=",(char *) fx_op);
  *fx_op=(unsigned char) GreaterThanEqualOperator;
  (void) SubstituteString(&fx_info->expression,">=",(char *) fx_op);
  *fx_op=(unsigned char) EqualOperator;
  (void) SubstituteString(&fx_info->expression,"==",(char *) fx_op);
  *fx_op=(unsigned char) NotEqualOperator;
  (void) SubstituteString(&fx_info->expression,"!=",(char *) fx_op);
  *fx_op=(unsigned char) LogicalAndOperator;
  (void) SubstituteString(&fx_info->expression,"&&",(char *) fx_op);
  *fx_op=(unsigned char) LogicalOrOperator;
  (void) SubstituteString(&fx_info->expression,"||",(char *) fx_op);
  *fx_op=(unsigned char) ExponentialNotation;
  (void) SubstituteString(&fx_info->expression,"**",(char *) fx_op);
  /*
    Force right-to-left associativity for unary negation: "-" becomes
    "-1.0*", then the rewrites below undo the damage this does to exponent
    forms ("^-", "E-", "e-").
  */
  (void) SubstituteString(&fx_info->expression,"-","-1.0*");
  (void) SubstituteString(&fx_info->expression,"^-1.0*","^-");
  (void) SubstituteString(&fx_info->expression,"E-1.0*","E-");
  (void) SubstituteString(&fx_info->expression,"e-1.0*","e-");
  (void) SubstituteString(&fx_info->expression," ",""); /* compact string */
  return(fx_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y F x I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyFxInfo() deallocates memory associated with an FxInfo structure.
%
% The format of the DestroyFxInfo method is:
%
% ImageInfo *DestroyFxInfo(ImageInfo *fx_info)
%
% A description of each parameter follows:
%
% o fx_info: the fx info.
%
*/
MagickPrivate FxInfo *DestroyFxInfo(FxInfo *fx_info)
{
  register ssize_t
    i;

  /*
    Release every member acquired by AcquireFxInfo -- exception, expression,
    both splay trees, each per-image cache view, the view array, and the
    random state -- then the structure itself.  Always returns NULL so the
    caller can reassign its pointer.
  */
  fx_info->exception=DestroyExceptionInfo(fx_info->exception);
  fx_info->expression=DestroyString(fx_info->expression);
  fx_info->symbols=DestroySplayTree(fx_info->symbols);
  fx_info->colors=DestroySplayTree(fx_info->colors);
  i=(ssize_t) GetImageListLength(fx_info->images);
  while (--i >= 0)
    fx_info->view[i]=DestroyCacheView(fx_info->view[i]);
  fx_info->view=(CacheView **) RelinquishMagickMemory(fx_info->view);
  fx_info->random_info=DestroyRandomInfo(fx_info->random_info);
  fx_info=(FxInfo *) RelinquishMagickMemory(fx_info);
  return(fx_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ F x E v a l u a t e C h a n n e l E x p r e s s i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FxEvaluateChannelExpression() evaluates an expression and returns the
% results.
%
% The format of the FxEvaluateExpression method is:
%
% double FxEvaluateChannelExpression(FxInfo *fx_info,
% const PixelChannel channel,const ssize_t x,const ssize_t y,
% double *alpha,Exceptioninfo *exception)
% double FxEvaluateExpression(FxInfo *fx_info,
% double *alpha,Exceptioninfo *exception)
%
% A description of each parameter follows:
%
% o fx_info: the fx info.
%
% o channel: the channel.
%
% o x,y: the pixel position.
%
% o alpha: the result.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline const double *GetFxSymbolValue(FxInfo *magick_restrict fx_info,
  const char *symbol)
{
  /*
    Look up a numeric symbol in the symbol cache; NULL when the symbol has
    not been defined yet.
  */
  void
    *value;

  value=GetValueFromSplayTree(fx_info->symbols,symbol);
  return((const double *) value);
}
static inline MagickBooleanType SetFxSymbolValue(
  FxInfo *magick_restrict fx_info,const char *magick_restrict symbol,
  double const value)
{
  double
    *slot;

  /*
    Fast path: the symbol is already interned -- overwrite its value in
    place.  Otherwise allocate a fresh slot and insert it under a copy of
    the symbol name; on allocation failure record the exception and report
    MagickFalse.
  */
  slot=(double *) GetValueFromSplayTree(fx_info->symbols,symbol);
  if (slot == (double *) NULL)
    {
      slot=(double *) AcquireMagickMemory(sizeof(*slot));
      if (slot == (double *) NULL)
        {
          (void) ThrowMagickException(fx_info->exception,GetMagickModule(),
            ResourceLimitError,"MemoryAllocationFailed","`%s'",
            fx_info->images->filename);
          return(MagickFalse);
        }
      *slot=value;
      return(AddValueToSplayTree(fx_info->symbols,ConstantString(symbol),slot));
    }
  *slot=value;
  return(MagickTrue);
}
/*
  FxChannelStatistics() returns the requested image statistic (depth,
  kurtosis, maxima, mean, median, minima, skewness, standard_deviation),
  scaled by QuantumScale.  A symbol of the form "statistic.channel" selects
  an explicit channel; results are memoized in the fx symbol table under a
  key of (image pointer, channel, symbol) so each statistic is computed at
  most once per image/channel.
*/
static double FxChannelStatistics(FxInfo *fx_info,Image *image,
  PixelChannel channel,const char *symbol,ExceptionInfo *exception)
{
  ChannelType
    channel_mask;

  char
    key[MagickPathExtent];

  const double
    *value;

  double
    statistic;

  register const char
    *p;

  channel_mask=UndefinedChannel;
  /* If the symbol carries a ".channel" suffix, parse it and temporarily
     narrow the image's channel mask to that channel. */
  for (p=symbol; (*p != '.') && (*p != '\0'); p++) ;
  if (*p == '.')
    {
      ssize_t
        option;

      option=ParseCommandOption(MagickPixelChannelOptions,MagickTrue,p+1);
      if (option >= 0)
        {
          channel=(PixelChannel) option;
          channel_mask=SetPixelChannelMask(image,(ChannelType)
            (1UL << channel));
        }
    }
  /* Memoization key: image address, channel number, and symbol text. */
  (void) FormatLocaleString(key,MagickPathExtent,"%p.%.20g.%s",(void *) image,
    (double) channel,symbol);
  value=GetFxSymbolValue(fx_info,key);
  if (value != (const double *) NULL)
    {
      /* Cache hit: restore the channel mask before returning. */
      if (channel_mask != UndefinedChannel)
        (void) SetPixelChannelMask(image,channel_mask);
      return(QuantumScale*(*value));
    }
  statistic=0.0;
  /* The statistic prefixes below are mutually exclusive, so at most one of
     these blocks computes a value. */
  if (LocaleNCompare(symbol,"depth",5) == 0)
    {
      size_t
        depth;

      depth=GetImageDepth(image,exception);
      statistic=(double) depth;
    }
  if (LocaleNCompare(symbol,"kurtosis",8) == 0)
    {
      double
        kurtosis,
        skewness;

      (void) GetImageKurtosis(image,&kurtosis,&skewness,exception);
      statistic=kurtosis;
    }
  if (LocaleNCompare(symbol,"maxima",6) == 0)
    {
      double
        maxima,
        minima;

      (void) GetImageRange(image,&minima,&maxima,exception);
      statistic=maxima;
    }
  if (LocaleNCompare(symbol,"mean",4) == 0)
    {
      double
        mean,
        standard_deviation;

      (void) GetImageMean(image,&mean,&standard_deviation,exception);
      statistic=mean;
    }
  if (LocaleNCompare(symbol,"median",6) == 0)
    {
      double
        median;

      (void) GetImageMedian(image,&median,exception);
      statistic=median;
    }
  if (LocaleNCompare(symbol,"minima",6) == 0)
    {
      double
        maxima,
        minima;

      (void) GetImageRange(image,&minima,&maxima,exception);
      statistic=minima;
    }
  if (LocaleNCompare(symbol,"skewness",8) == 0)
    {
      double
        kurtosis,
        skewness;

      (void) GetImageKurtosis(image,&kurtosis,&skewness,exception);
      statistic=skewness;
    }
  if (LocaleNCompare(symbol,"standard_deviation",18) == 0)
    {
      double
        mean,
        standard_deviation;

      (void) GetImageMean(image,&mean,&standard_deviation,exception);
      statistic=standard_deviation;
    }
  /* Restore the caller's channel mask, cache the result, and return it
     scaled.  A failed cache insert returns 0.0 (allocation failure already
     recorded an exception). */
  if (channel_mask != UndefinedChannel)
    (void) SetPixelChannelMask(image,channel_mask);
  if (SetFxSymbolValue(fx_info,key,statistic) == MagickFalse)
    return(0.0);
  return(QuantumScale*statistic);
}
static double
FxEvaluateSubexpression(FxInfo *,const PixelChannel,const ssize_t,
const ssize_t,const char *,const size_t,double *,ExceptionInfo *);
static inline MagickBooleanType IsFxFunction(const char *expression,
  const char *name,const size_t length)
{
  int
    terminator;

  register size_t
    i;

  /*
    True when expression starts with the function name and the character
    after it is either '(' or any non-space character.  The expression must
    be strictly longer than the name.
  */
  for (i=0; i <= length; i++)
    if (expression[i] == '\0')
      return(MagickFalse);
  terminator=expression[length];
  if (LocaleNCompare(expression,name,length) != 0)
    return(MagickFalse);
  if ((isspace(terminator) == 0) || (terminator == '('))
    return(MagickTrue);
  return(MagickFalse);
}
static MagickOffsetType FxGCD(MagickOffsetType alpha,MagickOffsetType beta)
{
  /*
    Greatest common divisor via Euclid's algorithm, written iteratively.
  */
  while (beta != 0)
  {
    MagickOffsetType
      residue;

    residue=alpha % beta;
    alpha=beta;
    beta=residue;
  }
  return(alpha);
}
static inline const char *FxSubexpression(const char *expression,
  ExceptionInfo *exception)
{
  register const char
    *p;

  register ssize_t
    depth;

  /*
    Advance to the ')' that closes the first top-level '(' in expression
    (the returned pointer addresses that ')', which is not consumed).  If
    the string ends first, the parentheses are unbalanced: record an
    exception and return the terminator position.
  */
  depth=0;
  for (p=expression; *p != '\0'; p++)
  {
    if ((depth == 1) && (*p == ')'))
      break;
    if (*p == '(')
      depth++;
    else
      if (*p == ')')
        depth--;
  }
  if (*p == '\0')
    (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
      "UnbalancedParenthesis","`%s'",expression);
  return(p);
}
/*
  FxGetSymbol() resolves one symbol of an fx expression to a double value.
  A symbol may be: a per-pixel channel accessor (r, g, b, a, o, c, m, y, k),
  an image metric (w, h, z, n, t, i, j, page.*, printsize.*, resolution.*,
  quality, extent), a channel statistic forwarded to FxChannelStatistics()
  (mean, minima, maxima, median, kurtosis, skewness, standard_deviation,
  depth, and their "image."-prefixed whole-image variants), an HSL-derived
  quantity (hue, saturation, lightness, luma, luminance, intensity), a color
  name that initializes the sampled pixel, a user variable previously stored
  with SetFxSymbolValue(), or an image artifact.  Unknown symbols raise an
  UndefinedVariable exception and evaluate to 0.0.

  The symbol may carry a prefix selecting the source image and pixel:
  's' (current image), 'u' (first image), 'v' (second image), optionally
  with a bracketed index subexpression (e.g. u[n-1]), and 'p' with either
  absolute p{x,y} or relative p[dx,dy] pixel addressing; both bracketed
  forms are evaluated recursively via FxEvaluateSubexpression().
*/
static double FxGetSymbol(FxInfo *fx_info,const PixelChannel channel,
  const ssize_t x,const ssize_t y,const char *expression,const size_t depth,
  ExceptionInfo *exception)
{
  char
    *q,
    symbol[MagickPathExtent];
  const char
    *artifact,
    *p;
  const double
    *value;
  double
    alpha,
    beta;
  Image
    *image;
  MagickBooleanType
    status;
  PixelInfo
    pixel;
  PointInfo
    point;
  register ssize_t
    i;
  size_t
    level;
  p=expression;
  i=GetImageIndexInList(fx_info->images);
  level=0;
  point.x=(double) x;
  point.y=(double) y;
  /*
    Parse the optional image/pixel prefix.  Only considered when the second
    character is not alphabetic, i.e. the leading letter is a stand-alone
    's', 'u', 'v' or 'p' rather than the start of a longer identifier.
  */
  if (isalpha((int) ((unsigned char) *(p+1))) == 0)
    {
      char
        *subexpression;
      subexpression=AcquireString(expression);
      if (strchr("suv",(int) *p) != (char *) NULL)
        {
          /*
            Image selector: s = current image, u = image 0, v = image 1;
            u[expr]/v[expr]/s[expr] overrides the index with the value of
            the bracketed subexpression.
          */
          switch (*p)
          {
            case 's':
            default:
            {
              i=GetImageIndexInList(fx_info->images);
              break;
            }
            case 'u': i=0; break;
            case 'v': i=1; break;
          }
          p++;
          if (*p == '[')
            {
              /*
                Copy the bracket contents (tracking nested '[' ']') into
                subexpression, evaluate it, and use the result as the
                image index.
              */
              level++;
              q=subexpression;
              for (p++; *p != '\0'; )
              {
                if (*p == '[')
                  level++;
                else
                  if (*p == ']')
                    {
                      level--;
                      if (level == 0)
                        break;
                    }
                *q++=(*p++);
              }
              *q='\0';
              alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,
                depth,&beta,exception);
              i=(ssize_t) alpha;
              if (*p != '\0')
                p++;
            }
          if (*p == '.')
            p++;
        }
      if ((*p == 'p') && (isalpha((int) ((unsigned char) *(p+1))) == 0))
        {
          p++;
          if (*p == '{')
            {
              /*
                p{expr} — absolute pixel coordinates: the subexpression
                yields (alpha,beta) which replace point.x/point.y.
              */
              level++;
              q=subexpression;
              for (p++; *p != '\0'; )
              {
                if (*p == '{')
                  level++;
                else
                  if (*p == '}')
                    {
                      level--;
                      if (level == 0)
                        break;
                    }
                *q++=(*p++);
              }
              *q='\0';
              alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,
                depth,&beta,exception);
              point.x=alpha;
              point.y=beta;
              if (*p != '\0')
                p++;
            }
          else
            if (*p == '[')
              {
                /*
                  p[expr] — relative pixel coordinates: the subexpression
                  yields offsets added to the current (x,y).
                */
                level++;
                q=subexpression;
                for (p++; *p != '\0'; )
                {
                  if (*p == '[')
                    level++;
                  else
                    if (*p == ']')
                      {
                        level--;
                        if (level == 0)
                          break;
                      }
                  *q++=(*p++);
                }
                *q='\0';
                alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,
                  depth,&beta,exception);
                point.x+=alpha;
                point.y+=beta;
                if (*p != '\0')
                  p++;
              }
          if (*p == '.')
            p++;
        }
      subexpression=DestroyString(subexpression);
    }
  /*
    Fetch the selected image; a bad index (e.g. u[expr] out of range) is an
    error that evaluates to 0.0.
  */
  image=GetImageFromList(fx_info->images,i);
  if (image == (Image *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "NoSuchImage","`%s'",expression);
      return(0.0);
    }
  i=GetImageIndexInList(image);
  GetPixelInfo(image,&pixel);
  /*
    Sample the pixel at point via the image's interpolation method; the
    return status is deliberately discarded (pixel keeps its initialized
    value on failure).
  */
  status=InterpolatePixelInfo(image,fx_info->view[i],image->interpolate,
    point.x,point.y,&pixel,exception);
  (void) status;
  /*
    If the remaining token (3+ chars, and not one of the reserved
    colorimetric names) is not already a user variable, try to interpret it
    as a color name; a successful lookup replaces the sampled pixel and is
    cached in the fx_info->colors splay-tree.
  */
  if ((*p != '\0') && (*(p+1) != '\0') && (*(p+2) != '\0') &&
      (LocaleCompare(p,"intensity") != 0) && (LocaleCompare(p,"luma") != 0) &&
      (LocaleCompare(p,"luminance") != 0) && (LocaleCompare(p,"hue") != 0) &&
      (LocaleCompare(p,"saturation") != 0) &&
      (LocaleCompare(p,"lightness") != 0))
    {
      char
        name[MagickPathExtent];
      size_t
        length;
      (void) CopyMagickString(name,p,MagickPathExtent);
      length=strlen(name);
      /* trim a trailing ".channel" suffix unless a ')' intervenes */
      for (q=name+length-1; q > name; q--)
      {
        if (*q == ')')
          break;
        if (*q == '.')
          {
            *q='\0';
            break;
          }
      }
      q=name;
      if ((*q != '\0') && (*(q+1) != '\0') && (*(q+2) != '\0') &&
          (GetFxSymbolValue(fx_info,name) == (const double *) NULL))
        {
          PixelInfo
            *color;
          color=(PixelInfo *) GetValueFromSplayTree(fx_info->colors,name);
          if (color != (PixelInfo *) NULL)
            {
              pixel=(*color);
              p+=length;
            }
          else
            {
              /* shadows the outer status on purpose; local to this probe */
              MagickBooleanType
                status;
              status=QueryColorCompliance(name,AllCompliance,&pixel,
                fx_info->exception);
              if (status != MagickFalse)
                {
                  (void) AddValueToSplayTree(fx_info->colors,
                    ConstantString(name),ClonePixelInfo(&pixel));
                  p+=length;
                }
            }
        }
    }
  (void) CopyMagickString(symbol,p,MagickPathExtent);
  StripString(symbol);
  /*
    An empty symbol (bare pixel reference like "p[1,1]") evaluates to the
    requested channel of the sampled pixel, scaled to [0,1].
  */
  if (*symbol == '\0')
    {
      switch (channel)
      {
        case RedPixelChannel: return(QuantumScale*pixel.red);
        case GreenPixelChannel: return(QuantumScale*pixel.green);
        case BluePixelChannel: return(QuantumScale*pixel.blue);
        case BlackPixelChannel:
        {
          if (image->colorspace != CMYKColorspace)
            {
              (void) ThrowMagickException(exception,GetMagickModule(),
                ImageError,"ColorSeparatedImageRequired","`%s'",
                image->filename);
              return(0.0);
            }
          return(QuantumScale*pixel.black);
        }
        case AlphaPixelChannel:
        {
          /* images without an alpha trait are treated as fully opaque */
          if (pixel.alpha_trait == UndefinedPixelTrait)
            return(1.0);
          alpha=(double) (QuantumScale*pixel.alpha);
          return(alpha);
        }
        case CompositePixelChannel:
        {
          Quantum
            quantum_pixel[MaxPixelChannels];
          SetPixelViaPixelInfo(image,&pixel,quantum_pixel);
          return(QuantumScale*GetPixelIntensity(image,quantum_pixel));
        }
        case IndexPixelChannel:
          return(0.0);
        default:
          break;
      }
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "UnableToParseExpression","`%s'",p);
      return(0.0);
    }
  /*
    Named symbols, dispatched on the first character (case-insensitive).
  */
  switch (*symbol)
  {
    case 'A':
    case 'a':
    {
      if (LocaleCompare(symbol,"a") == 0)
        return((QuantumScale*pixel.alpha));
      break;
    }
    case 'B':
    case 'b':
    {
      if (LocaleCompare(symbol,"b") == 0)
        return(QuantumScale*pixel.blue);
      break;
    }
    case 'C':
    case 'c':
    {
      if (IsFxFunction(symbol,"channel",7) != MagickFalse)
        {
          /*
            channel(r,g,b,k,a) / channel(c,m,y,k,a): parse the geometry
            argument list and return the component that corresponds to the
            requested channel (CMYK images use the first switch, RGB the
            second; missing arguments evaluate to 0.0).
          */
          GeometryInfo
            channel_info;
          MagickStatusType
            flags;
          flags=ParseGeometry(symbol+7,&channel_info);
          if (image->colorspace == CMYKColorspace)
            switch (channel)
            {
              case CyanPixelChannel:
              {
                if ((flags & RhoValue) == 0)
                  return(0.0);
                return(channel_info.rho);
              }
              case MagentaPixelChannel:
              {
                if ((flags & SigmaValue) == 0)
                  return(0.0);
                return(channel_info.sigma);
              }
              case YellowPixelChannel:
              {
                if ((flags & XiValue) == 0)
                  return(0.0);
                return(channel_info.xi);
              }
              case BlackPixelChannel:
              {
                if ((flags & PsiValue) == 0)
                  return(0.0);
                return(channel_info.psi);
              }
              case AlphaPixelChannel:
              {
                if ((flags & ChiValue) == 0)
                  return(0.0);
                return(channel_info.chi);
              }
              default:
                return(0.0);
            }
          switch (channel)
          {
            case RedPixelChannel:
            {
              if ((flags & RhoValue) == 0)
                return(0.0);
              return(channel_info.rho);
            }
            case GreenPixelChannel:
            {
              if ((flags & SigmaValue) == 0)
                return(0.0);
              return(channel_info.sigma);
            }
            case BluePixelChannel:
            {
              if ((flags & XiValue) == 0)
                return(0.0);
              return(channel_info.xi);
            }
            case BlackPixelChannel:
            {
              if ((flags & ChiValue) == 0)
                return(0.0);
              return(channel_info.chi);
            }
            case AlphaPixelChannel:
            {
              if ((flags & PsiValue) == 0)
                return(0.0);
              return(channel_info.psi);
            }
            default:
              return(0.0);
          }
        }
      if (LocaleCompare(symbol,"c") == 0)
        return(QuantumScale*pixel.red);
      break;
    }
    case 'D':
    case 'd':
    {
      if (LocaleNCompare(symbol,"depth",5) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      break;
    }
    case 'E':
    case 'e':
    {
      if (LocaleCompare(symbol,"extent") == 0)
        {
          if (image->extent != 0)
            return((double) image->extent);
          return((double) GetBlobSize(image));
        }
      break;
    }
    case 'G':
    case 'g':
    {
      if (LocaleCompare(symbol,"g") == 0)
        return(QuantumScale*pixel.green);
      break;
    }
    case 'K':
    case 'k':
    {
      if (LocaleNCompare(symbol,"kurtosis",8) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      if (LocaleCompare(symbol,"k") == 0)
        {
          if (image->colorspace != CMYKColorspace)
            {
              (void) ThrowMagickException(exception,GetMagickModule(),
                OptionError,"ColorSeparatedImageRequired","`%s'",
                image->filename);
              return(0.0);
            }
          return(QuantumScale*pixel.black);
        }
      break;
    }
    case 'H':
    case 'h':
    {
      if (LocaleCompare(symbol,"h") == 0)
        return((double) image->rows);
      if (LocaleCompare(symbol,"hue") == 0)
        {
          double
            hue,
            lightness,
            saturation;
          ConvertRGBToHSL(pixel.red,pixel.green,pixel.blue,&hue,&saturation,
            &lightness);
          return(hue);
        }
      break;
    }
    case 'I':
    case 'i':
    {
      /* "image.<stat>" strips the 6-char prefix and forwards the rest */
      if ((LocaleCompare(symbol,"image.depth") == 0) ||
          (LocaleCompare(symbol,"image.minima") == 0) ||
          (LocaleCompare(symbol,"image.maxima") == 0) ||
          (LocaleCompare(symbol,"image.mean") == 0) ||
          (LocaleCompare(symbol,"image.kurtosis") == 0) ||
          (LocaleCompare(symbol,"image.skewness") == 0) ||
          (LocaleCompare(symbol,"image.standard_deviation") == 0))
        return(FxChannelStatistics(fx_info,image,channel,symbol+6,exception));
      if (LocaleCompare(symbol,"image.resolution.x") == 0)
        return(image->resolution.x);
      if (LocaleCompare(symbol,"image.resolution.y") == 0)
        return(image->resolution.y);
      if (LocaleCompare(symbol,"intensity") == 0)
        {
          Quantum
            quantum_pixel[MaxPixelChannels];
          SetPixelViaPixelInfo(image,&pixel,quantum_pixel);
          return(QuantumScale*GetPixelIntensity(image,quantum_pixel));
        }
      if (LocaleCompare(symbol,"i") == 0)
        return((double) x);
      break;
    }
    case 'J':
    case 'j':
    {
      if (LocaleCompare(symbol,"j") == 0)
        return((double) y);
      break;
    }
    case 'L':
    case 'l':
    {
      if (LocaleCompare(symbol,"lightness") == 0)
        {
          double
            hue,
            lightness,
            saturation;
          ConvertRGBToHSL(pixel.red,pixel.green,pixel.blue,&hue,&saturation,
            &lightness);
          return(lightness);
        }
      if (LocaleCompare(symbol,"luma") == 0)
        {
          double
            luma;
          luma=0.212656*pixel.red+0.715158*pixel.green+0.072186*pixel.blue;
          return(QuantumScale*luma);
        }
      if (LocaleCompare(symbol,"luminance") == 0)
        {
          double
            luminence;
          luminence=0.212656*pixel.red+0.715158*pixel.green+0.072186*pixel.blue;
          return(QuantumScale*luminence);
        }
      break;
    }
    case 'M':
    case 'm':
    {
      if (LocaleNCompare(symbol,"maxima",6) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      if (LocaleNCompare(symbol,"mean",4) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      if (LocaleNCompare(symbol,"median",6) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      if (LocaleNCompare(symbol,"minima",6) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      if (LocaleCompare(symbol,"m") == 0)
        return(QuantumScale*pixel.green);
      break;
    }
    case 'N':
    case 'n':
    {
      if (LocaleCompare(symbol,"n") == 0)
        return((double) GetImageListLength(fx_info->images));
      break;
    }
    case 'O':
    case 'o':
    {
      if (LocaleCompare(symbol,"o") == 0)
        return(QuantumScale*pixel.alpha);
      break;
    }
    case 'P':
    case 'p':
    {
      if (LocaleCompare(symbol,"page.height") == 0)
        return((double) image->page.height);
      if (LocaleCompare(symbol,"page.width") == 0)
        return((double) image->page.width);
      if (LocaleCompare(symbol,"page.x") == 0)
        return((double) image->page.x);
      if (LocaleCompare(symbol,"page.y") == 0)
        return((double) image->page.y);
      if (LocaleCompare(symbol,"printsize.x") == 0)
        return(PerceptibleReciprocal(image->resolution.x)*image->columns);
      if (LocaleCompare(symbol,"printsize.y") == 0)
        return(PerceptibleReciprocal(image->resolution.y)*image->rows);
      break;
    }
    case 'Q':
    case 'q':
    {
      if (LocaleCompare(symbol,"quality") == 0)
        return((double) image->quality);
      break;
    }
    case 'R':
    case 'r':
    {
      if (LocaleCompare(symbol,"resolution.x") == 0)
        return(image->resolution.x);
      if (LocaleCompare(symbol,"resolution.y") == 0)
        return(image->resolution.y);
      if (LocaleCompare(symbol,"r") == 0)
        return(QuantumScale*pixel.red);
      break;
    }
    case 'S':
    case 's':
    {
      if (LocaleCompare(symbol,"saturation") == 0)
        {
          double
            hue,
            lightness,
            saturation;
          ConvertRGBToHSL(pixel.red,pixel.green,pixel.blue,&hue,&saturation,
            &lightness);
          return(saturation);
        }
      if (LocaleNCompare(symbol,"skewness",8) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      if (LocaleNCompare(symbol,"standard_deviation",18) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      break;
    }
    case 'T':
    case 't':
    {
      if (LocaleCompare(symbol,"t") == 0)
        return((double) GetImageIndexInList(fx_info->images));
      break;
    }
    case 'W':
    case 'w':
    {
      if (LocaleCompare(symbol,"w") == 0)
        return((double) image->columns);
      break;
    }
    case 'Y':
    case 'y':
    {
      if (LocaleCompare(symbol,"y") == 0)
        return(QuantumScale*pixel.blue);
      break;
    }
    case 'Z':
    case 'z':
    {
      if (LocaleCompare(symbol,"z") == 0)
        return((double) GetImageDepth(image,fx_info->exception));
      break;
    }
    default:
      break;
  }
  /*
    Not a built-in: look up a user variable, then an image artifact; an
    unknown symbol raises UndefinedVariable and is registered as 0.0 so the
    exception is reported only once.
  */
  value=GetFxSymbolValue(fx_info,symbol);
  if (value != (const double *) NULL)
    return(*value);
  artifact=GetImageArtifact(image,symbol);
  if (artifact != (const char *) NULL)
    return(StringToDouble(artifact,(char **) NULL));
  (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
    "UndefinedVariable","`%s'",symbol);
  (void) SetFxSymbolValue(fx_info,symbol,0.0);
  return(0.0);
}
/*
  FxOperatorPrecedence() scans expression and returns a pointer to the
  operator at which the expression should be split into left and right
  subexpressions: the loosest-binding (highest FxPrecedence value)
  top-level operator.  Left-to-right associative operators use ">=" when
  updating the target, so the LAST such occurrence wins; the right-to-left
  operators (bitwise/logical complement, ternary, assignment) use ">", so
  the FIRST occurrence wins.  Returns NULL when no operator is found.

  Throughout the scan, c holds the previous significant character (-1
  before the first) and level tracks '{'/'[' nesting — precedence is only
  assigned at level 0, so operators inside index/coordinate brackets are
  ignored.  Parenthesized groups are skipped wholesale via
  FxSubexpression().
*/
static const char *FxOperatorPrecedence(const char *expression,
  ExceptionInfo *exception)
{
  typedef enum
  {
    UndefinedPrecedence,
    NullPrecedence,
    BitwiseComplementPrecedence,
    ExponentPrecedence,
    ExponentialNotationPrecedence,
    MultiplyPrecedence,
    AdditionPrecedence,
    ShiftPrecedence,
    RelationalPrecedence,
    EquivalencyPrecedence,
    BitwiseAndPrecedence,
    BitwiseOrPrecedence,
    LogicalAndPrecedence,
    LogicalOrPrecedence,
    TernaryPrecedence,
    AssignmentPrecedence,
    CommaPrecedence,
    SeparatorPrecedence
  } FxPrecedence;
  FxPrecedence
    precedence,
    target;
  register const char
    *subexpression;
  register int
    c;
  size_t
    level;
  c=(-1);
  level=0;
  subexpression=(const char *) NULL;
  target=NullPrecedence;
  while ((c != '\0') && (*expression != '\0'))
  {
    precedence=UndefinedPrecedence;
    /* skip whitespace; also skip the character following '@' */
    if ((isspace((int) ((unsigned char) *expression)) != 0) || (c == (int) '@'))
      {
        expression++;
        continue;
      }
    /*
      Pre-pass: skip over multi-character tokens that must not be mistaken
      for operators (function names containing digits, scientific-notation
      exponents, hex constants).
    */
    switch (*expression)
    {
      case 'A':
      case 'a':
      {
#if defined(MAGICKCORE_HAVE_ACOSH)
        if (IsFxFunction(expression,"acosh",5) != MagickFalse)
          {
            expression+=5;
            break;
          }
#endif
#if defined(MAGICKCORE_HAVE_ASINH)
        if (IsFxFunction(expression,"asinh",5) != MagickFalse)
          {
            expression+=5;
            break;
          }
#endif
#if defined(MAGICKCORE_HAVE_ATANH)
        if (IsFxFunction(expression,"atanh",5) != MagickFalse)
          {
            expression+=5;
            break;
          }
#endif
        if (IsFxFunction(expression,"atan2",5) != MagickFalse)
          {
            expression+=5;
            break;
          }
        break;
      }
      case 'E':
      case 'e':
      {
        if ((isdigit(c) != 0) &&
            ((LocaleNCompare(expression,"E+",2) == 0) ||
             (LocaleNCompare(expression,"E-",2) == 0)))
          {
            expression+=2; /* scientific notation */
            break;
          }
      }
      /* falls through to the Bessel-function names */
      case 'J':
      case 'j':
      {
        if ((IsFxFunction(expression,"j0",2) != MagickFalse) ||
            (IsFxFunction(expression,"j1",2) != MagickFalse))
          {
            expression+=2;
            break;
          }
        break;
      }
      case '#':
      {
        /* hex color constant: consume all hex digits */
        while (isxdigit((int) ((unsigned char) *(expression+1))) != 0)
          expression++;
        break;
      }
      default:
        break;
    }
    /* bracket nesting is tracked via the previous character c */
    if ((c == (int) '{') || (c == (int) '['))
      level++;
    else
      if ((c == (int) '}') || (c == (int) ']'))
        level--;
    if (level == 0)
      switch ((unsigned char) *expression)
      {
        case '~':
        case '!':
        {
          precedence=BitwiseComplementPrecedence;
          break;
        }
        case '^':
        case '@':
        {
          precedence=ExponentPrecedence;
          break;
        }
        default:
        {
          /*
            Implicit multiplication: a digit or ')' followed by an
            identifier, '(' or digit (e.g. "2x" or ")(") — except the
            coordinate symbols x and y.
          */
          if (((c != 0) && ((isdigit(c) != 0) ||
               (strchr(")",c) != (char *) NULL))) &&
              (((islower((int) ((unsigned char) *expression)) != 0) ||
                (strchr("(",(int) ((unsigned char) *expression)) != (char *) NULL)) ||
               ((isdigit(c) == 0) &&
                (isdigit((int) ((unsigned char) *expression)) != 0))) &&
              (strchr("xy",(int) ((unsigned char) *expression)) == (char *) NULL))
            precedence=MultiplyPrecedence;
          break;
        }
        case '*':
        case '/':
        case '%':
        {
          precedence=MultiplyPrecedence;
          break;
        }
        case '+':
        case '-':
        {
          /* only binary +/- count; unary sign after an operator does not */
          if ((strchr("(+-/*%:&^|<>~,",c) == (char *) NULL) ||
              (isalpha(c) != 0))
            precedence=AdditionPrecedence;
          break;
        }
        case BitwiseAndAssignmentOperator:
        case BitwiseOrAssignmentOperator:
        case LeftShiftAssignmentOperator:
        case RightShiftAssignmentOperator:
        case PowerAssignmentOperator:
        case ModuloAssignmentOperator:
        case PlusAssignmentOperator:
        case SubtractAssignmentOperator:
        case MultiplyAssignmentOperator:
        case DivideAssignmentOperator:
        case IncrementAssignmentOperator:
        case DecrementAssignmentOperator:
        {
          precedence=AssignmentPrecedence;
          break;
        }
        case LeftShiftOperator:
        case RightShiftOperator:
        {
          precedence=ShiftPrecedence;
          break;
        }
        case '<':
        case LessThanEqualOperator:
        case GreaterThanEqualOperator:
        case '>':
        {
          precedence=RelationalPrecedence;
          break;
        }
        case EqualOperator:
        case NotEqualOperator:
        {
          precedence=EquivalencyPrecedence;
          break;
        }
        case '&':
        {
          precedence=BitwiseAndPrecedence;
          break;
        }
        case '|':
        {
          precedence=BitwiseOrPrecedence;
          break;
        }
        case LogicalAndOperator:
        {
          precedence=LogicalAndPrecedence;
          break;
        }
        case LogicalOrOperator:
        {
          precedence=LogicalOrPrecedence;
          break;
        }
        case ExponentialNotation:
        {
          precedence=ExponentialNotationPrecedence;
          break;
        }
        case ':':
        case '?':
        {
          precedence=TernaryPrecedence;
          break;
        }
        case '=':
        {
          precedence=AssignmentPrecedence;
          break;
        }
        case ',':
        {
          precedence=CommaPrecedence;
          break;
        }
        case ';':
        {
          precedence=SeparatorPrecedence;
          break;
        }
      }
    if ((precedence == BitwiseComplementPrecedence) ||
        (precedence == TernaryPrecedence) ||
        (precedence == AssignmentPrecedence))
      {
        if (precedence > target)
          {
            /*
              Right-to-left associativity.
            */
            target=precedence;
            subexpression=expression;
          }
      }
    else
      if (precedence >= target)
        {
          /*
            Left-to-right associativity.
          */
          target=precedence;
          subexpression=expression;
        }
    if (strchr("(",(int) *expression) != (char *) NULL)
      expression=FxSubexpression(expression,exception);
    c=(int) (*expression++);
  }
  return(subexpression);
}
static double FxEvaluateSubexpression(FxInfo *fx_info,
const PixelChannel channel,const ssize_t x,const ssize_t y,
const char *expression,const size_t depth,double *beta,
ExceptionInfo *exception)
{
#define FxMaxParenthesisDepth 58
#define FxMaxSubexpressionDepth 200
#define FxReturn(value) \
{ \
subexpression=DestroyString(subexpression); \
return(value); \
}
#define FxParseConditional(subexpression,sentinal,p,q) \
{ \
p=subexpression; \
for (q=(char *) p; (*q != (sentinal)) && (*q != '\0'); q++) \
if (*q == '(') \
{ \
for (q++; (*q != ')') && (*q != '\0'); q++); \
if (*q == '\0') \
break; \
} \
if (*q == '\0') \
{ \
(void) ThrowMagickException(exception,GetMagickModule(), \
OptionError,"UnableToParseExpression","`%s'",subexpression); \
FxReturn(0.0); \
} \
if (strlen(q) == 1) \
*(q+1)='\0'; \
*q='\0'; \
}
char
*q,
*subexpression;
double
alpha,
gamma,
sans,
value;
register const char
*p;
*beta=0.0;
sans=0.0;
subexpression=AcquireString(expression);
*subexpression='\0';
if (depth > FxMaxSubexpressionDepth)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"UnableToParseExpression","`%s'",expression);
FxReturn(0.0);
}
if (exception->severity >= ErrorException)
FxReturn(0.0);
while (isspace((int) ((unsigned char) *expression)) != 0)
expression++;
if (*expression == '\0')
FxReturn(0.0);
p=FxOperatorPrecedence(expression,exception);
if (p != (const char *) NULL)
{
(void) CopyMagickString(subexpression,expression,(size_t)
(p-expression+1));
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,depth+1,
beta,exception);
switch ((unsigned char) *p)
{
case '~':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
*beta=(double) (~(size_t) *beta);
FxReturn(*beta);
}
case '!':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(*beta == 0.0 ? 1.0 : 0.0);
}
case '^':
{
*beta=pow(alpha,FxEvaluateSubexpression(fx_info,channel,x,y,++p,
depth+1,beta,exception));
FxReturn(*beta);
}
case '*':
case ExponentialNotation:
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(alpha*(*beta));
}
case '/':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(PerceptibleReciprocal(*beta)*alpha);
}
case '%':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(fmod(alpha,*beta));
}
case '+':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(alpha+(*beta));
}
case '-':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(alpha-(*beta));
}
case BitwiseAndAssignmentOperator:
{
q=subexpression;
while (isalpha((int) ((unsigned char) *q)) != 0)
q++;
if (*q != '\0')
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"UnableToParseExpression","`%s'",subexpression);
FxReturn(0.0);
}
ClearMagickException(exception);
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
value=(double) ((size_t) (alpha+0.5) & (size_t) (*beta+0.5));
if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse)
return(0.0);
FxReturn(*beta);
}
case BitwiseOrAssignmentOperator:
{
q=subexpression;
while (isalpha((int) ((unsigned char) *q)) != 0)
q++;
if (*q != '\0')
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"UnableToParseExpression","`%s'",subexpression);
FxReturn(0.0);
}
ClearMagickException(exception);
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
value=(double) ((size_t) (alpha+0.5) | (size_t) (*beta+0.5));
if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse)
return(0.0);
FxReturn(*beta);
}
case LeftShiftAssignmentOperator:
{
q=subexpression;
while (isalpha((int) ((unsigned char) *q)) != 0)
q++;
if (*q != '\0')
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"UnableToParseExpression","`%s'",subexpression);
FxReturn(0.0);
}
ClearMagickException(exception);
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
if ((size_t) (*beta+0.5) >= (8*sizeof(size_t)))
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"ShiftCountOverflow","`%s'",subexpression);
FxReturn(0.0);
}
value=(double) ((size_t) (alpha+0.5) << (size_t) (*beta+0.5));
if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse)
return(0.0);
FxReturn(*beta);
}
case RightShiftAssignmentOperator:
{
q=subexpression;
while (isalpha((int) ((unsigned char) *q)) != 0)
q++;
if (*q != '\0')
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"UnableToParseExpression","`%s'",subexpression);
FxReturn(0.0);
}
ClearMagickException(exception);
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
if ((size_t) (*beta+0.5) >= (8*sizeof(size_t)))
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"ShiftCountOverflow","`%s'",subexpression);
FxReturn(0.0);
}
value=(double) ((size_t) (alpha+0.5) >> (size_t) (*beta+0.5));
if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse)
return(0.0);
FxReturn(*beta);
}
case PowerAssignmentOperator:
{
q=subexpression;
while (isalpha((int) ((unsigned char) *q)) != 0)
q++;
if (*q != '\0')
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"UnableToParseExpression","`%s'",subexpression);
FxReturn(0.0);
}
ClearMagickException(exception);
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
value=pow(alpha,*beta);
if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse)
return(0.0);
FxReturn(*beta);
}
case ModuloAssignmentOperator:
{
q=subexpression;
while (isalpha((int) ((unsigned char) *q)) != 0)
q++;
if (*q != '\0')
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"UnableToParseExpression","`%s'",subexpression);
FxReturn(0.0);
}
ClearMagickException(exception);
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
value=fmod(alpha,*beta);
if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse)
return(0.0);
FxReturn(*beta);
}
case PlusAssignmentOperator:
{
q=subexpression;
while (isalpha((int) ((unsigned char) *q)) != 0)
q++;
if (*q != '\0')
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"UnableToParseExpression","`%s'",subexpression);
FxReturn(0.0);
}
ClearMagickException(exception);
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
value=alpha+(*beta);
if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse)
return(0.0);
FxReturn(*beta);
}
case SubtractAssignmentOperator:
{
q=subexpression;
while (isalpha((int) ((unsigned char) *q)) != 0)
q++;
if (*q != '\0')
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"UnableToParseExpression","`%s'",subexpression);
FxReturn(0.0);
}
ClearMagickException(exception);
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
value=alpha-(*beta);
if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse)
return(0.0);
FxReturn(*beta);
}
case MultiplyAssignmentOperator:
{
q=subexpression;
while (isalpha((int) ((unsigned char) *q)) != 0)
q++;
if (*q != '\0')
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"UnableToParseExpression","`%s'",subexpression);
FxReturn(0.0);
}
ClearMagickException(exception);
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
value=alpha*(*beta);
if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse)
return(0.0);
FxReturn(*beta);
}
case DivideAssignmentOperator:
{
q=subexpression;
while (isalpha((int) ((unsigned char) *q)) != 0)
q++;
if (*q != '\0')
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"UnableToParseExpression","`%s'",subexpression);
FxReturn(0.0);
}
ClearMagickException(exception);
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
value=alpha*PerceptibleReciprocal(*beta);
if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse)
return(0.0);
FxReturn(*beta);
}
case IncrementAssignmentOperator:
{
if (*subexpression == '\0')
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
value=alpha+1.0;
if (*subexpression == '\0')
{
if (SetFxSymbolValue(fx_info,p,value) == MagickFalse)
return(0.0);
}
else
if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse)
return(0.0);
FxReturn(*beta);
}
case DecrementAssignmentOperator:
{
if (*subexpression == '\0')
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
value=alpha-1.0;
if (*subexpression == '\0')
{
if (SetFxSymbolValue(fx_info,p,value) == MagickFalse)
return(0.0);
}
else
if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse)
return(0.0);
FxReturn(*beta);
}
case LeftShiftOperator:
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
if ((size_t) (gamma+0.5) >= (8*sizeof(size_t)))
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"ShiftCountOverflow","`%s'",subexpression);
FxReturn(0.0);
}
*beta=(double) ((size_t) (alpha+0.5) << (size_t) (gamma+0.5));
FxReturn(*beta);
}
case RightShiftOperator:
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
if ((size_t) (gamma+0.5) >= (8*sizeof(size_t)))
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"ShiftCountOverflow","`%s'",subexpression);
FxReturn(0.0);
}
*beta=(double) ((size_t) (alpha+0.5) >> (size_t) (gamma+0.5));
FxReturn(*beta);
}
case '<':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(alpha < *beta ? 1.0 : 0.0);
}
case LessThanEqualOperator:
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(alpha <= *beta ? 1.0 : 0.0);
}
case '>':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(alpha > *beta ? 1.0 : 0.0);
}
case GreaterThanEqualOperator:
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(alpha >= *beta ? 1.0 : 0.0);
}
case EqualOperator:
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(fabs(alpha-(*beta)) < MagickEpsilon ? 1.0 : 0.0);
}
case NotEqualOperator:
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(fabs(alpha-(*beta)) >= MagickEpsilon ? 1.0 : 0.0);
}
case '&':
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
*beta=(double) ((size_t) (alpha+0.5) & (size_t) (gamma+0.5));
FxReturn(*beta);
}
case '|':
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
*beta=(double) ((size_t) (alpha+0.5) | (size_t) (gamma+0.5));
FxReturn(*beta);
}
case LogicalAndOperator:
{
p++;
if (alpha <= 0.0)
{
*beta=0.0;
FxReturn(*beta);
}
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,beta,
exception);
*beta=(gamma > 0.0) ? 1.0 : 0.0;
FxReturn(*beta);
}
case LogicalOrOperator:
{
p++;
if (alpha > 0.0)
{
*beta=1.0;
FxReturn(*beta);
}
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,beta,
exception);
*beta=(gamma > 0.0) ? 1.0 : 0.0;
FxReturn(*beta);
}
case '?':
{
(void) CopyMagickString(subexpression,++p,MagickPathExtent-1);
FxParseConditional(subexpression,':',p,q);
if (fabs(alpha) >= MagickEpsilon)
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,beta,
exception);
else
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,q+1,depth+1,beta,
exception);
FxReturn(gamma);
}
case '=':
{
q=subexpression;
while (isalpha((int) ((unsigned char) *q)) != 0)
q++;
if (*q != '\0')
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"UnableToParseExpression","`%s'",subexpression);
FxReturn(0.0);
}
ClearMagickException(exception);
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
value=(*beta);
if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse)
return(0.0);
FxReturn(*beta);
}
case ',':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(alpha);
}
case ';':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(*beta);
}
default:
{
gamma=alpha*FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,
beta,exception);
FxReturn(gamma);
}
}
}
if (strchr("(",(int) *expression) != (char *) NULL)
{
size_t
length;
if (depth >= FxMaxParenthesisDepth)
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"ParenthesisNestedTooDeeply","`%s'",expression);
length=CopyMagickString(subexpression,expression+1,MagickPathExtent);
if (length != 0)
subexpression[length-1]='\0';
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,depth+1,
beta,exception);
FxReturn(gamma);
}
switch (*expression)
{
case '+':
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,expression+1,depth+1,
beta,exception);
FxReturn(1.0*gamma);
}
case '-':
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,expression+1,depth+1,
beta,exception);
FxReturn(-1.0*gamma);
}
case '~':
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,expression+1,depth+1,
beta,exception);
FxReturn((double) (~(size_t) (gamma+0.5)));
}
case 'A':
case 'a':
{
if (IsFxFunction(expression,"abs",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(fabs(alpha));
}
#if defined(MAGICKCORE_HAVE_ACOSH)
if (IsFxFunction(expression,"acosh",5) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
FxReturn(acosh(alpha));
}
#endif
if (IsFxFunction(expression,"acos",4) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
FxReturn(acos(alpha));
}
#if defined(MAGICKCORE_HAVE_J1)
if (IsFxFunction(expression,"airy",4) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
if (alpha == 0.0)
FxReturn(1.0);
gamma=2.0*j1((MagickPI*alpha))/(MagickPI*alpha);
FxReturn(gamma*gamma);
}
#endif
#if defined(MAGICKCORE_HAVE_ASINH)
if (IsFxFunction(expression,"asinh",5) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
FxReturn(asinh(alpha));
}
#endif
if (IsFxFunction(expression,"asin",4) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
FxReturn(asin(alpha));
}
if (IsFxFunction(expression,"alt",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(((ssize_t) alpha) & 0x01 ? -1.0 : 1.0);
}
if (IsFxFunction(expression,"atan2",5) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
FxReturn(atan2(alpha,*beta));
}
#if defined(MAGICKCORE_HAVE_ATANH)
if (IsFxFunction(expression,"atanh",5) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
FxReturn(atanh(alpha));
}
#endif
if (IsFxFunction(expression,"atan",4) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
FxReturn(atan(alpha));
}
if (LocaleCompare(expression,"a") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'B':
case 'b':
{
if (LocaleCompare(expression,"b") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'C':
case 'c':
{
if (IsFxFunction(expression,"ceil",4) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
FxReturn(ceil(alpha));
}
if (IsFxFunction(expression,"clamp",5) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
if (alpha < 0.0)
FxReturn(0.0);
if (alpha > 1.0)
FxReturn(1.0);
FxReturn(alpha);
}
if (IsFxFunction(expression,"cosh",4) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
FxReturn(cosh(alpha));
}
if (IsFxFunction(expression,"cos",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(cos(alpha));
}
if (LocaleCompare(expression,"c") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'D':
case 'd':
{
if (IsFxFunction(expression,"debug",5) != MagickFalse)
{
const char
*type;
size_t
length;
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
switch (fx_info->images->colorspace)
{
case CMYKColorspace:
{
switch (channel)
{
case CyanPixelChannel: type="cyan"; break;
case MagentaPixelChannel: type="magenta"; break;
case YellowPixelChannel: type="yellow"; break;
case AlphaPixelChannel: type="alpha"; break;
case BlackPixelChannel: type="black"; break;
default: type="unknown"; break;
}
break;
}
case GRAYColorspace:
{
switch (channel)
{
case RedPixelChannel: type="gray"; break;
case AlphaPixelChannel: type="alpha"; break;
default: type="unknown"; break;
}
break;
}
default:
{
switch (channel)
{
case RedPixelChannel: type="red"; break;
case GreenPixelChannel: type="green"; break;
case BluePixelChannel: type="blue"; break;
case AlphaPixelChannel: type="alpha"; break;
default: type="unknown"; break;
}
break;
}
}
*subexpression='\0';
length=1;
if (strlen(expression) > 6)
length=CopyMagickString(subexpression,expression+6,
MagickPathExtent);
if (length != 0)
subexpression[length-1]='\0';
if (fx_info->file != (FILE *) NULL)
(void) FormatLocaleFile(fx_info->file,"%s[%.20g,%.20g].%s: "
"%s=%.*g\n",fx_info->images->filename,(double) x,(double) y,type,
subexpression,GetMagickPrecision(),alpha);
FxReturn(alpha);
}
if (IsFxFunction(expression,"do",2) != MagickFalse)
{
size_t
length;
/*
Parse do(expression,condition test).
*/
length=CopyMagickString(subexpression,expression+3,
MagickPathExtent-1);
if (length != 0)
subexpression[length-1]='\0';
FxParseConditional(subexpression,',',p,q);
for (alpha=0.0; ; )
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,q+1,depth+1,beta,
exception);
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,&sans,
exception);
if (fabs(gamma) < MagickEpsilon)
break;
}
FxReturn(alpha);
}
if (IsFxFunction(expression,"drc",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn((alpha/(*beta*(alpha-1.0)+1.0)));
}
break;
}
case 'E':
case 'e':
{
if (LocaleCompare(expression,"epsilon") == 0)
FxReturn(MagickEpsilon);
#if defined(MAGICKCORE_HAVE_ERF)
if (IsFxFunction(expression,"erf",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(erf(alpha));
}
#endif
if (IsFxFunction(expression,"exp",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(exp(alpha));
}
if (LocaleCompare(expression,"e") == 0)
FxReturn(2.7182818284590452354);
break;
}
case 'F':
case 'f':
{
if (IsFxFunction(expression,"floor",5) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
FxReturn(floor(alpha));
}
if (IsFxFunction(expression,"for",3) != MagickFalse)
{
double
sans = 0.0;
size_t
length;
/*
Parse for(initialization, condition test, expression).
*/
length=CopyMagickString(subexpression,expression+4,
MagickPathExtent-1);
if (length != 0)
subexpression[length-1]='\0';
FxParseConditional(subexpression,',',p,q);
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,&sans,
exception);
(void) CopyMagickString(subexpression,q+1,MagickPathExtent-1);
FxParseConditional(subexpression,',',p,q);
for (alpha=0.0; ; )
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,&sans,
exception);
if (fabs(gamma) < MagickEpsilon)
break;
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,q+1,depth+1,beta,
exception);
}
FxReturn(alpha);
}
break;
}
case 'G':
case 'g':
{
if (IsFxFunction(expression,"gauss",5) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
FxReturn(exp((-alpha*alpha/2.0))/sqrt(2.0*MagickPI));
}
if (IsFxFunction(expression,"gcd",3) != MagickFalse)
{
MagickOffsetType
gcd;
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
gcd=FxGCD((MagickOffsetType) (alpha+0.5),(MagickOffsetType) (*beta+
0.5));
FxReturn((double) gcd);
}
if (LocaleCompare(expression,"g") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'H':
case 'h':
{
if (LocaleCompare(expression,"h") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
if (LocaleCompare(expression,"hue") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
if (IsFxFunction(expression,"hypot",5) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
FxReturn(hypot(alpha,*beta));
}
break;
}
case 'K':
case 'k':
{
if (LocaleCompare(expression,"k") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'I':
case 'i':
{
if (IsFxFunction(expression,"if",2) != MagickFalse)
{
double
sans = 0.0;
size_t
length;
length=CopyMagickString(subexpression,expression+3,
MagickPathExtent-1);
if (length != 0)
subexpression[length-1]='\0';
FxParseConditional(subexpression,',',p,q);
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,&sans,
exception);
(void) CopyMagickString(subexpression,q+1,MagickPathExtent-1);
FxParseConditional(subexpression,',',p,q);
if (fabs(alpha) >= MagickEpsilon)
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,beta,
exception);
else
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,q+1,depth+1,beta,
exception);
FxReturn(alpha);
}
if (LocaleCompare(expression,"intensity") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
if (IsFxFunction(expression,"int",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(floor(alpha));
}
if (IsFxFunction(expression,"isnan",5) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
FxReturn((double) !!IsNaN(alpha));
}
if (LocaleCompare(expression,"i") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'J':
case 'j':
{
if (LocaleCompare(expression,"j") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
#if defined(MAGICKCORE_HAVE_J0)
if (IsFxFunction(expression,"j0",2) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+2,
depth+1,beta,exception);
FxReturn(j0(alpha));
}
#endif
#if defined(MAGICKCORE_HAVE_J1)
if (IsFxFunction(expression,"j1",2) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+2,
depth+1,beta,exception);
FxReturn(j1(alpha));
}
#endif
#if defined(MAGICKCORE_HAVE_J1)
if (IsFxFunction(expression,"jinc",4) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
if (alpha == 0.0)
FxReturn(1.0);
FxReturn((2.0*j1((MagickPI*alpha))/(MagickPI*alpha)));
}
#endif
break;
}
case 'L':
case 'l':
{
if (IsFxFunction(expression,"ln",2) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+2,
depth+1,beta,exception);
FxReturn(log(alpha));
}
if (IsFxFunction(expression,"logtwo",6) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+6,
depth+1,beta,exception);
FxReturn(log10(alpha)/log10(2.0));
}
if (IsFxFunction(expression,"log",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(log10(alpha));
}
if (LocaleCompare(expression,"lightness") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'M':
case 'm':
{
if (LocaleCompare(expression,"MaxRGB") == 0)
FxReturn(QuantumRange);
if (LocaleNCompare(expression,"maxima",6) == 0)
break;
if (IsFxFunction(expression,"max",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(alpha > *beta ? alpha : *beta);
}
if (LocaleNCompare(expression,"minima",6) == 0)
break;
if (IsFxFunction(expression,"min",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(alpha < *beta ? alpha : *beta);
}
if (IsFxFunction(expression,"mod",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(alpha-floor((alpha*PerceptibleReciprocal(*beta)))*(*beta));
}
if (LocaleCompare(expression,"m") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'N':
case 'n':
{
if (IsFxFunction(expression,"not",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn((double) (alpha < MagickEpsilon));
}
if (LocaleCompare(expression,"n") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'O':
case 'o':
{
if (LocaleCompare(expression,"Opaque") == 0)
FxReturn(1.0);
if (LocaleCompare(expression,"o") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'P':
case 'p':
{
if (LocaleCompare(expression,"phi") == 0)
FxReturn(MagickPHI);
if (LocaleCompare(expression,"pi") == 0)
FxReturn(MagickPI);
if (IsFxFunction(expression,"pow",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(pow(alpha,*beta));
}
if (LocaleCompare(expression,"p") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'Q':
case 'q':
{
if (LocaleCompare(expression,"QuantumRange") == 0)
FxReturn(QuantumRange);
if (LocaleCompare(expression,"QuantumScale") == 0)
FxReturn(QuantumScale);
break;
}
case 'R':
case 'r':
{
if (IsFxFunction(expression,"rand",4) != MagickFalse)
{
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_FxEvaluateSubexpression)
#endif
alpha=GetPseudoRandomValue(fx_info->random_info);
FxReturn(alpha);
}
if (IsFxFunction(expression,"round",5) != MagickFalse)
{
/*
Round the fraction to nearest integer.
*/
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
if ((alpha-floor(alpha)) < (ceil(alpha)-alpha))
FxReturn(floor(alpha));
FxReturn(ceil(alpha));
}
if (LocaleCompare(expression,"r") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'S':
case 's':
{
if (LocaleCompare(expression,"saturation") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
if (IsFxFunction(expression,"sign",4) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
FxReturn(alpha < 0.0 ? -1.0 : 1.0);
}
if (IsFxFunction(expression,"sinc",4) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
if (alpha == 0)
FxReturn(1.0);
FxReturn(sin((MagickPI*alpha))/(MagickPI*alpha));
}
if (IsFxFunction(expression,"sinh",4) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
FxReturn(sinh(alpha));
}
if (IsFxFunction(expression,"sin",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(sin(alpha));
}
if (IsFxFunction(expression,"sqrt",4) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
FxReturn(sqrt(alpha));
}
if (IsFxFunction(expression,"squish",6) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+6,
depth+1,beta,exception);
FxReturn((1.0/(1.0+exp(-alpha))));
}
if (LocaleCompare(expression,"s") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'T':
case 't':
{
if (IsFxFunction(expression,"tanh",4) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
FxReturn(tanh(alpha));
}
if (IsFxFunction(expression,"tan",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(tan(alpha));
}
if (LocaleCompare(expression,"Transparent") == 0)
FxReturn(0.0);
if (IsFxFunction(expression,"trunc",5) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
if (alpha >= 0.0)
FxReturn(floor(alpha));
FxReturn(ceil(alpha));
}
if (LocaleCompare(expression,"t") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'U':
case 'u':
{
if (LocaleCompare(expression,"u") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'V':
case 'v':
{
if (LocaleCompare(expression,"v") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'W':
case 'w':
{
if (IsFxFunction(expression,"while",5) != MagickFalse)
{
size_t
length;
/*
Parse while(condition test, expression).
*/
length=CopyMagickString(subexpression,expression+6,
MagickPathExtent-1);
if (length != 0)
subexpression[length-1]='\0';
FxParseConditional(subexpression,',',p,q);
for (alpha=0.0; ; )
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,&sans,
exception);
if (fabs(gamma) < MagickEpsilon)
break;
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,q+1,depth+1,
beta,exception);
}
FxReturn(alpha);
}
if (LocaleCompare(expression,"w") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'Y':
case 'y':
{
if (LocaleCompare(expression,"y") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'Z':
case 'z':
{
if (LocaleCompare(expression,"z") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
default:
break;
}
subexpression=DestroyString(subexpression);
q=(char *) expression;
alpha=InterpretSiPrefixValue(expression,&q);
if (q == expression)
alpha=FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception);
FxReturn(alpha);
}
MagickPrivate MagickBooleanType FxEvaluateExpression(FxInfo *fx_info,
  double *alpha,ExceptionInfo *exception)
{
  /*
    Evaluate the fx expression for the gray channel at the image origin
    (0,0); the result is returned in *alpha.
  */
  return(FxEvaluateChannelExpression(fx_info,GrayPixelChannel,0,0,alpha,
    exception));
}
MagickExport MagickBooleanType FxPreprocessExpression(FxInfo *fx_info,
  double *alpha,ExceptionInfo *exception)
{
  FILE
    *saved_file;

  MagickBooleanType
    status;

  /*
    Evaluate the expression once with debug/trace output suppressed (the
    trace file handle is temporarily cleared), then restore the handle.
  */
  saved_file=fx_info->file;
  fx_info->file=(FILE *) NULL;
  status=FxEvaluateChannelExpression(fx_info,GrayPixelChannel,0,0,alpha,
    exception);
  fx_info->file=saved_file;
  return(status);
}
MagickPrivate MagickBooleanType FxEvaluateChannelExpression(FxInfo *fx_info,
  const PixelChannel channel,const ssize_t x,const ssize_t y,
  double *alpha,ExceptionInfo *exception)
{
  double
    beta;

  /*
    Evaluate the full parsed expression for one channel of pixel (x,y).
    beta receives the secondary operand of two-argument functions and is
    discarded here.  Only an OptionError severity counts as failure.
  */
  beta=0.0;
  *alpha=FxEvaluateSubexpression(fx_info,channel,x,y,fx_info->expression,0,
    &beta,exception);
  if (exception->severity == OptionError)
    return(MagickFalse);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F x I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FxImage() applies a mathematical expression to the specified image.
%
% The format of the FxImage method is:
%
% Image *FxImage(const Image *image,const char *expression,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o expression: A mathematical expression.
%
% o exception: return any errors or warnings in this structure.
%
*/
static FxInfo **DestroyFxThreadSet(FxInfo **fx_info)
{
  register ssize_t
    i;

  ssize_t
    number_threads;

  /*
    Release one FxInfo per worker thread and then the holding array itself.
    The thread-resource limit is loop-invariant, so query it once instead of
    on every iteration as the original did.
  */
  assert(fx_info != (FxInfo **) NULL);
  number_threads=(ssize_t) GetMagickResourceLimit(ThreadResource);
  for (i=0; i < number_threads; i++)
    if (fx_info[i] != (FxInfo *) NULL)
      fx_info[i]=DestroyFxInfo(fx_info[i]);
  fx_info=(FxInfo **) RelinquishMagickMemory(fx_info);
  return(fx_info);
}
static FxInfo **AcquireFxThreadSet(const Image *image,const char *expression,
  ExceptionInfo *exception)
{
  char
    *fx_expression;

  double
    alpha;

  FxInfo
    **fx_info;

  register ssize_t
    i;

  size_t
    number_threads;

  /*
    Create one FxInfo per worker thread, each holding a private copy of the
    parsed expression (the evaluator keeps per-thread state).  Returns NULL,
    with all partial allocations released, on any failure.
  */
  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  fx_info=(FxInfo **) AcquireQuantumMemory(number_threads,sizeof(*fx_info));
  if (fx_info == (FxInfo **) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return((FxInfo **) NULL);
    }
  (void) memset(fx_info,0,number_threads*sizeof(*fx_info));
  /*
    A leading '@' means the expression is read from a file.
  */
  if (*expression != '@')
    fx_expression=ConstantString(expression);
  else
    fx_expression=FileToString(expression+1,~0UL,exception);
  if (fx_expression == (char *) NULL)
    {
      /*
        FileToString() can fail (e.g. unreadable @-file); the original code
        passed the NULL on to AcquireFxInfo().  Bail out cleanly instead.
      */
      fx_info=DestroyFxThreadSet(fx_info);
      return((FxInfo **) NULL);
    }
  for (i=0; i < (ssize_t) number_threads; i++)
  {
    MagickBooleanType
      status;

    fx_info[i]=AcquireFxInfo(image,fx_expression,exception);
    if (fx_info[i] == (FxInfo *) NULL)
      break;
    /*
      Preprocess once per thread to validate the expression up front.
    */
    status=FxPreprocessExpression(fx_info[i],&alpha,exception);
    if (status == MagickFalse)
      break;
  }
  fx_expression=DestroyString(fx_expression);
  if (i < (ssize_t) number_threads)
    fx_info=DestroyFxThreadSet(fx_info);
  return(fx_info);
}
MagickExport Image *FxImage(const Image *image,const char *expression,
  ExceptionInfo *exception)
{
#define FxImageTag "Fx/Image"

  CacheView
    *fx_view,
    *image_view;

  FxInfo
    **magick_restrict fx_info;

  Image
    *fx_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    A NULL expression is an identity transform: return a plain clone.
  */
  if (expression == (const char *) NULL)
    return(CloneImage(image,0,0,MagickTrue,exception));
  /*
    One FxInfo per worker thread; each OpenMP thread below indexes fx_info[]
    by its thread id since the evaluator keeps per-thread state.
  */
  fx_info=AcquireFxThreadSet(image,expression,exception);
  if (fx_info == (FxInfo **) NULL)
    return((Image *) NULL);
  fx_image=CloneImage(image,0,0,MagickTrue,exception);
  if (fx_image == (Image *) NULL)
    {
      fx_info=DestroyFxThreadSet(fx_info);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(fx_image,DirectClass,exception) == MagickFalse)
    {
      fx_info=DestroyFxThreadSet(fx_info);
      fx_image=DestroyImage(fx_image);
      return((Image *) NULL);
    }
  /*
    Fx image: evaluate the expression for every selected channel of every
    pixel, one image row per parallel task.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  fx_view=AcquireAuthenticCacheView(fx_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic) shared(progress,status) \
    magick_number_threads(image,fx_image,fx_image->rows,1)
#endif
  for (y=0; y < (ssize_t) fx_image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    /* A failure on any row makes the remaining rows no-ops. */
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(fx_view,0,y,fx_image->columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) fx_image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          alpha;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait fx_traits=GetPixelChannelTraits(fx_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (fx_traits == UndefinedPixelTrait))
          continue;
        if ((fx_traits & CopyPixelTrait) != 0)
          {
            /* Channel not selected for processing: copy sample unchanged. */
            SetPixelChannel(fx_image,channel,p[i],q);
            continue;
          }
        alpha=0.0;
        (void) FxEvaluateChannelExpression(fx_info[id],channel,x,y,&alpha,
          exception);
        /* Expression result is normalized; scale to quantum range, clamp. */
        q[i]=ClampToQuantum(QuantumRange*alpha);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(fx_image);
    }
    if (SyncCacheViewAuthenticPixels(fx_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* The increment is atomic; the progress callback tolerates the
           out-of-order row completion of the dynamic schedule. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,FxImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  fx_view=DestroyCacheView(fx_view);
  image_view=DestroyCacheView(image_view);
  fx_info=DestroyFxThreadSet(fx_info);
  if (status == MagickFalse)
    fx_image=DestroyImage(fx_image);
  return(fx_image);
}
|
reduction_plus_2.c | #include <stdlib.h>
#include <stdio.h>
#include <omp.h>
int main()
{
    int result = 100;

    /* Each thread gets a private copy of `result` initialised to 0 (the
       identity of +).  On exit the private sums and the original value 100
       are combined, so the output is 100 plus the sum of all thread ranks. */
#pragma omp parallel reduction(+:result)
    {
        result += omp_get_thread_num();
    }
    printf("Result: %d\n", result);
    return 0;
}
|
conv1x1s1_sgemm_pack4_neon_interleave.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#include "option.h"
#include "mat.h"
namespace ncnn{
static void conv1x1s1_sgemm_pack4_neon_interleave(const Mat& bottom_blob, Mat& top_blob, const Option& opt,
    int outch, int inch)
{
    // Repack the pack4 input of a 1x1/stride-1 convolution into the tile
    // layout consumed by the sgemm kernel: spatial positions are grouped
    // into tiles of 12 (aarch64) or 8, then 4, 2 and 1 positions, and the
    // 4-lane packed elements of each tile are interleaved so the GEMM inner
    // loop can stream them linearly.
    // NOTE(review): `outch` is unused here — presumably kept so the helper
    // mirrors the companion compute kernel's signature; confirm.
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    size_t elemsize = bottom_blob.elemsize;
    int elempack = bottom_blob.elempack;
    // Total number of spatial positions (1x1 conv flattens w*h).
    const int size = w * h;
    // interleave
    // One tmp channel per tile; the channel-index formulas used below must
    // mirror the tile counts of this create() call.
    Mat tmp = top_blob;
#if __aarch64__
    tmp.create(12, inch, size/12 + (size%12)/8 + (size%12%8)/4 + (size%12%4)/2 + size%12%2, elemsize, elempack);
#else
    tmp.create(8, inch, size/8 + (size%8)/4 + (size%4)/2 + size%2, elemsize, elempack);
#endif
    {
        int nn_size;
        int remain_size_start;
#if __aarch64__
        // ---- 12-position tiles (aarch64 only) ----
        nn_size = size / 12;
        remain_size_start = nn_size * 12;
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int ii=0; ii<nn_size; ii++)
        {
            int i = ii * 12;
            const float* img0 = bottom_blob.channel(0);
            // 4 floats per pack4 position.
            img0 += i*4;
            float* tmpptr = tmp.channel(i/12);
            for (int q=0; q<inch; q++)
            {
                // Load 12 pack4 positions with three de-interleaving ld4's,
                // then store them lane-interleaved; the final `sub` rewinds
                // img0 so the per-channel stride below applies cleanly.
                asm volatile(
                    "prfm pldl1keep, [%0, #512] \n"
                    "ld4 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0], #64 \n"
                    "prfm pldl1keep, [%0, #512] \n"
                    "ld4 {v4.4s, v5.4s, v6.4s, v7.4s}, [%0], #64 \n"
                    "prfm pldl1keep, [%0, #512] \n"
                    "ld4 {v8.4s, v9.4s, v10.4s, v11.4s}, [%0] \n"
                    "st1 {v0.4s}, [%1], #16 \n"
                    "st1 {v4.4s}, [%1], #16 \n"
                    "st1 {v8.4s}, [%1], #16 \n"
                    "sub %0, %0, #128 \n"
                    "st1 {v1.4s}, [%1], #16 \n"
                    "st1 {v5.4s}, [%1], #16 \n"
                    "st1 {v9.4s}, [%1], #16 \n"
                    "st1 {v2.4s}, [%1], #16 \n"
                    "st1 {v6.4s}, [%1], #16 \n"
                    "st1 {v10.4s}, [%1], #16 \n"
                    "st1 {v3.4s}, [%1], #16 \n"
                    "st1 {v7.4s}, [%1], #16 \n"
                    "st1 {v11.4s}, [%1], #16 \n"
                    : "=r"(img0), // %0
                    "=r"(tmpptr) // %1
                    : "0"(img0),
                    "1"(tmpptr)
                    : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11"
                );
                // Advance to the same 12 positions of the next input channel.
                img0 += bottom_blob.cstep * 4;
            }
        }
#else
        remain_size_start = 0;
#endif
        // ---- 8-position tiles ----
        nn_size = (size - remain_size_start) >> 3;
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int ii=0; ii<nn_size; ii++)
        {
            int i = remain_size_start + ii * 8;
            const float* img0 = bottom_blob.channel(0);
            img0 += i*4;
#if __aarch64__
            // Skip past the 12-wide tile channels, then index 8-wide tiles.
            float* tmpptr = tmp.channel(i/12+(i%12)/8);
#else
            float* tmpptr = tmp.channel(i/8);
#endif
            for (int q=0; q<inch; q++)
            {
#if __aarch64__
                // Straight 8-position copy (no lane de-interleave here).
                asm volatile(
                    "prfm pldl1keep, [%0, #512] \n"
                    "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0], #64 \n"
                    "prfm pldl1keep, [%0, #512] \n"
                    "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%0] \n"
                    "st1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%1], #64 \n"
                    "sub %0, %0, #64 \n"
                    "st1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%1], #64 \n"
                    : "=r"(img0), // %0
                    "=r"(tmpptr) // %1
                    : "0"(img0),
                    "1"(tmpptr)
                    : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7"
                );
#else
                // armv7: load 8 pack4 positions and transpose 8x4 with
                // vtrn/vswp before storing the interleaved tile.
                asm volatile(
                    "pld [%0, #512] \n"
                    "vldm %0!, {d0-d7} \n"
                    "pld [%0, #512] \n"
                    "vldm %0, {d16-d23} \n"
                    // transpose 8x4
                    "vtrn.32 q0, q1 \n"
                    "vtrn.32 q2, q3 \n"
                    "vtrn.32 q8, q9 \n"
                    "vtrn.32 q10, q11 \n"
                    "vswp d1, d4 \n"
                    "vswp d3, d6 \n"
                    "vswp d17, d20 \n"
                    "vswp d19, d22 \n"
                    "vswp q1, q8 \n"
                    "vswp q3, q10 \n"
                    "vst1.f32 {d0-d3}, [%1 :128]! \n"
                    "vst1.f32 {d16-d19}, [%1 :128]! \n"
                    "sub %0, %0, #64 \n"
                    "vst1.f32 {d4-d7}, [%1 :128]! \n"
                    "vst1.f32 {d20-d23}, [%1 :128]! \n"
                    : "=r"(img0), // %0
                    "=r"(tmpptr) // %1
                    : "0"(img0),
                    "1"(tmpptr)
                    : "memory", "q0", "q1", "q2", "q3", "q8", "q9", "q10", "q11"
                );
#endif // __aarch64__
                img0 += bottom_blob.cstep * 4;
            }
        }
        remain_size_start += nn_size << 3;
        // ---- 4-position tiles ----
        nn_size = (size - remain_size_start) >> 2;
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int ii=0; ii<nn_size; ii++)
        {
            int i = remain_size_start + ii * 4;
            const float* img0 = bottom_blob.channel(0);
            img0 += i*4;
#if __aarch64__
            float* tmpptr = tmp.channel(i/12 + (i%12)/8 + (i%12%8)/4);
#else
            float* tmpptr = tmp.channel(i/8 + (i%8)/4);
#endif
            for (int q=0; q<inch; q++)
            {
#if __aarch64__
                asm volatile(
                    "prfm pldl1keep, [%0, #512] \n"
                    "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0] \n"
                    "st1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%1], #64 \n"
                    : "=r"(img0), // %0
                    "=r"(tmpptr) // %1
                    : "0"(img0),
                    "1"(tmpptr)
                    : "memory", "v0", "v1", "v2", "v3"
                );
#else
                asm volatile(
                    "pld [%0, #512] \n"
                    "vldm %0, {d0-d7} \n"
                    "vstm %1!, {d0-d7} \n"
                    : "=r"(img0), // %0
                    "=r"(tmpptr) // %1
                    : "0"(img0),
                    "1"(tmpptr)
                    : "memory", "q0", "q1", "q2", "q3"
                );
#endif // __aarch64__
                img0 += bottom_blob.cstep * 4;
            }
        }
        remain_size_start += nn_size << 2;
        // ---- 2-position tiles ----
        nn_size = (size - remain_size_start) >> 1;
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int ii=0; ii<nn_size; ii++)
        {
            int i = remain_size_start + ii * 2;
            const float* img0 = bottom_blob.channel(0);
            img0 += i*4;
#if __aarch64__
            float* tmpptr = tmp.channel(i/12 + (i%12)/8 + (i%12%8)/4 + (i%12%4)/2);
#else
            float* tmpptr = tmp.channel(i/8 + (i%8)/4 + (i%4)/2);
#endif
            for (int q=0; q<inch; q++)
            {
#if __aarch64__
                asm volatile(
                    "prfm pldl1keep, [%0, #256] \n"
                    "ld1 {v0.4s, v1.4s}, [%0] \n"
                    "st1 {v0.4s, v1.4s}, [%1], #32 \n"
                    : "=r"(img0), // %0
                    "=r"(tmpptr) // %1
                    : "0"(img0),
                    "1"(tmpptr)
                    : "memory", "v0", "v1"
                );
#else
                asm volatile(
                    "pld [%0, #256] \n"
                    "vld1.f32 {d0-d3}, [%0 :128] \n"
                    "vst1.f32 {d0-d3}, [%1 :128]! \n"
                    : "=r"(img0), // %0
                    "=r"(tmpptr) // %1
                    : "0"(img0),
                    "1"(tmpptr)
                    : "memory", "q0", "q1"
                );
#endif // __aarch64__
                img0 += bottom_blob.cstep * 4;
            }
        }
        remain_size_start += nn_size << 1;
        // ---- single-position remainder ----
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int i=remain_size_start; i<size; i++)
        {
            const float* img0 = bottom_blob.channel(0);
            img0 += i*4;
#if __aarch64__
            float* tmpptr = tmp.channel(i/12 + (i%12)/8 + (i%12%8)/4 + (i%12%4)/2 + i%12%2);
#else
            float* tmpptr = tmp.channel(i/8 + (i%8)/4 + (i%4)/2 + i%2);
#endif
            for (int q=0; q<inch; q++)
            {
#if __aarch64__
                asm volatile(
                    "prfm pldl1keep, [%0, #128] \n"
                    "ld1 {v0.4s}, [%0] \n"
                    "st1 {v0.4s}, [%1], #16 \n"
                    : "=r"(img0), // %0
                    "=r"(tmpptr) // %1
                    : "0"(img0),
                    "1"(tmpptr)
                    : "memory", "v0"
                );
#else
                asm volatile(
                    "pld [%0, #128] \n"
                    "vld1.f32 {d0-d1}, [%0 :128] \n"
                    "vst1.f32 {d0-d1}, [%1 :128]! \n"
                    : "=r"(img0), // %0
                    "=r"(tmpptr) // %1
                    : "0"(img0),
                    "1"(tmpptr)
                    : "memory", "q0"
                );
#endif // __aarch64__
                img0 += bottom_blob.cstep * 4;
            }
        }
    }
}
}
|
trmv_c_csc_n_hi_conj.c | #include "alphasparse/kernel.h"
#include "alphasparse/util.h"
#include "alphasparse/opt.h"
#ifdef _OPENMP
#include <omp.h>
#endif
#include <string.h>
#include <memory.h>
static alphasparse_status_t
trmv_csc_n_hi_conj_unroll4(const ALPHA_Number alpha,
                           const ALPHA_SPMAT_CSC* A,
                           const ALPHA_Number* x,
                           const ALPHA_Number beta,
                           ALPHA_Number* y,
                           ALPHA_INT lrs,
                           ALPHA_INT lre)
{
    /*
     * Serial kernel: for each column i in [lrs, lre) computes
     *   y[i] = alpha * sum_{row <= i} conj(A[row,i]) * x[row] + beta * y[i]
     * i.e. the CONJUGATED upper-triangular (row index <= column index,
     * diagonal included) part of the CSC matrix.  The reduction is unrolled
     * by 4 with independent accumulators to break the dependency chain.
     */
    for (ALPHA_INT i = lrs; i < lre; i++)
    {
        register ALPHA_Number tmp0;
        register ALPHA_Number tmp1;
        register ALPHA_Number tmp2;
        register ALPHA_Number tmp3;
        alpha_setzero(tmp0);
        alpha_setzero(tmp1);
        alpha_setzero(tmp2);
        alpha_setzero(tmp3);
        ALPHA_INT pks = A->cols_start[i];
        ALPHA_INT pke = A->cols_end[i];
        ALPHA_INT pkl = pke - pks;
        ALPHA_INT pkl4 = pkl - 4;
        ALPHA_INT row_ind0, row_ind1, row_ind2, row_ind3;
        ALPHA_Number *A_val = &A->values[pks];
        ALPHA_INT *A_row = &A->row_indx[pks];
        ALPHA_INT pi;
        for (pi = 0; pi < pkl4; pi += 4)
        {
            ALPHA_Number conj0, conj1, conj2, conj3;
            row_ind0 = A_row[pi];
            row_ind1 = A_row[pi + 1];
            row_ind2 = A_row[pi + 2];
            row_ind3 = A_row[pi + 3];
            alpha_conj(conj0, A_val[pi]);
            alpha_conj(conj1, A_val[pi+1]);
            alpha_conj(conj2, A_val[pi+2]);
            alpha_conj(conj3, A_val[pi+3]);
            /*
             * Row indices inside a CSC column are ascending, so testing the
             * last index of the quad decides how many entries of this quad
             * lie in the upper triangle (row <= i).
             *
             * BUG FIX: the partial-quad branches below previously multiplied
             * by the raw A_val[pi] instead of its conjugate conj0, breaking
             * the conjugated variant for complex types.
             */
            if (row_ind3 <= i){
                alpha_madde(tmp0, conj0, x[row_ind0]);
                alpha_madde(tmp1, conj1, x[row_ind1]);
                alpha_madde(tmp2, conj2, x[row_ind2]);
                alpha_madde(tmp3, conj3, x[row_ind3]);
            }else if (row_ind2 <= i){
                alpha_madde(tmp1, conj0, x[row_ind0]);
                alpha_madde(tmp2, conj1, x[row_ind1]);
                alpha_madde(tmp3, conj2, x[row_ind2]);
            }else if (row_ind1 <= i){
                alpha_madde(tmp2, conj0, x[row_ind0]);
                alpha_madde(tmp3, conj1, x[row_ind1]);
            }else if (row_ind0 <= i){
                alpha_madde(tmp3, conj0, x[row_ind0]);
            }
        }
        /* Scalar tail for the remaining (< 4) entries of the column. */
        for (; pi < pkl; pi += 1)
        {
            if (A_row[pi] <= i)
            {
                ALPHA_Number conj0;
                alpha_conj(conj0, A_val[pi]);
                alpha_madde(tmp0, conj0, x[A_row[pi]]);
            }
        }
        /* Reduce the four accumulators, then apply alpha and beta. */
        alpha_add(tmp0, tmp0, tmp1);
        alpha_add(tmp2, tmp2, tmp3);
        alpha_add(tmp0, tmp0, tmp2);
        alpha_mul(tmp0, tmp0, alpha);
        alpha_mul(tmp1, beta, y[i]);
        alpha_add(y[i], tmp0, tmp1);
    }
    return ALPHA_SPARSE_STATUS_SUCCESS;
}
static alphasparse_status_t
trmv_csc_n_hi_conj_omp(const ALPHA_Number alpha,
                      const ALPHA_SPMAT_CSC* A,
                      const ALPHA_Number* x,
                      const ALPHA_Number beta,
                      ALPHA_Number* y)
{
    /*
     * Parallel driver: partitions the columns of A across threads so each
     * thread handles roughly the same number of nonzeros, then runs the
     * serial unroll-4 kernel on its private [local_n_s, local_n_e) column
     * range.  Columns map 1:1 to y entries, so threads never write the same
     * y[i].
     */
    ALPHA_INT n = A->cols;
    ALPHA_INT num_threads = alpha_get_thread_num();
    /* partition[t]..partition[t+1] is the column range of thread t (VLA of
       num_threads+1 fence posts). */
    ALPHA_INT partition[num_threads + 1];
    balanced_partition_row_by_nnz(A->cols_end, n, num_threads, partition);
#ifdef _OPENMP
#pragma omp parallel num_threads(num_threads)
#endif
    {
        ALPHA_INT tid = alpha_get_thread_id();
        ALPHA_INT local_n_s = partition[tid];
        ALPHA_INT local_n_e = partition[tid + 1];
        trmv_csc_n_hi_conj_unroll4(alpha,A,x,beta,y,local_n_s,local_n_e);
    }
    return ALPHA_SPARSE_STATUS_SUCCESS;
}
/*
 * Public entry point; the actual symbol name is expanded from the ONAME
 * macro per numeric type.  Computes y = alpha*op(A)*x + beta*y using the
 * conjugated upper-triangular CSC kernel, parallelised with OpenMP.
 */
alphasparse_status_t
ONAME(const ALPHA_Number alpha,
      const ALPHA_SPMAT_CSC *A,
      const ALPHA_Number *x,
      const ALPHA_Number beta,
      ALPHA_Number *y)
{
    return trmv_csc_n_hi_conj_omp(alpha, A, x, beta, y);
}
|
par_rap.c | /*BHEADER**********************************************************************
* Copyright (c) 2008, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* This file is part of HYPRE. See file COPYRIGHT for details.
*
* HYPRE is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* $Revision$
***********************************************************************EHEADER*/
#include "_hypre_parcsr_ls.h"
#include "_hypre_utilities.h"
#include "hypre_hopscotch_hash.h"
/*--------------------------------------------------------------------------
* OLD NOTES:
* Sketch of John's code to build RAP
*
* Uses two integer arrays icg and ifg as marker arrays
*
* icg needs to be of size n_fine; size of ia.
* A negative value of icg(i) indicates i is a f-point, otherwise
 * icg(i) converts from fine to coarse grid orderings.
 * Note that I believe the code assumes that if i<j and both are
* c-points, then icg(i) < icg(j).
* ifg needs to be of size n_coarse; size of irap
* I don't think it has meaning as either input or output.
*
* In the code, both the interpolation and restriction operator
* are stored row-wise in the array b. If i is a f-point,
* ib(i) points the row of the interpolation operator for point
* i. If i is a c-point, ib(i) points the row of the restriction
* operator for point i.
*
 * In the CSR storage for rap, it's guaranteed that the rows will
 * be ordered (i.e. ic<jc -> irap(ic) < irap(jc)) but I don't
* think there is a guarantee that the entries within a row will
* be ordered in any way except that the diagonal entry comes first.
*
* As structured now, the code requires that the size of rap be
* predicted up front. To avoid this, one could execute the code
* twice, the first time would only keep track of icg ,ifg and ka.
* Then you would know how much memory to allocate for rap and jrap.
* The second time would fill in these arrays. Actually you might
* be able to include the filling in of jrap into the first pass;
 * just overestimate its size (it's an integer array) and cut it
* back before the second time through. This would avoid some if tests
* in the second pass.
*
* Questions
* 1) parallel (PetSc) version?
* 2) what if we don't store R row-wise and don't
* even want to store a copy of it in this form
* temporarily?
*--------------------------------------------------------------------------*/
/*--------------------------------------------------------------------------
 * hypre_ExchangeRAPData
 *
 * Exchanges the boundary rows of RAP (RAP_int, computed locally for rows
 * owned by other processors) across processors, reusing RT's communication
 * package with its send/recv roles swapped: what RT *receives* is what RAP
 * data must be *sent*, and vice versa.  Returns a newly created CSR matrix
 * RAP_ext holding the rows received from neighbors.
 *
 * NOTE(review): the comm-handle job codes (12, 1, 21) select the data type
 * and direction per hypre_ParCSRCommHandleCreate's conventions (12 appears
 * to be HYPRE_Int in the reverse direction, 1 HYPRE_Real, 21 HYPRE_BigInt
 * reversed) — confirm against par_csr_communication.c.
 *--------------------------------------------------------------------------*/
hypre_CSRMatrix *
hypre_ExchangeRAPData( hypre_CSRMatrix *RAP_int,
                       hypre_ParCSRCommPkg *comm_pkg_RT)
{
   HYPRE_Int *RAP_int_i;
   HYPRE_BigInt *RAP_int_j = NULL;
   HYPRE_Real *RAP_int_data = NULL;
   HYPRE_Int num_cols = 0;

   MPI_Comm comm = hypre_ParCSRCommPkgComm(comm_pkg_RT);
   HYPRE_Int num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg_RT);
   HYPRE_Int *recv_procs = hypre_ParCSRCommPkgRecvProcs(comm_pkg_RT);
   HYPRE_Int *recv_vec_starts = hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_RT);
   HYPRE_Int num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg_RT);
   HYPRE_Int *send_procs = hypre_ParCSRCommPkgSendProcs(comm_pkg_RT);
   HYPRE_Int *send_map_starts = hypre_ParCSRCommPkgSendMapStarts(comm_pkg_RT);

   hypre_CSRMatrix *RAP_ext;
   HYPRE_Int *RAP_ext_i;
   HYPRE_BigInt *RAP_ext_j = NULL;
   HYPRE_Real *RAP_ext_data = NULL;

   hypre_ParCSRCommHandle *comm_handle = NULL;
   hypre_ParCSRCommPkg *tmp_comm_pkg;          /* comm_pkg_RT with send/recv swapped */
   HYPRE_Int *jdata_recv_vec_starts;           /* nonzero-wise displacements for j/data exchange */
   HYPRE_Int *jdata_send_map_starts;
   HYPRE_Int num_rows;
   HYPRE_Int num_nonzeros;
   HYPRE_Int i, j;
   HYPRE_Int num_procs;

   hypre_MPI_Comm_size(comm,&num_procs);

   /* CTAlloc: RAP_ext_i starts zeroed, so the prefix sum below is correct
    * even for rows whose counts are never received. */
   RAP_ext_i = hypre_CTAlloc(HYPRE_Int, send_map_starts[num_sends]+1, HYPRE_MEMORY_HOST);
   jdata_recv_vec_starts = hypre_TAlloc(HYPRE_Int, num_recvs+1, HYPRE_MEMORY_HOST);
   jdata_send_map_starts = hypre_TAlloc(HYPRE_Int, num_sends+1, HYPRE_MEMORY_HOST);

   /*--------------------------------------------------------------------------
    * recompute RAP_int_i so that RAP_int_i[j+1] contains the number of
    * elements of row j (to be determined through send_map_elmnts on the
    * receiving end)
    *--------------------------------------------------------------------------*/
   if (num_recvs)
   {
      RAP_int_i = hypre_CSRMatrixI(RAP_int);
      RAP_int_j = hypre_CSRMatrixBigJ(RAP_int);
      RAP_int_data = hypre_CSRMatrixData(RAP_int);
      num_cols = hypre_CSRMatrixNumCols(RAP_int);
   }

   /* Record the nonzero displacement of each neighbor block before the row
    * pointers are destroyed by the in-place conversion to counts below. */
   jdata_recv_vec_starts[0] = 0;
   for (i=0; i < num_recvs; i++)
   {
      jdata_recv_vec_starts[i+1] = RAP_int_i[recv_vec_starts[i+1]];
   }

   /* Convert RAP_int_i in place from row pointers to per-row counts; the
    * loops run backwards so each entry is consumed before being overwritten. */
   for (i=num_recvs; i > 0; i--)
      for (j = recv_vec_starts[i]; j > recv_vec_starts[i-1]; j--)
         RAP_int_i[j] -= RAP_int_i[j-1];

   /*--------------------------------------------------------------------------
    * initialize communication: ship the per-row counts to the processors that
    * own those rows.  The three cases only differ in which buffer may be NULL.
    *--------------------------------------------------------------------------*/
   if (num_recvs && num_sends)
      comm_handle = hypre_ParCSRCommHandleCreate(12,comm_pkg_RT,
                &RAP_int_i[1], &RAP_ext_i[1]);
   else if (num_recvs)
      comm_handle = hypre_ParCSRCommHandleCreate(12,comm_pkg_RT,
                &RAP_int_i[1], NULL);
   else if (num_sends)
      comm_handle = hypre_ParCSRCommHandleCreate(12,comm_pkg_RT,
                NULL, &RAP_ext_i[1]);

   /* Build a comm package that is comm_pkg_RT with the roles of senders and
    * receivers exchanged; the j/data starts are filled in further below. */
   tmp_comm_pkg = hypre_CTAlloc(hypre_ParCSRCommPkg, 1, HYPRE_MEMORY_HOST);
   hypre_ParCSRCommPkgComm(tmp_comm_pkg) = comm;
   hypre_ParCSRCommPkgNumSends(tmp_comm_pkg) = num_recvs;
   hypre_ParCSRCommPkgNumRecvs(tmp_comm_pkg) = num_sends;
   hypre_ParCSRCommPkgSendProcs(tmp_comm_pkg) = recv_procs;
   hypre_ParCSRCommPkgRecvProcs(tmp_comm_pkg) = send_procs;

   /* Wait for the row-count exchange to finish before using RAP_ext_i. */
   hypre_ParCSRCommHandleDestroy(comm_handle);
   comm_handle = NULL;

   /*--------------------------------------------------------------------------
    * compute num_nonzeros for RAP_ext: prefix-sum the received counts back
    * into row pointers.
    *--------------------------------------------------------------------------*/
   for (i=0; i < num_sends; i++)
      for (j = send_map_starts[i]; j < send_map_starts[i+1]; j++)
         RAP_ext_i[j+1] += RAP_ext_i[j];

   num_rows = send_map_starts[num_sends];
   num_nonzeros = RAP_ext_i[num_rows];
   if (num_nonzeros)
   {
      RAP_ext_j = hypre_TAlloc(HYPRE_BigInt, num_nonzeros, HYPRE_MEMORY_HOST);
      RAP_ext_data = hypre_TAlloc(HYPRE_Real, num_nonzeros, HYPRE_MEMORY_HOST);
   }

   for (i=0; i < num_sends+1; i++)
   {
      jdata_send_map_starts[i] = RAP_ext_i[send_map_starts[i]];
   }

   /* NOTE(review): starts arrays are deliberately cross-assigned — in the
    * swapped package the "recv vec starts" describe what this rank will
    * receive (formerly its sends), and vice versa. */
   hypre_ParCSRCommPkgRecvVecStarts(tmp_comm_pkg) = jdata_send_map_starts;
   hypre_ParCSRCommPkgSendMapStarts(tmp_comm_pkg) = jdata_recv_vec_starts;

   /* Exchange the matrix values, then the (global) column indices. */
   comm_handle = hypre_ParCSRCommHandleCreate(1,tmp_comm_pkg,RAP_int_data,
                                        RAP_ext_data);
   hypre_ParCSRCommHandleDestroy(comm_handle);
   comm_handle = NULL;

   comm_handle = hypre_ParCSRCommHandleCreate(21,tmp_comm_pkg,RAP_int_j,
                                        RAP_ext_j);
   RAP_ext = hypre_CSRMatrixCreate(num_rows,num_cols,num_nonzeros);

   hypre_CSRMatrixI(RAP_ext) = RAP_ext_i;
   if (num_nonzeros)
   {
      hypre_CSRMatrixBigJ(RAP_ext) = RAP_ext_j;
      hypre_CSRMatrixData(RAP_ext) = RAP_ext_data;
   }
   hypre_ParCSRCommHandleDestroy(comm_handle);
   comm_handle = NULL;

   /* The starts arrays were only borrowed by tmp_comm_pkg; free them here
    * along with the package shell (proc arrays still belong to comm_pkg_RT). */
   hypre_TFree(jdata_recv_vec_starts, HYPRE_MEMORY_HOST);
   hypre_TFree(jdata_send_map_starts, HYPRE_MEMORY_HOST);
   hypre_TFree(tmp_comm_pkg, HYPRE_MEMORY_HOST);

   return RAP_ext;
}
/*--------------------------------------------------------------------------
* hypre_BoomerAMGBuildCoarseOperator
*--------------------------------------------------------------------------*/
/* Builds the Galerkin coarse-grid operator RAP = R*A*P and returns it via
 * RAP_ptr.  Thin wrapper around the KT variant with keepTranspose == 0,
 * i.e. the transpose of RT is not retained (see the KT routine for the
 * meaning of that flag).  Returns hypre_error_flag per HYPRE convention. */
HYPRE_Int
hypre_BoomerAMGBuildCoarseOperator( hypre_ParCSRMatrix *RT,
                hypre_ParCSRMatrix *A,
                hypre_ParCSRMatrix *P,
                hypre_ParCSRMatrix **RAP_ptr )
{
   hypre_BoomerAMGBuildCoarseOperatorKT( RT, A, P, 0, RAP_ptr);
   return hypre_error_flag;
}
HYPRE_Int
hypre_BoomerAMGBuildCoarseOperatorKT( hypre_ParCSRMatrix *RT,
hypre_ParCSRMatrix *A,
hypre_ParCSRMatrix *P,
HYPRE_Int keepTranspose,
hypre_ParCSRMatrix **RAP_ptr )
{
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_RAP] -= hypre_MPI_Wtime();
#endif
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_CSRMatrix *RT_diag = hypre_ParCSRMatrixDiag(RT);
hypre_CSRMatrix *RT_offd = hypre_ParCSRMatrixOffd(RT);
HYPRE_Int num_cols_diag_RT = hypre_CSRMatrixNumCols(RT_diag);
HYPRE_Int num_cols_offd_RT = hypre_CSRMatrixNumCols(RT_offd);
HYPRE_Int num_rows_offd_RT = hypre_CSRMatrixNumRows(RT_offd);
hypre_ParCSRCommPkg *comm_pkg_RT = hypre_ParCSRMatrixCommPkg(RT);
HYPRE_Int num_recvs_RT = 0;
HYPRE_Int num_sends_RT = 0;
HYPRE_Int *send_map_starts_RT;
HYPRE_Int *send_map_elmts_RT;
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
HYPRE_Int num_cols_diag_A = hypre_CSRMatrixNumCols(A_diag);
HYPRE_Int num_cols_offd_A = hypre_CSRMatrixNumCols(A_offd);
hypre_CSRMatrix *P_diag = hypre_ParCSRMatrixDiag(P);
HYPRE_Real *P_diag_data = hypre_CSRMatrixData(P_diag);
HYPRE_Int *P_diag_i = hypre_CSRMatrixI(P_diag);
HYPRE_Int *P_diag_j = hypre_CSRMatrixJ(P_diag);
hypre_CSRMatrix *P_offd = hypre_ParCSRMatrixOffd(P);
HYPRE_BigInt *col_map_offd_P = hypre_ParCSRMatrixColMapOffd(P);
HYPRE_Real *P_offd_data = hypre_CSRMatrixData(P_offd);
HYPRE_Int *P_offd_i = hypre_CSRMatrixI(P_offd);
HYPRE_Int *P_offd_j = hypre_CSRMatrixJ(P_offd);
HYPRE_BigInt first_col_diag_P = hypre_ParCSRMatrixFirstColDiag(P);
HYPRE_BigInt last_col_diag_P;
HYPRE_Int num_cols_diag_P = hypre_CSRMatrixNumCols(P_diag);
HYPRE_Int num_cols_offd_P = hypre_CSRMatrixNumCols(P_offd);
HYPRE_BigInt *coarse_partitioning = hypre_ParCSRMatrixColStarts(P);
HYPRE_BigInt *RT_partitioning = hypre_ParCSRMatrixColStarts(RT);
hypre_ParCSRMatrix *RAP;
HYPRE_BigInt *col_map_offd_RAP = NULL;
HYPRE_BigInt *new_col_map_offd_RAP = NULL;
hypre_CSRMatrix *RAP_int = NULL;
HYPRE_Real *RAP_int_data;
HYPRE_Int *RAP_int_i;
HYPRE_BigInt *RAP_int_j;
hypre_CSRMatrix *RAP_ext;
HYPRE_Real *RAP_ext_data = NULL;
HYPRE_Int *RAP_ext_i = NULL;
HYPRE_BigInt *RAP_ext_j = NULL;
hypre_CSRMatrix *RAP_diag;
HYPRE_Real *RAP_diag_data;
HYPRE_Int *RAP_diag_i;
HYPRE_Int *RAP_diag_j;
hypre_CSRMatrix *RAP_offd;
HYPRE_Real *RAP_offd_data = NULL;
HYPRE_Int *RAP_offd_i = NULL;
HYPRE_Int *RAP_offd_j = NULL;
HYPRE_Int RAP_size;
HYPRE_Int RAP_ext_size;
HYPRE_Int RAP_diag_size;
HYPRE_Int RAP_offd_size;
HYPRE_Int P_ext_diag_size;
HYPRE_Int P_ext_offd_size;
HYPRE_BigInt first_col_diag_RAP;
HYPRE_BigInt last_col_diag_RAP;
HYPRE_Int num_cols_offd_RAP = 0;
hypre_CSRMatrix *R_diag;
HYPRE_Real *R_diag_data;
HYPRE_Int *R_diag_i;
HYPRE_Int *R_diag_j;
hypre_CSRMatrix *R_offd;
HYPRE_Real *R_offd_data;
HYPRE_Int *R_offd_i;
HYPRE_Int *R_offd_j;
HYPRE_Real *RA_diag_data_array = NULL;
HYPRE_Int *RA_diag_j_array = NULL;
HYPRE_Real *RA_offd_data_array = NULL;
HYPRE_Int *RA_offd_j_array = NULL;
hypre_CSRMatrix *Ps_ext;
HYPRE_Real *Ps_ext_data;
HYPRE_Int *Ps_ext_i;
HYPRE_BigInt *Ps_ext_j;
HYPRE_Real *P_ext_diag_data = NULL;
HYPRE_Int *P_ext_diag_i = NULL;
HYPRE_Int *P_ext_diag_j = NULL;
HYPRE_Real *P_ext_offd_data = NULL;
HYPRE_Int *P_ext_offd_i = NULL;
HYPRE_Int *P_ext_offd_j = NULL;
HYPRE_BigInt *P_big_offd_j = NULL;
HYPRE_BigInt *col_map_offd_Pext;
HYPRE_Int *map_P_to_Pext = NULL;
HYPRE_Int *map_P_to_RAP = NULL;
HYPRE_Int *map_Pext_to_RAP = NULL;
HYPRE_Int *P_marker;
HYPRE_Int **P_mark_array;
HYPRE_Int **A_mark_array;
HYPRE_Int *A_marker;
HYPRE_BigInt *temp;
HYPRE_BigInt n_coarse, n_coarse_RT;
HYPRE_Int square = 1;
HYPRE_Int num_cols_offd_Pext = 0;
HYPRE_Int ic, i, j, k;
HYPRE_Int i1, i2, i3, ii, ns, ne, size, rest;
HYPRE_Int cnt = 0; /*value; */
HYPRE_Int jj1, jj2, jj3, jcol;
HYPRE_Int *jj_count, *jj_cnt_diag, *jj_cnt_offd;
HYPRE_Int jj_counter, jj_count_diag, jj_count_offd;
HYPRE_Int jj_row_begining, jj_row_begin_diag, jj_row_begin_offd;
HYPRE_Int start_indexing = 0; /* start indexing for RAP_data at 0 */
HYPRE_Int num_nz_cols_A;
HYPRE_Int num_procs;
HYPRE_Int num_threads;
HYPRE_Real r_entry;
HYPRE_Real r_a_product;
HYPRE_Real r_a_p_product;
HYPRE_Real zero = 0.0;
HYPRE_Int *prefix_sum_workspace;
/*-----------------------------------------------------------------------
* Copy ParCSRMatrix RT into CSRMatrix R so that we have row-wise access
* to restriction .
*-----------------------------------------------------------------------*/
hypre_MPI_Comm_size(comm,&num_procs);
num_threads = hypre_NumThreads();
if (comm_pkg_RT)
{
num_recvs_RT = hypre_ParCSRCommPkgNumRecvs(comm_pkg_RT);
num_sends_RT = hypre_ParCSRCommPkgNumSends(comm_pkg_RT);
send_map_starts_RT =hypre_ParCSRCommPkgSendMapStarts(comm_pkg_RT);
send_map_elmts_RT = hypre_ParCSRCommPkgSendMapElmts(comm_pkg_RT);
}
else if (num_procs > 1)
{
hypre_MatvecCommPkgCreate(RT);
comm_pkg_RT = hypre_ParCSRMatrixCommPkg(RT);
num_recvs_RT = hypre_ParCSRCommPkgNumRecvs(comm_pkg_RT);
num_sends_RT = hypre_ParCSRCommPkgNumSends(comm_pkg_RT);
send_map_starts_RT =hypre_ParCSRCommPkgSendMapStarts(comm_pkg_RT);
send_map_elmts_RT = hypre_ParCSRCommPkgSendMapElmts(comm_pkg_RT);
}
hypre_CSRMatrixTranspose(RT_diag,&R_diag,1);
if (num_cols_offd_RT)
{
hypre_CSRMatrixTranspose(RT_offd,&R_offd,1);
R_offd_data = hypre_CSRMatrixData(R_offd);
R_offd_i = hypre_CSRMatrixI(R_offd);
R_offd_j = hypre_CSRMatrixJ(R_offd);
}
/*-----------------------------------------------------------------------
* Access the CSR vectors for R. Also get sizes of fine and
* coarse grids.
*-----------------------------------------------------------------------*/
R_diag_data = hypre_CSRMatrixData(R_diag);
R_diag_i = hypre_CSRMatrixI(R_diag);
R_diag_j = hypre_CSRMatrixJ(R_diag);
n_coarse = hypre_ParCSRMatrixGlobalNumCols(P);
num_nz_cols_A = num_cols_diag_A + num_cols_offd_A;
n_coarse_RT = hypre_ParCSRMatrixGlobalNumCols(RT);
if (n_coarse != n_coarse_RT)
square = 0;
/*-----------------------------------------------------------------------
* Generate Ps_ext, i.e. portion of P that is stored on neighbor procs
* and needed locally for triple matrix product
*-----------------------------------------------------------------------*/
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
hypre_UnorderedIntMap send_map_elmts_RT_inverse_map;
HYPRE_Int *send_map_elmts_starts_RT_aggregated = NULL;
HYPRE_Int *send_map_elmts_RT_aggregated = NULL;
HYPRE_Int send_map_elmts_RT_inverse_map_initialized =
num_sends_RT > 0 && send_map_starts_RT[num_sends_RT] - send_map_starts_RT[0] > 0;
if (send_map_elmts_RT_inverse_map_initialized)
{
hypre_UnorderedIntSet send_map_elmts_set;
hypre_UnorderedIntSetCreate(&send_map_elmts_set, 2*(send_map_starts_RT[num_sends_RT] - send_map_starts_RT[0]), 16*hypre_NumThreads());
#pragma omp parallel for HYPRE_SMP_SCHEDULE
for (i = send_map_starts_RT[0]; i < send_map_starts_RT[num_sends_RT]; i++)
{
HYPRE_Int key = send_map_elmts_RT[i];
hypre_UnorderedIntSetPut(&send_map_elmts_set, key);
}
HYPRE_Int send_map_elmts_unique_size;
HYPRE_Int *send_map_elmts_unique = hypre_UnorderedIntSetCopyToArray(&send_map_elmts_set, &send_map_elmts_unique_size);
hypre_UnorderedIntSetDestroy(&send_map_elmts_set);
hypre_UnorderedIntMapCreate(&send_map_elmts_RT_inverse_map, 2*send_map_elmts_unique_size, 16*hypre_NumThreads());
#pragma omp parallel for HYPRE_SMP_SCHEDULE
for (i = 0; i < send_map_elmts_unique_size; i++)
{
hypre_UnorderedIntMapPutIfAbsent(&send_map_elmts_RT_inverse_map, send_map_elmts_unique[i], i);
}
hypre_TFree(send_map_elmts_unique, HYPRE_MEMORY_HOST);
send_map_elmts_starts_RT_aggregated = hypre_TAlloc(HYPRE_Int, send_map_elmts_unique_size + 1, HYPRE_MEMORY_HOST);
send_map_elmts_RT_aggregated = hypre_TAlloc(HYPRE_Int, send_map_starts_RT[num_sends_RT], HYPRE_MEMORY_HOST);
#pragma omp parallel for HYPRE_SMP_SCHEDULE
for (i = 0; i < send_map_elmts_unique_size; i++)
{
send_map_elmts_starts_RT_aggregated[i] = 0;
}
#pragma omp parallel for HYPRE_SMP_SCHEDULE
for (i = send_map_starts_RT[0]; i < send_map_starts_RT[num_sends_RT]; i++)
{
HYPRE_Int idx = hypre_UnorderedIntMapGet(&send_map_elmts_RT_inverse_map, send_map_elmts_RT[i]);
#pragma omp atomic
send_map_elmts_starts_RT_aggregated[idx]++;
}
for (i = 0; i < send_map_elmts_unique_size - 1; i++)
{
send_map_elmts_starts_RT_aggregated[i + 1] += send_map_elmts_starts_RT_aggregated[i];
}
send_map_elmts_starts_RT_aggregated[send_map_elmts_unique_size] = send_map_starts_RT[num_sends_RT];
#pragma omp parallel for HYPRE_SMP_SCHEDULE
for (i = send_map_starts_RT[num_sends_RT] - 1; i >= send_map_starts_RT[0]; i--)
{
HYPRE_Int idx = hypre_UnorderedIntMapGet(&send_map_elmts_RT_inverse_map, send_map_elmts_RT[i]);
HYPRE_Int offset = hypre_fetch_and_add(send_map_elmts_starts_RT_aggregated + idx, -1) - 1;
send_map_elmts_RT_aggregated[offset] = i;
}
}
#endif /* HYPRE_CONCURRENT_HOPSCOTCH */
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_RENUMBER_COLIDX] -= hypre_MPI_Wtime();
hypre_profile_times[HYPRE_TIMER_ID_RENUMBER_COLIDX_RAP] -= hypre_MPI_Wtime();
#endif
if (num_procs > 1)
{
Ps_ext = hypre_ParCSRMatrixExtractBExt(P,A,1);
Ps_ext_data = hypre_CSRMatrixData(Ps_ext);
Ps_ext_i = hypre_CSRMatrixI(Ps_ext);
Ps_ext_j = hypre_CSRMatrixBigJ(Ps_ext);
}
P_ext_diag_i = hypre_TAlloc(HYPRE_Int, num_cols_offd_A+1, HYPRE_MEMORY_HOST);
P_ext_offd_i = hypre_TAlloc(HYPRE_Int, num_cols_offd_A+1, HYPRE_MEMORY_HOST);
P_ext_diag_i[0] = 0;
P_ext_offd_i[0] = 0;
P_ext_diag_size = 0;
P_ext_offd_size = 0;
last_col_diag_P = first_col_diag_P + (HYPRE_BigInt) num_cols_diag_P - 1;
/*HYPRE_Int prefix_sum_workspace[2*(num_threads + 1)];*/
prefix_sum_workspace = hypre_TAlloc(HYPRE_Int, 2*(num_threads + 1), HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel private(i,j)
#endif /* This threading causes problem, maybe the prefix_sum in combination with BigInt? */
{
HYPRE_Int i_begin, i_end;
hypre_GetSimpleThreadPartition(&i_begin, &i_end, num_cols_offd_A);
HYPRE_Int P_ext_diag_size_private = 0;
HYPRE_Int P_ext_offd_size_private = 0;
for (i = i_begin; i < i_end; i++)
{
for (j=Ps_ext_i[i]; j < Ps_ext_i[i+1]; j++)
if (Ps_ext_j[j] < first_col_diag_P || Ps_ext_j[j] > last_col_diag_P)
P_ext_offd_size_private++;
else
P_ext_diag_size_private++;
}
hypre_prefix_sum_pair(&P_ext_diag_size_private, &P_ext_diag_size, &P_ext_offd_size_private, &P_ext_offd_size, prefix_sum_workspace);
#ifdef HYPRE_USING_OPENMP
#pragma omp master
#endif
{
if (P_ext_diag_size)
{
P_ext_diag_j = hypre_CTAlloc(HYPRE_Int, P_ext_diag_size, HYPRE_MEMORY_HOST);
P_ext_diag_data = hypre_CTAlloc(HYPRE_Real, P_ext_diag_size, HYPRE_MEMORY_HOST);
}
if (P_ext_offd_size)
{
P_ext_offd_j = hypre_CTAlloc(HYPRE_Int, P_ext_offd_size, HYPRE_MEMORY_HOST);
P_big_offd_j = hypre_CTAlloc(HYPRE_BigInt, P_ext_offd_size, HYPRE_MEMORY_HOST);
P_ext_offd_data = hypre_CTAlloc(HYPRE_Real, P_ext_offd_size, HYPRE_MEMORY_HOST);
//temp = hypre_CTAlloc(HYPRE_BigInt, P_ext_offd_size+num_cols_offd_P, HYPRE_MEMORY_HOST);
}
}
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
for (i = i_begin; i < i_end; i++)
{
for (j=Ps_ext_i[i]; j < Ps_ext_i[i+1]; j++)
{
HYPRE_BigInt value = Ps_ext_j[j];
if (value < first_col_diag_P || value > last_col_diag_P)
{
//Ps_ext_j[P_ext_offd_size_private] = value;
//temp[P_ext_offd_size_private] = value;
P_big_offd_j[P_ext_offd_size_private] = value;
P_ext_offd_data[P_ext_offd_size_private++] = Ps_ext_data[j];
}
else
{
P_ext_diag_j[P_ext_diag_size_private] = (HYPRE_Int)(Ps_ext_j[j] - first_col_diag_P);
P_ext_diag_data[P_ext_diag_size_private++] = Ps_ext_data[j];
}
}
P_ext_diag_i[i+1] = P_ext_diag_size_private;
P_ext_offd_i[i+1] = P_ext_offd_size_private;
}
} /* omp parallel */
hypre_TFree(prefix_sum_workspace, HYPRE_MEMORY_HOST);
if (num_procs > 1)
{
hypre_CSRMatrixDestroy(Ps_ext);
Ps_ext = NULL;
}
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
if (P_ext_offd_size || num_cols_offd_P)
{
hypre_UnorderedBigIntSet found_set;
hypre_UnorderedBigIntSetCreate(&found_set, P_ext_offd_size + num_cols_offd_P, 16*hypre_NumThreads());
#pragma omp parallel private(i)
{
#pragma omp for HYPRE_SMP_SCHEDULE
for (i = 0; i < P_ext_offd_size; i++)
{
//hypre_UnorderedBigIntSetPut(&found_set, Ps_ext_j[i]);
hypre_UnorderedBigIntSetPut(&found_set, P_big_offd_j[i]);
}
#pragma omp for HYPRE_SMP_SCHEDULE
for (i = 0; i < num_cols_offd_P; i++)
{
hypre_UnorderedBigIntSetPut(&found_set, col_map_offd_P[i]);
}
} /* omp parallel */
/* Warning on getting temp right !!!!! */
temp = hypre_UnorderedBigIntSetCopyToArray(&found_set, &num_cols_offd_Pext);
hypre_UnorderedBigIntSetDestroy(&found_set);
hypre_UnorderedBigIntMap col_map_offd_Pext_inverse;
hypre_big_sort_and_create_inverse_map(temp, num_cols_offd_Pext, &col_map_offd_Pext, &col_map_offd_Pext_inverse);
#pragma omp parallel for HYPRE_SMP_SCHEDULE
for (i=0 ; i < P_ext_offd_size; i++)
//Ps_ext_j[i] = hypre_UnorderedBigIntMapGet(&col_map_offd_Pext_inverse, Ps_ext_j[i]);
P_ext_offd_j[i] = hypre_UnorderedBigIntMapGet(&col_map_offd_Pext_inverse, P_big_offd_j[i]);
if (num_cols_offd_Pext) hypre_UnorderedBigIntMapDestroy(&col_map_offd_Pext_inverse);
}
#else /* !HYPRE_CONCURRENT_HOPSCOTCH */
if (P_ext_offd_size || num_cols_offd_P)
{
temp = hypre_CTAlloc(HYPRE_BigInt, P_ext_offd_size+num_cols_offd_P, HYPRE_MEMORY_HOST);
for (i=0; i < P_ext_offd_size; i++)
//Ps_ext_j[i] = temp[i];
//temp[i] = Ps_ext_j[i];
temp[i] = P_big_offd_j[i];
cnt = P_ext_offd_size;
for (i=0; i < num_cols_offd_P; i++)
temp[cnt++] = col_map_offd_P[i];
}
if (cnt)
{
hypre_BigQsort0(temp, 0, cnt-1);
num_cols_offd_Pext = 1;
HYPRE_BigInt value = temp[0];
for (i=1; i < cnt; i++)
{
if (temp[i] > value)
{
value = temp[i];
temp[num_cols_offd_Pext++] = value;
}
}
}
if (num_cols_offd_Pext)
col_map_offd_Pext = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd_Pext, HYPRE_MEMORY_HOST);
for (i=0; i < num_cols_offd_Pext; i++)
col_map_offd_Pext[i] = temp[i];
if (P_ext_offd_size || num_cols_offd_P)
hypre_TFree(temp, HYPRE_MEMORY_HOST);
/*if (P_ext_offd_size)
P_ext_offd_j = hypre_CTAlloc(HYPRE_Int, P_ext_offd_size, HYPRE_MEMORY_HOST);*/
for (i=0 ; i < P_ext_offd_size; i++)
P_ext_offd_j[i] = hypre_BigBinarySearch(col_map_offd_Pext,
//Ps_ext_j[i],
P_big_offd_j[i],
num_cols_offd_Pext);
#endif /* !HYPRE_CONCURRENT_HOPSCOTCH */
if (P_ext_offd_size)
hypre_TFree(P_big_offd_j, HYPRE_MEMORY_HOST);
/*if (num_procs > 1)
{
hypre_CSRMatrixDestroy(Ps_ext);
Ps_ext = NULL;
}*/
if (num_cols_offd_P)
{
map_P_to_Pext = hypre_CTAlloc(HYPRE_Int, num_cols_offd_P, HYPRE_MEMORY_HOST);
cnt = 0;
for (i=0; i < num_cols_offd_Pext; i++)
if (col_map_offd_Pext[i] == col_map_offd_P[cnt])
{
map_P_to_Pext[cnt++] = i;
if (cnt == num_cols_offd_P) break;
}
}
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_RENUMBER_COLIDX] += hypre_MPI_Wtime();
hypre_profile_times[HYPRE_TIMER_ID_RENUMBER_COLIDX_RAP] += hypre_MPI_Wtime();
#endif
/*-----------------------------------------------------------------------
* First Pass: Determine size of RAP_int and set up RAP_int_i if there
* are more than one processor and nonzero elements in R_offd
*-----------------------------------------------------------------------*/
P_mark_array = hypre_CTAlloc(HYPRE_Int *, num_threads, HYPRE_MEMORY_HOST);
A_mark_array = hypre_CTAlloc(HYPRE_Int *, num_threads, HYPRE_MEMORY_HOST);
if (num_cols_offd_RT)
{
jj_count = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,ic,i1,i2,i3,jj1,jj2,jj3,ns,ne,size,rest,jj_counter,jj_row_begining,A_marker,P_marker) HYPRE_SMP_SCHEDULE
#endif
for (ii = 0; ii < num_threads; ii++)
{
size = num_cols_offd_RT/num_threads;
rest = num_cols_offd_RT - size*num_threads;
if (ii < rest)
{
ns = ii*size+ii;
ne = (ii+1)*size+ii+1;
}
else
{
ns = ii*size+rest;
ne = (ii+1)*size+rest;
}
/*-----------------------------------------------------------------------
* Allocate marker arrays.
*-----------------------------------------------------------------------*/
if (num_cols_offd_Pext || num_cols_diag_P)
{
P_mark_array[ii] = hypre_CTAlloc(HYPRE_Int, num_cols_diag_P+num_cols_offd_Pext, HYPRE_MEMORY_HOST);
P_marker = P_mark_array[ii];
}
A_mark_array[ii] = hypre_CTAlloc(HYPRE_Int, num_nz_cols_A, HYPRE_MEMORY_HOST);
A_marker = A_mark_array[ii];
/*-----------------------------------------------------------------------
* Initialize some stuff.
*-----------------------------------------------------------------------*/
jj_counter = start_indexing;
for (ic = 0; ic < num_cols_diag_P+num_cols_offd_Pext; ic++)
{
P_marker[ic] = -1;
}
for (i = 0; i < num_nz_cols_A; i++)
{
A_marker[i] = -1;
}
/*-----------------------------------------------------------------------
* Loop over exterior c-points
*-----------------------------------------------------------------------*/
for (ic = ns; ic < ne; ic++)
{
jj_row_begining = jj_counter;
/*--------------------------------------------------------------------
* Loop over entries in row ic of R_offd.
*--------------------------------------------------------------------*/
for (jj1 = R_offd_i[ic]; jj1 < R_offd_i[ic+1]; jj1++)
{
i1 = R_offd_j[jj1];
/*-----------------------------------------------------------------
* Loop over entries in row i1 of A_offd.
*-----------------------------------------------------------------*/
for (jj2 = A_offd_i[i1]; jj2 < A_offd_i[i1+1]; jj2++)
{
i2 = A_offd_j[jj2];
/*--------------------------------------------------------------
* Check A_marker to see if point i2 has been previously
* visited. New entries in RAP only occur from unmarked points.
*--------------------------------------------------------------*/
if (A_marker[i2] != ic)
{
/*-----------------------------------------------------------
* Mark i2 as visited.
*-----------------------------------------------------------*/
A_marker[i2] = ic;
/*-----------------------------------------------------------
* Loop over entries in row i2 of P_ext.
*-----------------------------------------------------------*/
for (jj3 = P_ext_diag_i[i2]; jj3 < P_ext_diag_i[i2+1]; jj3++)
{
i3 = P_ext_diag_j[jj3];
/*--------------------------------------------------------
* Check P_marker to see that RAP_{ic,i3} has not already
* been accounted for. If it has not, mark it and increment
* counter.
*--------------------------------------------------------*/
if (P_marker[i3] < jj_row_begining)
{
P_marker[i3] = jj_counter;
jj_counter++;
}
}
for (jj3 = P_ext_offd_i[i2]; jj3 < P_ext_offd_i[i2+1]; jj3++)
{
i3 = P_ext_offd_j[jj3] + num_cols_diag_P;
/*--------------------------------------------------------
* Check P_marker to see that RAP_{ic,i3} has not already
* been accounted for. If it has not, mark it and increment
* counter.
*--------------------------------------------------------*/
if (P_marker[i3] < jj_row_begining)
{
P_marker[i3] = jj_counter;
jj_counter++;
}
}
}
}
/*-----------------------------------------------------------------
* Loop over entries in row i1 of A_diag.
*-----------------------------------------------------------------*/
for (jj2 = A_diag_i[i1]; jj2 < A_diag_i[i1+1]; jj2++)
{
i2 = A_diag_j[jj2];
/*--------------------------------------------------------------
* Check A_marker to see if point i2 has been previously
* visited. New entries in RAP only occur from unmarked points.
*--------------------------------------------------------------*/
if (A_marker[i2+num_cols_offd_A] != ic)
{
/*-----------------------------------------------------------
* Mark i2 as visited.
*-----------------------------------------------------------*/
A_marker[i2+num_cols_offd_A] = ic;
/*-----------------------------------------------------------
* Loop over entries in row i2 of P_diag.
*-----------------------------------------------------------*/
for (jj3 = P_diag_i[i2]; jj3 < P_diag_i[i2+1]; jj3++)
{
i3 = P_diag_j[jj3];
/*--------------------------------------------------------
* Check P_marker to see that RAP_{ic,i3} has not already
* been accounted for. If it has not, mark it and increment
* counter.
*--------------------------------------------------------*/
if (P_marker[i3] < jj_row_begining)
{
P_marker[i3] = jj_counter;
jj_counter++;
}
}
/*-----------------------------------------------------------
* Loop over entries in row i2 of P_offd.
*-----------------------------------------------------------*/
for (jj3 = P_offd_i[i2]; jj3 < P_offd_i[i2+1]; jj3++)
{
i3 = map_P_to_Pext[P_offd_j[jj3]] + num_cols_diag_P;
/*--------------------------------------------------------
* Check P_marker to see that RAP_{ic,i3} has not already
* been accounted for. If it has not, mark it and increment
* counter.
*--------------------------------------------------------*/
if (P_marker[i3] < jj_row_begining)
{
P_marker[i3] = jj_counter;
jj_counter++;
}
}
}
}
}
}
jj_count[ii] = jj_counter;
}
/*-----------------------------------------------------------------------
* Allocate RAP_int_data and RAP_int_j arrays.
*-----------------------------------------------------------------------*/
for (i = 0; i < num_threads-1; i++)
jj_count[i+1] += jj_count[i];
RAP_size = jj_count[num_threads-1];
RAP_int_i = hypre_CTAlloc(HYPRE_Int, num_cols_offd_RT+1, HYPRE_MEMORY_HOST);
RAP_int_data = hypre_CTAlloc(HYPRE_Real, RAP_size, HYPRE_MEMORY_HOST);
RAP_int_j = hypre_CTAlloc(HYPRE_BigInt, RAP_size, HYPRE_MEMORY_HOST);
RAP_int_i[num_cols_offd_RT] = RAP_size;
/*-----------------------------------------------------------------------
* Second Pass: Fill in RAP_int_data and RAP_int_j.
*-----------------------------------------------------------------------*/
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,ic,i1,i2,i3,jj1,jj2,jj3,ns,ne,size,rest,jj_counter,jj_row_begining,A_marker,P_marker,r_entry,r_a_product,r_a_p_product) HYPRE_SMP_SCHEDULE
#endif
for (ii = 0; ii < num_threads; ii++)
{
size = num_cols_offd_RT/num_threads;
rest = num_cols_offd_RT - size*num_threads;
if (ii < rest)
{
ns = ii*size+ii;
ne = (ii+1)*size+ii+1;
}
else
{
ns = ii*size+rest;
ne = (ii+1)*size+rest;
}
/*-----------------------------------------------------------------------
* Initialize some stuff.
*-----------------------------------------------------------------------*/
if (num_cols_offd_Pext || num_cols_diag_P)
P_marker = P_mark_array[ii];
A_marker = A_mark_array[ii];
jj_counter = start_indexing;
if (ii > 0) jj_counter = jj_count[ii-1];
for (ic = 0; ic < num_cols_diag_P+num_cols_offd_Pext; ic++)
{
P_marker[ic] = -1;
}
for (i = 0; i < num_nz_cols_A; i++)
{
A_marker[i] = -1;
}
/*-----------------------------------------------------------------------
* Loop over exterior c-points.
*-----------------------------------------------------------------------*/
for (ic = ns; ic < ne; ic++)
{
jj_row_begining = jj_counter;
RAP_int_i[ic] = jj_counter;
/*--------------------------------------------------------------------
* Loop over entries in row ic of R_offd.
*--------------------------------------------------------------------*/
for (jj1 = R_offd_i[ic]; jj1 < R_offd_i[ic+1]; jj1++)
{
i1 = R_offd_j[jj1];
r_entry = R_offd_data[jj1];
/*-----------------------------------------------------------------
* Loop over entries in row i1 of A_offd.
*-----------------------------------------------------------------*/
for (jj2 = A_offd_i[i1]; jj2 < A_offd_i[i1+1]; jj2++)
{
i2 = A_offd_j[jj2];
r_a_product = r_entry * A_offd_data[jj2];
/*--------------------------------------------------------------
* Check A_marker to see if point i2 has been previously
* visited. New entries in RAP only occur from unmarked points.
*--------------------------------------------------------------*/
if (A_marker[i2] != ic)
{
/*-----------------------------------------------------------
* Mark i2 as visited.
*-----------------------------------------------------------*/
A_marker[i2] = ic;
/*-----------------------------------------------------------
* Loop over entries in row i2 of P_ext.
*-----------------------------------------------------------*/
for (jj3 = P_ext_diag_i[i2]; jj3 < P_ext_diag_i[i2+1]; jj3++)
{
i3 = P_ext_diag_j[jj3];
r_a_p_product = r_a_product * P_ext_diag_data[jj3];
/*--------------------------------------------------------
* Check P_marker to see that RAP_{ic,i3} has not already
* been accounted for. If it has not, create a new entry.
* If it has, add new contribution.
*--------------------------------------------------------*/
if (P_marker[i3] < jj_row_begining)
{
P_marker[i3] = jj_counter;
RAP_int_data[jj_counter] = r_a_p_product;
RAP_int_j[jj_counter] = (HYPRE_BigInt)i3 + first_col_diag_P;
jj_counter++;
}
else
{
RAP_int_data[P_marker[i3]] += r_a_p_product;
}
}
for (jj3 = P_ext_offd_i[i2]; jj3 < P_ext_offd_i[i2+1]; jj3++)
{
i3 = P_ext_offd_j[jj3] + num_cols_diag_P;
r_a_p_product = r_a_product * P_ext_offd_data[jj3];
/*--------------------------------------------------------
* Check P_marker to see that RAP_{ic,i3} has not already
* been accounted for. If it has not, create a new entry.
* If it has, add new contribution.
*--------------------------------------------------------*/
if (P_marker[i3] < jj_row_begining)
{
P_marker[i3] = jj_counter;
RAP_int_data[jj_counter] = r_a_p_product;
RAP_int_j[jj_counter]
= col_map_offd_Pext[i3-num_cols_diag_P];
jj_counter++;
}
else
{
RAP_int_data[P_marker[i3]] += r_a_p_product;
}
}
}
/*--------------------------------------------------------------
* If i2 is previously visited ( A_marker[i2]=ic ) it yields
* no new entries in RAP and can just add new contributions.
*--------------------------------------------------------------*/
else
{
for (jj3 = P_ext_diag_i[i2]; jj3 < P_ext_diag_i[i2+1]; jj3++)
{
i3 = P_ext_diag_j[jj3];
r_a_p_product = r_a_product * P_ext_diag_data[jj3];
RAP_int_data[P_marker[i3]] += r_a_p_product;
}
for (jj3 = P_ext_offd_i[i2]; jj3 < P_ext_offd_i[i2+1]; jj3++)
{
i3 = P_ext_offd_j[jj3] + num_cols_diag_P;
r_a_p_product = r_a_product * P_ext_offd_data[jj3];
RAP_int_data[P_marker[i3]] += r_a_p_product;
}
}
}
/*-----------------------------------------------------------------
* Loop over entries in row i1 of A_diag.
*-----------------------------------------------------------------*/
for (jj2 = A_diag_i[i1]; jj2 < A_diag_i[i1+1]; jj2++)
{
i2 = A_diag_j[jj2];
r_a_product = r_entry * A_diag_data[jj2];
/*--------------------------------------------------------------
* Check A_marker to see if point i2 has been previously
* visited. New entries in RAP only occur from unmarked points.
*--------------------------------------------------------------*/
if (A_marker[i2+num_cols_offd_A] != ic)
{
/*-----------------------------------------------------------
* Mark i2 as visited.
*-----------------------------------------------------------*/
A_marker[i2+num_cols_offd_A] = ic;
/*-----------------------------------------------------------
* Loop over entries in row i2 of P_diag.
*-----------------------------------------------------------*/
for (jj3 = P_diag_i[i2]; jj3 < P_diag_i[i2+1]; jj3++)
{
i3 = P_diag_j[jj3];
r_a_p_product = r_a_product * P_diag_data[jj3];
/*--------------------------------------------------------
* Check P_marker to see that RAP_{ic,i3} has not already
* been accounted for. If it has not, create a new entry.
* If it has, add new contribution.
*--------------------------------------------------------*/
if (P_marker[i3] < jj_row_begining)
{
P_marker[i3] = jj_counter;
RAP_int_data[jj_counter] = r_a_p_product;
RAP_int_j[jj_counter] = (HYPRE_BigInt)i3 + first_col_diag_P;
jj_counter++;
}
else
{
RAP_int_data[P_marker[i3]] += r_a_p_product;
}
}
for (jj3 = P_offd_i[i2]; jj3 < P_offd_i[i2+1]; jj3++)
{
i3 = map_P_to_Pext[P_offd_j[jj3]] + num_cols_diag_P;
r_a_p_product = r_a_product * P_offd_data[jj3];
/*--------------------------------------------------------
* Check P_marker to see that RAP_{ic,i3} has not already
* been accounted for. If it has not, create a new entry.
* If it has, add new contribution.
*--------------------------------------------------------*/
if (P_marker[i3] < jj_row_begining)
{
P_marker[i3] = jj_counter;
RAP_int_data[jj_counter] = r_a_p_product;
RAP_int_j[jj_counter] =
col_map_offd_Pext[i3-num_cols_diag_P];
jj_counter++;
}
else
{
RAP_int_data[P_marker[i3]] += r_a_p_product;
}
}
}
/*--------------------------------------------------------------
* If i2 is previously visited ( A_marker[i2]=ic ) it yields
* no new entries in RAP and can just add new contributions.
*--------------------------------------------------------------*/
else
{
for (jj3 = P_diag_i[i2]; jj3 < P_diag_i[i2+1]; jj3++)
{
i3 = P_diag_j[jj3];
r_a_p_product = r_a_product * P_diag_data[jj3];
RAP_int_data[P_marker[i3]] += r_a_p_product;
}
for (jj3 = P_offd_i[i2]; jj3 < P_offd_i[i2+1]; jj3++)
{
i3 = map_P_to_Pext[P_offd_j[jj3]] + num_cols_diag_P;
r_a_p_product = r_a_product * P_offd_data[jj3];
RAP_int_data[P_marker[i3]] += r_a_p_product;
}
}
}
}
}
if (num_cols_offd_Pext || num_cols_diag_P)
hypre_TFree(P_mark_array[ii], HYPRE_MEMORY_HOST);
hypre_TFree(A_mark_array[ii], HYPRE_MEMORY_HOST);
}
RAP_int = hypre_CSRMatrixCreate(num_cols_offd_RT,num_rows_offd_RT,RAP_size);
hypre_CSRMatrixI(RAP_int) = RAP_int_i;
hypre_CSRMatrixBigJ(RAP_int) = RAP_int_j;
hypre_CSRMatrixData(RAP_int) = RAP_int_data;
hypre_TFree(jj_count, HYPRE_MEMORY_HOST);
}
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_RENUMBER_COLIDX] -= hypre_MPI_Wtime();
hypre_profile_times[HYPRE_TIMER_ID_RENUMBER_COLIDX_RAP] -= hypre_MPI_Wtime();
#endif
RAP_ext_size = 0;
if (num_sends_RT || num_recvs_RT)
{
RAP_ext = hypre_ExchangeRAPData(RAP_int,comm_pkg_RT);
RAP_ext_i = hypre_CSRMatrixI(RAP_ext);
RAP_ext_j = hypre_CSRMatrixBigJ(RAP_ext);
RAP_ext_data = hypre_CSRMatrixData(RAP_ext);
RAP_ext_size = RAP_ext_i[hypre_CSRMatrixNumRows(RAP_ext)];
}
if (num_cols_offd_RT)
{
hypre_CSRMatrixDestroy(RAP_int);
RAP_int = NULL;
}
RAP_diag_i = hypre_TAlloc(HYPRE_Int, num_cols_diag_RT+1, HYPRE_MEMORY_SHARED);
RAP_offd_i = hypre_TAlloc(HYPRE_Int, num_cols_diag_RT+1, HYPRE_MEMORY_SHARED);
first_col_diag_RAP = first_col_diag_P;
last_col_diag_RAP = first_col_diag_P + num_cols_diag_P - 1;
/*-----------------------------------------------------------------------
* check for new nonzero columns in RAP_offd generated through RAP_ext
*-----------------------------------------------------------------------*/
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
hypre_UnorderedBigIntMap col_map_offd_RAP_inverse;
if (RAP_ext_size || num_cols_offd_Pext)
{
hypre_UnorderedBigIntSet found_set;
hypre_UnorderedBigIntSetCreate(&found_set, 2*(RAP_ext_size + num_cols_offd_Pext), 16*hypre_NumThreads());
cnt = 0;
#pragma omp parallel private(i)
{
#pragma omp for HYPRE_SMP_SCHEDULE
for (i = 0; i < RAP_ext_size; i++)
{
if (RAP_ext_j[i] < first_col_diag_RAP
|| RAP_ext_j[i] > last_col_diag_RAP)
hypre_UnorderedBigIntSetPut(&found_set, RAP_ext_j[i]);
}
#pragma omp for HYPRE_SMP_SCHEDULE
for (i = 0; i < num_cols_offd_Pext; i++)
{
hypre_UnorderedBigIntSetPut(&found_set, col_map_offd_Pext[i]);
}
} /* omp parallel */
temp = hypre_UnorderedBigIntSetCopyToArray(&found_set, &num_cols_offd_RAP);
hypre_UnorderedBigIntSetDestroy(&found_set);
hypre_big_sort_and_create_inverse_map(temp, num_cols_offd_RAP, &col_map_offd_RAP, &col_map_offd_RAP_inverse);
}
#else /* !HYPRE_CONCURRENT_HOPSCOTCH */
if (RAP_ext_size || num_cols_offd_Pext)
{
temp = hypre_CTAlloc(HYPRE_BigInt, RAP_ext_size+num_cols_offd_Pext, HYPRE_MEMORY_HOST);
cnt = 0;
for (i=0; i < RAP_ext_size; i++)
if (RAP_ext_j[i] < first_col_diag_RAP
|| RAP_ext_j[i] > last_col_diag_RAP)
temp[cnt++] = RAP_ext_j[i];
for (i=0; i < num_cols_offd_Pext; i++)
temp[cnt++] = col_map_offd_Pext[i];
if (cnt)
{
hypre_BigQsort0(temp,0,cnt-1);
HYPRE_BigInt value = temp[0];
num_cols_offd_RAP = 1;
for (i=1; i < cnt; i++)
{
if (temp[i] > value)
{
value = temp[i];
temp[num_cols_offd_RAP++] = value;
}
}
}
/* now evaluate col_map_offd_RAP */
if (num_cols_offd_RAP)
col_map_offd_RAP = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd_RAP, HYPRE_MEMORY_HOST);
for (i=0 ; i < num_cols_offd_RAP; i++)
col_map_offd_RAP[i] = temp[i];
hypre_TFree(temp, HYPRE_MEMORY_HOST);
}
#endif /* !HYPRE_CONCURRENT_HOPSCOTCH */
if (num_cols_offd_P)
{
map_P_to_RAP = hypre_TAlloc(HYPRE_Int, num_cols_offd_P, HYPRE_MEMORY_HOST);
cnt = 0;
for (i=0; i < num_cols_offd_RAP; i++)
if (col_map_offd_RAP[i] == col_map_offd_P[cnt])
{
map_P_to_RAP[cnt++] = i;
if (cnt == num_cols_offd_P) break;
}
}
if (num_cols_offd_Pext)
{
map_Pext_to_RAP = hypre_TAlloc(HYPRE_Int, num_cols_offd_Pext, HYPRE_MEMORY_HOST);
cnt = 0;
for (i=0; i < num_cols_offd_RAP; i++)
if (col_map_offd_RAP[i] == col_map_offd_Pext[cnt])
{
map_Pext_to_RAP[cnt++] = i;
if (cnt == num_cols_offd_Pext) break;
}
}
/*-----------------------------------------------------------------------
* Convert RAP_ext column indices
*-----------------------------------------------------------------------*/
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < RAP_ext_size; i++)
if (RAP_ext_j[i] < first_col_diag_RAP
|| RAP_ext_j[i] > last_col_diag_RAP)
RAP_ext_j[i] = (HYPRE_BigInt)num_cols_diag_P
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
+(HYPRE_BigInt)hypre_UnorderedBigIntMapGet(&col_map_offd_RAP_inverse, RAP_ext_j[i]);
#else
+(HYPRE_BigInt)hypre_BigBinarySearch(col_map_offd_RAP, RAP_ext_j[i],num_cols_offd_RAP);
#endif
else
RAP_ext_j[i] -= first_col_diag_RAP;
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
if (num_cols_offd_RAP)
hypre_UnorderedBigIntMapDestroy(&col_map_offd_RAP_inverse);
#endif
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_RENUMBER_COLIDX] += hypre_MPI_Wtime();
hypre_profile_times[HYPRE_TIMER_ID_RENUMBER_COLIDX_RAP] += hypre_MPI_Wtime();
#endif
/* need to allocate new P_marker etc. and make further changes */
/*-----------------------------------------------------------------------
* Initialize some stuff.
*-----------------------------------------------------------------------*/
jj_cnt_diag = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
jj_cnt_offd = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,k,jcol,ii,ic,i1,i2,i3,jj1,jj2,jj3,ns,ne,size,rest,jj_count_diag,jj_count_offd,jj_row_begin_diag,jj_row_begin_offd,A_marker,P_marker) HYPRE_SMP_SCHEDULE
#endif
for (ii = 0; ii < num_threads; ii++)
{
size = num_cols_diag_RT/num_threads;
rest = num_cols_diag_RT - size*num_threads;
if (ii < rest)
{
ns = ii*size+ii;
ne = (ii+1)*size+ii+1;
}
else
{
ns = ii*size+rest;
ne = (ii+1)*size+rest;
}
P_mark_array[ii] = hypre_CTAlloc(HYPRE_Int, num_cols_diag_P+num_cols_offd_RAP, HYPRE_MEMORY_HOST);
A_mark_array[ii] = hypre_CTAlloc(HYPRE_Int, num_nz_cols_A, HYPRE_MEMORY_HOST);
P_marker = P_mark_array[ii];
A_marker = A_mark_array[ii];
jj_count_diag = start_indexing;
jj_count_offd = start_indexing;
for (ic = 0; ic < num_cols_diag_P+num_cols_offd_RAP; ic++)
{
P_marker[ic] = -1;
}
for (i = 0; i < num_nz_cols_A; i++)
{
A_marker[i] = -1;
}
/*-----------------------------------------------------------------------
* Loop over interior c-points.
*-----------------------------------------------------------------------*/
for (ic = ns; ic < ne; ic++)
{
/*--------------------------------------------------------------------
* Set marker for diagonal entry, RAP_{ic,ic}, and for all points
* being added to row ic of RAP_diag and RAP_offd through RAP_ext
*--------------------------------------------------------------------*/
jj_row_begin_diag = jj_count_diag;
jj_row_begin_offd = jj_count_offd;
if (square)
P_marker[ic] = jj_count_diag++;
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
if (send_map_elmts_RT_inverse_map_initialized)
{
HYPRE_Int i = hypre_UnorderedIntMapGet(&send_map_elmts_RT_inverse_map, ic);
if (i != -1)
{
for (j = send_map_elmts_starts_RT_aggregated[i]; j < send_map_elmts_starts_RT_aggregated[i + 1]; j++)
{
HYPRE_Int jj = send_map_elmts_RT_aggregated[j];
for (k=RAP_ext_i[jj]; k < RAP_ext_i[jj+1]; k++)
{
jcol = (HYPRE_Int)RAP_ext_j[k];
if (jcol < num_cols_diag_P)
{
if (P_marker[jcol] < jj_row_begin_diag)
{
P_marker[jcol] = jj_count_diag;
jj_count_diag++;
}
}
else
{
if (P_marker[jcol] < jj_row_begin_offd)
{
P_marker[jcol] = jj_count_offd;
jj_count_offd++;
}
}
}
}
} // if (set)
}
#else /* !HYPRE_CONCURRENT_HOPSCOTCH */
for (i=0; i < num_sends_RT; i++)
for (j = send_map_starts_RT[i]; j < send_map_starts_RT[i+1]; j++)
if (send_map_elmts_RT[j] == ic)
{
for (k=RAP_ext_i[j]; k < RAP_ext_i[j+1]; k++)
{
jcol = (HYPRE_Int) RAP_ext_j[k];
if (jcol < num_cols_diag_P)
{
if (P_marker[jcol] < jj_row_begin_diag)
{
P_marker[jcol] = jj_count_diag;
jj_count_diag++;
}
}
else
{
if (P_marker[jcol] < jj_row_begin_offd)
{
P_marker[jcol] = jj_count_offd;
jj_count_offd++;
}
}
}
break;
}
#endif /* !HYPRE_CONCURRENT_HOPSCOTCH */
/*--------------------------------------------------------------------
* Loop over entries in row ic of R_diag.
*--------------------------------------------------------------------*/
for (jj1 = R_diag_i[ic]; jj1 < R_diag_i[ic+1]; jj1++)
{
i1 = R_diag_j[jj1];
/*-----------------------------------------------------------------
* Loop over entries in row i1 of A_offd.
*-----------------------------------------------------------------*/
if (num_cols_offd_A)
{
for (jj2 = A_offd_i[i1]; jj2 < A_offd_i[i1+1]; jj2++)
{
i2 = A_offd_j[jj2];
/*--------------------------------------------------------------
* Check A_marker to see if point i2 has been previously
* visited. New entries in RAP only occur from unmarked points.
*--------------------------------------------------------------*/
if (A_marker[i2] != ic)
{
/*-----------------------------------------------------------
* Mark i2 as visited.
*-----------------------------------------------------------*/
A_marker[i2] = ic;
/*-----------------------------------------------------------
* Loop over entries in row i2 of P_ext.
*-----------------------------------------------------------*/
for (jj3 = P_ext_diag_i[i2]; jj3 < P_ext_diag_i[i2+1]; jj3++)
{
i3 = P_ext_diag_j[jj3];
/*--------------------------------------------------------
* Check P_marker to see that RAP_{ic,i3} has not already
* been accounted for. If it has not, mark it and increment
* counter.
*--------------------------------------------------------*/
if (P_marker[i3] < jj_row_begin_diag)
{
P_marker[i3] = jj_count_diag;
jj_count_diag++;
}
}
for (jj3 = P_ext_offd_i[i2]; jj3 < P_ext_offd_i[i2+1]; jj3++)
{
i3 = map_Pext_to_RAP[P_ext_offd_j[jj3]]+num_cols_diag_P;
/*--------------------------------------------------------
* Check P_marker to see that RAP_{ic,i3} has not already
* been accounted for. If it has not, mark it and increment
* counter.
*--------------------------------------------------------*/
if (P_marker[i3] < jj_row_begin_offd)
{
P_marker[i3] = jj_count_offd;
jj_count_offd++;
}
}
}
}
}
/*-----------------------------------------------------------------
* Loop over entries in row i1 of A_diag.
*-----------------------------------------------------------------*/
for (jj2 = A_diag_i[i1]; jj2 < A_diag_i[i1+1]; jj2++)
{
i2 = A_diag_j[jj2];
/*--------------------------------------------------------------
* Check A_marker to see if point i2 has been previously
* visited. New entries in RAP only occur from unmarked points.
*--------------------------------------------------------------*/
if (A_marker[i2+num_cols_offd_A] != ic)
{
/*-----------------------------------------------------------
* Mark i2 as visited.
*-----------------------------------------------------------*/
A_marker[i2+num_cols_offd_A] = ic;
/*-----------------------------------------------------------
* Loop over entries in row i2 of P_diag.
*-----------------------------------------------------------*/
for (jj3 = P_diag_i[i2]; jj3 < P_diag_i[i2+1]; jj3++)
{
i3 = P_diag_j[jj3];
/*--------------------------------------------------------
* Check P_marker to see that RAP_{ic,i3} has not already
* been accounted for. If it has not, mark it and increment
* counter.
*--------------------------------------------------------*/
if (P_marker[i3] < jj_row_begin_diag)
{
P_marker[i3] = jj_count_diag;
jj_count_diag++;
}
}
/*-----------------------------------------------------------
* Loop over entries in row i2 of P_offd.
*-----------------------------------------------------------*/
if (num_cols_offd_P)
{
for (jj3 = P_offd_i[i2]; jj3 < P_offd_i[i2+1]; jj3++)
{
i3 = map_P_to_RAP[P_offd_j[jj3]] + num_cols_diag_P;
/*--------------------------------------------------------
* Check P_marker to see that RAP_{ic,i3} has not already
* been accounted for. If it has not, mark it and increment
* counter.
*--------------------------------------------------------*/
if (P_marker[i3] < jj_row_begin_offd)
{
P_marker[i3] = jj_count_offd;
jj_count_offd++;
}
}
}
}
}
}
/*--------------------------------------------------------------------
* Set RAP_diag_i and RAP_offd_i for this row.
*--------------------------------------------------------------------*/
/*
RAP_diag_i[ic] = jj_row_begin_diag;
RAP_offd_i[ic] = jj_row_begin_offd;
*/
}
jj_cnt_diag[ii] = jj_count_diag;
jj_cnt_offd[ii] = jj_count_offd;
}
for (i=0; i < num_threads-1; i++)
{
jj_cnt_diag[i+1] += jj_cnt_diag[i];
jj_cnt_offd[i+1] += jj_cnt_offd[i];
}
jj_count_diag = jj_cnt_diag[num_threads-1];
jj_count_offd = jj_cnt_offd[num_threads-1];
RAP_diag_i[num_cols_diag_RT] = jj_count_diag;
RAP_offd_i[num_cols_diag_RT] = jj_count_offd;
/*-----------------------------------------------------------------------
* Allocate RAP_diag_data and RAP_diag_j arrays.
* Allocate RAP_offd_data and RAP_offd_j arrays.
*-----------------------------------------------------------------------*/
RAP_diag_size = jj_count_diag;
if (RAP_diag_size)
{
RAP_diag_data = hypre_CTAlloc(HYPRE_Real, RAP_diag_size, HYPRE_MEMORY_SHARED);
RAP_diag_j = hypre_CTAlloc(HYPRE_Int, RAP_diag_size, HYPRE_MEMORY_SHARED);
}
RAP_offd_size = jj_count_offd;
if (RAP_offd_size)
{
RAP_offd_data = hypre_CTAlloc(HYPRE_Real, RAP_offd_size, HYPRE_MEMORY_SHARED);
RAP_offd_j = hypre_CTAlloc(HYPRE_Int, RAP_offd_size, HYPRE_MEMORY_SHARED);
}
if (RAP_offd_size == 0 && num_cols_offd_RAP != 0)
{
num_cols_offd_RAP = 0;
hypre_TFree(col_map_offd_RAP, HYPRE_MEMORY_HOST);
}
RA_diag_data_array = hypre_TAlloc(HYPRE_Real, num_cols_diag_A*num_threads, HYPRE_MEMORY_HOST);
RA_diag_j_array = hypre_TAlloc(HYPRE_Int, num_cols_diag_A*num_threads, HYPRE_MEMORY_HOST);
if (num_cols_offd_A)
{
RA_offd_data_array = hypre_TAlloc(HYPRE_Real, num_cols_offd_A*num_threads, HYPRE_MEMORY_HOST);
RA_offd_j_array = hypre_TAlloc(HYPRE_Int, num_cols_offd_A*num_threads, HYPRE_MEMORY_HOST);
}
/*-----------------------------------------------------------------------
* Second Pass: Fill in RAP_diag_data and RAP_diag_j.
* Second Pass: Fill in RAP_offd_data and RAP_offd_j.
*-----------------------------------------------------------------------*/
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,k,jcol,ii,ic,i1,i2,i3,jj1,jj2,jj3,ns,ne,size,rest,jj_count_diag,jj_count_offd,jj_row_begin_diag,jj_row_begin_offd,A_marker,P_marker,r_entry,r_a_product,r_a_p_product) HYPRE_SMP_SCHEDULE
#endif
for (ii = 0; ii < num_threads; ii++)
{
size = num_cols_diag_RT/num_threads;
rest = num_cols_diag_RT - size*num_threads;
if (ii < rest)
{
ns = ii*size+ii;
ne = (ii+1)*size+ii+1;
}
else
{
ns = ii*size+rest;
ne = (ii+1)*size+rest;
}
/*-----------------------------------------------------------------------
* Initialize some stuff.
*-----------------------------------------------------------------------*/
P_marker = P_mark_array[ii];
A_marker = A_mark_array[ii];
for (ic = 0; ic < num_cols_diag_P+num_cols_offd_RAP; ic++)
{
P_marker[ic] = -1;
}
for (i = 0; i < num_nz_cols_A ; i++)
{
A_marker[i] = -1;
}
jj_count_diag = start_indexing;
jj_count_offd = start_indexing;
if (ii > 0)
{
jj_count_diag = jj_cnt_diag[ii-1];
jj_count_offd = jj_cnt_offd[ii-1];
}
// temporary matrix RA = R*A
// only need to store one row per thread because R*A and (R*A)*P are fused
// into one loop.
hypre_CSRMatrix RA_diag, RA_offd;
RA_diag.data = RA_diag_data_array + num_cols_diag_A*ii;
RA_diag.j = RA_diag_j_array + num_cols_diag_A*ii;
RA_diag.num_nonzeros = 0;
RA_offd.num_nonzeros = 0;
if (num_cols_offd_A)
{
RA_offd.data = RA_offd_data_array + num_cols_offd_A*ii;
RA_offd.j = RA_offd_j_array + num_cols_offd_A*ii;
}
/*-----------------------------------------------------------------------
* Loop over interior c-points.
*-----------------------------------------------------------------------*/
for (ic = ns; ic < ne; ic++)
{
/*--------------------------------------------------------------------
* Create diagonal entry, RAP_{ic,ic} and add entries of RAP_ext
*--------------------------------------------------------------------*/
jj_row_begin_diag = jj_count_diag;
jj_row_begin_offd = jj_count_offd;
RAP_diag_i[ic] = jj_row_begin_diag;
RAP_offd_i[ic] = jj_row_begin_offd;
HYPRE_Int ra_row_begin_diag = RA_diag.num_nonzeros;
HYPRE_Int ra_row_begin_offd = RA_offd.num_nonzeros;
if (square)
{
P_marker[ic] = jj_count_diag;
RAP_diag_data[jj_count_diag] = zero;
RAP_diag_j[jj_count_diag] = ic;
jj_count_diag++;
}
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
if (send_map_elmts_RT_inverse_map_initialized)
{
HYPRE_Int i = hypre_UnorderedIntMapGet(&send_map_elmts_RT_inverse_map, ic);
if (i != -1)
{
for (j = send_map_elmts_starts_RT_aggregated[i]; j < send_map_elmts_starts_RT_aggregated[i + 1]; j++)
{
HYPRE_Int jj = send_map_elmts_RT_aggregated[j];
for (k=RAP_ext_i[jj]; k < RAP_ext_i[jj+1]; k++)
{
jcol = (HYPRE_Int)RAP_ext_j[k];
if (jcol < num_cols_diag_P)
{
if (P_marker[jcol] < jj_row_begin_diag)
{
P_marker[jcol] = jj_count_diag;
RAP_diag_data[jj_count_diag]
= RAP_ext_data[k];
RAP_diag_j[jj_count_diag] = jcol;
jj_count_diag++;
}
else
RAP_diag_data[P_marker[jcol]]
+= RAP_ext_data[k];
}
else
{
if (P_marker[jcol] < jj_row_begin_offd)
{
P_marker[jcol] = jj_count_offd;
RAP_offd_data[jj_count_offd]
= RAP_ext_data[k];
RAP_offd_j[jj_count_offd]
= jcol-num_cols_diag_P;
jj_count_offd++;
}
else
RAP_offd_data[P_marker[jcol]]
+= RAP_ext_data[k];
}
}
}
} // if (set)
}
#else /* !HYPRE_CONCURRENT_HOPSCOTCH */
for (i=0; i < num_sends_RT; i++)
for (j = send_map_starts_RT[i]; j < send_map_starts_RT[i+1]; j++)
if (send_map_elmts_RT[j] == ic)
{
for (k=RAP_ext_i[j]; k < RAP_ext_i[j+1]; k++)
{
jcol = (HYPRE_Int)RAP_ext_j[k];
if (jcol < num_cols_diag_P)
{
if (P_marker[jcol] < jj_row_begin_diag)
{
P_marker[jcol] = jj_count_diag;
RAP_diag_data[jj_count_diag]
= RAP_ext_data[k];
RAP_diag_j[jj_count_diag] = jcol;
jj_count_diag++;
}
else
RAP_diag_data[P_marker[jcol]]
+= RAP_ext_data[k];
}
else
{
if (P_marker[jcol] < jj_row_begin_offd)
{
P_marker[jcol] = jj_count_offd;
RAP_offd_data[jj_count_offd]
= RAP_ext_data[k];
RAP_offd_j[jj_count_offd]
= jcol-num_cols_diag_P;
jj_count_offd++;
}
else
RAP_offd_data[P_marker[jcol]]
+= RAP_ext_data[k];
}
}
break;
}
#endif /* !HYPRE_CONCURRENT_HOPSCOTCH */
/*--------------------------------------------------------------------
* Loop over entries in row ic of R_diag and compute row ic of RA.
*--------------------------------------------------------------------*/
for (jj1 = R_diag_i[ic]; jj1 < R_diag_i[ic+1]; jj1++)
{
i1 = R_diag_j[jj1];
r_entry = R_diag_data[jj1];
/*-----------------------------------------------------------------
* Loop over entries in row i1 of A_offd.
*-----------------------------------------------------------------*/
if (num_cols_offd_A)
{
for (jj2 = A_offd_i[i1]; jj2 < A_offd_i[i1+1]; jj2++)
{
i2 = A_offd_j[jj2];
HYPRE_Real a_entry = A_offd_data[jj2];
HYPRE_Int marker = A_marker[i2];
/*--------------------------------------------------------------
* Check A_marker to see if point i2 has been previously
* visited. New entries in RAP only occur from unmarked points.
*--------------------------------------------------------------*/
if (marker < ra_row_begin_offd)
{
/*-----------------------------------------------------------
* Mark i2 as visited.
*-----------------------------------------------------------*/
A_marker[i2] = RA_offd.num_nonzeros;
RA_offd.data[RA_offd.num_nonzeros - ra_row_begin_offd] = r_entry * a_entry;
RA_offd.j[RA_offd.num_nonzeros - ra_row_begin_offd] = i2;
RA_offd.num_nonzeros++;
}
/*--------------------------------------------------------------
* If i2 is previously visited ( A_marker[i2] >= ra_row_begin_offd ) it yields
* no new entries in RA and can just add new contributions.
*--------------------------------------------------------------*/
else
{
RA_offd.data[marker - ra_row_begin_offd] += r_entry * a_entry;
// JSP: the compiler is more likely to generate FMA instructions
// when we don't eliminate the common subexpression
// r_entry * A_offd_data[jj2] manually.
}
} // loop over entries in row i1 of A_offd
} // num_cols_offd_A
/*-----------------------------------------------------------------
* Loop over entries in row i1 of A_diag.
*-----------------------------------------------------------------*/
for (jj2 = A_diag_i[i1]; jj2 < A_diag_i[i1+1]; jj2++)
{
i2 = A_diag_j[jj2];
HYPRE_Real a_entry = A_diag_data[jj2];
HYPRE_Int marker = A_marker[i2+num_cols_offd_A];
/*--------------------------------------------------------------
* Check A_marker to see if point i2 has been previously
* visited. New entries in RAP only occur from unmarked points.
*--------------------------------------------------------------*/
if (marker < ra_row_begin_diag)
{
/*-----------------------------------------------------------
* Mark i2 as visited.
*-----------------------------------------------------------*/
A_marker[i2+num_cols_offd_A] = RA_diag.num_nonzeros;
RA_diag.data[RA_diag.num_nonzeros - ra_row_begin_diag] = r_entry * a_entry;
RA_diag.j[RA_diag.num_nonzeros - ra_row_begin_diag] = i2;
RA_diag.num_nonzeros++;
}
/*--------------------------------------------------------------
* If i2 is previously visited ( A_marker[i2] >= ra_row_begin_diag ) it yields
* no new entries in RA and can just add new contributions.
*--------------------------------------------------------------*/
else
{
RA_diag.data[marker - ra_row_begin_diag] += r_entry * a_entry;
}
} // loop over entries in row i1 of A_diag
} // loop over entries in row ic of R_diag
/*--------------------------------------------------------------------
* Loop over entries in row ic of RA_offd.
*--------------------------------------------------------------------*/
for (jj1 = ra_row_begin_offd; jj1 < RA_offd.num_nonzeros; jj1++)
{
i1 = RA_offd.j[jj1 - ra_row_begin_offd];
r_a_product = RA_offd.data[jj1 - ra_row_begin_offd];
/*-----------------------------------------------------------
* Loop over entries in row i1 of P_ext.
*-----------------------------------------------------------*/
for (jj2 = P_ext_diag_i[i1]; jj2 < P_ext_diag_i[i1+1]; jj2++)
{
i2 = P_ext_diag_j[jj2];
HYPRE_Real p_entry = P_ext_diag_data[jj2];
HYPRE_Int marker = P_marker[i2];
/*--------------------------------------------------------
* Check P_marker to see that RAP_{ic,i2} has not already
* been accounted for. If it has not, create a new entry.
* If it has, add new contribution.
*--------------------------------------------------------*/
if (marker < jj_row_begin_diag)
{
P_marker[i2] = jj_count_diag;
RAP_diag_data[jj_count_diag] = r_a_product * p_entry;
RAP_diag_j[jj_count_diag] = i2;
jj_count_diag++;
}
else
RAP_diag_data[marker] += r_a_product * p_entry;
}
for (jj2 = P_ext_offd_i[i1]; jj2 < P_ext_offd_i[i1+1]; jj2++)
{
i2 = map_Pext_to_RAP[P_ext_offd_j[jj2]] + num_cols_diag_P;
HYPRE_Real p_entry = P_ext_offd_data[jj2];
HYPRE_Int marker = P_marker[i2];
/*--------------------------------------------------------
* Check P_marker to see that RAP_{ic,i2} has not already
* been accounted for. If it has not, create a new entry.
* If it has, add new contribution.
*--------------------------------------------------------*/
if (marker < jj_row_begin_offd)
{
P_marker[i2] = jj_count_offd;
RAP_offd_data[jj_count_offd] = r_a_product * p_entry;
RAP_offd_j[jj_count_offd] = i2 - num_cols_diag_P;
jj_count_offd++;
}
else
RAP_offd_data[marker] += r_a_product * p_entry;
}
} // loop over entries in row ic of RA_offd
/*--------------------------------------------------------------------
* Loop over entries in row ic of RA_diag.
*--------------------------------------------------------------------*/
for (jj1 = ra_row_begin_diag; jj1 < RA_diag.num_nonzeros; jj1++)
{
HYPRE_Int i1 = RA_diag.j[jj1 - ra_row_begin_diag];
HYPRE_Real r_a_product = RA_diag.data[jj1 - ra_row_begin_diag];
/*-----------------------------------------------------------------
* Loop over entries in row i1 of P_diag.
*-----------------------------------------------------------------*/
for (jj2 = P_diag_i[i1]; jj2 < P_diag_i[i1+1]; jj2++)
{
i2 = P_diag_j[jj2];
HYPRE_Real p_entry = P_diag_data[jj2];
HYPRE_Int marker = P_marker[i2];
/*--------------------------------------------------------
* Check P_marker to see that RAP_{ic,i2} has not already
* been accounted for. If it has not, create a new entry.
* If it has, add new contribution.
*--------------------------------------------------------*/
if (marker < jj_row_begin_diag)
{
P_marker[i2] = jj_count_diag;
RAP_diag_data[jj_count_diag] = r_a_product * p_entry;
RAP_diag_j[jj_count_diag] = i2;
jj_count_diag++;
}
else
{
RAP_diag_data[marker] += r_a_product * p_entry;
}
}
if (num_cols_offd_P)
{
for (jj2 = P_offd_i[i1]; jj2 < P_offd_i[i1+1]; jj2++)
{
i2 = map_P_to_RAP[P_offd_j[jj2]] + num_cols_diag_P;
HYPRE_Real p_entry = P_offd_data[jj2];
HYPRE_Int marker = P_marker[i2];
/*--------------------------------------------------------
* Check P_marker to see that RAP_{ic,i2} has not already
* been accounted for. If it has not, create a new entry.
* If it has, add new contribution.
*--------------------------------------------------------*/
if (marker < jj_row_begin_offd)
{
P_marker[i2] = jj_count_offd;
RAP_offd_data[jj_count_offd] = r_a_product * p_entry;
RAP_offd_j[jj_count_offd] = i2 - num_cols_diag_P;
jj_count_offd++;
}
else
{
RAP_offd_data[marker] += r_a_product * p_entry;
}
}
} // num_cols_offd_P
} // loop over entries in row ic of RA_diag.
} // Loop over interior c-points.
hypre_TFree(P_mark_array[ii], HYPRE_MEMORY_HOST);
hypre_TFree(A_mark_array[ii], HYPRE_MEMORY_HOST);
} // omp parallel for
/* check if really all off-diagonal entries occurring in col_map_offd_RAP
are represented and eliminate if necessary */
P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_offd_RAP, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < num_cols_offd_RAP; i++)
P_marker[i] = -1;
jj_count_offd = 0;
#ifdef HYPRE_USING_ATOMIC
#pragma omp parallel for private(i3) reduction(+:jj_count_offd) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < RAP_offd_size; i++)
{
i3 = RAP_offd_j[i];
#ifdef HYPRE_USING_ATOMIC
if (hypre_compare_and_swap(P_marker + i3, -1, 0) == -1)
{
jj_count_offd++;
}
#else
if (P_marker[i3])
{
P_marker[i3] = 0;
jj_count_offd++;
}
#endif
}
if (jj_count_offd < num_cols_offd_RAP)
{
new_col_map_offd_RAP = hypre_CTAlloc(HYPRE_BigInt, jj_count_offd, HYPRE_MEMORY_HOST);
jj_counter = 0;
for (i=0; i < num_cols_offd_RAP; i++)
if (!P_marker[i])
{
P_marker[i] = jj_counter;
new_col_map_offd_RAP[jj_counter++] = col_map_offd_RAP[i];
}
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i3) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < RAP_offd_size; i++)
{
i3 = RAP_offd_j[i];
RAP_offd_j[i] = P_marker[i3];
}
num_cols_offd_RAP = jj_count_offd;
hypre_TFree(col_map_offd_RAP, HYPRE_MEMORY_HOST);
col_map_offd_RAP = new_col_map_offd_RAP;
}
hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
RAP = hypre_ParCSRMatrixCreate(comm, n_coarse_RT, n_coarse,
RT_partitioning, coarse_partitioning,
num_cols_offd_RAP, RAP_diag_size,
RAP_offd_size);
/* Have RAP own coarse_partitioning instead of P */
hypre_ParCSRMatrixSetColStartsOwner(P,0);
hypre_ParCSRMatrixSetColStartsOwner(RT,0);
RAP_diag = hypre_ParCSRMatrixDiag(RAP);
hypre_CSRMatrixI(RAP_diag) = RAP_diag_i;
if (RAP_diag_size)
{
hypre_CSRMatrixData(RAP_diag) = RAP_diag_data;
hypre_CSRMatrixJ(RAP_diag) = RAP_diag_j;
}
RAP_offd = hypre_ParCSRMatrixOffd(RAP);
hypre_CSRMatrixI(RAP_offd) = RAP_offd_i;
if (num_cols_offd_RAP)
{
hypre_CSRMatrixData(RAP_offd) = RAP_offd_data;
hypre_CSRMatrixJ(RAP_offd) = RAP_offd_j;
hypre_ParCSRMatrixColMapOffd(RAP) = col_map_offd_RAP;
}
if (num_procs > 1)
{
/* hypre_GenerateRAPCommPkg(RAP, A); */
hypre_MatvecCommPkgCreate(RAP);
}
*RAP_ptr = RAP;
/*-----------------------------------------------------------------------
* Free R, P_ext and marker arrays.
*-----------------------------------------------------------------------*/
if (keepTranspose)
{
hypre_ParCSRMatrixDiagT(RT) = R_diag;
}
else
{
hypre_CSRMatrixDestroy(R_diag);
}
R_diag = NULL;
if (num_cols_offd_RT)
{
if (keepTranspose)
{
hypre_ParCSRMatrixOffdT(RT) = R_offd;
}
else
{
hypre_CSRMatrixDestroy(R_offd);
}
R_offd = NULL;
}
if (num_sends_RT || num_recvs_RT)
{
hypre_CSRMatrixDestroy(RAP_ext);
RAP_ext = NULL;
}
hypre_TFree(P_mark_array, HYPRE_MEMORY_HOST);
hypre_TFree(A_mark_array, HYPRE_MEMORY_HOST);
hypre_TFree(P_ext_diag_i, HYPRE_MEMORY_HOST);
hypre_TFree(P_ext_offd_i, HYPRE_MEMORY_HOST);
hypre_TFree(jj_cnt_diag, HYPRE_MEMORY_HOST);
hypre_TFree(jj_cnt_offd, HYPRE_MEMORY_HOST);
if (num_cols_offd_P)
{
hypre_TFree(map_P_to_Pext, HYPRE_MEMORY_HOST);
hypre_TFree(map_P_to_RAP, HYPRE_MEMORY_HOST);
}
if (num_cols_offd_Pext)
{
hypre_TFree(col_map_offd_Pext, HYPRE_MEMORY_HOST);
hypre_TFree(map_Pext_to_RAP, HYPRE_MEMORY_HOST);
}
if (P_ext_diag_size)
{
hypre_TFree(P_ext_diag_data, HYPRE_MEMORY_HOST);
hypre_TFree(P_ext_diag_j, HYPRE_MEMORY_HOST);
}
if (P_ext_offd_size)
{
hypre_TFree(P_ext_offd_data, HYPRE_MEMORY_HOST);
hypre_TFree(P_ext_offd_j, HYPRE_MEMORY_HOST);
}
hypre_TFree(RA_diag_data_array, HYPRE_MEMORY_HOST);
hypre_TFree(RA_diag_j_array, HYPRE_MEMORY_HOST);
if (num_cols_offd_A)
{
hypre_TFree(RA_offd_data_array, HYPRE_MEMORY_HOST);
hypre_TFree(RA_offd_j_array, HYPRE_MEMORY_HOST);
}
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
if (send_map_elmts_RT_inverse_map_initialized)
{
hypre_UnorderedIntMapDestroy(&send_map_elmts_RT_inverse_map);
}
hypre_TFree(send_map_elmts_starts_RT_aggregated, HYPRE_MEMORY_HOST);
hypre_TFree(send_map_elmts_RT_aggregated, HYPRE_MEMORY_HOST);
#endif
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_RAP] += hypre_MPI_Wtime();
#endif
return(0);
}
|
atomics.h | #ifndef ATOMICS_H
#define ATOMICS_H
#include<omp.h>
#include"graph.h"
/* Atomically accumulate val into *target.
   Zero is the additive identity, so the read-modify-write is skipped
   entirely in that case.
   NOTE(review): plain C99 `inline` in a header requires an `extern inline`
   declaration in exactly one translation unit (or `static inline` here);
   confirm the build provides one. */
inline void ATOMIC_ADD(value_t *target, value_t val) {
    if (val != 0) {
        #pragma omp atomic
        *target += val;
    }
}
/* Atomically scale *target by val.  A factor of one (the multiplicative
   identity) cannot change *target, so the atomic update is bypassed. */
inline void ATOMIC_MULT(value_t *target, value_t val) {
    if (val != 1) {
        #pragma omp atomic
        *target *= val;
    }
}
/* Atomically lower *target to value if value is smaller.
   OpenMP has no "atomic min" RMW (before the 5.1 `atomic compare`
   construct), so a critical section is used.  The section is *named* so
   that it does not contend on the single global lock shared by every
   unnamed `#pragma omp critical` elsewhere in the program. */
inline void ATOMIC_MIN(value_t *target, value_t value){
#pragma omp critical (atomics_h_min)
    {
        *target = (*target < value) ? *target : value;
    }
}
/* Atomically AND val into *target.
   All-ones is the identity for bitwise AND, so -- consistent with the
   val == 0 / val == 1 fast paths of ATOMIC_ADD / ATOMIC_MULT -- the
   atomic RMW is skipped when it could not change *target.
   (value_t must be an integer type for &= to compile at all.) */
inline void ATOMIC_AND(value_t *target, value_t val) {
    if (val == (value_t)~(value_t)0) return;
#pragma omp atomic
    *target &= val;
}
/* Atomically OR val into *target.
   Zero is the identity for bitwise OR, so -- consistent with the val == 0 /
   val == 1 fast paths of ATOMIC_ADD / ATOMIC_MULT -- the atomic RMW is
   skipped when it could not change *target. */
inline void ATOMIC_OR(value_t *target, value_t val) {
    if (val == 0) return;
#pragma omp atomic
    *target |= val;
}
#endif
|
parser.h | /* Data structures and function exported by the C++ Parser.
Copyright (C) 2010-2018 Free Software Foundation, Inc.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.
GCC is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
#ifndef GCC_CP_PARSER_H
#define GCC_CP_PARSER_H
#include "tree.h"
#include "cp/cp-tree.h"
#include "c-family/c-pragma.h"
/* A token's value and its associated deferred access checks and
qualifying scope. */
struct GTY(()) tree_check {
/* The value associated with the token. */
tree value;
/* The checks that have been associated with value. */
vec<deferred_access_check, va_gc> *checks;
/* The token's qualifying scope (used when it is a
CPP_NESTED_NAME_SPECIFIER). */
tree qualifying_scope;
};
/* A C++ token. */
struct GTY (()) cp_token {
/* The kind of token. */
ENUM_BITFIELD (cpp_ttype) type : 8;
/* If this token is a keyword, this value indicates which keyword.
Otherwise, this value is RID_MAX. */
ENUM_BITFIELD (rid) keyword : 8;
/* Token flags. */
unsigned char flags;
/* True if this token is from a context where it is implicitly extern "C" */
BOOL_BITFIELD implicit_extern_c : 1;
/* True if an error has already been reported for this token, such as a
CPP_NAME token that is not a keyword (i.e., for which KEYWORD is
RID_MAX) iff this name was looked up and found to be ambiguous. */
BOOL_BITFIELD error_reported : 1;
/* True for a token that has been purged. If a token is purged,
it is no longer a valid token and it should be considered
deleted. */
BOOL_BITFIELD purged_p : 1;
/* 5 unused bits. */
/* The location at which this token was found. */
location_t location;
/* The value associated with this token, if any. */
union cp_token_value {
/* Used for compound tokens such as CPP_NESTED_NAME_SPECIFIER. */
struct tree_check* GTY((tag ("1"))) tree_check_value;
/* Use for all other tokens. */
tree GTY((tag ("0"))) value;
} GTY((desc ("(%1.type == CPP_TEMPLATE_ID)"
"|| (%1.type == CPP_NESTED_NAME_SPECIFIER)"
"|| (%1.type == CPP_DECLTYPE)"))) u;
};
/* We use a stack of token pointer for saving token sets. */
typedef struct cp_token *cp_token_position;
/* The cp_lexer structure represents the C++ lexer. It is responsible
for managing the token stream from the preprocessor and supplying
it to the parser. Tokens are never added to the cp_lexer after
it is created. */
struct GTY (()) cp_lexer {
/* The memory allocated for the buffer. NULL if this lexer does not
own the token buffer. */
vec<cp_token, va_gc> *buffer;
/* A pointer just past the last available token. The tokens
in this lexer are [buffer, last_token). */
cp_token_position GTY ((skip)) last_token;
/* The next available token. If NEXT_TOKEN is &eof_token, then there are
no more available tokens. */
cp_token_position GTY ((skip)) next_token;
/* A stack indicating positions at which cp_lexer_save_tokens was
called. The top entry is the most recent position at which we
began saving tokens. If the stack is non-empty, we are saving
tokens. */
vec<cp_token_position> GTY ((skip)) saved_tokens;
/* The next lexer in a linked list of lexers. */
struct cp_lexer *next;
/* True if we should output debugging information. */
bool debugging_p;
/* True if we're in the context of parsing a pragma, and should not
increment past the end-of-line marker. */
bool in_pragma;
};
/* cp_token_cache is a range of tokens. There is no need to represent
allocate heap memory for it, since tokens are never removed from the
lexer's array. There is also no need for the GC to walk through
a cp_token_cache, since everything in here is referenced through
a lexer. */
struct GTY(()) cp_token_cache {
/* The beginning of the token range. */
cp_token * GTY((skip)) first;
/* Points immediately after the last token in the range. */
cp_token * GTY ((skip)) last;
};
typedef cp_token_cache *cp_token_cache_ptr;
/* A group of length/pointer string pairs describing an identifier and,
   presumably, the raw source text immediately before and after it (the
   consuming code is not visible here).  Lengths are carried explicitly,
   so the strings need not be NUL-terminated.
   NOTE(review): the pointers look borrowed rather than owned -- confirm
   their lifetime against whoever fills this structure in.  */
struct cp_token_ident
{
  /* Length of IDENT_STR.  */
  unsigned int ident_len;
  /* The identifier's spelling.  */
  const char *ident_str;
  /* Length of BEFORE_STR.  */
  unsigned int before_len;
  /* Text appearing before the identifier.  */
  const char *before_str;
  /* Length of AFTER_STR.  */
  unsigned int after_len;
  /* Text appearing after the identifier.  */
  const char *after_str;
};
/* An entry in a queue of function arguments that require post-processing. */
struct GTY(()) cp_default_arg_entry {
/* The current_class_type when we parsed this arg. */
tree class_type;
/* The function decl itself. */
tree decl;
};
/* An entry in a stack for member functions defined within their classes. */
struct GTY(()) cp_unparsed_functions_entry {
/* Functions with default arguments that require post-processing.
Functions appear in this list in declaration order. */
vec<cp_default_arg_entry, va_gc> *funs_with_default_args;
/* Functions with defintions that require post-processing. Functions
appear in this list in declaration order. */
vec<tree, va_gc> *funs_with_definitions;
/* Non-static data members with initializers that require post-processing.
FIELD_DECLs appear in this list in declaration order. */
vec<tree, va_gc> *nsdmis;
/* Nested classes go in this vector, so that we can do some final
processing after parsing any NSDMIs. */
vec<tree, va_gc> *classes;
};
/* The status of a tentative parse. */
enum cp_parser_status_kind
{
/* No errors have occurred. */
CP_PARSER_STATUS_KIND_NO_ERROR,
/* An error has occurred. */
CP_PARSER_STATUS_KIND_ERROR,
/* We are committed to this tentative parse, whether or not an error
has occurred. */
CP_PARSER_STATUS_KIND_COMMITTED
};
/* Context that is saved and restored when parsing tentatively. */
struct GTY (()) cp_parser_context {
/* If this is a tentative parsing context, the status of the
tentative parse. */
enum cp_parser_status_kind status;
/* If non-NULL, we have just seen a `x->' or `x.' expression. Names
that are looked up in this context must be looked up both in the
scope given by OBJECT_TYPE (the type of `x' or `*x') and also in
the context of the containing expression. */
tree object_type;
/* The next parsing context in the stack. */
struct cp_parser_context *next;
};
/* Helper data structure for parsing #pragma omp declare simd. */
struct cp_omp_declare_simd_data {
bool error_seen; /* Set if error has been reported. */
bool fndecl_seen; /* Set if one fn decl/definition has been seen already. */
vec<cp_token_cache_ptr> tokens;
tree clauses;
};
/* Helper data structure for parsing #pragma acc routine. */
struct cp_oacc_routine_data : cp_omp_declare_simd_data {
location_t loc;
};
/* The cp_parser structure represents the C++ parser. */
struct GTY(()) cp_parser {
/* The lexer from which we are obtaining tokens. */
cp_lexer *lexer;
/* The scope in which names should be looked up. If NULL_TREE, then
we look up names in the scope that is currently open in the
source program. If non-NULL, this is either a TYPE or
NAMESPACE_DECL for the scope in which we should look. It can
also be ERROR_MARK, when we've parsed a bogus scope.
This value is not cleared automatically after a name is looked
up, so we must be careful to clear it before starting a new look
up sequence. (If it is not cleared, then `X::Y' followed by `Z'
will look up `Z' in the scope of `X', rather than the current
scope.) Unfortunately, it is difficult to tell when name lookup
is complete, because we sometimes peek at a token, look it up,
and then decide not to consume it. */
tree scope;
/* OBJECT_SCOPE and QUALIFYING_SCOPE give the scopes in which the
last lookup took place. OBJECT_SCOPE is used if an expression
like "x->y" or "x.y" was used; it gives the type of "*x" or "x",
respectively. QUALIFYING_SCOPE is used for an expression of the
form "X::Y"; it refers to X. */
tree object_scope;
tree qualifying_scope;
/* A stack of parsing contexts. All but the bottom entry on the
stack will be tentative contexts.
We parse tentatively in order to determine which construct is in
use in some situations. For example, in order to determine
whether a statement is an expression-statement or a
declaration-statement we parse it tentatively as a
declaration-statement. If that fails, we then reparse the same
token stream as an expression-statement. */
cp_parser_context *context;
/* True if we are parsing GNU C++. If this flag is not set, then
GNU extensions are not recognized. */
bool allow_gnu_extensions_p;
/* TRUE if the `>' token should be interpreted as the greater-than
operator. FALSE if it is the end of a template-id or
template-parameter-list. In C++0x mode, this flag also applies to
`>>' tokens, which are viewed as two consecutive `>' tokens when
this flag is FALSE. */
bool greater_than_is_operator_p;
/* TRUE if default arguments are allowed within a parameter list
that starts at this point. FALSE if only a gnu extension makes
them permissible. */
bool default_arg_ok_p;
/* TRUE if we are parsing an integral constant-expression. See
[expr.const] for a precise definition. */
bool integral_constant_expression_p;
/* TRUE if we are parsing an integral constant-expression -- but a
non-constant expression should be permitted as well. This flag
is used when parsing an array bound so that GNU variable-length
arrays are tolerated. */
bool allow_non_integral_constant_expression_p;
/* TRUE if ALLOW_NON_CONSTANT_EXPRESSION_P is TRUE and something has
been seen that makes the expression non-constant. */
bool non_integral_constant_expression_p;
/* TRUE if local variable names and `this' are forbidden in the
current context. */
bool local_variables_forbidden_p;
/* TRUE if the declaration we are parsing is part of a
linkage-specification of the form `extern string-literal
declaration'. */
bool in_unbraced_linkage_specification_p;
/* TRUE if we are presently parsing a declarator, after the
direct-declarator. */
bool in_declarator_p;
/* TRUE if we are presently parsing a template-argument-list. */
bool in_template_argument_list_p;
/* Set to IN_ITERATION_STMT if parsing an iteration-statement,
to IN_OMP_BLOCK if parsing OpenMP structured block and
IN_OMP_FOR if parsing OpenMP loop. If parsing a switch statement,
this is bitwise ORed with IN_SWITCH_STMT, unless parsing an
iteration-statement, OpenMP block or loop within that switch. */
#define IN_SWITCH_STMT 1
#define IN_ITERATION_STMT 2
#define IN_OMP_BLOCK 4
#define IN_OMP_FOR 8
#define IN_IF_STMT 16
unsigned char in_statement;
/* TRUE if we are presently parsing the body of a switch statement.
Note that this doesn't quite overlap with in_statement above.
The difference relates to giving the right sets of error messages:
"case not in switch" vs "break statement used with OpenMP...". */
bool in_switch_statement_p;
/* TRUE if we are parsing a type-id in an expression context. In
such a situation, both "type (expr)" and "type (type)" are valid
alternatives. */
bool in_type_id_in_expr_p;
/* TRUE if we are currently in a header file where declarations are
implicitly extern "C". */
bool implicit_extern_c;
/* TRUE if strings in expressions should be translated to the execution
character set. */
bool translate_strings_p;
/* TRUE if we are presently parsing the body of a function, but not
a local class. */
bool in_function_body;
/* Nonzero if we're processing a __transaction_atomic or
__transaction_relaxed statement. */
unsigned char in_transaction;
/* TRUE if we can auto-correct a colon to a scope operator. */
bool colon_corrects_to_scope_p;
/* TRUE if : doesn't start a class definition. Should be only used
together with type_definition_forbidden_message non-NULL, in
contexts where new types may not be defined, and the type list
is terminated by colon. */
bool colon_doesnt_start_class_def_p;
/* If non-NULL, then we are parsing a construct where new type
definitions are not permitted. The string stored here will be
issued as an error message if a type is defined. */
const char *type_definition_forbidden_message;
/* A stack used for member functions of local classes. The lists
contained in an individual entry can only be processed once the
outermost class being defined is complete. */
vec<cp_unparsed_functions_entry, va_gc> *unparsed_queues;
/* The number of classes whose definitions are currently in
progress. */
unsigned num_classes_being_defined;
/* The number of template parameter lists that apply directly to the
current declaration. */
unsigned num_template_parameter_lists;
/* When parsing #pragma omp declare simd, this is a pointer to a
helper data structure. */
cp_omp_declare_simd_data * GTY((skip)) omp_declare_simd;
/* When parsing #pragma acc routine, this is a pointer to a helper data
structure. */
cp_oacc_routine_data * GTY((skip)) oacc_routine;
/* Nonzero if parsing a parameter list where 'auto' should trigger an implicit
template parameter. */
bool auto_is_implicit_function_template_parm_p;
/* TRUE if the function being declared was made a template due to its
parameter list containing generic type specifiers (`auto' or concept
identifiers) rather than an explicit template parameter list. */
bool fully_implicit_function_template_p;
/* Tracks the function's template parameter list when declaring a function
using generic type parameters. This is either a new chain in the case of a
fully implicit function template or an extension of the function's existing
template parameter list. This is tracked to optimize calls subsequent
calls to synthesize_implicit_template_parm during
cp_parser_parameter_declaration. */
tree implicit_template_parms;
/* The scope into which an implicit template parameter list has been
introduced or an existing template parameter list is being extended with
implicit template parameters. In most cases this is the sk_function_parms
scope containing the use of a generic type. In the case of an out-of-line
member definition using a generic type, it is the sk_class scope. */
cp_binding_level* implicit_template_scope;
/* True if parsing a result type in a compound requirement. This permits
constrained-type-specifiers inside what would normally be a trailing
return type. */
bool in_result_type_constraint_p;
/* True if a constrained-type-specifier is not allowed in this
context e.g., because they could never be deduced. */
int prevent_constrained_type_specifiers;
/* Location of the string-literal token within the current linkage
specification, if any, or UNKNOWN_LOCATION otherwise. */
location_t innermost_linkage_specification_location;
};
/* In parser.c */
extern void debug (cp_token &ref);
extern void debug (cp_token *ptr);
extern void cp_lexer_debug_tokens (vec<cp_token, va_gc> *);
extern void debug (vec<cp_token, va_gc> &ref);
extern void debug (vec<cp_token, va_gc> *ptr);
extern void cp_debug_parser (FILE *, cp_parser *);
extern void debug (cp_parser &ref);
extern void debug (cp_parser *ptr);
extern bool cp_keyword_starts_decl_specifier_p (enum rid keyword);
#endif /* GCC_CP_PARSER_H */
|
consecutive_write.c | #include <unistd.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include "constants.h"
/**
* Deinterleave (transpose) an IQUV ring buffer page to the ordering needed for FITS files
* Note that this is probably a slow function, and is not meant to be run real-time
*
* data in: tab, channel/4, time/500 packets of time,channel,pn
* data out: tab, channel, pol, time
*
* Suggested use is:
* 1. realtime: ringbuffer -> [trigger] -> dada_dbdisk
* 2. offline: dada_dbdisk -> ringbuffer -> dadafits
*
* @param {const char *} page Ringbuffer page with interleaved data
* @param {const char *} transposed
* @param {int} ntabs Number of tabs
* @param {int} nchannels Number of channels
* @param {int} npackets Number of packets per sequence
*/
void deinterleave (const unsigned char *page, unsigned char *transposed, const int ntabs, const int nchannels, const int npackets) {
  int tab;
  for (tab = 0; tab < ntabs; tab++) {
    int channel_offset;
    /* Channels are processed in groups of 4, matching the packet layout. */
    for (channel_offset = 0; channel_offset < nchannels; channel_offset += 4) {
      const unsigned char *src = &page[(tab * nchannels + channel_offset)*NPOLS*npackets*NSAMPS];
      unsigned char *dest = &transposed[(tab * nchannels + channel_offset)*NPOLS*npackets*NSAMPS];
      int pc;
      /* pc walks the 16 interleaved (channel, pol) byte lanes, 4 at a time. */
      for (pc = 0; pc < 16; pc += 4) {
        int tn;
#pragma omp parallel for
        for (tn = 0; tn < npackets*NSAMPS; tn++) {
          /* Load 4 adjacent lanes at once.  memcpy replaces the original
             pointer cast, which was an unaligned, strict-aliasing-violating
             read (undefined behavior). */
          unsigned int data;
          memcpy(&data, &src[tn*NPOLS*NCHANS + pc], sizeof data);
          /* BUG FIX: the original used logical '&&' instead of bitwise '&',
             which collapsed every sample byte to 0 or 1.  '& 255' extracts
             the byte.  NOTE(review): the lane order assumes a little-endian
             host (lane pc+0 is the lowest byte) -- confirm for the target. */
          dest[tn + (pc + 0) * npackets*NSAMPS] = (data >>  0) & 255;
          dest[tn + (pc + 1) * npackets*NSAMPS] = (data >>  8) & 255;
          dest[tn + (pc + 2) * npackets*NSAMPS] = (data >> 16) & 255;
          dest[tn + (pc + 3) * npackets*NSAMPS] = (data >> 24) & 255;
        }
      }
    } // channel_offset
  } // tab
}
/* Manually unrolling the loop over pc gives slight speedups; code below for reference
*
int tn = 0;
for (tn = 0; tn < npackets*NSAMPS; tn++) {
unsigned int data = *((unsigned int *) &src[tn*NPOLS*NCHANS]);
dest[tn + 0 * npackets*NSAMPS] = (data >> 0) && 255; // src[tn*NPOLS*NCHANS + 0];
dest[tn + 1 * npackets*NSAMPS] = (data >> 8) && 255; // src[tn*NPOLS*NCHANS + 1];
dest[tn + 2 * npackets*NSAMPS] = (data >> 16) && 255; // src[tn*NPOLS*NCHANS + 2];
dest[tn + 3 * npackets*NSAMPS] = (data >> 24) && 255; // src[tn*NPOLS*NCHANS + 3];
}
for (tn = 0; tn < npackets*NSAMPS; tn++) {
unsigned int data = *((unsigned int *) &src[tn*NPOLS*NCHANS + 4]);
dest[tn + 4 * npackets*NSAMPS] = (data >> 0) && 255; // src[tn*NPOLS*NCHANS + 4];
dest[tn + 5 * npackets*NSAMPS] = (data >> 8) && 255; // src[tn*NPOLS*NCHANS + 5];
dest[tn + 6 * npackets*NSAMPS] = (data >> 16) && 255; // src[tn*NPOLS*NCHANS + 6];
dest[tn + 7 * npackets*NSAMPS] = (data >> 24) && 255; // src[tn*NPOLS*NCHANS + 7];
};
for (tn = 0; tn < npackets*NSAMPS; tn++) {
unsigned int data = *((unsigned int *) &src[tn*NPOLS*NCHANS + 8]);
dest[tn + 8 * npackets*NSAMPS] = (data >> 0) && 255; // src[tn*NPOLS*NCHANS + 8];
dest[tn + 9 * npackets*NSAMPS] = (data >> 8) && 255; // src[tn*NPOLS*NCHANS + 9];
dest[tn + 10 * npackets*NSAMPS] = (data >> 16) && 255; // src[tn*NPOLS*NCHANS + 10];
dest[tn + 11 * npackets*NSAMPS] = (data >> 24) && 255; // src[tn*NPOLS*NCHANS + 11];
};
for (tn = 0; tn < npackets*NSAMPS; tn++) {
unsigned int data = *((unsigned int *) &src[tn*NPOLS*NCHANS + 12]);
dest[tn + 12 * npackets*NSAMPS] = (data >> 0) && 255; // src[tn*NPOLS*NCHANS + 12];
dest[tn + 13 * npackets*NSAMPS] = (data >> 8) && 255; // src[tn*NPOLS*NCHANS + 13];
dest[tn + 14 * npackets*NSAMPS] = (data >> 16) && 255; // src[tn*NPOLS*NCHANS + 14];
dest[tn + 15 * npackets*NSAMPS] = (data >> 24) && 255; // src[tn*NPOLS*NCHANS + 15];
}
*/
|
omp_loop.h | // -*- C++ -*-
// Copyright (C) 2007-2018 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the terms
// of the GNU General Public License as published by the Free Software
// Foundation; either version 3, or (at your option) any later
// version.
// This library is distributed in the hope that it will be useful, but
// WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// General Public License for more details.
// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.
// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
// <http://www.gnu.org/licenses/>.
/** @file parallel/omp_loop.h
* @brief Parallelization of embarrassingly parallel execution by
* means of an OpenMP for loop.
* This file is a GNU parallel extension to the Standard C++ Library.
*/
// Written by Felix Putze.
#ifndef _GLIBCXX_PARALLEL_OMP_LOOP_H
#define _GLIBCXX_PARALLEL_OMP_LOOP_H 1
#include <omp.h>
#include <parallel/settings.h>
#include <parallel/basic_iterator.h>
#include <parallel/base.h>
namespace __gnu_parallel
{
/** @brief Embarrassingly parallel algorithm for random access
* iterators, using an OpenMP for loop.
*
* @param __begin Begin iterator of element sequence.
* @param __end End iterator of element sequence.
* @param __o User-supplied functor (comparator, predicate, adding
* functor, etc.).
* @param __f Functor to @a process an element with __op (depends on
* desired functionality, e. g. for std::for_each(), ...).
* @param __r Functor to @a add a single __result to the already
* processed elements (depends on functionality).
* @param __base Base value for reduction.
* @param __output Pointer to position where final result is written to
* @param __bound Maximum number of elements processed (e. g. for
* std::count_n()).
* @return User-supplied functor (that may contain a part of the result).
*/
template<typename _RAIter,
         typename _Op,
         typename _Fu,
         typename _Red,
         typename _Result>
  _Op
  __for_each_template_random_access_omp_loop(_RAIter __begin, _RAIter __end,
                                             _Op __o, _Fu& __f, _Red __r,
                                             _Result __base,
                                             _Result& __output,
    typename std::iterator_traits<_RAIter>::difference_type __bound)
  {
    typedef typename std::iterator_traits<_RAIter>::difference_type
      _DifferenceType;
    _DifferenceType __length = __end - __begin;
    // Never request more threads than there are elements.
    _ThreadIndex __num_threads = __gnu_parallel::min<_DifferenceType>
      (__get_max_threads(), __length);
    // One partial result per thread; allocated once the team size is known.
    _Result *__thread_results;
#   pragma omp parallel num_threads(__num_threads)
    {
      // One thread sizes and value-initializes the per-thread result slots
      // (the actual team may be smaller than requested; implicit barrier at
      // the end of `single` publishes __thread_results to the team).
#     pragma omp single
      {
        __num_threads = omp_get_num_threads();
        __thread_results = new _Result[__num_threads];
        for (_ThreadIndex __i = 0; __i < __num_threads; ++__i)
          __thread_results[__i] = _Result();
      }
      _ThreadIndex __iam = omp_get_thread_num();
      // Each thread folds its share of elements into its own slot;
      // dynamic scheduling approximates work stealing.
      // NOTE(review): adjacent _Result slots can share a cache line
      // (false sharing) -- confirm this is acceptable for hot uses.
#     pragma omp for schedule(dynamic, _Settings::get().workstealing_chunk_size)
      for (_DifferenceType __pos = 0; __pos < __length; ++__pos)
        __thread_results[__iam] = __r(__thread_results[__iam],
                                      __f(__o, __begin+__pos));
    } //parallel
    // Sequentially reduce the per-thread partials into __output.
    // (__base and __bound are not referenced by this back end.)
    for (_ThreadIndex __i = 0; __i < __num_threads; ++__i)
      __output = __r(__output, __thread_results[__i]);
    delete [] __thread_results;
    // Points to last element processed (needed as return value for
    // some algorithms like transform).
    __f._M_finish_iterator = __begin + __length;
    return __o;
  }
} // end namespace
#endif /* _GLIBCXX_PARALLEL_OMP_LOOP_H */
|
graph.c | #include "graph.h"
// Returns an array of graph nodes built from a csr format matrix
// Returns an array of graph nodes built from a CSR format matrix.
// colIDs/rowDel are the standard CSR column-index and row-pointer arrays;
// self-loops (colIDs[k] == i) are dropped from the neighbor lists.
// Returns NULL if the top-level allocation fails; the caller owns the result.
node* buildGraph(int vertices, const int* colIDs, const int* rowDel){
    // Allocate memory for graph array
    node* graph = calloc(vertices, sizeof(node));
    if (graph == NULL) return NULL; // robustness: surface allocation failure
    #pragma omp parallel shared(graph, vertices, colIDs, rowDel) default(none)
    {
        // Node initialization is independent per node, so share it across the
        // team (the original ran it under `omp single`, serializing it).  The
        // implicit barrier at the end of this worksharing loop still orders
        // it before the neighbor-filling loop below.
        #pragma omp for
        for (int i = 0; i < vertices; i++) {
            graph[i].id = i;
            graph[i].level = INT_MAX;
            // Count neighbors (may include a self-loop, removed below; the
            // neighbor array is then slightly over-allocated on purpose).
            graph[i].neighborsNum = rowDel[i + 1] - rowDel[i];
            // Add neighbors to the neighbors list
            graph[i].neighbors = malloc(graph[i].neighborsNum * sizeof(int));
        }
        #pragma omp for
        for (int i = 0; i < vertices; i++) {
            for (int k = rowDel[graph[i].id], elCounter = 0; k < rowDel[graph[i].id + 1]; k++) {
                if (colIDs[k] != graph[i].id) {
                    graph[i].neighbors[elCounter] = colIDs[k];
                    elCounter++;
                }else{
                    // Self-loop: not stored, shrink the logical count.
                    graph[i].neighborsNum--;
                }
            }
        }
    }
    return graph;
}
node* buildGraphSequential(int vertices, const int* colIDs, const int* rowDel){
// Allocate memory for graph array
node* graph = calloc(vertices, sizeof(node));
// Go through nodes and initialize them
for(int i = 0; i < vertices; i++){
graph[i].id = i;
graph[i].level = INT_MAX;
// Count neighbors
graph[i].neighborsNum = rowDel[i+1] - rowDel[i];
// Add neighbors to the neighbors list
graph[i].neighbors = malloc(graph[i].neighborsNum * sizeof(int));
int elCounter = 0;
for(int k = rowDel[graph[i].id]; k < rowDel[graph[i].id + 1]; k++){
if(colIDs[k] == graph[i].id){
graph[i].neighborsNum--;
}
if(colIDs[k] != graph[i].id){
graph[i].neighbors[elCounter] = colIDs[k];
elCounter++;
}
}
}
return graph;
}
// Return a list of nodes-neighbors, children, and the count of children of a node that are on a specified level
// Collects into `children` pointers to the neighbors of n whose level equals
// `level`; returns how many were stored.  `children` must have room for
// n->neighborsNum entries.
int neighborsAtLevel(node* graph, node* n, int level, node** children){
    int found = 0;
    int idx;
    for (idx = 0; idx < n->neighborsNum; idx++){
        node* nb = &graph[n->neighbors[idx]];
        if (nb->level == level)
            children[found++] = nb;
    }
    return found;
}
// Get the height, width and number of nodes on each level of the graph
// Get the height (maximum level), width (maximum number of nodes on any
// level) and per-level node counts of the graph.  *levelCounts is allocated
// here (height+1 entries) and owned by the caller.
void graphHeightWidthCounts(node* graph, int vertices, int* height, int* width, int** levelCounts){
    // Allocate shared per-thread scratch arrays:
    // localcount[tid*vertices + L] = this thread's node count at level L,
    // localmax[tid]                = deepest level this thread has seen.
    // NOTE(review): indexing assumes every node's level is < vertices (set by
    // a prior traversal); an unvisited node (level == INT_MAX) would index
    // out of bounds -- confirm against callers.
    int maxThreads = omp_get_max_threads();
    int* localcount = calloc(maxThreads * vertices, sizeof(int));
    int* localmax = calloc(maxThreads, sizeof(int));
    #pragma omp parallel shared(localcount, localmax, graph, vertices, height, levelCounts) default(none)
    {
        int tid = omp_get_thread_num();
        int teamThreads = omp_get_team_size(omp_get_level());
        // BUG FIX: the original loop carried `nowait`, letting a fast thread
        // fall into the `single` below and read localmax/localcount while
        // other threads were still writing them (a data race).  The implicit
        // barrier at the end of this loop is required.
        #pragma omp for schedule(guided)
        for(int i = 0; i < vertices; i++){
            localcount[tid * vertices + graph[i].level]++;
            if(localmax[tid] < graph[i].level){
                localmax[tid] = graph[i].level;
            }
        }
        #pragma omp single
        {
            // Determine height (max_level) across all threads.
            *height = 0;
            for (int k = 0; k < teamThreads; k++) {
                if (*height < localmax[k]) {
                    *height = localmax[k];
                }
            }
            // Allocate the per-level totals (levels are 0..height).
            *levelCounts = calloc(*height + 1, sizeof(int));
        } // implicit barrier publishes *levelCounts to the whole team
        // Reduce per-thread counters into *levelCounts.  `nowait` is safe
        // here: the implicit barrier ending the parallel region suffices.
        #pragma omp for schedule(guided) nowait
        for(int i = 0; i < *height + 1; i++){
            for(int id = 0; id < teamThreads; id++){
                (*levelCounts)[i] += localcount[id * vertices + i];
            }
        }
    }
    // Determine width (maximum number of nodes on a level)
    *width = (*levelCounts)[0];
    for(int j = 1; j < *height + 1; j++){
        if((*levelCounts)[j] > *width){
            *width = (*levelCounts)[j];
        }
    }
    // Free memory
    free(localcount);
    free(localmax);
}
// Returns a pointer to an array with the nodes at a specific level.
// Returns a freshly-allocated array holding pointers to the `levelNodes`
// vertices whose level equals `level`.  The caller frees the array.
node** graphVerticesAt(node* graph, int vertices, int level, int levelNodes){
    node** result = calloc(levelNodes, sizeof(node*));
    int next = 0;
    int v;
    for (v = 0; v < vertices; v++){
        if (graph[v].level != level)
            continue;
        result[next++] = &graph[v];
    }
    return result;
}
// Returns an array with the prefix sums of the provided counts array
// Returns a newly-allocated array holding the inclusive prefix sums of the
// provided counts array.  `counts` has height+1 entries (levels start at 0).
// The caller owns and must free() the result.
int* prefixSums(int* counts, int height){
    // Create a copy of the provided array to act upon and return
    // maxLevel + 1 because levels start from 0
    int* prefix_sums;
    prefix_sums = malloc((height + 1) * sizeof(int));
    memcpy(prefix_sums, counts, (height + 1) * sizeof(int));
    // Small inputs: the parallel machinery costs more than it saves.
    if(height < PREFIX_SUMS_SEQ_LIMIT){
        prefixSumsSeq(prefix_sums, height + 1);
        return prefix_sums;
    }
    // Get and set maximum number of threads and calculate work per thread and number of switching stages
    int maxThreads = PREFIX_SUMS_THREADS;
    // omp_set_num_threads(maxThreads);
    // We assume a system with threads at a power of 2
    // NOTE(review): (int)pow(2, i)/log2 rely on exact floating point; 1 << i
    // and an integer log would be safer -- confirm before changing.
    int num_changes = (int)log2(maxThreads);
    // Chunk up the work: workIndexes[t]..workIndexes[t+1] is thread t's
    // slice; the first `remainder` slices get one extra element.
    int chunk = (height + 1) / maxThreads;
    int remainder = (height + 1) % maxThreads;
    int workIndexes[maxThreads + 1];
    workIndexes[0] = 0;
    for(int i = 1, sum = 0; i < maxThreads + 1; i++){
        sum += chunk;
        if(i <= remainder)
            sum++;
        workIndexes[i] = sum;
    }
    // Parallel phase: each thread scans its slice locally, then the slice
    // totals are combined with a hypercube-style exchange over
    // log2(maxThreads) rounds (partner = tid ^ 2^i).  cPrefix/cTotal hold
    // the values other threads may read this round; lPrefix/lTotal receive
    // next round's values.
    int cPrefix[maxThreads], cTotal[maxThreads], lPrefix[maxThreads], lTotal[maxThreads];
    #pragma omp parallel shared(prefix_sums, workIndexes, cPrefix, cTotal, lPrefix, lTotal, maxThreads, num_changes) \
    default(none) num_threads(PREFIX_SUMS_THREADS)
    {
        int tid = omp_get_thread_num();
        int elementsNum = workIndexes[tid+1] - workIndexes[tid];
        // Local in-place scan of this thread's slice.
        prefixSumsSeq(prefix_sums + workIndexes[tid], elementsNum);
        // Seed the exchange with the slice's total (its last prefix sum).
        cPrefix[tid] = cTotal[tid] = prefix_sums[workIndexes[tid+1] - 1];
        lPrefix[tid] = lTotal[tid] = prefix_sums[workIndexes[tid+1] - 1];
        #pragma omp barrier
        for(int i = 0; i < num_changes; i++){
            int tidn = tid ^ ((int)pow(2, i));
            if((tidn < maxThreads) && (tidn != tid)){
                if(tidn > tid){
                    // Partner lies to the right: our subtotal precedes its
                    // slice, so it joins both the partner's prefix and total.
                    lPrefix[tidn] += cTotal[tid];
                    lTotal[tidn] += cTotal[tid];
                }else{
                    // Partner lies to the left: our subtotal only joins its
                    // running total, not its prefix.
                    lTotal[tidn] += cTotal[tid];
                }
            }
            // Sync threads before writing to the cross arrays (cTotal/cPrefix)
            #pragma omp barrier
            cPrefix[tid] = lPrefix[tid];
            cTotal[tid] = lTotal[tid];
            #pragma omp barrier
        }
        // Offset every element of this slice by the sum of all preceding
        // slices (the final cPrefix of the thread to the left).
        if(tid != 0){
            for(int j = workIndexes[tid]; j < workIndexes[tid+1]; j++){
                prefix_sums[j] += cPrefix[tid - 1];
            }
        }
    }
    return prefix_sums;
}
// Sequential prefix sums on array
void prefixSumsSeq(int* array, int elements){
int sum = 0;
for(int i = 0; i < elements; i++){
sum += array[i];
array[i] = sum;
}
} |
loop-7.c | /* { dg-do run } */
#include <omp.h>
extern void abort (void);
#define LLONG_MAX __LONG_LONG_MAX__
#define ULLONG_MAX (LLONG_MAX * 2ULL + 1)
#define INT_MAX __INT_MAX__
int v;
int
test1 (void)
{
int e = 0, cnt = 0;
long long i;
unsigned long long j;
char buf[6], *p;
#pragma omp for schedule(dynamic,1) collapse(2) nowait
for (i = LLONG_MAX - 30001; i <= LLONG_MAX - 10001; i += 10000)
for (j = 20; j <= LLONG_MAX - 70; j += LLONG_MAX + 50ULL)
if ((i != LLONG_MAX - 30001
&& i != LLONG_MAX - 20001
&& i != LLONG_MAX - 10001)
|| j != 20)
e = 1;
else
cnt++;
if (e || cnt != 3)
abort ();
else
cnt = 0;
#pragma omp for schedule(guided,1) collapse(2) nowait
for (i = -LLONG_MAX + 30000; i >= -LLONG_MAX + 10000; i -= 10000)
for (j = ULLONG_MAX - 3; j >= LLONG_MAX + 70ULL; j -= LLONG_MAX + 50ULL)
if ((i != -LLONG_MAX + 30000
&& i != -LLONG_MAX + 20000
&& i != -LLONG_MAX + 10000)
|| j != ULLONG_MAX - 3)
e = 1;
else
cnt++;
if (e || cnt != 3)
abort ();
else
cnt = 0;
#pragma omp for schedule(static,1) collapse(2) nowait
for (i = LLONG_MAX - 30001; i <= LLONG_MAX - 10001; i += 10000)
for (j = 20; j <= LLONG_MAX - 70 + v; j += LLONG_MAX + 50ULL)
if ((i != LLONG_MAX - 30001
&& i != LLONG_MAX - 20001
&& i != LLONG_MAX - 10001)
|| j != 20)
e = 1;
else
cnt++;
if (e || cnt != 3)
abort ();
else
cnt = 0;
#pragma omp for schedule(static) collapse(2) nowait
for (i = -LLONG_MAX + 30000 + v; i >= -LLONG_MAX + 10000; i -= 10000)
for (j = ULLONG_MAX - 3; j >= LLONG_MAX + 70ULL; j -= LLONG_MAX + 50ULL)
if ((i != -LLONG_MAX + 30000
&& i != -LLONG_MAX + 20000
&& i != -LLONG_MAX + 10000)
|| j != ULLONG_MAX - 3)
e = 1;
else
cnt++;
if (e || cnt != 3)
abort ();
else
cnt = 0;
#pragma omp for schedule(runtime) collapse(2) nowait
for (i = 10; i < 30; i++)
for (p = buf; p <= buf + 4; p += 2)
if (i < 10 || i >= 30 || (p != buf && p != buf + 2 && p != buf + 4))
e = 1;
else
cnt++;
if (e || cnt != 60)
abort ();
else
cnt = 0;
return 0;
}
int
main (void)
{
if (2 * sizeof (int) != sizeof (long long))
return 0;
asm volatile ("" : "+r" (v));
omp_set_schedule (omp_sched_dynamic, 1);
test1 ();
return 0;
}
|
2911.c | /* POLYBENCH/GPU-OPENMP
*
* This file is a part of the Polybench/GPU-OpenMP suite
*
* Contact:
* William Killian <killian@udel.edu>
*
* Copyright 2013, The University of Delaware
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
/* Default data type is double, default size is 4000. */
#include "3mm.h"
/* Array initialization. */
static
void init_array(int ni, int nj, int nk, int nl, int nm,
DATA_TYPE POLYBENCH_2D(A,NI,NK,ni,nk),
DATA_TYPE POLYBENCH_2D(B,NK,NJ,nk,nj),
DATA_TYPE POLYBENCH_2D(C,NJ,NM,nj,nm),
DATA_TYPE POLYBENCH_2D(D,NM,NL,nm,nl))
{
int i, j;
for (i = 0; i < ni; i++)
for (j = 0; j < nk; j++)
A[i][j] = ((DATA_TYPE) i*j) / ni;
for (i = 0; i < nk; i++)
for (j = 0; j < nj; j++)
B[i][j] = ((DATA_TYPE) i*(j+1)) / nj;
for (i = 0; i < nj; i++)
for (j = 0; j < nm; j++)
C[i][j] = ((DATA_TYPE) i*(j+3)) / nl;
for (i = 0; i < nm; i++)
for (j = 0; j < nl; j++)
D[i][j] = ((DATA_TYPE) i*(j+2)) / nk;
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
static
void print_array(int ni, int nl,
DATA_TYPE POLYBENCH_2D(G,NI,NL,ni,nl))
{
int i, j;
for (i = 0; i < ni; i++)
for (j = 0; j < nl; j++) {
fprintf (stderr, DATA_PRINTF_MODIFIER, G[i][j]);
if ((i * ni + j) % 20 == 0) fprintf (stderr, "\n");
}
fprintf (stderr, "\n");
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
static
void kernel_3mm(int ni, int nj, int nk, int nl, int nm,
DATA_TYPE POLYBENCH_2D(E,NI,NJ,ni,nj),
DATA_TYPE POLYBENCH_2D(A,NI,NK,ni,nk),
DATA_TYPE POLYBENCH_2D(B,NK,NJ,nk,nj),
DATA_TYPE POLYBENCH_2D(F,NJ,NL,nj,nl),
DATA_TYPE POLYBENCH_2D(C,NJ,NM,nj,nm),
DATA_TYPE POLYBENCH_2D(D,NM,NL,nm,nl),
DATA_TYPE POLYBENCH_2D(G,NI,NL,ni,nl))
{
int i, j, k;
#pragma scop
#pragma omp parallel private (i, j, k) num_threads(#P11)
{
/* E := A*B */
for (i = 0; i < _PB_NI; i++)
{
for (j = 0; j < _PB_NJ; j++)
{
E[i][j] = 0;
for (k = 0; k < _PB_NK; ++k)
E[i][j] += A[i][k] * B[k][j];
}
}
/* F := C*D */
for (i = 0; i < _PB_NJ; i++)
{
for (j = 0; j < _PB_NL; j++)
{
F[i][j] = 0;
for (k = 0; k < _PB_NM; ++k)
F[i][j] += C[i][k] * D[k][j];
}
}
/* G := E*F */
for (i = 0; i < _PB_NI; i++)
{
for (j = 0; j < _PB_NL; j++)
{
G[i][j] = 0;
for (k = 0; k < _PB_NJ; ++k)
G[i][j] += E[i][k] * F[k][j];
}
}
}
#pragma endscop
}
int main(int argc, char** argv)
{
/* Retrieve problem size. */
int ni = NI;
int nj = NJ;
int nk = NK;
int nl = NL;
int nm = NM;
/* Variable declaration/allocation. */
POLYBENCH_2D_ARRAY_DECL(E, DATA_TYPE, NI, NJ, ni, nj);
POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NK, ni, nk);
POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NK, NJ, nk, nj);
POLYBENCH_2D_ARRAY_DECL(F, DATA_TYPE, NJ, NL, nj, nl);
POLYBENCH_2D_ARRAY_DECL(C, DATA_TYPE, NJ, NM, nj, nm);
POLYBENCH_2D_ARRAY_DECL(D, DATA_TYPE, NM, NL, nm, nl);
POLYBENCH_2D_ARRAY_DECL(G, DATA_TYPE, NI, NL, ni, nl);
/* Initialize array(s). */
init_array (ni, nj, nk, nl, nm,
POLYBENCH_ARRAY(A),
POLYBENCH_ARRAY(B),
POLYBENCH_ARRAY(C),
POLYBENCH_ARRAY(D));
/* Start timer. */
polybench_start_instruments;
/* Run kernel. */
kernel_3mm (ni, nj, nk, nl, nm,
POLYBENCH_ARRAY(E),
POLYBENCH_ARRAY(A),
POLYBENCH_ARRAY(B),
POLYBENCH_ARRAY(F),
POLYBENCH_ARRAY(C),
POLYBENCH_ARRAY(D),
POLYBENCH_ARRAY(G));
/* Stop and print timer. */
polybench_stop_instruments;
polybench_print_instruments;
/* Prevent dead-code elimination. All live-out data must be printed
by the function call in argument. */
polybench_prevent_dce(print_array(ni, nl, POLYBENCH_ARRAY(G)));
/* Be clean. */
POLYBENCH_FREE_ARRAY(E);
POLYBENCH_FREE_ARRAY(A);
POLYBENCH_FREE_ARRAY(B);
POLYBENCH_FREE_ARRAY(F);
POLYBENCH_FREE_ARRAY(C);
POLYBENCH_FREE_ARRAY(D);
POLYBENCH_FREE_ARRAY(G);
return 0;
}
|
gbdt.h | /*!
* Copyright (c) 2016 Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See LICENSE file in the project root for license information.
*/
#ifndef LIGHTGBM_BOOSTING_GBDT_H_
#define LIGHTGBM_BOOSTING_GBDT_H_
#include <LightGBM/boosting.h>
#include <LightGBM/objective_function.h>
#include <LightGBM/prediction_early_stop.h>
#include <LightGBM/cuda/vector_cudahost.h>
#include <LightGBM/utils/json11.h>
#include <LightGBM/utils/threading.h>
#include <string>
#include <algorithm>
#include <cstdio>
#include <fstream>
#include <map>
#include <memory>
#include <mutex>
#include <unordered_map>
#include <utility>
#include <vector>
#include "score_updater.hpp"
namespace LightGBM {
using json11::Json;
/*!
* \brief GBDT algorithm implementation. including Training, prediction, bagging.
*/
class GBDT : public GBDTBase {
public:
/*!
* \brief Constructor
*/
GBDT();
/*!
* \brief Destructor
*/
~GBDT();
/*!
* \brief Initialization logic
* \param gbdt_config Config for boosting
* \param train_data Training data
* \param objective_function Training objective function
* \param training_metrics Training metrics
*/
void Init(const Config* gbdt_config, const Dataset* train_data,
const ObjectiveFunction* objective_function,
const std::vector<const Metric*>& training_metrics) override;
/*!
* \brief Merge model from other boosting object. Will insert to the front of current boosting object
* \param other
*/
void MergeFrom(const Boosting* other) override {
auto other_gbdt = reinterpret_cast<const GBDT*>(other);
// tmp move to other vector
auto original_models = std::move(models_);
models_ = std::vector<std::unique_ptr<Tree>>();
// push model from other first
for (const auto& tree : other_gbdt->models_) {
auto new_tree = std::unique_ptr<Tree>(new Tree(*(tree.get())));
models_.push_back(std::move(new_tree));
}
num_init_iteration_ = static_cast<int>(models_.size()) / num_tree_per_iteration_;
// push model in current object
for (const auto& tree : original_models) {
auto new_tree = std::unique_ptr<Tree>(new Tree(*(tree.get())));
models_.push_back(std::move(new_tree));
}
num_iteration_for_pred_ = static_cast<int>(models_.size()) / num_tree_per_iteration_;
}
void ShuffleModels(int start_iter, int end_iter) override {
int total_iter = static_cast<int>(models_.size()) / num_tree_per_iteration_;
start_iter = std::max(0, start_iter);
if (end_iter <= 0) {
end_iter = total_iter;
}
end_iter = std::min(total_iter, end_iter);
auto original_models = std::move(models_);
std::vector<int> indices(total_iter);
for (int i = 0; i < total_iter; ++i) {
indices[i] = i;
}
Random tmp_rand(17);
for (int i = start_iter; i < end_iter - 1; ++i) {
int j = tmp_rand.NextShort(i + 1, end_iter);
std::swap(indices[i], indices[j]);
}
models_ = std::vector<std::unique_ptr<Tree>>();
for (int i = 0; i < total_iter; ++i) {
for (int j = 0; j < num_tree_per_iteration_; ++j) {
int tree_idx = indices[i] * num_tree_per_iteration_ + j;
auto new_tree = std::unique_ptr<Tree>(new Tree(*(original_models[tree_idx].get())));
models_.push_back(std::move(new_tree));
}
}
}
/*!
* \brief Reset the training data
* \param train_data New Training data
* \param objective_function Training objective function
* \param training_metrics Training metrics
*/
void ResetTrainingData(const Dataset* train_data, const ObjectiveFunction* objective_function,
const std::vector<const Metric*>& training_metrics) override;
/*!
* \brief Reset Boosting Config
* \param gbdt_config Config for boosting
*/
void ResetConfig(const Config* gbdt_config) override;
/*!
* \brief Adding a validation dataset
* \param valid_data Validation dataset
* \param valid_metrics Metrics for validation dataset
*/
void AddValidDataset(const Dataset* valid_data,
const std::vector<const Metric*>& valid_metrics) override;
/*!
* \brief Perform a full training procedure
* \param snapshot_freq frequency of snapshot
* \param model_output_path path of model file
*/
void Train(int snapshot_freq, const std::string& model_output_path) override;
void RefitTree(const std::vector<std::vector<int>>& tree_leaf_prediction) override;
/*!
* \brief Training logic
* \param gradients nullptr for using default objective, otherwise use self-defined boosting
* \param hessians nullptr for using default objective, otherwise use self-defined boosting
* \return True if cannot train any more
*/
bool TrainOneIter(const score_t* gradients, const score_t* hessians) override;
/*!
* \brief Rollback one iteration
*/
void RollbackOneIter() override;
/*!
* \brief Get current iteration
*/
int GetCurrentIteration() const override { return static_cast<int>(models_.size()) / num_tree_per_iteration_; }
/*!
* \brief Can use early stopping for prediction or not
* \return True if cannot use early stopping for prediction
*/
bool NeedAccuratePrediction() const override {
if (objective_function_ == nullptr) {
return true;
} else {
return objective_function_->NeedAccuratePrediction();
}
}
/*!
* \brief Get evaluation result at data_idx data
* \param data_idx 0: training data, 1: 1st validation data
* \return evaluation result
*/
std::vector<double> GetEvalAt(int data_idx) const override;
/*!
* \brief Get current training score
* \param out_len length of returned score
* \return training score
*/
const double* GetTrainingScore(int64_t* out_len) override;
/*!
* \brief Get size of prediction at data_idx data
* \param data_idx 0: training data, 1: 1st validation data
* \return The size of prediction
*/
int64_t GetNumPredictAt(int data_idx) const override {
CHECK(data_idx >= 0 && data_idx <= static_cast<int>(valid_score_updater_.size()));
data_size_t num_data = train_data_->num_data();
if (data_idx > 0) {
num_data = valid_score_updater_[data_idx - 1]->num_data();
}
return num_data * num_class_;
}
/*!
* \brief Get prediction result at data_idx data
* \param data_idx 0: training data, 1: 1st validation data
* \param result used to store prediction result, should allocate memory before call this function
* \param out_len length of returned score
*/
void GetPredictAt(int data_idx, double* out_result, int64_t* out_len) override;
/*!
* \brief Get number of prediction for one data
* \param start_iteration Start index of the iteration to predict
* \param num_iteration number of used iterations
* \param is_pred_leaf True if predicting leaf index
* \param is_pred_contrib True if predicting feature contribution
* \return number of prediction
*/
inline int NumPredictOneRow(int start_iteration, int num_iteration, bool is_pred_leaf, bool is_pred_contrib) const override {
int num_pred_in_one_row = num_class_;
if (is_pred_leaf) {
int max_iteration = GetCurrentIteration();
start_iteration = std::max(start_iteration, 0);
start_iteration = std::min(start_iteration, max_iteration);
if (num_iteration > 0) {
num_pred_in_one_row *= static_cast<int>(std::min(max_iteration - start_iteration, num_iteration));
} else {
num_pred_in_one_row *= (max_iteration - start_iteration);
}
} else if (is_pred_contrib) {
num_pred_in_one_row = num_tree_per_iteration_ * (max_feature_idx_ + 2); // +1 for 0-based indexing, +1 for baseline
}
return num_pred_in_one_row;
}
void PredictRaw(const double* features, double* output,
const PredictionEarlyStopInstance* earlyStop) const override;
void PredictRawByMap(const std::unordered_map<int, double>& features, double* output,
const PredictionEarlyStopInstance* early_stop) const override;
void Predict(const double* features, double* output,
const PredictionEarlyStopInstance* earlyStop) const override;
void PredictByMap(const std::unordered_map<int, double>& features, double* output,
const PredictionEarlyStopInstance* early_stop) const override;
void PredictLeafIndex(const double* features, double* output) const override;
void PredictLeafIndexByMap(const std::unordered_map<int, double>& features, double* output) const override;
void PredictContrib(const double* features, double* output) const override;
void PredictContribByMap(const std::unordered_map<int, double>& features,
std::vector<std::unordered_map<int, double>>* output) const override;
/*!
* \brief Dump model to json format string
* \param start_iteration The model will be saved start from
* \param num_iteration Number of iterations that want to dump, -1 means dump all
* \param feature_importance_type Type of feature importance, 0: split, 1: gain
* \return Json format string of model
*/
std::string DumpModel(int start_iteration, int num_iteration,
int feature_importance_type) const override;
/*!
* \brief Translate model to if-else statement
* \param num_iteration Number of iterations that want to translate, -1 means translate all
* \return if-else format codes of model
*/
std::string ModelToIfElse(int num_iteration) const override;
/*!
* \brief Translate model to if-else statement
* \param num_iteration Number of iterations that want to translate, -1 means translate all
* \param filename Filename that want to save to
* \return is_finish Is training finished or not
*/
bool SaveModelToIfElse(int num_iteration, const char* filename) const override;
/*!
* \brief Save model to file
* \param start_iteration The model will be saved start from
* \param num_iterations Number of model that want to save, -1 means save all
* \param feature_importance_type Type of feature importance, 0: split, 1: gain
* \param filename Filename that want to save to
* \return is_finish Is training finished or not
*/
bool SaveModelToFile(int start_iteration, int num_iterations,
int feature_importance_type,
const char* filename) const override;
/*!
* \brief Save model to string
* \param start_iteration The model will be saved start from
* \param num_iterations Number of model that want to save, -1 means save all
* \param feature_importance_type Type of feature importance, 0: split, 1: gain
* \return Non-empty string if succeeded
*/
std::string SaveModelToString(int start_iteration, int num_iterations, int feature_importance_type) const override;
/*!
* \brief Restore from a serialized buffer
*/
bool LoadModelFromString(const char* buffer, size_t len) override;
/*!
* \brief Calculate feature importances
* \param num_iteration Number of model that want to use for feature importance, -1 means use all
* \param importance_type: 0 for split, 1 for gain
* \return vector of feature_importance
*/
std::vector<double> FeatureImportance(int num_iteration, int importance_type) const override;
/*!
* \brief Calculate upper bound value
* \return upper bound value
*/
double GetUpperBoundValue() const override;
/*!
* \brief Calculate lower bound value
* \return lower bound value
*/
double GetLowerBoundValue() const override;
/*!
* \brief Get max feature index of this model
* \return Max feature index of this model
*/
inline int MaxFeatureIdx() const override { return max_feature_idx_; }
/*!
* \brief Get feature names of this model
* \return Feature names of this model
*/
inline std::vector<std::string> FeatureNames() const override { return feature_names_; }
/*!
* \brief Get index of label column
* \return index of label column
*/
inline int LabelIdx() const override { return label_idx_; }
/*!
* \brief Get number of weak sub-models
* \return Number of weak sub-models
*/
inline int NumberOfTotalModel() const override { return static_cast<int>(models_.size()); }
/*!
* \brief Get number of tree per iteration
* \return number of tree per iteration
*/
inline int NumModelPerIteration() const override { return num_tree_per_iteration_; }
/*!
* \brief Get number of classes
* \return Number of classes
*/
inline int NumberOfClasses() const override { return num_class_; }
inline void InitPredict(int start_iteration, int num_iteration, bool is_pred_contrib) override {
num_iteration_for_pred_ = static_cast<int>(models_.size()) / num_tree_per_iteration_;
start_iteration = std::max(start_iteration, 0);
start_iteration = std::min(start_iteration, num_iteration_for_pred_);
if (num_iteration > 0) {
num_iteration_for_pred_ = std::min(num_iteration, num_iteration_for_pred_ - start_iteration);
} else {
num_iteration_for_pred_ = num_iteration_for_pred_ - start_iteration;
}
start_iteration_for_pred_ = start_iteration;
if (is_pred_contrib) {
#pragma omp parallel for schedule(static)
for (int i = 0; i < static_cast<int>(models_.size()); ++i) {
models_[i]->RecomputeMaxDepth();
}
}
}
inline double GetLeafValue(int tree_idx, int leaf_idx) const override {
CHECK(tree_idx >= 0 && static_cast<size_t>(tree_idx) < models_.size());
CHECK(leaf_idx >= 0 && leaf_idx < models_[tree_idx]->num_leaves());
return models_[tree_idx]->LeafOutput(leaf_idx);
}
inline void SetLeafValue(int tree_idx, int leaf_idx, double val) override {
CHECK(tree_idx >= 0 && static_cast<size_t>(tree_idx) < models_.size());
CHECK(leaf_idx >= 0 && leaf_idx < models_[tree_idx]->num_leaves());
models_[tree_idx]->SetLeafOutput(leaf_idx, val);
}
/*!
* \brief Get Type name of this boosting object
*/
const char* SubModelName() const override { return "tree"; }
bool IsLinear() const override { return linear_tree_; }
inline std::string ParserConfigStr() const override {return parser_config_str_;}
protected:
virtual bool GetIsConstHessian(const ObjectiveFunction* objective_function) {
if (objective_function != nullptr) {
return objective_function->IsConstantHessian();
} else {
return false;
}
}
/*!
* \brief Print eval result and check early stopping
*/
virtual bool EvalAndCheckEarlyStopping();
/*!
* \brief reset config for bagging
*/
void ResetBaggingConfig(const Config* config, bool is_change_dataset);
/*!
* \brief Implement bagging logic
* \param iter Current interation
*/
virtual void Bagging(int iter);
virtual data_size_t BaggingHelper(data_size_t start, data_size_t cnt,
data_size_t* buffer);
data_size_t BalancedBaggingHelper(data_size_t start, data_size_t cnt,
data_size_t* buffer);
/*!
* \brief calculate the object function
*/
virtual void Boosting();
/*!
* \brief updating score after tree was trained
* \param tree Trained tree of this iteration
* \param cur_tree_id Current tree for multiclass training
*/
virtual void UpdateScore(const Tree* tree, const int cur_tree_id);
/*!
* \brief eval results for one metric
*/
virtual std::vector<double> EvalOneMetric(const Metric* metric, const double* score) const;
/*!
* \brief Print metric result of current iteration
* \param iter Current iteration
* \return best_msg if met early_stopping
*/
std::string OutputMetric(int iter);
double BoostFromAverage(int class_id, bool update_scorer);
/*! \brief current iteration */
int iter_;
/*! \brief Pointer to training data */
const Dataset* train_data_;
/*! \brief Config of gbdt */
std::unique_ptr<Config> config_;
/*! \brief Tree learner, will use this class to learn trees */
std::unique_ptr<TreeLearner> tree_learner_;
/*! \brief Objective function */
const ObjectiveFunction* objective_function_;
/*! \brief Store and update training data's score */
std::unique_ptr<ScoreUpdater> train_score_updater_;
/*! \brief Metrics for training data */
std::vector<const Metric*> training_metrics_;
/*! \brief Store and update validation data's scores */
std::vector<std::unique_ptr<ScoreUpdater>> valid_score_updater_;
/*! \brief Metric for validation data */
std::vector<std::vector<const Metric*>> valid_metrics_;
/*! \brief Number of rounds for early stopping */
int early_stopping_round_;
/*! \brief Only use first metric for early stopping */
bool es_first_metric_only_;
/*! \brief Best iteration(s) for early stopping */
std::vector<std::vector<int>> best_iter_;
/*! \brief Best score(s) for early stopping */
std::vector<std::vector<double>> best_score_;
/*! \brief output message of best iteration */
std::vector<std::vector<std::string>> best_msg_;
/*! \brief Trained models(trees) */
std::vector<std::unique_ptr<Tree>> models_;
/*! \brief Max feature index of training data*/
int max_feature_idx_;
/*! \brief Parser config file content */
std::string parser_config_str_ = "";
#ifdef USE_CUDA
/*! \brief First order derivative of training data */
std::vector<score_t, CHAllocator<score_t>> gradients_;
/*! \brief Second order derivative of training data */
std::vector<score_t, CHAllocator<score_t>> hessians_;
#else
/*! \brief First order derivative of training data */
std::vector<score_t, Common::AlignmentAllocator<score_t, kAlignedSize>> gradients_;
/*! \brief Second order derivative of training data */
std::vector<score_t, Common::AlignmentAllocator<score_t, kAlignedSize>> hessians_;
#endif
/*! \brief Store the indices of in-bag data */
std::vector<data_size_t, Common::AlignmentAllocator<data_size_t, kAlignedSize>> bag_data_indices_;
/*! \brief Number of in-bag data */
data_size_t bag_data_cnt_;
/*! \brief Number of training data */
data_size_t num_data_;
/*! \brief Number of trees per iterations */
int num_tree_per_iteration_;
/*! \brief Number of class */
int num_class_;
/*! \brief Index of label column */
data_size_t label_idx_;
/*! \brief number of used model */
int num_iteration_for_pred_;
/*! \brief Start iteration of used model */
int start_iteration_for_pred_;
/*! \brief Shrinkage rate for one iteration */
double shrinkage_rate_;
/*! \brief Number of loaded initial models */
int num_init_iteration_;
/*! \brief Feature names */
std::vector<std::string> feature_names_;
std::vector<std::string> feature_infos_;
std::unique_ptr<Dataset> tmp_subset_;
bool is_use_subset_;
std::vector<bool> class_need_train_;
bool is_constant_hessian_;
std::unique_ptr<ObjectiveFunction> loaded_objective_;
bool average_output_;
bool need_re_bagging_;
bool balanced_bagging_;
std::string loaded_parameter_;
std::vector<int8_t> monotone_constraints_;
const int bagging_rand_block_ = 1024;
std::vector<Random> bagging_rands_;
ParallelPartitionRunner<data_size_t, false> bagging_runner_;
Json forced_splits_json_;
bool linear_tree_;
};
} // namespace LightGBM
#endif // LightGBM_BOOSTING_GBDT_H_
|
hello-omp.c | #include <unistd.h>
#include <stdio.h>
#include <stdlib.h>
/*
Exercise 1:
Intent : Write a helloworld program with OpenMP.
Goals:
1. Include OpenMP header
2. Within the scope of #pragma omp parallel
a. Each thread queries it thread ID.
b. Each thread then prints its thread ID.
*/
void main (){
//Initialize parallel region here
{
int thread_id;
}
}
|
GB_binop__min_int64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__min_int64)
// A.*B function (eWiseMult): GB (_AemultB_08__min_int64)
// A.*B function (eWiseMult): GB (_AemultB_02__min_int64)
// A.*B function (eWiseMult): GB (_AemultB_04__min_int64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__min_int64)
// A*D function (colscale): GB (_AxD__min_int64)
// D*A function (rowscale): GB (_DxB__min_int64)
// C+=B function (dense accum): GB (_Cdense_accumB__min_int64)
// C+=b function (dense accum): GB (_Cdense_accumb__min_int64)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__min_int64)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__min_int64)
// C=scalar+B GB (_bind1st__min_int64)
// C=scalar+B' GB (_bind1st_tran__min_int64)
// C=A+scalar GB (_bind2nd__min_int64)
// C=A'+scalar GB (_bind2nd_tran__min_int64)
// C type: int64_t
// A type: int64_t
// B,b type: int64_t
// BinaryOp: cij = GB_IMIN (aij, bij)
#define GB_ATYPE \
int64_t
#define GB_BTYPE \
int64_t
#define GB_CTYPE \
int64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int64_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int64_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_IMIN (x, y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MIN || GxB_NO_INT64 || GxB_NO_MIN_INT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB (_Cdense_ewise3_accum__min_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__min_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__min_int64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__min_int64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int64_t
int64_t bwork = (*((int64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__min_int64)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *restrict Cx = (int64_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__min_int64)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *restrict Cx = (int64_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B (also the masked forms C<M>=A+B and C<!M>=A+B), where
// "+" here is the bound binary operator.  The task list has been prepared
// by the caller; this wrapper declares the per-matrix slicing workspaces
// that the included template may allocate, and frees them on exit.
GrB_Info GB (_AaddB__min_int64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// workspaces for slicing M, A, and B; released by GB_FREE_WORK below
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult method 08: C = A.*B (and masked variants) where C is
// sparse/hypersparse.  All looping/masking logic is in the included
// meta-template; this wrapper only selects the operator/type binding.
GrB_Info GB (_AemultB_08__min_int64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 02: C<#> = A.*B when A is sparse/hyper and B is
// bitmap/full.  When the generated operator is non-commutative and has no
// pre-flipped variant (GB_BINOP_FLIP), the flipxy flag selects which
// argument order the template applies; otherwise flipxy has already been
// resolved by the caller and is ignored here.
GrB_Info GB (_AemultB_02__min_int64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 04: C<M> = A.*B with M sparse/hyper and both A and B
// bitmap/full.  The mask drives the iteration (via M_ek_slicing); the
// loop body is supplied by the included template.
GrB_Info GB (_AemultB_04__min_int64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult when the result C is held as a bitmap: C=A.*B, C<M>=A.*B, or
// C<!M>=A.*B.  The template handles all mask cases and parallelization.
GrB_Info GB (_AemultB_bitmap__min_int64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = op (x, Bx): apply the MIN binary operator with the scalar x bound
// as the first argument.  Cx and Bx may alias; entries absent from the
// bitmap Bb (if present) are skipped.
GrB_Info GB (_bind1st__min_int64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    int64_t *Cx = (int64_t *) Cx_output ;
    int64_t x = (*((int64_t *) x_input)) ;
    int64_t *Bx = (int64_t *) Bx_input ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < bnz ; k++)
    {
        // only touch entries present in B's bitmap (GBB is true for full B)
        if (GBB (Bb, k))
        {
            int64_t bval = GBX (Bx, k, false) ;
            Cx [k] = GB_IMIN (x, bval) ;
        }
    }
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = op (Ax, y): apply the MIN binary operator with the scalar y bound
// as the second argument.  Cx and Ax may alias; entries absent from the
// bitmap Ab (if present) are skipped.
GrB_Info GB (_bind2nd__min_int64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    int64_t *Cx = (int64_t *) Cx_output ;
    int64_t *Ax = (int64_t *) Ax_input ;
    int64_t y = (*((int64_t *) y_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        // only touch entries present in A's bitmap (GBB is true for full A)
        if (GBB (Ab, k))
        {
            int64_t aval = GBX (Ax, k, false) ;
            Cx [k] = GB_IMIN (aval, y) ;
        }
    }
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_IMIN (x, aij) ; \
}
// C = op (x, A'): transpose A and apply the bound operator with x as the
// first argument.  GB_CAST_OP (defined just above) supplies the per-entry
// action used by the included transpose template.
GrB_Info GB (_bind1st_tran__min_int64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t x = (*((const int64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// NOTE: the redefinition below sits after the returns, but preprocessor
// directives are processed regardless of control flow -- it restores
// GB_ATYPE for the code that follows (generated-code artifact).
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_IMIN (aij, y) ; \
}
// C = op (A', y): transpose A and apply the bound operator with y as the
// second argument.  GB_CAST_OP (defined just above) supplies the per-entry
// action used by the included transpose template.
GrB_Info GB (_bind2nd_tran__min_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t y = (*((const int64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
test001_privateLock.c | /*
a lock should be shared.
But a locally declared lock within a parallel region is private.
*/
#include <omp.h>
// NOTE(review): this function is an *intentionally defective* test case
// (per the file header: "a lock should be shared. But a locally declared
// lock within a parallel region is private").  Do not "fix" it: the point
// is that `lck` is private to each thread, so it synchronizes nothing,
// and it is also never initialized with omp_init_lock().
void foo()
{
#pragma omp parallel
{
// each thread gets its own uninitialized lock -- the defect under test
omp_lock_t lck;
omp_set_lock(&lck);
// presumably relies on an implicit printf declaration; <stdio.h> is not
// included in this file -- verify against the build setup
printf("Thread = %d\n", omp_get_thread_num());
omp_unset_lock(&lck);
}
}
|
client_utils.h | // Copyright (c) 2020 - present Advanced Micro Devices, Inc. All rights reserved.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
#ifndef CLIENT_UTILS_H
#define CLIENT_UTILS_H
#include <algorithm>
#include <complex>
#include <iostream>
#include <mutex>
#include <numeric>
#include <omp.h>
#include <random>
#include <tuple>
#include <vector>
#include "../shared/printbuffer.h"
#include "rocfft.h"
#include <hip/hip_runtime_api.h>
// Determine the size of the data type given the precision and type.
template <typename Tsize>
inline Tsize var_size(const rocfft_precision precision, const rocfft_array_type type)
{
    // Size in bytes of one real scalar for the given precision.
    size_t bytes = 0;
    switch(precision)
    {
    case rocfft_precision_single:
        bytes = sizeof(float);
        break;
    case rocfft_precision_double:
        bytes = sizeof(double);
        break;
    }
    // Interleaved complex/hermitian elements store two reals per element;
    // all other layouts store one real per buffer element.
    switch(type)
    {
    case rocfft_array_type_complex_interleaved:
    case rocfft_array_type_hermitian_interleaved:
        bytes *= 2;
        break;
    default:
        break;
    }
    return bytes;
}
// Container class for test parameters.
class rocfft_params
{
public:
// All parameters are row-major.
std::vector<size_t> length;
std::vector<size_t> istride;
std::vector<size_t> ostride;
size_t nbatch = 1;
rocfft_precision precision = rocfft_precision_double;
rocfft_transform_type transform_type = rocfft_transform_type_complex_forward;
rocfft_result_placement placement = rocfft_placement_inplace;
size_t idist = 0;
size_t odist = 0;
rocfft_array_type itype = rocfft_array_type_complex_interleaved;
rocfft_array_type otype = rocfft_array_type_complex_interleaved;
std::vector<size_t> ioffset = {0, 0};
std::vector<size_t> ooffset = {0, 0};
std::vector<size_t> isize;
std::vector<size_t> osize;
// run testing load/store callbacks
bool run_callbacks = false;
static constexpr double load_cb_scalar = 0.457813941;
static constexpr double store_cb_scalar = 0.391504938;
// Given an array type, return the name as a string.
std::string array_type_name(const rocfft_array_type type) const
{
switch(type)
{
case rocfft_array_type_complex_interleaved:
return "rocfft_array_type_complex_interleaved";
case rocfft_array_type_complex_planar:
return "rocfft_array_type_complex_planar";
case rocfft_array_type_real:
return "rocfft_array_type_real";
case rocfft_array_type_hermitian_interleaved:
return "rocfft_array_type_hermitian_interleaved";
case rocfft_array_type_hermitian_planar:
return "rocfft_array_type_hermitian_planar";
case rocfft_array_type_unset:
return "rocfft_array_type_unset";
}
return "";
}
// Convert to string for output.
std::string str(const std::string& separator = ", ") const
{
std::stringstream ss;
ss << "length:";
for(auto i : length)
ss << " " << i;
ss << separator;
ss << "istride:";
for(auto i : istride)
ss << " " << i;
ss << separator;
ss << "idist: " << idist << separator;
ss << "ostride:";
for(auto i : ostride)
ss << " " << i;
ss << separator;
ss << "odist: " << odist << separator;
ss << "batch: " << nbatch << separator;
ss << "isize:";
for(auto i : isize)
ss << " " << i;
ss << separator;
ss << "osize:";
for(auto i : osize)
ss << " " << i;
ss << separator;
ss << "ioffset:";
for(auto i : ioffset)
ss << " " << i;
ss << separator;
ss << "ooffset:";
for(auto i : ooffset)
ss << " " << i;
ss << separator;
if(placement == rocfft_placement_inplace)
ss << "in-place";
else
ss << "out-of-place";
ss << separator;
ss << array_type_name(itype) << " -> " << array_type_name(otype) << separator;
if(precision == rocfft_precision_single)
ss << "single-precision";
else
ss << "double-precision";
ss << separator;
ss << "ilength:";
for(const auto i : ilength())
ss << " " << i;
ss << separator;
ss << "olength:";
for(const auto i : olength())
ss << " " << i;
return ss.str();
}
// Stream output operator (for gtest, etc).
friend std::ostream& operator<<(std::ostream& stream, const rocfft_params& params)
{
stream << params.str();
return stream;
}
// Dimension of the transform.
size_t dim() const
{
return length.size();
}
std::vector<size_t> ilength() const
{
auto ilength = length;
if(transform_type == rocfft_transform_type_real_inverse)
ilength[dim() - 1] = ilength[dim() - 1] / 2 + 1;
return ilength;
}
std::vector<size_t> olength() const
{
auto olength = length;
if(transform_type == rocfft_transform_type_real_forward)
olength[dim() - 1] = olength[dim() - 1] / 2 + 1;
return olength;
}
size_t nbuffer(const rocfft_array_type type) const
{
switch(type)
{
case rocfft_array_type_real:
case rocfft_array_type_complex_interleaved:
case rocfft_array_type_hermitian_interleaved:
return 1;
case rocfft_array_type_complex_planar:
case rocfft_array_type_hermitian_planar:
return 2;
case rocfft_array_type_unset:
return 0;
}
}
// Number of input buffers
size_t nibuffer() const
{
return nbuffer(itype);
}
// Number of output buffers
size_t nobuffer() const
{
return nbuffer(otype);
}
auto compute_isize() const
{
auto il = ilength();
size_t val = nbatch * idist;
for(int i = 0; i < il.size(); ++i)
{
val = std::max(val, il[i] * istride[i]);
}
std::vector<size_t> isize(nibuffer());
for(int i = 0; i < isize.size(); ++i)
{
isize[i] = val + ioffset[i];
}
return isize;
}
auto compute_osize() const
{
auto ol = olength();
size_t val = nbatch * odist;
for(int i = 0; i < ol.size(); ++i)
{
val = std::max(val, ol[i] * ostride[i]);
}
std::vector<size_t> osize(nobuffer());
for(int i = 0; i < osize.size(); ++i)
{
osize[i] = val + ooffset[i];
}
return osize;
}
std::vector<size_t> ibuffer_sizes() const
{
std::vector<size_t> ibuffer_sizes;
if(isize.empty())
return ibuffer_sizes;
switch(itype)
{
case rocfft_array_type_complex_planar:
case rocfft_array_type_hermitian_planar:
ibuffer_sizes.resize(2);
break;
default:
ibuffer_sizes.resize(1);
}
for(unsigned i = 0; i < ibuffer_sizes.size(); i++)
{
ibuffer_sizes[i] = isize[i] * var_size<size_t>(precision, itype);
}
return ibuffer_sizes;
}
std::vector<size_t> obuffer_sizes() const
{
std::vector<size_t> obuffer_sizes;
if(osize.empty())
return obuffer_sizes;
switch(otype)
{
case rocfft_array_type_complex_planar:
case rocfft_array_type_hermitian_planar:
obuffer_sizes.resize(2);
break;
default:
obuffer_sizes.resize(1);
}
for(unsigned i = 0; i < obuffer_sizes.size(); i++)
{
obuffer_sizes[i] = osize[i] * var_size<size_t>(precision, otype);
}
return obuffer_sizes;
}
// Estimate the amount of host memory needed.
size_t needed_ram(const int verbose) const
{
// Host input, output, and input copy: 3 buffers, all contiguous.
size_t needed_ram
= 3 * std::accumulate(length.begin(), length.end(), 1, std::multiplies<size_t>());
// GPU input buffer:
needed_ram += std::inner_product(length.begin(), length.end(), istride.begin(), 0);
// GPU output buffer:
needed_ram += std::inner_product(length.begin(), length.end(), ostride.begin(), 0);
// Account for precision and data type:
if(transform_type != rocfft_transform_type_real_forward
&& transform_type != rocfft_transform_type_real_inverse)
{
needed_ram *= 2;
}
switch(precision)
{
case rocfft_precision_single:
needed_ram *= 4;
break;
case rocfft_precision_double:
needed_ram *= 8;
break;
}
needed_ram *= nbatch;
if(verbose > 1)
{
std::cout << "required host memory (GB): " << needed_ram * 1e-9 << std::endl;
}
return needed_ram;
}
// Column-major getters:
std::vector<size_t> ilength_cm() const
{
auto ilength_cm = ilength();
std::reverse(std::begin(ilength_cm), std::end(ilength_cm));
return ilength_cm;
}
std::vector<size_t> olength_cm() const
{
auto olength_cm = olength();
std::reverse(std::begin(olength_cm), std::end(olength_cm));
return olength_cm;
}
std::vector<size_t> length_cm() const
{
auto length_cm = length;
std::reverse(std::begin(length_cm), std::end(length_cm));
return length_cm;
}
std::vector<size_t> istride_cm() const
{
auto istride_cm = istride;
std::reverse(std::begin(istride_cm), std::end(istride_cm));
return istride_cm;
}
std::vector<size_t> ostride_cm() const
{
auto ostride_cm = ostride;
std::reverse(std::begin(ostride_cm), std::end(ostride_cm));
return ostride_cm;
}
// Return true if the given GPU parameters would produce a valid transform.
bool valid(const int verbose) const
{
if(ioffset.size() < nibuffer() || ooffset.size() < nobuffer())
return false;
// Check that in-place transforms have the same input and output stride:
if(placement == rocfft_placement_inplace)
{
const auto stridesize = std::min(istride.size(), ostride.size());
bool samestride = true;
for(int i = 0; i < stridesize; ++i)
{
if(istride[i] != ostride[i])
samestride = false;
}
if(!samestride)
{
// In-place transforms require identical input and output strides.
if(verbose)
{
std::cout << "istride:";
for(const auto& i : istride)
std::cout << " " << i;
std::cout << " ostride0:";
for(const auto& i : ostride)
std::cout << " " << i;
std::cout << " differ; skipped for in-place transforms: skipping test"
<< std::endl;
}
// TODO: mark skipped
return false;
}
if((transform_type == rocfft_transform_type_real_forward
|| transform_type == rocfft_transform_type_real_inverse)
&& (istride[0] != 1 || ostride[0] != 1))
{
// In-place real/complex transforms require unit strides.
if(verbose)
{
std::cout
<< "istride[0]: " << istride[0] << " ostride[0]: " << ostride[0]
<< " must be unitary for in-place real/complex transforms: skipping test"
<< std::endl;
}
return false;
}
if((itype == rocfft_array_type_complex_interleaved
&& otype == rocfft_array_type_complex_planar)
|| (itype == rocfft_array_type_complex_planar
&& otype == rocfft_array_type_complex_interleaved))
{
if(verbose)
{
std::cout << "In-place c2c transforms require identical io types; skipped.\n";
}
return false;
}
// Check offsets
switch(transform_type)
{
case rocfft_transform_type_complex_forward:
case rocfft_transform_type_complex_inverse:
for(int i = 0; i < nibuffer(); ++i)
{
if(ioffset[i] != ooffset[i])
return false;
}
break;
case rocfft_transform_type_real_forward:
if(ioffset[0] != 2 * ooffset[0])
return false;
break;
case rocfft_transform_type_real_inverse:
if(2 * ioffset[0] != ooffset[0])
return false;
break;
}
}
// The parameters are valid.
return true;
}
};
// This is used with the program_options class so that the user can type an integer on the
// command line and we store into an enum varaible
template <typename _Elem, typename _Traits>
std::basic_istream<_Elem, _Traits>& operator>>(std::basic_istream<_Elem, _Traits>& stream,
rocfft_array_type& atype)
{
unsigned tmp;
stream >> tmp;
atype = rocfft_array_type(tmp);
return stream;
}
// similarly for transform type
template <typename _Elem, typename _Traits>
std::basic_istream<_Elem, _Traits>& operator>>(std::basic_istream<_Elem, _Traits>& stream,
rocfft_transform_type& ttype)
{
unsigned tmp;
stream >> tmp;
ttype = rocfft_transform_type(tmp);
return stream;
}
// count the number of total iterations for 1-, 2-, and 3-D dimensions
// Total iteration count for a 1-D (scalar), 2-D (pair), or 3-D (triple)
// problem size: the product of all dimensions.
template <typename T1>
size_t count_iters(const T1& len)
{
    return len;
}

template <typename T1>
size_t count_iters(const std::tuple<T1, T1>& len)
{
    return std::get<0>(len) * std::get<1>(len);
}

template <typename T1>
size_t count_iters(const std::tuple<T1, T1, T1>& len)
{
    return std::get<0>(len) * std::get<1>(len) * std::get<2>(len);
}
// Work out how many partitions to break our iteration problem into
// Work out how many partitions (threads) to break an iteration problem of
// the given 1-/2-/3-D size into.  Returns 1 when OpenMP support is not
// compiled in or no hardware threads are reported.
template <typename T1>
static size_t compute_partition_count(T1 length)
{
#ifdef BUILD_CLIENTS_TESTS_OPENMP
    // we seem to get contention from too many threads, which slows
    // things down. particularly noticeable with mix_3D tests
    static const size_t MAX_PARTITIONS = 8;
    size_t              iters          = count_iters(length);
    size_t hw_threads = std::min(MAX_PARTITIONS, static_cast<size_t>(omp_get_num_procs()));
    if(!hw_threads)
        return 1;

    // don't bother threading problem sizes that are too small. pick
    // an arbitrary number of iterations and ensure that each thread
    // has at least that many iterations to process
    static const size_t MIN_ITERS_PER_THREAD = 2048;

    // either use the whole CPU, or use ceil(iters/iters_per_thread).
    // Ceiling division is (a + b - 1) / b; the previous "+ 1" over-counted,
    // producing an extra partition whenever iters was a multiple of
    // MIN_ITERS_PER_THREAD and leaving threads under-loaded.
    return std::min(hw_threads, (iters + MIN_ITERS_PER_THREAD - 1) / MIN_ITERS_PER_THREAD);
#else
    return 1;
#endif
}
// Break a scalar length into some number of pieces, returning
// [(start0, end0), (start1, end1), ...]
// Break a scalar length into some number of pieces, returning
// [(start0, end0), (start1, end1), ...].  The last piece absorbs any
// remainder.  A zero length (or zero requested parts) yields an empty
// partition list.
template <typename T1>
std::vector<std::pair<T1, T1>> partition_base(const T1& length, size_t num_parts)
{
    static_assert(std::is_integral<T1>::value, "Integral required.");

    // make sure we don't exceed the length
    num_parts = std::min<size_t>(static_cast<size_t>(length), num_parts);

    // Guard the degenerate case: without this, a zero length would divide
    // by zero below and call back() on an empty vector (undefined behavior).
    if(num_parts == 0)
        return {};

    std::vector<std::pair<T1, T1>> ret(num_parts);

    auto partition_size = length / num_parts;
    T1   cur_partition  = 0;
    for(size_t i = 0; i < num_parts; ++i, cur_partition += partition_size)
    {
        ret[i].first  = cur_partition;
        ret[i].second = cur_partition + partition_size;
    }
    // last partition might not divide evenly, fix it up
    ret.back().second = length;
    return ret;
}
// Returns pairs of startindex, endindex, for 1D, 2D, 3D lengths
// 1D case: partition the scalar length directly.
template <typename T1>
std::vector<std::pair<T1, T1>> partition_rowmajor(const T1& length)
{
    const auto num_parts = compute_partition_count(length);
    return partition_base(length, num_parts);
}
// Partition on the leftmost part of the tuple, for row-major indexing
// Partition on the leftmost part of the tuple, for row-major indexing
template <typename T1>
std::vector<std::pair<std::tuple<T1, T1>, std::tuple<T1, T1>>>
    partition_rowmajor(const std::tuple<T1, T1>& length)
{
    const auto pieces = partition_base(std::get<0>(length), compute_partition_count(length));
    std::vector<std::pair<std::tuple<T1, T1>, std::tuple<T1, T1>>> out(pieces.size());
    for(size_t idx = 0; idx < pieces.size(); ++idx)
    {
        // each piece spans [start, end) in dim 0 and the full extent of dim 1
        out[idx].first  = std::make_tuple(pieces[idx].first, T1(0));
        out[idx].second = std::make_tuple(pieces[idx].second, std::get<1>(length));
    }
    return out;
}
// 3D row-major case: partition on the leftmost dimension only.
template <typename T1>
std::vector<std::pair<std::tuple<T1, T1, T1>, std::tuple<T1, T1, T1>>>
    partition_rowmajor(const std::tuple<T1, T1, T1>& length)
{
    const auto pieces = partition_base(std::get<0>(length), compute_partition_count(length));
    std::vector<std::pair<std::tuple<T1, T1, T1>, std::tuple<T1, T1, T1>>> out(pieces.size());
    for(size_t idx = 0; idx < pieces.size(); ++idx)
    {
        // each piece spans [start, end) in dim 0 and the full extent of dims 1,2
        out[idx].first  = std::make_tuple(pieces[idx].first, T1(0), T1(0));
        out[idx].second
            = std::make_tuple(pieces[idx].second, std::get<1>(length), std::get<2>(length));
    }
    return out;
}
// Returns pairs of startindex, endindex, for 1D, 2D, 3D lengths
// 1D case: identical to the row-major scalar partition.
template <typename T1>
std::vector<std::pair<T1, T1>> partition_colmajor(const T1& length)
{
    const auto num_parts = compute_partition_count(length);
    return partition_base(length, num_parts);
}
// Partition on the rightmost part of the tuple, for col-major indexing
// Partition on the rightmost part of the tuple, for col-major indexing
template <typename T1>
std::vector<std::pair<std::tuple<T1, T1>, std::tuple<T1, T1>>>
    partition_colmajor(const std::tuple<T1, T1>& length)
{
    const auto pieces = partition_base(std::get<1>(length), compute_partition_count(length));
    std::vector<std::pair<std::tuple<T1, T1>, std::tuple<T1, T1>>> out(pieces.size());
    for(size_t idx = 0; idx < pieces.size(); ++idx)
    {
        // each piece spans [start, end) in dim 1 and the full extent of dim 0
        out[idx].first  = std::make_tuple(T1(0), pieces[idx].first);
        out[idx].second = std::make_tuple(std::get<0>(length), pieces[idx].second);
    }
    return out;
}
// 3D col-major case: partition on the rightmost dimension only.
template <typename T1>
std::vector<std::pair<std::tuple<T1, T1, T1>, std::tuple<T1, T1, T1>>>
    partition_colmajor(const std::tuple<T1, T1, T1>& length)
{
    const auto pieces = partition_base(std::get<2>(length), compute_partition_count(length));
    std::vector<std::pair<std::tuple<T1, T1, T1>, std::tuple<T1, T1, T1>>> out(pieces.size());
    for(size_t idx = 0; idx < pieces.size(); ++idx)
    {
        // each piece spans [start, end) in dim 2 and the full extent of dims 0,1
        out[idx].first  = std::make_tuple(T1(0), T1(0), pieces[idx].first);
        out[idx].second
            = std::make_tuple(std::get<0>(length), std::get<1>(length), pieces[idx].second);
    }
    return out;
}
// Specialized computation of index given 1-, 2-, 3- dimension length + stride
// Specialized computation of index given 1-, 2-, 3- dimension length + stride
// (dot product of index and stride, plus a base offset).
// NOTE(review): the result is narrowed to int; presumably indices fit in an
// int for the tested problem sizes -- verify for very large transforms.
template <typename T1, typename T2>
int compute_index(T1 length, T2 stride, size_t base)
{
    static_assert(std::is_integral<T1>::value, "Integral required.");
    static_assert(std::is_integral<T2>::value, "Integral required.");
    return (length * stride) + base;
}

template <typename T1, typename T2>
int compute_index(const std::tuple<T1, T1>& length, const std::tuple<T2, T2>& stride, size_t base)
{
    static_assert(std::is_integral<T1>::value, "Integral required.");
    static_assert(std::is_integral<T2>::value, "Integral required.");
    const auto d0 = std::get<0>(length) * std::get<0>(stride);
    const auto d1 = std::get<1>(length) * std::get<1>(stride);
    return d0 + d1 + base;
}

template <typename T1, typename T2>
int compute_index(const std::tuple<T1, T1, T1>& length,
                  const std::tuple<T2, T2, T2>& stride,
                  size_t                        base)
{
    static_assert(std::is_integral<T1>::value, "Integral required.");
    static_assert(std::is_integral<T2>::value, "Integral required.");
    const auto d0 = std::get<0>(length) * std::get<0>(stride);
    const auto d1 = std::get<1>(length) * std::get<1>(stride);
    const auto d2 = std::get<2>(length) * std::get<2>(stride);
    return d0 + d1 + d2 + base;
}
// Given a length vector, set the rest of the strides.
// The optional argument stride0 sets the stride for the contiguous dimension.
// The optional rcpadding argument sets the stride correctly for in-place
// multi-dimensional real/complex transforms.
// Format is row-major.
// Given a length vector, compute the remaining (row-major) strides.
// stride0 optionally fixes the trailing strides (contiguous dims);
// rcpadding adjusts the second-innermost stride for in-place
// multi-dimensional real/complex transforms (innermost padded to 2*(N/2+1)).
template <typename T1>
inline std::vector<T1> compute_stride(const std::vector<T1>&     length,
                                      const std::vector<size_t>& stride0   = std::vector<size_t>(),
                                      const bool                 rcpadding = false)
{
    // We can't have more strides than dimensions:
    assert(stride0.size() <= length.size());

    const int dim = length.size();

    std::vector<T1> stride(dim);

    // Zero-dimensional input: nothing to compute.  Without this guard the
    // empty-stride0 branch below would write stride[-1] (undefined behavior).
    if(dim == 0)
        return stride;

    int dimoffset = 0;

    if(stride0.size() == 0)
    {
        // Set the contiguous stride:
        stride[dim - 1] = 1;
        dimoffset       = 1;
    }
    else
    {
        // Copy the input values to the end of the stride array:
        for(size_t i = 0; i < stride0.size(); ++i)
        {
            stride[dim - stride0.size() + i] = stride0[i];
        }
    }

    if(stride0.size() < static_cast<size_t>(dim))
    {
        // Compute any remaining values via recursion.
        for(int i = dim - dimoffset - stride0.size(); i-- > 0;)
        {
            auto lengthip1 = length[i + 1];
            if(rcpadding && i == dim - 2)
            {
                // in-place real/complex padding of the contiguous dimension
                lengthip1 = 2 * (lengthip1 / 2 + 1);
            }
            stride[i] = stride[i + 1] * lengthip1;
        }
    }

    return stride;
}
// Copy data of dimensions length with strides istride and length idist between batches to
// a buffer with strides ostride and length odist between batches. The input and output
// types are identical.
// Copy same-type data between two strided/batched buffers.  The whole
// problem is split into row-major partitions, each handled by one OpenMP
// thread; within a partition the do/while walks every multi-index via
// increment_rowmajor (defined elsewhere in this project -- presumably it
// advances `index` and returns false once `length` is exhausted; verify).
template <typename Tval, typename Tint1, typename Tint2, typename Tint3>
inline void copy_buffers_1to1(const Tval* input,
Tval* output,
const Tint1& whole_length,
const size_t nbatch,
const Tint2& istride,
const size_t idist,
const Tint3& ostride,
const size_t odist,
const std::vector<size_t>& ioffset,
const std::vector<size_t>& ooffset)
{
// if layouts match, the output index can reuse the input index as-is
const bool idx_equals_odx = istride == ostride && idist == odist;
size_t idx_base = 0;
size_t odx_base = 0;
auto partitions = partition_rowmajor(whole_length);
for(size_t b = 0; b < nbatch; b++, idx_base += idist, odx_base += odist)
{
#pragma omp parallel for num_threads(partitions.size())
for(size_t part = 0; part < partitions.size(); ++part)
{
// [first, second) bounds of this partition's multi-index range
auto index = partitions[part].first;
const auto length = partitions[part].second;
do
{
const int idx = compute_index(index, istride, idx_base);
const int odx = idx_equals_odx ? idx : compute_index(index, ostride, odx_base);
output[odx + ooffset[0]] = input[idx + ioffset[0]];
} while(increment_rowmajor(index, length));
}
}
}
// Copy data of dimensions length with strides istride and length idist between batches to
// a buffer with strides ostride and length odist between batches. The input type is
// planar and the output type is complex interleaved.
// Copy planar (separate real/imag buffers) input into complex-interleaved
// output, with independent strides/distances per side.  Same partitioning
// and OpenMP scheme as copy_buffers_1to1.
template <typename Tval, typename Tint1, typename Tint2, typename Tint3>
inline void copy_buffers_2to1(const Tval* input0,
const Tval* input1,
std::complex<Tval>* output,
const Tint1& whole_length,
const size_t nbatch,
const Tint2& istride,
const size_t idist,
const Tint3& ostride,
const size_t odist,
const std::vector<size_t>& ioffset,
const std::vector<size_t>& ooffset)
{
// if layouts match, the output index can reuse the input index as-is
const bool idx_equals_odx = istride == ostride && idist == odist;
size_t idx_base = 0;
size_t odx_base = 0;
auto partitions = partition_rowmajor(whole_length);
for(size_t b = 0; b < nbatch; b++, idx_base += idist, odx_base += odist)
{
#pragma omp parallel for num_threads(partitions.size())
for(size_t part = 0; part < partitions.size(); ++part)
{
auto index = partitions[part].first;
const auto length = partitions[part].second;
do
{
const int idx = compute_index(index, istride, idx_base);
const int odx = idx_equals_odx ? idx : compute_index(index, ostride, odx_base);
// real part from buffer 0, imaginary part from buffer 1
output[odx + ooffset[0]]
= std::complex<Tval>(input0[idx + ioffset[0]], input1[idx + ioffset[1]]);
} while(increment_rowmajor(index, length));
}
}
}
// Copy data of dimensions length with strides istride and length idist between batches to
// a buffer with strides ostride and length odist between batches. The input type is
// complex interleaved and the output type is planar.
// Copy complex-interleaved input into planar (separate real/imag buffers)
// output, with independent strides/distances per side.  Same partitioning
// and OpenMP scheme as copy_buffers_1to1.
template <typename Tval, typename Tint1, typename Tint2, typename Tint3>
inline void copy_buffers_1to2(const std::complex<Tval>* input,
Tval* output0,
Tval* output1,
const Tint1& whole_length,
const size_t nbatch,
const Tint2& istride,
const size_t idist,
const Tint3& ostride,
const size_t odist,
const std::vector<size_t>& ioffset,
const std::vector<size_t>& ooffset)
{
// if layouts match, the output index can reuse the input index as-is
const bool idx_equals_odx = istride == ostride && idist == odist;
size_t idx_base = 0;
size_t odx_base = 0;
auto partitions = partition_rowmajor(whole_length);
for(size_t b = 0; b < nbatch; b++, idx_base += idist, odx_base += odist)
{
#pragma omp parallel for num_threads(partitions.size())
for(size_t part = 0; part < partitions.size(); ++part)
{
auto index = partitions[part].first;
const auto length = partitions[part].second;
do
{
const int idx = compute_index(index, istride, idx_base);
const int odx = idx_equals_odx ? idx : compute_index(index, ostride, odx_base);
// real part into buffer 0, imaginary part into buffer 1
output0[odx + ooffset[0]] = input[idx + ioffset[0]].real();
output1[odx + ooffset[1]] = input[idx + ioffset[0]].imag();
} while(increment_rowmajor(index, length));
}
}
}
// Copy data of dimensions length with strides istride and length idist between batches to
// a buffer with strides ostride and length odist between batches. The input type given
// by itype, and the output type is given by otype.
// Dispatcher: copy between batched, strided buffers whose layout is given
// by (itype, otype, precision).  Same-type copies go element-for-element
// (per raw buffer for planar/real layouts); interleaved<->planar copies
// route to the 1to2/2to1 helpers.  Any other type pairing throws.
template <typename Tallocator1,
typename Tallocator2,
typename Tint1,
typename Tint2,
typename Tint3>
inline void copy_buffers(const std::vector<std::vector<char, Tallocator1>>& input,
std::vector<std::vector<char, Tallocator2>>& output,
const Tint1& length,
const size_t nbatch,
const rocfft_precision precision,
const rocfft_array_type itype,
const Tint2& istride,
const size_t idist,
const rocfft_array_type otype,
const Tint3& ostride,
const size_t odist,
const std::vector<size_t>& ioffset,
const std::vector<size_t>& ooffset)
{
if(itype == otype)
{
switch(itype)
{
case rocfft_array_type_complex_interleaved:
case rocfft_array_type_hermitian_interleaved:
// one buffer of std::complex values, copied 1:1
switch(precision)
{
case rocfft_precision_single:
copy_buffers_1to1(reinterpret_cast<const std::complex<float>*>(input[0].data()),
reinterpret_cast<std::complex<float>*>(output[0].data()),
length,
nbatch,
istride,
idist,
ostride,
odist,
ioffset,
ooffset);
break;
case rocfft_precision_double:
copy_buffers_1to1(reinterpret_cast<const std::complex<double>*>(input[0].data()),
reinterpret_cast<std::complex<double>*>(output[0].data()),
length,
nbatch,
istride,
idist,
ostride,
odist,
ioffset,
ooffset);
break;
}
break;
case rocfft_array_type_real:
case rocfft_array_type_complex_planar:
case rocfft_array_type_hermitian_planar:
// one or two buffers of plain reals, each copied 1:1
for(int idx = 0; idx < input.size(); ++idx)
{
switch(precision)
{
case rocfft_precision_single:
copy_buffers_1to1(reinterpret_cast<const float*>(input[idx].data()),
reinterpret_cast<float*>(output[idx].data()),
length,
nbatch,
istride,
idist,
ostride,
odist,
ioffset,
ooffset);
break;
case rocfft_precision_double:
copy_buffers_1to1(reinterpret_cast<const double*>(input[idx].data()),
reinterpret_cast<double*>(output[idx].data()),
length,
nbatch,
istride,
idist,
ostride,
odist,
ioffset,
ooffset);
break;
}
}
break;
default:
throw std::runtime_error("Invalid data type");
break;
}
}
else if((itype == rocfft_array_type_complex_interleaved
&& otype == rocfft_array_type_complex_planar)
|| (itype == rocfft_array_type_hermitian_interleaved
&& otype == rocfft_array_type_hermitian_planar))
{
// copy 1to2
switch(precision)
{
case rocfft_precision_single:
copy_buffers_1to2(reinterpret_cast<const std::complex<float>*>(input[0].data()),
reinterpret_cast<float*>(output[0].data()),
reinterpret_cast<float*>(output[1].data()),
length,
nbatch,
istride,
idist,
ostride,
odist,
ioffset,
ooffset);
break;
case rocfft_precision_double:
copy_buffers_1to2(reinterpret_cast<const std::complex<double>*>(input[0].data()),
reinterpret_cast<double*>(output[0].data()),
reinterpret_cast<double*>(output[1].data()),
length,
nbatch,
istride,
idist,
ostride,
odist,
ioffset,
ooffset);
break;
}
}
else if((itype == rocfft_array_type_complex_planar
&& otype == rocfft_array_type_complex_interleaved)
|| (itype == rocfft_array_type_hermitian_planar
&& otype == rocfft_array_type_hermitian_interleaved))
{
// copy 2 to 1
switch(precision)
{
case rocfft_precision_single:
copy_buffers_2to1(reinterpret_cast<const float*>(input[0].data()),
reinterpret_cast<const float*>(input[1].data()),
reinterpret_cast<std::complex<float>*>(output[0].data()),
length,
nbatch,
istride,
idist,
ostride,
odist,
ioffset,
ooffset);
break;
case rocfft_precision_double:
copy_buffers_2to1(reinterpret_cast<const double*>(input[0].data()),
reinterpret_cast<const double*>(input[1].data()),
reinterpret_cast<std::complex<double>*>(output[0].data()),
length,
nbatch,
istride,
idist,
ostride,
odist,
ioffset,
ooffset);
break;
}
}
else
{
throw std::runtime_error("Invalid input and output types.");
}
}
// Unroll arbitrary-dimension copy_buffers into specializations for 1-, 2-, and
// 3-dimensional transforms: scalar indexing types for 1D, tuples for 2D/3D.
template <typename Tallocator1,
          typename Tallocator2,
          typename Tint1,
          typename Tint2,
          typename Tint3>
inline void copy_buffers(const std::vector<std::vector<char, Tallocator1>>& input,
                         std::vector<std::vector<char, Tallocator2>>&       output,
                         const std::vector<Tint1>&                          length,
                         const size_t                                       nbatch,
                         const rocfft_precision                             precision,
                         const rocfft_array_type                            itype,
                         const std::vector<Tint2>&                          istride,
                         const size_t                                       idist,
                         const rocfft_array_type                            otype,
                         const std::vector<Tint3>&                          ostride,
                         const size_t                                       odist,
                         const std::vector<size_t>&                         ioffset,
                         const std::vector<size_t>&                         ooffset)
{
    const auto dim = length.size();
    if(dim == 1)
    {
        copy_buffers(input,
                     output,
                     length[0],
                     nbatch,
                     precision,
                     itype,
                     istride[0],
                     idist,
                     otype,
                     ostride[0],
                     odist,
                     ioffset,
                     ooffset);
    }
    else if(dim == 2)
    {
        copy_buffers(input,
                     output,
                     std::make_tuple(length[0], length[1]),
                     nbatch,
                     precision,
                     itype,
                     std::make_tuple(istride[0], istride[1]),
                     idist,
                     otype,
                     std::make_tuple(ostride[0], ostride[1]),
                     odist,
                     ioffset,
                     ooffset);
    }
    else if(dim == 3)
    {
        copy_buffers(input,
                     output,
                     std::make_tuple(length[0], length[1], length[2]),
                     nbatch,
                     precision,
                     itype,
                     std::make_tuple(istride[0], istride[1], istride[2]),
                     idist,
                     otype,
                     std::make_tuple(ostride[0], ostride[1], ostride[2]),
                     odist,
                     ioffset,
                     ooffset);
    }
    else
    {
        // Dimensions above 3 are unsupported.
        abort();
    }
}
// Compute the L-infinity and L-2 distance between two buffers with strides istride and
// length idist between batches to a buffer with strides ostride and length odist between
// batches. Both buffers are of complex type.
struct VectorNorms
{
    // Euclidean (L2) norm accumulator.
    double l_2 = 0.0;
    // Maximum-magnitude (L-infinity) norm accumulator.
    double l_inf = 0.0;
};
template <typename Tcomplex, typename Tint1, typename Tint2, typename Tint3>
inline VectorNorms distance_1to1_complex(const Tcomplex*                         input,
                                         const Tcomplex*                         output,
                                         const Tint1&                            whole_length,
                                         const size_t                            nbatch,
                                         const Tint2&                            istride,
                                         const size_t                            idist,
                                         const Tint3&                            ostride,
                                         const size_t                            odist,
                                         std::vector<std::pair<size_t, size_t>>& linf_failures,
                                         const double                            linf_cutoff,
                                         const std::vector<size_t>&              ioffset,
                                         const std::vector<size_t>&              ooffset)
{
    double     linf = 0.0;
    double     l2   = 0.0;
    std::mutex linf_failure_lock;

    // When the layouts match, the output index equals the input index and need
    // not be recomputed per element.
    const bool idx_equals_odx = istride == ostride && idist == odist;
    size_t     idx_base       = 0;
    size_t     odx_base       = 0;
    // NOTE(review): this helper partitions column-major but iterates each
    // partition row-major, while the sibling helpers partition row-major.
    // Coverage of all elements is complete either way — confirm the asymmetry
    // is intentional (it only affects the parallel work split).
    auto partitions = partition_colmajor(whole_length);
    for(size_t b = 0; b < nbatch; b++, idx_base += idist, odx_base += odist)
    {
#pragma omp parallel for reduction(max : linf) reduction(+ : l2) num_threads(partitions.size())
        for(size_t part = 0; part < partitions.size(); ++part)
        {
            double     cur_linf = 0.0;
            double     cur_l2   = 0.0;
            auto       index    = partitions[part].first;
            const auto length   = partitions[part].second;
            do
            {
                // Use size_t indices: int can overflow for large transforms.
                const size_t idx = compute_index(index, istride, idx_base);
                const size_t odx = idx_equals_odx ? idx : compute_index(index, ostride, odx_base);
                const double rdiff
                    = std::abs(output[odx + ooffset[0]].real() - input[idx + ioffset[0]].real());
                cur_linf = std::max(rdiff, cur_linf);
                if(cur_linf > linf_cutoff)
                {
                    // Record (batch, index) for elements exceeding the cutoff;
                    // the vector push must be serialized across threads.
                    std::pair<size_t, size_t> fval(b, idx);
                    linf_failure_lock.lock();
                    linf_failures.push_back(fval);
                    linf_failure_lock.unlock();
                }
                cur_l2 += rdiff * rdiff;

                const double idiff
                    = std::abs(output[odx + ooffset[0]].imag() - input[idx + ioffset[0]].imag());
                cur_linf = std::max(idiff, cur_linf);
                if(cur_linf > linf_cutoff)
                {
                    std::pair<size_t, size_t> fval(b, idx);
                    linf_failure_lock.lock();
                    linf_failures.push_back(fval);
                    linf_failure_lock.unlock();
                }
                cur_l2 += idiff * idiff;
            } while(increment_rowmajor(index, length));
            linf = std::max(linf, cur_linf);
            l2 += cur_l2;
        }
    }
    return {.l_2 = sqrt(l2), .l_inf = linf};
}
// Compute the L-infinity and L-2 distance between two buffers with strides istride and
// length idist between batches to a buffer with strides ostride and length odist between
// batches. Both buffers are of real type.
template <typename Tfloat, typename Tint1, typename Tint2, typename Tint3>
inline VectorNorms distance_1to1_real(const Tfloat*                           input,
                                      const Tfloat*                           output,
                                      const Tint1&                            whole_length,
                                      const size_t                            nbatch,
                                      const Tint2&                            istride,
                                      const size_t                            idist,
                                      const Tint3&                            ostride,
                                      const size_t                            odist,
                                      std::vector<std::pair<size_t, size_t>>& linf_failures,
                                      const double                            linf_cutoff,
                                      const std::vector<size_t>&              ioffset,
                                      const std::vector<size_t>&              ooffset)
{
    double     linf = 0.0;
    double     l2   = 0.0;
    std::mutex linf_failure_lock;

    // When the layouts match, the output index equals the input index.
    const bool idx_equals_odx = istride == ostride && idist == odist;
    size_t     idx_base       = 0;
    size_t     odx_base       = 0;
    auto       partitions     = partition_rowmajor(whole_length);
    for(size_t b = 0; b < nbatch; b++, idx_base += idist, odx_base += odist)
    {
#pragma omp parallel for reduction(max : linf) reduction(+ : l2) num_threads(partitions.size())
        for(size_t part = 0; part < partitions.size(); ++part)
        {
            double     cur_linf = 0.0;
            double     cur_l2   = 0.0;
            auto       index    = partitions[part].first;
            const auto length   = partitions[part].second;
            do
            {
                // Use size_t indices: int can overflow for large transforms.
                const size_t idx = compute_index(index, istride, idx_base);
                const size_t odx = idx_equals_odx ? idx : compute_index(index, ostride, odx_base);
                const double diff = std::abs(output[odx + ooffset[0]] - input[idx + ioffset[0]]);
                cur_linf          = std::max(diff, cur_linf);
                if(cur_linf > linf_cutoff)
                {
                    // Record (batch, index) for elements exceeding the cutoff.
                    std::pair<size_t, size_t> fval(b, idx);
                    linf_failure_lock.lock();
                    linf_failures.push_back(fval);
                    linf_failure_lock.unlock();
                }
                cur_l2 += diff * diff;
            } while(increment_rowmajor(index, length));
            linf = std::max(linf, cur_linf);
            l2 += cur_l2;
        }
    }
    return {.l_2 = sqrt(l2), .l_inf = linf};
}
// Compute the L-infinity and L-2 distance between two buffers with strides istride and
// length idist between batches to a buffer with strides ostride and length odist between
// batches. input is complex-interleaved, output is complex-planar.
// NB: callers comparing planar input against interleaved output reuse this
// helper with the buffer roles reversed; strides, distances, and offsets must
// then all be swapped consistently by the caller.
template <typename Tval, typename Tint1, typename T2, typename T3>
inline VectorNorms distance_1to2(const std::complex<Tval>*               input,
                                 const Tval*                             output0,
                                 const Tval*                             output1,
                                 const Tint1&                            whole_length,
                                 const size_t                            nbatch,
                                 const T2&                               istride,
                                 const size_t                            idist,
                                 const T3&                               ostride,
                                 const size_t                            odist,
                                 std::vector<std::pair<size_t, size_t>>& linf_failures,
                                 const double                            linf_cutoff,
                                 const std::vector<size_t>&              ioffset,
                                 const std::vector<size_t>&              ooffset)
{
    double     linf = 0.0;
    double     l2   = 0.0;
    std::mutex linf_failure_lock;

    // When the layouts match, the output index equals the input index.
    const bool idx_equals_odx = istride == ostride && idist == odist;
    size_t     idx_base       = 0;
    size_t     odx_base       = 0;
    auto       partitions     = partition_rowmajor(whole_length);
    for(size_t b = 0; b < nbatch; b++, idx_base += idist, odx_base += odist)
    {
#pragma omp parallel for reduction(max : linf) reduction(+ : l2) num_threads(partitions.size())
        for(size_t part = 0; part < partitions.size(); ++part)
        {
            double     cur_linf = 0.0;
            double     cur_l2   = 0.0;
            auto       index    = partitions[part].first;
            const auto length   = partitions[part].second;
            do
            {
                // Use size_t indices: int can overflow for large transforms.
                const size_t idx = compute_index(index, istride, idx_base);
                const size_t odx = idx_equals_odx ? idx : compute_index(index, ostride, odx_base);
                // Real components live in output0 (offset ooffset[0]).
                const double rdiff
                    = std::abs(output0[odx + ooffset[0]] - input[idx + ioffset[0]].real());
                cur_linf = std::max(rdiff, cur_linf);
                if(cur_linf > linf_cutoff)
                {
                    std::pair<size_t, size_t> fval(b, idx);
                    linf_failure_lock.lock();
                    linf_failures.push_back(fval);
                    linf_failure_lock.unlock();
                }
                cur_l2 += rdiff * rdiff;

                // Imaginary components live in output1 (offset ooffset[1]).
                const double idiff
                    = std::abs(output1[odx + ooffset[1]] - input[idx + ioffset[0]].imag());
                cur_linf = std::max(idiff, cur_linf);
                if(cur_linf > linf_cutoff)
                {
                    std::pair<size_t, size_t> fval(b, idx);
                    linf_failure_lock.lock();
                    linf_failures.push_back(fval);
                    linf_failure_lock.unlock();
                }
                cur_l2 += idiff * idiff;
            } while(increment_rowmajor(index, length));
            linf = std::max(linf, cur_linf);
            l2 += cur_l2;
        }
    }
    return {.l_2 = sqrt(l2), .l_inf = linf};
}
// Compute the L-inifnity and L-2 distance between two buffers of dimension length and
// with types given by itype, otype, and precision.
template <typename Tallocator1,
          typename Tallocator2,
          typename Tint1,
          typename Tint2,
          typename Tint3>
inline VectorNorms distance(const std::vector<std::vector<char, Tallocator1>>& input,
                            const std::vector<std::vector<char, Tallocator2>>& output,
                            const Tint1&                                       length,
                            const size_t                                       nbatch,
                            const rocfft_precision                             precision,
                            const rocfft_array_type                            itype,
                            const Tint2&                                       istride,
                            const size_t                                       idist,
                            const rocfft_array_type                            otype,
                            const Tint3&                                       ostride,
                            const size_t                                       odist,
                            std::vector<std::pair<size_t, size_t>>&            linf_failures,
                            const double                                       linf_cutoff,
                            const std::vector<size_t>&                         ioffset,
                            const std::vector<size_t>&                         ooffset)
{
    VectorNorms dist;
    if(itype == otype)
    {
        switch(itype)
        {
        case rocfft_array_type_complex_interleaved:
        case rocfft_array_type_hermitian_interleaved:
            switch(precision)
            {
            case rocfft_precision_single:
                dist = distance_1to1_complex(
                    reinterpret_cast<const std::complex<float>*>(input[0].data()),
                    reinterpret_cast<const std::complex<float>*>(output[0].data()),
                    length,
                    nbatch,
                    istride,
                    idist,
                    ostride,
                    odist,
                    linf_failures,
                    linf_cutoff,
                    ioffset,
                    ooffset);
                break;
            case rocfft_precision_double:
                dist = distance_1to1_complex(
                    reinterpret_cast<const std::complex<double>*>(input[0].data()),
                    reinterpret_cast<const std::complex<double>*>(output[0].data()),
                    length,
                    nbatch,
                    istride,
                    idist,
                    ostride,
                    odist,
                    linf_failures,
                    linf_cutoff,
                    ioffset,
                    ooffset);
                break;
            }
            // Square so partial L2 norms can be summed; sqrt is applied once at the end.
            dist.l_2 *= dist.l_2;
            break;
        case rocfft_array_type_real:
        case rocfft_array_type_complex_planar:
        case rocfft_array_type_hermitian_planar:
            // Compare each component buffer separately (one for real data, two
            // for planar complex data).  Use size_t to match input.size().
            for(size_t idx = 0; idx < input.size(); ++idx)
            {
                VectorNorms d;
                switch(precision)
                {
                case rocfft_precision_single:
                    d = distance_1to1_real(reinterpret_cast<const float*>(input[idx].data()),
                                           reinterpret_cast<const float*>(output[idx].data()),
                                           length,
                                           nbatch,
                                           istride,
                                           idist,
                                           ostride,
                                           odist,
                                           linf_failures,
                                           linf_cutoff,
                                           ioffset,
                                           ooffset);
                    break;
                case rocfft_precision_double:
                    d = distance_1to1_real(reinterpret_cast<const double*>(input[idx].data()),
                                           reinterpret_cast<const double*>(output[idx].data()),
                                           length,
                                           nbatch,
                                           istride,
                                           idist,
                                           ostride,
                                           odist,
                                           linf_failures,
                                           linf_cutoff,
                                           ioffset,
                                           ooffset);
                    break;
                }
                dist.l_inf = std::max(d.l_inf, dist.l_inf);
                dist.l_2 += d.l_2 * d.l_2;
            }
            break;
        default:
            throw std::runtime_error("Invalid input and output types.");
            break;
        }
    }
    else if((itype == rocfft_array_type_complex_interleaved
             && otype == rocfft_array_type_complex_planar)
            || (itype == rocfft_array_type_hermitian_interleaved
                && otype == rocfft_array_type_hermitian_planar))
    {
        // Interleaved input compared against planar output.
        switch(precision)
        {
        case rocfft_precision_single:
            dist = distance_1to2(reinterpret_cast<const std::complex<float>*>(input[0].data()),
                                 reinterpret_cast<const float*>(output[0].data()),
                                 reinterpret_cast<const float*>(output[1].data()),
                                 length,
                                 nbatch,
                                 istride,
                                 idist,
                                 ostride,
                                 odist,
                                 linf_failures,
                                 linf_cutoff,
                                 ioffset,
                                 ooffset);
            break;
        case rocfft_precision_double:
            dist = distance_1to2(reinterpret_cast<const std::complex<double>*>(input[0].data()),
                                 reinterpret_cast<const double*>(output[0].data()),
                                 reinterpret_cast<const double*>(output[1].data()),
                                 length,
                                 nbatch,
                                 istride,
                                 idist,
                                 ostride,
                                 odist,
                                 linf_failures,
                                 linf_cutoff,
                                 ioffset,
                                 ooffset);
            break;
        }
        dist.l_2 *= dist.l_2;
    }
    else if((itype == rocfft_array_type_complex_planar
             && otype == rocfft_array_type_complex_interleaved)
            || (itype == rocfft_array_type_hermitian_planar
                && otype == rocfft_array_type_hermitian_interleaved))
    {
        // Planar input compared against interleaved output: reuse distance_1to2
        // with the buffer roles reversed.  Strides, distances, AND offsets must
        // all be swapped together.
        // BUGFIX: the offsets were previously passed unswapped (ioffset, ooffset),
        // which applied the input offset to the output buffer and vice versa.
        switch(precision)
        {
        case rocfft_precision_single:
            dist = distance_1to2(reinterpret_cast<const std::complex<float>*>(output[0].data()),
                                 reinterpret_cast<const float*>(input[0].data()),
                                 reinterpret_cast<const float*>(input[1].data()),
                                 length,
                                 nbatch,
                                 ostride,
                                 odist,
                                 istride,
                                 idist,
                                 linf_failures,
                                 linf_cutoff,
                                 ooffset,
                                 ioffset);
            break;
        case rocfft_precision_double:
            dist = distance_1to2(reinterpret_cast<const std::complex<double>*>(output[0].data()),
                                 reinterpret_cast<const double*>(input[0].data()),
                                 reinterpret_cast<const double*>(input[1].data()),
                                 length,
                                 nbatch,
                                 ostride,
                                 odist,
                                 istride,
                                 idist,
                                 linf_failures,
                                 linf_cutoff,
                                 ooffset,
                                 ioffset);
            break;
        }
        dist.l_2 *= dist.l_2;
    }
    else
    {
        throw std::runtime_error("Invalid input and output types.");
    }
    dist.l_2 = sqrt(dist.l_2);
    return dist;
}
// Unroll arbitrary-dimension distance into specializations for 1-, 2-, and
// 3-dimensional transforms: scalar indexing types for 1D, tuples for 2D/3D.
template <typename Tallocator1,
          typename Tallocator2,
          typename Tint1,
          typename Tint2,
          typename Tint3>
inline VectorNorms distance(const std::vector<std::vector<char, Tallocator1>>& input,
                            const std::vector<std::vector<char, Tallocator2>>& output,
                            const std::vector<Tint1>&                          length,
                            const size_t                                       nbatch,
                            const rocfft_precision                             precision,
                            const rocfft_array_type                            itype,
                            const std::vector<Tint2>&                          istride,
                            const size_t                                       idist,
                            const rocfft_array_type                            otype,
                            const std::vector<Tint3>&                          ostride,
                            const size_t                                       odist,
                            std::vector<std::pair<size_t, size_t>>&            linf_failures,
                            const double                                       linf_cutoff,
                            const std::vector<size_t>&                         ioffset,
                            const std::vector<size_t>&                         ooffset)
{
    const auto dim = length.size();
    if(dim == 1)
    {
        return distance(input,
                        output,
                        length[0],
                        nbatch,
                        precision,
                        itype,
                        istride[0],
                        idist,
                        otype,
                        ostride[0],
                        odist,
                        linf_failures,
                        linf_cutoff,
                        ioffset,
                        ooffset);
    }
    if(dim == 2)
    {
        return distance(input,
                        output,
                        std::make_tuple(length[0], length[1]),
                        nbatch,
                        precision,
                        itype,
                        std::make_tuple(istride[0], istride[1]),
                        idist,
                        otype,
                        std::make_tuple(ostride[0], ostride[1]),
                        odist,
                        linf_failures,
                        linf_cutoff,
                        ioffset,
                        ooffset);
    }
    if(dim == 3)
    {
        return distance(input,
                        output,
                        std::make_tuple(length[0], length[1], length[2]),
                        nbatch,
                        precision,
                        itype,
                        std::make_tuple(istride[0], istride[1], istride[2]),
                        idist,
                        otype,
                        std::make_tuple(ostride[0], ostride[1], ostride[2]),
                        odist,
                        linf_failures,
                        linf_cutoff,
                        ioffset,
                        ooffset);
    }
    // Dimensions above 3 are unsupported.
    abort();
}
// Compute the L-infinity and L-2 norm of a buffer with strides istride and
// length idist. Data is std::complex.
template <typename Tcomplex, typename T1, typename T2>
inline VectorNorms norm_complex(const Tcomplex*            input,
                                const T1&                  whole_length,
                                const size_t               nbatch,
                                const T2&                  istride,
                                const size_t               idist,
                                const std::vector<size_t>& offset)
{
    double linf     = 0.0;
    double l2       = 0.0;
    size_t idx_base = 0;
    auto   partitions = partition_rowmajor(whole_length);
    for(size_t b = 0; b < nbatch; b++, idx_base += idist)
    {
#pragma omp parallel for reduction(max : linf) reduction(+ : l2) num_threads(partitions.size())
        for(size_t part = 0; part < partitions.size(); ++part)
        {
            double     cur_linf = 0.0;
            double     cur_l2   = 0.0;
            auto       index    = partitions[part].first;
            const auto length   = partitions[part].second;
            do
            {
                // Use a size_t index: int can overflow for large transforms.
                const size_t idx  = compute_index(index, istride, idx_base);
                // Real and imaginary parts contribute independently, matching
                // the treatment of planar data in norm_real.
                const double rval = std::abs(input[idx + offset[0]].real());
                cur_linf          = std::max(rval, cur_linf);
                cur_l2 += rval * rval;
                const double ival = std::abs(input[idx + offset[0]].imag());
                cur_linf          = std::max(ival, cur_linf);
                cur_l2 += ival * ival;
            } while(increment_rowmajor(index, length));
            linf = std::max(linf, cur_linf);
            l2 += cur_l2;
        }
    }
    return {.l_2 = sqrt(l2), .l_inf = linf};
}
// Compute the L-infinity and L-2 norm of abuffer with strides istride and
// length idist. Data is real-valued.
template <typename Tfloat, typename T1, typename T2>
inline VectorNorms norm_real(const Tfloat*              input,
                             const T1&                  whole_length,
                             const size_t               nbatch,
                             const T2&                  istride,
                             const size_t               idist,
                             const std::vector<size_t>& offset)
{
    double linf     = 0.0;
    double l2       = 0.0;
    size_t idx_base = 0;
    auto   partitions = partition_rowmajor(whole_length);
    for(size_t b = 0; b < nbatch; b++, idx_base += idist)
    {
#pragma omp parallel for reduction(max : linf) reduction(+ : l2) num_threads(partitions.size())
        for(size_t part = 0; part < partitions.size(); ++part)
        {
            double     cur_linf = 0.0;
            double     cur_l2   = 0.0;
            auto       index    = partitions[part].first;
            const auto length   = partitions[part].second;
            do
            {
                // Use a size_t index: int can overflow for large transforms.
                const size_t idx = compute_index(index, istride, idx_base);
                const double val = std::abs(input[idx + offset[0]]);
                cur_linf         = std::max(val, cur_linf);
                cur_l2 += val * val;
            } while(increment_rowmajor(index, length));
            linf = std::max(linf, cur_linf);
            l2 += cur_l2;
        }
    }
    return {.l_2 = sqrt(l2), .l_inf = linf};
}
// Compute the L-infinity and L-2 norm of abuffer with strides istride and
// length idist. Data format is given by precision and itype.
template <typename Tallocator1, typename T1, typename T2>
inline VectorNorms norm(const std::vector<std::vector<char, Tallocator1>>& input,
                        const T1&                                          length,
                        const size_t                                       nbatch,
                        const rocfft_precision                             precision,
                        const rocfft_array_type                            itype,
                        const T2&                                          istride,
                        const size_t                                       idist,
                        const std::vector<size_t>&                         offset)
{
    // Accumulator; renamed from "norm" to avoid shadowing the function name.
    VectorNorms result;
    switch(itype)
    {
    case rocfft_array_type_complex_interleaved:
    case rocfft_array_type_hermitian_interleaved:
        switch(precision)
        {
        case rocfft_precision_single:
            result = norm_complex(reinterpret_cast<const std::complex<float>*>(input[0].data()),
                                  length,
                                  nbatch,
                                  istride,
                                  idist,
                                  offset);
            break;
        case rocfft_precision_double:
            result = norm_complex(reinterpret_cast<const std::complex<double>*>(input[0].data()),
                                  length,
                                  nbatch,
                                  istride,
                                  idist,
                                  offset);
            break;
        }
        // Square so component-wise L2 norms can be summed; sqrt applied at the end.
        result.l_2 *= result.l_2;
        break;
    case rocfft_array_type_real:
    case rocfft_array_type_complex_planar:
    case rocfft_array_type_hermitian_planar:
        // One component buffer for real data, two for planar complex data.
        // Use size_t to match input.size().
        for(size_t idx = 0; idx < input.size(); ++idx)
        {
            VectorNorms n;
            switch(precision)
            {
            case rocfft_precision_single:
                n = norm_real(reinterpret_cast<const float*>(input[idx].data()),
                              length,
                              nbatch,
                              istride,
                              idist,
                              offset);
                break;
            case rocfft_precision_double:
                n = norm_real(reinterpret_cast<const double*>(input[idx].data()),
                              length,
                              nbatch,
                              istride,
                              idist,
                              offset);
                break;
            }
            result.l_inf = std::max(n.l_inf, result.l_inf);
            result.l_2 += n.l_2 * n.l_2;
        }
        break;
    default:
        throw std::runtime_error("Invalid data type");
        break;
    }
    result.l_2 = sqrt(result.l_2);
    return result;
}
// Unroll arbitrary-dimension norm into specializations for 1-, 2-, and
// 3-dimensional transforms: scalar indexing types for 1D, tuples for 2D/3D.
template <typename Tallocator1, typename T1, typename T2>
inline VectorNorms norm(const std::vector<std::vector<char, Tallocator1>>& input,
                        const std::vector<T1>&                             length,
                        const size_t                                       nbatch,
                        const rocfft_precision                             precision,
                        const rocfft_array_type                            type,
                        const std::vector<T2>&                             stride,
                        const size_t                                       dist,
                        const std::vector<size_t>&                         offset)
{
    const auto dim = length.size();
    if(dim == 1)
    {
        return norm(input, length[0], nbatch, precision, type, stride[0], dist, offset);
    }
    if(dim == 2)
    {
        return norm(input,
                    std::make_tuple(length[0], length[1]),
                    nbatch,
                    precision,
                    type,
                    std::make_tuple(stride[0], stride[1]),
                    dist,
                    offset);
    }
    if(dim == 3)
    {
        return norm(input,
                    std::make_tuple(length[0], length[1], length[2]),
                    nbatch,
                    precision,
                    type,
                    std::make_tuple(stride[0], stride[1], stride[2]),
                    dist,
                    offset);
    }
    // Dimensions above 3 are unsupported.
    abort();
}
// Given a buffer of complex values stored in a vector of chars (or two vectors in the
// case of planar format), impose Hermitian symmetry.
// NB: length is the dimensions of the FFT, not the data layout dimensions.
template <typename Tfloat, typename Tallocator, typename Tsize>
inline void impose_hermitian_symmetry(std::vector<std::vector<char, Tallocator>>& vals,
                                      const std::vector<Tsize>&                   length,
                                      const std::vector<Tsize>&                   istride,
                                      const Tsize                                 idist,
                                      const Tsize                                 nbatch)
{
    switch(vals.size())
    {
    case 1:
    {
        // Complex interleaved data
        for(auto ibatch = 0; ibatch < nbatch; ++ibatch)
        {
            auto data = ((std::complex<Tfloat>*)vals[0].data()) + ibatch * idist;
            switch(length.size())
            {
            case 3:
                // Nyquist points must have zero imaginary part so the data is a
                // valid spectrum of a real-valued signal.
                if(length[2] % 2 == 0)
                {
                    data[istride[2] * (length[2] / 2)].imag(0.0);
                }
                if(length[0] % 2 == 0 && length[2] % 2 == 0)
                {
                    data[istride[0] * (length[0] / 2) + istride[2] * (length[2] / 2)].imag(0.0);
                }
                if(length[1] % 2 == 0 && length[2] % 2 == 0)
                {
                    data[istride[1] * (length[1] / 2) + istride[2] * (length[2] / 2)].imag(0.0);
                }
                if(length[0] % 2 == 0 && length[1] % 2 == 0 && length[2] % 2 == 0)
                {
                    // clang-format off
                    data[istride[0] * (length[0] / 2) + istride[1] * (length[1] / 2)
                         + istride[2] * (length[2] / 2)]
                        .imag(0.0);
                    // clang-format on
                }
                // y-axis:
                for(auto j = 1; j < (length[1] + 1) / 2; ++j)
                {
                    data[istride[1] * (length[1] - j)] = std::conj(data[istride[1] * j]);
                }
                if(length[0] % 2 == 0)
                {
                    // y-axis at x-nyquist
                    for(auto j = 1; j < (length[1] + 1) / 2; ++j)
                    {
                        // clang-format off
                        data[istride[0] * (length[0] / 2) + istride[1] * (length[1] - j)]
                            = std::conj(data[istride[0] * (length[0] / 2) + istride[1] * j]);
                        // clang-format on
                    }
                }
                // x-axis:
                for(auto i = 1; i < (length[0] + 1) / 2; ++i)
                {
                    data[istride[0] * (length[0] - i)] = std::conj(data[istride[0] * i]);
                }
                if(length[1] % 2 == 0)
                {
                    // x-axis at y-nyquist
                    for(auto i = 1; i < (length[0] + 1) / 2; ++i)
                    {
                        // clang-format off
                        data[istride[0] * (length[0] - i) + istride[1] * (length[1] / 2)]
                            = std::conj(data[istride[0] * i + istride[1] * (length[1] / 2)]);
                        // clang-format on
                    }
                }
                // x-y plane:
                for(auto i = 1; i < (length[0] + 1) / 2; ++i)
                {
                    for(auto j = 1; j < length[1]; ++j)
                    {
                        // clang-format off
                        data[istride[0] * (length[0] - i) + istride[1] * (length[1] - j)]
                            = std::conj(data[istride[0] * i + istride[1] * j]);
                        // clang-format on
                    }
                }
                if(length[2] % 2 == 0)
                {
                    // x-axis at z-nyquist
                    for(auto i = 1; i < (length[0] + 1) / 2; ++i)
                    {
                        data[istride[0] * (length[0] - i) + istride[2] * (length[2] / 2)]
                            = std::conj(data[istride[0] * i + istride[2] * (length[2] / 2)]);
                    }
                    if(length[1] % 2 == 0)
                    {
                        // x-axis at yz-nyquist
                        // BUGFIX: this loop previously duplicated the z-nyquist
                        // loop above, omitting the istride[1] * (length[1] / 2)
                        // term on both sides of the assignment.
                        for(auto i = 1; i < (length[0] + 1) / 2; ++i)
                        {
                            // clang-format off
                            data[istride[0] * (length[0] - i) + istride[1] * (length[1] / 2)
                                 + istride[2] * (length[2] / 2)]
                                = std::conj(data[istride[0] * i + istride[1] * (length[1] / 2)
                                                 + istride[2] * (length[2] / 2)]);
                            // clang-format on
                        }
                    }
                    // y-axis: at z-nyquist
                    for(auto j = 1; j < (length[1] + 1) / 2; ++j)
                    {
                        data[istride[1] * (length[1] - j) + istride[2] * (length[2] / 2)]
                            = std::conj(data[istride[1] * j + istride[2] * (length[2] / 2)]);
                    }
                    if(length[0] % 2 == 0)
                    {
                        // y-axis: at xz-nyquist
                        for(auto j = 1; j < (length[1] + 1) / 2; ++j)
                        {
                            // clang-format off
                            data[istride[0] * (length[0] / 2) + istride[1] * (length[1] - j)
                                 + istride[2] * (length[2] / 2)]
                                = std::conj(data[istride[0] * (length[0] / 2) + istride[1] * j
                                                 + istride[2] * (length[2] / 2)]);
                            // clang-format on
                        }
                    }
                    // x-y plane: at z-nyquist
                    for(auto i = 1; i < (length[0] + 1) / 2; ++i)
                    {
                        for(auto j = 1; j < length[1]; ++j)
                        {
                            // clang-format off
                            data[istride[0] * (length[0] - i) + istride[1] * (length[1] - j)
                                 + istride[2] * (length[2] / 2)]
                                = std::conj(data[istride[0] * i + istride[1] * j
                                                 + istride[2] * (length[2] / 2)]);
                            // clang-format on
                        }
                    }
                }
                // fall-through: the lower-dimensional symmetries also apply.
            case 2:
                if(length[1] % 2 == 0)
                {
                    data[istride[1] * (length[1] / 2)].imag(0.0);
                }
                if(length[0] % 2 == 0 && length[1] % 2 == 0)
                {
                    data[istride[0] * (length[0] / 2) + istride[1] * (length[1] / 2)].imag(0.0);
                }
                for(auto i = 1; i < (length[0] + 1) / 2; ++i)
                {
                    data[istride[0] * (length[0] - i)] = std::conj(data[istride[0] * i]);
                }
                if(length[1] % 2 == 0)
                {
                    for(auto i = 1; i < (length[0] + 1) / 2; ++i)
                    {
                        data[istride[0] * (length[0] - i) + istride[1] * (length[1] / 2)]
                            = std::conj(data[istride[0] * i + istride[1] * (length[1] / 2)]);
                    }
                }
                // fall-through
            case 1:
                // The DC element is always real.
                data[0].imag(0.0);
                if(length[0] % 2 == 0)
                {
                    data[istride[0] * (length[0] / 2)].imag(0.0);
                }
                break;
            default:
                throw std::runtime_error("Invalid dimension for imposeHermitianSymmetry");
                break;
            }
        }
        break;
    }
    case 2:
    {
        // Complex planar data: vals[1] holds the imaginary components, which
        // are zeroed at the DC and Nyquist points.
        for(auto ibatch = 0; ibatch < nbatch; ++ibatch)
        {
            auto idata = ((Tfloat*)vals[1].data()) + ibatch * idist;
            switch(length.size())
            {
            case 3:
                throw std::runtime_error("Not implemented");
                // FIXME: implement
            case 2:
                throw std::runtime_error("Not implemented");
                // FIXME: implement
            case 1:
                idata[0] = 0.0;
                if(length[0] % 2 == 0)
                {
                    idata[istride[0] * (length[0] / 2)] = 0.0;
                }
                break;
            default:
                throw std::runtime_error("Invalid dimension for imposeHermitianSymmetry");
                break;
            }
        }
        break;
    }
    default:
        throw std::runtime_error("Invalid data type");
        break;
    }
}
// Given an array type and transform length, strides, etc, load random floats in [0,1]
// into the input array of floats/doubles or complex floats/doubles, which is stored in a
// vector of chars (or two vectors in the case of planar format).
// lengths are the memory lengths (ie not the transform parameters)
template <typename Tfloat, typename Tallocator, typename Tint1>
inline void set_input(std::vector<std::vector<char, Tallocator>>& input,
                      const rocfft_array_type                     itype,
                      const Tint1&                                whole_length,
                      const Tint1&                                istride,
                      const size_t                                idist,
                      const size_t                                nbatch)
{
    switch(itype)
    {
    case rocfft_array_type_complex_interleaved:
    case rocfft_array_type_hermitian_interleaved:
    {
        auto   idata      = (std::complex<Tfloat>*)input[0].data();
        size_t i_base     = 0;
        auto   partitions = partition_rowmajor(whole_length);
        for(size_t b = 0; b < nbatch; b++, i_base += idist)
        {
#pragma omp parallel for num_threads(partitions.size())
            for(size_t part = 0; part < partitions.size(); ++part)
            {
                auto       index  = partitions[part].first;
                const auto length = partitions[part].second;
                // Seed per partition from its starting index, so generated
                // values do not depend on thread scheduling.
                std::mt19937 gen(compute_index(index, istride, i_base));
                do
                {
                    // Use a size_t index: int can overflow for large transforms.
                    const size_t               i = compute_index(index, istride, i_base);
                    const std::complex<Tfloat> val((Tfloat)gen() / (Tfloat)gen.max(),
                                                   (Tfloat)gen() / (Tfloat)gen.max());
                    idata[i] = val;
                } while(increment_rowmajor(index, length));
            }
        }
        break;
    }
    case rocfft_array_type_complex_planar:
    case rocfft_array_type_hermitian_planar:
    {
        auto   ireal      = (Tfloat*)input[0].data();
        auto   iimag      = (Tfloat*)input[1].data();
        size_t i_base     = 0;
        auto   partitions = partition_rowmajor(whole_length);
        for(size_t b = 0; b < nbatch; b++, i_base += idist)
        {
#pragma omp parallel for num_threads(partitions.size())
            for(size_t part = 0; part < partitions.size(); ++part)
            {
                auto       index  = partitions[part].first;
                const auto length = partitions[part].second;
                std::mt19937 gen(compute_index(index, istride, i_base));
                do
                {
                    const size_t               i = compute_index(index, istride, i_base);
                    const std::complex<Tfloat> val((Tfloat)gen() / (Tfloat)gen.max(),
                                                   (Tfloat)gen() / (Tfloat)gen.max());
                    // Split the complex value across the two planar buffers.
                    ireal[i] = val.real();
                    iimag[i] = val.imag();
                } while(increment_rowmajor(index, length));
            }
        }
        break;
    }
    case rocfft_array_type_real:
    {
        auto   idata      = (Tfloat*)input[0].data();
        size_t i_base     = 0;
        auto   partitions = partition_rowmajor(whole_length);
        for(size_t b = 0; b < nbatch; b++, i_base += idist)
        {
#pragma omp parallel for num_threads(partitions.size())
            for(size_t part = 0; part < partitions.size(); ++part)
            {
                auto       index  = partitions[part].first;
                const auto length = partitions[part].second;
                std::mt19937 gen(compute_index(index, istride, i_base));
                do
                {
                    const size_t i   = compute_index(index, istride, i_base);
                    const Tfloat val = (Tfloat)gen() / (Tfloat)gen.max();
                    idata[i]         = val;
                } while(increment_rowmajor(index, length));
            }
        }
        break;
    }
    default:
        throw std::runtime_error("Input layout format not yet supported");
        break;
    }
}
// Unroll set_input into specializations for 1, 2, and 3 dimensions: scalar
// indexing types for 1D, tuples for 2D/3D.
template <typename Tfloat, typename Tallocator>
inline void set_input(std::vector<std::vector<char, Tallocator>>& input,
                      const rocfft_array_type                     itype,
                      const std::vector<size_t>&                  length,
                      const std::vector<size_t>&                  istride,
                      const size_t                                idist,
                      const size_t                                nbatch)
{
    const auto dim = length.size();
    if(dim == 1)
    {
        set_input<Tfloat>(input, itype, length[0], istride[0], idist, nbatch);
    }
    else if(dim == 2)
    {
        set_input<Tfloat>(input,
                          itype,
                          std::make_tuple(length[0], length[1]),
                          std::make_tuple(istride[0], istride[1]),
                          idist,
                          nbatch);
    }
    else if(dim == 3)
    {
        set_input<Tfloat>(input,
                          itype,
                          std::make_tuple(length[0], length[1], length[2]),
                          std::make_tuple(istride[0], istride[1], istride[2]),
                          idist,
                          nbatch);
    }
    else
    {
        // Dimensions above 3 are unsupported.
        abort();
    }
}
// Compute the idist for a given transform based on the placeness, transform type, and
// data layout.
template <typename Tsize>
inline size_t set_idist(const rocfft_result_placement place,
                        const rocfft_transform_type   transformType,
                        const std::vector<Tsize>&     length,
                        const std::vector<Tsize>&     istride)
{
    const Tsize dim = length.size();
    // In-place 1D real-forward transforms need extra distance to hold the
    // (length/2 + 1) complex outputs in the same buffer.
    if(transformType == rocfft_transform_type_real_forward && dim == 1
       && place == rocfft_placement_inplace)
    {
        return 2 * (length[0] / 2 + 1) * istride[0];
    }
    // 1D real-inverse input holds length/2 + 1 complex elements.
    if(transformType == rocfft_transform_type_real_inverse && dim == 1)
    {
        return (length[0] / 2 + 1) * istride[0];
    }
    Tsize idist = (transformType == rocfft_transform_type_real_inverse)
                      ? (length[dim - 1] / 2 + 1) * istride[dim - 1]
                      : length[dim - 1] * istride[dim - 1];
    // Take the maximum span over the remaining dimensions for non-contiguous
    // layouts.  Use an unsigned index (compared as i + 1 < dim) to avoid the
    // previous signed/unsigned comparison between int and Tsize.
    for(Tsize i = 0; i + 1 < dim; ++i)
    {
        idist = std::max(length[i] * istride[i], idist);
    }
    return idist;
}
// Compute the odist for a given transform based on the placeness, transform type, and
// data layout. Row-major.
template <typename Tsize>
inline size_t set_odist(const rocfft_result_placement place,
                        const rocfft_transform_type   transformType,
                        const std::vector<Tsize>&     length,
                        const std::vector<Tsize>&     ostride)
{
    const Tsize dim = length.size();
    // In-place 1D real-inverse transforms need extra distance: the real output
    // shares its buffer with the (length/2 + 1) complex inputs.
    if(transformType == rocfft_transform_type_real_inverse && dim == 1
       && place == rocfft_placement_inplace)
    {
        return 2 * (length[0] / 2 + 1) * ostride[0];
    }
    // 1D real-forward output holds length/2 + 1 complex elements.
    if(transformType == rocfft_transform_type_real_forward && dim == 1)
    {
        return (length[0] / 2 + 1) * ostride[0];
    }
    Tsize odist = (transformType == rocfft_transform_type_real_forward)
                      ? (length[dim - 1] / 2 + 1) * ostride[dim - 1]
                      : length[dim - 1] * ostride[dim - 1];
    // Take the maximum span over the remaining dimensions for non-contiguous
    // layouts.  Unsigned index (i + 1 < dim) avoids the previous
    // signed/unsigned comparison between int and Tsize.
    for(Tsize i = 0; i + 1 < dim; ++i)
    {
        odist = std::max(length[i] * ostride[i], odist);
    }
    return odist;
}
// Given a data type and precision, the distance between batches, and the batch size,
// allocate the required host buffer(s).
template <typename Allocator = std::allocator<char>>
inline std::vector<std::vector<char, Allocator>> allocate_host_buffer(
    const rocfft_precision precision, const rocfft_array_type type, const std::vector<size_t>& size)
{
    // One buffer per component; each is sized in bytes using the element width
    // implied by precision and array type.  Use size_t to match size.size()
    // and avoid the previous signed/unsigned loop comparison.
    std::vector<std::vector<char, Allocator>> buffers(size.size());
    for(size_t i = 0; i < size.size(); ++i)
    {
        buffers[i].resize(size[i] * var_size<size_t>(precision, type));
    }
    return buffers;
}
// Given a data type and dimensions, fill the buffer, imposing Hermitian symmetry if
// necessary.
// NB: length is the logical size of the FFT, and not necessarily the data dimensions
template <typename Allocator = std::allocator<char>>
inline std::vector<std::vector<char, Allocator>> compute_input(const rocfft_params& params)
{
    auto input = allocate_host_buffer<Allocator>(params.precision, params.itype, params.isize);
    // Zero the buffers so any padding bytes outside the transform layout are
    // deterministic.  Fill with a plain 0 rather than the double literal 0.0,
    // which forced a double->char conversion for every byte.
    for(auto& i : input)
    {
        std::fill(i.begin(), i.end(), 0);
    }
    switch(params.precision)
    {
    case rocfft_precision_double:
        set_input<double>(
            input, params.itype, params.ilength(), params.istride, params.idist, params.nbatch);
        break;
    case rocfft_precision_single:
        set_input<float>(
            input, params.itype, params.ilength(), params.istride, params.idist, params.nbatch);
        break;
    }
    // Hermitian input must satisfy conjugate symmetry to represent the spectrum
    // of real data; patch the randomly generated values accordingly.
    if(params.itype == rocfft_array_type_hermitian_interleaved
       || params.itype == rocfft_array_type_hermitian_planar)
    {
        switch(params.precision)
        {
        case rocfft_precision_double:
            impose_hermitian_symmetry<double>(
                input, params.length, params.istride, params.idist, params.nbatch);
            break;
        case rocfft_precision_single:
            impose_hermitian_symmetry<float>(
                input, params.length, params.istride, params.idist, params.nbatch);
            break;
        }
    }
    return input;
}
// Check that the input and output types are consistent.
inline void check_iotypes(const rocfft_result_placement place,
                          const rocfft_transform_type   transformType,
                          const rocfft_array_type       itype,
                          const rocfft_array_type       otype)
{
    // Validate that itype is a recognized array type.
    switch(itype)
    {
    case rocfft_array_type_complex_interleaved:
    case rocfft_array_type_complex_planar:
    case rocfft_array_type_hermitian_interleaved:
    case rocfft_array_type_hermitian_planar:
    case rocfft_array_type_real:
        break;
    default:
        throw std::runtime_error("Invalid Input array type format");
    }
    // Validate that otype is a recognized array type.
    switch(otype)
    {
    case rocfft_array_type_complex_interleaved:
    case rocfft_array_type_complex_planar:
    case rocfft_array_type_hermitian_interleaved:
    case rocfft_array_type_hermitian_planar:
    case rocfft_array_type_real:
        break;
    default:
        // BUGFIX: this message previously said "Input" even though it reports
        // on the output type.
        throw std::runtime_error("Invalid Output array type format");
    }
    // Check that format choices are supported
    if(transformType != rocfft_transform_type_real_forward
       && transformType != rocfft_transform_type_real_inverse)
    {
        if(place == rocfft_placement_inplace && itype != otype)
        {
            throw std::runtime_error(
                "In-place transforms must have identical input and output types");
        }
    }
    // Verify the input/output type pairing is one this harness supports:
    // complex <-> complex, hermitian -> real, real -> hermitian.
    bool okformat = true;
    switch(itype)
    {
    case rocfft_array_type_complex_interleaved:
    case rocfft_array_type_complex_planar:
        okformat = (otype == rocfft_array_type_complex_interleaved
                    || otype == rocfft_array_type_complex_planar);
        break;
    case rocfft_array_type_hermitian_interleaved:
    case rocfft_array_type_hermitian_planar:
        okformat = otype == rocfft_array_type_real;
        break;
    case rocfft_array_type_real:
        okformat = (otype == rocfft_array_type_hermitian_interleaved
                    || otype == rocfft_array_type_hermitian_planar);
        break;
    default:
        throw std::runtime_error("Invalid Input array type format");
    }
    // otype was already validated above, so this default is defensive only.
    switch(otype)
    {
    case rocfft_array_type_complex_interleaved:
    case rocfft_array_type_complex_planar:
    case rocfft_array_type_hermitian_interleaved:
    case rocfft_array_type_hermitian_planar:
    case rocfft_array_type_real:
        break;
    default:
        okformat = false;
    }
    if(!okformat)
    {
        throw std::runtime_error("Invalid combination of Input/Output array type formats");
    }
}
// Check that the input and output types are consistent. If they are unset, assign
// default values based on the transform type.
inline void check_set_iotypes(const rocfft_result_placement place,
const rocfft_transform_type transformType,
rocfft_array_type& itype,
rocfft_array_type& otype)
{
if(itype == rocfft_array_type_unset)
{
switch(transformType)
{
case rocfft_transform_type_complex_forward:
case rocfft_transform_type_complex_inverse:
itype = rocfft_array_type_complex_interleaved;
break;
case rocfft_transform_type_real_forward:
itype = rocfft_array_type_real;
break;
case rocfft_transform_type_real_inverse:
itype = rocfft_array_type_hermitian_interleaved;
break;
default:
throw std::runtime_error("Invalid transform type");
}
}
if(otype == rocfft_array_type_unset)
{
switch(transformType)
{
case rocfft_transform_type_complex_forward:
case rocfft_transform_type_complex_inverse:
otype = rocfft_array_type_complex_interleaved;
break;
case rocfft_transform_type_real_forward:
otype = rocfft_array_type_hermitian_interleaved;
break;
case rocfft_transform_type_real_inverse:
otype = rocfft_array_type_real;
break;
default:
throw std::runtime_error("Invalid transform type");
}
}
check_iotypes(place, transformType, itype, otype);
}
#endif
|
3.nowait.c | #include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <omp.h> /* OpenMP */
#define N 8
/* Q1: How does the sequence of printf change if the nowait clause is */
/* removed from the first for directive? */
/* Q2: If the nowait clause is removed in the second for pragma, will */
/* you observe any difference? */
int main()
{
	/* Loop index declared in the enclosing scope; the index of an
	   `omp for` worksharing loop is implicitly made private. */
	int i;
	omp_set_num_threads(8);
#pragma omp parallel
	{
		/* Loop 1: the `nowait` is commented out, so every thread waits at
		   the implicit barrier at the end of this worksharing loop. */
#pragma omp for schedule(static,2) //nowait
		for (i=0; i < N; i++) {
			int id=omp_get_thread_num();
			printf("Loop 1: (%d) gets iteration %d\n",id,i);
		}
		/* Loop 2: `nowait` removes the loop-end barrier, but this is the
		   last construct before the parallel region's own closing barrier. */
#pragma omp for schedule(static, 2) nowait
		for (i=0; i < N; i++) {
			int id=omp_get_thread_num();
			printf("Loop 2: (%d) gets iteration %d\n",id,i);
		}
	}
	return 0;
}
|
ntlmv1_mschapv2_fmt_plug.c | /*
* Previous files MSCHAPv2_fmt_plug.c and NETNTLM_fmt_plug.c now merged into
* this one file, sharing functions.
*
* NETNTLM_fmt.c -- NTLM Challenge/Response
* Written by JoMo-Kun <jmk at foofus.net> in 2007
* and placed in the public domain.
*
* This algorithm is designed for performing brute-force cracking of the NTLM
* (version 1) challenge/response pairs exchanged during network-based
* authentication attempts [1]. The captured challenge/response pairs from these
* attempts should be stored using the L0phtCrack 2.0 LC format, specifically:
* username:unused:unused:lm response:ntlm response:challenge. For example:
*
* CORP\Administrator:::25B2B477CE101D83648BB087CE7A1C217F51C7FC64C0EBB1:
* C8BD0C1630A9ECF7A95F494A8F0B2CB4A3F25B1225514304:1122334455667788
*
 * It should be noted that an NTLM authentication response is not the same as an
 * NTLM password hash, which can be extracted using tools such as FgDump [2]. NTLM
* responses can be gathered via normal network capture or via tools which
* perform layer 2 attacks, such as Ettercap [3] and Cain [4]. The responses can
* also be harvested using a modified Samba service [5] in conjunction with
* some trickery to convince the user to connect to it. I leave what that
* trickery may actually be as an exercise for the reader (HINT: Karma, NMB
* broadcasts, IE, Outlook, social engineering, ...).
*
* [1] http://davenport.sourceforge.net/ntlm.html#theNtlmResponse
* [2] http://www.foofus.net/~fizzgig/fgdump/
* [3] http://ettercap.sourceforge.net/
* [4] http://www.oxid.it/cain.html
* [5] http://www.foofus.net/jmk/smbchallenge.html
*
* This version supports Extended Session Security. This is what
* is used when the "LM" hash ends in 32 zeros:
*
* DOMAIN\User:::c70e4fb229437ef300000000000000000000000000000000:
* abf7762caf2b1bbfc5cfc1f46665249f049e0af72ae5b5a9:24ca92fdab441aa4
*
* MSCHAPv2_fmt.c -- Microsoft PPP CHAP Extensions, Version 2
* Written by JoMo-Kun <jmk at foofus.net> in 2010
* and placed in the public domain.
*
* Support for freeradius-wep-patch challenge/response format
* added by Linus Lüssing in 2012 and is licensed under CC0/PD terms:
* To the extent possible under law, Linus Lüssing has waived all copyright
* and related or neighboring rights to this work. This work is published from:
* Germany.
*
*
* This algorithm is designed for performing brute-force cracking of the
* MSCHAPv2 challenge/response sets exchanged during network-based
* authentication attempts. The captured challenge/response set from these
* attempts should be stored using the following format:
*
* USERNAME:::AUTHENTICATOR CHALLENGE:MSCHAPv2 RESPONSE:PEER CHALLENGE
* USERNAME::DOMAIN:AUTHENTICATOR CHALLENGE:MSCHAPv2 RESPONSE:PEER CHALLENGE
* DOMAIN\USERNAME:::AUTHENTICATOR CHALLENGE:MSCHAPv2 RESPONSE:PEER CHALLENGE
* :::MSCHAPv2 CHALLENGE:MSCHAPv2 RESPONSE:
*
* For example:
* User:::5B5D7C7D7B3F2F3E3C2C602132262628:82309ECD8D708B5EA08FAA3981CD83544233114A3D85D6DF:21402324255E262A28295F2B3A337C7E
* domain\fred:::56d64cbe7bad61349a0b752335100eaf:d7d829d9545cef1d631b4e568ffb7586050fa3a4d02dbc0b:7f8a466cff2a6bf0c80218bbf56d76bc
*
* http://freeradius.org/rfc/rfc2759.txt
*
* Modified for performance and support for SSE2, NTLMv1 ESS, OMP and UTF-8, by
* magnum 2010-2011 and 2013.
*
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_MSCHAPv2_new;
extern struct fmt_main fmt_NETNTLM_new;
#elif FMT_REGISTERS_H
john_register_one(&fmt_MSCHAPv2_new);
john_register_one(&fmt_NETNTLM_new);
#else
#include <string.h>
#include <openssl/des.h>
#include "arch.h"
#include "simd-intrinsics.h"
#ifdef SIMD_COEF_32
#define NBKEYS (SIMD_COEF_32 * SIMD_PARA_MD4)
#else
#ifdef _OPENMP
#ifndef OMP_SCALE
#define OMP_SCALE 4
#endif
#include <omp.h>
#endif
#endif
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "options.h"
#include "memory.h"
#include "sha.h"
#include "md4.h"
#include "md5.h"
#include "unicode.h"
#include "john.h"
#include "memdbg.h"
extern volatile int bench_running;
#ifndef uchar
#define uchar unsigned char
#endif
#define CHAP_FORMAT_LABEL "MSCHAPv2"
#define CHAP_FORMAT_NAME "C/R"
#define FORMAT_TAG "$MSCHAPv2$"
#define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1)
#define FORMAT_TAGN "$NETNTLM$"
#define FORMAT_TAGN_LEN (sizeof(FORMAT_TAGN)-1)
#define CHAP_USERNAME_LENGTH 256
#define CHAP_CHALLENGE_LENGTH 64
#define CHAP_TOTAL_LENGTH 13 + CHAP_USERNAME_LENGTH + CHAP_CHALLENGE_LENGTH + CIPHERTEXT_LENGTH
#define NTLM_FORMAT_LABEL "netntlm"
#define NTLM_FORMAT_NAME "NTLMv1 C/R"
#define NTLM_TOTAL_LENGTH (10 + 2 * 2 * SALT_SIZE + CIPHERTEXT_LENGTH)
#define ALGORITHM_NAME "MD4 DES (ESS MD5) " MD4_ALGORITHM_NAME
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1000
#define FULL_BINARY_SIZE (2 + 8 * 3)
#define BINARY_SIZE (2 + 8)
#define BINARY_ALIGN 2
#define SALT_SIZE 8
#define SALT_ALIGN MEM_ALIGN_WORD
#define CIPHERTEXT_LENGTH 48
#ifdef SIMD_COEF_32
#define PLAINTEXT_LENGTH 27
//#define SSE_OMP
#if defined (_OPENMP) && defined(SSE_OMP)
#define BLOCK_LOOPS (2048 / NBKEYS)
#else
#define BLOCK_LOOPS (1024 / NBKEYS)
#endif
#define MIN_KEYS_PER_CRYPT (NBKEYS * BLOCK_LOOPS)
#define MAX_KEYS_PER_CRYPT (NBKEYS * BLOCK_LOOPS)
#define GETPOS(i, index) ( (index&(SIMD_COEF_32-1))*4 + ((i)&(0xffffffff-3))*SIMD_COEF_32 + ((i)&3) + (unsigned int)index/SIMD_COEF_32*16*SIMD_COEF_32*4 )
#define GETOUTPOS(i, index) ( (index&(SIMD_COEF_32-1))*4 + ((i)&(0xffffffff-3))*SIMD_COEF_32 + ((i)&3) + (unsigned int)index/SIMD_COEF_32*4*SIMD_COEF_32*4 )
#else
#define PLAINTEXT_LENGTH 64
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 2048
#endif
#ifdef SIMD_COEF_32
static unsigned char *saved_key;	/* interleaved UCS-2 keys in MD4 input layout (see GETPOS) */
#else
static UTF16 (*saved_key)[PLAINTEXT_LENGTH + 1];	/* one UCS-2 key per candidate */
static int (*saved_len);	/* per-candidate key length, in bytes */
#endif
static unsigned short (*crypt_key);	/* 16-bit partial result per candidate */
static unsigned char *nthash;	/* 16-byte MD4 (NT hash) per candidate */
static ARCH_WORD_32 *bitmap;	/* 0x10000-bit map over crypt_key values (see init) */
static int cmps_per_crypt, use_bitmap;	/* heuristic toggles set up in init() */
static int valid_i, valid_j;	/* cached key bytes that matched the 3rd DES block */
static uchar *challenge;	/* current salt: the 8-byte challenge (set_salt) */
static int keys_prepared;	/* cleared by set_key_*: hashes need recomputing */
static struct fmt_main *my;	/* the active fmt_main, stashed by init() */
static char *chap_long_to_short(char *orig); /* used to canonicalize the MSCHAPv2 format */
/* Self-test vectors: canonical and long-form $MSCHAPv2$ hashes, plus
   field-form entries that exercise prepare()'s long->short conversion. */
static struct fmt_tests chap_tests[] = {
	{"$MSCHAPv2$4c092fd3fd98236502e8591100046326$b912ce522524d33123a982cf330a57f8e953fa7974042b5d$6a4915d0ce61d42be533640a75391925$1111", "2222"},
	{"$MSCHAPv2$5B5D7C7D7B3F2F3E3C2C602132262628$82309ECD8D708B5EA08FAA3981CD83544233114A3D85D6DF$21402324255E262A28295F2B3A337C7E$User", "clientPass"},
	{"$MSCHAPv2$d07054459a1fdbc266a006f0220e6fac$33c8331a9b03b7e003f09dd253d740a2bead544143cc8bde$3545cb1d89b507a5de104435e81b14a4$testuser1", "Cricket8"},
	{"$MSCHAPv2$56d64cbe7bad61349a0b752335100eaf$d7d829d9545cef1d631b4e568ffb7586050fa3a4d02dbc0b$7f8a466cff2a6bf0c80218bbf56d76bc$fred", "OMG!BBQ!11!one"}, /* domain\fred */
#if PLAINTEXT_LENGTH >= 35
	{"$MSCHAPv2$b3c42db475b881d3c52ff3923d7b3bf8$f07c7a4eb391f5debe32d814679a5a69661b86b33227c4f8$6321f8649b971bd11ce8d5cb22a4a738$bOb", "asdblahblahblahblahblahblahblahblah"}, /* WorkGroup\bOb */
#endif
	{"$MSCHAPv2$d94e7c7972b2376b28c268583e162de7$eba25a3b04d2c7085d01f842e2befc91745c40db0f792356$0677ca7318fd7f65ae1b4f58c9f4f400$lameuser", ""}, /* no password */
	{"$MSCHAPv2$8710da60ebfc4cab$c4e3bb55904c966927ee68e5f1472e1f5d8ec165713b5360$$foo4", "bar4" },
	{"$MSCHAPv2$8710da60ebfc4cab$c4e3bb55904c966927ee68e5f1472e1f5d8ec165713b5360$$", "bar4" },
	/* Ettercap generated three test vectors */
	{"$MSCHAPv2$3D79CC8CDC0261D4$B700770725F87739ADB110B310D9A289CDBB550ADCA6CB86$solar", "solarisalwaysbusy"},
	{"$MSCHAPv2$BA75EB14EFBFBF25$ED8CC90FD40FAA2D6BCD0ABD0B1F562FD777DF6C5609C98B$lulu", "password"},
	{"$MSCHAPv2$95A87FA62EBCD2E3C8B09E1B448A6C72$ED8CC90FD40FAA2D6BCD0ABD0B1F562FD777DF6C5609C98B$E2AE0995EAAC6CEFF0D9757428B51509$lulu", "password"},
	/* Single test vector from chapcrack's sample pcap file */
	{"$MSCHAPv2$6D0E1C056CD94D5F$1C93ABCE815400686BAECA315F348469256420598A73AD49$moxie", "bPCFyF2uL1p5Lg5yrKmqmY"},
	{"", "clientPass", {"User", "", "", "5B5D7C7D7B3F2F3E3C2C602132262628", "82309ECD8D708B5EA08FAA3981CD83544233114A3D85D6DF", "21402324255E262A28295F2B3A337C7E"} },
	{"", "Cricket8", {"testuser1", "", "", "d07054459a1fdbc266a006f0220e6fac", "33c8331a9b03b7e003f09dd253d740a2bead544143cc8bde", "3545cb1d89b507a5de104435e81b14a4"} },
	{"", "OMG!BBQ!11!one", {"domain\\fred", "", "", "56d64cbe7bad61349a0b752335100eaf", "d7d829d9545cef1d631b4e568ffb7586050fa3a4d02dbc0b", "7f8a466cff2a6bf0c80218bbf56d76bc"} }, /* domain\fred */
	{"", "", {"lameuser", "", "domain", "d94e7c7972b2376b28c268583e162de7", "eba25a3b04d2c7085d01f842e2befc91745c40db0f792356", "0677ca7318fd7f65ae1b4f58c9f4f400"} }, /* no password */
	{NULL}
};
/* Self-test vectors: $NETNTLM$ hashes (plain and ESS forms) plus
   field-form entries that exercise ntlm_prepare(). */
static struct fmt_tests ntlm_tests[] = {
	{"$NETNTLM$1122334455667788$BFCCAF26128EC95F9999C9792F49434267A1D9B0EF89BFFB", "g3rg3g3rg3g3rg3"},
#ifndef SIMD_COEF_32 /* exceeds max length for SSE */
	{"$NETNTLM$1122334455667788$E463FAA5D868ECE20CAE622474A2F440A652D642156AF863", "M1xedC4se%^&*@)##(blahblah!@#"},
#endif
	{"$NETNTLM$c75c20bff9baa71f4765f360625700b0$81f5ecd8a77fe819f7f6689a08a27ac705fc2e1bb00cecb2", "password"},
	{"$NETNTLM$1122334455667788$35B62750E1B9B3205C50D6BA351092C12A1B9B3CDC65D44A", "FooBarGerg"},
	{"$NETNTLM$1122334455667788$A4765EBFE83D345A7CB1660B8899251905164029F8086DDE", "visit www.foofus.net"},
	{"$NETNTLM$24ca92fdab441aa4c70e4fb229437ef3$abf7762caf2b1bbfc5cfc1f46665249f049e0af72ae5b5a9", "longpassword"},
	{"$NETNTLM$1122334455667788$B2B2220790F40C88BCFF347C652F67A7C4A70D3BEBD70233", "cory21"},
	{"", "g3rg3g3rg3g3rg3", {"User", "", "", "lm-hash", "BFCCAF26128EC95F9999C9792F49434267A1D9B0EF89BFFB", "1122334455667788"} },
	{"", "FooBarGerg", {"User", "", "", "lm-hash", "35B62750E1B9B3205C50D6BA351092C12A1B9B3CDC65D44A", "1122334455667788"} },
	{"", "visit www.foofus.net", {"User", "", "", "lm-hash", "A4765EBFE83D345A7CB1660B8899251905164029F8086DDE", "1122334455667788"} },
	{"", "password", {"ESS", "", "", "4765f360625700b000000000000000000000000000000000", "81f5ecd8a77fe819f7f6689a08a27ac705fc2e1bb00cecb2", "c75c20bff9baa71f"} },
	{"", "cory21", {"User", "", "", "lm-hash", "B2B2220790F40C88BCFF347C652F67A7C4A70D3BEBD70233", "1122334455667788"} },
	{NULL}
};
/* Expand a 56-bit (7-byte) key into the 8-byte DES key format: each output
   byte carries 7 key bits, leaving the low (parity) bit zero. */
static inline void setup_des_key(uchar key_56[], DES_key_schedule *ks)
{
	DES_cblock key;
	int i;

	key[0] = key_56[0];
	for (i = 1; i < 7; i++)
		key[i] = (key_56[i - 1] << (8 - i)) | (key_56[i] >> i);
	key[7] = key_56[6] << 1;
	DES_set_key(&key, ks);
}
/* Syntax check for the long MSCHAPv2 form:
   $MSCHAPv2$<32 hex auth challenge>$<48 hex response>$<32 hex peer challenge>$<username>
   Returns 1 if tag, field lengths and hex digits all check out. */
static int chap_valid_long(char *ciphertext)
{
	char *pos, *pos2;

	if (ciphertext == NULL) return 0;
	else if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN)!=0) return 0;
	if (strlen(ciphertext) > CHAP_TOTAL_LENGTH)
		return 0;
	/* Validate Authenticator/Server Challenge Length.
	   atoi16[] == 0x7F means "not a hex digit", which also terminates the
	   scan safely at the trailing '\0'. */
	pos = &ciphertext[FORMAT_TAG_LEN];
	for (pos2 = pos; *pos2 != '$'; pos2++)
		if (atoi16[ARCH_INDEX(*pos2)] == 0x7F)
			return 0;
	if ( !(*pos2 && (pos2 - pos == CHAP_CHALLENGE_LENGTH / 2)) )
		return 0;
	/* Validate MSCHAPv2 Response Length (48 hex chars) */
	pos2++; pos = pos2;
	for (; *pos2 != '$'; pos2++)
		if (atoi16[ARCH_INDEX(*pos2)] == 0x7F)
			return 0;
	if ( !(*pos2 && (pos2 - pos == CIPHERTEXT_LENGTH)) )
		return 0;
	/* Validate Peer/Client Challenge Length (32 hex chars) */
	pos2++; pos = pos2;
	for (; *pos2 != '$'; pos2++)
		if (atoi16[ARCH_INDEX(*pos2)] == 0x7F)
			return 0;
	if ( !(*pos2 && (pos2 - pos == CHAP_CHALLENGE_LENGTH / 2)) )
		return 0;
	/* Validate Username Length */
	if (strlen(++pos2) > CHAP_USERNAME_LENGTH)
		return 0;
	return 1;
}
/* Syntax check for the short (canonical) MSCHAPv2 form:
   $MSCHAPv2$<16 hex challenge>$<48 hex response>$...
   Returns 1 on success. */
static int chap_valid_short(char *ciphertext)
{
	char *pos, *pos2;

	if (ciphertext == NULL) return 0;
	else if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN)!=0) return 0;
	if (strlen(ciphertext) > CHAP_TOTAL_LENGTH)
		return 0;
	/* Validate MSCHAPv2 Challenge Length (16 hex chars; atoi16[] == 0x7F
	   means "not hex" and also stops safely at '\0') */
	pos = &ciphertext[FORMAT_TAG_LEN];
	for (pos2 = pos; *pos2 != '$'; pos2++)
		if (atoi16[ARCH_INDEX(*pos2)] == 0x7F)
			return 0;
	if ( !(*pos2 && (pos2 - pos == CHAP_CHALLENGE_LENGTH / 4)) )
		return 0;
	/* Validate MSCHAPv2 Response Length (48 hex chars) */
	pos2++; pos = pos2;
	for (; *pos2 != '$'; pos2++)
		if (atoi16[ARCH_INDEX(*pos2)] == 0x7F)
			return 0;
	if ( !(*pos2 && (pos2 - pos == CIPHERTEXT_LENGTH)) )
		return 0;
	return 1;
}
/* Decode the SALT_SIZE-byte hex challenge that directly follows the
   $MSCHAPv2$ tag into binary_salt. */
static void chap_get_challenge(const char *ciphertext,
                               unsigned char *binary_salt)
{
	const char *hex = ciphertext + FORMAT_TAG_LEN;
	int i;

	for (i = 0; i < SALT_SIZE; i++) {
		binary_salt[i] = atoi16[ARCH_INDEX(hex[2 * i])] << 4;
		binary_salt[i] |= atoi16[ARCH_INDEX(hex[2 * i + 1])];
	}
}
/* Either the ciphertext already contains the MSCHAPv2 Challenge (8 bytes) or
   we are going to calculate it via:
   sha1(|Peer/Client Challenge (8 Bytes)|Authenticator/Server Challenge (8 Bytes)|Username (<=256)|)
   NOTE: we now ONLY call this function for the short form. The long form gets converted into
   the short form in either the prepare or split function. The short form is the canonical
   form (change made July, 2014, JimF)
*/
/* Return the 8-byte challenge for a short-form (canonical) ciphertext.
   Long-form hashes are converted to short form in prepare()/split() before
   this is called.  Returns a pointer to static storage. */
static void *chap_get_salt(char *ciphertext)
{
	static unsigned char *binary_salt;

	if (!binary_salt)
		binary_salt = mem_alloc_tiny(SALT_SIZE, MEM_ALIGN_WORD);
	/* This is just to silence scan-build. It will never happen.
	   It is unclear why only this format gave warnings, many others do
	   similar things. */
	if (!ciphertext)
		return ciphertext;
	/* The unused digest[20] scratch buffer and its memset were removed;
	   they were leftovers from when this function hashed the long form. */
	memset(binary_salt, 0, SALT_SIZE);
	chap_get_challenge(ciphertext, binary_salt);
	return (void*)binary_salt;
}
/*
 * This function converts long hashes into short ones (the short form is now the canonical format).
 * It converts
 * $MSCHAPv2$95a87fa62ebcd2e3c8b09e1b448a6c72$ed8cc90fd40faa2d6bcd0abd0b1f562fd777df6c5609c98b$e2ae0995eaac6ceff0d9757428b51509$lulu
 * into
 * $MSCHAPv2$ba75eb14efbfbf25$ed8cc90fd40faa2d6bcd0abd0b1f562fd777df6c5609c98b$$
 *
 * This code was moved from get_salt().
 */
/* Convert a long-form hash into the short canonical form.  The short
   challenge is the first SALT_SIZE bytes of
   SHA1(PeerChallenge . AuthenticatorChallenge . Username).
   Returns a pointer to a static buffer (valid until the next call). */
static char *chap_long_to_short(char *ciphertext) {
	static char Buf[CHAP_TOTAL_LENGTH+1]; // static: returned to the caller
	SHA_CTX ctx; // no need for static: fully re-initialized on every call
	unsigned char tmp[16];
	unsigned char digest[20];
	char *pos = NULL;
	int i;

	SHA1_Init(&ctx);
	/* Peer Challenge: skip $MSCHAPv2$, Authenticator Challenge and Response */
	pos = ciphertext + FORMAT_TAG_LEN + 16*2 + 1 + 24*2 + 1;
	for (i = 0; i < 16; i++)
		tmp[i] = (atoi16[ARCH_INDEX(pos[i*2])] << 4) + atoi16[ARCH_INDEX(pos[i*2+1])];
	SHA1_Update(&ctx, tmp, 16);
	/* Authenticator Challenge: skip $MSCHAPv2$ */
	pos = ciphertext + FORMAT_TAG_LEN;
	for (i = 0; i < 16; i++)
		tmp[i] = (atoi16[ARCH_INDEX(pos[i*2])] << 4) + atoi16[ARCH_INDEX(pos[i*2+1])];
	SHA1_Update(&ctx, tmp, 16);
	/* Username - Only the user name (as presented by the peer and
	   excluding any prepended domain name) is used as input to SHA1_Update()
	*/
	pos = ciphertext + FORMAT_TAG_LEN + 16*2 + 1 + 24*2 + 1 + 16*2 + 1;
	SHA1_Update(&ctx, pos, strlen(pos));
	SHA1_Final(digest, &ctx);
	/* Rebuild the ciphertext in short canonical form:
	   $MSCHAPv2$<16 hex challenge>$<response>$$ */
	strcpy(Buf, FORMAT_TAG);
	pos = Buf + FORMAT_TAG_LEN;
	for (i = 0; i < SALT_SIZE; i++) {
		pos[(i<<1)] = itoa16[digest[i]>>4];
		pos[(i<<1)+1] = itoa16[digest[i]&0xF];
	}
	/* Copy "$<response>$" straight from the long form (offset 42 = tag +
	   32 hex + '$') and terminate with the final '$'. */
	memcpy(&pos[16], &ciphertext[42], CIPHERTEXT_LENGTH+2);
	pos[16+CIPHERTEXT_LENGTH+2] = '$';
	pos[16+CIPHERTEXT_LENGTH+3] = 0;
	return Buf;
}
/* Full validation: after the syntax checks, verify the response's 3rd DES
   block.  Only the first two key bytes are unknown (NTLMv1 pads the 16-byte
   NT hash to 21 bytes with zeros, so the 3rd DES key is hash[14],hash[15]
   plus five zero bytes), making a 2^16 exhaustive check feasible.
   valid_i/valid_j cache the last match to short-circuit repeated calls. */
static int chap_valid(char *ciphertext, struct fmt_main *pFmt)
{
	char *cp = NULL;

	if (chap_valid_short(ciphertext))
		cp = ciphertext + FORMAT_TAG_LEN + CHAP_CHALLENGE_LENGTH / 4 + 1;
	else if (chap_valid_long(ciphertext))
		cp = ciphertext + FORMAT_TAG_LEN + CHAP_CHALLENGE_LENGTH / 2 + 1;
	if (cp) {
		uchar key[7] = {0, 0, 0, 0, 0, 0, 0};
		DES_key_schedule ks;
		DES_cblock b3cmp;
		uchar binary[8];
		DES_cblock *challenge = chap_get_salt(ciphertext);
		int i, j;

		cp += 2 * 8 * 2; /* skip the first two 8-byte DES blocks (in hex) */
		for (i = 0; i < 8; i++) {
			binary[i] = atoi16[ARCH_INDEX(cp[i * 2])] << 4;
			binary[i] |= atoi16[ARCH_INDEX(cp[i * 2 + 1])];
		}
		/* Try the cached key bytes from a previous successful check first */
		key[0] = valid_i; key[1] = valid_j;
		setup_des_key(key, &ks);
		DES_ecb_encrypt(challenge, &b3cmp, &ks, DES_ENCRYPT);
		if (!memcmp(binary, &b3cmp, 8))
			return 1;
		/* Exhaustive 2^16 search over the two unknown key bytes */
		for (i = 0; i < 0x100; i++)
		for (j = 0; j < 0x100; j++) {
			key[0] = i; key[1] = j;
			setup_des_key(key, &ks);
			DES_ecb_encrypt(challenge, &b3cmp, &ks, DES_ENCRYPT);
			if (!memcmp(binary, &b3cmp, 8)) {
				valid_i = i;
				valid_j = j;
				return 1;
			}
		}
#ifdef DEBUG
		if (!bench_running)
			fprintf(stderr, "Rejected MSCHAPv2 hash with "
			        "invalid 3rd block\n");
#endif
	}
	return 0;
}
/* Build a long-form $MSCHAPv2$ string from the raw input-file fields,
   stripping any DOMAIN\ prefix from the username.  Returns the original
   field if the assembled string does not validate. */
static char *chap_prepare_long(char *split_fields[10])
{
	char *user, *candidate, *kept;

	/* DOMAIN\USERNAME -or- USERNAME -- ignore DOMAIN */
	user = strstr(split_fields[0], "\\");
	user = user ? user + 1 : split_fields[0];
	candidate = mem_alloc(FORMAT_TAG_LEN + strlen(split_fields[3]) + 1 +
	                      strlen(split_fields[4]) + 1 +
	                      strlen(split_fields[5]) + 1 + strlen(user) + 1);
	sprintf(candidate, "%s%s$%s$%s$%s", FORMAT_TAG, split_fields[3],
	        split_fields[4], split_fields[5], user);
	if (!chap_valid_long(candidate)) {
		MEM_FREE(candidate);
		return split_fields[1];
	}
	kept = str_alloc_copy(candidate);
	MEM_FREE(candidate);
	return kept;
}
/* Build a short-form $MSCHAPv2$ string ("$challenge$response$$") from the
   raw input-file fields; fall back to the original field when invalid. */
static char *chap_prepare_short(char *split_fields[10])
{
	char *candidate, *kept;

	candidate = mem_alloc(FORMAT_TAG_LEN + strlen(split_fields[3]) + 1 +
	                      strlen(split_fields[4]) + 1 + 1 + 1);
	sprintf(candidate, "%s%s$%s$$", FORMAT_TAG, split_fields[3],
	        split_fields[4]);
	if (!chap_valid_short(candidate)) {
		MEM_FREE(candidate);
		return split_fields[1];
	}
	kept = str_alloc_copy(candidate);
	MEM_FREE(candidate);
	return kept;
}
/* Canonicalize user input into the short $MSCHAPv2$ form.  Accepts an
   already-tagged hash (possibly with trailing junk fields) or the raw
   username/challenge/response fields from the input file. */
static char *chap_prepare(char *split_fields[10], struct fmt_main *pFmt)
{
	char *ret;

	if (!strncmp(split_fields[1], FORMAT_TAG, FORMAT_TAG_LEN)) {
		// check for a short format that has any extra trash fields, and if so remove them.
		char *cp1, *cp2, *cp3;
		cp1 = split_fields[1];
		cp1 += FORMAT_TAG_LEN;
		cp2 = strchr(cp1, '$');
		ret = NULL;
		if (cp2 && cp2-cp1 == CHAP_CHALLENGE_LENGTH/4) {
			++cp2;
			cp3 = strchr(cp2, '$');
			/* NOTE(review): with '||' this condition holds for any valid
			   cp3 (when strlen(cp3) == 2, cp3[2] is the NUL terminator, so
			   the right side is true too) -- '&&' was presumably intended
			   to rebuild only when trailing junk exists; verify.  Also,
			   when strlen(cp3) == 1 the cp3[2] access reads one byte past
			   the terminator.  The rebuild itself is harmless for
			   already-canonical strings. */
			if (cp3 && cp3-cp2 == CIPHERTEXT_LENGTH && (strlen(cp3) > 2 || cp3[2] != '$')) {
				ret = str_alloc_copy(split_fields[1]);
				ret[(cp3-split_fields[1]) + 1] = '$';
				ret[(cp3-split_fields[1]) + 2] = 0;
				//printf ("Here is the cut item: %s\n", ret);
			}
		}
	}
	else if (split_fields[0] && split_fields[3] && split_fields[4] &&
	         split_fields[5] &&
	         strlen(split_fields[3]) == CHAP_CHALLENGE_LENGTH/2 &&
	         strlen(split_fields[4]) == CIPHERTEXT_LENGTH &&
	         strlen(split_fields[5]) == CHAP_CHALLENGE_LENGTH/2)
		ret = chap_prepare_long(split_fields);
	else if (split_fields[0] && split_fields[3] && split_fields[4] &&
	         strlen(split_fields[3]) == CHAP_CHALLENGE_LENGTH/4 &&
	         strlen(split_fields[4]) == CIPHERTEXT_LENGTH)
		ret = chap_prepare_short(split_fields);
	else
		ret = NULL;
	/* Whatever we produced (or were given), long forms become canonical short */
	if (ret && chap_valid_long(ret))
		ret = chap_long_to_short(ret);
	else if (chap_valid_long(split_fields[1]))
		ret = chap_long_to_short(split_fields[1]);
	return ret ? ret : split_fields[1];
}
/* Canonicalize for storage: lower-case the three hex fields (leaving the
   $MSCHAPv2$ tag and the trailing username intact) and convert any long
   form to the short canonical form.  Returns static storage. */
static char *chap_split(char *ciphertext, int index, struct fmt_main *self)
{
	static char out[CHAP_TOTAL_LENGTH + 1];
	int pos, seen = 0;

	memset(out, 0, sizeof(out));
	memcpy(out, ciphertext, strlen(ciphertext));
	/* Walk the hex fields; stop after the third '$' (start of username) */
	for (pos = FORMAT_TAG_LEN; pos < CHAP_TOTAL_LENGTH + 1 && seen < 3; pos++) {
		if (out[pos] == '$')
			seen++;
		else if (out[pos] >= 'A' && out[pos] <= 'Z')
			out[pos] += 'a' - 'A';
	}
	if (chap_valid_long(out))
		return chap_long_to_short(out);
	return out;
}
/* Extract the 8-byte DES input block ("salt").  Plain NTLMv1 hashes carry
   the server challenge directly; the longer ESS form stores the
   concatenated server+client challenges, and the effective challenge is
   the first 8 bytes of MD5(server . client).  Returns static storage. */
static void *ntlm_get_salt(char *ciphertext)
{
	static uchar *binary_salt;
	int i;

	if (!binary_salt)
		binary_salt = mem_alloc_tiny(SALT_SIZE, MEM_ALIGN_WORD);
	/* offset 25 = tag + 16 hex chars: '$' here means the plain form */
	if (ciphertext[25] == '$') {
		// Server challenge
		ciphertext += FORMAT_TAGN_LEN;
		for (i = 0; i < SALT_SIZE; ++i)
			binary_salt[i] = (atoi16[ARCH_INDEX(ciphertext[i*2])] << 4) +
				atoi16[ARCH_INDEX(ciphertext[i*2+1])];
	} else {
		uchar es_salt[2*SALT_SIZE], k1[2*SALT_SIZE];
		MD5_CTX ctx;

		ciphertext += FORMAT_TAGN_LEN;
		// Extended Session Security,
		// Concatenate Server & Client challenges
		for (i = 0;i < 2 * SALT_SIZE; ++i)
			es_salt[i] = (atoi16[ARCH_INDEX(ciphertext[i*2])] << 4) +
				atoi16[ARCH_INDEX(ciphertext[i*2+1])];
		// MD5 the concatenated challenges, result is our key
		MD5_Init(&ctx);
		MD5_Update(&ctx, es_salt, 16);
		MD5_Final((void*)k1, &ctx);
		memcpy(binary_salt, k1, SALT_SIZE); // but only 8 bytes of it
	}
	return (void*)binary_salt;
}
/* Validate a $NETNTLM$ hash: tag, total length (74 = plain form,
   90 = ESS form), all-hex fields, then the same 2^16 brute force of the
   response's 3rd DES block as chap_valid() (only two key bytes are
   unknown; the rest of the padded NT hash is zero). */
static int ntlm_valid(char *ciphertext, struct fmt_main *self)
{
	char *pos;

	if (strncmp(ciphertext, FORMAT_TAGN, FORMAT_TAGN_LEN)!=0) return 0;
	if ((strlen(ciphertext) != 74) && (strlen(ciphertext) != 90)) return 0;
	/* The '$' after the challenge sits at offset 25 (8-byte challenge)
	   or 41 (ESS: 16-byte concatenated challenges) */
	if ((ciphertext[25] != '$') && (ciphertext[41] != '$')) return 0;
	for (pos = &ciphertext[FORMAT_TAGN_LEN]; atoi16[ARCH_INDEX(*pos)] != 0x7F; pos++);
	if (*pos != '$') return 0;
	for (pos++; atoi16[ARCH_INDEX(*pos)] != 0x7F; pos++);
	if (!*pos && ((pos - ciphertext - 26 == CIPHERTEXT_LENGTH) ||
	              (pos - ciphertext - 42 == CIPHERTEXT_LENGTH))) {
		uchar key[7] = {0, 0, 0, 0, 0, 0, 0};
		DES_key_schedule ks;
		DES_cblock b3cmp;
		uchar binary[8];
		DES_cblock *challenge = ntlm_get_salt(ciphertext);
		int i, j;

		/* Point at the last 8-byte block (16 hex chars) of the response */
		ciphertext = strrchr(ciphertext, '$') + 1 + 2 * 8 * 2;
		for (i = 0; i < 8; i++) {
			binary[i] = atoi16[ARCH_INDEX(ciphertext[i * 2])] << 4;
			binary[i] |= atoi16[ARCH_INDEX(ciphertext[i * 2 + 1])];
		}
		/* Cached key bytes from the previous successful validation */
		key[0] = valid_i; key[1] = valid_j;
		setup_des_key(key, &ks);
		DES_ecb_encrypt(challenge, &b3cmp, &ks, DES_ENCRYPT);
		if (!memcmp(binary, &b3cmp, 8))
			return 1;
		/* Exhaustive 2^16 search over the two unknown key bytes */
		for (i = 0; i < 0x100; i++)
		for (j = 0; j < 0x100; j++) {
			key[0] = i; key[1] = j;
			setup_des_key(key, &ks);
			DES_ecb_encrypt(challenge, &b3cmp, &ks, DES_ENCRYPT);
			if (!memcmp(binary, &b3cmp, 8)) {
				valid_i = i;
				valid_j = j;
				return 1;
			}
		}
#ifdef DEBUG
		if (!bench_running)
			fprintf(stderr, "Rejected NetNTLM hash with invalid "
			        "3rd block\n");
#endif
	}
	return 0;
}
/* Build a $NETNTLM$ string from raw input-file fields, handling the ESS
   case where the "LM" field carries an 8-byte client challenge padded
   with 32 zeros.  Falls back to the original field on any mismatch. */
static char *ntlm_prepare(char *split_fields[10], struct fmt_main *self)
{
	char *cp;
	char clientChal[17];

	if (!strncmp(split_fields[1], FORMAT_TAGN, FORMAT_TAGN_LEN))
		return split_fields[1];
	if (!split_fields[3]||!split_fields[4]||!split_fields[5])
		return split_fields[1];
	if (strlen(split_fields[4]) != CIPHERTEXT_LENGTH)
		return split_fields[1];

	// this string suggests we have an improperly formatted NTLMv2
	if (!strncmp(&split_fields[4][32], "0101000000000000", 16))
		return split_fields[1];

	/* Ignore anonymous login (Username "", Password "").
	   NOTE(review): the hard-coded constants appear to be a fixed
	   anonymous-session challenge/response pair -- confirm before
	   relying on this filter. */
	if (split_fields[0] && strlen(split_fields[0]) == 0 &&
	    !strncasecmp(split_fields[3], "edb7398877d716be", 16) &&
	    !strncasecmp(split_fields[4], "42aeb71fbb6dc18499016b08"
	                 "b178ba65430ad39ae2498629", 48))
		return split_fields[1];

	// Handle ESS (8 byte client challenge in "LM" field padded with zeros)
	if (strlen(split_fields[3]) == 48 &&
	    !strncmp(&split_fields[3][16], "00000000000000000000000000000000",
	             32))
	{
		memcpy(clientChal, split_fields[3],16);
		clientChal[16] = 0;
	}
	else
		clientChal[0] = 0;
	cp = mem_alloc(FORMAT_TAGN_LEN+strlen(split_fields[5])+strlen(clientChal)+1+
	               strlen(split_fields[4])+1);
	sprintf(cp, "%s%s%s$%s", FORMAT_TAGN, split_fields[5], clientChal,
	        split_fields[4]);
	if (ntlm_valid(cp,self)) {
		char *cp2 = str_alloc_copy(cp);
		MEM_FREE(cp);
		return cp2;
	}
	MEM_FREE(cp);
	return split_fields[1];
}
/* Canonicalize a $NETNTLM$ hash: copy into static storage and lower-case
   everything after the tag. */
static char *ntlm_split(char *ciphertext, int index, struct fmt_main *self)
{
	static char out[NTLM_TOTAL_LENGTH + 1];

	memset(out, 0, sizeof(out));
	strcpy(out, ciphertext);
	strlwr(&out[FORMAT_TAGN_LEN]); /* Exclude: $NETNTLM$ */
	return out;
}
/* Install the current salt: the 8-byte challenge produced by
   chap_get_salt()/ntlm_get_salt(). */
static void set_salt(void *salt)
{
	challenge = salt;
}
// ISO-8859-1 to UCS-2, directly into vector key buffer
static void set_key_ansi(char *_key, int index)
{
#ifdef SIMD_COEF_32
	const uchar *key = (uchar*)_key;
	unsigned int *keybuf_word = (unsigned int*)&saved_key[GETPOS(0, index)];
	unsigned int len, temp2;

	/* Pack two UCS-2 units per 32-bit word into the interleaved SIMD
	   layout addressed via GETPOS().  0x80 is the MD4 padding byte. */
	len = 0;
	while((temp2 = *key++)) {
		unsigned int temp;
		if ((temp = *key++) && len < PLAINTEXT_LENGTH - 1)
		{
			temp2 |= (temp << 16);
			*keybuf_word = temp2;
		}
		else
		{
			/* Odd length (or limit hit): pad byte in the high half */
			temp2 |= (0x80 << 16);
			*keybuf_word = temp2;
			len++;
			goto key_cleaning;
		}
		len += 2;
		keybuf_word += SIMD_COEF_32;
	}
	*keybuf_word = 0x80;
key_cleaning:
	/* Zero the rest of this lane (stale data from longer previous keys) */
	keybuf_word += SIMD_COEF_32;
	while(*keybuf_word) {
		*keybuf_word = 0;
		keybuf_word += SIMD_COEF_32;
	}
	/* Store the MD4 length field: len UCS-2 units * 16 = length in bits */
	((unsigned int*)saved_key)[14*SIMD_COEF_32 + (index&(SIMD_COEF_32-1)) +
		(unsigned int)index/SIMD_COEF_32*16*SIMD_COEF_32] = len << 4;
#else
#if ARCH_LITTLE_ENDIAN
	/* Trivial widening: each 8-bit char becomes one UTF16 unit */
	UTF8 *s = (UTF8*)_key;
	UTF16 *d = saved_key[index];
	while (*s)
		*d++ = *s++;
	*d = 0;
	/* length in bytes (pointer difference over the UTF16 buffer) */
	saved_len[index] = (int)((char*)d - (char*)saved_key[index]);
#else
	/* Big-endian build variant: store the character byte, skip one */
	UTF8 *s = (UTF8*)_key;
	UTF8 *d = (UTF8*)saved_key[index];
	while (*s) {
		*d++ = *s++;
		++d;
	}
	*d = 0;
	saved_len[index] = (int)((char*)d - (char*)saved_key[index]);
#endif
#endif
	keys_prepared = 0;	/* new key material: cached hashes no longer valid */
}
// Legacy codepage to UCS-2, directly into vector key buffer
static void set_key_CP(char *_key, int index)
{
#ifdef SIMD_COEF_32
	const uchar *key = (uchar*)_key;
	unsigned int *keybuf_word = (unsigned int*)&saved_key[GETPOS(0, index)];
	unsigned int len, temp2;

	/* Same packing as set_key_ansi(), but each byte is first mapped
	   through the CP_to_Unicode[] table for the configured codepage. */
	len = 0;
	while((temp2 = *key++)) {
		unsigned int temp;
		temp2 = CP_to_Unicode[temp2];
		if ((temp = *key++) && len < PLAINTEXT_LENGTH - 1)
		{
			temp = CP_to_Unicode[temp];
			temp2 |= (temp << 16);
			*keybuf_word = temp2;
		} else {
			/* Odd length (or limit hit): 0x80 MD4 pad in the high half */
			temp2 |= (0x80 << 16);
			*keybuf_word = temp2;
			len++;
			goto key_cleaning_enc;
		}
		len += 2;
		keybuf_word += SIMD_COEF_32;
	}
	*keybuf_word = 0x80;
key_cleaning_enc:
	/* Zero the rest of this lane (stale data from longer previous keys) */
	keybuf_word += SIMD_COEF_32;
	while(*keybuf_word) {
		*keybuf_word = 0;
		keybuf_word += SIMD_COEF_32;
	}
	/* MD4 length field: len UCS-2 units * 16 = length in bits */
	((unsigned int*)saved_key)[14*SIMD_COEF_32 + (index&(SIMD_COEF_32-1)) +
		(unsigned int)index/SIMD_COEF_32*16*SIMD_COEF_32] = len << 4;
#else
	/* Convert via the unicode helpers; enc_to_utf16 returns a count of
	   UTF16 units (negative on error), so << 1 yields bytes */
	saved_len[index] = enc_to_utf16(saved_key[index],
	                                PLAINTEXT_LENGTH + 1,
	                                (uchar*)_key,
	                                strlen(_key)) << 1;
	if (saved_len[index] < 0)
		saved_len[index] = strlen16(saved_key[index]);
#endif
	keys_prepared = 0;	/* new key material: cached hashes no longer valid */
}
// UTF-8 to UCS-2, directly into vector key buffer
static void set_key_utf8(char *_key, int index)
{
#ifdef SIMD_COEF_32
	const UTF8 *source = (UTF8*)_key;
	unsigned int *keybuf_word = (unsigned int*)&saved_key[GETPOS(0, index)];
	UTF32 chl, chh = 0x80;
	unsigned int len = 0;

	/* Decode two UTF-8 sequences per iteration: chl becomes the low and
	   chh the high UCS-2 unit of one 32-bit word in the interleaved SIMD
	   layout.  Any truncated sequence bails out with the lane cleaned. */
	while (*source) {
		chl = *source;
		if (chl >= 0xC0) {
			unsigned int extraBytesToRead;
			extraBytesToRead = opt_trailingBytesUTF8[chl & 0x3f];
			switch (extraBytesToRead) {
#if NT_FULL_UNICODE
			case 3:
				++source;
				if (*source) {
					chl <<= 6;
					chl += *source;
				} else
					goto bailout;
				/* fall through */
#endif
			case 2:
				++source;
				if (*source) {
					chl <<= 6;
					chl += *source;
				} else
					goto bailout;
				/* fall through */
			case 1:
				++source;
				if (*source) {
					chl <<= 6;
					chl += *source;
				} else
					goto bailout;
			case 0:
				break;
			default:
				goto bailout;
			}
			chl -= offsetsFromUTF8[extraBytesToRead];
		}
		source++;
		len++;
#if NT_FULL_UNICODE
		if (chl > UNI_MAX_BMP) {
			if (len == PLAINTEXT_LENGTH) {
				chh = 0x80;
				*keybuf_word = (chh << 16) | chl;
				keybuf_word += SIMD_COEF_32;
				break;
			}
#define halfBase 0x0010000UL
#define halfShift 10
#define halfMask 0x3FFUL
#define UNI_SUR_HIGH_START (UTF32)0xD800
#define UNI_SUR_LOW_START (UTF32)0xDC00
			/* Supplementary-plane char: emit as a UTF-16 surrogate pair */
			chl -= halfBase;
			chh = (UTF16)((chl & halfMask) + UNI_SUR_LOW_START);
			chl = (UTF16)((chl >> halfShift) + UNI_SUR_HIGH_START);
			len++;
		} else
#endif
		if (*source && len < PLAINTEXT_LENGTH) {
			chh = *source;
			if (chh >= 0xC0) {
				unsigned int extraBytesToRead =
					opt_trailingBytesUTF8[chh & 0x3f];
				switch (extraBytesToRead) {
#if NT_FULL_UNICODE
				case 3:
					++source;
					if (*source) {
						/* Bug fix: this branch previously updated chl,
						   clobbering the already-decoded low unit; the
						   4-byte sequence being read here belongs to chh. */
						chh <<= 6;
						chh += *source;
					} else
						goto bailout;
					/* fall through */
#endif
				case 2:
					++source;
					if (*source) {
						chh <<= 6;
						chh += *source;
					} else
						goto bailout;
					/* fall through */
				case 1:
					++source;
					if (*source) {
						chh <<= 6;
						chh += *source;
					} else
						goto bailout;
				case 0:
					break;
				default:
					goto bailout;
				}
				chh -= offsetsFromUTF8[extraBytesToRead];
			}
			source++;
			len++;
		} else {
			/* Odd number of units: 0x80 MD4 pad in the high half */
			chh = 0x80;
			*keybuf_word = (chh << 16) | chl;
			keybuf_word += SIMD_COEF_32;
			break;
		}
		*keybuf_word = (chh << 16) | chl;
		keybuf_word += SIMD_COEF_32;
	}
	if (chh != 0x80 || len == 0) {
		*keybuf_word = 0x80;
		keybuf_word += SIMD_COEF_32;
	}
bailout:
	/* Zero the rest of this lane (stale data from longer previous keys) */
	while(*keybuf_word) {
		*keybuf_word = 0;
		keybuf_word += SIMD_COEF_32;
	}
	/* MD4 length field: len UCS-2 units * 16 = length in bits */
	((unsigned int*)saved_key)[14*SIMD_COEF_32 + (index&(SIMD_COEF_32-1)) +
		(unsigned int)index/SIMD_COEF_32*16*SIMD_COEF_32] = len << 4;
#else
	/* utf8_to_utf16 returns UTF16 units (negative on error); << 1 = bytes */
	saved_len[index] = utf8_to_utf16(saved_key[index],
	                                 PLAINTEXT_LENGTH + 1,
	                                 (uchar*)_key,
	                                 strlen(_key)) << 1;
	if (saved_len[index] < 0)
		saved_len[index] = strlen16(saved_key[index]);
#endif
	keys_prepared = 0;	/* new key material: cached hashes no longer valid */
}
/* One-time format setup: scale key buffers for OMP, pick the set_key()
   variant for the target encoding, and allocate the shared buffers. */
static void init(struct fmt_main *self)
{
#if defined (_OPENMP) && !defined(SIMD_COEF_32)
	/* Scale keys-per-crypt with the thread count (times OMP_SCALE) */
	int omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	my = self;
	/* Select the set_key() variant matching the configured encoding */
	if (options.target_enc == UTF_8) {
		self->methods.set_key = set_key_utf8;
		/* UTF-8 may need up to 3 bytes per UCS-2 unit, capped at 125 */
		self->params.plaintext_length = MIN(125, 3 * PLAINTEXT_LENGTH);
	} else {
		if (options.target_enc != ASCII &&
		    options.target_enc != ISO_8859_1)
			self->methods.set_key = set_key_CP;
	}
	if (!saved_key) {
#if SIMD_COEF_32
		saved_key = mem_calloc_align(self->params.max_keys_per_crypt,
		                             sizeof(*saved_key) * 64, MEM_ALIGN_SIMD);
		nthash = mem_calloc_align(self->params.max_keys_per_crypt,
		                          sizeof(*nthash) * 16, MEM_ALIGN_SIMD);
#else
		saved_key = mem_calloc(self->params.max_keys_per_crypt,
		                       sizeof(*saved_key));
		nthash = mem_calloc(self->params.max_keys_per_crypt,
		                    sizeof(*nthash) * 16);
		saved_len = mem_calloc(self->params.max_keys_per_crypt,
		                       sizeof(*saved_len));
#endif
		crypt_key = mem_calloc(self->params.max_keys_per_crypt,
		                       sizeof(unsigned short));
	}
	/* 2^16-bit map over the 16-bit partial binaries in crypt_key */
	if (bitmap == NULL)
		bitmap = mem_calloc_align(1, 0x10000 / 8, MEM_ALIGN_CACHE);
	else
		memset(bitmap, 0, 0x10000 / 8);
	use_bitmap = 0; /* we did not use bitmap yet */
	cmps_per_crypt = 2; /* try bitmap */
}
/* Counterpart of init(): releases every buffer allocated there.
 * The frees are independent of each other, so order is arbitrary. */
static void done(void)
{
	MEM_FREE(saved_key);
#ifndef SIMD_COEF_32
	MEM_FREE(saved_len);
#endif
	MEM_FREE(nthash);
	MEM_FREE(crypt_key);
	MEM_FREE(bitmap);
}
// Get the key back from the key buffer, from UCS-2
/* Reconstructs the plaintext for `index` in the current target
 * encoding.  In the SIMD build the key lives interleaved in the MD4
 * input buffer: each 32-bit word holds two UTF-16 code units (low
 * halfword first) and consecutive words of one candidate are
 * SIMD_COEF_32 words apart. */
static char *get_key(int index)
{
#ifdef SIMD_COEF_32
	unsigned int *keybuf_word = (unsigned int*)&saved_key[GETPOS(0, index)];
	static UTF16 key[PLAINTEXT_LENGTH + 1];
	unsigned int md4_size=0;
	unsigned int i=0;
	for(; md4_size < PLAINTEXT_LENGTH; i += SIMD_COEF_32, md4_size++)
	{
		/* Unpack the two code units stored in this buffer word. */
		key[md4_size] = keybuf_word[i];
		key[md4_size+1] = keybuf_word[i] >> 16;
		/* 0x80 followed by 0 is the MD4 padding marker: end of key. */
		if (key[md4_size] == 0x80 && key[md4_size+1] == 0) {
			key[md4_size] = 0;
			break;
		}
		++md4_size;
		/* Terminator may also sit in the high halfword; then the next
		 * word's low halfword (or the buffer end) must be zero. */
		if (key[md4_size] == 0x80 &&
		    ((keybuf_word[i+SIMD_COEF_32]&0xFFFF) == 0 ||
		     md4_size == PLAINTEXT_LENGTH))
		{
			key[md4_size] = 0;
			break;
		}
	}
	/* NOTE(review): if the loop exits without hitting the 0x80 marker
	 * (full-length key), key[] relies on the static buffer's previous
	 * contents for its NUL terminator -- confirm full-length keys are
	 * always terminated correctly. */
	return (char*)utf16_to_enc(key);
#else
	/* Scalar build keeps plain per-candidate UTF-16 strings. */
	return (char*)utf16_to_enc(saved_key[index]);
#endif
}
/* Decode the ciphertext's response into binary[2..] and recover the
 * two leading bytes of the third DES key, which the stored response
 * does not contain directly.  The last recovered byte pair
 * (valid_i/valid_j) is tried first before the full 0x10000-candidate
 * brute-force search -- hashes sharing a password benefit from the
 * cache. */
static void *get_binary(char *ciphertext)
{
	static uchar *binary;
	static int warned = 0, loaded = 0;
	DES_cblock *challenge = my->methods.salt(ciphertext);
	int i, j;
	if (!binary) binary = mem_alloc_tiny(FULL_BINARY_SIZE, BINARY_ALIGN);
	/* The per-hash key search makes loading slow; warn once after ~100
	 * hashes, except when benchmarking or reading the pot file. */
	if (john_main_process)
	if (!warned && !ldr_in_pot && !bench_running && ++loaded > 100) {
		warned = 1;
		fprintf(stderr, "%s: Note: slow loading. For short runs, try "
		        "--format=%s-naive\ninstead. That version loads "
		        "faster but runs slower.\n", my->params.label,
		        my->params.label);
	}
	/* Skip the tag/challenge prefix so ciphertext points at the
	 * response hex. */
	if (chap_valid_short(ciphertext))
		ciphertext += FORMAT_TAG_LEN + CHAP_CHALLENGE_LENGTH / 4 + 1;
	else if (chap_valid_long(ciphertext))
		ciphertext += FORMAT_TAG_LEN + CHAP_CHALLENGE_LENGTH / 2 + 1;
	else /* ntlmv1 */
		ciphertext = strrchr(ciphertext, '$') + 1;
	/* Hex-decode the response into binary[2..]; bytes 0-1 are filled
	 * with the recovered key prefix below. */
	for (i = 0; i < FULL_BINARY_SIZE - 2; i++) {
		binary[2 + i] = atoi16[ARCH_INDEX(ciphertext[i * 2])] << 4;
		binary[2 + i] |= atoi16[ARCH_INDEX(ciphertext[i * 2 + 1])];
	}
	{
		uchar key[7] = {0, 0, 0, 0, 0, 0, 0};
		DES_key_schedule ks;
		DES_cblock b3cmp;
		/* Try the cached byte pair from the previous hash first. */
		key[0] = valid_i; key[1] = valid_j;
		setup_des_key(key, &ks);
		DES_ecb_encrypt(challenge, &b3cmp, &ks, DES_ENCRYPT);
		if (!memcmp(&binary[2 + 8 * 2], &b3cmp, 8)) {
			binary[0] = valid_i; binary[1] = valid_j;
			goto out;
		}
		/* Exhaustive search over both unknown key bytes, matching
		 * against the third 8-byte block of the response. */
		for (i = 0; i < 0x100; i++)
		for (j = 0; j < 0x100; j++) {
			key[0] = i; key[1] = j;
			setup_des_key(key, &ks);
			DES_ecb_encrypt(challenge, &b3cmp, &ks, DES_ENCRYPT);
			if (!memcmp(&binary[2 + 8 * 2], &b3cmp, 8)) {
				binary[0] = i; binary[1] = j;
				goto out;
			}
		}
		/* valid() should have rejected hashes whose third block does
		 * not decode; flag the inconsistency but keep going. */
		fprintf(stderr, "Bug: %s hash with invalid 3rd block, should "
		        "have been rejected in valid()\n", my->params.label);
		binary[0] = binary[1] = 0x55;
	}
out:
	return binary;
}
/* Compute the MD4 (NT hash) of every queued key and cache a 16-bit
 * slice of each digest in crypt_key[] for cheap comparisons.  Work is
 * lazy: nothing is recomputed until set_key() cleared keys_prepared. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	if (!keys_prepared) {
		int i = 0;
		if (use_bitmap) {
			/* Reset the reject bitmap: either wipe it wholesale, or --
			 * for small batches -- clear only the words that the
			 * previous batch set. */
#if MAX_KEYS_PER_CRYPT >= 200
//#warning Notice: Using memset
			memset(bitmap, 0, 0x10000 / 8);
#else
//#warning Notice: Not using memset
#ifdef SIMD_COEF_32
			for (i = 0; i < NBKEYS * BLOCK_LOOPS; i++)
#else
			for (i = 0; i < count; i++)
#endif
			{
				unsigned int value = crypt_key[i];
				bitmap[value >> 5] = 0;
			}
#endif
		}
		/* Only pay for bitmap maintenance when cmp_all() was called
		 * often enough last round to profit from it. */
		use_bitmap = cmps_per_crypt >= 2;
		cmps_per_crypt = 0;
#ifdef SIMD_COEF_32
#if (BLOCK_LOOPS > 1)
#if defined(_OPENMP) && defined(SSE_OMP)
#pragma omp parallel for
#endif
		/* Vectorized MD4 over each interleaved block of NBKEYS keys. */
		for (i = 0; i < BLOCK_LOOPS; i++)
			SIMDmd4body(&saved_key[i * NBKEYS * 64], (unsigned int*)&nthash[i * NBKEYS * 16], NULL, SSEi_MIXED_IN);
#else
		SIMDmd4body(saved_key, (unsigned int*)nthash, NULL, SSEi_MIXED_IN);
#endif
		/* Cache bytes 14-15 of each digest (high half of the word at
		 * output offset 12) and optionally mark them in the bitmap. */
		if (use_bitmap)
			for (i = 0; i < NBKEYS * BLOCK_LOOPS; i++) {
				unsigned int value;
				value = *(ARCH_WORD_32*)
					&nthash[GETOUTPOS(12, i)] >> 16;
				crypt_key[i] = value;
				bitmap[value >> 5] |= 1U << (value & 0x1f);
			}
		else
			for (i = 0; i < NBKEYS * BLOCK_LOOPS; i++) {
				crypt_key[i] = *(ARCH_WORD_32*)
					&nthash[GETOUTPOS(12, i)] >> 16;
			}
#else
#if defined(_OPENMP) || (MAX_KEYS_PER_CRYPT > 1)
#ifdef _OPENMP
#pragma omp parallel for
#endif
		for (i = 0; i < count; i++)
#endif
		{
			/* Scalar path: one MD4 per key; shorts[7] == bytes 14-15. */
			MD4_CTX ctx;
			MD4_Init( &ctx );
			MD4_Update(&ctx, saved_key[i], saved_len[i]);
			MD4_Final((uchar*)&nthash[i * 16], &ctx);
			crypt_key[i] = ((unsigned short*)&nthash[i * 16])[7];
			if (use_bitmap) {
				unsigned int value = crypt_key[i];
				bitmap[value >> 5] |= 1U << (value & 0x1f);
			}
		}
#endif
		keys_prepared = 1;
	}
	return count;
}
/* Verify candidate `index` against one hash: after the cheap 16-bit
 * partial-hash check, DES-encrypt the challenge with the first 8 NT
 * hash bytes and compare against the stored response block. */
static int cmp_one(void *binary, int index)
{
	DES_key_schedule sched;
	DES_cblock enc_out;
	unsigned int md4_head[2];

	/* Quick reject on the cached partial hash. */
	if (crypt_key[index] != *(unsigned short*)binary)
		return 0;

#ifdef SIMD_COEF_32
	/* Gather the first two digest words from the interleaved output. */
	md4_head[0] = *(ARCH_WORD_32*)&nthash[GETOUTPOS(0, index)];
	md4_head[1] = *(ARCH_WORD_32*)&nthash[GETOUTPOS(4, index)];
#else
	memcpy(md4_head, &nthash[index * 16], 8);
#endif
	setup_des_key((unsigned char*)md4_head, &sched);
	DES_ecb_encrypt((DES_cblock*)challenge, &enc_out, &sched, DES_ENCRYPT);
	return !memcmp(((char*)binary) + 2, enc_out, 8);
}
/* Scan all computed partial hashes for a match with `binary`.
 * Fast paths: the bitmap can reject a whole batch at once, and the
 * pairwise-unrolled scan finds candidate indices quickly; only then is
 * the thorough cmp_one() verification run.
 *
 * Fix: in the scalar path the unrolled loop read crypt_key[index + 1]
 * one element past the end when `count` was odd; that read is now
 * guarded. */
static int cmp_all(void *binary, int count)
{
	unsigned int value = *(unsigned short*)binary;
	int index;

	cmps_per_crypt++;

	/* If this 16-bit value was never produced this crypt, reject. */
	if (use_bitmap && !(bitmap[value >> 5] & (1U << (value & 0x1f))))
		goto out;

#ifdef SIMD_COEF_32
	/* Let's give the optimizer a hint!  NBKEYS * BLOCK_LOOPS is a
	 * compile-time constant and always even. */
	for (index = 0; index < NBKEYS * BLOCK_LOOPS; index += 2)
	{
		unsigned int a = crypt_key[index];
		unsigned int b = crypt_key[index + 1];

		if (a == value || b == value)
			goto thorough;
	}
#else
	for (index = 0; index < count; index += 2)
	{
		unsigned int a = crypt_key[index];
		/* Guard the unroll: with an odd count, index + 1 would read
		 * past the last valid entry. */
		unsigned int b = (index + 1 < count) ? crypt_key[index + 1] : a;

		if (a == value || b == value)
			goto thorough;
	}
#endif
	goto out;

thorough:
	/* Something in this region matched the partial hash; verify each
	 * matching index with the full DES comparison. */
#ifdef SIMD_COEF_32
	for (index = 0; index < NBKEYS * BLOCK_LOOPS; index++)
#else
	for (; index < count; index++)
#endif
	{
		if (crypt_key[index] == value && cmp_one(binary, index))
			return 1;
	}

out:
	return 0;
}
/* Full verification: rebuild the complete 24-byte response from the
 * 16-byte NT hash (NULL-padded to 21 bytes, split into three 7-byte
 * DES keys) and compare it digit-by-digit with the hex in the
 * normalized source string. */
static int cmp_exact(char *source, int index)
{
	DES_key_schedule ks;
	uchar binary[24];
	unsigned char key[21];
	char *cp;
	int i;
#ifdef SIMD_COEF_32
	/* Gather the four digest words from the interleaved SIMD output. */
	for (i = 0; i < 4; i++)
		((ARCH_WORD_32*)key)[i] = *(ARCH_WORD_32*)
			&nthash[GETOUTPOS(4 * i, index)];
#else
	memcpy(key, &nthash[index * 16], 16);
#endif
	/* Hash is NULL padded to 21-bytes */
	memset(&key[16], 0, 5);
	/* Split into three 7-byte segments for use as DES keys
	   Use each key to DES encrypt challenge
	   Concatenate output to for 24-byte NTLM response */
	setup_des_key(key, &ks);
	DES_ecb_encrypt((DES_cblock*)challenge, (DES_cblock*)binary,
	                &ks, DES_ENCRYPT);
	setup_des_key(&key[7], &ks);
	DES_ecb_encrypt((DES_cblock*)challenge, (DES_cblock*)&binary[8],
	                &ks, DES_ENCRYPT);
	setup_des_key(&key[14], &ks);
	DES_ecb_encrypt((DES_cblock*)challenge, (DES_cblock*)&binary[16],
	                &ks, DES_ENCRYPT);
	// With the normalized source we simply need to skip the
	// $MSCHAPv2$hhhhhhhhhhhhhhhh$ string to get 'real' binary data.
	// $NETNTLM$c75c20bff9baa71f4765f360625700b0$
	/* NOTE(review): &source[11] assumes the tag prefix is at least 11
	 * characters before the next '$' -- holds for the normalized tags
	 * shown above; confirm split() guarantees this. */
	cp = &source[11];
	cp = strchr(cp, '$');
	++cp;
	/* Compare each response byte against two hex digits of source. */
	for (i = 0; i < 24; ++i) {
		unsigned char c = (atoi16[ARCH_INDEX(*cp)] << 4) +
			(atoi16[ARCH_INDEX(*(cp+1))] );
		if (c != binary[i])
			return 0;
		cp += 2;
	}
	return 1;
}
/* Bucketing helpers used by the cracker core: salt_hash() spreads
 * salts across SALT_HASH_SIZE buckets; binary_hash_N()/get_hash_N()
 * return progressively wider slices of the stored partial hash and of
 * the freshly computed one, respectively. */
static int salt_hash(void *salt)
{
	return *(ARCH_WORD_32*)salt & (SALT_HASH_SIZE - 1);
}

static int binary_hash_0(void *binary)
{
	return *(unsigned short*)binary & PH_MASK_0;
}

static int binary_hash_1(void *binary)
{
	return *(unsigned short*)binary & PH_MASK_1;
}

static int binary_hash_2(void *binary)
{
	return *(unsigned short*)binary & PH_MASK_2;
}

static int binary_hash_3(void *binary)
{
	return *(unsigned short*)binary & PH_MASK_3;
}

static int get_hash_0(int index)
{
	return crypt_key[index] & PH_MASK_0;
}

static int get_hash_1(int index)
{
	return crypt_key[index] & PH_MASK_1;
}

static int get_hash_2(int index)
{
	return crypt_key[index] & PH_MASK_2;
}

static int get_hash_3(int index)
{
	return crypt_key[index] & PH_MASK_3;
}
/* Format registration for the optimized MSCHAPv2 cracker.  The first
 * brace group is the parameter table, the second the method table;
 * field order is dictated by struct fmt_main. */
struct fmt_main fmt_MSCHAPv2_new = {
	{
		CHAP_FORMAT_LABEL,
		CHAP_FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
#if !defined(SIMD_COEF_32) || (defined(SIMD_COEF_32) && defined(SSE_OMP))
		/* OpenMP is advertised for the scalar build, or for a SIMD
		 * build compiled with SSE_OMP. */
		FMT_OMP |
#endif
		FMT_CASE | FMT_8_BIT | FMT_SPLIT_UNIFIES_CASE | FMT_UNICODE | FMT_UTF8,
		{ NULL },
		{ FORMAT_TAG },
		chap_tests
	}, {
		init,
		done,
		fmt_default_reset,
		/* MSCHAPv2-specific parsing/normalization entry points. */
		chap_prepare,
		chap_valid,
		chap_split,
		get_binary,
		chap_get_salt,
		{ NULL },
		fmt_default_source,
		{
			binary_hash_0,
			binary_hash_1,
			binary_hash_2,
			binary_hash_3,
			NULL,
			NULL,
			NULL
		},
		salt_hash,
		NULL,
		set_salt,
		/* set_key_ansi is the default; init() may swap in the UTF-8 or
		 * codepage variant depending on the target encoding. */
		set_key_ansi,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			NULL,
			NULL,
			NULL
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
/* Format registration for the optimized NETNTLM (NTLMv1) cracker;
 * mirrors fmt_MSCHAPv2_new but with NTLM-specific parsing methods and
 * self-tests. */
struct fmt_main fmt_NETNTLM_new = {
	{
		NTLM_FORMAT_LABEL,
		NTLM_FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
#if !defined(SIMD_COEF_32) || (defined(SIMD_PARA_MD4) && defined(SSE_OMP))
		/* NOTE(review): this guard tests SIMD_PARA_MD4 while the
		 * MSCHAPv2 descriptor above tests SIMD_COEF_32 -- confirm
		 * which macro is intended so the two formats agree. */
		FMT_OMP |
#endif
		FMT_CASE | FMT_8_BIT | FMT_SPLIT_UNIFIES_CASE | FMT_UNICODE | FMT_UTF8,
		{ NULL },
		{ FORMAT_TAGN },
		ntlm_tests
	}, {
		init,
		done,
		fmt_default_reset,
		/* NTLMv1-specific parsing/normalization entry points. */
		ntlm_prepare,
		ntlm_valid,
		ntlm_split,
		get_binary,
		ntlm_get_salt,
		{ NULL },
		fmt_default_source,
		{
			binary_hash_0,
			binary_hash_1,
			binary_hash_2,
			binary_hash_3,
			NULL,
			NULL,
			NULL
		},
		salt_hash,
		NULL,
		set_salt,
		set_key_ansi,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			NULL,
			NULL,
			NULL
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif /* plugin stanza */
|
omp_thread_attach_test_2.c | // execute in sequence
// input the number of num_user_threadsation
#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>
#include <unistd.h>
#include <sys/time.h>
#include <sys/timeb.h>
#include <omp.h>
#include <omp_interop.h>
/* read timer in second */
double read_timer() {
struct timeb tm;
ftime(&tm);
return (double) tm.time + (double) tm.millitm / 1000.0;
}
/* Flag polled by attached threads: set to 1 by main() when the user
 * threads should return from the OpenMP runtime. */
int user_thread_return = 0;

/* Entry point for each user-created pthread: announce the thread,
 * attach it to the OpenMP runtime via the (non-standard)
 * omp_thread_attach_with_callback() extension, and announce again
 * when the callback returns.
 *
 * Fix: main() creates these threads with a NULL argument, so the
 * original unconditional *((int*)arg) dereference crashed; a NULL arg
 * is now reported as id -1. */
void *test_fun(void *arg){
    int id = arg ? *((int*)arg) : -1;
    printf("thread: %d\n", id);
    omp_thread_attach_with_callback(NULL, &user_thread_return);
    printf("thread call back: %d\n", id);
    return ((void*)0);
}
/* Create a pool of user pthreads that each attach themselves to the
 * OpenMP runtime, spin up an OpenMP parallel region, then release and
 * join the user threads.
 *
 * Fixes over the original (which did not compile): num_user_threads is
 * declared before its first use, the loop index i is declared, the
 * parallel region's variables are properly scoped, and each pthread
 * receives a valid pointer to its own id instead of NULL. */
int main(int argc, char * argv[]) {
    int num_user_threads = 100;
    int i;

    if (argc >= 2){
        omp_set_num_threads(atoi(argv[1]));
        num_user_threads = atoi(argv[1]);
    }

    pthread_t pthreads[num_user_threads];
    int thread_ids[num_user_threads];

    // pthread_create
    for(i=0; i<num_user_threads; i++){
        thread_ids[i] = i;
        pthread_create(&pthreads[i], NULL, test_fun, &thread_ids[i]);
    }

    // create 50 threads and put them into the OpenMP thread pool
    #pragma omp parallel num_threads(50)
    {
        int tid = omp_get_thread_num();
        (void)tid; /* region exists only to populate the pool */
    }

    /* Signal attached user threads to return, then reap them. */
    user_thread_return = 1;
    for(i=0; i<num_user_threads; i++) {
        pthread_join(pthreads[i], NULL);
    }
    return 0;
}
|
expected_output.c | #include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
#include <polybench.h>
#include "gramschmidt.h"
/**
* This version is stamped on May 10, 2016
*
* Contact:
* Louis-Noel Pouchet <pouchet.ohio-state.edu>
* Tomofumi Yuki <tomofumi.yuki.fr>
*
* Web address: http://polybench.sourceforge.net
*/
/*gramschmidt.c: this file is part of PolyBench/C*/
/*Include polybench common header.*/
/*Include benchmark-specific header.*/
/*Array initialization.*/
/* Fill A with a deterministic pattern and zero both Q and R so the
 * decomposition starts from a reproducible state. */
static void init_array(int m, int n, double A[1000][1200], double R[1200][1200], double Q[1000][1200]) {
    int row, col;

    for(row = 0; row < m; row++) {
        for(col = 0; col < n; col++) {
            A[row][col] = (((double) ((row * col) % m) / m) * 100) + 10;
            Q[row][col] = 0.0;
        }
    }
    for(row = 0; row < n; row++) {
        for(col = 0; col < n; col++) {
            R[row][col] = 0.0;
        }
    }
}
/*DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output.*/
/* Dumps R then Q to stderr in the PolyBench harness format; A is
 * accepted for signature uniformity but not printed. */
static void print_array(int m, int n, double A[1000][1200], double R[1200][1200], double Q[1000][1200]) {
    int row, col;

    fprintf(stderr, "==BEGIN DUMP_ARRAYS==\n");

    fprintf(stderr, "begin dump: %s", "R");
    for(row = 0; row < n; row++) {
        for(col = 0; col < n; col++) {
            if((row * n + col) % 20 == 0) fprintf(stderr, "\n");
            fprintf(stderr, "%0.2lf ", R[row][col]);
        }
    }
    fprintf(stderr, "\nend dump: %s\n", "R");

    fprintf(stderr, "begin dump: %s", "Q");
    for(row = 0; row < m; row++) {
        for(col = 0; col < n; col++) {
            if((row * n + col) % 20 == 0) fprintf(stderr, "\n");
            fprintf(stderr, "%0.2lf ", Q[row][col]);
        }
    }
    fprintf(stderr, "\nend dump: %s\n", "Q");
    fprintf(stderr, "==END DUMP_ARRAYS==\n");
}
/*Main computational kernel. The whole function will be timed,
including the call and return.*/
/*QR Decomposition with Modified Gram Schmidt:
http://www.inf.ethz.ch/personal/gander/*/
/* A (m x n) is factored in place: Q receives orthonormal columns and
 * R the upper-triangular factor, while A is progressively
 * orthogonalized column by column. */
static void kernel_gramschmidt(int m, int n, double A[1000][1200], double R[1200][1200], double Q[1000][1200]) {
    int i, j, k;
    double nrm;
    /*************** Clava msgError **************
    unsolved dependency for arrayAccess A use : RW
    ****************************************/
    /* The outer loop is inherently sequential: column k must be
     * normalized before later columns can be projected against it. */
    for(k = 0; k < n; k++) {
        nrm = 0.0;
        /* ||A[:,k]||^2 via parallel reduction. */
        #pragma omp parallel for default(shared) private(i) firstprivate(m, k, A) reduction(+ : nrm)
        for(i = 0; i < m; i++)
            nrm += A[i][k] * A[i][k];
        R[k][k] = sqrt(nrm);
        /* Normalize column k of A into Q. */
        #pragma omp parallel for default(shared) private(i) firstprivate(m, k, A, R)
        for(i = 0; i < m; i++)
            Q[i][k] = A[i][k] / R[k][k];
        /* Subtract the Q[:,k] component from every remaining column;
         * each j is independent, so the j loop is parallelized and the
         * inner reductions stay serial per column. */
        #pragma omp parallel for default(shared) private(j, i) firstprivate(k, n, m, Q)
        for(j = k + 1; j < n; j++) {
            R[k][j] = 0.0;
            // #pragma omp parallel for default(shared) private(i) firstprivate(m, k, j, Q, A) reduction(+ : R[k][j])
            for(i = 0; i < m; i++)
                R[k][j] += Q[i][k] * A[i][j];
            // #pragma omp parallel for default(shared) private(i) firstprivate(m, k, j, Q, R)
            for(i = 0; i < m; i++)
                A[i][j] = A[i][j] - Q[i][k] * R[k][j];
        }
    }
}
/* Benchmark driver: allocate the matrices through the PolyBench
 * helper, initialize them, run the timed kernel, and optionally dump
 * the live-out data (only when the harness asks for it). */
int main(int argc, char **argv) {
    /*Retrieve problem size.*/
    int m = 1000;
    int n = 1200;

    /*Variable declaration/allocation.*/
    double (*A)[1000][1200] =
        (double (*)[1000][1200]) polybench_alloc_data((1000 + 0) * (1200 + 0), sizeof(double));
    double (*R)[1200][1200] =
        (double (*)[1200][1200]) polybench_alloc_data((1200 + 0) * (1200 + 0), sizeof(double));
    double (*Q)[1000][1200] =
        (double (*)[1000][1200]) polybench_alloc_data((1000 + 0) * (1200 + 0), sizeof(double));

    /*Initialize array(s).*/
    init_array(m, n, *A, *R, *Q);

    /*Run kernel.  (Timer start/stop hooks were stripped by the
      source-to-source tool; the empty statements are gone too.)*/
    kernel_gramschmidt(m, n, *A, *R, *Q);

    /*Prevent dead-code elimination. All live-out data must be printed
    by the function call in argument.  The condition is never true in a
    normal run, but the compiler cannot prove that.*/
    if(argc > 42 && !strcmp(argv[0], "")) print_array(m, n, *A, *R, *Q);

    /*Be clean.*/
    free((void *) A);
    free((void *) R);
    free((void *) Q);
    return 0;
}
|
parfor.h | // Copyright 2019 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef PARFOR_H_
#define PARFOR_H_
#include <omp.h>
#include <cstdint>
#include <utility>
#include <vector>
namespace qsim {
// Static work partitioner: splits [0, size) into num_threads contiguous
// chunks and runs them inside an OpenMP parallel region.  Ranges
// smaller than MIN_SIZE are executed serially to avoid parallel
// overhead.
template <uint64_t MIN_SIZE>
struct ParallelForT {
  explicit ParallelForT(unsigned num_threads) : num_threads(num_threads) {}

  // GetIndex0 and GetIndex1 are useful when we need to know how work was
  // divided between threads, for instance, for reusing partial sums obtained
  // by RunReduceP.

  // First index (inclusive) of thread_id's chunk; 0 for serial ranges.
  // NOTE(review): size * thread_id can overflow uint64_t for extremely
  // large sizes -- confirm callers stay well below that bound.
  uint64_t GetIndex0(uint64_t size, unsigned thread_id) const {
    return size >= MIN_SIZE ? size * thread_id / num_threads : 0;
  }

  // One-past-last index of thread_id's chunk; size for serial ranges.
  uint64_t GetIndex1(uint64_t size, unsigned thread_id) const {
    return size >= MIN_SIZE ? size * (thread_id + 1) / num_threads : size;
  }

  // Invokes func(team_size, thread_id, i, args...) for every
  // i in [0, size); parallel only when size >= MIN_SIZE and more than
  // one thread was requested.
  template <typename Function, typename... Args>
  void Run(uint64_t size, Function&& func, Args&&... args) const {
    if (num_threads > 1 && size >= MIN_SIZE) {
#pragma omp parallel num_threads(num_threads)
      {
        unsigned n = omp_get_num_threads();
        unsigned m = omp_get_thread_num();

        uint64_t i0 = GetIndex0(size, m);
        uint64_t i1 = GetIndex1(size, m);

        for (uint64_t i = i0; i < i1; ++i) {
          func(n, m, i, args...);
        }
      }
    } else {
      // Serial fallback: report a team of one.
      for (uint64_t i = 0; i < size; ++i) {
        func(1, 0, i, args...);
      }
    }
  }

  // Like Run, but folds func's results per thread with op and returns
  // the per-thread partials (indexed by thread id).  A serial run
  // returns a single-element vector; num_threads == 0 yields an empty
  // vector.
  template <typename Function, typename Op, typename... Args>
  std::vector<typename Op::result_type> RunReduceP(
      uint64_t size, Function&& func, Op&& op, Args&&... args) const {
    std::vector<typename Op::result_type> partial_results;

    if (num_threads > 1 && size >= MIN_SIZE) {
      partial_results.resize(num_threads, 0);

#pragma omp parallel num_threads(num_threads)
      {
        unsigned n = omp_get_num_threads();
        unsigned m = omp_get_thread_num();

        uint64_t i0 = GetIndex0(size, m);
        uint64_t i1 = GetIndex1(size, m);

        typename Op::result_type partial_result = 0;

        for (uint64_t i = i0; i < i1; ++i) {
          partial_result = op(partial_result, func(n, m, i, args...));
        }

        partial_results[m] = partial_result;
      }
    } else if (num_threads > 0) {
      typename Op::result_type result = 0;

      for (uint64_t i = 0; i < size; ++i) {
        result = op(result, func(1, 0, i, args...));
      }

      partial_results.resize(1, result);
    }

    return partial_results;
  }

  // Full reduction: folds the partials produced by RunReduceP with op
  // and returns the combined result (zero-initialized accumulator).
  template <typename Function, typename Op, typename... Args>
  typename Op::result_type RunReduce(uint64_t size, Function&& func,
                                     Op&& op, Args&&... args) const {
    auto partial_results = RunReduceP(size, func, std::move(op), args...);

    typename Op::result_type result = 0;

    for (auto partial_result : partial_results) {
      result = op(result, partial_result);
    }

    return result;
  }

  unsigned num_threads;  // requested team size (1 means always serial)
};

using ParallelFor = ParallelForT<1024>;
} // namespace qsim
#endif // PARFOR_H_
|
pi_omp_padding.c | /*
* Compute pi by approximating the area under the curve f(x) = 4 / (1 + x*x)
* between 0 and 1.
*
* Parallel version using OpenMP
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <omp.h> /* OpenMP */
#if _EXTRAE_
#include "extrae_user_events.h"
// Extrae Constants
#define PROGRAM 1000
#define END 0
#define SERIAL 1
#define PARALLEL 2
#else
/* Wall-clock timestamp in microseconds, returned as a double. */
double getusec_() {
    struct timeval now;
    gettimeofday(&now, NULL);
    return (double)now.tv_sec * (double)1e6 + (double)now.tv_usec;
}
#define START_COUNT_TIME stamp = getusec_();
#define STOP_COUNT_TIME(_m) stamp = getusec_() - stamp;\
stamp = stamp/1e6;\
printf ("%s: %0.6fs\n",(_m), stamp);
#endif
#define NUMTHRDS 16
#define CACHE_SIZE 64
double sumvector[NUMTHRDS][CACHE_SIZE/sizeof(double)]; // sum for each thread, with padding to avoid false sharing
/* Approximate pi as the area under 4/(1+x^2) on [0,1] using the
 * midpoint rule, with one padded partial-sum slot per thread to avoid
 * false sharing.
 *
 * Fixes: num_steps is parsed with atol() (it is a long and the
 * suggested value approaches INT_MAX), and the parallel region is
 * pinned to NUMTHRDS threads because sumvector is sized for exactly
 * that many -- a larger runtime default would index out of bounds. */
int main(int argc, char *argv[]) {
#if _EXTRAE_
    Extrae_event (PROGRAM, SERIAL);
#else
    double stamp;
    START_COUNT_TIME;
#endif

    double x, sum = 0.0, pi = 0.0;
    double step;

    const char Usage[] = "Usage: pi <num_steps> (try 1000000000)\n";
    if (argc < 2) {
        fprintf(stderr, "%s", Usage);
        exit(1);
    }
    long int num_steps = atol(argv[1]);

    step = 1.0/(double) num_steps;

#if _EXTRAE_
    Extrae_event (PROGRAM, END);
#endif

    /* do computation -- using all available threads */
#if _EXTRAE_
    Extrae_event (PROGRAM, PARALLEL);
#endif

    for (int i = 0; i < NUMTHRDS; i++)
        sumvector[i][0] = 0.0;

    #pragma omp parallel private(x) num_threads(NUMTHRDS)
    {
        int myid = omp_get_thread_num();
        #pragma omp for
        for (long int i = 0; i < num_steps; ++i) {
            x = (i + 0.5) * step;
            sumvector[myid][0] += 4.0 / (1.0 + x * x);
        }
    }

    /* Combine the per-thread partial sums. */
    for (int i = 0; i < NUMTHRDS; i++)
        sum += sumvector[i][0];

#if _EXTRAE_
    Extrae_event (PROGRAM, END);
    Extrae_event (PROGRAM, SERIAL);
#endif

    pi = step * sum;

    /* print results */
    printf("Number pi after %ld iterations = %.15f\n", num_steps, pi);

#if _EXTRAE_
    Extrae_event (PROGRAM, END);
#else
    STOP_COUNT_TIME("Total execution time");
#endif

    return EXIT_SUCCESS;
}
|
3d25pt.c | /*
* Order-2, 3D 25 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
#ifndef min
#define min(x,y) ((x) < (y)? (x) : (y))
#endif
/* result = x - y for `struct timeval' values.  y is used as scratch
 * space for carry normalization, so it may be modified.
 * Returns 1 if the difference is negative, otherwise 0. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Borrow from the seconds field when x has fewer microseconds. */
    if (x->tv_usec < y->tv_usec) {
        int carry = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * carry;
        y->tv_sec += carry;
    }
    /* Push excess microseconds into the seconds field. */
    if (x->tv_usec - y->tv_usec > 1000000) {
        int carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * carry;
        y->tv_sec -= carry;
    }

    /* Compute the remaining difference; tv_usec is now non-negative. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;

    return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, test;
int Nx, Ny, Nz, Nt;
if (argc > 3) {
Nx = atoi(argv[1])+8;
Ny = atoi(argv[2])+8;
Nz = atoi(argv[3])+8;
}
if (argc > 4)
Nt = atoi(argv[4]);
double ****A = (double ****) malloc(sizeof(double***)*2);
double ***roc2 = (double ***) malloc(sizeof(double**));
A[0] = (double ***) malloc(sizeof(double**)*Nz);
A[1] = (double ***) malloc(sizeof(double**)*Nz);
roc2 = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[0][i] = (double**) malloc(sizeof(double*)*Ny);
A[1][i] = (double**) malloc(sizeof(double*)*Ny);
roc2[i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
roc2[i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 24;
tile_size[1] = 24;
tile_size[2] = 16;
tile_size[3] = 2048;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
roc2[i][j][k] = 2.0 * (rand() % BASE);
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
const double coef0 = -0.28472;
const double coef1 = 0.16000;
const double coef2 = -0.02000;
const double coef3 = 0.00254;
const double coef4 = -0.00018;
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
#pragma scop
for (t = 0; t < Nt; t++) {
for (i = 4; i < Nz-4; i++) {
for (j = 4; j < Ny-4; j++) {
for (k = 4; k < Nx-4; k++) {
A[(t+1)%2][i][j][k] = 2.0*A[t%2][i][j][k] - A[(t+1)%2][i][j][k] + roc2[i][j][k]*(
coef0* A[t%2][i ][j ][k ] +
coef1*(A[t%2][i-1][j ][k ] + A[t%2][i+1][j ][k ] +
A[t%2][i ][j-1][k ] + A[t%2][i ][j+1][k ] +
A[t%2][i ][j ][k-1] + A[t%2][i ][j ][k+1]) +
coef2*(A[t%2][i-2][j ][k ] + A[t%2][i+2][j ][k ] +
A[t%2][i ][j-2][k ] + A[t%2][i ][j+2][k ] +
A[t%2][i ][j ][k-2] + A[t%2][i ][j ][k+2]) +
coef3*(A[t%2][i-3][j ][k ] + A[t%2][i+3][j ][k ] +
A[t%2][i ][j-3][k ] + A[t%2][i ][j+3][k ] +
A[t%2][i ][j ][k-3] + A[t%2][i ][j ][k+3]) +
coef4*(A[t%2][i-4][j ][k ] + A[t%2][i+4][j ][k ] +
A[t%2][i ][j-4][k ] + A[t%2][i ][j+4][k ] +
A[t%2][i ][j ][k-4] + A[t%2][i ][j ][k+4]) );
}
}
}
}
#pragma endscop
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = MIN(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(4, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
free(roc2[i][j]);
}
free(A[0][i]);
free(A[1][i]);
free(roc2[i]);
}
free(A[0]);
free(A[1]);
free(roc2);
return 0;
}
|
1.c | #include <stdio.h>
#include <omp.h>
#include <stdlib.h>
#include <math.h>
/* Approximates e by summing the series sum(1/i!) in parallel.  Each
 * thread accumulates a partial sum of its chunk with a local running
 * product (started at 1), records its chunk's trailing factor in
 * weights_array, and the partial sums are rescaled by a prefix product
 * over those factors before being combined.
 *
 * Fix: the original dereferenced argv[1]/argv[2] without checking argc
 * and crashed when run with missing arguments. */
int main(int argc, char** argv)
{
    if (argc < 3)
    {
        fprintf(stderr, "Usage: %s <num_threads> <num_elements>\n", argv[0]);
        return 1;
    }
    int threads_num = atoi(argv[1]);
    unsigned long int elements = strtoul(argv[2], NULL, 10);
    if (threads_num <= 0)
    {
        fprintf(stderr, "<num_threads> must be a positive integer\n");
        return 1;
    }
    omp_set_num_threads(threads_num);
    double* weights_array = (double*) calloc (threads_num, sizeof(double));
    double* sum_array = (double*) calloc (threads_num, sizeof(double));
    int part = elements / threads_num;
    double current = 1.0;
    /* Each thread's `current` starts at 1; the true scale of its chunk
     * is reconstructed from weights_array after the loop. */
    #pragma omp parallel for shared(sum_array, part, elements, weights_array) firstprivate(current)
    for(unsigned long int i = 0; i < elements; i++)
    {
        if (i == part * (omp_get_thread_num() + 1) - 1)
        {
            weights_array[omp_get_thread_num()] = current;
            if (omp_get_thread_num() == 0)
                weights_array[0] = 1;
        }
        current = current / (1 + i);
        sum_array[omp_get_thread_num()] += current;
    }
    /* Prefix product turns per-chunk trailing factors into absolute
     * scale factors for each chunk's partial sum. */
    current = 1.0;
    for(int i = 0; i < threads_num; i++)
    {
        current *= weights_array[i];
        weights_array[i] = current;
    }
    double sum = 0.0;
    #pragma omp parallel for ordered shared(sum_array, weights_array) reduction(+:sum)
    for(int i = threads_num - 1; i > -1; i--)
        sum += sum_array[i]*weights_array[i];
    free(weights_array);
    free(sum_array);
    printf("exit : %.35lf\n", sum + 1);
    printf("value: %.35le\n", M_E);
    return 0;
}
|
15.c | #include <stdio.h>
#include <omp.h>
#include <stdbool.h>
/* Trial-division primality test.
 * Fix: the original returned true for every n < 2 (including 0, 1 and
 * negative values), none of which are prime by definition. */
bool is_prime(int n)
{
    if (n < 2)
        return false;
    for (int i = 2; i * i <= n; i++)
    {
        if (n % i == 0)
            return false;
    }
    return true;
}
/* Read an inclusive [lo, hi] range from stdin and print every prime in
 * it in ascending order; the ordered clause keeps the output sorted
 * even though candidates are tested by four threads in parallel. */
int main(int argc, char *argv[])
{
    int lower, upper;

    printf("Enter a value :\n");
    scanf("%d", &lower);
    printf("Enter a value :\n");
    scanf("%d", &upper);

    #pragma omp parallel for ordered schedule(dynamic) num_threads(4)
    for (int candidate = lower; candidate <= upper; candidate++)
    {
        if (is_prime(candidate))
        {
            #pragma omp ordered
            printf("%d\n", candidate);
        }
    }
}
|
bench.c | #include <stdio.h>
#include <limits.h>
#include <omp.h>
#include <time.h>
#include <stdlib.h>
// #define V 30000
int num;
/* Return the index of the unvisited vertex with the smallest key, or
 * -1 when every vertex has been visited.
 * Fix: the original left min_index uninitialized, so it returned an
 * indeterminate value when no unvisited vertex existed. */
int minKeySeq(int key[], int visited[], int V)
{
    // Initialize min value
    int min = INT_MAX, min_index = -1;
    int v;

    for (v = 0; v < V; v++)
    {
        if (visited[v] == 0 && key[v] < min)
        {
            min = key[v], min_index = v;
        }
    }
    return min_index;
}
/* Sequential Prim's algorithm over a V x V adjacency matrix; prints
 * the elapsed wall-clock time (seconds) to stdout. */
void primMSTSeq(int **graph, int V)
{
    double t_begin = omp_get_wtime();
    int from[V];     // constructed MST: from[v] is v's parent
    int key[V];      // cheapest known edge weight into each vertex
    int visited[V];  // 1 once a vertex has been added to the MST
    int v, step;

    // Every vertex starts unreached...
    for (v = 0; v < V; v++) {
        key[v] = INT_MAX;
        visited[v] = 0;
    }
    // ...except vertex 0, which seeds the tree as its root.
    key[0] = 0;
    from[0] = -1;

    // Grow the MST by one vertex per iteration.
    for (step = 0; step < V - 1; step++)
    {
        int u = minKeySeq(key, visited, V);
        visited[u] = 1;

        // Relax the edges leaving the newly added vertex.
        for (v = 0; v < V; v++) {
            if (graph[u][v] && visited[v] == 0 && graph[u][v] < key[v]) {
                from[v] = u;
                key[v] = graph[u][v];
            }
        }
    }

    double t_end = omp_get_wtime();
    printf("%f, ", t_end - t_begin);
    // printMST(from, V, graph);
}
/* Parallel version of minKeySeq(): each thread scans its share of the
 * vertices for a local minimum, then the global minimum is selected in
 * a critical section.  Returns -1 if no unvisited vertex exists.
 * Fix: `index` was uninitialized and each thread copied it into
 * index_local before the scan, reading an indeterminate value; both
 * locals now start from well-defined neutral values. */
int minKey(int key[], int visited[], int V)
{
    int min = INT_MAX, index = -1, i;
    omp_set_num_threads(4);
    #pragma omp parallel
    {
        num = omp_get_num_threads(); /* recorded for the benchmark report */
        int index_local = -1;
        int min_local = INT_MAX;
        #pragma omp for nowait
        for (i = 0; i < V; i++)
        {
            if (visited[i] == 0 && key[i] < min_local)
            {
                min_local = key[i];
                index_local = i;
            }
        }
        /* Merge this thread's candidate into the shared minimum. */
        #pragma omp critical
        {
            if (min_local < min)
            {
                min = min_local;
                index = index_local;
            }
        }
    }
    return index;
}
/* Print each MST edge (parent - child) with its weight.  The `n`
 * parameter is unused but kept for signature compatibility. */
void printMST(int from[], int n, int **graph, int V)
{
    int v;
    printf("Edge Weight\n");
    for (v = 1; v < V; v++)
        printf("%d - %d %d \n", from[v], v, graph[v][from[v]]);
}
/* Parallel Prim's algorithm: vertex selection uses the threaded
 * minKey() and edge relaxation is parallelized with OpenMP.  Prints
 * the elapsed time and the team size recorded in the global `num`.
 * Cleanup: removed the never-used num_threads local. */
void primMST(int **graph, int V)
{
    double start = omp_get_wtime();
    int from[V];
    int key[V];
    int visited[V];
    int i, count;

    for (i = 0; i < V; i++) {
        key[i] = INT_MAX;
        visited[i] = 0;
    }
    key[0] = 0;   /* seed the tree at vertex 0 */
    from[0] = -1; /* the root has no parent */

    for (count = 0; count < V - 1; count++)
    {
        int u = minKey(key, visited, V);
        visited[u] = 1;
        int v;
        /* Per-vertex relaxations are independent of each other. */
        #pragma omp parallel for schedule(static)
        for (v = 0; v < V; v++)
        {
            if (graph[u][v] && visited[v] == 0 && graph[u][v] < key[v])
                from[v] = u, key[v] = graph[u][v];
        }
    }
    double end = omp_get_wtime();
    printf("%f, %d\n", end - start, num);
    // printMST(from, V, graph);
}
/* Benchmark driver: build a random symmetric V x V weight matrix with
 * a zero diagonal and time sequential vs parallel Prim.
 * Fixes: validate argc and V before use (the original dereferenced
 * argv[1] unconditionally) and free the adjacency matrix, which was
 * previously leaked. */
int main(int argc, char *argv[])
{
    if (argc < 2) {
        fprintf(stderr, "Usage: %s <num_vertices>\n", argv[0]);
        return 1;
    }
    int V = atoi(argv[1]);
    if (V <= 0) {
        fprintf(stderr, "<num_vertices> must be a positive integer\n");
        return 1;
    }

    int **graph = (int **)malloc(V * sizeof(int *));
    for (int x = 0; x < V; x++)
        graph[x] = (int *)malloc(V * sizeof(int));

    int i, j;

    /* Random weights, zero diagonal, then symmetrize. */
    srand(time(NULL));
    for (i = 0; i < V; i++)
        for (j = 0; j < V; j++)
            graph[i][j] = rand() % 10;
    for (i = 0; i < V; i++)
    {
        graph[i][i] = 0;
    }
    for (i = 0; i < V; i++)
        for (j = 0; j < V; j++)
            graph[j][i] = graph[i][j];

    printf("%d, ", V);
    primMSTSeq(graph, V);
    primMST(graph, V);

    for (int x = 0; x < V; x++)
        free(graph[x]);
    free(graph);
    return 0;
}
|
OscarIntegration.h | #pragma once
#include <algorithm>
#include <execution>
#include <iostream>
#include <mutex>
#include <random>
#include <unordered_map>
#include <unordered_set>
#include <thread>
#include <path_finder/storage/CellIdStore.h>
#include <path_finder/graphs/CHGraph.h>
namespace pathFinder {
inline namespace std_threads_variant {
class OscarIntegrator {
public:
  /// Compute, for every edge of the contraction hierarchy, the ids of the
  /// cells the edge intersects, and persist them in @p cellIdStore.
  ///
  /// Phase 1 handles regular (non-shortcut) edges: @p numThreads workers
  /// explore the graph depth-first and evaluate @p edge2CellIds per edge,
  /// reusing the hint of the predecessor edge while descending.
  /// Phase 2 handles shortcut edges: each shortcut gets the sorted union of
  /// its two children's cell ids, propagated bottom-up through the shortcut
  /// dependency tree.
  ///
  /// @tparam GeoPoint       unused in this variant (kept for interface parity
  ///                        with the omp variant)
  /// @tparam CellIdsForEdge callable: (srcLat, srcLng, dstLat, dstLng, hint)
  ///                        -> sorted vector of cell ids
  /// @param graph        graph to traverse (read-only)
  /// @param cellIdStore  destination store, indexed by forward edge position
  /// @param edge2CellIds cell-id oracle; copied once per worker
  /// @param numThreads   number of phase-1 worker threads (>= 1)
  /// @throws std::runtime_error if some shortcut's cell ids could not be
  ///         computed (dependency tree not fully resolved)
  template <typename GeoPoint, typename CellIdsForEdge>
  static void writeCellIdsForEdges(const CHGraph &graph, CellIdStore &cellIdStore, CellIdsForEdge edge2CellIds, std::size_t numThreads = 1) {
    // State shared by all workers.
    struct State {
      const CHGraph &graph;
      CellIdStore &cellIdStore;
      CellIdsForEdge & edge2CellIds;
      // One bit per node; a set bit means the node was claimed by a worker.
      std::vector<std::atomic<uint64_t>> finishedNodes;
      std::atomic<std::size_t> progress{0}; // number of claimed nodes
      std::size_t edgeProgress{0};          // flushed regular edges; guarded by cellIdStoreLock
      const std::size_t numberOfNodes;
      std::mutex cellIdStoreLock;           // serializes cellIdStore writes and edgeProgress
      State(const CHGraph &graph, CellIdStore &cellIdStore, CellIdsForEdge & edge2CellIds) :
        graph(graph),
        cellIdStore(cellIdStore),
        edge2CellIds(edge2CellIds),
        finishedNodes(graph.getNumberOfNodes() / 64 + 1),
        numberOfNodes(graph.getNumberOfNodes())
      {
        for (auto & x : finishedNodes) {
          x = 0;
        }
      }
      // Atomically claim nodeId. Returns true iff the caller is the first to
      // claim it; also drives the console progress display.
      bool takeNode(NodeId nodeId) {
        std::size_t chunk = nodeId / 64;
        std::size_t bit = nodeId % 64;
        uint64_t flag = static_cast<uint64_t>(1) << bit;
        uint64_t prev = finishedNodes.at(chunk).fetch_or(flag, std::memory_order_relaxed);
        if (prev & flag) { // already taken
          return false;
        }
        auto p = progress.fetch_add(1, std::memory_order_relaxed);
        if (p % 1000 == 0) {
          std::cout << '\xd' << p << "/" << numberOfNodes << "=" << double(p)/numberOfNodes*100 << "%" << std::flush;
        }
        return true;
      }
    } state(graph, cellIdStore, edge2CellIds);
    // A worker explores the graph depth-first.
    // For each node we store the hint given by the cellId operator and reuse it when backtracking.
    // Since multiple workers work in parallel we have to make sure that a node is not visited twice;
    // this is tracked in the finishedNodes bitset.
    // If the node stack is empty we pick a new random node and explore from there;
    // if that node is already taken we scan all nodes from the beginning so every node gets visited.
    struct Worker {
      // One depth-first stack entry: a node's edge range, the current position
      // within it, and the cell-id hint inherited from the parent edge.
      struct DFSElement {
        decltype(graph.edgesFor(0, EdgeDirection::FORWARD)) edges;
        decltype(graph.edgesFor(0, EdgeDirection::FORWARD).begin()) it;
        typename CellIdsForEdge::Hint hint;
        DFSElement(decltype(edges) const & edges, decltype(hint) hint) :
          edges(edges),
          it(edges.begin()),
          hint(hint)
        {}
      };
      State *state;
      CellIdsForEdge edge2CellIds; // worker-private copy of the oracle
      std::vector<DFSElement> stack;
      std::default_random_engine rndgen;
      std::uniform_int_distribution<uint32_t> nodeIdRnd;
      std::vector< std::pair<std::size_t, std::vector<uint32_t>> > buffer; // edgeId -> cellIds
      std::size_t apxBufferSizeInBytes{0};
      Worker(State *state) :
        state(state),
        edge2CellIds(state->edge2CellIds),
        nodeIdRnd(0, state->numberOfNodes-1)
      {}
      ~Worker() {
        flush(); // persist whatever is still buffered
      }
      void operator()() {
        while (true) {
          if (state->progress >= state->numberOfNodes) {
            break;
          }
          // First try a random nodeId; if that fails, sample all.
          NodeId nid = nodeIdRnd(rndgen);
          if (!state->takeNode(nid)) { // try all from the beginning
            nid = state->numberOfNodes;
            for (std::size_t i(0), s(state->finishedNodes.size()); i < s && nid >= state->numberOfNodes; ++i) {
              uint64_t tmp = state->finishedNodes[i].load(std::memory_order_relaxed);
              while (tmp != std::numeric_limits<uint64_t>::max() && nid >= state->numberOfNodes) {
                tmp = ~tmp;
                static_assert(std::is_same<unsigned long, decltype(tmp)>::value);
                // tmp cannot be 0, otherwise we wouldn't be here
                nid = i*64 + 63-__builtin_clzl(tmp);
                if (!state->takeNode(nid)) {
                  nid = state->numberOfNodes;
                  tmp = state->finishedNodes[i].load(std::memory_order_relaxed);
                }
              }
            }
            if (nid >= state->numberOfNodes) {
              // No node found: all nodes were claimed while we searched.
              flush();
              assert(state->progress == state->numberOfNodes);
              break;
            }
          }
          assert(nid < state->numberOfNodes);
          {
            auto node = state->graph.getNode(nid);
            typename CellIdsForEdge::Hint fh;
            // Get the face hint for this node via a degenerate
            // (zero-length) query at the node's own position.
            edge2CellIds(node.latLng.lat, node.latLng.lng, node.latLng.lat, node.latLng.lng, fh);
            stack.emplace_back(state->graph.edgesFor(nid, EdgeDirection::FORWARD), fh);
          }
          while (stack.size()) {
            // Check if we're at the end of our edge list.
            if (stack.back().it == stack.back().edges.end()) {
              stack.pop_back();
              continue;
            }
            // Expand the next edge.
            auto edge = *stack.back().it;
            ++stack.back().it; // move to next edge immediately in case we need to skip this edge
            if (edge.child1.has_value()) { // skip shortcut edges
              continue;
            }
            const auto sourceNode = state->graph.getNode(edge.source);
            const auto targetNode = state->graph.getNode(edge.target);
            auto hint = stack.back().hint;
            auto edgePos = state->graph.getEdgePosition(edge, EdgeDirection::FORWARD);
            if (edgePos) {
              buffer.emplace_back(
                edgePos.value(),
                edge2CellIds( sourceNode.latLng.lat, sourceNode.latLng.lng,
                              targetNode.latLng.lat, targetNode.latLng.lng,
                              hint
                )
              );
              // BUGFIX: account only for entries actually appended. This
              // previously ran unconditionally, reading buffer.back() even
              // when nothing was appended (UB on an empty buffer, and
              // double-counts a stale entry otherwise).
              apxBufferSizeInBytes += sizeof(typename std::decay_t<decltype(buffer)>::value_type) + buffer.back().second.size()*sizeof(uint32_t);
            }
            else {
              std::cerr << "BUG: Edge " << edge << " has no forward position" << std::endl;
            }
            // Check if we can descend into the target node.
            if (state->takeNode(targetNode.id)) {
              stack.emplace_back(state->graph.edgesFor(targetNode.id, EdgeDirection::FORWARD), hint);
            }
            // Flush the buffer once it grows beyond ~128 MiB.
            if (apxBufferSizeInBytes > 128*1024*1024) {
              flush();
            }
          }
        }
      }
      // Move all buffered (edge position, cell ids) pairs into the shared
      // store under the store lock and reset the buffer.
      void flush() {
        if (buffer.size()) {
          std::lock_guard<std::mutex> lck(state->cellIdStoreLock);
          for (auto & x : buffer) {
            state->cellIdStore.storeCellIds(x.first, std::move(x.second));
          }
          state->edgeProgress += buffer.size();
          buffer.clear();
          apxBufferSizeInBytes = 0;
        }
      }
    };
    std::cout << "Computing cell ids for regular edges..." << std::endl;
    if (numThreads > 1) {
      std::vector<std::thread> threads;
      threads.reserve(numThreads);
      for (std::size_t i(0); i < numThreads; ++i) {
        threads.emplace_back(Worker(&state));
      }
      for (auto & x : threads) {
        x.join();
      }
    }
    else {
      Worker w(&state);
      w();
    }
    // (typo fixed: "our of" -> "out of")
    std::cout << "\nFound " << state.edgeProgress << " regular edges out of a total of " << graph.getNumberOfEdges() << std::endl;
    const auto &edges = graph.getEdges();
    std::unordered_map<uint32_t, uint8_t> pendingChildren; // shortcut -> num unfinished children
    std::unordered_multimap<uint32_t, uint32_t> edgeParents; // shortcut-child -> shortcuts; an edge may have multiple parents
    std::unordered_set<uint32_t> edgesWithChildren; // shortcuts for which both children have their cell ids computed
    // First collect all shortcuts and register them as parents of their children.
    std::cout << "Computing shortcut dependency tree..." << std::flush;
    for (uint32_t i(0), s(graph.getNumberOfEdges()); i < s; ++i) {
      const auto &edge = edges[i];
      if (edge.child1.has_value()) {
        assert(edge.child1 != edge.child2);
        edgeParents.emplace(edge.child1.value(), i);
        edgeParents.emplace(edge.child2.value(), i);
        pendingChildren[i] = 2;
      }
      else {
        // Regular edge: phase 1 must have flushed exactly one entry for it.
        state.edgeProgress -= 1;
      }
    }
    std::cout << "done" << std::endl;
    assert(!state.edgeProgress);
    // Seed the propagation with shortcuts whose children are regular edges.
    std::cout << "Computing cell ids for shortcut edges..." << std::flush;
    for (uint32_t i(0), s(graph.getNumberOfEdges()); i < s; ++i) {
      auto const & edge = edges[i];
      if (edge.child1.has_value()) {
        continue;
      }
      if (!edgeParents.count(i)) {
        continue;
      }
      auto parents = edgeParents.equal_range(i);
      for (auto it(parents.first); it != parents.second; ++it) {
        auto parent = it->second;
        auto & x = pendingChildren.at(parent);
        x -= 1;
        assert(x <= 2);
        if (x == 0) { // our parent has all of its children set
          edgesWithChildren.insert(parent);
          pendingChildren.erase(parent);
        }
      }
    }
    // Now compute the union of the cell ids of the children, bottom-up. The
    // unordered_set defines the actual order; since the dependencies are
    // static, the total work does not depend on that order (apart from cache
    // effects).
    while (edgesWithChildren.size()) {
      uint32_t edgeId = *edgesWithChildren.begin();
      auto const & edge = edges[edgeId];
      // The children's cell id lists are sorted, so std::set_union applies.
      auto c1cids = cellIdStore.getCellIds(edge.child1.value());
      auto c2cids = cellIdStore.getCellIds(edge.child2.value());
      assert(std::is_sorted(c1cids.begin(), c1cids.end()));
      assert(std::is_sorted(c2cids.begin(), c2cids.end()));
      std::vector<std::decay_t<decltype(c2cids)>::value_type> ecids;
      std::set_union(c1cids.begin(), c1cids.end(), c2cids.begin(), c2cids.end(), std::back_inserter(ecids));
      cellIdStore.storeCellIds(edgeId, std::move(ecids));
      // Notify our parents.
      auto parents = edgeParents.equal_range(edgeId);
      for (auto it(parents.first); it != parents.second; ++it) {
        auto parent = it->second;
        auto & x = pendingChildren.at(parent);
        x -= 1;
        assert(x <= 2);
        if (x == 0) { // our parent has all of its children set
          edgesWithChildren.insert(parent);
          pendingChildren.erase(parent);
        }
      }
      // Remove ourself.
      edgesWithChildren.erase(edgeId);
    }
    std::cout << "done" << std::endl;
    if (pendingChildren.size()) {
      throw std::runtime_error("Could not compute all shortcut cellids");
    }
    cellIdStore.shrink_to_fit();
  }
};
}
namespace omp_variant {
class OscarIntegrator {
public:
  // OpenMP variant: computes the cell ids of all regular edges in parallel,
  // then resolves shortcuts serially by unrolling each shortcut into its
  // underlying regular edges via getPathFromShortcut.
  //
  // @tparam GeoPoint       point type with lat()/lon() setters
  // @tparam CellIdsForEdge callable constructed from a KVStore; maps
  //                        (sourcePoint, targetPoint) -> range of cell ids
  // @tparam KVStore        backing store handed to each CellIdsForEdge
  template <typename GeoPoint, typename CellIdsForEdge, typename KVStore>
  static void writeCellIdsForEdges(const CHGraph &graph, CellIdStore &cellIdStore,
                                   KVStore &store) {
    const auto& edges = graph.getEdges();
    int progress = 0; // shared counter; only updated inside the critical section / serial pass
    #pragma omp parallel for default(none) shared(edges, graph, cellIdStore, store, progress, std::cout) num_threads(16)
    for (int i = 0; i < graph.getNumberOfNodes(); ++i) {
      CellIdsForEdge cellIdsForEdge(store); // fresh per-iteration instance
      for(const auto& edge : graph.edgesFor(i, EdgeDirection::FORWARD)) {
        if (edge.child1.has_value()) { // shortcuts are handled in the second pass
          continue;
        }
        std::vector<uint32_t> cellIds;
        const auto sourceNode = graph.getNode(edge.source);
        const auto targetNode = graph.getNode(edge.target);
        GeoPoint sourcePoint;
        sourcePoint.lat() = sourceNode.latLng.lat;
        sourcePoint.lon() = sourceNode.latLng.lng;
        GeoPoint targetPoint;
        targetPoint.lat() = targetNode.latLng.lat;
        targetPoint.lon() = targetNode.latLng.lng;
        try {
          auto cellIdsEdge = cellIdsForEdge(sourcePoint, targetPoint);
          cellIds.insert(cellIds.end(), cellIdsEdge.begin(), cellIdsEdge.end());
        } catch (std::exception &e) {
          // NOTE(review): failures are deliberately swallowed (best effort);
          // the edge is then stored with an empty cell id list.
        }
        // 4294967295 == 2^32-1; presumably an "invalid cell" sentinel emitted
        // by the oracle -- TODO confirm against CellIdsForEdge.
        cellIds.erase(std::remove(cellIds.begin(), cellIds.end(), 4294967295), cellIds.end());
        // Serialize writes to the shared store and to the progress counter.
        #pragma omp critical
        {
          cellIdStore.storeCellIds(graph.getEdgePosition(edge, EdgeDirection::FORWARD).value(), cellIds);
          ++progress;
          if (false) //progress % 1000 == 0)
            std::cout << "progress: " << progress << "/" << graph.getNumberOfEdges() << '\n';
          // std::cout << "count: " << cellIds.size() << '\n';
        }
      }
    }
    // Second pass (serial): a shortcut's cell ids are the sorted, deduplicated
    // union of the cell ids of the regular edges it expands to.
    for (int i = 0; i < graph.getNumberOfEdges(); ++i) {
      const auto &edge = edges[i];
      if (edge.child1.has_value()) {
        const auto fullEdges = graph.getPathFromShortcut(edge, 0);
        std::vector<size_t> fullEdgeIds;
        fullEdgeIds.reserve(fullEdges.size());
        for (const auto fullEdge : fullEdges) {
          fullEdgeIds.emplace_back(graph.getEdgePosition(fullEdge, EdgeDirection::FORWARD).value());
        }
        auto fullCellIds = cellIdStore.getCellIds(fullEdgeIds);
        sort(fullCellIds.begin(), fullCellIds.end());
        (fullCellIds).erase(unique(fullCellIds.begin(), fullCellIds.end()), fullCellIds.end());
        cellIdStore.storeCellIds(i, fullCellIds);
        ++progress;
        if (progress % 1000 == 0)
          std::cout << "progress: " << progress << "/" << graph.getNumberOfEdges() << '\n';
      }
    }
    cellIdStore.shrink_to_fit();
  }
};
}
} // namespace pathFinder
|
test-libmvec-alias-mod.c | /* Part of test to build shared library to ensure link against
*_finite aliases from libmvec.
Copyright (C) 2016-2017 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
#define N 4000
FLOAT log_arg[N];
FLOAT exp_arg[N];
FLOAT log_res[N];
FLOAT exp_res[N];
FLOAT pow_res[N];
int arch_check = 1;
/* Prepare the input vectors: log_arg = 1.0, exp_arg = 0.0, so the expected
   results of the vectorized calls below are exactly representable.  */
static void
init_arg (void)
{
  int i;

  /* Must run first: CHECK_ARCH_EXT is a harness macro that is presumably
     able to leave this function early when the required ISA extension is
     missing, in which case arch_check keeps its initial value 1.  */
  CHECK_ARCH_EXT;
  arch_check = 0;

  for (i = 0; i < N; i++)
    log_arg[i] = 1.0;
  for (i = 0; i < N; i++)
    exp_arg[i] = 0.0;
}
/* Run vectorized log/exp/pow over the prepared arguments and validate the
   first result of each.  Returns 0 on success, 1 on a wrong result, and 77
   (the automake "skipped" status) when the architecture check failed.  */
int
test_finite_alias (void)
{
  int i;

  init_arg ();
  if (arch_check)
    return 77;

#pragma omp simd
  for (i = 0; i < N; ++i)
    {
      log_res[i] = FUNC (log) (log_arg[i]);
      exp_res[i] = FUNC (exp) (exp_arg[i]);
      pow_res[i] = FUNC (pow) (log_arg[i], log_arg[i]);
    }

  /* log(1) = 0, exp(0) = 1, pow(1,1) = 1.  */
  if (log_res[0] != 0.0 || exp_res[0] != 1.0 || pow_res[0] != 1.0)
    return 1;
  return 0;
}
|
concat_ref.c | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* License); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (c) 2020, OPEN AI LAB
* Author: jjzeng@openailab.com
*/
#include <math.h>
#include "sys_port.h"
#include "module.h"
#include "tengine_errno.h"
#include "tengine_log.h"
#include "tengine_ir.h"
#include "../../cpu_node_ops.h"
#include "tengine_op.h"
#include "concat_param.h"
#include "compiler_fp16.h"
/* Shape plus quantization parameters of one tensor taking part in the
   concat.  dim[] holds at most 4 dimensions. */
struct shape_dim
{
    int dim[4];
    float scale; /* quantization scale (uint8/int8 kernels only) */
    int zero;    /* quantization zero point (uint8 kernel only) */
};

/* Parameters gathered in prerun()/run() and consumed by the ref_concat_*
   kernels. */
struct concat_op_param
{
    struct shape_dim* input_shape; /* one entry per input tensor */
    int input_counts;              /* number of input tensors */
    int input_dim;
    struct shape_dim output_shape;
    int output_dim;                /* number of valid entries in output_shape.dim */
    int axis;                      /* concatenation axis */
    float out_scale;
    void** input_data;             /* one data pointer per input tensor */
};

/* Reference fp32 concat: copies each input's slice along the concat axis
   into the output, iterating over the outer dimensions.
   Returns 0 on success, -1 when the output extent along the axis does not
   match the sum of the input extents. */
static int ref_concat_fp32(const float** in_data, float* out_data, const struct concat_op_param* param)
{
    int axis = param->axis;

    /* The output extent along the axis must equal the sum of the inputs'. */
    int concat_dim = 0;
    for (int ii = 0; ii < param->input_counts; ++ii)
    {
        concat_dim += param->input_shape[ii].dim[axis];
    }

    if (concat_dim != param->output_shape.dim[axis])
    {
        /* fixed typo ("concant") and unified the message with the other
           ref_concat_* kernels */
        fprintf(stderr, "concat dimensions is not same output: ( %d -- %d )\n", concat_dim, param->output_shape.dim[axis]);
        return -1;
    }

    /* out_size: product of dims before the axis; in_size: after the axis. */
    int out_size = 1;
    for (int ii = 0; ii < axis; ++ii)
    {
        out_size *= param->output_shape.dim[ii];
    }

    int in_size = 1;
    for (int ii = axis + 1; ii < param->output_dim; ++ii)
    {
        in_size *= param->input_shape[0].dim[ii];
    }

    float* output_ptr = out_data;
    for (int k = 0; k < out_size; ++k)
    {
        for (int j = 0; j < param->input_counts; ++j)
        {
            int cp_size = param->input_shape[j].dim[axis] * in_size;
            memcpy(output_ptr, in_data[j] + k * cp_size, cp_size * sizeof(float));
            output_ptr += cp_size;
        }
    }
    return 0;
}
/* Reference fp16 concat: identical layout logic to ref_concat_fp32 but for
   __fp16 elements.  Returns 0 on success, -1 on axis-extent mismatch. */
static int ref_concat_fp16(const __fp16** in_data, __fp16* out_data, const struct concat_op_param* param)
{
    int axis = param->axis;

    int concat_dim = 0;
    for(int ii = 0; ii < param->input_counts; ++ii)
    {
        concat_dim += param->input_shape[ii].dim[axis];
    }

    if(concat_dim != param->output_shape.dim[axis])
    {
        /* consistency: report to stderr like the other ref_concat_* kernels
           (was plain printf) */
        fprintf(stderr, "concat dimensions is not same output: ( %d -- %d )\n", concat_dim, param->output_shape.dim[axis]);
        return -1;
    }

    /* out_size: product of dims before the axis; in_size: after the axis. */
    int out_size = 1;
    for(int ii = 0; ii < axis; ++ii)
    {
        out_size *= param->output_shape.dim[ii];
    }

    int in_size = 1;
    for(int ii = axis + 1; ii < param->output_dim; ++ii)
    {
        in_size *= param->input_shape[0].dim[ii];
    }

    __fp16* output_ptr = out_data;
    for(int k = 0; k < out_size; ++k)
    {
        for(int j = 0; j < param->input_counts; ++j)
        {
            int cp_size = param->input_shape[j].dim[axis] * in_size;
            memcpy(output_ptr, in_data[j] + k * cp_size, cp_size * sizeof(__fp16));
            output_ptr += cp_size;
        }
    }
    return 0;
}
/* Reference uint8 (asymmetric quantized) concat.  When an input's
   quantization parameters differ from the output's, each element is
   requantized: out = round((in - in_zero) * in_scale / out_scale) + out_zero.
   Returns 0 on success, -1 on axis-extent mismatch. */
static int ref_concat_uint8(const uint8_t** in_data, uint8_t* out_data, const struct concat_op_param* param)
{
    int axis = param->axis;
    int concat_dim = 0;
    for (int ii = 0; ii < param->input_counts; ++ii)
    {
        concat_dim += param->input_shape[ii].dim[axis];
    }
    if (concat_dim != param->output_shape.dim[axis])
    {
        fprintf(stderr, "concat dimensions is not same output: ( %d -- %d )\n", concat_dim, param->output_shape.dim[axis]);
        return -1;
    }

    /* outer_size: product of dims before the axis; in_size: after the axis.
       (removed an output_size computation whose result was never used) */
    int outer_size = 1;
    for (int ii = 0; ii < axis; ++ii)
    {
        outer_size *= param->output_shape.dim[ii];
    }
    int in_size = 1;
    for (int ii = axis + 1; ii < param->output_dim; ++ii)
    {
        in_size *= param->output_shape.dim[ii];
    }

    uint8_t* output_ptr = out_data;
    float out_scale = param->output_shape.scale;
    uint8_t out_zero = param->output_shape.zero;
    for (int k = 0; k < outer_size; ++k)
    {
        for (int j = 0; j < param->input_counts; ++j)
        {
            int cp_size = param->input_shape[j].dim[axis] * in_size;
            float scale = param->input_shape[j].scale;
            uint8_t input_zero = param->input_shape[j].zero;
            const uint8_t* input_ptr = ( const uint8_t* )(in_data[j] + k * cp_size);
            if (scale == out_scale && input_zero == out_zero)
            {
                /* same quantization parameters: plain byte copy */
                memcpy(output_ptr, input_ptr, cp_size);
            }
            else
            {
                /* requantize element-wise into the output's scale/zero */
                float t_scale = scale / out_scale;
                for (int ii = 0; ii < cp_size; ++ii)
                {
                    output_ptr[ii] = round((input_ptr[ii] - input_zero) * t_scale) + out_zero;
                }
            }
            output_ptr += cp_size;
        }
    }
    return 0;
}
/* Reference int8 (symmetric quantized) concat.  When an input's scale
   differs from the output's, each element is rescaled by
   input_scale / output_scale and saturated to the symmetric range
   [-127, 127].  Returns 0 on success, -1 on axis-extent mismatch. */
static int ref_concat_int8(const int8_t** in_data, int8_t* out_data, const struct concat_op_param* param)
{
    int axis = param->axis;
    int concat_dim = 0;
    for (int ii = 0; ii < param->input_counts; ++ii)
    {
        concat_dim += param->input_shape[ii].dim[axis];
    }
    if (concat_dim != param->output_shape.dim[axis])
    {
        fprintf(stderr, "concat dimensions is not same output: ( %d -- %d )\n", concat_dim, param->output_shape.dim[axis]);
        return -1;
    }

    /* outer_size: product of dims before the axis; in_size: after the axis.
       (removed an output_size computation whose result was never used) */
    int outer_size = 1;
    for (int ii = 0; ii < axis; ++ii)
    {
        outer_size *= param->output_shape.dim[ii];
    }
    int in_size = 1;
    for (int ii = axis + 1; ii < param->output_dim; ++ii)
    {
        in_size *= param->output_shape.dim[ii];
    }

    int8_t* output_ptr = out_data;
    float output_scale = param->output_shape.scale;
    for (int k = 0; k < outer_size; ++k)
    {
        for (int j = 0; j < param->input_counts; ++j)
        {
            int cp_size = param->input_shape[j].dim[axis] * in_size;
            float input_scale = param->input_shape[j].scale;
            const int8_t* input_ptr = ( const int8_t* )(in_data[j] + k * cp_size);
            if (input_scale == output_scale)
            {
                /* same scale: plain byte copy */
                memcpy(output_ptr, input_ptr, cp_size);
            }
            else
            {
                /* rescale element-wise and clamp to the symmetric range */
                float requant_scale = input_scale / output_scale;
                for (int ii = 0; ii < cp_size; ++ii)
                {
                    int data_i32 = round((float )input_ptr[ii] * requant_scale);
                    if (data_i32 > 127)
                        data_i32 = 127;
                    else if (data_i32 < -127)
                        data_i32 = -127;
                    output_ptr[ii] = (int8_t)data_i32;
                }
            }
            output_ptr += cp_size;
        }
    }
    return 0;
}
/* Allocate and default-initialize the per-node parameter block consumed by
   the ref_concat_* kernels.  Returns 0 on success, -1 on allocation failure. */
static int init_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    struct concat_op_param* concat_op_param = ( struct concat_op_param* )sys_malloc(sizeof(struct concat_op_param));

    /* robustness: report allocation failure instead of dereferencing NULL */
    if (concat_op_param == NULL)
        return -1;

    concat_op_param->axis = 0;
    concat_op_param->input_counts = 1;
    concat_op_param->input_dim = 1;
    concat_op_param->input_shape = NULL;
    concat_op_param->out_scale = 0.1f;
    concat_op_param->output_dim = 1;
    /* robustness: was left uninitialized before prerun() filled it */
    concat_op_param->input_data = NULL;
    exec_node->ops_priv = concat_op_param;
    return 0;
}
/* Free the parameter block allocated in init_node().  Always succeeds. */
static int release_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    struct concat_op_param* concat_op_param = ( struct concat_op_param* )exec_node->ops_priv;
    sys_free(concat_op_param);
    return 0;
}
/* Capture the static shape/quantization info of the output tensor and
   allocate the per-input bookkeeping arrays filled in run().
   Always returns 0. */
static int prerun(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    struct ir_node* ir_node = exec_node->ir_node;
    struct ir_graph* ir_graph = ir_node->graph;
    struct ir_tensor* output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]);

    struct concat_op_param* concat_op_param = ( struct concat_op_param* )exec_node->ops_priv;
    struct concat_param* concat_param = ( struct concat_param* )ir_node->op.param_mem;

    concat_op_param->axis = concat_param->axis;
    concat_op_param->input_counts = ir_node->input_num;
    concat_op_param->input_shape = ( struct shape_dim* )sys_malloc(sizeof(struct shape_dim) * ir_node->input_num);
    concat_op_param->output_dim = output_tensor->dim_num;

    /* hoisted out of the loop: scale/zero do not depend on the dim index.
       NOTE(review): assumes dim_num <= 4, the capacity of shape_dim.dim --
       TODO confirm against the IR layer. */
    concat_op_param->output_shape.scale = output_tensor->scale;
    concat_op_param->output_shape.zero = output_tensor->zero_point;
    for (int ii = 0; ii < output_tensor->dim_num; ii++)
    {
        concat_op_param->output_shape.dim[ii] = output_tensor->dims[ii];
    }

    concat_op_param->input_data = ( void* )sys_malloc(sizeof(void*) * ir_node->input_num);
    return 0;
}
static int run(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
struct ir_node* ir_node = exec_node->ir_node;
struct ir_graph* ir_graph = ir_node->graph;
struct ir_tensor* input_tensor;
struct ir_tensor* output_tensor;
output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]);
struct concat_op_param* concat_op_param = ( struct concat_op_param* )exec_node->ops_priv;
void* out_data = output_tensor->data;
for (int i = 0; i < ir_node->input_num; i++)
{
input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[i]);
int number = input_tensor->dim_num;
for (int j = 0; j < number; j++)
{
concat_op_param->input_shape[i].dim[j] = input_tensor->dims[j];
concat_op_param->input_shape[i].scale = input_tensor->scale;
concat_op_param->input_shape[i].zero = input_tensor->zero_point;
}
concat_op_param->input_data[i] = input_tensor->data;
}
int ret = -1;
if (input_tensor->data_type == TENGINE_DT_FP32)
ret = ref_concat_fp32(( const float** )concat_op_param->input_data, out_data, concat_op_param);
else if (input_tensor->data_type == TENGINE_DT_FP16)
ret = ref_concat_fp16(( const __fp16** )concat_op_param->input_data, out_data, concat_op_param);
else if (input_tensor->data_type == TENGINE_DT_UINT8)
ret = ref_concat_uint8(( const uint8_t** )concat_op_param->input_data, out_data, concat_op_param);
else if (input_tensor->data_type == TENGINE_DT_INT8)
ret = ref_concat_int8(( const int8_t** )concat_op_param->input_data, out_data, concat_op_param);
else
printf("Input data type %d not to be supported.\n", input_tensor->data_type);
return ret;
}
/* Release the per-run buffers allocated in prerun().  Always succeeds. */
static int postrun(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    struct concat_op_param* param = ( struct concat_op_param* )exec_node->ops_priv;
    sys_free(param->input_shape);
    sys_free(param->input_data);
    return 0;
}
/* Scheduler scoring hook: the reference implementation can always run, so it
   reports the generic "can do" priority. */
static int score(struct node_ops* node_ops, struct exec_graph* exec_graph, struct ir_node* exec_node)
{
    return OPS_SCORE_CANDO;
}
/* Operator vtable for the reference concat implementation, registered for
   OP_CONCAT below.  reshape is not provided by this operator. */
static struct node_ops hcl_node_ops = {.prerun = prerun,
                                       .run = run,
                                       .reshape = NULL,
                                       .postrun = postrun,
                                       .init_node = init_node,
                                       .release_node = release_node,
                                       .score = score};
/* Register the reference concat vtable for OP_CONCAT (run at module load
   via AUTO_REGISTER_OPS below). */
static int reg_concat_hcl_ops(void* arg)
{
    return register_builtin_node_ops(OP_CONCAT, &hcl_node_ops);
}

/* Counterpart of reg_concat_hcl_ops(), run at module unload. */
static int unreg_concat_hcl_ops(void* arg)
{
    return unregister_builtin_node_ops(OP_CONCAT, &hcl_node_ops);
}

AUTO_REGISTER_OPS(reg_concat_hcl_ops);
AUTO_UNREGISTER_OPS(unreg_concat_hcl_ops);
|
GB_binop__lor_int16.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__lor_int16)
// A.*B function (eWiseMult): GB (_AemultB_01__lor_int16)
// A.*B function (eWiseMult): GB (_AemultB_02__lor_int16)
// A.*B function (eWiseMult): GB (_AemultB_03__lor_int16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__lor_int16)
// A*D function (colscale): GB (_AxD__lor_int16)
// D*A function (rowscale): GB (_DxB__lor_int16)
// C+=B function (dense accum): GB (_Cdense_accumB__lor_int16)
// C+=b function (dense accum): GB (_Cdense_accumb__lor_int16)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__lor_int16)
// C=scalar+B GB (_bind1st__lor_int16)
// C=scalar+B' GB (_bind1st_tran__lor_int16)
// C=A+scalar GB (_bind2nd__lor_int16)
// C=A'+scalar GB (_bind2nd_tran__lor_int16)
// C type: int16_t
// A type: int16_t
// B,b type: int16_t
// BinaryOp: cij = ((aij != 0) || (bij != 0))
#define GB_ATYPE \
int16_t
#define GB_BTYPE \
int16_t
#define GB_CTYPE \
int16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int16_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int16_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = ((x != 0) || (y != 0)) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LOR || GxB_NO_INT16 || GxB_NO_LOR_INT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// (compiled out: LOR is not in that list, so the generator emitted no
// dense ewise3-accum variant for this operator)
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// Auto-generated wrapper; the loop lives in the included template, which uses
// the GB_* macros defined above (GB_BINOP is z = (x != 0) || (y != 0)).
GrB_Info GB (_Cdense_ewise3_noaccum__lor_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    // operator compiled out via GxB_NO_* flags
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__lor_int16)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__lor_int16)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int16_t
        int16_t bwork = (*((int16_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE: unreachable (the block above returns); kept as emitted by the
    // code generator
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// Column scale C = A*D (D diagonal): auto-generated wrapper around the
// colscale template; Cx is the raw int16_t value array of C.
GrB_Info GB (_AxD__lor_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *restrict Cx = (int16_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

// Row scale C = D*B (D diagonal): auto-generated wrapper around the
// rowscale template.
GrB_Info GB (_DxB__lor_int16)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *restrict Cx = (int16_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd C=A+B (or masked variants): auto-generated wrapper; the workspace
// declared here is consumed and freed by the included add template.
GrB_Info GB (_AaddB__lor_int16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

// eWiseMult method 01: auto-generated wrapper around the emult meta template.
GrB_Info GB (_AemultB_01__lor_int16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 02 (A sparse/hyper, B bitmap/full): auto-generated
// wrapper.  GB_BINOP_FLIP is 0 for LOR (commutative), so only the
// unflipped branch below is compiled in.
GrB_Info GB (_AemultB_02__lor_int16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant. For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

// eWiseMult method 03: auto-generated wrapper around the emult 03 template.
GrB_Info GB (_AemultB_03__lor_int16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

// Bitmap eWiseMult: auto-generated wrapper around the bitmap emult template.
GrB_Info GB (_AemultB_bitmap__lor_int16)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = op (x, Bx): apply z = (x || bij) with the scalar bound to the first
// argument.  Bb is B->b when B is bitmap (NULL otherwise); entries with a
// cleared bitmap bit are skipped.
GrB_Info GB (_bind1st__lor_int16)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *out = (int16_t *) Cx_output ;
    const int16_t *src = (const int16_t *) Bx_input ;
    const int16_t scalar = (*((const int16_t *) x_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < bnz ; k++)
    {
        // only process entries present in the bitmap (GBB is 1 if Bb is NULL)
        if (GBB (Bb, k))
        {
            int16_t bij = GBX (src, k, false) ;
            out [k] = ((scalar != 0) || (bij != 0)) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = op (Ax, y): apply z = (aij || y) with the scalar bound to the second
// argument.  Ab is A->b when A is bitmap (NULL otherwise); cleared entries are
// skipped.
GrB_Info GB (_bind2nd__lor_int16)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *out = (int16_t *) Cx_output ;
    const int16_t *src = (const int16_t *) Ax_input ;
    const int16_t scalar = (*((const int16_t *) y_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        // only process entries present in the bitmap (GBB is 1 if Ab is NULL)
        if (GBB (Ab, k))
        {
            int16_t aij = GBX (src, k, false) ;
            out [k] = ((aij != 0) || (scalar != 0)) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// GB_CAST_OP is consumed by GB_unop_transpose.c: cij = (x || aij), with the
// scalar x in the first position.  No typecasting (in spite of the macro name).
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    int16_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = ((x != 0) || (aij != 0)) ; \
}
// C = op (x, A'): transpose A and apply the LOR operator with scalar bind1st.
GrB_Info GB (_bind1st_tran__lor_int16)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,      // per-task transpose workspaces
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
    int16_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t x = (*((const int16_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows this function
    #undef GB_ATYPE
    #define GB_ATYPE \
    int16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// GB_CAST_OP is consumed by GB_unop_transpose.c: cij = (aij || y), with the
// scalar y in the second position.  No typecasting (in spite of the macro name).
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    int16_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = ((aij != 0) || (y != 0)) ; \
}
// C = op (A', y): transpose A and apply the LOR operator with scalar bind2nd.
GrB_Info GB (_bind2nd_tran__lor_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,      // per-task transpose workspaces
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t y = (*((const int16_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
GB_unop__ainv_int8_int8.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__ainv_int8_int8)
// op(A') function: GB (_unop_tran__ainv_int8_int8)
// C type: int8_t
// A type: int8_t
// cast: int8_t cij = aij
// unaryop: cij = -aij
// Type and operator macros for AINV (additive inverse, cij = -aij) on int8,
// consumed by the apply loop below and by GB_unop_transpose.c.
#define GB_ATYPE \
    int8_t
#define GB_CTYPE \
    int8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
    z = -x ;
// casting (identity here: A and C are both int8)
#define GB_CAST(z, aij) \
    int8_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    int8_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    int8_t z = aij ; \
    Cx [pC] = -z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_AINV || GxB_NO_INT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx = -Ax, elementwise, over anz entries.  If Ab is non-NULL (A is bitmap)
// only entries whose bitmap bit is set are written; C->b was already copied
// from A->b by the caller.
GrB_Info GB (_unop_apply__ainv_int8_int8)
(
    int8_t *Cx,                 // Cx and Ax may be aliased
    const int8_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t k ;
    if (Ab == NULL)
    {
        // sparse, hypersparse, or full: every entry is present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (k = 0 ; k < anz ; k++)
        {
            Cx [k] = -Ax [k] ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (k = 0 ; k < anz ; k++)
        {
            if (Ab [k])
            {
                Cx [k] = -Ax [k] ;
            }
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = -A': transpose A and negate each entry.  The transpose machinery lives
// entirely in the included template, driven by the GB_* macros above.
GrB_Info GB (_unop_tran__ainv_int8_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,      // per-task transpose workspaces
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
|
GB_unaryop__abs_int16_uint16.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__abs_int16_uint16
// op(A') function: GB_tran__abs_int16_uint16
// C type: int16_t
// A type: uint16_t
// cast: int16_t cij = (int16_t) aij
// unaryop: cij = GB_IABS (aij)
// Type and operator macros for ABS with typecast uint16 -> int16, consumed by
// the apply loop below and by GB_unaryop_transpose.c.
#define GB_ATYPE \
    uint16_t
#define GB_CTYPE \
    int16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
    z = GB_IABS (x) ;
// casting (uint16 value reinterpreted as signed int16)
#define GB_CASTING(z, aij) \
    int16_t z = (int16_t) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (z, aij) ; \
    GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ABS || GxB_NO_INT16 || GxB_NO_UINT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = |(int16_t) Ax [p]| for all anz entries, parallelized statically.
// The per-entry work is the GB_CAST_OP macro defined above.
GrB_Info GB_unop__abs_int16_uint16
(
    int16_t *Cx,        // Cx and Ax may be aliased
    uint16_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = |A'| with typecast uint16 -> int16: transpose machinery is in the
// included template (phase 2 only), driven by the GB_* macros above.
GrB_Info GB_tran__abs_int16_uint16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,    // per-slice row counts from phase 1
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
convolution_winograd_transform_pack4.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// Winograd F(6x6,3x3) input transform, pack4 NEON.  For every 8x8 input tile d
// it computes B^T * d * B (the itm matrix below) in two 1-D passes — rows into
// the stack buffer tmp, then columns of tmp into bottom_blob_tm.  Each lane of
// a float32x4_t is one of the 4 packed channels.
static void conv3x3s1_winograd63_transform_input_pack4_neon(const Mat& bottom_blob, Mat& bottom_blob_tm, const Option& opt)
{
    const int w = bottom_blob.w;
    const int h = bottom_blob.h;
    const int inch = bottom_blob.c;
    // 6x6 output tiles over the (w-2)x(h-2) valid region; tiles overlap by 2
    const int w_tiles = (w - 2) / 6;
    const int h_tiles = (h - 2) / 6;
    const int tiles = w_tiles * h_tiles;
    // const float itm[8][8] = {
    //     {1.0f,  0.0f, -5.25f,  0.00f,  5.25f,  0.00f, -1.0f, 0.0f},
    //
    //     {0.0f,  1.0f,  1.00f, -4.25f, -4.25f,  1.00f,  1.0f, 0.0f},
    //     {0.0f, -1.0f,  1.00f,  4.25f, -4.25f, -1.00f,  1.0f, 0.0f},
    //
    //     {0.0f,  0.5f,  0.25f, -2.50f, -1.25f,  2.00f,  1.0f, 0.0f},
    //     {0.0f, -0.5f,  0.25f,  2.50f, -1.25f, -2.00f,  1.0f, 0.0f},
    //
    //     {0.0f,  2.0f,  4.00f, -2.50f, -5.00f,  0.50f,  1.0f, 0.0f},
    //     {0.0f, -2.0f,  4.00f,  2.50f, -5.00f, -0.50f,  1.0f, 0.0f},
    //
    //     {0.0f, -1.0f,  0.00f,  5.25f,  0.00f, -5.25f,  0.0f, 1.0f}
    // };
    // 0 = r00 - r06 + (r04 - r02) * 5.25
    // 7 = r07 - r01 + (r03 - r05) * 5.25
    // 1 = (r02 + r06 - r04 * 4.25) + (r01 - r03 * 4.25 + r05)
    // 2 = (r02 + r06 - r04 * 4.25) - (r01 - r03 * 4.25 + r05)
    // 3 = (r06 + r02 * 0.25 - r04 * 1.25) + (r01 * 0.5 - r03 * 2.5 + r05 * 2)
    // 4 = (r06 + r02 * 0.25 - r04 * 1.25) - (r01 * 0.5 - r03 * 2.5 + r05 * 2)
    // reuse r04 * 1.25
    // reuse r03 * 2.5
    // 5 = (r06 + (r02 - r04 * 1.25) * 4) + (r01 * 2 - r03 * 2.5 + r05 * 0.5)
    // 6 = (r06 + (r02 - r04 * 1.25) * 4) - (r01 * 2 - r03 * 2.5 + r05 * 0.5)
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int q = 0; q < inch; q++)
    {
        const Mat img0 = bottom_blob.channel(q);
        Mat img0_tm = bottom_blob_tm.channel(q);
        // row-transformed tile; tmp[out_row][in_row][lane], 4 channels per entry
        float tmp[8][8][4];
        // tile
        for (int i = 0; i < h_tiles; i++)
        {
            for (int j = 0; j < w_tiles; j++)
            {
                // pass 1: transform each of the 8 rows of the 8x8 tile
                const float* r0 = img0.row(i * 6) + (j * 6) * 4;
                for (int m = 0; m < 8; m++)
                {
                    float32x4_t _r00 = vld1q_f32(r0);
                    float32x4_t _r01 = vld1q_f32(r0 + 4);
                    float32x4_t _r02 = vld1q_f32(r0 + 8);
                    float32x4_t _r03 = vld1q_f32(r0 + 12);
                    float32x4_t _r04 = vld1q_f32(r0 + 16);
                    float32x4_t _r05 = vld1q_f32(r0 + 20);
                    float32x4_t _r06 = vld1q_f32(r0 + 24);
                    float32x4_t _r07 = vld1q_f32(r0 + 28);
                    float32x4_t _tmp0m = vmlaq_n_f32(vsubq_f32(_r00, _r06), vsubq_f32(_r04, _r02), 5.25f);
                    float32x4_t _tmp7m = vmlaq_n_f32(vsubq_f32(_r07, _r01), vsubq_f32(_r03, _r05), 5.25f);
                    float32x4_t _tmp12a = vmlsq_n_f32(vaddq_f32(_r02, _r06), _r04, 4.25f);
                    float32x4_t _tmp12b = vmlsq_n_f32(vaddq_f32(_r01, _r05), _r03, 4.25f);
                    float32x4_t _tmp1m = vaddq_f32(_tmp12a, _tmp12b);
                    float32x4_t _tmp2m = vsubq_f32(_tmp12a, _tmp12b);
                    float32x4_t _tmp34a = vmlsq_n_f32(vmlaq_n_f32(_r06, _r02, 0.25f), _r04, 1.25f);
                    float32x4_t _tmp34b = vmlaq_n_f32(vmlsq_n_f32(vmulq_n_f32(_r01, 0.5f), _r03, 2.5f), _r05, 2.f);
                    float32x4_t _tmp3m = vaddq_f32(_tmp34a, _tmp34b);
                    float32x4_t _tmp4m = vsubq_f32(_tmp34a, _tmp34b);
                    float32x4_t _tmp56a = vmlaq_n_f32(_r06, vmlsq_n_f32(_r02, _r04, 1.25f), 4.f);
                    float32x4_t _tmp56b = vmlaq_n_f32(vmlsq_n_f32(vmulq_n_f32(_r01, 2.f), _r03, 2.5f), _r05, 0.5f);
                    float32x4_t _tmp5m = vaddq_f32(_tmp56a, _tmp56b);
                    float32x4_t _tmp6m = vsubq_f32(_tmp56a, _tmp56b);
                    // stores transpose the tile: tmp[k][m] = row m, coefficient k
                    vst1q_f32(tmp[0][m], _tmp0m);
                    vst1q_f32(tmp[1][m], _tmp1m);
                    vst1q_f32(tmp[2][m], _tmp2m);
                    vst1q_f32(tmp[3][m], _tmp3m);
                    vst1q_f32(tmp[4][m], _tmp4m);
                    vst1q_f32(tmp[5][m], _tmp5m);
                    vst1q_f32(tmp[6][m], _tmp6m);
                    vst1q_f32(tmp[7][m], _tmp7m);
                    r0 += w * 4;
                }
                // pass 2: transform columns; scatter the 64 coefficients,
                // tile-interleaved (stride tiles*4 between coefficients)
                float* r0_tm_0 = (float*)img0_tm + (i * w_tiles + j) * 4;
                float* r0_tm_1 = r0_tm_0 + tiles * 4;
                float* r0_tm_2 = r0_tm_0 + tiles * 8;
                float* r0_tm_3 = r0_tm_0 + tiles * 12;
                float* r0_tm_4 = r0_tm_0 + tiles * 16;
                float* r0_tm_5 = r0_tm_0 + tiles * 20;
                float* r0_tm_6 = r0_tm_0 + tiles * 24;
                float* r0_tm_7 = r0_tm_0 + tiles * 28;
                for (int m = 0; m < 8; m++)
                {
                    float32x4_t _tmp00 = vld1q_f32(tmp[m][0]);
                    float32x4_t _tmp01 = vld1q_f32(tmp[m][1]);
                    float32x4_t _tmp02 = vld1q_f32(tmp[m][2]);
                    float32x4_t _tmp03 = vld1q_f32(tmp[m][3]);
                    float32x4_t _tmp04 = vld1q_f32(tmp[m][4]);
                    float32x4_t _tmp05 = vld1q_f32(tmp[m][5]);
                    float32x4_t _tmp06 = vld1q_f32(tmp[m][6]);
                    float32x4_t _tmp07 = vld1q_f32(tmp[m][7]);
                    // same 8-point transform as pass 1, applied down columns
                    float32x4_t _r0tm0 = vmlaq_n_f32(vsubq_f32(_tmp00, _tmp06), vsubq_f32(_tmp04, _tmp02), 5.25f);
                    float32x4_t _r0tm7 = vmlaq_n_f32(vsubq_f32(_tmp07, _tmp01), vsubq_f32(_tmp03, _tmp05), 5.25f);
                    float32x4_t _tmp12a = vmlsq_n_f32(vaddq_f32(_tmp02, _tmp06), _tmp04, 4.25f);
                    float32x4_t _tmp12b = vmlsq_n_f32(vaddq_f32(_tmp01, _tmp05), _tmp03, 4.25f);
                    float32x4_t _r0tm1 = vaddq_f32(_tmp12a, _tmp12b);
                    float32x4_t _r0tm2 = vsubq_f32(_tmp12a, _tmp12b);
                    float32x4_t _tmp34a = vmlsq_n_f32(vmlaq_n_f32(_tmp06, _tmp02, 0.25f), _tmp04, 1.25f);
                    float32x4_t _tmp34b = vmlaq_n_f32(vmlsq_n_f32(vmulq_n_f32(_tmp01, 0.5f), _tmp03, 2.5f), _tmp05, 2.f);
                    float32x4_t _r0tm3 = vaddq_f32(_tmp34a, _tmp34b);
                    float32x4_t _r0tm4 = vsubq_f32(_tmp34a, _tmp34b);
                    float32x4_t _tmp56a = vmlaq_n_f32(_tmp06, vmlsq_n_f32(_tmp02, _tmp04, 1.25f), 4.f);
                    float32x4_t _tmp56b = vmlaq_n_f32(vmlsq_n_f32(vmulq_n_f32(_tmp01, 2.f), _tmp03, 2.5f), _tmp05, 0.5f);
                    float32x4_t _r0tm5 = vaddq_f32(_tmp56a, _tmp56b);
                    float32x4_t _r0tm6 = vsubq_f32(_tmp56a, _tmp56b);
                    vst1q_f32(r0_tm_0, _r0tm0);
                    vst1q_f32(r0_tm_1, _r0tm1);
                    vst1q_f32(r0_tm_2, _r0tm2);
                    vst1q_f32(r0_tm_3, _r0tm3);
                    vst1q_f32(r0_tm_4, _r0tm4);
                    vst1q_f32(r0_tm_5, _r0tm5);
                    vst1q_f32(r0_tm_6, _r0tm6);
                    vst1q_f32(r0_tm_7, _r0tm7);
                    // advance all 8 cursors by one coefficient row (8 * tiles * 4)
                    r0_tm_0 += tiles * 32;
                    r0_tm_1 += tiles * 32;
                    r0_tm_2 += tiles * 32;
                    r0_tm_3 += tiles * 32;
                    r0_tm_4 += tiles * 32;
                    r0_tm_5 += tiles * 32;
                    r0_tm_6 += tiles * 32;
                    r0_tm_7 += tiles * 32;
                }
            }
        }
    }
}
// Winograd F(6x6,3x3) output transform, pack4 NEON.  For every tile it reads
// the 8x8 transformed accumulation, applies A^T * m * A (the otm matrix below)
// in two 1-D passes, adds the per-channel bias, and writes the 6x6 spatial
// output.
static void conv3x3s1_winograd63_transform_output_pack4_neon(const Mat& top_blob_tm, Mat& top_blob, const Mat& bias, const Option& opt)
{
    const int outw = top_blob.w;
    const int outh = top_blob.h;
    const int outch = top_blob.c;
    const int w_tiles = outw / 6;
    const int h_tiles = outh / 6;
    const int tiles = w_tiles * h_tiles;
    const float* biasptr = bias;
    // const float otm[6][8] = {
    //     {1.0f, 1.0f,  1.0f,  1.0f,  1.0f, 32.0f, 32.0f, 0.0f},
    //     {0.0f, 1.0f, -1.0f,  2.0f, -2.0f, 16.0f,-16.0f, 0.0f},
    //     {0.0f, 1.0f,  1.0f,  4.0f,  4.0f,  8.0f,  8.0f, 0.0f},
    //     {0.0f, 1.0f, -1.0f,  8.0f, -8.0f,  4.0f, -4.0f, 0.0f},
    //     {0.0f, 1.0f,  1.0f, 16.0f, 16.0f,  2.0f,  2.0f, 0.0f},
    //     {0.0f, 1.0f, -1.0f, 32.0f,-32.0f,  1.0f, -1.0f, 1.0f}
    // };
    // 0 = r0 + (r1 + r2) + (r3 + r4)     + (r5 + r6) * 32
    // 1 =      (r1 - r2) + (r3 - r4) * 2 + (r5 - r6) * 16
    // 2 =      (r1 + r2) + (r3 + r4) * 4 + (r5 + r6) * 8
    // 3 =      (r1 - r2) + (r3 - r4) * 8 + (r5 - r6) * 4
    // 4 =      (r1 + r2) + (r3 + r4) * 16+ (r5 + r6) * 2
    // 5 = r7 + (r1 - r2) + (r3 - r4) * 32+ (r5 - r6)
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        const Mat out0_tm = top_blob_tm.channel(p);
        Mat out0 = top_blob.channel(p);
        // per-output-channel bias (4 packed channels); zero if no bias given
        float32x4_t _bias0 = biasptr ? vld1q_f32(biasptr + p * 4) : vdupq_n_f32(0.f);
        // column-transformed tile; 6 output rows x 8 input columns x 4 lanes
        float tmp[6][8][4];
        // tile
        for (int i = 0; i < h_tiles; i++)
        {
            for (int j = 0; j < w_tiles; j++)
            {
                // gather cursors over the tile-interleaved coefficient layout
                const float* output0_tm_0 = (const float*)out0_tm + (i * w_tiles + j) * 4;
                const float* output0_tm_1 = output0_tm_0 + tiles * 4;
                const float* output0_tm_2 = output0_tm_0 + tiles * 8;
                const float* output0_tm_3 = output0_tm_0 + tiles * 12;
                const float* output0_tm_4 = output0_tm_0 + tiles * 16;
                const float* output0_tm_5 = output0_tm_0 + tiles * 20;
                const float* output0_tm_6 = output0_tm_0 + tiles * 24;
                const float* output0_tm_7 = output0_tm_0 + tiles * 28;
                float* output0 = out0.row(i * 6) + (j * 6) * 4;
                // pass 1: reduce each of the 8 coefficient rows to 6 values
                for (int m = 0; m < 8; m++)
                {
                    float32x4_t _out0tm0 = vld1q_f32(output0_tm_0);
                    float32x4_t _out0tm1 = vld1q_f32(output0_tm_1);
                    float32x4_t _out0tm2 = vld1q_f32(output0_tm_2);
                    float32x4_t _out0tm3 = vld1q_f32(output0_tm_3);
                    float32x4_t _out0tm4 = vld1q_f32(output0_tm_4);
                    float32x4_t _out0tm5 = vld1q_f32(output0_tm_5);
                    float32x4_t _out0tm6 = vld1q_f32(output0_tm_6);
                    float32x4_t _out0tm7 = vld1q_f32(output0_tm_7);
                    // shared even/odd partial sums (a: r1,r2  b: r3,r4  c: r5,r6)
                    float32x4_t _tmp024a = vaddq_f32(_out0tm1, _out0tm2);
                    float32x4_t _tmp135a = vsubq_f32(_out0tm1, _out0tm2);
                    float32x4_t _tmp024b = vaddq_f32(_out0tm3, _out0tm4);
                    float32x4_t _tmp135b = vsubq_f32(_out0tm3, _out0tm4);
                    float32x4_t _tmp024c = vaddq_f32(_out0tm5, _out0tm6);
                    float32x4_t _tmp135c = vsubq_f32(_out0tm5, _out0tm6);
                    float32x4_t _tmp0m = vaddq_f32(vaddq_f32(_out0tm0, _tmp024a), vmlaq_n_f32(_tmp024b, _tmp024c, 32.f));
                    float32x4_t _tmp2m = vmlaq_n_f32(vmlaq_n_f32(_tmp024a, _tmp024b, 4.f), _tmp024c, 8.f);
                    float32x4_t _tmp4m = vmlaq_n_f32(vmlaq_n_f32(_tmp024a, _tmp024b, 16.f), _tmp024c, 2.f);
                    float32x4_t _tmp1m = vmlaq_n_f32(vmlaq_n_f32(_tmp135a, _tmp135b, 2.f), _tmp135c, 16.f);
                    float32x4_t _tmp3m = vmlaq_n_f32(vmlaq_n_f32(_tmp135a, _tmp135b, 8.f), _tmp135c, 4.f);
                    float32x4_t _tmp5m = vaddq_f32(vaddq_f32(_out0tm7, _tmp135a), vmlaq_n_f32(_tmp135c, _tmp135b, 32.f));
                    vst1q_f32(tmp[0][m], _tmp0m);
                    vst1q_f32(tmp[1][m], _tmp1m);
                    vst1q_f32(tmp[2][m], _tmp2m);
                    vst1q_f32(tmp[3][m], _tmp3m);
                    vst1q_f32(tmp[4][m], _tmp4m);
                    vst1q_f32(tmp[5][m], _tmp5m);
                    output0_tm_0 += tiles * 32;
                    output0_tm_1 += tiles * 32;
                    output0_tm_2 += tiles * 32;
                    output0_tm_3 += tiles * 32;
                    output0_tm_4 += tiles * 32;
                    output0_tm_5 += tiles * 32;
                    output0_tm_6 += tiles * 32;
                    output0_tm_7 += tiles * 32;
                }
                // pass 2: reduce columns, add bias, emit 6x6 spatial pixels
                for (int m = 0; m < 6; m++)
                {
                    float32x4_t _tmp00 = vld1q_f32(tmp[m][0]);
                    float32x4_t _tmp01 = vld1q_f32(tmp[m][1]);
                    float32x4_t _tmp02 = vld1q_f32(tmp[m][2]);
                    float32x4_t _tmp03 = vld1q_f32(tmp[m][3]);
                    float32x4_t _tmp04 = vld1q_f32(tmp[m][4]);
                    float32x4_t _tmp05 = vld1q_f32(tmp[m][5]);
                    float32x4_t _tmp06 = vld1q_f32(tmp[m][6]);
                    float32x4_t _tmp07 = vld1q_f32(tmp[m][7]);
                    float32x4_t _tmp024a = vaddq_f32(_tmp01, _tmp02);
                    float32x4_t _tmp135a = vsubq_f32(_tmp01, _tmp02);
                    float32x4_t _tmp024b = vaddq_f32(_tmp03, _tmp04);
                    float32x4_t _tmp135b = vsubq_f32(_tmp03, _tmp04);
                    float32x4_t _tmp024c = vaddq_f32(_tmp05, _tmp06);
                    float32x4_t _tmp135c = vsubq_f32(_tmp05, _tmp06);
                    float32x4_t _out00 = vaddq_f32(_bias0, vaddq_f32(vaddq_f32(_tmp00, _tmp024a), vmlaq_n_f32(_tmp024b, _tmp024c, 32.f)));
                    float32x4_t _out02 = vaddq_f32(_bias0, vmlaq_n_f32(vmlaq_n_f32(_tmp024a, _tmp024b, 4.f), _tmp024c, 8.f));
                    float32x4_t _out04 = vaddq_f32(_bias0, vmlaq_n_f32(vmlaq_n_f32(_tmp024a, _tmp024b, 16.f), _tmp024c, 2.f));
                    float32x4_t _out01 = vaddq_f32(_bias0, vmlaq_n_f32(vmlaq_n_f32(_tmp135a, _tmp135b, 2.f), _tmp135c, 16.f));
                    float32x4_t _out03 = vaddq_f32(_bias0, vmlaq_n_f32(vmlaq_n_f32(_tmp135a, _tmp135b, 8.f), _tmp135c, 4.f));
                    float32x4_t _out05 = vaddq_f32(_bias0, vaddq_f32(vaddq_f32(_tmp07, _tmp135a), vmlaq_n_f32(_tmp135c, _tmp135b, 32.f)));
                    vst1q_f32(output0, _out00);
                    vst1q_f32(output0 + 4, _out01);
                    vst1q_f32(output0 + 8, _out02);
                    vst1q_f32(output0 + 12, _out03);
                    vst1q_f32(output0 + 16, _out04);
                    vst1q_f32(output0 + 20, _out05);
                    output0 += outw * 4;
                }
            }
        }
    }
}
// Winograd F(4x4,3x3) input transform, pack4 NEON.  For every 6x6 input tile d
// it computes B^T * d * B (the itm matrix below) in two 1-D passes — rows into
// the stack buffer tmp, then columns of tmp into bottom_blob_tm.
static void conv3x3s1_winograd43_transform_input_pack4_neon(const Mat& bottom_blob, Mat& bottom_blob_tm, const Option& opt)
{
    const int w = bottom_blob.w;
    const int h = bottom_blob.h;
    const int inch = bottom_blob.c;
    // 4x4 output tiles over the (w-2)x(h-2) valid region; tiles overlap by 2
    const int w_tiles = (w - 2) / 4;
    const int h_tiles = (h - 2) / 4;
    const int tiles = w_tiles * h_tiles;
    // const float itm[6][6] = {
    //     {4.0f, 0.0f, -5.0f, 0.0f, 1.0f, 0.0f},
    //     {0.0f,-4.0f, -4.0f, 1.0f, 1.0f, 0.0f},
    //     {0.0f, 4.0f, -4.0f,-1.0f, 1.0f, 0.0f},
    //     {0.0f,-2.0f, -1.0f, 2.0f, 1.0f, 0.0f},
    //     {0.0f, 2.0f, -1.0f,-2.0f, 1.0f, 0.0f},
    //     {0.0f, 4.0f,  0.0f,-5.0f, 0.0f, 1.0f}
    // };
    // 0 =  4 * r00 - 5 * r02 + r04
    // 1 = -4 * (r01 + r02) + r04 + r03
    // 2 =  4 * (r01 - r02) + r04 - r03
    // 3 = -2 * (r01 - r03) + r04 - r02
    // 4 =  2 * (r01 - r03) + r04 - r02
    // 5 =  4 * r01 - 5 * r03 + r05
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int q = 0; q < inch; q++)
    {
        const Mat img0 = bottom_blob.channel(q);
        Mat img0_tm = bottom_blob_tm.channel(q);
        // row-transformed tile; tmp[out_row][in_row][lane], 4 channels per entry
        float tmp[6][6][4];
        // tile
        for (int i = 0; i < h_tiles; i++)
        {
            for (int j = 0; j < w_tiles; j++)
            {
                // pass 1: transform each of the 6 rows of the 6x6 tile
                const float* r0 = img0.row(i * 4) + (j * 4) * 4;
                for (int m = 0; m < 6; m++)
                {
                    float32x4_t _r00 = vld1q_f32(r0);
                    float32x4_t _r01 = vld1q_f32(r0 + 4);
                    float32x4_t _r02 = vld1q_f32(r0 + 8);
                    float32x4_t _r03 = vld1q_f32(r0 + 12);
                    float32x4_t _r04 = vld1q_f32(r0 + 16);
                    float32x4_t _r05 = vld1q_f32(r0 + 20);
                    float32x4_t _tmp0m = vmlsq_n_f32(vmlaq_n_f32(_r04, _r00, 4.f), _r02, 5.f);
                    float32x4_t _tmp1m = vmlsq_n_f32(vaddq_f32(_r04, _r03), vaddq_f32(_r01, _r02), 4.f);
                    float32x4_t _tmp2m = vmlaq_n_f32(vsubq_f32(_r04, _r03), vsubq_f32(_r01, _r02), 4.f);
                    float32x4_t _tmp3m = vmlsq_n_f32(vsubq_f32(_r04, _r02), vsubq_f32(_r01, _r03), 2.f);
                    float32x4_t _tmp4m = vmlaq_n_f32(vsubq_f32(_r04, _r02), vsubq_f32(_r01, _r03), 2.f);
                    float32x4_t _tmp5m = vmlsq_n_f32(vmlaq_n_f32(_r05, _r01, 4.f), _r03, 5.f);
                    // stores transpose the tile: tmp[k][m] = row m, coefficient k
                    vst1q_f32(tmp[0][m], _tmp0m);
                    vst1q_f32(tmp[1][m], _tmp1m);
                    vst1q_f32(tmp[2][m], _tmp2m);
                    vst1q_f32(tmp[3][m], _tmp3m);
                    vst1q_f32(tmp[4][m], _tmp4m);
                    vst1q_f32(tmp[5][m], _tmp5m);
                    r0 += w * 4;
                }
                // pass 2: transform columns; scatter the 36 coefficients,
                // tile-interleaved (stride tiles*4 between coefficients)
                float* r0_tm_0 = (float*)img0_tm + (i * w_tiles + j) * 4;
                float* r0_tm_1 = r0_tm_0 + tiles * 4;
                float* r0_tm_2 = r0_tm_0 + tiles * 8;
                float* r0_tm_3 = r0_tm_0 + tiles * 12;
                float* r0_tm_4 = r0_tm_0 + tiles * 16;
                float* r0_tm_5 = r0_tm_0 + tiles * 20;
                for (int m = 0; m < 6; m++)
                {
                    float32x4_t _tmp00 = vld1q_f32(tmp[m][0]);
                    float32x4_t _tmp01 = vld1q_f32(tmp[m][1]);
                    float32x4_t _tmp02 = vld1q_f32(tmp[m][2]);
                    float32x4_t _tmp03 = vld1q_f32(tmp[m][3]);
                    float32x4_t _tmp04 = vld1q_f32(tmp[m][4]);
                    float32x4_t _tmp05 = vld1q_f32(tmp[m][5]);
                    // same 6-point transform as pass 1, applied down columns
                    float32x4_t _r0tm0 = vmlsq_n_f32(vmlaq_n_f32(_tmp04, _tmp00, 4.f), _tmp02, 5.f);
                    float32x4_t _r0tm1 = vmlsq_n_f32(vaddq_f32(_tmp04, _tmp03), vaddq_f32(_tmp01, _tmp02), 4.f);
                    float32x4_t _r0tm2 = vmlaq_n_f32(vsubq_f32(_tmp04, _tmp03), vsubq_f32(_tmp01, _tmp02), 4.f);
                    float32x4_t _r0tm3 = vmlsq_n_f32(vsubq_f32(_tmp04, _tmp02), vsubq_f32(_tmp01, _tmp03), 2.f);
                    float32x4_t _r0tm4 = vmlaq_n_f32(vsubq_f32(_tmp04, _tmp02), vsubq_f32(_tmp01, _tmp03), 2.f);
                    float32x4_t _r0tm5 = vmlsq_n_f32(vmlaq_n_f32(_tmp05, _tmp01, 4.f), _tmp03, 5.f);
                    vst1q_f32(r0_tm_0, _r0tm0);
                    vst1q_f32(r0_tm_1, _r0tm1);
                    vst1q_f32(r0_tm_2, _r0tm2);
                    vst1q_f32(r0_tm_3, _r0tm3);
                    vst1q_f32(r0_tm_4, _r0tm4);
                    vst1q_f32(r0_tm_5, _r0tm5);
                    // advance all 6 cursors by one coefficient row (6 * tiles * 4)
                    r0_tm_0 += tiles * 24;
                    r0_tm_1 += tiles * 24;
                    r0_tm_2 += tiles * 24;
                    r0_tm_3 += tiles * 24;
                    r0_tm_4 += tiles * 24;
                    r0_tm_5 += tiles * 24;
                }
            }
        }
    }
}
// Winograd F(4x4,3x3) output transform, pack4 NEON.  For every tile it reads
// the 6x6 transformed accumulation, applies A^T * m * A (the otm matrix below)
// in two 1-D passes, adds the per-channel bias, and writes the 4x4 spatial
// output.
static void conv3x3s1_winograd43_transform_output_pack4_neon(const Mat& top_blob_tm, Mat& top_blob, const Mat& bias, const Option& opt)
{
    const int outw = top_blob.w;
    const int outh = top_blob.h;
    const int outch = top_blob.c;
    const int w_tiles = outw / 4;
    const int h_tiles = outh / 4;
    const int tiles = w_tiles * h_tiles;
    const float* biasptr = bias;
    // const float otm[4][6] = {
    //     {1.0f, 1.0f,  1.0f, 1.0f,  1.0f, 0.0f},
    //     {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 0.0f},
    //     {0.0f, 1.0f,  1.0f, 4.0f,  4.0f, 0.0f},
    //     {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 1.0f}
    // };
    // 0 = r00 + (r01 + r02) + (r03 + r04)
    // 1 =       (r01 - r02) + (r03 - r04) * 2
    // 2 =       (r01 + r02) + (r03 + r04) * 4
    // 3 = r05 + (r01 - r02) + (r03 - r04) * 8
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        const Mat out0_tm = top_blob_tm.channel(p);
        Mat out0 = top_blob.channel(p);
        // per-output-channel bias (4 packed channels); zero if no bias given
        float32x4_t _bias0 = biasptr ? vld1q_f32(biasptr + p * 4) : vdupq_n_f32(0.f);
        // column-transformed tile; 4 output rows x 6 input columns x 4 lanes
        float tmp[4][6][4];
        // tile
        for (int i = 0; i < h_tiles; i++)
        {
            for (int j = 0; j < w_tiles; j++)
            {
                // gather cursors over the tile-interleaved coefficient layout
                const float* output0_tm_0 = (const float*)out0_tm + (i * w_tiles + j) * 4;
                const float* output0_tm_1 = output0_tm_0 + tiles * 4;
                const float* output0_tm_2 = output0_tm_0 + tiles * 8;
                const float* output0_tm_3 = output0_tm_0 + tiles * 12;
                const float* output0_tm_4 = output0_tm_0 + tiles * 16;
                const float* output0_tm_5 = output0_tm_0 + tiles * 20;
                float* output0 = out0.row(i * 4) + (j * 4) * 4;
                // pass 1: reduce each of the 6 coefficient rows to 4 values
                for (int m = 0; m < 6; m++)
                {
                    float32x4_t _out0tm0 = vld1q_f32(output0_tm_0);
                    float32x4_t _out0tm1 = vld1q_f32(output0_tm_1);
                    float32x4_t _out0tm2 = vld1q_f32(output0_tm_2);
                    float32x4_t _out0tm3 = vld1q_f32(output0_tm_3);
                    float32x4_t _out0tm4 = vld1q_f32(output0_tm_4);
                    float32x4_t _out0tm5 = vld1q_f32(output0_tm_5);
                    // shared even/odd partial sums (a: r1,r2  b: r3,r4)
                    float32x4_t _tmp02a = vaddq_f32(_out0tm1, _out0tm2);
                    float32x4_t _tmp13a = vsubq_f32(_out0tm1, _out0tm2);
                    float32x4_t _tmp02b = vaddq_f32(_out0tm3, _out0tm4);
                    float32x4_t _tmp13b = vsubq_f32(_out0tm3, _out0tm4);
                    float32x4_t _tmp0m = vaddq_f32(vaddq_f32(_out0tm0, _tmp02a), _tmp02b);
                    float32x4_t _tmp1m = vmlaq_n_f32(_tmp13a, _tmp13b, 2.f);
                    float32x4_t _tmp2m = vmlaq_n_f32(_tmp02a, _tmp02b, 4.f);
                    float32x4_t _tmp3m = vmlaq_n_f32(vaddq_f32(_out0tm5, _tmp13a), _tmp13b, 8.f);
                    vst1q_f32(tmp[0][m], _tmp0m);
                    vst1q_f32(tmp[1][m], _tmp1m);
                    vst1q_f32(tmp[2][m], _tmp2m);
                    vst1q_f32(tmp[3][m], _tmp3m);
                    output0_tm_0 += tiles * 24;
                    output0_tm_1 += tiles * 24;
                    output0_tm_2 += tiles * 24;
                    output0_tm_3 += tiles * 24;
                    output0_tm_4 += tiles * 24;
                    output0_tm_5 += tiles * 24;
                }
                // pass 2: reduce columns, add bias, emit 4x4 spatial pixels
                for (int m = 0; m < 4; m++)
                {
                    float32x4_t _tmp00 = vld1q_f32(tmp[m][0]);
                    float32x4_t _tmp01 = vld1q_f32(tmp[m][1]);
                    float32x4_t _tmp02 = vld1q_f32(tmp[m][2]);
                    float32x4_t _tmp03 = vld1q_f32(tmp[m][3]);
                    float32x4_t _tmp04 = vld1q_f32(tmp[m][4]);
                    float32x4_t _tmp05 = vld1q_f32(tmp[m][5]);
                    float32x4_t _tmp02a = vaddq_f32(_tmp01, _tmp02);
                    float32x4_t _tmp13a = vsubq_f32(_tmp01, _tmp02);
                    float32x4_t _tmp02b = vaddq_f32(_tmp03, _tmp04);
                    float32x4_t _tmp13b = vsubq_f32(_tmp03, _tmp04);
                    float32x4_t _out00 = vaddq_f32(_bias0, vaddq_f32(vaddq_f32(_tmp00, _tmp02a), _tmp02b));
                    float32x4_t _out01 = vaddq_f32(_bias0, vmlaq_n_f32(_tmp13a, _tmp13b, 2.f));
                    float32x4_t _out02 = vaddq_f32(_bias0, vmlaq_n_f32(_tmp02a, _tmp02b, 4.f));
                    float32x4_t _out03 = vaddq_f32(_bias0, vmlaq_n_f32(vaddq_f32(_tmp05, _tmp13a), _tmp13b, 8.f));
                    vst1q_f32(output0, _out00);
                    vst1q_f32(output0 + 4, _out01);
                    vst1q_f32(output0 + 8, _out02);
                    vst1q_f32(output0 + 12, _out03);
                    output0 += outw * 4;
                }
            }
        }
    }
}
|
distribute_dispatch.c | // RUN: %libomp-compile-and-run | %sort-threads | FileCheck %s
// REQUIRES: ompt
#include "callback.h"
#define WORK_SIZE 64
// Drive the OMPT distribute/dispatch callbacks: 4 teams of one thread each,
// dist_schedule(static, 16) gives every team exactly one 16-iteration chunk,
// which the FileCheck patterns below match (chunk_iterations=16 per team).
int main() {
  int i;
#pragma omp teams num_teams(4) thread_limit(1)
#pragma omp distribute dist_schedule(static, WORK_SIZE / 4)
  for (i = 0; i < WORK_SIZE; i++) {}
  return 0;
}
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_work'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_dispatch'
// CHECK: 0: NULL_POINTER=[[NULL:.*$]]
// CHECK: {{^}}[[THREAD_ID0:[0-9]+]]: ompt_event_distribute_begin:
// CHECK-SAME: parallel_id=[[PARALLEL_ID0:[0-9]+]]
// CHECK-SAME: parent_task_id=[[TASK_ID0:[0-9]+]]
// CHECK: {{^}}[[THREAD_ID0]]: ompt_event_distribute_chunk_begin:
// CHECK-SAME: parallel_id=[[PARALLEL_ID0]], task_id=[[TASK_ID0]]
// CHECK-SAME: chunk_start={{[0-9]+}}, chunk_iterations=16
// CHECK: {{^}}[[THREAD_ID1:[0-9]+]]: ompt_event_distribute_begin:
// CHECK-SAME: parallel_id=[[PARALLEL_ID1:[0-9]+]]
// CHECK-SAME: parent_task_id=[[TASK_ID1:[0-9]+]]
// CHECK: {{^}}[[THREAD_ID1]]: ompt_event_distribute_chunk_begin:
// CHECK-SAME: parallel_id=[[PARALLEL_ID1]], task_id=[[TASK_ID1]]
// CHECK-SAME: chunk_start={{[0-9]+}}, chunk_iterations=16
// CHECK: {{^}}[[THREAD_ID2:[0-9]+]]: ompt_event_distribute_begin:
// CHECK-SAME: parallel_id=[[PARALLEL_ID2:[0-9]+]]
// CHECK-SAME: parent_task_id=[[TASK_ID2:[0-9]+]]
// CHECK: {{^}}[[THREAD_ID2]]: ompt_event_distribute_chunk_begin:
// CHECK-SAME: parallel_id=[[PARALLEL_ID2]], task_id=[[TASK_ID2]]
// CHECK-SAME: chunk_start={{[0-9]+}}, chunk_iterations=16
// CHECK: {{^}}[[THREAD_ID3:[0-9]+]]: ompt_event_distribute_begin:
// CHECK-SAME: parallel_id=[[PARALLEL_ID3:[0-9]+]]
// CHECK-SAME: parent_task_id=[[TASK_ID3:[0-9]+]]
// CHECK: {{^}}[[THREAD_ID3]]: ompt_event_distribute_chunk_begin:
// CHECK-SAME: parallel_id=[[PARALLEL_ID3]], task_id=[[TASK_ID3]]
// CHECK-SAME: chunk_start={{[0-9]+}}, chunk_iterations=16
|
DRB044-adi-tile-no.c | /**
* adi.c: This file is part of the PolyBench/C 3.2 test suite.
* Alternating Direction Implicit solver with tiling and nested SIMD.
*
* Contact: Louis-Noel Pouchet <pouchet@cse.ohio-state.edu>
* Web address: http://polybench.sourceforge.net
* License: /LICENSE.OSU.txt
*/
#include "omprace.h"
#include <omp.h>
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include "polybench/polybench.h"
/* Include benchmark-specific header. */
/* Default data type is double, default size is 10x1024x1024. */
#include "polybench/adi.h"
/* Array initialization. */
/* Array initialization: fill the n x n prefix of X, A, and B with
   deterministic values.  The loop nest is polyhedral-compiler output:
   16x16 tiles (c1,c2 index tiles; c3,c4 index points), outer tile loop
   parallelized over threads, innermost loop SIMD-vectorized.  The
   floor-division ternaries compute (n-1)/16 rounded toward -infinity. */
static void init_array(int n,double X[500 + 0][500 + 0],double A[500 + 0][500 + 0],double B[500 + 0][500 + 0])
{
  //int i;
  //int j;
{
    int c1;
    int c3;
    int c2;
    int c4;
    if (n >= 1) {
#pragma omp parallel for private(c4, c2, c3)
      for (c1 = 0; c1 <= (((n + -1) * 16 < 0?((16 < 0?-((-(n + -1) + 16 + 1) / 16) : -((-(n + -1) + 16 - 1) / 16))) : (n + -1) / 16)); c1++) {
        for (c2 = 0; c2 <= (((n + -1) * 16 < 0?((16 < 0?-((-(n + -1) + 16 + 1) / 16) : -((-(n + -1) + 16 - 1) / 16))) : (n + -1) / 16)); c2++) {
          for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < n + -1?16 * c1 + 15 : n + -1)); c3++) {
#pragma omp simd
            for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < n + -1?16 * c2 + 15 : n + -1)); c4++) {
              X[c3][c4] = (((double )c3) * (c4 + 1) + 1) / n;
              A[c3][c4] = (((double )c3) * (c4 + 2) + 2) / n;
              B[c3][c4] = (((double )c3) * (c4 + 3) + 3) / n;
            }
          }
        }
      }
    }
  }
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
/* DCE code. Must scan the entire live-out data.
   Prints the n x n prefix of X to stderr, inserting a newline every 20th
   element of the linearized (row-major, width 500) index. */
static void print_array(int n,double X[500 + 0][500 + 0])
{
    int row;
    int col;
    for (row = 0; row < n; row++) {
        for (col = 0; col < n; col++) {
            fprintf(stderr,"%0.2lf ",X[row][col]);
            if ((row * 500 + col) % 20 == 0) {
                fprintf(stderr,"\n");
            }
        }
    }
    fprintf(stderr,"\n");
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
static void kernel_adi(int tsteps,int n,double X[500 + 0][500 + 0],double A[500 + 0][500 + 0],double B[500 + 0][500 + 0])
{
  /* ADI-style kernel over X with coefficient arrays A and B.  This body is
     auto-generated (note the //#pragma scop markers): the original t/i1/i2
     loops were tiled into 16x16 blocks and parallelized with OpenMP.  The
     c* variables are the generator's loop counters, and the repeated
     (expr < 0 ? ... : expr / 16) conditionals are its inlined expansion of
     a floor-division, i.e. floor((n-1)/16) tile-count bounds. */
  //int t;
  //int i1;
  //int i2;
  //#pragma scop
{
    int c0;   /* time-step index (0 .. tsteps-1) */
    int c2;   /* tile index of the parallel (outer) dimension */
    int c8;   /* tile index along the sweep dimension */
    int c9;   /* point index along the sweep dimension */
    int c15;  /* point index inside tile c2 (simd-vectorized) */
    if (n >= 1 && tsteps >= 1) {
      for (c0 = 0; c0 <= tsteps + -1; c0++) {
        /* ---- first half-sweep: along the SECOND index of X/A/B ---- */
        if (n >= 2) {
#pragma omp parallel for private(c15, c9, c8)
          for (c2 = 0; c2 <= (((n + -1) * 16 < 0?((16 < 0?-((-(n + -1) + 16 + 1) / 16) : -((-(n + -1) + 16 - 1) / 16))) : (n + -1) / 16)); c2++) {
            /* forward elimination of B along c9 = 1 .. n-1 */
            for (c8 = 0; c8 <= (((n + -1) * 16 < 0?((16 < 0?-((-(n + -1) + 16 + 1) / 16) : -((-(n + -1) + 16 - 1) / 16))) : (n + -1) / 16)); c8++) {
              for (c9 = (1 > 16 * c8?1 : 16 * c8); c9 <= ((16 * c8 + 15 < n + -1?16 * c8 + 15 : n + -1)); c9++) {
#pragma omp simd
                for (c15 = 16 * c2; c15 <= ((16 * c2 + 15 < n + -1?16 * c2 + 15 : n + -1)); c15++) {
                  B[c15][c9] = B[c15][c9] - A[c15][c9] * A[c15][c9] / B[c15][c9 - 1];
                }
              }
            }
            /* forward elimination of X along c9 = 1 .. n-1 */
            for (c8 = 0; c8 <= (((n + -1) * 16 < 0?((16 < 0?-((-(n + -1) + 16 + 1) / 16) : -((-(n + -1) + 16 - 1) / 16))) : (n + -1) / 16)); c8++) {
              for (c9 = (1 > 16 * c8?1 : 16 * c8); c9 <= ((16 * c8 + 15 < n + -1?16 * c8 + 15 : n + -1)); c9++) {
#pragma omp simd
                for (c15 = 16 * c2; c15 <= ((16 * c2 + 15 < n + -1?16 * c2 + 15 : n + -1)); c15++) {
                  X[c15][c9] = X[c15][c9] - X[c15][c9 - 1] * A[c15][c9] / B[c15][c9 - 1];
                }
              }
            }
            /* back-substitution: walks the second index in reverse via n-c9-2 */
            for (c8 = 0; c8 <= (((n + -3) * 16 < 0?((16 < 0?-((-(n + -3) + 16 + 1) / 16) : -((-(n + -3) + 16 - 1) / 16))) : (n + -3) / 16)); c8++) {
              for (c9 = 16 * c8; c9 <= ((16 * c8 + 15 < n + -3?16 * c8 + 15 : n + -3)); c9++) {
#pragma omp simd
                for (c15 = 16 * c2; c15 <= ((16 * c2 + 15 < n + -1?16 * c2 + 15 : n + -1)); c15++) {
                  X[c15][n - c9 - 2] = (X[c15][n - 2 - c9] - X[c15][n - 2 - c9 - 1] * A[c15][n - c9 - 3]) / B[c15][n - 3 - c9];
                }
              }
            }
          }
        }
        /* normalize the last column */
#pragma omp parallel for private(c15)
        for (c2 = 0; c2 <= (((n + -1) * 16 < 0?((16 < 0?-((-(n + -1) + 16 + 1) / 16) : -((-(n + -1) + 16 - 1) / 16))) : (n + -1) / 16)); c2++) {
#pragma omp simd
          for (c15 = 16 * c2; c15 <= ((16 * c2 + 15 < n + -1?16 * c2 + 15 : n + -1)); c15++) {
            X[c15][n - 1] = X[c15][n - 1] / B[c15][n - 1];
          }
        }
        /* ---- second half-sweep: same three phases along the FIRST index ---- */
        if (n >= 2) {
#pragma omp parallel for private(c15, c9, c8)
          for (c2 = 0; c2 <= (((n + -1) * 16 < 0?((16 < 0?-((-(n + -1) + 16 + 1) / 16) : -((-(n + -1) + 16 - 1) / 16))) : (n + -1) / 16)); c2++) {
            for (c8 = 0; c8 <= (((n + -1) * 16 < 0?((16 < 0?-((-(n + -1) + 16 + 1) / 16) : -((-(n + -1) + 16 - 1) / 16))) : (n + -1) / 16)); c8++) {
              for (c9 = (1 > 16 * c8?1 : 16 * c8); c9 <= ((16 * c8 + 15 < n + -1?16 * c8 + 15 : n + -1)); c9++) {
#pragma omp simd
                for (c15 = 16 * c2; c15 <= ((16 * c2 + 15 < n + -1?16 * c2 + 15 : n + -1)); c15++) {
                  B[c9][c15] = B[c9][c15] - A[c9][c15] * A[c9][c15] / B[c9 - 1][c15];
                }
              }
            }
            for (c8 = 0; c8 <= (((n + -1) * 16 < 0?((16 < 0?-((-(n + -1) + 16 + 1) / 16) : -((-(n + -1) + 16 - 1) / 16))) : (n + -1) / 16)); c8++) {
              for (c9 = (1 > 16 * c8?1 : 16 * c8); c9 <= ((16 * c8 + 15 < n + -1?16 * c8 + 15 : n + -1)); c9++) {
#pragma omp simd
                for (c15 = 16 * c2; c15 <= ((16 * c2 + 15 < n + -1?16 * c2 + 15 : n + -1)); c15++) {
                  X[c9][c15] = X[c9][c15] - X[c9 - 1][c15] * A[c9][c15] / B[c9 - 1][c15];
                }
              }
            }
            /* NOTE(review): unlike the first half-sweep (which divides by
               B[c15][n-3-c9]), this phase divides by B[n-2-c9][c15]; the
               asymmetry is reproduced from the generated original. */
            for (c8 = 0; c8 <= (((n + -3) * 16 < 0?((16 < 0?-((-(n + -3) + 16 + 1) / 16) : -((-(n + -3) + 16 - 1) / 16))) : (n + -3) / 16)); c8++) {
              for (c9 = 16 * c8; c9 <= ((16 * c8 + 15 < n + -3?16 * c8 + 15 : n + -3)); c9++) {
#pragma omp simd
                for (c15 = 16 * c2; c15 <= ((16 * c2 + 15 < n + -1?16 * c2 + 15 : n + -1)); c15++) {
                  X[n - 2 - c9][c15] = (X[n - 2 - c9][c15] - X[n - c9 - 3][c15] * A[n - 3 - c9][c15]) / B[n - 2 - c9][c15];
                }
              }
            }
          }
        }
        /* normalize the last row */
#pragma omp parallel for private(c15)
        for (c2 = 0; c2 <= (((n + -1) * 16 < 0?((16 < 0?-((-(n + -1) + 16 + 1) / 16) : -((-(n + -1) + 16 - 1) / 16))) : (n + -1) / 16)); c2++) {
#pragma omp simd
          for (c15 = 16 * c2; c15 <= ((16 * c2 + 15 < n + -1?16 * c2 + 15 : n + -1)); c15++) {
            X[n - 1][c15] = X[n - 1][c15] / B[n - 1][c15];
          }
        }
      }
    }
  }
  //#pragma endscop
}
/* Driver: allocate the 500x500 arrays, initialize them, time kernel_adi,
   and free everything.  The print_array call is guarded by a condition that
   is never true at runtime (argc > 42) so the compiler cannot eliminate the
   computation as dead code. */
int main(int argc,char **argv)
{
  omprace_init();
  /* Problem sizes (fixed by the generated benchmark). */
  int n = 500;
  int tsteps = 10;
  /* Heap-allocate the three work arrays. */
  double (*X)[500 + 0][500 + 0] =
      ((double (*)[500 + 0][500 + 0])(polybench_alloc_data(((500 + 0) * (500 + 0)),(sizeof(double )))));
  double (*A)[500 + 0][500 + 0] =
      ((double (*)[500 + 0][500 + 0])(polybench_alloc_data(((500 + 0) * (500 + 0)),(sizeof(double )))));
  double (*B)[500 + 0][500 + 0] =
      ((double (*)[500 + 0][500 + 0])(polybench_alloc_data(((500 + 0) * (500 + 0)),(sizeof(double )))));
  init_array(n, *X, *A, *B);
  /* Time only the kernel call. */
  polybench_timer_start();
  kernel_adi(tsteps,n, *X, *A, *B);
  polybench_timer_stop();
  polybench_timer_print();
  /* Dead-code-elimination guard: condition is never true in practice. */
  if (argc > 42 && !strcmp(argv[0],""))
    print_array(n, *X);
  /* Be clean. */
  free(((void *)X));
  free(((void *)A));
  free(((void *)B));
  omprace_fini();
  return 0;
}
|
GB_binop__pow_fp64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__pow_fp64
// A.*B function (eWiseMult): GB_AemultB__pow_fp64
// A*D function (colscale): (none)
// D*A function (rowscale): (none)
// C+=B function (dense accum): GB_Cdense_accumB__pow_fp64
// C+=b function (dense accum): GB_Cdense_accumb__pow_fp64
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__pow_fp64
// C=scalar+B GB_bind1st__pow_fp64
// C=scalar+B' GB_bind1st_tran__pow_fp64
// C=A+scalar GB_bind2nd__pow_fp64
// C=A'+scalar GB_bind2nd_tran__pow_fp64
// C type: double
// A type: double
// B,b type: double
// BinaryOp: cij = GB_pow (aij, bij)
#define GB_ATYPE \
double
#define GB_BTYPE \
double
#define GB_CTYPE \
double
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
double bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
double t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = GB_pow (x, y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_POW || GxB_NO_FP64 || GxB_NO_POW_FP64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// Disabled for POW: no dense C += A+B kernel is generated for this operator;
// "(none)" is the code generator's placeholder name.
void (none)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense, applying cij = GB_pow(aij,bij)
// elementwise via the template; the GB_* macros above configure the types
// and operator the template expands with.
GrB_Info GB_Cdense_ewise3_noaccum__pow_fp64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    // operator compiled out (see GB_DISABLE): caller falls back to generic
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate sparse matrix B into dense matrix C with the POW
// operator.  The slice arrays partition B's entries into ntasks balanced
// tasks for the nthreads-way parallel template.
GrB_Info GB_Cdense_accumB__pow_fp64
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    // operator compiled out: caller falls back to the generic kernel
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b (passed type-erased via p_bwork) into the
// dense matrix C with the POW operator.
GrB_Info GB_Cdense_accumb__pow_fp64
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    // operator compiled out: caller falls back to the generic kernel
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type double
        double bwork = (*((double *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): appears unreachable (the block above always returns);
    // kept exactly as generated since this file is auto-generated.
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
// Disabled for POW: no C = A*D (colscale) kernel is generated for this
// operator; "(none)" is the code generator's placeholder name.
GrB_Info (none)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double *GB_RESTRICT Cx = (double *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
// Disabled for POW: no C = D*B (rowscale) kernel is generated for this
// operator.  Placeholder corrected from "(node)" to "(none)" to match the
// generator's convention used by every other disabled method in this file.
GrB_Info (none)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double *GB_RESTRICT Cx = (double *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
#undef GB_FREE_ALL
#define GB_FREE_ALL \
{ \
GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}
// eWiseAdd: C = A+B or C<M> = A+B with cij = GB_pow(aij,bij) wherever A
// and/or B has an entry.  The slice workspaces are allocated inside the
// template and released by GB_FREE_ALL (defined just above).
GrB_Info GB_AaddB__pow_fp64
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    // operator compiled out: caller falls back to the generic kernel
    return (GrB_NO_VALUE) ;
    #else
    // per-matrix slice workspaces, filled in by the template as needed
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_add_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B with cij = GB_pow(aij,bij) on the
// intersection of the patterns of A and B.
GrB_Info GB_AemultB__pow_fp64
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    // operator compiled out: caller falls back to the generic kernel
    return (GrB_NO_VALUE) ;
    #else
    // per-matrix slice workspaces, filled in by the template as needed
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_emult_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = GB_pow (x, Bx [p]) for every entry present in B's bitmap Bb
// (Bb == NULL means all anz entries are present, per the GBB macro).
GrB_Info GB_bind1st__pow_fp64
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *GB_RESTRICT Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed views of the type-erased arguments
    double *Cx = (double *) Cx_output ;
    double *Bx = (double *) Bx_input ;
    const double x = (*((double *) x_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip positions with no entry in the bitmap
        if (!GBB (Bb, p)) continue ;
        Cx [p] = GB_pow (x, Bx [p]) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = GB_pow (Ax [p], y) for every entry present in A's bitmap Ab
// (Ab == NULL means all anz entries are present, per the GBB macro).
GrB_Info GB_bind2nd__pow_fp64
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *GB_RESTRICT Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed views of the type-erased arguments
    double *Cx = (double *) Cx_output ;
    double *Ax = (double *) Ax_input ;
    const double y = (*((double *) y_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip positions with no entry in the bitmap
        if (!GBB (Ab, p)) continue ;
        Cx [p] = GB_pow (Ax [p], y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = Ax [pA] ; \
Cx [pC] = GB_pow (x, aij) ; \
}
// C = op (x, A'): transpose A and apply cij = GB_pow (x, aij) via the
// GB_CAST_OP macro defined just above.
GrB_Info GB_bind1st_tran__pow_fp64
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    // (both types are double here, so the redefinition is a no-op in
    // effect, but it keeps the generated pattern uniform)
    #undef GB_ATYPE
    #define GB_ATYPE \
        double
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double x = (*((const double *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code following this function
    #undef GB_ATYPE
    #define GB_ATYPE \
        double
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = Ax [pA] ; \
Cx [pC] = GB_pow (aij, y) ; \
}
// C = op (A', y): transpose A and apply cij = GB_pow (aij, y) via the
// GB_CAST_OP macro defined just above.
GrB_Info GB_bind2nd_tran__pow_fp64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    // operator compiled out: caller falls back to the generic kernel
    return (GrB_NO_VALUE) ;
    #else
    double y = (*((const double *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
target_teams_distribute_parallel_for_simd_misc_messages.c | // RUN: %clang_cc1 -fsyntax-only -fopenmp -fopenmp-version=45 -verify=expected,omp45 %s -Wuninitialized
// RUN: %clang_cc1 -fsyntax-only -fopenmp -fopenmp-version=50 -verify=expected,omp50 %s -Wuninitialized
// RUN: %clang_cc1 -fsyntax-only -fopenmp-simd -fopenmp-version=45 -verify=expected,omp45 %s -Wuninitialized
// RUN: %clang_cc1 -fsyntax-only -fopenmp-simd -fopenmp-version=50 -verify=expected,omp50 %s -Wuninitialized
// expected-error@+1 {{unexpected OpenMP directive '#pragma omp target teams distribute parallel for simd'}}
#pragma omp target teams distribute parallel for simd
// expected-error@+1 {{unexpected OpenMP directive '#pragma omp target teams distribute parallel for simd'}}
#pragma omp target teams distribute parallel for simd foo
// The bare directive is valid on a canonical for loop; anything else is diagnosed.
void test_no_clause(void) {
  int i;
#pragma omp target teams distribute parallel for simd
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+2 {{statement after '#pragma omp target teams distribute parallel for simd' must be a for loop}}
#pragma omp target teams distribute parallel for simd
  ++i;
}
// Branches (goto/return) may not enter or leave the OpenMP region's protected scope.
void test_branch_protected_scope(void) {
  int i = 0;
L1:
  ++i;
  int x[24];
#pragma omp target teams distribute parallel for simd
  for (i = 0; i < 16; ++i) {
    if (i == 5)
      goto L1; // expected-error {{use of undeclared label 'L1'}}
    else if (i == 6)
      return; // expected-error {{cannot return from OpenMP region}}
    else if (i == 7)
      goto L2;
    else if (i == 8) {
    L2:
      x[i]++;
    }
  }
  if (x[0] == 0)
    goto L2; // expected-error {{use of undeclared label 'L2'}}
  else if (x[1] == 1)
    goto L1;
}
// Unknown clause tokens after the directive are ignored with a warning.
void test_invalid_clause(void) {
  int i;
// expected-warning@+1 {{extra tokens at the end of '#pragma omp target teams distribute parallel for simd' are ignored}}
#pragma omp target teams distribute parallel for simd foo bar
  for (i = 0; i < 16; ++i)
    ;
}
// Stray non-identifier tokens (';', ',') after the directive or its clauses are ignored with a warning.
void test_non_identifiers(void) {
  int i, x;
// expected-warning@+1 {{extra tokens at the end of '#pragma omp target teams distribute parallel for simd' are ignored}}
#pragma omp target teams distribute parallel for simd;
  for (i = 0; i < 16; ++i)
    ;
// expected-warning@+1 {{extra tokens at the end of '#pragma omp target teams distribute parallel for simd' are ignored}}
#pragma omp target teams distribute parallel for simd private(x);
  for (i = 0; i < 16; ++i)
    ;
// expected-warning@+1 {{extra tokens at the end of '#pragma omp target teams distribute parallel for simd' are ignored}}
#pragma omp target teams distribute parallel for simd, private(x);
  for (i = 0; i < 16; ++i)
    ;
}
extern int foo(void);
// Exercises parsing and semantic checks of the 'collapse' clause argument.
void test_collapse(void) {
  int i;
// expected-error@+1 {{expected '('}}
#pragma omp target teams distribute parallel for simd collapse
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target teams distribute parallel for simd collapse(
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}}
#pragma omp target teams distribute parallel for simd collapse()
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target teams distribute parallel for simd collapse(,
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target teams distribute parallel for simd collapse(, )
  for (i = 0; i < 16; ++i)
    ;
// expected-warning@+2 {{extra tokens at the end of '#pragma omp target teams distribute parallel for simd' are ignored}}
// expected-error@+1 {{expected '('}}
#pragma omp target teams distribute parallel for simd collapse 4)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target teams distribute parallel for simd collapse(4
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp target teams distribute parallel for simd', but found only 1}}
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target teams distribute parallel for simd collapse(4,
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp target teams distribute parallel for simd', but found only 1}}
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target teams distribute parallel for simd collapse(4, )
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp target teams distribute parallel for simd', but found only 1}}
// expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target teams distribute parallel for simd collapse(4)
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp target teams distribute parallel for simd', but found only 1}}
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target teams distribute parallel for simd collapse(4 4)
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp target teams distribute parallel for simd', but found only 1}}
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target teams distribute parallel for simd collapse(4, , 4)
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp target teams distribute parallel for simd', but found only 1}}
#pragma omp target teams distribute parallel for simd collapse(4)
  for (int i1 = 0; i1 < 16; ++i1)
    for (int i2 = 0; i2 < 16; ++i2)
      for (int i3 = 0; i3 < 16; ++i3)
        for (int i4 = 0; i4 < 16; ++i4)
          foo();
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target teams distribute parallel for simd collapse(4, 8)
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp target teams distribute parallel for simd', but found only 1}}
// expected-error@+1 {{integer constant expression}}
#pragma omp target teams distribute parallel for simd collapse(2.5)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{integer constant expression}}
#pragma omp target teams distribute parallel for simd collapse(foo())
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp target teams distribute parallel for simd collapse(-5)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp target teams distribute parallel for simd collapse(0)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp target teams distribute parallel for simd collapse(5 - 5)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+4 {{OpenMP constructs may not be nested inside a simd region}}
#pragma omp target teams distribute parallel for simd collapse(2) firstprivate(i) // expected-note {{defined as firstprivate}}
  for (i = 0; i < 16; ++i) // expected-error {{loop iteration variable in the associated loop of 'omp target teams distribute parallel for simd' directive may not be firstprivate, predetermined as lastprivate}}
    for (int j = 0; j < 16; ++j)
#pragma omp parallel for reduction(+ : i, j)
      for (int k = 0; k < 16; ++k)
        i += j;
}
// Exercises parsing and semantic checks of the 'private' clause.
void test_private(void) {
  int i;
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target teams distribute parallel for simd private(
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp target teams distribute parallel for simd private(,
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 2 {{expected expression}}
#pragma omp target teams distribute parallel for simd private(, )
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}}
#pragma omp target teams distribute parallel for simd private()
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}}
#pragma omp target teams distribute parallel for simd private(int)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected variable name}}
#pragma omp target teams distribute parallel for simd private(0)
  for (i = 0; i < 16; ++i)
    ;
  int x, y, z;
#pragma omp target teams distribute parallel for simd private(x)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target teams distribute parallel for simd private(x, y)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target teams distribute parallel for simd private(x, y, z)
  for (i = 0; i < 16; ++i) {
    x = y * i + z;
  }
}
// Exercises parsing and semantic checks of the 'lastprivate' clause.
void test_lastprivate(void) {
  int i;
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 {{expected expression}}
#pragma omp target teams distribute parallel for simd lastprivate(
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp target teams distribute parallel for simd lastprivate(,
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 2 {{expected expression}}
#pragma omp target teams distribute parallel for simd lastprivate(, )
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}}
#pragma omp target teams distribute parallel for simd lastprivate()
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}}
#pragma omp target teams distribute parallel for simd lastprivate(int)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected variable name}}
#pragma omp target teams distribute parallel for simd lastprivate(0)
  for (i = 0; i < 16; ++i)
    ;
  int x, y, z;
#pragma omp target teams distribute parallel for simd lastprivate(x)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target teams distribute parallel for simd lastprivate(x, y)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp target teams distribute parallel for simd lastprivate(x, y, z)
  for (i = 0; i < 16; ++i)
    ;
}
// Exercises 'firstprivate', its conflict with 'lastprivate', and the simdlen <= safelen rule.
void test_firstprivate(void) {
  int i;
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 {{expected expression}}
#pragma omp target teams distribute parallel for simd firstprivate(
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp target teams distribute parallel for simd firstprivate(,
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 2 {{expected expression}}
#pragma omp target teams distribute parallel for simd firstprivate(, )
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}}
#pragma omp target teams distribute parallel for simd firstprivate()
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected expression}}
#pragma omp target teams distribute parallel for simd firstprivate(int)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{expected variable name}}
#pragma omp target teams distribute parallel for simd firstprivate(0)
  for (i = 0; i < 16; ++i)
    ;
  int x, y, z;
// expected-error@+1 {{lastprivate variable cannot be firstprivate}} expected-note@+1 {{defined as lastprivate}}
#pragma omp target teams distribute parallel for simd lastprivate(x) firstprivate(x)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 2 {{lastprivate variable cannot be firstprivate}} expected-note@+1 2 {{defined as lastprivate}}
#pragma omp target teams distribute parallel for simd lastprivate(x, y) firstprivate(x, y)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 3 {{lastprivate variable cannot be firstprivate}} expected-note@+1 3 {{defined as lastprivate}}
#pragma omp target teams distribute parallel for simd lastprivate(x, y, z) firstprivate(x, y, z)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+1 {{the value of 'simdlen' parameter must be less than or equal to the value of the 'safelen' parameter}}
#pragma omp target teams distribute parallel for simd simdlen(64) safelen(8)
  for (i = 0; i < 16; ++i)
    ;
}
// The loop iteration variable must be of integer or pointer type.
void test_loop_messages(void) {
  float a[100], b[100], c[100];
// expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp target teams distribute parallel for simd
  for (float fi = 0; fi < 10.0; fi++) {
    c[(int)fi] = a[(int)fi] + b[(int)fi];
  }
// expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp target teams distribute parallel for simd
  for (double fi = 0; fi < 10.0; fi++) {
    c[(int)fi] = a[(int)fi] + b[(int)fi];
  }
}
// Exercises the 'nontemporal' clause (OpenMP 5.0; rejected under -fopenmp-version=45).
void test_nontemporal(void) {
  int i;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp target teams distribute parallel for simd'}} expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target teams distribute parallel for simd nontemporal(
  for (i = 0; i < 16; ++i)
    ;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp target teams distribute parallel for simd'}} expected-error@+1 2 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target teams distribute parallel for simd nontemporal(,
  for (i = 0; i < 16; ++i)
    ;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp target teams distribute parallel for simd'}} expected-error@+1 2 {{expected expression}}
#pragma omp target teams distribute parallel for simd nontemporal(, )
  for (i = 0; i < 16; ++i)
    ;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp target teams distribute parallel for simd'}} expected-error@+1 {{expected expression}}
#pragma omp target teams distribute parallel for simd nontemporal()
  for (i = 0; i < 16; ++i)
    ;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp target teams distribute parallel for simd'}} expected-error@+1 {{expected expression}}
#pragma omp target teams distribute parallel for simd nontemporal(int)
  for (i = 0; i < 16; ++i)
    ;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp target teams distribute parallel for simd'}} omp50-error@+1 {{expected variable name}}
#pragma omp target teams distribute parallel for simd nontemporal(0)
  for (i = 0; i < 16; ++i)
    ;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp target teams distribute parallel for simd'}} expected-error@+1 {{use of undeclared identifier 'x'}}
#pragma omp target teams distribute parallel for simd nontemporal(x)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+2 {{use of undeclared identifier 'x'}}
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp target teams distribute parallel for simd'}} expected-error@+1 {{use of undeclared identifier 'y'}}
#pragma omp target teams distribute parallel for simd nontemporal(x, y)
  for (i = 0; i < 16; ++i)
    ;
// expected-error@+3 {{use of undeclared identifier 'x'}}
// expected-error@+2 {{use of undeclared identifier 'y'}}
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp target teams distribute parallel for simd'}} expected-error@+1 {{use of undeclared identifier 'z'}}
#pragma omp target teams distribute parallel for simd nontemporal(x, y, z)
  for (i = 0; i < 16; ++i)
    ;
  int x, y;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp target teams distribute parallel for simd'}} expected-error@+1 {{expected ',' or ')' in 'nontemporal' clause}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target teams distribute parallel for simd nontemporal(x :)
  for (i = 0; i < 16; ++i)
    ;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp target teams distribute parallel for simd'}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} expected-error@+1 {{expected ',' or ')' in 'nontemporal' clause}}
#pragma omp target teams distribute parallel for simd nontemporal(x :, )
  for (i = 0; i < 16; ++i)
    ;
// omp50-note@+2 {{defined as nontemporal}}
// omp45-error@+1 2 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp target teams distribute parallel for simd'}} omp50-error@+1 {{a variable cannot appear in more than one nontemporal clause}}
#pragma omp target teams distribute parallel for simd nontemporal(x) nontemporal(x)
  for (i = 0; i < 16; ++i)
    ;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp target teams distribute parallel for simd'}}
#pragma omp target teams distribute parallel for simd private(x) nontemporal(x)
  for (i = 0; i < 16; ++i)
    ;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp target teams distribute parallel for simd'}}
#pragma omp target teams distribute parallel for simd nontemporal(x) private(x)
  for (i = 0; i < 16; ++i)
    ;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp target teams distribute parallel for simd'}} expected-note@+1 {{to match this '('}} expected-error@+1 {{expected ',' or ')' in 'nontemporal' clause}} expected-error@+1 {{expected ')'}}
#pragma omp target teams distribute parallel for simd nontemporal(x, y : 0)
  for (i = 0; i < 16; ++i)
    ;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp target teams distribute parallel for simd'}}
#pragma omp target teams distribute parallel for simd nontemporal(x) lastprivate(x)
  for (i = 0; i < 16; ++i)
    ;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp target teams distribute parallel for simd'}}
#pragma omp target teams distribute parallel for simd lastprivate(x) nontemporal(x)
  for (i = 0; i < 16; ++i)
    ;
}
|
move_shallow_water_particle_utility.h | // | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ `
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Miguel Maso Sotomayor
// Pablo Becker
//
#ifndef KRATOS_MOVE_SHALLOW_WATER_PARTICLE_UTILITY_H_INCLUDED
#define KRATOS_MOVE_SHALLOW_WATER_PARTICLE_UTILITY_H_INCLUDED
///@defgroup MoveShallowWaterParticleUtility
///@brief Utility to move particles on the eulerian mesh with an
/// explicit scheme. This is the basic tool of the pfem2 framework
// System includes
#include <string>
#include <iostream>
#include <algorithm>
// External includes
// Project includes
#include "includes/define.h"
#include "includes/node.h"
#include "includes/checks.h"
#include "includes/dof.h"
#include "includes/variables.h"
#include "containers/array_1d.h"
#include "containers/data_value_container.h"
#include "includes/mesh.h"
#include "utilities/math_utils.h"
#include "includes/global_pointer_variables.h"
#include "processes/node_erase_process.h"
#include "utilities/geometry_utilities.h"
#include "includes/model_part.h"
#include "includes/kratos_parameters.h"
#include "spatial_containers/spatial_containers.h"
#include "spatial_containers/cell.h"
#include "spatial_containers/bins_dynamic_objects.h"
#include "utilities/spatial_containers_configure.h"
#include "geometries/line_2d_2.h"
#include "geometries/triangle_2d_3.h"
#include "geometries/triangle_3d_3.h"
#include "geometries/point.h"
#include "shallow_water_application_variables.h"
#include "shallow_water_particle.h"
#include "utilities/openmp_utils.h"
#include "time.h"
//#include "processes/process.h"
namespace Kratos
{
//this class is to be modified by the user to customize the interpolation process
template< unsigned int TDim>
class MoveShallowWaterParticleUtility
{
public:
typedef SpatialContainersConfigure<TDim> Configure;
typedef typename Configure::PointType PointType;
typedef typename Configure::ContainerType ContainerType;
typedef typename Configure::IteratorType IteratorType;
typedef typename Configure::ResultContainerType ResultContainerType;
typedef typename Configure::ResultIteratorType ResultIteratorType;
typedef PointerVector< ShallowParticle, ShallowParticle*, std::vector<ShallowParticle*> > ParticlePointerVector;
KRATOS_CLASS_POINTER_DEFINITION(MoveShallowWaterParticleUtility);
//template<unsigned int TDim>
/// Constructor.
/** Validates the settings against defaults, renumbers the elements so that
 * Id == (position in container + 1) for O(1) lookups later, computes the
 * MEAN_SIZE of nodes (mean distance to neighbours) and elements (shortest
 * edge), allocates the global particle vector plus the per-element
 * particle-pointer vectors (double sized, one half per timestep parity),
 * and seeds the initial particles at element Gauss-point positions,
 * interpolating the convected scalar/vector from the mesh nodes.
 *
 * @param rModelPart model part holding the eulerian mesh
 * @param rParameters settings: "convection_scalar_variable",
 *        "convection_vector_variable", "maximum_number_of_particles"
 */
MoveShallowWaterParticleUtility(ModelPart& rModelPart, Parameters rParameters) :
mrModelPart(rModelPart),
mScalarVar1(&KratosComponents< Variable<double> >::Get( rParameters["convection_scalar_variable"].GetString() ) ),
mVectorVar1(&KratosComponents< Variable<array_1d<double,3> > >::Get( rParameters["convection_vector_variable"].GetString() ) )
{
KRATOS_TRY
std::cout << "Initializing moveparticle utility for scalar transport" << std::endl;
Parameters default_parameters( R"(
{
"convection_scalar_variable" : "HEIGHT",
"convection_vector_variable" : "VELOCITY",
"maximum_number_of_particles" : 16
} )" );
// Now validate against defaults -- this also ensures no type mismatch
rParameters.ValidateAndAssignDefaults(default_parameters);
m_scalar_var1_name = rParameters["convection_scalar_variable"].GetString();
m_vector_var1_name = rParameters["convection_vector_variable"].GetString();
// NOTE(review): the default value (16) is an integer but it is read with
// GetDouble() -- confirm the intended parameter type.
mMaxNumberOfParticles = rParameters["maximum_number_of_particles"].GetDouble();
Check();
//storing water and air density and their inverses, just in case it is needed for the streamline integration
//loop in elements to change their ID to their position in the array. Easier to get information later.
//DO NOT PARALELIZE THIS! IT MUST BE SERIAL!!!!!!!!!!!!!!!!!!!!!!
ModelPart::ElementsContainerType::iterator ielembegin = mrModelPart.ElementsBegin();
for(unsigned int ii=0; ii<mrModelPart.Elements().size(); ii++)
{
ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii;
ielem->SetId(ii+1);
}
mLastElemId= (mrModelPart.ElementsEnd()-1)->Id();
int node_id=0;
// we look for the smallest edge. could be used as a weighting function when going lagrangian->eulerian instead of traditional shape functions(method currently used)
ModelPart::NodesContainerType::iterator inodebegin = mrModelPart.NodesBegin();
std::vector<unsigned int> node_partition;
#ifdef _OPENMP
int number_of_threads = omp_get_max_threads();
#else
int number_of_threads = 1;
#endif
OpenMPUtils::CreatePartition(number_of_threads, mrModelPart.Nodes().size(), node_partition);
#pragma omp parallel for
for(int kkk=0; kkk<number_of_threads; kkk++)
{
for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++)
{
ModelPart::NodesContainerType::iterator pnode = inodebegin+ii;
array_1d<double,3> position_node;
double distance=0.0;
position_node = pnode->Coordinates();
GlobalPointersVector< Node<3> >& rneigh = pnode->GetValue(NEIGHBOUR_NODES);
//we loop all the nodes to check all the edges
const double number_of_neighbours = static_cast<double>(rneigh.size());
// NOTE: despite the "smallest edge" comment above, this loop actually
// accumulates the MEAN distance to the neighbouring nodes.
for( GlobalPointersVector<Node<3> >::iterator inode = rneigh.begin(); inode!=rneigh.end(); inode++)
{
array_1d<double,3> position_difference;
position_difference = inode->Coordinates() - position_node;
const double current_distance = norm_2( position_difference );
distance += current_distance / number_of_neighbours;
}
//and we save the mean edge length as the nodal size.
pnode->SetValue(MEAN_SIZE, distance);
node_id=pnode->GetId();
}
}
mLastNodeId=node_id;
//we also calculate the element mean size in the same way, for the courant number
//also we set the right size to the LHS column for the pressure enrichments, in order to recover correctly the enrichment pressure
std::vector<unsigned int> element_partition;
OpenMPUtils::CreatePartition(number_of_threads, mrModelPart.Elements().size(), element_partition);
//before doing anything we must reset the vector of nodes contained by each element (particles that are inside each element.
#pragma omp parallel for
for(int kkk=0; kkk<number_of_threads; kkk++)
{
for(unsigned int ii=element_partition[kkk]; ii<element_partition[kkk+1]; ii++)
{
ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii;
double elem_size;
array_1d<double,3> Edge(3,0.0);
// First edge: node 0 -> node 1 (squared length).
Edge = ielem->GetGeometry()[1].Coordinates() - ielem->GetGeometry()[0].Coordinates();
elem_size = Edge[0]*Edge[0];
for (unsigned int d = 1; d < TDim; d++)
elem_size += Edge[d]*Edge[d];
// Remaining edges: keep the SHORTEST squared length found.
for (unsigned int i = 2; i < (TDim+1); i++)
for(unsigned int j = 0; j < i; j++)
{
Edge = ielem->GetGeometry()[i].Coordinates() - ielem->GetGeometry()[j].Coordinates();
double Length = Edge[0]*Edge[0];
for (unsigned int d = 1; d < TDim; d++)
Length += Edge[d]*Edge[d];
if (Length < elem_size) elem_size = Length;
}
elem_size = sqrt(elem_size);
ielem->SetValue(MEAN_SIZE, elem_size);
}
}
//matrix containing the position of the 4/15/45 particles that we will seed at the beggining
BoundedMatrix<double, 5*(1+TDim), 3 > pos;
BoundedMatrix<double, 5*(1+TDim), (1+TDim) > N;
int particle_id=0;
mNElems = mrModelPart.Elements().size();
std::cout << " about to resize vectors" << std::endl;
//setting the right size to the vector containing the particles assigned to each element
//particles vector. this vector contains ALL the particles in the simulation.
mParticlesVector.resize(mNElems*mMaxNumberOfParticles);
//and this vector contains the current number of particles that are in each element (currently zero)
mNumOfParticlesInElems.resize(mNElems);
mNumOfParticlesInElems=ZeroVector(mNElems);
//when moving the particles, an auxiliary vector is necessary (to store the previous number)
mNumOfParticlesInElemsAux.resize(mNElems);
//each element will have a list of pointers to all the particles that are inside.
//this vector contains the pointers to the vector of (particle) pointers of each element.
mVectorOfParticlePointersVectors.resize(mNElems);
//int artz;
//std::cin >> artz;
int i_int=0; //careful! it's not the id, but the position inside the array!
std::cout << " about to create particles" << std::endl;
//now we seed: LOOP IN ELEMENTS
//using loop index, DO NOT paralelize this! change lines : mparticles_in_elems_pointers((ii*mMaxNumberOfParticles)+mparticles_in_elems_integers(ii)) = pparticle; and the next one
mOffset=0;
//ShallowParticle& firstparticle = mParticlesVector[0];
for(unsigned int ii=0; ii<mrModelPart.Elements().size(); ii++)
{
ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii;
//(ielem->GetValue(BED_PARTICLE_POINTERS)) = ParticlePointerVector( mMaxNumberOfParticles*2, &firstparticle );
//ParticlePointerVector& particle_pointers = (ielem->GetValue(BED_PARTICLE_POINTERS));
//now we link the mpointers_to_particle_pointers_vectors to the corresponding element
//mpointers_to_particle_pointers_vectors(ii) = &particle_pointers;
//now we resize the vector of particle pointers. it is double sized because we move the particles from an initial position (first half) to a final position (second half).
//for(int j=0; j<(mMaxNumberOfParticles*2); j++)
// particle_pointers.push_back(&firstparticle);
mVectorOfParticlePointersVectors[ii] = ParticlePointerVector( mMaxNumberOfParticles*2 );
ParticlePointerVector& particle_pointers = mVectorOfParticlePointersVectors[ii];
//int & number_of_particles = ielem->GetValue(NUMBER_OF_BED_PARTICLES);
int & number_of_particles = mNumOfParticlesInElems[ii];
number_of_particles=0;
Geometry< Node<3> >& geom = ielem->GetGeometry();
//unsigned int elem_id = ielem->Id();
ComputeGaussPointPositions_initial(geom, pos, N); //we also have the standard (4), and 45
//now we seed the particles in the current element
for (unsigned int j = 0; j < pos.size1(); j++)
{
++particle_id;
ShallowParticle& pparticle = mParticlesVector[particle_id-1];
//~ pparticle.X()=pos(j,0);
//~ pparticle.Y()=pos(j,1);
//~ pparticle.Z()=pos(j,2);
pparticle.Coordinates() = row(pos,j);
pparticle.GetEraseFlag()=false;
array_1d<float, 3 > & vector1 = pparticle.GetVector1();
float & scalar1 = pparticle.GetScalar1();
noalias(vector1) = ZeroVector(3);
scalar1=0.0;
// Interpolate the nodal scalar/vector to the seeding point using the
// shape function values N(j, k).
for (unsigned int k = 0; k < (TDim+1); k++)
{
scalar1 += N(j, k) * geom[k].FastGetSolutionStepValue(*mScalarVar1);
noalias(vector1) += N(j, k) * geom[k].FastGetSolutionStepValue(*mVectorVar1);
}
particle_pointers(j) = &pparticle;
number_of_particles++ ;
}
++i_int;
}
mNParticles=particle_id; //we save the last particle created as the total number of particles we have. For the moment this is true.
std::cout << " [Creating particles : " << mNParticles << " particles created]" << std::endl;
mParticlePrintingToolInitialized=false;
KRATOS_CATCH("")
}
/// Destructor. The utility owns no raw resources; all members release
/// themselves via RAII, so the compiler-generated destructor suffices.
~MoveShallowWaterParticleUtility() = default;
void MountBin()
{
KRATOS_TRY
//copy the elements to a new container, as the list will
//be shuffled duringthe construction of the tree
ContainerType& rElements = mrModelPart.ElementsArray();
IteratorType it_begin = rElements.begin();
IteratorType it_end = rElements.end();
//const int number_of_elem = rElements.size();
typename BinsObjectDynamic<Configure>::Pointer paux = typename BinsObjectDynamic<Configure>::Pointer(new BinsObjectDynamic<Configure>(it_begin, it_end ) );
paux.swap(mpBinsObjectDynamic);
//BinsObjectDynamic<Configure> mpBinsObjectDynamic(it_begin, it_end );
std::cout << " finished mounting Bins" << std::endl;
KRATOS_CATCH("")
}
/// Calculates the mean velocity
/** This function computes the mean velocity within an element and
* stores it in MEAN_VEL_OVER_ELEM_SIZE variable.
* This variable keeps the courant number aprox 0.1 in each substep
*
* @see MoveParticle
* @see MoveParticleInverseWay
*/
void CalculateVelOverElemSize()
{
KRATOS_TRY
const double nodal_weight = 1.0/ (1.0 + double (TDim) );
ModelPart::ElementsContainerType::iterator ielembegin = mrModelPart.ElementsBegin();
std::vector<unsigned int> element_partition;
#ifdef _OPENMP
int number_of_threads = omp_get_max_threads();
#else
int number_of_threads = 1;
#endif
OpenMPUtils::CreatePartition(number_of_threads, mrModelPart.Elements().size(), element_partition);
#pragma omp parallel for
for(int kkk=0; kkk<number_of_threads; kkk++)
{
for(unsigned int ii=element_partition[kkk]; ii<element_partition[kkk+1]; ii++)
{
ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii;
Geometry<Node<3> >& geom = ielem->GetGeometry();
array_1d<double, 3 >vector_mean_velocity=ZeroVector(3);
for (unsigned int i=0; i != (TDim+1) ; i++)
vector_mean_velocity += geom[i].FastGetSolutionStepValue(VELOCITY);
vector_mean_velocity *= nodal_weight;
//~ const double mean_velocity = sqrt ( pow(vector_mean_velocity[0],2) + pow(vector_mean_velocity[1],2) + pow(vector_mean_velocity[2],2) );
const double mean_velocity = norm_2( vector_mean_velocity );
ielem->SetValue(MEAN_VEL_OVER_ELEM_SIZE, mean_velocity / ( ielem->GetValue(MEAN_SIZE) ) );
}
}
KRATOS_CATCH("")
}
/// Reset the boundary conditions
/** When a variable is fixed this function resets the nodal values
* with the previous time step
*/
void ResetBoundaryConditions()
{
KRATOS_TRY
const auto& vector_var_x = KratosComponents<Variable<double>>::Get(m_vector_var1_name+std::string("_X"));
const auto& vector_var_y = KratosComponents<Variable<double>>::Get(m_vector_var1_name+std::string("_Y"));
const auto& vector_var_z = KratosComponents<Variable<double>>::Get(m_vector_var1_name+std::string("_Z"));
ModelPart::NodesContainerType::iterator inodebegin = mrModelPart.NodesBegin();
std::vector<unsigned int> node_partition;
#ifdef _OPENMP
int number_of_threads = omp_get_max_threads();
#else
int number_of_threads = 1;
#endif
OpenMPUtils::CreatePartition(number_of_threads, mrModelPart.Nodes().size(), node_partition);
#pragma omp parallel for
for(int kkk=0; kkk<number_of_threads; kkk++)
{
for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++)
{
ModelPart::NodesContainerType::iterator inode = inodebegin+ii;
if (inode->IsFixed(*mScalarVar1))
{
inode->FastGetSolutionStepValue(*mScalarVar1)=inode->GetSolutionStepValue(*mScalarVar1,1);
}
if (inode->IsFixed(vector_var_x))
{
inode->FastGetSolutionStepValue(vector_var_x)=inode->GetSolutionStepValue(vector_var_x,1);
}
if (inode->IsFixed(vector_var_y))
{
inode->FastGetSolutionStepValue(vector_var_y)=inode->GetSolutionStepValue(vector_var_y,1);
}
if (inode->IsFixed(vector_var_z))
{
inode->FastGetSolutionStepValue(vector_var_z)=inode->GetSolutionStepValue(vector_var_z,1);
}
}
}
KRATOS_CATCH("")
}
/// Auxiliar function to compute the "delta variables"
/** Delta variables are the difference between two time steps.
* It's value is used to update particles info
*
* @see CorrectParticlesWithoutMovingUsingDeltaVariables
*/
void CalculateDeltaVariables()
{
KRATOS_TRY
ModelPart::NodesContainerType::iterator inodebegin = mrModelPart.NodesBegin();
std::vector<unsigned int> node_partition;
#ifdef _OPENMP
int number_of_threads = omp_get_max_threads();
#else
int number_of_threads = 1;
#endif
OpenMPUtils::CreatePartition(number_of_threads, mrModelPart.Nodes().size(), node_partition);
#pragma omp parallel for
for(int kkk=0; kkk<number_of_threads; kkk++)
{
for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++)
{
ModelPart::NodesContainerType::iterator inode = inodebegin+ii;
inode->FastGetSolutionStepValue(DELTA_SCALAR1) = inode->FastGetSolutionStepValue(*mScalarVar1) - inode->FastGetSolutionStepValue(PROJECTED_SCALAR1);
inode->FastGetSolutionStepValue(DELTA_VECTOR1) = inode->FastGetSolutionStepValue(*mVectorVar1) - inode->FastGetSolutionStepValue(PROJECTED_VECTOR1); //PROJECTED_VECTOR1
}
}
KRATOS_CATCH("")
}
/// Auxiliar function
/** This function copy a scalar variable value to the previous time step
*/
void CopyScalarVarToPreviousTimeStep(const Variable<double>& OriginVariable,
ModelPart::NodesContainerType& rNodes)
{
KRATOS_TRY
ModelPart::NodesContainerType::iterator inodebegin = rNodes.begin();
std::vector<unsigned int> node_partition;
#ifdef _OPENMP
int number_of_threads = omp_get_max_threads();
#else
int number_of_threads = 1;
#endif
OpenMPUtils::CreatePartition(number_of_threads, rNodes.size(), node_partition);
#pragma omp parallel for
for(int kkk=0; kkk<number_of_threads; kkk++)
{
for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++)
{
ModelPart::NodesContainerType::iterator inode = inodebegin+ii;
inode->GetSolutionStepValue(OriginVariable,1) = inode->FastGetSolutionStepValue(OriginVariable);
}
}
KRATOS_CATCH("")
}
/// Auxiliar function
/** This function copy a vector variable value to the previous time step
*/
void CopyVectorVarToPreviousTimeStep(const Variable<array_1d<double,3>>& OriginVariable,
ModelPart::NodesContainerType& rNodes)
{
KRATOS_TRY
ModelPart::NodesContainerType::iterator inodebegin = rNodes.begin();
std::vector<unsigned int> node_partition;
#ifdef _OPENMP
int number_of_threads = omp_get_max_threads();
#else
int number_of_threads = 1;
#endif
OpenMPUtils::CreatePartition(number_of_threads, rNodes.size(), node_partition);
#pragma omp parallel for
for(int kkk=0; kkk<number_of_threads; kkk++)
{
for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++)
{
ModelPart::NodesContainerType::iterator inode = inodebegin+ii;
noalias(inode->GetSolutionStepValue(OriginVariable,1)) = inode->FastGetSolutionStepValue(OriginVariable);
}
}
KRATOS_CATCH("")
}
/// Move all the particles
/** This function moves the particles across the streamlines
 * according to the velocity given by VELOCITY variable. The
 * movement is performed in nsubsteps, during a total time
 * of DELTA_TIME. Each particle is re-registered in the element where
 * it lands; particles that cannot be placed (element full, or lost
 * during the move) get their erase flag set.
 *
 * @see MoveParticle
 */
void MoveParticles()
{
KRATOS_TRY
const ProcessInfo& CurrentProcessInfo = mrModelPart.GetProcessInfo();
const int offset = mOffset; //the array of pointers for each element has twice the required size so that we use a part in odd timesteps and the other in even ones.
//moveparticlesdiff reads from the pointers of one part (ie odd) and saves into the other part (ie even part)
//since it is the only function in the whole procedure that does this, it must use alternatively one part and the other.
bool even_timestep;
if (offset!=0) even_timestep=false;
else even_timestep=true;
const int post_offset = mMaxNumberOfParticles * static_cast<int>(even_timestep); //and we also save the offset to know the location in which we will save the pointers after we've moved the particles
double delta_t = CurrentProcessInfo[DELTA_TIME];
array_1d<double,TDim+1> N;
const unsigned int max_results = 10000;
//double integration_distance= 2.0;
mMaxSubSteps = 10;
mMaxSubStepDt = delta_t / static_cast<double>(mMaxSubSteps);
std::vector<unsigned int> element_partition;
#ifdef _OPENMP
int number_of_threads = omp_get_max_threads();
#else
int number_of_threads = 1;
#endif
OpenMPUtils::CreatePartition(number_of_threads, mrModelPart.Elements().size(), element_partition);
ModelPart::ElementsContainerType::iterator ielembegin = mrModelPart.ElementsBegin();
//before doing anything we must reset the vector of nodes contained by each element (particles that are inside each element.
#pragma omp parallel for
for(int kkk=0; kkk<number_of_threads; kkk++)
{
for(unsigned int ii=element_partition[kkk]; ii<element_partition[kkk+1]; ii++)
{
//ModelPart::ElementsContainerType::iterator old_element = ielembegin+ii;
// Save the pre-move particle count and reset the live counter; particles
// are re-counted below as they land in their destination elements.
int & number_of_particles = mNumOfParticlesInElems[ii]; //old_element->GetValue(NUMBER_OF_BED_PARTICLES);
mNumOfParticlesInElemsAux[ii] = number_of_particles;
mNumOfParticlesInElems[ii] = 0;
//we reset the local vectors for a faster access;
}
}
std::cout << "convecting particles" << std::endl;
//We move the particles across the fixed mesh and saving change data into them (using the function MoveParticle)
// NOTE(review): this barrier sits outside any parallel region, so it binds to
// a team of one and is a no-op -- confirm whether it can be removed.
#pragma omp barrier
#pragma omp parallel for
for(int kkk=0; kkk<number_of_threads; kkk++)
{
ResultContainerType results(max_results);
GlobalPointersVector< Element > elements_in_trajectory;
elements_in_trajectory.resize(20);
for(unsigned int ielem = element_partition[kkk]; ielem<element_partition[kkk+1]; ielem++)
{
ModelPart::ElementsContainerType::iterator old_element = ielembegin+ielem;
const int old_element_id = old_element->Id();
ParticlePointerVector& old_element_particle_pointers = mVectorOfParticlePointersVectors[old_element_id-1];
if ( (results.size()) != max_results )
results.resize(max_results);
unsigned int number_of_elements_in_trajectory = 0; //excluding the origin one (current one, ielem)
for (int ii = 0; ii < mNumOfParticlesInElemsAux[ielem]; ii++)
{
ShallowParticle& pparticle = old_element_particle_pointers[offset+ii];
Element::Pointer pcurrent_element( *old_element.base() );
ResultIteratorType result_begin = results.begin();
bool & erase_flag=pparticle.GetEraseFlag();
if (erase_flag == false){
// MoveParticle updates both the particle position and pcurrent_element
// (the element the particle ends up in).
MoveParticle(pparticle,pcurrent_element,elements_in_trajectory,number_of_elements_in_trajectory,result_begin,max_results); //removed N from the arguments; not needed since the particle ALWAYS starts at a node and we do not care where it ends
const int current_element_id = pcurrent_element->Id();
int & number_of_particles_in_current_elem = mNumOfParticlesInElems[current_element_id-1];
if (number_of_particles_in_current_elem < mMaxNumberOfParticles && erase_flag == false)
{
ParticlePointerVector& current_element_particle_pointers = mVectorOfParticlePointersVectors[current_element_id-1];
// The destination element may be owned by another thread, so
// registration must be serialized; the capacity check is repeated
// inside because it may have changed since the check above.
#pragma omp critical
{
if (number_of_particles_in_current_elem < mMaxNumberOfParticles) // we cant go over this node, there's no room. otherwise we would be in the position of the first particle of the next element!!
{
current_element_particle_pointers(post_offset+number_of_particles_in_current_elem) = &pparticle;
number_of_particles_in_current_elem++ ;
KRATOS_ERROR_IF( number_of_particles_in_current_elem > mMaxNumberOfParticles ) <<
"In move shallow water particle utility: exceeded maximum number of particles" << std::endl;
//~ if (number_of_particles_in_current_elem > mMaxNumberOfParticles)
//~ KRATOS_WATCH("MAL");
}
else
{
pparticle.GetEraseFlag()=true; //so we just delete it!
}
}
}
else
{
pparticle.GetEraseFlag()=true; //so we just delete it!
}
}
}
}
}
// After having changed everything we change the status of the mOddTimeStep flag:
mOffset = post_offset;; //
KRATOS_CATCH("")
}
/// Transfer particles information to the mesh nodes
/** This function explicitly projects data from particles (lagrangian)
 * onto the eulerian mesh. Shape functions of the elements determine
 * the particle location within the element and its contribution to
 * each node as a weighting function (weight = N^2). Runs in three
 * stages: zero the targets, accumulate weighted contributions, then
 * normalize by the weight sum (YP).
 */
void TransferLagrangianToEulerian() //explicit
{
KRATOS_TRY
const double threshold = 1e-10 / (static_cast<double>(TDim)+1.0);
std::cout << "projecting info to mesh" << std::endl;
const int offset = mOffset;
// the array of pointers for each element has twice the required size so that
// we use a part in odd timesteps and the other in even ones.
//(flag managed only by MoveParticles)
// We must project data from the particles (lagrangian) onto the eulerian mesh
//int nnodes = mrModelPart.Nodes().size();
//array_1d<double,(n_nodes)> eulerian_nodes_sumweights;
// We save data from previous time step of the eulerian mesh in case we must reuse it later
// cos no particle was found around the nodes though we could've use a bigger buffer, to be changed later!
// after having saved data, we reset them to zero, this way it's easier to add the contribution
// of the surrounding particles.
ModelPart::NodesContainerType::iterator inodebegin = mrModelPart.NodesBegin();
std::vector<unsigned int> node_partition;
#ifdef _OPENMP
int number_of_threads = omp_get_max_threads();
#else
int number_of_threads = 1;
#endif
OpenMPUtils::CreatePartition(number_of_threads, mrModelPart.Nodes().size(), node_partition);
// Stage 1: zero the projection targets (PROJECTED_* and the weight sum YP).
#pragma omp parallel for
for(int kkk=0; kkk<number_of_threads; kkk++)
{
for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++)
{
ModelPart::NodesContainerType::iterator inode = inodebegin+ii;
inode->FastGetSolutionStepValue(PROJECTED_SCALAR1)=0.0;
inode->FastGetSolutionStepValue(PROJECTED_VECTOR1)=ZeroVector(3);
inode->FastGetSolutionStepValue(YP)=0.0;
}
}
// Stage 2: accumulate weighted particle contributions, element by element.
// Adding contribution, loop on elements, since each element has stored the particles found inside of it
std::vector<unsigned int> element_partition;
OpenMPUtils::CreatePartition(number_of_threads, mrModelPart.Elements().size(), element_partition);
ModelPart::ElementsContainerType::iterator ielembegin = mrModelPart.ElementsBegin();
#pragma omp parallel for
for(int kkk=0; kkk<number_of_threads; kkk++)
{
for(unsigned int ii=element_partition[kkk]; ii<element_partition[kkk+1]; ii++)
{
ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii;
// Per-element accumulators: positions of the element nodes plus the
// weighted sums contributed by the particles found inside.
array_1d<double,3*(TDim+1)> nodes_positions;
array_1d<double,3*(TDim+1)> nodes_added_vector1 = ZeroVector(3*(TDim+1));
array_1d<double,(TDim+1)> nodes_added_scalar1 = ZeroVector((TDim+1));
array_1d<double,(TDim+1)> nodes_added_weights = ZeroVector((TDim+1));
//array_1d<double,(TDim+1)> weighting_inverse_divisor;
Geometry<Node<3> >& geom = ielem->GetGeometry();
for (int i=0 ; i!=(TDim+1) ; ++i)
{
nodes_positions[i*3+0]=geom[i].X();
nodes_positions[i*3+1]=geom[i].Y();
nodes_positions[i*3+2]=geom[i].Z();
//weighting_inverse_divisor[i]=1.0/((geom[i].FastGetSolutionStepValue(MEAN_SIZE))*1.01);
}
int & number_of_particles_in_elem= mNumOfParticlesInElems[ii];
ParticlePointerVector& element_particle_pointers = mVectorOfParticlePointersVectors[ii];
for (int iii=0; iii<number_of_particles_in_elem ; iii++ )
{
if (iii==mMaxNumberOfParticles) // It means we are out of our portion of the array, abort loop!
break;
ShallowParticle& pparticle = element_particle_pointers[offset+iii];
if (pparticle.GetEraseFlag()==false)
{
array_1d<double,3> & position = pparticle.Coordinates();
const float& particle_scalar1 = pparticle.GetScalar1();
const array_1d<float,3>& particle_vector1 = pparticle.GetVector1();
array_1d<double,TDim+1> N;
// Shape function values of the particle position inside this element.
bool is_found = CalculatePosition(nodes_positions,position[0],position[1],position[2],N);
if (is_found==false) // Something went wrong. if it was close enough to the edge we simply send it inside the element.
{
KRATOS_INFO("MoveShallowWaterParticleUtility") << N << std::endl;
for (int j=0 ; j!=(TDim+1); j++)
if (N[j]<0.0 && N[j]> -1e-5)
N[j]=1e-10;
}
for (int j=0 ; j!=(TDim+1); j++) //going through the 3/4 nodes of the element
{
// These lines for a weighting function based on the distance (or square distance) from the node insteadof the shape functions
//double sq_dist = 0;
//for (int k=0 ; k!=(TDim); k++) sq_dist += ((position[k] - nodes_positions[j*3+k])*(position[k] - nodes_positions[j*3+k]));
//double weight = (1.0 - (sqrt(sq_dist)*weighting_inverse_divisor[j] ) );
double weight=N(j)*N(j);
//weight=N(j)*N(j)*N(j);
if (weight<threshold) weight=1e-10;
nodes_added_weights[j] += weight;
nodes_added_scalar1[j] += weight*static_cast<double>(particle_scalar1);
for (int k=0 ; k!=(TDim); k++) //x,y,(z)
{
nodes_added_vector1[j*3+k] += weight * static_cast<double>(particle_vector1[k]);
}
}
}
}
// Scatter this element's accumulated sums to its nodes; per-node locks
// guard against concurrent writes from neighbouring elements.
for (int i=0 ; i!=(TDim+1) ; ++i) {
geom[i].SetLock();
geom[i].FastGetSolutionStepValue(PROJECTED_SCALAR1) += nodes_added_scalar1[i];
geom[i].FastGetSolutionStepValue(PROJECTED_VECTOR1_X) += nodes_added_vector1[3*i+0];
geom[i].FastGetSolutionStepValue(PROJECTED_VECTOR1_Y) += nodes_added_vector1[3*i+1];
geom[i].FastGetSolutionStepValue(PROJECTED_VECTOR1_Z) += nodes_added_vector1[3*i+2];
geom[i].FastGetSolutionStepValue(YP) += nodes_added_weights[i];
geom[i].UnSetLock();
}
}
}
// Stage 3: normalize by the weight sum; fall back to the previous time
// step values on nodes that received no particle contribution.
#pragma omp parallel for
for(int kkk=0; kkk<number_of_threads; kkk++)
{
for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++)
{
ModelPart::NodesContainerType::iterator inode = inodebegin+ii;
double sum_weights = inode->FastGetSolutionStepValue(YP);
if (sum_weights>0.00001)
{
double & scalar = inode->FastGetSolutionStepValue(PROJECTED_SCALAR1);
array_1d<double,3> & vector = inode->FastGetSolutionStepValue(PROJECTED_VECTOR1);
scalar /=sum_weights; // resetting the scalar1
vector /=sum_weights; // resetting the vector1
}
else // This should never happen because other ways to recover the information have been executed before, but leaving it just in case..
{
inode->FastGetSolutionStepValue(PROJECTED_SCALAR1)=inode->FastGetSolutionStepValue(*mScalarVar1,1); // Resetting the convected scalar
inode->FastGetSolutionStepValue(PROJECTED_VECTOR1)=inode->FastGetSolutionStepValue(*mVectorVar1,1); // Resetting the convected vector
}
}
}
KRATOS_CATCH("")
}
/// Update all the particles without moving them
/** This function updates all the particles variables using the
 * "delta variables" from the nodal database.
 *
 * @see CorrectParticleUsingDeltaVariables
 */
void CorrectParticlesWithoutMovingUsingDeltaVariables()
{
    KRATOS_TRY
    // The array of pointers for each element has twice the required size so
    // that one half is used in odd timesteps and the other in even ones
    // (flag managed only by MoveParticles).
    const int offset = mOffset;

    ModelPart::ElementsContainerType::iterator ielembegin = mrModelPart.ElementsBegin();

    std::vector<unsigned int> element_partition;
    #ifdef _OPENMP
    int number_of_threads = omp_get_max_threads();
    #else
    int number_of_threads = 1;
    #endif
    OpenMPUtils::CreatePartition(number_of_threads, mrModelPart.Elements().size(), element_partition);

    #pragma omp parallel for
    for(int kkk=0; kkk<number_of_threads; kkk++)
    {
        for(unsigned int ii=element_partition[kkk]; ii<element_partition[kkk+1]; ii++)
        {
            ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii;
            Element::Pointer pelement(*ielem.base());
            Geometry<Node<3> >& geom = ielem->GetGeometry();

            int & number_of_particles_in_elem = mNumOfParticlesInElems[ii];
            ParticlePointerVector& element_particle_pointers = mVectorOfParticlePointersVectors[ii];

            for (int iii=0; iii<number_of_particles_in_elem ; iii++ )
            {
                // Bounds guard: never step past this element's half of the
                // double-sized pointer array. FIX: the original used ">" which
                // let iii == mMaxNumberOfParticles through, reading the first
                // slot of the other timestep's half; ">=" matches the checks
                // in MoveParticles and TransferLagrangianToEulerian.
                if (iii >= mMaxNumberOfParticles)
                    break;

                ShallowParticle & pparticle = element_particle_pointers[offset+iii];
                if (pparticle.GetEraseFlag() == false)
                {
                    // 'lite' version: we pass the geometry by reference, so much cheaper.
                    CorrectParticleUsingDeltaVariables(pparticle, pelement, geom);
                }
            }
        }
    }
    KRATOS_CATCH("")
}
/// Fill an element with particles
/** This function is to be executed after moving particles and
 * before transferring data from lagrangian particles to eulerian mesh.
 * If an element finishes with fewer particles than "minimum number
 * of particles", then PreReseed adds particles inside it.
 * A minimal reseed is performed in order to not disturb the projection
 * from lagrangian to eulerian.
 *
 * @see MinimumNumberOfParticles
 *
 * @see MoveParticles
 * @see MoveParticleInverseWay: is called to get the particle values
 */
void PreReseed(int MinimumNumberOfParticles)
{
KRATOS_TRY
const int offset =mOffset;
const int max_results = 1000;
//tools for the paralelization
unsigned int number_of_threads = OpenMPUtils::GetNumThreads();
std::vector<unsigned int> elem_partition;
int number_of_rows = mrModelPart.Elements().size();
elem_partition.resize(number_of_threads + 1);
int elem_partition_size = number_of_rows / number_of_threads;
elem_partition[0] = 0;
elem_partition[number_of_threads] = number_of_rows;
//KRATOS_WATCH(elem_partition_size);
for (unsigned int i = 1; i < number_of_threads; i++)
elem_partition[i] = elem_partition[i - 1] + elem_partition_size;
ModelPart::ElementsContainerType::iterator ielembegin = mrModelPart.ElementsBegin();
#pragma omp parallel firstprivate(elem_partition)
{
ResultContainerType results(max_results);
int k = OpenMPUtils::ThisThread();
//ModelPart::ElementsContainerType::iterator it_begin = mrModelPart.ElementsBegin() + elem_partition[k];
//ModelPart::ElementsContainerType::iterator it_end = mrModelPart.ElementsBegin() + elem_partition[k+1] ;
//ModelPart::NodesContainerType local_list=aux[k];
//PointerVectorSet<ShallowParticle, IndexedObject> & list=aux[k];
BoundedMatrix<double, (TDim+1), 3 > pos;
BoundedMatrix<double, (TDim+1) , (TDim+1) > N;
unsigned int freeparticle=0; //we start with the first position in the particles array
//int local_id=1;
for(unsigned int ii=elem_partition[k]; ii<elem_partition[k+1]; ii++)
{
//const int & elem_id = ielem->Id();
ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii;
results.resize(max_results);
//const int & elem_id = ielem->Id();
//ParticlePointerVector& element_particle_pointers = (ielem->GetValue(BED_PARTICLE_POINTERS));
//int & number_of_particles_in_elem=ielem->GetValue(NUMBER_OF_BED_PARTICLES);
int & number_of_particles_in_elem = mNumOfParticlesInElems[ii];
ParticlePointerVector& element_particle_pointers = mVectorOfParticlePointersVectors[ii];
if (number_of_particles_in_elem < (MinimumNumberOfParticles)) // && (ielem->GetGeometry())[0].Y()<0.10 )
{
Geometry< Node<3> >& geom = ielem->GetGeometry();
ComputeGaussPointPositionsForPreReseed(geom, pos, N);
for (unsigned int j = 0; j < (pos.size1()); j++) // I am dropping the last one, the one in the middle of the element
{
bool keep_looking = true;
while(keep_looking)
{
if (mParticlesVector[freeparticle].GetEraseFlag()==true)
{
#pragma omp critical
{
if (mParticlesVector[freeparticle].GetEraseFlag()==true)
{
mParticlesVector[freeparticle].GetEraseFlag()=false;
keep_looking=false;
}
}
if (keep_looking==false)
break;
else
freeparticle++;
}
else
freeparticle++;
}
ShallowParticle pparticle(pos(j,0),pos(j,1),pos(j,2));
array_1d<double,TDim+1>aux2_N;
bool is_found = CalculatePosition(geom,pos(j,0),pos(j,1),pos(j,2),aux2_N);
KRATOS_ERROR_IF_NOT( is_found ) <<
"In move shallow water particle utility: particle not found in domain" << std::endl;
pparticle.GetEraseFlag()=false;
ResultIteratorType result_begin = results.begin();
Element::Pointer pelement( *ielem.base() );
MoveParticleInverseWay(pparticle, pelement, result_begin, max_results);
//and we copy it to the array:
mParticlesVector[freeparticle] = pparticle;
element_particle_pointers(offset+number_of_particles_in_elem) = &mParticlesVector[freeparticle];
pparticle.GetEraseFlag()=false;
number_of_particles_in_elem++;
}
}
}
}
KRATOS_CATCH("")
}
    /// Fill an element with particles
    /** This function is to be executed after the mesh stage solver is
     * called and the particles are updated.
     * If an element contains less particles than "minimum number of
     * particles", then PostReseed adds particles inside it.
     * A full reseed is performed and the particle gets its convected
     * variables directly from the eulerian mesh.
     *
     * @param MinimumNumberOfParticles elements holding fewer particles than this get reseeded
     *
     * @see PreReseed
     */
    void PostReseed(int MinimumNumberOfParticles) //pooyan's way
    {
        KRATOS_TRY
        const int offset = mOffset; // selects the active half of the double-buffered particle-pointer arrays
        //TOOLS FOR THE PARALELIZATION: the element list is split into one contiguous slice per thread
        unsigned int number_of_threads = OpenMPUtils::GetNumThreads();
        std::vector<unsigned int> elem_partition;
        int number_of_rows=mrModelPart.Elements().size();
        //KRATOS_THROW_ERROR(std::logic_error, "Add ----NODAL_H---- variable!!!!!! ERROR", "");
        elem_partition.resize(number_of_threads + 1);
        int elem_partition_size = number_of_rows / number_of_threads;
        elem_partition[0] = 0;
        elem_partition[number_of_threads] = number_of_rows; // last entry absorbs the integer-division remainder
        for (unsigned int i = 1; i < number_of_threads; i++)
            elem_partition[i] = elem_partition[i - 1] + elem_partition_size;
        ModelPart::ElementsContainerType::iterator ielembegin = mrModelPart.ElementsBegin();
        #pragma omp parallel firstprivate(elem_partition) // firstprivate(results)//we will add the nodes in different parts of aux and later assemple everything toghether, remaming particles ids to get consecutive ids
        {
            unsigned int reused_particles=0; // NOTE(review): incremented but never read
            unsigned int freeparticle = 0; //we start by the first position;
            int k = OpenMPUtils::ThisThread();
            BoundedMatrix<double, (3+2*TDim), 3 > pos; //7 particles (2D) or 9 particles (3D)
            BoundedMatrix<double, (3+2*TDim), (TDim+1) > N; // shape function values at each reseed position
            double mesh_scalar1;
            array_1d<double,3> mesh_vector1;
            array_1d<int, (3+2*TDim) > positions; // NOTE(review): unused in this function
            unsigned int number_of_reseeded_particles;
            for(unsigned int ii=elem_partition[k]; ii<elem_partition[k+1]; ii++)
            {
                //const int & elem_id = ielem->Id();
                ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii;
                int & number_of_particles_in_elem = mNumOfParticlesInElems[ii];
                ParticlePointerVector& element_particle_pointers = mVectorOfParticlePointersVectors[ii];
                Geometry< Node<3> >& geom = ielem->GetGeometry();
                if ( number_of_particles_in_elem < (MinimumNumberOfParticles) ) // && (geom[0].Y()<0.10) ) || (number_of_water_particles_in_elem>2 && number_of_particles_in_elem<(MinimumNumberOfParticles) ) )
                {
                    //bool reseed_more=false;
                    number_of_reseeded_particles = 0;
                    //reseed_more=true;
                    number_of_reseeded_particles = 3 + 2*TDim;
                    ComputeGaussPointPositionsForPostReseed(geom, pos, N);
                    for (unsigned int j = 0; j < number_of_reseeded_particles; j++)
                    {
                        // Now we have to find an empty space (a particle that was about to be deleted) in the
                        // particles model part. once found. there will be our renewed particle.
                        // The erase flag is tested both outside and inside the critical section:
                        // the cheap outer test keeps the critical section rarely taken, the inner
                        // re-test makes the claim race-free across threads.
                        bool keep_looking = true;
                        while(keep_looking)
                        {
                            if (mParticlesVector[freeparticle].GetEraseFlag()==true)
                            {
                                #pragma omp critical
                                {
                                    if (mParticlesVector[freeparticle].GetEraseFlag()==true)
                                    {
                                        mParticlesVector[freeparticle].GetEraseFlag()=false; // claimed: no other thread can take this slot now
                                        keep_looking=false;
                                    }
                                }
                                if (keep_looking==false)
                                    break;
                                else
                                    freeparticle++;
                            }
                            else
                                freeparticle++;
                        }
                        ShallowParticle pparticle(pos(j,0),pos(j,1),pos(j,2));
                        array_1d<double,TDim+1>aux_N;
                        bool is_found = CalculatePosition(geom,pos(j,0),pos(j,1),pos(j,2),aux_N);
                        KRATOS_ERROR_IF_NOT( is_found ) <<
                            "In move shallow water particle utility: particle not found in domain" << std::endl;
                        // interpolate the convected variables for the new particle directly from the eulerian mesh
                        mesh_scalar1 = 0.0;
                        mesh_vector1 = ZeroVector(3);
                        for (unsigned int l = 0; l < (TDim+1); l++)
                        {
                            mesh_scalar1 += N(j,l) * geom[l].FastGetSolutionStepValue(*mScalarVar1);
                            noalias(mesh_vector1) += N(j, l) * geom[l].FastGetSolutionStepValue(*mVectorVar1);
                        }
                        pparticle.GetScalar1()=mesh_scalar1;
                        pparticle.GetVector1()=mesh_vector1;
                        pparticle.GetEraseFlag()=false;
                        mParticlesVector[freeparticle]=pparticle;
                        element_particle_pointers(offset+number_of_particles_in_elem) = &mParticlesVector[freeparticle];
                        number_of_particles_in_elem++;
                        // NOTE(review): keep_looking is necessarily false once the while loop above
                        // exits, so this check can never fire; kept as-is (behavior unchanged).
                        KRATOS_ERROR_IF( keep_looking ) <<
                            "In move shallow water particle utility: Finished the list and couldnt find a free cell for the new particle!" << std::endl;
                        reused_particles++;
                    }
                }
            }
        }
        KRATOS_CATCH("")
    }
/// Fill a model part with particles
/** This function prints the particles to a model part
*
* @param rLagrangianModelPart: empty model part to print particles
* @param FilterFactor: the function will print one particle of every "filter factor"
*/
void ExecuteParticlesPrintingTool( ModelPart& rLagrangianModelPart, unsigned int FilterFactor )
{
KRATOS_TRY
// We will only print one out of every "filter factor" particles of the total particle list
if (mParticlePrintingToolInitialized == false)
{
KRATOS_ERROR_IF( rLagrangianModelPart.NodesBegin() - rLagrangianModelPart.NodesEnd() > 0 ) <<
"In move shallow water particle utility: an empty model part is required for the particles printing tool" << std::endl;
rLagrangianModelPart.AddNodalSolutionStepVariable(*mScalarVar1);
rLagrangianModelPart.AddNodalSolutionStepVariable(DISPLACEMENT);
for (unsigned int i = 0; i != ((mMaxNumberOfParticles*mNElems)/FilterFactor) + FilterFactor; i++)
{
Node < 3 > ::Pointer pnode = rLagrangianModelPart.CreateNewNode( i+mLastNodeId+1 , 0.0, 0.0, 0.0); //recordar que es el nueevo model part!!
//pnode->SetBufferSize(mrModelPart.NodesBegin()->GetBufferSize());
pnode->SetBufferSize(1);
}
mParticlePrintingToolInitialized=true;
}
// Resetting data of the unused particles
const double inactive_particle_position = -10.0;
array_1d<double,3>inactive_particle_position_vector;
inactive_particle_position_vector(0)=inactive_particle_position;
inactive_particle_position_vector(1)=inactive_particle_position;
inactive_particle_position_vector(2)=inactive_particle_position;
ModelPart::NodesContainerType::iterator inodebegin = rLagrangianModelPart.NodesBegin();
for(unsigned int ii = 0; ii < rLagrangianModelPart.Nodes().size(); ii++)
{
ModelPart::NodesContainerType::iterator inode = inodebegin+ii;
inode->FastGetSolutionStepValue(*mScalarVar1) = 0.0;
inode->FastGetSolutionStepValue(DISPLACEMENT) = inactive_particle_position_vector;
}
int counter = 0;
//ModelPart::NodesContainerType::iterator it_begin = rLagrangianModelPart.NodesBegin();
for (int i = 0; i != mMaxNumberOfParticles*mNElems; i++)
{
ShallowParticle& pparticle = mParticlesVector[i];
if(pparticle.GetEraseFlag() == false && i%FilterFactor == 0)
{
ModelPart::NodesContainerType::iterator inode = inodebegin + counter; //copying info from the particle to the (printing) node.
inode->FastGetSolutionStepValue(*mScalarVar1) = pparticle.GetScalar1();
inode->FastGetSolutionStepValue(DISPLACEMENT) = pparticle.Coordinates();
counter++;
}
}
KRATOS_CATCH("")
}
protected:
private:
/// Move a particle
/** this function moves a particle according to the velocity given
* by VELOCITY variable. The movement is performed in nsubsteps,
* during a total time of DELTA_TIME
*
* @param pParticle
* @param pElement
* @param rElementsInTrajectory
* @param rNumberOfElementsInTrajectory
* @param ResultBegin
* @param MaxNumberOfResults
*
* @see MoveParticles
*/
void MoveParticle(ShallowParticle & pParticle,
Element::Pointer & pElement,
GlobalPointersVector< Element >& rElementsInTrajectory,
unsigned int & rNumberOfElementsInTrajectory,
ResultIteratorType ResultBegin,
const unsigned int MaxNumberOfResults)
{
const ProcessInfo& CurrentProcessInfo = mrModelPart.GetProcessInfo();
double delta_t = CurrentProcessInfo[DELTA_TIME];
unsigned int nsubsteps;
double substep_dt;
bool keep_integrating = false;
bool is_found;
array_1d<double,3> vel;
array_1d<double,3> vel_without_other_phase_nodes=ZeroVector(3);
array_1d<double,3> position;
array_1d<double,3> mid_position;
array_1d<double,TDim+1> N;
//we start with the first position, then it will enter the loop.
position = pParticle.Coordinates(); //initial coordinates
double only_integral = 0.0 ;
is_found = FindNodeOnMesh(position, N, pElement, ResultBegin, MaxNumberOfResults); //good, now we know where this point is:
if(is_found == true)
{
keep_integrating=true;
Geometry< Node<3> >& geom = pElement->GetGeometry();//the element we're in
vel=ZeroVector(3);
for(unsigned int j=0; j<(TDim+1); j++)
{
noalias(vel) += geom[j].FastGetSolutionStepValue(VELOCITY)*N[j];
}
//calculating substep to get +- courant(substep) = 0.1
nsubsteps = 10.0 * (delta_t * pElement->GetValue(MEAN_VEL_OVER_ELEM_SIZE));
if (nsubsteps<1)
nsubsteps=1;
substep_dt = delta_t / double(nsubsteps);
only_integral = 1.0;// weight;//*double(nsubsteps);
position += vel*substep_dt;//weight;
// DONE THE FIRST LOCATION OF THE PARTICLE, NOW WE PROCEED TO STREAMLINE INTEGRATION USING THE MESH VELOCITY
unsigned int check_from_element_number = 0;
for(unsigned int i=0; i<(nsubsteps-1); i++)// this is for the substeps n+1. in the first one we already knew the position of the particle.
{
if (keep_integrating == true)
{
is_found = FindNodeOnMesh(position, N, pElement, rElementsInTrajectory, rNumberOfElementsInTrajectory, check_from_element_number, ResultBegin, MaxNumberOfResults); //good, now we know where this point is:
if(is_found == true)
{
Geometry< Node<3> >& geom = pElement->GetGeometry();//the element we're in
vel = ZeroVector(3);
for(unsigned int j=0; j<(TDim+1); j++)
{
noalias(vel) += geom[j].FastGetSolutionStepValue(VELOCITY)*N[j];
}
only_integral += 1.0; //values saved for the current time step
position+=vel*substep_dt;//weight;
}
else
{
keep_integrating=false;
break;
}
}
else
break;
}
}
if (keep_integrating == false) (pParticle.GetEraseFlag()=true);
else is_found = FindNodeOnMesh(position, N ,pElement,ResultBegin,MaxNumberOfResults); //we must save the pointer of the last element that we're in (inside the pointervector pElement)
if (is_found == false) ( pParticle.GetEraseFlag()=true);
pParticle.Coordinates() = position;
}
/// This function updates a particle
/** This function updates a particle variables using the "delta
* variables" from the nodal database.
*
* @param pParticle
* @param pElement
* @param rGeom
*
* @see CorrectParticlesWithoutMovingUsingDeltaVariables
*/
void CorrectParticleUsingDeltaVariables(ShallowParticle & pParticle,
Element::Pointer & pElement,
Geometry< Node<3> >& rGeom)
{
array_1d<double,TDim+1> N;
//we start with the first position, then it will enter the loop.
array_1d<double,3> coords = pParticle.Coordinates();
float & particle_scalar1 = pParticle.GetScalar1();
array_1d<float,3> & particle_vector1 = pParticle.GetVector1();
//double distance=0.0;
double delta_scalar1 = 0.0;
array_1d<double,3> delta_vector1 = ZeroVector(3);
bool is_found = CalculatePosition(rGeom,coords[0],coords[1],coords[2],N);
if(is_found == false)
{
KRATOS_INFO("MoveShallowWaterParticleUtility") << N << std::endl;
for (int j=0 ; j!=(TDim+1); j++)
if (N[j]<0.0 )
N[j]=1e-10;
}
for(unsigned int j=0; j<(TDim+1); j++)
{
delta_scalar1 += rGeom[j].FastGetSolutionStepValue(DELTA_SCALAR1)*N[j];
noalias(delta_vector1) += rGeom[j].FastGetSolutionStepValue(DELTA_VECTOR1)*N[j];
}
particle_scalar1 = particle_scalar1 + delta_scalar1;
particle_vector1 = particle_vector1 + delta_vector1;
}
/// Move a particle in the inverse way
/** this function moves a particle according to the -velocity given
* by VELOCITY variable. The movement is performed by a backward
* integration in nsubsteps, during a total time of DELTA_TIME
* Before the particle goes out of the element, gets the value
* of the eulerian mesh and stores it
*
* @param pParticle
* @param pElement
* @param ResultBegin
* @param MaxNumberOfResults
*
* @see PreReseed
*/
void MoveParticleInverseWay(ShallowParticle & pParticle,
Element::Pointer & pElement, //NOT A REFERENCE!! WE SHALL NOT OVERWRITE THE ELEMENT IT BELONGS TO!
ResultIteratorType ResultBegin,
const unsigned int MaxNumberOfResults)
{
const ProcessInfo& CurrentProcessInfo = mrModelPart.GetProcessInfo();
double delta_t = CurrentProcessInfo[DELTA_TIME];
unsigned int nsubsteps;
double substep_dt;
bool keep_integrating = false;
bool is_found;
double scalar1 = 0.0;
array_1d<double,3> vector1;
array_1d<double,3> vel;
array_1d<double,3> position;
array_1d<double,3> mid_position;
array_1d<double,TDim+1> N;
//we start with the first position, then it will enter the loop.
position = pParticle.Coordinates(); // + (pParticle)->FastGetSolutionStepValue(DISPLACEMENT); //initial coordinates
double only_integral = 0.0 ;
is_found = FindNodeOnMesh(position, N, pElement, ResultBegin, MaxNumberOfResults); //good, now we know where this point is:
if(is_found == true)
{
keep_integrating = true;
Geometry< Node<3> >& geom = pElement->GetGeometry(); //the element we're in
scalar1 = 0.0;
vector1 = ZeroVector(3);
vel = ZeroVector(3);
for(unsigned int j=0; j<(TDim+1); j++)
{
scalar1 += geom[j].FastGetSolutionStepValue(*mScalarVar1)*N[j];
noalias(vector1) += geom[j].FastGetSolutionStepValue(*mVectorVar1)*N[j];
noalias(vel) += geom[j].FastGetSolutionStepValue(VELOCITY)*N[j];
}
//calculating substep to get +- courant(substep) = 1/4
nsubsteps = 10.0 * (delta_t * pElement->GetValue(MEAN_VEL_OVER_ELEM_SIZE));
if (nsubsteps<1)
nsubsteps=1;
substep_dt = delta_t / double(nsubsteps);
only_integral = 1.0; // weight;//*double(nsubsteps);
position -= vel*substep_dt; //weight;
for(unsigned int i=0; i<(nsubsteps-1); i++) // this is for the substeps n+1. in the first one we already knew the position of the particle.
{
if (keep_integrating == true)
{
is_found = FindNodeOnMesh(position, N, pElement, ResultBegin, MaxNumberOfResults); //good, now we know where this point is:
if (is_found == true)
{
Geometry< Node<3> >& geom = pElement->GetGeometry();//the element we're in
scalar1 = 0.0;
vector1 = ZeroVector(3);
vel = ZeroVector(3);
for(unsigned int j=0; j<(TDim+1); j++)
{
scalar1 += geom[j].FastGetSolutionStepValue(*mScalarVar1)*N(j);
noalias(vector1) += geom[j].FastGetSolutionStepValue(*mVectorVar1)*N[j];
noalias(vel) += geom[j].FastGetSolutionStepValue(VELOCITY)*N[j];
}
only_integral += 1.0; //weight ; //values saved for the current time step
position -= vel*substep_dt; //weight;
}
else keep_integrating = false;
}
}
pParticle.GetScalar1() = scalar1;
pParticle.GetVector1() = vector1;
}
}
/// Find the element into which a given node is located
/** This function should find the element into which a given node
* is located and return a pointer to the element and the vector
* containing the shape functions that define the positions within
* the element.
* If false is returned the element is not found
*
* @param position of the node
* @param N: return shape functions that define the positions within the elem
* @param pElement: return a pointer to the element
* @param ResultBegin
* @param MaxNumberOfResults
* @return FindNodeOnMesh if the element is found of not
*
* @see CalculatePosition
*/
bool FindNodeOnMesh( const array_1d<double,3>& rPosition,
array_1d<double,TDim+1>& N,
Element::Pointer & pElement,
ResultIteratorType ResultBegin,
const unsigned int MaxNumberOfResults)
{
typedef std::size_t SizeType;
array_1d<double,TDim+1> aux_N;
//before using the bin to search for possible elements we check first the last element in which the particle was.
Geometry<Node<3> >& geom_default = pElement->GetGeometry(); //(*(i))->GetGeometry();
bool is_found_1 = CalculatePosition(geom_default,rPosition[0],rPosition[1],rPosition[2],N);
if (is_found_1) //that was easy!
{
return true;
}
// To begin with we check the neighbour elements; it is a bit more expensive
GlobalPointersVector< Element >& neighb_elems = pElement->GetValue(NEIGHBOUR_ELEMENTS);
for (unsigned int i=0;i!=(neighb_elems.size());i++)
{
Geometry<Node<3> >& geom = neighb_elems[i].GetGeometry();
bool is_found_2 = CalculatePosition(geom,rPosition[0],rPosition[1],rPosition[2],N);
if (is_found_2)
{
pElement = neighb_elems[i].shared_from_this();
return true;
}
}
// If checking all the neighbour elements did not work, we have to use the bins
// ask to the container for the list of candidate elements
SizeType results_found = mpBinsObjectDynamic->SearchObjectsInCell(Point{rPosition}, ResultBegin, MaxNumberOfResults );
if (results_found>0)
{
//loop over the candidate elements and check if the particle falls within
for(SizeType i = 0; i< results_found; i++)
{
Geometry<Node<3> >& geom = (*(ResultBegin + i))->GetGeometry();
//find local position
bool is_found_3 = CalculatePosition(geom,rPosition[0],rPosition[1],rPosition[2],N);
if (is_found_3)
{
pElement = (*(ResultBegin + i))->shared_from_this();
return true;
}
}
}
//if nothing worked, then:
//not found case
return false;
}
    /// Find the element into which a given node is located
    /** This function should find the element into which a given node
     * is located and return a pointer to the element and the vector
     * containing the shape functions that define the positions within
     * the element.
     * If false is returned the element is not found.
     * This version additionally checks a cache of elements crossed by
     * previous particles that started from the same element (the
     * "trajectory"), and appends newly discovered elements to it.
     *
     * @param rPosition of the node
     * @param N Output shape functions that define the positions within the elem
     * @param pElement Output a pointer to the element
     * @param rElementsInTrajectory cache of elements along the trajectory (appended to, max 20 entries)
     * @param rNumberOfElementsInTrajectory Output number of valid entries in rElementsInTrajectory
     * @param rCheckFromElementNumber first trajectory entry to test; updated so already-checked entries are skipped
     * @param ResultBegin begin iterator of the spatial-search results buffer
     * @param MaxNumberOfResults capacity of that buffer
     * @return true if the element is found, false otherwise
     *
     * @see CalculatePosition
     */
    bool FindNodeOnMesh( const array_1d<double,3>& rPosition,
                         array_1d<double,TDim+1>& N,
                         Element::Pointer & pElement,
                         GlobalPointersVector< Element >& rElementsInTrajectory,
                         unsigned int & rNumberOfElementsInTrajectory,
                         unsigned int & rCheckFromElementNumber,
                         ResultIteratorType ResultBegin,
                         const unsigned int MaxNumberOfResults)
    {
        typedef std::size_t SizeType;
        //~ const array_1d<double,3>& coords = rPosition;
        array_1d<double,TDim+1> aux_N;
        //before using the bin to search for possible elements we check first the last element in which the particle was.
        Geometry<Node<3> >& geom_default = pElement->GetGeometry(); //(*(i))->GetGeometry();
        bool is_found_1 = CalculatePosition(geom_default,rPosition[0],rPosition[1],rPosition[2],N);
        if(is_found_1 == true)
        {
            return true; //that was easy!
        }
        // If it was not found in the first element, we can proceed to check in the following elements (in the trajectory defined by previous particles that started from the same element).
        for (unsigned int i=(rCheckFromElementNumber);i!=rNumberOfElementsInTrajectory;i++)
        {
            Geometry<Node<3> >& geom = rElementsInTrajectory[i].GetGeometry();
            // use aux_N here so N is only overwritten on success
            bool is_found_2 = CalculatePosition(geom,rPosition[0],rPosition[1],rPosition[2],aux_N);
            if (is_found_2)
            {
                pElement = rElementsInTrajectory[i].shared_from_this();
                N = aux_N;
                rCheckFromElementNumber = i+1 ; //now i element matches pElement, so to avoid checking twice the same element we send the counter to the following element.
                return true;
            }
        }
        // Now we check the neighbour elements:
        GlobalPointersVector< Element >& neighb_elems = pElement->GetValue(NEIGHBOUR_ELEMENTS);
        for (unsigned int i=0;i!=(neighb_elems.size());i++)
        {
            Geometry<Node<3> >& geom = neighb_elems[i].GetGeometry();
            bool is_found_2 = CalculatePosition(geom,rPosition[0],rPosition[1],rPosition[2],N);
            if (is_found_2)
            {
                pElement = neighb_elems[i].shared_from_this();
                if (rNumberOfElementsInTrajectory<20) // the trajectory cache holds at most 20 elements
                {
                    rElementsInTrajectory(rNumberOfElementsInTrajectory) = pElement;
                    rNumberOfElementsInTrajectory++;
                    rCheckFromElementNumber = rNumberOfElementsInTrajectory; //we do it after doing the ++ to the counter, so we wouldnt enter the loop that searches in the rElementsInTrajectory list. we are the particle that is adding elements to the list
                }
                return true;
            }
        }
        // If checking all the neighbour elements did not work, we have to use the bins
        // ask to the container for the list of candidate elements
        SizeType results_found = mpBinsObjectDynamic->SearchObjectsInCell(Point{rPosition}, ResultBegin, MaxNumberOfResults );
        if(results_found>0)
        {
            //loop over the candidate elements and check if the particle falls within
            for(SizeType i = 0; i< results_found; i++)
            {
                Geometry<Node<3> >& geom = (*(ResultBegin + i))->GetGeometry();
                //find local position
                bool is_found = CalculatePosition(geom,rPosition[0],rPosition[1],rPosition[2],N);
                if (is_found)
                {
                    pElement = (*(ResultBegin + i))->shared_from_this();
                    if (rNumberOfElementsInTrajectory<20)
                    {
                        rElementsInTrajectory(rNumberOfElementsInTrajectory) = pElement;
                        rNumberOfElementsInTrajectory++;
                        rCheckFromElementNumber = rNumberOfElementsInTrajectory; //we do it after doing the ++ to the counter, so we wouldnt enter the loop that searches in the rElementsInTrajectory list. we are the particle that is adding elements to the list
                    }
                    return true;
                }
            }
        }
        //not found case
        return false;
    }
/// Calculate the position of a given particle inside an element
/** This function calculates the position of a given particle inside
* an element and returns the shape functions that define it position
* within the element and returns false if the particle is otuside
* the element
*
* @param rGeom: the element (a triangle)
* @param xc: the postition of the particle
* @param yc: the postition of the particle
* @param zc: the postition of the particle
* @param N: the shape functions to define the particle position
*
* @return CalculatePosition
*/
inline bool CalculatePosition( const Geometry<Node < 3 > >&rGeom,
const double xc,
const double yc,
const double zc,
array_1d<double,3> & N )
{
double x0 = rGeom[0].X();
double y0 = rGeom[0].Y();
double x1 = rGeom[1].X();
double y1 = rGeom[1].Y();
double x2 = rGeom[2].X();
double y2 = rGeom[2].Y();
double area = CalculateVol(x0, y0, x1, y1, x2, y2);
KRATOS_ERROR_IF( area == 0.0 ) << "In move shallow water particle utility: element with zero area found" << std::endl;
double inv_area = 1.0 / area;
N[0] = CalculateVol(x1, y1, x2, y2, xc, yc) * inv_area;
N[1] = CalculateVol(x2, y2, x0, y0, xc, yc) * inv_area;
N[2] = CalculateVol(x0, y0, x1, y1, xc, yc) * inv_area;
if (N[0] >= 0.0 && N[1] >= 0.0 && N[2] >= 0.0 && N[0] <= 1.0 && N[1] <= 1.0 && N[2] <= 1.0)
//if the xc yc is inside the triangle return true
return true;
return false;
}
/// Calculate the position of a given particle inside an element
/** This function calculates the position of a given particle inside
* an element and returns the shape functions that define it position
* within the element and returns false if the particle is otuside
* the element
*
* @param rNodesPositions of the element (a triangle)
* @param xc: the postition of the particle
* @param yc: the postition of the particle
* @param zc: the postition of the particle
* @param N: the shape functions to define the particle position
*
* @return CalculatePosition
*/
inline bool CalculatePosition( const array_1d<double,3*(TDim+1)>& rNodesPositions,
const double xc,
const double yc,
const double zc,
array_1d<double,3> & N )
{
const double& x0 = rNodesPositions[0];
const double& y0 = rNodesPositions[1];
const double& x1 = rNodesPositions[3];
const double& y1 = rNodesPositions[4];
const double& x2 = rNodesPositions[6];
const double& y2 = rNodesPositions[7];
double area = CalculateVol(x0, y0, x1, y1, x2, y2);
KRATOS_ERROR_IF( area == 0.0 ) << "In move shallow water particle utility: element with zero area found" << std::endl;
double inv_area = 1.0 / area;
N[0] = CalculateVol(x1, y1, x2, y2, xc, yc) * inv_area;
N[1] = CalculateVol(x2, y2, x0, y0, xc, yc) * inv_area;
N[2] = CalculateVol(x0, y0, x1, y1, xc, yc) * inv_area;
if (N[0] >= 0.0 && N[1] >= 0.0 && N[2] >= 0.0 && N[0] <= 1.0 && N[1] <= 1.0 && N[2] <= 1.0)
//if the xc yc is inside the triangle return true
return true;
return false;
}
/// Calculate the position of a given particle inside an element
/** This function calculates the position of a given particle inside
* an element and returns the shape functions that define it position
* within the element and returns false if the particle is otuside
* the element
*
* @param rGeom: the element (a tetrahedron)
* @param xc: the postition of the particle
* @param yc: the postition of the particle
* @param zc: the postition of the particle
* @param N: the shape functions to define the particle position
*
* @return CalculatePosition
*/
inline bool CalculatePosition( const Geometry<Node < 3 > >&rGeom,
const double xc,
const double yc,
const double zc,
array_1d<double, 4 > & N )
{
double x0 = rGeom[0].X();
double y0 = rGeom[0].Y();
double z0 = rGeom[0].Z();
double x1 = rGeom[1].X();
double y1 = rGeom[1].Y();
double z1 = rGeom[1].Z();
double x2 = rGeom[2].X();
double y2 = rGeom[2].Y();
double z2 = rGeom[2].Z();
double x3 = rGeom[3].X();
double y3 = rGeom[3].Y();
double z3 = rGeom[3].Z();
double vol = CalculateVol(x0, y0, z0, x1, y1, z1, x2, y2, z2, x3, y3, z3);
KRATOS_ERROR_IF( vol == 0.0 ) << "In move shallow water particle utility: element with zero vol found" << std::endl;
double inv_vol = 1.0 / vol;
N[0] = CalculateVol(x1, y1, z1, x3, y3, z3, x2, y2, z2, xc, yc, zc) * inv_vol;
N[1] = CalculateVol(x0, y0, z0, x1, y1, z1, x2, y2, z2, xc, yc, zc) * inv_vol;
N[2] = CalculateVol(x3, y3, z3, x1, y1, z1, x0, y0, z0, xc, yc, zc) * inv_vol;
N[3] = CalculateVol(x3, y3, z3, x0, y0, z0, x2, y2, z2, xc, yc, zc) * inv_vol;
if (N[0] >= 0.0 && N[1] >= 0.0 && N[2] >= 0.0 && N[3] >= 0.0 &&
N[0] <= 1.0 && N[1] <= 1.0 && N[2] <= 1.0 && N[3] <= 1.0)
//if the xc yc zc is inside the tetrahedron return true
return true;
return false;
}
/// Calculate the position of a given particle inside an element
/** This function calculates the position of a given particle inside
* an element and returns the shape functions that define it position
* within the element and returns false if the particle is otuside
* the element
*
* @param rNodesPositions of the element (a tetrahedron)
* @param xc: the postition of the particle
* @param yc: the postition of the particle
* @param zc: the postition of the particle
* @param N: the shape functions to define the particle position
*
* @return CalculatePosition
*/
inline bool CalculatePosition( const array_1d<double,3*(TDim+1)>& rNodesPositions,
const double xc,
const double yc,
const double zc,
array_1d<double, 4 > & N )
{
const double& x0 = rNodesPositions[0];
const double& y0 = rNodesPositions[1];
const double& z0 = rNodesPositions[2];
const double& x1 = rNodesPositions[3];
const double& y1 = rNodesPositions[4];
const double& z1 = rNodesPositions[5];
const double& x2 = rNodesPositions[6];
const double& y2 = rNodesPositions[7];
const double& z2 = rNodesPositions[8];
const double& x3 = rNodesPositions[9];
const double& y3 = rNodesPositions[10];
const double& z3 = rNodesPositions[11];
double vol = CalculateVol(x0, y0, z0, x1, y1, z1, x2, y2, z2, x3, y3, z3);
KRATOS_ERROR_IF( vol == 0.0 ) << "In move shallow water particle utility: element with zero vol found" << std::endl;
double inv_vol = 1.0 / vol;
N[0] = CalculateVol(x1, y1, z1, x3, y3, z3, x2, y2, z2, xc, yc, zc) * inv_vol;
N[1] = CalculateVol(x0, y0, z0, x1, y1, z1, x2, y2, z2, xc, yc, zc) * inv_vol;
N[2] = CalculateVol(x3, y3, z3, x1, y1, z1, x0, y0, z0, xc, yc, zc) * inv_vol;
N[3] = CalculateVol(x3, y3, z3, x0, y0, z0, x2, y2, z2, xc, yc, zc) * inv_vol;
if (N[0] >= 0.0 && N[1] >= 0.0 && N[2] >= 0.0 && N[3] >= 0.0 &&
N[0] <= 1.0 && N[1] <= 1.0 && N[2] <= 1.0 && N[3] <= 1.0)
//if the xc yc zc is inside the tetrahedron return true
return true;
return false;
}
/// Calculate the volume
/** This function computes the area of a triangle
*/
inline double CalculateVol( const double x0, const double y0,
const double x1, const double y1,
const double x2, const double y2 )
{
return 0.5 * ((x1 - x0)*(y2 - y0)- (y1 - y0)*(x2 - x0));
}
/// Calculate the volume
/** This function computes the volume of a tetrahedron
*/
inline double CalculateVol( const double x0, const double y0, const double z0,
const double x1, const double y1, const double z1,
const double x2, const double y2, const double z2,
const double x3, const double y3, const double z3 )
{
double x10 = x1 - x0;
double y10 = y1 - y0;
double z10 = z1 - z0;
double x20 = x2 - x0;
double y20 = y2 - y0;
double z20 = z2 - z0;
double x30 = x3 - x0;
double y30 = y3 - y0;
double z30 = z3 - z0;
double detJ = x10 * y20 * z30 - x10 * y30 * z20 + y10 * z20 * x30 - y10 * x20 * z30 + z10 * x20 * y30 - z10 * y20 * x30;
return detJ * 0.1666666666666666666667;
}
    /// Compute the Gauss points
    /** Fills the first four rows of N and pos with sampling positions
     * inside the triangle geom: three points biased towards each vertex
     * plus the element centroid. Rows 4-6 of the 7-row matrices are left
     * untouched by this function.
     * NOTE(review): despite their names, one_sixt is 0.15 and two_third
     * is 0.7 (the exact fractions are kept in the trailing comments) —
     * presumably tuned on purpose; confirm before "fixing".
     */
    void ComputeGaussPointPositions_4( Geometry< Node < 3 > >& geom,
                                       BoundedMatrix<double, 7, 3 > & pos,
                                       BoundedMatrix<double, 7, 3 > & N )
    {
        double one_third = 1.0 / 3.0;
        double one_sixt = 0.15; //1.0 / 6.0;
        double two_third = 0.7; //2.0 * one_third;
        // shape function values of the three nodes at each sampling point
        N(0, 0) = one_sixt;
        N(0, 1) = one_sixt;
        N(0, 2) = two_third;
        N(1, 0) = two_third;
        N(1, 1) = one_sixt;
        N(1, 2) = one_sixt;
        N(2, 0) = one_sixt;
        N(2, 1) = two_third;
        N(2, 2) = one_sixt;
        N(3, 0) = one_third;
        N(3, 1) = one_third;
        N(3, 2) = one_third;
        //first: biased towards node 2
        pos(0, 0) = one_sixt * geom[0].X() + one_sixt * geom[1].X() + two_third * geom[2].X();
        pos(0, 1) = one_sixt * geom[0].Y() + one_sixt * geom[1].Y() + two_third * geom[2].Y();
        pos(0, 2) = one_sixt * geom[0].Z() + one_sixt * geom[1].Z() + two_third * geom[2].Z();
        //second: biased towards node 0
        pos(1, 0) = two_third * geom[0].X() + one_sixt * geom[1].X() + one_sixt * geom[2].X();
        pos(1, 1) = two_third * geom[0].Y() + one_sixt * geom[1].Y() + one_sixt * geom[2].Y();
        pos(1, 2) = two_third * geom[0].Z() + one_sixt * geom[1].Z() + one_sixt * geom[2].Z();
        //third: biased towards node 1
        pos(2, 0) = one_sixt * geom[0].X() + two_third * geom[1].X() + one_sixt * geom[2].X();
        pos(2, 1) = one_sixt * geom[0].Y() + two_third * geom[1].Y() + one_sixt * geom[2].Y();
        pos(2, 2) = one_sixt * geom[0].Z() + two_third * geom[1].Z() + one_sixt * geom[2].Z();
        //fourth: the element centroid
        pos(3, 0) = one_third * geom[0].X() + one_third * geom[1].X() + one_third * geom[2].X();
        pos(3, 1) = one_third * geom[0].Y() + one_third * geom[1].Y() + one_third * geom[2].Y();
        pos(3, 2) = one_third * geom[0].Z() + one_third * geom[1].Z() + one_third * geom[2].Z();
    }
/// Compute the Gauss points
/** For a triangle
*
* @see PostReseed
*/
void ComputeGaussPointPositionsForPostReseed( Geometry< Node < 3 > >& geom,
        BoundedMatrix<double, 7, 3 > & pos,
        BoundedMatrix<double, 7, 3 > & N ) //2d
{
    // Seven seeding locations in area coordinates: three near-corner points,
    // the centroid, and three near-edge-midside points. Each row of N sums
    // to 1.0.
    // NOTE(review): 0.12 / 0.76 / 0.44 are perturbed from the exact 1/8 and
    // 3/4 values -- presumably to keep points strictly interior; confirm.
    const double one_third = 1.0 / 3.0;
    const double low = 0.12;
    const double high = 0.76;
    const double mid = 0.44;
    const double weights[7][3] = {
        { low,       low,       high      },   // biased to node 2
        { high,      low,       low       },   // biased to node 0
        { low,       high,      low       },   // biased to node 1
        { one_third, one_third, one_third },   // centroid
        { low,       mid,       mid       },   // near edge 1-2
        { mid,       low,       mid       },   // near edge 0-2
        { mid,       mid,       low       } }; // near edge 0-1
    for (unsigned int j = 0; j != 7; j++)
        for (unsigned int k = 0; k != 3; k++)
            N(j, k) = weights[j][k];
    // interpolate the physical coordinates of every sampling point
    for (unsigned int j = 0; j != 7; j++)
    {
        pos(j, 0) = N(j, 0) * geom[0].X() + N(j, 1) * geom[1].X() + N(j, 2) * geom[2].X();
        pos(j, 1) = N(j, 0) * geom[0].Y() + N(j, 1) * geom[1].Y() + N(j, 2) * geom[2].Y();
        pos(j, 2) = N(j, 0) * geom[0].Z() + N(j, 1) * geom[1].Z() + N(j, 2) * geom[2].Z();
    }
}
/// Compute the Gauss points
/** For a tetrahedron
*
* @see PostReseed
*/
void ComputeGaussPointPositionsForPostReseed( Geometry< Node < 3 > >& geom,
        BoundedMatrix<double, 9, 3 > & pos,
        BoundedMatrix<double, 9, 4 > & N ) //3D
{
    // Nine seeding locations in volume (barycentric) coordinates: four
    // points biased towards each node, the centroid, and four points biased
    // towards each face. Each row of N sums to 1.0.
    double one_quarter = 0.25;
    double small_fraction = 0.1; // weight of the "far" nodes; NOTE(review): deliberately not an exact quadrature weight -- confirm intent
    double big_fraction = 0.7;   // weight of the dominant node
    double mid_fraction = 0.3;   // weight used for the face-biased points
    // points 0-3: one per node
    N(0, 0) = big_fraction;
    N(0, 1) = small_fraction;
    N(0, 2) = small_fraction;
    N(0, 3) = small_fraction;
    N(1, 0) = small_fraction;
    N(1, 1) = big_fraction;
    N(1, 2) = small_fraction;
    N(1, 3) = small_fraction;
    N(2, 0) = small_fraction;
    N(2, 1) = small_fraction;
    N(2, 2) = big_fraction;
    N(2, 3) = small_fraction;
    N(3, 0) = small_fraction;
    N(3, 1) = small_fraction;
    N(3, 2) = small_fraction;
    N(3, 3) = big_fraction;
    // point 4: centroid
    N(4, 0) = one_quarter;
    N(4, 1) = one_quarter;
    N(4, 2) = one_quarter;
    N(4, 3) = one_quarter;
    // points 5-8: one per face (opposite node gets the small weight)
    N(5, 0) = small_fraction;
    N(5, 1) = mid_fraction;
    N(5, 2) = mid_fraction;
    N(5, 3) = mid_fraction;
    N(6, 0) = mid_fraction;
    N(6, 1) = small_fraction;
    N(6, 2) = mid_fraction;
    N(6, 3) = mid_fraction;
    N(7, 0) = mid_fraction;
    N(7, 1) = mid_fraction;
    N(7, 2) = small_fraction;
    N(7, 3) = mid_fraction;
    N(8, 0) = mid_fraction;
    N(8, 1) = mid_fraction;
    N(8, 2) = mid_fraction;
    N(8, 3) = small_fraction;
    // accumulate physical positions: pos(j,:) = sum_i N(j,i) * node_i
    pos=ZeroMatrix(9,3);
    for (unsigned int i=0; i!=4; i++) //going through the 4 nodes
    {
        array_1d<double, 3 > & coordinates = geom[i].Coordinates();
        for (unsigned int j=0; j!=9; j++) //going through the 9 particles
        {
            for (unsigned int k=0; k!=3; k++) //x,y,z
                pos(j,k) += N(j,i) * coordinates[k];
        }
    }
}
/// Compute the Gauss points
/** For a triangle
*
* @see PreReseed
*/
void ComputeGaussPointPositionsForPreReseed( Geometry< Node < 3 > >& geom,
        BoundedMatrix<double, 3, 3 > & pos,
        BoundedMatrix<double, 3, 3 > & N ) //2D
{
    // Three seeding locations, one biased towards each triangle node:
    // weight 0.5 on "its" node and 0.25 on the other two (rows sum to 1).
    for (unsigned int j = 0; j != 3; j++)
        for (unsigned int k = 0; k != 3; k++)
            N(j, k) = (j == k) ? 0.5 : 0.25;
    // interpolate the physical coordinates of each sampling point
    for (unsigned int j = 0; j != 3; j++)
    {
        pos(j, 0) = N(j, 0) * geom[0].X() + N(j, 1) * geom[1].X() + N(j, 2) * geom[2].X();
        pos(j, 1) = N(j, 0) * geom[0].Y() + N(j, 1) * geom[1].Y() + N(j, 2) * geom[2].Y();
        pos(j, 2) = N(j, 0) * geom[0].Z() + N(j, 1) * geom[1].Z() + N(j, 2) * geom[2].Z();
    }
}
/// Compute the Gauss points
/** For a tetrahedron
*
* @see PreReseed
*/
void ComputeGaussPointPositionsForPreReseed( Geometry< Node < 3 > >& geom,
        BoundedMatrix<double, 4, 3 > & pos,
        BoundedMatrix<double, 4, 4 > & N ) //3D
{
    // Four seeding locations, one biased towards each tetrahedron node:
    // weight 0.4 on "its" node and 0.2 on the remaining three (rows sum
    // to 1), so every particle is equidistant from the non-dominant nodes.
    for (unsigned int particle = 0; particle != 4; particle++)
        for (unsigned int node = 0; node != 4; node++)
            N(particle, node) = (particle == node) ? 0.4 : 0.2;
    // accumulate physical positions: pos(j,:) = sum_i N(j,i) * node_i
    pos = ZeroMatrix(4, 3);
    for (unsigned int node = 0; node != 4; node++)
    {
        const array_1d<double, 3 > & coordinates = geom[node].Coordinates();
        for (unsigned int particle = 0; particle != 4; particle++)
        {
            for (unsigned int component = 0; component != 3; component++)
                pos(particle, component) += N(particle, node) * coordinates[component];
        }
    }
}
/// Compute the Gauss points
/**
*/
void ComputeGaussPointPositions_45( Geometry< Node < 3 > >& geom,
        BoundedMatrix<double, 45, 3 > & pos,
        BoundedMatrix<double, 45, 3 > & N )
{
    // Regular 0.1-spaced triangular lattice of area coordinates, offset by
    // 0.05 so every point is strictly interior: 9+8+...+1 = 45 points.
    unsigned int point = 0;
    for (unsigned int row = 0; row != 9; row++)
    {
        for (unsigned int col = 0; col != (9 - row); col++)
        {
            const double w0 = 0.05 + double(row) * 0.1; // weight of node 0
            const double w1 = 0.05 + double(col) * 0.1; // weight of node 1
            N(point, 0) = w0;
            N(point, 1) = w1;
            N(point, 2) = 1.0 - (w1 + w0); // remaining weight goes to node 2
            pos(point, 0) = w0 * geom[0].X() + w1 * geom[1].X() + N(point, 2) * geom[2].X();
            pos(point, 1) = w0 * geom[0].Y() + w1 * geom[1].Y() + N(point, 2) * geom[2].Y();
            pos(point, 2) = w0 * geom[0].Z() + w1 * geom[1].Z() + N(point, 2) * geom[2].Z();
            point++;
        }
    }
}
/// Compute the Gauss points
/**
*/
void ComputeGaussPointPositions_initial( Geometry< Node < 3 > >& geom,
        BoundedMatrix<double, 15, 3 > & pos,
        BoundedMatrix<double, 15, 3 > & N ) //2D
{
    // Regular 0.2-spaced triangular lattice of area coordinates, offset by
    // 0.05 so every point is strictly interior: 5+4+3+2+1 = 15 points.
    unsigned int point = 0;
    for (unsigned int row = 0; row != 5; row++)
    {
        for (unsigned int col = 0; col != (5 - row); col++)
        {
            const double w0 = 0.05 + double(row) * 0.2; // weight of node 0
            const double w1 = 0.05 + double(col) * 0.2; // weight of node 1
            N(point, 0) = w0;
            N(point, 1) = w1;
            N(point, 2) = 1.0 - (w1 + w0); // remaining weight goes to node 2
            pos(point, 0) = w0 * geom[0].X() + w1 * geom[1].X() + N(point, 2) * geom[2].X();
            pos(point, 1) = w0 * geom[0].Y() + w1 * geom[1].Y() + N(point, 2) * geom[2].Y();
            pos(point, 2) = w0 * geom[0].Z() + w1 * geom[1].Z() + N(point, 2) * geom[2].Z();
            point++;
        }
    }
}
/// Compute the Gauss points
/**
*/
void ComputeGaussPointPositions_initial( Geometry< Node < 3 > >& geom,
        BoundedMatrix<double, 20, 3 > & pos,
        BoundedMatrix<double, 20, 4 > & N ) //3D
{
    // Builds a tetrahedral "pyramid" of 20 seeding points, layer by layer:
    // layer i=0 is a triangular arrangement of 10 points, then 6, 3 and 1
    // (10+6+3+1 = 20). The weights 0.27*(0.175 + index) keep every N entry
    // strictly positive (max accumulated weight is 0.27*3.525 = 0.95175),
    // so all points lie strictly inside the tetrahedron.
    double fraction_increment;
    unsigned int counter=0;
    for (unsigned int i=0; i!=4;i++) //going to build a particle "pyramid"(tetrahedra) by layers. the first layer will be made by a triangle of 4 base X 4 height. since it is a triangle, it means it will have 10 particles
    {
        for (unsigned int j=0; j!=(4-i);j++)
        {
            for (unsigned int k=0; k!=(4-i-j);k++)
            {
                // weight of node 0 selects the layer; the remaining budget
                // (out of a total of 1) is shared between the other nodes
                N(counter,0)= 0.27 * ( 0.175 + double(i) ) ; //this is our "surface" in which we will build each layer, so we must construct a triangle using what's left of the shape functions total (a total of 1)
                //total = 1.0 - N(counter,0);
                fraction_increment = 0.27; //
                N(counter,1)=fraction_increment * (0.175 + double(j));
                N(counter,2)=fraction_increment * (0.175 + double(k));
                // node 3 takes whatever weight is left so the row sums to 1
                N(counter,3)=1.0 - ( N(counter,0)+ N(counter,1) + N(counter,2) ) ;
                // physical position of the point: pos = sum_i N_i * node_i
                pos(counter, 0) = N(counter,0) * geom[0].X() + N(counter,1) * geom[1].X() + N(counter,2) * geom[2].X() + N(counter,3) * geom[3].X();
                pos(counter, 1) = N(counter,0) * geom[0].Y() + N(counter,1) * geom[1].Y() + N(counter,2) * geom[2].Y() + N(counter,3) * geom[3].Y();
                pos(counter, 2) = N(counter,0) * geom[0].Z() + N(counter,1) * geom[1].Z() + N(counter,2) * geom[2].Z() + N(counter,3) * geom[3].Z();
                counter++;
            }
        }
    }
}
/// check function
virtual int Check()
{
    KRATOS_TRY
    // Verify that every nodal variable this utility reads or writes is
    // registered in the nodal database. Only the first node is inspected;
    // NOTE(review): assumes the model part is non-empty and that all nodes
    // share the same variable list -- confirm.
    Node<3>& rnode = *mrModelPart.NodesBegin();
    KRATOS_CHECK_VARIABLE_IN_NODAL_DATA((*mVectorVar1), rnode)
    KRATOS_CHECK_VARIABLE_IN_NODAL_DATA((*mScalarVar1), rnode)
    KRATOS_CHECK_VARIABLE_IN_NODAL_DATA(VELOCITY, rnode)
    KRATOS_CHECK_VARIABLE_IN_NODAL_DATA(DELTA_VECTOR1, rnode)
    KRATOS_CHECK_VARIABLE_IN_NODAL_DATA(DELTA_SCALAR1, rnode)
    KRATOS_CHECK_VARIABLE_IN_NODAL_DATA(PROJECTED_VECTOR1, rnode)
    KRATOS_CHECK_VARIABLE_IN_NODAL_DATA(PROJECTED_SCALAR1, rnode)
    KRATOS_CHECK_VARIABLE_IN_NODAL_DATA(MEAN_SIZE, rnode)
    KRATOS_CHECK_VARIABLE_IN_NODAL_DATA(YP, rnode)
    // 0 signals a successful check to the caller
    return 0;
    KRATOS_CATCH("")
}
/// Member variables
ModelPart& mrModelPart;                  ///< model part this utility operates on
int mNParticles;                         ///< number of particles (presumably per element -- confirm)
int mNElems;                             ///< number of elements
int mOffset;                             ///< NOTE(review): looks like a ping-pong buffer offset -- confirm usage
int mMaxSubSteps;                        ///< maximum number of convection substeps
double mMaxSubStepDt;                    ///< maximum time increment of a single substep
int mMaxNumberOfParticles;               ///< upper bound on stored particles (presumably per element -- confirm)
std::vector< ShallowParticle > mParticlesVector;  ///< storage for all particles
int mLastElemId;                         ///< id of the last element (presumably used when creating new entities -- confirm)
bool mOddTimeStep;                       ///< parity flag (presumably toggled every step -- confirm)
bool mParticlePrintingToolInitialized;   ///< lazy-initialization guard for the particle printing tool
unsigned int mLastNodeId;                ///< id of the last node (presumably used when creating new entities -- confirm)
DenseVector<int> mNumOfParticlesInElems;     ///< per-element particle counters
DenseVector<int> mNumOfParticlesInElemsAux;  ///< auxiliary per-element particle counters
DenseVector<ParticlePointerVector> mVectorOfParticlePointersVectors;  ///< per-element lists of particle pointers
typename BinsObjectDynamic<Configure>::Pointer mpBinsObjectDynamic;   ///< spatial search structure
const Variable<double>* mScalarVar1;              ///< convected scalar variable (validated in Check())
const Variable<array_1d<double,3>>* mVectorVar1;  ///< convected vector variable (validated in Check())
std::string m_scalar_var1_name;          ///< name of the scalar variable
std::string m_vector_var1_name;          ///< name of the vector variable
}; // class MoveShallowWaterParticleUtility
} // namespace Kratos.
#endif // KRATOS_MOVE_SHALLOW_WATER_PARTICLE_UTILITY_H_INCLUDED defined
|
DRB112-linear-orig-no.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
omp for loop is allowed to use the linear clause, an OpenMP 4.5 addition.
*/
#include <stdio.h>
int main()
{
    int len=100;
    double a[len], b[len], c[len];
    int i,j=0;
    /* Initialization: every iteration writes only its own index i, so this
       parallel loop is race-free. */
#pragma omp parallel for private(i)
    for (i=0;i<len;i++)
    {
        a[i]=((double)i)/2.0;
        b[i]=((double)i)/3.0;
        c[i]=((double)i)/7.0;
    }
    /* linear(j) (OpenMP 4.5): each iteration gets a private j whose value is
       the original j plus the iteration's logical number times the linear
       step (1 by default), i.e. j == i in every iteration. Each iteration
       therefore updates a distinct c[j] -- no data race. */
#pragma omp parallel for private(i) linear(j)
    for (i=0;i<len;i++)
    {
        c[j]+=a[i]*b[i];
        j++;
    }
    printf ("c[50]=%f\n",c[50]);
    return 0;
}
|
core_zlauum.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @precisions normal z -> c d s
*
**/
#include "core_blas.h"
#include "plasma_types.h"
#include "core_lapack.h"
/***************************************************************************//**
*
* @ingroup core_lauum
*
* Computes the product U * U^H or L^H * L, where the triangular
* factor U or L is stored in the upper or lower triangular part of
* the array A.
*
* If uplo = 'U' or 'u' then the upper triangle of the result is stored,
* overwriting the factor U in A.
* If uplo = 'L' or 'l' then the lower triangle of the result is stored,
* overwriting the factor L in A.
*
*******************************************************************************
*
* @param[in] uplo
* = PlasmaUpper: Upper triangle of A is stored;
* = PlasmaLower: Lower triangle of A is stored.
*
*
* @param[in] n
* The order of the matrix A. n >= 0.
*
* @param[in,out] A
* On entry, the triangular factor U or L.
* On exit, if uplo = 'U', the upper triangle of A is
* overwritten with the upper triangle of the product U * U^H;
* if uplo = 'L', the lower triangle of A is overwritten with
* the lower triangle of the product L^H * L.
*
* @param[in] lda
* The leading dimension of the array A. lda >= max(1,n).
*
 * @return
 *          - 0 on successful exit
 *          - < 0 if the i-th argument had an illegal value (returned as -i)
 *
******************************************************************************/
int core_zlauum(plasma_enum_t uplo,
                int n,
                plasma_complex64_t *A, int lda)
{
    // Thin wrapper: translate the PLASMA uplo enum to the LAPACK character
    // convention and delegate to the LAPACKE in-place kernel, which forms
    // U * U^H (upper) or L^H * L (lower).
    const char lapack_uplo = lapack_const(uplo);
    return LAPACKE_zlauum_work(LAPACK_COL_MAJOR, lapack_uplo, n, A, lda);
}
/******************************************************************************/
void core_omp_zlauum(plasma_enum_t uplo,
                     int n,
                     plasma_complex64_t *A, int lda,
                     plasma_sequence_t *sequence, plasma_request_t *request)
{
    // Submit the tile operation as an OpenMP task; the inout dependence on
    // the whole tile serializes it against any other task touching A.
    #pragma omp task depend(inout:A[0:lda*n])
    {
        // Skip the work if an earlier task in this sequence already failed.
        if (sequence->status == PlasmaSuccess) {
            int info = core_zlauum(uplo, n, A, lda);
            if (info != PlasmaSuccess) {
                // record the failure on the sequence/request pair
                coreblas_error("core_zlauum() failed");
                plasma_request_fail(sequence, request, PlasmaErrorInternal);
            }
        }
    }
}
|
GB_unop__identity_int8_int32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_int8_int32)
// op(A') function: GB (_unop_tran__identity_int8_int32)
// C type: int8_t
// A type: int32_t
// cast: int8_t cij = (int8_t) aij
// unaryop: cij = aij
// A (input) matrix entry type
#define GB_ATYPE \
    int32_t
// C (output) matrix entry type
#define GB_CTYPE \
    int8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
    z = x ;
// casting
#define GB_CAST(z, aij) \
    int8_t z = (int8_t) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    int32_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    int8_t z = (int8_t) aij ; \
    Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_INT8 || GxB_NO_INT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = (int8_t) Ax [p] for all anz entries, in parallel.
GrB_Info GB (_unop_apply__identity_int8_int32)
(
    int8_t *Cx,                 // Cx and Ax may be aliased
    const int32_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // A is not bitmap: all anz entries are present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            int32_t aij = Ax [p] ;
            int8_t z = (int8_t) aij ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;     // skip entries absent from the bitmap
            int32_t aij = Ax [p] ;
            int8_t z = (int8_t) aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): the shared transpose template does the work; the GB_*
// macros defined above inject the identity op and int32_t -> int8_t cast.
GrB_Info GB (_unop_tran__identity_int8_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
DRB093-doall2-collapse-orig-no.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
Two-dimensional array computation:
collapse(2) is used to associate two loops with omp for.
The corresponding loop iteration variables are private.
*/
/* Shared 100x100 array, incremented element-wise in parallel. */
int a[100][100];
int main()
{
    int i,j;
    /* collapse(2) fuses both loops into a single 10000-iteration space; i
       and j are the associated (private) loop variables, and every iteration
       updates a distinct a[i][j] -- no data race. */
#pragma omp parallel for collapse(2)
    for (i=0;i<100;i++)
        for (j=0;j<100;j++)
            a[i][j]=a[i][j]+1;
    return 0;
}
|
GB_unaryop__minv_int8_bool.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__minv_int8_bool
// op(A') function: GB_tran__minv_int8_bool
// C type: int8_t
// A type: bool
// cast: int8_t cij = (int8_t) aij
// unaryop: cij = GB_IMINV_SIGNED (aij, 8)
// A (input) matrix entry type
#define GB_ATYPE \
    bool
// C (output) matrix entry type
#define GB_CTYPE \
    int8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    bool aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
    z = GB_IMINV_SIGNED (x, 8) ;
// casting
#define GB_CASTING(z, aij) \
    int8_t z = (int8_t) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (z, aij) ; \
    GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_MINV || GxB_NO_INT8 || GxB_NO_BOOL)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = GB_IMINV_SIGNED ((int8_t) Ax [p], 8) for all anz entries,
// in parallel.
GrB_Info GB_unop__minv_int8_bool
(
    int8_t *Cx,         // Cx and Ax may be aliased
    bool *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // load, cast, apply op, store -- all via the GB_* macros above
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): the shared transpose template does the work; the GB_*
// macros defined above inject the minv op and bool -> int8_t cast.
GrB_Info GB_tran__minv_int8_bool
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
paint.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% PPPP AAA IIIII N N TTTTT %
% P P A A I NN N T %
% PPPP AAAAA I N N N T %
% P A A I N NN T %
% P A A IIIII N N T %
% %
% %
% Methods to Paint on an Image %
% %
% Software Design %
% Cristy %
% July 1998 %
% %
% %
% Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/channel.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/draw.h"
#include "MagickCore/draw-private.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/gem-private.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/paint.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/resource_.h"
#include "MagickCore/statistic.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F l o o d f i l l P a i n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FloodfillPaintImage() changes the color value of any pixel that matches
% target and is an immediate neighbor. If the method FillToBorderMethod is
% specified, the color value is changed for any neighbor pixel that does not
% match the bordercolor member of image.
%
% By default target must match a particular pixel color exactly. However,
% in many cases two colors may differ by a small amount. The fuzz member of
% image defines how much tolerance is acceptable to consider two colors as
% the same. For example, set fuzz to 10 and the color red at intensities of
% 100 and 102 respectively are now interpreted as the same color for the
% purposes of the floodfill.
%
% The format of the FloodfillPaintImage method is:
%
% MagickBooleanType FloodfillPaintImage(Image *image,
% const DrawInfo *draw_info,const PixelInfo target,
% const ssize_t x_offset,const ssize_t y_offset,
% const MagickBooleanType invert,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o target: the RGB value of the target color.
%
% o x_offset,y_offset: the starting location of the operation.
%
% o invert: paint any pixel that does not match the target color.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType FloodfillPaintImage(Image *image,
  const DrawInfo *draw_info,const PixelInfo *target,const ssize_t x_offset,
  const ssize_t y_offset,const MagickBooleanType invert,
  ExceptionInfo *exception)
{
#define MaxStacksize 524288UL
/*
  Push a candidate horizontal span [left,right] on row (up)+(delta) onto the
  explicit segment stack; overflowing the fixed-size stack tears down all
  working state and raises a draw error.
*/
#define PushSegmentStack(up,left,right,delta) \
{ \
  if (s >= (segment_stack+MaxStacksize)) \
    { \
      segment_info=RelinquishVirtualMemory(segment_info); \
      image_view=DestroyCacheView(image_view); \
      floodplane_view=DestroyCacheView(floodplane_view); \
      floodplane_image=DestroyImage(floodplane_image); \
      ThrowBinaryException(DrawError,"SegmentStackOverflow",image->filename) \
    } \
  else \
    { \
      if ((((up)+(delta)) >= 0) && (((up)+(delta)) < (ssize_t) image->rows)) \
        { \
          s->x1=(double) (left); \
          s->y1=(double) (up); \
          s->x2=(double) (right); \
          s->y2=(double) (delta); \
          s++; \
        } \
    } \
}
  CacheView
    *floodplane_view,
    *image_view;
  Image
    *floodplane_image;
  MagickBooleanType
    skip,
    status;
  MemoryInfo
    *segment_info;
  PixelInfo
    fill_color,
    pixel;
  SegmentInfo
    *s;
  SegmentInfo
    *segment_stack;
  ssize_t
    offset,
    start,
    x1,
    x2,
    y;
  /*
    Check boundary conditions.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (DrawInfo *) NULL);
  assert(draw_info->signature == MagickCoreSignature);
  if ((x_offset < 0) || (x_offset >= (ssize_t) image->columns))
    return(MagickFalse);
  if ((y_offset < 0) || (y_offset >= (ssize_t) image->rows))
    return(MagickFalse);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  if (IsGrayColorspace(image->colorspace) != MagickFalse)
    (void) SetImageColorspace(image,sRGBColorspace,exception);
  if ((image->alpha_trait == UndefinedPixelTrait) &&
      (draw_info->fill.alpha_trait != UndefinedPixelTrait))
    (void) SetImageAlpha(image,OpaqueAlpha,exception);
  /*
    Set floodfill state.  The floodplane image is a gray mask the same size
    as the image: pixels already claimed by the fill are marked QuantumRange.
  */
  floodplane_image=CloneImage(image,0,0,MagickTrue,exception);
  if (floodplane_image == (Image *) NULL)
    return(MagickFalse);
  floodplane_image->alpha_trait=UndefinedPixelTrait;
  floodplane_image->colorspace=GRAYColorspace;
  (void) QueryColorCompliance("#000",AllCompliance,
    &floodplane_image->background_color,exception);
  (void) SetImageBackgroundColor(floodplane_image,exception);
  segment_info=AcquireVirtualMemory(MaxStacksize,sizeof(*segment_stack));
  if (segment_info == (MemoryInfo *) NULL)
    {
      floodplane_image=DestroyImage(floodplane_image);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  segment_stack=(SegmentInfo *) GetVirtualMemoryBlob(segment_info);
  /*
    Push initial segment on stack.  This is the classic scanline span-stack
    flood-fill: the seed is pushed once downward and once upward so both
    directions are explored.
  */
  status=MagickTrue;
  start=0;
  s=segment_stack;
  GetPixelInfo(image,&pixel);
  image_view=AcquireVirtualCacheView(image,exception);
  floodplane_view=AcquireAuthenticCacheView(floodplane_image,exception);
  PushSegmentStack(y_offset,x_offset,x_offset,1);
  PushSegmentStack(y_offset+1,x_offset,x_offset,-1);
  while (s > segment_stack)
  {
    const Quantum
      *magick_restrict p;
    Quantum
      *magick_restrict q;
    ssize_t
      x;
    /*
      Pop segment off stack.
    */
    s--;
    x1=(ssize_t) s->x1;
    x2=(ssize_t) s->x2;
    offset=(ssize_t) s->y2;  /* direction (+1/-1) the parent span came from */
    y=(ssize_t) s->y1+offset;
    /*
      Recolor neighboring pixels.  First scan left from x1, marking matching
      pixels in the floodplane until a non-match or already-marked pixel.
    */
    p=GetCacheViewVirtualPixels(image_view,0,y,(size_t) (x1+1),1,exception);
    q=GetCacheViewAuthenticPixels(floodplane_view,0,y,(size_t) (x1+1),1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      break;
    p+=x1*GetPixelChannels(image);
    q+=x1*GetPixelChannels(floodplane_image);
    for (x=x1; x >= 0; x--)
    {
      if (GetPixelGray(floodplane_image,q) != 0)
        break;
      GetPixelInfoPixel(image,p,&pixel);
      if (IsFuzzyEquivalencePixelInfo(&pixel,target) == invert)
        break;
      SetPixelGray(floodplane_image,QuantumRange,q);
      p-=GetPixelChannels(image);
      q-=GetPixelChannels(floodplane_image);
    }
    if (SyncCacheViewAuthenticPixels(floodplane_view,exception) == MagickFalse)
      break;
    /* skip means the leftward scan matched nothing at x1 */
    skip=x >= x1 ? MagickTrue : MagickFalse;
    if (skip == MagickFalse)
      {
        start=x+1;
        if (start < x1)  /* leaked beyond the parent span: push the overhang */
          PushSegmentStack(y,start,x1-1,-offset);
        x=x1+1;
      }
    do
    {
      if (skip == MagickFalse)
        {
          /*
            Scan rightward from x, marking matching pixels, then push the
            discovered span to continue in the same direction (and any
            right-side overhang back in the opposite direction).
          */
          if (x < (ssize_t) image->columns)
            {
              p=GetCacheViewVirtualPixels(image_view,x,y,image->columns-x,1,
                exception);
              q=GetCacheViewAuthenticPixels(floodplane_view,x,y,image->columns-
                x,1,exception);
              if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
                break;
              for ( ; x < (ssize_t) image->columns; x++)
              {
                if (GetPixelGray(floodplane_image,q) != 0)
                  break;
                GetPixelInfoPixel(image,p,&pixel);
                if (IsFuzzyEquivalencePixelInfo(&pixel,target) == invert)
                  break;
                SetPixelGray(floodplane_image,QuantumRange,q);
                p+=GetPixelChannels(image);
                q+=GetPixelChannels(floodplane_image);
              }
              status=SyncCacheViewAuthenticPixels(floodplane_view,exception);
              if (status == MagickFalse)
                break;
            }
          PushSegmentStack(y,start,x-1,offset);
          if (x > (x2+1))
            PushSegmentStack(y,x2+1,x-1,-offset);
        }
      skip=MagickFalse;
      x++;
      if (x <= x2)
        {
          /*
            Skip over the non-matching gap to find the next candidate run
            inside the parent span [x1,x2].
          */
          p=GetCacheViewVirtualPixels(image_view,x,y,(size_t) (x2-x+1),1,
            exception);
          q=GetCacheViewAuthenticPixels(floodplane_view,x,y,(size_t) (x2-x+1),1,
            exception);
          if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
            break;
          for ( ; x <= x2; x++)
          {
            if (GetPixelGray(floodplane_image,q) != 0)
              break;
            GetPixelInfoPixel(image,p,&pixel);
            if (IsFuzzyEquivalencePixelInfo(&pixel,target) != invert)
              break;
            p+=GetPixelChannels(image);
            q+=GetPixelChannels(floodplane_image);
          }
        }
      start=x;
    } while (x <= x2);
  }
  status=MagickTrue;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const Quantum
      *magick_restrict p;
    Quantum
      *magick_restrict q;
    ssize_t
      x;
    /*
      Tile fill color onto floodplane: every pixel marked in the gray mask
      receives the (possibly tiled/patterned) fill color.
    */
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(floodplane_view,0,y,image->columns,1,exception);
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (GetPixelGray(floodplane_image,p) != 0)
        {
          GetFillColor(draw_info,x,y,&fill_color,exception);
          SetPixelViaPixelInfo(image,&fill_color,q);
        }
      p+=GetPixelChannels(floodplane_image);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  floodplane_view=DestroyCacheView(floodplane_view);
  image_view=DestroyCacheView(image_view);
  segment_info=RelinquishVirtualMemory(segment_info);
  floodplane_image=DestroyImage(floodplane_image);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G r a d i e n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GradientImage() applies continuously smooth color transitions along a
% vector from one color to another.
%
% Note, the interface of this method will change in the future to support
% more than one transition.
%
% The format of the GradientImage method is:
%
%     MagickBooleanType GradientImage(Image *image,const GradientType type,
%       const SpreadMethod method,const StopInfo *stops,
%       const size_t number_stops,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
%    o type: the gradient type: linear or radial.
%
%    o method: the gradient spread method: pad, reflect, or repeat.
%
%    o stops: the stop colors and offsets that define the gradient.
%
%    o number_stops: the number of elements in the stops array.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GradientImage(Image *image,
  const GradientType type,const SpreadMethod method,const StopInfo *stops,
  const size_t number_stops,ExceptionInfo *exception)
{
  const char
    *artifact;

  DrawInfo
    *draw_info;

  GradientInfo
    *gradient;

  MagickBooleanType
    status;

  /*
    Set gradient start-stop end points.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(stops != (const StopInfo *) NULL);
  assert(number_stops > 0);
  draw_info=AcquireDrawInfo();
  gradient=(&draw_info->gradient);
  gradient->type=type;
  gradient->bounding_box.width=image->columns;
  gradient->bounding_box.height=image->rows;
  artifact=GetImageArtifact(image,"gradient:bounding-box");
  if (artifact != (const char *) NULL)
    (void) ParseAbsoluteGeometry(artifact,&gradient->bounding_box);
  /*
    Default gradient vector: image origin to the bottom-right corner.
  */
  gradient->gradient_vector.x2=(double) image->columns-1;
  gradient->gradient_vector.y2=(double) image->rows-1;
  artifact=GetImageArtifact(image,"gradient:direction");
  if (artifact != (const char *) NULL)
    {
      GravityType
        direction;

      /*
        Map a compass direction (gravity) onto the vector endpoints.
      */
      direction=(GravityType) ParseCommandOption(MagickGravityOptions,
        MagickFalse,artifact);
      switch (direction)
      {
        case NorthWestGravity:
        {
          gradient->gradient_vector.x1=(double) image->columns-1;
          gradient->gradient_vector.y1=(double) image->rows-1;
          gradient->gradient_vector.x2=0.0;
          gradient->gradient_vector.y2=0.0;
          break;
        }
        case NorthGravity:
        {
          gradient->gradient_vector.x1=0.0;
          gradient->gradient_vector.y1=(double) image->rows-1;
          gradient->gradient_vector.x2=0.0;
          gradient->gradient_vector.y2=0.0;
          break;
        }
        case NorthEastGravity:
        {
          gradient->gradient_vector.x1=0.0;
          gradient->gradient_vector.y1=(double) image->rows-1;
          gradient->gradient_vector.x2=(double) image->columns-1;
          gradient->gradient_vector.y2=0.0;
          break;
        }
        case WestGravity:
        {
          gradient->gradient_vector.x1=(double) image->columns-1;
          gradient->gradient_vector.y1=0.0;
          gradient->gradient_vector.x2=0.0;
          gradient->gradient_vector.y2=0.0;
          break;
        }
        case EastGravity:
        {
          gradient->gradient_vector.x1=0.0;
          gradient->gradient_vector.y1=0.0;
          gradient->gradient_vector.x2=(double) image->columns-1;
          gradient->gradient_vector.y2=0.0;
          break;
        }
        case SouthWestGravity:
        {
          gradient->gradient_vector.x1=(double) image->columns-1;
          gradient->gradient_vector.y1=0.0;
          gradient->gradient_vector.x2=0.0;
          gradient->gradient_vector.y2=(double) image->rows-1;
          break;
        }
        case SouthGravity:
        {
          gradient->gradient_vector.x1=0.0;
          gradient->gradient_vector.y1=0.0;
          gradient->gradient_vector.x2=0.0;
          /*
            Bug fix: a top-to-bottom gradient ends at the last row; the
            previous code used image->columns-1 here.
          */
          gradient->gradient_vector.y2=(double) image->rows-1;
          break;
        }
        case SouthEastGravity:
        {
          gradient->gradient_vector.x1=0.0;
          gradient->gradient_vector.y1=0.0;
          gradient->gradient_vector.x2=(double) image->columns-1;
          gradient->gradient_vector.y2=(double) image->rows-1;
          break;
        }
        default:
          break;
      }
    }
  artifact=GetImageArtifact(image,"gradient:angle");
  if (artifact != (const char *) NULL)
    gradient->angle=StringToDouble(artifact,(char **) NULL);
  artifact=GetImageArtifact(image,"gradient:vector");
  if (artifact != (const char *) NULL)
    (void) sscanf(artifact,"%lf%*[ ,]%lf%*[ ,]%lf%*[ ,]%lf",
      &gradient->gradient_vector.x1,&gradient->gradient_vector.y1,
      &gradient->gradient_vector.x2,&gradient->gradient_vector.y2);
  /*
    With no explicit direction hints, a linear gradient defaults to a
    vertical (top-to-bottom) vector.
  */
  if ((GetImageArtifact(image,"gradient:angle") == (const char *) NULL) &&
      (GetImageArtifact(image,"gradient:direction") == (const char *) NULL) &&
      (GetImageArtifact(image,"gradient:extent") == (const char *) NULL) &&
      (GetImageArtifact(image,"gradient:vector") == (const char *) NULL))
    if ((type == LinearGradient) && (gradient->gradient_vector.y2 != 0.0))
      gradient->gradient_vector.x2=0.0;
  gradient->center.x=(double) gradient->gradient_vector.x2/2.0;
  gradient->center.y=(double) gradient->gradient_vector.y2/2.0;
  artifact=GetImageArtifact(image,"gradient:center");
  if (artifact != (const char *) NULL)
    (void) sscanf(artifact,"%lf%*[ ,]%lf",&gradient->center.x,
      &gradient->center.y);
  artifact=GetImageArtifact(image,"gradient:angle");
  if ((type == LinearGradient) && (artifact != (const char *) NULL))
    {
      double
        sine,
        cosine,
        distance;

      /*
        Reference https://drafts.csswg.org/css-images-3/#linear-gradients.
      */
      sine=sin((double) DegreesToRadians(gradient->angle-90.0));
      cosine=cos((double) DegreesToRadians(gradient->angle-90.0));
      distance=fabs((double) (image->columns-1.0)*cosine)+
        fabs((double) (image->rows-1.0)*sine);
      gradient->gradient_vector.x1=0.5*((image->columns-1.0)-distance*cosine);
      gradient->gradient_vector.y1=0.5*((image->rows-1.0)-distance*sine);
      gradient->gradient_vector.x2=0.5*((image->columns-1.0)+distance*cosine);
      gradient->gradient_vector.y2=0.5*((image->rows-1.0)+distance*sine);
    }
  gradient->radii.x=(double) MagickMax((image->columns-1.0),(image->rows-1.0))/
    2.0;
  gradient->radii.y=gradient->radii.x;
  artifact=GetImageArtifact(image,"gradient:extent");
  if (artifact != (const char *) NULL)
    {
      if (LocaleCompare(artifact,"Circle") == 0)
        {
          gradient->radii.x=(double) MagickMax((image->columns-1.0),
            (image->rows-1.0))/2.0;
          gradient->radii.y=gradient->radii.x;
        }
      if (LocaleCompare(artifact,"Diagonal") == 0)
        {
          gradient->radii.x=(double) (sqrt((double) (image->columns-1.0)*
            (image->columns-1.0)+(image->rows-1.0)*(image->rows-1.0)))/2.0;
          gradient->radii.y=gradient->radii.x;
        }
      if (LocaleCompare(artifact,"Ellipse") == 0)
        {
          gradient->radii.x=(double) (image->columns-1.0)/2.0;
          gradient->radii.y=(double) (image->rows-1.0)/2.0;
        }
      if (LocaleCompare(artifact,"Maximum") == 0)
        {
          gradient->radii.x=(double) MagickMax((image->columns-1.0),
            (image->rows-1.0))/2.0;
          gradient->radii.y=gradient->radii.x;
        }
      if (LocaleCompare(artifact,"Minimum") == 0)
        {
          gradient->radii.x=(double) (MagickMin((image->columns-1.0),
            (image->rows-1.0)))/2.0;
          gradient->radii.y=gradient->radii.x;
        }
    }
  artifact=GetImageArtifact(image,"gradient:radii");
  if (artifact != (const char *) NULL)
    (void) sscanf(artifact,"%lf%*[ ,]%lf",&gradient->radii.x,
      &gradient->radii.y);
  gradient->radius=MagickMax(gradient->radii.x,gradient->radii.y);
  gradient->spread=method;
  /*
    Define the gradient to fill between the stops.
  */
  gradient->number_stops=number_stops;
  gradient->stops=(StopInfo *) AcquireQuantumMemory(gradient->number_stops,
    sizeof(*gradient->stops));
  if (gradient->stops == (StopInfo *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  (void) memcpy(gradient->stops,stops,(size_t) number_stops*sizeof(*stops));
  /*
    Draw a gradient on the image.
  */
  status=DrawGradientImage(image,draw_info,exception);
  draw_info=DestroyDrawInfo(draw_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% O i l P a i n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% OilPaintImage() applies a special effect filter that simulates an oil
% painting. Each pixel is replaced by the most frequent color occurring
% in a circular region defined by radius.
%
% The format of the OilPaintImage method is:
%
% Image *OilPaintImage(const Image *image,const double radius,
% const double sigma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the circular neighborhood.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Release the per-thread histogram buffers and the pointer table itself.
  Always returns NULL so callers can null their handle in one statement.
*/
static size_t **DestroyHistogramThreadSet(size_t **histogram)
{
  ssize_t
    n,
    number_threads;

  assert(histogram != (size_t **) NULL);
  number_threads=(ssize_t) GetMagickResourceLimit(ThreadResource);
  for (n=0; n < number_threads; n++)
    if (histogram[n] != (size_t *) NULL)
      histogram[n]=(size_t *) RelinquishMagickMemory(histogram[n]);
  histogram=(size_t **) RelinquishMagickMemory(histogram);
  return(histogram);
}
/*
  Allocate one histogram of `count` bins per worker thread.  The table is
  zeroed first so a partial failure can be unwound by
  DestroyHistogramThreadSet(); returns NULL on allocation failure.
*/
static size_t **AcquireHistogramThreadSet(const size_t count)
{
  size_t
    **histogram,
    number_threads;

  ssize_t
    n;

  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  histogram=(size_t **) AcquireQuantumMemory(number_threads,
    sizeof(*histogram));
  if (histogram == (size_t **) NULL)
    return((size_t **) NULL);
  (void) memset(histogram,0,number_threads*sizeof(*histogram));
  for (n=0; n < (ssize_t) number_threads; n++)
  {
    histogram[n]=(size_t *) AcquireQuantumMemory(count,sizeof(**histogram));
    if (histogram[n] == (size_t *) NULL)
      return(DestroyHistogramThreadSet(histogram));
  }
  return(histogram);
}
MagickExport Image *OilPaintImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
#define NumberPaintBins 256
#define OilPaintImageTag "OilPaint/Image"

  CacheView
    *image_view,
    *paint_view;

  Image
    *linear_image,
    *paint_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  size_t
    **histograms,
    width;

  ssize_t
    center,
    y;

  /*
    Initialize painted image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  width=GetOptimalKernelWidth2D(radius,sigma);
  linear_image=CloneImage(image,0,0,MagickTrue,exception);
  paint_image=CloneImage(image,0,0,MagickTrue,exception);
  if ((linear_image == (Image *) NULL) || (paint_image == (Image *) NULL))
    {
      if (linear_image != (Image *) NULL)
        linear_image=DestroyImage(linear_image);
      /*
        Bug fix: the destroyed paint_image was previously assigned to
        linear_image, leaving paint_image dangling.
      */
      if (paint_image != (Image *) NULL)
        paint_image=DestroyImage(paint_image);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(paint_image,DirectClass,exception) == MagickFalse)
    {
      linear_image=DestroyImage(linear_image);
      paint_image=DestroyImage(paint_image);
      return((Image *) NULL);
    }
  histograms=AcquireHistogramThreadSet(NumberPaintBins);
  if (histograms == (size_t **) NULL)
    {
      linear_image=DestroyImage(linear_image);
      paint_image=DestroyImage(paint_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Oil paint image.
  */
  status=MagickTrue;
  progress=0;
  /* offset of the kernel's center pixel within the padded source row */
  center=(ssize_t) GetPixelChannels(linear_image)*(linear_image->columns+width)*
    (width/2L)+GetPixelChannels(linear_image)*(width/2L);
  image_view=AcquireVirtualCacheView(linear_image,exception);
  paint_view=AcquireAuthenticCacheView(paint_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(linear_image,paint_image,linear_image->rows,1)
#endif
  for (y=0; y < (ssize_t) linear_image->rows; y++)
  {
    const Quantum
      *magick_restrict p;

    Quantum
      *magick_restrict q;

    size_t
      *histogram;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    /* read a (columns+width) x width window centered on this row */
    p=GetCacheViewVirtualPixels(image_view,-((ssize_t) width/2L),y-(ssize_t)
      (width/2L),linear_image->columns+width,width,exception);
    q=QueueCacheViewAuthenticPixels(paint_view,0,y,paint_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    histogram=histograms[GetOpenMPThreadId()];
    for (x=0; x < (ssize_t) linear_image->columns; x++)
    {
      ssize_t
        i,
        u;

      size_t
        count;

      ssize_t
        j,
        k,
        n,
        v;

      /*
        Assign most frequent color: build an intensity histogram over the
        width x width neighborhood and remember (j) the pixel whose
        intensity bin is the most populated.
      */
      k=0;
      j=0;
      count=0;
      (void) memset(histogram,0,NumberPaintBins*sizeof(*histogram));
      for (v=0; v < (ssize_t) width; v++)
      {
        for (u=0; u < (ssize_t) width; u++)
        {
          n=(ssize_t) ScaleQuantumToChar(ClampToQuantum(GetPixelIntensity(
            linear_image,p+GetPixelChannels(linear_image)*(u+k))));
          histogram[n]++;
          if (histogram[n] > count)
            {
              j=k+u;
              count=histogram[n];
            }
        }
        k+=(ssize_t) (linear_image->columns+width);
      }
      for (i=0; i < (ssize_t) GetPixelChannels(linear_image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(linear_image,i);
        PixelTrait traits = GetPixelChannelTraits(linear_image,channel);
        PixelTrait paint_traits=GetPixelChannelTraits(paint_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (paint_traits == UndefinedPixelTrait))
          continue;
        if ((paint_traits & CopyPixelTrait) != 0)
          {
            /* copy-only channels take the center pixel unchanged */
            SetPixelChannel(paint_image,channel,p[center+i],q);
            continue;
          }
        SetPixelChannel(paint_image,channel,p[j*GetPixelChannels(linear_image)+
          i],q);
      }
      p+=GetPixelChannels(linear_image);
      q+=GetPixelChannels(paint_image);
    }
    if (SyncCacheViewAuthenticPixels(paint_view,exception) == MagickFalse)
      status=MagickFalse;
    if (linear_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(linear_image,OilPaintImageTag,progress,
          linear_image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  paint_view=DestroyCacheView(paint_view);
  image_view=DestroyCacheView(image_view);
  histograms=DestroyHistogramThreadSet(histograms);
  linear_image=DestroyImage(linear_image);
  if (status == MagickFalse)
    paint_image=DestroyImage(paint_image);
  return(paint_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% O p a q u e P a i n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% OpaquePaintImage() changes any pixel that matches color with the color
% defined by fill argument.
%
% By default color must match a particular pixel color exactly. However, in
% many cases two colors may differ by a small amount. Fuzz defines how much
% tolerance is acceptable to consider two colors as the same. For example,
% set fuzz to 10 and the color red at intensities of 100 and 102 respectively
% are now interpreted as the same color.
%
% The format of the OpaquePaintImage method is:
%
% MagickBooleanType OpaquePaintImage(Image *image,const PixelInfo *target,
% const PixelInfo *fill,const MagickBooleanType invert,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o target: the RGB value of the target color.
%
% o fill: the replacement color.
%
% o invert: paint any pixel that does not match the target color.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType OpaquePaintImage(Image *image,
  const PixelInfo *target,const PixelInfo *fill,const MagickBooleanType invert,
  ExceptionInfo *exception)
{
#define OpaquePaintImageTag "Opaque/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;  /* rows completed, fed to the progress monitor */

  PixelInfo
    conform_fill,
    conform_target,
    zero;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(target != (PixelInfo *) NULL);
  assert(fill != (PixelInfo *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  /*
    Conform fill/target to this image (presumably colorspace/channel
    adaptation — confirm against ConformPixelInfo).
  */
  ConformPixelInfo(image,fill,&conform_fill,exception);
  ConformPixelInfo(image,target,&conform_target,exception);
  /*
    Make image color opaque: replace every pixel fuzzily matching the target
    (or NOT matching it, when invert is set) with the fill color.
  */
  status=MagickTrue;
  progress=0;
  GetPixelInfo(image,&zero);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  /* one row per iteration; progress and status are the only shared state */
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    PixelInfo
      pixel;

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    /* a failure in another row aborts remaining rows (cannot break in OMP) */
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    pixel=zero;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      GetPixelInfoPixel(image,q,&pixel);
      if (IsFuzzyEquivalencePixelInfo(&pixel,&conform_target) != invert)
        {
          PixelTrait
            traits;

          /* only channels flagged for update receive the fill color */
          traits=GetPixelChannelTraits(image,RedPixelChannel);
          if ((traits & UpdatePixelTrait) != 0)
            SetPixelRed(image,(Quantum) conform_fill.red,q);
          traits=GetPixelChannelTraits(image,GreenPixelChannel);
          if ((traits & UpdatePixelTrait) != 0)
            SetPixelGreen(image,(Quantum) conform_fill.green,q);
          traits=GetPixelChannelTraits(image,BluePixelChannel);
          if ((traits & UpdatePixelTrait) != 0)
            SetPixelBlue(image,(Quantum) conform_fill.blue,q);
          traits=GetPixelChannelTraits(image,BlackPixelChannel);
          if ((traits & UpdatePixelTrait) != 0)
            SetPixelBlack(image,(Quantum) conform_fill.black,q);
          traits=GetPixelChannelTraits(image,AlphaPixelChannel);
          if ((traits & UpdatePixelTrait) != 0)
            SetPixelAlpha(image,(Quantum) conform_fill.alpha,q);
        }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,OpaquePaintImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s p a r e n t P a i n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransparentPaintImage() changes the opacity value associated with any pixel
% that matches color to the value defined by opacity.
%
% By default color must match a particular pixel color exactly. However, in
% many cases two colors may differ by a small amount. Fuzz defines how much
% tolerance is acceptable to consider two colors as the same. For example,
% set fuzz to 10 and the color red at intensities of 100 and 102 respectively
% are now interpreted as the same color.
%
% The format of the TransparentPaintImage method is:
%
% MagickBooleanType TransparentPaintImage(Image *image,
% const PixelInfo *target,const Quantum opacity,
% const MagickBooleanType invert,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o target: the target color.
%
% o opacity: the replacement opacity value.
%
% o invert: paint any pixel that does not match the target color.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType TransparentPaintImage(Image *image,
  const PixelInfo *target,const Quantum opacity,const MagickBooleanType invert,
  ExceptionInfo *exception)
{
#define TransparentPaintImageTag "Transparent/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelInfo
    zero;

  ssize_t
    y;

  /*
    Validate arguments and guarantee the image carries an alpha channel.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(target != (PixelInfo *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  if (image->alpha_trait == UndefinedPixelTrait)
    (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
  /*
    Make image color transparent: any pixel fuzzily matching the target
    (or not matching, when invert is set) gets its alpha set to opacity.
  */
  status=MagickTrue;
  progress=0;
  GetPixelInfo(image,&zero);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    PixelInfo
      candidate;

    Quantum
      *magick_restrict row;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    row=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (row == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    candidate=zero;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      GetPixelInfoPixel(image,row,&candidate);
      if (IsFuzzyEquivalencePixelInfo(&candidate,target) != invert)
        SetPixelAlpha(image,opacity,row);
      row+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,TransparentPaintImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s p a r e n t P a i n t I m a g e C h r o m a %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransparentPaintImageChroma() changes the opacity value associated with any
% pixel that matches color to the value defined by opacity.
%
% As there is one fuzz value for the all the channels, TransparentPaintImage()
% is not suitable for the operations like chroma, where the tolerance for
% similarity of two color component (RGB) can be different. Thus we define
% this method to take two target pixels (one low and one high) and all the
% pixels of an image which are lying between these two pixels are made
% transparent.
%
% The format of the TransparentPaintImageChroma method is:
%
% MagickBooleanType TransparentPaintImageChroma(Image *image,
% const PixelInfo *low,const PixelInfo *high,const Quantum opacity,
% const MagickBooleanType invert,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o low: the low target color.
%
% o high: the high target color.
%
% o opacity: the replacement opacity value.
%
% o invert: paint any pixel that does not match the target color.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType TransparentPaintImageChroma(Image *image,
  const PixelInfo *low,const PixelInfo *high,const Quantum opacity,
  const MagickBooleanType invert,ExceptionInfo *exception)
{
#define TransparentPaintImageTag "Transparent/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;  /* rows completed, fed to the progress monitor */

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(high != (PixelInfo *) NULL);
  assert(low != (PixelInfo *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  /* the paint below writes alpha, so ensure an alpha channel exists */
  if (image->alpha_trait == UndefinedPixelTrait)
    (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
  /*
    Make image color transparent: a pixel matches when each of its R, G and
    B components lies within the inclusive [low, high] range; matching
    pixels (or non-matching, when invert is set) get alpha = opacity.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  /* one row per iteration; progress and status are the only shared state */
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickBooleanType
      match;

    PixelInfo
      pixel;

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    /* a failure in another row aborts remaining rows (cannot break in OMP) */
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    GetPixelInfo(image,&pixel);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      GetPixelInfoPixel(image,q,&pixel);
      match=((pixel.red >= low->red) && (pixel.red <= high->red) &&
        (pixel.green >= low->green) && (pixel.green <= high->green) &&
        (pixel.blue >= low->blue) && (pixel.blue <= high->blue)) ? MagickTrue :
        MagickFalse;
      if (match != invert)
        SetPixelAlpha(image,opacity,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,TransparentPaintImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
|
3d7pt_var.c | /*
* Order-1, 3D 7 point stencil with variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/*
 * Compute *result = *x - *y for `struct timeval` values.
 * NOTE: *y is normalized in place (its usec field is borrowed/carried),
 * matching the classic GNU libc elapsed-time recipe.
 * Returns 1 when the difference is negative, 0 otherwise.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Borrow whole seconds into y so that y->tv_usec <= x->tv_usec. */
    if (x->tv_usec < y->tv_usec) {
        int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * borrow;
        y->tv_sec  += borrow;
    }
    /* Carry excess microseconds (> 1 second) back into y's seconds. */
    if (x->tv_usec - y->tv_usec > 1000000) {
        int carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * carry;
        y->tv_sec  -= carry;
    }
    /* After normalization tv_usec is guaranteed non-negative. */
    result->tv_sec  = x->tv_sec  - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;
    return x->tv_sec < y->tv_sec;
}
/*
 * Benchmark driver: runs the order-1 variable-coefficient 7-point stencil
 * TESTS times and reports the best wall-clock time.
 * Usage: prog [NX NY NZ [NT]]  (interior sizes; a halo layer is added).
 */
int main(int argc, char *argv[])
{
  int t, i, j, k, m, test;
  int Nx, Ny, Nz, Nt;

  /* Bug fix: Nx/Ny/Nz/Nt were read uninitialized when too few arguments
     were given.  Provide modest defaults (interior 32^3, 10 steps). */
  Nx = Ny = Nz = 32+2;  /* +2 for the halo layer on each axis */
  Nt = 10;
  if (argc > 3) {
    Nx = atoi(argv[1])+2;
    Ny = atoi(argv[2])+2;
    Nz = atoi(argv[3])+2;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);

  /* Allocate the two time planes A[0..1][Nz][Ny][Nx]. */
  double ****A = (double ****) malloc(sizeof(double***)*2);
  for (m=0; m<2; m++) {
    A[m] = (double ***) malloc(sizeof(double**)*Nz);
    for (i=0; i<Nz; i++) {
      A[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for (j=0; j<Ny; j++) {
        A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }

  /* Seven variable coefficients, one full grid each. */
  double ****coef = (double ****) malloc(sizeof(double***)*7);
  for (m=0; m<7; m++) {
    coef[m] = (double ***) malloc(sizeof(double**)*Nz);
    for (i=0; i<Nz; i++) {
      coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for (j=0; j<Ny; j++) {
        coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }

  /* tile size information, including extra element to decide the list length */
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  /* The list is modified here before source-to-source transformations */
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 24;
  tile_size[1] = 24;
  tile_size[2] = 4;
  tile_size[3] = 64;
  tile_size[4] = -1;

  /* for timekeeping */
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff = 1.e100;

  const int BASE = 1024;

  /* Initialize every cell, halo included, of BOTH time planes.  The
     original loops started at index 1 and touched only A[0], so the
     stencil read uninitialized halo cells (index 0) and, on odd steps,
     an entirely unseeded A[1] halo.  (This changes the rand() sequence,
     which only affects the benchmark's arbitrary input data.) */
  srand(42);
  for (m=0; m<2; m++) {
    for (i=0; i<Nz; i++) {
      for (j=0; j<Ny; j++) {
        for (k=0; k<Nx; k++) {
          A[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }
  for (m=0; m<7; m++) {
    for (i=0; i<Nz; i++) {
      for (j=0; j<Ny; j++) {
        for (k=0; k<Nx; k++) {
          coef[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  for (test=0; test<TESTS; test++) {
    gettimeofday(&start, 0);
    // serial execution - Additions: 6, Multiplications: 7 per grid point
#pragma scop
    for (t = 0; t < Nt-1; t++) {
      for (i = 1; i < Nz-1; i++) {
        for (j = 1; j < Ny-1; j++) {
          for (k = 1; k < Nx-1; k++) {
            A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[t%2][i  ][j  ][k  ] +
                                  coef[1][i][j][k] * A[t%2][i-1][j  ][k  ] +
                                  coef[2][i][j][k] * A[t%2][i  ][j-1][k  ] +
                                  coef[3][i][j][k] * A[t%2][i  ][j  ][k-1] +
                                  coef[4][i][j][k] * A[t%2][i+1][j  ][k  ] +
                                  coef[5][i][j][k] * A[t%2][i  ][j+1][k  ] +
                                  coef[6][i][j][k] * A[t%2][i  ][j  ][k+1];
          }
        }
      }
    }
#pragma endscop
    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    /* Bug fix: the macro defined above is MIN; `min` was undefined. */
    min_tdiff = MIN(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }
  (void) ts_return;  /* retained for parity with the original harness */

  PRINT_RESULTS(1, "variable no-symmetry")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  /* Free allocated arrays (including the top-level pointer tables and
     tile_size, which the original leaked). */
  for (i=0; i<Nz; i++) {
    for (j=0; j<Ny; j++) {
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  free(A);
  for (m=0; m<7; m++) {
    for (i=0; i<Nz; i++) {
      for (j=0; j<Ny; j++) {
        free(coef[m][i][j]);
      }
      free(coef[m][i]);
    }
    free(coef[m]);
  }
  free(coef);
  free(tile_size);
  return 0;
}
|
mkldnn_quantize_v2-inl.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file mkldnn_quantize_v2-inl.h
* \brief
*/
#ifndef MXNET_OPERATOR_QUANTIZATION_MKLDNN_MKLDNN_QUANTIZE_V2_INL_H_
#define MXNET_OPERATOR_QUANTIZATION_MKLDNN_MKLDNN_QUANTIZE_V2_INL_H_
#if MXNET_USE_MKLDNN == 1
#include <algorithm>
#include <string>
#include <vector>
#include "../../nn/mkldnn/mkldnn_base-inl.h"
#include "../quantize_v2-inl.h"
namespace mxnet {
namespace op {
// Stateful MKL-DNN quantization operator (quantize_v2): converts a float
// NDArray into a quantized one, emitting the min/max calibration range as
// extra outputs.  Caches MKL-DNN memory/primitive handles between calls.
class SgMKLDNNQuantizeOperator {
 public:
  explicit SgMKLDNNQuantizeOperator(const nnvm::NodeAttrs &attrs)
      : param_(nnvm::get<QuantizeV2Param>(attrs.parsed)) {}

  // Quantizes inputs[0] into outputs[0]; writes the resulting min and max
  // of the calibration range to outputs[1] and outputs[2].
  void Forward(const OpContext &ctx, const std::vector<NDArray> &inputs,
               const std::vector<OpReqType> &req, const std::vector<NDArray> &outputs);

 private:
  // True once the cached state below is valid; Forward resets it whenever
  // the observed data range changes.  (Field name misspells "initialized";
  // kept as-is because Forward, defined below, references it.)
  bool initalized_{false};
  QuantizeV2Param param_;
  // Most recently observed input data range, used to detect range changes.
  float cached_data_min_{0.f};
  float cached_data_max_{0.f};
  std::shared_ptr<mkldnn::memory> i_mem_;    // input memory handle (presumably reused across calls — confirm in Forward)
  std::shared_ptr<mkldnn::memory> o_mem_;    // output memory handle
  std::shared_ptr<mkldnn::reorder> fwd_pd_;  // reorder primitive performing the quantization
};
// Quantize a float NDArray to int8/uint8 with MKL-DNN.
// - Already-quantized inputs are passed through (min/max outputs come from the
//   calibration attributes when present, else the dtype's full range).
// - Float inputs go through a cached MKL-DNN reorder primitive that applies the
//   output scale; the primitive is rebuilt only when the data range changes.
// outputs[0] is the quantized tensor; outputs[1]/outputs[2] receive min/max.
void SgMKLDNNQuantizeOperator::Forward(const OpContext &ctx, const std::vector<NDArray> &inputs,
                                       const std::vector<OpReqType> &req,
                                       const std::vector<NDArray> &outputs) {
  float quantized_range = 0.0;
  NDArray in_buffer = inputs[0];
  float data_min = mshadow::red::limits::MaxValue<float>();
  float data_max = mshadow::red::limits::MinValue<float>();
  // Pass through quantized data
  if (inputs[0].dtype() == mshadow::kUint8 || inputs[0].dtype() == mshadow::kInt8) {
    if (param_.min_calib_range.has_value() && param_.max_calib_range.has_value()) {
      *outputs[1].data().dptr<float>() = param_.min_calib_range.value();
      *outputs[2].data().dptr<float>() = param_.max_calib_range.value();
    } else {
      // No calibration info: report the dtype's full representable range.
      if (inputs[0].dtype() == mshadow::kUint8) {
        *outputs[1].data().dptr<float>() = 0;
        *outputs[2].data().dptr<float>() = kUint8Range;
      } else {
        *outputs[1].data().dptr<float>() = -kInt8Range;
        *outputs[2].data().dptr<float>() = kInt8Range;
      }
    }
    if (req[0] != kWriteInplace) {
      const_cast<NDArray &>(outputs[0]).CopyFrom(*inputs[0].GetMKLDNNData());
      MKLDNNStream::Get()->Submit();
    }
  } else {
    // MKL-DNN data living in a view must be reordered to the default layout first.
    if (in_buffer.IsView() && in_buffer.IsMKLDNNData()) in_buffer = inputs[0].Reorder2Default();
    auto i_mem = in_buffer.GetMKLDNNData();
    if (param_.min_calib_range.has_value() && param_.max_calib_range.has_value()) {
      data_min = param_.min_calib_range.value();
      data_max = param_.max_calib_range.value();
    } else {
      // no calib info
      in_buffer = inputs[0].Reorder2Default();
      auto in_ptr = in_buffer.data().dptr<float>();
      auto nthreads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
      // Per-thread partial min/max, reduced sequentially below.
      std::vector<float> data_maxs(nthreads, data_max);
      std::vector<float> data_mins(nthreads, data_min);
#pragma omp parallel for num_threads(nthreads)
      for (index_t i = 0; i < static_cast<index_t>(in_buffer.shape().Size()); i++) {
        int tid = omp_get_thread_num();
        if (in_ptr[i] > data_maxs[tid]) data_maxs[tid] = in_ptr[i];
        if (in_ptr[i] < data_mins[tid]) data_mins[tid] = in_ptr[i];
      }
      for (index_t i = 0; i < nthreads; i++) {
        if (data_maxs[i] > data_max) data_max = data_maxs[i];
        if (data_mins[i] < data_min) data_min = data_mins[i];
      }
      // A changed data range invalidates the cached reorder primitive.
      if (initalized_ && (cached_data_min_ != data_min || cached_data_max_ != data_max))
        initalized_ = false;
    }
    // Write output min/max
    auto out_type = GetQuantizeOutputType(param_);
    if (out_type == mshadow::kUint8) {
      quantized_range = kUint8Range;
      *outputs[1].data().dptr<float>() = data_min;
      *outputs[2].data().dptr<float>() = data_max;
    } else if (out_type == mshadow::kInt8) {
      // int8 uses a symmetric range around zero.
      float real_range = MaxAbs(data_min, data_max);
      quantized_range = kInt8Range;
      *outputs[1].data().dptr<float>() = -real_range;
      *outputs[2].data().dptr<float>() = real_range;
    } else {
      LOG(FATAL) << "mkldnn quantize op only supports int8 and uint8 as output type";
    }
    if (!initalized_) {
      // (Re)build the quantizing reorder primitive for the current data range.
      cached_data_min_ = data_min;
      cached_data_max_ = data_max;
      float real_range = MaxAbs(data_min, data_max);
      float scale = quantized_range / real_range;
      primitive_attr attr;
      const int mask = 0;
      std::vector<float> scales = {scale};
      attr.set_output_scales(mask, scales);
      attr.set_int_output_round_mode(round_nearest);
      mkldnn::engine cpu_engine = mxnet::CpuEngine::Get()->get_engine();
      auto i_mpd = i_mem->get_primitive_desc();
      auto i_desc = i_mpd.desc();
      mkldnn::memory::format i_fmt = static_cast<mkldnn::memory::format>(i_desc.data.format);
      // Channel-blocked / nchw inputs are emitted as plain nhwc quantized output.
      if (i_fmt == mkldnn::memory::format::nchw || i_fmt == mkldnn::memory::format::nChw8c ||
          i_fmt == mkldnn_nChw16c) {
        i_fmt = mkldnn::memory::format::nhwc;
      }
      size_t i_ndim = in_buffer.shape().ndim();
      mkldnn::memory::dims i_dims = mkldnn::memory::dims(i_ndim);
      for (size_t i = 0; i < i_ndim; i++) {
        i_dims[i] = static_cast<int>(in_buffer.shape()[i]);
      }
      auto o_desc = mkldnn::memory::desc(i_dims, get_mkldnn_type(out_type), i_fmt);
      auto o_mpd = memory::primitive_desc(o_desc, cpu_engine);
      auto reorder_pd = reorder::primitive_desc(i_mpd, o_mpd, attr);
      // Memories are created with null handles; real pointers are bound per call.
      i_mem_ = std::make_shared<mkldnn::memory>(i_mpd, nullptr);
      o_mem_ = std::make_shared<mkldnn::memory>(o_mpd, nullptr);
      fwd_pd_ = std::make_shared<mkldnn::reorder>(reorder_pd, *i_mem_, *o_mem_);
      initalized_ = true;
    }
    auto o_mem = CreateMKLDNNMem(outputs[0], o_mem_->get_primitive_desc(), req[0]);
    i_mem_->set_data_handle(i_mem->get_data_handle());
    o_mem_->set_data_handle(o_mem.second->get_data_handle());
    MKLDNNStream::Get()->RegisterPrim(*fwd_pd_);
    CommitOutput(outputs[0], o_mem);
    MKLDNNStream::Get()->Submit();
  }
}
// Thin NNVM dispatch shim: fetch the cached stateful operator from the op
// state and delegate the actual work to its Forward implementation.
static void SgMKLDNNQuantizeForward(const OpStatePtr &state_ptr, const OpContext &ctx,
                                    const std::vector<NDArray> &inputs,
                                    const std::vector<OpReqType> &req,
                                    const std::vector<NDArray> &outputs) {
  state_ptr.get_state<SgMKLDNNQuantizeOperator>().Forward(ctx, inputs, req, outputs);
}
} // namespace op
} // namespace mxnet
#endif // MXNET_USE_MKLDNN == 1
#endif // MXNET_OPERATOR_QUANTIZATION_MKLDNN_MKLDNN_QUANTIZE_V2_INL_H_
|
openmp.c | #include <sc.h>
#include <omp.h>
omp_lock_t writelock;
/** Print a greeting with the calling OpenMP thread's id, serialized through
 * the global \a writelock so concurrent threads do not interleave output. */
void
openmp_print_tid (void)
{
  omp_set_lock (&writelock);
  SC_PRODUCTIONF ("Hello from thread %i.\n", omp_get_thread_num ());
  omp_unset_lock (&writelock);
}
/** Demo driver: initialize MPI with full thread support, have every OpenMP
 * thread on every rank print its id, then tear everything down.
 *
 * Fixes over the original: the OpenMP lock is destroyed after use and both
 * the sc library and MPI are finalized (the original leaked all three).
 */
int
main (int argc, char *argv[])
{
  int mpiret, mpisize;
  int thread_lvl, num_threads;

  /* Request full thread support; the granted level comes back in thread_lvl. */
  mpiret =
    sc_MPI_Init_thread (&argc, &argv, sc_MPI_THREAD_MULTIPLE, &thread_lvl);
  SC_CHECK_MPI (mpiret);
  sc_init (sc_MPI_COMM_WORLD, 1, 1, NULL, SC_LP_DEFAULT);
  if (thread_lvl < sc_MPI_THREAD_MULTIPLE) {
    SC_GLOBAL_PRODUCTIONF ("MPI only supports thread level %d\n", thread_lvl);
  }
  else {
    mpiret = sc_MPI_Comm_size (sc_MPI_COMM_WORLD, &mpisize);
    SC_CHECK_MPI (mpiret);
    num_threads = omp_get_max_threads ();
    SC_GLOBAL_PRODUCTIONF ("Running on %i processes with %i threads each.\n",
                           mpisize, num_threads);
    omp_set_num_threads (num_threads);
    omp_init_lock (&writelock);
#pragma omp parallel
    {
      openmp_print_tid ();
    }
    /* Release the lock resource created above. */
    omp_destroy_lock (&writelock);
  }

  /* Tear down the sc library and MPI before exiting. */
  sc_finalize ();
  mpiret = sc_MPI_Finalize ();
  SC_CHECK_MPI (mpiret);
  return 0;
}
|
parfor.h | // Copyright 2019 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef PARFOR_H_
#define PARFOR_H_
#include <omp.h>
#include <cstdint>
#include <vector>
namespace qsim {
// Static helpers that split an index range [0, size) evenly across OpenMP
// threads. Work is only parallelized when more than one thread is requested
// and the range is at least MIN_SIZE; otherwise it runs serially.
template <uint64_t MIN_SIZE>
struct ParallelForT {
  // Invoke func(num_threads, thread_rank, index, args...) for every index.
  template <typename Function, typename... Args>
  static void Run(
      unsigned num_threads, uint64_t size, Function&& func, Args&&... args) {
    if (num_threads <= 1 || size < MIN_SIZE) {
      // Serial fallback: a single "thread" owns the whole range.
      for (uint64_t k = 0; k < size; ++k) {
        func(1, 0, k, args...);
      }
      return;
    }
#pragma omp parallel num_threads(num_threads)
    {
      unsigned total = omp_get_num_threads();
      unsigned rank = omp_get_thread_num();
      // Contiguous, nearly equal sub-ranges per thread.
      uint64_t begin = size * rank / total;
      uint64_t end = size * (rank + 1) / total;
      for (uint64_t k = begin; k < end; ++k) {
        func(total, rank, k, args...);
      }
    }
  }

  // Like Run, but folds every func(...) result into a single value with op.
  // Returns 0 (the identity used here) when num_threads is 0.
  template <typename Function, typename Op, typename... Args>
  static typename Op::result_type RunReduce(unsigned num_threads,
                                            uint64_t size, Function&& func,
                                            Op&& op, Args&&... args) {
    typename Op::result_type total_result = 0;
    if (num_threads > 1 && size >= MIN_SIZE) {
      // One slot per requested thread; untouched slots stay at the identity.
      std::vector<typename Op::result_type> per_thread(num_threads, 0);
#pragma omp parallel num_threads(num_threads)
      {
        unsigned total = omp_get_num_threads();
        unsigned rank = omp_get_thread_num();
        uint64_t begin = size * rank / total;
        uint64_t end = size * (rank + 1) / total;
        typename Op::result_type acc = 0;
        for (uint64_t k = begin; k < end; ++k) {
          acc = op(acc, func(total, rank, k, args...));
        }
        per_thread[rank] = acc;
      }
      // Sequential final reduction over the per-thread partials.
      for (unsigned t = 0; t < num_threads; ++t) {
        total_result = op(total_result, per_thread[t]);
      }
    } else if (num_threads > 0) {
      for (uint64_t k = 0; k < size; ++k) {
        total_result = op(total_result, func(1, 0, k, args...));
      }
    }
    return total_result;
  }
};
using ParallelFor = ParallelForT<1024>;
} // namespace qsim
#endif // PARFOR_H_
|
matrix.c |
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <omp.h>
#include "my_timers.h"
#define M 10
/**
* @brief Get the Cofactor object
*
* @param mat
* @param temp
* @param p
* @param q
* @param n
*/
void getCofactor(long** mat, long** temp, int p,
                 int q, int n)
{
    /* Destination cursor inside the (n-1)x(n-1) cofactor matrix. */
    int dst_row = 0;
    int dst_col = 0;
    for (int src_row = 0; src_row < n; src_row++)
    {
        /* The deleted row contributes nothing. */
        if (src_row == p)
            continue;
        for (int src_col = 0; src_col < n; src_col++)
        {
            /* Skip the deleted column as well. */
            if (src_col == q)
                continue;
            temp[dst_row][dst_col] = mat[src_row][src_col];
            /* Wrap to the next destination row after n-1 entries. */
            if (++dst_col == n - 1)
            {
                dst_col = 0;
                dst_row++;
            }
        }
    }
}
/**
* @brief Calculates determinant of "mat" n*n size matrix
* @param mat - pointer to long type matrix
* @param n - size of square matrix
* @return determinant of "mat" matrix, type long
*/
long determinantOfMatrix(long** mat, int n)
{
    long D = 0; // Initialize result
    // Base case : if matrix contains single element
    if (n == 1){
        return mat[0][0];
    }
    // Cofactor matrices are (n-1)x(n-1). The original allocated n rows with
    // element size sizeof(long) (wrong for a long* array) and M columns each,
    // which over-allocates and breaks for n > M; allocate exactly what is used.
    int sub = n - 1;
    long **temp = (long**)malloc(sub * sizeof(long*));
    for (int i = 0; i < sub; i++) {
        temp[i] = (long*)malloc(sub * sizeof(long));
    }
    int sign = 1; // To store sign multiplier
    // Laplace expansion along the first row.
    for (int j = 0; j < n; j++)
    {
        // Cofactor of mat[0][j]
        getCofactor(mat, temp, 0, j, n);
        D += sign * mat[0][j]
             * determinantOfMatrix(temp, sub);
        // terms are to be added with alternate sign
        sign = -sign;
    }
    for (int i = 0; i < sub; i++){
        free(temp[i]);
    }
    free(temp);
    return D;
}
/**
 * @brief Fill an MxM matrix with small random values, then compute its
 * determinant twice — with an OpenMP-parallel first-row cofactor expansion
 * and serially — timing and printing both results.
 *
 * Fixes over the original: standard `int main` with a return value, correct
 * `%ld` printf specifiers for `long` values (using `%d` is undefined
 * behavior), correct element size in the row allocations, and removal of the
 * unused `tt`/`tim` locals (the seed stays the constant 0).
 */
int main(void){
    int m = M;
    long det = 0;
    int i = 0;
    int j = 0;
    int y = 0;
    long **a = NULL;
    /* Constant seed keeps the matrix reproducible across runs. */
    srand(0);
    a = (long**)malloc(M*sizeof(long*));
    for(i=0; i < m; i++) {
        a[i] = (long*)malloc(sizeof(long)*M);   /* rows hold longs, not pointers */
    }
    if(a != NULL){
        for(i = 0; i < M; i++){
            for(j = 0; j < M; j++){
                a[i][j] = (long)(rand()%5);
                printf(" %ld ", a[i][j]);
            }
            printf("\n");
        }
        printf("********\n");
    }
    start_time();
    long **b = NULL;
    omp_set_num_threads(2);
    /* Each thread expands along different columns of the first row; the
       per-iteration signed terms are combined by the reduction on det. */
#pragma omp parallel for private(i, j, b) reduction(+:det)
    for(y = 0; y < M; y++){
        b = (long**)malloc(M*sizeof(long*));
        for(i=0; i < m; i++) {
            b[i] = (long*)malloc(sizeof(long)*M);
        }
        for(int x = 0; x < M; x++){
            for(int g = 0; g < M; g++){
                b[x][g] = 0;
            }
        }
        getCofactor(a, b, 0, y, m);
        if(y%2){
            det += (-1)*a[0][y]*determinantOfMatrix(b, m-1);
        }
        else{
            det += a[0][y]*determinantOfMatrix(b, m-1);
        }
        for(i=0; i < m; i++) {
            free(b[i]);
        }
        free(b);
    }
    stop_time();
    print_time("Elapsed time for n threads:");
    long det2 = 0;
    start_time();
    det2 = determinantOfMatrix(a, m);
    stop_time();
    print_time("Elapsed time for normal:");
    printf("Det: %ld\n", det);
    printf("Det2: %ld\n", det2);
    for(i=0; i < m; i++) {
        free(a[i]);
    }
    free(a);
    return 0;
}
cmontecarlo.c |
#include <inttypes.h>
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#ifdef WITHOPENMP
#include <omp.h>
#endif
#include "io.h"
#include "abbrev.h"
#include "status.h"
#include "rpacket.h"
#include "cmontecarlo.h"
/** Look for a place to insert a value in an inversely sorted float array.
*
* @param x an inversely (largest to lowest) sorted float array
* @param x_insert a value to insert
* @param imin lower bound
* @param imax upper bound
*
* @return index of the next boundary to the left
*/
tardis_error_t
reverse_binary_search (const double *x, double x_insert,
                       int64_t imin, int64_t imax, int64_t * result)
{
  /*
     Have in mind that *x points to a reverse sorted array.
     That is large values will have small indices and small ones
     will have large indices.
   */
  tardis_error_t ret_val = TARDIS_ERROR_OK;
  /* Out of range: x_insert must lie within [x[imax], x[imin]]. */
  if (x_insert > x[imin] || x_insert < x[imax])
    {
      ret_val = TARDIS_ERROR_BOUNDS_ERROR;
    }
  else
    {
      int imid = (imin + imax) >> 1;
      /* Shrink the bracket until at most two candidate intervals remain. */
      while (imax - imin > 2)
        {
          if (x[imid] < x_insert)
            {
              imax = imid + 1;
            }
          else
            {
              imin = imid;
            }
          imid = (imin + imax) >> 1;
        }
      /* Resolve the final one-or-two interval ambiguity. */
      if (imax - imin == 2 && x_insert < x[imin + 1])
        {
          *result = imin + 1;
        }
      else
        {
          *result = imin;
        }
    }
  return ret_val;
}
/** Insert a value in to an array of line frequencies
*
* @param nu array of line frequencies
* @param nu_insert value of nu key
* @param number_of_lines number of lines in the line list
*
* @return index of the next line ot the red. If the key value is redder than the reddest line returns number_of_lines.
*/
tardis_error_t
line_search (const double *nu, double nu_insert, int64_t number_of_lines,
             int64_t * result)
{
  /* nu is sorted from largest (bluest) to smallest (reddest) frequency. */
  tardis_error_t status = TARDIS_ERROR_OK;
  const int64_t first = 0;
  const int64_t last = number_of_lines - 1;
  if (nu_insert > nu[first])
    {
      /* Bluer than every line. */
      *result = first;
    }
  else if (nu_insert < nu[last])
    {
      /* Redder than every line. */
      *result = last + 1;
    }
  else
    {
      /* In range: locate the bracketing interval and report its red side. */
      status = reverse_binary_search (nu, nu_insert, first, last, result);
      ++(*result);
    }
  return status;
}
/** Standard binary search in an ascending-sorted double array.
 *
 * On an exact match *result is the matching index; otherwise *result is the
 * insertion point (index of the first element greater than x_insert).
 * Returns TARDIS_ERROR_BOUNDS_ERROR when x_insert lies outside
 * [x[imin], x[imax]].
 *
 * Fixes over the original: the post-loop if/else had two identical branches
 * (`*result = imin;` in both) that clobbered the exact-match index set before
 * `break`, and the midpoint was a narrowing `int`.
 */
tardis_error_t
binary_search (const double *x, double x_insert, int64_t imin,
               int64_t imax, int64_t * result)
{
  /*
     Have in mind that *x points to a sorted array.
     Like [1,2,3,4,5,...]
   */
  tardis_error_t ret_val = TARDIS_ERROR_OK;
  if (x_insert < x[imin] || x_insert > x[imax])
    {
      ret_val = TARDIS_ERROR_BOUNDS_ERROR;
    }
  else
    {
      while (imax >= imin)
        {
          int64_t imid = (imin + imax) / 2;
          if (x[imid] == x_insert)
            {
              /* Exact hit: report it directly instead of the insertion point. */
              *result = imid;
              return ret_val;
            }
          else if (x[imid] < x_insert)
            {
              imin = imid + 1;
            }
          else
            {
              imax = imid - 1;
            }
        }
      /* No exact match: imin is the insertion point. */
      *result = imin;
    }
  return ret_val;
}
/* Transform the packet's direction cosine from the comoving frame to the lab
 * frame. A no-op unless full relativity is enabled. */
void
angle_aberration_CMF_to_LF (rpacket_t *packet, const storage_model_t *storage)
{
  if (!storage->full_relativity)
    {
      return;
    }
  double beta = rpacket_get_r (packet) * storage->inverse_time_explosion * INVERSE_C;
  double mu_cmf = rpacket_get_mu (packet);
  /* Relativistic aberration formula. */
  rpacket_set_mu (packet, (mu_cmf + beta) / (1.0 + beta * mu_cmf));
}
/** Transform the lab frame direction cosine to the CMF
*
* @param packet
* @param storage
* @param mu lab frame direction cosine
*
* @return CMF direction cosine
*/
double
angle_aberration_LF_to_CMF (rpacket_t *packet, const storage_model_t *storage, double mu)
{
  /* Inverse aberration: lab-frame mu to comoving-frame mu. */
  double beta = rpacket_get_r (packet) * storage->inverse_time_explosion * INVERSE_C;
  double numerator = mu - beta;
  double denominator = 1.0 - beta * mu;
  return numerator / denominator;
}
/* Doppler factor for the packet at its current radius and direction;
 * includes the Lorentz gamma correction when full relativity is on. */
double
rpacket_doppler_factor (const rpacket_t *packet, const storage_model_t *storage)
{
  double beta = rpacket_get_r (packet) * storage->inverse_time_explosion * INVERSE_C;
  double first_order = 1.0 - rpacket_get_mu (packet) * beta;
  if (!storage->full_relativity)
    {
      return first_order;
    }
  return first_order / sqrt (1 - beta * beta);
}
/* Inverse Doppler factor for the packet. Note the relativistic branch uses
 * the exact (1 + mu*beta)/sqrt(1 - beta^2) form rather than a reciprocal. */
double
rpacket_inverse_doppler_factor (const rpacket_t *packet, const storage_model_t *storage)
{
  double beta = rpacket_get_r (packet) * storage->inverse_time_explosion * INVERSE_C;
  double mu = rpacket_get_mu (packet);
  if (storage->full_relativity)
    {
      return (1.0 + mu * beta) / sqrt (1 - beta * beta);
    }
  return 1.0 / (1.0 - mu * beta);
}
/* Bound-free cross section of continuum edge continuum_id at the comoving
 * frequency, either linearly interpolated from the tabulated values or via
 * the hydrogenic nu^-3 scaling; exits the process on an unknown treatment. */
double
bf_cross_section (const storage_model_t * storage, int64_t continuum_id, double comov_nu)
{
  double bf_xsect;
  double *x_sect = storage->photo_xsect[continuum_id]->x_sect;
  double *nu = storage->photo_xsect[continuum_id]->nu;
  switch (storage->bf_treatment)
    {
    case LIN_INTERPOLATION:
      {
        int64_t result;
        tardis_error_t error = binary_search (nu, comov_nu, 0,
          storage->photo_xsect[continuum_id]->no_of_points - 1, &result);
        if (error == TARDIS_ERROR_BOUNDS_ERROR)
          {
            /* Outside the tabulated frequency range: no absorption. */
            bf_xsect = 0.0;
          }
        else
          {
            /* Linear interpolation between the two bracketing table points. */
            bf_xsect = x_sect[result-1] + (comov_nu - nu[result-1]) / (nu[result] - nu[result-1])
              * (x_sect[result] - x_sect[result-1]);
          }
        break;
      }
    case HYDROGENIC:
      {
        /* sigma(nu) = sigma_0 * (nu_0 / nu)^3 */
        double nu_ratio = nu[0] / comov_nu;
        bf_xsect = x_sect[0] * nu_ratio * nu_ratio * nu_ratio;
        break;
      }
    default:
      fprintf (stderr, "(%d) is not a valid bound-free cross section treatment.\n", storage->bf_treatment);
      exit(1);
    }
  return bf_xsect;
}
/* Total bound-free opacity chi_bf at the packet's comoving frequency,
 * summed over all continua the photon can ionize. The running partial sums
 * are cached in chi_bf_tmp_partial (presumably for later edge sampling —
 * the consumer is outside this file). */
void calculate_chi_bf (rpacket_t * packet, storage_model_t * storage)
{
  double doppler_factor = rpacket_doppler_factor (packet, storage);
  double comov_nu = rpacket_get_nu (packet) * doppler_factor;
  int64_t no_of_continuum_edges = storage->no_of_edges;
  int64_t current_continuum_id;
  /* First edge (in the reverse-sorted edge list) this frequency can ionize. */
  line_search(storage->continuum_list_nu, comov_nu, no_of_continuum_edges, &current_continuum_id);
  rpacket_set_current_continuum_id (packet, current_continuum_id);
  int64_t shell_id = rpacket_get_current_shell_id (packet);
  double T = storage->t_electrons[shell_id];
  double boltzmann_factor = exp (-(H * comov_nu) / (KB * T));
  double bf_helper = 0;
  for(int64_t i = current_continuum_id; i < no_of_continuum_edges; i++)
    {
      // get the level population for the level ijk in the current shell:
      double l_pop = storage->l_pop[shell_id * no_of_continuum_edges + i];
      // get the level population ratio \frac{n_{0,j+1,k}}{n_{i,j,k}} \frac{n_{i,j,k}}{n_{0,j+1,k}}^{*}:
      double l_pop_r = storage->l_pop_r[shell_id * no_of_continuum_edges + i];
      double bf_x_sect = bf_cross_section (storage, i, comov_nu);
      if (bf_x_sect == 0.0)
        {
          /* A vanishing cross section terminates the accumulation early. */
          break;
        }
      /* (1 - l_pop_r * boltzmann_factor) corrects for stimulated recombination. */
      bf_helper += l_pop * bf_x_sect * (1.0 - l_pop_r * boltzmann_factor) * doppler_factor;
      packet->chi_bf_tmp_partial[i] = bf_helper;
    }
  rpacket_set_chi_boundfree (packet, bf_helper);
}
/* Free-free opacity at the packet's comoving frequency in its current shell,
 * stored on the packet (converted back with the Doppler factor). */
void calculate_chi_ff (rpacket_t * packet, const storage_model_t * storage)
{
  int64_t shell_id = rpacket_get_current_shell_id (packet);
  double doppler_factor = rpacket_doppler_factor (packet, storage);
  double comov_nu = rpacket_get_nu (packet) * doppler_factor;
  double T = storage->t_electrons[shell_id];
  double boltzmann_factor = exp (-(H * comov_nu) / KB / T);
  /* Kramers-like scaling: chi_ff ~ nu^-3 with stimulated-emission correction. */
  double chi_ff = storage->chi_ff_factor[shell_id]
    * (1 - boltzmann_factor) * pow (comov_nu, -3);
  rpacket_set_chi_freefree (packet, chi_ff * doppler_factor);
}
/* Distance along the packet's direction to the next radial shell boundary.
 * Also records in next_shell_id whether the crossing is outward (+1) or
 * inward (-1). */
void
compute_distance2boundary (rpacket_t * packet, const storage_model_t * storage)
{
  double r = rpacket_get_r (packet);
  double mu = rpacket_get_mu (packet);
  double r_outer = storage->r_outer[rpacket_get_current_shell_id (packet)];
  double r_inner = storage->r_inner[rpacket_get_current_shell_id (packet)];
  double distance;
  if (mu > 0.0)
    {
      /* Heading outward: always reaches the outer shell radius. */
      rpacket_set_next_shell_id (packet, 1);
      distance = sqrt (r_outer * r_outer + ((mu * mu - 1.0) * r * r)) - (r * mu);
    }
  else
    {
      /* Heading inward: hits the inner sphere only when the ray/sphere
         intersection discriminant is non-negative. */
      double check = r_inner * r_inner + (r * r * (mu * mu - 1.0));
      if (check >= 0.0)
        {
          rpacket_set_next_shell_id (packet, -1);
          distance = - r * mu - sqrt (check);
        }
      else
        {
          /* Misses the inner sphere and exits through the outer shell. */
          rpacket_set_next_shell_id (packet, 1);
          distance = sqrt (r_outer * r_outer + ((mu * mu - 1.0) * r * r)) - (r * mu);
        }
    }
  rpacket_set_d_boundary (packet, distance);
}
/* Distance to the next line interaction (stored via rpacket_set_d_line).
 * Returns TARDIS_ERROR_OK normally; if the comoving frequency has already
 * dropped below the next line's frequency, diagnostics are dumped to stderr
 * and TARDIS_ERROR_COMOV_NU_LESS_THAN_NU_LINE is returned. */
tardis_error_t
compute_distance2line (rpacket_t * packet, const storage_model_t * storage)
{
  if (!rpacket_get_last_line (packet))
    {
      double r = rpacket_get_r (packet);
      double mu = rpacket_get_mu (packet);
      double nu = rpacket_get_nu (packet);
      double nu_line = rpacket_get_nu_line (packet);
      double distance, nu_diff;
      double ct = storage->time_explosion * C;
      double doppler_factor = rpacket_doppler_factor (packet, storage);
      double comov_nu = nu * doppler_factor;
      if ( (nu_diff = comov_nu - nu_line) >= 0)
        {
          if (!storage->full_relativity)
            {
              /* First-order Doppler: distance until the packet redshifts
                 into resonance with the line. */
              distance = (nu_diff / nu) * ct;
            }
          else
            {
              /* Fully relativistic resonance condition. */
              double nu_r = nu_line / nu;
              distance = - mu * r + (ct - nu_r * nu_r * sqrt(ct * ct -
                (1 + r * r * (1 - mu * mu) * (1 + pow (nu_r, -2))))) / (1 + nu_r * nu_r);
            }
          rpacket_set_d_line (packet, distance);
          return TARDIS_ERROR_OK;
        }
      else
        {
          /* comov_nu < nu_line should never happen; dump context to stderr. */
          if (rpacket_get_next_line_id (packet) == storage->no_of_lines - 1)
            {
              fprintf (stderr, "last_line = %f\n",
                       storage->
                       line_list_nu[rpacket_get_next_line_id (packet) - 1]);
              fprintf (stderr, "Last line in line list reached!");
            }
          else if (rpacket_get_next_line_id (packet) == 0)
            {
              fprintf (stderr, "First line in line list!");
              fprintf (stderr, "next_line = %f\n",
                       storage->
                       line_list_nu[rpacket_get_next_line_id (packet) + 1]);
            }
          else
            {
              fprintf (stderr, "last_line = %f\n",
                       storage->
                       line_list_nu[rpacket_get_next_line_id (packet) - 1]);
              fprintf (stderr, "next_line = %f\n",
                       storage->
                       line_list_nu[rpacket_get_next_line_id (packet) + 1]);
            }
          fprintf (stderr, "ERROR: Comoving nu less than nu_line!\n");
          fprintf (stderr, "comov_nu = %f\n", comov_nu);
          fprintf (stderr, "nu_line = %f\n", nu_line);
          fprintf (stderr, "(comov_nu - nu_line) / nu_line = %f\n",
                   (comov_nu - nu_line) / nu_line);
          fprintf (stderr, "r = %f\n", r);
          fprintf (stderr, "mu = %f\n", mu);
          fprintf (stderr, "nu = %f\n", nu);
          fprintf (stderr, "doppler_factor = %f\n", doppler_factor);
          fprintf (stderr, "cur_zone_id = %" PRIi64 "\n", rpacket_get_current_shell_id (packet));
          return TARDIS_ERROR_COMOV_NU_LESS_THAN_NU_LINE;
        }
    }
  else
    {
      /* Past the last line: a line interaction can never occur. */
      rpacket_set_d_line (packet, MISS_DISTANCE);
      return TARDIS_ERROR_OK;
    }
}
/* Distance to the next continuum event (electron scattering, plus bound-free
 * and free-free opacity when the continuum is on), derived from the packet's
 * remaining optical depth tau_event. */
void
compute_distance2continuum (rpacket_t * packet, storage_model_t * storage)
{
  double chi_continuum, d_continuum;
  double chi_electron = storage->electron_densities[rpacket_get_current_shell_id(packet)] *
    storage->sigma_thomson;
  if (storage->full_relativity)
    {
      chi_electron *= rpacket_doppler_factor (packet, storage);
    }
  if (storage->cont_status == CONTINUUM_ON)
    {
      if (packet->compute_chi_bf)
        {
          calculate_chi_bf (packet, storage);
          calculate_chi_ff (packet, storage);
        }
      else
        {
          /* Cached opacities are used this step; recompute on the next one. */
          packet->compute_chi_bf=true;
        }
      chi_continuum = rpacket_get_chi_boundfree (packet) + rpacket_get_chi_freefree (packet) + chi_electron;
      d_continuum = rpacket_get_tau_event (packet) / chi_continuum;
    }
  else
    {
      /* Electron scattering only. */
      chi_continuum = chi_electron;
      d_continuum = storage->inverse_electron_densities[rpacket_get_current_shell_id (packet)] *
        storage->inverse_sigma_thomson * rpacket_get_tau_event (packet);
    }
  if (rpacket_get_virtual_packet(packet) > 0)
    {
      //Set all continuum distances to MISS_DISTANCE in case of an virtual_packet
      d_continuum = MISS_DISTANCE;
      packet->compute_chi_bf = false;
    }
  else
    {
      // fprintf(stderr, "--------\n");
      // fprintf(stderr, "nu = %e \n", rpacket_get_nu(packet));
      // fprintf(stderr, "chi_electron = %e\n", chi_electron);
      // fprintf(stderr, "chi_boundfree = %e\n", calculate_chi_bf(packet, storage));
      // fprintf(stderr, "chi_line = %e \n", rpacket_get_tau_event(packet) / rpacket_get_d_line(packet));
      // fprintf(stderr, "--------\n");
      //rpacket_set_chi_freefree(packet, chi_freefree);
      rpacket_set_chi_electron (packet, chi_electron);
    }
  rpacket_set_chi_continuum (packet, chi_continuum);
  rpacket_set_d_continuum (packet, d_continuum);
}
/* Walk the macro-atom internal transition chain until a de-activating
 * transition is drawn, then dispatch the matching emission process. */
void
macro_atom (rpacket_t * packet, const storage_model_t * storage, rk_state *mt_state)
{
  int emit = 0, i = 0, offset = -1;
  uint64_t activate_level = rpacket_get_macro_atom_activation_level (packet);
  /* Non-negative transition types are internal jumps; keep sampling until a
     de-activating (negative) type is drawn. */
  while (emit >= 0)
    {
      double event_random = rk_double (mt_state);
      i = storage->macro_block_references[activate_level] - 1;
      double p = 0.0;
      /* Transition probabilities are stored per shell, hence the row offset. */
      offset = storage->transition_probabilities_nd *
        rpacket_get_current_shell_id (packet);
      /* Linear sampling of the cumulative transition probabilities. */
      do
        {
          ++i;
          p += storage->transition_probabilities[offset + i];
        }
      while (p <= event_random);
      emit = storage->transition_type[i];
      activate_level = storage->destination_level_id[i];
    }
  switch (emit)
    {
    case BB_EMISSION:
      line_emission (packet, storage, storage->transition_line_id[i], mt_state);
      break;
    case BF_EMISSION:
      rpacket_set_current_continuum_id (packet, storage->transition_line_id[i]);
      storage->last_line_interaction_out_id[rpacket_get_id (packet)] =
        rpacket_get_current_continuum_id (packet);
      continuum_emission (packet, storage, mt_state, sample_nu_free_bound, 3);
      break;
    case FF_EMISSION:
      continuum_emission (packet, storage, mt_state, sample_nu_free_free, 4);
      break;
    case ADIABATIC_COOLING:
      storage->last_interaction_type[rpacket_get_id (packet)] = 5;
      rpacket_set_status (packet, TARDIS_PACKET_STATUS_REABSORBED);
      break;
    default:
      fprintf (stderr, "This process for macro-atom deactivation should not exist! (emit = %d)\n", emit);
      exit(1);
    }
}
/* Advance the packet by distance, updating its radius and direction cosine,
 * and accumulate the j and nu-bar radiation-field estimators for real
 * (non-virtual) packets. */
void
move_packet (rpacket_t * packet, storage_model_t * storage, double distance)
{
  double doppler_factor = rpacket_doppler_factor (packet, storage);
  if (distance > 0.0)
    {
      /* Law of cosines gives the new radius; mu follows from geometry. */
      double r = rpacket_get_r (packet);
      double new_r =
        sqrt (r * r + distance * distance +
              2.0 * r * distance * rpacket_get_mu (packet));
      rpacket_set_mu (packet,
                      (rpacket_get_mu (packet) * r + distance) / new_r);
      rpacket_set_r (packet, new_r);
      if (rpacket_get_virtual_packet (packet) <= 0)
        {
          double comov_energy = rpacket_get_energy (packet) * doppler_factor;
          double comov_nu = rpacket_get_nu (packet) * doppler_factor;
          if (storage->full_relativity)
            {
              distance *= doppler_factor;
            }
          /* Estimator arrays are shared across OpenMP threads: update atomically. */
#ifdef WITHOPENMP
#pragma omp atomic
#endif
          storage->js[rpacket_get_current_shell_id (packet)] +=
            comov_energy * distance;
#ifdef WITHOPENMP
#pragma omp atomic
#endif
          storage->nubars[rpacket_get_current_shell_id (packet)] +=
            comov_energy * distance * comov_nu;
          if (storage->cont_status)
            {
              increment_continuum_estimators(packet, storage, distance, comov_nu, comov_energy);
            }
        }
    }
}
/* Accumulate the free-free heating and photoionization estimators for the
 * packet's current shell; all updates are atomic because storage is shared
 * between OpenMP threads. */
void
increment_continuum_estimators (const rpacket_t * packet, storage_model_t * storage, double distance,
                                double comov_nu, double comov_energy)
{
  int64_t current_continuum_id;
  int64_t no_of_continuum_edges = storage->no_of_edges;
  int64_t shell_id = rpacket_get_current_shell_id (packet);
  /* First continuum edge this comoving frequency can ionize. */
  line_search(storage->continuum_list_nu, comov_nu, no_of_continuum_edges, &current_continuum_id);
  double T = storage->t_electrons[shell_id];
  double boltzmann_factor = exp (-(H * comov_nu) / (KB * T));
#ifdef WITHOPENMP
#pragma omp atomic
#endif
  storage->ff_heating_estimator[shell_id] += comov_energy * distance * rpacket_get_chi_freefree (packet);
  for(int64_t i = current_continuum_id; i < no_of_continuum_edges; i++)
    {
      double bf_xsect = bf_cross_section (storage, i, comov_nu);
      /* Estimators are laid out edge-major: one slot per (edge, shell). */
      int64_t photo_ion_idx = i * storage->no_of_shells + shell_id;
      double photo_ion_estimator_helper = comov_energy * distance * bf_xsect / comov_nu;
      double bf_heating_estimator_helper =
        comov_energy * distance * bf_xsect * (1. - storage->continuum_list_nu[i] / comov_nu);
#ifdef WITHOPENMP
#pragma omp atomic
#endif
      storage->photo_ion_estimator[photo_ion_idx] += photo_ion_estimator_helper;
#ifdef WITHOPENMP
#pragma omp atomic
#endif
      storage->stim_recomb_estimator[photo_ion_idx] += photo_ion_estimator_helper * boltzmann_factor;
#ifdef WITHOPENMP
#pragma omp atomic
#endif
      storage->bf_heating_estimator[photo_ion_idx] += bf_heating_estimator_helper;
#ifdef WITHOPENMP
#pragma omp atomic
#endif
      storage->stim_recomb_cooling_estimator[photo_ion_idx] += bf_heating_estimator_helper * boltzmann_factor;
      if (photo_ion_estimator_helper != 0.0)
        {
#ifdef WITHOPENMP
#pragma omp atomic
#endif
          storage->photo_ion_estimator_statistics[photo_ion_idx] += 1;
        }
      else
        {
          /* Zero cross section: remaining edges contribute nothing, stop. */
          break;
        }
    }
}
/* Energy contribution of the packet at the point where it would interact
 * with the line, used by the j-blue and Edotlu estimators. */
double
get_increment_j_blue_estimator_energy (const rpacket_t * packet,
                                       const storage_model_t * storage,
                                       double d_line)
{
  if (storage->full_relativity)
    {
      // Accurate up to a factor 1 / gamma
      return rpacket_get_energy (packet);
    }
  /* Project the packet forward by d_line and evaluate the Doppler factor at
     the interaction point. */
  double r = rpacket_get_r (packet);
  double r_interaction = sqrt (r * r + d_line * d_line +
                               2.0 * r * d_line * rpacket_get_mu (packet));
  double mu_interaction = (rpacket_get_mu (packet) * r + d_line) / r_interaction;
  double doppler_factor = 1.0 - mu_interaction * r_interaction *
    storage->inverse_time_explosion * INVERSE_C;
  return rpacket_get_energy (packet) * doppler_factor;
}
/* Add this packet's contribution to the j-blue estimator of one line, if
 * estimator collection is enabled (array non-NULL). Atomic: the estimator
 * array is shared across OpenMP threads. */
void
increment_j_blue_estimator (const rpacket_t * packet, storage_model_t * storage,
                            double d_line, int64_t j_blue_idx)
{
  if (storage->line_lists_j_blues != NULL)
    {
      double energy = get_increment_j_blue_estimator_energy (packet, storage,
                                                             d_line);
#ifdef WITHOPENMP
#pragma omp atomic
#endif
      storage->line_lists_j_blues[j_blue_idx] +=
        energy / rpacket_get_nu (packet);
    }
}
/* Add this packet's energy at the line-interaction point to the Edotlu
 * estimator of one line, if estimator collection is enabled. Atomic for the
 * same reason as the j-blue estimator. */
void
increment_Edotlu_estimator (const rpacket_t * packet, storage_model_t * storage,
                            double d_line, int64_t line_idx)
{
  if (storage->line_lists_Edotlu != NULL)
    {
      double energy = get_increment_j_blue_estimator_energy (packet, storage,
                                                             d_line);
#ifdef WITHOPENMP
#pragma omp atomic
#endif
      storage->line_lists_Edotlu[line_idx] += energy;
    }
}
/* Propagate one packet. virtual_mode == 0 runs the real packet through the
 * main loop; any other mode spawns a fan of virtual packets over the allowed
 * direction cone, each weighted by the chosen angular sampling scheme, and
 * bins them into the virtual spectrum.
 * Returns the reabsorbed flag from the packet loop, or 1 when the packet's
 * frequency lies outside the virtual spectrum range. */
int64_t
montecarlo_one_packet (storage_model_t * storage, rpacket_t * packet,
                       int64_t virtual_mode, rk_state *mt_state)
{
  int64_t reabsorbed=-1;
  if (virtual_mode == 0)
    {
      reabsorbed = montecarlo_one_packet_loop (storage, packet, 0, mt_state);
    }
  else
    {
      if ((rpacket_get_nu (packet) > storage->spectrum_virt_start_nu) && (rpacket_get_nu(packet) < storage->spectrum_virt_end_nu))
        {
          for (int64_t i = 0; i < rpacket_get_virtual_packet_flag (packet); i++)
            {
              double weight;
              rpacket_t virt_packet = *packet;
              double mu_min;
              /* Above the photosphere only directions that clear it are
                 allowed; at/below it any outward direction is. */
              if (rpacket_get_r(&virt_packet) > storage->r_inner[0])
                {
                  mu_min =
                    -1.0 * sqrt (1.0 -
                                 (storage->r_inner[0] / rpacket_get_r(&virt_packet)) *
                                 (storage->r_inner[0] / rpacket_get_r(&virt_packet)));
                  if (storage->full_relativity)
                    {
                      // Need to transform the angular size of the photosphere into the CMF
                      mu_min = angle_aberration_LF_to_CMF (&virt_packet, storage, mu_min);
                    }
                }
              else
                {
                  mu_min = 0.0;
                }
              /* Stratified sampling of mu over the allowed cone. */
              double mu_bin = (1.0 - mu_min) / rpacket_get_virtual_packet_flag (packet);
              rpacket_set_mu(&virt_packet,mu_min + (i + rk_double (mt_state)) * mu_bin);
              switch (virtual_mode)
                {
                case -2:
                  weight = 1.0 / rpacket_get_virtual_packet_flag (packet);
                  break;
                case -1:
                  weight =
                    2.0 * rpacket_get_mu(&virt_packet) /
                    rpacket_get_virtual_packet_flag (packet);
                  break;
                case 1:
                  weight =
                    (1.0 -
                     mu_min) / 2.0 / rpacket_get_virtual_packet_flag (packet);
                  break;
                default:
                  fprintf (stderr, "Something has gone horribly wrong!\n");
                  // FIXME MR: we need to somehow signal an error here
                  // I'm adding an exit() here to inform the compiler about the impossible path
                  exit(1);
                }
              angle_aberration_CMF_to_LF (&virt_packet, storage);
              /* Rescale energy/frequency for the new direction. */
              double doppler_factor_ratio =
                rpacket_doppler_factor (packet, storage) /
                rpacket_doppler_factor (&virt_packet, storage);
              rpacket_set_energy(&virt_packet,
                                 rpacket_get_energy (packet) * doppler_factor_ratio);
              rpacket_set_nu(&virt_packet,rpacket_get_nu (packet) * doppler_factor_ratio);
              reabsorbed = montecarlo_one_packet_loop (storage, &virt_packet, 1, mt_state);
#ifdef WITH_VPACKET_LOGGING
#ifdef WITHOPENMP
#pragma omp critical
                {
#endif // WITHOPENMP
                  /* Grow the logging arrays geometrically when full. */
                  if (storage->virt_packet_count >= storage->virt_array_size)
                    {
                      storage->virt_array_size *= 2;
                      storage->virt_packet_nus = safe_realloc(storage->virt_packet_nus, sizeof(double) * storage->virt_array_size);
                      storage->virt_packet_energies = safe_realloc(storage->virt_packet_energies, sizeof(double) * storage->virt_array_size);
                      storage->virt_packet_last_interaction_in_nu = safe_realloc(storage->virt_packet_last_interaction_in_nu, sizeof(double) * storage->virt_array_size);
                      storage->virt_packet_last_interaction_type = safe_realloc(storage->virt_packet_last_interaction_type, sizeof(int64_t) * storage->virt_array_size);
                      storage->virt_packet_last_line_interaction_in_id = safe_realloc(storage->virt_packet_last_line_interaction_in_id, sizeof(int64_t) * storage->virt_array_size);
                      storage->virt_packet_last_line_interaction_out_id = safe_realloc(storage->virt_packet_last_line_interaction_out_id, sizeof(int64_t) * storage->virt_array_size);
                    }
                  storage->virt_packet_nus[storage->virt_packet_count] = rpacket_get_nu(&virt_packet);
                  storage->virt_packet_energies[storage->virt_packet_count] = rpacket_get_energy(&virt_packet) * weight;
                  storage->virt_packet_last_interaction_in_nu[storage->virt_packet_count] = storage->last_interaction_in_nu[rpacket_get_id (packet)];
                  storage->virt_packet_last_interaction_type[storage->virt_packet_count] = storage->last_interaction_type[rpacket_get_id (packet)];
                  storage->virt_packet_last_line_interaction_in_id[storage->virt_packet_count] = storage->last_line_interaction_in_id[rpacket_get_id (packet)];
                  storage->virt_packet_last_line_interaction_out_id[storage->virt_packet_count] = storage->last_line_interaction_out_id[rpacket_get_id (packet)];
                  storage->virt_packet_count += 1;
#ifdef WITHOPENMP
                }
#endif // WITHOPENMP
#endif // WITH_VPACKET_LOGGING
              /* Bin the weighted virtual packet into the virtual spectrum. */
              if ((rpacket_get_nu(&virt_packet) < storage->spectrum_end_nu) &&
                  (rpacket_get_nu(&virt_packet) > storage->spectrum_start_nu))
                {
#ifdef WITHOPENMP
#pragma omp critical
                    {
#endif // WITHOPENMP
                      int64_t virt_id_nu =
                        floor ((rpacket_get_nu(&virt_packet) -
                                storage->spectrum_start_nu) /
                               storage->spectrum_delta_nu);
                      storage->spectrum_virt_nu[virt_id_nu] +=
                        rpacket_get_energy(&virt_packet) * weight;
#ifdef WITHOPENMP
                    }
#endif // WITHOPENMP
                }
            }
        }
      else
        {
          return 1;
        }
    }
  return reabsorbed;
}
/* Move the packet to the boundary it hit and handle the crossing: interior
 * boundaries shift the shell id, the outermost boundary emits the packet, and
 * the innermost boundary either reabsorbs it or — with a reflective inner
 * boundary — re-emits it with a freshly sampled direction. */
void
move_packet_across_shell_boundary (rpacket_t * packet,
                                   storage_model_t * storage, double distance, rk_state *mt_state)
{
  move_packet (packet, storage, distance);
  if (rpacket_get_virtual_packet (packet) > 0)
    {
      /* Virtual packets only accumulate optical depth; they never scatter. */
      double delta_tau_event = rpacket_get_chi_continuum(packet) * distance;
      rpacket_set_tau_event (packet,
                             rpacket_get_tau_event (packet) +
                             delta_tau_event);
      packet->compute_chi_bf = true;
    }
  else
    {
      /* Real packets draw a new optical depth to the next event. */
      rpacket_reset_tau_event (packet, mt_state);
    }
  if ((rpacket_get_current_shell_id (packet) < storage->no_of_shells - 1
       && rpacket_get_next_shell_id (packet) == 1)
      || (rpacket_get_current_shell_id (packet) > 0
          && rpacket_get_next_shell_id (packet) == -1))
    {
      /* Interior boundary: step into the adjacent shell. */
      rpacket_set_current_shell_id (packet,
                                    rpacket_get_current_shell_id (packet) +
                                    rpacket_get_next_shell_id (packet));
    }
  else if (rpacket_get_next_shell_id (packet) == 1)
    {
      /* Crossed the outermost boundary: the packet escapes. */
      rpacket_set_status (packet, TARDIS_PACKET_STATUS_EMITTED);
    }
  else if ((storage->reflective_inner_boundary == 0) ||
           (rk_double (mt_state) > storage->inner_boundary_albedo))
    {
      rpacket_set_status (packet, TARDIS_PACKET_STATUS_REABSORBED);
    }
  else
    {
      /* Reflective inner boundary: re-emit with a new direction while
         conserving comoving-frame frequency and energy. */
      double doppler_factor = rpacket_doppler_factor (packet, storage);
      double comov_nu = rpacket_get_nu (packet) * doppler_factor;
      double comov_energy = rpacket_get_energy (packet) * doppler_factor;
      // TODO: correct
      rpacket_set_mu (packet, rk_double (mt_state));
      double inverse_doppler_factor = rpacket_inverse_doppler_factor (packet, storage);
      rpacket_set_nu (packet, comov_nu * inverse_doppler_factor);
      rpacket_set_energy (packet, comov_energy * inverse_doppler_factor);
      if (rpacket_get_virtual_packet_flag (packet) > 0)
        {
          montecarlo_one_packet (storage, packet, -2, mt_state);
        }
    }
}
void
montecarlo_thomson_scatter (rpacket_t * packet, storage_model_t * storage,
double distance, rk_state *mt_state)
{
  /* Elastic (Thomson) scattering: move to the interaction point, conserve
     nu and energy in the comoving frame, and re-emit isotropically. */
  move_packet (packet, storage, distance);
  const double lf_to_cmf = rpacket_doppler_factor (packet, storage);
  const double cmf_nu = rpacket_get_nu (packet) * lf_to_cmf;
  const double cmf_energy = rpacket_get_energy (packet) * lf_to_cmf;
  /* Draw a new direction cosine uniformly in [-1, 1). */
  rpacket_set_mu (packet, 2.0 * rk_double (mt_state) - 1.0);
  const double cmf_to_lf = rpacket_inverse_doppler_factor (packet, storage);
  rpacket_set_nu (packet, cmf_nu * cmf_to_lf);
  rpacket_set_energy (packet, cmf_energy * cmf_to_lf);
  rpacket_reset_tau_event (packet, mt_state);
  /* Interaction type 1 == electron (Thomson) scattering. */
  storage->last_interaction_type[rpacket_get_id (packet)] = 1;
  angle_aberration_CMF_to_LF (packet, storage);
  if (rpacket_get_virtual_packet_flag (packet) > 0)
    {
      create_vpacket (storage, packet, mt_state);
    }
}
void
montecarlo_bound_free_scatter (rpacket_t * packet, storage_model_t * storage, double distance, rk_state *mt_state)
{
/* Bound-free (photoionization) absorption: select the continuum edge,
   absorb the packet at the interaction point, and hand the energy to the
   macro-atom machinery as ionization or thermal energy. */
// current position in list of continuum edges -> indicates which bound-free processes are possible
int64_t ccontinuum = rpacket_get_current_continuum_id (packet);
// Determine in which continuum the bf-absorption occurs
double chi_bf = rpacket_get_chi_boundfree (packet);
double zrand = rk_double (mt_state);
double zrand_x_chibf = zrand * chi_bf;
/* Walk the partial sums of the bound-free opacity until the sampled
   value falls inside a continuum's contribution. */
while ((ccontinuum < storage->no_of_edges - 1) && (packet->chi_bf_tmp_partial[ccontinuum] <= zrand_x_chibf))
{
ccontinuum++;
}
rpacket_set_current_continuum_id (packet, ccontinuum);
/* For consistency reasons the branching between ionization and thermal energy is determined using the
comoving frequency at the initial position instead of the frequency at the point of interaction */
double comov_nu = rpacket_get_nu (packet) * rpacket_doppler_factor (packet, storage);
/* Move the packet to the place of absorption, select a direction for re-emission and impose energy conservation
in the co-moving frame. */
move_packet (packet, storage, distance);
double old_doppler_factor = rpacket_doppler_factor (packet, storage);
rpacket_set_mu (packet, 2.0 * rk_double (mt_state) - 1.0);
double inverse_doppler_factor = rpacket_inverse_doppler_factor (packet, storage);
double comov_energy = rpacket_get_energy (packet) * old_doppler_factor;
rpacket_set_energy (packet, comov_energy * inverse_doppler_factor);
storage->last_interaction_type[rpacket_get_id (packet)] = 3; // last interaction was a bf-absorption
storage->last_line_interaction_in_id[rpacket_get_id (packet)] = ccontinuum;
// Convert the rpacket to thermal or ionization energy
zrand = rk_double (mt_state);
/* Probability nu_edge/nu goes to ionization (edge macro level); the
   remainder becomes a thermal k-packet. */
int64_t activate_level = (zrand < storage->continuum_list_nu[ccontinuum] / comov_nu) ?
storage->cont_edge2macro_level[ccontinuum] : storage->kpacket2macro_level;
rpacket_set_macro_atom_activation_level (packet, activate_level);
macro_atom (packet, storage, mt_state);
}
void
montecarlo_free_free_scatter (rpacket_t * packet, storage_model_t * storage, double distance, rk_state *mt_state)
{
/* Free-free absorption: absorb the packet at the interaction point and
   convert it to a thermal k-packet handled by the macro-atom machinery. */
/* Move the packet to the place of absorption, select a direction for re-emission and impose energy conservation
in the co-moving frame. */
move_packet (packet, storage, distance);
double old_doppler_factor = rpacket_doppler_factor (packet, storage);
rpacket_set_mu (packet, 2.0 * rk_double (mt_state) - 1.0);
double inverse_doppler_factor = rpacket_inverse_doppler_factor (packet, storage);
double comov_energy = rpacket_get_energy (packet) * old_doppler_factor;
rpacket_set_energy (packet, comov_energy * inverse_doppler_factor);
storage->last_interaction_type[rpacket_get_id (packet)] = 4; // last interaction was a ff-absorption
// Create a k-packet
rpacket_set_macro_atom_activation_level (packet, storage->kpacket2macro_level);
macro_atom (packet, storage, mt_state);
}
double
sample_nu_free_free (const rpacket_t * packet, const storage_model_t * storage, rk_state *mt_state)
{
  /* Sample a comoving-frame frequency for free-free emission from the
     electron temperature of the packet's current shell.
     Lucy 2003 MC II Eq.41. */
  const double t_electron =
    storage->t_electrons[rpacket_get_current_shell_id (packet)];
  return -KB * t_electron / H * log (rk_double (mt_state));
}
double
sample_nu_free_bound (const rpacket_t * packet, const storage_model_t * storage, rk_state *mt_state)
{
  /* Sample a comoving-frame frequency for bound-free (recombination)
     emission: the threshold frequency of the current continuum edge plus a
     thermal contribution. Lucy 2003 MC II Eq.26. */
  const double nu_edge =
    storage->continuum_list_nu[rpacket_get_current_continuum_id (packet)];
  const double t_electron =
    storage->t_electrons[rpacket_get_current_shell_id (packet)];
  const double thermal_term = KB * t_electron / H / nu_edge * log (rk_double (mt_state));
  return nu_edge * (1 - thermal_term);
}
void
montecarlo_line_scatter (rpacket_t * packet, storage_model_t * storage,
double distance, rk_state *mt_state)
{
/* Handle a packet reaching the resonance point of the next line in the
   list: either accumulate tau (virtual packet), interact (absorption into
   scatter/macro-atom), or pass the line and subtract its tau. */
uint64_t next_line_id = rpacket_get_next_line_id (packet);
/* Index into the (line, shell) 2-D tables flattened line-major. */
uint64_t line2d_idx = next_line_id +
storage->no_of_lines * rpacket_get_current_shell_id (packet);
if (rpacket_get_virtual_packet (packet) == 0)
{
/* Only real packets contribute to the MC estimators. */
increment_j_blue_estimator (packet, storage, distance, line2d_idx);
increment_Edotlu_estimator (packet, storage, distance, line2d_idx);
}
double tau_line =
storage->line_lists_tau_sobolevs[line2d_idx];
double tau_continuum = rpacket_get_chi_continuum(packet) * distance;
double tau_combined = tau_line + tau_continuum;
//rpacket_set_next_line_id (packet, rpacket_get_next_line_id (packet) + 1);
if (next_line_id + 1 == storage->no_of_lines)
{
/* This was the final line in the list. */
rpacket_set_last_line (packet, true);
}
if (rpacket_get_virtual_packet (packet) > 0)
{
/* Virtual packets never interact; they just pick up the line's tau. */
rpacket_set_tau_event (packet,
rpacket_get_tau_event (packet) + tau_line);
rpacket_set_next_line_id (packet, next_line_id + 1);
test_for_close_line (packet, storage);
}
else if (rpacket_get_tau_event (packet) < tau_combined)
{ // Line absorption occurs
move_packet (packet, storage, distance);
double old_doppler_factor = rpacket_doppler_factor (packet, storage);
rpacket_set_mu (packet, 2.0 * rk_double (mt_state) - 1.0);
double inverse_doppler_factor = rpacket_inverse_doppler_factor (packet, storage);
double comov_energy = rpacket_get_energy (packet) * old_doppler_factor;
rpacket_set_energy (packet, comov_energy * inverse_doppler_factor);
/* Record interaction diagnostics for this packet id. */
storage->last_interaction_in_nu[rpacket_get_id (packet)] =
rpacket_get_nu (packet);
storage->last_line_interaction_in_id[rpacket_get_id (packet)] =
next_line_id;
storage->last_line_interaction_shell_id[rpacket_get_id (packet)] =
rpacket_get_current_shell_id (packet);
storage->last_interaction_type[rpacket_get_id (packet)] = 2;
if (storage->line_interaction_id == 0)
{
/* line_interaction_id 0: pure resonance scatter in the same line. */
line_emission (packet, storage, next_line_id, mt_state);
}
else if (storage->line_interaction_id >= 1)
{
/* >= 1: downbranch / macro-atom treatment. */
rpacket_set_macro_atom_activation_level (packet,
storage->line2macro_level_upper[next_line_id]);
macro_atom (packet, storage, mt_state);
}
}
else
{ // Packet passes line without interacting
rpacket_set_tau_event (packet,
rpacket_get_tau_event (packet) - tau_line);
rpacket_set_next_line_id (packet, next_line_id + 1);
packet->compute_chi_bf = false;
test_for_close_line (packet, storage);
}
}
void
line_emission (rpacket_t * packet, storage_model_t * storage, int64_t emission_line_id, rk_state *mt_state)
{
/* Re-emit the packet in the given line: set the lab-frame frequency from
   the line's rest frequency, advance to the next line, and optionally
   spawn virtual packets. */
double inverse_doppler_factor = rpacket_inverse_doppler_factor (packet, storage);
storage->last_line_interaction_out_id[rpacket_get_id (packet)] = emission_line_id;
if (storage->cont_status == CONTINUUM_ON)
{
/* Emission-type 2 == line emission (only tracked in continuum mode). */
storage->last_interaction_out_type[rpacket_get_id (packet)] = 2;
}
rpacket_set_nu (packet,
storage->line_list_nu[emission_line_id] * inverse_doppler_factor);
rpacket_set_nu_line (packet, storage->line_list_nu[emission_line_id]);
rpacket_set_next_line_id (packet, emission_line_id + 1);
rpacket_reset_tau_event (packet, mt_state);
angle_aberration_CMF_to_LF (packet, storage);
if (rpacket_get_virtual_packet_flag (packet) > 0)
{
/* Recompute the close-line flag for the vpacket's starting position,
   then restore the real packet's flag afterwards. */
bool virtual_close_line = false;
if (!rpacket_get_last_line (packet) &&
fabs (storage->line_list_nu[rpacket_get_next_line_id (packet)] -
rpacket_get_nu_line (packet)) <
(rpacket_get_nu_line (packet)* 1e-7))
{
virtual_close_line = true;
}
// QUESTIONABLE!!!
bool old_close_line = rpacket_get_close_line (packet);
rpacket_set_close_line (packet, virtual_close_line);
create_vpacket (storage, packet, mt_state);
rpacket_set_close_line (packet, old_close_line);
virtual_close_line = false;
}
test_for_close_line (packet, storage);
}
void test_for_close_line (rpacket_t * packet, const storage_model_t * storage)
{
  /* Nothing to check once the packet has passed the last line. */
  if (rpacket_get_last_line (packet))
    {
      return;
    }
  const double nu_line = rpacket_get_nu_line (packet);
  const double nu_next = storage->line_list_nu[rpacket_get_next_line_id (packet)];
  /* Mark the next line as "close" when it lies within a relative
     frequency distance of 1e-7 of the current line. */
  if (fabs (nu_next - nu_line) < nu_line * 1e-7)
    {
      rpacket_set_close_line (packet, true);
    }
}
void
continuum_emission (rpacket_t * packet, storage_model_t * storage, rk_state *mt_state,
pt2sample_nu sample_nu_continuum, int64_t emission_type_id)
{
  /* Re-emit a packet from a continuum process: sample a comoving frequency
     with the supplied sampler, transform it to the lab frame, and re-seat
     the packet in the line list at the sampled frequency. */
  double inverse_doppler_factor = rpacket_inverse_doppler_factor (packet, storage);
  double nu_comov = sample_nu_continuum (packet, storage, mt_state);
  rpacket_set_nu (packet, nu_comov * inverse_doppler_factor);
  rpacket_reset_tau_event (packet, mt_state);
  storage->last_interaction_out_type[rpacket_get_id (packet)] = emission_type_id;
  // Have to find current position in line list
  int64_t current_line_id;
  /* BUGFIX: the last argument read "¤t_line_id" — mojibake of
     "&current_line_id" (the "&curren" prefix was rendered as the HTML
     entity U+00A4). Pass the address of current_line_id as intended. */
  line_search (storage->line_list_nu, nu_comov, storage->no_of_lines, &current_line_id);
  bool last_line = (current_line_id == storage->no_of_lines);
  rpacket_set_last_line (packet, last_line);
  rpacket_set_next_line_id (packet, current_line_id);
  angle_aberration_CMF_to_LF (packet, storage);
  if (rpacket_get_virtual_packet_flag (packet) > 0)
    {
      create_vpacket (storage, packet, mt_state);
    }
}
static void
montecarlo_compute_distances (rpacket_t * packet, storage_model_t * storage)
{
  /* A line flagged as "close" (same nu as the previous one) interacts at
     the current position: distance 0, and the flag is consumed. */
  if (rpacket_get_close_line (packet))
    {
      rpacket_set_d_line (packet, 0.0);
      rpacket_set_close_line (packet, false);
      return;
    }
  /* Otherwise recompute all candidate event distances. */
  compute_distance2boundary (packet, storage);
  compute_distance2line (packet, storage);
  // FIXME MR: return status of compute_distance2line() is ignored
  compute_distance2continuum (packet, storage);
}
montecarlo_event_handler_t
get_event_handler (rpacket_t * packet, storage_model_t * storage,
double *distance, rk_state *mt_state)
{
  /* Pick the next event as the one with the smallest distance, write that
     distance through *distance, and return the handler for the event.
     Ties favour the line event, then the boundary crossing. */
  montecarlo_compute_distances (packet, storage);
  const double d_boundary = rpacket_get_d_boundary (packet);
  const double d_continuum = rpacket_get_d_continuum (packet);
  const double d_line = rpacket_get_d_line (packet);
  if (d_line <= d_boundary && d_line <= d_continuum)
    {
      *distance = d_line;
      return &montecarlo_line_scatter;
    }
  if (d_boundary <= d_continuum)
    {
      *distance = d_boundary;
      return &move_packet_across_shell_boundary;
    }
  *distance = d_continuum;
  /* Continuum events are further dispatched by opacity channel. */
  return montecarlo_continuum_event_handler (packet, storage, mt_state);
}
montecarlo_event_handler_t
montecarlo_continuum_event_handler (rpacket_t * packet, storage_model_t * storage, rk_state *mt_state)
{
  /* Without the continuum treatment every continuum event is Thomson
     scattering. */
  if (!storage->cont_status)
    {
      return &montecarlo_thomson_scatter;
    }
  /* Sample a point in the total continuum opacity and dispatch by which
     channel's cumulative contribution it falls into:
     [0, chi_th) -> Thomson, [chi_th, chi_th+chi_bf) -> bound-free,
     remainder -> free-free. */
  const double sampled_chi = rk_double (mt_state) * rpacket_get_chi_continuum (packet);
  const double chi_th = rpacket_get_chi_electron (packet);
  const double chi_bf = rpacket_get_chi_boundfree (packet);
  if (sampled_chi < chi_th)
    {
      return &montecarlo_thomson_scatter;
    }
  if (sampled_chi < chi_th + chi_bf)
    {
      return &montecarlo_bound_free_scatter;
    }
  return &montecarlo_free_free_scatter;
}
int64_t
montecarlo_one_packet_loop (storage_model_t * storage, rpacket_t * packet,
int64_t virtual_packet, rk_state *mt_state)
{
/* Propagate one packet until it leaves IN_PROCESS status.
   Returns 1 if the packet ended REABSORBED, 0 otherwise. */
rpacket_set_tau_event (packet, 0.0);
rpacket_set_nu_line (packet, 0.0);
rpacket_set_virtual_packet (packet, virtual_packet);
rpacket_set_status (packet, TARDIS_PACKET_STATUS_IN_PROCESS);
// Initializing tau_event if it's a real packet.
if (virtual_packet == 0)
{
rpacket_reset_tau_event (packet,mt_state);
}
// For a virtual packet tau_event is the sum of all the tau's that the packet passes.
while (rpacket_get_status (packet) == TARDIS_PACKET_STATUS_IN_PROCESS)
{
// Check if we are at the end of line list.
if (!rpacket_get_last_line (packet))
{
rpacket_set_nu_line (packet,
storage->
line_list_nu[rpacket_get_next_line_id
(packet)]);
}
double distance;
/* Select the nearest event and immediately invoke its handler. */
get_event_handler (packet, storage, &distance, mt_state) (packet, storage,
distance, mt_state);
/* Russian-roulette termination for virtual packets whose accumulated
   tau exceeds tau_russian: kill with probability
   (1 - survival_probability), otherwise boost the energy to keep the
   estimate unbiased and reset tau. */
if (virtual_packet > 0 && rpacket_get_tau_event (packet) > storage->tau_russian)
{
double event_random = rk_double (mt_state);
if (event_random > storage->survival_probability)
{
rpacket_set_energy(packet, 0.0);
rpacket_set_status (packet, TARDIS_PACKET_STATUS_EMITTED);
}
else
{
rpacket_set_energy(packet,
rpacket_get_energy (packet) / storage->survival_probability *
exp (-1.0 * rpacket_get_tau_event (packet)));
rpacket_set_tau_event (packet, 0.0);
}
}
}
if (virtual_packet > 0)
{
/* Attenuate the escaping virtual packet by its accumulated tau. */
rpacket_set_energy (packet,
rpacket_get_energy (packet) * exp (-1.0 *
rpacket_get_tau_event
(packet)));
}
return rpacket_get_status (packet) ==
TARDIS_PACKET_STATUS_REABSORBED ? 1 : 0;
}
void
montecarlo_main_loop(storage_model_t * storage, int64_t virtual_packet_flag, int nthreads, unsigned long seed)
{
/* Top-level driver: propagate all packets in storage, optionally in
   parallel with OpenMP (one RNG stream per thread, seeded from `seed`
   plus the thread number), writing output nu/energy per packet.
   Reabsorbed packets are recorded with negated energy. */
int64_t finished_packets = 0;
storage->virt_packet_count = 0;
#ifdef WITH_VPACKET_LOGGING
/* Pre-size the virtual-packet logs; they are grown elsewhere via
   safe_realloc when virt_packet_count reaches virt_array_size. */
storage->virt_packet_nus = (double *)safe_malloc(sizeof(double) * storage->no_of_packets);
storage->virt_packet_energies = (double *)safe_malloc(sizeof(double) * storage->no_of_packets);
storage->virt_packet_last_interaction_in_nu = (double *)safe_malloc(sizeof(double) * storage->no_of_packets);
storage->virt_packet_last_interaction_type = (int64_t *)safe_malloc(sizeof(int64_t) * storage->no_of_packets);
storage->virt_packet_last_line_interaction_in_id = (int64_t *)safe_malloc(sizeof(int64_t) * storage->no_of_packets);
storage->virt_packet_last_line_interaction_out_id = (int64_t *)safe_malloc(sizeof(int64_t) * storage->no_of_packets);
storage->virt_array_size = storage->no_of_packets;
#endif // WITH_VPACKET_LOGGING
#ifdef WITHOPENMP
omp_set_dynamic(0);
if (nthreads > 0)
{
omp_set_num_threads(nthreads);
}
#pragma omp parallel firstprivate(finished_packets)
{
/* One independent RNG state per thread. */
rk_state mt_state;
rk_seed (seed + omp_get_thread_num(), &mt_state);
#pragma omp master
{
fprintf(stderr, "Running with OpenMP - %d threads\n", omp_get_num_threads());
print_progress(0, storage->no_of_packets);
}
#else
rk_state mt_state;
rk_seed (seed, &mt_state);
fprintf(stderr, "Running without OpenMP\n");
#endif
/* Scratch buffer for partial bound-free opacities, one per thread. */
int64_t chi_bf_tmp_size = (storage->cont_status) ? storage->no_of_edges : 0;
double *chi_bf_tmp_partial = safe_malloc(sizeof(double) * chi_bf_tmp_size);
#pragma omp for
for (int64_t packet_index = 0; packet_index < storage->no_of_packets; ++packet_index)
{
int reabsorbed = 0;
rpacket_t packet;
rpacket_set_id(&packet, packet_index);
rpacket_init(&packet, storage, packet_index, virtual_packet_flag, chi_bf_tmp_partial);
if (virtual_packet_flag > 0)
{
/* A preparatory virtual-packet pass (mode -1) before the real one. */
reabsorbed = montecarlo_one_packet(storage, &packet, -1, &mt_state);
}
reabsorbed = montecarlo_one_packet(storage, &packet, 0, &mt_state);
storage->output_nus[packet_index] = rpacket_get_nu(&packet);
if (reabsorbed == 1)
{
/* Negative energy marks a reabsorbed packet in the output. */
storage->output_energies[packet_index] = -rpacket_get_energy(&packet);
}
else
{
storage->output_energies[packet_index] = rpacket_get_energy(&packet);
}
if ( ++finished_packets%100 == 0 )
{
#ifdef WITHOPENMP
// WARNING: This only works with a static sheduler and gives an approximation of progress.
// The alternative would be to have a shared variable but that could potentially decrease performance when using many threads.
if (omp_get_thread_num() == 0 )
print_progress(finished_packets * omp_get_num_threads(), storage->no_of_packets);
#else
print_progress(finished_packets, storage->no_of_packets);
#endif
}
}
free(chi_bf_tmp_partial);
#ifdef WITHOPENMP
}
#endif
print_progress(storage->no_of_packets, storage->no_of_packets);
fprintf(stderr,"\n");
}
void
create_vpacket (storage_model_t * storage, rpacket_t * packet,
rk_state *mt_state)
{
/* Launch a virtual packet (mode 1) from the current packet position.
   With biasing enabled, the vpacket is only spawned with probability
   exp(-tau_bias) and carries the compensating weight 1/probability. */
if (storage->enable_biasing)
{
int64_t shell_id = rpacket_get_current_shell_id(packet);
/* Linear interpolation of tau_bias in radius between the shell's
   inner and outer boundary values. */
double tau_bias = (storage->tau_bias[shell_id + 1] +
(storage->tau_bias[shell_id] - storage->tau_bias[shell_id + 1]) *
(storage->r_outer[shell_id] - rpacket_get_r (packet)) /
(storage->r_outer[shell_id] - storage->r_inner[shell_id]));
double vpacket_prob = exp(-tau_bias);
double event_random = rk_double (mt_state);
if (event_random < vpacket_prob)
{
packet->vpacket_weight = 1. / vpacket_prob;
montecarlo_one_packet (storage, packet, 1, mt_state);
}
}
else
{
montecarlo_one_packet (storage, packet, 1, mt_state);
}
}
|
random.h | #ifndef RANDOM_H_INCLUDED
#define RANDOM_H_INCLUDED
/* BUGFIX: this header declares a function returning uint32_t but never
   included <stdint.h>; neither <stdlib.h> nor <assert.h> is guaranteed to
   provide the fixed-width integer types. */
#include <stdint.h>
#include <stdlib.h>
#include <assert.h>
/* Random unsigned value; presumably uniform over [min, max] — TODO confirm
   range semantics against random.cpp. */
uint32_t randomUint(unsigned, unsigned);
// // See random.cpp for notes.
// #include <cstdint>
// #include <climits>
// struct RandomUintGenerator{
// private:
// // for the Marsaglia algorithm
// uint32_t rngx;
// uint32_t rngy;
// uint32_t rngz;
// uint32_t rngc;
// // for the Jenkins algorithm
// uint32_t a, b, c, d;
// public:
// void initialize(); // must be called to seed the RNG
// uint32_t operator()();
// unsigned operator()(unsigned min, unsigned max);
// };
// // The globally-scoped random number generator. Declaring it
// // threadprivate causes each thread to instantiate a private instance.
// extern RandomUintGenerator randomUint;
// #pragma omp threadprivate(randomUint)
// constexpr uint32_t RANDOM_UINT_MAX = 0xffffffff;
#endif // RANDOM_H_INCLUDED
|
FileParser.h | //
// Created by Timm Felden on 04.11.15.
//
#ifndef SKILL_CPP_COMMON_FILEPARSER_H_H
#define SKILL_CPP_COMMON_FILEPARSER_H_H
#include "../common.h"
#include "../api/SkillFile.h"
#include "ParseException.h"
#include "../streams/FileInputStream.h"
#include "StringPool.h"
#include "AbstractStoragePool.h"
#include "../restrictions/FieldRestriction.h"
#include "../restrictions/TypeRestriction.h"
#include "../fieldTypes/BuiltinFieldType.h"
#include "../fieldTypes/AnnotationType.h"
#include "LazyField.h"
#include <vector>
#include <unordered_map>
#include <string>
#include <iostream>
#include <cassert>
#if defined(_OPENMP)
#include <omp.h>
#endif
/**
* set to 1, to enable debug output; this should be disabled on all commits
*/
#define debugOnly if(0)
namespace skill {
using namespace streams;
using namespace fieldTypes;
using namespace restrictions;
namespace internal {
/**
* Turns a field type into a preliminary type information. In case of user types, the declaration
* of the respective user type may follow after the field declaration.
*/
inline const FieldType *parseFieldType(FileInputStream *in,
const std::vector<AbstractStoragePool *> *types,
StringPool *String,
AnnotationType *Annotation,
int blockCounter) {
// Decode one serialized field type from the stream. Type IDs follow the
// SKilL binary format: 0-4 constants, 5 annotation, 6-14 builtins,
// 15/17-20 compound types (note: ID 16 is not handled here — presumably
// unused by the format; TODO confirm against the SKilL specification),
// and >= 32 user types indexed into `types` (offset by 32).
const TypeID i = (TypeID) in->v64();
switch (i) {
case 0 :
return new ConstantI8(in->i8());
case 1 :
return new ConstantI16(in->i16());
case 2 :
return new ConstantI32(in->i32());
case 3 :
return new ConstantI64(in->i64());
case 4 :
return new ConstantV64(in->v64());
case 5 :
return Annotation;
case 6 :
return &BoolType;
case 7 :
return &I8;
case 8 :
return &I16;
case 9 :
return &I32;
case 10:
return &I64;
case 11:
return &V64;
case 12:
return &F32;
case 13:
return &F64;
case 14:
return String;
case 15: {
// constant-length array: length followed by the element type
int64_t length = in->v64();
auto t = parseFieldType(in, types, String, Annotation, blockCounter);
return new ConstantLengthArray(length, t);
}
case 17:
return new VariableLengthArray(parseFieldType(in, types, String, Annotation, blockCounter));
case 18:
return new ListType(parseFieldType(in, types, String, Annotation, blockCounter));
case 19:
return new SetType(parseFieldType(in, types, String, Annotation, blockCounter));
case 20:
// map: key type followed by value type
return new MapType(parseFieldType(in, types, String, Annotation, blockCounter),
parseFieldType(in, types, String, Annotation, blockCounter));
default:
if (i >= 32 && i - 32 < (TypeID) types->size())
return types->at(i - 32);
else
throw ParseException(in, blockCounter,
"Invalid type ID");
}
}
/**
* create a new empty skill file; parametrized by specification dependent functionality.
*/
template<
//!ensures that names of pools and known fields are known upfront, so that it is safe
// to compare their names by pointer value
StringPool *initializeStrings(FileInputStream *),
//!create a new pool in the target type system
AbstractStoragePool *newPool(TypeID typeID,
String name,
AbstractStoragePool *superPool,
std::set<TypeRestriction *> *restrictions,
const AbstractStringKeeper *const keeper),
//! create a new state in the target type system
SkillFile *makeState(FileInputStream *in,
WriteMode mode,
StringPool *String,
AnnotationType *Annotation,
std::vector<AbstractStoragePool *> *types,
api::typeByName_t *typesByName,
std::vector<std::unique_ptr<MappedInStream>> &dataList)
>
SkillFile *newFile(const std::string &path, WriteMode mode) {
// Create an empty skill file state at `path` with no parsed content.
// NOTE(review): the stream is opened with mode "w" — presumably
// FileInputStream treats this as "writable"; confirm against its ctor.
FileInputStream *in = new FileInputStream(path, "w");
// NOTE(review): the raw `new` allocations below appear to transfer
// ownership to makeState; confirm — they leak if makeState throws.
StringPool *String = initializeStrings(in);
std::vector<AbstractStoragePool *> *types =
new std::vector<AbstractStoragePool *>;
AnnotationType *Annotation = new AnnotationType(types);
api::typeByName_t *typesByName = new api::typeByName_t;
std::vector<std::unique_ptr<MappedInStream>> dataList;
return makeState(in, mode, String,
Annotation, types,
typesByName,
dataList);
}
/**
* parses a skill file; parametrized by specification dependent functionality.
*/
template<
//!ensures that names of pools and known fields are known upfront, so that it is safe
// to compare their names by pointer value
StringPool *initializeStrings(FileInputStream *),
//!create a new pool in the target type system
AbstractStoragePool *newPool(TypeID typeID,
String name,
AbstractStoragePool *superPool,
std::set<TypeRestriction *> *restrictions,
const AbstractStringKeeper *const keeper ),
//! create a new state in the target type system
SkillFile *makeState(FileInputStream *in,
WriteMode mode,
StringPool *String,
AnnotationType *Annotation,
std::vector<AbstractStoragePool *> *types,
api::typeByName_t *typesByName,
std::vector<std::unique_ptr<MappedInStream>> &dataList)
>
SkillFile *parseFile(std::unique_ptr<FileInputStream> in, WriteMode mode) {
// Parse a serialized skill file block by block (string block + type block
// per iteration) and hand the collected state to makeState.
struct LFEntry {
LFEntry(AbstractStoragePool *const pool, SKilLID count)
: pool(pool), count(count) {}
AbstractStoragePool *const pool;
const SKilLID count;
};
// PARSE STATE
std::unique_ptr<StringPool> String(initializeStrings(in.get()));
std::vector<AbstractStoragePool *> *types =
new std::vector<AbstractStoragePool *>;
std::unique_ptr<AnnotationType> Annotation(new AnnotationType(types));
std::unique_ptr<api::typeByName_t> typesByName(new api::typeByName_t);
std::vector<std::unique_ptr<MappedInStream>> dataList;
// process stream
debugOnly {
std::cout << std::endl << "file " << in->getPath() << std::endl;
}
for (int blockCounter = 0; !in->eof(); blockCounter++) {
debugOnly {
std::cout << "block " << blockCounter << " starting at " << in->getPosition() << std::endl;
}
// string block
try {
const int count = (int) in->v64();
debugOnly {
std::cout << count << " strings" << std::endl;
}
if (0 != count) {
// record string positions from the offset table; actual string data
// is read lazily by the StringPool
int last = 0, offset = 0;
const long position = in->getPosition() + 4 * count;
for (int i = count; i != 0; i--) {
offset = in->i32();
String->addPosition(std::pair<long, int>(position + last, offset - last));
last = offset;
}
in->jump(in->getPosition() + last);
}
// BUGFIX: catch by const reference instead of by value (avoids an
// unnecessary copy and potential slicing of derived exceptions)
} catch (const SkillException &) {
throw ParseException(in, blockCounter, "corrupted string block");
}
debugOnly {
std::cout << "string block ended at " << in->getPosition() << std::endl;
}
// type block
try {
TypeID typeCount = (TypeID) in->v64();
// this barrier is strictly increasing inside of each block and reset to 0 at the beginning of each block
TypeID blockIDBarrier = 0;
std::set<api::String> seenTypes;
// number of fields to expect for that type in this block
std::vector<LFEntry> localFields;
// parse type definitions
while (typeCount-- > 0) {
api::String name = String->get((SKilLID) in->v64());
// check null name
if (nullptr == name)
throw ParseException(in, blockCounter,
"Corrupted file, nullptr in typename");
debugOnly {
std::cout << "processing type " << *name << " at " << in->getPosition()
<< std::endl;
}
// check duplicate types
if (seenTypes.find(name) != seenTypes.end())
throw ParseException(
in, blockCounter,
std::string("Duplicate definition of type ").append(*name));
seenTypes.insert(name);
const int count = (int) in->v64();
auto defIter = typesByName->find(name);
if (defIter == typesByName->end()) {
// unknown type
// type restrictions
int restrictionCount = (int) in->v64();
auto rest = std::unique_ptr<std::set<TypeRestriction *>>(new std::set<TypeRestriction *>);
//! TODO restrictions
// rest.sizeHint(restrictionCount)
while (restrictionCount-- > 0) {
switch ((char) in->v64()) {
case 0: //restrictions.Unique
break;
case 1: // restrictions.Singleton
break;
case 2: // restrictions.Monotone
break;
case 3: // restrictions.Abstract
break;
case 5:
in->v64(); // restrictions.DefaultTypeRestriction(in.v64.toInt)
break;
default:
// BUGFIX: the exception was constructed but never thrown,
// silently ignoring unknown type restrictions (and leaving
// the stream in an undefined position); throw it, matching
// the field-restriction default case below.
throw ParseException(
in, blockCounter,
"Found an unknown type restriction. Please regenerate your binding, if possible.");
}
// TODO rest +=
}
// super
const TypeID superID = (TypeID) in->v64();
AbstractStoragePool *superPool;
if (0 == superID)
superPool = nullptr;
else if (superID > (TypeID) types->size()) {
throw ParseException(
in, blockCounter,
std::string("Type ").append(*name).append(
" refers to an ill-formed super type."));
} else {
superPool = types->at(superID - 1);
assert(superPool);
}
// allocate pool
AbstractStoragePool *r = newPool(
(TypeID) types->size() + 32, name, superPool, rest.get(), String->keeper);
rest.release();
types->push_back(r);
defIter = typesByName->insert(
std::pair<api::String, AbstractStoragePool *>(name, r)).first;
}
AbstractStoragePool *const definition = defIter->second;
if (blockIDBarrier < definition->typeID)
blockIDBarrier = definition->typeID;
else
throw ParseException(in, blockCounter, "Found unordered type block.");
// in contrast to prior implementation, bpo is the position inside of data, even if there are no actual
// instances. We need this behavior, because that way we can cheaply calculate the number of static instances
const SKilLID lbpo =
definition->basePool->cachedSize + (nullptr == definition->superPool ? 0 : (
0 != count ? (SKilLID) in->v64() :
definition->superPool->blocks.back().bpo));
// ensure that bpo is in fact inside of the parents block
if (definition->superPool) {
const auto &b = definition->superPool->blocks.back();
if (lbpo < b.bpo || b.bpo + b.dynamicCount < lbpo)
throw ParseException(in, blockCounter,
"Found broken bpo.");
}
// static count and cached size are updated in the resize phase
// @note we assume that all dynamic instance are static instances as well, until we know for sure
definition->blocks.push_back(Block(blockCounter, lbpo, count, count));
definition->staticDataInstances += count;
localFields.push_back(LFEntry(definition, (SKilLID) in->v64()));
}
// resize pools, i.e. update cachedSize and staticCount
for (auto &e : localFields) {
const auto p = e.pool;
const auto &b = p->blocks.back();
p->cachedSize += b.dynamicCount;
if (0 != b.dynamicCount) {
// calculate static count of our parent
const auto &parent = p->superPool;
if (parent) {
auto &sb = parent->blocks.back();
// assumed static instances, minus what static instances would be, if p were the first sub pool.
const auto delta = sb.staticCount - (b.bpo - sb.bpo);
// if positive, then we have to subtract it from the assumed static count (local and global)
if (delta > 0) {
sb.staticCount -= delta;
parent->staticDataInstances -= delta;
}
}
}
}
// track offset information, so that we can create the block maps and jump to the next block directly after
// parsing field information
long dataEnd = 0L;
// parse fields
for (const auto &e : localFields) {
const auto &p = e.pool;
TypeID legalFieldIDBarrier = 1 + (TypeID) p->dataFields.size();
const auto &block = p->blocks.back();
auto localFieldCount = e.count;
while (localFieldCount-- > 0) {
const TypeID id = (TypeID) in->v64();
if (id <= 0 || legalFieldIDBarrier < id)
throw ParseException(in, blockCounter,
"Found an illegal field ID.");
long endOffset = 0;
if (id == legalFieldIDBarrier) {
// new field
legalFieldIDBarrier++;
const api::String fieldName = String->get((SKilLID) in->v64());
if (!fieldName)
throw ParseException(in, blockCounter,
"A field has a nullptr as name.");
debugOnly {
std::cout << "processing new field " << *p->name << "." << *fieldName
<< " at " << in->getPosition() << std::endl;
}
const auto t = parseFieldType(in.get(), types, String.get(), Annotation.get(),
blockCounter);
// parse field restrictions
std::set<const restrictions::FieldRestriction *> rest;
int fieldRestrictionCount = (int) in->v64();
for (; fieldRestrictionCount != 0; fieldRestrictionCount--) {
const int i = (const int) in->v64();
switch (i) {
case 0: {// nonnull
rest.insert(restrictions::NonNull::get());
break;
}
case 1: {// default
if (5 == t->typeID || 32 <= t->typeID)
in->v64();
else
t->read(*in);
break;
}
case 3: {
//range
switch (t->typeID) {
case 7:
rest.insert(new restrictions::Range<int8_t>(in->i8(), in->i8()));
break;
case 8:
rest.insert(new restrictions::Range<int16_t>(in->i16(), in->i16()));
break;
case 9:
rest.insert(new restrictions::Range<int32_t>(in->i32(), in->i32()));
break;
case 10:
rest.insert(new restrictions::Range<int64_t>(in->i64(), in->i64()));
break;
case 11:
rest.insert(new restrictions::Range<int64_t>(in->v64(), in->v64()));
break;
case 12:
rest.insert(new restrictions::Range<float>(in->f32(), in->f32()));
break;
case 13:
rest.insert(new restrictions::Range<double>(in->f64(), in->f64()));
break;
default:
throw ParseException(
in, blockCounter,
"Range restricton on a type that can not be restricted.");
}
break;
}
case 5: { // coding
String->get((SKilLID) in->v64());
break;
}
case 7: {
// constant length pointer
break;
}
case 9: {
// oneof
// read array of type IDs
for (int c = in->v64(); c != 0; c--)
in->v64();
break;
}
default:
throw ParseException(
in, blockCounter,
"Found an unknown field restriction. Please regenerate your binding, if possible.");
}
}
endOffset = in->v64();
auto f = p->addField(String->keeper, id, t, fieldName);
for (auto r : rest)
f->addRestriction(r);
f->addChunk(
new BulkChunk(dataEnd, endOffset, p->cachedSize, p->blocks.size()));
} else {
// known field
endOffset = in->v64();
p->dataFields[id - 1]->addChunk(
new SimpleChunk(dataEnd, endOffset, block.dynamicCount, block.bpo));
}
dataEnd = endOffset;
}
}
debugOnly {
std::cout << "reached end of type header at " << in->getPosition() << std::endl;
}
// jump over data and continue in the next block
dataList.push_back(std::unique_ptr<MappedInStream>(in->jumpAndMap(dataEnd)));
// BUGFIX: was `catch (SkillException e) { throw e; }`, which copies the
// exception and slices derived types such as ParseException on rethrow;
// catch by const reference and rethrow the original object.
} catch (const SkillException &) {
throw;
} catch (...) {
throw ParseException(in, blockCounter, "unexpected foreign exception");
}
}
// note there still isn't a single instance
return makeState(in.release(), mode, String.release(), Annotation.release(), types,
typesByName.release(),
dataList);
}
/**
* has to be called by make state after instances have been allocated to ensure
* that required fields are read from file
*/
inline void triggerFieldDeserialization(std::vector<AbstractStoragePool *> *types,
                                        std::vector<std::unique_ptr<MappedInStream>> &dataList) {
    // Error messages produced by worker threads; all accesses from inside the
    // parallel region are guarded by "omp critical" sections below.
    std::vector<std::string *> results;
#pragma omp parallel for schedule(dynamic) num_threads(omp_get_max_threads()/2)
    for (size_t i = 0; i < types->size(); i++) {
        auto t = types->at(i);
        // nested parallelism: up to 2 threads per pool iterate its fields
#pragma omp parallel for schedule(dynamic) num_threads(2)
        for (size_t j = 0; j < t->dataFields.size(); j++) {
            auto f = t->dataFields[j];
            int bsIndex = 0;
            for (Chunk *dc : f->dataChunks) {
                if (dynamic_cast<BulkChunk *>(dc)) {
                    // skip blocks that do not contain data for our field
                    bsIndex += ((BulkChunk *) dc)->blockCount - 1;
                }
                const int blockIndex = t->blocks[bsIndex++].blockIndex;
                if (dc->count) {
                    // map a view onto the chunk's byte range of its block
                    MappedInStream *part = dataList[blockIndex].get();
                    skill::streams::MappedInStream in(part, dc->begin, dc->end);
                    try {
                        if (auto c = dynamic_cast<const ::skill::internal::SimpleChunk *>(dc)) {
                            int i = c->bpo + 1;
                            f->rsc(i, i + c->count, &in);
                        } else {
                            auto bc = dynamic_cast<const ::skill::internal::BulkChunk *>(dc);
                            f->rbc(&in, bc);
                        }
                        // A non-lazy field must consume its chunk completely;
                        // leftover bytes indicate a malformed file.
                        if (!(in.eof() || nullptr != dynamic_cast<::skill::internal::LazyField *>(f))) {
#pragma omp critical
                            {
                                std::stringstream message;
                                message << "ParseException while parsing field: "
                                        << f->owner->name->c_str()
                                        << "."
                                        << f->name->c_str()
                                        << "\n Position: "
                                        << in.getPosition()
                                        << "\n reason: Did not consume all bytes." << std::endl;
                                results.push_back(new std::string(message.str()));
                            }
                        }
                    } catch (const SkillException &e) {
                        // catch by const reference (was by value, which copies)
#pragma omp critical
                        {
                            std::stringstream message;
                            message << "ParseException while parsing field.\n Position "
                                    << in.getPosition()
                                    << "\n reason: "
                                    << e.message << std::endl;
                            results.push_back(new std::string(message.str()));
                        }
                    } catch (...) {
#pragma omp critical
                        {
                            results.push_back(new std::string("unknown error in concurrent read"));
                        }
                    }
                }
            }
        }
    }
    // Serial phase: merge all collected errors into a single exception.
    if (results.size()) {
        std::stringstream msg;
        for (const auto s : results) {
            if (s) {
                msg << *s << std::endl;
                delete s;
            }
        }
        throw SkillException(msg.str());
    }
}
}
}
#undef debugOnly
#endif //SKILL_CPP_COMMON_FILEPARSER_H_H
|
image.h | #ifndef _IMAGE_H_
#define _IMAGE_H_
// simple image class
// ulrich.krispel@fraunhofer.at
#include <algorithm>
#include <cassert>
#include <cmath>
#include <cstdio>
#include <cstring>
#include <iostream>
#include <limits>
#include <vector>
#include "jpeglib.h"
// Generic image container: row-major, channel-interleaved samples of type T
// stored in a std::vector.  BPP is bits per pixel over all channels, so
// channels() == BPP / (sizeof(T) * 8).
template <class T> struct ImageT {
  // Axis-aligned 2D box (left/top corner + width/height) with helpers to map
  // between absolute pixel coordinates and relative [0,1] coordinates.
  struct AABB2D {
    int l, t, w, h;
    inline AABB2D(int LE, int TO, int WI, int HE) : l(LE), t(TO), w(WI), h(HE) {}
    inline int r() const { return l + w; } // right border (exclusive)
    inline int b() const { return t + h; } // bottom border (exclusive)
    inline double x2rel(double x) const { return (x - (double)l) / (double)w; }
    inline double y2rel(double y) const { return (y - (double)t) / (double)h; }
    inline double rel2x(double rx) const { return rx * w + l; }
    inline double rel2y(double ry) const { return ry * h + t; }
  };

protected:
  unsigned int W;       // width in pixels
  unsigned int H;       // height in pixels
  unsigned int BPP;     // bits per pixel (all channels together)
  std::vector<T> pdata; // pixel data, row-major, channels interleaved

public:
  typedef T PixelT;

  ImageT() : W(0), H(0), BPP(0) {}

  // Index of the first sample of pixel (x, y) in pdata.
  inline int bufoffset(const int x, const int y) const {
    return (y * W + x) * (BPP / (sizeof(T) * 8));
  }

  // Pack the first three channels into 0x00BBGGRR (assumes >= 3 channels).
  inline unsigned int rgb(const int x, const int y) const {
    unsigned const int off = bufoffset(x, y);
    assert(off < pdata.size());
    return pdata[off] | (pdata[off + 1] << 8) | (pdata[off + 2] << 16);
  }

  // True if pixel (x, y) equals the given RGB triple (assumes >= 3 channels).
  inline bool isValue(const int x, const int y, const T r, const T g,
                      const T b) const {
    const int bo = bufoffset(x, y);
    return ((pdata[bo] == r) && (pdata[bo + 1] == g) && (pdata[bo + 2] == b));
  }

  // Return the first three channels as VTYPE(r, g, b).
  template <typename VTYPE> inline VTYPE rgbT(const int x, const int y) const {
    unsigned const int off = bufoffset(x, y);
    assert(off < pdata.size());
    return VTYPE(pdata[off], pdata[off + 1], pdata[off + 2]);
  }

  // Store rgb[0..2] into the first three channels of pixel (x, y).
  template <typename VTYPE> inline void setRGB(const int x, const int y, VTYPE rgb)
  {
    unsigned const int off = bufoffset(x, y);
    pdata[off] = rgb[0]; pdata[off+1] = rgb[1]; pdata[off+2] = rgb[2];
  }

  inline bool isValid() const { return W != 0 && H != 0 && BPP != 0; }
  inline int width() const { return W; }
  inline int height() const { return H; }
  inline int bpp() const { return BPP; }
  inline int channels() const { return BPP / (sizeof(T) * 8); }
  inline const T *data() const { return &pdata[0]; }
  inline const std::vector<T> &getData() const { return pdata; }
  // Total number of samples (NOT bytes): W * H * channels().
  inline int buffersize() const { return bufoffset(0, H); }
  inline AABB2D whole() const { return AABB2D(0, 0, W, H); }
  inline std::vector<T> &unsafeData() { return pdata; }

  // (Re)allocate for the given geometry; bpp given in bits per pixel.
  inline void initialize(int width, int height, int bpp) {
    W = width;
    H = height;
    BPP = bpp;
    pdata.resize(buffersize());
  }

  // (Re)allocate for the given geometry; channel count instead of bpp.
  inline void resize(int width, int height, int channels = 3) {
    W = width;
    H = height;
    BPP = channels * (sizeof(T) * 8);
    pdata.resize(buffersize());
  }

  // write access
  inline T &operator()(const int offset) { return pdata[offset]; }
  inline T &operator()(const int x, const int y, const int ch = 0) {
    return pdata[bufoffset(x, y) + ch];
  }
  // read access
  inline const T &operator()(const int x, const int y, const int ch = 0) const {
    return pdata[bufoffset(x, y) + ch];
  }

  // Bilinear interpolation over the first three channels; coordinates are
  // clamped to the image border.
  template <typename VTYPE>
  inline const VTYPE bilinear(const double x, const double y) const {
    const int ix = (int)std::floor(x), iy = (int)std::floor(y);
    const int l = ix < 0 ? 0 : ix;
    const int r = l >= ((int)W - 1) ? ((int)W - 1) < 0 ? 0 : (int)W-1 : l + 1;
    const int t = iy < 0 ? 0 : iy;
    const int b = t >= ((int)H - 1) ? ((int)H - 1) < 0 ? 0 : (int)H-1 : t + 1;
    if (!((l >= 0 && l <= (int)W && t >= 0 && t <= (int)H)
       && (r >= 0 && r <= (int)W && b >= 0 && b <= (int)H)))
    {
      // debug trace for out-of-range lookups
      std::cout << "l:" << l << " t:" << t << " r:" << r << " b:" << b << std::endl;
    }
    VTYPE q11 = rgbT<VTYPE>(l, t), q21 = rgbT<VTYPE>(r, t),
          q12 = rgbT<VTYPE>(l, b), q22 = rgbT<VTYPE>(r, b);
    return q11 * (r - x) * (b - y) + q21 * (x - l) * (b - y) +
           q12 * (r - x) * (y - t) + q22 * (x - l) * (y - t);
  }

  // Bilinear interpolation over a single channel; coordinates clamped.
  template <typename VTYPE>
  inline const VTYPE bilinear2(const double x, const double y,
                               const int ch = 0) const {
    const int l = std::floor(x) < 0 ? 0 : (int)std::floor(x);
    const int r = l >= ((int)W - 1) ? (int)W - 1 : l + 1;
    const int t = std::floor(y) < 0 ? 0 : (int)std::floor(y);
    const int b = t >= ((int)H - 1) ? (int)H - 1 : t + 1;
    assert(l >= 0 && l <= (int)W && t >= 0 && t <= (int)H);
    assert(r >= 0 && r <= (int)W && b >= 0 && b <= (int)H);
    VTYPE q11 = (*this)(l, t, ch), q21 = (*this)(r, t, ch),
          q12 = (*this)(l, b, ch), q22 = (*this)(r, b, ch);
    return q11 * (r - x) * (b - y) + q21 * (x - l) * (b - y) +
           q12 * (r - x) * (y - t) + q22 * (x - l) * (y - t);
  }

  // Set every sample to `value`.  The previous implementation used
  // memset(ptr, value, buffersize()), but buffersize() counts samples, not
  // bytes, so for multi-byte T only part of the buffer was written (and
  // non-zero values were wrong for any non-byte T).  std::fill assigns each
  // sample, which matches the intended semantics for all T.
  void clear(T value = 0) { std::fill(pdata.begin(), pdata.end(), value); }

  // Mean of channel `ch` over the whole image, accumulated in CALCTYPE.
  template <typename CALCTYPE> inline CALCTYPE mean(const int ch = 0) const {
    CALCTYPE result = 0;
    const int w = width(), h = height();
    auto MeanCB = [this, &result, ch](int x, int y) {
      result += (*this)(x, y, ch);
    };
    applyPixelPosCBS(MeanCB, whole());
    result /= (CALCTYPE)(w * h);
    return result;
  }

  // Standard deviation of channel `ch` around mean m.
  template <typename CALCTYPE>
  inline CALCTYPE std(const CALCTYPE m, const int ch = 0) const {
    CALCTYPE result = 0;
    auto StdCB = [this, &result, m, ch](int x, int y) {
      CALCTYPE v = (CALCTYPE) (*this)(x, y, ch) - m;
      result += v * v;
    };
    applyPixelPosCBS(StdCB, whole());
    result /= (CALCTYPE)(W * H);
    return std::sqrt(result);
  }
  // Convenience overload computing the mean itself.  The original declared
  // "const CALCTYPE m = mean<CALCTYPE>()" as a default argument, which is
  // ill-formed C++: a non-static member function cannot be used in a default
  // argument.  This overload preserves the std<T>() call syntax.
  template <typename CALCTYPE>
  inline CALCTYPE std(const int ch = 0) const {
    return std<CALCTYPE>(mean<CALCTYPE>(ch), ch);
  }

  // Minimum sample value over ALL channels.
  // NOTE(review): `ch` is unused — applyPixelCB visits every channel; fixing
  // this would change results for callers passing ch, so it is only flagged.
  inline PixelT min(const int ch = 0) const {
    PixelT m = std::numeric_limits<PixelT>::max();
    auto minCB = [&m](int x, int y, PixelT v) {
      if (v < m)
        m = v;
    };
    applyPixelCB(minCB, whole());
    return m;
  }

  // Maximum sample value over ALL channels (see NOTE on min()).
  inline PixelT max(const int ch = 0) const {
    PixelT m = std::numeric_limits<PixelT>::min();
    auto maxCB = [&m](int x, int y, PixelT v) {
      if (v > m)
        m = v;
    };
    applyPixelCB(maxCB, whole());
    return m;
  }

  // Replace each sample in wnd by cb(sample).
  template <class CallBack>
  inline void transformPixelCB(const CallBack &cb, const AABB2D &wnd) {
    const int ch = channels();
    for (int y = wnd.t; y < wnd.b(); ++y) {
      for (int x = wnd.l; x < wnd.r(); ++x) {
        for (int c = 0; c < ch; ++c)
          (*this)(x, y, c) = cb((*this)(x, y, c));
      }
    }
  }

  // Invoke cb(x, y, sample) for every sample in wnd (rows parallelized).
  template <class CallBack>
  inline void applyPixelCB(const CallBack &cb, const AABB2D &wnd) const {
    const int ch = channels();
#pragma omp parallel for
    for (int y = wnd.t; y < wnd.b(); ++y) {
      for (int x = wnd.l; x < wnd.r(); ++x) {
        for (int c = 0; c < ch; ++c)
          cb(x, y, (*this)(x, y, c));
      }
    }
  }

  // Invoke cb(x, y) for every pixel position in wnd (rows parallelized).
  template <class CallBack>
  inline void applyPixelPosCB(const CallBack &cb, const AABB2D wnd) const {
#pragma omp parallel for
    for (int y = wnd.t; y < wnd.b(); ++y) {
      for (int x = wnd.l; x < wnd.r(); ++x) {
        cb(x, y);
      }
    }
  }

  // Invoke cb(x, y) for every pixel position in wnd (sequential; use when the
  // callback mutates shared state, e.g. the mean/std accumulators above).
  template <class CallBack>
  inline void applyPixelPosCBS(const CallBack &cb, const AABB2D wnd) const {
    for (int y = wnd.t; y < wnd.b(); ++y) {
      for (int x = wnd.l; x < wnd.r(); ++x) {
        cb(x, y);
      }
    }
  }
};
typedef ImageT<unsigned char> Image;
typedef ImageT<float> ImageF;
typedef ImageT<double> ImageD;
// Convert an image to another pixel type, optionally scaling every sample by
// `factor` (e.g. 1/255.0 when converting unsigned char -> float).
//
// Fixes: the original passed a 4-argument lambda (x, y, c, v) to
// applyPixelCB, whose callback is invoked as cb(x, y, value) — the template
// could never instantiate.  We iterate positions instead and loop channels
// ourselves.  Also adds the required `typename` for the dependent
// DSTTYPE::PixelT and drops the unused local channel variable.
template <class SRCTYPE, class DSTTYPE>
DSTTYPE convertImage(const SRCTYPE &img, const double factor = 1.0) {
  DSTTYPE result;
  const int w = img.width(), h = img.height(), ch = img.channels();
  result.resize(w, h, ch);
  auto CB = [&result, factor, &img, ch](int x, int y) {
    for (int c = 0; c < ch; ++c)
      result(x, y, c) = (typename DSTTYPE::PixelT)(img(x, y, c) * factor);
  };
  img.applyPixelPosCB(CB, img.whole());
  return result;
}
//============================================= color conversion
// Collapse an RGB image into a single-channel image using the standard
// luminosity weighting of the R, G and B channels.
template <class SRCTYPE, class DSTTYPE>
DSTTYPE convertToGrayScale(const SRCTYPE &img) {
  DSTTYPE gray;
  const int w = img.width(), h = img.height();
  gray.resize(w, h, 1);
  auto toLuma = [&img, &gray](int x, int y) {
    typename DSTTYPE::PixelT R = img(x, y, 0);
    typename DSTTYPE::PixelT G = img(x, y, 1);
    typename DSTTYPE::PixelT B = img(x, y, 2);
    // luminosity weights: 21% red, 72% green, 7% blue
    gray(x, y) = 0.21 * R + 0.72 * G + 0.07 * B;
  };
  gray.applyPixelPosCB(toLuma, gray.whole());
  return gray;
}
//============================================= resize
// Copy the source window `spos` into the destination window `dpos`,
// resampling with per-channel bilinear interpolation.  Destination pixels
// outside the image bounds are skipped.
template <class IMGTYPE>
void resizeBlit(const IMGTYPE &src, const typename IMGTYPE::AABB2D &spos,
                IMGTYPE &dst, const typename IMGTYPE::AABB2D &dpos) {
  const int nch = src.channels();
  auto blitPixel = [&src, &dst, &spos, &dpos, nch](int x, int y) {
    if (x < 0 || x >= dst.width() || y < 0 || y >= dst.height())
      return;
    // map destination pixel -> relative coords -> source coords
    const double sx = spos.rel2x(dpos.x2rel(x));
    const double sy = spos.rel2y(dpos.y2rel(y));
    for (int c = 0; c < nch; ++c)
      dst(x, y, c) = (typename IMGTYPE::PixelT) src.template bilinear2<double>(sx, sy, c);
  };
  dst.applyPixelPosCB(blitPixel, dpos);
}
//============================================= loading / writing using libJPEG
// Load a JPEG file into img.  Returns false (leaving img untouched) when the
// file cannot be opened.
// Fix: the original leaked the libjpeg decompress object (and, in the
// ferror() branch, the open FILE*) on the error path.
template <class IMGTYPE> bool loadJPEG(const char *fname, IMGTYPE &img) {
  struct jpeg_decompress_struct cinfo;
  struct jpeg_error_mgr jerr;
  cinfo.err = jpeg_std_error(&jerr);
  jpeg_create_decompress(&cinfo);
  FILE *infile = fopen(fname, "rb");
  if (infile == NULL || ferror(infile)) {
    fprintf(stderr, "error opening file %s for reading.\n", fname);
    if (infile)
      fclose(infile);
    jpeg_destroy_decompress(&cinfo); // release object created above
    return false;
  }
  jpeg_stdio_src(&cinfo, infile);
  jpeg_read_header(&cinfo, TRUE);
  // assumes RGB layout (3 channels) — TODO confirm against num_components
  img.resize(cinfo.image_width, cinfo.image_height);
  jpeg_start_decompress(&cinfo);
  // scanline start pointers directly into the image buffer
  std::vector<JSAMPROW> rowptr(cinfo.image_height);
  for (unsigned int i = 0; i < cinfo.image_height; ++i) {
    rowptr[i] = (&img(0, i));
  }
  // decode in batches of up to 10 scanlines
  while (cinfo.output_scanline < cinfo.output_height) {
    jpeg_read_scanlines(&cinfo, &rowptr[cinfo.output_scanline], 10);
  }
  jpeg_finish_decompress(&cinfo);
  jpeg_destroy_decompress(&cinfo);
  fclose(infile);
  return true;
}
// Write img to a JPEG file (RGB for 3 channels, grayscale otherwise).
// Returns false when the file cannot be opened.
// Fix: the original leaked the libjpeg compress object on fopen failure.
template <class IMGTYPE>
bool saveJPEG(const char *fname, const IMGTYPE &img, const int quality = 80) {
  struct jpeg_compress_struct cinfo;
  struct jpeg_error_mgr jerr;
  FILE *outfile;
  JSAMPROW row_pointer[1];
  int row_stride;
  cinfo.err = jpeg_std_error(&jerr);
  jpeg_create_compress(&cinfo);
  if ((outfile = fopen(fname, "wb")) == NULL) {
    fprintf(stderr, "error opening file %s for writing\n", fname);
    jpeg_destroy_compress(&cinfo); // release object created above
    return false;
  }
  jpeg_stdio_dest(&cinfo, outfile);
  cinfo.image_width = img.width();
  cinfo.image_height = img.height();
  cinfo.input_components = img.channels();
  cinfo.in_color_space = img.channels() == 3 ? JCS_RGB : JCS_GRAYSCALE;
  jpeg_set_defaults(&cinfo);
  jpeg_set_quality(&cinfo, quality, TRUE);
  jpeg_start_compress(&cinfo, TRUE);
  row_stride = img.width() * img.channels(); // samples per scanline
  while (cinfo.next_scanline < cinfo.image_height) {
    row_pointer[0] = (JSAMPROW)&img.getData()[cinfo.next_scanline * row_stride];
    (void)jpeg_write_scanlines(&cinfo, row_pointer, 1);
  }
  jpeg_finish_compress(&cinfo);
  fclose(outfile);
  jpeg_destroy_compress(&cinfo);
  return true;
}
#endif
|
opencl_keychain_fmt_plug.c | /*
* Modified by Dhiru Kholia <dhiru at openwall.com> for Keychain format.
*
* This software is Copyright (c) 2012 Lukas Odzioba <ukasz@openwall.net>
* and it is hereby released to the general public under the following terms:
* Redistribution and use in source and binary forms, with or without
* modification, are permitted. */
#ifdef HAVE_OPENCL
#if FMT_EXTERNS_H
extern struct fmt_main fmt_opencl_keychain;
#elif FMT_REGISTERS_H
john_register_one(&fmt_opencl_keychain);
#else
#include <string.h>
#include <openssl/des.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "arch.h"
#include "formats.h"
#include "common.h"
#include "stdint.h"
#include "misc.h"
#include "options.h"
#include "jumbo.h"
#include "common-opencl.h"
#define FORMAT_LABEL "keychain-opencl"
#define FORMAT_NAME "Mac OS X Keychain"
#define ALGORITHM_NAME "PBKDF2-SHA1 OpenCL 3DES"
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#define SWAP(n) \
(((n) << 24) | (((n) & 0xff00) << 8) | (((n) >> 8) & 0xff00) | ((n) >> 24))
#define BINARY_SIZE 0
#define PLAINTEXT_LENGTH 64
#define SALT_SIZE sizeof(*salt_struct)
#define BINARY_ALIGN MEM_ALIGN_WORD
#define SALT_ALIGN 4
#define SALTLEN 20
#define IVLEN 8
#define CTLEN 48
typedef struct {
uint32_t length;
uint8_t v[PLAINTEXT_LENGTH];
} keychain_password;
typedef struct {
uint32_t v[32/4];
} keychain_hash;
typedef struct {
uint32_t iterations;
uint32_t outlen;
uint32_t skip_bytes;
uint8_t length;
uint8_t salt[64];
} keychain_salt;
static int *cracked;
static int any_cracked;
static struct fmt_main *self;
static struct fmt_tests keychain_tests[] = {
{"$keychain$*10f7445c8510fa40d9ef6b4e0f8c772a9d37e449*f3d19b2a45cdcccb*8c3c3b1c7d48a24dad4ccbd4fd794ca9b0b3f1386a0a4527f3548bfe6e2f1001804b082076641bbedbc9f3a7c33c084b", "password"},
// these were generated with pass_gen.pl. NOTE, they ALL have the data (which gets encrypted) which was decrypted from the above hash.
{"$keychain$*a88cd6fbaaf40bc5437eee015a0f95ab8ab70545*b12372b1b7cb5c1f*1f5c596bcdd015afc126bc86f42dd092cb9d531d14a0aafaa89283f1bebace60562d497332afbd952fd329cc864144ec", "password"},
{"$keychain$*23328e264557b93204dc825c46a25f7fb1e17d4a*19a9efde2ca98d30*6ac89184134758a95c61bd274087ae0cffcf49f433c7f91edea98bd4fd60094e2936d99e4d985dec98284379f23259c0", "hhh"},
{"$keychain$*927717d8509db73aa47c5e820e3a381928b5e048*eef33a4a1483ae45*a52691580f17e295b8c2320947968503c605b2784bfe4851077782139f0de46f71889835190c361870baa56e2f4e9e43", "JtR-Jumbo"},
{"$keychain$*1fab88d0b8ea1a3d303e0aef519796eb29e46299*3358b0e77d60892f*286f975dcd191024227514ed9939d0fa94034294ba1eca6d5c767559e75e944b5a2fcb54fd696be64c64f9d069ce628a", "really long password -----------------------------"},
{NULL}
};
static struct custom_salt {
unsigned char salt[SALTLEN];
unsigned char iv[IVLEN];
unsigned char ct[CTLEN];
} *salt_struct;
static cl_int cl_error;
static keychain_password *inbuffer;
static keychain_hash *outbuffer;
static keychain_salt currentsalt;
static cl_mem mem_in, mem_out, mem_setting;
size_t insize, outsize, settingsize, cracked_size;
#define STEP 0
#define SEED 256
// This file contains auto-tuning routine(s). Has to be included after formats definitions.
#include "opencl-autotune.h"
#include "memdbg.h"
static const char * warn[] = {
"xfer: ", ", crypt: ", ", xfer: "
};
/* ------- Helper functions ------- */
/* Query the OpenCL runtime for the largest usable work-group size of the
   crypt kernel (used by the auto-tuner). */
static size_t get_task_max_work_group_size()
{
	return autotune_get_task_max_work_group_size(FALSE, 0, crypt_kernel);
}
/* Allocate host buffers and OpenCL device buffers for `gws` keys, and bind
   the device buffers to the kernel arguments.  Called by the auto-tuner for
   each candidate global work size. */
static void create_clobj(size_t gws, struct fmt_main *self)
{
	insize = sizeof(keychain_password) * gws;
	outsize = sizeof(keychain_hash) * gws;
	settingsize = sizeof(keychain_salt);
	cracked_size = sizeof(*cracked) * gws;
	/* host-side buffers; inbuffer/cracked zeroed so unset slots are benign */
	inbuffer = mem_calloc(1, insize);
	outbuffer = mem_alloc(outsize);
	cracked = mem_calloc(1, cracked_size);
	/// Allocate memory
	mem_in =
	    clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, insize, NULL,
	    &cl_error);
	HANDLE_CLERROR(cl_error, "Error allocating mem in");
	mem_setting =
	    clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, settingsize,
	    NULL, &cl_error);
	HANDLE_CLERROR(cl_error, "Error allocating mem setting");
	mem_out =
	    clCreateBuffer(context[gpu_id], CL_MEM_WRITE_ONLY, outsize, NULL,
	    &cl_error);
	HANDLE_CLERROR(cl_error, "Error allocating mem out");
	/* kernel signature: derive_key(in, out, salt) */
	HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 0, sizeof(mem_in),
		&mem_in), "Error while setting mem_in kernel argument");
	HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 1, sizeof(mem_out),
		&mem_out), "Error while setting mem_out kernel argument");
	HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 2, sizeof(mem_setting),
		&mem_setting), "Error while setting mem_salt kernel argument");
}
/* Release device buffers and host buffers created by create_clobj().
   `cracked` doubles as the "buffers exist" flag, making this idempotent. */
static void release_clobj(void)
{
	if (cracked) {
		HANDLE_CLERROR(clReleaseMemObject(mem_in), "Release mem in");
		HANDLE_CLERROR(clReleaseMemObject(mem_setting), "Release mem setting");
		HANDLE_CLERROR(clReleaseMemObject(mem_out), "Release mem out");
		MEM_FREE(inbuffer);
		MEM_FREE(outbuffer);
		MEM_FREE(cracked);
	}
}
/* Tear down all OpenCL objects; only acts if reset() has auto-tuned. */
static void done(void)
{
	if (autotuned) {
		release_clobj();
		HANDLE_CLERROR(clReleaseKernel(crypt_kernel), "Release kernel");
		HANDLE_CLERROR(clReleaseProgram(program[gpu_id]), "Release Program");
		autotuned--;
	}
}
/* Format init hook: remember our fmt_main and prepare the chosen device.
   Kernel build is deferred to reset(). */
static void init(struct fmt_main *_self)
{
	self = _self;
	opencl_prepare_dev(gpu_id);
}
/* Build the PBKDF2-HMAC-SHA1 kernel (first call only) and run the shared
   auto-tuner to pick local/global work sizes. */
static void reset(struct db_main *db)
{
	if (!autotuned) {
		char build_opts[64];
		/* compile-time sizes baked into the kernel source */
		snprintf(build_opts, sizeof(build_opts),
		         "-DKEYLEN=%d -DSALTLEN=%d -DOUTLEN=%d",
		         PLAINTEXT_LENGTH,
		         (int)sizeof(currentsalt.salt),
		         (int)sizeof(outbuffer->v));
		opencl_init("$JOHN/kernels/pbkdf2_hmac_sha1_unsplit_kernel.cl",
		            gpu_id, build_opts);
		crypt_kernel = clCreateKernel(program[gpu_id], "derive_key", &cl_error);
		HANDLE_CLERROR(cl_error, "Error creating kernel");
		// Initialize openCL tuning (library) for this format.
		opencl_init_auto_setup(SEED, 0, NULL, warn, 1, self,
		                       create_clobj, release_clobj,
		                       sizeof(keychain_password), 0, db);
		// Auto tune execution from shared/included code.
		autotune_run(self, 1, 0, 1000);
	}
}
/* Syntax check for "$keychain$*<salt:20B hex>*<iv:8B hex>*<ct:48B hex>".
 * Returns 1 iff the ciphertext is well formed.
 * Fix: the original accepted trailing "*..." fields after the ciphertext;
 * such hashes would later confuse get_salt(), so they are now rejected. */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *ctcopy, *keeptr, *p;

	if (strncmp(ciphertext, "$keychain$*", 11) != 0)
		return 0;
	ctcopy = strdup(ciphertext);
	keeptr = ctcopy;
	ctcopy += 11;
	if ((p = strtokm(ctcopy, "*")) == NULL)	/* salt */
		goto err;
	if (hexlenl(p) != SALTLEN * 2)
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* iv */
		goto err;
	if (hexlenl(p) != IVLEN * 2)
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* ciphertext */
		goto err;
	if (hexlenl(p) != CTLEN * 2)
		goto err;
	if (strtokm(NULL, "*") != NULL)	/* no trailing fields allowed */
		goto err;
	MEM_FREE(keeptr);
	return 1;
err:
	MEM_FREE(keeptr);
	return 0;
}
/* Decode the hex fields of a validated ciphertext into a custom_salt.
   Returns a pointer to a static allocation (john copies SALT_SIZE bytes). */
static void *get_salt(char *ciphertext)
{
	char *ctcopy = strdup(ciphertext);
	char *keeptr = ctcopy;
	int i;
	char *p;
	static struct custom_salt *salt_struct;

	/* lazily allocated once; shadows the file-level salt_struct pointer */
	if (!salt_struct)
		salt_struct = mem_calloc_tiny(sizeof(struct custom_salt),
		                              MEM_ALIGN_WORD);
	ctcopy += 11;	/* skip over "$keychain$*" */
	p = strtokm(ctcopy, "*");
	/* hex decode: two nibbles per output byte; lengths guaranteed by valid() */
	for (i = 0; i < SALTLEN; i++)
		salt_struct->salt[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
			+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
	p = strtokm(NULL, "*");
	for (i = 0; i < IVLEN; i++)
		salt_struct->iv[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
			+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
	p = strtokm(NULL, "*");
	for (i = 0; i < CTLEN; i++)
		salt_struct->ct[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
			+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
	MEM_FREE(keeptr);
	return (void *)salt_struct;
}
static void set_salt(void *salt)
{
salt_struct = (struct custom_salt *)salt;
memcpy((char*)currentsalt.salt, salt_struct->salt, 20);
currentsalt.length = 20;
currentsalt.iterations = 1000;
currentsalt.outlen = 24;
currentsalt.skip_bytes = 0;
HANDLE_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_setting,
CL_FALSE, 0, settingsize, ¤tsalt, 0, NULL, NULL),
"Copy salt to gpu");
}
#undef set_key
/* Store a candidate password into the host input buffer, truncated to
 * PLAINTEXT_LENGTH.
 * Fix: the original narrowed strlen() into a uint8_t BEFORE clamping, so a
 * key of 256+n characters wrapped to length n (a 256-char key became empty).
 * Clamp in size_t first, then store. */
static void set_key(char *key, int index)
{
	size_t length = strlen(key);

	if (length > PLAINTEXT_LENGTH)
		length = PLAINTEXT_LENGTH;
	inbuffer[index].length = length;
	memcpy(inbuffer[index].v, key, length);
}
/* Return the plaintext stored at `index`, NUL-terminated, in a static
   buffer (valid until the next call). */
static char *get_key(int index)
{
	static char out[PLAINTEXT_LENGTH + 1];
	const int n = inbuffer[index].length;

	memcpy(out, inbuffer[index].v, n);
	out[n] = '\0';
	return out;
}
/* Decrypt the 48-byte keychain blob with 3DES-EDE-CBC using the 24 bytes of
   derived key material.  Returns 0 when the result looks like a correctly
   padded plaintext (candidate password is right), -1 otherwise. */
static int kcdecrypt(unsigned char *key, unsigned char *iv, unsigned char *data)
{
	unsigned char out[CTLEN];
	DES_cblock key1, key2, key3;
	DES_cblock ivec;
	DES_key_schedule ks1, ks2, ks3;
	memset(out, 0, sizeof(out));
	/* split the 24-byte derived key into three single-DES keys */
	memcpy(key1, key, 8);
	memcpy(key2, key + 8, 8);
	memcpy(key3, key + 16, 8);
	DES_set_key((DES_cblock *) key1, &ks1);
	DES_set_key((DES_cblock *) key2, &ks2);
	DES_set_key((DES_cblock *) key3, &ks3);
	memcpy(ivec, iv, 8);
	DES_ede3_cbc_encrypt(data, out, CTLEN, &ks1, &ks2, &ks3, &ivec, DES_DECRYPT);

	/* possible bug here, is this assumption (pad of 4) always valid? */
	if (out[47] != 4 || check_pkcs_pad(out, CTLEN, 8) < 0)
		return -1;
	return 0;
}
#if 0
//#ifdef DEBUG
/* Debug helper: dump `len` bytes of `str` as lowercase hex + newline. */
static void print_hex(unsigned char *str, int len)
{
	for (int i = 0; i < len; ++i)
		printf("%02x", str[i]);
	printf("\n");
}
#endif
/* Derive keys for `count` candidates on the GPU, then 3DES-decrypt and
   pad-check each result on the CPU (OpenMP).  Matches are flagged in
   cracked[]; any_cracked summarizes them for cmp_all(). */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index;
	size_t *lws = local_work_size ? &local_work_size : NULL;

	global_work_size = GET_MULTIPLE_OR_BIGGER(count, local_work_size);

	/* clear flags from the previous batch */
	if (any_cracked) {
		memset(cracked, 0, cracked_size);
		any_cracked = 0;
	}

	/// Copy data to gpu
	BENCH_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_in, CL_FALSE, 0,
		insize, inbuffer, 0, NULL, multi_profilingEvent[0]),
	        "Copy data to gpu");

	/// Run kernel
	BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], crypt_kernel, 1,
		NULL, &global_work_size, lws, 0, NULL,
	        multi_profilingEvent[1]), "Run kernel");

	/// Read the result back (blocking read also flushes the queue)
	BENCH_CLERROR(clEnqueueReadBuffer(queue[gpu_id], mem_out, CL_TRUE, 0,
		outsize, outbuffer, 0, NULL, multi_profilingEvent[2]), "Copy result back");

	if (ocl_autotune_running)
		return count;

#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < count; index++)
		if (!kcdecrypt((unsigned char*)outbuffer[index].v,
		               salt_struct->iv, salt_struct->ct))
		{
			cracked[index] = 1;	/* each thread owns its own index */
#ifdef _OPENMP
#pragma omp atomic
#endif
			any_cracked |= 1;
		}
	return count;
}
/* Any hit in the last crypt_all() batch? */
static int cmp_all(void *binary, int count)
{
	return any_cracked;
}
/* Did candidate `index` crack the current salt? */
static int cmp_one(void *binary, int index)
{
	return cracked[index];
}
static int cmp_exact(char *source, int index)
{
return 1;
}
/* Format descriptor registered with john: parameters first, then the
   method table wired to the functions above. */
struct fmt_main fmt_opencl_keychain = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_NOT_EXACT,
		{ NULL },
		keychain_tests
	}, {
		init,
		done,
		reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		fmt_default_binary,
		get_salt,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			fmt_default_get_hash
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif /* plugin stanza */
#endif /* HAVE_OPENCL */
|
H2Pack_build_periodic.c | #include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <assert.h>
#include <math.h>
#include <time.h>
#include <omp.h>
#include "H2Pack_config.h"
#include "H2Pack_typedef.h"
#include "H2Pack_aux_structs.h"
#include "H2Pack_build_periodic.h"
#include "H2Pack_utils.h"
#include "utils.h"
// Build periodic block for root node
// Build periodic block for root node:
//   per_blk = pkernel(J0, J0) - sum_l kernel(J0, shift(J0, lattice_l))
// where J0 are the root's skeleton points.  The result is stored in
// h2pack->per_blk (owned by h2pack; freed elsewhere).
void H2P_build_periodic_block(H2Pack_p h2pack)
{
    int   pt_dim       = h2pack->pt_dim;
    int   xpt_dim      = h2pack->xpt_dim;
    int   krnl_dim     = h2pack->krnl_dim;
    int   root_idx     = h2pack->root_idx;
    int   n_lattice    = h2pack->n_lattice;
    void  *krnl_param  = h2pack->krnl_param;
    void  *pkrnl_param = h2pack->pkrnl_param;
    // width part of the root node's enclosing box [center | width]
    DTYPE *enbox0_width = h2pack->enbox + (root_idx * (2 * pt_dim) + pt_dim);
    DTYPE *per_lattices = h2pack->per_lattices;
    H2P_dense_mat_p root_J_coord   = h2pack->J_coord[root_idx];
    // thread 0's scratch matrices are reused as work buffers
    H2P_dense_mat_p root_J_coord_s = h2pack->tb[0]->mat0;
    H2P_dense_mat_p krnl_mat_blk   = h2pack->tb[0]->mat1;
    kernel_eval_fptr krnl_eval  = h2pack->krnl_eval;
    kernel_eval_fptr pkrnl_eval = h2pack->pkrnl_eval;

    int n_point_root = root_J_coord->ncol;
    int per_blk_size = n_point_root * krnl_dim;
    DTYPE *per_blk = (DTYPE*) malloc_aligned(sizeof(DTYPE) * per_blk_size * per_blk_size, 64);
    ASSERT_PRINTF(per_blk != NULL, "Failed to allocate periodic block of size %d^2\n", per_blk_size);

    // O = pkernel({root_J_coord, root_J_coord});
    pkrnl_eval(
        root_J_coord->data, root_J_coord->ld, root_J_coord->ncol,
        root_J_coord->data, root_J_coord->ld, root_J_coord->ncol,
        pkrnl_param, per_blk, per_blk_size
    );
    // shift vector, padded with zeros up to xpt_dim (pt_dim <= 8 assumed here)
    DTYPE shift[8] = {0, 0, 0, 0, 0, 0, 0, 0};
    H2P_dense_mat_resize(krnl_mat_blk, per_blk_size, per_blk_size);
    H2P_dense_mat_resize(root_J_coord_s, xpt_dim, n_point_root);
    // root_J_coord_s starts as a copy of root_J_coord and is shifted in place
    copy_matrix_block(
        sizeof(DTYPE), xpt_dim, n_point_root, root_J_coord->data, root_J_coord->ld,
        root_J_coord_s->data, root_J_coord_s->ld
    );
    for (int l = 0; l < n_lattice; l++)
    {
        // shift = lattice(l, 1 : pt_dim) .* root_box(pt_dim+1 : 2 * pt_dim);
        // shift = [shift, zeros(1, xpt_dim - pt_dim)];
        DTYPE *lattice_l = per_lattices + l * pt_dim;
        for (int j = 0; j < pt_dim; j++) shift[j] = enbox0_width[j] * lattice_l[j];
        // root_J_coord_s = coord_shift(root_J_coord, shift, 1);
        H2P_shift_coord(root_J_coord_s, shift, 1.0);
        // O = O - kernel({root_J_coord, root_J_coord_s});
        krnl_eval(
            root_J_coord->data,   root_J_coord->ld,   root_J_coord->ncol,
            root_J_coord_s->data, root_J_coord_s->ld, root_J_coord->ncol,
            krnl_param, krnl_mat_blk->data, krnl_mat_blk->ld
        );
        #pragma omp simd
        for (int i = 0; i < per_blk_size * per_blk_size; i++)
            per_blk[i] -= krnl_mat_blk->data[i];
        // Reset root_J_coord_s = root_J_coord (undo the shift)
        H2P_shift_coord(root_J_coord_s, shift, -1.0);
    }
    h2pack->per_blk = per_blk;
}
// Build H2 representation with a regular kernel function and
// a periodic system kernel (Ewald summation) function
// Build the H2 representation for a periodic system: projection matrices,
// B/D block metadata, the root periodic block (Ewald correction), and the
// forward/backward permutation indices.
//   pp           : proxy points, required
//   BD_JIT       : must be 1 (just-in-time B/D blocks only)
//   krnl_eval    : regular kernel, required
//   pkrnl_eval   : periodic (Ewald summation) kernel
//   krnl_mv      : optional kernel matvec (NULL -> fall back to krnl_eval)
// Fix: step 4's timing was assigned (=) to timers[B_BUILD_TIMER_IDX],
// clobbering step 2's B-build time; the comment says "add", so use +=.
void H2P_build_periodic(
    H2Pack_p h2pack, H2P_dense_mat_p *pp, const int BD_JIT,
    void *krnl_param,  kernel_eval_fptr krnl_eval,
    void *pkrnl_param, kernel_eval_fptr pkrnl_eval,
    kernel_mv_fptr krnl_mv, const int krnl_mv_flops
)
{
    double st, et;
    double *timers = h2pack->timers;

    // Validate required inputs before touching h2pack state
    if (pp == NULL)
    {
        ERROR_PRINTF("You need to provide a set of proxy points.\n");
        return;
    }
    if (krnl_eval == NULL)
    {
        ERROR_PRINTF("You need to provide a valid krnl_eval().\n");
        return;
    }
    if (BD_JIT != 1)
    {
        ERROR_PRINTF("Only support BD_JIT=1 in this function for the moment.\n");
        return;
    }

    h2pack->pp = pp;
    h2pack->BD_JIT = BD_JIT;
    h2pack->krnl_param  = krnl_param;
    h2pack->krnl_eval   = krnl_eval;
    h2pack->pkrnl_param = pkrnl_param;
    h2pack->pkrnl_eval  = pkrnl_eval;
    h2pack->krnl_mv = krnl_mv;
    h2pack->krnl_bimv_flops = krnl_mv_flops - 2;
    if (BD_JIT == 1 && krnl_mv == NULL)
        WARNING_PRINTF("krnl_eval() will be used in BD_JIT matvec. For better performance, consider using a krnl_mv().\n");

    // 1. Build projection matrices and skeleton row sets
    st = get_wtime_sec();
    H2P_build_H2_UJ_proxy(h2pack);
    et = get_wtime_sec();
    timers[U_BUILD_TIMER_IDX] = et - st;

    // 2. Generate H2 generator matrices metadata
    st = get_wtime_sec();
    H2P_generate_B_metadata(h2pack);
    et = get_wtime_sec();
    timers[B_BUILD_TIMER_IDX] = et - st;

    // 3. Generate H2 dense blocks metadata
    st = get_wtime_sec();
    H2P_generate_D_metadata(h2pack);
    et = get_wtime_sec();
    timers[D_BUILD_TIMER_IDX] = et - st;

    // 4. Build periodic block for root node, add its timing to B build timing
    st = get_wtime_sec();
    H2P_build_periodic_block(h2pack);
    et = get_wtime_sec();
    timers[B_BUILD_TIMER_IDX] += et - st;  // accumulate, don't overwrite step 2

    // 5. Set up forward and backward permutation indices
    // (points are reordered internally; these map between external and
    //  internal orderings at krnl_dim granularity)
    int n_point  = h2pack->n_point;
    int krnl_dim = h2pack->krnl_dim;
    int *coord_idx   = h2pack->coord_idx;
    int *fwd_pmt_idx = (int*) malloc(sizeof(int) * n_point * krnl_dim);
    int *bwd_pmt_idx = (int*) malloc(sizeof(int) * n_point * krnl_dim);
    for (int i = 0; i < n_point; i++)
    {
        for (int j = 0; j < krnl_dim; j++)
        {
            fwd_pmt_idx[i * krnl_dim + j] = coord_idx[i] * krnl_dim + j;
            bwd_pmt_idx[coord_idx[i] * krnl_dim + j] = i * krnl_dim + j;
        }
    }
    h2pack->fwd_pmt_idx = fwd_pmt_idx;
    h2pack->bwd_pmt_idx = bwd_pmt_idx;
}
|
GB_unaryop__ainv_fp32_uint8.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__ainv_fp32_uint8
// op(A') function: GB_tran__ainv_fp32_uint8
// C type: float
// A type: uint8_t
// cast: float cij = (float) aij
// unaryop: cij = -aij
#define GB_ATYPE \
uint8_t
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = -x ;
// casting
#define GB_CASTING(z, x) \
float z = (float) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_FP32 || GxB_NO_UINT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = (float) -Ax [p] for all p, parallelized over nthreads.
// (Auto-generated; behavior is fully defined by the GB_* macros above.)
GrB_Info GB_unop__ainv_fp32_uint8
(
    float *restrict Cx,
    const uint8_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = (float) -(A'): transpose, typecast, and negate, sliced over naslice
// tasks.  The loop body lives in the shared template GB_unaryop_transpose.c,
// specialized by the GB_* macros above.
GrB_Info GB_tran__ainv_fp32_uint8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
convolution_3x3.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
// Copyright (C) 2019 BUG1989. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// Plain scalar 3x3 stride-1 convolution.  Each output channel is seeded with
// its bias, then every input channel's contribution is accumulated on top.
// Two output rows are produced per pass so the shared input rows r1/r2 are
// loaded once for both.
static void conv3x3s1_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt)
{
    const int w = bottom_blob.w;
    const int inch = bottom_blob.c;

    const int outw = top_blob.w;
    const int outh = top_blob.h;
    const int outch = top_blob.c;

    const float* kernel = _kernel;
    const float* bias = _bias;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        Mat out = top_blob.channel(p);

        const float bias0 = bias ? bias[p] : 0.f;
        out.fill(bias0);

        for (int q = 0; q < inch; q++)
        {
            float* dst0 = out;
            float* dst1 = dst0 + outw;

            const float* img0 = bottom_blob.channel(q);

            // 3x3 kernel for (output p, input q), stored as three rows
            const float* kernel0 = kernel + p * inch * 9 + q * 9;
            const float* k0 = kernel0;
            const float* k1 = kernel0 + 3;
            const float* k2 = kernel0 + 6;

            const float* r0 = img0;
            const float* r1 = img0 + w;
            const float* r2 = img0 + w * 2;
            const float* r3 = img0 + w * 3;

            int i = 0;
            // two output rows at a time: rows i (from r0..r2) and i+1 (from r1..r3)
            for (; i + 1 < outh; i += 2)
            {
                for (int j = 0; j < outw; j++)
                {
                    float sum = 0;
                    float sum2 = 0;

                    sum += r0[0] * k0[0];
                    sum += r0[1] * k0[1];
                    sum += r0[2] * k0[2];
                    sum += r1[0] * k1[0];
                    sum += r1[1] * k1[1];
                    sum += r1[2] * k1[2];
                    sum += r2[0] * k2[0];
                    sum += r2[1] * k2[1];
                    sum += r2[2] * k2[2];

                    sum2 += r1[0] * k0[0];
                    sum2 += r1[1] * k0[1];
                    sum2 += r1[2] * k0[2];
                    sum2 += r2[0] * k1[0];
                    sum2 += r2[1] * k1[1];
                    sum2 += r2[2] * k1[2];
                    sum2 += r3[0] * k2[0];
                    sum2 += r3[1] * k2[1];
                    sum2 += r3[2] * k2[2];

                    dst0[j] += sum;
                    dst1[j] += sum2;

                    r0++;
                    r1++;
                    r2++;
                    r3++;
                }

                // skip the 2-column right border, then hop over the row the
                // partner pass already consumed
                r0 += 2 + w;
                r1 += 2 + w;
                r2 += 2 + w;
                r3 += 2 + w;

                dst0 += 2 * outw;
                dst1 += 2 * outw;
            }
            // leftover single row when outh is odd
            for (; i < outh; i++)
            {
                for (int j = 0; j < outw; j++)
                {
                    float sum = 0;

                    sum += r0[0] * k0[0];
                    sum += r0[1] * k0[1];
                    sum += r0[2] * k0[2];
                    sum += r1[0] * k1[0];
                    sum += r1[1] * k1[1];
                    sum += r1[2] * k1[2];
                    sum += r2[0] * k2[0];
                    sum += r2[1] * k2[1];
                    sum += r2[2] * k2[2];

                    dst0[j] += sum;

                    r0++;
                    r1++;
                    r2++;
                }

                r0 += 2;
                r1 += 2;
                r2 += 2;
                dst0 += outw;
            }
        }
    }
}
// Winograd F(2,3) kernel transform: for every (outch, inch) 3x3 kernel g,
// compute U = G * g * G^T and store the 4x4 result row-major in kernel_tm.
static void conv3x3s1_winograd23_transform_kernel_sse(const Mat& kernel, Mat& kernel_tm, int inch, int outch, const Option& opt)
{
    kernel_tm.create(4 * 4, inch, outch);

    // G
    const float ktm[4][3] = {
        {1.0f, 0.0f, 0.0f},
        {1.0f / 2, 1.0f / 2, 1.0f / 2},
        {1.0f / 2, -1.0f / 2, 1.0f / 2},
        {0.0f, 0.0f, 1.0f}
    };

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        for (int q = 0; q < inch; q++)
        {
            const float* g0 = (const float*)kernel + p * inch * 9 + q * 9;
            float* utile = kernel_tm.channel(p).row(q);

            // the three rows of the 3x3 kernel
            const float* grow[3] = {g0, g0 + 3, g0 + 6};

            // h = G * g  (4x3)
            float tmp[4][3];
            for (int i = 0; i < 4; i++)
            {
                for (int m = 0; m < 3; m++)
                {
                    tmp[i][m] = grow[m][0] * ktm[i][0] + grow[m][1] * ktm[i][1] + grow[m][2] * ktm[i][2];
                }
            }

            // U = h * G^T  (4x4)
            for (int j = 0; j < 4; j++)
            {
                const float* h = tmp[j];
                for (int i = 0; i < 4; i++)
                {
                    utile[j * 4 + i] = h[0] * ktm[i][0] + h[1] * ktm[i][1] + h[2] * ktm[i][2];
                }
            }
        }
    }
}
// Winograd F(2,3) 3x3 stride-1 convolution.  Four stages:
//   1) pad the input so the output size is even, plus a 2-pixel border
//   2) transform each 4x4 input tile: w = B^T * d * B  -> bottom_blob_tm
//   3) per-tile elementwise dot of transformed input with the pre-transformed
//      kernels (kernel_tm, produced by conv3x3s1_winograd23_transform_kernel_sse)
//   4) inverse-transform each accumulated 4x4 tile (A^T * w * A), add bias,
//      and crop back to the requested output size
static void conv3x3s1_winograd23_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    // pad to 2n+2, winograd F(2,3)
    Mat bottom_blob_bordered = bottom_blob;

    // round the output up to a multiple of 2; the padded input needs 2 extra
    // pixels for the overlapping 4x4 tiles (stride 2 between tiles)
    outw = (outw + 1) / 2 * 2;
    outh = (outh + 1) / 2 * 2;

    w = outw + 2;
    h = outh + 2;
    Option opt_b = opt;
    opt_b.blob_allocator = opt.workspace_allocator;
    copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, 0, 0.f, opt_b);

    const float* bias = _bias;

    // BEGIN transform input
    Mat bottom_blob_tm;
    {
        int w_tm = outw / 2 * 4;
        int h_tm = outh / 2 * 4;

        int nColBlocks = h_tm / 4; // may be the block num in Feathercnn
        int nRowBlocks = w_tm / 4;

        const int tiles = nColBlocks * nRowBlocks;

        // one 4x4 transformed tile (16 floats) per tile per input channel
        bottom_blob_tm.create(4 * 4, tiles, inch, 4u, opt.workspace_allocator);

        // BT
        // const float itm[4][4] = {
        //     {1.0f,  0.0f, -1.0f,  0.0f},
        //     {0.0f,  1.0f,  1.00f, 0.0f},
        //     {0.0f, -1.0f,  1.00f, 0.0f},
        //     {0.0f, -1.0f,  0.00f, 1.0f}
        // };

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int q = 0; q < inch; q++)
        {
            const float* img = bottom_blob_bordered.channel(q);
            float* out_tm0 = bottom_blob_tm.channel(q);

            for (int j = 0; j < nColBlocks; j++)
            {
                // four consecutive input rows feeding this row of tiles
                const float* r0 = img + w * j * 2;
                const float* r1 = r0 + w;
                const float* r2 = r1 + w;
                const float* r3 = r2 + w;

                for (int i = 0; i < nRowBlocks; i++)
                {
#if __AVX__
                    __m128 _d0, _d1, _d2, _d3;
                    __m128 _w0, _w1, _w2, _w3;

                    // load
                    _d0 = _mm_loadu_ps(r0);
                    _d1 = _mm_loadu_ps(r1);
                    _d2 = _mm_loadu_ps(r2);
                    _d3 = _mm_loadu_ps(r3);

                    // w = B_t * d
                    _w0 = _mm_sub_ps(_d0, _d2);
                    _w1 = _mm_add_ps(_d1, _d2);
                    _w2 = _mm_sub_ps(_d2, _d1);
                    _w3 = _mm_sub_ps(_d3, _d1);

                    // transpose d to d_t
                    _MM_TRANSPOSE4_PS(_w0, _w1, _w2, _w3);

                    // d = B_t * d_t
                    _d0 = _mm_sub_ps(_w0, _w2);
                    _d1 = _mm_add_ps(_w1, _w2);
                    _d2 = _mm_sub_ps(_w2, _w1);
                    _d3 = _mm_sub_ps(_w3, _w1);

                    // save to out_tm
                    _mm_storeu_ps(out_tm0, _d0);
                    _mm_storeu_ps(out_tm0 + 4, _d1);
                    _mm_storeu_ps(out_tm0 + 8, _d2);
                    _mm_storeu_ps(out_tm0 + 12, _d3);
#else
                    float d0[4], d1[4], d2[4], d3[4];
                    float w0[4], w1[4], w2[4], w3[4];
                    float t0[4], t1[4], t2[4], t3[4];

                    // load
                    for (int n = 0; n < 4; n++)
                    {
                        d0[n] = r0[n];
                        d1[n] = r1[n];
                        d2[n] = r2[n];
                        d3[n] = r3[n];
                    }
                    // w = B_t * d
                    for (int n = 0; n < 4; n++)
                    {
                        w0[n] = d0[n] - d2[n];
                        w1[n] = d1[n] + d2[n];
                        w2[n] = d2[n] - d1[n];
                        w3[n] = d3[n] - d1[n];
                    }
                    // transpose d to d_t
                    {
                        t0[0] = w0[0];
                        t1[0] = w0[1];
                        t2[0] = w0[2];
                        t3[0] = w0[3];
                        t0[1] = w1[0];
                        t1[1] = w1[1];
                        t2[1] = w1[2];
                        t3[1] = w1[3];
                        t0[2] = w2[0];
                        t1[2] = w2[1];
                        t2[2] = w2[2];
                        t3[2] = w2[3];
                        t0[3] = w3[0];
                        t1[3] = w3[1];
                        t2[3] = w3[2];
                        t3[3] = w3[3];
                    }
                    // d = B_t * d_t
                    for (int n = 0; n < 4; n++)
                    {
                        d0[n] = t0[n] - t2[n];
                        d1[n] = t1[n] + t2[n];
                        d2[n] = t2[n] - t1[n];
                        d3[n] = t3[n] - t1[n];
                    }
                    // save to out_tm
                    for (int n = 0; n < 4; n++)
                    {
                        out_tm0[n] = d0[n];
                        out_tm0[n + 4] = d1[n];
                        out_tm0[n + 8] = d2[n];
                        out_tm0[n + 12] = d3[n];
                    }
#endif
                    // tiles overlap by 2 columns
                    r0 += 2;
                    r1 += 2;
                    r2 += 2;
                    r3 += 2;

                    out_tm0 += 16;
                }
            }
        }
    }
    // release the padded copy before the dot stage
    bottom_blob_bordered = Mat();

    // BEGIN dot
    Mat top_blob_tm;
    {
        int w_tm = outw / 2 * 4;
        int h_tm = outh / 2 * 4;

        int nColBlocks = h_tm / 4; // may be the block num in Feathercnn
        int nRowBlocks = w_tm / 4;

        const int tiles = nColBlocks * nRowBlocks;

        top_blob_tm.create(16, tiles, outch, 4u, opt.workspace_allocator);

        // process output channels four at a time; the tail is handled below
        int nn_outch = outch >> 2;
        int remain_outch_start = nn_outch << 2;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int pp = 0; pp < nn_outch; pp++)
        {
            int p = pp * 4;

            Mat out0_tm = top_blob_tm.channel(p);
            Mat out1_tm = top_blob_tm.channel(p + 1);
            Mat out2_tm = top_blob_tm.channel(p + 2);
            Mat out3_tm = top_blob_tm.channel(p + 3);

            const Mat kernel0_tm = kernel_tm.channel(p);
            const Mat kernel1_tm = kernel_tm.channel(p + 1);
            const Mat kernel2_tm = kernel_tm.channel(p + 2);
            const Mat kernel3_tm = kernel_tm.channel(p + 3);

            for (int i = 0; i < tiles; i++)
            {
                float* output0_tm = out0_tm.row(i);
                float* output1_tm = out1_tm.row(i);
                float* output2_tm = out2_tm.row(i);
                float* output3_tm = out3_tm.row(i);

#if __AVX__
                float zero_val = 0.f;

                // 16 accumulators per output channel, split in two 8-lane halves
                __m256 _sum0 = _mm256_broadcast_ss(&zero_val);
                __m256 _sum0n = _mm256_broadcast_ss(&zero_val);
                __m256 _sum1 = _mm256_broadcast_ss(&zero_val);
                __m256 _sum1n = _mm256_broadcast_ss(&zero_val);
                __m256 _sum2 = _mm256_broadcast_ss(&zero_val);
                __m256 _sum2n = _mm256_broadcast_ss(&zero_val);
                __m256 _sum3 = _mm256_broadcast_ss(&zero_val);
                __m256 _sum3n = _mm256_broadcast_ss(&zero_val);

                int q = 0;
                // four input channels per iteration; the k0+16/+32/+48 offsets
                // step across consecutive 16-float kernel rows (channels q+1..q+3)
                for (; q + 3 < inch; q += 4)
                {
                    const float* r0 = bottom_blob_tm.channel(q).row(i);
                    const float* r1 = bottom_blob_tm.channel(q + 1).row(i);
                    const float* r2 = bottom_blob_tm.channel(q + 2).row(i);
                    const float* r3 = bottom_blob_tm.channel(q + 3).row(i);

                    const float* k0 = kernel0_tm.row(q);
                    const float* k1 = kernel1_tm.row(q);
                    const float* k2 = kernel2_tm.row(q);
                    const float* k3 = kernel3_tm.row(q);

                    __m256 _r0 = _mm256_loadu_ps(r0);
                    __m256 _r0n = _mm256_loadu_ps(r0 + 8);
                    // k0
                    __m256 _k0 = _mm256_loadu_ps(k0);
                    __m256 _k0n = _mm256_loadu_ps(k0 + 8);
                    __m256 _k1 = _mm256_loadu_ps(k1);
                    __m256 _k1n = _mm256_loadu_ps(k1 + 8);
                    __m256 _k2 = _mm256_loadu_ps(k2);
                    __m256 _k2n = _mm256_loadu_ps(k2 + 8);
                    __m256 _k3 = _mm256_loadu_ps(k3);
                    __m256 _k3n = _mm256_loadu_ps(k3 + 8);
                    _sum0 = _mm256_comp_fmadd_ps(_r0, _k0, _sum0);
                    _sum0n = _mm256_comp_fmadd_ps(_r0n, _k0n, _sum0n);
                    _sum1 = _mm256_comp_fmadd_ps(_r0, _k1, _sum1);
                    _sum1n = _mm256_comp_fmadd_ps(_r0n, _k1n, _sum1n);
                    _sum2 = _mm256_comp_fmadd_ps(_r0, _k2, _sum2);
                    _sum2n = _mm256_comp_fmadd_ps(_r0n, _k2n, _sum2n);
                    _sum3 = _mm256_comp_fmadd_ps(_r0, _k3, _sum3);
                    _sum3n = _mm256_comp_fmadd_ps(_r0n, _k3n, _sum3n);
                    // k1
                    _r0 = _mm256_loadu_ps(r1);
                    _r0n = _mm256_loadu_ps(r1 + 8);
                    _k0 = _mm256_loadu_ps(k0 + 16);
                    _k0n = _mm256_loadu_ps(k0 + 24);
                    _k1 = _mm256_loadu_ps(k1 + 16);
                    _k1n = _mm256_loadu_ps(k1 + 24);
                    _k2 = _mm256_loadu_ps(k2 + 16);
                    _k2n = _mm256_loadu_ps(k2 + 24);
                    _k3 = _mm256_loadu_ps(k3 + 16);
                    _k3n = _mm256_loadu_ps(k3 + 24);
                    _sum0 = _mm256_comp_fmadd_ps(_r0, _k0, _sum0);
                    _sum0n = _mm256_comp_fmadd_ps(_r0n, _k0n, _sum0n);
                    _sum1 = _mm256_comp_fmadd_ps(_r0, _k1, _sum1);
                    _sum1n = _mm256_comp_fmadd_ps(_r0n, _k1n, _sum1n);
                    _sum2 = _mm256_comp_fmadd_ps(_r0, _k2, _sum2);
                    _sum2n = _mm256_comp_fmadd_ps(_r0n, _k2n, _sum2n);
                    _sum3 = _mm256_comp_fmadd_ps(_r0, _k3, _sum3);
                    _sum3n = _mm256_comp_fmadd_ps(_r0n, _k3n, _sum3n);
                    // k2
                    _r0 = _mm256_loadu_ps(r2);
                    _r0n = _mm256_loadu_ps(r2 + 8);
                    _k0 = _mm256_loadu_ps(k0 + 32);
                    _k0n = _mm256_loadu_ps(k0 + 40);
                    _k1 = _mm256_loadu_ps(k1 + 32);
                    _k1n = _mm256_loadu_ps(k1 + 40);
                    _k2 = _mm256_loadu_ps(k2 + 32);
                    _k2n = _mm256_loadu_ps(k2 + 40);
                    _k3 = _mm256_loadu_ps(k3 + 32);
                    _k3n = _mm256_loadu_ps(k3 + 40);
                    _sum0 = _mm256_comp_fmadd_ps(_r0, _k0, _sum0);
                    _sum0n = _mm256_comp_fmadd_ps(_r0n, _k0n, _sum0n);
                    _sum1 = _mm256_comp_fmadd_ps(_r0, _k1, _sum1);
                    _sum1n = _mm256_comp_fmadd_ps(_r0n, _k1n, _sum1n);
                    _sum2 = _mm256_comp_fmadd_ps(_r0, _k2, _sum2);
                    _sum2n = _mm256_comp_fmadd_ps(_r0n, _k2n, _sum2n);
                    _sum3 = _mm256_comp_fmadd_ps(_r0, _k3, _sum3);
                    _sum3n = _mm256_comp_fmadd_ps(_r0n, _k3n, _sum3n);
                    // k3
                    _r0 = _mm256_loadu_ps(r3);
                    _r0n = _mm256_loadu_ps(r3 + 8);
                    _k0 = _mm256_loadu_ps(k0 + 48);
                    _k0n = _mm256_loadu_ps(k0 + 56);
                    _k1 = _mm256_loadu_ps(k1 + 48);
                    _k1n = _mm256_loadu_ps(k1 + 56);
                    _k2 = _mm256_loadu_ps(k2 + 48);
                    _k2n = _mm256_loadu_ps(k2 + 56);
                    _k3 = _mm256_loadu_ps(k3 + 48);
                    _k3n = _mm256_loadu_ps(k3 + 56);
                    _sum0 = _mm256_comp_fmadd_ps(_r0, _k0, _sum0);
                    _sum0n = _mm256_comp_fmadd_ps(_r0n, _k0n, _sum0n);
                    _sum1 = _mm256_comp_fmadd_ps(_r0, _k1, _sum1);
                    _sum1n = _mm256_comp_fmadd_ps(_r0n, _k1n, _sum1n);
                    _sum2 = _mm256_comp_fmadd_ps(_r0, _k2, _sum2);
                    _sum2n = _mm256_comp_fmadd_ps(_r0n, _k2n, _sum2n);
                    _sum3 = _mm256_comp_fmadd_ps(_r0, _k3, _sum3);
                    _sum3n = _mm256_comp_fmadd_ps(_r0n, _k3n, _sum3n);
                }
                // remaining input channels, one at a time
                for (; q < inch; q++)
                {
                    const float* r0 = bottom_blob_tm.channel(q).row(i);

                    const float* k0 = kernel0_tm.row(q);
                    const float* k1 = kernel1_tm.row(q);
                    const float* k2 = kernel2_tm.row(q);
                    const float* k3 = kernel3_tm.row(q);

                    __m256 _r0 = _mm256_loadu_ps(r0);
                    __m256 _r0n = _mm256_loadu_ps(r0 + 8);
                    __m256 _k0 = _mm256_loadu_ps(k0);
                    __m256 _k0n = _mm256_loadu_ps(k0 + 8);
                    __m256 _k1 = _mm256_loadu_ps(k1);
                    __m256 _k1n = _mm256_loadu_ps(k1 + 8);
                    __m256 _k2 = _mm256_loadu_ps(k2);
                    __m256 _k2n = _mm256_loadu_ps(k2 + 8);
                    __m256 _k3 = _mm256_loadu_ps(k3);
                    __m256 _k3n = _mm256_loadu_ps(k3 + 8);
                    _sum0 = _mm256_comp_fmadd_ps(_r0, _k0, _sum0);
                    _sum0n = _mm256_comp_fmadd_ps(_r0n, _k0n, _sum0n);
                    _sum1 = _mm256_comp_fmadd_ps(_r0, _k1, _sum1);
                    _sum1n = _mm256_comp_fmadd_ps(_r0n, _k1n, _sum1n);
                    _sum2 = _mm256_comp_fmadd_ps(_r0, _k2, _sum2);
                    _sum2n = _mm256_comp_fmadd_ps(_r0n, _k2n, _sum2n);
                    _sum3 = _mm256_comp_fmadd_ps(_r0, _k3, _sum3);
                    _sum3n = _mm256_comp_fmadd_ps(_r0n, _k3n, _sum3n);
                }

                _mm256_storeu_ps(output0_tm, _sum0);
                _mm256_storeu_ps(output0_tm + 8, _sum0n);
                _mm256_storeu_ps(output1_tm, _sum1);
                _mm256_storeu_ps(output1_tm + 8, _sum1n);
                _mm256_storeu_ps(output2_tm, _sum2);
                _mm256_storeu_ps(output2_tm + 8, _sum2n);
                _mm256_storeu_ps(output3_tm, _sum3);
                _mm256_storeu_ps(output3_tm + 8, _sum3n);
#else
                float sum0[16] = {0.0f};
                float sum1[16] = {0.0f};
                float sum2[16] = {0.0f};
                float sum3[16] = {0.0f};

                int q = 0;
                for (; q + 3 < inch; q += 4)
                {
                    const float* r0 = bottom_blob_tm.channel(q).row(i);
                    const float* r1 = bottom_blob_tm.channel(q + 1).row(i);
                    const float* r2 = bottom_blob_tm.channel(q + 2).row(i);
                    const float* r3 = bottom_blob_tm.channel(q + 3).row(i);

                    const float* k0 = kernel0_tm.row(q);
                    const float* k1 = kernel1_tm.row(q);
                    const float* k2 = kernel2_tm.row(q);
                    const float* k3 = kernel3_tm.row(q);

                    // kX += 16 steps to the next channel's kernel row;
                    // kX -= 16*3 restores the pointer for the next n
                    for (int n = 0; n < 16; n++)
                    {
                        sum0[n] += r0[n] * k0[n];
                        k0 += 16;
                        sum0[n] += r1[n] * k0[n];
                        k0 += 16;
                        sum0[n] += r2[n] * k0[n];
                        k0 += 16;
                        sum0[n] += r3[n] * k0[n];
                        k0 -= 16 * 3;

                        sum1[n] += r0[n] * k1[n];
                        k1 += 16;
                        sum1[n] += r1[n] * k1[n];
                        k1 += 16;
                        sum1[n] += r2[n] * k1[n];
                        k1 += 16;
                        sum1[n] += r3[n] * k1[n];
                        k1 -= 16 * 3;

                        sum2[n] += r0[n] * k2[n];
                        k2 += 16;
                        sum2[n] += r1[n] * k2[n];
                        k2 += 16;
                        sum2[n] += r2[n] * k2[n];
                        k2 += 16;
                        sum2[n] += r3[n] * k2[n];
                        k2 -= 16 * 3;

                        sum3[n] += r0[n] * k3[n];
                        k3 += 16;
                        sum3[n] += r1[n] * k3[n];
                        k3 += 16;
                        sum3[n] += r2[n] * k3[n];
                        k3 += 16;
                        sum3[n] += r3[n] * k3[n];
                        k3 -= 16 * 3;
                    }
                }
                for (; q < inch; q++)
                {
                    const float* r0 = bottom_blob_tm.channel(q).row(i);

                    const float* k0 = kernel0_tm.row(q);
                    const float* k1 = kernel1_tm.row(q);
                    const float* k2 = kernel2_tm.row(q);
                    const float* k3 = kernel3_tm.row(q);

                    for (int n = 0; n < 16; n++)
                    {
                        sum0[n] += r0[n] * k0[n];
                        sum1[n] += r0[n] * k1[n];
                        sum2[n] += r0[n] * k2[n];
                        sum3[n] += r0[n] * k3[n];
                    }
                }

                for (int n = 0; n < 16; n++)
                {
                    output0_tm[n] = sum0[n];
                    output1_tm[n] = sum1[n];
                    output2_tm[n] = sum2[n];
                    output3_tm[n] = sum3[n];
                }
#endif
            }
        }

        // leftover output channels (outch not a multiple of 4)
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p = remain_outch_start; p < outch; p++)
        {
            Mat out0_tm = top_blob_tm.channel(p);
            const Mat kernel0_tm = kernel_tm.channel(p);

            for (int i = 0; i < tiles; i++)
            {
                float* output0_tm = out0_tm.row(i);

                float sum0[16] = {0.0f};

                int q = 0;
                for (; q + 3 < inch; q += 4)
                {
                    const float* r0 = bottom_blob_tm.channel(q).row(i);
                    const float* r1 = bottom_blob_tm.channel(q + 1).row(i);
                    const float* r2 = bottom_blob_tm.channel(q + 2).row(i);
                    const float* r3 = bottom_blob_tm.channel(q + 3).row(i);

                    const float* k0 = kernel0_tm.row(q);
                    const float* k1 = kernel0_tm.row(q + 1);
                    const float* k2 = kernel0_tm.row(q + 2);
                    const float* k3 = kernel0_tm.row(q + 3);

                    for (int n = 0; n < 16; n++)
                    {
                        sum0[n] += r0[n] * k0[n];
                        sum0[n] += r1[n] * k1[n];
                        sum0[n] += r2[n] * k2[n];
                        sum0[n] += r3[n] * k3[n];
                    }
                }
                for (; q < inch; q++)
                {
                    const float* r0 = bottom_blob_tm.channel(q).row(i);
                    const float* k0 = kernel0_tm.row(q);

                    for (int n = 0; n < 16; n++)
                    {
                        sum0[n] += r0[n] * k0[n];
                    }
                }

                for (int n = 0; n < 16; n++)
                {
                    output0_tm[n] = sum0[n];
                }
            }
        }
    }
    bottom_blob_tm = Mat();
    // END dot

    // BEGIN transform output
    Mat top_blob_bordered;
    if (outw == top_blob.w && outh == top_blob.h)
    {
        // no rounding-up happened, write straight into the output
        top_blob_bordered = top_blob;
    }
    else
    {
        top_blob_bordered.create(outw, outh, outch, 4u, opt.workspace_allocator);
    }
    {
        // AT
        // const float itm[2][4] = {
        //     {1.0f, 1.0f,  1.0f, 0.0f},
        //     {0.0f, 1.0f, -1.0f, 1.0f}
        // };

        int w_tm = outw / 2 * 4;
        int h_tm = outh / 2 * 4;

        int nColBlocks = h_tm / 4; // may be the block num in Feathercnn
        int nRowBlocks = w_tm / 4;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p = 0; p < outch; p++)
        {
            Mat out_tm = top_blob_tm.channel(p);
            Mat out = top_blob_bordered.channel(p);

            const float bias0 = bias ? bias[p] : 0.f;

            for (int j = 0; j < nColBlocks; j++)
            {
                // each tile produces a 2x2 output patch
                float* outRow0 = out.row(j * 2);
                float* outRow1 = out.row(j * 2 + 1);

                for (int i = 0; i < nRowBlocks; i++)
                {
                    float* out_tile = out_tm.row(j * nRowBlocks + i);

                    float s0[4], s1[4], s2[4], s3[4];
                    float w0[4], w1[4];
                    float d0[2], d1[2], d2[2], d3[2];
                    float o0[2], o1[2];

                    // load
                    for (int n = 0; n < 4; n++)
                    {
                        s0[n] = out_tile[n];
                        s1[n] = out_tile[n + 4];
                        s2[n] = out_tile[n + 8];
                        s3[n] = out_tile[n + 12];
                    }
                    // w = A_T * W
                    for (int n = 0; n < 4; n++)
                    {
                        w0[n] = s0[n] + s1[n] + s2[n];
                        w1[n] = s1[n] - s2[n] + s3[n];
                    }
                    // transpose w to w_t
                    {
                        d0[0] = w0[0];
                        d0[1] = w1[0];
                        d1[0] = w0[1];
                        d1[1] = w1[1];
                        d2[0] = w0[2];
                        d2[1] = w1[2];
                        d3[0] = w0[3];
                        d3[1] = w1[3];
                    }
                    // Y = A_T * w_t
                    for (int n = 0; n < 2; n++)
                    {
                        o0[n] = d0[n] + d1[n] + d2[n] + bias0;
                        o1[n] = d1[n] - d2[n] + d3[n] + bias0;
                    }
                    // save to top blob tm
                    outRow0[0] = o0[0];
                    outRow0[1] = o0[1];
                    outRow1[0] = o1[0];
                    outRow1[1] = o1[1];

                    outRow0 += 2;
                    outRow1 += 2;
                }
            }
        }
    }
    // END transform output

    // cut result pad
    copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}
// Winograd F(4,3) kernel transform: compute U = G * g * G^T (6x6) for every
// (outch, inch) 3x3 kernel, then repack the coefficients into 9 matrices of
// 4-wide groups, interleaving output channels in blocks of 8, then 4, then 1,
// to match the access pattern of conv3x3s1_winograd43_sse.
static void conv3x3s1_winograd43_transform_kernel_sse(const Mat& kernel, std::vector<Mat>& kernel_tm2, int inch, int outch, const Option& opt)
{
    Mat kernel_tm(6 * 6, inch, outch);

    // G
    const float ktm[6][3] = {
        {1.0f / 4, 0.0f, 0.0f},
        {-1.0f / 6, -1.0f / 6, -1.0f / 6},
        {-1.0f / 6, 1.0f / 6, -1.0f / 6},
        {1.0f / 24, 1.0f / 12, 1.0f / 6},
        {1.0f / 24, -1.0f / 12, 1.0f / 6},
        {0.0f, 0.0f, 1.0f}
    };

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        for (int q = 0; q < inch; q++)
        {
            const float* g0 = (const float*)kernel + p * inch * 9 + q * 9;
            float* utile = kernel_tm.channel(p).row(q);

            // the three rows of the 3x3 kernel
            const float* grow[3] = {g0, g0 + 3, g0 + 6};

            // h = G * g  (6x3)
            float tmp[6][3];
            for (int i = 0; i < 6; i++)
            {
                for (int m = 0; m < 3; m++)
                {
                    tmp[i][m] = grow[m][0] * ktm[i][0] + grow[m][1] * ktm[i][1] + grow[m][2] * ktm[i][2];
                }
            }

            // U = h * G^T  (6x6)
            for (int j = 0; j < 6; j++)
            {
                const float* h = tmp[j];
                for (int i = 0; i < 6; i++)
                {
                    utile[j * 6 + i] = h[0] * ktm[i][0] + h[1] * ktm[i][1] + h[2] * ktm[i][2];
                }
            }
        }
    }

    // repack: the 36 coefficients of each transformed kernel are consumed as
    // 9 groups of 4 (group r covers coefficients r*4 .. r*4+3)
    for (int r = 0; r < 9; r++)
    {
        Mat kernel_tm_test(4 * 8, inch, outch / 8 + (outch % 8) / 4 + outch % 4);

        int p = 0;
        // blocks of 8 output channels
        for (; p + 7 < outch; p += 8)
        {
            const float* ksrc[8];
            for (int k = 0; k < 8; k++)
            {
                ksrc[k] = (const float*)kernel_tm.channel(p + k);
            }

            float* ktmp = kernel_tm_test.channel(p / 8);
            for (int q = 0; q < inch; q++)
            {
                for (int k = 0; k < 8; k++)
                {
                    for (int n = 0; n < 4; n++)
                    {
                        *ktmp++ = ksrc[k][r * 4 + n];
                    }
                    ksrc[k] += 36; // next input channel of this kernel
                }
            }
        }
        // blocks of 4 output channels
        for (; p + 3 < outch; p += 4)
        {
            const float* ksrc[4];
            for (int k = 0; k < 4; k++)
            {
                ksrc[k] = (const float*)kernel_tm.channel(p + k);
            }

            float* ktmp = kernel_tm_test.channel(p / 8 + (p % 8) / 4);
            for (int q = 0; q < inch; q++)
            {
                for (int k = 0; k < 4; k++)
                {
                    for (int n = 0; n < 4; n++)
                    {
                        *ktmp++ = ksrc[k][r * 4 + n];
                    }
                    ksrc[k] += 36;
                }
            }
        }
        // single leftover output channels
        for (; p < outch; p++)
        {
            const float* ksrc = (const float*)kernel_tm.channel(p);

            float* ktmp = kernel_tm_test.channel(p / 8 + (p % 8) / 4 + p % 4);
            for (int q = 0; q < inch; q++)
            {
                for (int n = 0; n < 4; n++)
                {
                    *ktmp++ = ksrc[r * 4 + n];
                }
                ksrc += 36;
            }
        }

        kernel_tm2.push_back(kernel_tm_test);
    }
}
static void conv3x3s1_winograd43_sse(const Mat& bottom_blob, Mat& top_blob, const std::vector<Mat>& kernel_tm_test, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
size_t elemsize = bottom_blob.elemsize;
const float* bias = _bias;
// pad to 4n+2, winograd F(4,3)
Mat bottom_blob_bordered = bottom_blob;
outw = (outw + 3) / 4 * 4;
outh = (outh + 3) / 4 * 4;
w = outw + 2;
h = outh + 2;
Option opt_b = opt;
opt_b.blob_allocator = opt.workspace_allocator;
copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, 0, 0.f, opt_b);
// BEGIN transform input
Mat bottom_blob_tm;
{
int w_tm = outw / 4 * 6;
int h_tm = outh / 4 * 6;
int nColBlocks = h_tm / 6; // may be the block num in Feathercnn
int nRowBlocks = w_tm / 6;
const int tiles = nColBlocks * nRowBlocks;
bottom_blob_tm.create(4, inch, tiles * 9, elemsize, opt.workspace_allocator);
// BT
// const float itm[4][4] = {
// {4.0f, 0.0f, -5.0f, 0.0f, 1.0f, 0.0f},
// {0.0f,-4.0f, -4.0f, 1.0f, 1.0f, 0.0f},
// {0.0f, 4.0f, -4.0f,-1.0f, 1.0f, 0.0f},
// {0.0f,-2.0f, -1.0f, 2.0f, 1.0f, 0.0f},
// {0.0f, 2.0f, -1.0f,-2.0f, 1.0f, 0.0f},
// {0.0f, 4.0f, 0.0f,-5.0f, 0.0f, 1.0f}
// };
// 0 = 4 * r00 - 5 * r02 + r04
// 1 = -4 * (r01 + r02) + r03 + r04
// 2 = 4 * (r01 - r02) - r03 + r04
// 3 = -2 * r01 - r02 + 2 * r03 + r04
// 4 = 2 * r01 - r02 - 2 * r03 + r04
// 5 = 4 * r01 - 5 * r03 + r05
// 0 = 4 * r00 - 5 * r02 + r04
// 1 = -4 * (r01 + r02) + r03 + r04
// 2 = 4 * (r01 - r02) - r03 + r04
// 3 = -2 * r01 - r02 + 2 * r03 + r04
// 4 = 2 * r01 - r02 - 2 * r03 + r04
// 5 = 4 * r01 - 5 * r03 + r05
#if __AVX__
__m256 _1_n = _mm256_set1_ps(-1);
__m256 _2_p = _mm256_set1_ps(2);
__m256 _2_n = _mm256_set1_ps(-2);
__m256 _4_p = _mm256_set1_ps(4);
__m256 _4_n = _mm256_set1_ps(-4);
__m256 _5_n = _mm256_set1_ps(-5);
#endif
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < inch; q++)
{
const float* img = bottom_blob_bordered.channel(q);
for (int j = 0; j < nColBlocks; j++)
{
const float* r0 = img + w * j * 4;
const float* r1 = r0 + w;
const float* r2 = r1 + w;
const float* r3 = r2 + w;
const float* r4 = r3 + w;
const float* r5 = r4 + w;
for (int i = 0; i < nRowBlocks; i++)
{
float* out_tm0 = bottom_blob_tm.channel(tiles * 0 + j * nRowBlocks + i).row(q);
float* out_tm1 = bottom_blob_tm.channel(tiles * 1 + j * nRowBlocks + i).row(q);
float* out_tm2 = bottom_blob_tm.channel(tiles * 2 + j * nRowBlocks + i).row(q);
float* out_tm3 = bottom_blob_tm.channel(tiles * 3 + j * nRowBlocks + i).row(q);
float* out_tm4 = bottom_blob_tm.channel(tiles * 4 + j * nRowBlocks + i).row(q);
float* out_tm5 = bottom_blob_tm.channel(tiles * 5 + j * nRowBlocks + i).row(q);
float* out_tm6 = bottom_blob_tm.channel(tiles * 6 + j * nRowBlocks + i).row(q);
float* out_tm7 = bottom_blob_tm.channel(tiles * 7 + j * nRowBlocks + i).row(q);
float* out_tm8 = bottom_blob_tm.channel(tiles * 8 + j * nRowBlocks + i).row(q);
#if __AVX__
__m256 _d0, _d1, _d2, _d3, _d4, _d5;
__m256 _w0, _w1, _w2, _w3, _w4, _w5;
__m256 _t0, _t1, _t2, _t3, _t4, _t5;
__m256 _n0, _n1, _n2, _n3, _n4, _n5;
// load
_d0 = _mm256_loadu_ps(r0);
_d1 = _mm256_loadu_ps(r1);
_d2 = _mm256_loadu_ps(r2);
_d3 = _mm256_loadu_ps(r3);
_d4 = _mm256_loadu_ps(r4);
_d5 = _mm256_loadu_ps(r5);
// w = B_t * d
_w0 = _mm256_mul_ps(_d0, _4_p);
_w0 = _mm256_comp_fmadd_ps(_d2, _5_n, _w0);
_w0 = _mm256_add_ps(_w0, _d4);
_w1 = _mm256_mul_ps(_d1, _4_n);
_w1 = _mm256_comp_fmadd_ps(_d2, _4_n, _w1);
_w1 = _mm256_add_ps(_w1, _d3);
_w1 = _mm256_add_ps(_w1, _d4);
_w2 = _mm256_mul_ps(_d1, _4_p);
_w2 = _mm256_comp_fmadd_ps(_d2, _4_n, _w2);
_w2 = _mm256_comp_fmadd_ps(_d3, _1_n, _w2);
_w2 = _mm256_add_ps(_w2, _d4);
_w3 = _mm256_mul_ps(_d1, _2_n);
_w3 = _mm256_comp_fmadd_ps(_d2, _1_n, _w3);
_w3 = _mm256_comp_fmadd_ps(_d3, _2_p, _w3);
_w3 = _mm256_add_ps(_w3, _d4);
_w4 = _mm256_mul_ps(_d1, _2_p);
_w4 = _mm256_comp_fmadd_ps(_d2, _1_n, _w4);
_w4 = _mm256_comp_fmadd_ps(_d3, _2_n, _w4);
_w4 = _mm256_add_ps(_w4, _d4);
_w5 = _mm256_mul_ps(_d1, _4_p);
_w5 = _mm256_comp_fmadd_ps(_d3, _5_n, _w5);
_w5 = _mm256_add_ps(_w5, _d5);
// transpose d to d_t
#if (defined _WIN32 && !(defined __MINGW32__) && !__clang__)
{
_t0.m256_f32[0] = _w0.m256_f32[0];
_t1.m256_f32[0] = _w0.m256_f32[1];
_t2.m256_f32[0] = _w0.m256_f32[2];
_t3.m256_f32[0] = _w0.m256_f32[3];
_t4.m256_f32[0] = _w0.m256_f32[4];
_t5.m256_f32[0] = _w0.m256_f32[5];
_t0.m256_f32[1] = _w1.m256_f32[0];
_t1.m256_f32[1] = _w1.m256_f32[1];
_t2.m256_f32[1] = _w1.m256_f32[2];
_t3.m256_f32[1] = _w1.m256_f32[3];
_t4.m256_f32[1] = _w1.m256_f32[4];
_t5.m256_f32[1] = _w1.m256_f32[5];
_t0.m256_f32[2] = _w2.m256_f32[0];
_t1.m256_f32[2] = _w2.m256_f32[1];
_t2.m256_f32[2] = _w2.m256_f32[2];
_t3.m256_f32[2] = _w2.m256_f32[3];
_t4.m256_f32[2] = _w2.m256_f32[4];
_t5.m256_f32[2] = _w2.m256_f32[5];
_t0.m256_f32[3] = _w3.m256_f32[0];
_t1.m256_f32[3] = _w3.m256_f32[1];
_t2.m256_f32[3] = _w3.m256_f32[2];
_t3.m256_f32[3] = _w3.m256_f32[3];
_t4.m256_f32[3] = _w3.m256_f32[4];
_t5.m256_f32[3] = _w3.m256_f32[5];
_t0.m256_f32[4] = _w4.m256_f32[0];
_t1.m256_f32[4] = _w4.m256_f32[1];
_t2.m256_f32[4] = _w4.m256_f32[2];
_t3.m256_f32[4] = _w4.m256_f32[3];
_t4.m256_f32[4] = _w4.m256_f32[4];
_t5.m256_f32[4] = _w4.m256_f32[5];
_t0.m256_f32[5] = _w5.m256_f32[0];
_t1.m256_f32[5] = _w5.m256_f32[1];
_t2.m256_f32[5] = _w5.m256_f32[2];
_t3.m256_f32[5] = _w5.m256_f32[3];
_t4.m256_f32[5] = _w5.m256_f32[4];
_t5.m256_f32[5] = _w5.m256_f32[5];
}
#else
{
_t0[0] = _w0[0];
_t1[0] = _w0[1];
_t2[0] = _w0[2];
_t3[0] = _w0[3];
_t4[0] = _w0[4];
_t5[0] = _w0[5];
_t0[1] = _w1[0];
_t1[1] = _w1[1];
_t2[1] = _w1[2];
_t3[1] = _w1[3];
_t4[1] = _w1[4];
_t5[1] = _w1[5];
_t0[2] = _w2[0];
_t1[2] = _w2[1];
_t2[2] = _w2[2];
_t3[2] = _w2[3];
_t4[2] = _w2[4];
_t5[2] = _w2[5];
_t0[3] = _w3[0];
_t1[3] = _w3[1];
_t2[3] = _w3[2];
_t3[3] = _w3[3];
_t4[3] = _w3[4];
_t5[3] = _w3[5];
_t0[4] = _w4[0];
_t1[4] = _w4[1];
_t2[4] = _w4[2];
_t3[4] = _w4[3];
_t4[4] = _w4[4];
_t5[4] = _w4[5];
_t0[5] = _w5[0];
_t1[5] = _w5[1];
_t2[5] = _w5[2];
_t3[5] = _w5[3];
_t4[5] = _w5[4];
_t5[5] = _w5[5];
}
#endif
// d = B_t * d_t
_n0 = _mm256_mul_ps(_t0, _4_p);
_n0 = _mm256_comp_fmadd_ps(_t2, _5_n, _n0);
_n0 = _mm256_add_ps(_n0, _t4);
_n1 = _mm256_mul_ps(_t1, _4_n);
_n1 = _mm256_comp_fmadd_ps(_t2, _4_n, _n1);
_n1 = _mm256_add_ps(_n1, _t3);
_n1 = _mm256_add_ps(_n1, _t4);
_n2 = _mm256_mul_ps(_t1, _4_p);
_n2 = _mm256_comp_fmadd_ps(_t2, _4_n, _n2);
_n2 = _mm256_comp_fmadd_ps(_t3, _1_n, _n2);
_n2 = _mm256_add_ps(_n2, _t4);
_n3 = _mm256_mul_ps(_t1, _2_n);
_n3 = _mm256_comp_fmadd_ps(_t2, _1_n, _n3);
_n3 = _mm256_comp_fmadd_ps(_t3, _2_p, _n3);
_n3 = _mm256_add_ps(_n3, _t4);
_n4 = _mm256_mul_ps(_t1, _2_p);
_n4 = _mm256_comp_fmadd_ps(_t2, _1_n, _n4);
_n4 = _mm256_comp_fmadd_ps(_t3, _2_n, _n4);
_n4 = _mm256_add_ps(_n4, _t4);
_n5 = _mm256_mul_ps(_t1, _4_p);
_n5 = _mm256_comp_fmadd_ps(_t3, _5_n, _n5);
_n5 = _mm256_add_ps(_n5, _t5);
// save to out_tm
float output_n0[8] = {0.f};
_mm256_storeu_ps(output_n0, _n0);
float output_n1[8] = {0.f};
_mm256_storeu_ps(output_n1, _n1);
float output_n2[8] = {0.f};
_mm256_storeu_ps(output_n2, _n2);
float output_n3[8] = {0.f};
_mm256_storeu_ps(output_n3, _n3);
float output_n4[8] = {0.f};
_mm256_storeu_ps(output_n4, _n4);
float output_n5[8] = {0.f};
_mm256_storeu_ps(output_n5, _n5);
out_tm0[0] = output_n0[0];
out_tm0[1] = output_n0[1];
out_tm0[2] = output_n0[2];
out_tm0[3] = output_n0[3];
out_tm1[0] = output_n0[4];
out_tm1[1] = output_n0[5];
out_tm1[2] = output_n1[0];
out_tm1[3] = output_n1[1];
out_tm2[0] = output_n1[2];
out_tm2[1] = output_n1[3];
out_tm2[2] = output_n1[4];
out_tm2[3] = output_n1[5];
out_tm3[0] = output_n2[0];
out_tm3[1] = output_n2[1];
out_tm3[2] = output_n2[2];
out_tm3[3] = output_n2[3];
out_tm4[0] = output_n2[4];
out_tm4[1] = output_n2[5];
out_tm4[2] = output_n3[0];
out_tm4[3] = output_n3[1];
out_tm5[0] = output_n3[2];
out_tm5[1] = output_n3[3];
out_tm5[2] = output_n3[4];
out_tm5[3] = output_n3[5];
out_tm6[0] = output_n4[0];
out_tm6[1] = output_n4[1];
out_tm6[2] = output_n4[2];
out_tm6[3] = output_n4[3];
out_tm7[0] = output_n4[4];
out_tm7[1] = output_n4[5];
out_tm7[2] = output_n5[0];
out_tm7[3] = output_n5[1];
out_tm8[0] = output_n5[2];
out_tm8[1] = output_n5[3];
out_tm8[2] = output_n5[4];
out_tm8[3] = output_n5[5];
#else
float d0[6], d1[6], d2[6], d3[6], d4[6], d5[6];
float w0[6], w1[6], w2[6], w3[6], w4[6], w5[6];
float t0[6], t1[6], t2[6], t3[6], t4[6], t5[6];
// load
for (int n = 0; n < 6; n++)
{
d0[n] = r0[n];
d1[n] = r1[n];
d2[n] = r2[n];
d3[n] = r3[n];
d4[n] = r4[n];
d5[n] = r5[n];
}
// w = B_t * d
for (int n = 0; n < 6; n++)
{
w0[n] = 4 * d0[n] - 5 * d2[n] + d4[n];
w1[n] = -4 * d1[n] - 4 * d2[n] + d3[n] + d4[n];
w2[n] = 4 * d1[n] - 4 * d2[n] - d3[n] + d4[n];
w3[n] = -2 * d1[n] - d2[n] + 2 * d3[n] + d4[n];
w4[n] = 2 * d1[n] - d2[n] - 2 * d3[n] + d4[n];
w5[n] = 4 * d1[n] - 5 * d3[n] + d5[n];
}
// transpose d to d_t
{
t0[0] = w0[0];
t1[0] = w0[1];
t2[0] = w0[2];
t3[0] = w0[3];
t4[0] = w0[4];
t5[0] = w0[5];
t0[1] = w1[0];
t1[1] = w1[1];
t2[1] = w1[2];
t3[1] = w1[3];
t4[1] = w1[4];
t5[1] = w1[5];
t0[2] = w2[0];
t1[2] = w2[1];
t2[2] = w2[2];
t3[2] = w2[3];
t4[2] = w2[4];
t5[2] = w2[5];
t0[3] = w3[0];
t1[3] = w3[1];
t2[3] = w3[2];
t3[3] = w3[3];
t4[3] = w3[4];
t5[3] = w3[5];
t0[4] = w4[0];
t1[4] = w4[1];
t2[4] = w4[2];
t3[4] = w4[3];
t4[4] = w4[4];
t5[4] = w4[5];
t0[5] = w5[0];
t1[5] = w5[1];
t2[5] = w5[2];
t3[5] = w5[3];
t4[5] = w5[4];
t5[5] = w5[5];
}
// d = B_t * d_t
for (int n = 0; n < 6; n++)
{
d0[n] = 4 * t0[n] - 5 * t2[n] + t4[n];
d1[n] = -4 * t1[n] - 4 * t2[n] + t3[n] + t4[n];
d2[n] = 4 * t1[n] - 4 * t2[n] - t3[n] + t4[n];
d3[n] = -2 * t1[n] - t2[n] + 2 * t3[n] + t4[n];
d4[n] = 2 * t1[n] - t2[n] - 2 * t3[n] + t4[n];
d5[n] = 4 * t1[n] - 5 * t3[n] + t5[n];
}
// save to out_tm
{
out_tm0[0] = d0[0];
out_tm0[1] = d0[1];
out_tm0[2] = d0[2];
out_tm0[3] = d0[3];
out_tm1[0] = d0[4];
out_tm1[1] = d0[5];
out_tm1[2] = d1[0];
out_tm1[3] = d1[1];
out_tm2[0] = d1[2];
out_tm2[1] = d1[3];
out_tm2[2] = d1[4];
out_tm2[3] = d1[5];
out_tm3[0] = d2[0];
out_tm3[1] = d2[1];
out_tm3[2] = d2[2];
out_tm3[3] = d2[3];
out_tm4[0] = d2[4];
out_tm4[1] = d2[5];
out_tm4[2] = d3[0];
out_tm4[3] = d3[1];
out_tm5[0] = d3[2];
out_tm5[1] = d3[3];
out_tm5[2] = d3[4];
out_tm5[3] = d3[5];
out_tm6[0] = d4[0];
out_tm6[1] = d4[1];
out_tm6[2] = d4[2];
out_tm6[3] = d4[3];
out_tm7[0] = d4[4];
out_tm7[1] = d4[5];
out_tm7[2] = d5[0];
out_tm7[3] = d5[1];
out_tm8[0] = d5[2];
out_tm8[1] = d5[3];
out_tm8[2] = d5[4];
out_tm8[3] = d5[5];
}
#endif // __AVX__
r0 += 4;
r1 += 4;
r2 += 4;
r3 += 4;
r4 += 4;
r5 += 4;
}
}
}
}
bottom_blob_bordered = Mat();
// BEGIN dot
Mat top_blob_tm;
{
int w_tm = outw / 4 * 6;
int h_tm = outh / 4 * 6;
int nColBlocks = h_tm / 6; // may be the block num in Feathercnn
int nRowBlocks = w_tm / 6;
const int tiles = nColBlocks * nRowBlocks;
top_blob_tm.create(36, tiles, outch, elemsize, opt.workspace_allocator);
#pragma omp parallel for num_threads(opt.num_threads)
for (int r = 0; r < 9; r++)
{
int nn_outch = 0;
int remain_outch_start = 0;
nn_outch = outch >> 3;
remain_outch_start = nn_outch << 3;
for (int pp = 0; pp < nn_outch; pp++)
{
int p = pp * 8;
float* output0_tm = top_blob_tm.channel(p);
float* output1_tm = top_blob_tm.channel(p + 1);
float* output2_tm = top_blob_tm.channel(p + 2);
float* output3_tm = top_blob_tm.channel(p + 3);
float* output4_tm = top_blob_tm.channel(p + 4);
float* output5_tm = top_blob_tm.channel(p + 5);
float* output6_tm = top_blob_tm.channel(p + 6);
float* output7_tm = top_blob_tm.channel(p + 7);
output0_tm = output0_tm + r * 4;
output1_tm = output1_tm + r * 4;
output2_tm = output2_tm + r * 4;
output3_tm = output3_tm + r * 4;
output4_tm = output4_tm + r * 4;
output5_tm = output5_tm + r * 4;
output6_tm = output6_tm + r * 4;
output7_tm = output7_tm + r * 4;
for (int i = 0; i < tiles; i++)
{
const float* kptr = kernel_tm_test[r].channel(p / 8);
const float* r0 = bottom_blob_tm.channel(tiles * r + i);
#if __AVX__ || __SSE__
#if __AVX__
float zero_val = 0.f;
__m128 _sum0 = _mm_broadcast_ss(&zero_val);
__m128 _sum1 = _mm_broadcast_ss(&zero_val);
__m128 _sum2 = _mm_broadcast_ss(&zero_val);
__m128 _sum3 = _mm_broadcast_ss(&zero_val);
__m128 _sum4 = _mm_broadcast_ss(&zero_val);
__m128 _sum5 = _mm_broadcast_ss(&zero_val);
__m128 _sum6 = _mm_broadcast_ss(&zero_val);
__m128 _sum7 = _mm_broadcast_ss(&zero_val);
#else
__m128 _sum0 = _mm_set1_ps(0.f);
__m128 _sum1 = _mm_set1_ps(0.f);
__m128 _sum2 = _mm_set1_ps(0.f);
__m128 _sum3 = _mm_set1_ps(0.f);
__m128 _sum4 = _mm_set1_ps(0.f);
__m128 _sum5 = _mm_set1_ps(0.f);
__m128 _sum6 = _mm_set1_ps(0.f);
__m128 _sum7 = _mm_set1_ps(0.f);
#endif
int q = 0;
for (; q + 3 < inch; q = q + 4)
{
__m128 _r0 = _mm_loadu_ps(r0);
__m128 _r1 = _mm_loadu_ps(r0 + 4);
__m128 _r2 = _mm_loadu_ps(r0 + 8);
__m128 _r3 = _mm_loadu_ps(r0 + 12);
__m128 _k0 = _mm_loadu_ps(kptr);
__m128 _k1 = _mm_loadu_ps(kptr + 4);
__m128 _k2 = _mm_loadu_ps(kptr + 8);
__m128 _k3 = _mm_loadu_ps(kptr + 12);
__m128 _k4 = _mm_loadu_ps(kptr + 16);
__m128 _k5 = _mm_loadu_ps(kptr + 20);
__m128 _k6 = _mm_loadu_ps(kptr + 24);
__m128 _k7 = _mm_loadu_ps(kptr + 28);
#if __AVX__
_sum0 = _mm_comp_fmadd_ps(_r0, _k0, _sum0);
_sum1 = _mm_comp_fmadd_ps(_r0, _k1, _sum1);
_sum2 = _mm_comp_fmadd_ps(_r0, _k2, _sum2);
_sum3 = _mm_comp_fmadd_ps(_r0, _k3, _sum3);
_sum4 = _mm_comp_fmadd_ps(_r0, _k4, _sum4);
_sum5 = _mm_comp_fmadd_ps(_r0, _k5, _sum5);
_sum6 = _mm_comp_fmadd_ps(_r0, _k6, _sum6);
_sum7 = _mm_comp_fmadd_ps(_r0, _k7, _sum7);
#else
_sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r0, _k0));
_sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r0, _k1));
_sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r0, _k2));
_sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_r0, _k3));
_sum4 = _mm_add_ps(_sum4, _mm_mul_ps(_r0, _k4));
_sum5 = _mm_add_ps(_sum5, _mm_mul_ps(_r0, _k5));
_sum6 = _mm_add_ps(_sum6, _mm_mul_ps(_r0, _k6));
_sum7 = _mm_add_ps(_sum7, _mm_mul_ps(_r0, _k7));
#endif
kptr += 32;
_k0 = _mm_loadu_ps(kptr);
_k1 = _mm_loadu_ps(kptr + 4);
_k2 = _mm_loadu_ps(kptr + 8);
_k3 = _mm_loadu_ps(kptr + 12);
_k4 = _mm_loadu_ps(kptr + 16);
_k5 = _mm_loadu_ps(kptr + 20);
_k6 = _mm_loadu_ps(kptr + 24);
_k7 = _mm_loadu_ps(kptr + 28);
#if __AVX__
_sum0 = _mm_comp_fmadd_ps(_r1, _k0, _sum0);
_sum1 = _mm_comp_fmadd_ps(_r1, _k1, _sum1);
_sum2 = _mm_comp_fmadd_ps(_r1, _k2, _sum2);
_sum3 = _mm_comp_fmadd_ps(_r1, _k3, _sum3);
_sum4 = _mm_comp_fmadd_ps(_r1, _k4, _sum4);
_sum5 = _mm_comp_fmadd_ps(_r1, _k5, _sum5);
_sum6 = _mm_comp_fmadd_ps(_r1, _k6, _sum6);
_sum7 = _mm_comp_fmadd_ps(_r1, _k7, _sum7);
#else
_sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r1, _k0));
_sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r1, _k1));
_sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r1, _k2));
_sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_r1, _k3));
_sum4 = _mm_add_ps(_sum4, _mm_mul_ps(_r1, _k4));
_sum5 = _mm_add_ps(_sum5, _mm_mul_ps(_r1, _k5));
_sum6 = _mm_add_ps(_sum6, _mm_mul_ps(_r1, _k6));
_sum7 = _mm_add_ps(_sum7, _mm_mul_ps(_r1, _k7));
#endif
kptr += 32;
_k0 = _mm_loadu_ps(kptr);
_k1 = _mm_loadu_ps(kptr + 4);
_k2 = _mm_loadu_ps(kptr + 8);
_k3 = _mm_loadu_ps(kptr + 12);
_k4 = _mm_loadu_ps(kptr + 16);
_k5 = _mm_loadu_ps(kptr + 20);
_k6 = _mm_loadu_ps(kptr + 24);
_k7 = _mm_loadu_ps(kptr + 28);
#if __AVX__
_sum0 = _mm_comp_fmadd_ps(_r2, _k0, _sum0);
_sum1 = _mm_comp_fmadd_ps(_r2, _k1, _sum1);
_sum2 = _mm_comp_fmadd_ps(_r2, _k2, _sum2);
_sum3 = _mm_comp_fmadd_ps(_r2, _k3, _sum3);
_sum4 = _mm_comp_fmadd_ps(_r2, _k4, _sum4);
_sum5 = _mm_comp_fmadd_ps(_r2, _k5, _sum5);
_sum6 = _mm_comp_fmadd_ps(_r2, _k6, _sum6);
_sum7 = _mm_comp_fmadd_ps(_r2, _k7, _sum7);
#else
_sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r2, _k0));
_sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r2, _k1));
_sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r2, _k2));
_sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_r2, _k3));
_sum4 = _mm_add_ps(_sum4, _mm_mul_ps(_r2, _k4));
_sum5 = _mm_add_ps(_sum5, _mm_mul_ps(_r2, _k5));
_sum6 = _mm_add_ps(_sum6, _mm_mul_ps(_r2, _k6));
_sum7 = _mm_add_ps(_sum7, _mm_mul_ps(_r2, _k7));
#endif
kptr += 32;
_k0 = _mm_loadu_ps(kptr);
_k1 = _mm_loadu_ps(kptr + 4);
_k2 = _mm_loadu_ps(kptr + 8);
_k3 = _mm_loadu_ps(kptr + 12);
_k4 = _mm_loadu_ps(kptr + 16);
_k5 = _mm_loadu_ps(kptr + 20);
_k6 = _mm_loadu_ps(kptr + 24);
_k7 = _mm_loadu_ps(kptr + 28);
#if __AVX__
_sum0 = _mm_comp_fmadd_ps(_r3, _k0, _sum0);
_sum1 = _mm_comp_fmadd_ps(_r3, _k1, _sum1);
_sum2 = _mm_comp_fmadd_ps(_r3, _k2, _sum2);
_sum3 = _mm_comp_fmadd_ps(_r3, _k3, _sum3);
_sum4 = _mm_comp_fmadd_ps(_r3, _k4, _sum4);
_sum5 = _mm_comp_fmadd_ps(_r3, _k5, _sum5);
_sum6 = _mm_comp_fmadd_ps(_r3, _k6, _sum6);
_sum7 = _mm_comp_fmadd_ps(_r3, _k7, _sum7);
#else
_sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r3, _k0));
_sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r3, _k1));
_sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r3, _k2));
_sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_r3, _k3));
_sum4 = _mm_add_ps(_sum4, _mm_mul_ps(_r3, _k4));
_sum5 = _mm_add_ps(_sum5, _mm_mul_ps(_r3, _k5));
_sum6 = _mm_add_ps(_sum6, _mm_mul_ps(_r3, _k6));
_sum7 = _mm_add_ps(_sum7, _mm_mul_ps(_r3, _k7));
#endif
kptr += 32;
r0 += 16;
}
for (; q < inch; q++)
{
__m128 _r0 = _mm_loadu_ps(r0);
__m128 _k0 = _mm_loadu_ps(kptr);
__m128 _k1 = _mm_loadu_ps(kptr + 4);
__m128 _k2 = _mm_loadu_ps(kptr + 8);
__m128 _k3 = _mm_loadu_ps(kptr + 12);
__m128 _k4 = _mm_loadu_ps(kptr + 16);
__m128 _k5 = _mm_loadu_ps(kptr + 20);
__m128 _k6 = _mm_loadu_ps(kptr + 24);
__m128 _k7 = _mm_loadu_ps(kptr + 28);
#if __AVX__
_sum0 = _mm_comp_fmadd_ps(_r0, _k0, _sum0);
_sum1 = _mm_comp_fmadd_ps(_r0, _k1, _sum1);
_sum2 = _mm_comp_fmadd_ps(_r0, _k2, _sum2);
_sum3 = _mm_comp_fmadd_ps(_r0, _k3, _sum3);
_sum4 = _mm_comp_fmadd_ps(_r0, _k4, _sum4);
_sum5 = _mm_comp_fmadd_ps(_r0, _k5, _sum5);
_sum6 = _mm_comp_fmadd_ps(_r0, _k6, _sum6);
_sum7 = _mm_comp_fmadd_ps(_r0, _k7, _sum7);
#else
_sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r0, _k0));
_sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r0, _k1));
_sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r0, _k2));
_sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_r0, _k3));
_sum4 = _mm_add_ps(_sum4, _mm_mul_ps(_r0, _k4));
_sum5 = _mm_add_ps(_sum5, _mm_mul_ps(_r0, _k5));
_sum6 = _mm_add_ps(_sum6, _mm_mul_ps(_r0, _k6));
_sum7 = _mm_add_ps(_sum7, _mm_mul_ps(_r0, _k7));
#endif
kptr += 32;
r0 += 4;
}
_mm_storeu_ps(output0_tm, _sum0);
_mm_storeu_ps(output1_tm, _sum1);
_mm_storeu_ps(output2_tm, _sum2);
_mm_storeu_ps(output3_tm, _sum3);
_mm_storeu_ps(output4_tm, _sum4);
_mm_storeu_ps(output5_tm, _sum5);
_mm_storeu_ps(output6_tm, _sum6);
_mm_storeu_ps(output7_tm, _sum7);
#else
float sum0[4] = {0};
float sum1[4] = {0};
float sum2[4] = {0};
float sum3[4] = {0};
float sum4[4] = {0};
float sum5[4] = {0};
float sum6[4] = {0};
float sum7[4] = {0};
for (int q = 0; q < inch; q++)
{
for (int n = 0; n < 4; n++)
{
sum0[n] += r0[n] * kptr[n];
sum1[n] += r0[n] * kptr[n + 4];
sum2[n] += r0[n] * kptr[n + 8];
sum3[n] += r0[n] * kptr[n + 12];
sum4[n] += r0[n] * kptr[n + 16];
sum5[n] += r0[n] * kptr[n + 20];
sum6[n] += r0[n] * kptr[n + 24];
sum7[n] += r0[n] * kptr[n + 28];
}
kptr += 32;
r0 += 4;
}
for (int n = 0; n < 4; n++)
{
output0_tm[n] = sum0[n];
output1_tm[n] = sum1[n];
output2_tm[n] = sum2[n];
output3_tm[n] = sum3[n];
output4_tm[n] = sum4[n];
output5_tm[n] = sum5[n];
output6_tm[n] = sum6[n];
output7_tm[n] = sum7[n];
}
#endif // __AVX__
output0_tm += 36;
output1_tm += 36;
output2_tm += 36;
output3_tm += 36;
output4_tm += 36;
output5_tm += 36;
output6_tm += 36;
output7_tm += 36;
}
}
nn_outch = (outch - remain_outch_start) >> 2;
for (int pp = 0; pp < nn_outch; pp++)
{
int p = remain_outch_start + pp * 4;
float* output0_tm = top_blob_tm.channel(p);
float* output1_tm = top_blob_tm.channel(p + 1);
float* output2_tm = top_blob_tm.channel(p + 2);
float* output3_tm = top_blob_tm.channel(p + 3);
output0_tm = output0_tm + r * 4;
output1_tm = output1_tm + r * 4;
output2_tm = output2_tm + r * 4;
output3_tm = output3_tm + r * 4;
for (int i = 0; i < tiles; i++)
{
const float* kptr = kernel_tm_test[r].channel(p / 8 + (p % 8) / 4);
const float* r0 = bottom_blob_tm.channel(tiles * r + i);
#if __AVX__ || __SSE__
#if __AVX__
float zero_val = 0.f;
__m128 _sum0 = _mm_broadcast_ss(&zero_val);
__m128 _sum1 = _mm_broadcast_ss(&zero_val);
__m128 _sum2 = _mm_broadcast_ss(&zero_val);
__m128 _sum3 = _mm_broadcast_ss(&zero_val);
#else
__m128 _sum0 = _mm_set1_ps(0.f);
__m128 _sum1 = _mm_set1_ps(0.f);
__m128 _sum2 = _mm_set1_ps(0.f);
__m128 _sum3 = _mm_set1_ps(0.f);
#endif
for (int q = 0; q < inch; q++)
{
__m128 _r0 = _mm_loadu_ps(r0);
__m128 _k0 = _mm_loadu_ps(kptr);
__m128 _k1 = _mm_loadu_ps(kptr + 4);
__m128 _k2 = _mm_loadu_ps(kptr + 8);
__m128 _k3 = _mm_loadu_ps(kptr + 12);
#if __AVX__
_sum0 = _mm_comp_fmadd_ps(_r0, _k0, _sum0);
_sum1 = _mm_comp_fmadd_ps(_r0, _k1, _sum1);
_sum2 = _mm_comp_fmadd_ps(_r0, _k2, _sum2);
_sum3 = _mm_comp_fmadd_ps(_r0, _k3, _sum3);
#else
_sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r0, _k0));
_sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r0, _k1));
_sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r0, _k2));
_sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_r0, _k3));
#endif
kptr += 16;
r0 += 4;
}
_mm_storeu_ps(output0_tm, _sum0);
_mm_storeu_ps(output1_tm, _sum1);
_mm_storeu_ps(output2_tm, _sum2);
_mm_storeu_ps(output3_tm, _sum3);
#else
float sum0[4] = {0};
float sum1[4] = {0};
float sum2[4] = {0};
float sum3[4] = {0};
for (int q = 0; q < inch; q++)
{
for (int n = 0; n < 4; n++)
{
sum0[n] += r0[n] * kptr[n];
sum1[n] += r0[n] * kptr[n + 4];
sum2[n] += r0[n] * kptr[n + 8];
sum3[n] += r0[n] * kptr[n + 12];
}
kptr += 16;
r0 += 4;
}
for (int n = 0; n < 4; n++)
{
output0_tm[n] = sum0[n];
output1_tm[n] = sum1[n];
output2_tm[n] = sum2[n];
output3_tm[n] = sum3[n];
}
#endif // __AVX__
output0_tm += 36;
output1_tm += 36;
output2_tm += 36;
output3_tm += 36;
}
}
remain_outch_start += nn_outch << 2;
for (int p = remain_outch_start; p < outch; p++)
{
float* output0_tm = top_blob_tm.channel(p);
output0_tm = output0_tm + r * 4;
for (int i = 0; i < tiles; i++)
{
const float* kptr = kernel_tm_test[r].channel(p / 8 + (p % 8) / 4 + p % 4);
const float* r0 = bottom_blob_tm.channel(tiles * r + i);
#if __AVX__ || __SSE__
#if __AVX__
float zero_val = 0.f;
__m128 _sum0 = _mm_broadcast_ss(&zero_val);
#else
__m128 _sum0 = _mm_set1_ps(0.f);
#endif
for (int q = 0; q < inch; q++)
{
__m128 _r0 = _mm_loadu_ps(r0);
__m128 _k0 = _mm_loadu_ps(kptr);
#if __AVX__
_sum0 = _mm_comp_fmadd_ps(_r0, _k0, _sum0);
#else
_sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r0, _k0));
#endif
kptr += 16;
r0 += 4;
}
_mm_storeu_ps(output0_tm, _sum0);
#else
float sum0[4] = {0};
for (int q = 0; q < inch; q++)
{
for (int n = 0; n < 4; n++)
{
sum0[n] += (int)r0[n] * kptr[n];
}
kptr += 4;
r0 += 4;
}
for (int n = 0; n < 4; n++)
{
output0_tm[n] = sum0[n];
}
#endif // __AVX__ || __SSE__
output0_tm += 36;
}
}
// for (int p=0; p<outch; p++)
// {
// Mat out0_tm = top_blob_tm.channel(p);
// const Mat kernel0_tm = kernel_tm.channel(p);
// for (int i=0; i<tiles; i++)
// {
// float* output0_tm = out0_tm.row<int>(i);
// int sum0[36] = {0};
// for (int q=0; q<inch; q++)
// {
// const float* r0 = bottom_blob_tm.channel(q).row<float>(i);
// const float* k0 = kernel0_tm.row<float>(q);
// for (int n=0; n<36; n++)
// {
// sum0[n] += (int)r0[n] * k0[n];
// }
// }
// for (int n=0; n<36; n++)
// {
// output0_tm[n] = sum0[n];
// }
// }
// }
}
}
bottom_blob_tm = Mat();
// END dot
// BEGIN transform output
Mat top_blob_bordered;
if (outw == top_blob.w && outh == top_blob.h)
{
top_blob_bordered = top_blob;
}
else
{
top_blob_bordered.create(outw, outh, outch, elemsize, opt.workspace_allocator);
}
{
// AT
// const float itm[4][6] = {
// {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 0.0f},
// {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 1.0f}
// };
// 0 = r00 + r01 + r02 + r03 + r04
// 1 = r01 - r02 + 2 * (r03 - r04)
// 2 = r01 + r02 + 4 * (r03 + r04)
// 3 = r01 - r02 + 8 * (r03 - r04) + r05
int w_tm = outw / 4 * 6;
int h_tm = outh / 4 * 6;
int nColBlocks = h_tm / 6; // may be the block num in Feathercnn
int nRowBlocks = w_tm / 6;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
float* out_tile = top_blob_tm.channel(p);
float* outRow0 = top_blob_bordered.channel(p);
float* outRow1 = outRow0 + outw;
float* outRow2 = outRow0 + outw * 2;
float* outRow3 = outRow0 + outw * 3;
const float bias0 = bias ? bias[p] : 0.f;
for (int j = 0; j < nColBlocks; j++)
{
for (int i = 0; i < nRowBlocks; i++)
{
// TODO AVX2
float s0[6], s1[6], s2[6], s3[6], s4[6], s5[6];
float w0[6], w1[6], w2[6], w3[6];
float d0[4], d1[4], d2[4], d3[4], d4[4], d5[4];
float o0[4], o1[4], o2[4], o3[4];
// load
for (int n = 0; n < 6; n++)
{
s0[n] = out_tile[n];
s1[n] = out_tile[n + 6];
s2[n] = out_tile[n + 12];
s3[n] = out_tile[n + 18];
s4[n] = out_tile[n + 24];
s5[n] = out_tile[n + 30];
}
// w = A_T * W
for (int n = 0; n < 6; n++)
{
w0[n] = s0[n] + s1[n] + s2[n] + s3[n] + s4[n];
w1[n] = s1[n] - s2[n] + 2 * s3[n] - 2 * s4[n];
w2[n] = s1[n] + s2[n] + 4 * s3[n] + 4 * s4[n];
w3[n] = s1[n] - s2[n] + 8 * s3[n] - 8 * s4[n] + s5[n];
}
// transpose w to w_t
{
d0[0] = w0[0];
d0[1] = w1[0];
d0[2] = w2[0];
d0[3] = w3[0];
d1[0] = w0[1];
d1[1] = w1[1];
d1[2] = w2[1];
d1[3] = w3[1];
d2[0] = w0[2];
d2[1] = w1[2];
d2[2] = w2[2];
d2[3] = w3[2];
d3[0] = w0[3];
d3[1] = w1[3];
d3[2] = w2[3];
d3[3] = w3[3];
d4[0] = w0[4];
d4[1] = w1[4];
d4[2] = w2[4];
d4[3] = w3[4];
d5[0] = w0[5];
d5[1] = w1[5];
d5[2] = w2[5];
d5[3] = w3[5];
}
// Y = A_T * w_t
for (int n = 0; n < 4; n++)
{
o0[n] = d0[n] + d1[n] + d2[n] + d3[n] + d4[n];
o1[n] = d1[n] - d2[n] + 2 * d3[n] - 2 * d4[n];
o2[n] = d1[n] + d2[n] + 4 * d3[n] + 4 * d4[n];
o3[n] = d1[n] - d2[n] + 8 * d3[n] - 8 * d4[n] + d5[n];
}
// save to top blob tm
for (int n = 0; n < 4; n++)
{
outRow0[n] = o0[n] + bias0;
outRow1[n] = o1[n] + bias0;
outRow2[n] = o2[n] + bias0;
outRow3[n] = o3[n] + bias0;
}
out_tile += 36;
outRow0 += 4;
outRow1 += 4;
outRow2 += 4;
outRow3 += 4;
}
outRow0 += outw * 3;
outRow1 += outw * 3;
outRow2 += outw * 3;
outRow3 += outw * 3;
}
}
}
// END transform output
// cut result pad
copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}
// Plain (non-vectorized) 3x3 convolution with stride 2.
// Each output channel is primed with its bias, then every input channel's
// 3x3 window contribution is accumulated into the output map in place.
static void conv3x3s2_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt)
{
    const int w = bottom_blob.w;
    const int inch = bottom_blob.c;

    const int outw = top_blob.w;
    const int outh = top_blob.h;
    const int outch = top_blob.c;

    // a stride-2 row consumes 2*outw input columns; skip what is left of the
    // row plus one whole extra row to land on the next input row pair
    const int tailstep = w - 2 * outw + w;

    const float* kernel = _kernel;
    const float* bias = _bias;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        Mat out = top_blob.channel(p);

        const float bias0 = bias ? bias[p] : 0.f;
        out.fill(bias0);

        for (int q = 0; q < inch; q++)
        {
            float* outptr = out;

            const float* img = bottom_blob.channel(q);
            const float* kernel0 = kernel + p * inch * 9 + q * 9;

            // three consecutive input rows feeding one output row
            const float* r0 = img;
            const float* r1 = img + w;
            const float* r2 = img + w * 2;

            // the 3x3 kernel, one row at a time
            const float* k0 = kernel0;
            const float* k1 = kernel0 + 3;
            const float* k2 = kernel0 + 6;

            for (int i = 0; i < outh; i++)
            {
                for (int j = 0; j < outw; j++)
                {
                    float sum = r0[0] * k0[0] + r0[1] * k0[1] + r0[2] * k0[2]
                              + r1[0] * k1[0] + r1[1] * k1[1] + r1[2] * k1[2]
                              + r2[0] * k2[0] + r2[1] * k2[1] + r2[2] * k2[2];

                    *outptr++ += sum;

                    r0 += 2;
                    r1 += 2;
                    r2 += 2;
                }

                r0 += tailstep;
                r1 += tailstep;
                r2 += tailstep;
            }
        }
    }
}
|
test_init_mt.c | /**
* Copyright (C) Mellanox Technologies Ltd. 2020. ALL RIGHTS RESERVED.
*
* See file LICENSE for terms.
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <ucp/api/ucp.h>

#if _OPENMP
#include <omp.h>
#endif

#include <stdio.h>
#include <time.h>
#include <sys/time.h>
/*
 * Stress test: every OpenMP thread concurrently initializes its own UCP
 * context and worker, then tears both down again.  Reports how many threads
 * managed to create a context AND a worker, plus wall-clock timestamps.
 *
 * Fixes vs. previous revision:
 *  - the stray "#pragma omp barrier" AFTER the parallel region was a no-op
 *    (the region already ends with an implicit barrier) and is removed;
 *  - worker_status is initialized so it is well-defined on every path
 *    (silences maybe-uninitialized warnings);
 *  - unused argc/argv are explicitly voided.
 */
int main(int argc, char **argv)
{
    int count = 0;              /* threads that created a context AND a worker */
    struct timeval start;
    struct timeval finish;

    (void)argc;
    (void)argv;

    gettimeofday(&start, NULL);
    printf("starting test [%ld.%06ld] .. ", start.tv_sec, start.tv_usec);
    fflush(stdout);

#pragma omp parallel
    {
        ucs_status_t ctx_status;
        ucs_status_t worker_status = UCS_OK; /* overwritten before any
                                                meaningful read; initialized
                                                to keep every path defined */
        ucp_context_h context;
        ucp_worker_h worker;
        ucp_params_t params;
        ucp_worker_params_t wparams;

        params.field_mask = UCP_PARAM_FIELD_FEATURES;
        params.features   = UCP_FEATURE_TAG | UCP_FEATURE_STREAM;

        ctx_status = ucp_init(&params, NULL, &context);
        if (ctx_status == UCS_OK) {
            wparams.field_mask = 0;
            worker_status      = ucp_worker_create(context, &wparams, &worker);
            if (worker_status == UCS_OK) {
                /* atomic increment: many threads update the shared counter */
                __sync_add_and_fetch(&count, 1);
            }
        }

        /* make every thread finish creating before any thread starts
           destroying, so the two phases do not overlap */
#pragma omp barrier
        if (ctx_status == UCS_OK) {
            if (worker_status == UCS_OK) {
                ucp_worker_destroy(worker);
            }
            ucp_cleanup(context);
        }
    }

    gettimeofday(&finish, NULL);
    printf("[%ld.%06ld] finished %d threads\n",
           finish.tv_sec, finish.tv_usec, count);
    fflush(stdout);
    return 0;
}
|
vecadd_opt3.c | #include <stdio.h>
#include <omp.h>
#include "timer.h"
// large enough to force into main memory
#define ARRAY_SIZE 80000000
static double a[ARRAY_SIZE], b[ARRAY_SIZE], c[ARRAY_SIZE];
void vector_add(double *c, double *a, double *b, int n);
// Driver: fills two large file-scope arrays inside one parallel region, then
// times a team-shared vector_add sweep.  Initializing a[] and b[] in the
// parallel "omp for" gives first-touch placement of the pages near the
// threads that will later read them.
int main(int argc, char *argv[]){
#pragma omp parallel
    // only one thread reports the team size
    if (omp_get_thread_num() == 0)
        printf("Running with %d thread(s)\n",omp_get_num_threads());

    struct timespec tstart;
    double time_sum = 0.0;

#pragma omp parallel
    {
#pragma omp for
        for (int i=0; i<ARRAY_SIZE; i++) {
            a[i] = 1.0;
            b[i] = 2.0;
        }
        // NOTE(review): "omp master" implies no barrier, so non-master threads
        // may enter vector_add before the master thread starts the clock --
        // presumably an accepted skew in this optimized variant; confirm intent.
#pragma omp master
        cpu_timer_start(&tstart);

        vector_add(c, a, b, ARRAY_SIZE);

        // the "omp for" inside vector_add ends with an implicit barrier, so all
        // threads have finished their chunk before the master stops the clock
#pragma omp master
        time_sum += cpu_timer_stop(tstart);
    } // end of omp parallel

    printf("Runtime is %lf msecs\n", time_sum);
}
// Element-wise sum: c[i] = a[i] + b[i] for i in [0, n).
// Contains an orphaned "omp for": called from inside a parallel region the
// iterations are shared across the team; called serially the whole loop
// simply runs on the calling thread.
void vector_add(double *c, double *a, double *b, int n)
{
#pragma omp for
    for (int idx = 0; idx < n; ++idx)
        c[idx] = a[idx] + b[idx];
}
|
FindHaloes.c |
// Re-write of find_halos.c from the original 21cmFAST
// ComputeHaloField takes in a k_space box of the linear overdensity field
// and filters it on decreasing scales in order to find virialized halos.
// Virialized halos are defined according to the linear critical overdensity.
// ComputeHaloField outputs a cube with non-zero elements containing the Mass of
// the virialized halos
int check_halo(char * in_halo, struct UserParams *user_params, float R, int x, int y, int z, int check_type);
int pixel_in_halo(struct UserParams *user_params, int x, int x_index, int y, int y_index, int z, int z_index, float Rsq_curr_index );
void init_halo_coords(struct HaloField *halos, int n_halos);
void free_halo_field(struct HaloField *halos);
void init_hmf(struct HaloField *halos);
void trim_hmf(struct HaloField *halos);
/**
 * ComputeHaloField -- excursion-set halo finder.
 *
 * Filters the k-space linear overdensity field on successively smaller
 * scales R; wherever the smoothed density exceeds the (linear) critical
 * overdensity and the pixel is not already inside a larger halo, a halo of
 * mass RtoM(R) is recorded.  Results (masses, coordinates, and a binned
 * mass function) are written into <halos>.
 *
 * Returns 0 on success, or the caught error status from the Try/Catch
 * exception macros on failure.
 */
int ComputeHaloField(float redshift, struct UserParams *user_params, struct CosmoParams *cosmo_params,
                    struct AstroParams *astro_params, struct FlagOptions *flag_options,
                    struct InitialConditions *boxes, struct HaloField *halos) {
int status;
Try{ // This Try brackets the whole function, so we don't indent.
LOG_DEBUG("input value:");
LOG_DEBUG("redshift=%f", redshift);
#if LOG_LEVEL >= DEBUG_LEVEL
writeUserParams(user_params);
writeCosmoParams(cosmo_params);
writeAstroParams(flag_options, astro_params);
writeFlagOptions(flag_options);
#endif
// Makes the parameter structs visible to a variety of functions/macros
// Do each time to avoid Python garbage collection issues
Broadcast_struct_global_PS(user_params,cosmo_params);
Broadcast_struct_global_UF(user_params,cosmo_params);
omp_set_num_threads(user_params->N_THREADS);
fftwf_complex *density_field, *density_field_saved;
float growth_factor, R, delta_m, dm, dlnm, M, Delta_R, delta_crit;
double fgtrm, dfgtrm;
unsigned long long ct;
// NOTE(review): dm, ct, filename, R_temp companions x_temp/y_temp/z_temp and
// dummy appear unused in this function -- presumably leftovers from the
// original find_halos.c; confirm before removing.
char filename[80], *in_halo, *forbidden;
int i,j,k,x,y,z,dn,n,counter;
int total_halo_num;
float R_temp, x_temp, y_temp, z_temp, dummy, M_MIN;
LOG_DEBUG("Begin Initialisation");
counter = 0;
// ***************** END INITIALIZATION ***************** //
init_ps();
growth_factor = dicke(redshift); // normalized to 1 at z=0
delta_crit = Deltac; // for now set to spherical; check if we want elipsoidal later
//set the minimum source mass
if(flag_options->USE_MASS_DEPENDENT_ZETA) {
M_MIN = astro_params->M_TURN;
}
else {
if(flag_options->M_MIN_in_Mass) {
M_MIN = (astro_params->M_TURN);
}
else {
//set the minimum source mass
// mean molecular weight 1.22 for neutral, 0.6 for ionized gas
if (astro_params->ION_Tvir_MIN < 9.99999e3) { // neutral IGM
M_MIN = TtoM(redshift, astro_params->ION_Tvir_MIN, 1.22);
}
else { // ionized IGM
M_MIN = TtoM(redshift, astro_params->ION_Tvir_MIN, 0.6);
}
}
}
// allocate array for the k-space box
density_field = (fftwf_complex *) fftwf_malloc(sizeof(fftwf_complex)*KSPACE_NUM_PIXELS);
density_field_saved = (fftwf_complex *) fftwf_malloc(sizeof(fftwf_complex)*KSPACE_NUM_PIXELS);
// allocate memory for the boolean in_halo box
in_halo = (char *) malloc(sizeof(char)*TOT_NUM_PIXELS);
// initialize
memset(in_halo, 0, sizeof(char)*TOT_NUM_PIXELS);
// <forbidden> is only needed (and only freed) when OPTIMIZE is on
if(global_params.OPTIMIZE) {
forbidden = (char *) malloc(sizeof(char)*TOT_NUM_PIXELS);
}
// copy the real-space hi-res density into the padded FFT layout
#pragma omp parallel shared(boxes,density_field) private(i,j,k) num_threads(user_params->N_THREADS)
{
#pragma omp for
for (i=0; i<user_params->DIM; i++){
for (j=0; j<user_params->DIM; j++){
for (k=0; k<user_params->DIM; k++){
*((float *)density_field + R_FFT_INDEX(i,j,k)) = *((float *)boxes->hires_density + R_INDEX(i,j,k));
}
}
}
}
dft_r2c_cube(user_params->USE_FFTW_WISDOM, user_params->DIM, user_params->N_THREADS, density_field);
// save a copy of the k-space density field
memcpy(density_field_saved, density_field, sizeof(fftwf_complex)*KSPACE_NUM_PIXELS);
// ***************** END INITIALIZATION ***************** //
LOG_DEBUG("Finalised Initialisation");
// lets filter it now
// set initial R value
R = MtoR(M_MIN*1.01); // one percent higher for rounding
LOG_DEBUG("Prepare to filter to find halos");
// grow R geometrically until it reaches the box scale; the main loop below
// then walks back down through the same geometric ladder of scales
while (R < L_FACTOR*user_params->BOX_LEN)
R*=global_params.DELTA_R_FACTOR;
fgtrm=dfgtrm=0;
n=0;
Delta_R = L_FACTOR*2.*user_params->BOX_LEN/(user_params->DIM+0.0);
total_halo_num = 0;
// This uses more memory than absolutely necessary, but is fastest.
init_hmf(halos);
// per-pixel halo mass map; 0 marks "no halo centred here"
float *halo_field = calloc(TOT_NUM_PIXELS, sizeof(float));
while ((R > 0.5*Delta_R) && (RtoM(R) >= M_MIN)){ // filter until we get to half the pixel size or M_MIN
LOG_ULTRA_DEBUG("while loop for finding halos: R = %f 0.5*Delta_R = %f RtoM(R)=%f M_MIN=%f", R, 0.5*Delta_R, RtoM(R), M_MIN);
M = RtoM(R);
// NOTE(review): only HMF==1 gets the Sheth-Tormen delta_crit correction here;
// HMF==2,3 fall through with the spherical Deltac -- confirm this is intended.
if(global_params.DELTA_CRIT_MODE == 1 && (user_params->HMF>0 && user_params->HMF<4)){
if(user_params->HMF==1) {
// use sheth tormen correction
delta_crit = growth_factor*sheth_delc(Deltac/growth_factor, sigma_z0(M));
}
}
// first let's check if virialized halos of this size are rare enough
// that we don't have to worry about them (let's define 7 sigma away, as in Mesinger et al 05)
if ((sigma_z0(M)*growth_factor*7.) < delta_crit){
LOG_DEBUG("Haloes too rare for M = %e! Skipping...", M);
R /= global_params.DELTA_R_FACTOR;
continue;
}
// restore the unfiltered k-space field before filtering on this scale
memcpy(density_field, density_field_saved, sizeof(fftwf_complex)*KSPACE_NUM_PIXELS);
// now filter the box on scale R
// 0 = top hat in real space, 1 = top hat in k space
filter_box(density_field, 0, global_params.HALO_FILTER, R);
// do the FFT to get delta_m box
dft_c2r_cube(user_params->USE_FFTW_WISDOM, user_params->DIM, user_params->N_THREADS, density_field);
// ***************** BEGIN OPTIMIZATION ***************** //
// to optimize speed, if the filter size is large (switch to collapse fraction criteria later)
if(global_params.OPTIMIZE) {
if(M > global_params.OPTIMIZE_MIN_MASS) {
memset(forbidden, 0, sizeof(char)*TOT_NUM_PIXELS);
// now go through the list of existing halos and paint on the no-go region onto <forbidden>
for (x=0; x<user_params->DIM; x++){
for (y=0; y<user_params->DIM; y++){
for (z=0; z<user_params->DIM; z++){
if(halo_field[R_INDEX(x,y,z)] > 0.) {
R_temp = MtoR(halo_field[R_INDEX(x,y,z)]);
check_halo(forbidden, user_params, R_temp+global_params.R_OVERLAP_FACTOR*R, x,y,z,2);
}
}
}
}
}
}
// ***************** END OPTIMIZATION ***************** //
// now lets scroll through the box, flagging all pixels with delta_m > delta_crit
dn=0;
for (x=0; x<user_params->DIM; x++){
for (y=0; y<user_params->DIM; y++){
for (z=0; z<user_params->DIM; z++){
delta_m = *((float *)density_field + R_FFT_INDEX(x,y,z)) * growth_factor / VOLUME; // don't forget the factor of 1/VOLUME!
// if not within a larger halo, and radii don't overlap, update in_halo box
// ***************** BEGIN OPTIMIZATION ***************** //
if(global_params.OPTIMIZE) {
if(M > global_params.OPTIMIZE_MIN_MASS) {
if ( (delta_m > delta_crit) && !forbidden[R_INDEX(x,y,z)]){
check_halo(in_halo, user_params, R, x,y,z,2); // flag the pixels contained within this halo
check_halo(forbidden, user_params, (1.+global_params.R_OVERLAP_FACTOR)*R, x,y,z,2); // flag the pixels contained within this halo
halo_field[R_INDEX(x,y,z)] = M;
dn++; // keep track of the number of halos
n++;
total_halo_num++;
}
}
}
// ***************** END OPTIMIZATION ***************** //
else {
if ((delta_m > delta_crit) && !in_halo[R_INDEX(x,y,z)] && !check_halo(in_halo, user_params, R, x,y,z,1)){ // we found us a "new" halo!
check_halo(in_halo, user_params, R, x,y,z,2); // flag the pixels contained within this halo
halo_field[R_INDEX(x,y,z)] = M;
dn++; // keep track of the number of halos
n++;
total_halo_num++;
}
}
}
}
}
if (dn > 0){
// now lets keep the mass functions (FgrtR)
fgtrm += M/(RHOcrit*cosmo_params->OMm)*dn/VOLUME;
dfgtrm += pow(M/(RHOcrit*cosmo_params->OMm)*sqrt(dn)/VOLUME, 2);
// and the dndlnm files
dlnm = log(RtoM(global_params.DELTA_R_FACTOR*R)) - log(M);
if (halos->n_mass_bins == halos->max_n_mass_bins){
// We've gone past the limit.
LOG_WARNING("Code has required more than 100 mass bins, and will no longer store masses.");
}
else{
halos->mass_bins[halos->n_mass_bins] = M;
halos->fgtrm[halos->n_mass_bins] = fgtrm;
halos->sqrt_dfgtrm[halos->n_mass_bins] = sqrt(dfgtrm);
halos->dndlm[halos->n_mass_bins] = dn/VOLUME/dlnm;
halos->sqrtdn_dlm[halos->n_mass_bins] = sqrt(dn)/VOLUME/dlnm;
halos->n_mass_bins++;
}
}
// step down to the next (smaller) filter scale
R /= global_params.DELTA_R_FACTOR;
}
LOG_DEBUG("Obtained halo masses and positions, now saving to HaloField struct.");
// Trim the mass function entries
trim_hmf(halos);
// Initialize the halo co-ordinate and mass arrays.
init_halo_coords(halos, total_halo_num);
// reuse counter as its no longer needed
counter = 0;
// copy every non-zero pixel of the mass map into the flat output arrays
for (x=0; x<user_params->DIM; x++){
for (y=0; y<user_params->DIM; y++){
for (z=0; z<user_params->DIM; z++){
if(halo_field[R_INDEX(x,y,z)] > 0.) {
halos->halo_masses[counter] = halo_field[R_INDEX(x,y,z)];
halos->halo_coords[0 + counter*3] = x;
halos->halo_coords[1 + counter*3] = y;
halos->halo_coords[2 + counter*3] = z;
counter++;
}
}
}
}
LOG_DEBUG("Finished halo processing.");
free(in_halo);
free(halo_field);
if(global_params.OPTIMIZE) {
free(forbidden);
}
fftwf_free(density_field);
fftwf_free(density_field_saved);
fftwf_cleanup_threads();
fftwf_cleanup();
fftwf_forget_wisdom();
LOG_DEBUG("Finished halo cleanup.");
LOG_DEBUG("Found %d Halos", halos->n_halos);
if (halos->n_halos > 3)
LOG_DEBUG("Halo Masses: %e %e %e %e", halos->halo_masses[0], halos->halo_masses[1], halos->halo_masses[2], halos->halo_masses[3]);
} // End of Try()
Catch(status){
return(status);
}
return(0);
}
// Function check_halo combines the original two functions overlap_halo and update_in_halo
// from the original 21cmFAST. Lots of redundant code, hence reduced into a single function
//
// check_type == 1 (original overlap_halo):
//   returns 1 if a would-be halo of radius R centred on (x,y,z) overlaps a
//   pre-existing halo flagged in <in_halo>, 0 otherwise.
// check_type == 2 (original update_in_halo):
//   flags every pixel of <in_halo> within radius R of (x,y,z); the return
//   value carries no information in this mode (always 0).
// The box is periodic: indices are wrapped, and pixel_in_halo() checks all
// reflections of the centre.
//
// Fixes vs. previous revision: the check_type==2 path used to fall off the
// end of this non-void function (undefined behaviour if the result is read),
// and nine float locals left over from the pixel_in_halo() extraction were
// declared but never used.
int check_halo(char * in_halo, struct UserParams *user_params, float R, int x, int y, int z, int check_type) {

    int x_curr, y_curr, z_curr, x_min, x_max, y_min, y_max, z_min, z_max, R_index;
    float Rsq_curr_index;
    int x_index, y_index, z_index;

    if(check_type==1) {
        // scale R to a effective overlap size, using R_OVERLAP_FACTOR
        R *= global_params.R_OVERLAP_FACTOR;
    }

    // convert R to index units
    R_index = ceil(R/user_params->BOX_LEN*user_params->DIM);
    Rsq_curr_index = pow(R/user_params->BOX_LEN*user_params->DIM, 2); // convert to index

    // set parameter range: the bounding cube of the sphere, in index units
    x_min = x-R_index;
    x_max = x+R_index;
    y_min = y-R_index;
    y_max = y+R_index;
    z_min = z-R_index;
    z_max = z+R_index;

    for (x_curr=x_min; x_curr<=x_max; x_curr++){
        for (y_curr=y_min; y_curr<=y_max; y_curr++){
            for (z_curr=z_min; z_curr<=z_max; z_curr++){
                x_index = x_curr;
                y_index = y_curr;
                z_index = z_curr;
                // adjust if we are outside of the box (periodic wrap)
                if (x_index<0) {x_index += user_params->DIM;}
                else if (x_index>=user_params->DIM) {x_index -= user_params->DIM;}
                if (y_index<0) {y_index += user_params->DIM;}
                else if (y_index>=user_params->DIM) {y_index -= user_params->DIM;}
                if (z_index<0) {z_index += user_params->DIM;}
                else if (z_index>=user_params->DIM) {z_index -= user_params->DIM;}

                if(check_type==1) {
                    if ( in_halo[R_INDEX(x_index, y_index, z_index)] &&
                        pixel_in_halo(user_params,x,x_index,y,y_index,z,z_index,Rsq_curr_index) ) {
                        // this pixel already belongs to a halo, and would want to become part of this halo as well
                        return 1;
                    }
                }
                else if(check_type==2) {
                    // now check
                    if (!in_halo[R_INDEX(x_index, y_index, z_index)]){
                        if(pixel_in_halo(user_params,x,x_index,y,y_index,z,z_index,Rsq_curr_index)) {
                            // we are within the sphere defined by R, so change flag in in_halo array
                            in_halo[R_INDEX(x_index, y_index, z_index)] = 1;
                        }
                    }
                }
                else {
                    LOG_ERROR("check_type must be 1 or 2, got %d", check_type);
                    Throw ValueError;
                }
            }
        }
    }

    // no overlap found (check_type==1) / flagging complete (check_type==2)
    return 0;
}
// Allocate the per-halo mass and coordinate arrays for <n_halos> halos.
// Minimise memory usage by only storing the halo mass and positions.
// calloc zero-initialises, so unfilled entries read back as 0 / 0.0f.
// (The unused local `int i;` from the previous revision is removed.)
void init_halo_coords(struct HaloField *halos, int n_halos){
    halos->n_halos = n_halos;
    halos->halo_masses = (float *)calloc(n_halos,sizeof(float));
    halos->halo_coords = (int *)calloc(3*n_halos,sizeof(int));
}
// Release every dynamically allocated array inside a HaloField and reset the
// counters.  Each pointer is nulled after free so a repeated call (or a later
// accidental dereference) cannot double-free or touch freed memory --
// free(NULL) is defined to be a no-op.
void free_halo_field(struct HaloField *halos){
    LOG_DEBUG("Freeing HaloField instance.");

    free(halos->halo_masses);
    halos->halo_masses = NULL;
    free(halos->halo_coords);
    halos->halo_coords = NULL;
    halos->n_halos = 0;

    free(halos->mass_bins);
    halos->mass_bins = NULL;
    free(halos->fgtrm);
    halos->fgtrm = NULL;
    free(halos->sqrt_dfgtrm);
    halos->sqrt_dfgtrm = NULL;
    free(halos->dndlm);
    halos->dndlm = NULL;
    free(halos->sqrtdn_dlm);
    halos->sqrtdn_dlm = NULL;
    halos->n_mass_bins = 0;
}
// Set up the halo-mass-function arrays with a generous fixed capacity of 100
// bins; trim_hmf() later shrinks them to the number of bins actually filled.
void init_hmf(struct HaloField *halos){
    const int capacity = 100;
    const size_t nbytes = capacity * sizeof(float);

    halos->max_n_mass_bins = capacity;

    halos->mass_bins    = (float *) malloc(nbytes);
    halos->fgtrm        = (float *) malloc(nbytes);
    halos->sqrt_dfgtrm  = (float *) malloc(nbytes);
    halos->dndlm        = (float *) malloc(nbytes);
    halos->sqrtdn_dlm   = (float *) malloc(nbytes);

    halos->n_mass_bins = 0;
}
// Shrink the mass-function arrays down to the n_mass_bins entries actually
// filled.  A shrinking realloc should not fail, but if one ever returns NULL
// the previous code would have leaked the buffer AND nulled the array
// pointer; here the original (larger, still valid) buffer is kept instead.
void trim_hmf(struct HaloField *halos){
    if (halos->n_mass_bins > 0){
        size_t nbytes = sizeof(float) * halos->n_mass_bins;
        float *tmp;

        if ((tmp = (float *) realloc(halos->mass_bins, nbytes)) != NULL)
            halos->mass_bins = tmp;
        if ((tmp = (float *) realloc(halos->fgtrm, nbytes)) != NULL)
            halos->fgtrm = tmp;
        if ((tmp = (float *) realloc(halos->sqrt_dfgtrm, nbytes)) != NULL)
            halos->sqrt_dfgtrm = tmp;
        if ((tmp = (float *) realloc(halos->dndlm, nbytes)) != NULL)
            halos->dndlm = tmp;
        if ((tmp = (float *) realloc(halos->sqrtdn_dlm, nbytes)) != NULL)
            halos->sqrtdn_dlm = tmp;
    }
}
int pixel_in_halo(struct UserParams *user_params, int x, int x_index, int y, int y_index, int z, int z_index, float Rsq_curr_index ) {
    // Returns 1 if pixel (x,y,z) lies within the sphere of squared radius
    // Rsq_curr_index centred on (x_index,y_index,z_index), checking all 27
    // periodic reflections (offsets of 0 and +/- DIM along each axis).
    //
    // The original spelled out all 27 comparisons and squared each offset
    // with pow(., 2); plain multiplication is both faster and exact for
    // these integer-valued offsets, and a 3x3x3 loop replaces the wall of
    // comparisons.  The early return reproduces the original OR-chain.
    float dsq[3][3];
    const int delta[3] = {x - x_index, y - y_index, z - z_index};
    int axis, i, j, k;

    // Squared distance along each axis for the pixel itself (column 0) and
    // its +DIM / -DIM periodic images (columns 1 and 2).
    for (axis = 0; axis < 3; axis++) {
        const float d0 = (float)delta[axis];
        const float dp = d0 + (float)user_params->DIM;
        const float dm = d0 - (float)user_params->DIM;
        dsq[axis][0] = d0 * d0;
        dsq[axis][1] = dp * dp;
        dsq[axis][2] = dm * dm;
    }

    // Inside the halo if ANY image combination falls within the sphere.
    for (i = 0; i < 3; i++) {
        for (j = 0; j < 3; j++) {
            for (k = 0; k < 3; k++) {
                if (Rsq_curr_index > (dsq[0][i] + dsq[1][j] + dsq[2][k])) {
                    return(1);
                }
            }
        }
    }
    return(0);
}
//===- ASTMatchers.h - Structural query framework ---------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements matchers to be used together with the MatchFinder to
// match AST nodes.
//
// Matchers are created by generator functions, which can be combined in
// a functional in-language DSL to express queries over the C++ AST.
//
// For example, to match a class with a certain name, one would call:
// cxxRecordDecl(hasName("MyClass"))
// which returns a matcher that can be used to find all AST nodes that declare
// a class named 'MyClass'.
//
// For more complicated match expressions we're often interested in accessing
// multiple parts of the matched AST nodes once a match is found. In that case,
// call `.bind("name")` on match expressions that match the nodes you want to
// access.
//
// For example, when we're interested in child classes of a certain class, we
// would write:
// cxxRecordDecl(hasName("MyClass"), has(recordDecl().bind("child")))
// When the match is found via the MatchFinder, a user provided callback will
// be called with a BoundNodes instance that contains a mapping from the
// strings that we provided for the `.bind()` calls to the nodes that were
// matched.
// In the given example, each time our matcher finds a match we get a callback
// where "child" is bound to the RecordDecl node of the matching child
// class declaration.
//
// See ASTMatchersInternal.h for a more in-depth explanation of the
// implementation details of the matcher framework.
//
// See ASTMatchFinder.h for how to use the generated matchers to run over
// an AST.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_ASTMATCHERS_ASTMATCHERS_H
#define LLVM_CLANG_ASTMATCHERS_ASTMATCHERS_H
#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTTypeTraits.h"
#include "clang/AST/Attr.h"
#include "clang/AST/CXXInheritance.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclFriend.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/LambdaCapture.h"
#include "clang/AST/NestedNameSpecifier.h"
#include "clang/AST/OpenMPClause.h"
#include "clang/AST/OperationKinds.h"
#include "clang/AST/ParentMapContext.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/StmtObjC.h"
#include "clang/AST/StmtOpenMP.h"
#include "clang/AST/TemplateBase.h"
#include "clang/AST/TemplateName.h"
#include "clang/AST/Type.h"
#include "clang/AST/TypeLoc.h"
#include "clang/ASTMatchers/ASTMatchersInternal.h"
#include "clang/ASTMatchers/ASTMatchersMacros.h"
#include "clang/Basic/AttrKinds.h"
#include "clang/Basic/ExceptionSpecificationType.h"
#include "clang/Basic/FileManager.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Basic/TypeTraits.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Regex.h"
#include <cassert>
#include <cstddef>
#include <iterator>
#include <limits>
#include <string>
#include <utility>
#include <vector>
namespace clang {
namespace ast_matchers {
/// Maps string IDs to AST nodes matched by parts of a matcher.
///
/// The bound nodes are generated by calling \c bind("id") on the node matchers
/// of the nodes we want to access later.
///
/// The instances of BoundNodes are created by \c MatchFinder when the user's
/// callbacks are executed every time a match is found.
class BoundNodes {
public:
/// Returns the AST node bound to \c ID.
///
/// Returns NULL if there was no node bound to \c ID or if there is a node but
/// it cannot be converted to the specified type.
template <typename T>
const T *getNodeAs(StringRef ID) const {
return MyBoundNodes.getNodeAs<T>(ID);
}
/// Type of mapping from binding identifiers to bound nodes. This type
/// is an associative container with a key type of \c std::string and a value
/// type of \c clang::DynTypedNode
using IDToNodeMap = internal::BoundNodesMap::IDToNodeMap;
/// Retrieve mapping from binding identifiers to bound nodes.
const IDToNodeMap &getMap() const {
return MyBoundNodes.getMap();
}
private:
friend class internal::BoundNodesTreeBuilder;
/// Create BoundNodes from a pre-filled map of bindings.
BoundNodes(internal::BoundNodesMap &MyBoundNodes)
: MyBoundNodes(MyBoundNodes) {}
internal::BoundNodesMap MyBoundNodes;
};
/// Types of matchers for the top-level classes in the AST class
/// hierarchy.
///
/// These aliases are convenient for spelling a matcher's type explicitly,
/// e.g. when an implicit conversion to a specific node matcher is required.
/// @{
using DeclarationMatcher = internal::Matcher<Decl>;
using StatementMatcher = internal::Matcher<Stmt>;
using TypeMatcher = internal::Matcher<QualType>;
using TypeLocMatcher = internal::Matcher<TypeLoc>;
using NestedNameSpecifierMatcher = internal::Matcher<NestedNameSpecifier>;
using NestedNameSpecifierLocMatcher = internal::Matcher<NestedNameSpecifierLoc>;
using CXXBaseSpecifierMatcher = internal::Matcher<CXXBaseSpecifier>;
using CXXCtorInitializerMatcher = internal::Matcher<CXXCtorInitializer>;
using TemplateArgumentMatcher = internal::Matcher<TemplateArgument>;
using TemplateArgumentLocMatcher = internal::Matcher<TemplateArgumentLoc>;
using LambdaCaptureMatcher = internal::Matcher<LambdaCapture>;
using AttrMatcher = internal::Matcher<Attr>;
/// @}
/// Matches any node.
///
/// Useful when another matcher requires a child matcher but no additional
/// constraint is wanted. Often used with an explicit conversion to an
/// \c internal::Matcher<> type such as \c TypeMatcher.
///
/// Example: \c DeclarationMatcher(anything()) matches all declarations, e.g.,
/// \code
/// "int* p" and "void f()" in
/// int* p;
/// void f();
/// \endcode
///
/// Usable as: Any Matcher
inline internal::TrueMatcher anything() {
  internal::TrueMatcher AnyNode;
  return AnyNode;
}
/// Matches the top declaration context.
///
/// Given
/// \code
/// int X;
/// namespace NS {
/// int Y;
/// } // namespace NS
/// \endcode
/// decl(hasDeclContext(translationUnitDecl()))
/// matches "int X", but not "int Y".
extern const internal::VariadicDynCastAllOfMatcher<Decl, TranslationUnitDecl>
translationUnitDecl;
/// Matches typedef declarations.
///
/// Given
/// \code
/// typedef int X;
/// using Y = int;
/// \endcode
/// typedefDecl()
/// matches "typedef int X", but not "using Y = int"
extern const internal::VariadicDynCastAllOfMatcher<Decl, TypedefDecl>
typedefDecl;
/// Matches typedef name declarations.
///
/// Given
/// \code
/// typedef int X;
/// using Y = int;
/// \endcode
/// typedefNameDecl()
/// matches "typedef int X" and "using Y = int"
/// See also typedefDecl() and typeAliasDecl() for matching only one of the
/// two spellings.
extern const internal::VariadicDynCastAllOfMatcher<Decl, TypedefNameDecl>
typedefNameDecl;
/// Matches type alias declarations.
///
/// Given
/// \code
/// typedef int X;
/// using Y = int;
/// \endcode
/// typeAliasDecl()
/// matches "using Y = int", but not "typedef int X"
extern const internal::VariadicDynCastAllOfMatcher<Decl, TypeAliasDecl>
typeAliasDecl;
/// Matches type alias template declarations.
///
/// typeAliasTemplateDecl() matches
/// \code
/// template <typename T>
/// using Y = X<T>;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, TypeAliasTemplateDecl>
typeAliasTemplateDecl;
/// Matches AST nodes that were expanded within the main-file.
///
/// Example matches X but not Y
/// (matcher = cxxRecordDecl(isExpansionInMainFile()))
/// \code
/// #include <Y.h>
/// class X {};
/// \endcode
/// Y.h:
/// \code
/// class Y {};
/// \endcode
///
/// Usable as: Matcher<Decl>, Matcher<Stmt>, Matcher<TypeLoc>
AST_POLYMORPHIC_MATCHER(isExpansionInMainFile,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, Stmt, TypeLoc)) {
  auto &SM = Finder->getASTContext().getSourceManager();
  return SM.isInMainFile(SM.getExpansionLoc(Node.getBeginLoc()));
}
/// Matches AST nodes that were expanded within system-header-files.
///
/// Example matches Y but not X
/// (matcher = cxxRecordDecl(isExpansionInSystemHeader()))
/// \code
/// #include <SystemHeader.h>
/// class X {};
/// \endcode
/// SystemHeader.h:
/// \code
/// class Y {};
/// \endcode
///
/// Usable as: Matcher<Decl>, Matcher<Stmt>, Matcher<TypeLoc>
AST_POLYMORPHIC_MATCHER(isExpansionInSystemHeader,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, Stmt, TypeLoc)) {
  auto &SM = Finder->getASTContext().getSourceManager();
  SourceLocation ExpansionLoc = SM.getExpansionLoc(Node.getBeginLoc());
  // An invalid expansion location can never be in a system header.
  return ExpansionLoc.isValid() && SM.isInSystemHeader(ExpansionLoc);
}
/// Matches AST nodes that were expanded within files whose name is
/// partially matching a given regex.
///
/// Example matches Y but not X
/// (matcher = cxxRecordDecl(isExpansionInFileMatching("AST.*")))
/// \code
/// #include "ASTMatcher.h"
/// class X {};
/// \endcode
/// ASTMatcher.h:
/// \code
/// class Y {};
/// \endcode
///
/// Usable as: Matcher<Decl>, Matcher<Stmt>, Matcher<TypeLoc>
AST_POLYMORPHIC_MATCHER_REGEX(isExpansionInFileMatching,
                              AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, Stmt,
                                                              TypeLoc),
                              RegExp) {
  auto &SM = Finder->getASTContext().getSourceManager();
  SourceLocation Loc = SM.getExpansionLoc(Node.getBeginLoc());
  if (Loc.isInvalid()) {
    return false;
  }
  // No file entry (e.g. built-ins) means there is no name to match against.
  const auto *Entry = SM.getFileEntryForID(SM.getFileID(Loc));
  return Entry && RegExp->match(Entry->getName());
}
/// Matches statements that are (transitively) expanded from the named macro.
/// Does not match if only part of the statement is expanded from that macro or
/// if different parts of the statement are expanded from different
/// appearances of the macro.
AST_POLYMORPHIC_MATCHER_P(isExpandedFromMacro,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, Stmt, TypeLoc),
                          std::string, MacroName) {
  // The statement's beginning and ending must both come from the same
  // expansion of the given macro.
  auto &Ctx = Finder->getASTContext();
  llvm::Optional<SourceLocation> Begin =
      internal::getExpansionLocOfMacro(MacroName, Node.getBeginLoc(), Ctx);
  if (!Begin)
    return false;
  llvm::Optional<SourceLocation> End =
      internal::getExpansionLocOfMacro(MacroName, Node.getEndLoc(), Ctx);
  return End && *Begin == *End;
}
/// Matches declarations.
///
/// Examples matches \c X, \c C, and the friend declaration inside \c C;
/// \code
/// void X();
/// class C {
/// friend X;
/// };
/// \endcode
extern const internal::VariadicAllOfMatcher<Decl> decl;
/// Matches decomposition-declarations.
///
/// Examples matches the declaration node with \c foo and \c bar, but not
/// \c number.
/// (matcher = declStmt(has(decompositionDecl())))
///
/// \code
/// int number = 42;
/// auto [foo, bar] = std::make_pair(42, 42);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, DecompositionDecl>
decompositionDecl;
/// Matches binding declarations
/// Example matches \c foo and \c bar
/// (matcher = bindingDecl())
///
/// \code
/// auto [foo, bar] = std::make_pair(42, 42);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, BindingDecl>
bindingDecl;
/// Matches a declaration of a linkage specification.
///
/// Given
/// \code
/// extern "C" {}
/// \endcode
/// linkageSpecDecl()
/// matches "extern "C" {}"
extern const internal::VariadicDynCastAllOfMatcher<Decl, LinkageSpecDecl>
linkageSpecDecl;
/// Matches a declaration of anything that could have a name.
///
/// Example matches \c X, \c S, the anonymous union type, \c i, and \c U;
/// \code
/// typedef int X;
/// struct S {
/// union {
/// int i;
/// } U;
/// };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, NamedDecl> namedDecl;
/// Matches a declaration of label.
///
/// Given
/// \code
/// goto FOO;
/// FOO: bar();
/// \endcode
/// labelDecl()
/// matches 'FOO:'
extern const internal::VariadicDynCastAllOfMatcher<Decl, LabelDecl> labelDecl;
/// Matches a declaration of a namespace.
///
/// Given
/// \code
/// namespace {}
/// namespace test {}
/// \endcode
/// namespaceDecl()
/// matches "namespace {}" and "namespace test {}"
extern const internal::VariadicDynCastAllOfMatcher<Decl, NamespaceDecl>
namespaceDecl;
/// Matches a declaration of a namespace alias.
///
/// Given
/// \code
/// namespace test {}
/// namespace alias = ::test;
/// \endcode
/// namespaceAliasDecl()
/// matches "namespace alias" but not "namespace test"
extern const internal::VariadicDynCastAllOfMatcher<Decl, NamespaceAliasDecl>
namespaceAliasDecl;
/// Matches class, struct, and union declarations.
///
/// Example matches \c X, \c Z, \c U, and \c S
/// \code
/// class X;
/// template<class T> class Z {};
/// struct S {};
/// union U {};
/// \endcode
/// See also cxxRecordDecl() for matching only C++ class declarations.
extern const internal::VariadicDynCastAllOfMatcher<Decl, RecordDecl> recordDecl;
/// Matches C++ class declarations.
///
/// Example matches \c X, \c Z
/// \code
/// class X;
/// template<class T> class Z {};
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXRecordDecl>
cxxRecordDecl;
/// Matches C++ class template declarations.
///
/// Example matches \c Z
/// \code
/// template<class T> class Z {};
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ClassTemplateDecl>
classTemplateDecl;
/// Matches C++ class template specializations.
///
/// Given
/// \code
/// template<typename T> class A {};
/// template<> class A<double> {};
/// A<int> a;
/// \endcode
/// classTemplateSpecializationDecl()
/// matches the specializations \c A<int> and \c A<double>
extern const internal::VariadicDynCastAllOfMatcher<
Decl, ClassTemplateSpecializationDecl>
classTemplateSpecializationDecl;
/// Matches C++ class template partial specializations.
///
/// Given
/// \code
/// template<class T1, class T2, int I>
/// class A {};
///
/// template<class T, int I>
/// class A<T, T*, I> {};
///
/// template<>
/// class A<int, int, 1> {};
/// \endcode
/// classTemplatePartialSpecializationDecl()
/// matches the specialization \c A<T,T*,I> but not \c A<int,int,1>
extern const internal::VariadicDynCastAllOfMatcher<
Decl, ClassTemplatePartialSpecializationDecl>
classTemplatePartialSpecializationDecl;
/// Matches declarator declarations (field, variable, function
/// and non-type template parameter declarations).
///
/// Given
/// \code
/// class X { int y; };
/// \endcode
/// declaratorDecl()
/// matches \c int y.
extern const internal::VariadicDynCastAllOfMatcher<Decl, DeclaratorDecl>
declaratorDecl;
/// Matches parameter variable declarations.
///
/// Given
/// \code
/// void f(int x);
/// \endcode
/// parmVarDecl()
/// matches \c int x.
extern const internal::VariadicDynCastAllOfMatcher<Decl, ParmVarDecl>
parmVarDecl;
/// Matches C++ access specifier declarations.
///
/// Given
/// \code
/// class C {
/// public:
/// int a;
/// };
/// \endcode
/// accessSpecDecl()
/// matches 'public:' (and likewise other access specifier declarations)
extern const internal::VariadicDynCastAllOfMatcher<Decl, AccessSpecDecl>
accessSpecDecl;
/// Matches class bases.
///
/// Examples matches \c public virtual B.
/// \code
/// class B {};
/// class C : public virtual B {};
/// \endcode
extern const internal::VariadicAllOfMatcher<CXXBaseSpecifier> cxxBaseSpecifier;
/// Matches constructor initializers.
///
/// Examples matches \c i(42).
/// \code
/// class C {
/// C() : i(42) {}
/// int i;
/// };
/// \endcode
extern const internal::VariadicAllOfMatcher<CXXCtorInitializer>
cxxCtorInitializer;
/// Matches template arguments.
///
/// Given
/// \code
/// template <typename T> struct C {};
/// C<int> c;
/// \endcode
/// templateArgument()
/// matches 'int' in C<int>.
extern const internal::VariadicAllOfMatcher<TemplateArgument> templateArgument;
/// Matches template arguments (with location info).
///
/// Given
/// \code
/// template <typename T> struct C {};
/// C<int> c;
/// \endcode
/// templateArgumentLoc()
/// matches 'int' in C<int>.
/// See also templateArgument() when location information is not needed.
extern const internal::VariadicAllOfMatcher<TemplateArgumentLoc>
templateArgumentLoc;
/// Matches template name.
///
/// Given
/// \code
/// template <typename T> class X { };
/// X<int> xi;
/// \endcode
/// templateName()
/// matches 'X' in X<int>.
extern const internal::VariadicAllOfMatcher<TemplateName> templateName;
/// Matches non-type template parameter declarations.
///
/// Given
/// \code
/// template <typename T, int N> struct C {};
/// \endcode
/// nonTypeTemplateParmDecl()
/// matches 'N', but not 'T'.
extern const internal::VariadicDynCastAllOfMatcher<Decl,
NonTypeTemplateParmDecl>
nonTypeTemplateParmDecl;
/// Matches template type parameter declarations.
///
/// Given
/// \code
/// template <typename T, int N> struct C {};
/// \endcode
/// templateTypeParmDecl()
/// matches 'T', but not 'N'.
extern const internal::VariadicDynCastAllOfMatcher<Decl, TemplateTypeParmDecl>
templateTypeParmDecl;
/// Matches template template parameter declarations.
///
/// Given
/// \code
/// template <template <typename> class Z, int N> struct C {};
/// \endcode
/// templateTemplateParmDecl()
/// matches 'Z', but not 'N'.
extern const internal::VariadicDynCastAllOfMatcher<Decl,
TemplateTemplateParmDecl>
templateTemplateParmDecl;
/// Matches C++ declarations that are public, and C++ base specifiers that
/// specify public inheritance.
///
/// Examples:
/// \code
/// class C {
/// public: int a; // fieldDecl(isPublic()) matches 'a'
/// protected: int b;
/// private: int c;
/// };
/// \endcode
///
/// \code
/// class Base {};
/// class Derived1 : public Base {}; // matches 'Base'
/// struct Derived2 : Base {}; // matches 'Base'
/// \endcode
AST_POLYMORPHIC_MATCHER(isPublic,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(Decl,
                                                        CXXBaseSpecifier)) {
  const AccessSpecifier Access = getAccessSpecifier(Node);
  return Access == AS_public;
}
/// Matches C++ declarations that are protected, and C++ base specifiers that
/// specify protected inheritance.
///
/// Examples:
/// \code
/// class C {
/// public: int a;
/// protected: int b; // fieldDecl(isProtected()) matches 'b'
/// private: int c;
/// };
/// \endcode
///
/// \code
/// class Base {};
/// class Derived : protected Base {}; // matches 'Base'
/// \endcode
AST_POLYMORPHIC_MATCHER(isProtected,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(Decl,
                                                        CXXBaseSpecifier)) {
  const AccessSpecifier Access = getAccessSpecifier(Node);
  return Access == AS_protected;
}
/// Matches C++ declarations that are private, and C++ base specifiers that
/// specify private inheritance.
///
/// Examples:
/// \code
/// class C {
/// public: int a;
/// protected: int b;
/// private: int c; // fieldDecl(isPrivate()) matches 'c'
/// };
/// \endcode
///
/// \code
/// struct Base {};
/// struct Derived1 : private Base {}; // matches 'Base'
/// class Derived2 : Base {}; // matches 'Base'
/// \endcode
AST_POLYMORPHIC_MATCHER(isPrivate,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(Decl,
                                                        CXXBaseSpecifier)) {
  const AccessSpecifier Access = getAccessSpecifier(Node);
  return Access == AS_private;
}
/// Matches non-static data members that are bit-fields.
///
/// Given
/// \code
/// class C {
/// int a : 2;
/// int b;
/// };
/// \endcode
/// fieldDecl(isBitField())
/// matches 'int a;' but not 'int b;'.
///
/// See also hasBitWidth() to additionally constrain the bit width.
AST_MATCHER(FieldDecl, isBitField) {
return Node.isBitField();
}
/// Matches non-static data members that are bit-fields of the specified
/// bit width.
///
/// Given
/// \code
/// class C {
/// int a : 2;
/// int b : 4;
/// int c : 2;
/// };
/// \endcode
/// fieldDecl(hasBitWidth(2))
/// matches 'int a;' and 'int c;' but not 'int b;'.
AST_MATCHER_P(FieldDecl, hasBitWidth, unsigned, Width) {
  // Non-bit-field members can never match, regardless of Width.
  if (!Node.isBitField())
    return false;
  return Node.getBitWidthValue(Finder->getASTContext()) == Width;
}
/// Matches non-static data members that have an in-class initializer.
///
/// Given
/// \code
/// class C {
/// int a = 2;
/// int b = 3;
/// int c;
/// };
/// \endcode
/// fieldDecl(hasInClassInitializer(integerLiteral(equals(2))))
/// matches 'int a;' but not 'int b;'.
/// fieldDecl(hasInClassInitializer(anything()))
/// matches 'int a;' and 'int b;' but not 'int c;'.
AST_MATCHER_P(FieldDecl, hasInClassInitializer, internal::Matcher<Expr>,
              InnerMatcher) {
  const Expr *Init = Node.getInClassInitializer();
  if (Init == nullptr)
    return false;
  return InnerMatcher.matches(*Init, Finder, Builder);
}
/// Determines whether the function is "main", which is the entry point
/// into an executable program.
///
/// Given
/// \code
/// int main() {}
/// void f();
/// \endcode
/// functionDecl(isMain())
/// matches 'int main() {}' but not 'void f();'.
AST_MATCHER(FunctionDecl, isMain) {
return Node.isMain();
}
/// Matches the specialized template of a specialization declaration.
///
/// Given
/// \code
/// template<typename T> class A {}; #1
/// template<> class A<int> {}; #2
/// \endcode
/// classTemplateSpecializationDecl(hasSpecializedTemplate(classTemplateDecl()))
/// matches '#2' with classTemplateDecl() matching the class template
/// declaration of 'A' at #1.
AST_MATCHER_P(ClassTemplateSpecializationDecl, hasSpecializedTemplate,
              internal::Matcher<ClassTemplateDecl>, InnerMatcher) {
  const ClassTemplateDecl *Specialized = Node.getSpecializedTemplate();
  if (Specialized == nullptr)
    return false;
  return InnerMatcher.matches(*Specialized, Finder, Builder);
}
/// Matches an entity that has been implicitly added by the compiler (e.g.
/// implicit default/copy constructors).
///
/// Usable on declarations, attributes, and lambda captures (see the
/// supported types below).
AST_POLYMORPHIC_MATCHER(isImplicit,
AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, Attr,
LambdaCapture)) {
return Node.isImplicit();
}
/// Matches classTemplateSpecializations, templateSpecializationType and
/// functionDecl that have at least one TemplateArgument matching the given
/// InnerMatcher.
///
/// Given
/// \code
/// template<typename T> class A {};
/// template<> class A<double> {};
/// A<int> a;
///
/// template<typename T> void f() {};
/// void func() { f<int>(); };
/// \endcode
///
/// classTemplateSpecializationDecl(hasAnyTemplateArgument(
/// refersToType(asString("int"))))
/// matches the specialization \c A<int>
///
/// functionDecl(hasAnyTemplateArgument(refersToType(asString("int"))))
/// matches the specialization \c f<int>
AST_POLYMORPHIC_MATCHER_P(
hasAnyTemplateArgument,
AST_POLYMORPHIC_SUPPORTED_TYPES(ClassTemplateSpecializationDecl,
TemplateSpecializationType,
FunctionDecl),
internal::Matcher<TemplateArgument>, InnerMatcher) {
ArrayRef<TemplateArgument> List =
internal::getTemplateSpecializationArgs(Node);
return matchesFirstInRange(InnerMatcher, List.begin(), List.end(), Finder,
Builder) != List.end();
}
/// Causes all nested matchers to be matched with the specified traversal kind.
///
/// Given
/// \code
/// void foo()
/// {
/// int i = 3.0;
/// }
/// \endcode
/// The matcher
/// \code
/// traverse(TK_IgnoreUnlessSpelledInSource,
/// varDecl(hasInitializer(floatLiteral().bind("init")))
/// )
/// \endcode
/// matches the variable declaration with "init" bound to the "3.0".
template <typename T>
internal::Matcher<T> traverse(TraversalKind TK,
                              const internal::Matcher<T> &InnerMatcher) {
  // Wrap the inner matcher so it runs under TK, keeping the inner matcher's
  // node-kind restriction (getID().first) and static type.
  auto *Wrapped = new internal::TraversalMatcher<T>(TK, InnerMatcher);
  return internal::DynTypedMatcher::constructRestrictedWrapper(
             Wrapped, InnerMatcher.getID().first)
      .template unconditionalConvertTo<T>();
}
/// Overload of \c traverse for bindable matchers; the result stays bindable.
template <typename T>
internal::BindableMatcher<T>
traverse(TraversalKind TK, const internal::BindableMatcher<T> &InnerMatcher) {
  internal::Matcher<T> Wrapped =
      internal::DynTypedMatcher::constructRestrictedWrapper(
          new internal::TraversalMatcher<T>(TK, InnerMatcher),
          InnerMatcher.getID().first)
          .template unconditionalConvertTo<T>();
  return internal::BindableMatcher<T>(Wrapped);
}
/// Overload of \c traverse for \c VariadicOperatorMatcher expressions; the
/// traversal kind is stored in a wrapper and applied when the expression is
/// later converted to a typed matcher.
template <typename... T>
internal::TraversalWrapper<internal::VariadicOperatorMatcher<T...>>
traverse(TraversalKind TK,
const internal::VariadicOperatorMatcher<T...> &InnerMatcher) {
return internal::TraversalWrapper<internal::VariadicOperatorMatcher<T...>>(
TK, InnerMatcher);
}
/// Overload of \c traverse for \c ArgumentAdaptingMatcherFuncAdaptor
/// expressions; wraps the adaptor together with the traversal kind.
template <template <typename ToArg, typename FromArg> class ArgumentAdapterT,
typename T, typename ToTypes>
internal::TraversalWrapper<
internal::ArgumentAdaptingMatcherFuncAdaptor<ArgumentAdapterT, T, ToTypes>>
traverse(TraversalKind TK, const internal::ArgumentAdaptingMatcherFuncAdaptor<
ArgumentAdapterT, T, ToTypes> &InnerMatcher) {
return internal::TraversalWrapper<
internal::ArgumentAdaptingMatcherFuncAdaptor<ArgumentAdapterT, T,
ToTypes>>(TK, InnerMatcher);
}
/// Overload of \c traverse for \c PolymorphicMatcher expressions; wraps the
/// polymorphic matcher together with the traversal kind.
template <template <typename T, typename... P> class MatcherT, typename... P,
typename ReturnTypesF>
internal::TraversalWrapper<
internal::PolymorphicMatcher<MatcherT, ReturnTypesF, P...>>
traverse(TraversalKind TK,
const internal::PolymorphicMatcher<MatcherT, ReturnTypesF, P...>
&InnerMatcher) {
return internal::TraversalWrapper<
internal::PolymorphicMatcher<MatcherT, ReturnTypesF, P...>>(TK,
InnerMatcher);
}
/// Overload of \c traverse for \c MapAnyOfHelper expressions; forwards to
/// traverse(TK, InnerMatcher.with()).
template <typename... T>
internal::Matcher<typename internal::GetClade<T...>::Type>
traverse(TraversalKind TK, const internal::MapAnyOfHelper<T...> &InnerMatcher) {
return traverse(TK, InnerMatcher.with());
}
/// Matches expressions that match InnerMatcher after any implicit AST
/// nodes are stripped off.
///
/// Parentheses and explicit casts are not discarded.
/// Given
/// \code
/// class C {};
/// C a = C();
/// C b;
/// C c = b;
/// \endcode
/// The matchers
/// \code
/// varDecl(hasInitializer(ignoringImplicit(cxxConstructExpr())))
/// \endcode
/// would match the declarations for a, b, and c.
/// While
/// \code
/// varDecl(hasInitializer(cxxConstructExpr()))
/// \endcode
/// only match the declarations for b and c.
AST_MATCHER_P(Expr, ignoringImplicit, internal::Matcher<Expr>,
              InnerMatcher) {
  const Expr *Stripped = Node.IgnoreImplicit();
  return InnerMatcher.matches(*Stripped, Finder, Builder);
}
/// Matches expressions that match InnerMatcher after any implicit casts
/// are stripped off.
///
/// Parentheses and explicit casts are not discarded.
/// Given
/// \code
/// int arr[5];
/// int a = 0;
/// char b = 0;
/// const int c = a;
/// int *d = arr;
/// long e = (long) 0l;
/// \endcode
/// The matchers
/// \code
/// varDecl(hasInitializer(ignoringImpCasts(integerLiteral())))
/// varDecl(hasInitializer(ignoringImpCasts(declRefExpr())))
/// \endcode
/// would match the declarations for a, b, c, and d, but not e.
/// While
/// \code
/// varDecl(hasInitializer(integerLiteral()))
/// varDecl(hasInitializer(declRefExpr()))
/// \endcode
/// only match the declarations for a.
AST_MATCHER_P(Expr, ignoringImpCasts,
              internal::Matcher<Expr>, InnerMatcher) {
  const Expr *Stripped = Node.IgnoreImpCasts();
  return InnerMatcher.matches(*Stripped, Finder, Builder);
}
/// Matches expressions that match InnerMatcher after parentheses and
/// casts are stripped off.
///
/// Implicit and non-C Style casts are also discarded.
/// Given
/// \code
/// int a = 0;
/// char b = (0);
/// void* c = reinterpret_cast<char*>(0);
/// char d = char(0);
/// \endcode
/// The matcher
/// varDecl(hasInitializer(ignoringParenCasts(integerLiteral())))
/// would match the declarations for a, b, c, and d.
/// while
/// varDecl(hasInitializer(integerLiteral()))
/// only match the declaration for a.
AST_MATCHER_P(Expr, ignoringParenCasts, internal::Matcher<Expr>, InnerMatcher) {
  const Expr *Stripped = Node.IgnoreParenCasts();
  return InnerMatcher.matches(*Stripped, Finder, Builder);
}
/// Matches expressions that match InnerMatcher after implicit casts and
/// parentheses are stripped off.
///
/// Explicit casts are not discarded.
/// Given
/// \code
/// int arr[5];
/// int a = 0;
/// char b = (0);
/// const int c = a;
/// int *d = (arr);
/// long e = ((long) 0l);
/// \endcode
/// The matchers
/// varDecl(hasInitializer(ignoringParenImpCasts(integerLiteral())))
/// varDecl(hasInitializer(ignoringParenImpCasts(declRefExpr())))
/// would match the declarations for a, b, c, and d, but not e.
/// while
/// varDecl(hasInitializer(integerLiteral()))
/// varDecl(hasInitializer(declRefExpr()))
/// would only match the declaration for a.
AST_MATCHER_P(Expr, ignoringParenImpCasts,
              internal::Matcher<Expr>, InnerMatcher) {
  const Expr *Stripped = Node.IgnoreParenImpCasts();
  return InnerMatcher.matches(*Stripped, Finder, Builder);
}
/// Matches types that match InnerMatcher after any parens are stripped.
///
/// Given
/// \code
/// void (*fp)(void);
/// \endcode
/// The matcher
/// \code
/// varDecl(hasType(pointerType(pointee(ignoringParens(functionType())))))
/// \endcode
/// would match the declaration for fp.
AST_MATCHER_P_OVERLOAD(QualType, ignoringParens, internal::Matcher<QualType>,
                       InnerMatcher, 0) {
  const QualType Stripped = Node.IgnoreParens();
  return InnerMatcher.matches(Stripped, Finder, Builder);
}
/// Overload \c ignoringParens for \c Expr.
///
/// Given
/// \code
/// const char* str = ("my-string");
/// \endcode
/// The matcher
/// \code
/// implicitCastExpr(hasSourceExpression(ignoringParens(stringLiteral())))
/// \endcode
/// would match the implicit cast resulting from the assignment.
AST_MATCHER_P_OVERLOAD(Expr, ignoringParens, internal::Matcher<Expr>,
                       InnerMatcher, 1) {
  return InnerMatcher.matches(*Node.IgnoreParens(), Finder, Builder);
}
/// Matches expressions that are instantiation-dependent even if it is
/// neither type- nor value-dependent.
///
/// In the following example, the expression sizeof(sizeof(T() + T()))
/// is instantiation-dependent (since it involves a template parameter T),
/// but is neither type- nor value-dependent, since the type of the inner
/// sizeof is known (std::size_t) and therefore the size of the outer
/// sizeof is known.
/// \code
/// template<typename T>
/// void f(T x, T y) { sizeof(sizeof(T() + T())); }
/// \endcode
/// expr(isInstantiationDependent()) matches sizeof(sizeof(T() + T()))
AST_MATCHER(Expr, isInstantiationDependent) {
return Node.isInstantiationDependent();
}
/// Matches expressions that are type-dependent because the template type
/// is not yet instantiated.
///
/// For example, the expressions "x" and "x + y" are type-dependent in
/// the following code, but "y" is not type-dependent:
/// \code
/// template<typename T>
/// void add(T x, int y) {
/// x + y;
/// }
/// \endcode
/// expr(isTypeDependent()) matches x + y
///
/// See also isValueDependent() and isInstantiationDependent().
AST_MATCHER(Expr, isTypeDependent) { return Node.isTypeDependent(); }
/// Matches expression that are value-dependent because they contain a
/// non-type template parameter.
///
/// For example, the array bound of "Chars" in the following example is
/// value-dependent.
/// \code
/// template<int Size> int f() { return Size; }
/// \endcode
/// expr(isValueDependent()) matches return Size
///
/// See also isTypeDependent() and isInstantiationDependent().
AST_MATCHER(Expr, isValueDependent) { return Node.isValueDependent(); }
/// Matches classTemplateSpecializations, templateSpecializationType and
/// functionDecl where the n'th TemplateArgument matches the given InnerMatcher.
///
/// Given
/// \code
///   template<typename T, typename U> class A {};
///   A<bool, int> b;
///   A<int, bool> c;
///
///   template<typename T> void f() {}
///   void func() { f<int>(); };
/// \endcode
/// classTemplateSpecializationDecl(hasTemplateArgument(
///     1, refersToType(asString("int"))))
///   matches the specialization \c A<bool, int>
///
/// functionDecl(hasTemplateArgument(0, refersToType(asString("int"))))
///   matches the specialization \c f<int>
AST_POLYMORPHIC_MATCHER_P2(
    hasTemplateArgument,
    AST_POLYMORPHIC_SUPPORTED_TYPES(ClassTemplateSpecializationDecl,
                                    TemplateSpecializationType, FunctionDecl),
    unsigned, N, internal::Matcher<TemplateArgument>, InnerMatcher) {
  ArrayRef<TemplateArgument> Args =
      internal::getTemplateSpecializationArgs(Node);
  // Out-of-range indices never match.
  return N < Args.size() && InnerMatcher.matches(Args[N], Finder, Builder);
}
/// Matches if the number of template arguments equals \p N.
///
/// Given
/// \code
///   template<typename T> struct C {};
///   C<int> c;
/// \endcode
/// classTemplateSpecializationDecl(templateArgumentCountIs(1))
///   matches C<int>.
AST_POLYMORPHIC_MATCHER_P(
    templateArgumentCountIs,
    AST_POLYMORPHIC_SUPPORTED_TYPES(ClassTemplateSpecializationDecl,
                                    TemplateSpecializationType),
    unsigned, N) {
  ArrayRef<TemplateArgument> Args =
      internal::getTemplateSpecializationArgs(Node);
  return Args.size() == N;
}
/// Matches a TemplateArgument that refers to a certain type.
///
/// Given
/// \code
///   struct X {};
///   template<typename T> struct A {};
///   A<X> a;
/// \endcode
/// classTemplateSpecializationDecl(hasAnyTemplateArgument(
///     refersToType(recordDecl(hasName("X")))))
///   matches the specialization \c A<X>
AST_MATCHER_P(TemplateArgument, refersToType,
              internal::Matcher<QualType>, InnerMatcher) {
  // Only type arguments carry a QualType to hand to the inner matcher.
  if (Node.getKind() == TemplateArgument::Type)
    return InnerMatcher.matches(Node.getAsType(), Finder, Builder);
  return false;
}
/// Matches a TemplateArgument that refers to a certain template.
///
/// Given
/// \code
///   template<template <typename> class S> class X {};
///   template<typename T> class Y {};
///   X<Y> xi;
/// \endcode
/// classTemplateSpecializationDecl(hasAnyTemplateArgument(
///     refersToTemplate(templateName())))
///   matches the specialization \c X<Y>
AST_MATCHER_P(TemplateArgument, refersToTemplate,
              internal::Matcher<TemplateName>, InnerMatcher) {
  // Only template-template arguments carry a TemplateName.
  if (Node.getKind() == TemplateArgument::Template)
    return InnerMatcher.matches(Node.getAsTemplate(), Finder, Builder);
  return false;
}
/// Matches a canonical TemplateArgument that refers to a certain
/// declaration.
///
/// Given
/// \code
///   struct B { int next; };
///   template<int(B::*next_ptr)> struct A {};
///   A<&B::next> a;
/// \endcode
/// classTemplateSpecializationDecl(hasAnyTemplateArgument(
///     refersToDeclaration(fieldDecl(hasName("next")))))
///   matches the specialization \c A<&B::next> with \c fieldDecl(...) matching
///     \c B::next
AST_MATCHER_P(TemplateArgument, refersToDeclaration,
              internal::Matcher<Decl>, InnerMatcher) {
  // Non-declaration arguments cannot match.
  if (Node.getKind() != TemplateArgument::Declaration)
    return false;
  return InnerMatcher.matches(*Node.getAsDecl(), Finder, Builder);
}
/// Matches a sugar TemplateArgument that refers to a certain expression.
///
/// Given
/// \code
///   struct B { int next; };
///   template<int(B::*next_ptr)> struct A {};
///   A<&B::next> a;
/// \endcode
/// templateSpecializationType(hasAnyTemplateArgument(
///   isExpr(hasDescendant(declRefExpr(to(fieldDecl(hasName("next"))))))))
///   matches the specialization \c A<&B::next> with \c fieldDecl(...) matching
///     \c B::next
AST_MATCHER_P(TemplateArgument, isExpr, internal::Matcher<Expr>, InnerMatcher) {
  // Only expression arguments carry an Expr to hand to the inner matcher.
  if (Node.getKind() != TemplateArgument::Expression)
    return false;
  return InnerMatcher.matches(*Node.getAsExpr(), Finder, Builder);
}
/// Matches a TemplateArgument that is an integral value.
///
/// Given
/// \code
///   template<int T> struct C {};
///   C<42> c;
/// \endcode
/// classTemplateSpecializationDecl(
///   hasAnyTemplateArgument(isIntegral()))
///   matches the implicit instantiation of C in C<42>
///   with isIntegral() matching 42.
///
/// Use refersToIntegralType() or equalsIntegralValue() to further constrain
/// the argument's type or value.
AST_MATCHER(TemplateArgument, isIntegral) {
  return Node.getKind() == TemplateArgument::Integral;
}
/// Matches a TemplateArgument that refers to an integral type.
///
/// Given
/// \code
///   template<int T> struct C {};
///   C<42> c;
/// \endcode
/// classTemplateSpecializationDecl(
///   hasAnyTemplateArgument(refersToIntegralType(asString("int"))))
///   matches the implicit instantiation of C in C<42>.
AST_MATCHER_P(TemplateArgument, refersToIntegralType,
              internal::Matcher<QualType>, InnerMatcher) {
  // Only integral arguments expose an integral type to match against.
  if (Node.getKind() == TemplateArgument::Integral)
    return InnerMatcher.matches(Node.getIntegralType(), Finder, Builder);
  return false;
}
/// Matches a TemplateArgument of integral type with a given value.
///
/// Note that 'Value' is a string as the template argument's value is
/// an arbitrary precision integer. 'Value' must be equal to the canonical
/// representation of that integral value in base 10.
///
/// Given
/// \code
///   template<int T> struct C {};
///   C<42> c;
/// \endcode
/// classTemplateSpecializationDecl(
///   hasAnyTemplateArgument(equalsIntegralValue("42")))
///   matches the implicit instantiation of C in C<42>.
AST_MATCHER_P(TemplateArgument, equalsIntegralValue,
              std::string, Value) {
  if (Node.getKind() != TemplateArgument::Integral)
    return false;
  // Compare against the canonical base-10 rendering of the APSInt value.
  return toString(Node.getAsIntegral(), 10) == Value;
}
/// Matches an Objective-C autorelease pool statement.
///
/// Given
/// \code
/// @autoreleasepool {
/// int x = 0;
/// }
/// \endcode
/// autoreleasePoolStmt(stmt()) matches the declaration of "x"
/// inside the autorelease pool.
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
ObjCAutoreleasePoolStmt> autoreleasePoolStmt;
/// Matches any value declaration.
///
/// Example matches A, B, C and F
/// \code
/// enum X { A, B, C };
/// void F();
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ValueDecl> valueDecl;
/// Matches C++ constructor declarations.
///
/// Example matches Foo::Foo() and Foo::Foo(int)
/// \code
/// class Foo {
/// public:
/// Foo();
/// Foo(int);
/// int DoSomething();
/// };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXConstructorDecl>
cxxConstructorDecl;
/// Matches explicit C++ destructor declarations.
///
/// Example matches Foo::~Foo()
/// \code
/// class Foo {
/// public:
/// virtual ~Foo();
/// };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXDestructorDecl>
cxxDestructorDecl;
/// Matches enum declarations.
///
/// Example matches X
/// \code
/// enum X {
/// A, B, C
/// };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, EnumDecl> enumDecl;
/// Matches enum constants.
///
/// Example matches A, B, C
/// \code
/// enum X {
/// A, B, C
/// };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, EnumConstantDecl>
enumConstantDecl;
/// Matches tag declarations.
///
/// Example matches X, Z, U, S, E
/// \code
/// class X;
/// template<class T> class Z {};
/// struct S {};
/// union U {};
/// enum E {
/// A, B, C
/// };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, TagDecl> tagDecl;
/// Matches method declarations.
///
/// Example matches y
/// \code
/// class X { void y(); };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXMethodDecl>
cxxMethodDecl;
/// Matches conversion operator declarations.
///
/// Example matches the operator.
/// \code
/// class X { operator int() const; };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXConversionDecl>
cxxConversionDecl;
/// Matches user-defined and implicitly generated deduction guide.
///
/// Example matches the deduction guide.
/// \code
/// template<typename T>
/// class X { X(int); };
/// X(int) -> X<int>;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXDeductionGuideDecl>
cxxDeductionGuideDecl;
/// Matches variable declarations.
///
/// Note: this does not match declarations of member variables, which are
/// "field" declarations in Clang parlance.
///
/// Example matches a
/// \code
/// int a;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, VarDecl> varDecl;
/// Matches field declarations.
///
/// Given
/// \code
/// class X { int m; };
/// \endcode
/// fieldDecl()
/// matches 'm'.
extern const internal::VariadicDynCastAllOfMatcher<Decl, FieldDecl> fieldDecl;
/// Matches indirect field declarations.
///
/// Given
/// \code
/// struct X { struct { int a; }; };
/// \endcode
/// indirectFieldDecl()
/// matches 'a'.
extern const internal::VariadicDynCastAllOfMatcher<Decl, IndirectFieldDecl>
indirectFieldDecl;
/// Matches function declarations.
///
/// Example matches f
/// \code
/// void f();
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, FunctionDecl>
functionDecl;
/// Matches C++ function template declarations.
///
/// Example matches f
/// \code
/// template<class T> void f(T t) {}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, FunctionTemplateDecl>
functionTemplateDecl;
/// Matches friend declarations.
///
/// Given
/// \code
/// class X { friend void foo(); };
/// \endcode
/// friendDecl()
/// matches 'friend void foo()'.
extern const internal::VariadicDynCastAllOfMatcher<Decl, FriendDecl> friendDecl;
/// Matches statements.
///
/// Given
/// \code
/// { ++a; }
/// \endcode
/// stmt()
/// matches both the compound statement '{ ++a; }' and '++a'.
extern const internal::VariadicAllOfMatcher<Stmt> stmt;
/// Matches declaration statements.
///
/// Given
/// \code
/// int a;
/// \endcode
/// declStmt()
/// matches 'int a'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, DeclStmt> declStmt;
/// Matches member expressions.
///
/// Given
/// \code
/// class Y {
/// void x() { this->x(); x(); Y y; y.x(); a; this->b; Y::b; }
/// int a; static int b;
/// };
/// \endcode
/// memberExpr()
/// matches this->x, x, y.x, a, this->b
extern const internal::VariadicDynCastAllOfMatcher<Stmt, MemberExpr> memberExpr;
/// Matches unresolved member expressions.
///
/// Given
/// \code
/// struct X {
/// template <class T> void f();
/// void g();
/// };
/// template <class T> void h() { X x; x.f<T>(); x.g(); }
/// \endcode
/// unresolvedMemberExpr()
/// matches x.f<T>
extern const internal::VariadicDynCastAllOfMatcher<Stmt, UnresolvedMemberExpr>
unresolvedMemberExpr;
/// Matches member expressions where the actual member referenced could not be
/// resolved because the base expression or the member name was dependent.
///
/// Given
/// \code
/// template <class T> void f() { T t; t.g(); }
/// \endcode
/// cxxDependentScopeMemberExpr()
/// matches t.g
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
CXXDependentScopeMemberExpr>
cxxDependentScopeMemberExpr;
/// Matches call expressions.
///
/// Example matches x.y() and y()
/// \code
/// X x;
/// x.y();
/// y();
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CallExpr> callExpr;
/// Matches call expressions which were resolved using ADL.
///
/// Example matches y(x) but not y(42) or NS::y(x).
/// \code
///   namespace NS {
///     struct X {};
///     void y(X);
///   }
///
///   void y(...);
///
///   void test() {
///     NS::X x;
///     y(x); // Matches
///     NS::y(x); // Doesn't match
///     y(42); // Doesn't match
///     using NS::y;
///     y(x); // Found by both unqualified lookup and ADL, doesn't match
///   }
/// \endcode
AST_MATCHER(CallExpr, usesADL) { return Node.usesADL(); }
/// Matches lambda expressions.
///
/// Example matches [&](){return 5;}
/// \code
/// [&](){return 5;}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, LambdaExpr> lambdaExpr;
/// Matches member call expressions.
///
/// Example matches x.y()
/// \code
/// X x;
/// x.y();
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXMemberCallExpr>
cxxMemberCallExpr;
/// Matches ObjectiveC Message invocation expressions.
///
/// The innermost message send invokes the "alloc" class method on the
/// NSString class, while the outermost message send invokes the
/// "initWithString" instance method on the object returned from
/// NSString's "alloc". This matcher should match both message sends.
/// \code
/// [[NSString alloc] initWithString:@"Hello"]
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCMessageExpr>
objcMessageExpr;
/// Matches Objective-C interface declarations.
///
/// Example matches Foo
/// \code
/// @interface Foo
/// @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCInterfaceDecl>
objcInterfaceDecl;
/// Matches Objective-C implementation declarations.
///
/// Example matches Foo
/// \code
/// @implementation Foo
/// @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCImplementationDecl>
objcImplementationDecl;
/// Matches Objective-C protocol declarations.
///
/// Example matches FooDelegate
/// \code
/// @protocol FooDelegate
/// @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCProtocolDecl>
objcProtocolDecl;
/// Matches Objective-C category declarations.
///
/// Example matches Foo (Additions)
/// \code
/// @interface Foo (Additions)
/// @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCCategoryDecl>
objcCategoryDecl;
/// Matches Objective-C category definitions.
///
/// Example matches Foo (Additions)
/// \code
/// @implementation Foo (Additions)
/// @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCCategoryImplDecl>
objcCategoryImplDecl;
/// Matches Objective-C method declarations.
///
/// Example matches both declaration and definition of -[Foo method]
/// \code
/// @interface Foo
/// - (void)method;
/// @end
///
/// @implementation Foo
/// - (void)method {}
/// @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCMethodDecl>
objcMethodDecl;
/// Matches block declarations.
///
/// Example matches the declaration of the nameless block printing an input
/// integer.
///
/// \code
/// myFunc(^(int p) {
/// printf("%d", p);
/// })
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, BlockDecl>
blockDecl;
/// Matches Objective-C instance variable declarations.
///
/// Example matches _enabled
/// \code
/// @implementation Foo {
/// BOOL _enabled;
/// }
/// @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCIvarDecl>
objcIvarDecl;
/// Matches Objective-C property declarations.
///
/// Example matches enabled
/// \code
/// @interface Foo
/// @property BOOL enabled;
/// @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCPropertyDecl>
objcPropertyDecl;
/// Matches Objective-C \@throw statements.
///
/// Example matches \@throw
/// \code
/// @throw obj;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtThrowStmt>
objcThrowStmt;
/// Matches Objective-C @try statements.
///
/// Example matches @try
/// \code
/// @try {}
/// @catch (...) {}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtTryStmt>
objcTryStmt;
/// Matches Objective-C @catch statements.
///
/// Example matches @catch
/// \code
/// @try {}
/// @catch (...) {}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtCatchStmt>
objcCatchStmt;
/// Matches Objective-C @finally statements.
///
/// Example matches @finally
/// \code
/// @try {}
/// @finally {}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtFinallyStmt>
objcFinallyStmt;
/// Matches expressions that introduce cleanups to be run at the end
/// of the sub-expression's evaluation.
///
/// Example matches std::string()
/// \code
/// const std::string str = std::string();
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ExprWithCleanups>
exprWithCleanups;
/// Matches init list expressions.
///
/// Given
/// \code
/// int a[] = { 1, 2 };
/// struct B { int x, y; };
/// B b = { 5, 6 };
/// \endcode
/// initListExpr()
/// matches "{ 1, 2 }" and "{ 5, 6 }"
extern const internal::VariadicDynCastAllOfMatcher<Stmt, InitListExpr>
initListExpr;
/// Matches the syntactic form of init list expressions
/// (if the expression has one).
AST_MATCHER_P(InitListExpr, hasSyntacticForm,
              internal::Matcher<Expr>, InnerMatcher) {
  // Semantic-only init lists have no syntactic form and never match.
  const Expr *Syntactic = Node.getSyntacticForm();
  if (!Syntactic)
    return false;
  return InnerMatcher.matches(*Syntactic, Finder, Builder);
}
/// Matches C++ initializer list expressions.
///
/// Given
/// \code
/// std::vector<int> a({ 1, 2, 3 });
/// std::vector<int> b = { 4, 5 };
/// int c[] = { 6, 7 };
/// std::pair<int, int> d = { 8, 9 };
/// \endcode
/// cxxStdInitializerListExpr()
/// matches "{ 1, 2, 3 }" and "{ 4, 5 }"
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
CXXStdInitializerListExpr>
cxxStdInitializerListExpr;
/// Matches implicit initializers of init list expressions.
///
/// Given
/// \code
/// point ptarray[10] = { [2].y = 1.0, [2].x = 2.0, [0].x = 1.0 };
/// \endcode
/// implicitValueInitExpr()
/// matches "[0].y" (implicitly)
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ImplicitValueInitExpr>
implicitValueInitExpr;
/// Matches paren list expressions.
/// ParenListExprs don't have a predefined type and are used for late parsing.
/// In the final AST, they can be met in template declarations.
///
/// Given
/// \code
/// template<typename T> class X {
/// void f() {
/// X x(*this);
/// int a = 0, b = 1; int i = (a, b);
/// }
/// };
/// \endcode
/// parenListExpr() matches "*this" but NOT matches (a, b) because (a, b)
/// has a predefined type and is a ParenExpr, not a ParenListExpr.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ParenListExpr>
parenListExpr;
/// Matches substitutions of non-type template parameters.
///
/// Given
/// \code
/// template <int N>
/// struct A { static const int n = N; };
/// struct B : public A<42> {};
/// \endcode
/// substNonTypeTemplateParmExpr()
/// matches "N" in the right-hand side of "static const int n = N;"
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
SubstNonTypeTemplateParmExpr>
substNonTypeTemplateParmExpr;
/// Matches using declarations.
///
/// Given
/// \code
/// namespace X { int x; }
/// using X::x;
/// \endcode
/// usingDecl()
/// matches \code using X::x \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, UsingDecl> usingDecl;
/// Matches using-enum declarations.
///
/// Given
/// \code
/// namespace X { enum x {...}; }
/// using enum X::x;
/// \endcode
/// usingEnumDecl()
/// matches \code using enum X::x \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, UsingEnumDecl>
usingEnumDecl;
/// Matches using namespace declarations.
///
/// Given
/// \code
/// namespace X { int x; }
/// using namespace X;
/// \endcode
/// usingDirectiveDecl()
/// matches \code using namespace X \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, UsingDirectiveDecl>
usingDirectiveDecl;
/// Matches reference to a name that can be looked up during parsing
/// but could not be resolved to a specific declaration.
///
/// Given
/// \code
/// template<typename T>
/// T foo() { T a; return a; }
/// template<typename T>
/// void bar() {
/// foo<T>();
/// }
/// \endcode
/// unresolvedLookupExpr()
/// matches \code foo<T>() \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, UnresolvedLookupExpr>
unresolvedLookupExpr;
/// Matches unresolved using value declarations.
///
/// Given
/// \code
/// template<typename X>
/// class C : private X {
/// using X::x;
/// };
/// \endcode
/// unresolvedUsingValueDecl()
/// matches \code using X::x \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl,
UnresolvedUsingValueDecl>
unresolvedUsingValueDecl;
/// Matches unresolved using value declarations that involve the
/// typename.
///
/// Given
/// \code
/// template <typename T>
/// struct Base { typedef T Foo; };
///
/// template<typename T>
/// struct S : private Base<T> {
/// using typename Base<T>::Foo;
/// };
/// \endcode
/// unresolvedUsingTypenameDecl()
/// matches \code using Base<T>::Foo \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl,
UnresolvedUsingTypenameDecl>
unresolvedUsingTypenameDecl;
/// Matches a constant expression wrapper.
///
/// Example matches the constant in the case statement:
/// (matcher = constantExpr())
/// \code
/// switch (a) {
/// case 37: break;
/// }
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ConstantExpr>
constantExpr;
/// Matches parentheses used in expressions.
///
/// Example matches (foo() + 1)
/// \code
/// int foo() { return 1; }
/// int a = (foo() + 1);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ParenExpr> parenExpr;
/// Matches constructor call expressions (including implicit ones).
///
/// Example matches string(ptr, n) and ptr within arguments of f
/// (matcher = cxxConstructExpr())
/// \code
/// void f(const string &a, const string &b);
/// char *ptr;
/// int n;
/// f(string(ptr, n), ptr);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXConstructExpr>
cxxConstructExpr;
/// Matches unresolved constructor call expressions.
///
/// Example matches T(t) in return statement of f
/// (matcher = cxxUnresolvedConstructExpr())
/// \code
/// template <typename T>
/// void f(const T& t) { return T(t); }
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
CXXUnresolvedConstructExpr>
cxxUnresolvedConstructExpr;
/// Matches implicit and explicit this expressions.
///
/// Example matches the implicit this expression in "return i".
/// (matcher = cxxThisExpr())
/// \code
/// struct foo {
/// int i;
/// int f() { return i; }
/// };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXThisExpr>
cxxThisExpr;
/// Matches nodes where temporaries are created.
///
/// Example matches FunctionTakesString(GetStringByValue())
/// (matcher = cxxBindTemporaryExpr())
/// \code
/// FunctionTakesString(GetStringByValue());
/// FunctionTakesStringByPointer(GetStringPointer());
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXBindTemporaryExpr>
cxxBindTemporaryExpr;
/// Matches nodes where temporaries are materialized.
///
/// Example: Given
/// \code
/// struct T {void func();};
/// T f();
/// void g(T);
/// \endcode
/// materializeTemporaryExpr() matches 'f()' in these statements
/// \code
/// T u(f());
/// g(f());
/// f().func();
/// \endcode
/// but does not match
/// \code
/// f();
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
MaterializeTemporaryExpr>
materializeTemporaryExpr;
/// Matches new expressions.
///
/// Given
/// \code
/// new X;
/// \endcode
/// cxxNewExpr()
/// matches 'new X'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXNewExpr> cxxNewExpr;
/// Matches delete expressions.
///
/// Given
/// \code
/// delete X;
/// \endcode
/// cxxDeleteExpr()
/// matches 'delete X'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXDeleteExpr>
cxxDeleteExpr;
/// Matches noexcept expressions.
///
/// Given
/// \code
/// bool a() noexcept;
/// bool b() noexcept(true);
/// bool c() noexcept(false);
/// bool d() noexcept(noexcept(a()));
/// bool e = noexcept(b()) || noexcept(c());
/// \endcode
/// cxxNoexceptExpr()
/// matches `noexcept(a())`, `noexcept(b())` and `noexcept(c())`.
/// doesn't match the noexcept specifier in the declarations a, b, c or d.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXNoexceptExpr>
cxxNoexceptExpr;
/// Matches array subscript expressions.
///
/// Given
/// \code
/// int i = a[1];
/// \endcode
/// arraySubscriptExpr()
/// matches "a[1]"
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ArraySubscriptExpr>
arraySubscriptExpr;
/// Matches the value of a default argument at the call site.
///
/// Example matches the CXXDefaultArgExpr placeholder inserted for the
/// default value of the second parameter in the call expression f(42)
/// (matcher = cxxDefaultArgExpr())
/// \code
/// void f(int x, int y = 0);
/// f(42);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXDefaultArgExpr>
cxxDefaultArgExpr;
/// Matches overloaded operator calls.
///
/// Note that if an operator isn't overloaded, it won't match. Instead, use
/// binaryOperator matcher.
/// Currently it does not match operators such as new or delete.
/// FIXME: figure out why these do not match?
///
/// Example matches both operator<<((o << b), c) and operator<<(o, b)
/// (matcher = cxxOperatorCallExpr())
/// \code
/// ostream &operator<< (ostream &out, int i) { };
/// ostream &o; int b = 1, c = 1;
/// o << b << c;
/// \endcode
/// See also the binaryOperation() matcher for more-general matching of binary
/// uses of this AST node.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXOperatorCallExpr>
cxxOperatorCallExpr;
/// Matches rewritten binary operators
///
/// Example matches use of "<":
/// \code
/// #include <compare>
/// struct HasSpaceshipMem {
/// int a;
/// constexpr auto operator<=>(const HasSpaceshipMem&) const = default;
/// };
/// void compare() {
/// HasSpaceshipMem hs1, hs2;
/// if (hs1 < hs2)
/// return;
/// }
/// \endcode
/// See also the binaryOperation() matcher for more-general matching
/// of this AST node.
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
CXXRewrittenBinaryOperator>
cxxRewrittenBinaryOperator;
/// Matches expressions.
///
/// Example matches x()
/// \code
/// void f() { x(); }
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, Expr> expr;
/// Matches expressions that refer to declarations.
///
/// Example matches x in if (x)
/// \code
/// bool x;
/// if (x) {}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, DeclRefExpr>
declRefExpr;
/// Matches a reference to an ObjCIvar.
///
/// Example: matches "a" in "init" method:
/// \code
/// @implementation A {
/// NSString *a;
/// }
/// - (void) init {
/// a = @"hello";
/// }
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCIvarRefExpr>
objcIvarRefExpr;
/// Matches a reference to a block.
///
/// Example: matches "^{}":
/// \code
/// void f() { ^{}(); }
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, BlockExpr> blockExpr;
/// Matches if statements.
///
/// Example matches 'if (x) {}'
/// \code
/// if (x) {}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, IfStmt> ifStmt;
/// Matches for statements.
///
/// Example matches 'for (;;) {}'
/// \code
/// for (;;) {}
/// int i[] = {1, 2, 3}; for (auto a : i);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ForStmt> forStmt;
/// Matches the increment statement of a for loop.
///
/// Example:
///     forStmt(hasIncrement(unaryOperator(hasOperatorName("++"))))
/// matches '++x' in
/// \code
///     for (x; x < N; ++x) { }
/// \endcode
AST_MATCHER_P(ForStmt, hasIncrement, internal::Matcher<Stmt>, InnerMatcher) {
  // A for loop may omit its increment entirely; that never matches.
  if (const Stmt *Inc = Node.getInc())
    return InnerMatcher.matches(*Inc, Finder, Builder);
  return false;
}
/// Matches the initialization statement of a for loop.
///
/// Example:
///     forStmt(hasLoopInit(declStmt()))
/// matches 'int x = 0' in
/// \code
///     for (int x = 0; x < N; ++x) { }
/// \endcode
AST_MATCHER_P(ForStmt, hasLoopInit, internal::Matcher<Stmt>, InnerMatcher) {
  // A for loop may omit its init statement entirely; that never matches.
  if (const Stmt *InitStmt = Node.getInit())
    return InnerMatcher.matches(*InitStmt, Finder, Builder);
  return false;
}
/// Matches range-based for statements.
///
/// cxxForRangeStmt() matches 'for (auto a : i)'
/// \code
/// int i[] = {1, 2, 3}; for (auto a : i);
/// for(int j = 0; j < 5; ++j);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXForRangeStmt>
cxxForRangeStmt;
/// Matches the loop variable of a range-based for loop.
///
/// Example:
///     cxxForRangeStmt(hasLoopVariable(anything()))
/// matches 'int x' in
/// \code
///     for (int x : a) { }
/// \endcode
AST_MATCHER_P(CXXForRangeStmt, hasLoopVariable, internal::Matcher<VarDecl>,
              InnerMatcher) {
  const VarDecl *const Var = Node.getLoopVariable();
  return (Var != nullptr && InnerMatcher.matches(*Var, Finder, Builder));
}
/// Matches the range initialization statement of a range-based for loop.
///
/// Example:
///     cxxForRangeStmt(hasRangeInit(anything()))
/// matches 'a' in
/// \code
///     for (int x : a) { }
/// \endcode
AST_MATCHER_P(CXXForRangeStmt, hasRangeInit, internal::Matcher<Expr>,
              InnerMatcher) {
  const Expr *const Init = Node.getRangeInit();
  return (Init != nullptr && InnerMatcher.matches(*Init, Finder, Builder));
}
/// Matches while statements.
///
/// Given
/// \code
/// while (true) {}
/// \endcode
/// whileStmt()
/// matches 'while (true) {}'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, WhileStmt> whileStmt;
/// Matches do statements.
///
/// Given
/// \code
/// do {} while (true);
/// \endcode
/// doStmt()
/// matches 'do {} while(true)'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, DoStmt> doStmt;
/// Matches break statements.
///
/// Given
/// \code
/// while (true) { break; }
/// \endcode
/// breakStmt()
/// matches 'break'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, BreakStmt> breakStmt;
/// Matches continue statements.
///
/// Given
/// \code
/// while (true) { continue; }
/// \endcode
/// continueStmt()
/// matches 'continue'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ContinueStmt>
continueStmt;
/// Matches co_return statements.
///
/// Given
/// \code
/// while (true) { co_return; }
/// \endcode
/// coreturnStmt()
/// matches 'co_return'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CoreturnStmt>
coreturnStmt;
/// Matches return statements.
///
/// Given
/// \code
/// return 1;
/// \endcode
/// returnStmt()
/// matches 'return 1'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ReturnStmt> returnStmt;
/// Matches goto statements.
///
/// Given
/// \code
/// goto FOO;
/// FOO: bar();
/// \endcode
/// gotoStmt()
/// matches 'goto FOO'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, GotoStmt> gotoStmt;
/// Matches label statements.
///
/// Given
/// \code
/// goto FOO;
/// FOO: bar();
/// \endcode
/// labelStmt()
/// matches 'FOO:'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, LabelStmt> labelStmt;
/// Matches address of label statements (GNU extension).
///
/// Given
/// \code
/// FOO: bar();
/// void *ptr = &&FOO;
/// goto *ptr;
/// \endcode
/// addrLabelExpr()
/// matches '&&FOO'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, AddrLabelExpr>
addrLabelExpr;
/// Matches switch statements.
///
/// Given
/// \code
/// switch(a) { case 42: break; default: break; }
/// \endcode
/// switchStmt()
/// matches 'switch(a)'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, SwitchStmt> switchStmt;
/// Matches case and default statements inside switch statements.
///
/// Given
/// \code
/// switch(a) { case 42: break; default: break; }
/// \endcode
/// switchCase()
/// matches 'case 42:' and 'default:'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, SwitchCase> switchCase;
/// Matches case statements inside switch statements.
///
/// Given
/// \code
/// switch(a) { case 42: break; default: break; }
/// \endcode
/// caseStmt()
/// matches 'case 42:'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CaseStmt> caseStmt;
/// Matches default statements inside switch statements.
///
/// Given
/// \code
/// switch(a) { case 42: break; default: break; }
/// \endcode
/// defaultStmt()
/// matches 'default:'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, DefaultStmt>
defaultStmt;
/// Matches compound statements.
///
/// Example matches '{}' and '{{}}' in 'for (;;) {{}}'
/// \code
/// for (;;) {{}}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CompoundStmt>
compoundStmt;
/// Matches catch statements.
///
/// \code
/// try {} catch(int i) {}
/// \endcode
/// cxxCatchStmt()
/// matches 'catch(int i)'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXCatchStmt>
cxxCatchStmt;
/// Matches try statements.
///
/// \code
/// try {} catch(int i) {}
/// \endcode
/// cxxTryStmt()
/// matches 'try {}'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXTryStmt> cxxTryStmt;
/// Matches throw expressions.
///
/// \code
/// try { throw 5; } catch(int i) {}
/// \endcode
/// cxxThrowExpr()
/// matches 'throw 5'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXThrowExpr>
cxxThrowExpr;
/// Matches null statements.
///
/// \code
/// foo();;
/// \endcode
/// nullStmt()
/// matches the second ';'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, NullStmt> nullStmt;
/// Matches asm statements.
///
/// \code
/// int i = 100;
/// __asm("mov al, 2");
/// \endcode
/// asmStmt()
/// matches '__asm("mov al, 2")'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, AsmStmt> asmStmt;
/// Matches bool literals.
///
/// Example matches true
/// \code
/// true
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXBoolLiteralExpr>
cxxBoolLiteral;
/// Matches string literals (also matches wide string literals).
///
/// Example matches "abcd", L"abcd"
/// \code
/// char *s = "abcd";
/// wchar_t *ws = L"abcd";
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, StringLiteral>
stringLiteral;
/// Matches character literals (also matches wchar_t).
///
/// Not matching Hex-encoded chars (e.g. 0x1234, which is a IntegerLiteral),
/// though.
///
/// Example matches 'a', L'a'
/// \code
/// char ch = 'a';
/// wchar_t chw = L'a';
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CharacterLiteral>
characterLiteral;
/// Matches integer literals of all sizes / encodings, e.g.
/// 1, 1L, 0x1 and 1U.
///
/// Does not match character-encoded integers such as L'a'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, IntegerLiteral>
integerLiteral;
/// Matches float literals of all sizes / encodings, e.g.
/// 1.0, 1.0f, 1.0L and 1e10.
///
/// Does not match implicit conversions such as
/// \code
/// float a = 10;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, FloatingLiteral>
floatLiteral;
/// Matches imaginary literals, which are based on integer and floating
/// point literals e.g.: 1i, 1.0i
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ImaginaryLiteral>
imaginaryLiteral;
/// Matches fixed point literals
extern const internal::VariadicDynCastAllOfMatcher<Stmt, FixedPointLiteral>
fixedPointLiteral;
/// Matches user defined literal operator call.
///
/// Example match: "foo"_suffix
extern const internal::VariadicDynCastAllOfMatcher<Stmt, UserDefinedLiteral>
userDefinedLiteral;
/// Matches compound (i.e. non-scalar) literals
///
/// Example match: {1}, (1, 2)
/// \code
/// int array[4] = {1};
/// vector int myvec = (vector int)(1, 2);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CompoundLiteralExpr>
compoundLiteralExpr;
/// Matches co_await expressions.
///
/// Given
/// \code
/// co_await 1;
/// \endcode
/// coawaitExpr()
/// matches 'co_await 1'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CoawaitExpr>
coawaitExpr;
/// Matches co_await expressions where the type of the promise is dependent
extern const internal::VariadicDynCastAllOfMatcher<Stmt, DependentCoawaitExpr>
dependentCoawaitExpr;
/// Matches co_yield expressions.
///
/// Given
/// \code
/// co_yield 1;
/// \endcode
/// coyieldExpr()
/// matches 'co_yield 1'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CoyieldExpr>
coyieldExpr;
/// Matches nullptr literal.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXNullPtrLiteralExpr>
cxxNullPtrLiteralExpr;
/// Matches GNU __builtin_choose_expr.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ChooseExpr>
chooseExpr;
/// Matches GNU __null expression.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, GNUNullExpr>
gnuNullExpr;
/// Matches C11 _Generic expression.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, GenericSelectionExpr>
genericSelectionExpr;
/// Matches atomic builtins.
/// Example matches __atomic_load_n(ptr, 1)
/// \code
/// void foo() { int *ptr; __atomic_load_n(ptr, 1); }
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, AtomicExpr> atomicExpr;
/// Matches statement expression (GNU extension).
///
/// Example match: ({ int X = 4; X; })
/// \code
/// int C = ({ int X = 4; X; });
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, StmtExpr> stmtExpr;
/// Matches binary operator expressions.
///
/// Example matches a || b
/// \code
/// !(a || b)
/// \endcode
/// See also the binaryOperation() matcher for more-general matching.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, BinaryOperator>
binaryOperator;
/// Matches unary operator expressions.
///
/// Example matches !a
/// \code
/// !a || b
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, UnaryOperator>
unaryOperator;
/// Matches conditional operator expressions.
///
/// Example matches a ? b : c
/// \code
/// (a ? b : c) + 42
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ConditionalOperator>
conditionalOperator;
/// Matches binary conditional operator expressions (GNU extension).
///
/// Example matches a ?: b
/// \code
/// (a ?: b) + 42;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
BinaryConditionalOperator>
binaryConditionalOperator;
/// Matches opaque value expressions. They are used as helpers
/// to reference other expressions and can be found inside
/// BinaryConditionalOperators, for example.
///
/// Example matches 'a'
/// \code
/// (a ?: c) + 42;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, OpaqueValueExpr>
opaqueValueExpr;
/// Matches a C++ static_assert declaration.
///
/// Example:
///   staticAssertDecl()
/// matches
/// static_assert(sizeof(S) == sizeof(int))
/// in
/// \code
/// struct S {
/// int x;
/// };
/// static_assert(sizeof(S) == sizeof(int));
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, StaticAssertDecl>
staticAssertDecl;
/// Matches a reinterpret_cast expression.
///
/// Either the source expression or the destination type can be matched
/// using has(), but hasDestinationType() is more specific and can be
/// more readable.
///
/// Example matches reinterpret_cast<char*>(&p) in
/// \code
/// void* p = reinterpret_cast<char*>(&p);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXReinterpretCastExpr>
cxxReinterpretCastExpr;
/// Matches a C++ static_cast expression.
///
/// \see hasDestinationType
/// \see reinterpretCast
///
/// Example:
/// cxxStaticCastExpr()
/// matches
/// static_cast<long>(8)
/// in
/// \code
/// long eight(static_cast<long>(8));
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXStaticCastExpr>
cxxStaticCastExpr;
/// Matches a dynamic_cast expression.
///
/// Example:
/// cxxDynamicCastExpr()
/// matches
/// dynamic_cast<D*>(&b);
/// in
/// \code
/// struct B { virtual ~B() {} }; struct D : B {};
/// B b;
/// D* p = dynamic_cast<D*>(&b);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXDynamicCastExpr>
cxxDynamicCastExpr;
/// Matches a const_cast expression.
///
/// Example: Matches const_cast<int*>(&r) in
/// \code
/// int n = 42;
/// const int &r(n);
/// int* p = const_cast<int*>(&r);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXConstCastExpr>
cxxConstCastExpr;
/// Matches a C-style cast expression.
///
/// Example: Matches (int) 2.2f in
/// \code
/// int i = (int) 2.2f;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CStyleCastExpr>
cStyleCastExpr;
/// Matches explicit cast expressions.
///
/// Matches any cast expression written in user code, whether it be a
/// C-style cast, a functional-style cast, or a keyword cast.
///
/// Does not match implicit conversions.
///
/// Note: the name "explicitCast" is chosen to match Clang's terminology, as
/// Clang uses the term "cast" to apply to implicit conversions as well as to
/// actual cast expressions.
///
/// \see hasDestinationType.
///
/// Example: matches all five of the casts in
/// \code
/// int((int)(reinterpret_cast<int>(static_cast<int>(const_cast<int>(42)))))
/// \endcode
/// but does not match the implicit conversion in
/// \code
/// long ell = 42;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ExplicitCastExpr>
explicitCastExpr;
/// Matches the implicit cast nodes of Clang's AST.
///
/// This matches many different places, including function call return value
/// eliding, as well as any type conversions.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ImplicitCastExpr>
implicitCastExpr;
/// Matches any cast nodes of Clang's AST.
///
/// Example: castExpr() matches each of the following:
/// \code
/// (int) 3;
/// const_cast<Expr *>(SubExpr);
/// char c = 0;
/// \endcode
/// but does not match
/// \code
/// int i = (0);
/// int k = 0;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CastExpr> castExpr;
/// Matches functional cast expressions
///
/// Example: Matches Foo(bar);
/// \code
/// Foo f = bar;
/// Foo g = (Foo) bar;
/// Foo h = Foo(bar);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXFunctionalCastExpr>
cxxFunctionalCastExpr;
/// Matches functional cast expressions having N != 1 arguments
///
/// Example: Matches Foo(bar, bar)
/// \code
/// Foo h = Foo(bar, bar);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXTemporaryObjectExpr>
cxxTemporaryObjectExpr;
/// Matches predefined identifier expressions [C99 6.4.2.2].
///
/// Example: Matches __func__
/// \code
/// printf("%s", __func__);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, PredefinedExpr>
predefinedExpr;
/// Matches C99 designated initializer expressions [C99 6.7.8].
///
/// Example: Matches { [2].y = 1.0, [0].x = 1.0 }
/// \code
/// point ptarray[10] = { [2].y = 1.0, [0].x = 1.0 };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, DesignatedInitExpr>
designatedInitExpr;
/// Matches designated initializer expressions that contain
/// a specific number of designators.
///
/// Example: Given
/// \code
/// point ptarray[10] = { [2].y = 1.0, [0].x = 1.0 };
/// point ptarray2[10] = { [2].y = 1.0, [2].x = 0.0, [0].x = 1.0 };
/// \endcode
/// designatorCountIs(2)
/// matches '{ [2].y = 1.0, [0].x = 1.0 }',
/// but not '{ [2].y = 1.0, [2].x = 0.0, [0].x = 1.0 }'.
AST_MATCHER_P(DesignatedInitExpr, designatorCountIs, unsigned, N) {
  // DesignatedInitExpr::size() yields the number of designators.
  return Node.size() == N;
}
/// Matches \c QualTypes in the clang AST.
extern const internal::VariadicAllOfMatcher<QualType> qualType;
/// Matches \c Types in the clang AST.
extern const internal::VariadicAllOfMatcher<Type> type;
/// Matches \c TypeLocs in the clang AST.
extern const internal::VariadicAllOfMatcher<TypeLoc> typeLoc;
/// Matches if any of the given matchers matches.
///
/// Unlike \c anyOf, \c eachOf will generate a match result for each
/// matching submatcher.
///
/// For example, in:
/// \code
/// class A { int a; int b; };
/// \endcode
/// The matcher:
/// \code
/// cxxRecordDecl(eachOf(has(fieldDecl(hasName("a")).bind("v")),
/// has(fieldDecl(hasName("b")).bind("v"))))
/// \endcode
/// will generate two results binding "v", the first of which binds
/// the field declaration of \c a, the second the field declaration of
/// \c b.
///
/// Usable as: Any Matcher
extern const internal::VariadicOperatorMatcherFunc<
2, std::numeric_limits<unsigned>::max()>
eachOf;
/// Matches if any of the given matchers matches.
///
/// Usable as: Any Matcher
extern const internal::VariadicOperatorMatcherFunc<
2, std::numeric_limits<unsigned>::max()>
anyOf;
/// Matches if all given matchers match.
///
/// Usable as: Any Matcher
extern const internal::VariadicOperatorMatcherFunc<
2, std::numeric_limits<unsigned>::max()>
allOf;
/// Matches any node regardless of the submatcher.
///
/// However, \c optionally will retain any bindings generated by the submatcher.
/// Useful when additional information, which may or may not be present, about
/// a main matching node is desired.
///
/// For example, in:
/// \code
/// class Foo {
/// int bar;
/// }
/// \endcode
/// The matcher:
/// \code
/// cxxRecordDecl(
/// optionally(has(
/// fieldDecl(hasName("bar")).bind("var")
/// ))).bind("record")
/// \endcode
/// will produce a result binding for both "record" and "var".
/// The matcher will produce a "record" binding even if there is no data
/// member named "bar" in that class.
///
/// Usable as: Any Matcher
extern const internal::VariadicOperatorMatcherFunc<1, 1> optionally;
/// Matches sizeof (C99), alignof (C++11) and vec_step (OpenCL)
///
/// Given
/// \code
/// Foo x = bar;
/// int y = sizeof(x) + alignof(x);
/// \endcode
/// unaryExprOrTypeTraitExpr()
/// matches \c sizeof(x) and \c alignof(x)
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
UnaryExprOrTypeTraitExpr>
unaryExprOrTypeTraitExpr;
/// Matches any of the \p NodeMatchers with InnerMatchers nested within
///
/// Given
/// \code
/// if (true);
/// for (; true; );
/// \endcode
/// with the matcher
/// \code
/// mapAnyOf(ifStmt, forStmt).with(
/// hasCondition(cxxBoolLiteralExpr(equals(true)))
/// ).bind("trueCond")
/// \endcode
/// matches the \c if and the \c for. It is equivalent to:
/// \code
/// auto trueCond = hasCondition(cxxBoolLiteralExpr(equals(true)));
/// anyOf(
/// ifStmt(trueCond).bind("trueCond"),
/// forStmt(trueCond).bind("trueCond")
/// );
/// \endcode
///
/// The with() chain-call accepts zero or more matchers which are combined
/// as-if with allOf() in each of the node matchers.
/// Usable as: Any Matcher
template <typename T, typename... U>
auto mapAnyOf(internal::VariadicDynCastAllOfMatcher<T, U> const &...) {
  // The matcher arguments are used only to deduce the node types U...; the
  // returned helper object builds the actual combined matcher via with().
  return internal::MapAnyOfHelper<U...>();
}
/// Matches nodes which can be used with binary operators.
///
/// The code
/// \code
/// var1 != var2;
/// \endcode
/// might be represented in the clang AST as a binaryOperator, a
/// cxxOperatorCallExpr or a cxxRewrittenBinaryOperator, depending on
///
/// * whether the types of var1 and var2 are fundamental (binaryOperator) or at
/// least one is a class type (cxxOperatorCallExpr)
/// * whether the code appears in a template declaration, if at least one of the
/// vars is a dependent-type (binaryOperator)
/// * whether the code relies on a rewritten binary operator, such as a
/// spaceship operator or an inverted equality operator
/// (cxxRewrittenBinaryOperator)
///
/// This matcher elides details in places where the matchers for the nodes are
/// compatible.
///
/// Given
/// \code
/// binaryOperation(
/// hasOperatorName("!="),
/// hasLHS(expr().bind("lhs")),
/// hasRHS(expr().bind("rhs"))
/// )
/// \endcode
/// matches each use of "!=" in:
/// \code
/// struct S{
/// bool operator!=(const S&) const;
/// };
///
/// void foo()
/// {
/// 1 != 2;
/// S() != S();
/// }
///
/// template<typename T>
/// void templ()
/// {
/// 1 != 2;
/// T() != S();
/// }
/// struct HasOpEq
/// {
/// bool operator==(const HasOpEq &) const;
/// };
///
/// void inverse()
/// {
/// HasOpEq s1;
/// HasOpEq s2;
/// if (s1 != s2)
/// return;
/// }
///
/// struct HasSpaceship
/// {
/// bool operator<=>(const HasOpEq &) const;
/// };
///
/// void use_spaceship()
/// {
/// HasSpaceship s1;
/// HasSpaceship s2;
/// if (s1 != s2)
/// return;
/// }
/// \endcode
extern const internal::MapAnyOfMatcher<BinaryOperator, CXXOperatorCallExpr,
CXXRewrittenBinaryOperator>
binaryOperation;
/// Matches function calls and constructor calls
///
/// Because CallExpr and CXXConstructExpr do not share a common
/// base class with API accessing arguments etc, AST Matchers for code
/// which should match both are typically duplicated. This matcher
/// removes the need for duplication.
///
/// Given code
/// \code
/// struct ConstructorTakesInt
/// {
/// ConstructorTakesInt(int i) {}
/// };
///
/// void callTakesInt(int i)
/// {
/// }
///
/// void doCall()
/// {
/// callTakesInt(42);
/// }
///
/// void doConstruct()
/// {
/// ConstructorTakesInt cti(42);
/// }
/// \endcode
///
/// The matcher
/// \code
/// invocation(hasArgument(0, integerLiteral(equals(42))))
/// \endcode
/// matches the expression in both doCall and doConstruct
extern const internal::MapAnyOfMatcher<CallExpr, CXXConstructExpr> invocation;
/// Matches unary expressions that have a specific type of argument.
///
/// Given
/// \code
/// int a, c; float b; int s = sizeof(a) + sizeof(b) + alignof(c);
/// \endcode
/// unaryExprOrTypeTraitExpr(hasArgumentOfType(asString("int")))
/// matches \c sizeof(a) and \c alignof(c)
AST_MATCHER_P(UnaryExprOrTypeTraitExpr, hasArgumentOfType,
              internal::Matcher<QualType>, InnerMatcher) {
  // Run the inner matcher directly against the operand's type.
  return InnerMatcher.matches(Node.getTypeOfArgument(), Finder, Builder);
}
/// Matches unary expressions of a certain kind.
///
/// Given
/// \code
/// int x;
/// int s = sizeof(x) + alignof(x)
/// \endcode
/// unaryExprOrTypeTraitExpr(ofKind(UETT_SizeOf))
/// matches \c sizeof(x)
///
/// If the matcher is used from clang-query, UnaryExprOrTypeTrait parameter
/// should be passed as a quoted string. e.g., ofKind("UETT_SizeOf").
AST_MATCHER_P(UnaryExprOrTypeTraitExpr, ofKind, UnaryExprOrTypeTrait, Kind) {
  // Direct comparison of the trait kind (e.g. UETT_SizeOf, UETT_AlignOf).
  return Node.getKind() == Kind;
}
/// Same as unaryExprOrTypeTraitExpr, but only matching
/// alignof.
inline internal::BindableMatcher<Stmt> alignOfExpr(
    const internal::Matcher<UnaryExprOrTypeTraitExpr> &InnerMatcher) {
  // Accept both standard alignof and the compiler-specific "preferred
  // alignment" variant, combined with the caller's inner matcher.
  return stmt(unaryExprOrTypeTraitExpr(
      allOf(anyOf(ofKind(UETT_AlignOf), ofKind(UETT_PreferredAlignOf)),
            InnerMatcher)));
}
/// Same as unaryExprOrTypeTraitExpr, but only matching
/// sizeof.
inline internal::BindableMatcher<Stmt> sizeOfExpr(
    const internal::Matcher<UnaryExprOrTypeTraitExpr> &InnerMatcher) {
  // Restrict unaryExprOrTypeTraitExpr to the sizeof kind only.
  return stmt(unaryExprOrTypeTraitExpr(
      allOf(ofKind(UETT_SizeOf), InnerMatcher)));
}
/// Matches NamedDecl nodes that have the specified name.
///
/// Supports specifying enclosing namespaces or classes by prefixing the name
/// with '<enclosing>::'.
/// Does not match typedefs of an underlying type with the given name.
///
/// Example matches X (Name == "X")
/// \code
/// class X;
/// \endcode
///
/// Example matches X (Name is one of "::a::b::X", "a::b::X", "b::X", "X")
/// \code
/// namespace a { namespace b { class X; } }
/// \endcode
inline internal::Matcher<NamedDecl> hasName(StringRef Name) {
  // Construct a HasNameMatcher over a single-element name list; the vector
  // form is shared with hasAnyName.
  return internal::Matcher<NamedDecl>(
      new internal::HasNameMatcher({std::string(Name)}));
}
/// Matches NamedDecl nodes that have any of the specified names.
///
/// This matcher is only provided as a performance optimization of hasName.
/// \code
/// hasAnyName(a, b, c)
/// \endcode
/// is equivalent to, but faster than
/// \code
/// anyOf(hasName(a), hasName(b), hasName(c))
/// \endcode
extern const internal::VariadicFunction<internal::Matcher<NamedDecl>, StringRef,
internal::hasAnyNameFunc>
hasAnyName;
/// Matches NamedDecl nodes whose fully qualified names contain
/// a substring matched by the given RegExp.
///
/// Supports specifying enclosing namespaces or classes by
/// prefixing the name with '<enclosing>::'. Does not match typedefs
/// of an underlying type with the given name.
///
/// Example matches X (regexp == "::X")
/// \code
/// class X;
/// \endcode
///
/// Example matches X (regexp is one of "::X", "^foo::.*X", among others)
/// \code
/// namespace foo { namespace bar { class X; } }
/// \endcode
AST_MATCHER_REGEX(NamedDecl, matchesName, RegExp) {
  // Prefix "::" so patterns can anchor against the fully qualified name.
  return RegExp->match("::" + Node.getQualifiedNameAsString());
}
/// Matches overloaded operator names.
///
/// Matches overloaded operator names specified in strings without the
/// "operator" prefix: e.g. "<<".
///
/// Given:
/// \code
/// class A { int operator*(); };
/// const A &operator<<(const A &a, const A &b);
/// A a;
/// a << a; // <-- This matches
/// \endcode
///
/// \c cxxOperatorCallExpr(hasOverloadedOperatorName("<<"))) matches the
/// specified line and
/// \c cxxRecordDecl(hasMethod(hasOverloadedOperatorName("*")))
/// matches the declaration of \c A.
///
/// Usable as: Matcher<CXXOperatorCallExpr>, Matcher<FunctionDecl>
inline internal::PolymorphicMatcher<
    internal::HasOverloadedOperatorNameMatcher,
    AST_POLYMORPHIC_SUPPORTED_TYPES(CXXOperatorCallExpr, FunctionDecl),
    std::vector<std::string>>
hasOverloadedOperatorName(StringRef Name) {
  // Parameterize the matcher with a one-element list of operator spellings;
  // hasAnyOverloadedOperatorName builds the same matcher with several.
  return internal::PolymorphicMatcher<
      internal::HasOverloadedOperatorNameMatcher,
      AST_POLYMORPHIC_SUPPORTED_TYPES(CXXOperatorCallExpr, FunctionDecl),
      std::vector<std::string>>({std::string(Name)});
}
/// Matches overloaded operator names.
///
/// Matches overloaded operator names specified in strings without the
/// "operator" prefix: e.g. "<<".
///
/// hasAnyOverloadedOperatorName("+", "-")
/// Is equivalent to
/// anyOf(hasOverloadedOperatorName("+"), hasOverloadedOperatorName("-"))
extern const internal::VariadicFunction<
internal::PolymorphicMatcher<internal::HasOverloadedOperatorNameMatcher,
AST_POLYMORPHIC_SUPPORTED_TYPES(
CXXOperatorCallExpr, FunctionDecl),
std::vector<std::string>>,
StringRef, internal::hasAnyOverloadedOperatorNameFunc>
hasAnyOverloadedOperatorName;
/// Matches template-dependent, but known, member names.
///
/// In template declarations, dependent members are not resolved and so can
/// not be matched to particular named declarations.
///
/// This matcher allows to match on the known name of members.
///
/// Given
/// \code
/// template <typename T>
/// struct S {
/// void mem();
/// };
/// template <typename T>
/// void x() {
/// S<T> s;
/// s.mem();
/// }
/// \endcode
/// \c cxxDependentScopeMemberExpr(hasMemberName("mem")) matches `s.mem()`
AST_MATCHER_P(CXXDependentScopeMemberExpr, hasMemberName, std::string, N) {
  // Compare the (still unresolved) member name's textual form against N.
  return Node.getMember().getAsString() == N;
}
/// Matches template-dependent, but known, member names against an already-bound
/// node
///
/// In template declarations, dependent members are not resolved and so can
/// not be matched to particular named declarations.
///
/// This matcher allows to match on the name of already-bound VarDecl, FieldDecl
/// and CXXMethodDecl nodes.
///
/// Given
/// \code
/// template <typename T>
/// struct S {
/// void mem();
/// };
/// template <typename T>
/// void x() {
/// S<T> s;
/// s.mem();
/// }
/// \endcode
/// The matcher
/// @code
/// \c cxxDependentScopeMemberExpr(
/// hasObjectExpression(declRefExpr(hasType(templateSpecializationType(
/// hasDeclaration(classTemplateDecl(has(cxxRecordDecl(has(
/// cxxMethodDecl(hasName("mem")).bind("templMem")
/// )))))
/// )))),
/// memberHasSameNameAsBoundNode("templMem")
/// )
/// @endcode
/// first matches and binds the @c mem member of the @c S template, then
/// compares its name to the usage in @c s.mem() in the @c x function template
AST_MATCHER_P(CXXDependentScopeMemberExpr, memberHasSameNameAsBoundNode,
              std::string, BindingID) {
  // Textual name of the dependent member being accessed.
  auto MemberName = Node.getMember().getAsString();
  // Filter the accumulated binding sets: the callback returns true for sets
  // to discard, i.e. where the node bound to BindingID is not a
  // field/method/variable declaration named exactly MemberName.
  return Builder->removeBindings(
      [this, MemberName](const BoundNodesMap &Nodes) {
        const auto &BN = Nodes.getNode(this->BindingID);
        if (const auto *ND = BN.get<NamedDecl>()) {
          // Only member-like declarations can correspond to a member access.
          if (!isa<FieldDecl, CXXMethodDecl, VarDecl>(ND))
            return true;
          return ND->getName() != MemberName;
        }
        // Nothing suitable bound under this ID: discard the set.
        return true;
      });
}
/// Matches C++ classes that are directly or indirectly derived from a class
/// matching \c Base, or Objective-C classes that directly or indirectly
/// subclass a class matching \c Base.
///
/// Note that a class is not considered to be derived from itself.
///
/// Example matches Y, Z, C (Base == hasName("X"))
/// \code
/// class X;
/// class Y : public X {}; // directly derived
/// class Z : public Y {}; // indirectly derived
/// typedef X A;
/// typedef A B;
/// class C : public B {}; // derived from a typedef of X
/// \endcode
///
/// In the following example, Bar matches isDerivedFrom(hasName("X")):
/// \code
/// class Foo;
/// typedef Foo X;
/// class Bar : public Foo {}; // derived from a type that X is a typedef of
/// \endcode
///
/// In the following example, Bar matches isDerivedFrom(hasName("NSObject"))
/// \code
/// @interface NSObject @end
/// @interface Bar : NSObject @end
/// \endcode
///
/// Usable as: Matcher<CXXRecordDecl>, Matcher<ObjCInterfaceDecl>
AST_POLYMORPHIC_MATCHER_P(
    isDerivedFrom,
    AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl),
    internal::Matcher<NamedDecl>, Base) {
  // Check if the node is a C++ struct/union/class.
  if (const auto *RD = dyn_cast<CXXRecordDecl>(&Node))
    return Finder->classIsDerivedFrom(RD, Base, Builder, /*Directly=*/false);
  // The node must be an Objective-C class.
  // Directly=false: walk the whole superclass chain, not just direct bases.
  const auto *InterfaceDecl = cast<ObjCInterfaceDecl>(&Node);
  return Finder->objcClassIsDerivedFrom(InterfaceDecl, Base, Builder,
                                        /*Directly=*/false);
}
/// Overloaded method as shortcut for \c isDerivedFrom(hasName(...)).
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
    isDerivedFrom,
    AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl),
    std::string, BaseName, 1) {
  // An empty name can never denote a base class.
  if (BaseName.empty())
    return false;
  const auto Inner = isDerivedFrom(hasName(BaseName));
  if (const auto *Record = dyn_cast<CXXRecordDecl>(&Node))
    return Matcher<CXXRecordDecl>(Inner).matches(*Record, Finder, Builder);
  // Not a C++ record, so it must be an Objective-C interface.
  return Matcher<ObjCInterfaceDecl>(Inner).matches(
      *cast<ObjCInterfaceDecl>(&Node), Finder, Builder);
}
/// Matches C++ classes that have a direct or indirect base matching \p
/// BaseSpecMatcher.
///
/// Example:
/// matcher hasAnyBase(hasType(cxxRecordDecl(hasName("SpecialBase"))))
/// \code
/// class Foo;
/// class Bar : Foo {};
/// class Baz : Bar {};
/// class SpecialBase;
/// class Proxy : SpecialBase {}; // matches Proxy
/// class IndirectlyDerived : Proxy {}; //matches IndirectlyDerived
/// \endcode
///
// FIXME: Refactor this and isDerivedFrom to reuse implementation.
AST_MATCHER_P(CXXRecordDecl, hasAnyBase, internal::Matcher<CXXBaseSpecifier>,
              BaseSpecMatcher) {
  // Delegate the direct-and-indirect base walk to the shared helper.
  return internal::matchesAnyBase(Node, BaseSpecMatcher, Finder, Builder);
}
/// Matches C++ classes that have a direct base matching \p BaseSpecMatcher.
///
/// Example:
/// matcher hasDirectBase(hasType(cxxRecordDecl(hasName("SpecialBase"))))
/// \code
/// class Foo;
/// class Bar : Foo {};
/// class Baz : Bar {};
/// class SpecialBase;
/// class Proxy : SpecialBase {}; // matches Proxy
/// class IndirectlyDerived : Proxy {}; // doesn't match
/// \endcode
AST_MATCHER_P(CXXRecordDecl, hasDirectBase, internal::Matcher<CXXBaseSpecifier>,
              BaseSpecMatcher) {
  // Forward declarations have no base list to inspect.
  if (!Node.hasDefinition())
    return false;
  // Stop at the first direct base that satisfies the specifier matcher.
  for (const CXXBaseSpecifier &Base : Node.bases())
    if (BaseSpecMatcher.matches(Base, Finder, Builder))
      return true;
  return false;
}
/// Similar to \c isDerivedFrom(), but also matches classes that directly
/// match \c Base.
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
    isSameOrDerivedFrom,
    AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl),
    internal::Matcher<NamedDecl>, Base, 0) {
  // Accept the class itself in addition to anything derived from it.
  const auto SelfOrDerived = anyOf(Base, isDerivedFrom(Base));
  if (const auto *Record = dyn_cast<CXXRecordDecl>(&Node))
    return Matcher<CXXRecordDecl>(SelfOrDerived)
        .matches(*Record, Finder, Builder);
  // Otherwise the node is guaranteed to be an Objective-C interface.
  return Matcher<ObjCInterfaceDecl>(SelfOrDerived)
      .matches(*cast<ObjCInterfaceDecl>(&Node), Finder, Builder);
}
/// Overloaded method as shortcut for
/// \c isSameOrDerivedFrom(hasName(...)).
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
    isSameOrDerivedFrom,
    AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl),
    std::string, BaseName, 1) {
  // An empty name can never denote a class.
  if (BaseName.empty())
    return false;
  const auto Inner = isSameOrDerivedFrom(hasName(BaseName));
  if (const auto *Record = dyn_cast<CXXRecordDecl>(&Node))
    return Matcher<CXXRecordDecl>(Inner).matches(*Record, Finder, Builder);
  // Not a C++ record, so it must be an Objective-C interface.
  return Matcher<ObjCInterfaceDecl>(Inner).matches(
      *cast<ObjCInterfaceDecl>(&Node), Finder, Builder);
}
/// Matches C++ or Objective-C classes that are directly derived from a class
/// matching \c Base.
///
/// Note that a class is not considered to be derived from itself.
///
/// Example matches Y, C (Base == hasName("X"))
/// \code
/// class X;
/// class Y : public X {}; // directly derived
/// class Z : public Y {}; // indirectly derived
/// typedef X A;
/// typedef A B;
/// class C : public B {}; // derived from a typedef of X
/// \endcode
///
/// In the following example, Bar matches isDerivedFrom(hasName("X")):
/// \code
/// class Foo;
/// typedef Foo X;
/// class Bar : public Foo {}; // derived from a type that X is a typedef of
/// \endcode
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
    isDirectlyDerivedFrom,
    AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl),
    internal::Matcher<NamedDecl>, Base, 0) {
  // Check if the node is a C++ struct/union/class.
  if (const auto *RD = dyn_cast<CXXRecordDecl>(&Node))
    return Finder->classIsDerivedFrom(RD, Base, Builder, /*Directly=*/true);
  // The node must be an Objective-C class.
  // Directly=true: only immediate bases/superclasses count.
  const auto *InterfaceDecl = cast<ObjCInterfaceDecl>(&Node);
  return Finder->objcClassIsDerivedFrom(InterfaceDecl, Base, Builder,
                                        /*Directly=*/true);
}
/// Overloaded method as shortcut for \c isDirectlyDerivedFrom(hasName(...)).
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
    isDirectlyDerivedFrom,
    AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl),
    std::string, BaseName, 1) {
  // An empty name can never denote a base class.
  if (BaseName.empty())
    return false;
  const auto Inner = isDirectlyDerivedFrom(hasName(BaseName));
  if (const auto *Record = dyn_cast<CXXRecordDecl>(&Node))
    return Matcher<CXXRecordDecl>(Inner).matches(*Record, Finder, Builder);
  // Not a C++ record, so it must be an Objective-C interface.
  return Matcher<ObjCInterfaceDecl>(Inner).matches(
      *cast<ObjCInterfaceDecl>(&Node), Finder, Builder);
}
/// Matches the first method of a class or struct that satisfies \c
/// InnerMatcher.
///
/// Given:
/// \code
/// class A { void func(); };
/// class B { void member(); };
/// \endcode
///
/// \c cxxRecordDecl(hasMethod(hasName("func"))) matches the declaration of
/// \c A but not \c B.
AST_MATCHER_P(CXXRecordDecl, hasMethod, internal::Matcher<CXXMethodDecl>,
              InnerMatcher) {
  // Work on a copy of the bindings so a failed match leaves *Builder intact.
  BoundNodesTreeBuilder Result(*Builder);
  auto MatchIt = matchesFirstInPointerRange(InnerMatcher, Node.method_begin(),
                                            Node.method_end(), Finder, &Result);
  if (MatchIt == Node.method_end())
    return false;
  // Under traversal modes that ignore implicit nodes, a match on a
  // compiler-generated method does not count.
  if (Finder->isTraversalIgnoringImplicitNodes() && (*MatchIt)->isImplicit())
    return false;
  // Commit the bindings produced by the successful match.
  *Builder = std::move(Result);
  return true;
}
/// Matches the generated class of lambda expressions.
///
/// Given:
/// \code
/// auto x = []{};
/// \endcode
///
/// \c cxxRecordDecl(isLambda()) matches the implicit class declaration of
/// \c decltype(x)
AST_MATCHER(CXXRecordDecl, isLambda) {
  // True for the closure class the compiler generates for a lambda expression.
  return Node.isLambda();
}
/// Matches AST nodes that have child AST nodes that match the
/// provided matcher.
///
/// Example matches X, Y
/// (matcher = cxxRecordDecl(has(cxxRecordDecl(hasName("X")))
/// \code
/// class X {}; // Matches X, because X::X is a class of name X inside X.
/// class Y { class X {}; };
/// class Z { class Y { class X {}; }; }; // Does not match Z.
/// \endcode
///
/// ChildT must be an AST base type.
///
/// Only the first matching child produces a match; use \c forEach to get one
/// match per matching child.
///
/// Usable as: Any Matcher
/// Note that has is direct matcher, so it also matches things like implicit
/// casts and paren casts. If you are matching with expr then you should
/// probably consider using ignoringParenImpCasts like:
/// has(ignoringParenImpCasts(expr())).
extern const internal::ArgumentAdaptingMatcherFunc<internal::HasMatcher> has;
/// Matches AST nodes that have descendant AST nodes that match the
/// provided matcher.
///
/// Example matches X, Y, Z
/// (matcher = cxxRecordDecl(hasDescendant(cxxRecordDecl(hasName("X")))))
/// \code
/// class X {}; // Matches X, because X::X is a class of name X inside X.
/// class Y { class X {}; };
/// class Z { class Y { class X {}; }; };
/// \endcode
///
/// DescendantT must be an AST base type.
///
/// Only the first matching descendant produces a match; use
/// \c forEachDescendant to get one match per matching descendant.
///
/// Usable as: Any Matcher
extern const internal::ArgumentAdaptingMatcherFunc<
    internal::HasDescendantMatcher>
    hasDescendant;
/// Matches AST nodes that have child AST nodes that match the
/// provided matcher.
///
/// Example matches X, Y, Y::X, Z::Y, Z::Y::X
/// (matcher = cxxRecordDecl(forEach(cxxRecordDecl(hasName("X")))
/// \code
/// class X {};
/// class Y { class X {}; }; // Matches Y, because Y::X is a class of name X
/// // inside Y.
/// class Z { class Y { class X {}; }; }; // Does not match Z.
/// \endcode
///
/// ChildT must be an AST base type.
///
/// As opposed to 'has', 'forEach' will cause a match for each result that
/// matches instead of only on the first one.
///
/// Usable as: Any Matcher
extern const internal::ArgumentAdaptingMatcherFunc<internal::ForEachMatcher>
    forEach;
/// Matches AST nodes that have descendant AST nodes that match the
/// provided matcher.
///
/// Example matches X, A, A::X, B, B::C, B::C::X
/// (matcher = cxxRecordDecl(forEachDescendant(cxxRecordDecl(hasName("X")))))
/// \code
/// class X {};
/// class A { class X {}; }; // Matches A, because A::X is a class of name
/// // X inside A.
/// class B { class C { class X {}; }; };
/// \endcode
///
/// DescendantT must be an AST base type.
///
/// As opposed to 'hasDescendant', 'forEachDescendant' will cause a match for
/// each result that matches instead of only on the first one.
///
/// Note: Recursively combined ForEachDescendant can cause many matches:
/// cxxRecordDecl(forEachDescendant(cxxRecordDecl(
/// forEachDescendant(cxxRecordDecl())
/// )))
/// will match 10 times (plus injected class name matches) on:
/// \code
/// class A { class B { class C { class D { class E {}; }; }; }; };
/// \endcode
///
/// Usable as: Any Matcher
extern const internal::ArgumentAdaptingMatcherFunc<
    internal::ForEachDescendantMatcher>
    forEachDescendant;
/// Matches if the node or any descendant matches.
///
/// Generates results for each match.
///
/// For example, in:
/// \code
/// class A { class B {}; class C {}; };
/// \endcode
/// The matcher:
/// \code
/// cxxRecordDecl(hasName("::A"),
/// findAll(cxxRecordDecl(isDefinition()).bind("m")))
/// \endcode
/// will generate results for \c A, \c B and \c C.
///
/// Usable as: Any Matcher
template <typename T>
internal::Matcher<T> findAll(const internal::Matcher<T> &Matcher) {
  // Composed as "the node itself OR any of its descendants": eachOf tries the
  // matcher on the node, forEachDescendant generates one result per
  // matching descendant.
  return eachOf(Matcher, forEachDescendant(Matcher));
}
/// Matches AST nodes that have a parent that matches the provided
/// matcher.
///
/// Given
/// \code
/// void f() { for (;;) { int x = 42; if (true) { int x = 43; } } }
/// \endcode
/// \c compoundStmt(hasParent(ifStmt())) matches "{ int x = 43; }".
///
/// Only the direct parent is considered; use \c hasAncestor to search
/// further up the tree.
///
/// Usable as: Any Matcher
extern const internal::ArgumentAdaptingMatcherFunc<
    internal::HasParentMatcher,
    internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc, Attr>,
    internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc, Attr>>
    hasParent;
/// Matches AST nodes that have an ancestor that matches the provided
/// matcher.
///
/// Given
/// \code
/// void f() { if (true) { int x = 42; } }
/// void g() { for (;;) { int x = 43; } }
/// \endcode
/// \c expr(integerLiteral(hasAncestor(ifStmt()))) matches \c 42, but not 43.
///
/// Usable as: Any Matcher
extern const internal::ArgumentAdaptingMatcherFunc<
    internal::HasAncestorMatcher,
    internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc, Attr>,
    internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc, Attr>>
    hasAncestor;
/// Matches if the provided matcher does not match.
///
/// Example matches Y (matcher = cxxRecordDecl(unless(hasName("X"))))
/// \code
/// class X {};
/// class Y {};
/// \endcode
///
/// Usable as: Any Matcher
extern const internal::VariadicOperatorMatcherFunc<1, 1> unless;
/// Matches a node if the declaration associated with that node
/// matches the given matcher.
///
/// The associated declaration is:
/// - for type nodes, the declaration of the underlying type
/// - for CallExpr, the declaration of the callee
/// - for MemberExpr, the declaration of the referenced member
/// - for CXXConstructExpr, the declaration of the constructor
/// - for CXXNewExpr, the declaration of the operator new
/// - for ObjCIvarExpr, the declaration of the ivar
///
/// For type nodes, hasDeclaration will generally match the declaration of the
/// sugared type. Given
/// \code
/// class X {};
/// typedef X Y;
/// Y y;
/// \endcode
/// in varDecl(hasType(hasDeclaration(decl()))) the decl will match the
/// typedefDecl. A common use case is to match the underlying, desugared type.
/// This can be achieved by using the hasUnqualifiedDesugaredType matcher:
/// \code
/// varDecl(hasType(hasUnqualifiedDesugaredType(
/// recordType(hasDeclaration(decl())))))
/// \endcode
/// In this matcher, the decl will match the CXXRecordDecl of class X.
///
/// Usable as: Matcher<AddrLabelExpr>, Matcher<CallExpr>,
/// Matcher<CXXConstructExpr>, Matcher<CXXNewExpr>, Matcher<DeclRefExpr>,
/// Matcher<EnumType>, Matcher<InjectedClassNameType>, Matcher<LabelStmt>,
/// Matcher<MemberExpr>, Matcher<QualType>, Matcher<RecordType>,
/// Matcher<TagType>, Matcher<TemplateSpecializationType>,
/// Matcher<TemplateTypeParmType>, Matcher<TypedefType>,
/// Matcher<UnresolvedUsingType>
inline internal::PolymorphicMatcher<
    internal::HasDeclarationMatcher,
    void(internal::HasDeclarationSupportedTypes), internal::Matcher<Decl>>
hasDeclaration(const internal::Matcher<Decl> &InnerMatcher) {
  // Name the verbose matcher type once instead of repeating it in the
  // return statement.
  using ResultT = internal::PolymorphicMatcher<
      internal::HasDeclarationMatcher,
      void(internal::HasDeclarationSupportedTypes), internal::Matcher<Decl>>;
  return ResultT(InnerMatcher);
}
/// Matches a \c NamedDecl whose underlying declaration matches the given
/// matcher.
///
/// Given
/// \code
/// namespace N { template<class T> void f(T t); }
/// template <class T> void g() { using N::f; f(T()); }
/// \endcode
/// \c unresolvedLookupExpr(hasAnyDeclaration(
/// namedDecl(hasUnderlyingDecl(hasName("::N::f")))))
/// matches the use of \c f in \c g() .
AST_MATCHER_P(NamedDecl, hasUnderlyingDecl, internal::Matcher<NamedDecl>,
              InnerMatcher) {
  // Only run the inner matcher when an underlying declaration exists.
  if (const NamedDecl *Underlying = Node.getUnderlyingDecl())
    return InnerMatcher.matches(*Underlying, Finder, Builder);
  return false;
}
/// Matches on the implicit object argument of a member call expression, after
/// stripping off any parentheses or implicit casts.
///
/// Given
/// \code
/// class Y { public: void m(); };
/// Y g();
/// class X : public Y {};
/// void z(Y y, X x) { y.m(); (g()).m(); x.m(); }
/// \endcode
/// cxxMemberCallExpr(on(hasType(cxxRecordDecl(hasName("Y")))))
/// matches `y.m()` and `(g()).m()`.
/// cxxMemberCallExpr(on(hasType(cxxRecordDecl(hasName("X")))))
/// matches `x.m()`.
/// cxxMemberCallExpr(on(callExpr()))
/// matches `(g()).m()`.
///
/// FIXME: Overload to allow directly matching types?
AST_MATCHER_P(CXXMemberCallExpr, on, internal::Matcher<Expr>,
              InnerMatcher) {
  // Strip parens and implicit casts before handing the object expression to
  // the inner matcher.
  const Expr *Object = Node.getImplicitObjectArgument()->IgnoreParenImpCasts();
  if (Object == nullptr)
    return false;
  return InnerMatcher.matches(*Object, Finder, Builder);
}
/// Matches on the receiver of an ObjectiveC Message expression.
///
/// Example
/// matcher = objCMessageExpr(hasReceiverType(asString("UIWebView *")));
/// matches the [webView ...] message invocation.
/// \code
/// NSString *webViewJavaScript = ...
/// UIWebView *webView = ...
/// [webView stringByEvaluatingJavaScriptFromString:webViewJavascript];
/// \endcode
AST_MATCHER_P(ObjCMessageExpr, hasReceiverType, internal::Matcher<QualType>,
              InnerMatcher) {
  // Note: the receiver's type is a QualType, not a declaration.
  const QualType ReceiverType = Node.getReceiverType();
  return InnerMatcher.matches(ReceiverType, Finder, Builder);
}
/// Returns true when the Objective-C method declaration is a class method.
///
/// Example
/// matcher = objcMethodDecl(isClassMethod())
/// matches
/// \code
/// @interface I + (void)foo; @end
/// \endcode
/// but not
/// \code
/// @interface I - (void)bar; @end
/// \endcode
AST_MATCHER(ObjCMethodDecl, isClassMethod) {
  // Delegates to ObjCMethodDecl::isClassMethod() ('+' methods).
  return Node.isClassMethod();
}
/// Returns true when the Objective-C method declaration is an instance method.
///
/// Example
/// matcher = objcMethodDecl(isInstanceMethod())
/// matches
/// \code
/// @interface I - (void)bar; @end
/// \endcode
/// but not
/// \code
/// @interface I + (void)foo; @end
/// \endcode
AST_MATCHER(ObjCMethodDecl, isInstanceMethod) {
  // Delegates to ObjCMethodDecl::isInstanceMethod() ('-' methods).
  return Node.isInstanceMethod();
}
/// Returns true when the Objective-C message is sent to a class.
///
/// Example
/// matcher = objcMessageExpr(isClassMessage())
/// matches
/// \code
/// [NSString stringWithFormat:@"format"];
/// \endcode
/// but not
/// \code
/// NSString *x = @"hello";
/// [x containsString:@"h"];
/// \endcode
AST_MATCHER(ObjCMessageExpr, isClassMessage) {
  // Delegates to ObjCMessageExpr::isClassMessage().
  return Node.isClassMessage();
}
/// Returns true when the Objective-C message is sent to an instance.
///
/// Example
/// matcher = objcMessageExpr(isInstanceMessage())
/// matches
/// \code
/// NSString *x = @"hello";
/// [x containsString:@"h"];
/// \endcode
/// but not
/// \code
/// [NSString stringWithFormat:@"format"];
/// \endcode
AST_MATCHER(ObjCMessageExpr, isInstanceMessage) {
  // Delegates to ObjCMessageExpr::isInstanceMessage().
  return Node.isInstanceMessage();
}
/// Matches if the Objective-C message is sent to an instance,
/// and the inner matcher matches on that instance.
///
/// For example the method call in
/// \code
/// NSString *x = @"hello";
/// [x containsString:@"h"];
/// \endcode
/// is matched by
/// objcMessageExpr(hasReceiver(declRefExpr(to(varDecl(hasName("x"))))))
AST_MATCHER_P(ObjCMessageExpr, hasReceiver, internal::Matcher<Expr>,
              InnerMatcher) {
  // Class messages have no instance receiver; getInstanceReceiver() then
  // yields null and the matcher fails.
  const Expr *Receiver = Node.getInstanceReceiver();
  if (Receiver == nullptr)
    return false;
  return InnerMatcher.matches(*Receiver->IgnoreParenImpCasts(), Finder,
                              Builder);
}
/// Matches when BaseName == Selector.getAsString()
///
/// matcher = objCMessageExpr(hasSelector("loadHTMLString:baseURL:"));
/// matches the outer message expr in the code below, but NOT the message
/// invocation for self.bodyView.
/// \code
/// [self.bodyView loadHTMLString:html baseURL:NULL];
/// \endcode
AST_MATCHER_P(ObjCMessageExpr, hasSelector, std::string, BaseName) {
  // Compare the full selector spelling (including ':' separators) against
  // the supplied name. operator== is the idiomatic equivalent of
  // compare(...) == 0 and reads as the equality it is.
  return BaseName == Node.getSelector().getAsString();
}
/// Matches when at least one of the supplied string equals to the
/// Selector.getAsString()
///
/// matcher = objCMessageExpr(hasSelector("methodA:", "methodB:"));
/// matches both of the expressions below:
/// \code
/// [myObj methodA:argA];
/// [myObj methodB:argB];
/// \endcode
// Variadic: accepts any number of selector-name strings.
extern const internal::VariadicFunction<internal::Matcher<ObjCMessageExpr>,
                                        StringRef,
                                        internal::hasAnySelectorFunc>
    hasAnySelector;
/// Matches ObjC selectors whose name contains
/// a substring matched by the given RegExp.
/// matcher = objCMessageExpr(matchesSelector("loadHTMLString\:baseURL?"));
/// matches the outer message expr in the code below, but NOT the message
/// invocation for self.bodyView.
/// \code
/// [self.bodyView loadHTMLString:html baseURL:NULL];
/// \endcode
AST_MATCHER_REGEX(ObjCMessageExpr, matchesSelector, RegExp) {
  // Run the precompiled regex directly on the selector's string form.
  return RegExp->match(Node.getSelector().getAsString());
}
/// Matches when the selector is the empty selector
///
/// Matches only when the selector of the objCMessageExpr is NULL. This may
/// represent an error condition in the tree!
AST_MATCHER(ObjCMessageExpr, hasNullSelector) {
  // Selector::isNull() — a null selector is usually a malformed AST.
  return Node.getSelector().isNull();
}
/// Matches when the selector is a Unary Selector
///
/// matcher = objCMessageExpr(hasUnarySelector());
/// matches self.bodyView in the code below, but NOT the outer message
/// invocation of "loadHTMLString:baseURL:".
/// \code
/// [self.bodyView loadHTMLString:html baseURL:NULL];
/// \endcode
AST_MATCHER(ObjCMessageExpr, hasUnarySelector) {
  // A unary selector takes no arguments (no ':' in its name).
  return Node.getSelector().isUnarySelector();
}
/// Matches when the selector is a keyword selector
///
/// objCMessageExpr(hasKeywordSelector()) matches the generated setFrame
/// message expression in
///
/// \code
/// UIWebView *webView = ...;
/// CGRect bodyFrame = webView.frame;
/// bodyFrame.size.height = self.bodyContentHeight;
/// webView.frame = bodyFrame;
/// // ^---- matches here
/// \endcode
AST_MATCHER(ObjCMessageExpr, hasKeywordSelector) {
  // A keyword selector takes at least one argument (has ':' pieces).
  return Node.getSelector().isKeywordSelector();
}
/// Matches when the selector has the specified number of arguments
///
/// matcher = objCMessageExpr(numSelectorArgs(0));
/// matches self.bodyView in the code below
///
/// matcher = objCMessageExpr(numSelectorArgs(2));
/// matches the invocation of "loadHTMLString:baseURL:" but not that
/// of self.bodyView
/// \code
/// [self.bodyView loadHTMLString:html baseURL:NULL];
/// \endcode
AST_MATCHER_P(ObjCMessageExpr, numSelectorArgs, unsigned, N) {
  // Compare the selector's argument count with the requested one.
  const unsigned NumArgs = Node.getSelector().getNumArgs();
  return NumArgs == N;
}
/// Matches if the call expression's callee expression matches.
///
/// Given
/// \code
/// class Y { void x() { this->x(); x(); Y y; y.x(); } };
/// void f() { f(); }
/// \endcode
/// callExpr(callee(expr()))
/// matches this->x(), x(), y.x(), f()
/// with callee(...)
/// matching this->x, x, y.x, f respectively
///
/// Note: Callee cannot take the more general internal::Matcher<Expr>
/// because this introduces ambiguous overloads with calls to Callee taking a
/// internal::Matcher<Decl>, as the matcher hierarchy is purely
/// implemented in terms of implicit casts.
AST_MATCHER_P(CallExpr, callee, internal::Matcher<Stmt>,
              InnerMatcher) {
  // Match the callee expression of the call, when present.
  const Expr *Callee = Node.getCallee();
  if (Callee == nullptr)
    return false;
  return InnerMatcher.matches(*Callee, Finder, Builder);
}
/// Matches if the call expression's callee's declaration matches the
/// given matcher.
///
/// Example matches y.x() (matcher = callExpr(callee(
/// cxxMethodDecl(hasName("x")))))
/// \code
/// class Y { public: void x(); };
/// void z() { Y y; y.x(); }
/// \endcode
AST_MATCHER_P_OVERLOAD(CallExpr, callee, internal::Matcher<Decl>, InnerMatcher,
                       1) {
  // Delegate: "callee's declaration matches" is hasDeclaration on the call.
  const auto CalleeDeclMatcher = callExpr(hasDeclaration(InnerMatcher));
  return CalleeDeclMatcher.matches(Node, Finder, Builder);
}
/// Matches if the expression's or declaration's type matches a type
/// matcher.
///
/// Example matches x (matcher = expr(hasType(cxxRecordDecl(hasName("X")))))
/// and z (matcher = varDecl(hasType(cxxRecordDecl(hasName("X")))))
/// and U (matcher = typedefDecl(hasType(asString("int")))
/// and friend class X (matcher = friendDecl(hasType("X"))
/// and public virtual X (matcher = cxxBaseSpecifier(hasType(
/// asString("class X")))
/// \code
/// class X {};
/// void y(X &x) { x; X z; }
/// typedef int U;
/// class Y { friend class X; };
/// class Z : public virtual X {};
/// \endcode
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
    hasType,
    AST_POLYMORPHIC_SUPPORTED_TYPES(Expr, FriendDecl, TypedefNameDecl,
                                    ValueDecl, CXXBaseSpecifier),
    internal::Matcher<QualType>, InnerMatcher, 0) {
  // A null QualType means the node has no usable underlying type.
  const QualType NodeType = internal::getUnderlyingType(Node);
  if (NodeType.isNull())
    return false;
  return InnerMatcher.matches(NodeType, Finder, Builder);
}
/// Overloaded to match the declaration of the expression's or value
/// declaration's type.
///
/// In case of a value declaration (for example a variable declaration),
/// this resolves one layer of indirection. For example, in the value
/// declaration "X x;", cxxRecordDecl(hasName("X")) matches the declaration of
/// X, while varDecl(hasType(cxxRecordDecl(hasName("X")))) matches the
/// declaration of x.
///
/// Example matches x (matcher = expr(hasType(cxxRecordDecl(hasName("X")))))
/// and z (matcher = varDecl(hasType(cxxRecordDecl(hasName("X")))))
/// and friend class X (matcher = friendDecl(hasType("X"))
/// and public virtual X (matcher = cxxBaseSpecifier(hasType(
/// cxxRecordDecl(hasName("X"))))
/// \code
/// class X {};
/// void y(X &x) { x; X z; }
/// class Y { friend class X; };
/// class Z : public virtual X {};
/// \endcode
///
/// Example matches class Derived
/// (matcher = cxxRecordDecl(hasAnyBase(hasType(cxxRecordDecl(hasName("Base"))))))
/// \code
/// class Base {};
/// class Derived : Base {};
/// \endcode
///
/// Usable as: Matcher<Expr>, Matcher<FriendDecl>, Matcher<ValueDecl>,
/// Matcher<CXXBaseSpecifier>
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
    hasType,
    AST_POLYMORPHIC_SUPPORTED_TYPES(Expr, FriendDecl, ValueDecl,
                                    CXXBaseSpecifier),
    internal::Matcher<Decl>, InnerMatcher, 1) {
  // Resolve the node's type, then match its declaration via hasDeclaration.
  const QualType NodeType = internal::getUnderlyingType(Node);
  if (NodeType.isNull())
    return false;
  return qualType(hasDeclaration(InnerMatcher)).matches(NodeType, Finder,
                                                        Builder);
}
/// Matches if the type location of a node matches the inner matcher.
///
/// Examples:
/// \code
/// int x;
/// \endcode
/// declaratorDecl(hasTypeLoc(loc(asString("int"))))
/// matches int x
///
/// \code
/// auto x = int(3);
/// \endcode
/// cxxTemporaryObjectExpr(hasTypeLoc(loc(asString("int"))))
/// matches int(3)
///
/// \code
/// struct Foo { Foo(int, int); };
/// auto x = Foo(1, 2);
/// \endcode
/// cxxFunctionalCastExpr(hasTypeLoc(loc(asString("struct Foo"))))
/// matches Foo(1, 2)
///
/// Usable as: Matcher<BlockDecl>, Matcher<CXXBaseSpecifier>,
/// Matcher<CXXCtorInitializer>, Matcher<CXXFunctionalCastExpr>,
/// Matcher<CXXNewExpr>, Matcher<CXXTemporaryObjectExpr>,
/// Matcher<CXXUnresolvedConstructExpr>,
/// Matcher<ClassTemplateSpecializationDecl>, Matcher<CompoundLiteralExpr>,
/// Matcher<DeclaratorDecl>, Matcher<ExplicitCastExpr>,
/// Matcher<ObjCPropertyDecl>, Matcher<TemplateArgumentLoc>,
/// Matcher<TypedefNameDecl>
AST_POLYMORPHIC_MATCHER_P(
    hasTypeLoc,
    AST_POLYMORPHIC_SUPPORTED_TYPES(
        BlockDecl, CXXBaseSpecifier, CXXCtorInitializer, CXXFunctionalCastExpr,
        CXXNewExpr, CXXTemporaryObjectExpr, CXXUnresolvedConstructExpr,
        ClassTemplateSpecializationDecl, CompoundLiteralExpr, DeclaratorDecl,
        ExplicitCastExpr, ObjCPropertyDecl, TemplateArgumentLoc,
        TypedefNameDecl),
    internal::Matcher<TypeLoc>, Inner) {
  TypeSourceInfo *source = internal::GetTypeSourceInfo(Node);
  if (source == nullptr) {
    // This happens for example for implicit destructors.
    return false;
  }
  return Inner.matches(source->getTypeLoc(), Finder, Builder);
}
/// Matches if the matched type is represented by the given string.
///
/// Given
/// \code
/// class Y { public: void x(); };
/// void z() { Y* y; y->x(); }
/// \endcode
/// cxxMemberCallExpr(on(hasType(asString("class Y *"))))
/// matches y->x()
AST_MATCHER_P(QualType, asString, std::string, Name) {
  // Compare the type's printed spelling with the expected string.
  return Node.getAsString() == Name;
}
/// Matches if the matched type is a pointer type and the pointee type
/// matches the specified matcher.
///
/// Example matches y->x()
/// (matcher = cxxMemberCallExpr(on(hasType(pointsTo
/// cxxRecordDecl(hasName("Y")))))))
/// \code
/// class Y { public: void x(); };
/// void z() { Y *y; y->x(); }
/// \endcode
AST_MATCHER_P(
    QualType, pointsTo, internal::Matcher<QualType>,
    InnerMatcher) {
  // Fail fast on null types and non-pointer types.
  if (Node.isNull() || !Node->isAnyPointerType())
    return false;
  return InnerMatcher.matches(Node->getPointeeType(), Finder, Builder);
}
/// Overloaded to match the pointee type's declaration.
AST_MATCHER_P_OVERLOAD(QualType, pointsTo, internal::Matcher<Decl>,
                       InnerMatcher, 1) {
  // Delegate to the QualType overload via hasDeclaration.
  const auto PointeeDeclMatcher = pointsTo(qualType(hasDeclaration(InnerMatcher)));
  return PointeeDeclMatcher.matches(Node, Finder, Builder);
}
/// Matches if the matched type matches the unqualified desugared
/// type of the matched node.
///
/// For example, in:
/// \code
/// class A {};
/// using B = A;
/// \endcode
/// The matcher type(hasUnqualifiedDesugaredType(recordType())) matches
/// both B and A.
AST_MATCHER_P(Type, hasUnqualifiedDesugaredType, internal::Matcher<Type>,
              InnerMatcher) {
  // Strip all sugar (typedefs, aliases) and qualifiers before matching.
  const Type &Desugared = *Node.getUnqualifiedDesugaredType();
  return InnerMatcher.matches(Desugared, Finder, Builder);
}
/// Matches if the matched type is a reference type and the referenced
/// type matches the specified matcher.
///
/// Example matches X &x and const X &y
/// (matcher = varDecl(hasType(references(cxxRecordDecl(hasName("X"))))))
/// \code
/// class X {
/// void a(X b) {
/// X &x = b;
/// const X &y = b;
/// }
/// };
/// \endcode
AST_MATCHER_P(QualType, references, internal::Matcher<QualType>,
              InnerMatcher) {
  // Fail fast on null types and non-reference types.
  if (Node.isNull() || !Node->isReferenceType())
    return false;
  return InnerMatcher.matches(Node->getPointeeType(), Finder, Builder);
}
/// Matches QualTypes whose canonical type matches InnerMatcher.
///
/// Given:
/// \code
/// typedef int &int_ref;
/// int a;
/// int_ref b = a;
/// \endcode
///
/// \c varDecl(hasType(qualType(referenceType()))) will not match the
/// declaration of b but \c
/// varDecl(hasType(qualType(hasCanonicalType(referenceType())))) does.
AST_MATCHER_P(QualType, hasCanonicalType, internal::Matcher<QualType>,
              InnerMatcher) {
  // A null QualType has no canonical form; match on the canonical type
  // otherwise (sugar such as typedefs removed).
  return !Node.isNull() &&
         InnerMatcher.matches(Node.getCanonicalType(), Finder, Builder);
}
/// Overloaded to match the referenced type's declaration.
AST_MATCHER_P_OVERLOAD(QualType, references, internal::Matcher<Decl>,
                       InnerMatcher, 1) {
  // Delegate to the QualType overload via hasDeclaration.
  const auto RefDeclMatcher = references(qualType(hasDeclaration(InnerMatcher)));
  return RefDeclMatcher.matches(Node, Finder, Builder);
}
/// Matches on the implicit object argument of a member call expression. Unlike
/// `on`, matches the argument directly without stripping away anything.
///
/// Given
/// \code
/// class Y { public: void m(); };
/// Y g();
/// class X : public Y { void g(); };
/// void z(Y y, X x) { y.m(); x.m(); x.g(); (g()).m(); }
/// \endcode
/// cxxMemberCallExpr(onImplicitObjectArgument(hasType(
/// cxxRecordDecl(hasName("Y")))))
/// matches `y.m()`, `x.m()` and (g()).m(), but not `x.g()`.
/// cxxMemberCallExpr(on(callExpr()))
/// does not match `(g()).m()`, because the parens are not ignored.
///
/// FIXME: Overload to allow directly matching types?
AST_MATCHER_P(CXXMemberCallExpr, onImplicitObjectArgument,
              internal::Matcher<Expr>, InnerMatcher) {
  // Unlike `on`, no paren/implicit-cast stripping is performed here.
  if (const Expr *Object = Node.getImplicitObjectArgument())
    return InnerMatcher.matches(*Object, Finder, Builder);
  return false;
}
/// Matches if the type of the expression's implicit object argument either
/// matches the InnerMatcher, or is a pointer to a type that matches the
/// InnerMatcher.
///
/// Given
/// \code
/// class Y { public: void m(); };
/// class X : public Y { void g(); };
/// void z() { Y y; y.m(); Y *p; p->m(); X x; x.m(); x.g(); }
/// \endcode
/// cxxMemberCallExpr(thisPointerType(hasDeclaration(
/// cxxRecordDecl(hasName("Y")))))
/// matches `y.m()`, `p->m()` and `x.m()`.
/// cxxMemberCallExpr(thisPointerType(hasDeclaration(
/// cxxRecordDecl(hasName("X")))))
/// matches `x.g()`.
AST_MATCHER_P_OVERLOAD(CXXMemberCallExpr, thisPointerType,
                       internal::Matcher<QualType>, InnerMatcher, 0) {
  // Accept the object argument whether the call goes through an object
  // (hasType) or through a pointer (hasType(pointsTo(...))).
  const auto ObjectMatcher = onImplicitObjectArgument(
      anyOf(hasType(InnerMatcher), hasType(pointsTo(InnerMatcher))));
  return ObjectMatcher.matches(Node, Finder, Builder);
}
/// Overloaded to match the type's declaration.
AST_MATCHER_P_OVERLOAD(CXXMemberCallExpr, thisPointerType,
                       internal::Matcher<Decl>, InnerMatcher, 1) {
  // Same shape as the QualType overload, matching by declaration instead.
  const auto ObjectMatcher = onImplicitObjectArgument(
      anyOf(hasType(InnerMatcher), hasType(pointsTo(InnerMatcher))));
  return ObjectMatcher.matches(Node, Finder, Builder);
}
/// Matches a DeclRefExpr that refers to a declaration that matches the
/// specified matcher.
///
/// Example matches x in if(x)
/// (matcher = declRefExpr(to(varDecl(hasName("x")))))
/// \code
/// bool x;
/// if (x) {}
/// \endcode
AST_MATCHER_P(DeclRefExpr, to, internal::Matcher<Decl>,
              InnerMatcher) {
  // Match against the declaration this reference resolves to, if any.
  if (const Decl *Referenced = Node.getDecl())
    return InnerMatcher.matches(*Referenced, Finder, Builder);
  return false;
}
/// Matches a \c DeclRefExpr that refers to a declaration through a
/// specific using shadow declaration.
///
/// Given
/// \code
/// namespace a { void f() {} }
/// using a::f;
/// void g() {
/// f(); // Matches this ..
/// a::f(); // .. but not this.
/// }
/// \endcode
/// declRefExpr(throughUsingDecl(anything()))
/// matches \c f()
AST_MATCHER_P(DeclRefExpr, throughUsingDecl,
              internal::Matcher<UsingShadowDecl>, InnerMatcher) {
  // Only references found through a UsingShadowDecl can match.
  const auto *Shadow = dyn_cast<UsingShadowDecl>(Node.getFoundDecl());
  return Shadow != nullptr && InnerMatcher.matches(*Shadow, Finder, Builder);
}
/// Matches an \c OverloadExpr if any of the declarations in the set of
/// overloads matches the given matcher.
///
/// Given
/// \code
/// template <typename T> void foo(T);
/// template <typename T> void bar(T);
/// template <typename T> void baz(T t) {
/// foo(t);
/// bar(t);
/// }
/// \endcode
/// unresolvedLookupExpr(hasAnyDeclaration(
/// functionTemplateDecl(hasName("foo"))))
/// matches \c foo in \c foo(t); but not \c bar in \c bar(t);
AST_MATCHER_P(OverloadExpr, hasAnyDeclaration, internal::Matcher<Decl>,
              InnerMatcher) {
  // Succeed as soon as any declaration in the overload set matches.
  const auto FirstMatch = matchesFirstInPointerRange(
      InnerMatcher, Node.decls_begin(), Node.decls_end(), Finder, Builder);
  return FirstMatch != Node.decls_end();
}
/// Matches the Decl of a DeclStmt which has a single declaration.
///
/// Given
/// \code
/// int a, b;
/// int c;
/// \endcode
/// declStmt(hasSingleDecl(anything()))
/// matches 'int c;' but not 'int a, b;'.
AST_MATCHER_P(DeclStmt, hasSingleDecl, internal::Matcher<Decl>, InnerMatcher) {
  // Multi-declaration statements never match, regardless of InnerMatcher.
  if (!Node.isSingleDecl())
    return false;
  return InnerMatcher.matches(*Node.getSingleDecl(), Finder, Builder);
}
/// Matches a variable declaration that has an initializer expression
/// that matches the given matcher.
///
/// Example matches x (matcher = varDecl(hasInitializer(callExpr())))
/// \code
/// bool y() { return true; }
/// bool x = y();
/// \endcode
AST_MATCHER_P(
    VarDecl, hasInitializer, internal::Matcher<Expr>,
    InnerMatcher) {
  // getAnyInitializer() returns null for uninitialized variables.
  const Expr *Init = Node.getAnyInitializer();
  if (Init == nullptr)
    return false;
  return InnerMatcher.matches(*Init, Finder, Builder);
}
/// Matches a variable serving as the implicit variable for a lambda init-
/// capture.
///
/// Example matches x (matcher = varDecl(isInitCapture()))
/// \code
/// auto f = [x=3]() { return x; };
/// \endcode
// Delegates to VarDecl::isInitCapture().
AST_MATCHER(VarDecl, isInitCapture) { return Node.isInitCapture(); }
/// Matches each lambda capture in a lambda expression.
///
/// Given
/// \code
/// int main() {
/// int x, y;
/// float z;
/// auto f = [=]() { return x + y + z; };
/// }
/// \endcode
/// lambdaExpr(forEachLambdaCapture(
/// lambdaCapture(capturesVar(varDecl(hasType(isInteger()))))))
/// will trigger two matches, binding for 'x' and 'y' respectively.
AST_MATCHER_P(LambdaExpr, forEachLambdaCapture,
              internal::Matcher<LambdaCapture>, InnerMatcher) {
  // Accumulate one result per matching capture; the final builder holds only
  // the successful matches.
  BoundNodesTreeBuilder Matches;
  bool AnyMatched = false;
  for (const auto &C : Node.captures()) {
    // Skip implicit captures when the traversal ignores implicit nodes.
    if (C.isImplicit() && Finder->isTraversalIgnoringImplicitNodes())
      continue;
    BoundNodesTreeBuilder PerCapture(*Builder);
    if (InnerMatcher.matches(C, Finder, &PerCapture)) {
      AnyMatched = true;
      Matches.addMatch(PerCapture);
    }
  }
  *Builder = std::move(Matches);
  return AnyMatched;
}
/// \brief Matches a static variable with local scope.
///
/// Example matches y (matcher = varDecl(isStaticLocal()))
/// \code
/// void f() {
/// int x;
/// static int y;
/// }
/// static int z;
/// \endcode
AST_MATCHER(VarDecl, isStaticLocal) {
  // Delegates to VarDecl::isStaticLocal(): function-local statics only, not
  // file-scope statics.
  return Node.isStaticLocal();
}
/// Matches a variable declaration that has function scope and is a
/// non-static local variable.
///
/// Example matches x (matcher = varDecl(hasLocalStorage())
/// \code
/// void f() {
/// int x;
/// static int y;
/// }
/// int z;
/// \endcode
AST_MATCHER(VarDecl, hasLocalStorage) {
  // Delegates to VarDecl::hasLocalStorage().
  return Node.hasLocalStorage();
}
/// Matches a variable declaration that does not have local storage.
///
/// Example matches y and z (matcher = varDecl(hasGlobalStorage())
/// \code
/// void f() {
/// int x;
/// static int y;
/// }
/// int z;
/// \endcode
AST_MATCHER(VarDecl, hasGlobalStorage) {
  // Delegates to VarDecl::hasGlobalStorage() — the complement of
  // hasLocalStorage (so function-local statics match too, per the example).
  return Node.hasGlobalStorage();
}
/// Matches a variable declaration that has automatic storage duration.
///
/// Example matches x, but not y, z, or a.
/// (matcher = varDecl(hasAutomaticStorageDuration())
/// \code
/// void f() {
/// int x;
/// static int y;
/// thread_local int z;
/// }
/// int a;
/// \endcode
AST_MATCHER(VarDecl, hasAutomaticStorageDuration) {
  // Compares the variable's storage duration against SD_Automatic.
  return Node.getStorageDuration() == SD_Automatic;
}
/// Matches a variable declaration that has static storage duration.
/// It includes the variable declared at namespace scope and those declared
/// with "static" and "extern" storage class specifiers.
///
/// \code
/// void f() {
/// int x;
/// static int y;
/// thread_local int z;
/// }
/// int a;
/// static int b;
/// extern int c;
/// varDecl(hasStaticStorageDuration())
/// matches the variable declarations y, a, b and c.
/// \endcode
AST_MATCHER(VarDecl, hasStaticStorageDuration) {
  // Compares the variable's storage duration against SD_Static.
  return Node.getStorageDuration() == SD_Static;
}
/// Matches a variable declaration that has thread storage duration.
///
/// Example matches z, but not x, y, or a.
/// (matcher = varDecl(hasThreadStorageDuration())
/// \code
/// void f() {
/// int x;
/// static int y;
/// thread_local int z;
/// }
/// int a;
/// \endcode
AST_MATCHER(VarDecl, hasThreadStorageDuration) {
  // Compares the variable's storage duration against SD_Thread.
  return Node.getStorageDuration() == SD_Thread;
}
/// Matches a variable declaration that is an exception variable from
/// a C++ catch block, or an Objective-C \@catch statement.
///
/// Example matches x (matcher = varDecl(isExceptionVariable())
/// \code
/// void f(int y) {
/// try {
/// } catch (int x) {
/// }
/// }
/// \endcode
AST_MATCHER(VarDecl, isExceptionVariable) {
  // Delegates to VarDecl::isExceptionVariable().
  return Node.isExceptionVariable();
}
/// Checks that a call expression or a constructor call expression has
/// a specific number of arguments (including absent default arguments).
///
/// Example matches f(0, 0) (matcher = callExpr(argumentCountIs(2)))
/// \code
/// void f(int x, int y);
/// f(0, 0);
/// \endcode
AST_POLYMORPHIC_MATCHER_P(argumentCountIs,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(
                              CallExpr, CXXConstructExpr,
                              CXXUnresolvedConstructExpr, ObjCMessageExpr),
                          unsigned, N) {
  unsigned EffectiveArgs = Node.getNumArgs();
  // When the traversal ignores implicit nodes, trailing CXXDefaultArgExpr
  // arguments are not counted.
  if (Finder->isTraversalIgnoringImplicitNodes()) {
    while (EffectiveArgs > 0 &&
           isa<CXXDefaultArgExpr>(Node.getArg(EffectiveArgs - 1)))
      --EffectiveArgs;
  }
  return EffectiveArgs == N;
}
/// Matches the n'th argument of a call expression or a constructor
/// call expression.
///
/// Example matches y in x(y)
/// (matcher = callExpr(hasArgument(0, declRefExpr())))
/// \code
/// void x(int) { int y; x(y); }
/// \endcode
AST_POLYMORPHIC_MATCHER_P2(hasArgument,
                           AST_POLYMORPHIC_SUPPORTED_TYPES(
                               CallExpr, CXXConstructExpr,
                               CXXUnresolvedConstructExpr, ObjCMessageExpr),
                           unsigned, N, internal::Matcher<Expr>, InnerMatcher) {
  // Out-of-range index never matches.
  if (N >= Node.getNumArgs())
    return false;
  const Expr *Arg = Node.getArg(N);
  // Skip defaulted arguments when the traversal ignores implicit nodes.
  if (isa<CXXDefaultArgExpr>(Arg) && Finder->isTraversalIgnoringImplicitNodes())
    return false;
  return InnerMatcher.matches(*Arg->IgnoreParenImpCasts(), Finder, Builder);
}
/// Matches the n'th item of an initializer list expression.
///
/// Example matches y.
/// (matcher = initListExpr(hasInit(0, expr())))
/// \code
/// int x{y};
/// \endcode
AST_MATCHER_P2(InitListExpr, hasInit, unsigned, N,
               ast_matchers::internal::Matcher<Expr>, InnerMatcher) {
  // Out-of-range index never matches.
  if (N >= Node.getNumInits())
    return false;
  return InnerMatcher.matches(*Node.getInit(N), Finder, Builder);
}
/// Matches declaration statements that contain a specific number of
/// declarations.
///
/// Example: Given
/// \code
/// int a, b;
/// int c;
/// int d = 2, e;
/// \endcode
/// declCountIs(2)
/// matches 'int a, b;' and 'int d = 2, e;', but not 'int c;'.
AST_MATCHER_P(DeclStmt, declCountIs, unsigned, N) {
  // std::distance yields a ptrdiff_t; use a named static_cast instead of a
  // C-style cast for the signed/unsigned comparison.
  return std::distance(Node.decl_begin(), Node.decl_end()) ==
         static_cast<ptrdiff_t>(N);
}
/// Matches the n'th declaration of a declaration statement.
///
/// Note that this does not work for global declarations because the AST
/// breaks up multiple-declaration DeclStmt's into multiple single-declaration
/// DeclStmt's.
/// Example: Given non-global declarations
/// \code
/// int a, b = 0;
/// int c;
/// int d = 2, e;
/// \endcode
/// declStmt(containsDeclaration(
/// 0, varDecl(hasInitializer(anything()))))
/// matches only 'int d = 2, e;', and
/// declStmt(containsDeclaration(1, varDecl()))
/// \code
/// matches 'int a, b = 0' as well as 'int d = 2, e;'
/// but 'int c;' is not matched.
/// \endcode
AST_MATCHER_P2(DeclStmt, containsDeclaration, unsigned, N,
               internal::Matcher<Decl>, InnerMatcher) {
  // Bounds-check before walking the (forward) declaration iterator.
  auto It = Node.decl_begin();
  const unsigned Available = std::distance(It, Node.decl_end());
  if (N >= Available)
    return false;
  std::advance(It, N);
  return InnerMatcher.matches(**It, Finder, Builder);
}
/// Matches a C++ catch statement that has a catch-all handler.
///
/// Given
/// \code
/// try {
/// // ...
/// } catch (int) {
/// // ...
/// } catch (...) {
/// // ...
/// }
/// \endcode
/// cxxCatchStmt(isCatchAll()) matches catch(...) but not catch(int).
AST_MATCHER(CXXCatchStmt, isCatchAll) {
  // A catch-all handler ('catch (...)') carries no exception declaration.
  return Node.getExceptionDecl() == nullptr;
}
/// Matches a constructor initializer.
///
/// Given
/// \code
/// struct Foo {
/// Foo() : foo_(1) { }
/// int foo_;
/// };
/// \endcode
/// cxxRecordDecl(has(cxxConstructorDecl(
/// hasAnyConstructorInitializer(anything())
/// )))
/// record matches Foo, hasAnyConstructorInitializer matches foo_(1)
AST_MATCHER_P(CXXConstructorDecl, hasAnyConstructorInitializer,
              internal::Matcher<CXXCtorInitializer>, InnerMatcher) {
  // Find the first constructor initializer the inner matcher accepts.
  auto MatchIt = matchesFirstInPointerRange(InnerMatcher, Node.init_begin(),
                                            Node.init_end(), Finder, Builder);
  if (MatchIt == Node.init_end())
    return false;
  // In a traversal mode that ignores implicit nodes, only initializers that
  // were explicitly written in the source count as matches.
  return (*MatchIt)->isWritten() || !Finder->isTraversalIgnoringImplicitNodes();
}
/// Matches the field declaration of a constructor initializer.
///
/// Given
/// \code
/// struct Foo {
/// Foo() : foo_(1) { }
/// int foo_;
/// };
/// \endcode
/// cxxRecordDecl(has(cxxConstructorDecl(hasAnyConstructorInitializer(
/// forField(hasName("foo_"))))))
/// matches Foo
/// with forField matching foo_
AST_MATCHER_P(CXXCtorInitializer, forField,
              internal::Matcher<FieldDecl>, InnerMatcher) {
  // Base and delegating initializers have no associated member, so only run
  // the inner matcher when the initializer actually targets a field.
  if (const FieldDecl *Member = Node.getAnyMember())
    return InnerMatcher.matches(*Member, Finder, Builder);
  return false;
}
/// Matches the initializer expression of a constructor initializer.
///
/// Given
/// \code
/// struct Foo {
/// Foo() : foo_(1) { }
/// int foo_;
/// };
/// \endcode
/// cxxRecordDecl(has(cxxConstructorDecl(hasAnyConstructorInitializer(
/// withInitializer(integerLiteral(equals(1)))))))
/// matches Foo
/// with withInitializer matching (1)
AST_MATCHER_P(CXXCtorInitializer, withInitializer,
              internal::Matcher<Expr>, InnerMatcher) {
  // Run the inner matcher on the initializing expression, when one exists.
  if (const Expr *Init = Node.getInit())
    return InnerMatcher.matches(*Init, Finder, Builder);
  return false;
}
/// Matches a constructor initializer if it is explicitly written in
/// code (as opposed to implicitly added by the compiler).
///
/// Given
/// \code
/// struct Foo {
/// Foo() { }
/// Foo(int) : foo_("A") { }
/// string foo_;
/// };
/// \endcode
/// cxxConstructorDecl(hasAnyConstructorInitializer(isWritten()))
/// will match Foo(int), but not Foo()
AST_MATCHER(CXXCtorInitializer, isWritten) {
  // True only for initializers spelled out in the source, not for ones the
  // compiler synthesized.
  return Node.isWritten();
}
/// Matches a constructor initializer if it is initializing a base, as
/// opposed to a member.
///
/// Given
/// \code
/// struct B {};
/// struct D : B {
/// int I;
/// D(int i) : I(i) {}
/// };
/// struct E : B {
/// E() : B() {}
/// };
/// \endcode
/// cxxConstructorDecl(hasAnyConstructorInitializer(isBaseInitializer()))
/// will match E(), but not match D(int).
AST_MATCHER(CXXCtorInitializer, isBaseInitializer) {
  // True when the initializer initializes a base class subobject.
  return Node.isBaseInitializer();
}
/// Matches a constructor initializer if it is initializing a member, as
/// opposed to a base.
///
/// Given
/// \code
/// struct B {};
/// struct D : B {
/// int I;
/// D(int i) : I(i) {}
/// };
/// struct E : B {
/// E() : B() {}
/// };
/// \endcode
/// cxxConstructorDecl(hasAnyConstructorInitializer(isMemberInitializer()))
/// will match D(int), but not match E().
AST_MATCHER(CXXCtorInitializer, isMemberInitializer) {
  // True when the initializer initializes a non-static data member.
  return Node.isMemberInitializer();
}
/// Matches any argument of a call expression or a constructor call
/// expression, or an ObjC-message-send expression.
///
/// Given
/// \code
/// void x(int, int, int) { int y; x(1, y, 42); }
/// \endcode
/// callExpr(hasAnyArgument(declRefExpr()))
/// matches x(1, y, 42)
/// with hasAnyArgument(...)
/// matching y
///
/// For ObjectiveC, given
/// \code
/// @interface I - (void) f:(int) y; @end
/// void foo(I *i) { [i f:12]; }
/// \endcode
/// objcMessageExpr(hasAnyArgument(integerLiteral(equals(12))))
/// matches [i f:12]
AST_POLYMORPHIC_MATCHER_P(hasAnyArgument,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(
                              CallExpr, CXXConstructExpr,
                              CXXUnresolvedConstructExpr, ObjCMessageExpr),
                          internal::Matcher<Expr>, InnerMatcher) {
  for (const Expr *Arg : Node.arguments()) {
    // When implicit nodes are ignored, stop at the first defaulted argument:
    // default arguments trail the explicit ones and are compiler-supplied.
    if (Finder->isTraversalIgnoringImplicitNodes() &&
        isa<CXXDefaultArgExpr>(Arg))
      break;
    // Match into a scratch builder so a failed attempt leaves the caller's
    // bindings untouched; commit only on success.
    BoundNodesTreeBuilder Result(*Builder);
    if (InnerMatcher.matches(*Arg, Finder, &Result)) {
      *Builder = std::move(Result);
      return true;
    }
  }
  return false;
}
/// Matches lambda captures.
///
/// Given
/// \code
/// int main() {
/// int x;
/// auto f = [x](){};
/// auto g = [x = 1](){};
/// }
/// \endcode
/// In the matcher `lambdaExpr(hasAnyCapture(lambdaCapture()))`,
/// `lambdaCapture()` matches `x` and `x=1`.
extern const internal::VariadicAllOfMatcher<LambdaCapture> lambdaCapture; // matcher instance; defined out-of-line
/// Matches any capture in a lambda expression.
///
/// Given
/// \code
/// void foo() {
/// int t = 5;
/// auto f = [=](){ return t; };
/// }
/// \endcode
/// lambdaExpr(hasAnyCapture(lambdaCapture())) and
/// lambdaExpr(hasAnyCapture(lambdaCapture(refersToVarDecl(hasName("t")))))
/// both match `[=](){ return t; }`.
AST_MATCHER_P(LambdaExpr, hasAnyCapture, internal::Matcher<LambdaCapture>,
              InnerMatcher) {
  // Try each capture in turn, matching into a scratch builder so failed
  // attempts do not disturb existing bindings; commit the first success.
  for (const LambdaCapture &Capture : Node.captures()) {
    BoundNodesTreeBuilder CaptureResult(*Builder);
    if (!InnerMatcher.matches(Capture, Finder, &CaptureResult))
      continue;
    *Builder = std::move(CaptureResult);
    return true;
  }
  return false;
}
/// Matches a `LambdaCapture` that refers to the specified `VarDecl`. The
/// `VarDecl` can be a separate variable that is captured by value or
/// reference, or a synthesized variable if the capture has an initializer.
///
/// Given
/// \code
/// void foo() {
/// int x;
/// auto f = [x](){};
/// auto g = [x = 1](){};
/// }
/// \endcode
/// In the matcher
/// lambdaExpr(hasAnyCapture(lambdaCapture(capturesVar(hasName("x")))),
/// capturesVar(hasName("x")) matches `x` and `x = 1`.
AST_MATCHER_P(LambdaCapture, capturesVar, internal::Matcher<VarDecl>,
              InnerMatcher) {
  // Captures without an associated variable (e.g. 'this') never match.
  if (auto *CapturedVar = Node.getCapturedVar())
    return InnerMatcher.matches(*CapturedVar, Finder, Builder);
  return false;
}
/// Matches a `LambdaCapture` that refers to 'this'.
///
/// Given
/// \code
/// class C {
/// int cc;
/// int f() {
/// auto l = [this]() { return cc; };
/// return l();
/// }
/// };
/// \endcode
/// lambdaExpr(hasAnyCapture(lambdaCapture(capturesThis())))
/// matches `[this]() { return cc; }`.
AST_MATCHER(LambdaCapture, capturesThis) { return Node.capturesThis(); } // capture of the enclosing 'this'
/// Matches a constructor call expression which uses list initialization.
AST_MATCHER(CXXConstructExpr, isListInitialization) {
  // True when the construction was written with braces (list-init syntax).
  return Node.isListInitialization();
}
/// Matches a constructor call expression which requires
/// zero initialization.
///
/// Given
/// \code
/// void foo() {
/// struct point { double x; double y; };
/// point pt[2] = { { 1.0, 2.0 } };
/// }
/// \endcode
/// initListExpr(has(cxxConstructExpr(requiresZeroInitialization()))
/// will match the implicit array filler for pt[1].
AST_MATCHER(CXXConstructExpr, requiresZeroInitialization) {
  // True for constructions that must zero-initialize the object first,
  // e.g. implicit array fillers (see the example above).
  return Node.requiresZeroInitialization();
}
/// Matches the n'th parameter of a function or an ObjC method
/// declaration or a block.
///
/// Given
/// \code
/// class X { void f(int x) {} };
/// \endcode
/// cxxMethodDecl(hasParameter(0, hasType(varDecl())))
/// matches f(int x) {}
/// with hasParameter(...)
/// matching int x
///
/// For ObjectiveC, given
/// \code
/// @interface I - (void) f:(int) y; @end
/// \endcode
///
/// the matcher objcMethodDecl(hasParameter(0, hasName("y")))
/// matches the declaration of method f with hasParameter
/// matching y.
AST_POLYMORPHIC_MATCHER_P2(hasParameter,
                           AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
                                                           ObjCMethodDecl,
                                                           BlockDecl),
                           unsigned, N, internal::Matcher<ParmVarDecl>,
                           InnerMatcher) {
  // Reject out-of-range indices before touching the parameter list.
  if (N >= Node.parameters().size())
    return false;
  return InnerMatcher.matches(*Node.parameters()[N], Finder, Builder);
}
/// Matches all arguments and their respective ParmVarDecl.
///
/// Given
/// \code
/// void f(int i);
/// int y;
/// f(y);
/// \endcode
/// callExpr(
/// forEachArgumentWithParam(
/// declRefExpr(to(varDecl(hasName("y")))),
/// parmVarDecl(hasType(isInteger()))
/// ))
/// matches f(y);
/// with declRefExpr(...)
/// matching int y
/// and parmVarDecl(...)
/// matching int i
AST_POLYMORPHIC_MATCHER_P2(forEachArgumentWithParam,
                           AST_POLYMORPHIC_SUPPORTED_TYPES(CallExpr,
                                                           CXXConstructExpr),
                           internal::Matcher<Expr>, ArgMatcher,
                           internal::Matcher<ParmVarDecl>, ParamMatcher) {
  // Accumulates the bindings of every (argument, parameter) pair for which
  // both matchers succeed; all of them are committed to Builder at the end.
  BoundNodesTreeBuilder Result;
  // The first argument of an overloaded member operator is the implicit object
  // argument of the method which should not be matched against a parameter, so
  // we skip over it here.
  BoundNodesTreeBuilder Matches;
  unsigned ArgIndex = cxxOperatorCallExpr(callee(cxxMethodDecl()))
                          .matches(Node, Finder, &Matches)
                      ? 1
                      : 0;
  // ParamIndex stays zero-based even when ArgIndex starts at 1: the skipped
  // implicit object argument has no corresponding parameter.
  int ParamIndex = 0;
  bool Matched = false;
  for (; ArgIndex < Node.getNumArgs(); ++ArgIndex) {
    BoundNodesTreeBuilder ArgMatches(*Builder);
    if (ArgMatcher.matches(*(Node.getArg(ArgIndex)->IgnoreParenCasts()),
                           Finder, &ArgMatches)) {
      BoundNodesTreeBuilder ParamMatches(ArgMatches);
      // Resolve the callee (constructor or function) and match the parameter
      // at the same position against ParamMatcher.
      if (expr(anyOf(cxxConstructExpr(hasDeclaration(cxxConstructorDecl(
                         hasParameter(ParamIndex, ParamMatcher)))),
                     callExpr(callee(functionDecl(
                         hasParameter(ParamIndex, ParamMatcher))))))
              .matches(Node, Finder, &ParamMatches)) {
        Result.addMatch(ParamMatches);
        Matched = true;
      }
    }
    ++ParamIndex;
  }
  *Builder = std::move(Result);
  return Matched;
}
/// Matches all arguments and their respective types for a \c CallExpr or
/// \c CXXConstructExpr. It is very similar to \c forEachArgumentWithParam but
/// it works on calls through function pointers as well.
///
/// The difference is, that function pointers do not provide access to a
/// \c ParmVarDecl, but only the \c QualType for each argument.
///
/// Given
/// \code
/// void f(int i);
/// int y;
/// f(y);
/// void (*f_ptr)(int) = f;
/// f_ptr(y);
/// \endcode
/// callExpr(
/// forEachArgumentWithParamType(
/// declRefExpr(to(varDecl(hasName("y")))),
///     qualType(isInteger()).bind("type")
/// ))
/// matches f(y) and f_ptr(y)
/// with declRefExpr(...)
/// matching int y
/// and qualType(...)
/// matching int
AST_POLYMORPHIC_MATCHER_P2(forEachArgumentWithParamType,
                           AST_POLYMORPHIC_SUPPORTED_TYPES(CallExpr,
                                                           CXXConstructExpr),
                           internal::Matcher<Expr>, ArgMatcher,
                           internal::Matcher<QualType>, ParamMatcher) {
  // Accumulates the bindings of every (argument, parameter-type) pair for
  // which both matchers succeed; committed to Builder at the end.
  BoundNodesTreeBuilder Result;
  // The first argument of an overloaded member operator is the implicit object
  // argument of the method which should not be matched against a parameter, so
  // we skip over it here.
  BoundNodesTreeBuilder Matches;
  unsigned ArgIndex = cxxOperatorCallExpr(callee(cxxMethodDecl()))
                          .matches(Node, Finder, &Matches)
                      ? 1
                      : 0;
  // For calls through (member) function pointers there is no ParmVarDecl to
  // consult, but the prototype still carries the parameter types.
  const FunctionProtoType *FProto = nullptr;
  if (const auto *Call = dyn_cast<CallExpr>(&Node)) {
    if (const auto *Value =
            dyn_cast_or_null<ValueDecl>(Call->getCalleeDecl())) {
      QualType QT = Value->getType().getCanonicalType();
      // This does not necessarily lead to a `FunctionProtoType`,
      // e.g. K&R functions do not have a function prototype.
      if (QT->isFunctionPointerType())
        FProto = QT->getPointeeType()->getAs<FunctionProtoType>();
      if (QT->isMemberFunctionPointerType()) {
        const auto *MP = QT->getAs<MemberPointerType>();
        assert(MP && "Must be member-pointer if its a memberfunctionpointer");
        FProto = MP->getPointeeType()->getAs<FunctionProtoType>();
        assert(FProto &&
               "The call must have happened through a member function "
               "pointer");
      }
    }
  }
  int ParamIndex = 0;
  bool Matched = false;
  unsigned NumArgs = Node.getNumArgs();
  // For variadic prototypes, only the declared parameters have types to
  // match against; ignore the trailing variadic arguments.
  if (FProto && FProto->isVariadic())
    NumArgs = std::min(NumArgs, FProto->getNumParams());
  for (; ArgIndex < NumArgs; ++ArgIndex, ++ParamIndex) {
    BoundNodesTreeBuilder ArgMatches(*Builder);
    if (ArgMatcher.matches(*(Node.getArg(ArgIndex)->IgnoreParenCasts()), Finder,
                           &ArgMatches)) {
      BoundNodesTreeBuilder ParamMatches(ArgMatches);
      // This test is cheaper compared to the big matcher in the next if.
      // Therefore, please keep this order.
      if (FProto) {
        QualType ParamType = FProto->getParamType(ParamIndex);
        if (ParamMatcher.matches(ParamType, Finder, &ParamMatches)) {
          Result.addMatch(ParamMatches);
          Matched = true;
          continue;
        }
      }
      // Fallback: resolve the callee declaration and match the type of the
      // parameter at the same position.
      if (expr(anyOf(cxxConstructExpr(hasDeclaration(cxxConstructorDecl(
                         hasParameter(ParamIndex, hasType(ParamMatcher))))),
                     callExpr(callee(functionDecl(
                         hasParameter(ParamIndex, hasType(ParamMatcher)))))))
              .matches(Node, Finder, &ParamMatches)) {
        Result.addMatch(ParamMatches);
        Matched = true;
        continue;
      }
    }
  }
  *Builder = std::move(Result);
  return Matched;
}
/// Matches the ParmVarDecl nodes that are at the N'th position in the parameter
/// list. The parameter list could be that of either a block, function, or
/// objc-method.
///
///
/// Given
///
/// \code
/// void f(int a, int b, int c) {
/// }
/// \endcode
///
/// ``parmVarDecl(isAtPosition(0))`` matches ``int a``.
///
/// ``parmVarDecl(isAtPosition(1))`` matches ``int b``.
AST_MATCHER_P(ParmVarDecl, isAtPosition, unsigned, N) {
  const clang::DeclContext *Context = Node.getParentFunctionOrMethod();
  // The parent can be a function, a block, or an ObjC method; in each case
  // verify that slot N exists and holds exactly this declaration.
  auto IsNthParam = [&](const auto *Callable) {
    return Callable && N < Callable->param_size() &&
           Callable->getParamDecl(N) == &Node;
  };
  return IsNthParam(dyn_cast_or_null<FunctionDecl>(Context)) ||
         IsNthParam(dyn_cast_or_null<BlockDecl>(Context)) ||
         IsNthParam(dyn_cast_or_null<ObjCMethodDecl>(Context));
}
/// Matches any parameter of a function or an ObjC method declaration or a
/// block.
///
/// Does not match the 'this' parameter of a method.
///
/// Given
/// \code
/// class X { void f(int x, int y, int z) {} };
/// \endcode
/// cxxMethodDecl(hasAnyParameter(hasName("y")))
/// matches f(int x, int y, int z) {}
/// with hasAnyParameter(...)
/// matching int y
///
/// For ObjectiveC, given
/// \code
/// @interface I - (void) f:(int) y; @end
/// \endcode
///
/// the matcher objcMethodDecl(hasAnyParameter(hasName("y")))
/// matches the declaration of method f with hasParameter
/// matching y.
///
/// For blocks, given
/// \code
/// b = ^(int y) { printf("%d", y) };
/// \endcode
///
/// the matcher blockDecl(hasAnyParameter(hasName("y")))
/// matches the declaration of the block b with hasParameter
/// matching y.
AST_POLYMORPHIC_MATCHER_P(hasAnyParameter,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
                                                          ObjCMethodDecl,
                                                          BlockDecl),
                          internal::Matcher<ParmVarDecl>,
                          InnerMatcher) {
  // Succeed if any parameter satisfies the inner matcher; the helper returns
  // the end iterator when nothing matched.
  return matchesFirstInPointerRange(InnerMatcher, Node.param_begin(),
                                    Node.param_end(), Finder,
                                    Builder) != Node.param_end();
}
/// Matches \c FunctionDecls and \c FunctionProtoTypes that have a
/// specific parameter count.
///
/// Given
/// \code
/// void f(int i) {}
/// void g(int i, int j) {}
/// void h(int i, int j);
/// void j(int i);
/// void k(int x, int y, int z, ...);
/// \endcode
/// functionDecl(parameterCountIs(2))
/// matches \c g and \c h
/// functionProtoType(parameterCountIs(2))
/// matches \c g and \c h
/// functionProtoType(parameterCountIs(3))
/// matches \c k
AST_POLYMORPHIC_MATCHER_P(parameterCountIs,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
                                                          FunctionProtoType),
                          unsigned, N) {
  // Compares against the number of declared parameters (variadic '...' is
  // not counted; see the example with 'k' above).
  return Node.getNumParams() == N;
}
/// Matches \c FunctionDecls that have a noreturn attribute.
///
/// Given
/// \code
/// void nope();
/// [[noreturn]] void a();
/// __attribute__((noreturn)) void b();
/// struct c { [[noreturn]] c(); };
/// \endcode
/// functionDecl(isNoReturn())
/// matches all of those except
/// \code
/// void nope();
/// \endcode
AST_MATCHER(FunctionDecl, isNoReturn) { return Node.isNoReturn(); } // [[noreturn]] or __attribute__((noreturn))
/// Matches the return type of a function declaration.
///
/// Given:
/// \code
/// class X { int f() { return 1; } };
/// \endcode
/// cxxMethodDecl(returns(asString("int")))
/// matches int f() { return 1; }
AST_MATCHER_P(FunctionDecl, returns,
              internal::Matcher<QualType>, InnerMatcher) {
  // Delegate directly to the inner matcher on the declared return type.
  return InnerMatcher.matches(Node.getReturnType(), Finder, Builder);
}
/// Matches extern "C" function or variable declarations.
///
/// Given:
/// \code
/// extern "C" void f() {}
/// extern "C" { void g() {} }
/// void h() {}
/// extern "C" int x = 1;
/// extern "C" int y = 2;
/// int z = 3;
/// \endcode
/// functionDecl(isExternC())
/// matches the declaration of f and g, but not the declaration of h.
/// varDecl(isExternC())
/// matches the declaration of x and y, but not the declaration of z.
AST_POLYMORPHIC_MATCHER(isExternC, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
                                                                   VarDecl)) {
  // True for declarations with C language linkage (inside extern "C").
  return Node.isExternC();
}
/// Matches variable/function declarations that have "static" storage
/// class specifier ("static" keyword) written in the source.
///
/// Given:
/// \code
/// static void f() {}
/// static int i = 0;
/// extern int j;
/// int k;
/// \endcode
/// functionDecl(isStaticStorageClass())
/// matches the function declaration f.
/// varDecl(isStaticStorageClass())
/// matches the variable declaration i.
AST_POLYMORPHIC_MATCHER(isStaticStorageClass,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
                                                        VarDecl)) {
  // Checks the written storage class, i.e. an explicit 'static' keyword.
  return Node.getStorageClass() == SC_Static;
}
/// Matches deleted function declarations.
///
/// Given:
/// \code
/// void Func();
/// void DeletedFunc() = delete;
/// \endcode
/// functionDecl(isDeleted())
/// matches the declaration of DeletedFunc, but not Func.
AST_MATCHER(FunctionDecl, isDeleted) {
  // True for functions defined as '= delete'.
  return Node.isDeleted();
}
/// Matches defaulted function declarations.
///
/// Given:
/// \code
/// class A { ~A(); };
/// class B { ~B() = default; };
/// \endcode
/// functionDecl(isDefaulted())
/// matches the declaration of ~B, but not ~A.
AST_MATCHER(FunctionDecl, isDefaulted) {
  // True for functions defined as '= default'.
  return Node.isDefaulted();
}
/// Matches weak function declarations.
///
/// Given:
/// \code
/// void foo() __attribute__((__weakref__("__foo")));
/// void bar();
/// \endcode
/// functionDecl(isWeak())
/// matches the weak declaration "foo", but not "bar".
AST_MATCHER(FunctionDecl, isWeak) { return Node.isWeak(); } // weak linkage, e.g. __attribute__((__weakref__))
/// Matches functions that have a dynamic exception specification.
///
/// Given:
/// \code
/// void f();
/// void g() noexcept;
/// void h() noexcept(true);
/// void i() noexcept(false);
/// void j() throw();
/// void k() throw(int);
/// void l() throw(...);
/// \endcode
/// functionDecl(hasDynamicExceptionSpec()) and
/// functionProtoType(hasDynamicExceptionSpec())
/// match the declarations of j, k, and l, but not f, g, h, or i.
AST_POLYMORPHIC_MATCHER(hasDynamicExceptionSpec,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
                                                        FunctionProtoType)) {
  // Only functions with a prototype can carry an exception specification;
  // anything else cannot have a dynamic one.
  const FunctionProtoType *FnTy = internal::getFunctionProtoType(Node);
  return FnTy != nullptr && FnTy->hasDynamicExceptionSpec();
}
/// Matches functions that have a non-throwing exception specification.
///
/// Given:
/// \code
/// void f();
/// void g() noexcept;
/// void h() throw();
/// void i() throw(int);
/// void j() noexcept(false);
/// \endcode
/// functionDecl(isNoThrow()) and functionProtoType(isNoThrow())
/// match the declarations of g, and h, but not f, i or j.
AST_POLYMORPHIC_MATCHER(isNoThrow,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
                                                        FunctionProtoType)) {
  const FunctionProtoType *FnTy = internal::getFunctionProtoType(Node);
  // If the function does not have a prototype, then it is assumed to be a
  // throwing function (as it would if the function did not have any exception
  // specification).
  if (!FnTy)
    return false;
  // Assume the best for any unresolved exception specification.
  if (isUnresolvedExceptionSpec(FnTy->getExceptionSpecType()))
    return true;
  // Otherwise defer to the prototype: noexcept / throw() are non-throwing.
  return FnTy->isNothrow();
}
/// Matches constexpr variable and function declarations,
/// and if constexpr.
///
/// Given:
/// \code
/// constexpr int foo = 42;
/// constexpr int bar();
/// void baz() { if constexpr(1 > 0) {} }
/// \endcode
/// varDecl(isConstexpr())
/// matches the declaration of foo.
/// functionDecl(isConstexpr())
/// matches the declaration of bar.
/// ifStmt(isConstexpr())
/// matches the if statement in baz.
AST_POLYMORPHIC_MATCHER(isConstexpr,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(VarDecl,
                                                        FunctionDecl,
                                                        IfStmt)) {
  // All three supported node types expose isConstexpr() with the expected
  // meaning ('constexpr' variable/function, 'if constexpr').
  return Node.isConstexpr();
}
/// Matches selection statements with initializer.
///
/// Given:
/// \code
/// void foo() {
/// if (int i = foobar(); i > 0) {}
/// switch (int i = foobar(); i) {}
/// for (auto& a = get_range(); auto& x : a) {}
/// }
/// void bar() {
/// if (foobar() > 0) {}
/// switch (foobar()) {}
/// for (auto& x : get_range()) {}
/// }
/// \endcode
/// ifStmt(hasInitStatement(anything()))
/// matches the if statement in foo but not in bar.
/// switchStmt(hasInitStatement(anything()))
/// matches the switch statement in foo but not in bar.
/// cxxForRangeStmt(hasInitStatement(anything()))
/// matches the range for statement in foo but not in bar.
AST_POLYMORPHIC_MATCHER_P(hasInitStatement,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(IfStmt, SwitchStmt,
                                                          CXXForRangeStmt),
                          internal::Matcher<Stmt>, InnerMatcher) {
  // Statements without an init-statement never match.
  if (const Stmt *Init = Node.getInit())
    return InnerMatcher.matches(*Init, Finder, Builder);
  return false;
}
/// Matches the condition expression of an if statement, for loop,
/// switch statement or conditional operator.
///
/// Example matches true (matcher = hasCondition(cxxBoolLiteral(equals(true))))
/// \code
/// if (true) {}
/// \endcode
AST_POLYMORPHIC_MATCHER_P(
    hasCondition,
    AST_POLYMORPHIC_SUPPORTED_TYPES(IfStmt, ForStmt, WhileStmt, DoStmt,
                                    SwitchStmt, AbstractConditionalOperator),
    internal::Matcher<Expr>, InnerMatcher) {
  // A condition may be absent, e.g. 'for (;;)'.
  if (const Expr *Cond = Node.getCond())
    return InnerMatcher.matches(*Cond, Finder, Builder);
  return false;
}
/// Matches the then-statement of an if statement.
///
/// Examples matches the if statement
/// (matcher = ifStmt(hasThen(cxxBoolLiteral(equals(true)))))
/// \code
/// if (false) true; else false;
/// \endcode
AST_MATCHER_P(IfStmt, hasThen, internal::Matcher<Stmt>, InnerMatcher) {
  // Run the inner matcher on the then-branch, when present.
  if (const Stmt *ThenBranch = Node.getThen())
    return InnerMatcher.matches(*ThenBranch, Finder, Builder);
  return false;
}
/// Matches the else-statement of an if statement.
///
/// Examples matches the if statement
/// (matcher = ifStmt(hasElse(cxxBoolLiteral(equals(true)))))
/// \code
/// if (false) false; else true;
/// \endcode
AST_MATCHER_P(IfStmt, hasElse, internal::Matcher<Stmt>, InnerMatcher) {
  // Not every if-statement has an else-branch; those never match.
  if (const Stmt *ElseBranch = Node.getElse())
    return InnerMatcher.matches(*ElseBranch, Finder, Builder);
  return false;
}
/// Matches if a node equals a previously bound node.
///
/// Matches a node if it equals the node previously bound to \p ID.
///
/// Given
/// \code
/// class X { int a; int b; };
/// \endcode
/// cxxRecordDecl(
/// has(fieldDecl(hasName("a"), hasType(type().bind("t")))),
/// has(fieldDecl(hasName("b"), hasType(type(equalsBoundNode("t"))))))
/// matches the class \c X, as \c a and \c b have the same type.
///
/// Note that when multiple matches are involved via \c forEach* matchers,
/// \c equalsBoundNodes acts as a filter.
/// For example:
/// compoundStmt(
/// forEachDescendant(varDecl().bind("d")),
/// forEachDescendant(declRefExpr(to(decl(equalsBoundNode("d"))))))
/// will trigger a match for each combination of variable declaration
/// and reference to that variable declaration within a compound statement.
AST_POLYMORPHIC_MATCHER_P(equalsBoundNode,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(Stmt, Decl, Type,
                                                          QualType),
                          std::string, ID) {
  // FIXME: Figure out whether it makes sense to allow this
  // on any other node types.
  // For *Loc it probably does not make sense, as those seem
  // unique. For NestedNameSpecifier it might make sense, as
  // those also have pointer identity, but I'm not sure whether
  // they're ever reused.
  // Filter the builder: drop every binding of \p ID that is not equal to
  // this node. The match succeeds if at least one binding survives.
  internal::NotEqualsBoundNodePredicate Predicate;
  Predicate.ID = ID;
  Predicate.Node = DynTypedNode::create(Node);
  return Builder->removeBindings(Predicate);
}
/// Matches the condition variable statement in an if statement.
///
/// Given
/// \code
/// if (A* a = GetAPointer()) {}
/// \endcode
/// hasConditionVariableStatement(...)
/// matches 'A* a = GetAPointer()'.
AST_MATCHER_P(IfStmt, hasConditionVariableStatement,
              internal::Matcher<DeclStmt>, InnerMatcher) {
  // Only if-statements that declare a variable in their condition have a
  // condition-variable DeclStmt to match against.
  if (const DeclStmt *CondVar = Node.getConditionVariableDeclStmt())
    return InnerMatcher.matches(*CondVar, Finder, Builder);
  return false;
}
/// Matches the index expression of an array subscript expression.
///
/// Given
/// \code
/// int i[5];
/// void f() { i[1] = 42; }
/// \endcode
/// arraySubscriptExpr(hasIndex(integerLiteral()))
/// matches \c i[1] with the \c integerLiteral() matching \c 1
AST_MATCHER_P(ArraySubscriptExpr, hasIndex,
              internal::Matcher<Expr>, InnerMatcher) {
  // Guard against a null index child before dispatching to the inner matcher.
  const Expr *Index = Node.getIdx();
  return Index != nullptr && InnerMatcher.matches(*Index, Finder, Builder);
}
/// Matches the base expression of an array subscript expression.
///
/// Given
/// \code
/// int i[5];
/// void f() { i[1] = 42; }
/// \endcode
/// arraySubscriptExpr(hasBase(implicitCastExpr(
///     hasSourceExpression(declRefExpr()))))
/// matches \c i[1] with the \c declRefExpr() matching \c i
AST_MATCHER_P(ArraySubscriptExpr, hasBase,
              internal::Matcher<Expr>, InnerMatcher) {
  // Guard against a null base child before dispatching to the inner matcher.
  const Expr *Base = Node.getBase();
  return Base != nullptr && InnerMatcher.matches(*Base, Finder, Builder);
}
/// Matches a 'for', 'while', 'do while' statement or a function
/// definition that has a given body. Note that in case of functions
/// this matcher only matches the definition itself and not the other
/// declarations of the same function.
///
/// Given
/// \code
/// for (;;) {}
/// \endcode
/// hasBody(compoundStmt())
/// matches 'for (;;) {}'
/// with compoundStmt()
/// matching '{}'
///
/// Given
/// \code
/// void f();
/// void f() {}
/// \endcode
/// functionDecl(hasBody(compoundStmt()))
/// matches 'void f() {}'
/// with compoundStmt()
/// matching '{}'
/// but does not match 'void f();'
AST_POLYMORPHIC_MATCHER_P(hasBody,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(DoStmt, ForStmt,
                                                          WhileStmt,
                                                          CXXForRangeStmt,
                                                          FunctionDecl),
                          internal::Matcher<Stmt>, InnerMatcher) {
  // In a traversal mode that ignores implicit nodes, defaulted functions'
  // compiler-generated bodies do not count.
  if (Finder->isTraversalIgnoringImplicitNodes() && isDefaultedHelper(&Node))
    return false;
  // GetBodyMatcher dispatches per node type; for FunctionDecl it only yields
  // a body on the defining declaration (see the doc comment above).
  const Stmt *const Statement = internal::GetBodyMatcher<NodeType>::get(Node);
  return (Statement != nullptr &&
          InnerMatcher.matches(*Statement, Finder, Builder));
}
/// Matches a function declaration that has a given body present in the AST.
/// Note that this matcher matches all the declarations of a function whose
/// body is present in the AST.
///
/// Given
/// \code
/// void f();
/// void f() {}
/// void g();
/// \endcode
/// functionDecl(hasAnyBody(compoundStmt()))
/// matches both 'void f();'
/// and 'void f() {}'
/// with compoundStmt()
/// matching '{}'
/// but does not match 'void g();'
AST_MATCHER_P(FunctionDecl, hasAnyBody,
              internal::Matcher<Stmt>, InnerMatcher) {
  // Unlike hasBody, this matches every declaration of a function whose body
  // is present in the AST (see the doc comment above).
  if (const Stmt *Body = Node.getBody())
    return InnerMatcher.matches(*Body, Finder, Builder);
  return false;
}
/// Matches compound statements where at least one substatement matches
/// a given matcher. Also matches StmtExprs that have CompoundStmt as children.
///
/// Given
/// \code
/// { {}; 1+2; }
/// \endcode
/// hasAnySubstatement(compoundStmt())
/// matches '{ {}; 1+2; }'
/// with compoundStmt()
/// matching '{}'
AST_POLYMORPHIC_MATCHER_P(hasAnySubstatement,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(CompoundStmt,
                                                          StmtExpr),
                          internal::Matcher<Stmt>, InnerMatcher) {
  // For a StmtExpr, CompoundStmtMatcher extracts its child CompoundStmt;
  // then succeed if any of the body statements satisfies the inner matcher.
  const CompoundStmt *CS = CompoundStmtMatcher<NodeType>::get(Node);
  return CS && matchesFirstInPointerRange(InnerMatcher, CS->body_begin(),
                                          CS->body_end(), Finder,
                                          Builder) != CS->body_end();
}
/// Checks that a compound statement contains a specific number of
/// child statements.
///
/// Example: Given
/// \code
/// { for (;;) {} }
/// \endcode
/// compoundStmt(statementCountIs(0))
/// matches '{}'
/// but does not match the outer compound statement.
AST_MATCHER_P(CompoundStmt, statementCountIs, unsigned, N) {
  // Direct children only; nested statements are not counted.
  return Node.size() == N;
}
/// Matches literals that are equal to the given value of type ValueT.
///
/// Given
/// \code
/// f('\0', false, 3.14, 42);
/// \endcode
/// characterLiteral(equals(0))
/// matches '\0'
/// cxxBoolLiteral(equals(false)) and cxxBoolLiteral(equals(0))
/// match false
/// floatLiteral(equals(3.14)) and floatLiteral(equals(314e-2))
/// match 3.14
/// integerLiteral(equals(42))
/// matches 42
///
/// Note that you cannot directly match a negative numeric literal because the
/// minus sign is not part of the literal: It is a unary operator whose operand
/// is the positive numeric literal. Instead, you must use a unaryOperator()
/// matcher to match the minus sign:
///
/// unaryOperator(hasOperatorName("-"),
/// hasUnaryOperand(integerLiteral(equals(13))))
///
/// Usable as: Matcher<CharacterLiteral>, Matcher<CXXBoolLiteralExpr>,
/// Matcher<FloatingLiteral>, Matcher<IntegerLiteral>
template <typename ValueT>
internal::PolymorphicMatcher<internal::ValueEqualsMatcher,
                             void(internal::AllNodeBaseTypes), ValueT>
equals(const ValueT &Value) {
  // Name the (long) polymorphic matcher type once and construct it from the
  // target value.
  using MatcherT =
      internal::PolymorphicMatcher<internal::ValueEqualsMatcher,
                                   void(internal::AllNodeBaseTypes), ValueT>;
  return MatcherT(Value);
}
// Overload so 'equals(true)'/'equals(false)' resolve without ambiguity
// against the generic template above.
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(equals,
                                   AST_POLYMORPHIC_SUPPORTED_TYPES(CharacterLiteral,
                                                                   CXXBoolLiteralExpr,
                                                                   IntegerLiteral),
                                   bool, Value, 0) {
  return internal::ValueEqualsMatcher<NodeType, ParamT>(Value)
      .matchesNode(Node);
}
// Overload for unsigned integer literals (e.g. 'equals(42)').
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(equals,
                                   AST_POLYMORPHIC_SUPPORTED_TYPES(CharacterLiteral,
                                                                   CXXBoolLiteralExpr,
                                                                   IntegerLiteral),
                                   unsigned, Value, 1) {
  return internal::ValueEqualsMatcher<NodeType, ParamT>(Value)
      .matchesNode(Node);
}
// Overload for floating-point values; also covers FloatingLiteral nodes.
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(equals,
                                   AST_POLYMORPHIC_SUPPORTED_TYPES(CharacterLiteral,
                                                                   CXXBoolLiteralExpr,
                                                                   FloatingLiteral,
                                                                   IntegerLiteral),
                                   double, Value, 2) {
  return internal::ValueEqualsMatcher<NodeType, ParamT>(Value)
      .matchesNode(Node);
}
/// Matches the operator Name of operator expressions (binary or
/// unary).
///
/// Example matches a || b (matcher = binaryOperator(hasOperatorName("||")))
/// \code
/// !(a || b)
/// \endcode
AST_POLYMORPHIC_MATCHER_P(
    hasOperatorName,
    AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator, CXXOperatorCallExpr,
                                    CXXRewrittenBinaryOperator, UnaryOperator),
    std::string, Name) {
  // getOpName may have no answer for some nodes; those never match.
  const Optional<StringRef> OpName = internal::getOpName(Node);
  return OpName && *OpName == Name;
}
/// Matches operator expressions (binary or unary) that have any of the
/// specified names.
///
/// hasAnyOperatorName("+", "-")
/// Is equivalent to
/// anyOf(hasOperatorName("+"), hasOperatorName("-"))
extern const internal::VariadicFunction<
    internal::PolymorphicMatcher<internal::HasAnyOperatorNameMatcher,
                                 AST_POLYMORPHIC_SUPPORTED_TYPES(
                                     BinaryOperator, CXXOperatorCallExpr,
                                     CXXRewrittenBinaryOperator, UnaryOperator),
                                 std::vector<std::string>>,
    StringRef, internal::hasAnyOperatorNameFunc>
    hasAnyOperatorName; // variadic entry point backed by hasAnyOperatorNameFunc
/// Matches all kinds of assignment operators.
///
/// Example 1: matches a += b (matcher = binaryOperator(isAssignmentOperator()))
/// \code
/// if (a == b)
/// a += b;
/// \endcode
///
/// Example 2: matches s1 = s2
/// (matcher = cxxOperatorCallExpr(isAssignmentOperator()))
/// \code
/// struct S { S& operator=(const S&); };
/// void x() { S s1, s2; s1 = s2; }
/// \endcode
AST_POLYMORPHIC_MATCHER(
    isAssignmentOperator,
    AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator, CXXOperatorCallExpr,
                                    CXXRewrittenBinaryOperator)) {
  // Covers plain and compound assignment (see the examples above).
  return Node.isAssignmentOp();
}
/// Matches comparison operators.
///
/// Example 1: matches a == b (matcher = binaryOperator(isComparisonOperator()))
/// \code
/// if (a == b)
/// a += b;
/// \endcode
///
/// Example 2: matches s1 < s2
/// (matcher = cxxOperatorCallExpr(isComparisonOperator()))
/// \code
/// struct S { bool operator<(const S& other); };
/// void x(S s1, S s2) { bool b1 = s1 < s2; }
/// \endcode
AST_POLYMORPHIC_MATCHER(
    isComparisonOperator,
    AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator, CXXOperatorCallExpr,
                                    CXXRewrittenBinaryOperator)) {
  // Delegates to the node's own comparison-operator classification.
  return Node.isComparisonOp();
}
/// Matches the left hand side of binary operator expressions.
///
/// Example matches a (matcher = binaryOperator(hasLHS()))
/// \code
///   a || b
/// \endcode
AST_POLYMORPHIC_MATCHER_P(hasLHS,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(
                              BinaryOperator, CXXOperatorCallExpr,
                              CXXRewrittenBinaryOperator, ArraySubscriptExpr),
                          internal::Matcher<Expr>, InnerMatcher) {
  // No LHS can be extracted for some node shapes; treat that as no match.
  if (const Expr *LHS = internal::getLHS(Node))
    return InnerMatcher.matches(*LHS, Finder, Builder);
  return false;
}
/// Matches the right hand side of binary operator expressions.
///
/// Example matches b (matcher = binaryOperator(hasRHS()))
/// \code
///   a || b
/// \endcode
AST_POLYMORPHIC_MATCHER_P(hasRHS,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(
                              BinaryOperator, CXXOperatorCallExpr,
                              CXXRewrittenBinaryOperator, ArraySubscriptExpr),
                          internal::Matcher<Expr>, InnerMatcher) {
  // No RHS can be extracted for some node shapes; treat that as no match.
  if (const Expr *RHS = internal::getRHS(Node))
    return InnerMatcher.matches(*RHS, Finder, Builder);
  return false;
}
/// Matches if either the left hand side or the right hand side of a
/// binary operator matches.
AST_POLYMORPHIC_MATCHER_P(
    hasEitherOperand,
    AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator, CXXOperatorCallExpr,
                                    CXXRewrittenBinaryOperator),
    internal::Matcher<Expr>, InnerMatcher) {
  // Delegates to hasLHS/hasRHS so operand extraction stays consistent across
  // all supported node types; NodeType is the concrete polymorphic type.
  return internal::VariadicDynCastAllOfMatcher<Stmt, NodeType>()(
             anyOf(hasLHS(InnerMatcher), hasRHS(InnerMatcher)))
      .matches(Node, Finder, Builder);
}

/// Matches if both matchers match with opposite sides of the binary operator.
///
/// Example matcher = binaryOperator(hasOperands(integerLiteral(equals(1),
///                                              integerLiteral(equals(2)))
/// \code
///   1 + 2 // Match
///   2 + 1 // Match
///   1 + 1 // No match
///   2 + 2 // No match
/// \endcode
AST_POLYMORPHIC_MATCHER_P2(
    hasOperands,
    AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator, CXXOperatorCallExpr,
                                    CXXRewrittenBinaryOperator),
    internal::Matcher<Expr>, Matcher1, internal::Matcher<Expr>, Matcher2) {
  // Tries both pairings so the two inner matchers may bind to either side.
  return internal::VariadicDynCastAllOfMatcher<Stmt, NodeType>()(
             anyOf(allOf(hasLHS(Matcher1), hasRHS(Matcher2)),
                   allOf(hasLHS(Matcher2), hasRHS(Matcher1))))
      .matches(Node, Finder, Builder);
}
/// Matches if the operand of a unary operator matches.
///
/// Example matches true (matcher = hasUnaryOperand(
///                                   cxxBoolLiteral(equals(true))))
/// \code
///   !true
/// \endcode
AST_POLYMORPHIC_MATCHER_P(hasUnaryOperand,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(UnaryOperator,
                                                          CXXOperatorCallExpr),
                          internal::Matcher<Expr>, InnerMatcher) {
  // Operator calls without an extractable operand never match.
  if (const Expr *SubExpr = internal::getSubExpr(Node))
    return InnerMatcher.matches(*SubExpr, Finder, Builder);
  return false;
}
/// Matches if the cast's source expression
/// or opaque value's source expression matches the given matcher.
///
/// Example 1: matches "a string"
/// (matcher = castExpr(hasSourceExpression(cxxConstructExpr())))
/// \code
/// class URL { URL(string); };
/// URL url = "a string";
/// \endcode
///
/// Example 2: matches 'b' (matcher =
/// opaqueValueExpr(hasSourceExpression(implicitCastExpr(declRefExpr())))
/// \code
///   int a = b ?: 1;
/// \endcode
AST_POLYMORPHIC_MATCHER_P(hasSourceExpression,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(CastExpr,
                                                          OpaqueValueExpr),
                          internal::Matcher<Expr>, InnerMatcher) {
  // Dispatch on NodeType: CastExpr and OpaqueValueExpr expose their source
  // expression through different accessors.
  const Expr *Source =
      internal::GetSourceExpressionMatcher<NodeType>::get(Node);
  if (Source == nullptr)
    return false;
  return InnerMatcher.matches(*Source, Finder, Builder);
}
/// Matches casts that has a given cast kind.
///
/// Example: matches the implicit cast around \c 0
/// (matcher = castExpr(hasCastKind(CK_NullToPointer)))
/// \code
///   int *p = 0;
/// \endcode
///
/// If the matcher is used from clang-query, CastKind parameter
/// should be passed as a quoted string. e.g., hasCastKind("CK_NullToPointer").
AST_MATCHER_P(CastExpr, hasCastKind, CastKind, Kind) {
  return Node.getCastKind() == Kind;
}

/// Matches casts whose destination type matches a given matcher.
///
/// (Note: Clang's AST refers to other conversions as "casts" too, and calls
/// actual casts "explicit" casts.)
AST_MATCHER_P(ExplicitCastExpr, hasDestinationType,
              internal::Matcher<QualType>, InnerMatcher) {
  // Uses the type as written in the source, not the semantic type.
  const QualType NodeType = Node.getTypeAsWritten();
  return InnerMatcher.matches(NodeType, Finder, Builder);
}

/// Matches implicit casts whose destination type matches a given
/// matcher.
///
/// FIXME: Unit test this matcher
AST_MATCHER_P(ImplicitCastExpr, hasImplicitDestinationType,
              internal::Matcher<QualType>, InnerMatcher) {
  return InnerMatcher.matches(Node.getType(), Finder, Builder);
}
/// Matches TagDecl object that are spelled with "struct."
///
/// Example matches S, but not C, U or E.
/// \code
///   struct S {};
///   class C {};
///   union U {};
///   enum E {};
/// \endcode
AST_MATCHER(TagDecl, isStruct) {
  return Node.isStruct();
}

/// Matches TagDecl object that are spelled with "union."
///
/// Example matches U, but not C, S or E.
/// \code
///   struct S {};
///   class C {};
///   union U {};
///   enum E {};
/// \endcode
AST_MATCHER(TagDecl, isUnion) {
  return Node.isUnion();
}

/// Matches TagDecl object that are spelled with "class."
///
/// Example matches C, but not S, U or E.
/// \code
///   struct S {};
///   class C {};
///   union U {};
///   enum E {};
/// \endcode
AST_MATCHER(TagDecl, isClass) {
  return Node.isClass();
}

/// Matches TagDecl object that are spelled with "enum."
///
/// Example matches E, but not C, S or U.
/// \code
///   struct S {};
///   class C {};
///   union U {};
///   enum E {};
/// \endcode
AST_MATCHER(TagDecl, isEnum) {
  return Node.isEnum();
}
/// Matches the true branch expression of a conditional operator.
///
/// Example 1 (conditional ternary operator): matches a
/// \code
///   condition ? a : b
/// \endcode
///
/// Example 2 (conditional binary operator): matches opaqueValueExpr(condition)
/// \code
///   condition ?: b
/// \endcode
AST_MATCHER_P(AbstractConditionalOperator, hasTrueExpression,
              internal::Matcher<Expr>, InnerMatcher) {
  if (const Expr *TrueBranch = Node.getTrueExpr())
    return InnerMatcher.matches(*TrueBranch, Finder, Builder);
  return false;
}

/// Matches the false branch expression of a conditional operator
/// (binary or ternary).
///
/// Example matches b
/// \code
///   condition ? a : b
///   condition ?: b
/// \endcode
AST_MATCHER_P(AbstractConditionalOperator, hasFalseExpression,
              internal::Matcher<Expr>, InnerMatcher) {
  if (const Expr *FalseBranch = Node.getFalseExpr())
    return InnerMatcher.matches(*FalseBranch, Finder, Builder);
  return false;
}
/// Matches if a declaration has a body attached.
///
/// Example matches A, va, fa
/// \code
///   class A {};
///   class B;  // Doesn't match, as it has no body.
///   int va;
///   extern int vb;  // Doesn't match, as it doesn't define the variable.
///   void fa() {}
///   void fb();  // Doesn't match, as it has no body.
///   @interface X
///   - (void)ma; // Doesn't match, interface is declaration.
///   @end
///   @implementation X
///   - (void)ma {}
///   @end
/// \endcode
///
/// Usable as: Matcher<TagDecl>, Matcher<VarDecl>, Matcher<FunctionDecl>,
///   Matcher<ObjCMethodDecl>
AST_POLYMORPHIC_MATCHER(isDefinition,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(TagDecl, VarDecl,
                                                        ObjCMethodDecl,
                                                        FunctionDecl)) {
  return Node.isThisDeclarationADefinition();
}

/// Matches if a function declaration is variadic.
///
/// Example matches f, but not g or h. The function i will not match, even when
/// compiled in C mode.
/// \code
///   void f(...);
///   void g(int);
///   template <typename... Ts> void h(Ts...);
///   void i();
/// \endcode
AST_MATCHER(FunctionDecl, isVariadic) {
  return Node.isVariadic();
}
/// Matches the class declaration that the given method declaration
/// belongs to.
///
/// FIXME: Generalize this for other kinds of declarations.
/// FIXME: What other kind of declarations would we need to generalize
/// this to?
///
/// Example matches A() in the last line
///     (matcher = cxxConstructExpr(hasDeclaration(cxxMethodDecl(
///         ofClass(hasName("A"))))))
/// \code
///   class A {
///    public:
///     A();
///   };
///   A a = A();
/// \endcode
AST_MATCHER_P(CXXMethodDecl, ofClass,
              internal::Matcher<CXXRecordDecl>, InnerMatcher) {
  // Scoped toggle of the finder's "children not spelled in source" state for
  // the duration of the parent-class match.
  ASTChildrenNotSpelledInSourceScope NotSpelledScope(Finder, false);
  if (const CXXRecordDecl *Class = Node.getParent())
    return InnerMatcher.matches(*Class, Finder, Builder);
  return false;
}
/// Matches each method overridden by the given method. This matcher may
/// produce multiple matches.
///
/// Given
/// \code
///   class A { virtual void f(); };
///   class B : public A { void f(); };
///   class C : public B { void f(); };
/// \endcode
/// cxxMethodDecl(ofClass(hasName("C")),
///               forEachOverridden(cxxMethodDecl().bind("b"))).bind("d")
///   matches once, with "b" binding "A::f" and "d" binding "C::f" (Note
///   that B::f is not overridden by C::f).
///
/// The check can produce multiple matches in case of multiple inheritance, e.g.
/// \code
///   class A1 { virtual void f(); };
///   class A2 { virtual void f(); };
///   class C : public A1, public A2 { void f(); };
/// \endcode
/// cxxMethodDecl(ofClass(hasName("C")),
///               forEachOverridden(cxxMethodDecl().bind("b"))).bind("d")
///   matches twice, once with "b" binding "A1::f" and "d" binding "C::f", and
///   once with "b" binding "A2::f" and "d" binding "C::f".
AST_MATCHER_P(CXXMethodDecl, forEachOverridden,
              internal::Matcher<CXXMethodDecl>, InnerMatcher) {
  // Accumulate one binding set per overridden method that matches, so a
  // single call site can yield several results.
  BoundNodesTreeBuilder Matches;
  bool AnyMatched = false;
  for (const auto *Overridden : Node.overridden_methods()) {
    BoundNodesTreeBuilder Candidate(*Builder);
    if (InnerMatcher.matches(*Overridden, Finder, &Candidate)) {
      AnyMatched = true;
      Matches.addMatch(Candidate);
    }
  }
  *Builder = std::move(Matches);
  return AnyMatched;
}
/// Matches declarations of virtual methods and C++ base specifers that specify
/// virtual inheritance.
///
/// Example:
/// \code
///   class A {
///    public:
///     virtual void x(); // matches x
///   };
/// \endcode
///
/// Example:
/// \code
///   class Base {};
///   class DirectlyDerived : virtual Base {}; // matches Base
///   class IndirectlyDerived : DirectlyDerived, Base {}; // matches Base
/// \endcode
///
/// Usable as: Matcher<CXXMethodDecl>, Matcher<CXXBaseSpecifier>
AST_POLYMORPHIC_MATCHER(isVirtual,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(CXXMethodDecl,
                                                        CXXBaseSpecifier)) {
  return Node.isVirtual();
}

/// Matches if the given method declaration has an explicit "virtual".
///
/// Given
/// \code
///   class A {
///    public:
///     virtual void x();
///   };
///   class B : public A {
///    public:
///     void x();
///   };
/// \endcode
///   matches A::x but not B::x
AST_MATCHER(CXXMethodDecl, isVirtualAsWritten) {
  return Node.isVirtualAsWritten();
}

/// Matches constructor declarations that are inheriting constructors, i.e.
/// constructors introduced by a using-declaration naming a base class
/// constructor.
AST_MATCHER(CXXConstructorDecl, isInheritingConstructor) {
  return Node.isInheritingConstructor();
}
/// Matches if the given method or class declaration is final.
///
/// Given:
/// \code
///   class A final {};
///
///   struct B {
///     virtual void f();
///   };
///
///   struct C : B {
///     void f() final;
///   };
/// \endcode
/// matches A and C::f, but not B, C, or B::f
AST_POLYMORPHIC_MATCHER(isFinal,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl,
                                                        CXXMethodDecl)) {
  // "final" is represented as an attribute on the decl; the `template`
  // keyword is required because Node has a dependent type here.
  return Node.template hasAttr<FinalAttr>();
}

/// Matches if the given method declaration is pure.
///
/// Given
/// \code
///   class A {
///    public:
///     virtual void x() = 0;
///   };
/// \endcode
///   matches A::x
AST_MATCHER(CXXMethodDecl, isPure) {
  return Node.isPure();
}

/// Matches if the given method declaration is const.
///
/// Given
/// \code
/// struct A {
///   void foo() const;
///   void bar();
/// };
/// \endcode
///
/// cxxMethodDecl(isConst()) matches A::foo() but not A::bar()
AST_MATCHER(CXXMethodDecl, isConst) {
  return Node.isConst();
}
/// Matches if the given method declaration declares a copy assignment
/// operator.
///
/// Given
/// \code
/// struct A {
///   A &operator=(const A &);
///   A &operator=(A &&);
/// };
/// \endcode
///
/// cxxMethodDecl(isCopyAssignmentOperator()) matches the first method but not
/// the second one.
AST_MATCHER(CXXMethodDecl, isCopyAssignmentOperator) {
  return Node.isCopyAssignmentOperator();
}

/// Matches if the given method declaration declares a move assignment
/// operator.
///
/// Given
/// \code
/// struct A {
///   A &operator=(const A &);
///   A &operator=(A &&);
/// };
/// \endcode
///
/// cxxMethodDecl(isMoveAssignmentOperator()) matches the second method but not
/// the first one.
AST_MATCHER(CXXMethodDecl, isMoveAssignmentOperator) {
  return Node.isMoveAssignmentOperator();
}

/// Matches if the given method declaration overrides another method.
///
/// Given
/// \code
///   class A {
///    public:
///     virtual void x();
///   };
///   class B : public A {
///    public:
///     virtual void x();
///   };
/// \endcode
///   matches B::x
AST_MATCHER(CXXMethodDecl, isOverride) {
  // Either the method actually overrides something, or it carries an explicit
  // `override` attribute (which may be present even if Sema found no base
  // method yet).
  return Node.size_overridden_methods() > 0 || Node.hasAttr<OverrideAttr>();
}

/// Matches method declarations that are user-provided.
///
/// Given
/// \code
///   struct S {
///     S(); // #1
///     S(const S &) = default; // #2
///     S(S &&) = delete; // #3
///   };
/// \endcode
/// cxxConstructorDecl(isUserProvided()) will match #1, but not #2 or #3.
AST_MATCHER(CXXMethodDecl, isUserProvided) {
  return Node.isUserProvided();
}
/// Matches member expressions that are called with '->' as opposed
/// to '.'.
///
/// Member calls on the implicit this pointer match as called with '->'.
///
/// Given
/// \code
///   class Y {
///     void x() { this->x(); x(); Y y; y.x(); a; this->b; Y::b; }
///     template <class T> void f() { this->f<T>(); f<T>(); }
///     int a;
///     static int b;
///   };
///   template <class T>
///   class Z {
///     void x() { this->m; }
///   };
/// \endcode
/// memberExpr(isArrow())
///   matches this->x, x, y.x, a, this->b
/// cxxDependentScopeMemberExpr(isArrow())
///   matches this->m
/// unresolvedMemberExpr(isArrow())
///   matches this->f<T>, f<T>
AST_POLYMORPHIC_MATCHER(
    isArrow, AST_POLYMORPHIC_SUPPORTED_TYPES(MemberExpr, UnresolvedMemberExpr,
                                             CXXDependentScopeMemberExpr)) {
  return Node.isArrow();
}
/// Matches QualType nodes that are of integer type.
///
/// Given
/// \code
///   void a(int);
///   void b(long);
///   void c(double);
/// \endcode
/// functionDecl(hasAnyParameter(hasType(isInteger())))
/// matches "a(int)", "b(long)", but not "c(double)".
AST_MATCHER(QualType, isInteger) {
  return Node->isIntegerType();
}

/// Matches QualType nodes that are of unsigned integer type.
///
/// Given
/// \code
///   void a(int);
///   void b(unsigned long);
///   void c(double);
/// \endcode
/// functionDecl(hasAnyParameter(hasType(isUnsignedInteger())))
/// matches "b(unsigned long)", but not "a(int)" and "c(double)".
AST_MATCHER(QualType, isUnsignedInteger) {
  return Node->isUnsignedIntegerType();
}

/// Matches QualType nodes that are of signed integer type.
///
/// Given
/// \code
///   void a(int);
///   void b(unsigned long);
///   void c(double);
/// \endcode
/// functionDecl(hasAnyParameter(hasType(isSignedInteger())))
/// matches "a(int)", but not "b(unsigned long)" and "c(double)".
AST_MATCHER(QualType, isSignedInteger) {
  return Node->isSignedIntegerType();
}

/// Matches QualType nodes that are of character type.
///
/// Given
/// \code
///   void a(char);
///   void b(wchar_t);
///   void c(double);
/// \endcode
/// functionDecl(hasAnyParameter(hasType(isAnyCharacter())))
/// matches "a(char)", "b(wchar_t)", but not "c(double)".
AST_MATCHER(QualType, isAnyCharacter) {
  return Node->isAnyCharacterType();
}

/// Matches QualType nodes that are of any pointer type; this includes
/// the Objective-C object pointer type, which is different despite being
/// syntactically similar.
///
/// Given
/// \code
///   int *i = nullptr;
///
///   @interface Foo
///   @end
///   Foo *f;
///
///   int j;
/// \endcode
/// varDecl(hasType(isAnyPointer()))
///   matches "int *i" and "Foo *f", but not "int j".
AST_MATCHER(QualType, isAnyPointer) {
  return Node->isAnyPointerType();
}
/// Matches QualType nodes that are const-qualified, i.e., that
/// include "top-level" const.
///
/// Given
/// \code
///   void a(int);
///   void b(int const);
///   void c(const int);
///   void d(const int*);
///   void e(int const) {};
/// \endcode
/// functionDecl(hasAnyParameter(hasType(isConstQualified())))
///   matches "void b(int const)", "void c(const int)" and
///   "void e(int const) {}". It does not match d as there
///   is no top-level const on the parameter type "const int *".
AST_MATCHER(QualType, isConstQualified) {
  return Node.isConstQualified();
}

/// Matches QualType nodes that are volatile-qualified, i.e., that
/// include "top-level" volatile.
///
/// Given
/// \code
///   void a(int);
///   void b(int volatile);
///   void c(volatile int);
///   void d(volatile int*);
///   void e(int volatile) {};
/// \endcode
/// functionDecl(hasAnyParameter(hasType(isVolatileQualified())))
///   matches "void b(int volatile)", "void c(volatile int)" and
///   "void e(int volatile) {}". It does not match d as there
///   is no top-level volatile on the parameter type "volatile int *".
AST_MATCHER(QualType, isVolatileQualified) {
  return Node.isVolatileQualified();
}

/// Matches QualType nodes that have local CV-qualifiers attached to
/// the node, not hidden within a typedef.
///
/// Given
/// \code
///   typedef const int const_int;
///   const_int i;
///   int *const j;
///   int *volatile k;
///   int m;
/// \endcode
/// \c varDecl(hasType(hasLocalQualifiers())) matches only \c j and \c k.
/// \c i is const-qualified but the qualifier is not local.
AST_MATCHER(QualType, hasLocalQualifiers) {
  return Node.hasLocalQualifiers();
}
/// Matches a member expression where the member is matched by a
/// given matcher.
///
/// Given
/// \code
///   struct { int first, second; } first, second;
///   int i(second.first);
///   int j(first.second);
/// \endcode
/// memberExpr(member(hasName("first")))
///   matches second.first
///   but not first.second (because the member name there is "second").
AST_MATCHER_P(MemberExpr, member,
              internal::Matcher<ValueDecl>, InnerMatcher) {
  // getMemberDecl() is never null for a valid MemberExpr.
  return InnerMatcher.matches(*Node.getMemberDecl(), Finder, Builder);
}
/// Matches a member expression where the object expression is matched by a
/// given matcher. Implicit object expressions are included; that is, it matches
/// use of implicit `this`.
///
/// Given
/// \code
///   struct X {
///     int m;
///     int f(X x) { x.m; return m; }
///   };
/// \endcode
/// memberExpr(hasObjectExpression(hasType(cxxRecordDecl(hasName("X")))))
///   matches `x.m`, but not `m`; however,
/// memberExpr(hasObjectExpression(hasType(pointsTo(
///     cxxRecordDecl(hasName("X"))))))
///   matches `m` (aka. `this->m`), but not `x.m`.
AST_POLYMORPHIC_MATCHER_P(
    hasObjectExpression,
    AST_POLYMORPHIC_SUPPORTED_TYPES(MemberExpr, UnresolvedMemberExpr,
                                    CXXDependentScopeMemberExpr),
    internal::Matcher<Expr>, InnerMatcher) {
  // Dependent member accesses with an implicit base have no base expression
  // to hand to the inner matcher; reject them up front.
  if (const auto *UME = dyn_cast<UnresolvedMemberExpr>(&Node))
    if (UME->isImplicitAccess())
      return false;
  if (const auto *DSME = dyn_cast<CXXDependentScopeMemberExpr>(&Node))
    if (DSME->isImplicitAccess())
      return false;
  return InnerMatcher.matches(*Node.getBase(), Finder, Builder);
}
/// Matches any using shadow declaration.
///
/// Given
/// \code
///   namespace X { void b(); }
///   using X::b;
/// \endcode
/// usingDecl(hasAnyUsingShadowDecl(hasName("b")))
///   matches \code using X::b \endcode
AST_MATCHER_P(BaseUsingDecl, hasAnyUsingShadowDecl,
              internal::Matcher<UsingShadowDecl>, InnerMatcher) {
  // Matches if any shadow declaration in [shadow_begin, shadow_end) matches;
  // the helper returns shadow_end() when nothing matched.
  return matchesFirstInPointerRange(InnerMatcher, Node.shadow_begin(),
                                    Node.shadow_end(), Finder,
                                    Builder) != Node.shadow_end();
}

/// Matches a using shadow declaration where the target declaration is
/// matched by the given matcher.
///
/// Given
/// \code
///   namespace X { int a; void b(); }
///   using X::a;
///   using X::b;
/// \endcode
/// usingDecl(hasAnyUsingShadowDecl(hasTargetDecl(functionDecl())))
///   matches \code using X::b \endcode
///   but not \code using X::a \endcode
AST_MATCHER_P(UsingShadowDecl, hasTargetDecl,
              internal::Matcher<NamedDecl>, InnerMatcher) {
  return InnerMatcher.matches(*Node.getTargetDecl(), Finder, Builder);
}
/// Matches template instantiations of function, class, or static
/// member variable template instantiations.
///
/// Given
/// \code
///   template <typename T> class X {}; class A {}; X<A> x;
/// \endcode
/// or
/// \code
///   template <typename T> class X {}; class A {}; template class X<A>;
/// \endcode
/// or
/// \code
///   template <typename T> class X {}; class A {}; extern template class X<A>;
/// \endcode
/// cxxRecordDecl(hasName("::X"), isTemplateInstantiation())
///   matches the template instantiation of X<A>.
///
/// But given
/// \code
///   template <typename T>  class X {}; class A {};
///   template <> class X<A> {}; X<A> x;
/// \endcode
/// cxxRecordDecl(hasName("::X"), isTemplateInstantiation())
///   does not match, as X<A> is an explicit template specialization.
///
/// Usable as: Matcher<FunctionDecl>, Matcher<VarDecl>, Matcher<CXXRecordDecl>
AST_POLYMORPHIC_MATCHER(isTemplateInstantiation,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, VarDecl,
                                                        CXXRecordDecl)) {
  // Query the specialization kind once instead of three times; the three
  // accepted kinds are implicit instantiations plus both forms of explicit
  // instantiation (definition and declaration).
  const TemplateSpecializationKind TSK = Node.getTemplateSpecializationKind();
  return TSK == TSK_ImplicitInstantiation ||
         TSK == TSK_ExplicitInstantiationDefinition ||
         TSK == TSK_ExplicitInstantiationDeclaration;
}
/// Matches declarations that are template instantiations or are inside
/// template instantiations.
///
/// Given
/// \code
///   template<typename T> void A(T t) { T i; }
///   A(0);
///   A(0U);
/// \endcode
/// functionDecl(isInstantiated())
///   matches 'A(int) {...};' and 'A(unsigned) {...}'.
AST_MATCHER_FUNCTION(internal::Matcher<Decl>, isInstantiated) {
  // A declaration counts either when it is itself an instantiation, or when
  // any ancestor declaration is.
  auto IsInstantiation = decl(anyOf(cxxRecordDecl(isTemplateInstantiation()),
                                    functionDecl(isTemplateInstantiation())));
  return decl(anyOf(IsInstantiation, hasAncestor(IsInstantiation)));
}

/// Matches statements inside of a template instantiation.
///
/// Given
/// \code
///   int j;
///   template<typename T> void A(T t) { T i; j += 42;}
///   A(0);
///   A(0U);
/// \endcode
/// declStmt(isInTemplateInstantiation())
///   matches 'int i;' and 'unsigned i'.
/// unless(stmt(isInTemplateInstantiation()))
///   will NOT match j += 42; as it's shared between the template definition and
///   instantiation.
AST_MATCHER_FUNCTION(internal::Matcher<Stmt>, isInTemplateInstantiation) {
  return stmt(
      hasAncestor(decl(anyOf(cxxRecordDecl(isTemplateInstantiation()),
                             functionDecl(isTemplateInstantiation())))));
}
/// Matches explicit template specializations of function, class, or
/// static member variable template instantiations.
///
/// Given
/// \code
///   template<typename T> void A(T t) { }
///   template<> void A(int N) { }
/// \endcode
/// functionDecl(isExplicitTemplateSpecialization())
///   matches the specialization A<int>().
///
/// Usable as: Matcher<FunctionDecl>, Matcher<VarDecl>, Matcher<CXXRecordDecl>
AST_POLYMORPHIC_MATCHER(isExplicitTemplateSpecialization,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, VarDecl,
                                                        CXXRecordDecl)) {
  return (Node.getTemplateSpecializationKind() == TSK_ExplicitSpecialization);
}
/// Matches \c TypeLocs for which the given inner
/// QualType-matcher matches.
AST_MATCHER_FUNCTION_P_OVERLOAD(internal::BindableMatcher<TypeLoc>, loc,
                                internal::Matcher<QualType>, InnerMatcher, 0) {
  return internal::BindableMatcher<TypeLoc>(
      new internal::TypeLocTypeMatcher(InnerMatcher));
}

/// Matches `QualifiedTypeLoc`s in the clang AST.
///
/// Given
/// \code
///   const int x = 0;
/// \endcode
/// qualifiedTypeLoc()
///   matches `const int`.
extern const internal::VariadicDynCastAllOfMatcher<TypeLoc, QualifiedTypeLoc>
    qualifiedTypeLoc;

/// Matches `QualifiedTypeLoc`s that have an unqualified `TypeLoc` matching
/// `InnerMatcher`.
///
/// Given
/// \code
///   int* const x;
///   const int y;
/// \endcode
/// qualifiedTypeLoc(hasUnqualifiedLoc(pointerTypeLoc()))
///   matches the `TypeLoc` of the variable declaration of `x`, but not `y`.
AST_MATCHER_P(QualifiedTypeLoc, hasUnqualifiedLoc, internal::Matcher<TypeLoc>,
              InnerMatcher) {
  return InnerMatcher.matches(Node.getUnqualifiedLoc(), Finder, Builder);
}
/// Matches a function declared with the specified return `TypeLoc`.
///
/// Given
/// \code
///   int f() { return 5; }
///   void g() {}
/// \endcode
/// functionDecl(hasReturnTypeLoc(loc(asString("int"))))
///   matches the declaration of `f`, but not `g`.
AST_MATCHER_P(FunctionDecl, hasReturnTypeLoc, internal::Matcher<TypeLoc>,
              ReturnMatcher) {
  // The function type loc can be null (e.g. no written type information);
  // such declarations never match.
  if (auto FTL = Node.getFunctionTypeLoc())
    return ReturnMatcher.matches(FTL.getReturnLoc(), Finder, Builder);
  return false;
}
/// Matches pointer `TypeLoc`s.
///
/// Given
/// \code
///   int* x;
/// \endcode
/// pointerTypeLoc()
///   matches `int*`.
extern const internal::VariadicDynCastAllOfMatcher<TypeLoc, PointerTypeLoc>
    pointerTypeLoc;

/// Matches pointer `TypeLoc`s that have a pointee `TypeLoc` matching
/// `PointeeMatcher`.
///
/// Given
/// \code
///   int* x;
/// \endcode
/// pointerTypeLoc(hasPointeeLoc(loc(asString("int"))))
///   matches `int*`.
AST_MATCHER_P(PointerTypeLoc, hasPointeeLoc, internal::Matcher<TypeLoc>,
              PointeeMatcher) {
  return PointeeMatcher.matches(Node.getPointeeLoc(), Finder, Builder);
}

/// Matches reference `TypeLoc`s.
///
/// Given
/// \code
///   int x = 3;
///   int& l = x;
///   int&& r = 3;
/// \endcode
/// referenceTypeLoc()
///   matches `int&` and `int&&`.
extern const internal::VariadicDynCastAllOfMatcher<TypeLoc, ReferenceTypeLoc>
    referenceTypeLoc;

/// Matches reference `TypeLoc`s that have a referent `TypeLoc` matching
/// `ReferentMatcher`.
///
/// Given
/// \code
///   int x = 3;
///   int& xx = x;
/// \endcode
/// referenceTypeLoc(hasReferentLoc(loc(asString("int"))))
///   matches `int&`.
AST_MATCHER_P(ReferenceTypeLoc, hasReferentLoc, internal::Matcher<TypeLoc>,
              ReferentMatcher) {
  // Reference types expose their referent via getPointeeLoc().
  return ReferentMatcher.matches(Node.getPointeeLoc(), Finder, Builder);
}

/// Matches template specialization `TypeLoc`s.
///
/// Given
/// \code
///   template <typename T> class C {};
///   C<char> var;
/// \endcode
/// varDecl(hasTypeLoc(templateSpecializationTypeLoc(typeLoc())))
///   matches `C<char> var`.
extern const internal::VariadicDynCastAllOfMatcher<
    TypeLoc, TemplateSpecializationTypeLoc>
    templateSpecializationTypeLoc;
/// Matches template specialization `TypeLoc`s that have at least one
/// `TemplateArgumentLoc` matching the given `InnerMatcher`.
///
/// Given
/// \code
///   template<typename T> class A {};
///   A<int> a;
/// \endcode
/// varDecl(hasTypeLoc(templateSpecializationTypeLoc(hasAnyTemplateArgumentLoc(
///   hasTypeLoc(loc(asString("int")))))))
///   matches `A<int> a`.
AST_MATCHER_P(TemplateSpecializationTypeLoc, hasAnyTemplateArgumentLoc,
              internal::Matcher<TemplateArgumentLoc>, InnerMatcher) {
  // Try each argument in turn; commit the bindings of the first one that
  // matches.
  const unsigned ArgCount = Node.getNumArgs();
  for (unsigned I = 0; I < ArgCount; ++I) {
    clang::ast_matchers::internal::BoundNodesTreeBuilder Candidate(*Builder);
    if (InnerMatcher.matches(Node.getArgLoc(I), Finder, &Candidate)) {
      *Builder = std::move(Candidate);
      return true;
    }
  }
  return false;
}
/// Matches template specialization `TypeLoc`s where the n'th
/// `TemplateArgumentLoc` matches the given `InnerMatcher`.
///
/// Given
/// \code
///   template<typename T, typename U> class A {};
///   A<double, int> b;
///   A<int, double> c;
/// \endcode
/// varDecl(hasTypeLoc(templateSpecializationTypeLoc(hasTemplateArgumentLoc(0,
///   hasTypeLoc(loc(asString("double")))))))
///   matches `A<double, int> b`, but not `A<int, double> c`.
AST_POLYMORPHIC_MATCHER_P2(
    hasTemplateArgumentLoc,
    AST_POLYMORPHIC_SUPPORTED_TYPES(DeclRefExpr, TemplateSpecializationTypeLoc),
    unsigned, Index, internal::Matcher<TemplateArgumentLoc>, InnerMatcher) {
  // The helper handles both supported node types and out-of-range indices.
  return internal::MatchTemplateArgLocAt(Node, Index, InnerMatcher, Finder,
                                         Builder);
}
/// Matches C or C++ elaborated `TypeLoc`s.
///
/// Given
/// \code
///   struct s {};
///   struct s ss;
/// \endcode
/// elaboratedTypeLoc()
///   matches the `TypeLoc` of the variable declaration of `ss`.
extern const internal::VariadicDynCastAllOfMatcher<TypeLoc, ElaboratedTypeLoc>
    elaboratedTypeLoc;

/// Matches elaborated `TypeLoc`s that have a named `TypeLoc` matching
/// `InnerMatcher`.
///
/// Given
/// \code
///   template <typename T>
///   class C {};
///   class C<int> c;
///
///   class D {};
///   class D d;
/// \endcode
/// elaboratedTypeLoc(hasNamedTypeLoc(templateSpecializationTypeLoc()));
///   matches the `TypeLoc` of the variable declaration of `c`, but not `d`.
AST_MATCHER_P(ElaboratedTypeLoc, hasNamedTypeLoc, internal::Matcher<TypeLoc>,
              InnerMatcher) {
  return InnerMatcher.matches(Node.getNamedTypeLoc(), Finder, Builder);
}
/// Matches type \c bool.
///
/// Given
/// \code
///  struct S { bool func(); };
/// \endcode
/// functionDecl(returns(booleanType()))
///   matches "bool func();"
AST_MATCHER(Type, booleanType) {
  return Node.isBooleanType();
}

/// Matches type \c void.
///
/// Given
/// \code
///  struct S { void func(); };
/// \endcode
/// functionDecl(returns(voidType()))
///   matches "void func();"
AST_MATCHER(Type, voidType) {
  return Node.isVoidType();
}

/// Convenience alias for a variadic matcher that matches \c Type nodes which
/// can be dynamically cast to \c NodeType.
template <typename NodeType>
using AstTypeMatcher = internal::VariadicDynCastAllOfMatcher<Type, NodeType>;
/// Matches builtin Types.
///
/// Given
/// \code
///   struct A {};
///   A a;
///   int b;
///   float c;
///   bool d;
/// \endcode
/// builtinType()
///   matches "int b", "float c" and "bool d"
extern const AstTypeMatcher<BuiltinType> builtinType;

/// Matches all kinds of arrays.
///
/// Given
/// \code
///   int a[] = { 2, 3 };
///   int b[4];
///   void f() { int c[a[0]]; }
/// \endcode
/// arrayType()
///   matches "int a[]", "int b[4]" and "int c[a[0]]";
extern const AstTypeMatcher<ArrayType> arrayType;

/// Matches C99 complex types.
///
/// Given
/// \code
///   _Complex float f;
/// \endcode
/// complexType()
///   matches "_Complex float f"
extern const AstTypeMatcher<ComplexType> complexType;

/// Matches any real floating-point type (float, double, long double).
///
/// Given
/// \code
///   int i;
///   float f;
/// \endcode
/// realFloatingPointType()
///   matches "float f" but not "int i"
AST_MATCHER(Type, realFloatingPointType) {
  return Node.isRealFloatingType();
}
/// Matches arrays and C99 complex types that have a specific element
/// type.
///
/// Given
/// \code
///   struct A {};
///   A a[7];
///   int b[7];
/// \endcode
/// arrayType(hasElementType(builtinType()))
///   matches "int b[7]"
///
/// Usable as: Matcher<ArrayType>, Matcher<ComplexType>
AST_TYPELOC_TRAVERSE_MATCHER_DECL(hasElementType, getElement,
                                  AST_POLYMORPHIC_SUPPORTED_TYPES(ArrayType,
                                                                  ComplexType));

/// Matches C arrays with a specified constant size.
///
/// Given
/// \code
///   void f() {
///     int a[2];
///     int b[] = { 2, 3 };
///     int c[b[0]];
///   }
/// \endcode
/// constantArrayType()
///   matches "int a[2]"
extern const AstTypeMatcher<ConstantArrayType> constantArrayType;
/// Matches nodes that have the specified size.
///
/// Given
/// \code
///   int a[42];
///   int b[2 * 21];
///   int c[41], d[43];
///   char *s = "abcd";
///   wchar_t *ws = L"abcd";
///   char *w = "a";
/// \endcode
/// constantArrayType(hasSize(42))
///   matches "int a[42]" and "int b[2 * 21]"
/// stringLiteral(hasSize(4))
///   matches "abcd", L"abcd"
AST_POLYMORPHIC_MATCHER_P(hasSize,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(ConstantArrayType,
                                                          StringLiteral),
                          unsigned, N) {
  // HasSizeMatcher abstracts over the two node types (array extent vs.
  // string length).
  return internal::HasSizeMatcher<NodeType>::hasSize(Node, N);
}
/// Matches C++ arrays whose size is a value-dependent expression.
///
/// Given
/// \code
///   template<typename T, int Size>
///   class array {
///     T data[Size];
///   };
/// \endcode
/// dependentSizedArrayType()
///   matches "T data[Size]"
extern const AstTypeMatcher<DependentSizedArrayType> dependentSizedArrayType;

/// Matches C arrays with unspecified size.
///
/// Given
/// \code
///   int a[] = { 2, 3 };
///   int b[42];
///   void f(int c[]) { int d[a[0]]; };
/// \endcode
/// incompleteArrayType()
///   matches "int a[]" and "int c[]"
extern const AstTypeMatcher<IncompleteArrayType> incompleteArrayType;

/// Matches C arrays with a specified size that is not an
/// integer-constant-expression.
///
/// Given
/// \code
///   void f() {
///     int a[] = { 2, 3 };
///     int b[42];
///     int c[a[0]];
///   }
/// \endcode
/// variableArrayType()
///   matches "int c[a[0]]"
extern const AstTypeMatcher<VariableArrayType> variableArrayType;
/// Matches \c VariableArrayType nodes that have a specific size
/// expression.
///
/// Given
/// \code
/// void f(int b) {
/// int a[b];
/// }
/// \endcode
/// variableArrayType(hasSizeExpr(ignoringImpCasts(declRefExpr(to(
/// varDecl(hasName("b")))))))
/// matches "int a[b]"
AST_MATCHER_P(VariableArrayType, hasSizeExpr,
              internal::Matcher<Expr>, InnerMatcher) {
  // NOTE(review): getSizeExpr() is dereferenced unconditionally; this assumes
  // every VariableArrayType carries a size expression — confirm for star-sized
  // arrays ("int a[*]") in function prototypes.
  return InnerMatcher.matches(*Node.getSizeExpr(), Finder, Builder);
}
/// Matches atomic types.
///
/// Given
/// \code
/// _Atomic(int) i;
/// \endcode
/// atomicType()
/// matches "_Atomic(int) i"
extern const AstTypeMatcher<AtomicType> atomicType;
/// Matches atomic types with a specific value type.
///
/// Given
/// \code
/// _Atomic(int) i;
/// _Atomic(float) f;
/// \endcode
/// atomicType(hasValueType(isInteger()))
/// matches "_Atomic(int) i"
///
/// Usable as: Matcher<AtomicType>
AST_TYPELOC_TRAVERSE_MATCHER_DECL(hasValueType, getValue,
AST_POLYMORPHIC_SUPPORTED_TYPES(AtomicType));
/// Matches types nodes representing C++11 auto types.
///
/// Given:
/// \code
/// auto n = 4;
/// int v[] = { 2, 3 }
/// for (auto i : v) { }
/// \endcode
/// autoType()
/// matches "auto n" and "auto i"
extern const AstTypeMatcher<AutoType> autoType;
/// Matches types nodes representing C++11 decltype(<expr>) types.
///
/// Given:
/// \code
/// short i = 1;
/// int j = 42;
/// decltype(i + j) result = i + j;
/// \endcode
/// decltypeType()
/// matches "decltype(i + j)"
extern const AstTypeMatcher<DecltypeType> decltypeType;
/// Matches \c AutoType nodes where the deduced type is a specific type.
///
/// Note: There is no \c TypeLoc for the deduced type and thus no
/// \c getDeducedLoc() matcher.
///
/// Given
/// \code
/// auto a = 1;
/// auto b = 2.0;
/// \endcode
/// autoType(hasDeducedType(isInteger()))
/// matches "auto a"
///
/// Usable as: Matcher<AutoType>
AST_TYPE_TRAVERSE_MATCHER(hasDeducedType, getDeducedType,
AST_POLYMORPHIC_SUPPORTED_TYPES(AutoType));
/// Matches \c DecltypeType nodes to find out the underlying type.
///
/// Given
/// \code
/// decltype(1) a = 1;
/// decltype(2.0) b = 2.0;
/// \endcode
/// decltypeType(hasUnderlyingType(isInteger()))
/// matches the type of "a"
///
/// Usable as: Matcher<DecltypeType>
AST_TYPE_TRAVERSE_MATCHER(hasUnderlyingType, getUnderlyingType,
AST_POLYMORPHIC_SUPPORTED_TYPES(DecltypeType));
/// Matches \c FunctionType nodes.
///
/// Given
/// \code
/// int (*f)(int);
/// void g();
/// \endcode
/// functionType()
/// matches "int (*f)(int)" and the type of "g".
extern const AstTypeMatcher<FunctionType> functionType;
/// Matches \c FunctionProtoType nodes.
///
/// Given
/// \code
/// int (*f)(int);
/// void g();
/// \endcode
/// functionProtoType()
/// matches "int (*f)(int)" and the type of "g" in C++ mode.
/// In C mode, "g" is not matched because it does not contain a prototype.
extern const AstTypeMatcher<FunctionProtoType> functionProtoType;
/// Matches \c ParenType nodes.
///
/// Given
/// \code
/// int (*ptr_to_array)[4];
/// int *array_of_ptrs[4];
/// \endcode
///
/// \c varDecl(hasType(pointsTo(parenType()))) matches \c ptr_to_array but not
/// \c array_of_ptrs.
extern const AstTypeMatcher<ParenType> parenType;
/// Matches \c ParenType nodes where the inner type is a specific type.
///
/// Given
/// \code
/// int (*ptr_to_array)[4];
/// int (*ptr_to_func)(int);
/// \endcode
///
/// \c varDecl(hasType(pointsTo(parenType(innerType(functionType()))))) matches
/// \c ptr_to_func but not \c ptr_to_array.
///
/// Usable as: Matcher<ParenType>
AST_TYPE_TRAVERSE_MATCHER(innerType, getInnerType,
AST_POLYMORPHIC_SUPPORTED_TYPES(ParenType));
/// Matches block pointer types, i.e. types syntactically represented as
/// "void (^)(int)".
///
/// The \c pointee is always required to be a \c FunctionType.
extern const AstTypeMatcher<BlockPointerType> blockPointerType;
/// Matches member pointer types.
/// Given
/// \code
/// struct A { int i; }
/// A::* ptr = A::i;
/// \endcode
/// memberPointerType()
/// matches "A::* ptr"
extern const AstTypeMatcher<MemberPointerType> memberPointerType;
/// Matches pointer types, but does not match Objective-C object pointer
/// types.
///
/// Given
/// \code
/// int *a;
/// int &b = *a;
/// int c = 5;
///
/// @interface Foo
/// @end
/// Foo *f;
/// \endcode
/// pointerType()
/// matches "int *a", but does not match "Foo *f".
extern const AstTypeMatcher<PointerType> pointerType;
/// Matches an Objective-C object pointer type, which is different from
/// a pointer type, despite being syntactically similar.
///
/// Given
/// \code
/// int *a;
///
/// @interface Foo
/// @end
/// Foo *f;
/// \endcode
/// objcObjectPointerType()
/// matches "Foo *f", but does not match "int *a".
extern const AstTypeMatcher<ObjCObjectPointerType> objcObjectPointerType;
/// Matches both lvalue and rvalue reference types.
///
/// Given
/// \code
/// int *a;
/// int &b = *a;
/// int &&c = 1;
/// auto &d = b;
/// auto &&e = c;
/// auto &&f = 2;
/// int g = 5;
/// \endcode
///
/// \c referenceType() matches the types of \c b, \c c, \c d, \c e, and \c f.
extern const AstTypeMatcher<ReferenceType> referenceType;
/// Matches lvalue reference types.
///
/// Given:
/// \code
/// int *a;
/// int &b = *a;
/// int &&c = 1;
/// auto &d = b;
/// auto &&e = c;
/// auto &&f = 2;
/// int g = 5;
/// \endcode
///
/// \c lValueReferenceType() matches the types of \c b, \c d, and \c e. \c e is
/// matched since the type is deduced as int& by reference collapsing rules.
extern const AstTypeMatcher<LValueReferenceType> lValueReferenceType;
/// Matches rvalue reference types.
///
/// Given:
/// \code
/// int *a;
/// int &b = *a;
/// int &&c = 1;
/// auto &d = b;
/// auto &&e = c;
/// auto &&f = 2;
/// int g = 5;
/// \endcode
///
/// \c rValueReferenceType() matches the types of \c c and \c f. \c e is not
/// matched as it is deduced to int& by reference collapsing rules.
extern const AstTypeMatcher<RValueReferenceType> rValueReferenceType;
/// Narrows PointerType (and similar) matchers to those where the
/// \c pointee matches a given matcher.
///
/// Given
/// \code
/// int *a;
/// int const *b;
/// float const *f;
/// \endcode
/// pointerType(pointee(isConstQualified(), isInteger()))
/// matches "int const *b"
///
/// Usable as: Matcher<BlockPointerType>, Matcher<MemberPointerType>,
/// Matcher<PointerType>, Matcher<ReferenceType>
AST_TYPELOC_TRAVERSE_MATCHER_DECL(
pointee, getPointee,
AST_POLYMORPHIC_SUPPORTED_TYPES(BlockPointerType, MemberPointerType,
PointerType, ReferenceType));
/// Matches typedef types.
///
/// Given
/// \code
/// typedef int X;
/// \endcode
/// typedefType()
/// matches "typedef int X"
extern const AstTypeMatcher<TypedefType> typedefType;
/// Matches enum types.
///
/// Given
/// \code
/// enum C { Green };
/// enum class S { Red };
///
/// C c;
/// S s;
/// \endcode
///
/// \c enumType() matches the type of the variable declarations of both \c c and
/// \c s.
extern const AstTypeMatcher<EnumType> enumType;
/// Matches template specialization types.
///
/// Given
/// \code
/// template <typename T>
/// class C { };
///
/// template class C<int>; // A
/// C<char> var; // B
/// \endcode
///
/// \c templateSpecializationType() matches the type of the explicit
/// instantiation in \c A and the type of the variable declaration in \c B.
extern const AstTypeMatcher<TemplateSpecializationType>
templateSpecializationType;
/// Matches C++17 deduced template specialization types, e.g. deduced class
/// template types.
///
/// Given
/// \code
/// template <typename T>
/// class C { public: C(T); };
///
/// C c(123);
/// \endcode
/// \c deducedTemplateSpecializationType() matches the type in the declaration
/// of the variable \c c.
extern const AstTypeMatcher<DeducedTemplateSpecializationType>
deducedTemplateSpecializationType;
/// Matches types nodes representing unary type transformations.
///
/// Given:
/// \code
/// typedef __underlying_type(T) type;
/// \endcode
/// unaryTransformType()
/// matches "__underlying_type(T)"
extern const AstTypeMatcher<UnaryTransformType> unaryTransformType;
/// Matches record types (e.g. structs, classes).
///
/// Given
/// \code
/// class C {};
/// struct S {};
///
/// C c;
/// S s;
/// \endcode
///
/// \c recordType() matches the type of the variable declarations of both \c c
/// and \c s.
extern const AstTypeMatcher<RecordType> recordType;
/// Matches tag types (record and enum types).
///
/// Given
/// \code
/// enum E {};
/// class C {};
///
/// E e;
/// C c;
/// \endcode
///
/// \c tagType() matches the type of the variable declarations of both \c e
/// and \c c.
extern const AstTypeMatcher<TagType> tagType;
/// Matches types specified with an elaborated type keyword or with a
/// qualified name.
///
/// Given
/// \code
/// namespace N {
/// namespace M {
/// class D {};
/// }
/// }
/// class C {};
///
/// class C c;
/// N::M::D d;
/// \endcode
///
/// \c elaboratedType() matches the type of the variable declarations of both
/// \c c and \c d.
extern const AstTypeMatcher<ElaboratedType> elaboratedType;
/// Matches ElaboratedTypes whose qualifier, a NestedNameSpecifier,
/// matches \c InnerMatcher if the qualifier exists.
///
/// Given
/// \code
/// namespace N {
/// namespace M {
/// class D {};
/// }
/// }
/// N::M::D d;
/// \endcode
///
/// \c elaboratedType(hasQualifier(hasPrefix(specifiesNamespace(hasName("N"))))
/// matches the type of the variable declaration of \c d.
AST_MATCHER_P(ElaboratedType, hasQualifier,
              internal::Matcher<NestedNameSpecifier>, InnerMatcher) {
  // A type elaborated without a qualifier can never match.
  const NestedNameSpecifier *Qualifier = Node.getQualifier();
  if (!Qualifier)
    return false;
  return InnerMatcher.matches(*Qualifier, Finder, Builder);
}
/// Matches ElaboratedTypes whose named type matches \c InnerMatcher.
///
/// Given
/// \code
/// namespace N {
/// namespace M {
/// class D {};
/// }
/// }
/// N::M::D d;
/// \endcode
///
/// \c elaboratedType(namesType(recordType(
/// hasDeclaration(namedDecl(hasName("D")))))) matches the type of the variable
/// declaration of \c d.
AST_MATCHER_P(ElaboratedType, namesType, internal::Matcher<QualType>,
              InnerMatcher) {
  // Matches against the type the elaboration names; the qualifier part is
  // handled separately by hasQualifier.
  return InnerMatcher.matches(Node.getNamedType(), Finder, Builder);
}
/// Matches types that represent the result of substituting a type for a
/// template type parameter.
///
/// Given
/// \code
/// template <typename T>
/// void F(T t) {
/// int i = 1 + t;
/// }
/// \endcode
///
/// \c substTemplateTypeParmType() matches the type of 't' but not '1'
extern const AstTypeMatcher<SubstTemplateTypeParmType>
substTemplateTypeParmType;
/// Matches template type parameter substitutions that have a replacement
/// type that matches the provided matcher.
///
/// Given
/// \code
/// template <typename T>
/// double F(T t);
/// int i;
/// double j = F(i);
/// \endcode
///
/// \c substTemplateTypeParmType(hasReplacementType(type())) matches int
AST_TYPE_TRAVERSE_MATCHER(
hasReplacementType, getReplacementType,
AST_POLYMORPHIC_SUPPORTED_TYPES(SubstTemplateTypeParmType));
/// Matches template type parameter types.
///
/// Example matches T, but not int.
/// (matcher = templateTypeParmType())
/// \code
/// template <typename T> void f(int i);
/// \endcode
extern const AstTypeMatcher<TemplateTypeParmType> templateTypeParmType;
/// Matches injected class name types.
///
/// Example matches S s, but not S<T> s.
/// (matcher = parmVarDecl(hasType(injectedClassNameType())))
/// \code
/// template <typename T> struct S {
/// void f(S s);
/// void g(S<T> s);
/// };
/// \endcode
extern const AstTypeMatcher<InjectedClassNameType> injectedClassNameType;
/// Matches decayed type
/// Example matches i[] in declaration of f.
/// (matcher = valueDecl(hasType(decayedType(hasDecayedType(pointerType())))))
/// Example matches i[1].
/// (matcher = expr(hasType(decayedType(hasDecayedType(pointerType())))))
/// \code
/// void f(int i[]) {
/// i[1] = 0;
/// }
/// \endcode
extern const AstTypeMatcher<DecayedType> decayedType;
/// Matches the decayed type, whose decayed type matches \c InnerMatcher
AST_MATCHER_P(DecayedType, hasDecayedType, internal::Matcher<QualType>,
              InnerType) {
  // Matches against the decayed form (e.g. the pointer an array parameter
  // decays to), not the original type.
  return InnerType.matches(Node.getDecayedType(), Finder, Builder);
}
/// Matches declarations whose declaration context, interpreted as a
/// Decl, matches \c InnerMatcher.
///
/// Given
/// \code
/// namespace N {
/// namespace M {
/// class D {};
/// }
/// }
/// \endcode
///
/// \c cxxRecordDecl(hasDeclContext(namedDecl(hasName("M")))) matches the
/// declaration of \c class \c D.
AST_MATCHER_P(Decl, hasDeclContext, internal::Matcher<Decl>, InnerMatcher) {
  // No enclosing context -> no match.
  const DeclContext *DC = Node.getDeclContext();
  if (!DC) return false;
  // Reinterpret the DeclContext as the Decl it also is, so the inner
  // Decl matcher can be applied to it.
  return InnerMatcher.matches(*Decl::castFromDeclContext(DC), Finder, Builder);
}
/// Matches nested name specifiers.
///
/// Given
/// \code
/// namespace ns {
/// struct A { static void f(); };
/// void A::f() {}
/// void g() { A::f(); }
/// }
/// ns::A a;
/// \endcode
/// nestedNameSpecifier()
/// matches "ns::" and both "A::"
extern const internal::VariadicAllOfMatcher<NestedNameSpecifier>
nestedNameSpecifier;
/// Same as \c nestedNameSpecifier but matches \c NestedNameSpecifierLoc.
extern const internal::VariadicAllOfMatcher<NestedNameSpecifierLoc>
nestedNameSpecifierLoc;
/// Matches \c NestedNameSpecifierLocs for which the given inner
/// NestedNameSpecifier-matcher matches.
AST_MATCHER_FUNCTION_P_OVERLOAD(
    internal::BindableMatcher<NestedNameSpecifierLoc>, loc,
    internal::Matcher<NestedNameSpecifier>, InnerMatcher, 1) {
  // Adapter: wraps the location-less NestedNameSpecifier matcher in a
  // LocMatcher so it can be applied to a NestedNameSpecifierLoc.
  return internal::BindableMatcher<NestedNameSpecifierLoc>(
      new internal::LocMatcher<NestedNameSpecifierLoc, NestedNameSpecifier>(
          InnerMatcher));
}
/// Matches nested name specifiers that specify a type matching the
/// given \c QualType matcher without qualifiers.
///
/// Given
/// \code
/// struct A { struct B { struct C {}; }; };
/// A::B::C c;
/// \endcode
/// nestedNameSpecifier(specifiesType(
/// hasDeclaration(cxxRecordDecl(hasName("A")))
/// ))
/// matches "A::"
AST_MATCHER_P(NestedNameSpecifier, specifiesType,
              internal::Matcher<QualType>, InnerMatcher) {
  // Specifiers that do not name a type can never match.
  const Type *SpecifiedType = Node.getAsType();
  if (!SpecifiedType)
    return false;
  // Wrap in an unqualified QualType for the inner matcher.
  return InnerMatcher.matches(QualType(SpecifiedType, 0), Finder, Builder);
}
/// Matches nested name specifier locs that specify a type matching the
/// given \c TypeLoc.
///
/// Given
/// \code
/// struct A { struct B { struct C {}; }; };
/// A::B::C c;
/// \endcode
/// nestedNameSpecifierLoc(specifiesTypeLoc(loc(type(
/// hasDeclaration(cxxRecordDecl(hasName("A")))))))
/// matches "A::"
AST_MATCHER_P(NestedNameSpecifierLoc, specifiesTypeLoc,
              internal::Matcher<TypeLoc>, InnerMatcher) {
  // An invalid loc can never match.
  if (!Node)
    return false;
  // Neither can a specifier that does not name a type.
  if (!Node.getNestedNameSpecifier()->getAsType())
    return false;
  return InnerMatcher.matches(Node.getTypeLoc(), Finder, Builder);
}
/// Matches on the prefix of a \c NestedNameSpecifier.
///
/// Given
/// \code
/// struct A { struct B { struct C {}; }; };
/// A::B::C c;
/// \endcode
/// nestedNameSpecifier(hasPrefix(specifiesType(asString("struct A")))) and
/// matches "A::"
AST_MATCHER_P_OVERLOAD(NestedNameSpecifier, hasPrefix,
                       internal::Matcher<NestedNameSpecifier>, InnerMatcher,
                       0) {
  // The outermost specifier has no prefix and therefore never matches.
  if (const NestedNameSpecifier *Prefix = Node.getPrefix())
    return InnerMatcher.matches(*Prefix, Finder, Builder);
  return false;
}
/// Matches on the prefix of a \c NestedNameSpecifierLoc.
///
/// Given
/// \code
/// struct A { struct B { struct C {}; }; };
/// A::B::C c;
/// \endcode
/// nestedNameSpecifierLoc(hasPrefix(loc(specifiesType(asString("struct A")))))
/// matches "A::"
AST_MATCHER_P_OVERLOAD(NestedNameSpecifierLoc, hasPrefix,
                       internal::Matcher<NestedNameSpecifierLoc>, InnerMatcher,
                       1) {
  // The outermost specifier has no prefix and therefore never matches.
  if (NestedNameSpecifierLoc Prefix = Node.getPrefix())
    return InnerMatcher.matches(Prefix, Finder, Builder);
  return false;
}
/// Matches nested name specifiers that specify a namespace matching the
/// given namespace matcher.
///
/// Given
/// \code
/// namespace ns { struct A {}; }
/// ns::A a;
/// \endcode
/// nestedNameSpecifier(specifiesNamespace(hasName("ns")))
/// matches "ns::"
AST_MATCHER_P(NestedNameSpecifier, specifiesNamespace,
              internal::Matcher<NamespaceDecl>, InnerMatcher) {
  // Specifiers that do not name a namespace can never match.
  const NamespaceDecl *Namespace = Node.getAsNamespace();
  if (!Namespace)
    return false;
  return InnerMatcher.matches(*Namespace, Finder, Builder);
}
/// Matches attributes.
/// Attributes may be attached with a variety of different syntaxes (including
/// keywords, C++11 attributes, GNU ``__attribute``` and MSVC `__declspec``,
/// and ``#pragma``s). They may also be implicit.
///
/// Given
/// \code
/// struct [[nodiscard]] Foo{};
/// void bar(int * __attribute__((nonnull)) );
/// __declspec(noinline) void baz();
///
/// #pragma omp declare simd
/// int min();
/// \endcode
/// attr()
/// matches "nodiscard", "nonnull", "noinline", and the whole "#pragma" line.
extern const internal::VariadicAllOfMatcher<Attr> attr;
/// Overloads for the \c equalsNode matcher.
/// FIXME: Implement for other node types.
/// @{
/// Matches if a node equals another node.
///
/// \c Decl has pointer identity in the AST.
AST_MATCHER_P_OVERLOAD(Decl, equalsNode, const Decl*, Other, 0) {
  // Decls have pointer identity in the AST, so address equality is identity.
  return &Node == Other;
}
/// Matches if a node equals another node.
///
/// \c Stmt has pointer identity in the AST.
AST_MATCHER_P_OVERLOAD(Stmt, equalsNode, const Stmt*, Other, 1) {
  // Stmts have pointer identity in the AST, so address equality is identity.
  return &Node == Other;
}
/// Matches if a node equals another node.
///
/// \c Type has pointer identity in the AST.
AST_MATCHER_P_OVERLOAD(Type, equalsNode, const Type*, Other, 2) {
  // Types have pointer identity in the AST, so address equality is identity.
  return &Node == Other;
}
/// @}
/// Matches each case or default statement belonging to the given switch
/// statement. This matcher may produce multiple matches.
///
/// Given
/// \code
/// switch (1) { case 1: case 2: default: switch (2) { case 3: case 4: ; } }
/// \endcode
/// switchStmt(forEachSwitchCase(caseStmt().bind("c"))).bind("s")
/// matches four times, with "c" binding each of "case 1:", "case 2:",
/// "case 3:" and "case 4:", and "s" respectively binding "switch (1)",
/// "switch (1)", "switch (2)" and "switch (2)".
AST_MATCHER_P(SwitchStmt, forEachSwitchCase, internal::Matcher<SwitchCase>,
              InnerMatcher) {
  // FIXME: getSwitchCaseList() does not necessarily guarantee a stable
  // iteration order. We should use the more general iterating matchers once
  // they are capable of expressing this matcher (for example, it should ignore
  // case statements belonging to nested switch statements).
  bool FoundMatch = false;
  BoundNodesTreeBuilder Aggregate;
  for (const SwitchCase *Case = Node.getSwitchCaseList(); Case;
       Case = Case->getNextSwitchCase()) {
    // Each case works on its own copy of the bindings accumulated so far;
    // successful matches are folded into the aggregate result.
    BoundNodesTreeBuilder PerCase(*Builder);
    if (InnerMatcher.matches(*Case, Finder, &PerCase)) {
      FoundMatch = true;
      Aggregate.addMatch(PerCase);
    }
  }
  *Builder = std::move(Aggregate);
  return FoundMatch;
}
/// Matches each constructor initializer in a constructor definition.
///
/// Given
/// \code
/// class A { A() : i(42), j(42) {} int i; int j; };
/// \endcode
/// cxxConstructorDecl(forEachConstructorInitializer(
/// forField(decl().bind("x"))
/// ))
/// will trigger two matches, binding for 'i' and 'j' respectively.
AST_MATCHER_P(CXXConstructorDecl, forEachConstructorInitializer,
              internal::Matcher<CXXCtorInitializer>, InnerMatcher) {
  bool FoundMatch = false;
  BoundNodesTreeBuilder Aggregate;
  for (const CXXCtorInitializer *Init : Node.inits()) {
    // Under implicit-ignoring traversal, skip initializers the user did not
    // spell out in the source.
    if (Finder->isTraversalIgnoringImplicitNodes() && !Init->isWritten())
      continue;
    // Each initializer works on its own copy of the bindings; successful
    // matches are folded into the aggregate result.
    BoundNodesTreeBuilder PerInit(*Builder);
    if (InnerMatcher.matches(*Init, Finder, &PerInit)) {
      FoundMatch = true;
      Aggregate.addMatch(PerInit);
    }
  }
  *Builder = std::move(Aggregate);
  return FoundMatch;
}
/// Matches constructor declarations that are copy constructors.
///
/// Given
/// \code
/// struct S {
/// S(); // #1
/// S(const S &); // #2
/// S(S &&); // #3
/// };
/// \endcode
/// cxxConstructorDecl(isCopyConstructor()) will match #2, but not #1 or #3.
AST_MATCHER(CXXConstructorDecl, isCopyConstructor) {
  // Thin wrapper over CXXConstructorDecl::isCopyConstructor().
  return Node.isCopyConstructor();
}
/// Matches constructor declarations that are move constructors.
///
/// Given
/// \code
/// struct S {
/// S(); // #1
/// S(const S &); // #2
/// S(S &&); // #3
/// };
/// \endcode
/// cxxConstructorDecl(isMoveConstructor()) will match #3, but not #1 or #2.
AST_MATCHER(CXXConstructorDecl, isMoveConstructor) {
  // Thin wrapper over CXXConstructorDecl::isMoveConstructor().
  return Node.isMoveConstructor();
}
/// Matches constructor declarations that are default constructors.
///
/// Given
/// \code
/// struct S {
/// S(); // #1
/// S(const S &); // #2
/// S(S &&); // #3
/// };
/// \endcode
/// cxxConstructorDecl(isDefaultConstructor()) will match #1, but not #2 or #3.
AST_MATCHER(CXXConstructorDecl, isDefaultConstructor) {
  // Thin wrapper over CXXConstructorDecl::isDefaultConstructor().
  return Node.isDefaultConstructor();
}
/// Matches constructors that delegate to another constructor.
///
/// Given
/// \code
/// struct S {
/// S(); // #1
/// S(int) {} // #2
/// S(S &&) : S() {} // #3
/// };
/// S::S() : S(0) {} // #4
/// \endcode
/// cxxConstructorDecl(isDelegatingConstructor()) will match #3 and #4, but not
/// #1 or #2.
AST_MATCHER(CXXConstructorDecl, isDelegatingConstructor) {
  // Thin wrapper over CXXConstructorDecl::isDelegatingConstructor().
  return Node.isDelegatingConstructor();
}
/// Matches constructor, conversion function, and deduction guide declarations
/// that have an explicit specifier if this explicit specifier is resolved to
/// true.
///
/// Given
/// \code
/// template<bool b>
/// struct S {
/// S(int); // #1
/// explicit S(double); // #2
/// operator int(); // #3
/// explicit operator bool(); // #4
/// explicit(false) S(bool) // # 7
/// explicit(true) S(char) // # 8
/// explicit(b) S(S) // # 9
/// };
/// S(int) -> S<true> // #5
/// explicit S(double) -> S<false> // #6
/// \endcode
/// cxxConstructorDecl(isExplicit()) will match #2 and #8, but not #1, #7 or #9.
/// cxxConversionDecl(isExplicit()) will match #4, but not #3.
/// cxxDeductionGuideDecl(isExplicit()) will match #6, but not #5.
AST_POLYMORPHIC_MATCHER(isExplicit, AST_POLYMORPHIC_SUPPORTED_TYPES(
                            CXXConstructorDecl, CXXConversionDecl,
                            CXXDeductionGuideDecl)) {
  // Only an explicit specifier that resolves to true matches; a dependent
  // one such as explicit(b) does not.
  return Node.isExplicit();
}
/// Matches the expression in an explicit specifier if present in the given
/// declaration.
///
/// Given
/// \code
/// template<bool b>
/// struct S {
/// S(int); // #1
/// explicit S(double); // #2
/// operator int(); // #3
/// explicit operator bool(); // #4
/// explicit(false) S(bool) // # 7
/// explicit(true) S(char) // # 8
/// explicit(b) S(S) // # 9
/// };
/// S(int) -> S<true> // #5
/// explicit S(double) -> S<false> // #6
/// \endcode
/// cxxConstructorDecl(hasExplicitSpecifier(constantExpr())) will match #7, #8 and #9, but not #1 or #2.
/// cxxConversionDecl(hasExplicitSpecifier(constantExpr())) will not match #3 or #4.
/// cxxDeductionGuideDecl(hasExplicitSpecifier(constantExpr())) will not match #5 or #6.
AST_MATCHER_P(FunctionDecl, hasExplicitSpecifier, internal::Matcher<Expr>,
              InnerMatcher) {
  // Only declarations written with explicit(bool) carry an expression; a
  // plain "explicit" (or no specifier at all) has no expr and never matches.
  ExplicitSpecifier ES = ExplicitSpecifier::getFromDecl(&Node);
  if (!ES.getExpr())
    return false;
  // NOTE(review): presumably allows the inner matcher to visit nodes not
  // spelled in source for the duration of the match — confirm.
  ASTChildrenNotSpelledInSourceScope RAII(Finder, false);
  return InnerMatcher.matches(*ES.getExpr(), Finder, Builder);
}
/// Matches function and namespace declarations that are marked with
/// the inline keyword.
///
/// Given
/// \code
/// inline void f();
/// void g();
/// namespace n {
/// inline namespace m {}
/// }
/// \endcode
/// functionDecl(isInline()) will match ::f().
/// namespaceDecl(isInline()) will match n::m.
AST_POLYMORPHIC_MATCHER(isInline,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(NamespaceDecl,
                                                        FunctionDecl)) {
  // This is required because the spelling of the function used to determine
  // whether inline is specified or not differs between the polymorphic types.
  if (const auto *NSD = dyn_cast<NamespaceDecl>(&Node))
    return NSD->isInline();
  if (const auto *FD = dyn_cast<FunctionDecl>(&Node))
    return FD->isInlineSpecified();
  llvm_unreachable("Not a valid polymorphic type");
}
/// Matches anonymous namespace declarations.
///
/// Given
/// \code
/// namespace n {
/// namespace {} // #1
/// }
/// \endcode
/// namespaceDecl(isAnonymous()) will match #1 but not ::n.
AST_MATCHER(NamespaceDecl, isAnonymous) {
  // Thin wrapper over NamespaceDecl::isAnonymousNamespace().
  return Node.isAnonymousNamespace();
}
/// Matches declarations in the namespace `std`, but not in nested namespaces.
///
/// Given
/// \code
/// class vector {};
/// namespace foo {
/// class vector {};
/// namespace std {
/// class vector {};
/// }
/// }
/// namespace std {
/// inline namespace __1 {
/// class vector {}; // #1
/// namespace experimental {
/// class vector {};
/// }
/// }
/// }
/// \endcode
/// cxxRecordDecl(hasName("vector"), isInStdNamespace()) will match only #1.
// Defers to Decl::isInStdNamespace(); per the example above, a decl inside an
// inline namespace nested in std (std::__1) still counts.
AST_MATCHER(Decl, isInStdNamespace) { return Node.isInStdNamespace(); }
/// If the given case statement does not use the GNU case range
/// extension, matches the constant given in the statement.
///
/// Given
/// \code
/// switch (1) { case 1: case 1+1: case 3 ... 4: ; }
/// \endcode
/// caseStmt(hasCaseConstant(integerLiteral()))
/// matches "case 1:"
AST_MATCHER_P(CaseStmt, hasCaseConstant, internal::Matcher<Expr>,
              InnerMatcher) {
  // A non-null RHS means the GNU "case lo ... hi:" range form, which is
  // excluded; otherwise match against the single case constant (LHS).
  return !Node.getRHS() &&
         InnerMatcher.matches(*Node.getLHS(), Finder, Builder);
}
/// Matches declaration that has a given attribute.
///
/// Given
/// \code
/// __attribute__((device)) void f() { ... }
/// \endcode
/// decl(hasAttr(clang::attr::CUDADevice)) matches the function declaration of
/// f. If the matcher is used from clang-query, attr::Kind parameter should be
/// passed as a quoted string. e.g., hasAttr("attr::CUDADevice").
AST_MATCHER_P(Decl, hasAttr, attr::Kind, AttrKind) {
  // True iff any attribute attached to the declaration has the given kind.
  return llvm::any_of(Node.attrs(), [&](const Attr *A) {
    return A->getKind() == AttrKind;
  });
}
/// Matches the return value expression of a return statement
///
/// Given
/// \code
/// return a + b;
/// \endcode
/// hasReturnValue(binaryOperator())
/// matches 'return a + b'
/// with binaryOperator()
/// matching 'a + b'
AST_MATCHER_P(ReturnStmt, hasReturnValue, internal::Matcher<Expr>,
              InnerMatcher) {
  // A bare "return;" has no value expression and never matches.
  const auto *RetValue = Node.getRetValue();
  if (!RetValue)
    return false;
  return InnerMatcher.matches(*RetValue, Finder, Builder);
}
/// Matches CUDA kernel call expression.
///
/// Example matches,
/// \code
/// kernel<<<i,j>>>();
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CUDAKernelCallExpr>
cudaKernelCallExpr;
/// Matches expressions that resolve to a null pointer constant, such as
/// GNU's __null, C++11's nullptr, or C's NULL macro.
///
/// Given:
/// \code
/// void *v1 = NULL;
/// void *v2 = nullptr;
/// void *v3 = __null; // GNU extension
/// char *cp = (char *)0;
/// int *ip = 0;
/// int i = 0;
/// \endcode
/// expr(nullPointerConstant())
/// matches the initializer for v1, v2, v3, cp, and ip. Does not match the
/// initializer for i.
AST_MATCHER_FUNCTION(internal::Matcher<Expr>, nullPointerConstant) {
  // GNU __null, C++11 nullptr, or a literal 0 used where a pointer is
  // expected (which is what C's NULL macro typically expands to).
  return anyOf(
      gnuNullExpr(), cxxNullPtrLiteralExpr(),
      integerLiteral(equals(0), hasParent(expr(hasType(pointerType())))));
}
/// Matches the DecompositionDecl the binding belongs to.
///
/// For example, in:
/// \code
/// void foo()
/// {
/// int arr[3];
/// auto &[f, s, t] = arr;
///
/// f = 42;
/// }
/// \endcode
/// The matcher:
/// \code
/// bindingDecl(hasName("f"),
/// forDecomposition(decompositionDecl())
/// \endcode
/// matches 'f' in 'auto &[f, s, t]'.
AST_MATCHER_P(BindingDecl, forDecomposition, internal::Matcher<ValueDecl>,
              InnerMatcher) {
  // A binding without an associated decomposed decl can never match.
  const ValueDecl *Decomposed = Node.getDecomposedDecl();
  return Decomposed && InnerMatcher.matches(*Decomposed, Finder, Builder);
}
/// Matches the Nth binding of a DecompositionDecl.
///
/// For example, in:
/// \code
/// void foo()
/// {
/// int arr[3];
/// auto &[f, s, t] = arr;
///
/// f = 42;
/// }
/// \endcode
/// The matcher:
/// \code
/// decompositionDecl(hasBinding(0,
/// bindingDecl(hasName("f").bind("fBinding"))))
/// \endcode
/// matches the decomposition decl with 'f' bound to "fBinding".
AST_MATCHER_P2(DecompositionDecl, hasBinding, unsigned, N,
               internal::Matcher<BindingDecl>, InnerMatcher) {
  // Out-of-range indices never match.
  auto Bindings = Node.bindings();
  if (N >= Bindings.size())
    return false;
  return InnerMatcher.matches(*Bindings[N], Finder, Builder);
}
/// Matches any binding of a DecompositionDecl.
///
/// For example, in:
/// \code
/// void foo()
/// {
/// int arr[3];
/// auto &[f, s, t] = arr;
///
/// f = 42;
/// }
/// \endcode
/// The matcher:
/// \code
/// decompositionDecl(hasAnyBinding(bindingDecl(hasName("f").bind("fBinding"))))
/// \endcode
/// matches the decomposition decl with 'f' bound to "fBinding".
AST_MATCHER_P(DecompositionDecl, hasAnyBinding, internal::Matcher<BindingDecl>,
              InnerMatcher) {
  // Succeeds on the first binding the inner matcher accepts.
  for (const auto *Binding : Node.bindings())
    if (InnerMatcher.matches(*Binding, Finder, Builder))
      return true;
  return false;
}
/// Matches declaration of the function the statement belongs to.
///
/// Deprecated. Use forCallable() to correctly handle the situation when
/// the declaration is not a function (but a block or an Objective-C method).
/// forFunction() not only fails to take non-functions into account but also
/// may match the wrong declaration in their presence.
///
/// Given:
/// \code
/// F& operator=(const F& o) {
/// std::copy_if(o.begin(), o.end(), begin(), [](V v) { return v > 0; });
/// return *this;
/// }
/// \endcode
/// returnStmt(forFunction(hasName("operator=")))
/// matches 'return *this'
/// but does not match 'return v > 0'
AST_MATCHER_P(Stmt, forFunction, internal::Matcher<FunctionDecl>,
              InnerMatcher) {
  // Walk up the parent chain (DFS over all parents) until a FunctionDecl or
  // LambdaExpr is found; match the inner matcher against it.
  const auto &Parents = Finder->getASTContext().getParents(Node);
  llvm::SmallVector<DynTypedNode, 8> Stack(Parents.begin(), Parents.end());
  while (!Stack.empty()) {
    // Copy the node out before popping: the previous code kept a reference
    // to Stack.back() across pop_back(), reading an already-destroyed
    // element.
    DynTypedNode CurNode = Stack.pop_back_val();
    if (const auto *FuncDeclNode = CurNode.get<FunctionDecl>()) {
      if (InnerMatcher.matches(*FuncDeclNode, Finder, Builder)) {
        return true;
      }
    } else if (const auto *LambdaExprNode = CurNode.get<LambdaExpr>()) {
      // A lambda's body belongs to its call operator, not to the enclosing
      // function.
      if (InnerMatcher.matches(*LambdaExprNode->getCallOperator(), Finder,
                               Builder)) {
        return true;
      }
    } else {
      // Not a callable: keep walking up through all parents.
      for (const auto &Parent : Finder->getASTContext().getParents(CurNode))
        Stack.push_back(Parent);
    }
  }
  return false;
}
/// Matches declaration of the function, method, or block the statement
/// belongs to.
///
/// Given:
/// \code
/// F& operator=(const F& o) {
/// std::copy_if(o.begin(), o.end(), begin(), [](V v) { return v > 0; });
/// return *this;
/// }
/// \endcode
/// returnStmt(forCallable(functionDecl(hasName("operator="))))
/// matches 'return *this'
/// but does not match 'return v > 0'
///
/// Given:
/// \code
/// -(void) foo {
/// int x = 1;
/// dispatch_sync(queue, ^{ int y = 2; });
/// }
/// \endcode
/// declStmt(forCallable(objcMethodDecl()))
/// matches 'int x = 1'
/// but does not match 'int y = 2'.
/// whereas declStmt(forCallable(blockDecl()))
/// matches 'int y = 2'
/// but does not match 'int x = 1'.
AST_MATCHER_P(Stmt, forCallable, internal::Matcher<Decl>, InnerMatcher) {
  // Walk up the parent chain (DFS over all parents) until a FunctionDecl,
  // LambdaExpr, ObjCMethodDecl, or BlockDecl is found; match the inner
  // matcher against it.
  const auto &Parents = Finder->getASTContext().getParents(Node);
  llvm::SmallVector<DynTypedNode, 8> Stack(Parents.begin(), Parents.end());
  while (!Stack.empty()) {
    // Copy the node out before popping: the previous code kept a reference
    // to Stack.back() across pop_back(), reading an already-destroyed
    // element.
    DynTypedNode CurNode = Stack.pop_back_val();
    if (const auto *FuncDeclNode = CurNode.get<FunctionDecl>()) {
      if (InnerMatcher.matches(*FuncDeclNode, Finder, Builder)) {
        return true;
      }
    } else if (const auto *LambdaExprNode = CurNode.get<LambdaExpr>()) {
      // A lambda's body belongs to its call operator, not to the enclosing
      // function.
      if (InnerMatcher.matches(*LambdaExprNode->getCallOperator(), Finder,
                               Builder)) {
        return true;
      }
    } else if (const auto *ObjCMethodDeclNode = CurNode.get<ObjCMethodDecl>()) {
      if (InnerMatcher.matches(*ObjCMethodDeclNode, Finder, Builder)) {
        return true;
      }
    } else if (const auto *BlockDeclNode = CurNode.get<BlockDecl>()) {
      if (InnerMatcher.matches(*BlockDeclNode, Finder, Builder)) {
        return true;
      }
    } else {
      // Not a callable: keep walking up through all parents.
      for (const auto &Parent : Finder->getASTContext().getParents(CurNode))
        Stack.push_back(Parent);
    }
  }
  return false;
}
/// Matches a declaration that has external formal linkage.
///
/// Example matches only z (matcher = varDecl(hasExternalFormalLinkage()))
/// \code
/// void f() {
/// int x;
/// static int y;
/// }
/// int z;
/// \endcode
///
/// Example matches f() because it has external formal linkage despite being
/// unique to the translation unit as though it has internal linkage
/// (matcher = functionDecl(hasExternalFormalLinkage()))
///
/// \code
/// namespace {
/// void f() {}
/// }
/// \endcode
AST_MATCHER(NamedDecl, hasExternalFormalLinkage) {
  // Checks *formal* linkage: per the example above, a function in an
  // anonymous namespace still reports external formal linkage even though it
  // is unique to its translation unit.
  return Node.hasExternalFormalLinkage();
}
/// Matches a declaration that has default arguments.
///
/// Example matches y (matcher = parmVarDecl(hasDefaultArgument()))
/// \code
/// void x(int val) {}
/// void y(int val = 0) {}
/// \endcode
///
/// Deprecated. Use hasInitializer() instead to be able to
/// match on the contents of the default argument. For example:
///
/// \code
/// void x(int val = 7) {}
/// void y(int val = 42) {}
/// \endcode
/// parmVarDecl(hasInitializer(integerLiteral(equals(42))))
/// matches the parameter of y
///
/// A matcher such as
/// parmVarDecl(hasInitializer(anything()))
/// is equivalent to parmVarDecl(hasDefaultArgument()).
AST_MATCHER(ParmVarDecl, hasDefaultArgument) {
  // Thin wrapper over the AST node's own query.
  return Node.hasDefaultArg();
}
/// Matches array new expressions.
///
/// Given:
/// \code
///   MyClass *p1 = new MyClass[10];
/// \endcode
/// cxxNewExpr(isArray())
/// matches the expression 'new MyClass[10]'.
AST_MATCHER(CXXNewExpr, isArray) {
  // True for array forms of new ('new T[n]'), false for scalar 'new T'.
  return Node.isArray();
}
/// Matches the placement-new argument at the given zero-based index.
///
/// Given:
/// \code
///   MyClass *p1 = new (Storage, 16) MyClass();
/// \endcode
/// cxxNewExpr(hasPlacementArg(1, integerLiteral(equals(16))))
/// matches the expression 'new (Storage, 16) MyClass()'.
AST_MATCHER_P2(CXXNewExpr, hasPlacementArg, unsigned, Index,
               internal::Matcher<Expr>, InnerMatcher) {
  // An index past the end of the placement-argument list can never match.
  if (Index >= Node.getNumPlacementArgs())
    return false;
  return InnerMatcher.matches(*Node.getPlacementArg(Index), Finder, Builder);
}
/// Matches if any placement-new argument satisfies the inner matcher.
///
/// Given:
/// \code
///   MyClass *p1 = new (Storage) MyClass();
/// \endcode
/// cxxNewExpr(hasAnyPlacementArg(anything()))
/// matches the expression 'new (Storage, 16) MyClass()'.
AST_MATCHER_P(CXXNewExpr, hasAnyPlacementArg, internal::Matcher<Expr>,
              InnerMatcher) {
  // Explicit scan over the placement arguments; first hit wins.
  for (const Expr *Arg : Node.placement_arguments()) {
    if (InnerMatcher.matches(*Arg, Finder, Builder))
      return true;
  }
  return false;
}
/// Matches array new expressions with a given array size.
///
/// Given:
/// \code
///   MyClass *p1 = new MyClass[10];
/// \endcode
/// cxxNewExpr(hasArraySize(integerLiteral(equals(10))))
/// matches the expression 'new MyClass[10]'.
AST_MATCHER_P(CXXNewExpr, hasArraySize, internal::Matcher<Expr>, InnerMatcher) {
  // getArraySize() yields an optional expression; guard both against a
  // non-array new and against an absent size before dereferencing twice.
  return Node.isArray() && *Node.getArraySize() &&
         InnerMatcher.matches(**Node.getArraySize(), Finder, Builder);
}
/// Matches a class declaration that is defined.
///
/// Example matches x (matcher = cxxRecordDecl(hasDefinition()))
/// \code
/// class x {};
/// class y;
/// \endcode
AST_MATCHER(CXXRecordDecl, hasDefinition) {
  // Distinguishes defined classes from mere forward declarations.
  return Node.hasDefinition();
}
/// Matches C++11 scoped enum declaration.
///
/// Example matches Y (matcher = enumDecl(isScoped()))
/// \code
/// enum X {};
/// enum class Y {};
/// \endcode
AST_MATCHER(EnumDecl, isScoped) {
  // True only for 'enum class'/'enum struct' declarations.
  return Node.isScoped();
}
/// Matches a function declared with a trailing return type.
///
/// Example matches Y (matcher = functionDecl(hasTrailingReturn()))
/// \code
/// int X() {}
/// auto Y() -> int {}
/// \endcode
AST_MATCHER(FunctionDecl, hasTrailingReturn) {
  // Only prototyped function types can carry a trailing return; any other
  // function type kind simply does not match.
  const auto *Proto = Node.getType()->getAs<FunctionProtoType>();
  return Proto != nullptr && Proto->hasTrailingReturn();
}
/// Matches expressions that match InnerMatcher that are possibly wrapped in an
/// elidable constructor and other corresponding bookkeeping nodes.
///
/// In C++17, elidable copy constructors are no longer being generated in the
/// AST as it is not permitted by the standard. They are, however, part of the
/// AST in C++14 and earlier. So, a matcher must abstract over these differences
/// to work in all language modes. This matcher skips elidable constructor-call
/// AST nodes, `ExprWithCleanups` nodes wrapping elidable constructor-calls and
/// various implicit nodes inside the constructor calls, all of which will not
/// appear in the C++17 AST.
///
/// Given
///
/// \code
/// struct H {};
/// H G();
/// void f() {
///   H D = G();
/// }
/// \endcode
///
/// ``varDecl(hasInitializer(ignoringElidableConstructorCall(callExpr())))``
/// matches ``H D = G()`` in C++11 through C++17 (and beyond).
AST_MATCHER_P(Expr, ignoringElidableConstructorCall,
              ast_matchers::internal::Matcher<Expr>, InnerMatcher) {
  // E tracks the node that we are examining.
  const Expr *E = &Node;
  // If present, remove an outer `ExprWithCleanups` corresponding to the
  // underlying `CXXConstructExpr`. This check won't cover all cases of added
  // `ExprWithCleanups` corresponding to `CXXConstructExpr` nodes (because the
  // EWC is placed on the outermost node of the expression, which this may not
  // be), but, it still improves the coverage of this matcher.
  if (const auto *CleanupsExpr = dyn_cast<ExprWithCleanups>(&Node))
    E = CleanupsExpr->getSubExpr();
  // Unwrap an elidable constructor call and the MaterializeTemporaryExpr it
  // wraps, matching the underlying expression instead.
  if (const auto *CtorExpr = dyn_cast<CXXConstructExpr>(E)) {
    if (CtorExpr->isElidable()) {
      if (const auto *MaterializeTemp =
              dyn_cast<MaterializeTemporaryExpr>(CtorExpr->getArg(0))) {
        return InnerMatcher.matches(*MaterializeTemp->getSubExpr(), Finder,
                                    Builder);
      }
    }
  }
  // Nothing to strip: match the original node (not E) unchanged.
  return InnerMatcher.matches(Node, Finder, Builder);
}
//----------------------------------------------------------------------------//
// OpenMP handling.
//----------------------------------------------------------------------------//
/// Matches any ``#pragma omp`` executable directive.
///
/// Given
///
/// \code
/// #pragma omp parallel
/// #pragma omp parallel default(none)
/// #pragma omp taskyield
/// \endcode
///
/// ``ompExecutableDirective()`` matches ``omp parallel``,
/// ``omp parallel default(none)`` and ``omp taskyield``.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, OMPExecutableDirective>
ompExecutableDirective;
/// Matches standalone OpenMP directives,
/// i.e., directives that can't have a structured block.
///
/// Given
///
/// \code
///   #pragma omp parallel
///   {}
///   #pragma omp taskyield
/// \endcode
///
/// ``ompExecutableDirective(isStandaloneDirective()))`` matches
/// ``omp taskyield``.
AST_MATCHER(OMPExecutableDirective, isStandaloneDirective) {
  // Delegates to the directive node's own classification.
  return Node.isStandaloneDirective();
}
/// Matches the structured-block of the OpenMP executable directive
///
/// Prerequisite: the executable directive must not be standalone directive.
/// If it is, it will never match.
///
/// Given
///
/// \code
///   #pragma omp parallel
///   ;
///   #pragma omp parallel
///   {}
/// \endcode
///
/// ``ompExecutableDirective(hasStructuredBlock(nullStmt()))`` will match ``;``
AST_MATCHER_P(OMPExecutableDirective, hasStructuredBlock,
              internal::Matcher<Stmt>, InnerMatcher) {
  // Standalone directives carry no structured block, so short-circuit to
  // false before ever asking for one.
  return !Node.isStandaloneDirective() &&
         InnerMatcher.matches(*Node.getStructuredBlock(), Finder, Builder);
}
/// Matches any clause in an OpenMP directive.
///
/// Given
///
/// \code
///   #pragma omp parallel
///   #pragma omp parallel default(none)
/// \endcode
///
/// ``ompExecutableDirective(hasAnyClause(anything()))`` matches
/// ``omp parallel default(none)``.
AST_MATCHER_P(OMPExecutableDirective, hasAnyClause,
              internal::Matcher<OMPClause>, InnerMatcher) {
  ArrayRef<OMPClause *> Clauses = Node.clauses();
  // matchesFirstInPointerRange returns the end iterator when no clause in the
  // range satisfies the inner matcher.
  return matchesFirstInPointerRange(InnerMatcher, Clauses.begin(),
                                    Clauses.end(), Finder,
                                    Builder) != Clauses.end();
}
/// Matches OpenMP ``default`` clause.
///
/// Given
///
/// \code
/// #pragma omp parallel default(none)
/// #pragma omp parallel default(shared)
/// #pragma omp parallel default(firstprivate)
/// #pragma omp parallel
/// \endcode
///
/// ``ompDefaultClause()`` matches ``default(none)``, ``default(shared)``, and
/// ``default(firstprivate)``
extern const internal::VariadicDynCastAllOfMatcher<OMPClause, OMPDefaultClause>
ompDefaultClause;
/// Matches if the OpenMP ``default`` clause has ``none`` kind specified.
///
/// Given
///
/// \code
///   #pragma omp parallel
///   #pragma omp parallel default(none)
///   #pragma omp parallel default(shared)
///   #pragma omp parallel default(firstprivate)
/// \endcode
///
/// ``ompDefaultClause(isNoneKind())`` matches only ``default(none)``.
AST_MATCHER(OMPDefaultClause, isNoneKind) {
  const auto Kind = Node.getDefaultKind();
  return Kind == llvm::omp::OMP_DEFAULT_none;
}
/// Matches if the OpenMP ``default`` clause has ``shared`` kind specified.
///
/// Given
///
/// \code
///   #pragma omp parallel
///   #pragma omp parallel default(none)
///   #pragma omp parallel default(shared)
///   #pragma omp parallel default(firstprivate)
/// \endcode
///
/// ``ompDefaultClause(isSharedKind())`` matches only ``default(shared)``.
AST_MATCHER(OMPDefaultClause, isSharedKind) {
  const auto Kind = Node.getDefaultKind();
  return Kind == llvm::omp::OMP_DEFAULT_shared;
}
/// Matches if the OpenMP ``default`` clause has ``firstprivate`` kind
/// specified.
///
/// Given
///
/// \code
///   #pragma omp parallel
///   #pragma omp parallel default(none)
///   #pragma omp parallel default(shared)
///   #pragma omp parallel default(firstprivate)
/// \endcode
///
/// ``ompDefaultClause(isFirstPrivateKind())`` matches only
/// ``default(firstprivate)``.
AST_MATCHER(OMPDefaultClause, isFirstPrivateKind) {
  const auto Kind = Node.getDefaultKind();
  return Kind == llvm::omp::OMP_DEFAULT_firstprivate;
}
/// Matches if the OpenMP directive is allowed to contain the specified OpenMP
/// clause kind.
///
/// Given
///
/// \code
///   #pragma omp parallel
///   #pragma omp parallel for
///   #pragma omp for
/// \endcode
///
/// ``ompExecutableDirective(isAllowedToContainClauseKind(OMPC_default))``
/// matches ``omp parallel`` and ``omp parallel for``.
///
/// If the matcher is used from clang-query, the ``OpenMPClauseKind`` parameter
/// should be passed as a quoted string. e.g.,
/// ``isAllowedToContainClauseKind("OMPC_default")``.
AST_MATCHER_P(OMPExecutableDirective, isAllowedToContainClauseKind,
              OpenMPClauseKind, CKind) {
  // Defer to the generated OpenMP predicate; the active OpenMP version
  // (from LangOpts) affects which clauses a directive may legally carry.
  return llvm::omp::isAllowedClauseForDirective(
      Node.getDirectiveKind(), CKind,
      Finder->getASTContext().getLangOpts().OpenMP);
}
//----------------------------------------------------------------------------//
// End OpenMP handling.
//----------------------------------------------------------------------------//
} // namespace ast_matchers
} // namespace clang
#endif // LLVM_CLANG_ASTMATCHERS_ASTMATCHERS_H
|
sgeadd.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/compute/zgeadd.c, normal z -> s, Fri Sep 28 17:38:05 2018
*
**/
#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_tuning.h"
#include "plasma_types.h"
#include "plasma_workspace.h"
/***************************************************************************//**
*
* @ingroup plasma_geadd
*
* Performs an addition of two general rectangular matrices similarly to the
* psgeadd() function from the PBLAS library:
*
* \f[ B = \alpha * op( A ) + \beta * B, \f]
*
* where op( X ) is one of:
* \f[ op( X ) = X, \f]
* \f[ op( X ) = X^T, \f]
* \f[ op( X ) = X^T, \f]
*
* alpha and beta are scalars and A, B are matrices with op( A ) an m-by-n or
* n-by-m matrix depending on the value of transa and B an m-by-n matrix.
*
*******************************************************************************
*
* @param[in] transa
* Specifies whether the matrix A is non-transposed, transposed, or
* conjugate transposed
* - PlasmaNoTrans: op( A ) = A
* - PlasmaTrans: op( A ) = A^T
* - PlasmaConjTrans: op( A ) = A^T
*
* @param[in] m
* Number of rows of the matrices op( A ) and B.
* m >= 0.
*
* @param[in] n
* Number of columns of the matrices op( A ) and B.
* n >= 0.
*
* @param[in] alpha
* Scalar factor of A.
*
* @param[in] pA
* Matrix of size lda-by-k, where k is n when transa == PlasmaNoTrans
* and m otherwise.
*
* @param[in] lda
* Leading dimension of the array A. lda >= max(1,l), where l is m
* when transa = PlasmaNoTrans and n otherwise.
*
* @param[in] beta
* Scalar factor of B.
*
* @param[in,out] pB
* Matrix of size ldb-by-n.
* On exit, B = alpha * op( A ) + beta * B
*
* @param[in] ldb
* Leading dimension of the array B.
* ldb >= max(1,m).
*
*******************************************************************************
*
* @retval PlasmaSuccess successful exit
*
*******************************************************************************
*
* @sa plasma_omp_sgeadd
* @sa plasma_cgeadd
* @sa plasma_dgeadd
* @sa plasma_sgeadd
*
******************************************************************************/
int plasma_sgeadd(plasma_enum_t transa,
                  int m, int n,
                  float alpha, float *pA, int lda,
                  float beta, float *pB, int ldb)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_error("PLASMA not initialized");
        return PlasmaErrorNotInitialized;
    }
    // Check input arguments.
    if ((transa != PlasmaNoTrans) &&
        (transa != PlasmaTrans) &&
        (transa != PlasmaConjTrans)) {
        plasma_error("illegal value of transa");
        return -1;
    }
    if (m < 0) {
        plasma_error("illegal value of m");
        return -2;
    }
    if (n < 0) {
        plasma_error("illegal value of n");
        return -3;
    }
    if (pA == NULL) {
        plasma_error("NULL A");
        return -5;
    }
    // Storage dimensions of A: op(A) is m-by-n, so A itself is m-by-n when
    // not transposed and n-by-m otherwise.
    int am, an;
    if (transa == PlasmaNoTrans) {
        am = m;
        an = n;
    }
    else {
        am = n;
        an = m;
    }
    int bm = m;
    int bn = n;
    if (lda < imax(1, am)) {
        plasma_error("illegal value of lda");
        return -6;
    }
    if (pB == NULL) {
        plasma_error("NULL B");
        return -8;
    }
    if (ldb < imax(1, bm)) {
        plasma_error("illegal value of ldb");
        return -9;
    }
    // quick return: empty matrices, or alpha==0 with beta==1 leaves B as-is.
    if (m == 0 || n == 0 || (alpha == 0.0 && beta == 1.0))
        return PlasmaSuccess;
    // Tune parameters.
    if (plasma->tuning)
        plasma_tune_geadd(plasma, PlasmaRealFloat, m, n);
    // Set tiling parameters.
    int nb = plasma->nb;
    // Create tile matrices.
    plasma_desc_t A;
    plasma_desc_t B;
    int retval;
    retval = plasma_desc_general_create(PlasmaRealFloat, nb, nb,
                                        am, an, 0, 0, am, an, &A);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        return retval;
    }
    retval = plasma_desc_general_create(PlasmaRealFloat, nb, nb,
                                        bm, bn, 0, 0, bm, bn, &B);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        // Avoid leaking the already-created descriptor for A.
        plasma_desc_destroy(&A);
        return retval;
    }
    // Initialize sequence.
    // NOTE(review): the return values of plasma_sequence_init() and
    // plasma_request_init() are stored but never checked -- presumably these
    // cannot fail for stack-allocated handles; confirm against their docs.
    plasma_sequence_t sequence;
    retval = plasma_sequence_init(&sequence);
    // Initialize request.
    plasma_request_t request;
    retval = plasma_request_init(&request);
    // asynchronous block: a single master thread issues the tasks, the
    // parallel team executes them; errors are reported via sequence.status.
    #pragma omp parallel
    #pragma omp master
    {
        // Translate to tile layout.
        plasma_omp_sge2desc(pA, lda, A, &sequence, &request);
        plasma_omp_sge2desc(pB, ldb, B, &sequence, &request);
        // Call tile async function.
        plasma_omp_sgeadd(transa,
                          alpha, A,
                          beta, B,
                          &sequence, &request);
        // Translate back to LAPACK layout.
        plasma_omp_sdesc2ge(A, pA, lda, &sequence, &request);
        plasma_omp_sdesc2ge(B, pB, ldb, &sequence, &request);
    }
    // implicit synchronization
    // Free matrices in tile layout.
    plasma_desc_destroy(&A);
    plasma_desc_destroy(&B);
    // Return status.
    int status = sequence.status;
    return status;
}
/***************************************************************************//**
*
* @ingroup plasma_geadd
*
* Performs an addition of two general rectangular matrices similarly to the
* psgeadd() function from the PBLAS library. Non-blocking tile version of
* plasma_sgeadd(). May return before the computation is finished. Operates on
* matrices stored by tiles. All matrices are passed through descriptors. All
* dimensions are taken from the descriptors. Allows for pipelining of
* operations at runtime.
*
*******************************************************************************
*
* @param[in] transa
* Specifies whether the matrix A is non-transposed, transposed, or
* conjugate transposed
* - PlasmaNoTrans: op( A ) = A
* - PlasmaTrans: op( A ) = A^T
* - PlasmaConjTrans: op( A ) = A^T
*
* @param[in] alpha
* The scalar alpha.
*
* @param[in] A
* Descriptor of matrix A.
*
* @param[in] beta
* The scalar beta.
*
* @param[in,out] B
* Descriptor of matrix B.
*
* @param[in] sequence
* Identifies the sequence of function calls that this call belongs to
* (for completion checks and exception handling purposes). Check the
* sequence->status for errors.
*
* @param[out] request
* Identifies this function call (for exception handling purposes).
*
* @retval void
* Errors are returned by setting sequence->status and
* request->status to error values. The sequence->status and
* request->status should never be set to PlasmaSuccess (the
* initial values) since another async call may be setting a
* failure value at the same time.
*
*******************************************************************************
*
* @sa plasma_sgeadd
* @sa plasma_omp_cgeadd
* @sa plasma_omp_dgeadd
* @sa plasma_omp_sgeadd
*
******************************************************************************/
void plasma_omp_sgeadd(plasma_enum_t transa,
                       float alpha, plasma_desc_t A,
                       float beta, plasma_desc_t B,
                       plasma_sequence_t *sequence, plasma_request_t *request)
{
    // Check sequence and request before anything else: every other error
    // path below reports through plasma_request_fail(), which uses both
    // handles. The original code ran these NULL checks last, so an earlier
    // failure (or the NULL-sequence branch itself) could hand a NULL
    // sequence/request to plasma_request_fail().
    if (sequence == NULL) {
        plasma_error("NULL sequence");
        return;
    }
    if (request == NULL) {
        plasma_error("NULL request");
        return;
    }
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_error("PLASMA not initialized");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    // Check input arguments.
    if ((transa != PlasmaNoTrans) &&
        (transa != PlasmaTrans) &&
        (transa != PlasmaConjTrans)) {
        plasma_error("illegal value of transa");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(A) != PlasmaSuccess) {
        plasma_error("invalid A");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(B) != PlasmaSuccess) {
        plasma_error("invalid B");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    // quick return: alpha*op(A) contributes nothing and beta == 1 leaves B
    // untouched, so no tasks need to be spawned.
    int am = transa == PlasmaNoTrans ? A.m : A.n;
    if ((alpha == 0.0 || am == 0) && beta == 1.0)
        return;
    // Call the parallel function.
    plasma_psgeadd(transa,
                   alpha, A,
                   beta, B,
                   sequence, request);
}
|
pixrender.c | /**
* My solution to being unable to
* apply per-pixel effects using
* the SDL2 primitive render functions
*
* (See header file for details on
* PixBuffers)
*/
#include <omp.h>
#include "pixrender.h"
uint32_t getColor(uint8_t r, uint8_t g, uint8_t b, uint8_t a);
/**
 * Precomputed 4x4 Bayer matrix
 * to be used for ordered dithering
 * based on 4 patterns
 * (see PixBuffer_orderedDither)
 *
 * Entries are threshold offsets in the range [-1, 1]; callers scale them
 * by a strength factor before adding to each color channel.
 */
const double ditherMatrix[16] = {
    -0.875, 0.125, -0.625, 0.375,
    0.625, -0.375, 0.875, -0.125,
    -0.5, 0.5, -0.75, 0.25,
    1.0, 0.0, 0.75, -0.25
};
/** PixBuffer_initPixBuffer
 * @brief Allocates a PixBuffer and its pixel array
 * @param width buffer width in pixels
 * @param height buffer height in pixels
 * @return newly allocated PixBuffer, or NULL if either allocation fails
 **/
PixBuffer* PixBuffer_initPixBuffer(uint32_t width, uint32_t height)
{
    PixBuffer* newBuffer = (PixBuffer*)malloc(sizeof(PixBuffer));
    if (newBuffer == NULL)
    {
        return NULL;
    }
    newBuffer->pixels = (uint32_t*)malloc(sizeof(uint32_t)*width*height);
    if (newBuffer->pixels == NULL)
    {
        // Don't leak the struct if the pixel allocation fails.
        free(newBuffer);
        return NULL;
    }
    newBuffer->width = width;
    newBuffer->height = height;
    return newBuffer;
}
/** PixBuffer_delPixBuffer
 * @brief Frees a PixBuffer and its pixel array
 * Safe to call with NULL (no-op), matching free() semantics; the original
 * dereferenced buffer->pixels unconditionally and crashed on NULL.
 * @param buffer PixBuffer struct to free (may be NULL)
 **/
void PixBuffer_delPixBuffer(PixBuffer* buffer)
{
    if (buffer == NULL)
    {
        return;
    }
    free(buffer->pixels);
    free(buffer);
}
/** PixBuffer_drawColumn
 * @brief Draws a column to a pixel buffer
 * Note: drawColumn <b>does not</b> check x bound
 * Ensure that your draw functions choose
 * an X value less than the buffer width
 * @param buffer PixBuffer struct to write to
 * @param x x coordinate of column
 * @param y y coordinate of <b>top</b> of column
 * @param h height of column
 **/
void PixBuffer_drawColumn(PixBuffer* buffer, uint32_t x, int32_t y, int32_t h, SDL_Color color)
{
    // Clip the top of the column to the buffer.
    if (y < 0)
    {
        h = h + y;
        y = 0;
    }
    // An empty or fully-clipped column draws nothing. The original compared
    // the signed y + h against the unsigned buffer height, so a negative
    // y + h was converted to a huge unsigned value and a full-height column
    // was drawn for degenerate input.
    if (h <= 0)
    {
        return;
    }
    // Clip the bottom of the column (signed comparison on purpose).
    if (y + h > (int32_t)buffer->height)
    {
        h = buffer->height - y;
    }
    // Convert the color once instead of once per pixel.
    uint32_t pix = PixBuffer_toPixColor(color.r, color.g, color.b, color.a);
    for (int32_t i = y; i < y + h; i++)
    {
        PixBuffer_drawPix(buffer, x, i, pix);
    }
}
/** PixBuffer_drawRow
 * @brief Draws a row to a pixel buffer with alpha blending
 * Note: drawRow <b>does not</b> check x <b>or</b>
 * y bounds. Be careful to ensure x, w, and y
 * parameters are within the buffer size
 * NOTE(review): the loop runs i from x while i < w, so w is treated as an
 * exclusive end x-coordinate rather than a width as documented below;
 * callers (e.g. PixBuffer_drawHorizGradient) pass coordinates that rely on
 * the current behavior -- confirm intent before changing either.
 * @param buffer PixBuffer struct to write to
 * @param x x coordinate of <b>left</b> of row
 * @param y y coordinate of row
 * @param w width of row (see NOTE above: effectively the end x-coordinate)
 **/
void PixBuffer_drawRow(PixBuffer* buffer, uint32_t x, uint32_t y, uint32_t w, SDL_Color color)
{
    int r = color.r;
    int g = color.g;
    int b = color.b;
    int a = color.a;
    // Normalized source alpha; fully transparent colors draw nothing at all.
    double alpha = ((double)(color.a))/255.0;
    for (int32_t i = x; i < w; i++)
    {
        if (a) // Alpha transparency, compute alpha based on array colors
        {
            // Blend the new color over the existing pixel; the result is
            // written back fully opaque (alpha byte forced to 0xff).
            uint32_t oldPix = buffer->pixels[(y*buffer->width)+(i)];
            int oldR = (int)(oldPix >> 3*8);
            int oldG = (int)((oldPix >> 2*8) & 0xFF);
            int oldB = (int)((oldPix >> 8) & 0xFF);
            r = (int)((double)(color.r) * alpha + (double)oldR * (1.0-alpha));
            g = (int)((double)(color.g) * alpha + (double)oldG * (1.0-alpha));
            b = (int)((double)(color.b) * alpha + (double)oldB * (1.0-alpha));
            PixBuffer_drawPix(buffer, i, y, PixBuffer_toPixColor(r,g,b,0xff));
        }
    }
}
/** PixBuffer_drawRect
 * @brief Draws a filled rectangle to a pixel buffer
 *
 * Columns past the right edge of the buffer are skipped.
 * NOTE(review): i (uint32_t) is compared against rect->x + rect->w (int);
 * if that sum is negative it converts to a huge unsigned value and the loop
 * spins through billions of skipped iterations. Likewise rect->x < width
 * mixes signed and unsigned. Harmless for non-negative rects, but worth
 * confirming callers never pass negative x/w.
 *
 * @param buffer PixBuffer struct to write to
 * @param rect SDL_Rect struct with coordinate and dimension data
 **/
void PixBuffer_drawRect(PixBuffer* buffer, SDL_Rect* rect, SDL_Color color)
{
    if (rect->x < buffer->width)
    {
        for (uint32_t i = rect->x; i < rect->x + rect->w; i++)
        {
            if (i < buffer->width)
            {
                PixBuffer_drawColumn(buffer, i, rect->y, rect->h, color);
            }
        }
    }
}
/** PixBuffer_drawHorizGradient
 * @brief Fills a rect with a vertical linear blend from colTop to colBottom
 * Each channel is stepped independently per row; rows below the buffer are
 * skipped. The whole rect must fit horizontally or nothing is drawn.
 * @param buffer PixBuffer struct to write to
 * @param rect region to fill
 * @param colTop color of the top row
 * @param colBottom color of the bottom row
 **/
void PixBuffer_drawHorizGradient(PixBuffer* buffer, SDL_Rect* rect, SDL_Color colTop, SDL_Color colBottom)
{
    if (rect->x < buffer->width && rect->x+rect->w <= buffer->width)
    {
        // Per-row increment for each channel.
        double rStep = ((double)colBottom.r - (double)colTop.r) / rect->h;
        double gStep = ((double)colBottom.g - (double)colTop.g) / rect->h;
        double bStep = ((double)colBottom.b - (double)colTop.b) / rect->h;
        double aStep = ((double)colBottom.a - (double)colTop.a) / rect->h;
        SDL_Color drawColor;
        //#pragma omp parallel for schedule(dynamic,1) private(drawColor)
        for (uint32_t i = 0; i < rect->h; i++)
        {
            if (i < buffer->height)
            {
                drawColor.r = colTop.r+(int)(rStep*i);
                drawColor.g = colTop.g+(int)(gStep*i);
                drawColor.b = colTop.b+(int)(bStep*i);
                drawColor.a = colTop.a+(int)(aStep*i);
                // drawRow's w parameter acts as an end x-coordinate; this
                // call inherits that behavior (see PixBuffer_drawRow).
                PixBuffer_drawRow(buffer, rect->x, rect->y+i, rect->w, drawColor);
            }
        }
    }
}
/** PixBuffer_mergeBuffer
 * @brief Alpha-composites a source buffer onto a target buffer
 * Pixels falling outside the target's bounds are skipped.
 * @param target buffer written to
 * @param source buffer read from
 * @param alpha global blend factor applied to every source pixel
 **/
void PixBuffer_mergeBuffer(PixBuffer* target, PixBuffer* source, double alpha)
{
    for (uint32_t row = 0; row < source->height; row++)
    {
        if (row >= target->height)
        {
            continue;
        }
        for (uint32_t col = 0; col < source->width; col++)
        {
            if (col >= target->width)
            {
                continue;
            }
            uint32_t srcPix = source->pixels[col + row*source->width];
            PixBuffer_drawPixAlpha(target, col, row, srcPix, alpha);
        }
    }
}
/** PixBuffer_fillBuffer
 * @brief Alpha-blends a single color over every pixel of a buffer
 * @param target buffer to fill
 * @param color buffer-format (RGBA) color to blend in
 * @param alpha global blend factor
 **/
void PixBuffer_fillBuffer(PixBuffer* target, uint32_t color, double alpha)
{
    // The loop bounds already guarantee i and j are in range; the original
    // re-checked i < height and j < width redundantly on every iteration.
    for (uint32_t i = 0; i < target->height; i++)
    {
        for (uint32_t j = 0; j < target->width; j++)
        {
            PixBuffer_drawPixAlpha(target, j, i, color, alpha);
        }
    }
}
/** PixBuffer_drawBuffOffset
 * @brief Copies source into target with a horizontal wrap-around offset
 * NOTE(review): the x and y parameters are never used in the body -- either
 * dead weight or an unfinished feature; confirm with callers.
 * NOTE(review): the source row is indexed with target->width as the stride
 * (source->pixels[xCoord + i*target->width]), which is only correct when
 * source and target have identical widths -- verify that assumption.
 * @param target buffer written to
 * @param source buffer read from
 * @param x unused
 * @param y unused
 * @param xOff horizontal offset, wrapped modulo target width
 **/
void PixBuffer_drawBuffOffset(PixBuffer* target, PixBuffer* source, uint32_t x, uint32_t y, int32_t xOff)
{
    int32_t xCoord;
    for (uint32_t i = 0; i < source->height; i++)
    {
        if (i < target->height)
        {
            for (uint32_t j = 0; j < source->width; j++)
            {
                if (j < target->width)
                {
                    xCoord = (j + xOff) % target->width;
                    target->pixels[j+i*target->width] = source->pixels[xCoord+i*target->width];
                }
            }
        }
    }
}
/** PixBuffer_clearBuffer
 * @brief Clears buffer array to 0x00
 * Useful if you need to quickly reuse a buffer
 * for drawing layers/graphics updates. Sets to
 * transparent black using memset
 * @param buffer PixBuffer struct to clear
 **/
void PixBuffer_clearBuffer(PixBuffer* buffer)
{
    // sizeof(uint32_t) instead of a hard-coded 4 for the element size.
    memset(buffer->pixels, 0, buffer->width * buffer->height * sizeof(uint32_t));
}
/** PixBuffer_paletteFilter
 * @brief Remaps RGB buffer colors to a given palette
 * Note: it is important to ensure paletteNum is no longer than
 * the palette list, otherwise this will index nonexistent colors
 * and make your output look really funky. And possibly segfault I guess
 * Pixels equal to 0 (fully transparent black) are left untouched.
 * @param buffer PixBuffer to palettize
 * @param palette SDL_Color array to quantize to
 * @param paletteNum length of color palette
 * @todo consolidate paletteFilter and nearestColor functions
 **/
void PixBuffer_paletteFilter(PixBuffer* buffer, SDL_Color* palette, int paletteNum)
{
    int r;
    int g;
    int b;
    int colNum = 0;
    for (uint32_t p = 0; p < buffer->width * buffer->height; p++)
    {
        if (buffer->pixels[p] != 0)
        {
            // Decode the RGBA channels of this pixel.
            r = (int)(buffer->pixels[p] >> 3*8);
            g = (int)((buffer->pixels[p] >> 2*8) & 0xFF);
            b = (int)((buffer->pixels[p] >> 8) & 0xFF);
            // Start at the maximum possible squared RGB distance (3 * 255^2);
            // colNum falls back to palette entry 0 if nothing is strictly closer.
            uint32_t minColorDif = 0xFF*0xFF*3;//adjustedColorDiff(r, g, b, colorPallette[0].r, colorPallette[0].g, colorPallette[0].b);
            for (int i = 0; i < paletteNum; i++)
            {
                // Squared Euclidean distance in RGB space (no perceptual weighting).
                uint32_t colorDif = (uint32_t)(palette[i].r - r)*(palette[i].r - r) + (uint32_t)(palette[i].g - g)*(palette[i].g - g) + (uint32_t)(palette[i].b - b)*(palette[i].b - b);
                //double colorDif = adjustedColorDiff(r, g, b, colorPallette[i].r, colorPallette[i].g, colorPallette[i].b);//(uint32_t)(rFact * (double)(colorPallette[i].r - r))*(rFact * (double)(colorPallette[i].r - r)) + (gFact * (double)(colorPallette[i].g - g))*(gFact * (double)(colorPallette[i].g - g)) + (bFact * (double)(colorPallette[i].b - b))*(bFact * (double)(colorPallette[i].b - b));
                if (colorDif < minColorDif)
                {
                    minColorDif = colorDif;
                    colNum = i;
                }
            }
            // Repack the winning palette entry, forcing the pixel fully opaque.
            buffer->pixels[p] = (uint32_t)(palette[colNum].r) << 3*8 | (uint32_t)(palette[colNum].g) << 2*8 | (uint32_t)(palette[colNum].b) << 8 | (uint32_t)0xFF;
        }
    }
}
/** getNearestColor
 * @brief Internal function called by orderedDither for quantization
 * Picks the palette entry with the smallest squared RGB distance to the
 * input color; alpha is ignored and the result is returned fully opaque.
 * @param palette SDL_Color array to quantize to
 * @param paletteNum length of color palette
 * @param colorDat buffer format color to quantize
 * @return buffer format color of closest palette match
 **/
uint32_t getNearestColor(SDL_Color* palette, int paletteNum, uint32_t colorDat)
{
    // Decode RGB channels of the candidate pixel.
    int r = (int)(colorDat >> 3*8);
    int g = (int)((colorDat >> 2*8) & 0xFF);
    int b = (int)((colorDat >> 8) & 0xFF);
    int best = 0;
    // Maximum possible squared distance: 3 * 255^2.
    uint32_t bestDif = 0xFF*0xFF*3;
    for (int i = 0; i < paletteNum; i++)
    {
        uint32_t dif = (uint32_t)(palette[i].r - r)*(palette[i].r - r)
                     + (uint32_t)(palette[i].g - g)*(palette[i].g - g)
                     + (uint32_t)(palette[i].b - b)*(palette[i].b - b);
        if (dif < bestDif)
        {
            bestDif = dif;
            best = i;
        }
    }
    return (uint32_t)(palette[best].r) << 3*8 | (uint32_t)(palette[best].g) << 2*8
         | (uint32_t)(palette[best].b) << 8 | (uint32_t)0xFF;
}
/**
 * PixBuffer_orderDither
 * The algorithm for this is somewhat based on the pseudocode
 * from the wikipedia page, but adapted once I found out
 * how this works
 * @brief Applies an ordered dither effect to a buffer
 * Pixels equal to 0 (fully transparent black) are skipped.
 * @param buffer PixBuffer to dither
 * @param palette SDL_Color array to quantize to
 * @param paletteNum length of color palette
 * @param scaleFactor intensity of dither weights
 **/
void PixBuffer_orderDither(PixBuffer* buffer, SDL_Color* palette, int paletteNum, double scaleFactor)
{
    // Components to decode RGBA format
    int32_t r;
    int32_t g;
    int32_t b;
    // Per-pixel threshold offset: scaleFactor * Bayer matrix entry,
    // truncated to an integer.
    int32_t dithFactor;
    // default: 4
    // How much the matrix weights should vary the input colors
    int32_t newColor;
    //#pragma omp parallel for schedule(dynamic,1) private(r,g,b,dithFactor,newColor)
    for (uint32_t y = 0; y < buffer->height; y++)
    {
        for (uint32_t x = 0; x < buffer->width; x++)
        {
            if (buffer->pixels[y*buffer->width+x] != 0)
            {
                r = (int)(buffer->pixels[y*buffer->width+x] >> 3*8);
                g = (int)((buffer->pixels[y*buffer->width+x] >> 2*8) & 0xFF);
                b = (int)((buffer->pixels[y*buffer->width+x] >> 8) & 0xFF);
                // Finds associated dither weight, which will
                // be applied to the color to bring it above or below the threshold
                // for getNearestColor to assign a varied brightness
                dithFactor = scaleFactor*ditherMatrix[(y%4)*4+(x%4)];
                // Add the offset to each channel and clamp to [0, 255].
                r = (int)(r + dithFactor);
                if (r > 255)
                {
                    r = 255;
                }
                else if (r < 0)
                {
                    r = 0;
                }
                g = (int)(g + dithFactor);
                if (g > 255)
                {
                    g = 255;
                }
                else if (g < 0)
                {
                    g = 0;
                }
                b = (int)(b + dithFactor);
                if (b > 255)
                {
                    b = 255;
                }
                else if (b < 0)
                {
                    b = 0;
                }
                // Repack and snap to the nearest palette color (fully opaque).
                newColor = (uint32_t)(r) << 3*8 | (uint32_t)(g) << 2*8 | (uint32_t)(b) << 8 | (uint32_t)0xFF;
                buffer->pixels[y*buffer->width+x] = getNearestColor(palette, paletteNum, newColor);
            }
        }
    }
}
/** to8BitColor
 * @brief Palettizes 32bit color to 8bit color
 * Each RGB channel is snapped to the nearest of 16 evenly spaced levels
 * (0, 17, 34, ..., 255); the alpha byte is forced to 0xFF.
 * @param colorDat Raw truecolor value to palettize
 * @return 8 bit color value
 */
uint32_t to8BitColor(uint32_t colorDat)
{
    int r = (int)(colorDat >> 3*8);
    int g = (int)((colorDat >> 2*8) & 0xFF);
    int b = (int)((colorDat >> 8) & 0xFF);
    // round() already yields an integral double, and 255.0/15 == 17 exactly,
    // so the ceil() wrapper in the original was a no-op and has been dropped.
    int newR = (int)(round((double)r / 255.0*15) * (255.0/15));
    int newG = (int)(round((double)g / 255.0*15) * (255.0/15));
    int newB = (int)(round((double)b / 255.0*15) * (255.0/15));
    return (uint32_t)(newR) << 3*8 | (uint32_t)(newG) << 2*8 | (uint32_t)newB << 8 | (uint32_t)0xFF;
}
/** PixBuffer_orderDither256
 * Uses matrix dithering to palettize a truecolor buffer to the
 * 8-bit 256 color palette
 * @brief Applies 256 color dithering filter to buffer
 * Pixels equal to 0 (fully transparent black) are skipped.
 * @param buffer PixBuffer to apply filter to
 * @param scaleFactor Strength of dithering. Multiplies values in
 * matrix to increase extremity of offsets
 **/
void PixBuffer_orderDither256(PixBuffer* buffer, double scaleFactor)
{
    for (uint32_t y = 0; y < buffer->height; y++)
    {
        for (uint32_t x = 0; x < buffer->width; x++)
        {
            uint32_t pix = buffer->pixels[y*buffer->width+x];
            if (pix == 0)
            {
                continue;
            }
            // Decode the RGBA channels.
            int32_t r = (int32_t)(pix >> 3*8);
            int32_t g = (int32_t)((pix >> 2*8) & 0xFF);
            int32_t b = (int32_t)((pix >> 8) & 0xFF);
            // Bayer threshold offset for this pixel position,
            // truncated to an integer.
            int32_t dith = (int32_t)(scaleFactor*ditherMatrix[(y%4)*4+(x%4)]);
            // Offset each channel and clamp to [0, 255].
            r += dith;
            r = r > 255 ? 255 : (r < 0 ? 0 : r);
            g += dith;
            g = g > 255 ? 255 : (g < 0 ? 0 : g);
            b += dith;
            b = b > 255 ? 255 : (b < 0 ? 0 : b);
            // Repack (fully opaque) and snap to the 256-color palette.
            uint32_t shifted = (uint32_t)(r) << 3*8 | (uint32_t)(g) << 2*8 | (uint32_t)(b) << 8 | (uint32_t)0xFF;
            buffer->pixels[y*buffer->width+x] = to8BitColor(shifted);
        }
    }
}
/** PixBuffer_monochromeFilter
 * * Note: Does not check fade percentage, could overflow color values
 * @brief Monochrome filter with selectable target color and saturation
 * For each pixel, computes the brightness average and pulls each channel
 * toward targetColor scaled by that average; fadePercent = 1 gives a fully
 * tinted monochrome image, 0 leaves the pixel untouched.
 * NOTE(review): fadePercent outside [0, 1] can push channel sums outside
 * 0..255 before the uint8_t casts below, wrapping the colors -- confirm
 * callers keep it in range.
 * @param buffer PixBuffer to apply filter to
 * @param targetColor Color to adjust chrominance towards
 * @param fadePercent Degree of monochromatic-ness (inverse saturation)
 **/
void PixBuffer_monochromeFilter(PixBuffer* buffer, SDL_Color targetColor, double fadePercent)
{
    SDL_Color oldColor;
    int targetAvg;
    uint32_t newColor;
    // Target tint, normalized per channel to [0, 1].
    double targetR = targetColor.r/255.0;
    double targetG = targetColor.g/255.0;
    double targetB = targetColor.b/255.0;
    int dr;
    int dg;
    int db;
    for (int y = 0; y < buffer->height; y++)
    {
        for (int x = 0; x < buffer->width; x++)
        {
            oldColor = PixBuffer_toSDLColor(PixBuffer_getPix(buffer, x, y));
            targetAvg = (oldColor.r + oldColor.g + oldColor.b) / 3;
            // Per-channel delta toward the tinted gray value.
            dr = (targetAvg * targetR - oldColor.r) * fadePercent;
            dg = (targetAvg * targetG - oldColor.g) * fadePercent;
            db = (targetAvg * targetB - oldColor.b) * fadePercent;
            newColor = PixBuffer_toPixColor((uint8_t)(oldColor.r + dr), (uint8_t)(oldColor.g + dg), (uint8_t)(oldColor.b + db), (uint8_t)oldColor.a);
            PixBuffer_drawPix(buffer, x, y, newColor);
        }
    }
}
/** PixBuffer_inverseFilter
 * @brief Inverts the RGB channels of all pixels in a PixBuffer
 * Each channel becomes 255 - value; the alpha channel is preserved.
 * @param buffer PixBuffer to invert
 **/
void PixBuffer_inverseFilter(PixBuffer* buffer)
{
    for (int y = 0; y < buffer->height; y++)
    {
        for (int x = 0; x < buffer->width; x++)
        {
            SDL_Color c = PixBuffer_toSDLColor(PixBuffer_getPix(buffer, x, y));
            uint32_t inverted = PixBuffer_toPixColor((uint8_t)(255 - c.r),
                                                     (uint8_t)(255 - c.g),
                                                     (uint8_t)(255 - c.b),
                                                     (uint8_t)c.a);
            PixBuffer_drawPix(buffer, x, y, inverted);
        }
    }
}
/** PixBuffer_toPixColor
 * @brief Packs channels into a single RGBA8888 word
 * R lands in the most significant byte, A in the least.
 * @param r SDL_Color red component
 * @param g SDL_Color green component
 * @param b SDL_Color blue component
 * @param a SDL_Color alpha component
 **/
uint32_t PixBuffer_toPixColor(uint8_t r, uint8_t g, uint8_t b, uint8_t a)
{
    uint32_t pix = (uint32_t)r;
    pix = (pix << 8) | (uint32_t)g;
    pix = (pix << 8) | (uint32_t)b;
    pix = (pix << 8) | (uint32_t)a;
    return pix;
}
/** PixBuffer_toSDLColor
 * @brief Unpacks an RGBA8888 word into an SDL_Color struct
 * @param pixColor buffer-format color (R in the top byte, A in the bottom)
 **/
SDL_Color PixBuffer_toSDLColor(uint32_t pixColor)
{
    SDL_Color unpacked = {
        (uint8_t)((pixColor >> 24) & 0xFF),
        (uint8_t)((pixColor >> 16) & 0xFF),
        (uint8_t)((pixColor >> 8) & 0xFF),
        (uint8_t)(pixColor & 0xFF)
    };
    return unpacked;
}
/** PixBuffer_blendAlpha
 * @brief Alpha-blends addColor over baseColor (both 0xRRGGBBAA words)
 * @param baseColor background pixel
 * @param addColor foreground pixel blended on top
 * @param alphaNum extra opacity multiplier applied to addColor's alpha (0..1)
 * @return blended pixel word
 *
 * Fix: an effective alpha of exactly 255 (fully opaque foreground)
 * previously fell through the `!= 0 && != 255` guard into the "no blend"
 * branch and returned baseColor; an opaque foreground must replace the
 * background, so it now returns addColor (mirroring the short-circuit in
 * PixBuffer_drawPixAlpha, which keeps the foreground color in that case).
 **/
uint32_t PixBuffer_blendAlpha(uint32_t baseColor, uint32_t addColor, double alphaNum)
{
	int addR = (int)(addColor >> 3*8);
	int addG = (int)((addColor >> 2*8) & 0xFF);
	int addB = (int)((addColor >> 8) & 0xFF);
	int addA = (int)(addColor & 0xFF);
	double effAlpha = alphaNum * (double)addA;
	if (effAlpha <= 0.0)
	{
		/* fully transparent foreground: background shows through unchanged */
		return baseColor;
	}
	if (effAlpha >= 255.0)
	{
		/* fully opaque foreground: it completely covers the background */
		return addColor;
	}
	/* partial transparency: classic "over" blend in floating point */
	double alpha = ((double)addA)/255.0 * (alphaNum);
	int oldR = (int)(baseColor >> 3*8);
	int oldG = (int)((baseColor >> 2*8) & 0xFF);
	int oldB = (int)((baseColor >> 8) & 0xFF);
	int oldA = (int)(baseColor & 0xFF);
	int outR = (int)((double)addR * alpha + (double)oldR * (1-alpha));
	int outG = (int)((double)addG * alpha + (double)oldG * (1-alpha));
	int outB = (int)((double)addB * alpha + (double)oldB * (1-alpha));
	/* a fully opaque background stays fully opaque */
	int outA = (oldA == 255) ? 255 : (int)((double)addA * alpha + (double)oldA * (1-alpha));
	return ((uint32_t)outR << 3*8) | ((uint32_t)outG << 2*8) | ((uint32_t)outB << 8) | (uint32_t)outA;
}
/* Fetch the packed RGBA pixel at (x, y). Row-major layout; no bounds
 * checking is performed — callers must pass in-range coordinates. */
uint32_t PixBuffer_getPix(PixBuffer* buffer, uint32_t x, uint32_t y)
{
	const uint32_t* row = &buffer->pixels[y * buffer->width];
	return row[x];
}
/* Fetch a texel from tile tileNum of the texture atlas; tiles are stacked
 * vertically, each tileWidth x tileHeight texels. No bounds checking. */
uint32_t PixBuffer_getTex(RayTex* texture, uint8_t tileNum, uint32_t x, uint32_t y)
{
	uint32_t rowIndex = tileNum * texture->tileHeight + y;
	return texture->pixData[rowIndex * texture->tileWidth + x];
}
/** PixBuffer_drawPix
 * @brief Draws a single pixel to the PixBuffer; out-of-bounds writes
 *        are silently clipped
 * @param buffer PixBuffer to draw to
 * @param x x coordinate of pixel
 * @param y y coordinate of pixel
 * @param color packed RGBA pixel value to store
 **/
void PixBuffer_drawPix(PixBuffer* buffer, uint32_t x, uint32_t y, uint32_t color)
{
	if (x >= buffer->width || y >= buffer->height)
	{
		return; /* clip: silently drop out-of-range writes */
	}
	buffer->pixels[y * buffer->width + x] = color;
}
/** PixBuffer_drawPixAlpha
 * @brief Alpha-blends a packed RGBA pixel onto the buffer at (x, y)
 * @param buffer PixBuffer to draw to
 * @param x x coordinate of pixel
 * @param y y coordinate of pixel
 * @param color packed 0xRRGGBBAA pixel to blend on top
 * @param alphaNum extra opacity multiplier (0..1) applied to color's alpha
 *
 * Fix: the blend path read buffer->pixels[y*width+x] with no bounds check
 * (only the final write via PixBuffer_drawPix was guarded), so out-of-range
 * coordinates caused an out-of-bounds read. Coordinates are now clipped
 * up front before any buffer access.
 **/
void PixBuffer_drawPixAlpha(PixBuffer* buffer, uint32_t x, uint32_t y, uint32_t color, double alphaNum)
{
	if (x >= buffer->width || y >= buffer->height)
	{
		return; /* clip: nothing to read or write out of bounds */
	}
	int r = (int)(color >> 3*8);
	int g = (int)((color >> 2*8) & 0xFF);
	int b = (int)((color >> 8) & 0xFF);
	int a = (int)(color & 0xFF);
	if (a)
	{
		if (alphaNum*a != 0 && alphaNum*a != 255) // Alpha transparency, compute alpha based on array colors
		{
			double alpha = ((double)a)/255.0 * (alphaNum);
			uint32_t oldPix = buffer->pixels[y*buffer->width+x];
			int oldR = (int)(oldPix >> 3*8);
			int oldG = (int)((oldPix >> 2*8) & 0xFF);
			int oldB = (int)((oldPix >> 8) & 0xFF);
			int oldA = (int)(oldPix & 0xFF);
			r = (int)((double)r * alpha + (double)oldR * (1-alpha));
			g = (int)((double)g * alpha + (double)oldG * (1-alpha));
			b = (int)((double)b * alpha + (double)oldB * (1-alpha));
			a = (int)((double)a * alpha + (double)oldA * (1-alpha));
		}
		PixBuffer_drawPix(buffer, x, y, PixBuffer_toPixColor(r,g,b,a));
	}
}
/* Draws a pixel at a fractional coordinate, additionally painting the
 * neighboring cell when the fractional part is more than half a pixel
 * away from the base cell (a crude sub-pixel positioning approximation).
 *
 * NOTE(review): for x >= 0, partX = x - floor(x) lies in [0, 1), so the
 * `partX < -0.5` branch below can only trigger for negative coordinates,
 * where the cast of floor(x) to uint32_t has already wrapped around —
 * confirm the intended handling of negative coordinates before relying
 * on those branches. */
void PixBuffer_drawPixDouble(PixBuffer* buffer, double x, double y, uint32_t color, double alphaNum)
{
	uint32_t baseX = (uint32_t)floor(x);
	uint32_t baseY = (uint32_t)floor(y);
	double partX = x - baseX;
	double partY = y - baseY;
	/* only draw the base cell for non-negative coordinates */
	if (x >= 0 && y >= 0)
	{
		PixBuffer_drawPixAlpha(buffer, baseX, baseY, color, alphaNum);
	}
	/* bleed into the horizontal neighbor when more than half a cell off */
	if (partX > 0.5)
	{
		baseX++;
		PixBuffer_drawPixAlpha(buffer, baseX, baseY, color, alphaNum);
	}
	else if (partX < -0.5)
	{
		baseX--;
		PixBuffer_drawPixAlpha(buffer, baseX, baseY, color, alphaNum);
	}
	/* bleed into the vertical neighbor (note: uses the updated baseX) */
	if (partY > 0.5)
	{
		baseY++;
		PixBuffer_drawPixAlpha(buffer, baseX, baseY, color, alphaNum);
	}
	else if (partY < -0.5)
	{
		baseY--;
		PixBuffer_drawPixAlpha(buffer, baseX, baseY, color, alphaNum);
	}
	//PixBuffer_drawPixAlpha(buffer, baseX, baseY, color, alphaNum);
}
// RAYTEX FUNCTIONS
/* Build a RayTex from raw interleaved RGBA byte data.
 * @param rgbaData   4 bytes per texel (R,G,B,A), tiles stacked vertically
 * @param tileWidth  width of one tile in texels
 * @param tileHeight height of one tile in texels
 * @param numTiles   number of tiles in the atlas
 * @return newly allocated texture, or NULL on allocation failure
 *
 * Fixes: malloc results were previously unchecked (NULL dereference on
 * OOM), and the texel-count product was computed in 32-bit arithmetic
 * (overflow risk for large atlases); the count is now computed in size_t
 * and both allocations are verified, releasing partial state on failure. */
RayTex* RayTex_initFromRGBA(uint8_t* rgbaData, uint32_t tileWidth, uint32_t tileHeight, uint8_t numTiles)
{
	RayTex* newTex = (RayTex*)malloc(sizeof(RayTex));
	if (newTex == NULL)
	{
		return NULL;
	}
	newTex->tileWidth = tileWidth;
	newTex->tileHeight = tileHeight;
	newTex->tileCount = numTiles;
	/* Convert color chars into pixel ints */
	size_t texelCount = (size_t)tileWidth * tileHeight * numTiles;
	newTex->pixData = (uint32_t*)malloc(sizeof(uint32_t) * texelCount);
	if (newTex->pixData == NULL)
	{
		free(newTex);
		return NULL;
	}
	for (size_t p = 0; p < texelCount; p++)
	{
		uint32_t newPix = 0;
		/* pack the four components as 0xRRGGBBAA */
		for (uint8_t comp = 0; comp < 4; comp++)
		{
			newPix |= ((uint32_t)(rgbaData[p*4+comp]) << (8 * (3-comp)));
		}
		newTex->pixData[p] = newPix;
	}
	return newTex;
}
/* Release a RayTex and its pixel data.
 * Fix: a NULL argument previously dereferenced tex->pixData; the function
 * is now a safe no-op for NULL, matching free()'s own NULL tolerance. */
void RayTex_delRayTex(RayTex* tex)
{
	if (tex == NULL)
	{
		return;
	}
	free(tex->pixData);
	free(tex);
}
|
mlp_example_f32_numa.c | /******************************************************************************
* Copyright (c) Intel Corporation - All rights reserved. *
* This file is part of the LIBXSMM library. *
* *
* For information on the license, see the LICENSE file. *
* Further information: https://github.com/hfp/libxsmm/ *
* SPDX-License-Identifier: BSD-3-Clause *
******************************************************************************/
/* Alexander Heinecke (Intel Corp.)
******************************************************************************/
#include <libxsmm.h>
#include <libxsmm_sync.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <math.h>
#if defined(_OPENMP)
# include <omp.h>
#endif
#include <numa.h>
#define CHECK_L1
/* include c-based dnn library */
#include "../common/dnn_common.h"
/* Fill a float buffer with test data: all ones when initOne is set,
 * uniform [0,1) randoms when initPos is set, otherwise small values
 * centered near zero (0.05 - rng/10). */
LIBXSMM_INLINE void my_init_buf(float* buf, size_t size, int initPos, int initOne)
{
  int idx;
  zero_buf(buf, size);
  for (idx = 0; idx < (int)size; ++idx) {
    double value;
    if (initOne != 0) {
      value = 1.0;
    } else if (initPos != 0) {
      value = libxsmm_rng_f64();
    } else {
      value = 0.05 - libxsmm_rng_f64()/10.0;
    }
    buf[idx] = (float)value;
  }
}
/* Bitmask selecting which elementwise operations are fused into the
 * fully-connected kernels (tested with `&` in the exec functions). */
typedef enum my_eltwise_fuse {
  MY_ELTWISE_FUSE_NONE = 0,
  MY_ELTWISE_FUSE_BIAS = 1,  /* add bias in fwd / reduce dbias in bwd */
  MY_ELTWISE_FUSE_RELU = 2,  /* apply ReLU and record a relu mask */
  MY_ELTWISE_FUSE_BIAS_RELU = MY_ELTWISE_FUSE_BIAS | MY_ELTWISE_FUSE_RELU
} my_eltwise_fuse;
/* Which pass(es) to execute; the BWD variants are bit flags so that
 * MY_PASS_BWD (= BWD_D | BWD_W) selects both backward passes at once. */
typedef enum my_pass {
  MY_PASS_FWD = 1,
  MY_PASS_BWD_D = 2,  /* gradient w.r.t. input activations */
  MY_PASS_BWD_W = 4,  /* gradient w.r.t. weights */
  MY_PASS_BWD = 6     /* both backward passes */
} my_pass;
/* Configuration for the weight-update (optimizer) step. */
typedef struct my_opt_config {
  libxsmm_blasint C;         /* input feature count */
  libxsmm_blasint K;         /* output feature count */
  libxsmm_blasint bc;        /* input-feature block size */
  libxsmm_blasint bk;        /* output-feature block size */
  libxsmm_blasint threads;   /* number of worker threads */
  float lr;                  /* learning rate */
  size_t scratch_size;       /* scratch requirement (0: none) */
  libxsmm_barrier* barrier;  /* barrier shared by the worker threads */
} my_opt_config;
/* Configuration for the softmax ("smax") forward pass. */
typedef struct my_smax_fwd_config {
  libxsmm_blasint N;         /* minibatch size */
  libxsmm_blasint C;         /* feature count */
  libxsmm_blasint bn;        /* minibatch block size */
  libxsmm_blasint bc;        /* feature block size */
  libxsmm_blasint threads;   /* number of worker threads */
  size_t scratch_size;       /* scratch requirement (0: none) */
  libxsmm_barrier* barrier;  /* barrier shared by the worker threads */
} my_smax_fwd_config;
/* Configuration for the softmax ("smax") backward pass. */
typedef struct my_smax_bwd_config {
  libxsmm_blasint N;         /* minibatch size */
  libxsmm_blasint C;         /* feature count */
  libxsmm_blasint bn;        /* minibatch block size */
  libxsmm_blasint bc;        /* feature block size */
  libxsmm_blasint threads;   /* number of worker threads */
  size_t scratch_size;       /* scratch requirement (0: none) */
  float loss_weight;         /* scale applied to the loss gradient */
  libxsmm_barrier* barrier;  /* barrier shared by the worker threads */
} my_smax_bwd_config;
/* Configuration for the forward pass of one fully-connected layer. */
typedef struct my_fc_fwd_config {
  libxsmm_blasint N;                /* minibatch size */
  libxsmm_blasint C;                /* input feature count */
  libxsmm_blasint K;                /* output feature count */
  libxsmm_blasint bn;               /* minibatch block size */
  libxsmm_blasint bc;               /* input-feature block size */
  libxsmm_blasint bk;               /* output-feature block size */
  libxsmm_blasint threads;          /* number of worker threads */
  my_eltwise_fuse fuse_type;        /* fused bias/ReLU selection */
  libxsmm_blasint fwd_bf;           /* blocking factor over input-feature blocks */
  libxsmm_blasint fwd_2d_blocking;  /* nonzero: 2D row/col team decomposition */
  libxsmm_blasint fwd_col_teams;    /* teams over minibatch blocks */
  libxsmm_blasint fwd_row_teams;    /* teams over output-feature blocks */
  size_t scratch_size;
  libxsmm_barrier* barrier;         /* barrier shared by the worker threads */
  libxsmm_smmfunction_reducebatch_strd gemm_fwd;   /* BRGEMM, beta=1 (accumulate) */
  libxsmm_smmfunction_reducebatch_strd gemm_fwd2;  /* BRGEMM, beta=0 (overwrite) */
} my_fc_fwd_config;
/* Configuration for the backward passes (by-data and by-weights) of one
 * fully-connected layer. */
typedef struct my_fc_bwd_config {
  libxsmm_blasint N;                /* minibatch size */
  libxsmm_blasint C;                /* input feature count */
  libxsmm_blasint K;                /* output feature count */
  libxsmm_blasint bn;               /* minibatch block size */
  libxsmm_blasint bc;               /* input-feature block size */
  libxsmm_blasint bk;               /* output-feature block size */
  libxsmm_blasint threads;          /* number of worker threads */
  my_eltwise_fuse fuse_type;        /* fused bias/ReLU selection */
  libxsmm_blasint bwd_bf;           /* bwd-by-data blocking factor over OFm blocks */
  libxsmm_blasint bwd_2d_blocking;  /* nonzero: 2D teams for bwd-by-data */
  libxsmm_blasint bwd_col_teams;
  libxsmm_blasint bwd_row_teams;
  libxsmm_blasint upd_bf;           /* weight-update blocking factor over MB blocks */
  libxsmm_blasint upd_2d_blocking;  /* nonzero: 2D teams for weight update */
  libxsmm_blasint upd_col_teams;
  libxsmm_blasint upd_row_teams;
  libxsmm_blasint ifm_subtasks;     /* intra-block split of bc for the update */
  libxsmm_blasint ofm_subtasks;     /* intra-block split of bk for the update */
  size_t scratch_size;
  libxsmm_barrier* barrier;         /* barrier shared by the worker threads */
  libxsmm_smmfunction_reducebatch_strd gemm_bwd;   /* bwd-by-data BRGEMM, beta=1 */
  libxsmm_smmfunction_reducebatch_strd gemm_bwd2;  /* bwd-by-data BRGEMM, beta=0 */
  libxsmm_smmfunction_reducebatch_strd gemm_upd;   /* weight-update BRGEMM, beta=1 */
  libxsmm_smmfunction_reducebatch_strd gemm_upd2;  /* weight-update BRGEMM, beta=0 */
  libxsmm_xtransfunction tr_kernel; /* weight-transpose kernel for the bwd pass */
} my_fc_bwd_config;
/* Per-NUMA-node partitioning info: which threads belong to the node and
 * which blocks of each layer's weights live in the node-local scratch
 * copies. Arrays indexed by [layer] describe inclusive block ranges. */
typedef struct my_numa_thr_cfg {
  int thr_s;                 /* first thread id owned by this node */
  int thr_e;                 /* last thread id owned by this node */
  int *blocksOFm_s;          /* per layer: first OFm block held locally */
  int *blocksOFm_e;          /* per layer: last OFm block held locally */
  int *blocksIFm_s;          /* per layer: first IFm block (presumably for the update pass — confirm) */
  int *blocksIFm_e;
  int *blocksOFm_tr_s;       /* per layer: OFm range for transposed weights */
  int *blocksOFm_tr_e;
  int *blocksIFm_tr_s;       /* per layer: IFm range for transposed weights */
  int *blocksIFm_tr_e;
  float **scratch;           /* per layer: node-local weight copy (read by fwd) */
  size_t *layer_size;        /* per layer: size of the local weight copy */
  int **fwd_ofm_to_numa;     /* presumably maps OFm block -> owning node; verify against callers */
  float *bwd_d_scratch;      /* node-local transposed weights for bwd-by-data */
  size_t bwd_d_scratch_size;
  float *bwd_w_scratch;      /* node-local scratch for the weight-update pass */
  size_t bwd_w_layer_size;
} my_numa_thr_cfg;
/* Build the forward-pass configuration for one fully-connected layer:
 * record the problem geometry, choose the parallelization strategy
 * (2x8 2D team blocking only for exactly 16 threads, flat 1D otherwise),
 * create the shared thread barrier, and JIT the two BRGEMM kernels
 * (beta=1 accumulating and beta=0 overwriting variants). The process
 * exits if JIT dispatch fails. */
my_fc_fwd_config setup_my_fc_fwd(libxsmm_blasint N, libxsmm_blasint C, libxsmm_blasint K, libxsmm_blasint bn,
                                 libxsmm_blasint bc, libxsmm_blasint bk, libxsmm_blasint threads, my_eltwise_fuse fuse_type) {
  my_fc_fwd_config res;
  /* leading dimensions of the blocked GEMM operands */
  libxsmm_blasint lda = bk;
  libxsmm_blasint ldb = bc;
  libxsmm_blasint ldc = bk;
  float alpha = 1.0f;
  float beta = 1.0f;      /* accumulate into the output */
  float zerobeta = 0.0f;  /* overwrite the output */
  /* setting up some handle values */
  res.N = N;
  res.C = C;
  res.K = K;
  res.bn = bn;
  res.bc = bc;
  res.bk = bk;
  res.threads = threads;
  res.fuse_type = fuse_type;
  /* setup parallelization strategy */
  if (threads == 16) {
    res.fwd_bf = 1;
    res.fwd_2d_blocking = 1;
    res.fwd_col_teams = 2;
    res.fwd_row_teams = 8;
  } else {
    res.fwd_bf = 1;
    res.fwd_2d_blocking = 0;
    res.fwd_col_teams = 1;
    res.fwd_row_teams = 1;
  }
#if 0
  res.fwd_bf = atoi(getenv("FWD_BF"));
  res.fwd_2d_blocking = atoi(getenv("FWD_2D_BLOCKING"));
  res.fwd_col_teams = atoi(getenv("FWD_COL_TEAMS"));
  res.fwd_row_teams = atoi(getenv("FWD_ROW_TEAMS"));
#endif
  /* setting up the barrier */
  res.barrier = libxsmm_barrier_create(threads, 1);
  /* TPP creation */
  res.gemm_fwd = libxsmm_smmdispatch_reducebatch_strd(res.bk, res.bn, res.bc,
    res.bk*res.bc*sizeof(float), res.bc*res.bn*sizeof(float),
    &lda, &ldb, &ldc, &alpha, &beta, NULL, NULL);
  if ( res.gemm_fwd == NULL ) {
    fprintf( stderr, "JIT for BRGEMM TPP gemm_fwd failed. Bailing...!\n");
    exit(-1);
  }
  res.gemm_fwd2 = libxsmm_smmdispatch_reducebatch_strd(res.bk, res.bn, res.bc,
    res.bk*res.bc*sizeof(float), res.bc*res.bn*sizeof(float),
    &lda, &ldb, &ldc, &alpha, &zerobeta, NULL, NULL);
  if ( res.gemm_fwd2 == NULL ) {
    fprintf( stderr, "JIT for BRGEMM TPP gemm_fwd2 failed. Bailing...!\n");
    exit(-1);
  }
  /* init scratch */
  res.scratch_size = 0;
  return res;
}
/* Build the backward-pass configuration for one fully-connected layer:
 * record the geometry, choose the parallelization strategy (2D teams only
 * for exactly 16 threads), create the barrier, and JIT the bwd-by-data
 * BRGEMMs, the weight-transpose kernel, and the ('N','T') weight-update
 * BRGEMMs (beta=1 and beta=0 variants each). Exits the process if any
 * JIT dispatch fails. Scratch holds dinput + relu copy + transposed
 * weights (see scratch_size formula at the end). */
my_fc_bwd_config setup_my_fc_bwd(libxsmm_blasint N, libxsmm_blasint C, libxsmm_blasint K, libxsmm_blasint bn,
                                 libxsmm_blasint bc, libxsmm_blasint bk, libxsmm_blasint threads, my_eltwise_fuse fuse_type) {
  my_fc_bwd_config res;
  const libxsmm_trans_descriptor* tr_desc = 0;
  libxsmm_descriptor_blob blob;
  /* leading dimensions for the bwd-by-data GEMM (output has bc columns) */
  libxsmm_blasint lda = bc;
  libxsmm_blasint ldb = bk;
  libxsmm_blasint ldc = bc;
  float alpha = 1.0f;
  float beta = 1.0f;      /* accumulate */
  float zerobeta = 0.0f;  /* overwrite */
  int updflags = LIBXSMM_GEMM_FLAGS( 'N', 'T' );  /* update uses B transposed */
  libxsmm_blasint updM;
  libxsmm_blasint updN;
  /* setting up some handle values */
  res.N = N;
  res.C = C;
  res.K = K;
  res.bn = bn;
  res.bc = bc;
  res.bk = bk;
  res.threads = threads;
  res.fuse_type = fuse_type;
  /* setup parallelization strategy */
  if (threads == 16) {
    res.bwd_bf = 1;
    res.bwd_2d_blocking = 1;
    res.bwd_col_teams = 2;
    res.bwd_row_teams = 8;
    res.upd_bf = 1;
    res.upd_2d_blocking = 0;
    res.upd_col_teams = 1;
    res.upd_row_teams = 1;
    res.ifm_subtasks = 1;
    res.ofm_subtasks = 1;
  } else {
    res.bwd_bf = 1;
    res.bwd_2d_blocking = 0;
    res.bwd_col_teams = 1;
    res.bwd_row_teams = 1;
    res.upd_bf = 1;
    res.upd_2d_blocking = 0;
    res.upd_col_teams = 1;
    res.upd_row_teams = 1;
    res.ifm_subtasks = 1;
    res.ofm_subtasks = 1;
  }
#if 0
  res.bwd_bf = atoi(getenv("BWD_BF"));
  res.bwd_2d_blocking = atoi(getenv("BWD_2D_BLOCKING"));
  res.bwd_col_teams = atoi(getenv("BWD_COL_TEAMS"));
  res.bwd_row_teams = atoi(getenv("BWD_ROW_TEAMS"));
  res.upd_bf = atoi(getenv("UPD_BF"));
  res.upd_2d_blocking = atoi(getenv("UPD_2D_BLOCKING"));
  res.upd_col_teams = atoi(getenv("UPD_COL_TEAMS"));
  res.upd_row_teams = atoi(getenv("UPD_ROW_TEAMS"));
  res.ifm_subtasks = atoi(getenv("IFM_SUBTASKS"));
  res.ofm_subtasks = atoi(getenv("OFM_SUBTASKS"));
#endif
  /* setting up the barrier */
  res.barrier = libxsmm_barrier_create(threads, 1);
  /* TPP creation */
  /* BWD GEMM */
  res.gemm_bwd = libxsmm_smmdispatch_reducebatch_strd(res.bc, res.bn, res.bk,
    res.bk*res.bc*sizeof(float), res.bk*res.bn*sizeof(float),
    &lda, &ldb, &ldc, &alpha, &beta, NULL, NULL);
  if ( res.gemm_bwd == NULL ) {
    fprintf( stderr, "JIT for BRGEMM TPP gemm_bwd failed. Bailing...!\n");
    exit(-1);
  }
  res.gemm_bwd2 = libxsmm_smmdispatch_reducebatch_strd(res.bc, res.bn, res.bk,
    res.bk*res.bc*sizeof(float), res.bk*res.bn*sizeof(float),
    &lda, &ldb, &ldc, &alpha, &zerobeta, NULL, NULL);
  if ( res.gemm_bwd2 == NULL ) {
    fprintf( stderr, "JIT for BRGEMM TPP gemm_bwd2 failed. Bailing...!\n");
    exit(-1);
  }
  /* Transpose kernel used for weight transpose in bwd pass */
  tr_desc = libxsmm_trans_descriptor_init(&blob, sizeof(float), res.bk, res.bc, res.bc);
  res.tr_kernel = libxsmm_dispatch_trans(tr_desc);
  if ( res.tr_kernel == NULL ) {
    fprintf( stderr, "JIT for transpose TPP tr_kernel failed. Bailing...!\n");
    exit(-1);
  }
  /* UPD GEMM */
  lda = res.bk;
  ldb = res.bc;
  ldc = res.bk;
  updM = res.bk/res.ofm_subtasks;  /* subtask tile sizes within one block */
  updN = res.bc/res.ifm_subtasks;
  res.gemm_upd = libxsmm_smmdispatch_reducebatch_strd(updM, updN, res.bn,
    res.K*res.bn*sizeof(float), res.C*res.bn*sizeof(float),
    &lda, &ldb, &ldc, &alpha, &beta, &updflags, NULL);
  if ( res.gemm_upd == NULL ) {
    fprintf( stderr, "JIT for BRGEMM TPP gemm_upd failed. Bailing...!\n");
    exit(-1);
  }
  res.gemm_upd2 = libxsmm_smmdispatch_reducebatch_strd(updM, updN, res.bn,
    res.K*res.bn*sizeof(float), res.C*res.bn*sizeof(float),
    &lda, &ldb, &ldc, &alpha, &zerobeta, &updflags, NULL);
  if ( res.gemm_upd2 == NULL ) {
    fprintf( stderr, "JIT for BRGEMM TPP gemm_upd2 failed. Bailing...!\n");
    exit(-1);
  }
  /* init scratch */
  res.scratch_size = sizeof(float) * ( (((size_t)res.C + (size_t)res.K) * (size_t)res.N) + ((size_t)res.C * (size_t)res.K) );
  return res;
}
/* Build the optimizer configuration: record the weight geometry, thread
 * count and learning rate, and create the barrier the worker threads
 * synchronize on. No scratch space is required. */
my_opt_config setup_my_opt(libxsmm_blasint C, libxsmm_blasint K, libxsmm_blasint bc, libxsmm_blasint bk,
                           libxsmm_blasint threads, float lr) {
  my_opt_config cfg;
  cfg.C = C;
  cfg.K = K;
  cfg.bc = bc;
  cfg.bk = bk;
  cfg.threads = threads;
  cfg.lr = lr;
  /* shared barrier for all worker threads */
  cfg.barrier = libxsmm_barrier_create(threads, 1);
  cfg.scratch_size = 0;  /* no scratch needed */
  return cfg;
}
/* Build the softmax forward-pass configuration: record the geometry and
 * thread count, and create the shared thread barrier. */
my_smax_fwd_config setup_my_smax_fwd(libxsmm_blasint N, libxsmm_blasint C, libxsmm_blasint bn, libxsmm_blasint bc,
                                     libxsmm_blasint threads) {
  my_smax_fwd_config cfg;
  cfg.N = N;
  cfg.C = C;
  cfg.bn = bn;
  cfg.bc = bc;
  cfg.threads = threads;
  /* shared barrier for all worker threads */
  cfg.barrier = libxsmm_barrier_create(threads, 1);
  cfg.scratch_size = 0;  /* no scratch needed */
  return cfg;
}
/* Build the softmax backward-pass configuration: record the geometry,
 * thread count and loss weight, and create the shared thread barrier. */
my_smax_bwd_config setup_my_smax_bwd(libxsmm_blasint N, libxsmm_blasint C, libxsmm_blasint bn, libxsmm_blasint bc,
                                     libxsmm_blasint threads, float loss_weight) {
  my_smax_bwd_config cfg;
  cfg.N = N;
  cfg.C = C;
  cfg.bn = bn;
  cfg.bc = bc;
  cfg.threads = threads;
  cfg.loss_weight = loss_weight;
  /* shared barrier for all worker threads */
  cfg.barrier = libxsmm_barrier_create(threads, 1);
  cfg.scratch_size = 0;  /* no scratch needed */
  return cfg;
}
/* Forward pass of one fully-connected layer, executed by one thread of a
 * parallel region: BRGEMM over input-feature blocks with optional fused
 * bias initialization and fused ReLU (which also records relumask).
 * Work over (OFm block, MB block) tiles is distributed either as a 2D
 * row/col team decomposition (cfg.fwd_2d_blocking) or as flat 1D chunks;
 * the IFm dimension may additionally be blocked by BF = cfg.fwd_bf.
 * Weights are read from the NUMA-local copy numa_thr_cfg->scratch[layer],
 * whose first resident OFm block is blocksOFm_s[layer] (ofm_start), so
 * filter accesses subtract that offset.
 *
 * NOTE(review): the BF > 1 paths index `filter` with plain ofm1 (no
 * ofm_start offset), unlike the BF == 1 paths; the setups in this file
 * always produce fwd_bf == 1, so those paths appear unused — confirm
 * before enabling BF > 1. */
void my_fc_fwd_exec( my_fc_fwd_config cfg, const float* in_act_ptr, float* out_act_ptr,
                     const float* bias_ptr, unsigned char* relu_ptr, int start_tid, int my_tid, void* scratch, my_numa_thr_cfg *numa_thr_cfg, int layer) {
  const libxsmm_blasint nBlocksIFm = cfg.C / cfg.bc;
  const libxsmm_blasint nBlocksOFm = cfg.K / cfg.bk;
  const libxsmm_blasint nBlocksMB = cfg.N / cfg.bn;
  /* computing first logical thread */
  const libxsmm_blasint ltid = my_tid - start_tid;
  /* number of tasks that could be run in parallel */
  const libxsmm_blasint work = nBlocksOFm * nBlocksMB;
  /* compute chunk size */
  const libxsmm_blasint chunksize = (work % cfg.threads == 0) ?
          (work / cfg.threads) : ((work / cfg.threads) + 1);
  /* compute thr_begin and thr_end */
  const libxsmm_blasint thr_begin = (ltid * chunksize < work) ? (ltid * chunksize) : work;
  const libxsmm_blasint thr_end = ((ltid + 1) * chunksize < work) ? ((ltid + 1) * chunksize) : work;
  /* loop variables */
  libxsmm_blasint mb1ofm1 = 0, mb1 = 0, ofm1 = 0, ifm1 = 0, mb2 = 0, ofm2 = 0;
  libxsmm_blasint N_tasks_per_thread = 0, M_tasks_per_thread = 0;
  libxsmm_blasint my_M_start = 0, my_M_end = 0, my_N_start = 0, my_N_end = 0;
  libxsmm_blasint my_col_id = 0, my_row_id = 0, col_teams = 0, row_teams = 0;
  LIBXSMM_VLA_DECL(4, float, output, out_act_ptr, nBlocksOFm, cfg.bn, cfg.bk);
  LIBXSMM_VLA_DECL(4, const float, input, in_act_ptr, nBlocksIFm, cfg.bn, cfg.bc);
  /* weights come from the NUMA-local scratch copy for this layer */
  LIBXSMM_VLA_DECL(4, const float, filter, numa_thr_cfg->scratch[layer], nBlocksIFm, cfg.bc, cfg.bk);
  LIBXSMM_VLA_DECL(2, const float, bias, bias_ptr, cfg.bk);
  LIBXSMM_VLA_DECL(4, unsigned char, relumask, relu_ptr, nBlocksOFm, cfg.bn, cfg.bk);
  unsigned long long blocks = nBlocksIFm;
  libxsmm_blasint CB_BLOCKS = nBlocksIFm, BF = 1;
  LIBXSMM_UNUSED( scratch );
  /* split the IFm reduction into BF pieces of CB_BLOCKS blocks each */
  BF = cfg.fwd_bf;
  CB_BLOCKS = nBlocksIFm/BF;
  blocks = CB_BLOCKS;
  /* 2D decomposition: row teams over OFm blocks, col teams over MB blocks */
  col_teams = cfg.fwd_col_teams;
  row_teams = cfg.fwd_row_teams;
  my_row_id = ltid % row_teams;
  my_col_id = ltid / row_teams;
  N_tasks_per_thread = LIBXSMM_UPDIV(nBlocksMB, col_teams);
  M_tasks_per_thread = LIBXSMM_UPDIV(nBlocksOFm, row_teams);
  my_N_start = LIBXSMM_MIN(my_col_id * N_tasks_per_thread, nBlocksMB);
  my_N_end = LIBXSMM_MIN((my_col_id+1) * N_tasks_per_thread, nBlocksMB);
  my_M_start = LIBXSMM_MIN(my_row_id * M_tasks_per_thread, nBlocksOFm);
  my_M_end = LIBXSMM_MIN((my_row_id+1) * M_tasks_per_thread, nBlocksOFm);
  /* first OFm block resident in this node's weight copy */
  const libxsmm_blasint ofm_start = numa_thr_cfg->blocksOFm_s[layer];
  /* lazy barrier init */
  libxsmm_barrier_init(cfg.barrier, ltid);
  if (cfg.fwd_2d_blocking == 1) {
    if (BF > 1) {
      for (ifm1 = 0; ifm1 < BF; ++ifm1) {
        for (ofm1 = my_M_start; ofm1 < my_M_end; ++ofm1) {
          for (mb1 = my_N_start; mb1 < my_N_end; ++mb1) {
            /* Initialize output slice */
            if ( ifm1 == 0 ) {
              if ( (cfg.fuse_type & MY_ELTWISE_FUSE_BIAS) == MY_ELTWISE_FUSE_BIAS ) {
                for ( mb2 = 0; mb2 < cfg.bn; ++mb2 ) {
                  for ( ofm2 = 0; ofm2 < cfg.bk; ++ofm2 ) {
                    LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, mb2, ofm2, nBlocksOFm, cfg.bn, cfg.bk) = LIBXSMM_VLA_ACCESS(2, bias, ofm1, ofm2, cfg.bk);
                  }
                }
              } else {
                for ( mb2 = 0; mb2 < cfg.bn; ++mb2 ) {
                  for ( ofm2 = 0; ofm2 < cfg.bk; ++ofm2 ) {
                    LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, mb2, ofm2, nBlocksOFm, cfg.bn, cfg.bk) = (float)0;
                  }
                }
              }
            }
            /* BRGEMM */
            cfg.gemm_fwd( &LIBXSMM_VLA_ACCESS(4, filter, ofm1, ifm1*CB_BLOCKS, 0, 0, nBlocksIFm, cfg.bc, cfg.bk),
                          &LIBXSMM_VLA_ACCESS(4, input, mb1, ifm1*CB_BLOCKS, 0, 0, nBlocksIFm, cfg.bn, cfg.bc),
                          &LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk), &blocks);
            /* apply post BRGEMM fusion */
            if ( ifm1 == BF-1 ) {
              if ( (cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU ) {
                for ( mb2 = 0; mb2 < cfg.bn; ++mb2 ) {
                  for ( ofm2 = 0; ofm2 < cfg.bk; ++ofm2 ) {
                    float l_cur_out = LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, mb2, ofm2, nBlocksOFm, cfg.bn, cfg.bk);
                    LIBXSMM_VLA_ACCESS(4, relumask, mb1, ofm1, mb2, ofm2, nBlocksOFm, cfg.bn, cfg.bk) = (unsigned char)(( l_cur_out > (float)0 ) ? 1 : 0);
                    l_cur_out = (l_cur_out > (float)0) ? l_cur_out : (float)0;
                    LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, mb2, ofm2, nBlocksOFm, cfg.bn, cfg.bk) = l_cur_out;
                  }
                }
              }
            }
          }
        }
      }
    } else {
      for (ofm1 = my_M_start; ofm1 < my_M_end; ++ofm1) {
        for (mb1 = my_N_start; mb1 < my_N_end; ++mb1) {
          if ( (cfg.fuse_type & MY_ELTWISE_FUSE_BIAS) == MY_ELTWISE_FUSE_BIAS ) {
            /* bias seeds the accumulator, then accumulate the BRGEMM */
            for ( mb2 = 0; mb2 < cfg.bn; ++mb2 ) {
              for ( ofm2 = 0; ofm2 < cfg.bk; ++ofm2 ) {
                LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, mb2, ofm2, nBlocksOFm, cfg.bn, cfg.bk) = LIBXSMM_VLA_ACCESS(2, bias, ofm1, ofm2, cfg.bk);
              }
            }
            cfg.gemm_fwd( &LIBXSMM_VLA_ACCESS(4, filter, ofm1-ofm_start, 0, 0, 0, nBlocksIFm, cfg.bc, cfg.bk),
                          &LIBXSMM_VLA_ACCESS(4, input, mb1, 0, 0, 0, nBlocksIFm, cfg.bn, cfg.bc),
                          &LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk), &blocks);
          } else {
            /* no bias: use the beta=0 (overwriting) kernel */
            cfg.gemm_fwd2( &LIBXSMM_VLA_ACCESS(4, filter, ofm1-ofm_start, 0, 0, 0, nBlocksIFm, cfg.bc, cfg.bk),
                           &LIBXSMM_VLA_ACCESS(4, input, mb1, 0, 0, 0, nBlocksIFm, cfg.bn, cfg.bc),
                           &LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk), &blocks);
          }
          /* post GEMM fusion */
          if ( (cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU ) {
            for ( mb2 = 0; mb2 < cfg.bn; ++mb2 ) {
              for ( ofm2 = 0; ofm2 < cfg.bk; ++ofm2 ) {
                float l_cur_out = LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, mb2, ofm2, nBlocksOFm, cfg.bn, cfg.bk);
                LIBXSMM_VLA_ACCESS(4, relumask, mb1, ofm1, mb2, ofm2, nBlocksOFm, cfg.bn, cfg.bk) = (unsigned char)(( l_cur_out > (float)0 ) ? 1 : 0);
                l_cur_out = ( l_cur_out > (float)0 ) ? l_cur_out : (float)0;
                LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, mb2, ofm2, nBlocksOFm, cfg.bn, cfg.bk) = l_cur_out;
              }
            }
          }
        }
      }
    }
  } else {
    if (BF > 1) {
      for ( ifm1 = 0; ifm1 < BF; ++ifm1 ) {
        for ( mb1ofm1 = thr_begin; mb1ofm1 < thr_end; ++mb1ofm1 ) {
          mb1 = mb1ofm1%nBlocksMB;
          ofm1 = mb1ofm1/nBlocksMB;
          /* Initialize output slice */
          if ( ifm1 == 0 ) {
            if ( (cfg.fuse_type & MY_ELTWISE_FUSE_BIAS) == MY_ELTWISE_FUSE_BIAS ) {
              for ( mb2 = 0; mb2 < cfg.bn; ++mb2 ) {
                for ( ofm2 = 0; ofm2 < cfg.bk; ++ofm2 ) {
                  LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, mb2, ofm2, nBlocksOFm, cfg.bn, cfg.bk) = LIBXSMM_VLA_ACCESS(2, bias, ofm1, ofm2, cfg.bk);
                }
              }
            } else {
              for ( mb2 = 0; mb2 < cfg.bn; ++mb2 ) {
                for ( ofm2 = 0; ofm2 < cfg.bk; ++ofm2 ) {
                  LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, mb2, ofm2, nBlocksOFm, cfg.bn, cfg.bk) = (float)0;
                }
              }
            }
          }
          /* BRGEMM */
          cfg.gemm_fwd( &LIBXSMM_VLA_ACCESS(4, filter, ofm1, ifm1*CB_BLOCKS, 0, 0, nBlocksIFm, cfg.bc, cfg.bk),
                        &LIBXSMM_VLA_ACCESS(4, input, mb1, ifm1*CB_BLOCKS, 0, 0, nBlocksIFm, cfg.bn, cfg.bc),
                        &LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk), &blocks);
          /* post GEMM fusion */
          if ( ifm1 == BF-1 ) {
            if ( (cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU ) {
              for ( mb2 = 0; mb2 < cfg.bn; ++mb2 ) {
                for ( ofm2 = 0; ofm2 < cfg.bk; ++ofm2 ) {
                  float l_cur_out = LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, mb2, ofm2, nBlocksOFm, cfg.bn, cfg.bk);
                  LIBXSMM_VLA_ACCESS(4, relumask, mb1, ofm1, mb2, ofm2, nBlocksOFm, cfg.bn, cfg.bk) = (unsigned char)(( l_cur_out > (float)0 ) ? 1 : 0);
                  l_cur_out = (l_cur_out > (float)0) ? l_cur_out : (float)0;
                  LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, mb2, ofm2, nBlocksOFm, cfg.bn, cfg.bk) = l_cur_out;
                }
              }
            }
          }
        }
      }
    } else {
      for ( mb1ofm1 = thr_begin; mb1ofm1 < thr_end; ++mb1ofm1 ) {
        mb1 = mb1ofm1%nBlocksMB;
        ofm1 = mb1ofm1/nBlocksMB;
        if ( (cfg.fuse_type & MY_ELTWISE_FUSE_BIAS) == MY_ELTWISE_FUSE_BIAS ) {
          /* bias seeds the accumulator, then accumulate the BRGEMM */
          for ( mb2 = 0; mb2 < cfg.bn; ++mb2 ) {
            for ( ofm2 = 0; ofm2 < cfg.bk; ++ofm2 ) {
              LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, mb2, ofm2, nBlocksOFm, cfg.bn, cfg.bk) = LIBXSMM_VLA_ACCESS(2, bias, ofm1, ofm2, cfg.bk);
            }
          }
          cfg.gemm_fwd( &LIBXSMM_VLA_ACCESS(4, filter, ofm1-ofm_start, 0, 0, 0, nBlocksIFm, cfg.bc, cfg.bk),
                        &LIBXSMM_VLA_ACCESS(4, input, mb1, 0, 0, 0, nBlocksIFm, cfg.bn, cfg.bc),
                        &LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk), &blocks);
        } else {
          /* no bias: use the beta=0 (overwriting) kernel */
          cfg.gemm_fwd2( &LIBXSMM_VLA_ACCESS(4, filter, ofm1-ofm_start, 0, 0, 0, nBlocksIFm, cfg.bc, cfg.bk),
                         &LIBXSMM_VLA_ACCESS(4, input, mb1, 0, 0, 0, nBlocksIFm, cfg.bn, cfg.bc),
                         &LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk), &blocks);
        }
        /* post GEMM fusion */
        if ( (cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU ) {
          for ( mb2 = 0; mb2 < cfg.bn; ++mb2 ) {
            for ( ofm2 = 0; ofm2 < cfg.bk; ++ofm2 ) {
              float l_cur_out = LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, mb2, ofm2, nBlocksOFm, cfg.bn, cfg.bk);
              LIBXSMM_VLA_ACCESS(4, relumask, mb1, ofm1, mb2, ofm2, nBlocksOFm, cfg.bn, cfg.bk) = (unsigned char)(( l_cur_out > (float)0 ) ? 1 : 0);
              l_cur_out = ( l_cur_out > (float)0 ) ? l_cur_out : (float)0;
              LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, mb2, ofm2, nBlocksOFm, cfg.bn, cfg.bk) = l_cur_out;
            }
          }
        }
      }
    }
  }
  libxsmm_barrier_wait(cfg.barrier, ltid);
}
/* Build the transposed weight copy needed by the backward-by-data pass:
 * each NUMA node transposes only its assigned slice of IFm blocks
 * (inclusive range blocksIFm_tr_s..blocksIFm_tr_e of the given layer)
 * into its node-local bwd_d_scratch, gathering each source weight block
 * from whichever node owns it (ofm_to_node). The (IFm x OFm) transpose
 * tasks are split evenly across the node's threads (thr_s..thr_e). */
void my_fc_bwd_d_transpose( my_fc_bwd_config cfg, int my_tid, my_numa_thr_cfg **numa_thr_cfg_, int numa_node, int layer, int *ofm_to_node) {
  my_numa_thr_cfg *numa_thr_cfg = *numa_thr_cfg_;
  /* Transpose kernel to transpose filters */
  libxsmm_xtransfunction tr_kernel = cfg.tr_kernel;
  /* here we assume that input and output blocking is similar */
  const libxsmm_blasint bk = cfg.bk;
  const libxsmm_blasint bc = cfg.bc;
  const libxsmm_blasint nBlocksIFm = cfg.C / bc;
  const libxsmm_blasint nBlocksOFm = cfg.K / bk;
  /* computing first logical thread */
  const libxsmm_blasint ltid = my_tid - numa_thr_cfg[numa_node].thr_s;
  /* inclusive range, hence the +1 */
  const libxsmm_blasint l_nBlocksIFm = (numa_thr_cfg[numa_node].blocksIFm_tr_e[layer] - numa_thr_cfg[numa_node].blocksIFm_tr_s[layer]) + 1;
  /* number of tasks for transpose that could be run in parallel */
  const libxsmm_blasint transpose_work = l_nBlocksIFm * nBlocksOFm;
  /* compute chunk size */
  int thr = numa_thr_cfg[numa_node].thr_e - numa_thr_cfg[numa_node].thr_s;
  const libxsmm_blasint transpose_chunksize = (transpose_work % thr == 0) ? (transpose_work / thr) : ((transpose_work / thr) + 1);
  /* compute thr_begin and thr_end */
  const libxsmm_blasint transpose_thr_begin = (ltid * transpose_chunksize < transpose_work) ? (ltid * transpose_chunksize) : transpose_work;
  const libxsmm_blasint transpose_thr_end = ((ltid + 1) * transpose_chunksize < transpose_work) ? ((ltid + 1) * transpose_chunksize) : transpose_work;
  float *filter_tr = numa_thr_cfg[numa_node].bwd_d_scratch;
  /* lazy barrier init */
  libxsmm_barrier_init(cfg.barrier, my_tid);
  /* transpose weight */
  int ifm1ofm1 = 0;
  for (ifm1ofm1 = transpose_thr_begin; ifm1ofm1 < transpose_thr_end; ++ifm1ofm1) {
    const unsigned int ubk = (unsigned int)bk;
    const unsigned int ubc = (unsigned int)bc;
    int ofm1 = ifm1ofm1 / l_nBlocksIFm;
    int ifm1 = ifm1ofm1 % l_nBlocksIFm;
    /* source block lives in the weight copy of the node that owns ofm1 */
    my_numa_thr_cfg *l_numa_thr_cfg = &numa_thr_cfg[ofm_to_node[ofm1]];
    float *inp = l_numa_thr_cfg->scratch[layer];
    /* index into the owner's copy: OFm offset is relative to its first
     * resident block; IFm offset is this node's absolute IFm start */
    inp = inp + (ofm1 - l_numa_thr_cfg->blocksOFm_s[layer]) * nBlocksIFm * bc * bk
              + (ifm1 + numa_thr_cfg[numa_node].blocksIFm_tr_s[layer]) * bc * bk;
    float *out = filter_tr + ifm1 * nBlocksOFm * bk * bc + ofm1 * bk * bc;
    tr_kernel(inp, &ubk, out, &ubc);
  }
  libxsmm_barrier_wait(cfg.barrier, my_tid);
}
void my_fc_bwd_exec( my_fc_bwd_config cfg, float* din_act_ptr,
float* dout_act_ptr, float* dwt_ptr, const float* in_act_ptr,
float* dbias_ptr, const unsigned char* relu_ptr, my_pass pass, int start_tid, int my_tid, void* scratch, my_numa_thr_cfg *numa_thr_cfg, int layer ) {
/* here we assume that input and output blocking is similar */
const libxsmm_blasint bn = cfg.bn;
const libxsmm_blasint bk = cfg.bk;
const libxsmm_blasint bc = cfg.bc;
const libxsmm_blasint nBlocksIFm = cfg.C / bc;
const libxsmm_blasint nBlocksOFm = cfg.K / bk;
const libxsmm_blasint nBlocksMB = cfg.N / bn;
/* computing first logical thread */
const libxsmm_blasint ltid = my_tid - start_tid;
/* number of tasks for transpose that could be run in parallel */
const libxsmm_blasint eltwise_work = nBlocksOFm * nBlocksMB;
/* compute chunk size */
const libxsmm_blasint eltwise_chunksize = (eltwise_work % cfg.threads == 0) ? (eltwise_work / cfg.threads) : ((eltwise_work / cfg.threads) + 1);
/* compute thr_begin and thr_end */
const libxsmm_blasint eltwise_thr_begin = (ltid * eltwise_chunksize < eltwise_work) ? (ltid * eltwise_chunksize) : eltwise_work;
const libxsmm_blasint eltwise_thr_end = ((ltid + 1) * eltwise_chunksize < eltwise_work) ? ((ltid + 1) * eltwise_chunksize) : eltwise_work;
libxsmm_blasint mb1ofm1;
/* number of tasks for transpose that could be run in parallel */
const libxsmm_blasint dbias_work = nBlocksOFm;
/* compute chunk size */
const libxsmm_blasint dbias_chunksize = (dbias_work % cfg.threads == 0) ? (dbias_work / cfg.threads) : ((dbias_work / cfg.threads) + 1);
/* compute thr_begin and thr_end */
const libxsmm_blasint dbias_thr_begin = (ltid * dbias_chunksize < dbias_work) ? (ltid * dbias_chunksize) : dbias_work;
const libxsmm_blasint dbias_thr_end = ((ltid + 1) * dbias_chunksize < dbias_work) ? ((ltid + 1) * dbias_chunksize) : dbias_work;
/* loop variables */
libxsmm_blasint ofm1 = 0, mb1 = 0, ofm2 = 0, mb2 = 0;
float *grad_output_ptr = (((cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU) ? ((float*)scratch)+(cfg.C*cfg.K) : dout_act_ptr);
LIBXSMM_VLA_DECL(4, const float, doutput_orig, dout_act_ptr, nBlocksOFm, bn, bk);
LIBXSMM_VLA_DECL(4, float, doutput, grad_output_ptr, nBlocksOFm, bn, bk);
LIBXSMM_VLA_DECL(2, float, dbias, dbias_ptr, cfg.bk);
LIBXSMM_VLA_DECL(4, const unsigned char, relumask, relu_ptr, nBlocksOFm, cfg.bn, cfg.bk);
const libxsmm_blasint ifm_start = numa_thr_cfg->blocksIFm_tr_s[layer];
/* lazy barrier init */
libxsmm_barrier_init(cfg.barrier, ltid);
if ( (cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU ) {
for ( mb1ofm1 = eltwise_thr_begin; mb1ofm1 < eltwise_thr_end; ++mb1ofm1 ) {
mb1 = mb1ofm1%nBlocksMB;
ofm1 = mb1ofm1/nBlocksMB;
for ( mb2 = 0; mb2 < cfg.bn; ++mb2 ) {
for ( ofm2 = 0; ofm2 < cfg.bk; ++ofm2 ) {
float l_cur_out = LIBXSMM_VLA_ACCESS(4, doutput_orig, mb1, ofm1, mb2, ofm2, nBlocksOFm, cfg.bn, cfg.bk);
l_cur_out = (LIBXSMM_VLA_ACCESS(4, relumask, mb1, ofm1, mb2, ofm2, nBlocksOFm, cfg.bn, cfg.bk) != 0) ? l_cur_out : (float)0;
LIBXSMM_VLA_ACCESS(4, doutput, mb1, ofm1, mb2, ofm2, nBlocksOFm, cfg.bn, cfg.bk) = l_cur_out;
}
}
}
/* wait for eltwise to finish */
libxsmm_barrier_wait(cfg.barrier, ltid);
}
if ( (cfg.fuse_type & MY_ELTWISE_FUSE_BIAS) == MY_ELTWISE_FUSE_BIAS ) {
for ( ofm1 = dbias_thr_begin; ofm1 < dbias_thr_end; ++ofm1 ) {
for ( ofm2 = 0; ofm2 < cfg.bk; ++ofm2 ) {
LIBXSMM_VLA_ACCESS( 2, dbias, ofm1, ofm2, cfg.bk ) = 0.0f;
}
for ( mb1 = 0; mb1 < nBlocksMB; ++mb1 ) {
for ( mb2 = 0; mb2 < cfg.bn; ++mb2 ) {
for ( ofm2 = 0; ofm2 < cfg.bk; ++ofm2 ) {
LIBXSMM_VLA_ACCESS( 2, dbias, ofm1, ofm2, cfg.bk ) += LIBXSMM_VLA_ACCESS(4, doutput, mb1, ofm1, mb2, ofm2, nBlocksOFm, cfg.bn, cfg.bk);
}
}
}
}
/* wait for eltwise to finish */
libxsmm_barrier_wait(cfg.barrier, ltid);
}
if ( (pass & MY_PASS_BWD_D) == MY_PASS_BWD_D ) {
const libxsmm_blasint use_2d_blocking = cfg.bwd_2d_blocking;
/* number of tasks that could be run in parallel */
const libxsmm_blasint work = nBlocksIFm * nBlocksMB;
/* compute chunk size */
const libxsmm_blasint chunksize = (work % cfg.threads == 0) ? (work / cfg.threads) : ((work / cfg.threads) + 1);
/* compute thr_begin and thr_end */
const libxsmm_blasint thr_begin = (ltid * chunksize < work) ? (ltid * chunksize) : work;
const libxsmm_blasint thr_end = ((ltid + 1) * chunksize < work) ? ((ltid + 1) * chunksize) : work;
/* loop variables */
libxsmm_blasint ifm1 = 0, ifm2 = 0, mb1ifm1 = 0;
libxsmm_blasint N_tasks_per_thread = 0, M_tasks_per_thread = 0, my_M_start = 0, my_M_end = 0, my_N_start = 0, my_N_end = 0, my_col_id = 0, my_row_id = 0, col_teams = 0, row_teams = 0;
LIBXSMM_VLA_DECL(4, float, dinput, din_act_ptr, nBlocksIFm, bn, bc);
LIBXSMM_VLA_DECL(4, float, filter_tr, numa_thr_cfg->bwd_d_scratch, nBlocksOFm, bk, bc);
unsigned long long blocks = nBlocksOFm;
libxsmm_blasint KB_BLOCKS = nBlocksOFm, BF = 1;
BF = cfg.bwd_bf;
KB_BLOCKS = nBlocksOFm/BF;
blocks = KB_BLOCKS;
if (use_2d_blocking == 1) {
col_teams = cfg.bwd_col_teams;
row_teams = cfg.bwd_row_teams;
my_row_id = ltid % row_teams;
my_col_id = ltid / row_teams;
N_tasks_per_thread = LIBXSMM_UPDIV(nBlocksMB, col_teams);
M_tasks_per_thread = LIBXSMM_UPDIV(nBlocksIFm, row_teams);
my_N_start = LIBXSMM_MIN(my_col_id * N_tasks_per_thread, nBlocksMB);
my_N_end = LIBXSMM_MIN((my_col_id+1) * N_tasks_per_thread, nBlocksMB);
my_M_start = LIBXSMM_MIN(my_row_id * M_tasks_per_thread, nBlocksIFm);
my_M_end = LIBXSMM_MIN((my_row_id+1) * M_tasks_per_thread, nBlocksIFm);
}
if (use_2d_blocking == 1) {
if (BF > 1) {
for ( ofm1 = 0; ofm1 < BF; ++ofm1 ) {
for (ifm1 = my_M_start; ifm1 < my_M_end; ++ifm1) {
for (mb1 = my_N_start; mb1 < my_N_end; ++mb1) {
/* Initialize intermediate f32 tensor */
if ( ofm1 == 0 ) {
for ( mb2 = 0; mb2 < bn; ++mb2 ) {
for ( ifm2 = 0; ifm2 < bc; ++ifm2 ) {
LIBXSMM_VLA_ACCESS(4, dinput, mb1, ifm1, mb2, ifm2, nBlocksIFm, bn, bc) = (float)0;
}
}
}
cfg.gemm_bwd( &LIBXSMM_VLA_ACCESS(4, filter_tr, ifm1, ofm1*KB_BLOCKS, 0, 0, nBlocksOFm, bk, bc ),
&LIBXSMM_VLA_ACCESS(4, doutput, mb1, ofm1*KB_BLOCKS, 0, 0, nBlocksOFm, bn, bk),
&LIBXSMM_VLA_ACCESS(4, dinput, mb1, ifm1, 0, 0, nBlocksIFm, bn, bc), &blocks);
}
}
}
} else {
for (ifm1 = my_M_start; ifm1 < my_M_end; ++ifm1) {
for (mb1 = my_N_start; mb1 < my_N_end; ++mb1) {
cfg.gemm_bwd2( &LIBXSMM_VLA_ACCESS(4, filter_tr, ifm1, 0, 0, 0, nBlocksOFm, bk, bc),
&LIBXSMM_VLA_ACCESS(4, doutput, mb1, 0, 0, 0, nBlocksOFm, bn, bk),
&LIBXSMM_VLA_ACCESS(4, dinput, mb1, ifm1, 0, 0, nBlocksIFm, bn, bc), &blocks);
}
}
}
} else {
if (BF > 1) {
for ( ofm1 = 0; ofm1 < BF; ++ofm1 ) {
for ( mb1ifm1 = thr_begin; mb1ifm1 < thr_end; ++mb1ifm1 ) {
mb1 = mb1ifm1%nBlocksMB;
ifm1 = mb1ifm1/nBlocksMB;
/* Initialize intermediate f32 tensor */
if ( ofm1 == 0 ) {
for ( mb2 = 0; mb2 < bn; ++mb2 ) {
for ( ifm2 = 0; ifm2 < bc; ++ifm2 ) {
LIBXSMM_VLA_ACCESS(4, dinput, mb1, ifm1, mb2, ifm2, nBlocksIFm, bn, bc) = (float)0;
}
}
}
cfg.gemm_bwd( &LIBXSMM_VLA_ACCESS(4, filter_tr, ifm1, ofm1*KB_BLOCKS, 0, 0, nBlocksOFm, bk, bc ),
&LIBXSMM_VLA_ACCESS(4, doutput, mb1, ofm1*KB_BLOCKS, 0, 0, nBlocksOFm, bn, bk),
&LIBXSMM_VLA_ACCESS(4, dinput, mb1, ifm1, 0, 0, nBlocksIFm, bn, bc), &blocks);
}
}
} else {
for ( mb1ifm1 = thr_begin; mb1ifm1 < thr_end; ++mb1ifm1 ) {
mb1 = mb1ifm1%nBlocksMB;
ifm1 = mb1ifm1/nBlocksMB;
cfg.gemm_bwd2( &LIBXSMM_VLA_ACCESS(4, filter_tr, ifm1 - ifm_start, 0, 0, 0, nBlocksOFm, bk, bc ),
&LIBXSMM_VLA_ACCESS(4, doutput, mb1, 0, 0, 0, nBlocksOFm, bn, bk),
&LIBXSMM_VLA_ACCESS(4, dinput, mb1, ifm1, 0, 0, nBlocksIFm, bn, bc), &blocks);
}
}
}
libxsmm_barrier_wait(cfg.barrier, ltid);
}
if ( (pass & MY_PASS_BWD_W) == MY_PASS_BWD_W ) {
/* number of tasks that could be run in parallel */
const libxsmm_blasint ofm_subtasks = (cfg.upd_2d_blocking == 1) ? 1 : cfg.ofm_subtasks;
const libxsmm_blasint ifm_subtasks = (cfg.upd_2d_blocking == 1) ? 1 : cfg.ifm_subtasks;
const libxsmm_blasint bbk = (cfg.upd_2d_blocking == 1) ? bk : bk/ofm_subtasks;
const libxsmm_blasint bbc = (cfg.upd_2d_blocking == 1) ? bc : bc/ifm_subtasks;
const libxsmm_blasint work = nBlocksIFm * ifm_subtasks * nBlocksOFm * ofm_subtasks;
const libxsmm_blasint Cck_work = nBlocksIFm * ifm_subtasks * ofm_subtasks;
const libxsmm_blasint Cc_work = nBlocksIFm * ifm_subtasks;
/* 2D blocking parameters */
libxsmm_blasint use_2d_blocking = cfg.upd_2d_blocking;
libxsmm_blasint N_tasks_per_thread = 0, M_tasks_per_thread = 0, my_M_start = 0, my_M_end = 0, my_N_start = 0, my_N_end = 0, my_col_id = 0, my_row_id = 0, col_teams = 0, row_teams = 0;
/* compute chunk size */
const libxsmm_blasint chunksize = (work % cfg.threads == 0) ? (work / cfg.threads) : ((work / cfg.threads) + 1);
/* compute thr_begin and thr_end */
const libxsmm_blasint thr_begin = (ltid * chunksize < work) ? (ltid * chunksize) : work;
const libxsmm_blasint thr_end = ((ltid + 1) * chunksize < work) ? ((ltid + 1) * chunksize) : work;
libxsmm_blasint BF = cfg.upd_bf;
/* loop variables */
libxsmm_blasint ifm1ofm1 = 0, ifm1 = 0, ifm2 = 0, bfn = 0, ii = 0, jj = 0;
/* Batch reduce related variables */
unsigned long long blocks = nBlocksMB/BF;
LIBXSMM_VLA_DECL(4, const float, input, in_act_ptr, nBlocksIFm, bn, bc);
LIBXSMM_VLA_DECL(4, float, dfilter, dwt_ptr, nBlocksIFm, bc, bk);
if (use_2d_blocking == 1) {
col_teams = cfg.upd_col_teams;
row_teams = cfg.upd_row_teams;
my_row_id = ltid % row_teams;
my_col_id = ltid / row_teams;
N_tasks_per_thread = LIBXSMM_UPDIV(nBlocksIFm, col_teams);
M_tasks_per_thread = LIBXSMM_UPDIV(nBlocksOFm, row_teams);
my_N_start = LIBXSMM_MIN(my_col_id * N_tasks_per_thread, nBlocksIFm);
my_N_end = LIBXSMM_MIN((my_col_id+1) * N_tasks_per_thread, nBlocksIFm);
my_M_start = LIBXSMM_MIN(my_row_id * M_tasks_per_thread, nBlocksOFm);
my_M_end = LIBXSMM_MIN((my_row_id+1) * M_tasks_per_thread, nBlocksOFm);
}
if (use_2d_blocking == 1) {
if (BF == 1) {
for (ofm1 = my_M_start; ofm1 < my_M_end; ++ofm1) {
for (ifm1 = my_N_start; ifm1 < my_N_end; ++ifm1) {
cfg.gemm_upd2(&LIBXSMM_VLA_ACCESS(4, doutput, 0, ofm1, 0, 0, nBlocksOFm, bn, bk),
&LIBXSMM_VLA_ACCESS(4, input, 0, ifm1, 0, 0, nBlocksIFm, bn, bc),
&LIBXSMM_VLA_ACCESS(4, dfilter, ofm1, ifm1, 0, 0, nBlocksIFm, bc, bk), &blocks);
}
}
} else {
for (bfn = 0; bfn < BF; bfn++) {
for (ofm1 = my_M_start; ofm1 < my_M_end; ++ofm1) {
for (ifm1 = my_N_start; ifm1 < my_N_end; ++ifm1) {
/* initialize current work task to zero */
if (bfn == 0) {
for (ii = 0; ii<bc; ii++) {
for (jj = 0; jj<bk; jj++) {
LIBXSMM_VLA_ACCESS(4, dfilter, ofm1, ifm1, ii, jj, nBlocksIFm, bc, bk) = (float)0;
}
}
}
cfg.gemm_upd( &LIBXSMM_VLA_ACCESS(4, doutput, bfn*blocks, ofm1, 0, 0, nBlocksOFm, bn, bk),
&LIBXSMM_VLA_ACCESS(4, input, bfn*blocks, ifm1, 0, 0, nBlocksIFm, bn, bc),
&LIBXSMM_VLA_ACCESS(4, dfilter, ofm1, ifm1, 0, 0, nBlocksIFm, bc, bk), &blocks);
}
}
}
}
} else {
if (BF == 1) {
for ( ifm1ofm1 = thr_begin; ifm1ofm1 < thr_end; ++ifm1ofm1 ) {
ofm1 = ifm1ofm1 / Cck_work;
ofm2 = (ifm1ofm1 % Cck_work) / Cc_work;
ifm1 = ((ifm1ofm1 % Cck_work) % Cc_work) / ifm_subtasks;
ifm2 = ((ifm1ofm1 % Cck_work) % Cc_work) % ifm_subtasks;
cfg.gemm_upd2( &LIBXSMM_VLA_ACCESS(4, doutput, 0, ofm1, 0, ofm2*bbk, nBlocksOFm, bn, bk),
&LIBXSMM_VLA_ACCESS(4, input, 0, ifm1, 0, ifm2*bbc, nBlocksIFm, bn, bc),
&LIBXSMM_VLA_ACCESS(4, dfilter, ofm1, ifm1, ifm2*bbc, ofm2*bbk, nBlocksIFm, bc, bk), &blocks);
}
} else {
for (bfn = 0; bfn < BF; bfn++) {
for ( ifm1ofm1 = thr_begin; ifm1ofm1 < thr_end; ++ifm1ofm1 ) {
ofm1 = ifm1ofm1 / Cck_work;
ofm2 = (ifm1ofm1 % Cck_work) / Cc_work;
ifm1 = ((ifm1ofm1 % Cck_work) % Cc_work) / ifm_subtasks;
ifm2 = ((ifm1ofm1 % Cck_work) % Cc_work) % ifm_subtasks;
/* initialize current work task to zero */
if (bfn == 0) {
for (ii = 0; ii<bbc; ii++) {
for (jj = 0; jj<bbk; jj++) {
LIBXSMM_VLA_ACCESS(4, dfilter, ofm1, ifm1, ifm2*bbc+ii, ofm2*bbk+jj, nBlocksIFm, bc, bk) = (float)0;
}
}
}
cfg.gemm_upd( &LIBXSMM_VLA_ACCESS(4, doutput, bfn*blocks, ofm1, 0, ofm2*bbk, nBlocksOFm, bn, bk),
&LIBXSMM_VLA_ACCESS(4, input, bfn*blocks, ifm1, 0, ifm2*bbc, nBlocksIFm, bn, bc),
&LIBXSMM_VLA_ACCESS(4, dfilter, ofm1, ifm1, ifm2*bbc, ofm2*bbk, nBlocksIFm, bc, bk), &blocks);
}
}
}
}
libxsmm_barrier_wait(cfg.barrier, ltid);
}
}
/* SGD weight update for layer l, applied to this NUMA node's local weight copy:
 *   scratch[l] -= cfg.lr * delwt
 * over the slice of OFM blocks [blocksOFm_s[l], blocksOFm_e[l]] owned by this node,
 * with the work split evenly across the node's threads.
 * NOTE(review): 'start_tid' is unused here; the node-local thread id is derived
 * from numa_thr_cfg->thr_s instead. */
void my_opt_exec( my_opt_config cfg, const float* delwt_ptr, int start_tid, int my_tid,
my_numa_thr_cfg *numa_thr_cfg, int l, my_fc_fwd_config my_fc_fwd) {
/* logical thread id within this NUMA node's [thr_s, thr_e] thread range */
const libxsmm_blasint ltid = my_tid - numa_thr_cfg->thr_s;
const libxsmm_blasint nBlocksIFm = my_fc_fwd.C / my_fc_fwd.bc;
/* floats per (IFM block, OFM block) weight tile */
const libxsmm_blasint IFM_shift = my_fc_fwd.bc * my_fc_fwd.bk;
/* floats per OFM block row (all IFM tiles of one OFM block) */
const libxsmm_blasint OFM_shift = nBlocksIFm * my_fc_fwd.bc * my_fc_fwd.bk;
/* tasks = one weight tile per (local OFM block, IFM block) pair */
const libxsmm_blasint work = ((numa_thr_cfg->blocksOFm_e[l] - numa_thr_cfg->blocksOFm_s[l]) + 1) * nBlocksIFm;
/* compute chunk size */
/* NOTE(review): thr = thr_e - thr_s counts one fewer than the inclusive
 * thread range; chunks come out slightly larger but thr_begin/thr_end are
 * clamped to 'work', so coverage is still correct -- confirm intent */
int thr = numa_thr_cfg->thr_e - numa_thr_cfg->thr_s;
const libxsmm_blasint chunksize = (work % thr == 0) ? (work / thr) : ((work / thr) + 1);
/* compute thr_begin and thr_end */
const libxsmm_blasint thr_begin = (ltid * chunksize < work) ? (ltid * chunksize) : work;
const libxsmm_blasint thr_end = ((ltid + 1) * chunksize < work) ? ((ltid + 1) * chunksize) : work;
/* NOTE(review): barrier init/wait use the global my_tid while other kernels
 * in this file use the node-local ltid -- verify barrier membership */
libxsmm_barrier_init( cfg.barrier, my_tid );
/* broadcast learning rate into all 16 lanes */
__m512 vlr = _mm512_set1_ps( cfg.lr );
/* gradient region corresponding to this node's first local OFM block */
float *dw_prt = (float*)delwt_ptr + numa_thr_cfg->blocksOFm_s[l] * OFM_shift;
int j = 0, i = 0;
for (j = thr_begin; j < thr_end; j++) {
int ofm = j / nBlocksIFm;
int ifm = j % nBlocksIFm;
float *out = numa_thr_cfg->scratch[l] + ofm * OFM_shift + ifm * IFM_shift;
float *inp = dw_prt + ofm * OFM_shift + ifm * IFM_shift;
/* vectorized w -= lr * dw; assumes IFM_shift is a multiple of 16 floats
 * (AVX-512 width) -- TODO confirm block sizes guarantee this */
for (i = 0; i < IFM_shift; i += 16)
_mm512_storeu_ps( out+i, _mm512_sub_ps( _mm512_loadu_ps( out+i ), _mm512_mul_ps( vlr, _mm512_loadu_ps( inp + i ) ) ) ) ;
}
libxsmm_barrier_wait( cfg.barrier, my_tid );
}
/* Softmax forward pass plus averaged cross-entropy loss.
 * Activations use a blocked [Bn][Bc][bn][bc] layout; each parallel task is one
 * image row (img1, img2): shift by the row max for numerical stability,
 * exponentiate, and normalize.  Thread 0 then computes
 *   loss = -(1/N) * sum_i log(softmax(x_i)[label_i])
 * single-threaded after the barrier.  'scratch' is unused here. */
void my_smax_fwd_exec( my_smax_fwd_config cfg, const float* in_act_ptr, float* out_act_ptr, const int* label_ptr, float* loss, int start_tid, int my_tid, void* scratch ) {
  libxsmm_blasint bn = cfg.bn;
  libxsmm_blasint Bn = cfg.N/cfg.bn;
  libxsmm_blasint bc = cfg.bc;
  libxsmm_blasint Bc = cfg.C/cfg.bc;
  /* loop counters */
  libxsmm_blasint i = 0;
  libxsmm_blasint img1, img2, ifm1, ifm2;
  /* computing first logical thread */
  const libxsmm_blasint ltid = my_tid - start_tid;
  /* number of tasks (image rows) that could run in parallel for the batch */
  const libxsmm_blasint n_work = Bn * bn;
  /* compute chunk size */
  const libxsmm_blasint n_chunksize = (n_work % cfg.threads == 0) ? (n_work / cfg.threads) : ((n_work / cfg.threads) + 1);
  /* compute thr_begin and thr_end */
  const libxsmm_blasint n_thr_begin = (ltid * n_chunksize < n_work) ? (ltid * n_chunksize) : n_work;
  const libxsmm_blasint n_thr_end = ((ltid + 1) * n_chunksize < n_work) ? ((ltid + 1) * n_chunksize) : n_work;
  LIBXSMM_VLA_DECL(4, float, output, out_act_ptr, Bc, bn, bc);
  LIBXSMM_VLA_DECL(4, const float, input, in_act_ptr, Bc, bn, bc);
  LIBXSMM_VLA_DECL(2, const int, label, label_ptr, bn);
  /* lazy barrier init */
  libxsmm_barrier_init( cfg.barrier, ltid );
  for ( i = n_thr_begin; i < n_thr_end; ++i ) {
    /* BUGFIX: seed the running maximum with -FLT_MAX; FLT_MIN is the smallest
     * POSITIVE float, which defeats the max-shift when all inputs are negative */
    float max = -FLT_MAX;
    float sum_of_exp = 0.0f;
    img1 = i/bn;
    img2 = i%bn;
    /* copy input to output and compute the per-row maximum */
    for ( ifm1 = 0; ifm1 < Bc; ++ifm1 ) {
      for ( ifm2 = 0; ifm2 < bc; ++ifm2 ) {
        LIBXSMM_VLA_ACCESS( 4, output, img1, ifm1, img2, ifm2, Bc, bn, bc ) = LIBXSMM_VLA_ACCESS( 4, input, img1, ifm1, img2, ifm2, Bc, bn, bc );
        if ( LIBXSMM_VLA_ACCESS( 4, input, img1, ifm1, img2, ifm2, Bc, bn, bc ) > max ) {
          max = LIBXSMM_VLA_ACCESS( 4, input, img1, ifm1, img2, ifm2, Bc, bn, bc );
        }
      }
    }
    /* sum exp over outputs (shifted by max for stability) */
    for ( ifm1 = 0; ifm1 < Bc; ++ifm1 ) {
      for ( ifm2 = 0; ifm2 < bc; ++ifm2 ) {
        LIBXSMM_VLA_ACCESS( 4, output, img1, ifm1, img2, ifm2, Bc, bn, bc ) = (float)exp( (double)(LIBXSMM_VLA_ACCESS( 4, output, img1, ifm1, img2, ifm2, Bc, bn, bc ) - max) );
        sum_of_exp += LIBXSMM_VLA_ACCESS( 4, output, img1, ifm1, img2, ifm2, Bc, bn, bc );
      }
    }
    /* scale output so the row sums to 1 */
    sum_of_exp = 1.0f/sum_of_exp;
    for ( ifm1 = 0; ifm1 < Bc; ++ifm1 ) {
      for ( ifm2 = 0; ifm2 < bc; ++ifm2 ) {
        LIBXSMM_VLA_ACCESS( 4, output, img1, ifm1, img2, ifm2, Bc, bn, bc ) = LIBXSMM_VLA_ACCESS( 4, output, img1, ifm1, img2, ifm2, Bc, bn, bc ) * sum_of_exp;
      }
    }
  }
  libxsmm_barrier_wait( cfg.barrier, ltid );
  /* calculate loss single threaded */
  if ( ltid == 0 ) {
    (*loss) = 0.0f;
    for ( img1 = 0; img1 < Bn; ++img1 ) {
      for ( img2 = 0; img2 <bn; ++img2 ) {
        libxsmm_blasint ifm = (libxsmm_blasint)LIBXSMM_VLA_ACCESS( 2, label, img1, img2, bn );
        libxsmm_blasint ifm1b = ifm/bc;
        libxsmm_blasint ifm2b = ifm%bc;
        /* clamp to FLT_MIN so log() never sees zero */
        float val = ( LIBXSMM_VLA_ACCESS( 4, output, img1, ifm1b, img2, ifm2b, Bc, bn, bc ) > FLT_MIN ) ? LIBXSMM_VLA_ACCESS( 4, output, img1, ifm1b, img2, ifm2b, Bc, bn, bc ) : FLT_MIN;
        /* BUGFIX: accumulate over ALL samples ('+='); '=' kept only the last
         * sample's log-probability, making the averaged loss below wrong */
        *loss += LIBXSMM_LOGF( val );
      }
    }
    *loss = ((-1.0f)*(*loss))/cfg.N;
  }
  libxsmm_barrier_wait( cfg.barrier, ltid );
}
/* Softmax + cross-entropy backward pass.
 * Activations use a blocked [Bn][Bc][bn][bc] layout; each parallel task is one
 * image row (img1, img2).  Gradient w.r.t. the logits:
 *   dinput = (softmax(x) - onehot(label)) * loss_weight / N
 * 'scratch' is unused here. */
void my_smax_bwd_exec( my_smax_bwd_config cfg, float* delin_act_ptr, const float* out_act_ptr, const int* label_ptr, int start_tid, int my_tid, void* scratch ) {
  libxsmm_blasint bn = cfg.bn;
  libxsmm_blasint Bn = cfg.N/cfg.bn;
  libxsmm_blasint bc = cfg.bc;
  libxsmm_blasint Bc = cfg.C/cfg.bc;
  /* loop counters */
  libxsmm_blasint i = 0;
  libxsmm_blasint img1, img2, ifm1, ifm2;
  float rcp_N = 1.0f/cfg.N;
  /* computing first logical thread */
  const libxsmm_blasint ltid = my_tid - start_tid;
  /* number of tasks (image rows) that could run in parallel for the batch */
  const libxsmm_blasint n_work = Bn * bn;
  /* compute chunk size */
  const libxsmm_blasint n_chunksize = (n_work % cfg.threads == 0) ? (n_work / cfg.threads) : ((n_work / cfg.threads) + 1);
  /* compute thr_begin and thr_end */
  const libxsmm_blasint n_thr_begin = (ltid * n_chunksize < n_work) ? (ltid * n_chunksize) : n_work;
  const libxsmm_blasint n_thr_end = ((ltid + 1) * n_chunksize < n_work) ? ((ltid + 1) * n_chunksize) : n_work;
  LIBXSMM_VLA_DECL(4, const float, output, out_act_ptr, Bc, bn, bc);
  LIBXSMM_VLA_DECL(4, float, dinput, delin_act_ptr, Bc, bn, bc);
  LIBXSMM_VLA_DECL(2, const int, label, label_ptr, bn);
  /* lazy barrier init */
  libxsmm_barrier_init( cfg.barrier, ltid );
  for ( i = n_thr_begin; i < n_thr_end; ++i ) {
    img1 = i/bn;
    img2 = i%bn;
    for ( ifm1 = 0; ifm1 < Bc; ++ifm1 ) {
      for ( ifm2 = 0; ifm2 < bc; ++ifm2 ) {
        /* BUGFIX: the flat class index of (ifm1, ifm2) is ifm1*bc + ifm2
         * (block index times block SIZE) to match the fwd pass, which decodes
         * labels as ifm/bc and ifm%bc.  The previous 'ifm1*Bc + ifm2' only
         * coincides with it when Bc == bc, mis-locating the label otherwise. */
        if ( (ifm1*bc)+ifm2 == (libxsmm_blasint)LIBXSMM_VLA_ACCESS( 2, label, img1, img2, bn ) ) {
          LIBXSMM_VLA_ACCESS( 4, dinput, img1, ifm1, img2, ifm2, Bc, bn, bc ) =
            ( LIBXSMM_VLA_ACCESS( 4, output, img1, ifm1, img2, ifm2, Bc, bn, bc ) - 1.0f ) * rcp_N * cfg.loss_weight;
        } else {
          LIBXSMM_VLA_ACCESS( 4, dinput, img1, ifm1, img2, ifm2, Bc, bn, bc ) =
            LIBXSMM_VLA_ACCESS( 4, output, img1, ifm1, img2, ifm2, Bc, bn, bc ) * rcp_N * cfg.loss_weight;
        }
      }
    }
  }
  libxsmm_barrier_wait( cfg.barrier, ltid );
}
/* Allocate 'size' bytes on the given NUMA node.
 * The active path simply forwards to numa_alloc_onnode() and IGNORES
 * 'alignment_' (numa_alloc_onnode returns page-aligned memory anyway).
 * The disabled (#if 0) variant would over-allocate, return an
 * 'alignment_'-aligned pointer, and stash the byte offset just before it.
 * NOTE(review): if re-enabled, numa_free_aligned() rewinds that offset in
 * size_t-element units (not bytes) and frees with the original 'size'
 * rather than the enlarged one -- both would need fixing first. */
void *numa_alloc_onnode_aligned(size_t size, int numa_node, int alignment_) {
#if 0
int alignment = alignment_ - 1;
size_t adj_size = sizeof(size_t) + alignment;
void *r_ptr = NULL;
void *t_ptr = numa_alloc_onnode(size + adj_size, numa_node);
if (t_ptr == NULL) return NULL;
r_ptr = (void *)(((size_t)t_ptr + adj_size) & ~alignment);
*((size_t*)r_ptr - 1) = (size_t)r_ptr - (size_t)t_ptr;
return r_ptr;
#else
return numa_alloc_onnode(size, numa_node);
#endif
}
/* Release memory obtained from numa_alloc_onnode_aligned().
 * The active path forwards to numa_free(); per libnuma, 'size' must equal
 * the size passed at allocation time.
 * NOTE(review): the disabled (#if 0) path rewinds by '*((size_t*)ptr - 1)'
 * using size_t-element pointer arithmetic, but the allocator stores a BYTE
 * offset in that slot, and it frees 'size' instead of the enlarged
 * allocation -- fix both before re-enabling. */
void numa_free_aligned(void *ptr, size_t size) {
#if 0
if (ptr == NULL) return;
void *t_ptr = (void*)((size_t*)ptr - *((size_t*)ptr - 1));
numa_free(t_ptr, size);
#else
numa_free(ptr, size);
#endif
}
/* Discover the NUMA topology and assign each configured node an inclusive
 * thread range [thr_s, thr_e] by walking the first n_threads CPU ids against
 * each node's CPU mask.  Also allocates the per-node, per-layer bookkeeping
 * arrays used by the setup_/allocate_/copy_ helpers below.
 * Returns 1 on success, -1 on allocation failure. */
int setup_my_numa(my_numa_thr_cfg **numa_thr_cfg_, int num_layers, int n_threads) {
  int max_nodes = numa_max_node() + 1;
  int max_cfg_nodes = numa_num_configured_nodes();
  int max_cfg_cpus = numa_num_configured_cpus();
  int max_task_cpus = numa_num_task_cpus();
  my_numa_thr_cfg *numa_thr_cfg = (my_numa_thr_cfg *) malloc(sizeof(my_numa_thr_cfg) * max_cfg_nodes);
  struct bitmask* bmask;
  int thr_count = 0, i = 0;
  /* BUGFIX: check allocations instead of dereferencing NULL */
  if (numa_thr_cfg == NULL) return -1;
  printf("NUMA configuration:\n");
  printf("There are %d numa nodes on the system\n", max_nodes);
  printf("There are %d configured numa nodes on the system\n", max_cfg_nodes);
  printf("There are %d configured CPUs on the system\n", max_cfg_cpus);
  printf("There are %d CPUs asigned for the current task\n", max_task_cpus);
  bmask = numa_bitmask_alloc(max_cfg_cpus);
  if (bmask == NULL) { free(numa_thr_cfg); return -1; }
  for (i = 0; i < max_cfg_nodes; i++) {
    size_t t; /* BUGFIX: bmask->size is unsigned; avoid signed/unsigned compare */
    int num_threads_in_mask = 0;
    int node_threads = 0;
    numa_node_to_cpus(i, bmask);
    numa_thr_cfg[i].scratch = (float**) malloc(sizeof(float*) * num_layers);
    numa_thr_cfg[i].layer_size = (size_t*)malloc(sizeof(size_t)*num_layers);
    numa_thr_cfg[i].blocksOFm_s = (int*)malloc(sizeof(int)*num_layers);
    numa_thr_cfg[i].blocksOFm_e = (int*)malloc(sizeof(int)*num_layers);
    numa_thr_cfg[i].blocksIFm_s = (int*)malloc(sizeof(int)*num_layers);
    numa_thr_cfg[i].blocksIFm_e = (int*)malloc(sizeof(int)*num_layers);
    numa_thr_cfg[i].blocksOFm_tr_s = (int*)malloc(sizeof(int)*num_layers);
    numa_thr_cfg[i].blocksOFm_tr_e = (int*)malloc(sizeof(int)*num_layers);
    numa_thr_cfg[i].blocksIFm_tr_s = (int*)malloc(sizeof(int)*num_layers);
    numa_thr_cfg[i].blocksIFm_tr_e = (int*)malloc(sizeof(int)*num_layers);
    /* BUGFIX: default to an empty range [0,-1]; previously thr_s/thr_e stayed
     * uninitialized (malloc garbage) for nodes whose CPUs are not covered by
     * the first n_threads ids, and later loops iterate thr_s..thr_e */
    numa_thr_cfg[i].thr_s = 0;
    numa_thr_cfg[i].thr_e = -1;
    /* count the CPUs this node owns */
    for (t = 0; t < bmask->size; t++)
      if (numa_bitmask_isbitset(bmask, t)) num_threads_in_mask++;
    /* find the first global thread id belonging to this node
     * NOTE(review): node_threads also counts skipped (unset) ids here,
     * mirroring the original scan -- confirm against intended CPU layout */
    while (thr_count < n_threads && node_threads < num_threads_in_mask) {
      if (numa_bitmask_isbitset(bmask, thr_count)) {
        numa_thr_cfg[i].thr_s = thr_count;
        break;
      }
      thr_count++; node_threads++;
    }
    /* extend the range to the last thread id on this node */
    while (thr_count < n_threads && node_threads < num_threads_in_mask) {
      if (numa_bitmask_isbitset(bmask, thr_count))
        numa_thr_cfg[i].thr_e = thr_count;
      thr_count++; node_threads++;
    }
  }
  numa_bitmask_free(bmask); /* BUGFIX: bitmask was leaked */
  *numa_thr_cfg_ = numa_thr_cfg;
  return 1;
}
/* For every NUMA node and layer, record which contiguous range of OFM blocks
 * [blocksOFm_s, blocksOFm_e] the node's threads touch in the forward pass,
 * mirroring the work partitioning of my_fc_fwd_exec (2D-blocked or flat).
 * Returns 1 on success, -1 for unsupported configurations (fwd_bf > 1). */
int setup_my_numa_fwd(my_numa_thr_cfg **numa_thr_cfg_, int num_layers, my_fc_fwd_config* my_fc_fwd) {
  my_numa_thr_cfg *const node_cfg = *numa_thr_cfg_;
  const int n_nodes = numa_num_configured_nodes();
  int node, layer, tid;
  for (node = 0; node < n_nodes; node++) {
    for (layer = 0; layer < num_layers; layer++) {
      const libxsmm_blasint nBlocksOFm = my_fc_fwd[layer].K / my_fc_fwd[layer].bk;
      const libxsmm_blasint nBlocksMB = my_fc_fwd[layer].N / my_fc_fwd[layer].bn;
      if (my_fc_fwd[layer].fwd_bf > 1) {
        printf("@@@ NUMA ERROR: doesn't support this configuration\n");
        return -1;
      }
      /* start from an empty range and widen it over this node's threads */
      node_cfg[node].blocksOFm_s[layer] = nBlocksOFm;
      node_cfg[node].blocksOFm_e[layer] = 0;
      if (my_fc_fwd[layer].fwd_2d_blocking == 1) {
        const libxsmm_blasint row_teams = my_fc_fwd[layer].fwd_row_teams;
        const libxsmm_blasint M_per_thr = LIBXSMM_UPDIV(nBlocksOFm, row_teams);
        for (tid = node_cfg[node].thr_s; tid <= node_cfg[node].thr_e; tid++) {
          const libxsmm_blasint row_id = tid % row_teams; /* ltid */
          const libxsmm_blasint m_s = LIBXSMM_MIN(row_id * M_per_thr, nBlocksOFm);
          const libxsmm_blasint m_e = LIBXSMM_MIN((row_id + 1) * M_per_thr, nBlocksOFm);
          if (m_s < node_cfg[node].blocksOFm_s[layer]) node_cfg[node].blocksOFm_s[layer] = m_s;
          if (m_e > node_cfg[node].blocksOFm_e[layer]) node_cfg[node].blocksOFm_e[layer] = m_e;
        }
      } else {
        /* chunking is identical for every thread, so compute it once */
        const libxsmm_blasint work = nBlocksOFm * nBlocksMB;
        const libxsmm_blasint chunk = (work % my_fc_fwd[layer].threads == 0)
          ? (work / my_fc_fwd[layer].threads)
          : ((work / my_fc_fwd[layer].threads) + 1);
        for (tid = node_cfg[node].thr_s; tid <= node_cfg[node].thr_e; tid++) {
          const libxsmm_blasint t_begin = (tid * chunk < work) ? (tid * chunk) : work;
          const libxsmm_blasint t_end = ((tid + 1) * chunk < work) ? ((tid + 1) * chunk) : work;
          const int ofm_s = t_begin / nBlocksMB;
          const int ofm_e = (t_end - 1) / nBlocksMB;
          if (ofm_s < node_cfg[node].blocksOFm_s[layer]) node_cfg[node].blocksOFm_s[layer] = ofm_s;
          if (ofm_e > node_cfg[node].blocksOFm_e[layer]) node_cfg[node].blocksOFm_e[layer] = ofm_e;
        }
      }
    }
  }
  return 1;
}
/* Build, per layer, a lookup table fwd_ofm_to_node[layer][ofm] mapping each
 * OFM block to the NUMA node whose [blocksOFm_s, blocksOFm_e] range covers
 * it.  Tables are malloc'ed here and released by free_fwd_ofm_to_node(). */
void set_fwd_ofm_to_node(int **fwd_ofm_to_node, my_numa_thr_cfg **numa_thr_cfg_, int num_layers, my_fc_fwd_config* my_fc_fwd) {
  const int n_nodes = numa_num_configured_nodes();
  my_numa_thr_cfg *const node_cfg = *numa_thr_cfg_;
  int layer, blk, node;
  for (layer = 0; layer < num_layers; layer++) {
    const libxsmm_blasint nBlocksOFm = my_fc_fwd[layer].K / my_fc_fwd[layer].bk;
    int *const map = (int*) malloc(sizeof(int) * nBlocksOFm);
    fwd_ofm_to_node[layer] = map;
    /* later nodes win on overlap, matching the original scan order */
    for (node = 0; node < n_nodes; node++) {
      for (blk = 0; blk < nBlocksOFm; blk++) {
        const int owned = (blk >= node_cfg[node].blocksOFm_s[layer])
                       && (blk <= node_cfg[node].blocksOFm_e[layer]);
        if (owned) map[blk] = node;
      }
    }
  }
}
/* Release the per-layer OFM-to-node tables built by set_fwd_ofm_to_node().
 * The outer fwd_ofm_to_node array itself is owned by the caller. */
void free_fwd_ofm_to_node(int **fwd_ofm_to_node, int num_layers) {
  int layer = 0;
  while (layer < num_layers) {
    free(fwd_ofm_to_node[layer]);
    ++layer;
  }
}
/* For every NUMA node and layer, record which contiguous range of transposed
 * IFM blocks [blocksIFm_tr_s, blocksIFm_tr_e] the node's threads touch in the
 * backward-data pass, mirroring the flat work partitioning of the bwd kernel.
 * Returns 1 on success, -1 for unsupported configurations
 * (bwd_bf > 1 or 2D blocking). */
int setup_my_numa_bwd_d(my_numa_thr_cfg **numa_thr_cfg_, int num_layers, my_fc_bwd_config* my_fc_bwd) {
  my_numa_thr_cfg *const node_cfg = *numa_thr_cfg_;
  const int n_nodes = numa_num_configured_nodes();
  int node, layer, tid;
  for (node = 0; node < n_nodes; node++) {
    for (layer = 0; layer < num_layers; layer++) {
      const libxsmm_blasint nBlocksIFm = my_fc_bwd[layer].C / my_fc_bwd[layer].bc;
      const libxsmm_blasint nBlocksMB = my_fc_bwd[layer].N / my_fc_bwd[layer].bn;
      /* both unsupported cases report identically and abort */
      if (my_fc_bwd[layer].bwd_bf > 1 || my_fc_bwd[layer].bwd_2d_blocking == 1) {
        printf("@@@ NUMA ERROR: doesn't support this configuration\n");
        return -1;
      }
      {
        /* chunking is identical for every thread, so compute it once */
        const libxsmm_blasint work = nBlocksIFm * nBlocksMB;
        const libxsmm_blasint chunk = (work % my_fc_bwd[layer].threads == 0)
          ? (work / my_fc_bwd[layer].threads)
          : ((work / my_fc_bwd[layer].threads) + 1);
        /* start from an empty range and widen it over this node's threads */
        node_cfg[node].blocksIFm_tr_s[layer] = nBlocksIFm;
        node_cfg[node].blocksIFm_tr_e[layer] = 0;
        for (tid = node_cfg[node].thr_s; tid <= node_cfg[node].thr_e; tid++) {
          const libxsmm_blasint t_begin = (tid * chunk < work) ? (tid * chunk) : work;
          const libxsmm_blasint t_end = ((tid + 1) * chunk < work) ? ((tid + 1) * chunk) : work;
          const int ifm_s = t_begin / nBlocksMB;
          const int ifm_e = (t_end - 1) / nBlocksMB;
          if (ifm_s < node_cfg[node].blocksIFm_tr_s[layer]) node_cfg[node].blocksIFm_tr_s[layer] = ifm_s;
          if (ifm_e > node_cfg[node].blocksIFm_tr_e[layer]) node_cfg[node].blocksIFm_tr_e[layer] = ifm_e;
        }
      }
    }
  }
  return 1;
}
/* Allocate, on each NUMA node, a per-layer scratch buffer big enough to hold
 * that node's local slice of the layer's weights (one OFM_shift-sized block
 * per locally owned OFM block).  Nodes owning no blocks of a layer are
 * skipped.  Returns 1 on success, -1 if a node-local allocation fails. */
int allocate_numa_buffers_fwd(my_numa_thr_cfg **numa_thr_cfg_, int num_layers, my_fc_fwd_config* my_fc_fwd) {
  my_numa_thr_cfg *const node_cfg = *numa_thr_cfg_;
  const int n_nodes = numa_num_configured_nodes();
  int node, layer;
  for (node = 0; node < n_nodes; node++) {
    for (layer = 0; layer < num_layers; layer++) {
      const libxsmm_blasint nBlocksIFm = my_fc_fwd[layer].C / my_fc_fwd[layer].bc;
      const libxsmm_blasint OFM_shift = nBlocksIFm * my_fc_fwd[layer].bc * my_fc_fwd[layer].bk;
      const int n_local_blocks = (node_cfg[node].blocksOFm_e[layer] - node_cfg[node].blocksOFm_s[layer]) + 1;
      if (n_local_blocks <= 0)
        continue; /* this node owns nothing of this layer */
      node_cfg[node].layer_size[layer] = sizeof(float) * (n_local_blocks * OFM_shift);
      node_cfg[node].scratch[layer] =
        (float*)numa_alloc_onnode_aligned(node_cfg[node].layer_size[layer], node, 2097152);
      if (NULL == node_cfg[node].scratch[layer]) {
        printf("@@@ NUMA ERROR: cannot allocate on node #%d\n", node);
        return -1;
      }
    }
  }
  return 1;
}
/* Allocate, on each NUMA node, ONE backward-data scratch buffer sized for the
 * largest per-layer requirement: the node's local transposed-IFM block count
 * times that layer's IFM_shift, maximized over all layers.
 * Returns 1 on success, -1 if a node-local allocation fails. */
int allocate_numa_buffers_bwd_d(my_numa_thr_cfg **numa_thr_cfg_, int num_layers, my_fc_bwd_config* my_fc_bwd) {
  my_numa_thr_cfg *const node_cfg = *numa_thr_cfg_;
  const int n_nodes = numa_num_configured_nodes();
  int node, layer;
  for (node = 0; node < n_nodes; node++) {
    int max_elems = 0; /* largest float count needed by any layer on this node */
    for (layer = 0; layer < num_layers; layer++) {
      const libxsmm_blasint nBlocksOFm = my_fc_bwd[layer].K / my_fc_bwd[layer].bk;
      const libxsmm_blasint IFM_shift = nBlocksOFm * my_fc_bwd[layer].bc * my_fc_bwd[layer].bk;
      const int elems = ((node_cfg[node].blocksIFm_tr_e[layer] - node_cfg[node].blocksIFm_tr_s[layer]) + 1) * IFM_shift;
      if (elems >= max_elems)
        max_elems = elems;
    }
    node_cfg[node].bwd_d_scratch_size = sizeof(float) * (max_elems);
    node_cfg[node].bwd_d_scratch =
      (float*)numa_alloc_onnode_aligned(node_cfg[node].bwd_d_scratch_size, node, 2097152);
    if (NULL == node_cfg[node].bwd_d_scratch) {
      printf("@@@ NUMA ERROR: cannot allocate on node #%d\n", node);
      return -1;
    }
  }
  return 1;
}
/* One-time (inference) scatter of the global weights into each NUMA node's
 * per-layer scratch buffers: for every node and layer, copy the node's local
 * OFM-block slice of fil_libxsmm[l] into numa_thr_cfg[i].scratch[l].
 * Two build modes:
 *  - default: a flat 'parallel for collapse(2)' over (node, layer); any
 *    thread may copy any slice.
 *  - COPY_ON_LOCAL_NODES: each node's first thread pins itself via
 *    numa_run_on_node() and performs its own node's copies, so pages fault
 *    in on the right node.  NOTE: the braces of this path open and close
 *    across #ifdef boundaries -- edit with care.
 * Always returns 1. */
int copy_to_numa_buffers_fwd_inf(my_numa_thr_cfg **numa_thr_cfg_, int num_layers, my_fc_fwd_config* my_fc_fwd, float **fil_libxsmm) {
my_numa_thr_cfg *numa_thr_cfg = *numa_thr_cfg_;
int max_cfg_nodes = numa_num_configured_nodes();
int i,l;
#ifndef COPY_ON_LOCAL_NODES
#pragma omp parallel for collapse(2) private (i,l)
#else
#pragma omp parallel private (i,l)
{
int tid = omp_get_thread_num();
#endif
for (i = 0; i < max_cfg_nodes; i++) {
#ifdef COPY_ON_LOCAL_NODES
/* migrate threads belonging to node i onto it, then let only the node's
 * first thread do the copy */
if (tid >= numa_thr_cfg[i].thr_s && tid <= numa_thr_cfg[i].thr_e) {
numa_run_on_node(i);
}
if (tid == numa_thr_cfg[i].thr_s) {
#endif
for (l = 0; l < num_layers; l++) {
const libxsmm_blasint nBlocksIFm = my_fc_fwd[l].C / my_fc_fwd[l].bc;
/* floats per OFM block (all IFM tiles of one OFM block) */
const libxsmm_blasint BOFM_shift = nBlocksIFm * my_fc_fwd[l].bc * my_fc_fwd[l].bk;
/* number of OFM blocks node i owns for layer l (inclusive range) */
int l_nBlocksOFm = (numa_thr_cfg[i].blocksOFm_e[l] - numa_thr_cfg[i].blocksOFm_s[l]) + 1;
int j = 0;
for (j = 0; j < l_nBlocksOFm ; j++) {
size_t l_BOFM_shift = j * BOFM_shift;
float *out = numa_thr_cfg[i].scratch[l] + l_BOFM_shift;
float *inp = fil_libxsmm[l] + numa_thr_cfg[i].blocksOFm_s[l] * BOFM_shift + l_BOFM_shift;
/* copy one full OFM block; the size equals BOFM_shift floats */
memcpy(out, inp, sizeof(float) * nBlocksIFm * my_fc_fwd[l].bc * my_fc_fwd[l].bk);
}
}
#ifdef COPY_ON_LOCAL_NODES
}
#endif
}
#ifdef COPY_ON_LOCAL_NODES
}
#endif
return 1;
}
/* Thread-cooperative copy of layer l's weights between the global buffer
 * (fil_libxsmm) and this NUMA node's scratch buffer.
 * dir == 0: global -> node scratch; dir != 0: node scratch -> global.
 * The node's threads split the (local OFM block, IFM block) tile grid evenly;
 * the caller is responsible for any synchronization (barrier calls were
 * intentionally left out).  Always returns 1. */
int copy_to_numa_buffers_fwd(my_numa_thr_cfg *numa_thr_cfg, my_fc_fwd_config my_fc_fwd, float *fil_libxsmm, int numa_node, int l, int my_tid, int dir) {
  /* logical thread id within this node's thread range */
  const libxsmm_blasint ltid = my_tid - numa_thr_cfg->thr_s;
  const libxsmm_blasint nBlocksIFm = my_fc_fwd.C / my_fc_fwd.bc;
  const libxsmm_blasint IFM_shift = my_fc_fwd.bc * my_fc_fwd.bk;
  const libxsmm_blasint OFM_shift = nBlocksIFm * IFM_shift;
  /* one task per (local OFM block, IFM block) tile */
  const libxsmm_blasint work = ((numa_thr_cfg->blocksOFm_e[l] - numa_thr_cfg->blocksOFm_s[l]) + 1) * nBlocksIFm;
  const int n_thr = numa_thr_cfg->thr_e - numa_thr_cfg->thr_s;
  const libxsmm_blasint chunksize = (work % n_thr == 0) ? (work / n_thr) : ((work / n_thr) + 1);
  const libxsmm_blasint thr_begin = LIBXSMM_MIN(ltid * chunksize, work);
  const libxsmm_blasint thr_end = LIBXSMM_MIN((ltid + 1) * chunksize, work);
  float *const node_buf = numa_thr_cfg->scratch[l];
  float *const global_buf = fil_libxsmm + numa_thr_cfg->blocksOFm_s[l] * OFM_shift;
  float *const src = dir ? node_buf : global_buf;
  float *const dst = dir ? global_buf : node_buf;
  libxsmm_blasint j;
  for (j = thr_begin; j < thr_end; j++) {
    const libxsmm_blasint ofm = j / nBlocksIFm;
    const libxsmm_blasint ifm = j % nBlocksIFm;
    const libxsmm_blasint off = ofm * OFM_shift + ifm * IFM_shift;
    memcpy(dst + off, src + off, sizeof(float) * IFM_shift);
  }
  return 1;
}
int main(int argc, char* argv[])
{
float **act_libxsmm, **fil_libxsmm, **delact_libxsmm, **delfil_libxsmm;
float **bias_libxsmm, **delbias_libxsmm;
unsigned char **relumask_libxsmm;
int *label_libxsmm;
my_eltwise_fuse my_fuse;
my_fc_fwd_config* my_fc_fwd;
my_fc_bwd_config* my_fc_bwd;
my_opt_config* my_opt;
my_smax_fwd_config my_smax_fwd;
my_smax_bwd_config my_smax_bwd;
void* scratch = NULL;
size_t scratch_size = 0;
/* some parameters we can overwrite via cli,
default is some inner layer of overfeat */
int iters = 10; /* repetitions of benchmark */
int MB = 256; /* mini-batch size, "N" */
int fuse_type = 0; /* 0: nothing fused, 1: relu fused, 2: elementwise fused, 3: relu and elementwise fused */
char type = 'A'; /* 'A': ALL, 'F': FP, 'B': BP, 'U', WU */
int bn = 32;
int bk = 32;
int bc = 32;
int *C; /* number of input feature maps, "C" */
int num_layers = 0;
#if defined(_OPENMP)
int nThreads = omp_get_max_threads(); /* number of threads */
#else
int nThreads = 1; /* number of threads */
#endif
unsigned long long l_start, l_end;
unsigned long long *fwd_time, *bwd_time, *solver_time;
double l_total = 0.0;
double gflop = 0.0;
int i, j;
double fil_size = 0.0;
double act_size = 0.0;
float lr = 0.2f;
float loss_weight = 0.1f;
libxsmm_matdiff_info norms_fwd, norms_bwd, norms_upd, diff;
libxsmm_matdiff_clear(&norms_fwd);
libxsmm_matdiff_clear(&norms_bwd);
libxsmm_matdiff_clear(&norms_upd);
libxsmm_matdiff_clear(&diff);
if (argc > 1 && !strncmp(argv[1], "-h", 3)) {
printf("Usage: %s iters MB fuse_type type bn bk bc C1 C2 ... CN\n", argv[0]);
return 0;
}
libxsmm_rng_set_seed(1);
/* reading new values from cli */
i = 1;
num_layers = argc - 9;
if (argc > i) iters = atoi(argv[i++]);
if (argc > i) MB = atoi(argv[i++]);
if (argc > i) fuse_type = atoi(argv[i++]);
if (argc > i) type = *(argv[i++]);
if (argc > i) bn = atoi(argv[i++]);
if (argc > i) bk = atoi(argv[i++]);
if (argc > i) bc = atoi(argv[i++]);
/* allocate the number of channles buffer */
if ( num_layers < 1 ) {
printf("Usage: %s iters MB fuse_type type bn bk bc C1 C2 ... CN\n", argv[0]);
return 0;
}
C = (int*)malloc((num_layers+2)*sizeof(int));
for (j = 0 ; i < argc; ++i, ++j ) {
C[j] = atoi(argv[i]);
}
/* handle softmax config */
C[num_layers+1] = C[num_layers];
if (type != 'A' && type != 'F' && type != 'B') {
printf("type needs to be 'A' (All), 'F' (FP only), 'B' (BP only)\n");
return -1;
}
if ( (fuse_type < 0) || (fuse_type > 5) ) {
printf("fuse type needs to be 0 (None), 1 (Bias), 2 (ReLU), 3 (Sigmoid), 4 (Bias+ReLU), 5 (Bias+Sigmoid)\n");
return -1;
}
#if defined(__SSE3__)
_MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_ON);
_MM_SET_DENORMALS_ZERO_MODE(_MM_DENORMALS_ZERO_ON);
_MM_SET_ROUNDING_MODE(_MM_ROUND_NEAREST);
#endif
/* print some summary */
printf("##########################################\n");
printf("# Setting Up (Common) #\n");
printf("##########################################\n");
printf("PARAMS: N:%d\n", MB);
printf("PARAMS: Layers: %d\n", num_layers);
printf("PARAMS: ITERS:%d", iters); printf(" Threads:%d\n", nThreads);
for (i = 0; i < num_layers; ++i ) {
if (i == 0) {
act_size += (double)(MB*C[i]*sizeof(float))/(1024.0*1024.0);
printf("SIZE Activations %i (%dx%d): %10.2f MiB\n", i, MB, C[i], (double)(MB*C[i]*sizeof(float))/(1024.0*1024.0) );
}
act_size += (double)(MB*C[i+1]*sizeof(float))/(1024.0*1024.0);
fil_size += (double)(C[i]*C[i+1]*sizeof(float))/(1024.0*1024.0);
printf("SIZE Filter %i (%dx%d): %10.2f MiB\n", i, C[i], C[i+1], (double)(C[i]*C[i+1]*sizeof(float))/(1024.0*1024.0) );
printf("SIZE Activations %i (%dx%d): %10.2f MiB\n", i+1, MB, C[i+1], (double)(MB*C[i+1]*sizeof(float))/(1024.0*1024.0) );
}
act_size += (double)(MB*C[num_layers+1]*sizeof(float))/(1024.0*1024.0);
printf("SIZE Activations softmax (%dx%d): %10.2f MiB\n", MB, C[num_layers+1], (double)(MB*C[num_layers+1]*sizeof(float))/(1024.0*1024.0) );
printf("\nTOTAL SIZE Activations: %10.2f MiB\n", act_size );
printf("TOTAL SIZE Filter: %10.2f MiB\n", fil_size );
printf("TOTAL SIZE delActivations: %10.2f MiB\n", act_size );
printf("TOTAL SIZE delFilter: %10.2f MiB\n", fil_size );
printf("TOTAL SIZE MLP: %10.2f MiB\n", (2.0*fil_size) + (2.0*act_size) );
/* allocate data */
/* +2 because of the softwax layer */
act_libxsmm = (float**)malloc( (num_layers+2)*sizeof(float*) );
delact_libxsmm = (float**)malloc( (num_layers+1)*sizeof(float*) );
for ( i = 0 ; i < num_layers+2; ++i ) {
#ifdef ACT_NUMA_INTERLEAVED
act_libxsmm[i] = (float*)numa_alloc_interleaved( MB*C[i]*sizeof(float));
#else
act_libxsmm[i] = (float*)libxsmm_aligned_malloc( MB*C[i]*sizeof(float), 2097152);
#endif
/* softmax has no incoming gradients */
if ( i < num_layers+1 ) {
delact_libxsmm[i] = (float*)libxsmm_aligned_malloc( MB*C[i]*sizeof(float), 2097152);
}
}
fil_libxsmm = (float**)malloc( num_layers*sizeof(float*) );
delfil_libxsmm = (float**)malloc( num_layers*sizeof(float*) );
for ( i = 0 ; i < num_layers; ++i ) {
fil_libxsmm[i] = (float*)libxsmm_aligned_malloc( C[i]*C[i+1]*sizeof(float), 2097152);
delfil_libxsmm[i] = (float*)libxsmm_aligned_malloc( C[i]*C[i+1]*sizeof(float), 2097152);
}
bias_libxsmm = (float**)malloc( num_layers*sizeof(float*) );
delbias_libxsmm = (float**)malloc( num_layers*sizeof(float*) );
for ( i = 0 ; i < num_layers; ++i ) {
bias_libxsmm[i] = (float*)libxsmm_aligned_malloc( C[i+1]*sizeof(float), 2097152);
delbias_libxsmm[i] = (float*)libxsmm_aligned_malloc( C[i+1]*sizeof(float), 2097152);
}
relumask_libxsmm = (unsigned char**)malloc( num_layers*sizeof(unsigned char*) );
for ( i = 0 ; i < num_layers; ++i ) {
relumask_libxsmm[i] = (unsigned char*)libxsmm_aligned_malloc( MB*C[i+1]*sizeof(unsigned char), 2097152);
}
label_libxsmm = (int*)libxsmm_aligned_malloc( MB*sizeof(int), 2097152);
/* init data */
for ( i = 0 ; i < num_layers+2; ++i ) {
my_init_buf( act_libxsmm[i], MB*C[i], 0, 0 );
}
for ( i = 0 ; i < num_layers+1; ++i ) {
my_init_buf( delact_libxsmm[i], MB*C[i], 0, 0 );
}
for ( i = 0 ; i < num_layers; ++i ) {
my_init_buf( fil_libxsmm[i], C[i]*C[i+1], 0, 0 );
}
for ( i = 0 ; i < num_layers; ++i ) {
my_init_buf( delfil_libxsmm[i], C[i]*C[i+1], 0, 0 );
}
for ( i = 0 ; i < num_layers; ++i ) {
my_init_buf( bias_libxsmm[i], C[i+1], 0, 0 );
}
for ( i = 0 ; i < num_layers; ++i ) {
my_init_buf( delbias_libxsmm[i], C[i+1], 0, 0 );
}
for ( i = 0 ; i < num_layers; ++i ) {
zero_buf_uint8( relumask_libxsmm[i], MB*C[i+1] );
}
zero_buf_int32( label_libxsmm, MB );
printf("\n");
printf("##########################################\n");
printf("# Setting Up (custom-Storage) #\n");
printf("##########################################\n");
if ( fuse_type == 0 ) {
my_fuse = MY_ELTWISE_FUSE_NONE;
} else if ( fuse_type == 1 ) {
my_fuse = MY_ELTWISE_FUSE_BIAS;
} else if ( fuse_type == 2 ) {
my_fuse = MY_ELTWISE_FUSE_RELU;
} else if ( fuse_type == 4 ) {
my_fuse = MY_ELTWISE_FUSE_BIAS_RELU;
} else {
my_fuse = MY_ELTWISE_FUSE_NONE;
}
/* allocating handles */
my_fc_fwd = (my_fc_fwd_config*) malloc( num_layers*sizeof(my_fc_fwd_config) );
my_fc_bwd = (my_fc_bwd_config*) malloc( num_layers*sizeof(my_fc_bwd_config) );
my_opt = (my_opt_config*) malloc( num_layers*sizeof(my_opt_config) );
/* setting up handles + scratch */
for ( i = 0; i < num_layers; ++i ) {
my_fc_fwd[i] = setup_my_fc_fwd(MB, C[i], C[i+1], (MB % bn == 0) ? bn : MB,
(C[i ] % bc == 0) ? bc : C[i ],
(C[i+1] % bk == 0) ? bk : C[i+1],
nThreads, my_fuse);
my_fc_bwd[i] = setup_my_fc_bwd(MB, C[i], C[i+1], (MB % bn == 0) ? bn : MB,
(C[i ] % bc == 0) ? bc : C[i ],
(C[i+1] % bk == 0) ? bk : C[i+1],
nThreads, my_fuse);
my_opt[i] = setup_my_opt( C[i], C[i+1], (C[i ] % bc == 0) ? bc : C[i ],
(C[i+1] % bk == 0) ? bk : C[i+1],
nThreads, lr );
/* let's allocate and bind scratch */
if ( my_fc_fwd[i].scratch_size > 0 || my_fc_bwd[i].scratch_size > 0 || my_opt[i].scratch_size > 0 ) {
size_t alloc_size = LIBXSMM_MAX( LIBXSMM_MAX( my_fc_fwd[i].scratch_size, my_fc_bwd[i].scratch_size), my_opt[i].scratch_size );
if ( alloc_size > scratch_size ) {
if ( scratch != NULL ) libxsmm_free( scratch );
scratch_size = alloc_size;
scratch = libxsmm_aligned_scratch( scratch_size, 2097152 );
my_init_buf( (float*)(scratch), (scratch_size)/4, 0, 0 );
}
}
}
/* softmax+loss is treated as N+! layer */
my_smax_fwd = setup_my_smax_fwd( MB, C[num_layers+1], (MB % bn == 0) ? bn : MB,
(C[num_layers+1] % bk == 0) ? bk : C[num_layers+1],
nThreads );
my_smax_bwd = setup_my_smax_bwd( MB, C[num_layers+1], (MB % bn == 0) ? bn : MB,
(C[num_layers+1] % bk == 0) ? bk : C[num_layers+1],
nThreads, loss_weight );
if ( my_smax_fwd.scratch_size > 0 || my_smax_bwd.scratch_size > 0 ) {
size_t alloc_size = LIBXSMM_MAX( my_smax_fwd.scratch_size, my_smax_bwd.scratch_size );
if ( alloc_size > scratch_size ) {
if ( scratch != NULL ) libxsmm_free( scratch );
scratch_size = alloc_size;
scratch = libxsmm_aligned_scratch( scratch_size, 2097152 );
my_init_buf( (float*)(scratch), (scratch_size)/4, 0, 0 );
}
}
my_numa_thr_cfg *numa_thr_cfg;
/* Define numa configuration: #numa nodes, #threads on each node */
setup_my_numa(&numa_thr_cfg, num_layers, nThreads);
if ( type == 'F') {
printf("##########################################\n");
printf("# Performance - FWD (custom-Storage) #\n");
printf("##########################################\n");
setup_my_numa_fwd(&numa_thr_cfg, num_layers, my_fc_fwd);
allocate_numa_buffers_fwd(&numa_thr_cfg, num_layers, my_fc_fwd);
l_start = libxsmm_timer_tick();
#if defined(_OPENMP)
# pragma omp parallel private(i,j)
#endif
{
#if defined(_OPENMP)
const int tid = omp_get_thread_num();
#else
const int tid = 0;
#endif
const int numa_node = numa_node_of_cpu(tid);
for ( i = 0; i < num_layers; ++i) {
copy_to_numa_buffers_fwd(&numa_thr_cfg[numa_node], my_fc_fwd[i], fil_libxsmm[i], numa_node, i, tid, 0);
}
for (j = 0; j < iters; ++j) {
for ( i = 0; i < num_layers; ++i) {
my_fc_fwd_exec( my_fc_fwd[i], act_libxsmm[i], act_libxsmm[i+1],
bias_libxsmm[i], relumask_libxsmm[i], 0, tid, scratch, &numa_thr_cfg[numa_node], i);
}
#ifdef USE_SOFTMAX
my_smax_fwd_exec( my_smax_fwd, act_libxsmm[num_layers], act_libxsmm[num_layers+1], label_libxsmm, &loss,
0, tid, scratch );
#endif
}
}
l_end = libxsmm_timer_tick();
l_total = libxsmm_timer_duration(l_start, l_end);
gflop = 0.0;
for ( i = 0; i < num_layers; ++i) {
gflop += (2.0*(double)MB*(double)C[i]*(double)C[i+1]*(double)iters) / (1000.0*1000.0*1000.0);
}
printf("GFLOP = %.5g\n", gflop/(double)iters);
printf("fp time = %.5g\n", ((double)(l_total/iters)));
printf("GFLOPS = %.5g\n", gflop/l_total);
printf("PERFDUMP,FP,%s,%i,%i,", LIBXSMM_VERSION, nThreads, MB );
for ( i = 0; i < num_layers; ++i ) {
printf("%i,", C[i] );
}
printf("%f,%f\n", ((double)(l_total/iters)), gflop/l_total);
/* Print some norms on last act for fwd and weights of first layer after all iterations */
libxsmm_matdiff(&norms_fwd, LIBXSMM_DATATYPE_F32, MB*C[num_layers], 1, act_libxsmm[num_layers], act_libxsmm[num_layers], 0, 0);
printf("L1 of act[num_layers] : %.25g\n", norms_fwd.l1_ref);
}
if (type == 'B') {
printf("##########################################\n");
printf("# NOT Supported: Performance - BWD (custom-Storage) #\n");
printf("##########################################\n");
exit( -1 );
#if 0
l_start = libxsmm_timer_tick();
#if defined(_OPENMP)
# pragma omp parallel private(i,j)
#endif
{
#if defined(_OPENMP)
const int tid = omp_get_thread_num();
#else
const int tid = 0;
#endif
for (j = 0; j < iters; ++j) {
#ifdef USE_SOFTMAX
my_smax_bwd_exec( my_smax_bwd, delact_libxsmm[num_layers], act_libxsmm[num_layers+1], label_libxsmm,
0, tid, scratch );
#endif
for ( i = num_layers-1; i > 0; --i) {
my_fc_bwd_exec( my_fc_bwd[i], fil_libxsmm[i], delact_libxsmm[i], delact_libxsmm[i+1], delfil_libxsmm[i],
act_libxsmm[i], delbias_libxsmm[i], relumask_libxsmm[i], MY_PASS_BWD, 0, tid, scratch );
my_opt_exec( my_opt[i], fil_libxsmm[i], delfil_libxsmm[i], 0, tid, scratch );
}
my_fc_bwd_exec( my_fc_bwd[0], fil_libxsmm[0], delact_libxsmm[0], delact_libxsmm[0+1], delfil_libxsmm[0],
act_libxsmm[0], delbias_libxsmm[0], relumask_libxsmm[0], MY_PASS_BWD_W, 0, tid, scratch );
my_opt_exec( my_opt[0], fil_libxsmm[0], delfil_libxsmm[0], 0, tid, scratch );
}
}
l_end = libxsmm_timer_tick();
l_total = libxsmm_timer_duration(l_start, l_end);
gflop = 0.0;
for ( i = num_layers-1; i > 0; --i) {
gflop += (4.0*(double)MB*(double)C[i]*(double)C[i+1]*(double)iters) / (1000.0*1000.0*1000.0);
}
gflop += (2.0*(double)MB*(double)C[0]*(double)C[1]*(double)iters) / (1000.0*1000.0*1000.0);
printf("GFLOP = %.5g\n", gflop/(double)iters);
printf("fp time = %.5g\n", ((double)(l_total/iters)));
printf("GFLOPS = %.5g\n", gflop/l_total);
printf("PERFDUMP,BP,%s,%i,%i,", LIBXSMM_VERSION, nThreads, MB );
for ( i = 0; i < num_layers; ++i ) {
printf("%i,", C[i] );
}
printf("%f,%f\n", ((double)(l_total/iters)), gflop/l_total);
#endif
}
if (type == 'A') {
printf("##########################################\n");
printf("# Performance - FWD-BWD (custom-Storage) #\n");
printf("##########################################\n");
/* Timers: */
fwd_time = (unsigned long long *) malloc(sizeof(unsigned long long) * nThreads);
bwd_time = (unsigned long long *) malloc(sizeof(unsigned long long) * nThreads);
solver_time = (unsigned long long *) malloc(sizeof(unsigned long long) * nThreads);
/* Calculate chunks of weights used on each nume node on FWD based on FWD thread decomposition */
setup_my_numa_fwd(&numa_thr_cfg, num_layers, my_fc_fwd);
/* Calculate chunks of weights used on each nume node on BWD/d based on BWD/d thread decomposition */
setup_my_numa_bwd_d(&numa_thr_cfg, num_layers, my_fc_bwd);
/* NUMA aware allocations of buffers needed for FWD */
allocate_numa_buffers_fwd(&numa_thr_cfg, num_layers, my_fc_fwd);
/* NUMA aware allocations of buffers needed for BWD */
allocate_numa_buffers_bwd_d(&numa_thr_cfg, num_layers, my_fc_bwd);
/* Utility needed for transpoisition of weigths on BWD/d: get numa node based on current ofm */
int **fwd_ofm_to_node = (int**)malloc(sizeof(int*) * num_layers);
set_fwd_ofm_to_node(fwd_ofm_to_node, &numa_thr_cfg, num_layers, my_fc_fwd);
l_start = libxsmm_timer_tick();
#if defined(_OPENMP)
# pragma omp parallel private(i,j)
#endif
{
#if defined(_OPENMP)
const int tid = omp_get_thread_num();
#else
const int tid = 0;
#endif
fwd_time[tid] = 0;
bwd_time[tid] = 0;
solver_time[tid] = 0;
const int numa_node = numa_node_of_cpu(tid);
for ( i = 0; i < num_layers; ++i) {
/* Copy original weights to NUMA FWD buffers. Threading decomposition is the same with FWD. */
copy_to_numa_buffers_fwd(&numa_thr_cfg[numa_node], my_fc_fwd[i], fil_libxsmm[i], numa_node, i, tid, 0);
}
for (j = 0; j < iters; ++j) {
unsigned long long fwd_time_start = libxsmm_timer_tick();
for ( i = 0; i < num_layers; ++i) {
/* FWD: Use weights from NUMA FWD buffers */
my_fc_fwd_exec( my_fc_fwd[i], act_libxsmm[i], act_libxsmm[i+1],
bias_libxsmm[i], relumask_libxsmm[i], 0, tid, scratch, &numa_thr_cfg[numa_node], i );
}
fwd_time[tid] += (libxsmm_timer_tick() - fwd_time_start);
#ifdef USE_SOFTMAX
my_smax_fwd_exec( my_smax_fwd, act_libxsmm[num_layers], act_libxsmm[num_layers+1], label_libxsmm, &loss,
0, tid, scratch );
my_smax_bwd_exec( my_smax_bwd, delact_libxsmm[num_layers], act_libxsmm[num_layers+1], label_libxsmm,
0, tid, scratch );
#endif
for ( i = num_layers-1; i > 0; --i) {
unsigned long long bwd_time_start = libxsmm_timer_tick();
/* Transpose weights from NUMA FWD buffers to NUMA BWD buffer. Threading decomposition is the same with BWD/d. */
my_fc_bwd_d_transpose( my_fc_bwd[i], tid , &numa_thr_cfg, numa_node, i, fwd_ofm_to_node[i] );
/* BWD/d: Use weights from NUMA BWD buffers */
my_fc_bwd_exec( my_fc_bwd[i], delact_libxsmm[i], delact_libxsmm[i+1], delfil_libxsmm[i],
act_libxsmm[i], delbias_libxsmm[i], relumask_libxsmm[i], MY_PASS_BWD, 0, tid, scratch, &numa_thr_cfg[numa_node], i );
bwd_time[tid] += (libxsmm_timer_tick() - bwd_time_start);
/* Solver: Update NUMA FWD buffers. Threading decomposition is the same with FWD. */
unsigned long long solver_time_start = libxsmm_timer_tick();
my_opt_exec( my_opt[i], delfil_libxsmm[i], 0, tid, &numa_thr_cfg[numa_node], i, my_fc_fwd[i] );
solver_time[tid] += (libxsmm_timer_tick() - solver_time_start);
}
/* BWD/w: todo */
unsigned long long bwd_time_start = libxsmm_timer_tick();
my_fc_bwd_exec( my_fc_bwd[0], delact_libxsmm[0], delact_libxsmm[0+1], delfil_libxsmm[0],
act_libxsmm[0], delbias_libxsmm[0], relumask_libxsmm[0], MY_PASS_BWD_W, 0, tid, scratch, &numa_thr_cfg[numa_node], 0 );
bwd_time[tid] += (libxsmm_timer_tick() - bwd_time_start);
/* Solver: Update NUMA FWD buffers. Threading decomposition is the same with FWD. */
unsigned long long solver_time_start = libxsmm_timer_tick();
my_opt_exec( my_opt[0], delfil_libxsmm[0], 0, tid, &numa_thr_cfg[numa_node], 0, my_fc_fwd[0] );
solver_time[tid] += (libxsmm_timer_tick() - solver_time_start);
}
/* Copy result from NUMA FWD Buffers to original weights. Threading decomposition is the same with FWD. */
for ( i = 0; i < num_layers; ++i) {
copy_to_numa_buffers_fwd(&numa_thr_cfg[numa_node], my_fc_fwd[i], fil_libxsmm[i], numa_node, i, tid, 1);
}
}
l_end = libxsmm_timer_tick();
l_total = libxsmm_timer_duration(l_start, l_end);
free_fwd_ofm_to_node(fwd_ofm_to_node, num_layers);
free(fwd_ofm_to_node);
#ifdef CHECK_L1
#if 1
/* Print some norms on last act for fwd and weights of first layer after all iterations */
libxsmm_matdiff(&norms_fwd, LIBXSMM_DATATYPE_F32, MB*C[num_layers], 1, act_libxsmm[num_layers], act_libxsmm[num_layers], 0, 0);
printf("L1 of act[num_layers] : %.25g\n", norms_fwd.l1_ref);
libxsmm_matdiff_reduce(&diff, &norms_fwd);
libxsmm_matdiff(&norms_bwd, LIBXSMM_DATATYPE_F32, C[0]*C[1], 1, fil_libxsmm[0], fil_libxsmm[0], 0, 0);
printf("L1 of wt[0] : %.25g\n", norms_bwd.l1_ref);
libxsmm_matdiff_reduce(&diff, &norms_bwd);
#else
{
int e = 0;
FILE *fileAct, *fileWt;
fileAct = fopen("acts.txt","w+");
if (fileAct != NULL) {
for (e = 0; e < MB*C[num_layers]; e++) {
fprintf(fileAct, "%.10g\n", *((float*)act_libxsmm[num_layers] + e));
}
fclose(fileAct);
}
fileWt = fopen("weights.txt","w+");
if (fileWt != NULL) {
for (e = 0; e < C[0]*C[1]; e++) {
fprintf(fileWt, "%.10g\n", *((float*)fil_libxsmm[0] + e));
}
fclose(fileWt);
}
}
#endif
#endif
gflop = 0.0;
for ( i = num_layers-1; i > 0; --i) {
gflop += (6.0*(double)MB*(double)C[i]*(double)C[i+1]*(double)iters) / (1000.0*1000.0*1000.0);
}
gflop += (4.0*(double)MB*(double)C[0]*(double)C[1]*(double)iters) / (1000.0*1000.0*1000.0);
printf("GFLOP = %.5g\n", gflop/(double)iters);
printf("fp time = %.5g\n", ((double)(l_total/iters)));
printf("GFLOPS = %.5g\n", gflop/l_total);
printf("PERFDUMP,BP,%s,%i,%i,", LIBXSMM_VERSION, nThreads, MB );
for ( i = 0; i < num_layers; ++i ) {
printf("%i,", C[i] );
}
printf("%f,%f\n", ((double)(l_total/iters)), gflop/l_total);
unsigned long long max_fwd_time = 0, max_bwd_time = 0, max_solver_time = 0;
for (i = 0; i < nThreads; i++) {
if (max_fwd_time < fwd_time[i]) max_fwd_time = fwd_time[i];
if (max_bwd_time < bwd_time[i]) max_bwd_time = bwd_time[i];
if (max_solver_time < solver_time[i]) max_solver_time = solver_time[i];
}
printf("Profiling: fwd_time = %lld, bwd_time = %lld, solver_time = %lld\n",
max_fwd_time, max_bwd_time, max_solver_time);
}
/* deallocate data */
if ( scratch != NULL ) {
libxsmm_free(scratch);
}
for ( i = 0; i < num_layers; ++i ) {
if ( i == 0 ) {
#ifdef ACT_NUMA_INTERLEAVED
numa_free(act_libxsmm[i], MB*C[i]*sizeof(float));
#else
libxsmm_free(act_libxsmm[i]);
#endif
libxsmm_free(delact_libxsmm[i]);
}
#ifdef ACT_NUMA_INTERLEAVED
numa_free(act_libxsmm[i+1], MB*C[i+1]*sizeof(float));
#else
libxsmm_free(act_libxsmm[i+1]);
#endif
libxsmm_free(delact_libxsmm[i+1]);
libxsmm_free(fil_libxsmm[i]);
libxsmm_free(delfil_libxsmm[i]);
libxsmm_free(bias_libxsmm[i]);
libxsmm_free(delbias_libxsmm[i]);
libxsmm_free(relumask_libxsmm[i]);
}
#ifdef ACT_NUMA_INTERLEAVED
numa_free(act_libxsmm[num_layers+1], MB*C[num_layers+1]*sizeof(float));
#else
libxsmm_free(act_libxsmm[num_layers+1]);
#endif
libxsmm_free(label_libxsmm);
for (i = 0; i < numa_num_configured_nodes(); i++) {
free(numa_thr_cfg[i].blocksOFm_s);
free(numa_thr_cfg[i].blocksOFm_e);
free(numa_thr_cfg[i].blocksIFm_tr_s);
free(numa_thr_cfg[i].blocksIFm_tr_e);
for (j = 0; j < num_layers; j++) {
numa_free_aligned(numa_thr_cfg[i].scratch[j], numa_thr_cfg[i].layer_size[j]);
}
free(numa_thr_cfg[i].scratch);
free(numa_thr_cfg[i].layer_size);
numa_free_aligned(numa_thr_cfg[i].bwd_d_scratch, numa_thr_cfg[i].bwd_d_scratch_size);
}
free(numa_thr_cfg);
free( my_opt );
free( my_fc_fwd );
free( my_fc_bwd );
free( act_libxsmm );
free( delact_libxsmm );
free( fil_libxsmm );
free( delfil_libxsmm );
free( bias_libxsmm );
free( delbias_libxsmm );
free( relumask_libxsmm );
free( C );
/* some empty lines at the end */
printf("\n\n\n");
return 0;
}
|
GB_unaryop__lnot_uint8_fp64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__lnot_uint8_fp64
// op(A') function: GB_tran__lnot_uint8_fp64
// C type: uint8_t
// A type: double
// cast: uint8_t cij ; GB_CAST_UNSIGNED(cij,aij,8)
// unaryop: cij = !(aij != 0)
#define GB_ATYPE \
double
#define GB_CTYPE \
uint8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !(x != 0) ;
// casting
#define GB_CASTING(z, x) \
uint8_t z ; GB_CAST_UNSIGNED(z,x,8) ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_UINT8 || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Casts each entry of Ax (double) to uint8_t via GB_CAST_UNSIGNED, then
// applies the logical-not operator z = !(x != 0), writing the result to Cx.
// Cx and Ax are dense arrays of anz entries each.  Returns GrB_NO_VALUE when
// this kernel has been compiled out via GB_DISABLE, GrB_SUCCESS otherwise.
GrB_Info GB_unop__lnot_uint8_fp64
(
    uint8_t *restrict Cx,       // output array, anz entries
    const double *restrict Ax,  // input array, anz entries
    int64_t anz,                // number of entries to process
    int nthreads                // number of OpenMP threads to use
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    // entries are independent, so a static schedule divides the work evenly
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;     // Cx [p] = ! (((uint8_t) Ax [p]) != 0)
    }
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// Transposes A into C while casting each entry to uint8_t and applying the
// logical-not operator.  The actual loop body lives in the shared template
// GB_unaryop_transpose.c, specialized through the GB_* macros defined above.
// Returns GrB_NO_VALUE when compiled out via GB_DISABLE.
GrB_Info GB_tran__lnot_uint8_fp64
(
    GrB_Matrix C,                       // output matrix
    const GrB_Matrix A,                 // input matrix, transposed into C
    int64_t **Rowcounts,                // per-slice row counts (presumably
                                        // cumulative offsets -- confirm in
                                        // GB_unaryop_transpose.c)
    GBI_single_iterator Iter,           // iterator over the vectors of A
    const int64_t *restrict A_slice,    // partition of A across slices
    int naslice                         // number of slices
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
#endif
}
#endif
|
common.h |
#pragma once
// C++ standard library
#include <cmath>    // std::sqrt was previously pulled in only transitively
#include <cstdint>  // int32_t, int64_t
#include <iostream>

// Third-party
#include <Eigen/Core>
#include <Eigen/SparseCore>
#include <glog/logging.h>
namespace solvers {
#if USE_FLOAT
using Double = float;
#else
using Double = double;
#endif
using Matrix =
Eigen::Matrix<Double, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>;
using MatrixMap = Eigen::Map<const Matrix>;
using Vector = Eigen::Matrix<Double, Eigen::Dynamic, 1>;
using VectorMap = Eigen::Map<const Vector>;
using RowVector = Eigen::Matrix<Double, 1, Eigen::Dynamic, Eigen::RowMajor>;
using RowVectorMap = Eigen::Map<const RowVector>;
using IdxVector = Eigen::Matrix<int64_t, Eigen::Dynamic, 1>;
using IdxVectorMap = Eigen::Map<const IdxVector>;
using SpMatrix = Eigen::SparseMatrix<Double, Eigen::RowMajor>;
using SpMatrixMap = Eigen::Map<const SpMatrix>;
using SpVector = Eigen::SparseVector<Double>;
/** Subtract each row's mean from that row of a dense row-major matrix.
 *
 * @param XData pointer to rows*cols values, row-major, modified in place
 * @param rows  number of rows
 * @param cols  number of columns (entries per row)
 */
inline void center(Double* const XData,
                   const size_t rows,
                   const size_t cols) {
#pragma omp parallel for
  for (size_t r = 0; r < rows; ++r) {
    Double* const row = XData + r * cols;  // rows are independent
    Double mean = 0;
    for (size_t c = 0; c < cols; ++c) {
      mean += row[c];
    }
    mean /= cols;
    for (size_t c = 0; c < cols; ++c) {
      row[c] -= mean;
    }
  }
}
/** Scale each row of a dense row-major matrix to unit L2 norm.
 *
 * Rows whose norm is zero are left untouched.
 *
 * @param XData pointer to rows*cols values, row-major, modified in place
 * @param rows  number of rows
 * @param cols  number of columns (entries per row)
 */
inline void normalize(Double* const XData,
                      const size_t rows,
                      const size_t cols) {
#pragma omp parallel for
  for (size_t r = 0; r < rows; ++r) {
    Double* const row = XData + r * cols;  // rows are independent
    Double sq = 0;
    for (size_t c = 0; c < cols; ++c) {
      sq += row[c] * row[c];
    }
    const Double norm = std::sqrt(sq);
    if (norm > 0) {
      for (size_t c = 0; c < cols; ++c) {
        row[c] /= norm;
      }
    }
  }
}
/** L2 normalize, sparse */
inline void normalize(const size_t rows,
const size_t cols,
const size_t nnz,
const int32_t* const indptr,
const int32_t* const indices,
Double* const values) {
#pragma omp parallel for
for (size_t r = 0; r < rows; ++r) {
Double sum = 0;
for (int32_t idx = indptr[r]; idx < indptr[r + 1]; ++idx) {
Double x = values[idx];
sum += x * x;
}
sum = std::sqrt(sum);
if (sum > 0) {
for (int32_t idx = indptr[r]; idx < indptr[r + 1]; ++idx) {
values[idx] /= sum;
}
}
}
}
}
|
bitshuffle_core.c | /*
* Bitshuffle - Filter for improving compression of typed binary data.
*
* Author: Kiyoshi Masui <kiyo@physics.ubc.ca>
* Website: http://www.github.com/kiyo-masui/bitshuffle
* Created: 2014
*
* See LICENSE file for details about copyright and rights to use.
*
*/
#include "bitshuffle_core.h"
#include "bitshuffle_internals.h"
#include <stdio.h>
#include <string.h>
#ifdef _M_X64 // MSVC x64 always has SSE2 support, but doesn't define __SSE2__ macro
#define USESSE2
// TODO: potential for SSE2 support in MSVC x86
#elif defined(__SSE2__)
#define USESSE2
#endif
#if defined(__AVX2__) && defined(USESSE2)
#define USEAVX2
#endif
#if defined(__ARM_NEON__) || (__ARM_NEON)
#define USEARMNEON
#endif
// Conditional includes for SSE2 and AVX2.
#ifdef USEAVX2
#include <immintrin.h>
#elif defined USESSE2
#include <emmintrin.h>
#elif defined USEARMNEON
#include <arm_neon.h>
#endif
#if defined(_OPENMP) && defined(_MSC_VER)
typedef int64_t omp_size_t;
#else
typedef size_t omp_size_t;
#endif
// Macros.
#define CHECK_MULT_EIGHT(n) if (n % 8) return -80;
#define MAX(X,Y) ((X) > (Y) ? (X) : (Y))
/* ---- Functions indicating compile time instruction set. ---- */
/* Return 1 if this library was compiled with Arm NEON support, else 0. */
int bshuf_using_NEON(void) {
#ifdef USEARMNEON
    return 1;
#else
    return 0;
#endif
}
/* Return 1 if this library was compiled with x86 SSE2 support, else 0. */
int bshuf_using_SSE2(void) {
#ifdef USESSE2
    return 1;
#else
    return 0;
#endif
}
/* Return 1 if this library was compiled with x86 AVX2 support, else 0. */
int bshuf_using_AVX2(void) {
#ifdef USEAVX2
    return 1;
#else
    return 0;
#endif
}
/* ---- Worker code not requiring special instruction sets. ----
*
* The following code does not use any x86 specific vectorized instructions
* and should compile on any machine
*
*/
/* Transpose 8x8 bit array packed into a single quadword *x*.
* *t* is workspace. */
#define TRANS_BIT_8X8(x, t) { \
t = (x ^ (x >> 7)) & 0x00AA00AA00AA00AALL; \
x = x ^ t ^ (t << 7); \
t = (x ^ (x >> 14)) & 0x0000CCCC0000CCCCLL; \
x = x ^ t ^ (t << 14); \
t = (x ^ (x >> 28)) & 0x00000000F0F0F0F0LL; \
x = x ^ t ^ (t << 28); \
}
/* Transpose 8x8 bit array along the diagonal from upper right
to lower left */
#define TRANS_BIT_8X8_BE(x, t) { \
t = (x ^ (x >> 9)) & 0x0055005500550055LL; \
x = x ^ t ^ (t << 9); \
t = (x ^ (x >> 18)) & 0x0000333300003333LL; \
x = x ^ t ^ (t << 18); \
t = (x ^ (x >> 36)) & 0x000000000F0F0F0FLL; \
x = x ^ t ^ (t << 36); \
}
/* Transpose of an array of arbitrarily typed elements. */
#define TRANS_ELEM_TYPE(in, out, lda, ldb, type_t) { \
size_t ii, jj, kk; \
const type_t* in_type = (const type_t*) in; \
type_t* out_type = (type_t*) out; \
for(ii = 0; ii + 7 < lda; ii += 8) { \
for(jj = 0; jj < ldb; jj++) { \
for(kk = 0; kk < 8; kk++) { \
out_type[jj*lda + ii + kk] = \
in_type[ii*ldb + kk * ldb + jj]; \
} \
} \
} \
for(ii = lda - lda % 8; ii < lda; ii ++) { \
for(jj = 0; jj < ldb; jj++) { \
out_type[jj*lda + ii] = in_type[ii*ldb + jj]; \
} \
} \
}
/* Memory copy with the bshuf call signature.  For testing and profiling:
 * provides a baseline "do nothing but move bytes" operation.
 * Returns the number of bytes copied (size * elem_size). */
int64_t bshuf_copy(const void* in, void* out, const size_t size,
        const size_t elem_size) {

    const size_t nbyte = size * elem_size;
    /* Elements are opaque here; one memcpy moves the whole buffer. */
    memcpy(out, in, nbyte);
    return (int64_t) nbyte;
}
/* Transpose bytes within elements, starting partway through input.
 *
 * Treats the input as *size* elements of *elem_size* bytes and writes byte j
 * of element i to out[j * size + i], beginning at element index *start*.
 * *start* must be a multiple of eight (returns -80 otherwise).  Returns
 * size * elem_size on success. */
int64_t bshuf_trans_byte_elem_remainder(const void* in, void* out, const size_t size,
        const size_t elem_size, const size_t start) {

    const char* src = (const char*) in;
    char* dst = (char*) out;
    size_t elem, byte, sub;

    if (start % 8) return -80;  /* CHECK_MULT_EIGHT, expanded inline */

    if (size > start) {
        /* Eight elements at a time so the compiler can unroll the inner loop. */
        for (elem = start; elem + 7 < size; elem += 8) {
            for (byte = 0; byte < elem_size; byte++) {
                for (sub = 0; sub < 8; sub++) {
                    dst[byte * size + elem + sub]
                            = src[(elem + sub) * elem_size + byte];
                }
            }
        }
        /* Tail of fewer than eight elements. */
        for (elem = size - size % 8; elem < size; elem++) {
            for (byte = 0; byte < elem_size; byte++) {
                dst[byte * size + elem] = src[elem * elem_size + byte];
            }
        }
    }
    return size * elem_size;
}
/* Transpose bytes within elements (whole array; scalar implementation). */
int64_t bshuf_trans_byte_elem_scal(const void* in, void* out, const size_t size,
        const size_t elem_size) {
    /* Starting at element 0, the remainder routine covers the full array. */
    return bshuf_trans_byte_elem_remainder(in, out, size, elem_size, 0);
}
/* Transpose bits within bytes, starting partway through the input.
 *
 * Treats the nbyte = size * elem_size input bytes as 64-bit words, performs
 * an 8x8 bit transpose on each word, and scatters the eight result bytes
 * into eight "bit rows" of nbyte / 8 bytes each.  *start_byte* must be a
 * multiple of eight; processing begins at that byte offset.  Returns
 * size * elem_size on success, -80 if a multiple-of-eight precondition
 * fails. */
int64_t bshuf_trans_bit_byte_remainder(const void* in, void* out, const size_t size,
        const size_t elem_size, const size_t start_byte) {

    const uint64_t* in_b = (const uint64_t*) in;
    uint8_t* out_b = (uint8_t*) out;
    uint64_t x, t;
    size_t ii, kk;
    size_t nbyte = elem_size * size;
    size_t nbyte_bitrow = nbyte / 8;

    /* Runtime endianness probe: first byte of a 64-bit 1. */
    uint64_t e=1;
    const int little_endian = *(uint8_t *) &e == 1;
    /* On big-endian hosts the bit rows are filled in reverse order: the
     * "negative" skip relies on well-defined unsigned wrap-around, and the
     * 7 * nbyte_bitrow offset keeps every computed index in range. */
    const size_t bit_row_skip = little_endian ? nbyte_bitrow : -nbyte_bitrow;
    const int64_t bit_row_offset = little_endian ? 0 : 7 * nbyte_bitrow;

    CHECK_MULT_EIGHT(nbyte);
    CHECK_MULT_EIGHT(start_byte);

    for (ii = start_byte / 8; ii < nbyte_bitrow; ii ++) {
        x = in_b[ii];
        /* 8x8 bit transpose of one 64-bit word (endian-specific variant). */
        if (little_endian) {
            TRANS_BIT_8X8(x, t);
        } else {
            TRANS_BIT_8X8_BE(x, t);
        }
        /* Scatter the eight bytes of the transposed word, one per bit row. */
        for (kk = 0; kk < 8; kk ++) {
            out_b[bit_row_offset + kk * bit_row_skip + ii] = x;
            x = x >> 8;
        }
    }
    return size * elem_size;
}
/* Transpose bits within bytes (whole array; scalar implementation). */
int64_t bshuf_trans_bit_byte_scal(const void* in, void* out, const size_t size,
        const size_t elem_size) {
    /* Starting at byte 0, the remainder routine covers the full buffer. */
    return bshuf_trans_bit_byte_remainder(in, out, size, elem_size, 0);
}
/* General transpose of an array, optimized for large element sizes.
 *
 * Views the input as an lda x ldb matrix of elem_size-byte elements and
 * writes its ldb x lda transpose to out.  Returns the total number of bytes
 * processed (lda * ldb * elem_size). */
int64_t bshuf_trans_elem(const void* in, void* out, const size_t lda,
        const size_t ldb, const size_t elem_size) {

    const char* src = (const char*) in;
    char* dst = (char*) out;
    size_t row, col;

    for (row = 0; row < lda; row++) {
        /* Hoist the row base; elements are copied with memcpy since
         * elem_size is arbitrary. */
        const char* src_row = src + row * ldb * elem_size;
        for (col = 0; col < ldb; col++) {
            memcpy(dst + (col * lda + row) * elem_size,
                   src_row + col * elem_size, elem_size);
        }
    }
    return lda * ldb * elem_size;
}
/* Transpose rows of shuffled bits (size / 8 bytes each) within groups of 8. */
int64_t bshuf_trans_bitrow_eight(const void* in, void* out, const size_t size,
        const size_t elem_size) {
    size_t nbyte_bitrow = size / 8;

    CHECK_MULT_EIGHT(size);

    /* An 8 x elem_size transpose of nbyte_bitrow-byte chunks. */
    return bshuf_trans_elem(in, out, 8, elem_size, nbyte_bitrow);
}
/* Transpose bits within elements (scalar implementation).
 *
 * Pipeline: byte-transpose within elements, bit-transpose within bytes, then
 * regroup the bit rows.  Uses a size * elem_size scratch buffer.  Returns -1
 * on allocation failure, -80 if size is not a multiple of eight, otherwise
 * size * elem_size. */
int64_t bshuf_trans_bit_elem_scal(const void* in, void* out, const size_t size,
        const size_t elem_size) {

    int64_t count;
    void *tmp_buf;

    CHECK_MULT_EIGHT(size);

    tmp_buf = malloc(size * elem_size);
    if (tmp_buf == NULL) return -1;

    count = bshuf_trans_byte_elem_scal(in, out, size, elem_size);
    /* CHECK_ERR_FREE presumably frees tmp_buf and returns on a negative
     * count -- confirm in bitshuffle_internals.h. */
    CHECK_ERR_FREE(count, tmp_buf);
    count = bshuf_trans_bit_byte_scal(out, tmp_buf, size, elem_size);
    CHECK_ERR_FREE(count, tmp_buf);
    count = bshuf_trans_bitrow_eight(tmp_buf, out, size, elem_size);

    free(tmp_buf);
    return count;
}
/* For data organized into a row for each bit (8 * elem_size rows of
 * size / 8 bytes each), transpose the bytes back into element order.
 * Returns size * elem_size on success, -80 if size is not a multiple of
 * eight. */
int64_t bshuf_trans_byte_bitrow_scal(const void* in, void* out, const size_t size,
        const size_t elem_size) {

    const char* src = (const char*) in;
    char* dst = (char*) out;
    const size_t nbyte_row = size / 8;
    size_t jj, ii, kk;

    if (size % 8) return -80;  /* CHECK_MULT_EIGHT, expanded inline */

    for (jj = 0; jj < elem_size; jj++) {
        for (ii = 0; ii < nbyte_row; ii++) {
            /* Gather the eight bit rows of byte jj back together. */
            for (kk = 0; kk < 8; kk++) {
                dst[ii * 8 * elem_size + jj * 8 + kk]
                        = src[(jj * 8 + kk) * nbyte_row + ii];
            }
        }
    }
    return size * elem_size;
}
/* Shuffle bits within the bytes of eight element blocks.
 *
 * For each group of eight elements, gathers one 64-bit word per byte
 * position, bit-transposes it, and spreads the result back across the eight
 * consecutive elements.  Returns size * elem_size on success, -80 if size is
 * not a multiple of eight. */
int64_t bshuf_shuffle_bit_eightelem_scal(const void* in, void* out, \
        const size_t size, const size_t elem_size) {

    const char *in_b;
    char *out_b;
    uint64_t x, t;
    size_t ii, jj, kk;
    size_t nbyte, out_index;

    /* Runtime endianness probe: first byte of a 64-bit 1. */
    uint64_t e=1;
    const int little_endian = *(uint8_t *) &e == 1;
    /* Big-endian hosts write lanes in reverse order; the "negative" skip
     * relies on well-defined unsigned wrap-around. */
    const size_t elem_skip = little_endian ? elem_size : -elem_size;
    const uint64_t elem_offset = little_endian ? 0 : 7 * elem_size;

    CHECK_MULT_EIGHT(size);

    in_b = (const char*) in;
    out_b = (char*) out;
    nbyte = elem_size * size;

    for (jj = 0; jj < 8 * elem_size; jj += 8) {
        for (ii = 0; ii + 8 * elem_size - 1 < nbyte; ii += 8 * elem_size) {
            /* NOTE(review): technically an unaligned uint64_t load -- fine
             * on the targets this library supports, but worth confirming on
             * strict-alignment platforms. */
            x = *((uint64_t*) &in_b[ii + jj]);
            if (little_endian) {
                TRANS_BIT_8X8(x, t);
            } else {
                TRANS_BIT_8X8_BE(x, t);
            }
            /* Scatter the transposed bytes across the eight elements. */
            for (kk = 0; kk < 8; kk++) {
                out_index = ii + jj / 8 + elem_offset + kk * elem_skip;
                *((uint8_t*) &out_b[out_index]) = x;
                x = x >> 8;
            }
        }
    }
    return size * elem_size;
}
/* Untranspose bits within elements (inverse of bshuf_trans_bit_elem_scal).
 *
 * Pipeline: regroup the byte bit-rows, then shuffle the bits back within
 * each block of eight elements.  Uses a size * elem_size scratch buffer.
 * Returns -1 on allocation failure, -80 if size is not a multiple of eight,
 * otherwise size * elem_size. */
int64_t bshuf_untrans_bit_elem_scal(const void* in, void* out, const size_t size,
        const size_t elem_size) {

    int64_t count;
    void *tmp_buf;

    CHECK_MULT_EIGHT(size);

    tmp_buf = malloc(size * elem_size);
    if (tmp_buf == NULL) return -1;

    count = bshuf_trans_byte_bitrow_scal(in, tmp_buf, size, elem_size);
    /* CHECK_ERR_FREE presumably frees tmp_buf and returns on a negative
     * count -- confirm in bitshuffle_internals.h. */
    CHECK_ERR_FREE(count, tmp_buf);
    count = bshuf_shuffle_bit_eightelem_scal(tmp_buf, out, size, elem_size);

    free(tmp_buf);
    return count;
}
/* ---- Worker code that uses Arm NEON ----
*
* The following code makes use of the Arm NEON instruction set.
* NEON technology is the implementation of the ARM Advanced Single
* Instruction Multiple Data (SIMD) extension.
* The NEON unit is the component of the processor that executes SIMD instructions.
* It is also called the NEON Media Processing Engine (MPE).
*
*/
#ifdef USEARMNEON
/* Transpose bytes within elements for 16 bit elements.
 *
 * Four rounds of byte zips fully de-interleave the low and high bytes of 16
 * two-byte elements per iteration; the scalar remainder routine finishes any
 * tail.  NOTE(review): vzip1q_s8/vzip2q_s8 are AArch64 intrinsics -- confirm
 * the ARMv7 build path if one exists. */
int64_t bshuf_trans_byte_elem_NEON_16(const void* in, void* out, const size_t size) {

    size_t ii;
    const char *in_b = (const char*) in;
    char *out_b = (char*) out;
    int8x16_t a0, b0, a1, b1;

    for (ii=0; ii + 15 < size; ii += 16) {
        /* Load 16 elements (32 bytes). */
        a0 = vld1q_s8(in_b + 2*ii + 0*16);
        b0 = vld1q_s8(in_b + 2*ii + 1*16);

        /* Four zip rounds separate even (low) and odd (high) bytes. */
        a1 = vzip1q_s8(a0, b0);
        b1 = vzip2q_s8(a0, b0);

        a0 = vzip1q_s8(a1, b1);
        b0 = vzip2q_s8(a1, b1);

        a1 = vzip1q_s8(a0, b0);
        b1 = vzip2q_s8(a0, b0);

        a0 = vzip1q_s8(a1, b1);
        b0 = vzip2q_s8(a1, b1);

        /* Low bytes to the first row, high bytes to the second. */
        vst1q_s8(out_b + 0*size + ii, a0);
        vst1q_s8(out_b + 1*size + ii, b0);
    }
    /* Scalar cleanup for the final size % 16 elements. */
    return bshuf_trans_byte_elem_remainder(in, out, size, 2,
            size - size % 16);
}
/* Transpose bytes within elements for 32 bit elements.
 *
 * Three rounds of byte zips followed by a 64-bit zip regroup the four byte
 * planes of 16 four-byte elements per iteration; the scalar remainder
 * routine finishes any tail. */
int64_t bshuf_trans_byte_elem_NEON_32(const void* in, void* out, const size_t size) {

    size_t ii;
    const char *in_b;
    char *out_b;
    in_b = (const char*) in;
    out_b = (char*) out;
    int8x16_t a0, b0, c0, d0, a1, b1, c1, d1;
    int64x2_t a2, b2, c2, d2;

    for (ii=0; ii + 15 < size; ii += 16) {
        /* Load 16 elements (64 bytes). */
        a0 = vld1q_s8(in_b + 4*ii + 0*16);
        b0 = vld1q_s8(in_b + 4*ii + 1*16);
        c0 = vld1q_s8(in_b + 4*ii + 2*16);
        d0 = vld1q_s8(in_b + 4*ii + 3*16);

        /* Byte-zip rounds progressively sort bytes by position-in-element. */
        a1 = vzip1q_s8(a0, b0);
        b1 = vzip2q_s8(a0, b0);
        c1 = vzip1q_s8(c0, d0);
        d1 = vzip2q_s8(c0, d0);

        a0 = vzip1q_s8(a1, b1);
        b0 = vzip2q_s8(a1, b1);
        c0 = vzip1q_s8(c1, d1);
        d0 = vzip2q_s8(c1, d1);

        a1 = vzip1q_s8(a0, b0);
        b1 = vzip2q_s8(a0, b0);
        c1 = vzip1q_s8(c0, d0);
        d1 = vzip2q_s8(c0, d0);

        /* Final 64-bit zip assembles one full byte plane per register. */
        a2 = vzip1q_s64(vreinterpretq_s64_s8(a1), vreinterpretq_s64_s8(c1));
        b2 = vzip2q_s64(vreinterpretq_s64_s8(a1), vreinterpretq_s64_s8(c1));
        c2 = vzip1q_s64(vreinterpretq_s64_s8(b1), vreinterpretq_s64_s8(d1));
        d2 = vzip2q_s64(vreinterpretq_s64_s8(b1), vreinterpretq_s64_s8(d1));

        /* One store per byte plane. */
        vst1q_s64((int64_t *) (out_b + 0*size + ii), a2);
        vst1q_s64((int64_t *) (out_b + 1*size + ii), b2);
        vst1q_s64((int64_t *) (out_b + 2*size + ii), c2);
        vst1q_s64((int64_t *) (out_b + 3*size + ii), d2);
    }
    /* Scalar cleanup for the final size % 16 elements. */
    return bshuf_trans_byte_elem_remainder(in, out, size, 4,
            size - size % 16);
}
/* Transpose bytes within elements for 64 bit elements.
 *
 * Three rounds of byte zips, then 32-bit and 64-bit zip rounds, regroup the
 * eight byte planes of 16 eight-byte elements per iteration; the scalar
 * remainder routine finishes any tail. */
int64_t bshuf_trans_byte_elem_NEON_64(const void* in, void* out, const size_t size) {

    size_t ii;
    const char* in_b = (const char*) in;
    char* out_b = (char*) out;
    int8x16_t a0, b0, c0, d0, e0, f0, g0, h0;
    int8x16_t a1, b1, c1, d1, e1, f1, g1, h1;

    for (ii=0; ii + 15 < size; ii += 16) {
        /* Load 16 elements (128 bytes). */
        a0 = vld1q_s8(in_b + 8*ii + 0*16);
        b0 = vld1q_s8(in_b + 8*ii + 1*16);
        c0 = vld1q_s8(in_b + 8*ii + 2*16);
        d0 = vld1q_s8(in_b + 8*ii + 3*16);
        e0 = vld1q_s8(in_b + 8*ii + 4*16);
        f0 = vld1q_s8(in_b + 8*ii + 5*16);
        g0 = vld1q_s8(in_b + 8*ii + 6*16);
        h0 = vld1q_s8(in_b + 8*ii + 7*16);

        /* Byte-zip rounds. */
        a1 = vzip1q_s8 (a0, b0);
        b1 = vzip2q_s8 (a0, b0);
        c1 = vzip1q_s8 (c0, d0);
        d1 = vzip2q_s8 (c0, d0);
        e1 = vzip1q_s8 (e0, f0);
        f1 = vzip2q_s8 (e0, f0);
        g1 = vzip1q_s8 (g0, h0);
        h1 = vzip2q_s8 (g0, h0);

        a0 = vzip1q_s8 (a1, b1);
        b0 = vzip2q_s8 (a1, b1);
        c0 = vzip1q_s8 (c1, d1);
        d0 = vzip2q_s8 (c1, d1);
        e0 = vzip1q_s8 (e1, f1);
        f0 = vzip2q_s8 (e1, f1);
        g0 = vzip1q_s8 (g1, h1);
        h0 = vzip2q_s8 (g1, h1);

        /* 32-bit zip round. */
        a1 = (int8x16_t) vzip1q_s32 (vreinterpretq_s32_s8 (a0), vreinterpretq_s32_s8 (c0));
        b1 = (int8x16_t) vzip2q_s32 (vreinterpretq_s32_s8 (a0), vreinterpretq_s32_s8 (c0));
        c1 = (int8x16_t) vzip1q_s32 (vreinterpretq_s32_s8 (b0), vreinterpretq_s32_s8 (d0));
        d1 = (int8x16_t) vzip2q_s32 (vreinterpretq_s32_s8 (b0), vreinterpretq_s32_s8 (d0));
        e1 = (int8x16_t) vzip1q_s32 (vreinterpretq_s32_s8 (e0), vreinterpretq_s32_s8 (g0));
        f1 = (int8x16_t) vzip2q_s32 (vreinterpretq_s32_s8 (e0), vreinterpretq_s32_s8 (g0));
        g1 = (int8x16_t) vzip1q_s32 (vreinterpretq_s32_s8 (f0), vreinterpretq_s32_s8 (h0));
        h1 = (int8x16_t) vzip2q_s32 (vreinterpretq_s32_s8 (f0), vreinterpretq_s32_s8 (h0));

        /* 64-bit zip round: one full byte plane per register. */
        a0 = (int8x16_t) vzip1q_s64 (vreinterpretq_s64_s8 (a1), vreinterpretq_s64_s8 (e1));
        b0 = (int8x16_t) vzip2q_s64 (vreinterpretq_s64_s8 (a1), vreinterpretq_s64_s8 (e1));
        c0 = (int8x16_t) vzip1q_s64 (vreinterpretq_s64_s8 (b1), vreinterpretq_s64_s8 (f1));
        d0 = (int8x16_t) vzip2q_s64 (vreinterpretq_s64_s8 (b1), vreinterpretq_s64_s8 (f1));
        e0 = (int8x16_t) vzip1q_s64 (vreinterpretq_s64_s8 (c1), vreinterpretq_s64_s8 (g1));
        f0 = (int8x16_t) vzip2q_s64 (vreinterpretq_s64_s8 (c1), vreinterpretq_s64_s8 (g1));
        g0 = (int8x16_t) vzip1q_s64 (vreinterpretq_s64_s8 (d1), vreinterpretq_s64_s8 (h1));
        h0 = (int8x16_t) vzip2q_s64 (vreinterpretq_s64_s8 (d1), vreinterpretq_s64_s8 (h1));

        /* One store per byte plane. */
        vst1q_s8(out_b + 0*size + ii, a0);
        vst1q_s8(out_b + 1*size + ii, b0);
        vst1q_s8(out_b + 2*size + ii, c0);
        vst1q_s8(out_b + 3*size + ii, d0);
        vst1q_s8(out_b + 4*size + ii, e0);
        vst1q_s8(out_b + 5*size + ii, f0);
        vst1q_s8(out_b + 6*size + ii, g0);
        vst1q_s8(out_b + 7*size + ii, h0);
    }
    /* Scalar cleanup for the final size % 16 elements. */
    return bshuf_trans_byte_elem_remainder(in, out, size, 8,
            size - size % 16);
}
/* Transpose bytes within elements using the best NEON algorithm available.
 *
 * Dispatches on elem_size: 1/2/4/8-byte elements go to dedicated kernels;
 * other sizes either fall back to the scalar transpose or are decomposed
 * hierarchically into 8- or 4-byte chunk transposes.  Returns the byte count
 * processed, or -1 on allocation failure. */
int64_t bshuf_trans_byte_elem_NEON(const void* in, void* out, const size_t size,
        const size_t elem_size) {

    int64_t count;

    // Trivial cases: power of 2 bytes.
    switch (elem_size) {
        case 1:
            count = bshuf_copy(in, out, size, elem_size);
            return count;
        case 2:
            count = bshuf_trans_byte_elem_NEON_16(in, out, size);
            return count;
        case 4:
            count = bshuf_trans_byte_elem_NEON_32(in, out, size);
            return count;
        case 8:
            count = bshuf_trans_byte_elem_NEON_64(in, out, size);
            return count;
    }

    // Worst case: odd number of bytes. Turns out that this is faster for
    // (odd * 2) byte elements as well (hence % 4).
    if (elem_size % 4) {
        count = bshuf_trans_byte_elem_scal(in, out, size, elem_size);
        return count;
    }

    // Multiple of power of 2: transpose hierarchically.
    {
        size_t nchunk_elem;
        void* tmp_buf = malloc(size * elem_size);
        if (tmp_buf == NULL) return -1;

        if ((elem_size % 8) == 0) {
            // Treat each element as elem_size / 8 int64 chunks.
            nchunk_elem = elem_size / 8;
            TRANS_ELEM_TYPE(in, out, size, nchunk_elem, int64_t);
            count = bshuf_trans_byte_elem_NEON_64(out, tmp_buf,
                    size * nchunk_elem);
            bshuf_trans_elem(tmp_buf, out, 8, nchunk_elem, size);
        } else if ((elem_size % 4) == 0) {
            // Treat each element as elem_size / 4 int32 chunks.
            nchunk_elem = elem_size / 4;
            TRANS_ELEM_TYPE(in, out, size, nchunk_elem, int32_t);
            count = bshuf_trans_byte_elem_NEON_32(out, tmp_buf,
                    size * nchunk_elem);
            bshuf_trans_elem(tmp_buf, out, 4, nchunk_elem, size);
        } else {
            // Not used since scalar algorithm is faster.
            nchunk_elem = elem_size / 2;
            TRANS_ELEM_TYPE(in, out, size, nchunk_elem, int16_t);
            count = bshuf_trans_byte_elem_NEON_16(out, tmp_buf,
                    size * nchunk_elem);
            bshuf_trans_elem(tmp_buf, out, 2, nchunk_elem, size);
        }

        free(tmp_buf);
        return count;
    }
}
/* Creates a mask made up of the most significant bit of each byte of
 * 'input': bit i of the result is the MSB of byte lane i.  This is the NEON
 * stand-in for SSE2's byte movemask, which NEON lacks as a single
 * instruction.
 * NOTE(review): lane-by-lane extraction is simple but slow; a
 * shift-and-narrow reduction would likely be faster -- measure before
 * changing. */
int32_t move_byte_mask_neon(uint8x16_t input) {
    return ( ((input[0] & 0x80) >> 7) | (((input[1] & 0x80) >> 7) << 1) | (((input[2] & 0x80) >> 7) << 2) | (((input[3] & 0x80) >> 7) << 3)
            | (((input[4] & 0x80) >> 7) << 4) | (((input[5] & 0x80) >> 7) << 5) | (((input[6] & 0x80) >> 7) << 6) | (((input[7] & 0x80) >> 7) << 7)
            | (((input[8] & 0x80) >> 7) << 8) | (((input[9] & 0x80) >> 7) << 9) | (((input[10] & 0x80) >> 7) << 10) | (((input[11] & 0x80) >> 7) << 11)
            | (((input[12] & 0x80) >> 7) << 12) | (((input[13] & 0x80) >> 7) << 13) | (((input[14] & 0x80) >> 7) << 14) | (((input[15] & 0x80) >> 7) << 15)
    );
}
/* Transpose bits within bytes (NEON implementation).
 *
 * Processes 16 bytes per iteration: repeatedly extracts the MSB of every
 * byte into a 16-bit mask, then shifts each byte left by one, producing the
 * eight bit rows from high bit to low.  The scalar remainder routine
 * finishes any tail.  Returns size * elem_size, or -80 if the byte count is
 * not a multiple of eight. */
int64_t bshuf_trans_bit_byte_NEON(const void* in, void* out, const size_t size,
        const size_t elem_size) {

    size_t ii, kk;
    const char* in_b = (const char*) in;
    char* out_b = (char*) out;
    uint16_t* out_ui16;

    int64_t count;

    size_t nbyte = elem_size * size;

    CHECK_MULT_EIGHT(nbyte);

    int16x8_t xmm;
    int32_t bt;

    for (ii = 0; ii + 15 < nbyte; ii += 16) {
        xmm = vld1q_s16((int16_t *) (in_b + ii));
        for (kk = 0; kk < 8; kk++) {
            /* MSBs of the 16 bytes -> one 16-bit chunk of bit row 7-kk. */
            bt = move_byte_mask_neon((uint8x16_t) xmm);
            xmm = vshlq_n_s16(xmm, 1);
            out_ui16 = (uint16_t*) &out_b[((7 - kk) * nbyte + ii) / 8];
            *out_ui16 = bt;
        }
    }
    /* Scalar cleanup for the final nbyte % 16 bytes. */
    count = bshuf_trans_bit_byte_remainder(in, out, size, elem_size,
            nbyte - nbyte % 16);
    return count;
}
/* Transpose bits within elements.
 *
 * Three-stage pipeline: bytes-within-elements, bits-within-bytes, then
 * the eight bit rows. Allocates a size * elem_size scratch buffer;
 * returns -1 on allocation failure or propagates a negative error code
 * from any stage (CHECK_ERR_FREE releases the scratch on error). */
int64_t bshuf_trans_bit_elem_NEON(const void* in, void* out, const size_t size,
const size_t elem_size) {
int64_t count;
CHECK_MULT_EIGHT(size);
void* tmp_buf = malloc(size * elem_size);
if (tmp_buf == NULL) return -1;
count = bshuf_trans_byte_elem_NEON(in, out, size, elem_size);
CHECK_ERR_FREE(count, tmp_buf);
count = bshuf_trans_bit_byte_NEON(out, tmp_buf, size, elem_size);
CHECK_ERR_FREE(count, tmp_buf);
count = bshuf_trans_bitrow_eight(tmp_buf, out, size, elem_size);
free(tmp_buf);
return count;
}
/* For data organized into a row for each bit (8 * elem_size rows), transpose
 * the bytes.
 *
 * Works on 8-row by 16-column tiles: loads one 16-byte slice of each of
 * 8 rows, interleaves them through three zip stages (8-bit, 16-bit,
 * 32-bit), and stores the resulting 8-byte columns. Columns that do not
 * fill a 16-wide tile are copied scalar in the remainder loop.
 * Returns size * elem_size; size must be a multiple of 8. */
int64_t bshuf_trans_byte_bitrow_NEON(const void* in, void* out, const size_t size,
const size_t elem_size) {
size_t ii, jj;
const char* in_b = (const char*) in;
char* out_b = (char*) out;
CHECK_MULT_EIGHT(size);
size_t nrows = 8 * elem_size;
size_t nbyte_row = size / 8;
int8x16_t a0, b0, c0, d0, e0, f0, g0, h0;
int8x16_t a1, b1, c1, d1, e1, f1, g1, h1;
int64x1_t *as, *bs, *cs, *ds, *es, *fs, *gs, *hs;
for (ii = 0; ii + 7 < nrows; ii += 8) {
for (jj = 0; jj + 15 < nbyte_row; jj += 16) {
/* Load 16 bytes from each of the 8 rows in this tile. */
a0 = vld1q_s8(in_b + (ii + 0)*nbyte_row + jj);
b0 = vld1q_s8(in_b + (ii + 1)*nbyte_row + jj);
c0 = vld1q_s8(in_b + (ii + 2)*nbyte_row + jj);
d0 = vld1q_s8(in_b + (ii + 3)*nbyte_row + jj);
e0 = vld1q_s8(in_b + (ii + 4)*nbyte_row + jj);
f0 = vld1q_s8(in_b + (ii + 5)*nbyte_row + jj);
g0 = vld1q_s8(in_b + (ii + 6)*nbyte_row + jj);
h0 = vld1q_s8(in_b + (ii + 7)*nbyte_row + jj);
/* Stage 1: interleave at byte granularity. */
a1 = vzip1q_s8(a0, b0);
b1 = vzip1q_s8(c0, d0);
c1 = vzip1q_s8(e0, f0);
d1 = vzip1q_s8(g0, h0);
e1 = vzip2q_s8(a0, b0);
f1 = vzip2q_s8(c0, d0);
g1 = vzip2q_s8(e0, f0);
h1 = vzip2q_s8(g0, h0);
/* Stage 2: interleave at 16-bit granularity. */
a0 = (int8x16_t) vzip1q_s16 (vreinterpretq_s16_s8 (a1), vreinterpretq_s16_s8 (b1));
b0= (int8x16_t) vzip1q_s16 (vreinterpretq_s16_s8 (c1), vreinterpretq_s16_s8 (d1));
c0 = (int8x16_t) vzip2q_s16 (vreinterpretq_s16_s8 (a1), vreinterpretq_s16_s8 (b1));
d0 = (int8x16_t) vzip2q_s16 (vreinterpretq_s16_s8 (c1), vreinterpretq_s16_s8 (d1));
e0 = (int8x16_t) vzip1q_s16 (vreinterpretq_s16_s8 (e1), vreinterpretq_s16_s8 (f1));
f0 = (int8x16_t) vzip1q_s16 (vreinterpretq_s16_s8 (g1), vreinterpretq_s16_s8 (h1));
g0 = (int8x16_t) vzip2q_s16 (vreinterpretq_s16_s8 (e1), vreinterpretq_s16_s8 (f1));
h0 = (int8x16_t) vzip2q_s16 (vreinterpretq_s16_s8 (g1), vreinterpretq_s16_s8 (h1));
/* Stage 3: interleave at 32-bit granularity; each register now
 * holds two complete 8-byte output columns. */
a1 = (int8x16_t) vzip1q_s32 (vreinterpretq_s32_s8 (a0), vreinterpretq_s32_s8 (b0));
b1 = (int8x16_t) vzip2q_s32 (vreinterpretq_s32_s8 (a0), vreinterpretq_s32_s8 (b0));
c1 = (int8x16_t) vzip1q_s32 (vreinterpretq_s32_s8 (c0), vreinterpretq_s32_s8 (d0));
d1 = (int8x16_t) vzip2q_s32 (vreinterpretq_s32_s8 (c0), vreinterpretq_s32_s8 (d0));
e1 = (int8x16_t) vzip1q_s32 (vreinterpretq_s32_s8 (e0), vreinterpretq_s32_s8 (f0));
f1 = (int8x16_t) vzip2q_s32 (vreinterpretq_s32_s8 (e0), vreinterpretq_s32_s8 (f0));
g1 = (int8x16_t) vzip1q_s32 (vreinterpretq_s32_s8 (g0), vreinterpretq_s32_s8 (h0));
h1 = (int8x16_t) vzip2q_s32 (vreinterpretq_s32_s8 (g0), vreinterpretq_s32_s8 (h0));
/* Store each 64-bit half as one 8-byte output column. */
as = (int64x1_t *) &a1;
bs = (int64x1_t *) &b1;
cs = (int64x1_t *) &c1;
ds = (int64x1_t *) &d1;
es = (int64x1_t *) &e1;
fs = (int64x1_t *) &f1;
gs = (int64x1_t *) &g1;
hs = (int64x1_t *) &h1;
vst1_s64((int64_t *)(out_b + (jj + 0) * nrows + ii), *as);
vst1_s64((int64_t *)(out_b + (jj + 1) * nrows + ii), *(as + 1));
vst1_s64((int64_t *)(out_b + (jj + 2) * nrows + ii), *bs);
vst1_s64((int64_t *)(out_b + (jj + 3) * nrows + ii), *(bs + 1));
vst1_s64((int64_t *)(out_b + (jj + 4) * nrows + ii), *cs);
vst1_s64((int64_t *)(out_b + (jj + 5) * nrows + ii), *(cs + 1));
vst1_s64((int64_t *)(out_b + (jj + 6) * nrows + ii), *ds);
vst1_s64((int64_t *)(out_b + (jj + 7) * nrows + ii), *(ds + 1));
vst1_s64((int64_t *)(out_b + (jj + 8) * nrows + ii), *es);
vst1_s64((int64_t *)(out_b + (jj + 9) * nrows + ii), *(es + 1));
vst1_s64((int64_t *)(out_b + (jj + 10) * nrows + ii), *fs);
vst1_s64((int64_t *)(out_b + (jj + 11) * nrows + ii), *(fs + 1));
vst1_s64((int64_t *)(out_b + (jj + 12) * nrows + ii), *gs);
vst1_s64((int64_t *)(out_b + (jj + 13) * nrows + ii), *(gs + 1));
vst1_s64((int64_t *)(out_b + (jj + 14) * nrows + ii), *hs);
vst1_s64((int64_t *)(out_b + (jj + 15) * nrows + ii), *(hs + 1));
}
/* Scalar remainder for columns past the last full 16-wide tile. */
for (jj = nbyte_row - nbyte_row % 16; jj < nbyte_row; jj ++) {
out_b[jj * nrows + ii + 0] = in_b[(ii + 0)*nbyte_row + jj];
out_b[jj * nrows + ii + 1] = in_b[(ii + 1)*nbyte_row + jj];
out_b[jj * nrows + ii + 2] = in_b[(ii + 2)*nbyte_row + jj];
out_b[jj * nrows + ii + 3] = in_b[(ii + 3)*nbyte_row + jj];
out_b[jj * nrows + ii + 4] = in_b[(ii + 4)*nbyte_row + jj];
out_b[jj * nrows + ii + 5] = in_b[(ii + 5)*nbyte_row + jj];
out_b[jj * nrows + ii + 6] = in_b[(ii + 6)*nbyte_row + jj];
out_b[jj * nrows + ii + 7] = in_b[(ii + 7)*nbyte_row + jj];
}
}
return size * elem_size;
}
/* Shuffle bits within the bytes of eight element blocks.
 *
 * Odd element sizes fall through to the scalar implementation; even
 * sizes process 16 input bytes at a time, peeling one bit plane per
 * inner pass (as in bshuf_trans_bit_byte_NEON) but scattering the
 * 16-bit masks within each 8-element block. Returns size * elem_size;
 * size must be a multiple of 8. */
int64_t bshuf_shuffle_bit_eightelem_NEON(const void* in, void* out, const size_t size,
const size_t elem_size) {
CHECK_MULT_EIGHT(size);
// With a bit of care, this could be written such that such that it is
// in_buf = out_buf safe.
const char* in_b = (const char*) in;
uint16_t* out_ui16 = (uint16_t*) out;
size_t ii, jj, kk;
size_t nbyte = elem_size * size;
int16x8_t xmm;
int32_t bt;
if (elem_size % 2) {
/* Odd element size: scalar fallback (same return value). */
bshuf_shuffle_bit_eightelem_scal(in, out, size, elem_size);
} else {
for (ii = 0; ii + 8 * elem_size - 1 < nbyte;
ii += 8 * elem_size) {
for (jj = 0; jj + 15 < 8 * elem_size; jj += 16) {
xmm = vld1q_s16((int16_t *) &in_b[ii + jj]);
for (kk = 0; kk < 8; kk++) {
/* MSB mask of 16 bytes, then shift next bit up. */
bt = move_byte_mask_neon((uint8x16_t) xmm);
xmm = vshlq_n_s16(xmm, 1);
size_t ind = (ii + jj / 8 + (7 - kk) * elem_size);
out_ui16[ind / 2] = bt;
}
}
}
}
return size * elem_size;
}
/* Untranspose bits within elements.
 *
 * Inverse of bshuf_trans_bit_elem_NEON: byte-bitrow transpose into a
 * scratch buffer, then bit shuffle within eight-element blocks.
 * Returns -1 on allocation failure or a negative error code from the
 * first stage. */
int64_t bshuf_untrans_bit_elem_NEON(const void* in, void* out, const size_t size,
const size_t elem_size) {
int64_t count;
CHECK_MULT_EIGHT(size);
void* tmp_buf = malloc(size * elem_size);
if (tmp_buf == NULL) return -1;
count = bshuf_trans_byte_bitrow_NEON(in, tmp_buf, size, elem_size);
CHECK_ERR_FREE(count, tmp_buf);
count = bshuf_shuffle_bit_eightelem_NEON(tmp_buf, out, size, elem_size);
free(tmp_buf);
return count;
}
#else // #ifdef USEARMNEON
/* NEON unavailable in this build: every NEON entry point is a stub that
 * fails immediately with the sentinel error code -13, letting callers
 * detect the missing instruction-set support uniformly. */
int64_t bshuf_untrans_bit_elem_NEON(const void* in, void* out, const size_t size,
const size_t elem_size) {
return -13;
}
int64_t bshuf_trans_bit_elem_NEON(const void* in, void* out, const size_t size,
const size_t elem_size) {
return -13;
}
int64_t bshuf_trans_byte_bitrow_NEON(const void* in, void* out, const size_t size,
const size_t elem_size) {
return -13;
}
int64_t bshuf_trans_bit_byte_NEON(const void* in, void* out, const size_t size,
const size_t elem_size) {
return -13;
}
int64_t bshuf_trans_byte_elem_NEON(const void* in, void* out, const size_t size,
const size_t elem_size) {
return -13;
}
int64_t bshuf_trans_byte_elem_NEON_64(const void* in, void* out, const size_t size) {
return -13;
}
int64_t bshuf_trans_byte_elem_NEON_32(const void* in, void* out, const size_t size) {
return -13;
}
int64_t bshuf_trans_byte_elem_NEON_16(const void* in, void* out, const size_t size) {
return -13;
}
int64_t bshuf_shuffle_bit_eightelem_NEON(const void* in, void* out, const size_t size,
const size_t elem_size) {
return -13;
}
#endif
/* ---- Worker code that uses SSE2 ----
*
* The following code makes use of the SSE2 instruction set and specialized
* 16 byte registers. The SSE2 instructions are present on modern x86
* processors. The first Intel processor microarchitecture supporting SSE2 was
* Pentium 4 (2000).
*
*/
#ifdef USESSE2
/* Transpose bytes within elements for 16 bit elements.
 *
 * Separates the low and high bytes of 16 consecutive 2-byte elements per
 * iteration via four rounds of byte unpacking; output row 0 holds all
 * low bytes, row 1 all high bytes. The tail (size % 16 elements) is
 * handled by the scalar remainder routine, whose count is returned. */
int64_t bshuf_trans_byte_elem_SSE_16(const void* in, void* out, const size_t size) {
size_t ii;
const char *in_b = (const char*) in;
char *out_b = (char*) out;
__m128i a0, b0, a1, b1;
for (ii=0; ii + 15 < size; ii += 16) {
a0 = _mm_loadu_si128((__m128i *) &in_b[2*ii + 0*16]);
b0 = _mm_loadu_si128((__m128i *) &in_b[2*ii + 1*16]);
/* Repeated unpack: a perfect shuffle that after 4 rounds groups
 * byte 0 of every element in a0 and byte 1 in b0. */
a1 = _mm_unpacklo_epi8(a0, b0);
b1 = _mm_unpackhi_epi8(a0, b0);
a0 = _mm_unpacklo_epi8(a1, b1);
b0 = _mm_unpackhi_epi8(a1, b1);
a1 = _mm_unpacklo_epi8(a0, b0);
b1 = _mm_unpackhi_epi8(a0, b0);
a0 = _mm_unpacklo_epi8(a1, b1);
b0 = _mm_unpackhi_epi8(a1, b1);
_mm_storeu_si128((__m128i *) &out_b[0*size + ii], a0);
_mm_storeu_si128((__m128i *) &out_b[1*size + ii], b0);
}
return bshuf_trans_byte_elem_remainder(in, out, size, 2,
size - size % 16);
}
/* Transpose bytes within elements for 32 bit elements.
 *
 * Separates the four byte lanes of 16 consecutive 4-byte elements per
 * iteration: three rounds of byte unpacking followed by a 64-bit unpack
 * to place byte k of every element in output row k. The tail
 * (size % 16 elements) is handled by the scalar remainder routine,
 * whose count is returned. */
int64_t bshuf_trans_byte_elem_SSE_32(const void* in, void* out, const size_t size) {
size_t ii;
/* Initialize at declaration, consistent with the 16/64-bit variants. */
const char *in_b = (const char*) in;
char *out_b = (char*) out;
__m128i a0, b0, c0, d0, a1, b1, c1, d1;
for (ii=0; ii + 15 < size; ii += 16) {
a0 = _mm_loadu_si128((__m128i *) &in_b[4*ii + 0*16]);
b0 = _mm_loadu_si128((__m128i *) &in_b[4*ii + 1*16]);
c0 = _mm_loadu_si128((__m128i *) &in_b[4*ii + 2*16]);
d0 = _mm_loadu_si128((__m128i *) &in_b[4*ii + 3*16]);
a1 = _mm_unpacklo_epi8(a0, b0);
b1 = _mm_unpackhi_epi8(a0, b0);
c1 = _mm_unpacklo_epi8(c0, d0);
d1 = _mm_unpackhi_epi8(c0, d0);
a0 = _mm_unpacklo_epi8(a1, b1);
b0 = _mm_unpackhi_epi8(a1, b1);
c0 = _mm_unpacklo_epi8(c1, d1);
d0 = _mm_unpackhi_epi8(c1, d1);
a1 = _mm_unpacklo_epi8(a0, b0);
b1 = _mm_unpackhi_epi8(a0, b0);
c1 = _mm_unpacklo_epi8(c0, d0);
d1 = _mm_unpackhi_epi8(c0, d0);
/* Final 64-bit unpack assembles one full byte-lane per register. */
a0 = _mm_unpacklo_epi64(a1, c1);
b0 = _mm_unpackhi_epi64(a1, c1);
c0 = _mm_unpacklo_epi64(b1, d1);
d0 = _mm_unpackhi_epi64(b1, d1);
_mm_storeu_si128((__m128i *) &out_b[0*size + ii], a0);
_mm_storeu_si128((__m128i *) &out_b[1*size + ii], b0);
_mm_storeu_si128((__m128i *) &out_b[2*size + ii], c0);
_mm_storeu_si128((__m128i *) &out_b[3*size + ii], d0);
}
return bshuf_trans_byte_elem_remainder(in, out, size, 4,
size - size % 16);
}
/* Transpose bytes within elements for 64 bit elements.
 *
 * Separates the eight byte lanes of 16 consecutive 8-byte elements per
 * iteration: two rounds of byte unpacking, a 32-bit unpack, and a final
 * 64-bit unpack place byte k of every element in output row k. The tail
 * (size % 16 elements) goes to the scalar remainder routine, whose
 * count is returned. */
int64_t bshuf_trans_byte_elem_SSE_64(const void* in, void* out, const size_t size) {
size_t ii;
const char* in_b = (const char*) in;
char* out_b = (char*) out;
__m128i a0, b0, c0, d0, e0, f0, g0, h0;
__m128i a1, b1, c1, d1, e1, f1, g1, h1;
for (ii=0; ii + 15 < size; ii += 16) {
a0 = _mm_loadu_si128((__m128i *) &in_b[8*ii + 0*16]);
b0 = _mm_loadu_si128((__m128i *) &in_b[8*ii + 1*16]);
c0 = _mm_loadu_si128((__m128i *) &in_b[8*ii + 2*16]);
d0 = _mm_loadu_si128((__m128i *) &in_b[8*ii + 3*16]);
e0 = _mm_loadu_si128((__m128i *) &in_b[8*ii + 4*16]);
f0 = _mm_loadu_si128((__m128i *) &in_b[8*ii + 5*16]);
g0 = _mm_loadu_si128((__m128i *) &in_b[8*ii + 6*16]);
h0 = _mm_loadu_si128((__m128i *) &in_b[8*ii + 7*16]);
/* Round 1: byte interleave of adjacent register pairs. */
a1 = _mm_unpacklo_epi8(a0, b0);
b1 = _mm_unpackhi_epi8(a0, b0);
c1 = _mm_unpacklo_epi8(c0, d0);
d1 = _mm_unpackhi_epi8(c0, d0);
e1 = _mm_unpacklo_epi8(e0, f0);
f1 = _mm_unpackhi_epi8(e0, f0);
g1 = _mm_unpacklo_epi8(g0, h0);
h1 = _mm_unpackhi_epi8(g0, h0);
/* Round 2: byte interleave again. */
a0 = _mm_unpacklo_epi8(a1, b1);
b0 = _mm_unpackhi_epi8(a1, b1);
c0 = _mm_unpacklo_epi8(c1, d1);
d0 = _mm_unpackhi_epi8(c1, d1);
e0 = _mm_unpacklo_epi8(e1, f1);
f0 = _mm_unpackhi_epi8(e1, f1);
g0 = _mm_unpacklo_epi8(g1, h1);
h0 = _mm_unpackhi_epi8(g1, h1);
/* Round 3: 32-bit interleave. */
a1 = _mm_unpacklo_epi32(a0, c0);
b1 = _mm_unpackhi_epi32(a0, c0);
c1 = _mm_unpacklo_epi32(b0, d0);
d1 = _mm_unpackhi_epi32(b0, d0);
e1 = _mm_unpacklo_epi32(e0, g0);
f1 = _mm_unpackhi_epi32(e0, g0);
g1 = _mm_unpacklo_epi32(f0, h0);
h1 = _mm_unpackhi_epi32(f0, h0);
/* Round 4: 64-bit interleave completes one byte-lane per register. */
a0 = _mm_unpacklo_epi64(a1, e1);
b0 = _mm_unpackhi_epi64(a1, e1);
c0 = _mm_unpacklo_epi64(b1, f1);
d0 = _mm_unpackhi_epi64(b1, f1);
e0 = _mm_unpacklo_epi64(c1, g1);
f0 = _mm_unpackhi_epi64(c1, g1);
g0 = _mm_unpacklo_epi64(d1, h1);
h0 = _mm_unpackhi_epi64(d1, h1);
_mm_storeu_si128((__m128i *) &out_b[0*size + ii], a0);
_mm_storeu_si128((__m128i *) &out_b[1*size + ii], b0);
_mm_storeu_si128((__m128i *) &out_b[2*size + ii], c0);
_mm_storeu_si128((__m128i *) &out_b[3*size + ii], d0);
_mm_storeu_si128((__m128i *) &out_b[4*size + ii], e0);
_mm_storeu_si128((__m128i *) &out_b[5*size + ii], f0);
_mm_storeu_si128((__m128i *) &out_b[6*size + ii], g0);
_mm_storeu_si128((__m128i *) &out_b[7*size + ii], h0);
}
return bshuf_trans_byte_elem_remainder(in, out, size, 8,
size - size % 16);
}
/* Transpose bytes within elements using best SSE algorithm available.
 *
 * Dispatch: power-of-two element sizes (1/2/4/8) go straight to the
 * specialized kernels; odd and (odd * 2) sizes use the scalar path,
 * which is faster there; remaining multiples of 4 are transposed
 * hierarchically via a scratch buffer. Returns the processed byte
 * count or -1 on allocation failure. */
int64_t bshuf_trans_byte_elem_SSE(const void* in, void* out, const size_t size,
const size_t elem_size) {
int64_t count;
// Trivial cases: power of 2 bytes.
switch (elem_size) {
case 1:
count = bshuf_copy(in, out, size, elem_size);
return count;
case 2:
count = bshuf_trans_byte_elem_SSE_16(in, out, size);
return count;
case 4:
count = bshuf_trans_byte_elem_SSE_32(in, out, size);
return count;
case 8:
count = bshuf_trans_byte_elem_SSE_64(in, out, size);
return count;
}
// Worst case: odd number of bytes. Turns out that this is faster for
// (odd * 2) byte elements as well (hence % 4).
if (elem_size % 4) {
count = bshuf_trans_byte_elem_scal(in, out, size, elem_size);
return count;
}
// Multiple of power of 2: transpose hierarchically.
{
size_t nchunk_elem;
void* tmp_buf = malloc(size * elem_size);
if (tmp_buf == NULL) return -1;
if ((elem_size % 8) == 0) {
/* Treat each element as nchunk_elem 8-byte chunks. */
nchunk_elem = elem_size / 8;
TRANS_ELEM_TYPE(in, out, size, nchunk_elem, int64_t);
count = bshuf_trans_byte_elem_SSE_64(out, tmp_buf,
size * nchunk_elem);
bshuf_trans_elem(tmp_buf, out, 8, nchunk_elem, size);
} else if ((elem_size % 4) == 0) {
nchunk_elem = elem_size / 4;
TRANS_ELEM_TYPE(in, out, size, nchunk_elem, int32_t);
count = bshuf_trans_byte_elem_SSE_32(out, tmp_buf,
size * nchunk_elem);
bshuf_trans_elem(tmp_buf, out, 4, nchunk_elem, size);
} else {
// Not used since scalar algorithm is faster.
nchunk_elem = elem_size / 2;
#ifdef _WIN32 // MSVC historically lacked `int16_t`; use __int16 there.
TRANS_ELEM_TYPE(in, out, size, nchunk_elem, __int16);
#else
TRANS_ELEM_TYPE(in, out, size, nchunk_elem, int16_t);
#endif
count = bshuf_trans_byte_elem_SSE_16(out, tmp_buf,
size * nchunk_elem);
bshuf_trans_elem(tmp_buf, out, 2, nchunk_elem, size);
}
free(tmp_buf);
return count;
}
}
/* Transpose bits within bytes.
 *
 * SSE2 counterpart of bshuf_trans_bit_byte_NEON: for every group of 8
 * bytes, bit k of each byte is gathered into one output byte, with the
 * eight bit planes spaced nbyte/8 bytes apart. Returns the count from
 * the scalar remainder routine or a negative error code; nbyte must be
 * a multiple of 8. */
int64_t bshuf_trans_bit_byte_SSE(const void* in, void* out, const size_t size,
const size_t elem_size) {
size_t ii, kk;
const char* in_b = (const char*) in;
char* out_b = (char*) out;
uint16_t* out_ui16;
int64_t count;
size_t nbyte = elem_size * size;
CHECK_MULT_EIGHT(nbyte);
__m128i xmm;
int32_t bt;
for (ii = 0; ii + 15 < nbyte; ii += 16) {
xmm = _mm_loadu_si128((__m128i *) &in_b[ii]);
for (kk = 0; kk < 8; kk++) {
/* movemask grabs the MSB of all 16 bytes; then shift the next
 * bit into MSB position. */
bt = _mm_movemask_epi8(xmm);
xmm = _mm_slli_epi16(xmm, 1);
out_ui16 = (uint16_t*) &out_b[((7 - kk) * nbyte + ii) / 8];
*out_ui16 = bt;
}
}
count = bshuf_trans_bit_byte_remainder(in, out, size, elem_size,
nbyte - nbyte % 16);
return count;
}
/* Transpose bits within elements.
 *
 * Same three-stage pipeline as the NEON variant, using the SSE kernels:
 * bytes-within-elements, bits-within-bytes, then the eight bit rows.
 * Returns -1 on allocation failure or propagates a negative error code
 * from a stage (CHECK_ERR_FREE frees the scratch buffer on error). */
int64_t bshuf_trans_bit_elem_SSE(const void* in, void* out, const size_t size,
const size_t elem_size) {
int64_t count;
CHECK_MULT_EIGHT(size);
void* tmp_buf = malloc(size * elem_size);
if (tmp_buf == NULL) return -1;
count = bshuf_trans_byte_elem_SSE(in, out, size, elem_size);
CHECK_ERR_FREE(count, tmp_buf);
count = bshuf_trans_bit_byte_SSE(out, tmp_buf, size, elem_size);
CHECK_ERR_FREE(count, tmp_buf);
count = bshuf_trans_bitrow_eight(tmp_buf, out, size, elem_size);
free(tmp_buf);
return count;
}
/* For data organized into a row for each bit (8 * elem_size rows), transpose
 * the bytes.
 *
 * Processes 8-row by 16-column tiles: three unpack stages (8-, 16-,
 * 32-bit) interleave the rows so that each 64-bit register half is one
 * 8-byte output column, stored via the float storel/storeh pair.
 * Columns past the last full tile are copied scalar. Returns
 * size * elem_size; size must be a multiple of 8. */
int64_t bshuf_trans_byte_bitrow_SSE(const void* in, void* out, const size_t size,
const size_t elem_size) {
size_t ii, jj;
const char* in_b = (const char*) in;
char* out_b = (char*) out;
CHECK_MULT_EIGHT(size);
size_t nrows = 8 * elem_size;
size_t nbyte_row = size / 8;
__m128i a0, b0, c0, d0, e0, f0, g0, h0;
__m128i a1, b1, c1, d1, e1, f1, g1, h1;
__m128 *as, *bs, *cs, *ds, *es, *fs, *gs, *hs;
for (ii = 0; ii + 7 < nrows; ii += 8) {
for (jj = 0; jj + 15 < nbyte_row; jj += 16) {
/* Load 16 bytes from each of the 8 rows in this tile. */
a0 = _mm_loadu_si128((__m128i *) &in_b[(ii + 0)*nbyte_row + jj]);
b0 = _mm_loadu_si128((__m128i *) &in_b[(ii + 1)*nbyte_row + jj]);
c0 = _mm_loadu_si128((__m128i *) &in_b[(ii + 2)*nbyte_row + jj]);
d0 = _mm_loadu_si128((__m128i *) &in_b[(ii + 3)*nbyte_row + jj]);
e0 = _mm_loadu_si128((__m128i *) &in_b[(ii + 4)*nbyte_row + jj]);
f0 = _mm_loadu_si128((__m128i *) &in_b[(ii + 5)*nbyte_row + jj]);
g0 = _mm_loadu_si128((__m128i *) &in_b[(ii + 6)*nbyte_row + jj]);
h0 = _mm_loadu_si128((__m128i *) &in_b[(ii + 7)*nbyte_row + jj]);
/* Stage 1: byte interleave. */
a1 = _mm_unpacklo_epi8(a0, b0);
b1 = _mm_unpacklo_epi8(c0, d0);
c1 = _mm_unpacklo_epi8(e0, f0);
d1 = _mm_unpacklo_epi8(g0, h0);
e1 = _mm_unpackhi_epi8(a0, b0);
f1 = _mm_unpackhi_epi8(c0, d0);
g1 = _mm_unpackhi_epi8(e0, f0);
h1 = _mm_unpackhi_epi8(g0, h0);
/* Stage 2: 16-bit interleave. */
a0 = _mm_unpacklo_epi16(a1, b1);
b0 = _mm_unpacklo_epi16(c1, d1);
c0 = _mm_unpackhi_epi16(a1, b1);
d0 = _mm_unpackhi_epi16(c1, d1);
e0 = _mm_unpacklo_epi16(e1, f1);
f0 = _mm_unpacklo_epi16(g1, h1);
g0 = _mm_unpackhi_epi16(e1, f1);
h0 = _mm_unpackhi_epi16(g1, h1);
/* Stage 3: 32-bit interleave; each half-register is now one
 * 8-byte output column. */
a1 = _mm_unpacklo_epi32(a0, b0);
b1 = _mm_unpackhi_epi32(a0, b0);
c1 = _mm_unpacklo_epi32(c0, d0);
d1 = _mm_unpackhi_epi32(c0, d0);
e1 = _mm_unpacklo_epi32(e0, f0);
f1 = _mm_unpackhi_epi32(e0, f0);
g1 = _mm_unpacklo_epi32(g0, h0);
h1 = _mm_unpackhi_epi32(g0, h0);
// We don't have a storeh instruction for integers, so interpret
// as a float. Have a storel (_mm_storel_epi64).
as = (__m128 *) &a1;
bs = (__m128 *) &b1;
cs = (__m128 *) &c1;
ds = (__m128 *) &d1;
es = (__m128 *) &e1;
fs = (__m128 *) &f1;
gs = (__m128 *) &g1;
hs = (__m128 *) &h1;
_mm_storel_pi((__m64 *) &out_b[(jj + 0) * nrows + ii], *as);
_mm_storel_pi((__m64 *) &out_b[(jj + 2) * nrows + ii], *bs);
_mm_storel_pi((__m64 *) &out_b[(jj + 4) * nrows + ii], *cs);
_mm_storel_pi((__m64 *) &out_b[(jj + 6) * nrows + ii], *ds);
_mm_storel_pi((__m64 *) &out_b[(jj + 8) * nrows + ii], *es);
_mm_storel_pi((__m64 *) &out_b[(jj + 10) * nrows + ii], *fs);
_mm_storel_pi((__m64 *) &out_b[(jj + 12) * nrows + ii], *gs);
_mm_storel_pi((__m64 *) &out_b[(jj + 14) * nrows + ii], *hs);
_mm_storeh_pi((__m64 *) &out_b[(jj + 1) * nrows + ii], *as);
_mm_storeh_pi((__m64 *) &out_b[(jj + 3) * nrows + ii], *bs);
_mm_storeh_pi((__m64 *) &out_b[(jj + 5) * nrows + ii], *cs);
_mm_storeh_pi((__m64 *) &out_b[(jj + 7) * nrows + ii], *ds);
_mm_storeh_pi((__m64 *) &out_b[(jj + 9) * nrows + ii], *es);
_mm_storeh_pi((__m64 *) &out_b[(jj + 11) * nrows + ii], *fs);
_mm_storeh_pi((__m64 *) &out_b[(jj + 13) * nrows + ii], *gs);
_mm_storeh_pi((__m64 *) &out_b[(jj + 15) * nrows + ii], *hs);
}
/* Scalar remainder for columns past the last full 16-wide tile. */
for (jj = nbyte_row - nbyte_row % 16; jj < nbyte_row; jj ++) {
out_b[jj * nrows + ii + 0] = in_b[(ii + 0)*nbyte_row + jj];
out_b[jj * nrows + ii + 1] = in_b[(ii + 1)*nbyte_row + jj];
out_b[jj * nrows + ii + 2] = in_b[(ii + 2)*nbyte_row + jj];
out_b[jj * nrows + ii + 3] = in_b[(ii + 3)*nbyte_row + jj];
out_b[jj * nrows + ii + 4] = in_b[(ii + 4)*nbyte_row + jj];
out_b[jj * nrows + ii + 5] = in_b[(ii + 5)*nbyte_row + jj];
out_b[jj * nrows + ii + 6] = in_b[(ii + 6)*nbyte_row + jj];
out_b[jj * nrows + ii + 7] = in_b[(ii + 7)*nbyte_row + jj];
}
}
return size * elem_size;
}
/* Shuffle bits within the bytes of eight element blocks.
 *
 * Odd element sizes fall through to the scalar implementation; even
 * sizes process 16 bytes at a time, extracting one bit plane per inner
 * pass with movemask and scattering the 16-bit masks within each
 * 8-element block. Returns size * elem_size; size must be a multiple
 * of 8. */
int64_t bshuf_shuffle_bit_eightelem_SSE(const void* in, void* out, const size_t size,
const size_t elem_size) {
CHECK_MULT_EIGHT(size);
// With a bit of care, this could be written such that such that it is
// in_buf = out_buf safe.
const char* in_b = (const char*) in;
uint16_t* out_ui16 = (uint16_t*) out;
size_t ii, jj, kk;
size_t nbyte = elem_size * size;
__m128i xmm;
int32_t bt;
if (elem_size % 2) {
/* Odd element size: scalar fallback (same return value). */
bshuf_shuffle_bit_eightelem_scal(in, out, size, elem_size);
} else {
for (ii = 0; ii + 8 * elem_size - 1 < nbyte;
ii += 8 * elem_size) {
for (jj = 0; jj + 15 < 8 * elem_size; jj += 16) {
xmm = _mm_loadu_si128((__m128i *) &in_b[ii + jj]);
for (kk = 0; kk < 8; kk++) {
bt = _mm_movemask_epi8(xmm);
xmm = _mm_slli_epi16(xmm, 1);
size_t ind = (ii + jj / 8 + (7 - kk) * elem_size);
out_ui16[ind / 2] = bt;
}
}
}
}
return size * elem_size;
}
/* Untranspose bits within elements.
 *
 * Inverse of bshuf_trans_bit_elem_SSE: byte-bitrow transpose into a
 * scratch buffer, then bit shuffle within eight-element blocks.
 * Returns -1 on allocation failure or a negative error code from the
 * first stage. */
int64_t bshuf_untrans_bit_elem_SSE(const void* in, void* out, const size_t size,
const size_t elem_size) {
int64_t count;
CHECK_MULT_EIGHT(size);
void* tmp_buf = malloc(size * elem_size);
if (tmp_buf == NULL) return -1;
count = bshuf_trans_byte_bitrow_SSE(in, tmp_buf, size, elem_size);
CHECK_ERR_FREE(count, tmp_buf);
count = bshuf_shuffle_bit_eightelem_SSE(tmp_buf, out, size, elem_size);
free(tmp_buf);
return count;
}
#else // #ifdef USESSE2
/* SSE2 unavailable in this build: every SSE entry point is a stub that
 * fails immediately with the sentinel error code -11, letting callers
 * detect the missing instruction-set support uniformly. */
int64_t bshuf_untrans_bit_elem_SSE(const void* in, void* out, const size_t size,
const size_t elem_size) {
return -11;
}
int64_t bshuf_trans_bit_elem_SSE(const void* in, void* out, const size_t size,
const size_t elem_size) {
return -11;
}
int64_t bshuf_trans_byte_bitrow_SSE(const void* in, void* out, const size_t size,
const size_t elem_size) {
return -11;
}
int64_t bshuf_trans_bit_byte_SSE(const void* in, void* out, const size_t size,
const size_t elem_size) {
return -11;
}
int64_t bshuf_trans_byte_elem_SSE(const void* in, void* out, const size_t size,
const size_t elem_size) {
return -11;
}
int64_t bshuf_trans_byte_elem_SSE_64(const void* in, void* out, const size_t size) {
return -11;
}
int64_t bshuf_trans_byte_elem_SSE_32(const void* in, void* out, const size_t size) {
return -11;
}
int64_t bshuf_trans_byte_elem_SSE_16(const void* in, void* out, const size_t size) {
return -11;
}
int64_t bshuf_shuffle_bit_eightelem_SSE(const void* in, void* out, const size_t size,
const size_t elem_size) {
return -11;
}
#endif // #ifdef USESSE2
/* ---- Code that requires AVX2. Intel Haswell (2013) and later. ---- */
/* ---- Worker code that uses AVX2 ----
*
* The following code makes use of the AVX2 instruction set and specialized
* 32 byte registers. The AVX2 instructions are present on newer x86
* processors. The first Intel processor microarchitecture supporting AVX2 was
* Haswell (2013).
*
*/
#ifdef USEAVX2
/* Transpose bits within bytes.
 *
 * AVX2 version of the bit-plane transpose: 32 bytes per iteration,
 * using 256-bit movemask to peel one bit plane (a 32-bit mask) per
 * inner pass. Trailing nbyte % 32 bytes go to the scalar remainder
 * routine, whose count is returned. */
int64_t bshuf_trans_bit_byte_AVX(const void* in, void* out, const size_t size,
const size_t elem_size) {
size_t ii, kk;
const char* in_b = (const char*) in;
char* out_b = (char*) out;
int32_t* out_i32;
size_t nbyte = elem_size * size;
int64_t count;
__m256i ymm;
int32_t bt;
for (ii = 0; ii + 31 < nbyte; ii += 32) {
ymm = _mm256_loadu_si256((__m256i *) &in_b[ii]);
for (kk = 0; kk < 8; kk++) {
/* MSB of all 32 bytes, then shift the next bit up. */
bt = _mm256_movemask_epi8(ymm);
ymm = _mm256_slli_epi16(ymm, 1);
out_i32 = (int32_t*) &out_b[((7 - kk) * nbyte + ii) / 8];
*out_i32 = bt;
}
}
count = bshuf_trans_bit_byte_remainder(in, out, size, elem_size,
nbyte - nbyte % 32);
return count;
}
/* Transpose bits within elements.
 *
 * Uses the SSE byte-element transpose (no AVX variant exists for that
 * stage) followed by the AVX bit-byte transpose and the bit-row pass.
 * Returns -1 on allocation failure or propagates a stage's negative
 * error code (CHECK_ERR_FREE releases the scratch buffer on error). */
int64_t bshuf_trans_bit_elem_AVX(const void* in, void* out, const size_t size,
const size_t elem_size) {
int64_t count;
CHECK_MULT_EIGHT(size);
void* tmp_buf = malloc(size * elem_size);
if (tmp_buf == NULL) return -1;
count = bshuf_trans_byte_elem_SSE(in, out, size, elem_size);
CHECK_ERR_FREE(count, tmp_buf);
count = bshuf_trans_bit_byte_AVX(out, tmp_buf, size, elem_size);
CHECK_ERR_FREE(count, tmp_buf);
count = bshuf_trans_bitrow_eight(tmp_buf, out, size, elem_size);
free(tmp_buf);
return count;
}
/* For data organized into a row for each bit (8 * elem_size rows), transpose
 * the bytes.
 *
 * Requires elem_size to be a multiple of 4; otherwise delegates to the
 * SSE implementation. Works on 32-column tiles, staging four 8-row
 * groups through unpack stages (8-, 16-, 32-bit), then combining them
 * with 64-bit unpacks and cross-lane permutes before the 32-byte
 * stores. The scalar loop at the end copies columns past the last full
 * tile. Returns size * elem_size; size must be a multiple of 8. */
int64_t bshuf_trans_byte_bitrow_AVX(const void* in, void* out, const size_t size,
const size_t elem_size) {
size_t hh, ii, jj, kk, mm;
const char* in_b = (const char*) in;
char* out_b = (char*) out;
CHECK_MULT_EIGHT(size);
size_t nrows = 8 * elem_size;
size_t nbyte_row = size / 8;
if (elem_size % 4) return bshuf_trans_byte_bitrow_SSE(in, out, size,
elem_size);
__m256i ymm_0[8];
__m256i ymm_1[8];
__m256i ymm_storeage[8][4];
for (jj = 0; jj + 31 < nbyte_row; jj += 32) {
for (ii = 0; ii + 3 < elem_size; ii += 4) {
for (hh = 0; hh < 4; hh ++) {
/* Load 8 rows of this 32-column tile. */
for (kk = 0; kk < 8; kk ++){
ymm_0[kk] = _mm256_loadu_si256((__m256i *) &in_b[
(ii * 8 + hh * 8 + kk) * nbyte_row + jj]);
}
/* Byte interleave. */
for (kk = 0; kk < 4; kk ++){
ymm_1[kk] = _mm256_unpacklo_epi8(ymm_0[kk * 2],
ymm_0[kk * 2 + 1]);
ymm_1[kk + 4] = _mm256_unpackhi_epi8(ymm_0[kk * 2],
ymm_0[kk * 2 + 1]);
}
/* 16-bit interleave. */
for (kk = 0; kk < 2; kk ++){
for (mm = 0; mm < 2; mm ++){
ymm_0[kk * 4 + mm] = _mm256_unpacklo_epi16(
ymm_1[kk * 4 + mm * 2],
ymm_1[kk * 4 + mm * 2 + 1]);
ymm_0[kk * 4 + mm + 2] = _mm256_unpackhi_epi16(
ymm_1[kk * 4 + mm * 2],
ymm_1[kk * 4 + mm * 2 + 1]);
}
}
/* 32-bit interleave, then stash this 8-row group. */
for (kk = 0; kk < 4; kk ++){
ymm_1[kk * 2] = _mm256_unpacklo_epi32(ymm_0[kk * 2],
ymm_0[kk * 2 + 1]);
ymm_1[kk * 2 + 1] = _mm256_unpackhi_epi32(ymm_0[kk * 2],
ymm_0[kk * 2 + 1]);
}
for (kk = 0; kk < 8; kk ++){
ymm_storeage[kk][hh] = ymm_1[kk];
}
}
/* Combine the four stashed groups: 64-bit unpack plus 128-bit
 * lane permutes assemble full output rows, then store. */
for (mm = 0; mm < 8; mm ++) {
for (kk = 0; kk < 4; kk ++){
ymm_0[kk] = ymm_storeage[mm][kk];
}
ymm_1[0] = _mm256_unpacklo_epi64(ymm_0[0], ymm_0[1]);
ymm_1[1] = _mm256_unpacklo_epi64(ymm_0[2], ymm_0[3]);
ymm_1[2] = _mm256_unpackhi_epi64(ymm_0[0], ymm_0[1]);
ymm_1[3] = _mm256_unpackhi_epi64(ymm_0[2], ymm_0[3]);
ymm_0[0] = _mm256_permute2x128_si256(ymm_1[0], ymm_1[1], 32);
ymm_0[1] = _mm256_permute2x128_si256(ymm_1[2], ymm_1[3], 32);
ymm_0[2] = _mm256_permute2x128_si256(ymm_1[0], ymm_1[1], 49);
ymm_0[3] = _mm256_permute2x128_si256(ymm_1[2], ymm_1[3], 49);
_mm256_storeu_si256((__m256i *) &out_b[
(jj + mm * 2 + 0 * 16) * nrows + ii * 8], ymm_0[0]);
_mm256_storeu_si256((__m256i *) &out_b[
(jj + mm * 2 + 0 * 16 + 1) * nrows + ii * 8], ymm_0[1]);
_mm256_storeu_si256((__m256i *) &out_b[
(jj + mm * 2 + 1 * 16) * nrows + ii * 8], ymm_0[2]);
_mm256_storeu_si256((__m256i *) &out_b[
(jj + mm * 2 + 1 * 16 + 1) * nrows + ii * 8], ymm_0[3]);
}
}
}
/* Scalar remainder for columns past the last full 32-wide tile. */
for (ii = 0; ii < nrows; ii ++ ) {
for (jj = nbyte_row - nbyte_row % 32; jj < nbyte_row; jj ++) {
out_b[jj * nrows + ii] = in_b[ii * nbyte_row + jj];
}
}
return size * elem_size;
}
/* Shuffle bits within the bytes of eight element blocks.
 *
 * Element sizes not divisible by 4 delegate to the SSE version
 * (returning its result). Otherwise processes 32 bytes at a time,
 * extracting one 32-bit bit-plane mask per inner pass and writing it
 * within the current 8-element block. Returns size * elem_size; size
 * must be a multiple of 8. */
int64_t bshuf_shuffle_bit_eightelem_AVX(const void* in, void* out, const size_t size,
const size_t elem_size) {
CHECK_MULT_EIGHT(size);
// With a bit of care, this could be written such that such that it is
// in_buf = out_buf safe.
const char* in_b = (const char*) in;
char* out_b = (char*) out;
size_t ii, jj, kk;
size_t nbyte = elem_size * size;
__m256i ymm;
int32_t bt;
if (elem_size % 4) {
return bshuf_shuffle_bit_eightelem_SSE(in, out, size, elem_size);
} else {
for (jj = 0; jj + 31 < 8 * elem_size; jj += 32) {
for (ii = 0; ii + 8 * elem_size - 1 < nbyte;
ii += 8 * elem_size) {
ymm = _mm256_loadu_si256((__m256i *) &in_b[ii + jj]);
for (kk = 0; kk < 8; kk++) {
bt = _mm256_movemask_epi8(ymm);
ymm = _mm256_slli_epi16(ymm, 1);
size_t ind = (ii + jj / 8 + (7 - kk) * elem_size);
* (int32_t *) &out_b[ind] = bt;
}
}
}
}
return size * elem_size;
}
/* Untranspose bits within elements.
 *
 * Inverse of bshuf_trans_bit_elem_AVX: byte-bitrow transpose into a
 * scratch buffer, then bit shuffle within eight-element blocks.
 * Returns -1 on allocation failure or a negative error code from the
 * first stage. */
int64_t bshuf_untrans_bit_elem_AVX(const void* in, void* out, const size_t size,
const size_t elem_size) {
int64_t count;
CHECK_MULT_EIGHT(size);
void* tmp_buf = malloc(size * elem_size);
if (tmp_buf == NULL) return -1;
count = bshuf_trans_byte_bitrow_AVX(in, tmp_buf, size, elem_size);
CHECK_ERR_FREE(count, tmp_buf);
count = bshuf_shuffle_bit_eightelem_AVX(tmp_buf, out, size, elem_size);
free(tmp_buf);
return count;
}
#else // #ifdef USEAVX2
/* AVX2 unavailable in this build: every AVX entry point is a stub that
 * fails immediately with the sentinel error code -12, letting callers
 * detect the missing instruction-set support uniformly. */
int64_t bshuf_trans_bit_byte_AVX(const void* in, void* out, const size_t size,
const size_t elem_size) {
return -12;
}
int64_t bshuf_trans_bit_elem_AVX(const void* in, void* out, const size_t size,
const size_t elem_size) {
return -12;
}
int64_t bshuf_trans_byte_bitrow_AVX(const void* in, void* out, const size_t size,
const size_t elem_size) {
return -12;
}
int64_t bshuf_shuffle_bit_eightelem_AVX(const void* in, void* out, const size_t size,
const size_t elem_size) {
return -12;
}
int64_t bshuf_untrans_bit_elem_AVX(const void* in, void* out, const size_t size,
const size_t elem_size) {
return -12;
}
#endif // #ifdef USEAVX2
/* ---- Drivers selecting best instruction set at compile time. ---- */
/* Transpose bits within elements using the fastest implementation
 * compiled in: AVX2, then SSE2, then NEON, else scalar. Selection is a
 * compile-time preprocessor choice, not a runtime CPU check. */
int64_t bshuf_trans_bit_elem(const void* in, void* out, const size_t size,
const size_t elem_size) {
int64_t count;
#ifdef USEAVX2
count = bshuf_trans_bit_elem_AVX(in, out, size, elem_size);
#elif defined(USESSE2)
count = bshuf_trans_bit_elem_SSE(in, out, size, elem_size);
#elif defined(USEARMNEON)
count = bshuf_trans_bit_elem_NEON(in, out, size, elem_size);
#else
count = bshuf_trans_bit_elem_scal(in, out, size, elem_size);
#endif
return count;
}
/* Untranspose bits within elements using the fastest implementation
 * compiled in: AVX2, then SSE2, then NEON, else scalar (mirrors the
 * selection in bshuf_trans_bit_elem). */
int64_t bshuf_untrans_bit_elem(const void* in, void* out, const size_t size,
const size_t elem_size) {
int64_t count;
#ifdef USEAVX2
count = bshuf_untrans_bit_elem_AVX(in, out, size, elem_size);
#elif defined(USESSE2)
count = bshuf_untrans_bit_elem_SSE(in, out, size, elem_size);
#elif defined(USEARMNEON)
count = bshuf_untrans_bit_elem_NEON(in, out, size, elem_size);
#else
count = bshuf_untrans_bit_elem_scal(in, out, size, elem_size);
#endif
return count;
}
/* ---- Wrappers for implementing blocking ---- */
/* Wrap a function for processing a single block to process an entire buffer in
 * parallel.
 *
 * Splits the buffer into block_size-element blocks and dispatches them
 * (via the ioc_chain, which hands each task its in/out positions) across
 * OpenMP threads when available. Handles a final partial block and
 * memcpy's any bytes beyond the last BSHUF_BLOCKED_MULT multiple.
 * Returns the cumulative count plus leftover bytes, -81 for an invalid
 * block size, or the (last seen) negative error code from a block. */
int64_t bshuf_blocked_wrap_fun(bshufBlockFunDef fun, const void* in, void* out, \
const size_t size, const size_t elem_size, size_t block_size) {
omp_size_t ii = 0;
int64_t err = 0;
int64_t count, cum_count=0;
size_t last_block_size;
size_t leftover_bytes;
size_t this_iter;
char *last_in;
char *last_out;
ioc_chain C;
ioc_init(&C, in, out);
if (block_size == 0) {
block_size = bshuf_default_block_size(elem_size);
}
if (block_size % BSHUF_BLOCKED_MULT) return -81;
#if defined(_OPENMP)
#pragma omp parallel for schedule(dynamic, 1) \
private(count) reduction(+ : cum_count)
#endif
for (ii = 0; ii < (omp_size_t)( size / block_size ); ii ++) {
count = fun(&C, block_size, elem_size);
/* NOTE(review): `err` is written by multiple threads without
 * synchronization; any failing block still leaves err < 0, but this
 * is formally a data race — confirm acceptable. */
if (count < 0) err = count;
cum_count += count;
}
/* Serial pass over the final partial block (truncated to a
 * BSHUF_BLOCKED_MULT multiple). */
last_block_size = size % block_size;
last_block_size = last_block_size - last_block_size % BSHUF_BLOCKED_MULT;
if (last_block_size) {
count = fun(&C, last_block_size, elem_size);
if (count < 0) err = count;
cum_count += count;
}
if (err < 0) return err;
/* Bytes that don't fill a BSHUF_BLOCKED_MULT group are copied through
 * unshuffled; advance the chain past them first. */
leftover_bytes = size % BSHUF_BLOCKED_MULT * elem_size;
last_in = (char *) ioc_get_in(&C, &this_iter);
ioc_set_next_in(&C, &this_iter, (void *) (last_in + leftover_bytes));
last_out = (char *) ioc_get_out(&C, &this_iter);
ioc_set_next_out(&C, &this_iter, (void *) (last_out + leftover_bytes));
memcpy(last_out, last_in, leftover_bytes);
ioc_destroy(&C);
return cum_count + leftover_bytes;
}
/* Bitshuffle a single block.
 *
 * Claims this task's input/output positions from the ioc_chain,
 * advances both cursors by the block's byte span for the next task,
 * then runs the bit-transpose on the claimed range. Returns the
 * transpose's count (negative on error). */
int64_t bshuf_bitshuffle_block(ioc_chain *C_ptr, \
const size_t size, const size_t elem_size) {
size_t this_iter;
const void *in;
void *out;
int64_t count;
in = ioc_get_in(C_ptr, &this_iter);
ioc_set_next_in(C_ptr, &this_iter,
(void*) ((char*) in + size * elem_size));
out = ioc_get_out(C_ptr, &this_iter);
ioc_set_next_out(C_ptr, &this_iter,
(void *) ((char *) out + size * elem_size));
count = bshuf_trans_bit_elem(in, out, size, elem_size);
return count;
}
/* Bitunshuffle a single block.
 *
 * Mirror of bshuf_bitshuffle_block: claims the task's in/out positions
 * from the ioc_chain, advances both cursors, and runs the inverse
 * bit-transpose. Returns its count (negative on error). */
int64_t bshuf_bitunshuffle_block(ioc_chain* C_ptr, \
const size_t size, const size_t elem_size) {
size_t this_iter;
const void *in;
void *out;
int64_t count;
in = ioc_get_in(C_ptr, &this_iter);
ioc_set_next_in(C_ptr, &this_iter,
(void*) ((char*) in + size * elem_size));
out = ioc_get_out(C_ptr, &this_iter);
ioc_set_next_out(C_ptr, &this_iter,
(void *) ((char *) out + size * elem_size));
count = bshuf_untrans_bit_elem(in, out, size, elem_size);
return count;
}
/* Write a 64 bit unsigned integer to a buffer in big endian order.
 *
 * Stores the 8 bytes of 'num' most-significant first; 'buf' must have
 * room for 8 bytes. */
void bshuf_write_uint64_BE(void* buf, uint64_t num) {
    uint8_t* bytes = (uint8_t*) buf;
    int shift;
    int pos = 0;
    for (shift = 56; shift >= 0; shift -= 8) {
        bytes[pos++] = (uint8_t) (num >> shift);
    }
}
/* Read a 64 bit unsigned integer from a buffer in big endian order.
 *
 * Reads 8 bytes from 'buf', most-significant first, and returns the
 * assembled value. */
uint64_t bshuf_read_uint64_BE(void* buf) {
    const uint8_t* bytes = (const uint8_t*) buf;
    uint64_t value = 0;
    int pos;
    for (pos = 0; pos < 8; pos++) {
        value = (value << 8) | bytes[pos];
    }
    return value;
}
/* Write a 32 bit unsigned integer to a buffer in big endian order. */
/* Write a 32 bit unsigned integer to a buffer in big endian order
 * (most significant byte first, independent of host endianness). */
void bshuf_write_uint32_BE(void* buf, uint32_t num) {
    uint8_t* bytes = (uint8_t*) buf;
    int ii;
    /* Peel off the least significant byte each pass, filling from the end. */
    for (ii = 3; ii >= 0; ii--) {
        bytes[ii] = (uint8_t) (num & 0xFFu);
        num >>= 8;
    }
}
/* Read a 32 bit unsigned integer from a buffer big endian order. */
/* Read a 32 bit unsigned integer from a buffer in big endian order
 * (byte 0 is the most significant byte).
 *
 * Fix: the local byte pointer previously cast away the `const` of the
 * parameter; keep the pointee const since the buffer is never written. */
uint32_t bshuf_read_uint32_BE(const void* buf) {
    int ii;
    const uint8_t* b = (const uint8_t*) buf;
    uint32_t num = 0, pow28 = 1 << 8, cp = 1;
    /* Accumulate from the least significant byte, scaling by 256 each step. */
    for (ii = 3; ii >= 0; ii--) {
        num += b[ii] * cp;
        cp *= pow28;
    }
    return num;
}
/* ---- Public functions ----
*
* See header file for description and usage.
*
*/
/* Default processing block size, in elements, for a given element size.
 * Targets BSHUF_TARGET_BLOCK_SIZE_B bytes per block, rounded down to a
 * multiple of BSHUF_BLOCKED_MULT. NOTE: the result participates in the
 * encoded data layout, so it must never change between versions. */
size_t bshuf_default_block_size(const size_t elem_size) {
    // This function needs to be absolutely stable between versions.
    // Otherwise encoded data will not be decodable.

    size_t block_size = BSHUF_TARGET_BLOCK_SIZE_B / elem_size;
    // Ensure it is a required multiple.
    block_size = (block_size / BSHUF_BLOCKED_MULT) * BSHUF_BLOCKED_MULT;
    // Never recommend a block below the minimum, even for large elements.
    return MAX(block_size, BSHUF_MIN_RECOMMEND_BLOCK);
}
/* Bitshuffle `size` elements of `elem_size` bytes each from `in` to `out`,
 * processed in blocks of `block_size` elements via the blocked wrapper.
 * Returns the number of bytes processed, or a negative error code. */
int64_t bshuf_bitshuffle(const void* in, void* out, const size_t size,
        const size_t elem_size, size_t block_size) {

    return bshuf_blocked_wrap_fun(&bshuf_bitshuffle_block, in, out, size,
            elem_size, block_size);
}
/* Inverse of bshuf_bitshuffle: must be called with the same `elem_size`
 * and `block_size` used to encode. Returns bytes processed, or a negative
 * error code. */
int64_t bshuf_bitunshuffle(const void* in, void* out, const size_t size,
        const size_t elem_size, size_t block_size) {

    return bshuf_blocked_wrap_fun(&bshuf_bitunshuffle_block, in, out, size,
            elem_size, block_size);
}
#undef TRANS_BIT_8X8
#undef TRANS_ELEM_TYPE
#undef MAX
#undef CHECK_MULT_EIGHT
#undef CHECK_ERR_FREE
#undef USESSE2
#undef USEAVX2
|
convolution_winograd_transform_pack4.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// Winograd F(6x6, 3x3) input transform, pack4 NEON path.
// Splits the input into 8x8 tiles advancing by 6 pixels (2-pixel overlap),
// applies the 8x8 input-transform matrix B^T to the rows and then to the
// columns of each tile, and scatters the 64 resulting coefficient vectors
// into bottom_blob_tm. Each spatial position carries 4 packed values (one
// float32x4 lane), hence the * 4 strides throughout.
static void conv3x3s1_winograd63_transform_input_pack4_neon(const Mat& bottom_blob, Mat& bottom_blob_tm, const Option& opt)
{
    const int w = bottom_blob.w;
    const int h = bottom_blob.h;
    const int inch = bottom_blob.c;

    const int w_tiles = (w - 2) / 6;
    const int h_tiles = (h - 2) / 6;
    const int tiles = w_tiles * h_tiles;

    // const float itm[8][8] = {
    //     {1.0f, 0.0f, -5.25f, 0.00f, 5.25f, 0.00f, -1.0f, 0.0f},
    //
    //     {0.0f, 1.0f, 1.00f, -4.25f, -4.25f, 1.00f, 1.0f, 0.0f},
    //     {0.0f, -1.0f, 1.00f, 4.25f, -4.25f, -1.00f, 1.0f, 0.0f},
    //
    //     {0.0f, 0.5f, 0.25f, -2.50f, -1.25f, 2.00f, 1.0f, 0.0f},
    //     {0.0f, -0.5f, 0.25f, 2.50f, -1.25f, -2.00f, 1.0f, 0.0f},
    //
    //     {0.0f, 2.0f, 4.00f, -2.50f, -5.00f, 0.50f, 1.0f, 0.0f},
    //     {0.0f, -2.0f, 4.00f, 2.50f, -5.00f, -0.50f, 1.0f, 0.0f},
    //
    //     {0.0f, -1.0f, 0.00f, 5.25f, 0.00f, -5.25f, 0.0f, 1.0f}
    // };

    // 0 = r00 - r06 + (r04 - r02) * 5.25
    // 7 = r07 - r01 + (r03 - r05) * 5.25
    // 1 = (r02 + r06 - r04 * 4.25) + (r01 - r03 * 4.25 + r05)
    // 2 = (r02 + r06 - r04 * 4.25) - (r01 - r03 * 4.25 + r05)
    // 3 = (r06 + r02 * 0.25 - r04 * 1.25) + (r01 * 0.5 - r03 * 2.5 + r05 * 2)
    // 4 = (r06 + r02 * 0.25 - r04 * 1.25) - (r01 * 0.5 - r03 * 2.5 + r05 * 2)
    // reuse r04 * 1.25
    // reuse r03 * 2.5
    // 5 = (r06 + (r02 - r04 * 1.25) * 4) + (r01 * 2 - r03 * 2.5 + r05 * 0.5)
    // 6 = (r06 + (r02 - r04 * 1.25) * 4) - (r01 * 2 - r03 * 2.5 + r05 * 0.5)

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int q = 0; q < inch; q++)
    {
        const Mat img0 = bottom_blob.channel(q);
        Mat img0_tm = bottom_blob_tm.channel(q);

        // scratch for one tile: 8 rows x 8 cols x 4 packed lanes
        float tmp[8][8][4];

        // tile
        for (int i = 0; i < h_tiles; i++)
        {
            for (int j = 0; j < w_tiles; j++)
            {
                const float* r0 = img0.row(i * 6) + (j * 6) * 4;

                // first pass: transform each of the 8 tile rows into tmp
                for (int m = 0; m < 8; m++)
                {
                    float32x4_t _r00 = vld1q_f32(r0);
                    float32x4_t _r01 = vld1q_f32(r0 + 4);
                    float32x4_t _r02 = vld1q_f32(r0 + 8);
                    float32x4_t _r03 = vld1q_f32(r0 + 12);
                    float32x4_t _r04 = vld1q_f32(r0 + 16);
                    float32x4_t _r05 = vld1q_f32(r0 + 20);
                    float32x4_t _r06 = vld1q_f32(r0 + 24);
                    float32x4_t _r07 = vld1q_f32(r0 + 28);

                    float32x4_t _tmp0m = vmlaq_n_f32(vsubq_f32(_r00, _r06), vsubq_f32(_r04, _r02), 5.25f);
                    float32x4_t _tmp7m = vmlaq_n_f32(vsubq_f32(_r07, _r01), vsubq_f32(_r03, _r05), 5.25f);
                    vst1q_f32(tmp[0][m], _tmp0m);
                    vst1q_f32(tmp[7][m], _tmp7m);

                    float32x4_t _tmp12a = vmlsq_n_f32(vaddq_f32(_r02, _r06), _r04, 4.25f);
                    float32x4_t _tmp12b = vmlsq_n_f32(vaddq_f32(_r01, _r05), _r03, 4.25f);

                    float32x4_t _tmp1m = vaddq_f32(_tmp12a, _tmp12b);
                    float32x4_t _tmp2m = vsubq_f32(_tmp12a, _tmp12b);
                    vst1q_f32(tmp[1][m], _tmp1m);
                    vst1q_f32(tmp[2][m], _tmp2m);

                    float32x4_t _tmp34a = vmlsq_n_f32(vmlaq_n_f32(_r06, _r02, 0.25f), _r04, 1.25f);
                    float32x4_t _tmp34b = vmlaq_n_f32(vmlsq_n_f32(vmulq_n_f32(_r01, 0.5f), _r03, 2.5f), _r05, 2.f);

                    float32x4_t _tmp3m = vaddq_f32(_tmp34a, _tmp34b);
                    float32x4_t _tmp4m = vsubq_f32(_tmp34a, _tmp34b);
                    vst1q_f32(tmp[3][m], _tmp3m);
                    vst1q_f32(tmp[4][m], _tmp4m);

                    float32x4_t _tmp56a = vmlaq_n_f32(_r06, vmlsq_n_f32(_r02, _r04, 1.25f), 4.f);
                    float32x4_t _tmp56b = vmlaq_n_f32(vmlsq_n_f32(vmulq_n_f32(_r01, 2.f), _r03, 2.5f), _r05, 0.5f);

                    float32x4_t _tmp5m = vaddq_f32(_tmp56a, _tmp56b);
                    float32x4_t _tmp6m = vsubq_f32(_tmp56a, _tmp56b);
                    vst1q_f32(tmp[5][m], _tmp5m);
                    vst1q_f32(tmp[6][m], _tmp6m);

                    r0 += w * 4;
                }

                // destination pointers: one plane of `tiles` vectors per
                // transform coefficient row (8 rows, stride tiles * 4 each)
                float* r0_tm_0 = (float*)img0_tm + (i * w_tiles + j) * 4;
                float* r0_tm_1 = r0_tm_0 + tiles * 4;
                float* r0_tm_2 = r0_tm_0 + tiles * 8;
                float* r0_tm_3 = r0_tm_0 + tiles * 12;
                float* r0_tm_4 = r0_tm_0 + tiles * 16;
                float* r0_tm_5 = r0_tm_0 + tiles * 20;
                float* r0_tm_6 = r0_tm_0 + tiles * 24;
                float* r0_tm_7 = r0_tm_0 + tiles * 28;

                // second pass: transform tmp columns and scatter the results
                for (int m = 0; m < 8; m++)
                {
                    float32x4_t _tmp00 = vld1q_f32(tmp[m][0]);
                    float32x4_t _tmp01 = vld1q_f32(tmp[m][1]);
                    float32x4_t _tmp02 = vld1q_f32(tmp[m][2]);
                    float32x4_t _tmp03 = vld1q_f32(tmp[m][3]);
                    float32x4_t _tmp04 = vld1q_f32(tmp[m][4]);
                    float32x4_t _tmp05 = vld1q_f32(tmp[m][5]);
                    float32x4_t _tmp06 = vld1q_f32(tmp[m][6]);
                    float32x4_t _tmp07 = vld1q_f32(tmp[m][7]);

                    float32x4_t _r0tm0 = vmlaq_n_f32(vsubq_f32(_tmp00, _tmp06), vsubq_f32(_tmp04, _tmp02), 5.25f);
                    float32x4_t _r0tm7 = vmlaq_n_f32(vsubq_f32(_tmp07, _tmp01), vsubq_f32(_tmp03, _tmp05), 5.25f);

                    float32x4_t _tmp12a = vmlsq_n_f32(vaddq_f32(_tmp02, _tmp06), _tmp04, 4.25f);
                    float32x4_t _tmp12b = vmlsq_n_f32(vaddq_f32(_tmp01, _tmp05), _tmp03, 4.25f);

                    float32x4_t _r0tm1 = vaddq_f32(_tmp12a, _tmp12b);
                    float32x4_t _r0tm2 = vsubq_f32(_tmp12a, _tmp12b);

                    float32x4_t _tmp34a = vmlsq_n_f32(vmlaq_n_f32(_tmp06, _tmp02, 0.25f), _tmp04, 1.25f);
                    float32x4_t _tmp34b = vmlaq_n_f32(vmlsq_n_f32(vmulq_n_f32(_tmp01, 0.5f), _tmp03, 2.5f), _tmp05, 2.f);

                    float32x4_t _r0tm3 = vaddq_f32(_tmp34a, _tmp34b);
                    float32x4_t _r0tm4 = vsubq_f32(_tmp34a, _tmp34b);

                    float32x4_t _tmp56a = vmlaq_n_f32(_tmp06, vmlsq_n_f32(_tmp02, _tmp04, 1.25f), 4.f);
                    float32x4_t _tmp56b = vmlaq_n_f32(vmlsq_n_f32(vmulq_n_f32(_tmp01, 2.f), _tmp03, 2.5f), _tmp05, 0.5f);

                    float32x4_t _r0tm5 = vaddq_f32(_tmp56a, _tmp56b);
                    float32x4_t _r0tm6 = vsubq_f32(_tmp56a, _tmp56b);

                    vst1q_f32(r0_tm_0, _r0tm0);
                    vst1q_f32(r0_tm_1, _r0tm1);
                    vst1q_f32(r0_tm_2, _r0tm2);
                    vst1q_f32(r0_tm_3, _r0tm3);
                    vst1q_f32(r0_tm_4, _r0tm4);
                    vst1q_f32(r0_tm_5, _r0tm5);
                    vst1q_f32(r0_tm_6, _r0tm6);
                    vst1q_f32(r0_tm_7, _r0tm7);

                    r0_tm_0 += tiles * 32;
                    r0_tm_1 += tiles * 32;
                    r0_tm_2 += tiles * 32;
                    r0_tm_3 += tiles * 32;
                    r0_tm_4 += tiles * 32;
                    r0_tm_5 += tiles * 32;
                    r0_tm_6 += tiles * 32;
                    r0_tm_7 += tiles * 32;
                }
            }
        }
    }
}
// Winograd F(6x6, 3x3) output transform, pack4 NEON path.
// Gathers the 8x8 coefficient vectors of each tile from top_blob_tm,
// applies the 6x8 output-transform matrix A^T to the rows and then the
// columns, adds the per-channel bias (zero when `bias` is empty), and
// writes the resulting 6x6 output tile into top_blob.
static void conv3x3s1_winograd63_transform_output_pack4_neon(const Mat& top_blob_tm, Mat& top_blob, const Mat& bias, const Option& opt)
{
    const int outw = top_blob.w;
    const int outh = top_blob.h;
    const int outch = top_blob.c;

    const int w_tiles = outw / 6;
    const int h_tiles = outh / 6;
    const int tiles = w_tiles * h_tiles;

    const float* biasptr = bias;

    // const float otm[6][8] = {
    //     {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 32.0f, 32.0f, 0.0f},
    //     {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 16.0f,-16.0f, 0.0f},
    //     {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 8.0f, 8.0f, 0.0f},
    //     {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 4.0f, -4.0f, 0.0f},
    //     {0.0f, 1.0f, 1.0f, 16.0f, 16.0f, 2.0f, 2.0f, 0.0f},
    //     {0.0f, 1.0f, -1.0f, 32.0f, -32.0f, 1.0f, -1.0f, 1.0f}
    // };

    // 0 = r0 + (r1 + r2) + (r3 + r4)     + (r5 + r6) * 32
    // 1 =      (r1 - r2) + (r3 - r4) * 2 + (r5 - r6) * 16
    // 2 =      (r1 + r2) + (r3 + r4) * 4 + (r5 + r6) * 8
    // 3 =      (r1 - r2) + (r3 - r4) * 8 + (r5 - r6) * 4
    // 4 =      (r1 + r2) + (r3 + r4) * 16+ (r5 + r6) * 2
    // 5 = r7 + (r1 - r2) + (r3 - r4) * 32+ (r5 - r6)

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        const Mat out0_tm = top_blob_tm.channel(p);
        Mat out0 = top_blob.channel(p);

        float32x4_t _bias0 = biasptr ? vld1q_f32(biasptr + p * 4) : vdupq_n_f32(0.f);

        // scratch for one tile: 6 output rows x 8 coefficient cols x 4 lanes
        float tmp[6][8][4];

        // tile
        for (int i = 0; i < h_tiles; i++)
        {
            for (int j = 0; j < w_tiles; j++)
            {
                // source pointers: one plane of `tiles` vectors per
                // coefficient row (8 rows, stride tiles * 4 each)
                const float* output0_tm_0 = (const float*)out0_tm + (i * w_tiles + j) * 4;
                const float* output0_tm_1 = output0_tm_0 + tiles * 4;
                const float* output0_tm_2 = output0_tm_0 + tiles * 8;
                const float* output0_tm_3 = output0_tm_0 + tiles * 12;
                const float* output0_tm_4 = output0_tm_0 + tiles * 16;
                const float* output0_tm_5 = output0_tm_0 + tiles * 20;
                const float* output0_tm_6 = output0_tm_0 + tiles * 24;
                const float* output0_tm_7 = output0_tm_0 + tiles * 28;

                float* output0 = out0.row(i * 6) + (j * 6) * 4;

                // first pass: reduce the 8 coefficient rows to 6 tmp rows
                for (int m = 0; m < 8; m++)
                {
                    float32x4_t _out0tm0 = vld1q_f32(output0_tm_0);
                    float32x4_t _out0tm1 = vld1q_f32(output0_tm_1);
                    float32x4_t _out0tm2 = vld1q_f32(output0_tm_2);
                    float32x4_t _out0tm3 = vld1q_f32(output0_tm_3);
                    float32x4_t _out0tm4 = vld1q_f32(output0_tm_4);
                    float32x4_t _out0tm5 = vld1q_f32(output0_tm_5);
                    float32x4_t _out0tm6 = vld1q_f32(output0_tm_6);
                    float32x4_t _out0tm7 = vld1q_f32(output0_tm_7);

                    float32x4_t _tmp024a = vaddq_f32(_out0tm1, _out0tm2);
                    float32x4_t _tmp135a = vsubq_f32(_out0tm1, _out0tm2);

                    float32x4_t _tmp024b = vaddq_f32(_out0tm3, _out0tm4);
                    float32x4_t _tmp135b = vsubq_f32(_out0tm3, _out0tm4);

                    float32x4_t _tmp024c = vaddq_f32(_out0tm5, _out0tm6);
                    float32x4_t _tmp135c = vsubq_f32(_out0tm5, _out0tm6);

                    float32x4_t _tmp0m = vaddq_f32(vaddq_f32(_out0tm0, _tmp024a), vmlaq_n_f32(_tmp024b, _tmp024c, 32.f));
                    float32x4_t _tmp2m = vmlaq_n_f32(vmlaq_n_f32(_tmp024a, _tmp024b, 4.f), _tmp024c, 8.f);
                    float32x4_t _tmp4m = vmlaq_n_f32(vmlaq_n_f32(_tmp024a, _tmp024b, 16.f), _tmp024c, 2.f);
                    vst1q_f32(tmp[0][m], _tmp0m);
                    vst1q_f32(tmp[2][m], _tmp2m);
                    vst1q_f32(tmp[4][m], _tmp4m);

                    float32x4_t _tmp1m = vmlaq_n_f32(vmlaq_n_f32(_tmp135a, _tmp135b, 2.f), _tmp135c, 16.f);
                    float32x4_t _tmp3m = vmlaq_n_f32(vmlaq_n_f32(_tmp135a, _tmp135b, 8.f), _tmp135c, 4.f);
                    float32x4_t _tmp5m = vaddq_f32(vaddq_f32(_out0tm7, _tmp135a), vmlaq_n_f32(_tmp135c, _tmp135b, 32.f));
                    vst1q_f32(tmp[1][m], _tmp1m);
                    vst1q_f32(tmp[3][m], _tmp3m);
                    vst1q_f32(tmp[5][m], _tmp5m);

                    output0_tm_0 += tiles * 32;
                    output0_tm_1 += tiles * 32;
                    output0_tm_2 += tiles * 32;
                    output0_tm_3 += tiles * 32;
                    output0_tm_4 += tiles * 32;
                    output0_tm_5 += tiles * 32;
                    output0_tm_6 += tiles * 32;
                    output0_tm_7 += tiles * 32;
                }

                // second pass: reduce the 8 tmp columns to 6, add bias, store
                for (int m = 0; m < 6; m++)
                {
                    float32x4_t _tmp00 = vld1q_f32(tmp[m][0]);
                    float32x4_t _tmp01 = vld1q_f32(tmp[m][1]);
                    float32x4_t _tmp02 = vld1q_f32(tmp[m][2]);
                    float32x4_t _tmp03 = vld1q_f32(tmp[m][3]);
                    float32x4_t _tmp04 = vld1q_f32(tmp[m][4]);
                    float32x4_t _tmp05 = vld1q_f32(tmp[m][5]);
                    float32x4_t _tmp06 = vld1q_f32(tmp[m][6]);
                    float32x4_t _tmp07 = vld1q_f32(tmp[m][7]);

                    float32x4_t _tmp024a = vaddq_f32(_tmp01, _tmp02);
                    float32x4_t _tmp135a = vsubq_f32(_tmp01, _tmp02);

                    float32x4_t _tmp024b = vaddq_f32(_tmp03, _tmp04);
                    float32x4_t _tmp135b = vsubq_f32(_tmp03, _tmp04);

                    float32x4_t _tmp024c = vaddq_f32(_tmp05, _tmp06);
                    float32x4_t _tmp135c = vsubq_f32(_tmp05, _tmp06);

                    float32x4_t _out00 = vaddq_f32(_bias0, vaddq_f32(vaddq_f32(_tmp00, _tmp024a), vmlaq_n_f32(_tmp024b, _tmp024c, 32.f)));
                    float32x4_t _out02 = vaddq_f32(_bias0, vmlaq_n_f32(vmlaq_n_f32(_tmp024a, _tmp024b, 4.f), _tmp024c, 8.f));
                    float32x4_t _out04 = vaddq_f32(_bias0, vmlaq_n_f32(vmlaq_n_f32(_tmp024a, _tmp024b, 16.f), _tmp024c, 2.f));
                    vst1q_f32(output0, _out00);
                    vst1q_f32(output0 + 8, _out02);
                    vst1q_f32(output0 + 16, _out04);

                    float32x4_t _out01 = vaddq_f32(_bias0, vmlaq_n_f32(vmlaq_n_f32(_tmp135a, _tmp135b, 2.f), _tmp135c, 16.f));
                    float32x4_t _out03 = vaddq_f32(_bias0, vmlaq_n_f32(vmlaq_n_f32(_tmp135a, _tmp135b, 8.f), _tmp135c, 4.f));
                    float32x4_t _out05 = vaddq_f32(_bias0, vaddq_f32(vaddq_f32(_tmp07, _tmp135a), vmlaq_n_f32(_tmp135c, _tmp135b, 32.f)));
                    vst1q_f32(output0 + 4, _out01);
                    vst1q_f32(output0 + 12, _out03);
                    vst1q_f32(output0 + 20, _out05);

                    output0 += outw * 4;
                }
            }
        }
    }
}
// Winograd F(4x4, 3x3) input transform, pack4 NEON path.
// Splits the input into 6x6 tiles advancing by 4 pixels (2-pixel overlap),
// applies the 6x6 input-transform matrix to rows then columns of each
// tile, and scatters the 36 coefficient vectors into bottom_blob_tm.
static void conv3x3s1_winograd43_transform_input_pack4_neon(const Mat& bottom_blob, Mat& bottom_blob_tm, const Option& opt)
{
    const int w = bottom_blob.w;
    const int h = bottom_blob.h;
    const int inch = bottom_blob.c;

    const int w_tiles = (w - 2) / 4;
    const int h_tiles = (h - 2) / 4;
    const int tiles = w_tiles * h_tiles;

    // const float itm[6][6] = {
    //     {4.0f, 0.0f, -5.0f, 0.0f, 1.0f, 0.0f},
    //     {0.0f,-4.0f, -4.0f, 1.0f, 1.0f, 0.0f},
    //     {0.0f, 4.0f, -4.0f,-1.0f, 1.0f, 0.0f},
    //     {0.0f,-2.0f, -1.0f, 2.0f, 1.0f, 0.0f},
    //     {0.0f, 2.0f, -1.0f,-2.0f, 1.0f, 0.0f},
    //     {0.0f, 4.0f,  0.0f,-5.0f, 0.0f, 1.0f}
    // };

    // 0 =  4 * r00 - 5 * r02 + r04
    // 1 = -4 * (r01 + r02) + r04 + r03
    // 2 =  4 * (r01 - r02) + r04 - r03
    // 3 = -2 * (r01 - r03) + r04 - r02
    // 4 =  2 * (r01 - r03) + r04 - r02
    // 5 =  4 * r01 - 5 * r03 + r05

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int q = 0; q < inch; q++)
    {
        const Mat img0 = bottom_blob.channel(q);
        Mat img0_tm = bottom_blob_tm.channel(q);

        // scratch for one tile: 6 rows x 6 cols x 4 packed lanes
        float tmp[6][6][4];

        // tile
        for (int i = 0; i < h_tiles; i++)
        {
            for (int j = 0; j < w_tiles; j++)
            {
                const float* r0 = img0.row(i * 4) + (j * 4) * 4;

                // first pass: transform each of the 6 tile rows into tmp
                for (int m = 0; m < 6; m++)
                {
                    float32x4_t _r00 = vld1q_f32(r0);
                    float32x4_t _r01 = vld1q_f32(r0 + 4);
                    float32x4_t _r02 = vld1q_f32(r0 + 8);
                    float32x4_t _r03 = vld1q_f32(r0 + 12);
                    float32x4_t _r04 = vld1q_f32(r0 + 16);
                    float32x4_t _r05 = vld1q_f32(r0 + 20);

                    float32x4_t _tmp0m = vmlsq_n_f32(vmlaq_n_f32(_r04, _r00, 4.f), _r02, 5.f);
                    float32x4_t _tmp1m = vmlsq_n_f32(vaddq_f32(_r04, _r03), vaddq_f32(_r01, _r02), 4.f);
                    float32x4_t _tmp2m = vmlaq_n_f32(vsubq_f32(_r04, _r03), vsubq_f32(_r01, _r02), 4.f);
                    float32x4_t _tmp3m = vmlsq_n_f32(vsubq_f32(_r04, _r02), vsubq_f32(_r01, _r03), 2.f);
                    float32x4_t _tmp4m = vmlaq_n_f32(vsubq_f32(_r04, _r02), vsubq_f32(_r01, _r03), 2.f);
                    float32x4_t _tmp5m = vmlsq_n_f32(vmlaq_n_f32(_r05, _r01, 4.f), _r03, 5.f);

                    vst1q_f32(tmp[0][m], _tmp0m);
                    vst1q_f32(tmp[1][m], _tmp1m);
                    vst1q_f32(tmp[2][m], _tmp2m);
                    vst1q_f32(tmp[3][m], _tmp3m);
                    vst1q_f32(tmp[4][m], _tmp4m);
                    vst1q_f32(tmp[5][m], _tmp5m);

                    r0 += w * 4;
                }

                // destination pointers: one plane of `tiles` vectors per
                // transform coefficient row (6 rows, stride tiles * 4 each)
                float* r0_tm_0 = (float*)img0_tm + (i * w_tiles + j) * 4;
                float* r0_tm_1 = r0_tm_0 + tiles * 4;
                float* r0_tm_2 = r0_tm_0 + tiles * 8;
                float* r0_tm_3 = r0_tm_0 + tiles * 12;
                float* r0_tm_4 = r0_tm_0 + tiles * 16;
                float* r0_tm_5 = r0_tm_0 + tiles * 20;

                // second pass: transform tmp columns and scatter the results
                for (int m = 0; m < 6; m++)
                {
                    float32x4_t _tmp00 = vld1q_f32(tmp[m][0]);
                    float32x4_t _tmp01 = vld1q_f32(tmp[m][1]);
                    float32x4_t _tmp02 = vld1q_f32(tmp[m][2]);
                    float32x4_t _tmp03 = vld1q_f32(tmp[m][3]);
                    float32x4_t _tmp04 = vld1q_f32(tmp[m][4]);
                    float32x4_t _tmp05 = vld1q_f32(tmp[m][5]);

                    float32x4_t _r0tm0 = vmlsq_n_f32(vmlaq_n_f32(_tmp04, _tmp00, 4.f), _tmp02, 5.f);
                    float32x4_t _r0tm1 = vmlsq_n_f32(vaddq_f32(_tmp04, _tmp03), vaddq_f32(_tmp01, _tmp02), 4.f);
                    float32x4_t _r0tm2 = vmlaq_n_f32(vsubq_f32(_tmp04, _tmp03), vsubq_f32(_tmp01, _tmp02), 4.f);
                    float32x4_t _r0tm3 = vmlsq_n_f32(vsubq_f32(_tmp04, _tmp02), vsubq_f32(_tmp01, _tmp03), 2.f);
                    float32x4_t _r0tm4 = vmlaq_n_f32(vsubq_f32(_tmp04, _tmp02), vsubq_f32(_tmp01, _tmp03), 2.f);
                    float32x4_t _r0tm5 = vmlsq_n_f32(vmlaq_n_f32(_tmp05, _tmp01, 4.f), _tmp03, 5.f);

                    vst1q_f32(r0_tm_0, _r0tm0);
                    vst1q_f32(r0_tm_1, _r0tm1);
                    vst1q_f32(r0_tm_2, _r0tm2);
                    vst1q_f32(r0_tm_3, _r0tm3);
                    vst1q_f32(r0_tm_4, _r0tm4);
                    vst1q_f32(r0_tm_5, _r0tm5);

                    r0_tm_0 += tiles * 24;
                    r0_tm_1 += tiles * 24;
                    r0_tm_2 += tiles * 24;
                    r0_tm_3 += tiles * 24;
                    r0_tm_4 += tiles * 24;
                    r0_tm_5 += tiles * 24;
                }
            }
        }
    }
}
// Winograd F(4x4, 3x3) output transform, pack4 NEON path.
// Gathers the 6x6 coefficient vectors of each tile from top_blob_tm,
// applies the 4x6 output-transform matrix to rows then columns, adds the
// per-channel bias (zero when `bias` is empty), and writes the resulting
// 4x4 output tile into top_blob.
static void conv3x3s1_winograd43_transform_output_pack4_neon(const Mat& top_blob_tm, Mat& top_blob, const Mat& bias, const Option& opt)
{
    const int outw = top_blob.w;
    const int outh = top_blob.h;
    const int outch = top_blob.c;

    const int w_tiles = outw / 4;
    const int h_tiles = outh / 4;
    const int tiles = w_tiles * h_tiles;

    const float* biasptr = bias;

    // const float otm[4][6] = {
    //     {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f},
    //     {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 0.0f},
    //     {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 0.0f},
    //     {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 1.0f}
    // };

    // 0 = r00 + (r01 + r02) + (r03 + r04)
    // 1 =       (r01 - r02) + (r03 - r04) * 2
    // 2 =       (r01 + r02) + (r03 + r04) * 4
    // 3 = r05 + (r01 - r02) + (r03 - r04) * 8

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        const Mat out0_tm = top_blob_tm.channel(p);
        Mat out0 = top_blob.channel(p);

        float32x4_t _bias0 = biasptr ? vld1q_f32(biasptr + p * 4) : vdupq_n_f32(0.f);

        // scratch for one tile: 4 output rows x 6 coefficient cols x 4 lanes
        float tmp[4][6][4];

        // tile
        for (int i = 0; i < h_tiles; i++)
        {
            for (int j = 0; j < w_tiles; j++)
            {
                // source pointers: one plane of `tiles` vectors per
                // coefficient row (6 rows, stride tiles * 4 each)
                const float* output0_tm_0 = (const float*)out0_tm + (i * w_tiles + j) * 4;
                const float* output0_tm_1 = output0_tm_0 + tiles * 4;
                const float* output0_tm_2 = output0_tm_0 + tiles * 8;
                const float* output0_tm_3 = output0_tm_0 + tiles * 12;
                const float* output0_tm_4 = output0_tm_0 + tiles * 16;
                const float* output0_tm_5 = output0_tm_0 + tiles * 20;

                float* output0 = out0.row(i * 4) + (j * 4) * 4;

                // first pass: reduce the 6 coefficient rows to 4 tmp rows
                for (int m = 0; m < 6; m++)
                {
                    float32x4_t _out0tm0 = vld1q_f32(output0_tm_0);
                    float32x4_t _out0tm1 = vld1q_f32(output0_tm_1);
                    float32x4_t _out0tm2 = vld1q_f32(output0_tm_2);
                    float32x4_t _out0tm3 = vld1q_f32(output0_tm_3);
                    float32x4_t _out0tm4 = vld1q_f32(output0_tm_4);
                    float32x4_t _out0tm5 = vld1q_f32(output0_tm_5);

                    float32x4_t _tmp02a = vaddq_f32(_out0tm1, _out0tm2);
                    float32x4_t _tmp13a = vsubq_f32(_out0tm1, _out0tm2);

                    float32x4_t _tmp02b = vaddq_f32(_out0tm3, _out0tm4);
                    float32x4_t _tmp13b = vsubq_f32(_out0tm3, _out0tm4);

                    float32x4_t _tmp0m = vaddq_f32(vaddq_f32(_out0tm0, _tmp02a), _tmp02b);
                    float32x4_t _tmp1m = vmlaq_n_f32(_tmp13a, _tmp13b, 2.f);
                    float32x4_t _tmp2m = vmlaq_n_f32(_tmp02a, _tmp02b, 4.f);
                    float32x4_t _tmp3m = vmlaq_n_f32(vaddq_f32(_out0tm5, _tmp13a), _tmp13b, 8.f);

                    vst1q_f32(tmp[0][m], _tmp0m);
                    vst1q_f32(tmp[1][m], _tmp1m);
                    vst1q_f32(tmp[2][m], _tmp2m);
                    vst1q_f32(tmp[3][m], _tmp3m);

                    output0_tm_0 += tiles * 24;
                    output0_tm_1 += tiles * 24;
                    output0_tm_2 += tiles * 24;
                    output0_tm_3 += tiles * 24;
                    output0_tm_4 += tiles * 24;
                    output0_tm_5 += tiles * 24;
                }

                // second pass: reduce the 6 tmp columns to 4, add bias, store
                for (int m = 0; m < 4; m++)
                {
                    float32x4_t _tmp00 = vld1q_f32(tmp[m][0]);
                    float32x4_t _tmp01 = vld1q_f32(tmp[m][1]);
                    float32x4_t _tmp02 = vld1q_f32(tmp[m][2]);
                    float32x4_t _tmp03 = vld1q_f32(tmp[m][3]);
                    float32x4_t _tmp04 = vld1q_f32(tmp[m][4]);
                    float32x4_t _tmp05 = vld1q_f32(tmp[m][5]);

                    float32x4_t _tmp02a = vaddq_f32(_tmp01, _tmp02);
                    float32x4_t _tmp13a = vsubq_f32(_tmp01, _tmp02);

                    float32x4_t _tmp02b = vaddq_f32(_tmp03, _tmp04);
                    float32x4_t _tmp13b = vsubq_f32(_tmp03, _tmp04);

                    float32x4_t _out00 = vaddq_f32(_bias0, vaddq_f32(vaddq_f32(_tmp00, _tmp02a), _tmp02b));
                    float32x4_t _out01 = vaddq_f32(_bias0, vmlaq_n_f32(_tmp13a, _tmp13b, 2.f));
                    float32x4_t _out02 = vaddq_f32(_bias0, vmlaq_n_f32(_tmp02a, _tmp02b, 4.f));
                    float32x4_t _out03 = vaddq_f32(_bias0, vmlaq_n_f32(vaddq_f32(_tmp05, _tmp13a), _tmp13b, 8.f));

                    vst1q_f32(output0, _out00);
                    vst1q_f32(output0 + 4, _out01);
                    vst1q_f32(output0 + 8, _out02);
                    vst1q_f32(output0 + 12, _out03);

                    output0 += outw * 4;
                }
            }
        }
    }
}
|
GB_reduce_panel.c | //------------------------------------------------------------------------------
// GB_reduce_panel: s=reduce(A), reduce a matrix to a scalar
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// Reduce a matrix to a scalar using a panel-based method for built-in
// operators. No typecasting is performed.
// Template body: the including file defines GB_ATYPE/GB_CTYPE, GB_PANEL,
// the monoid macros (GB_ADD_ARRAY_TO_ARRAY, GB_ADD_ARRAY_TO_SCALAR, and
// the GB_IS_ANY_MONOID / GB_HAS_TERMINAL / GB_TERMINAL_VALUE switches),
// and the variables A, s, W_space, nthreads, and ntasks used below.
{

    //--------------------------------------------------------------------------
    // get A
    //--------------------------------------------------------------------------

    const GB_ATYPE *GB_RESTRICT Ax = A->x ;
    int64_t anz = GB_NNZ (A) ;
    ASSERT (anz > 0) ;

    #if GB_IS_ANY_MONOID

    // the ANY monoid can take any entry, and terminate immediately
    s = Ax [anz-1] ;

    #else

    //--------------------------------------------------------------------------
    // typecast workspace
    //--------------------------------------------------------------------------

    // ctype W [ntasks] ;
    GB_CTYPE *GB_RESTRICT W = (GB_CTYPE *) W_space ;

    //--------------------------------------------------------------------------
    // reduce A to a scalar
    //--------------------------------------------------------------------------

    if (nthreads == 1)
    {

        //----------------------------------------------------------------------
        // load the Panel with the first entries
        //----------------------------------------------------------------------

        // The Panel holds GB_PANEL independent partial sums so the inner
        // loops can vectorize; it is folded into a single scalar at the end.
        GB_ATYPE Panel [GB_PANEL] ;
        int64_t first_panel_size = GB_IMIN (GB_PANEL, anz) ;
        for (int64_t k = 0 ; k < first_panel_size ; k++)
        {
            Panel [k] = Ax [k] ;
        }

        #if GB_HAS_TERMINAL
        int panel_count = 0 ;
        #endif

        //----------------------------------------------------------------------
        // reduce all entries to the Panel
        //----------------------------------------------------------------------

        for (int64_t p = GB_PANEL ; p < anz ; p += GB_PANEL)
        {
            if (p + GB_PANEL > anz)
            {
                // last partial panel
                for (int64_t k = 0 ; k < anz-p ; k++)
                {
                    // Panel [k] = op (Panel [k], Ax [p+k]) ;
                    GB_ADD_ARRAY_TO_ARRAY (Panel, k, Ax, p+k) ;
                }
            }
            else
            {
                // full panel
                for (int64_t k = 0 ; k < GB_PANEL ; k++)
                {
                    // Panel [k] = op (Panel [k], Ax [p+k]) ;
                    GB_ADD_ARRAY_TO_ARRAY (Panel, k, Ax, p+k) ;
                }

                #if GB_HAS_TERMINAL
                panel_count-- ;
                if (panel_count <= 0)
                {
                    // check for early exit only every 256 panels
                    panel_count = 256 ;
                    int count = 0 ;
                    for (int64_t k = 0 ; k < GB_PANEL ; k++)
                    {
                        count += (Panel [k] == GB_TERMINAL_VALUE) ;
                    }
                    if (count > 0)
                    {
                        break ;
                    }
                }
                #endif
            }
        }

        //----------------------------------------------------------------------
        // s = reduce (Panel)
        //----------------------------------------------------------------------

        s = Panel [0] ;
        for (int64_t k = 1 ; k < first_panel_size ; k++)
        {
            // s = op (s, Panel [k]) ;
            GB_ADD_ARRAY_TO_SCALAR (s, Panel, k) ;
        }

    }
    else
    {

        //----------------------------------------------------------------------
        // all tasks share a single early_exit flag
        //----------------------------------------------------------------------

        // If this flag gets set, all tasks can terminate early

        #if GB_HAS_TERMINAL
        bool early_exit = false ;
        #endif

        //----------------------------------------------------------------------
        // each thread reduces its own slice in parallel
        //----------------------------------------------------------------------

        int tid ;
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (tid = 0 ; tid < ntasks ; tid++)
        {

            //------------------------------------------------------------------
            // determine the work for this task
            //------------------------------------------------------------------

            // Task tid reduces Ax [pstart:pend-1] to the scalar W [tid]
            int64_t pstart, pend ;
            GB_PARTITION (pstart, pend, anz, tid, ntasks) ;
            GB_ATYPE t = Ax [pstart] ;

            //------------------------------------------------------------------
            // skip this task if the terminal value has already been reached
            //------------------------------------------------------------------

            #if GB_HAS_TERMINAL

            // check if another task has called for an early exit
            bool my_exit ;

            #if GB_MICROSOFT
            // MSVC lacks the atomic read macro; use a named critical section
            #pragma omp critical (GB_reduce_panel)
            my_exit = early_exit ;
            #else
            GB_ATOMIC_READ
            my_exit = early_exit ;
            #endif

            if (!my_exit)
            #endif

            //------------------------------------------------------------------
            // do the reductions for this task
            //------------------------------------------------------------------

            {

                //--------------------------------------------------------------
                // load the Panel with the first entries
                //--------------------------------------------------------------

                GB_ATYPE Panel [GB_PANEL] ;
                int64_t my_anz = pend - pstart ;
                int64_t first_panel_size = GB_IMIN (GB_PANEL, my_anz) ;
                for (int64_t k = 0 ; k < first_panel_size ; k++)
                {
                    Panel [k] = Ax [pstart + k] ;
                }

                #if GB_HAS_TERMINAL
                int panel_count = 0 ;
                #endif

                //--------------------------------------------------------------
                // reduce all entries to the Panel
                //--------------------------------------------------------------

                for (int64_t p = pstart + GB_PANEL ; p < pend ; p += GB_PANEL)
                {
                    if (p + GB_PANEL > pend)
                    {
                        // last partial panel
                        for (int64_t k = 0 ; k < pend-p ; k++)
                        {
                            // Panel [k] = op (Panel [k], Ax [p+k]) ;
                            GB_ADD_ARRAY_TO_ARRAY (Panel, k, Ax, p+k) ;
                        }
                    }
                    else
                    {
                        // full panel
                        for (int64_t k = 0 ; k < GB_PANEL ; k++)
                        {
                            // Panel [k] = op (Panel [k], Ax [p+k]) ;
                            GB_ADD_ARRAY_TO_ARRAY (Panel, k, Ax, p+k) ;
                        }

                        #if GB_HAS_TERMINAL
                        panel_count-- ;
                        if (panel_count <= 0)
                        {
                            // check for early exit only every 256 panels
                            panel_count = 256 ;
                            int count = 0 ;
                            for (int64_t k = 0 ; k < GB_PANEL ; k++)
                            {
                                count += (Panel [k] == GB_TERMINAL_VALUE) ;
                            }
                            if (count > 0)
                            {
                                break ;
                            }
                        }
                        #endif
                    }
                }

                //--------------------------------------------------------------
                // t = reduce (Panel)
                //--------------------------------------------------------------

                t = Panel [0] ;
                for (int64_t k = 1 ; k < first_panel_size ; k++)
                {
                    // t = op (t, Panel [k]) ;
                    GB_ADD_ARRAY_TO_SCALAR (t, Panel, k) ;
                }

                #if GB_HAS_TERMINAL
                if (t == GB_TERMINAL_VALUE)
                {
                    // tell all other tasks to exit early
                    #if GB_MICROSOFT
                    #pragma omp critical (GB_reduce_panel)
                    early_exit = true ;
                    #else
                    GB_ATOMIC_WRITE
                    early_exit = true ;
                    #endif
                }
                #endif
            }

            //------------------------------------------------------------------
            // save the results of this task
            //------------------------------------------------------------------

            W [tid] = t ;
        }

        //----------------------------------------------------------------------
        // sum up the results of each slice using a single thread
        //----------------------------------------------------------------------

        s = W [0] ;
        for (int tid = 1 ; tid < ntasks ; tid++)
        {
            // s = op (s, W [tid]), no typecast
            GB_ADD_ARRAY_TO_SCALAR (s, W, tid) ;
        }
    }
    #endif
}
|
foo.c | #include <stdio.h>
#include <stdlib.h>
/* Offload a tiny parallel loop to the default target device; both
 * iterations print the same marker line. */
void foo() {
#pragma omp target
#pragma omp parallel for
    for (int idx = 0; idx < 2; idx++) {
        printf("Test 1.\n");
    }
}
|
3d7pt_var.c | /*
* Order-1, 3D 7 point stencil with variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Subtract the `struct timeval' values X and Y, storing X - Y in RESULT.
 * Returns 1 if the difference is negative, otherwise 0.
 * NOTE: *y is normalized in place as a side effect. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Borrow whole seconds into y so that y->tv_usec <= x->tv_usec. */
    if (x->tv_usec < y->tv_usec)
    {
        int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * borrow;
        y->tv_sec += borrow;
    }
    /* Carry excess microseconds in the difference into whole seconds. */
    if (x->tv_usec - y->tv_usec > 1000000)
    {
        int carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * carry;
        y->tv_sec -= carry;
    }

    /* The microsecond field of the difference is now certainly positive. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;

    return x->tv_sec < y->tv_sec;
}
/* Driver: allocates the 3D grids and the 7 coefficient arrays, runs the
 * order-1 3D 7-point variable-coefficient stencil TESTS times, and reports
 * the best wall-clock time.
 *
 * Fixes vs. the original:
 *  - `min(...)` did not exist (only the MIN macro is defined) -> MIN(...).
 *  - Nx/Ny/Nz/Nt were read uninitialized when too few arguments were
 *    given; they now have defaults.
 *  - tile_size, A, and coef (the outermost pointers) were leaked. */
int main(int argc, char *argv[])
{
    int t, i, j, k, m, test;
    /* Defaults (interior 32^3, 10 time steps) used when the command line
     * does not supply sizes; overridden below when argc permits. */
    int Nx = 34, Ny = 34, Nz = 34, Nt = 10;
    if (argc > 3) {
        Nx = atoi(argv[1])+2;
        Ny = atoi(argv[2])+2;
        Nz = atoi(argv[3])+2;
    }
    if (argc > 4)
        Nt = atoi(argv[4]);

    // allocate the arrays: A holds the two time planes, coef the 7 stencils
    double ****A = (double ****) malloc(sizeof(double***)*2);
    for(m=0; m<2;m++){
        A[m] = (double ***) malloc(sizeof(double**)*Nz);
        for(i=0; i<Nz; i++){
            A[m][i] = (double**) malloc(sizeof(double*)*Ny);
            for(j=0;j<Ny;j++){
                A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
            }
        }
    }
    double ****coef = (double ****) malloc(sizeof(double***)*7);
    for(m=0; m<7;m++){
        coef[m] = (double ***) malloc(sizeof(double**)*Nz);
        for(i=0; i<Nz; i++){
            coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
            for(j=0;j<Ny;j++){
                coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
            }
        }
    }

    // tile size information, including extra element to decide the list length
    int *tile_size = (int*) malloc(sizeof(int));
    tile_size[0] = -1;
    // The list is modified here before source-to-source transformations
    tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
    tile_size[0] = 8;
    tile_size[1] = 8;
    tile_size[2] = 4;
    tile_size[3] = 128;
    tile_size[4] = -1;

    // for timekeeping
    int ts_return = -1;
    struct timeval start, end, result;
    double tdiff = 0.0, min_tdiff=1.e100;

    const int BASE = 1024;

    // initialize the grid and coefficients with reproducible random values
    srand(42);
    for (i = 1; i < Nz; i++) {
        for (j = 1; j < Ny; j++) {
            for (k = 1; k < Nx; k++) {
                A[0][i][j][k] = 1.0 * (rand() % BASE);
            }
        }
    }
    for (m=0; m<7; m++) {
        for (i=1; i<Nz; i++) {
            for (j=1; j<Ny; j++) {
                for (k=1; k<Nx; k++) {
                    coef[m][i][j][k] = 1.0 * (rand() % BASE);
                }
            }
        }
    }

#ifdef LIKWID_PERFMON
    LIKWID_MARKER_INIT;
#pragma omp parallel
    {
        LIKWID_MARKER_THREADINIT;
#pragma omp barrier
        LIKWID_MARKER_START("calc");
    }
#endif

    int num_threads = 1;
#if defined(_OPENMP)
    num_threads = omp_get_max_threads();
#endif

    for(test=0; test<TESTS; test++){
        gettimeofday(&start, 0);
        // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
        for (t = 0; t < Nt-1; t++) {
            for (i = 1; i < Nz-1; i++) {
                for (j = 1; j < Ny-1; j++) {
                    for (k = 1; k < Nx-1; k++) {
                        A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[t%2][i  ][j  ][k  ] +
                                              coef[1][i][j][k] * A[t%2][i-1][j  ][k  ] +
                                              coef[2][i][j][k] * A[t%2][i  ][j-1][k  ] +
                                              coef[3][i][j][k] * A[t%2][i  ][j  ][k-1] +
                                              coef[4][i][j][k] * A[t%2][i+1][j  ][k  ] +
                                              coef[5][i][j][k] * A[t%2][i  ][j+1][k  ] +
                                              coef[6][i][j][k] * A[t%2][i  ][j  ][k+1];
                    }
                }
            }
        }
#pragma endscop
        gettimeofday(&end, 0);
        ts_return = timeval_subtract(&result, &end, &start);
        tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
        // BUG FIX: was `min(...)`, but only the MIN macro is defined
        min_tdiff = MIN(min_tdiff, tdiff);
        printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
    }
    PRINT_RESULTS(1, "variable no-symmetry")

#ifdef LIKWID_PERFMON
#pragma omp parallel
    {
        LIKWID_MARKER_STOP("calc");
    }
    LIKWID_MARKER_CLOSE;
#endif

    // Free allocated arrays (including the outer pointers and tile_size,
    // which the original leaked)
    for(i=0; i<Nz; i++){
        for(j=0;j<Ny;j++){
            free(A[0][i][j]);
            free(A[1][i][j]);
        }
        free(A[0][i]);
        free(A[1][i]);
    }
    free(A[0]);
    free(A[1]);
    free(A);
    for(m=0; m<7;m++){
        for(i=0; i<Nz; i++){
            for(j=0;j<Ny;j++){
                free(coef[m][i][j]);
            }
            free(coef[m][i]);
        }
        free(coef[m]);
    }
    free(coef);
    free(tile_size);
    return 0;
}
|
GB_binop__times_int16.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__times_int16)
// A.*B function (eWiseMult): GB (_AemultB_08__times_int16)
// A.*B function (eWiseMult): GB (_AemultB_02__times_int16)
// A.*B function (eWiseMult): GB (_AemultB_04__times_int16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__times_int16)
// A*D function (colscale): GB (_AxD__times_int16)
// D*A function (rowscale): GB (_DxB__times_int16)
// C+=B function (dense accum): GB (_Cdense_accumB__times_int16)
// C+=b function (dense accum): GB (_Cdense_accumb__times_int16)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__times_int16)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__times_int16)
// C=scalar+B GB (_bind1st__times_int16)
// C=scalar+B' GB (_bind1st_tran__times_int16)
// C=A+scalar GB (_bind2nd__times_int16)
// C=A'+scalar GB (_bind2nd_tran__times_int16)
// C type: int16_t
// A type: int16_t
// A pattern? 0
// B type: int16_t
// B pattern? 0
// BinaryOp: cij = (aij * bij)
#define GB_ATYPE \
int16_t
#define GB_BTYPE \
int16_t
#define GB_CTYPE \
int16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int16_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int16_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x * y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_TIMES || GxB_NO_INT16 || GxB_NO_TIMES_INT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B where all three matrices are dense.  The op is one of
// MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV (here: TIMES, int16).
// NOTE(review): unlike the kernels below there is no GB_DISABLE guard here;
// presumably the dispatcher never reaches this kernel when the op is
// compiled out — confirm against the caller.
void GB (_Cdense_ewise3_accum__times_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
// body supplied by the shared template, specialized via the GB_* macros above
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense (no accumulation into C).
void GB (_Cdense_ewise3_noaccum__times_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
// body supplied by the shared template, specialized via the GB_* macros above
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense matrix C, with B sliced
// into B_ntasks tasks over B_nthreads threads (B_ek_slicing).
GrB_Info GB (_Cdense_accumB__times_int16)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// body supplied by the shared subassign template
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b (passed type-erased via p_bwork) into a
// dense matrix C.
GrB_Info GB (_Cdense_accumb__times_int16)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int16_t
int16_t bwork = (*((int16_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE(review): unreachable — the inner block always returns above.  This
// file is auto-generated ("do not edit"), so the dead return is left as-is.
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D.
GrB_Info GB (_AxD__times_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// Cx aliases C's value array; the template writes the scaled entries here
int16_t *restrict Cx = (int16_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D.
GrB_Info GB (_DxB__times_int16)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// Cx aliases C's value array; the template writes the scaled entries here
int16_t *restrict Cx = (int16_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, or C<!M>=A+B.  For eWiseUnion, entries present
// in only one input are combined with the supplied alpha/beta scalars.
GrB_Info GB (_AaddB__times_int16)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// per-matrix slicing workspace, released by GB_FREE_WORKSPACE below
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
// alpha/beta are only read (and only valid) in the eWiseUnion case
int16_t alpha_scalar ;
int16_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((int16_t *) alpha_scalar_in)) ;
beta_scalar = (*((int16_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult method 08: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is
// sparse or hypersparse.
GrB_Info GB (_AemultB_08__times_int16)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 02: C<#> = A.*B when A is sparse/hyper and B is
// bitmap/full.  flipxy swaps the operands for non-commutative ops.
GrB_Info GB (_AemultB_02__times_int16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
// (TIMES is commutative, so GB_BINOP_FLIP is 0 and this branch is used.)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 04: C<M> = A.*B where M is sparse/hyper and both A and B
// are bitmap/full; work is sliced over M (M_ek_slicing).
GrB_Info GB (_AemultB_04__times_int16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is bitmap.
GrB_Info GB (_AemultB_bitmap__times_int16)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = op (x, Bx): apply the binary operator with the scalar bound as the
// first argument, i.e. Cx [k] = x * Bx [k] for every entry present in B.
GrB_Info GB (_bind1st__times_int16)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,  // bitmap of B (NULL if B is full)
    int64_t bnz,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    int16_t *Cz = (int16_t *) Cx_output ;
    int16_t *Bz = (int16_t *) Bx_input ;
    int16_t xval = (*((int16_t *) x_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < bnz ; k++)
    {
        // only touch positions where B has an entry
        if (GBB (Bb, k))
        {
            int16_t bval = GBX (Bz, k, false) ;
            Cz [k] = (xval * bval) ;
        }
    }
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = op (Ax, y): apply the binary operator with the scalar bound as the
// second argument, i.e. Cx [k] = Ax [k] * y for every entry present in A.
GrB_Info GB (_bind2nd__times_int16)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,  // bitmap of A (NULL if A is full)
    int64_t anz,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    int16_t *Cz = (int16_t *) Cx_output ;
    int16_t *Az = (int16_t *) Ax_input ;
    int16_t yval = (*((int16_t *) y_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        // only touch positions where A has an entry
        if (GBB (Ab, k))
        {
            int16_t aval = GBX (Az, k, false) ;
            Cz [k] = (aval * yval) ;
        }
    }
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x * aij) ; \
}
// C = op (x, A'): transpose A and apply the operator with the scalar bound
// first; the actual transpose loop lives in GB_unop_transpose.c and uses the
// GB_CAST_OP macro defined just above this function.
GrB_Info GB (_bind1st_tran__times_int16)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int16_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t x = (*((const int16_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for the rest of the file (preprocessing happens
// regardless of the early return above; here old and new values coincide)
#undef GB_ATYPE
#define GB_ATYPE \
int16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij * y) ; \
}
// C = op (A', y): transpose A and apply the operator with the scalar bound
// second; the transpose loop is GB_unop_transpose.c using the GB_CAST_OP
// macro redefined just above this function.
GrB_Info GB (_bind2nd_tran__times_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t y = (*((const int16_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
type_game.c | // A small game is played between 5 players. The players are asked to type a 6 letter word in English in parallel.
// The letters are numbered as A=1, B=2,….,Z=26. The sum of the numbers corresponding to the letters in the word is counted.
// All the players are given three parallel chances. The cumulative sum of words is computed after the
// fifth chance and the player with highest cumulative sum is declared as winner. Implement a solution for this using OpenMP.
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <time.h>
#include <omp.h>
#ifndef TOTAL_THREADS
#define TOTAL_THREADS 5
#endif
#ifndef MAX
#define MAX 26
#endif
// Returns a pseudo-random non-negative int from the shared rand() stream,
// used below to pick letters.
// NOTE(review): rand() is called from inside an OpenMP parallel region and
// the C standard does not guarantee it is thread-safe — consider rand_r()
// with a per-thread seed if reproducibility or safety matters.
int randomAssignment()
{
return rand();
}
// Each OpenMP thread plays one "player": it draws 5 random 6-letter words,
// scores each word as the sum of letter positions (a=1 ... z=26), and the
// player with the highest cumulative score wins.
// Returns 0 on success.
int main()
{
    srand(time(0));
    // Respect an explicit OMP_NUM_THREADS setting; otherwise force 5 players.
    if (!getenv("OMP_NUM_THREADS"))
    {
        omp_set_num_threads(TOTAL_THREADS);
    }
    int maxsum = 0;   // best cumulative score seen so far
    int tidmax = 0;   // thread id (player) holding maxsum
    char alphabet[MAX] = {'a', 'b', 'c', 'd', 'e', 'f', 'g',
                          'h', 'i', 'j', 'k', 'l', 'm', 'n',
                          'o', 'p', 'q', 'r', 's', 't', 'u',
                          'v', 'w', 'x', 'y', 'z'};
    #pragma omp parallel
    {
        int tid = omp_get_thread_num();
        int total_cost = 0;
        for (int round = 0; round < 5; round++)
        {
            // BUG FIX: the buffer was 6 bytes and printed with %s without a
            // NUL terminator, reading past the array (undefined behavior).
            // Use 7 bytes and terminate before printing.
            char word[7];
            for (int letter = 0; letter < 6; letter++)
            {
                word[letter] = alphabet[randomAssignment() % MAX];
            }
            word[6] = '\0';
            printf("6 letter word for player %d is %s\n", tid, word);
            int cost = 0;
            for (int j = 0; j < 6; j++)
            {
                // letter score: 'a' -> 1, ..., 'z' -> 26
                cost += (int)word[j] - (int)'a' + 1;
            }
            total_cost += cost;
        }
        // Serialize the compare-and-update of the shared winner state.
        #pragma omp critical
        {
            if (total_cost > maxsum)
            {
                maxsum = total_cost;
                tidmax = tid;
            }
        }
    }
    printf("Player %d won with score %d\n", tidmax, maxsum);
    return 0;
}
c_qsort.c | /* ***********************************************************************
This program is part of the
OpenMP Source Code Repository
http://www.pcg.ull.es/ompscr/
e-mail: ompscr@etsii.ull.es
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
(LICENSE file) along with this program; if not, write to
the Free Software Foundation, Inc., 59 Temple Place, Suite 330,
Boston, MA 02111-1307 USA
FILE: c_qsort.c
VERSION: 1.0
DATE: May 2004
AUTHOR: F. de Sande
COMMENTS TO: sande@csi.ull.es
DESCRIPTION: Parallel implementation of Quicksort using OpenMP
Sorts an integer array
COMMENTS: The code requires nested Parallelism.
REFERENCES: C. A. R. Hoare,
ACM Algorithm 64}: Quicksort",
Communications of the ACM",
vol. 4, no. 7, pg. 321. Jul 1961
http://en.wikipedia.org/wiki/Quicksort
BASIC PRAGMAS: parallel for
USAGE: ./c_qsort.par 2000000
INPUT: The size (in K) of the vector to sort
OUTPUT: The code tests that the vector is sorted
FILE FORMATS: -
RESTRICTIONS: -
REVISION HISTORY:
**************************************************************************/
#include "OmpSCR.h"
#define NUM_ARGS 1
#define NUM_TIMERS 1
#define KILO (1024)
#define MEGA (1024 * 1024)
#define DEFAULT_SIZE (2 * MEGA)
#define MAXSIZE (9 * MEGA)
#define NUM_STEPS 10 /* No. of iterations (number of vectors to sort) */
char USAGE_STR[] = "<size_in_Kb>";
int SIZE;
int array[MAXSIZE];
/* -----------------------------------------------------------------------
PROTOTYPES
* ----------------------------------------------------------------------- */
void initialize(int *v, int seed);
void testit(int *v);
void qs(int *v, int first, int last);
/* -----------------------------------------------------------------------
IMPLEMENTATION
* ----------------------------------------------------------------------- */
/* -----------------------------------------------------------------------
Sets randomly the values for the array
* ----------------------------------------------------------------------- */
/* -----------------------------------------------------------------------
   Fills the first SIZE slots of v with pseudo-random ints, seeding the
   PRNG from `seed` so each step sorts a reproducible vector.
 * ----------------------------------------------------------------------- */
void initialize(int *v, int seed) {
  unsigned idx;
  srandom(seed);
  for (idx = 0; idx < SIZE; idx++) {
    v[idx] = (int)random();
  }
}
/* -----------------------------------------------------------------------
Tests the result
* ----------------------------------------------------------------------- */
/* -----------------------------------------------------------------------
   Verifies v[0..SIZE-1] is in non-decreasing order and reports the result
   on stdout.
 * ----------------------------------------------------------------------- */
void testit(int *v) {
  int idx;
  for (idx = 0; idx < SIZE - 1; idx++) {
    if (v[idx] > v[idx + 1]) {
      /* first out-of-order pair found: report and stop scanning */
      printf("Array NOT sorted.\n");
      return;
    }
  }
  printf("Array sorted.\n");
}
/* ----------------------------------------------------------------------- */
/* -----------------------------------------------------------------------
   Recursive parallel quicksort of v[first..last] (inclusive), Hoare
   partition around the middle element.  lo[i]/hi[i] hold the bounds of the
   two sub-ranges so the recursion can run as a 2-iteration omp for
   (requires nested parallelism to actually run concurrently).
 * ----------------------------------------------------------------------- */
void qs(int *v, int first, int last) {
  int lo[2], hi[2], pivot, i, tmp;
  if (first >= last)
    return; /* zero or one element: nothing to sort */
  /* lo[1]/hi[0] double as the partition cursors */
  lo[1] = first;
  hi[0] = last;
  pivot = v[(first + last) / 2];
  while (lo[1] <= hi[0]) {
    while (v[lo[1]] < pivot)
      lo[1]++;
    while (pivot < v[hi[0]])
      hi[0]--;
    if (lo[1] <= hi[0]) {
      tmp = v[lo[1]];
      v[lo[1]] = v[hi[0]];
      v[hi[0]] = tmp;
      lo[1]++;
      hi[0]--;
    }
  }
  /* after the loop: lo[0..1]/hi[0..1] are the two sub-range bounds */
  lo[0] = first;
  hi[1] = last;
  #pragma omp parallel
  {
    #pragma omp for nowait
    for (i = 0; i <= 1; i++) {
      qs(v, lo[i], hi[i]);
    }
  }
}
/* ----------------------------------------------------------------------- */
/* -----------------------------------------------------------------------
   Driver: parses the vector size via the OmpSCR harness, then sorts
   NUM_STEPS freshly-initialized vectors, timing only the qs() calls
   (timer 0 accumulates across all steps) and checking each result.
 * ----------------------------------------------------------------------- */
int main(int argc, char *argv[]) {
int STEP, NUMTHREADS;
double total_time;
char *PARAM_NAMES[NUM_ARGS] = {"Size (in K)"};
char *TIMERS_NAMES[NUM_TIMERS] = {"Total_time" };
char *DEFAULT_VALUES[NUM_ARGS] = {"2048 K"};
NUMTHREADS = omp_get_max_threads();
OSCR_init (NUMTHREADS, "Quicksort", "Use 'qsort' <size (in K)>", NUM_ARGS,
PARAM_NAMES, DEFAULT_VALUES , NUM_TIMERS, NUM_TIMERS, TIMERS_NAMES,
argc, argv);
// NOTE(review): the usage text says "size in K" but the value is used as an
// element count directly (no * KILO) — confirm against OSCR_getarg_int docs.
SIZE = OSCR_getarg_int(1);
if (SIZE > MAXSIZE) {
printf("Size: %d Maximum size: %d\n", SIZE, MAXSIZE);
exit(-1);
}
/* Default: DEFAULT_SIZE */
for (STEP = 0; STEP < NUM_STEPS; STEP++) {
initialize(array, STEP);
OSCR_timer_start(0);
qs(array, 0, SIZE-1);
OSCR_timer_stop(0);
testit(array);
}
total_time = OSCR_timer_read(0);
OSCR_report(1, TIMERS_NAMES);
printf("\n \t# THREADS \tSIZE \tSTEPS \tTIME (secs.) \n");
printf("\t%d \t\t%d \t%d \t%14.6lf \n", NUMTHREADS, SIZE, NUM_STEPS, total_time);
} /* main */
/*
* vim:ts=2:sw=2:
*/
|
mozilla_ng_fmt_plug.c | /*
* Cracker for Mozilla's key3.db's master password.
*
* All the real logic here is borrowed from Milen Rangelov's Hashkill project
* and from Deque's article.
*
* Thanks to Jim Fougeron for all the help!
*
* This software is Copyright (c) 2014, Sanju Kholia <sanju.kholia [at]
* gmail.com> and Dhiru Kholia <dhiru [at] openwall.com>, and it is hereby
* released to the general public under the following terms:
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_mozilla;
#elif FMT_REGISTERS_H
john_register_one(&fmt_mozilla);
#else
#include <string.h>
#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 2048 // XXX
#endif
#endif
#include "arch.h"
#include "md5.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "johnswap.h"
#include "params.h"
#include "options.h"
#include "memdbg.h"
#include "stdint.h"
#include <openssl/des.h>
#include "sha.h"
#define FORMAT_LABEL "Mozilla"
#define FORMAT_NAME "Mozilla key3.db"
#define FORMAT_TAG "$mozilla$"
#define TAG_LENGTH (sizeof(FORMAT_TAG) - 1)
#define ALGORITHM_NAME "SHA1 3DES 32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH 0
#define PLAINTEXT_LENGTH 125
#define BINARY_SIZE 16
#define BINARY_ALIGN sizeof(ARCH_WORD_32)
#define SALT_SIZE sizeof(struct custom_salt)
#define SALT_ALIGN sizeof(int)
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
static struct fmt_tests tests[] = {
{"$mozilla$*3*20*1*5199adfab24e85e3f308bacf692115f23dcd4f8f*11*2a864886f70d010c050103*16*9debdebd4596b278de029b2b2285ce2e*20*2c4d938ccb3f7f1551262185ccee947deae3b8ae", "12345678"},
{"$mozilla$*3*20*1*4f184f0d3c91cf52ee9190e65389b4d4c8fc66f2*11*2a864886f70d010c050103*16*590d1771368107d6be64844780707787*20*b8458c712ffcc2ff938409804cf3805e4bb7d722", "openwall"},
{"$mozilla$*3*20*1*897f35ff10348f0d3a7739dbf0abddc62e2e64c3*11*2a864886f70d010c050103*16*1851b917997b3119f82b8841a764db62*20*197958dd5e114281f59f9026ad8b7cfe3de7196a", "password"},
{NULL}
};
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static int *saved_len;
static ARCH_WORD_32 (*crypt_out)[BINARY_SIZE / sizeof(ARCH_WORD_32)];
// Per-hash salt data recovered from the ciphertext by get_salt().
static struct custom_salt {
SHA_CTX pctx; // SHA-1 context pre-loaded with global_salt (see get_salt)
int global_salt_length;
unsigned char global_salt[20];
int local_salt_length; // entry-salt (ES)
unsigned char local_salt[20]; // zero-padded to 20 bytes (struct is memset)
} *cur_salt;
// One-time format setup: scale the key buffers for OpenMP and allocate
// the per-candidate key/length/result arrays.
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	// BUG FIX: omp_get_num_threads() returns 1 outside a parallel region,
	// which silently disabled the OpenMP buffer scaling.  Use
	// omp_get_max_threads() to size the buffers for the real thread count.
	int omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	saved_key = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*saved_key));
	saved_len = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*saved_len));
	crypt_out = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*crypt_out));
}
// Release the buffers allocated in init(), in reverse allocation order.
static void done(void)
{
MEM_FREE(crypt_out);
MEM_FREE(saved_len);
MEM_FREE(saved_key);
}
// Structural validation of one candidate hash line.  Walks the
// '*'-separated fields of "$mozilla$*ver*lsLen*nnLen*ls*oidLen*oid*pcLen*
// pc*gsLen*gs", checking each length is decimal and <= 20 and each hex
// field matches its declared length.  Returns 1 if parseable, 0 otherwise.
// Works on a strdup'd copy because strtokm() modifies its input.
static int valid(char *ciphertext, struct fmt_main *self)
{
char *p, *keepptr;
int res;
if (strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH))
return 0;
keepptr=strdup(ciphertext);
p = &keepptr[TAG_LENGTH];
if (*p != '*')
goto err;
++p;
if ((p = strtokm(p, "*")) == NULL) /* version */
goto err;
if(!isdec(p))
goto err;
res = atoi(p);
if (res != 3) /* we only know about this particular version */
goto err;
if ((p = strtokm(NULL, "*")) == NULL) /* local_salt_length */
goto err;
if(!isdec(p))
goto err;
res = atoi(p);
if (res > 20)
goto err;
if ((p = strtokm(NULL, "*")) == NULL) /* nnLen (we ignore nnlen) */
goto err;
if ((p = strtokm(NULL, "*")) == NULL) /* local_salt */
goto err;
if (strlen(p) /2 != res)
goto err;
if (!ishexlc(p))
goto err;
if ((p = strtokm(NULL, "*")) == NULL) /* oidDatalen */
goto err;
if(!isdec(p))
goto err;
res = atoi(p);
if (res > 20)
goto err;
if ((p = strtokm(NULL, "*")) == NULL) /* oidData */
goto err;
if (strlen(p) / 2 != res)
goto err;
if (!ishexlc(p))
goto err;
if ((p = strtokm(NULL, "*")) == NULL) /* password_check_length */
goto err;
if(!isdec(p))
goto err;
res = atoi(p);
if (res > 20)
goto err;
if ((p = strtokm(NULL, "*")) == NULL) /* password_check */
goto err;
if (strlen(p) / 2 != res)
goto err;
if (!ishexlc(p))
goto err;
if ((p = strtokm(NULL, "*")) == NULL) /* global_salt_length */
goto err;
if(!isdec(p))
goto err;
res = atoi(p);
if (res > 20)
goto err;
if ((p = strtokm(NULL, "*")) == NULL) /* global_salt */
goto err;
if (strlen(p) / 2 != res)
goto err;
if (!ishexlc(p))
goto err;
MEM_FREE(keepptr);
return 1;
err:
MEM_FREE(keepptr);
return 0;
}
// Extracts the salt fields from a (pre-validated) ciphertext into a static
// custom_salt and precomputes the SHA-1 prefix over global_salt so
// crypt_all() only has to hash the candidate password on top of it.
// Field walking relies on valid() having verified every '*' is present.
static void *get_salt(char *ciphertext)
{
int i;
static struct custom_salt cs;
char *p, *q;
memset(&cs, 0, SALT_SIZE); // cs.local_salt needs to be zero padded to length 20
p = ciphertext + TAG_LENGTH;
q = strchr(p, '*'); // version
p = q + 1;
q = strchr(p, '*'); // local_salt_length
p = q + 1;
cs.local_salt_length = atoi(p);
q = strchr(p, '*'); // nnLen
p = q + 1;
q = strchr(p, '*'); // local_salt
p = q + 1;
// hex-decode local_salt (atoi16 maps a hex digit to its value)
for (i = 0; i < cs.local_salt_length; i++)
cs.local_salt[i] = (atoi16[ARCH_INDEX(p[2 * i])] << 4) |
atoi16[ARCH_INDEX(p[2 * i + 1])];
q = strchr(p, '*'); // oidLen (unused)
p = q + 1;
q = strchr(p, '*'); // oidData (unused)
p = q + 1;
q = strchr(p, '*'); // password_check_length
p = q + 1;
// Not stored in salt. This is the binary length
q = strchr(p, '*'); // password_check
p = q + 1;
// Not stored in salt, this is the binary.
q = strchr(p, '*'); // global_salt_length
p = q + 1;
cs.global_salt_length = atoi(p);
q = strchr(p, '*'); // global_salt
p = q + 1;
for (i = 0; i < cs.global_salt_length; i++)
cs.global_salt[i] = atoi16[ARCH_INDEX(p[i * 2])]
* 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])];
// Calculate partial sha1 data for password hashing
SHA1_Init(&cs.pctx);
SHA1_Update(&cs.pctx, cs.global_salt, cs.global_salt_length);
return (void *)&cs;
}
// Returns the first BINARY_SIZE bytes of the password_check field, decoded
// from hex into a static, suitably-aligned buffer.  The ciphertext layout
// was already verified by valid(), so the eight leading '*'-separated
// fields (version .. password_check_length) can be skipped blindly.
static void *get_binary(char *ciphertext)
{
	static union {
		unsigned char c[BINARY_SIZE];
		ARCH_WORD dummy;
	} buf;
	unsigned char *out = buf.c;
	char *p = ciphertext + TAG_LENGTH;
	int i;

	// skip: version, local_salt_length, nnLen, local_salt, oidLen,
	// oidData, password_check_length — p then points at password_check
	for (i = 0; i < 8; i++)
		p = strchr(p, '*') + 1;

	// hex-decode the leading BINARY_SIZE bytes of password_check
	for (i = 0; i < BINARY_SIZE; i++, p += 2)
		out[i] = (atoi16[ARCH_INDEX(p[0])] << 4) |
		          atoi16[ARCH_INDEX(p[1])];

	return out;
}
// Hash-table bucket selectors: return the low PH_MASK_n bits of the first
// 32-bit word of the computed binary for candidate `index`.
static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; }
// Selects the salt that subsequent crypt_all() calls will use.
static void set_salt(void *salt)
{
cur_salt = (struct custom_salt *)salt;
}
// http://www.drh-consultancy.demon.co.uk/key3.html
// Derives the 3DES key material from each candidate password (Mozilla
// key3.db scheme) and encrypts the "password-check" string into
// crypt_out[index].  Statement order is significant: the ipad/opad HMAC
// prefix is computed once and reused via CTX memcpy (see comment below).
static int crypt_all(int *pcount, struct db_salt *salt)
{
const int count = *pcount;
int index = 0;
#ifdef _OPENMP
#pragma omp parallel for
for (index = 0; index < count; index++)
#endif
{
SHA_CTX ctx, ctxi, ctxo;
int i;
union {
unsigned char uc[64];
uint32_t ui[64/4];
} pad;
unsigned char buffer[20];
unsigned char tk[20];
unsigned char key[40];
DES_cblock ivec;
DES_key_schedule ks1, ks2, ks3;
// HP = SHA1(global-salt||password)
// Copy already calculated partial hash data
memcpy(&ctx, &cur_salt->pctx, sizeof(SHA_CTX));
SHA1_Update(&ctx, saved_key[index], saved_len[index]);
SHA1_Final(buffer, &ctx);
// CHP = SHA1(HP||entry-salt) // entry-salt (ES) is local_salt
SHA1_Init(&ctx);
SHA1_Update(&ctx, buffer, 20);
SHA1_Update(&ctx, cur_salt->local_salt, cur_salt->local_salt_length);
SHA1_Final(buffer, &ctx);
// Step 0 for all hmac, store off the first half (the key is the same for all 3)
// this will avoid having to setup the ipad/opad 2 times, and also avoids 4 SHA calls
// reducing the hmac calls from 12 SHA limbs, down to 8 and ipad/opad loads from 3
// down to 1. It adds 4 CTX memcpy's, but that is a very fair trade off.
SHA1_Init(&ctxi);
SHA1_Init(&ctxo);
memset(pad.uc, 0x36, 64);
for (i = 0; i < 20; ++i)
pad.uc[i] ^= buffer[i];
SHA1_Update(&ctxi, pad.uc, 64);
// flip the ipad (0x36) block into the opad (0x5c) block in place
for (i = 0; i < 64/4; ++i)
pad.ui[i] ^= 0x36363636^0x5c5c5c5c;
SHA1_Update(&ctxo, pad.uc, 64);
// k1 = HMAC(PES||ES) // use CHP as the key, PES is ES which is zero padded to length 20
// NOTE, memcpy ctxi/ctxo to harvest off the preloaded hmac key
memcpy(&ctx, &ctxi, sizeof(ctx));
SHA1_Update(&ctx, cur_salt->local_salt, 20);
SHA1_Update(&ctx, cur_salt->local_salt, cur_salt->local_salt_length);
SHA1_Final(buffer, &ctx);
memcpy(&ctx, &ctxo, sizeof(ctx));
SHA1_Update(&ctx, buffer, 20);
SHA1_Final(key, &ctx);
// tk = HMAC(PES) // use CHP as the key
// NOTE, memcpy ctxi/ctxo to harvest off the preloaded hmac key
memcpy(&ctx, &ctxi, sizeof(ctx));
SHA1_Update(&ctx, cur_salt->local_salt, 20);
SHA1_Final(buffer, &ctx);
memcpy(&ctx, &ctxo, sizeof(ctx));
SHA1_Update(&ctx, buffer, 20);
SHA1_Final(tk, &ctx);
// k2 = HMAC(tk||ES) // use CHP as the key
// NOTE, ctxi and ctxo are no longer needed after this hmac, so we simply use them
SHA1_Update(&ctxi, tk, 20);
SHA1_Update(&ctxi, cur_salt->local_salt, cur_salt->local_salt_length);
SHA1_Final(buffer, &ctxi);
SHA1_Update(&ctxo, buffer, 20);
SHA1_Final(key+20, &ctxo);
// k = k1||k2 // encrypt "password-check" string using this key
// 3DES keys are bytes 0..23 of k; the IV is the last 8 bytes of the 40
DES_set_key((DES_cblock *) key, &ks1);
DES_set_key((DES_cblock *) (key+8), &ks2);
DES_set_key((DES_cblock *) (key+16), &ks3);
memcpy(ivec, key + 32, 8); // last 8 bytes!
// PKCS#5 padding (standard block padding)
DES_ede3_cbc_encrypt((unsigned char*)"password-check\x02\x02", (unsigned char*)crypt_out[index], 16, &ks1, &ks2, &ks3, &ivec, DES_ENCRYPT);
}
return count;
}
// Quick check: does any computed result match the target binary's first
// 32-bit word?  Without _OPENMP the loop header is compiled out, so only
// index 0 is checked — presumably safe because MAX_KEYS_PER_CRYPT is 1 in
// non-OMP builds (so count is always 1); confirm against the format core.
static int cmp_all(void *binary, int count)
{
int index = 0;
#ifdef _OPENMP
for (; index < count; index++)
#endif
if (((ARCH_WORD_32*)binary)[0] == crypt_out[index][0])
return 1;
return 0;
}
// Full BINARY_SIZE comparison for one candidate.
static int cmp_one(void *binary, int index)
{
return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}
// cmp_one already compares the full stored binary, so no extra work here.
static int cmp_exact(char *source, int index)
{
return 1;
}
static void mozilla_set_key(char *key, int index)
{
saved_len[index] = strlen(key);
strncpy(saved_key[index], key, sizeof(saved_key[0]));
}
// Returns the stored (NUL-terminated) candidate password for slot `index`.
static char *get_key(int index)
{
return saved_key[index];
}
struct fmt_main fmt_mozilla = {
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
BINARY_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP,
{ NULL },
tests
}, {
init,
done,
fmt_default_reset,
fmt_default_prepare,
valid,
fmt_default_split,
get_binary,
get_salt,
{ NULL },
fmt_default_source,
{
fmt_default_binary_hash_0,
fmt_default_binary_hash_1,
fmt_default_binary_hash_2,
fmt_default_binary_hash_3,
fmt_default_binary_hash_4,
fmt_default_binary_hash_5,
fmt_default_binary_hash_6
},
fmt_default_salt_hash,
NULL,
set_salt,
mozilla_set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
get_hash_0,
get_hash_1,
get_hash_2,
get_hash_3,
get_hash_4,
get_hash_5,
get_hash_6
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif
|
GB_binop__plus_int8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__plus_int8
// A.*B function (eWiseMult): GB_AemultB__plus_int8
// A*D function (colscale): GB_AxD__plus_int8
// D*A function (rowscale): GB_DxB__plus_int8
// C+=B function (dense accum): GB_Cdense_accumB__plus_int8
// C+=b function (dense accum): GB_Cdense_accumb__plus_int8
// C+=A+B function (dense ewise3): GB_Cdense_ewise3_accum__plus_int8
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__plus_int8
// C=scalar+B GB_bind1st__plus_int8
// C=scalar+B' GB_bind1st_tran__plus_int8
// C=A+scalar GB_bind2nd__plus_int8
// C=A'+scalar GB_bind2nd_tran__plus_int8
// C type: int8_t
// A type: int8_t
// B,b type: int8_t
// BinaryOp: cij = (aij + bij)
// Operand/result type macros and per-entry kernels consumed by the generic
// templates #included in the functions below.
#define GB_ATYPE \
int8_t
#define GB_BTYPE \
int8_t
#define GB_CTYPE \
int8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int8_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int8_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
// GB_CX(p): the p-th entry in C's value array Cx
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = (x + y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
// NOTE(review): expands to the non-identifier "(none)" -- presumably the
// templates only reference GB_CBLAS_AXPY when it names a real routine;
// confirm against the template sources.
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_PLUS || GxB_NO_INT8 || GxB_NO_PLUS_INT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B where C, A and B are all dense, applying cij += (aij + bij)
// entrywise via the shared dense ewise3-accum template.
// NOTE(review): unlike the sibling kernels below, this function returns void
// and does not test GB_DISABLE -- presumably the caller performs the
// disabled-operator check before dispatching here; confirm at the call site.
void GB_Cdense_ewise3_accum__plus_int8
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads    // number of threads the template may use
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B (no accumulation) where all three matrices are dense, via the
// shared dense ewise3-noaccum template.
// Returns GrB_NO_VALUE when this operator is compiled out (GB_DISABLE),
// GrB_SUCCESS otherwise.
GrB_Info GB_Cdense_ewise3_noaccum__plus_int8
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate the sparse matrix B into the dense matrix C, via the
// dense-subassign (method 23) template.
// kfirst_slice/klast_slice/pstart_slice describe how B's entries are split
// across the ntasks parallel tasks (see GB_ek_slice.h, included above).
// Returns GrB_NO_VALUE when this operator is compiled out (GB_DISABLE),
// GrB_SUCCESS otherwise.
GrB_Info GB_Cdense_accumB__plus_int8
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate the scalar b into the dense matrix C, via the
// dense-subassign (method 22) template.
//
// p_bwork: pointer to the scalar b, passed as untyped GB_void and cast here
//          to this operator's int8_t type.
// Returns GrB_NO_VALUE when this operator is compiled out (GB_DISABLE),
// GrB_SUCCESS otherwise.
GrB_Info GB_Cdense_accumb__plus_int8
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int8_t
int8_t bwork = (*((int8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
}
// single exit point: the original had a second, identical return inside the
// scope above, which made this one unreachable; it matches the structure of
// GB_Cdense_accumB now
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D, via the
// colscale template.
// A_is_pattern/D_is_pattern: that operand is structural only -- presumably
// its values are ignored by the template; confirm in GB_AxB_colscale_meta.c.
// The slice arrays split the work across ntasks tasks (see GB_ek_slice.h).
GrB_Info GB_AxD__plus_int8
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// C's values are written directly; the template fills Cx
int8_t *GB_RESTRICT Cx = (int8_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D, via the rowscale
// template.  D_is_pattern/B_is_pattern mark structural-only operands, as in
// GB_AxD above.
GrB_Info GB_DxB__plus_int8
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// C's values are written directly; the template fills Cx
int8_t *GB_RESTRICT Cx = (int8_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// GB_FREE_ALL: release the three per-matrix ek_slice workspaces used by the
// eWiseAdd/eWiseMult kernels below.  The kernels initialize all nine
// pointers to NULL before use; NOTE(review): assumed GB_ek_slice_free is a
// no-op on NULL pointers -- confirm in GB_ek_slice.h.
#undef GB_FREE_ALL
#define GB_FREE_ALL \
{ \
GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}
// eWiseAdd: C = A+B or C<M> = A+B, via the shared add template.
// The nine slice pointers are initialized to NULL so that GB_FREE_ALL
// (defined above) can be invoked unconditionally after the template runs.
GrB_Info GB_AaddB__plus_int8
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_add_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B, via the shared emult template.
// Same slice-workspace protocol as GB_AaddB above: pointers start NULL and
// GB_FREE_ALL releases whatever the template allocated.
GrB_Info GB_AemultB__plus_int8
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_emult_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = op (x, Bx): apply z = (x + bij) to every entry present in B, with the
// scalar x bound as the first operand.  Bb is B's bitmap (NULL when B is not
// bitmap); entries absent from the bitmap are skipped.
GrB_Info GB_bind1st__plus_int8
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *GB_RESTRICT Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *Cx = (int8_t *) Cx_output ;
    const int8_t *Bx = (const int8_t *) Bx_input ;
    const int8_t x = (*((int8_t *) x_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        // only touch entries that are present in B's bitmap
        if (GBB (Bb, k))
        {
            Cx [k] = (x + Bx [k]) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = op (Ax, y): apply z = (aij + y) to every entry present in A, with the
// scalar y bound as the second operand.  Ab is A's bitmap (NULL when A is
// not bitmap); entries absent from the bitmap are skipped.
GrB_Info GB_bind2nd__plus_int8
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *GB_RESTRICT Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    const int8_t y = (*((int8_t *) y_input)) ;
    const int8_t *Ax = (const int8_t *) Ax_input ;
    int8_t *Cx = (int8_t *) Cx_output ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        // only touch entries that are present in A's bitmap
        if (GBB (Ab, k))
        {
            Cx [k] = (Ax [k] + y) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// Per-entry kernel for the transpose template below: cij = op (x, aij),
// scalar on the left.  No typecasting occurs, in spite of the macro name.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = Ax [pA] ; \
Cx [pC] = (x + aij) ; \
}
// C = op (x, A'): transpose A and apply the binary operator with the scalar
// x bound as the first operand, via the unary-op transpose template.
GrB_Info GB_bind1st_tran__plus_int8
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t x = (*((const int8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code that follows; for this int8 operator the
// redefinition is identical, so this is effectively a no-op
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// Per-entry kernel for the transpose template below: cij = op (aij, y),
// scalar on the right.  No typecasting occurs, in spite of the macro name.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = Ax [pA] ; \
Cx [pC] = (aij + y) ; \
}
// C = op (A', y): transpose A and apply the binary operator with the scalar
// y bound as the second operand, via the unary-op transpose template.
GrB_Info GB_bind2nd_tran__plus_int8
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t y = (*((const int8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
countpairs_s_mu_mocks_impl_double.c | /* This file is auto-generated from countpairs_s_mu_mocks_impl.c.src */
#ifndef DOUBLE_PREC
#define DOUBLE_PREC
#endif
// # -*- mode: c -*-
/* File: countpairs_s_mu_mocks_impl.c.src */
/*
This file is a part of the Corrfunc package
Copyright (C) 2015-- Manodeep Sinha (manodeep@gmail.com)
License: MIT LICENSE. See LICENSE file under the top-level
directory at https://github.com/manodeep/Corrfunc/
*/
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <signal.h>
#include <unistd.h>
#include <gsl/gsl_interp.h>
#include "countpairs_s_mu_mocks_impl_double.h"
#include "countpairs_s_mu_mocks_kernels_double.c"
#include "cellarray_mocks_double.h"
#include "gridlink_mocks_impl_double.h"
#include "defs.h"
#include "utils.h"
#include "cosmology_params.h"
#include "set_cosmo_dist.h"
#include "cpu_features.h"
#include "progressbar.h"
#include "proj_functions_double.h"
#if defined(_OPENMP)
#include <omp.h>
#endif
// Global abort flag for the DDsmu mocks pair counter.  Set to EXIT_FAILURE
// by the signal handler below and polled (with an omp flush) inside the main
// cell loop so that a Ctrl-C during the Python extension call can terminate
// the computation cleanly instead of killing the interpreter.
int interrupt_status_DDsmu_mocks_double=EXIT_SUCCESS;
// Signal handler: report which signal arrived, then flag the main loop to abort.
void interrupt_handler_countpairs_s_mu_mocks_double(int signo)
{
fprintf(stderr,"Received signal = `%s' (signo = %d). Aborting \n",strsignal(signo), signo);
interrupt_status_DDsmu_mocks_double = EXIT_FAILURE;
}
/* Validate the input RA, DEC and cz arrays and fix common convention
   mismatches IN PLACE:
     - RA (phi) containing negative values -> shifted by +180 deg
       (input convention [-180,180] mapped onto the expected [0,360])
     - DEC (theta) containing values > 90  -> shifted by -90 deg
       (input convention [0,180] mapped onto the expected [-90,90])
     - max(cz) < 10 -> values assumed to be redshift z rather than cz, and
       multiplied by the speed of light.
   Returns EXIT_SUCCESS, or EXIT_FAILURE for NULL arrays or any DEC > 180 deg.
   N == 0 is accepted trivially. */
int check_ra_dec_cz_s_mu_double(const int64_t N, double *phi, double *theta, double *cz)
{
    if(N==0) {
        return EXIT_SUCCESS;
    }
    if(phi == NULL || theta == NULL || cz == NULL) {
        fprintf(stderr,"Input arrays can not be NULL. Have RA = %p DEC = %p cz = %p\n", phi, theta, cz);
        return EXIT_FAILURE;
    }

    int fix_cz  = 0;
    int fix_ra  = 0;
    int fix_dec = 0;

    /* if max cz is smaller than this threshold, assume z has been supplied
       rather than cz */
    const double max_cz_threshold = 10.0;
    double max_cz = 0.0;

    /* First pass: detect which conventions (if any) need fixing */
    for(int64_t i=0;i<N;i++) {
        if(cz[i] > max_cz) max_cz = cz[i];
        if(phi[i] < 0.0) {
            fix_ra = 1;
        }
        if(theta[i] > 90.0) {
            fix_dec = 1;
        }
        if(theta[i] > 180) {
            /* bug fix: there was no space between the printed value and
               "should", producing e.g. "190.000000should be less ..." */
            fprintf(stderr,"theta[%"PRId64"] = %"REAL_FORMAT" should be less than 180 deg\n", i, theta[i]);
            return EXIT_FAILURE;
        }
    }
    if(max_cz < max_cz_threshold) fix_cz = 1;

    /* Second pass: only run the loop if something needs to be fixed */
    if(fix_cz==1 || fix_ra == 1 || fix_dec == 1) {
        if(fix_ra == 1) {
            fprintf(stderr,"%s> Out of range values found for ra. Expected ra to be in the range [0.0,360.0]. Found ra values in [-180,180] -- fixing that\n", __FUNCTION__);
        }
        if(fix_dec == 1) {
            fprintf(stderr,"%s> Out of range values found for dec. Expected dec to be in the range [-90.0,90.0]. Found dec values in [0,180] -- fixing that\n", __FUNCTION__);
        }
        if(fix_cz == 1) {
            fprintf(stderr,"%s> Out of range values found for cz. Expected input to be `cz' but found `z' instead. max_cz (found in input) = %"REAL_FORMAT" threshold "
                    "= %"REAL_FORMAT"\n",__FUNCTION__,max_cz,max_cz_threshold);
        }
        for(int64_t i=0;i<N;i++) {
            if(fix_ra==1) {
                phi[i] += (double) 180.0;
            }
            if(fix_dec==1) {
                theta[i] -= (double) 90.0;
            }
            if(fix_cz == 1) {
                cz[i] *= (double) SPEED_OF_LIGHT;/* input was z -> convert to cz */
            }
        }
    }
    return EXIT_SUCCESS;
}
/* Runtime kernel dispatch for the DDsmu mocks pair counter.
   Selects among the compiled-in kernels (AVX > SSE4.2 > portable fallback)
   based on options->instruction_set and the CPU's detected capabilities.
   The chosen pointer is cached in a function-local static, keyed on the
   instruction set, so repeated calls with the same options are cheap.
   Returns NULL on an internal dispatch error. */
countpairs_mocks_func_ptr_double countpairs_s_mu_mocks_driver_double(const struct config_options *options)
{
    static countpairs_mocks_func_ptr_double function = NULL;
    static isa old_isa=-1;
    /* cache hit: same instruction set as last time */
    if(old_isa == options->instruction_set) {
        return function;
    }

    /* Array of function pointers, fastest first; the last entry is always
       the portable fallback kernel */
    countpairs_mocks_func_ptr_double allfunctions[] = {
#ifdef __AVX__
        countpairs_s_mu_mocks_avx_intrinsics_double,
#endif
#ifdef __SSE4_2__
        countpairs_s_mu_mocks_sse_intrinsics_double,
#endif
        countpairs_s_mu_mocks_fallback_double
    };
    const int num_functions = sizeof(allfunctions)/sizeof(void *);
    const int fallback_offset = num_functions - 1;
#if defined(__AVX__) || defined __SSE4_2__
    const int highest_isa = instrset_detect();
#endif
    int curr_offset = 0;

    /* Is the AVX function supported at runtime and enabled at compile-time? */
    int avx_offset = fallback_offset;
#ifdef __AVX__
    avx_offset = highest_isa >= 7 ? curr_offset:fallback_offset;
    curr_offset++;
#endif

    /* Is the SSE function supported at runtime and enabled at compile-time? */
    int sse_offset = fallback_offset;
#ifdef __SSE4_2__
    sse_offset = highest_isa >= 6 ? curr_offset:fallback_offset;
    curr_offset++;
#endif
    /* sanity check: every enabled kernel must have claimed one slot */
    if( curr_offset != fallback_offset) {
        fprintf(stderr,"ERROR: Bug in code (current offset = %d *should equal* fallback function offset = %d)\n",
                curr_offset, fallback_offset);
        return NULL;
    }

    int function_dispatch=0;
    /* Check that cpu supports feature */
    if(options->instruction_set >= 0) {
        switch(options->instruction_set) {
        case(AVX512F):
        case(AVX2):
        case(AVX):function_dispatch=avx_offset;break;
        case(SSE42): function_dispatch=sse_offset;break;
        default:function_dispatch=fallback_offset;break;
        }
    }
    if(function_dispatch >= num_functions) {
        fprintf(stderr,"In %s> ERROR: Could not resolve the correct function.\n Function index = %d must lie between [0, %d)\n",
                __FUNCTION__, function_dispatch, num_functions);
        return NULL;
    }
    function = allfunctions[function_dispatch];
    old_isa = options->instruction_set;

    if(options->verbose){
        // This must be first (AVX/SSE may be aliased to fallback)
        if(function_dispatch == fallback_offset){
            fprintf(stderr,"Using fallback kernel\n");
        } else if(function_dispatch == avx_offset){
            fprintf(stderr,"Using AVX kernel\n");
        } else if(function_dispatch == sse_offset){
            fprintf(stderr,"Using SSE kernel\n");
        } else {
            /* consistency fix: this diagnostic previously went to stdout via
               printf while every other message here goes to stderr */
            fprintf(stderr,"Unknown kernel!\n");
        }
    }
    return function;
}
int countpairs_mocks_s_mu_double(const int64_t ND1, double *ra1, double *dec1, double *czD1,
const int64_t ND2, double *ra2, double *dec2, double *czD2,
const int numthreads,
const int autocorr,
const char *sbinfile,
const double max_mu,
const int nmu_bins,
const int cosmology,
results_countpairs_mocks_s_mu *results,
struct config_options *options, struct extra_options *extra)
{
if(options->float_type != sizeof(double)) {
fprintf(stderr,"ERROR: In %s> Can only handle arrays of size=%zu. Got an array of size = %zu\n",
__FUNCTION__, sizeof(double), options->float_type);
return EXIT_FAILURE;
}
// If no extra options were passed, create dummy options
// This allows us to pass arguments like "extra->weights0" below;
// they'll just be NULLs, which is the correct behavior
struct extra_options dummy_extra;
if(extra == NULL){
weight_method_t dummy_method = NONE;
dummy_extra = get_extra_options(dummy_method);
extra = &dummy_extra;
}
int need_weightavg = extra->weight_method != NONE;
options->sort_on_z = 1;
struct timeval t0;
if(options->c_api_timer) {
gettimeofday(&t0, NULL);
}
if(options->fast_divide_and_NR_steps >= MAX_FAST_DIVIDE_NR_STEPS) {
fprintf(stderr, ANSI_COLOR_MAGENTA"Warning: The number of requested Newton-Raphson steps = %u is larger than max. allowed steps = %u."
" Switching to a standard divide"ANSI_COLOR_RESET"\n",
options->fast_divide_and_NR_steps, MAX_FAST_DIVIDE_NR_STEPS);
options->fast_divide_and_NR_steps = 0;
}
//Check inputs
if(ND1 == 0 || (autocorr == 0 && ND2 == 0)) {
return EXIT_SUCCESS;
}
//Check inputs
int status1 = check_ra_dec_cz_s_mu_double(ND1, ra1, dec1, czD1);
if(status1 != EXIT_SUCCESS) {
return status1;
}
if(autocorr==0) {
int status2 = check_ra_dec_cz_s_mu_double(ND2, ra2, dec2, czD2);
if(status2 != EXIT_SUCCESS) {
return status2;
}
}
#if defined(_OPENMP)
omp_set_num_threads(numthreads);
#else
(void) numthreads;
#endif
if(options->max_cells_per_dim == 0) {
fprintf(stderr,"Warning: Max. cells per dimension is set to 0 - resetting to `NLATMAX' = %d\n", NLATMAX);
options->max_cells_per_dim = NLATMAX;
}
for(int i=0;i<3;i++) {
if(options->bin_refine_factors[i] < 1) {
fprintf(stderr,"Warning: bin refine factor along axis = %d *must* be >=1. Instead found bin refine factor =%d\n",
i, options->bin_refine_factors[i]);
reset_bin_refine_factors(options);
break;/* all factors have been reset -> no point continuing with the loop */
}
}
/* setup interrupt handler -> mostly useful during the python execution.
Let's Ctrl-C abort the extension */
SETUP_INTERRUPT_HANDLERS(interrupt_handler_countpairs_s_mu_mocks_double);
//Try to initialize cosmology - code will exit if comoslogy is not implemented.
//Putting in a different scope so I can call the variable status
{
int status = init_cosmology(cosmology);
if(status != EXIT_SUCCESS) {
return status;
}
}
/***********************
*initializing the bins
************************/
double *supp;
int nsbin;
double smin,smax;
setup_bins(sbinfile,&smin,&smax,&nsbin,&supp);
if( ! (smin > 0.0 && smax > 0.0 && smin < smax && nsbin > 0)) {
fprintf(stderr,"Error: Could not setup with S bins correctly. (smin = %lf, smax = %lf, with nbins = %d). Expected non-zero smin/smax with smax > smin and nbins >=1 \n",
smin, smax, nsbin);
return EXIT_FAILURE;
}
if(max_mu <= 0.0 || max_mu > 1.0) {
fprintf(stderr,"Error: max_mu (max. value for the cosine of the angle with line of sight) must be greater than 0 and at most 1).\n"
"The passed value is max_mu = %lf. Please change it to be > 0 and <= 1.0\n", max_mu);
return EXIT_FAILURE;
}
if(nmu_bins < 1 ) {
fprintf(stderr,"Error: Number of mu bins = %d must be at least 1\n", nmu_bins);
return EXIT_FAILURE;
}
//Change cz into co-moving distance
double *D1 = NULL, *D2 = NULL;
if(options->is_comoving_dist == 0) {
D1 = my_malloc(sizeof(*D1),ND1);
D2 = autocorr == 0 ? my_malloc(sizeof(*D2),ND2):D1;
} else {
D1 = czD1;
D2 = autocorr == 0 ? czD2:czD1;
}
if(D1 == NULL || D2 == NULL) {
free(D1);free(D2);
return EXIT_FAILURE;
}
if(options->is_comoving_dist == 0) {
//Setup variables to do the cz->comoving distance
double czmax = 0.0;
const double inv_speed_of_light = 1.0/SPEED_OF_LIGHT;
get_max_double(ND1, czD1, &czmax);
if(autocorr == 0) {
get_max_double(ND2, czD2, &czmax);
}
const double zmax = czmax * inv_speed_of_light + 0.01;
const int workspace_size = 10000;
double *interp_redshift = my_calloc(sizeof(*interp_redshift), workspace_size);//the interpolation is done in 'z' and not in 'cz'
double *interp_comoving_dist = my_calloc(sizeof(*interp_comoving_dist),workspace_size);
int Nzdc = set_cosmo_dist(zmax, workspace_size, interp_redshift, interp_comoving_dist, cosmology);
if(Nzdc < 0) {
free(interp_redshift);free(interp_comoving_dist);
return EXIT_FAILURE;
}
gsl_interp *interpolation;
gsl_interp_accel *accelerator;
accelerator = gsl_interp_accel_alloc();
interpolation = gsl_interp_alloc (gsl_interp_linear,Nzdc);
gsl_interp_init(interpolation, interp_redshift, interp_comoving_dist, Nzdc);
for(int64_t i=0;i<ND1;i++) {
D1[i] = gsl_interp_eval(interpolation, interp_redshift, interp_comoving_dist, czD1[i]*inv_speed_of_light, accelerator);
}
if(autocorr==0) {
for(int64_t i=0;i<ND2;i++) {
D2[i] = gsl_interp_eval(interpolation, interp_redshift, interp_comoving_dist, czD2[i]*inv_speed_of_light, accelerator);
}
}
free(interp_redshift);free(interp_comoving_dist);
gsl_interp_free(interpolation);
gsl_interp_accel_free(accelerator);
}
double *X1 = my_malloc(sizeof(*X1), ND1);
double *Y1 = my_malloc(sizeof(*Y1), ND1);
double *Z1 = my_malloc(sizeof(*Z1), ND1);
if(X1 == NULL || Y1 == NULL || Z1 == NULL) {
free(X1);free(Y1);free(Z1);
return EXIT_FAILURE;
}
for(int64_t i=0;i<ND1;i++) {
X1[i] = D1[i]*COSD(dec1[i])*COSD(ra1[i]);
Y1[i] = D1[i]*COSD(dec1[i])*SIND(ra1[i]);
Z1[i] = D1[i]*SIND(dec1[i]);
}
double *X2,*Y2,*Z2;
if(autocorr==0) {
X2 = my_malloc(sizeof(*X2), ND2);
Y2 = my_malloc(sizeof(*Y2), ND2);
Z2 = my_malloc(sizeof(*Z2), ND2);
for(int64_t i=0;i<ND2;i++) {
X2[i] = D2[i]*COSD(dec2[i])*COSD(ra2[i]);
Y2[i] = D2[i]*COSD(dec2[i])*SIND(ra2[i]);
Z2[i] = D2[i]*SIND(dec2[i]);
}
} else {
X2 = X1;
Y2 = Y1;
Z2 = Z1;
}
double supp_sqr[nsbin];
for(int i=0; i < nsbin;i++) {
supp_sqr[i] = supp[i]*supp[i];
}
const double mu_max = (double) max_mu;
double xmin=1e10,ymin=1e10,zmin=1e10;
double xmax=-1e10,ymax=-1e10,zmax=-1e10;
get_max_min_data_double(ND1, X1, Y1, Z1, &xmin, &ymin, &zmin, &xmax, &ymax, &zmax);
if(autocorr==0) {
get_max_min_data_double(ND2, X2, Y2, Z2, &xmin, &ymin, &zmin, &xmax, &ymax, &zmax);
}
const double xdiff = xmax-xmin;
const double ydiff = ymax-ymin;
const double zdiff = zmax-zmin;
if(get_bin_refine_scheme(options) == BINNING_DFL) {
if(smax < 0.05*xdiff) {
options->bin_refine_factors[0] = 1;
}
if(smax < 0.05*ydiff) {
options->bin_refine_factors[1] = 1;
}
if(smax < 0.05*zdiff) {
options->bin_refine_factors[2] = 1;
}
}
/*---Create 3-D lattice--------------------------------------*/
int nmesh_x=0,nmesh_y=0,nmesh_z=0;
cellarray_mocks_index_particles_double *lattice1 = gridlink_mocks_index_particles_double(ND1, X1, Y1, Z1, D1, &(extra->weights0),
xmin, xmax, ymin, ymax, zmin, zmax,
smax, smax, smax,
options->bin_refine_factors[0],
options->bin_refine_factors[1],
options->bin_refine_factors[2],
&nmesh_x, &nmesh_y, &nmesh_z,
options);
if(lattice1 == NULL) {
return EXIT_FAILURE;
}
/* If there too few cells (BOOST_CELL_THRESH is ~10), and the number of cells can be increased, then boost bin refine factor by ~1*/
const double avg_np = ((double)ND1)/(nmesh_x*nmesh_y*nmesh_z);
const int8_t max_nmesh = fmax(nmesh_x, fmax(nmesh_y, nmesh_z));
if((max_nmesh <= BOOST_CELL_THRESH || avg_np >= BOOST_NUMPART_THRESH)
&& max_nmesh < options->max_cells_per_dim) {
fprintf(stderr,"%s> gridlink seems inefficient. nmesh = (%d, %d, %d); avg_np = %.3g. ", __FUNCTION__, nmesh_x, nmesh_y, nmesh_z, avg_np);
if(get_bin_refine_scheme(options) == BINNING_DFL) {
fprintf(stderr,"Boosting bin refine factor - should lead to better performance\n");
// Only boost the first two dimensions. Prevents excessive refinement.
for(int i=0;i<2;i++) {
options->bin_refine_factors[i] += BOOST_BIN_REF;
}
free_cellarray_mocks_index_particles_double(lattice1, nmesh_x * (int64_t) nmesh_y * nmesh_z);
lattice1 = gridlink_mocks_index_particles_double(ND1, X1, Y1, Z1, D1, &(extra->weights0),
xmin, xmax, ymin, ymax, zmin, zmax,
smax, smax, smax,
options->bin_refine_factors[0],
options->bin_refine_factors[1],
options->bin_refine_factors[2],
&nmesh_x, &nmesh_y, &nmesh_z,
options);
if(lattice1 == NULL) {
return EXIT_FAILURE;
}
} else {
fprintf(stderr,"Boosting bin refine factor could have helped. However, since custom bin refine factors "
"= (%d, %d, %d) are being used - continuing with inefficient mesh\n", options->bin_refine_factors[0],
options->bin_refine_factors[1], options->bin_refine_factors[2]);
}
}
cellarray_mocks_index_particles_double *lattice2 = NULL;
if(autocorr==0) {
int ngrid2_x=0,ngrid2_y=0,ngrid2_z=0;
lattice2 = gridlink_mocks_index_particles_double(ND2, X2, Y2, Z2, D2, &(extra->weights1),
xmin, xmax,
ymin, ymax,
zmin, zmax,
smax, smax, smax,
options->bin_refine_factors[0],
options->bin_refine_factors[1],
options->bin_refine_factors[2],
&ngrid2_x, &ngrid2_y, &ngrid2_z, options);
if(lattice2 == NULL) {
return EXIT_FAILURE;
}
if( ! (nmesh_x == ngrid2_x && nmesh_y == ngrid2_y && nmesh_z == ngrid2_z) ) {
fprintf(stderr,"Error: The two sets of 3-D lattices do not have identical bins. First has dims (%d, %d, %d) while second has (%d, %d, %d)\n",
nmesh_x, nmesh_y, nmesh_z, ngrid2_x, ngrid2_y, ngrid2_z);
return EXIT_FAILURE;
}
} else {
lattice2 = lattice1;
}
free(X1);free(Y1);free(Z1);
if(autocorr == 0) {
free(X2);free(Y2);free(Z2);
}
if(options->is_comoving_dist == 0) {
free(D1);
if(autocorr == 0) {
free(D2);
}
}
const int64_t totncells = (int64_t) nmesh_x * (int64_t) nmesh_y * (int64_t) nmesh_z;
{
int status = assign_ngb_cells_mocks_index_particles_double(lattice1, lattice2, totncells,
options->bin_refine_factors[0], options->bin_refine_factors[1], options->bin_refine_factors[2],
nmesh_x, nmesh_y, nmesh_z,
autocorr);
if(status != EXIT_SUCCESS) {
free_cellarray_mocks_index_particles_double(lattice1, totncells);
if(autocorr == 0) {
free_cellarray_mocks_index_particles_double(lattice2, totncells);
}
free(supp);
return EXIT_FAILURE;
}
}
/*---Gridlink-variables----------------*/
const int totnbins = (nmu_bins+1)*(nsbin+1);
const int nprojbins = nsbin-1;
#if defined(_OPENMP)
uint64_t **all_npairs = (uint64_t **) matrix_calloc(sizeof(uint64_t), numthreads, totnbins);
double **all_savg = NULL;
if(options->need_avg_sep){
all_savg = (double **) matrix_calloc(sizeof(double),numthreads,totnbins);
}
double **all_weightavg = NULL;
if(need_weightavg) {
all_weightavg = (double **) matrix_calloc(sizeof(double),numthreads,totnbins);
}
double **all_projpairs = (double **) matrix_calloc(sizeof(double),numthreads,nprojbins);
double **all_projpairs_tensor = (double **) matrix_calloc(sizeof(double),numthreads,nprojbins*nprojbins);
#else //USE_OMP
uint64_t npairs[totnbins];
double savg[totnbins], weightavg[totnbins], projpairs[nprojbins];
double projpairs_tensor[nprojbins*nprojbins];
for(int i=0; i <totnbins;i++) {
npairs[i] = 0;
if(options->need_avg_sep) {
savg[i] = ZERO;
}
if(need_weightavg) {
weightavg[i] = ZERO;
}
}
for(int i=0;i<nprojbins;i++) {
projpairs[i] = ZERO;
for(int j=0;j<nprojbins;j++) {
projpairs_tensor[i*nprojbins+j] = ZERO;
}
}
#endif //USE_OMP
/* runtime dispatch - get the function pointer */
countpairs_mocks_func_ptr_double countpairs_s_mu_mocks_function_double = countpairs_s_mu_mocks_driver_double(options);
if(countpairs_s_mu_mocks_function_double == NULL) {
return EXIT_FAILURE;
}
int interrupted=0,numdone=0, abort_status=EXIT_SUCCESS;
if(options->verbose) {
init_my_progressbar(totncells,&interrupted);
}
#if defined(_OPENMP)
#pragma omp parallel shared(numdone, abort_status, interrupt_status_DDsmu_mocks_double)
{
const int tid = omp_get_thread_num();
uint64_t npairs[totnbins];
double savg[totnbins], weightavg[totnbins], projpairs[nprojbins];
double projpairs_tensor[nprojbins*nprojbins];
for(int i=0;i<totnbins;i++) {
npairs[i] = 0;
if(options->need_avg_sep) {
savg[i] = ZERO;
}
if(need_weightavg) {
weightavg[i] = ZERO;
}
}
for(int i=0;i<nprojbins;i++) {
projpairs[i] = ZERO;
for(int j=0;j<nprojbins;j++) {
projpairs_tensor[i*nprojbins+j] = ZERO;
}
}
#pragma omp for schedule(dynamic)
#endif//USE_OMP
/*---Loop-over-Data1-particles--------------------*/
for(int64_t index1=0;index1<totncells;index1++) {
#if defined(_OPENMP)
#pragma omp flush (abort_status, interrupt_status_DDsmu_mocks_double)
#endif
if(abort_status == EXIT_SUCCESS && interrupt_status_DDsmu_mocks_double == EXIT_SUCCESS) {
//omp cancel was introduced in omp 4.0 - so this is my way of checking if loop needs to be cancelled
/* If the verbose option is not enabled, avoid outputting anything unnecessary*/
if(options->verbose) {
#if defined(_OPENMP)
if (omp_get_thread_num() == 0)
#endif
my_progressbar(numdone,&interrupted);
#if defined(_OPENMP)
#pragma omp atomic
#endif
numdone++;
}
const cellarray_mocks_index_particles_double *first = &(lattice1[index1]);
if(first->nelements == 0) {
continue;
}
double *x1 = first->x;
double *y1 = first->y;
double *z1 = first->z;
double *d1 = first->cz;
const weight_struct_double *weights1 = &(first->weights);
const int64_t N1 = first->nelements;
if(autocorr == 1) {
int same_cell = 1;
double *this_savg = options->need_avg_sep ? &(savg[0]):NULL;
double *this_weightavg = need_weightavg ? weightavg:NULL;
const int status = countpairs_s_mu_mocks_function_double(N1, x1, y1, z1, d1, weights1,
N1, x1, y1, z1, d1, weights1,
same_cell,
options->fast_divide_and_NR_steps,
smax, smin, nsbin,
nmu_bins, supp_sqr, mu_max,
this_savg, npairs, projpairs,
projpairs_tensor,
this_weightavg, extra->weight_method);
/* This actually causes a race condition under OpenMP - but mostly
I care that an error occurred - rather than the exact value of
the error status */
abort_status |= status;
}
for(int64_t ngb=0;ngb<first->num_ngb;ngb++){
const cellarray_mocks_index_particles_double *second = first->ngb_cells[ngb];
if(second->nelements == 0) {
continue;
}
const int same_cell = 0;
double *x2 = second->x;
double *y2 = second->y;
double *z2 = second->z;
double *d2 = second->cz;
const weight_struct_double *weights2 = &(second->weights);
const int64_t N2 = second->nelements;
double *this_savg = options->need_avg_sep ? &(savg[0]):NULL;
double *this_weightavg = need_weightavg ? weightavg:NULL;
const int status = countpairs_s_mu_mocks_function_double(N1, x1, y1, z1, d1, weights1,
N2, x2, y2, z2, d2, weights2,
same_cell,
options->fast_divide_and_NR_steps,
smax, smin, nsbin,
nmu_bins, supp_sqr, mu_max,
this_savg, npairs, projpairs,
projpairs_tensor,
this_weightavg, extra->weight_method);
/* This actually causes a race condition under OpenMP - but mostly
I care that an error occurred - rather than the exact value of
the error status */
abort_status |= status;
}//loop over ngb cells
}//abort_status check
}//i loop over ND1 particles
#if defined(_OPENMP)
for(int i=0;i<totnbins;i++) {
all_npairs[tid][i] = npairs[i];
if(options->need_avg_sep) {
all_savg[tid][i] = savg[i];
}
if(need_weightavg) {
all_weightavg[tid][i] = weightavg[i];
}
}
for (int i=0;i<nprojbins;i++) {
all_projpairs[tid][i] = projpairs[i];
for(int j=0;j<nprojbins;j++) {
all_projpairs_tensor[tid][i*nprojbins+j] = projpairs_tensor[i*nprojbins+j];
}
}
}//close the omp parallel region
#endif//USE_OMP
free_cellarray_mocks_index_particles_double(lattice1,totncells);
if(autocorr == 0) {
free_cellarray_mocks_index_particles_double(lattice2,totncells);
}
if(abort_status != EXIT_SUCCESS || interrupt_status_DDsmu_mocks_double != EXIT_SUCCESS) {
/* Cleanup memory here if aborting */
free(supp);
#if defined(_OPENMP)
matrix_free((void **) all_npairs, numthreads);
if(options->need_avg_sep) {
matrix_free((void **) all_savg, numthreads);
}
if(need_weightavg) {
matrix_free((void **) all_weightavg, numthreads);
}
matrix_free((void **) all_projpairs, numthreads);
matrix_free((void **) all_projpairs_tensor, numthreads);
#endif
return EXIT_FAILURE;
}
if(options->verbose) {
finish_myprogressbar(&interrupted);
}
#if defined(_OPENMP)
uint64_t npairs[totnbins];
double savg[totnbins], weightavg[totnbins], projpairs[nprojbins];
double projpairs_tensor[nprojbins*nprojbins];
for(int i=0;i<totnbins;i++) {
npairs[i] = 0;
if(options->need_avg_sep) {
savg[i] = ZERO;
}
if(need_weightavg) {
weightavg[i] = ZERO;
}
}
for(int i=0;i<nprojbins;i++) {
projpairs[i] = ZERO;
for(int j=0;j<nprojbins;j++) {
projpairs_tensor[i*nprojbins+j] = ZERO;
}
}
for(int i=0;i<numthreads;i++) {
for(int j=0;j<totnbins;j++) {
npairs[j] += all_npairs[i][j];
if(options->need_avg_sep) {
savg[j] += all_savg[i][j];
}
if(need_weightavg) {
weightavg[j] += all_weightavg[i][j];
}
}
for(int j=0;j<nprojbins;j++) {
projpairs[j] += all_projpairs[i][j];
for(int k=0;k<nprojbins;k++) {
projpairs_tensor[j*nprojbins+k] += all_projpairs_tensor[i][j*nprojbins+k];
}
}
}
matrix_free((void **) all_npairs, numthreads);
if(options->need_avg_sep) {
matrix_free((void **) all_savg, numthreads);
}
if(need_weightavg) {
matrix_free((void **) all_weightavg, numthreads);
}
matrix_free((void **) all_projpairs, numthreads);
matrix_free((void **) all_projpairs_tensor, numthreads);
#endif //USE_OMP
//The code does not double count for autocorrelations
//which means the npairs and savg values need to be doubled;
if(autocorr == 1) {
const uint64_t int_fac = 2;
const double dbl_fac = (double) 2.0;
for(int i=0;i<totnbins;i++) {
npairs[i] *= int_fac;
if(options->need_avg_sep) {
savg[i] *= dbl_fac;
}
if(need_weightavg) {
weightavg[i] *= dbl_fac;
}
}
//TODO: do i also want to double this? think so
for(int i=0;i<nprojbins;i++) {
projpairs[i] *= dbl_fac;
for(int j=0;j<nprojbins;j++) {
projpairs_tensor[i*nprojbins+j] *= dbl_fac;
}
}
}
for(int i=0;i<totnbins;i++) {
if(npairs[i] > 0) {
if(options->need_avg_sep) {
savg[i] /= (double) npairs[i] ;
}
if(need_weightavg) {
weightavg[i] /= (double) npairs[i];
}
}
}
// don't need proj_pairs here, not averaging
results->nsbin = nsbin;
results->nmu_bins = nmu_bins;
results->mu_max = max_mu;//NOTE max_mu which is double and not mu_max (which might be float)
results->mu_min = ZERO;
results->npairs = my_malloc(sizeof(*(results->npairs)), totnbins);
results->projpairs = my_malloc(sizeof(*(results->npairs)), nprojbins);
results->projpairs_tensor = my_malloc(sizeof(*(results->npairs)), nprojbins*nprojbins);
results->supp = my_malloc(sizeof(*(results->supp)) , nsbin);
results->savg = my_malloc(sizeof(*(results->savg)) , totnbins);
results->weightavg = my_calloc(sizeof(double) , totnbins);
if(results->npairs == NULL || results->supp == NULL || results->savg == NULL || results->weightavg == NULL || results->projpairs == NULL) {
free_results_mocks_s_mu(results);
free(supp);
return EXIT_FAILURE;
}
for(int i=0;i<nsbin;i++) {
results->supp[i] = supp[i];
for(int j=0;j<nmu_bins;j++) {
const int index = i*(nmu_bins+1) + j;
if( index >= totnbins ) {
fprintf(stderr, "ERROR: In %s> index = %d must be in range [0, %d)\n", __FUNCTION__, index, totnbins);
free_results_mocks_s_mu(results);
free(supp);
return EXIT_FAILURE;
}
results->npairs[index] = npairs[index];
results->savg[index] = ZERO;
results->weightavg[index] = ZERO;
if(options->need_avg_sep) {
results->savg[index] = savg[index];
}
if(need_weightavg) {
results->weightavg[index] = weightavg[index];
}
}
}
for(int i=0;i<nprojbins;i++) {
results->projpairs[i] = projpairs[i];
for(int j=0;j<nprojbins;j++) {
results->projpairs_tensor[i*nprojbins+j] = projpairs_tensor[i*nprojbins+j];
}
}
free(supp);
/* reset interrupt handlers to default */
RESET_INTERRUPT_HANDLERS();
reset_bin_refine_factors(options);
if(options->c_api_timer) {
struct timeval t1;
gettimeofday(&t1, NULL);
options->c_api_time = ADD_DIFF_TIME(t0, t1);
}
return EXIT_SUCCESS;
}
|
main.c | #include <omp.h>
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#define BigInt int64_t // or __int128_t
#define MAX_RESULTS 1000 // print results between 0...<n>
#define MAX_ROUNDS 1000000 // use values between 0...<n>
// per-n count of solutions found so far; static storage => zero-initialized
static uint32_t numSolutions[MAX_RESULTS];
/* Print every n in [0, MAX_RESULTS) that provably has no representation as
 * x³+y³+z³: cubes are ≡ 0, 1 or 8 (mod 9), so sums of three cubes can never
 * be ≡ 4 or 5 (mod 9). */
static void printNoSolutions()
{
#pragma omp parallel for
    for (BigInt n = 0; n < MAX_RESULTS; ++n)
        switch (n % 9)
        {
        case 4:
        case 5:
            /* PRId64 instead of %ld: int64_t is `long long` on LLP64 targets,
             * so "%ld" was undefined behavior there */
            printf("%3" PRId64 " = no solution\n", (int64_t)n);
            break;
        default:
            break;
        }
}
// Lookup table of i³ for i in [0, MAX_ROUNDS); filled once before searching.
static BigInt cubeNumbers[MAX_ROUNDS];

// Precompute all cubes in parallel (pure table fill, iterations independent).
static void preCalculateCubeNumbers() // for performance
{
#pragma omp parallel for
    for (BigInt i = 0; i < MAX_ROUNDS; ++i)
    {
        const BigInt square = i * i;
        cubeNumbers[i] = square * i;
    }
}
/* Record and print one solution n = x³ + y³ + z³, at most once per n.
 * Called concurrently from OpenMP-parallel search loops, so the
 * check-and-mark on numSolutions[] must be atomic (the old plain
 * `numSolutions[n]++` was a data race that could print duplicates). */
static void printSolution(BigInt n, BigInt x, BigInt y, BigInt z)
{
    if (n < 0)
    { n = -n;
      x = -x;
      y = -y;
      z = -z;
    }
    uint32_t alreadySeen;
#pragma omp atomic capture
    alreadySeen = numSolutions[n]++;
    if (alreadySeen)
        return; // a solution for <n> already exists
    // print formatted to be: x <= y <= z -- sort the three values ascending
    BigInt a = x, b = y, c = z;
    if (a > b) { BigInt t = a; a = b; b = t; }
    if (b > c) { BigInt t = b; b = c; c = t; }
    if (a > b) { BigInt t = a; a = b; b = t; }
    /* PRId64: portable format for int64_t (plain %ld is UB on LLP64) */
    printf("%3" PRId64 " = %" PRId64 "³ + %" PRId64 "³ + %" PRId64 "³\n",
           (int64_t)n, (int64_t)a, (int64_t)b, (int64_t)c);
    fflush(stdout); // flush so progress is visible immediately
}
// Exhaustively try every 0 <= z <= y <= x in [beginOfSearch, endOfSearch)
// and every sign combination whose sum lands inside (-MAX_RESULTS, MAX_RESULTS).
static void printSolutionsByBruteForce(BigInt beginOfSearch, BigInt endOfSearch)
{
    for (BigInt x = beginOfSearch; x < endOfSearch; ++x)
    {
        const BigInt xCubed = cubeNumbers[x];
#pragma omp parallel for
        for (BigInt y = 0; y <= x; ++y)
        {
            const BigInt yCubed = cubeNumbers[y];
            for (BigInt z = 0; z <= y; ++z)
            {
                const BigInt zCubed = cubeNumbers[z];
                // all-positive case only needs the upper bound (sum >= 0 here)
                BigInt candidate = xCubed + yCubed + zCubed;
                if (candidate < MAX_RESULTS)
                    printSolution(candidate, x, y, z);
                // the four mixed-sign cases
                candidate = -xCubed + yCubed + zCubed;
                if (candidate > -MAX_RESULTS && candidate < MAX_RESULTS)
                    printSolution(candidate, -x, y, z);
                candidate = xCubed - yCubed + zCubed;
                if (candidate > -MAX_RESULTS && candidate < MAX_RESULTS)
                    printSolution(candidate, x, -y, z);
                candidate = xCubed + yCubed - zCubed;
                if (candidate > -MAX_RESULTS && candidate < MAX_RESULTS)
                    printSolution(candidate, x, y, -z);
                candidate = xCubed - yCubed - zCubed;
                if (candidate > -MAX_RESULTS && candidate < MAX_RESULTS)
                    printSolution(candidate, x, -y, -z);
            }
        }
    }
}
/* For each (x, y), binary-search z in [0, x] for three sign patterns.
 * BUG FIX: in the first search n = x³ - y³ - z³ DECREASES as z grows, so the
 * bracket updates must be the mirror image of the two increasing searches
 * (the original moved min/max the wrong way and could never converge on a
 * solution for that pattern). */
static void printSolutionsByBinarySearch(BigInt beginOfSearch, BigInt endOfSearch)
{
#pragma omp parallel for
    for (BigInt x = beginOfSearch; x < endOfSearch; ++x)
    {
        BigInt x3 = cubeNumbers[x];
        for (BigInt y = 0; y <= x; ++y)
        {
            BigInt y3 = cubeNumbers[y];
            // Binary search for: x³ - y³ - z³  (decreasing in z)
            BigInt min = 0, max = x;
            do
            {
                const BigInt z = (min + max) / (BigInt)2;
                const BigInt n = x3 - y3 - cubeNumbers[z];
                if (n >= MAX_RESULTS)
                    min = z + 1;      // n too large: a bigger z shrinks it
                else if (n <= -MAX_RESULTS)
                    max = z - 1;      // n too small: a smaller z grows it
                else
                {
                    printSolution(n, x, -y, -z);
                    break;
                }
            } while (min <= max);
            // Binary search for: x³ - y³ + z³  (increasing in z)
            min = 0;
            max = x;
            do
            {
                const BigInt z = (min + max) / (BigInt)2;
                const BigInt n = x3 - y3 + cubeNumbers[z];
                if (n >= MAX_RESULTS)
                    max = z - 1;
                else if (n <= -MAX_RESULTS)
                    min = z + 1;
                else
                {
                    printSolution(n, x, -y, z);
                    break;
                }
            } while (min <= max);
            // Binary search for: -x³ + y³ + z³  (increasing in z)
            min = 0;
            max = x;
            do
            {
                const BigInt z = (min + max) / (BigInt)2;
                const BigInt n = -x3 + y3 + cubeNumbers[z];
                if (n >= MAX_RESULTS)
                    max = z - 1;
                else if (n <= -MAX_RESULTS)
                    min = z + 1;
                else
                {
                    printSolution(n, -x, y, z);
                    break;
                }
            } while (min <= max);
        }
    }
}
// Driver: announce parameters, print the provably-unsolvable n, build the
// cube table, then search by brute force.
int main()
{
    printf("# List of simple solutions of n = x³ + y³ + z³  (for n < %d and x,y,z < %d, solutions formatted to be: x <= y <= z)\n", MAX_RESULTS, MAX_ROUNDS);
    printNoSolutions();
    preCalculateCubeNumbers();
    printSolutionsByBruteForce(0, MAX_ROUNDS);
    //printSolutionsByBinarySearch(0/*5000*/, MAX_ROUNDS);
    return EXIT_SUCCESS;
}
|
omp_hello.c | #include <sys/sysinfo.h>
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <vcore.h>
/* OpenMP "hello world": report core counts, then have every thread print its
 * id; the master additionally reports the team size.
 * Fixes: sysconf() returns long, so printing it with %d was undefined
 * varargs behavior -- use %ld; also add the missing return from main. */
int main (int argc, char *argv[])
{
    int nthreads, tid;
    printf("get_nprocs: %d\n", get_nprocs());
    printf("SC_NPROCESSORS_ONLN: %ld\n", sysconf (_SC_NPROCESSORS_ONLN));
    printf("max num vcores: %d\n", max_vcores());
    /* Fork a team of threads giving them their own copies of variables */
#pragma omp parallel private(nthreads, tid)
    {
        /* Obtain thread number */
        tid = omp_get_thread_num();
        printf("Hello World from thread = %d\n", tid);
        /* Only master thread does this */
        if (tid == 0)
        {
            nthreads = omp_get_num_threads();
            printf("Number of threads = %d\n", nthreads);
        }
    } /* All threads join master thread and disband */
    return 0;
}
|
uts_omp_task_shmem.c | /*
* ---- The Unbalanced Tree Search (UTS) Benchmark ----
*
* Copyright (c) 2010 See AUTHORS file for copyright holders
*
* This file is part of the unbalanced tree search benchmark. This
* project is licensed under the MIT Open Source license. See the LICENSE
* file for copyright and licensing information.
*
* UTS is a collaborative project between researchers at the University of
* Maryland, the University of North Carolina at Chapel Hill, and the Ohio
* State University. See AUTHORS file for more information.
*
*/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <assert.h>
#include <shmem.h>
#include "uts.h"
/***********************************************************
* *
* Compiler Type (these flags are set by at compile time) *
* (default) ANSI C compiler - sequential execution *
* (_OPENMP) OpenMP enabled C compiler *
* (__UPC__) UPC compiler *
* (_SHMEM) Cray Shmem *
* (__PTHREADS__) Pthreads multithreaded execution *
* *
***********************************************************/
#if defined(_OPENMP)
/**** OpenMP Definitions ****/
#include <omp.h>
#define PARALLEL 1
#define COMPILER_TYPE 1
// SHARED/SHARED_INDEF are empty here: under OpenMP globals are shared by default
#define SHARED
#define SHARED_INDEF
#define VOLATILE volatile
#define MAX_OMP_THREADS 32
#define MAX_SHMEM_THREADS 1024
#define LOCK_T omp_lock_t
#define GET_NUM_THREADS omp_get_num_threads()
#define GET_THREAD_NUM omp_get_thread_num()
#define SET_LOCK(zlk) omp_set_lock(zlk)
#define UNSET_LOCK(zlk) omp_unset_lock(zlk)
#define INIT_LOCK(zlk) zlk=omp_global_lock_alloc()
#define INIT_SINGLE_LOCK(zlk) zlk=omp_global_lock_alloc()
#define SMEMCPY memcpy
#define ALLOC malloc
// BARRIER is intentionally empty in this port
#define BARRIER
// OpenMP helper function to match UPC lock allocation semantics.
// Allocates and initializes a heap lock; aborts on allocation failure
// (the original dereferenced a possibly-NULL malloc result).
omp_lock_t * omp_global_lock_alloc() {
    // NOTE(review): the +128 over-allocation is presumably padding to keep the
    // lock off a shared cache line -- confirm before removing.
    omp_lock_t *lock = (omp_lock_t *) malloc(sizeof(omp_lock_t) + 128);
    if (lock == NULL) {
        fprintf(stderr, "omp_global_lock_alloc: out of memory\n");
        exit(1);
    }
    omp_init_lock(lock);
    return lock;
}
#else
#error Only supports OMP
#endif /* END Par. Model Definitions */
/***********************************************************
* Parallel execution parameters *
***********************************************************/
int doSteal = PARALLEL; // 1 => use work stealing
int chunkSize = 20; // number of nodes to move to/from shared area
int cbint = 1; // Cancellable barrier polling interval
int pollint = 1; // BUPC Polling interval
// aggregate node/leaf counts; summed from thread_info[] and reduced onto PE 0 in main()
int n_nodes = 0;
int n_leaves = 0;
#ifdef THREAD_METADATA
typedef struct _thread_metadata {
    size_t ntasks; // number of OpenMP tasks executed by this thread
} thread_metadata;
thread_metadata t_metadata[MAX_OMP_THREADS];
#endif
// per-OpenMP-thread counters, combined after the parallel region ends
typedef struct _per_thread_info {
    int n_nodes;
    int n_leaves;
} per_thread_info;
per_thread_info thread_info[MAX_OMP_THREADS];
#define N_BUFFERED_STEALS 16
// publicly stealable nodes on this PE; guarded by steal_buffer_locks[pe]
Node steal_buffer[N_BUFFERED_STEALS];
volatile int n_buffered_steals = 0;
long steal_buffer_locks[MAX_SHMEM_THREADS];
// count of PEs that ran out of local work (lives on PE 0, updated via SHMEM atomics)
int complete_pes = 0;
static int pe, npes;
// Try to steal one buffered Node from target_pe's steal buffer.
// Returns 1 and fills *stolen_out on success, 0 if the remote buffer was empty.
static int steal_from(int target_pe, Node *stolen_out) {
    int remote_buffered_steals;
    // serialize with the owner and with other thieves touching this buffer
    shmem_set_lock(&steal_buffer_locks[target_pe]);
    shmem_int_get(&remote_buffered_steals, (int *)&n_buffered_steals, 1, target_pe);
    int stole_something = 0;
    if (remote_buffered_steals > 0) {
        remote_buffered_steals--;
        // pop the topmost buffered node, then write the decremented count back
        shmem_getmem(stolen_out, &steal_buffer[remote_buffered_steals],
                sizeof(Node), target_pe);
        shmem_int_put((int *)&n_buffered_steals, &remote_buffered_steals, 1, target_pe);
        stole_something = 1;
    }
    shmem_clear_lock(&steal_buffer_locks[target_pe]);
    return stole_something;
}
// Search other PEs for work. Returns 1 with a stolen node in *stolen_out, or
// 0 once every PE has registered itself as complete (global termination).
static int remote_steal(Node *stolen_out) {
    int pe_above = (pe + 1) % npes;
    int pe_below = pe - 1;
    if (pe_below < 0) pe_below = npes - 1;
    int target_pe;
    // First scan through all PEs looking for work
    for (target_pe = 0; target_pe < npes; target_pe++) {
        if (target_pe == pe) continue;
        if (steal_from(target_pe, stolen_out)) {
            return 1;
        }
    }
    const int finish_check_periodicity = 3;
    // register this PE as done; finc returns the old count on PE 0
    int ndone = shmem_int_finc(&complete_pes, 0) + 1;
    int attempts = 0;
    while (ndone != npes) {
        // Try to remote steal
        if (steal_from(pe_above, stolen_out) || steal_from(pe_below, stolen_out)) {
            // found work after all: withdraw our completion vote
            shmem_int_add(&complete_pes, -1, 0);
            return 1;
        }
        // widen the scan outward from our neighbors
        pe_above = (pe_above + 1) % npes;
        pe_below = pe_below - 1;
        if (pe_below < 0) pe_below = npes - 1;
        attempts++;
        // re-read the completion count only every few failed attempts
        if (attempts % finish_check_periodicity == 0) {
            ndone = shmem_int_fadd(&complete_pes, 0, 0);
        }
    }
    assert(ndone == npes);
    return 0;
}
#ifdef __BERKELEY_UPC__
/* BUPC nonblocking I/O Handles */
bupc_handle_t cb_handle = BUPC_COMPLETE_HANDLE;
const int local_cb_cancel = 1;
#endif
/***********************************************************
 * Tree statistics (if selected via UTS_STAT)              *
 *   compute overall size and imbalance metrics            *
 *   and histogram size and imbalance per level            *
 ***********************************************************/
#ifdef UTS_STAT
/* Check that we are not being asked to compile parallel with stats.
 * Parallel stats collection is presently not supported. */
#if PARALLEL
#error "ERROR: Parallel stats collection is not supported!"
#endif
#define MAXHISTSIZE 2000 // max tree depth in histogram
int stats = 1;
int unbType = 1;
int maxHeight = 0; // maximum depth of tree
double maxImb = 0; // maximum imbalance
double minImb = 1;
double treeImb =-1; // Overall imbalance, undefined
// index MAXHISTSIZE is the overflow bucket for nodes deeper than the histogram
int hist[MAXHISTSIZE+1][2]; // average # nodes per level
double unbhist[MAXHISTSIZE+1][3]; // average imbalance per level: [0]=sum, [1]=min, [2]=max
int *rootSize; // size of the root's children
double *rootUnb; // imbalance of root's children
/* Tseng statistics */
int totalNodes = 0;
double imb_max = 0; // % of work in largest child (ranges from 100/n to 100%)
double imb_avg = 0;
double imb_devmaxavg = 0; // ( % of work in largest child ) - ( avg work )
double imb_normdevmaxavg = 0; // ( % of work in largest child ) - ( avg work ) / ( 100% - avg work )
#else
int stats = 0;
int unbType = -1;
#endif
/***********************************************************
 * Execution Tracing                                       *
 ***********************************************************/
// per-thread session state labels used to index trace records
#define SS_WORK 0
#define SS_SEARCH 1
#define SS_IDLE 2
#define SS_OVH 3
#define SS_CBOVH 4
#define SS_NSTATES 5
/* session record for session visualization */
struct sessionRecord_t {
    double startTime, endTime;
};
typedef struct sessionRecord_t SessionRecord;
/* steal record for steal visualization */
struct stealRecord_t {
    long int nodeCount; /* count nodes generated during the session */
    int victimThread; /* thread from which we stole the work */
};
typedef struct stealRecord_t StealRecord;
/* Store debugging and trace data */
struct metaData_t {
    SessionRecord sessionRecords[SS_NSTATES][20000]; /* session time records */
    StealRecord stealRecords[20000]; /* steal records */
};
typedef struct metaData_t MetaData;
/***********************************************************
 * StealStack types                                        *
 ***********************************************************/
/***********************************************************
 * Global shared state                                     *
 ***********************************************************/
// termination detection
VOLATILE SHARED int cb_cancel;
VOLATILE SHARED int cb_count;
VOLATILE SHARED int cb_done;
LOCK_T * cb_lock;
/* holds text string for debugging info */
char debug_str[1000];
/***********************************************************
* UTS Implementation Hooks *
***********************************************************/
// Return a string describing this implementation.
// The lookup table is static so it is materialized once rather than being
// rebuilt on every call (the entries are string literals either way).
char * impl_getName() {
    static char * names[] = {"Sequential C", "C/OpenMP", "UPC", "SHMEM", "PThreads"};
    return names[COMPILER_TYPE];
}
// construct string with all parameter settings
// Appends at offset `ind` into strBuf and returns the new offset.
int impl_paramsToStr(char *strBuf, int ind) {
    int n_omp_threads;
    // spin up a team solely to discover the OpenMP thread count
#pragma omp parallel
#pragma omp single
    n_omp_threads = omp_get_num_threads();
    ind += sprintf(strBuf+ind, "Execution strategy: ");
    if (PARALLEL) {
        ind += sprintf(strBuf+ind, "Parallel search using %d threads total (%d "
                "SHMEM PEs, %d OMP threads per PE)\n", npes * n_omp_threads,
                npes, n_omp_threads);
        if (doSteal) {
            ind += sprintf(strBuf+ind, " Load balance by work stealing, chunk size = %d nodes\n",chunkSize);
            ind += sprintf(strBuf+ind, " CBarrier Interval: %d\n", cbint);
            ind += sprintf(strBuf+ind, " Polling Interval: %d\n", pollint);
        }
        else
            ind += sprintf(strBuf+ind, " No load balancing.\n");
    }
    else
        ind += sprintf(strBuf+ind, "Iterative sequential search\n");
    return ind;
}
// Parse one implementation-specific command-line flag (param like "-c").
// Which flags exist depends on the build configuration.
int impl_parseParam(char *param, char *value) {
    int err = 0; // Return 0 on a match, nonzero on an error
    switch (param[1]) {
#if (PARALLEL == 1)
    case 'c':
        chunkSize = atoi(value); break;
    case 's':
        doSteal = atoi(value);
        if (doSteal != 1 && doSteal != 0)
            err = 1;
        break;
    case 'i':
        cbint = atoi(value); break;
#ifdef __BERKELEY_UPC__
    case 'I':
        pollint = atoi(value); break;
#endif
#else /* !PARALLEL */
#ifdef UTS_STAT
    case 'u':
        // -u selects the unbalance measure; negative disables stats entirely
        unbType = atoi(value);
        if (unbType > 2) {
            err = 1;
            break;
        }
        if (unbType < 0)
            stats = 0;
        else
            stats = 1;
        break;
#endif
#endif /* PARALLEL */
    default:
        err = 1;
        break;
    }
    return err;
}
// Print the help text for the implementation-specific flags accepted by
// impl_parseParam() in the current build configuration.
void impl_helpMessage() {
    if (!PARALLEL) {
#ifdef UTS_STAT
        printf(" -u int unbalance measure (-1: none; 0: min/size; 1: min/n; 2: max/n)\n");
#else
        printf(" none.\n");
#endif
        return;
    }
    printf(" -s int zero/nonzero to disable/enable work stealing\n");
    printf(" -c int chunksize for work stealing\n");
    printf(" -i int set cancellable barrier polling interval\n");
#ifdef __BERKELEY_UPC__
    printf(" -I int set working bupc_poll() interval\n");
#endif
#ifdef __PTHREADS__
    printf(" -T int set number of threads\n");
#endif
}
// Terminate the whole run with status `err`, using the abort mechanism
// appropriate to the programming model.
void impl_abort(int err) {
#if defined(__UPC__)
    upc_global_exit(err);
#else
    // OpenMP, SHMEM and the sequential build all terminate identically
    exit(err);
#endif
}
/***********************************************************
* *
* FUNCTIONS *
* *
***********************************************************/
/*
* StealStack
* Stack of nodes with sharing at the bottom of the stack
* and exclusive access at the top for the "owning" thread
* which has affinity to the stack's address space.
*
* * All operations on the shared portion of the stack
* must be guarded using the stack-specific lock.
* * Elements move between the shared and exclusive
* portion of the stack solely under control of the
* owning thread. (ss_release and ss_acquire)
* * workAvail is the count of elements in the shared
* portion of the stack. It may be read without
* acquiring the stack lock, but of course its value
* may not be accurate. Idle threads read workAvail in
* this speculative fashion to minimize overhead to
* working threads.
* * Elements can be stolen from the bottom of the shared
* portion by non-owning threads. The values are
* reserved under lock by the stealing thread, and then
* copied without use of the lock (currently space for
* reserved values is never reclaimed).
*
*/
/* fatal error: report which thread failed and abort the process */
void ss_error(char *str) {
    const int failing_thread = GET_THREAD_NUM;
    printf("*** [Thread %i] %s\n", failing_thread, str);
    exit(4);
}
#ifdef UTS_STAT
/*
* Statistics,
* : number of nodes per level
* : imbalanceness of nodes per level
*
*/
/* Reset the per-level histogram arrays.
 * Fixes: the loop now covers the overflow bucket at index MAXHISTSIZE
 * (the arrays are sized MAXHISTSIZE+1 and updateHist writes that slot),
 * and unbhist[i][0] -- the running sum accumulated in updateHist -- is
 * now cleared along with the min ([1]) and max ([2]) entries. */
void initHist()
{
    int i;
    for (i=0; i<=MAXHISTSIZE; i++){
        hist[i][0]=0;
        hist[i][1]=0;
        unbhist[i][0]=0;
        unbhist[i][1]=1;
        unbhist[i][2]=0;
    }
}
// Fold one node's child count and unbalance value `unb` into the per-level
// histogram; nodes deeper than MAXHISTSIZE go into the overflow bucket.
void updateHist(Node* c, double unb)
{
    if (c->height<MAXHISTSIZE){
        hist[c->height][1]++;                 // node count at this level
        hist[c->height][0]+=c->numChildren;   // total children at this level
        unbhist[c->height][0]+=unb;           // sum (averaged later in showHist)
        if (unbhist[c->height][1]>unb)
            unbhist[c->height][1]=unb;        // running minimum
        if (unbhist[c->height][2]<unb)
            unbhist[c->height][2]=unb;        // running maximum
    }
    else {
        // overflow bucket: only counts are tracked, no unbalance stats
        hist[MAXHISTSIZE][1]++;
        hist[MAXHISTSIZE][0]+=c->numChildren;
    }
}
// Print the per-level histogram (levels with data only) as a table of
// average children, child count, and avg/min/max unbalance.
// NOTE(review): the loop stops before MAXHISTSIZE, so the overflow bucket
// that updateHist populates is never displayed -- confirm that is intended.
void showHist(FILE *fp)
{
    int i;
    fprintf(fp, "depth\tavgNumChildren\t\tnumChildren\t imb\t maxImb\t minImb\t\n");
    for (i=0; i<MAXHISTSIZE; i++){
        if ((hist[i][0]!=0)&&(hist[i][1]!=0))
            fprintf(fp, "%d\t%f\t%d\t %lf\t%lf\t%lf\n", i, (double)hist[i][0]/hist[i][1],
                    hist[i][0], unbhist[i][0]/hist[i][1], unbhist[i][1], unbhist[i][2]);
    }
}
// Compute the unbalance measure for node c from its children's recorded
// sizes/unbalances, per the measure selected by unbType
// (0: min weighted by size; 1: min unweighted; 2: max unweighted).
double getImb(Node *c)
{
    int i=0;
    double avg=.0, tmp=.0;
    double unb=0.0;
    avg=(double)c->sizeChildren/c->numChildren;
    for (i=0; i<c->numChildren; i++){
        // root of a BIN tree stores its children's stats in the separate
        // rootSize/rootUnb arrays rather than in the node itself
        if ((type==BIN)&&(c->pp==NULL))
        {
            if (unbType<2)
                tmp=min((double)rootSize[i]/avg, avg/(double)rootSize[i]);
            else
                tmp=max((double)rootSize[i]/avg, avg/(double)rootSize[i]);
            if (unbType>0)
                unb+=tmp*rootUnb[i];
            else
                unb+=tmp*rootUnb[i]*rootSize[i];
        }
        else{
            if (unbType<2)
                tmp=min((double)c->size[i]/avg, avg/(double)c->size[i]);
            else
                tmp=max((double)c->size[i]/avg, avg/(double)c->size[i]);
            if (unbType>0)
                unb+=tmp*c->unb[i];
            else
                unb+=tmp*c->unb[i]*c->size[i];
        }
    }
    // normalize: by child count for unweighted measures, by subtree size for
    // the size-weighted measure; childless nodes are perfectly balanced (1.0)
    if (unbType>0){
        if (c->numChildren>0)
            unb=unb/c->numChildren;
        else unb=1.0;
    }
    else {
        if (c->sizeChildren>1)
            unb=unb/c->sizeChildren;
        else unb=1.0;
    }
    if ((debug & 1) && unb>1) printf("unb>1%lf\t%d\n", unb, c->numChildren);
    return unb;
}
// Accumulate Tseng-style imbalance statistics for node c into the global
// imb_* accumulators (averaged over totalNodes when reported).
void getImb_Tseng(Node *c)
{
    double t_max, t_avg, t_devmaxavg, t_normdevmaxavg;
    if (c->numChildren==0)
    {
        t_avg =0;
        t_max =0;
    }
    else
    {
        // fraction of the subtree's work held by the largest child
        t_max = (double)c->maxSizeChildren/(c->sizeChildren-1);
        t_avg = (double)1/c->numChildren;
    }
    t_devmaxavg = t_max-t_avg;
    if (debug & 1)
        printf("max\t%lf, %lf, %d, %d, %d\n", t_max, t_avg,
                c->maxSizeChildren, c->sizeChildren, c->numChildren);
    // guard against division by zero when every child holds the average share
    if (1-t_avg==0)
        t_normdevmaxavg = 1;
    else
        t_normdevmaxavg = (t_max-t_avg)/(1-t_avg);
    imb_max += t_max;
    imb_avg += t_avg;
    imb_devmaxavg += t_devmaxavg;
    imb_normdevmaxavg +=t_normdevmaxavg;
}
// Record statistics for a finished node c and propagate its subtree size and
// unbalance up to its parent (c->pp); the tree root stores the overall value.
void updateParStat(Node *c)
{
    double unb;
    totalNodes++;
    if (maxHeight<c->height)
        maxHeight=c->height;
    unb=getImb(c);
    maxImb=max(unb, maxImb);
    minImb=min(unb, minImb);
    updateHist(c, unb);
    getImb_Tseng(c);
    if (c->pp!=NULL){
        // children of a BIN root record into the dedicated root arrays
        if ((c->type==BIN)&&(c->pp->pp==NULL)){
            rootSize[c->pp->ind]=c->sizeChildren;
            rootUnb[c->pp->ind]=unb;
        }
        else{
            c->pp->size[c->pp->ind]=c->sizeChildren;
            c->pp->unb[c->pp->ind]=unb;
        }
        /* update statistics per node*/
        c->pp->ind++;                       // next child slot in the parent
        c->pp->sizeChildren+=c->sizeChildren;
        if (c->pp->maxSizeChildren<c->sizeChildren)
            c->pp->maxSizeChildren=c->sizeChildren;
    }
    else
        treeImb = unb;  // c is the root: its unbalance is the whole tree's
}
#endif
/*
* Tree Implementation
*
*/
// Reset a Node to the "not yet generated" state; when statistics are enabled,
// also clear its per-child bookkeeping.
void initNode(Node * child)
{
    child->type = -1;
    child->height = -1;
    child->numChildren = -1; // not yet determined
#ifdef UTS_STAT
    if (stats){
        child->ind = 0;
        child->sizeChildren = 1;
        child->maxSizeChildren = 0;
        child->pp = NULL;
        for (int slot = 0; slot < MAXNUMCHILDREN; slot++){
            child->size[slot] = 0;
            child->unb[slot] = 0.0;
        }
    }
#endif
}
/* Initialize the tree root via uts_initRoot and, when statistics are enabled,
 * set up its bookkeeping; BIN trees get heap arrays for the root's children.
 * Fix: the two malloc results were previously used unchecked -- a failed
 * allocation now aborts through ss_error instead of crashing later. */
void initRootNode(Node * root, int type)
{
    uts_initRoot(root, type);
#ifdef TRACE
    stealStack[0]->md->stealRecords[0].victimThread = 0; // first session is own "parent session"
#endif
#ifdef UTS_STAT
    if (stats){
        int i;
        root->ind = 0;
        root->sizeChildren = 1;
        root->maxSizeChildren = 1;
        root->pp = NULL;
        if (type != BIN){
            for (i=0; i<MAXNUMCHILDREN; i++){
                root->size[i] = 0;
                root->unb[i] =.0;
            }
        }
        else {
            // BIN roots can have up to ceil(b_0) children
            int rbf = (int) ceil(b_0);
            rootSize = malloc(rbf*sizeof(int));
            rootUnb = malloc(rbf*sizeof(double));
            if (rootSize == NULL || rootUnb == NULL)
                ss_error("initRootNode: out of memory");
            for (i = 0; i < rbf; i++) {
                rootSize[i] = 0;
                rootUnb[i] = 0.0;
            }
        }
    }
#endif
}
/*
* Generate all children of the parent
*
* details depend on tree type, node type and shape function
*
*/
// Generate all children of `parent` into the scratch node `child`, recursing
// via OpenMP tasks; thread 0 additionally publishes some children into the
// SHMEM steal buffer for other PEs.
void genChildren(Node * parent, Node * child) {
    int parentHeight = parent->height;
    int numChildren, childType;
#ifdef THREAD_METADATA
    t_metadata[omp_get_thread_num()].ntasks += 1;
#endif
    thread_info[omp_get_thread_num()].n_nodes++;
    numChildren = uts_numChildren(parent);
    childType = uts_childType(parent);
    // record number of children in parent
    parent->numChildren = numChildren;
    // construct children and push onto stack
    if (numChildren > 0) {
        int i, j;
        child->type = childType;
        child->height = parentHeight + 1;
#ifdef UTS_STAT
        if (stats) {
            child->pp = parent; // pointer to parent
        }
#endif
        const unsigned char * parent_state = parent->state.state;
        unsigned char * child_state = child->state.state;
        for (i = 0; i < numChildren; i++) {
            for (j = 0; j < computeGranularity; j++) {
                // TBD: add parent height to spawn
                // computeGranularity controls number of rng_spawn calls per node
                rng_spawn(parent_state, child_state, i);
            }
            // NOTE: this local deliberately shadows the `parent` parameter --
            // it snapshots the freshly generated child, which becomes the
            // parent at the next recursion level (by value, so the task can
            // own its own copy via firstprivate below).
            Node parent = *child;
            int made_available_for_stealing = 0;
            // Only thread 0 offers nodes for remote stealing; the count is
            // peeked without the lock and re-checked under it.
            if (omp_get_thread_num() == 0 && n_buffered_steals < N_BUFFERED_STEALS) {
                shmem_set_lock(&steal_buffer_locks[pe]);
                if (n_buffered_steals < N_BUFFERED_STEALS) {
                    steal_buffer[n_buffered_steals++] = parent;
                    made_available_for_stealing = 1;
                }
                shmem_clear_lock(&steal_buffer_locks[pe]);
            }
            if (!made_available_for_stealing) {
                // if-clause: nodes at height >= 9 are expanded without
                // deferring a task (limits task-creation overhead deep down)
#pragma omp task untied firstprivate(parent) if(parent.height < 9)
                {
                    Node child;
                    initNode(&child);
                    genChildren(&parent, &child);
                }
            }
        }
    } else {
        thread_info[omp_get_thread_num()].n_leaves++;
    }
}
// causes one or more threads waiting at barrier, if any,
// to be released
#ifdef TRACE
// print session records for each thread (used when trace is enabled)
// Dump every recorded session interval per thread as
// "<thread> <state> <start> <end>", with steal details appended for WORK
// sessions. Times are shifted so thread 0's start is the origin.
// NOTE(review): relies on stealStack/startTime which are not declared in this
// file -- the TRACE build presumably supplies them; confirm before enabling.
void printSessionRecords()
{
    int i, j, k;
    double offset;
    for (i = 0; i < GET_NUM_THREADS; i++) {
        offset = startTime[i] - startTime[0];
        for (j = 0; j < SS_NSTATES; j++)
            for (k = 0; k < stealStack[i]->entries[j]; k++) {
                printf ("%d %d %f %f", i, j,
                        stealStack[i]->md->sessionRecords[j][k].startTime - offset,
                        stealStack[i]->md->sessionRecords[j][k].endTime - offset);
                if (j == SS_WORK)
                    printf (" %d %ld",
                            stealStack[i]->md->stealRecords[k].victimThread,
                            stealStack[i]->md->stealRecords[k].nodeCount);
                printf ("\n");
            }
    }
}
#endif
// display search statistics
// In this SHMEM/OpenMP port the per-thread stealStack bookkeeping is disabled
// (kept below as commented-out reference code from the original UTS), so only
// the aggregate n_nodes/n_leaves counts are reported via uts_showStats.
void showStats(double elapsedSecs) {
    int i;                 // unused here; retained from the original version
    int tnodes = 0, tleaves = 0, trel = 0, tacq = 0, tsteal = 0, tfail= 0;
    int mdepth = 0, mheight = 0;
    double twork = 0.0, tsearch = 0.0, tidle = 0.0, tovh = 0.0, tcbovh = 0.0;
    // // combine measurements from all threads
    // for (i = 0; i < GET_NUM_THREADS; i++) {
    // tnodes += stealStack[i]->nNodes;
    // tleaves += stealStack[i]->nLeaves;
    // trel += stealStack[i]->nRelease;
    // tacq += stealStack[i]->nAcquire;
    // tsteal += stealStack[i]->nSteal;
    // tfail += stealStack[i]->nFail;
    // twork += stealStack[i]->time[SS_WORK];
    // tsearch += stealStack[i]->time[SS_SEARCH];
    // tidle += stealStack[i]->time[SS_IDLE];
    // tovh += stealStack[i]->time[SS_OVH];
    // tcbovh += stealStack[i]->time[SS_CBOVH];
    // mdepth = max(mdepth, stealStack[i]->maxStackDepth);
    // mheight = max(mheight, stealStack[i]->maxTreeDepth);
    // }
    // if (trel != tacq + tsteal) {
    // printf("*** error! total released != total acquired + total stolen\n");
    // }
    //
    uts_showStats(GET_NUM_THREADS, chunkSize, elapsedSecs, n_nodes, n_leaves, mheight);
    //
    // if (verbose > 1) {
    // if (doSteal) {
    // printf("Total chunks released = %d, of which %d reacquired and %d stolen\n",
    // trel, tacq, tsteal);
    // printf("Failed steal operations = %d, ", tfail);
    // }
    //
    // printf("Max stealStack size = %d\n", mdepth);
    // printf("Avg time per thread: Work = %.6f, Search = %.6f, Idle = %.6f\n", (twork / GET_NUM_THREADS),
    // (tsearch / GET_NUM_THREADS), (tidle / GET_NUM_THREADS));
    // printf(" Overhead = %6f, CB_Overhead = %6f\n\n", (tovh / GET_NUM_THREADS),
    // (tcbovh/GET_NUM_THREADS));
    // }
    //
    // // per thread execution info
    // if (verbose > 2) {
    // for (i = 0; i < GET_NUM_THREADS; i++) {
    // printf("** Thread %d\n", i);
    // printf(" # nodes explored = %d\n", stealStack[i]->nNodes);
    // printf(" # chunks released = %d\n", stealStack[i]->nRelease);
    // printf(" # chunks reacquired = %d\n", stealStack[i]->nAcquire);
    // printf(" # chunks stolen = %d\n", stealStack[i]->nSteal);
    // printf(" # failed steals = %d\n", stealStack[i]->nFail);
    // printf(" maximum stack depth = %d\n", stealStack[i]->maxStackDepth);
    // printf(" work time = %.6f secs (%d sessions)\n",
    // stealStack[i]->time[SS_WORK], stealStack[i]->entries[SS_WORK]);
    // printf(" overhead time = %.6f secs (%d sessions)\n",
    // stealStack[i]->time[SS_OVH], stealStack[i]->entries[SS_OVH]);
    // printf(" search time = %.6f secs (%d sessions)\n",
    // stealStack[i]->time[SS_SEARCH], stealStack[i]->entries[SS_SEARCH]);
    // printf(" idle time = %.6f secs (%d sessions)\n",
    // stealStack[i]->time[SS_IDLE], stealStack[i]->entries[SS_IDLE]);
    // printf(" wakeups = %d, false wakeups = %d (%.2f%%)",
    // stealStack[i]->wakeups, stealStack[i]->falseWakeups,
    // (stealStack[i]->wakeups == 0) ? 0.00 : ((((double)stealStack[i]->falseWakeups)/stealStack[i]->wakeups)*100.0));
    // printf("\n");
    // }
    // }
    //
    // #ifdef TRACE
    // printSessionRecords();
    // #endif
    //
    // // tree statistics output to stat.txt, if requested
    // #ifdef UTS_STAT
    // if (stats) {
    // FILE *fp;
    // char * tmpstr;
    // char strBuf[5000];
    // int ind = 0;
    //
    // fp = fopen("stat.txt", "a+w");
    // fprintf(fp, "\n------------------------------------------------------------------------------------------------------\n");
    // ind = uts_paramsToStr(strBuf, ind);
    // ind = impl_paramsToStr(strBuf, ind);
    // //showParametersStr(strBuf);
    // fprintf(fp, "%s\n", strBuf);
    //
    // fprintf(fp, "\nTotal nodes = %d\n", totalNodes);
    // fprintf(fp, "Max depth = %d\n\n", maxHeight);
    // fprintf(fp, "Tseng ImbMeasure(overall)\n max:\t\t%lf \n avg:\t\t%lf \n devMaxAvg:\t %lf\n normDevMaxAvg: %lf\t\t\n\n",
    // imb_max/totalNodes, imb_avg/totalNodes, imb_devmaxavg/totalNodes,
    // imb_normdevmaxavg/totalNodes);
    //
    // switch (unbType){
    // case 0: tmpstr = "(min imb weighted by size)"; break;
    // case 1: tmpstr = "(min imb not weighted by size)"; break;
    // case 2: tmpstr = "(max imb not weighted by size)"; break;
    // default: tmpstr = "(?unknown measure)"; break;
    // }
    // fprintf(fp, "ImbMeasure:\t%s\n Overall:\t %lf\n Max:\t\t%lf\n Min:\t\t%lf\n\n",
    // tmpstr, treeImb, minImb, maxImb);
    // showHist(fp);
    // fprintf(fp, "\n------------------------------------------------------------------------------------------------------\n\n\n");
    // fclose(fp);
    // }
    // #endif
}
/* Main() function for: Sequential, OpenMP, UPC, and Shmem
*
* Notes on execution model:
* - under openMP, global vars are all shared
* - under UPC, global vars are private unless explicitly shared
* - UPC is SPMD starting with main, OpenMP goes SPMD after
* parsing parameters
*/
// SPMD entry point: every SHMEM PE runs an OpenMP team; the master thread on
// each PE expands tree roots (spawning tasks for the team), and between
// rounds steals more roots locally or from other PEs until global termination.
int main(int argc, char *argv[]) {
#ifdef THREAD_METADATA
    memset(t_metadata, 0x00, MAX_OMP_THREADS * sizeof(thread_metadata));
#endif
    memset(thread_info, 0x00, MAX_OMP_THREADS * sizeof(per_thread_info));
    memset(steal_buffer_locks, 0x00, MAX_SHMEM_THREADS * sizeof(long));
    shmem_init();
    pe = shmem_my_pe();
    npes = shmem_n_pes();
    /* determine benchmark parameters (all PEs) */
    uts_parseParams(argc, argv);
#ifdef UTS_STAT
    if (stats) {
        initHist();
    }
#endif
    double t1, t2, et;
    /* show parameter settings */
    if (pe == 0) {
        uts_printParams();
    }
    Node root;
    initRootNode(&root, type);
    shmem_barrier_all();
    /* time parallel search */
    t1 = uts_wctime();
    int n_omp_threads;
    /********** SPMD Parallel Region **********/
#pragma omp parallel
    {
#pragma omp master
        {
            int first = 1;
            n_omp_threads = omp_get_num_threads();
            assert(n_omp_threads <= MAX_OMP_THREADS);
            Node child;
retry:
            initNode(&child);
            if (first) {
                // only PE 0 owns the real tree root on the first pass
                if (pe == 0) {
                    genChildren(&root, &child);
                }
            } else {
                // on later passes `root` holds a stolen node: expand it here
                genChildren(&root, &child);
            }
            first = 0;
#pragma omp taskwait
            // drain our own steal buffer before asking other PEs for work
            if (n_buffered_steals > 0) {
                shmem_set_lock(&steal_buffer_locks[pe]);
                if (n_buffered_steals > 0) {
                    n_buffered_steals--;
                    memcpy(&root, &steal_buffer[n_buffered_steals], sizeof(root));
                    shmem_clear_lock(&steal_buffer_locks[pe]);
                    goto retry;
                } else {
                    shmem_clear_lock(&steal_buffer_locks[pe]);
                }
            }
            const int got_more_work = remote_steal(&root);
            if (got_more_work == 1) {
                goto retry;
            }
        }
    }
    shmem_barrier_all();
    t2 = uts_wctime();
    et = t2 - t1;
    int i;
    // fold per-thread counters into this PE's totals
    for (i = 0; i < MAX_OMP_THREADS; i++) {
        n_nodes += thread_info[i].n_nodes;
        n_leaves += thread_info[i].n_leaves;
    }
    shmem_barrier_all();
    // reduce every PE's totals onto PE 0 via SHMEM atomics
    if (pe != 0) {
        shmem_int_add(&n_nodes, n_nodes, 0);
        shmem_int_add(&n_leaves, n_leaves, 0);
    }
    shmem_barrier_all();
    if (pe == 0) {
        showStats(et);
    }
    /********** End Parallel Region **********/
#ifdef THREAD_METADATA
    int p;
    // print task counts PE by PE, in order, separated by barriers
    for (p = 0; p < npes; p++) {
        if (p == pe) {
            printf("\n");
            int i;
            for (i = 0; i < n_omp_threads; i++) {
                printf("PE %d, thread %d: %lu tasks\n", p, i, t_metadata[i].ntasks);
            }
        }
        shmem_barrier_all();
    }
#endif
    shmem_finalize();
    return 0;
}
|
boolarray.h | /**
* This code is released under the
* Apache License Version 2.0 http://www.apache.org/licenses/.
*
* (c) Daniel Lemire, http://lemire.me/en/
*/
#ifndef BOOLARRAY_H
#define BOOLARRAY_H
#include <iso646.h> // mostly for Microsoft compilers
#include <stdarg.h>
#include <cassert>
#include <cstring>
#include <iostream>
#include <vector>
#include <stdexcept>
#include <sstream>
// uncomment this for debugging
//#define EWAHASSERT
/**
* A dynamic bitset implementation. (without compression).
*/
template<class uword = uint32_t>
class BoolArray {
public:
  // Construct with capacity for n bits; every backing word starts as initval.
  BoolArray(const size_t n, const uword initval = 0)
      : buffer(n / wordinbits + (n % wordinbits == 0 ? 0 : 1), initval),
        sizeinbits(n) {}
  // Empty bitset (zero bits, no storage).
  BoolArray() : buffer(), sizeinbits(0) {}
  // Copy constructor: duplicates the word buffer and bit count.
  BoolArray(const BoolArray &ba)
      : buffer(ba.buffer), sizeinbits(ba.sizeinbits) {}
BoolArray(BoolArray &&bool_arr) noexcept {
this->buffer = std::move(bool_arr.buffer);
this->sizeinbits = bool_arr.sizeinbits;
}
BoolArray &operator=(BoolArray &&bool_arr) noexcept {
this->buffer = std::move(bool_arr.buffer);
this->sizeinbits = bool_arr.sizeinbits;
return *this;
}
  // Build a bitset with the n bit positions given as variadic int arguments
  // set; e.g. bitmapOf(3, 1, 5, 9) sets bits 1, 5 and 9.
  static BoolArray bitmapOf(size_t n, ...) {
    BoolArray ans;
    va_list vl;
    va_start(vl, n);
    for (size_t i = 0; i < n; i++) {
      // each argument must be passed as int (varargs promotion)
      ans.set(static_cast<size_t>(va_arg(vl, int)));
    }
    va_end(vl);
    return ans;
  }
// Yche Updated:
void clearWordsInParallelOMP(size_t range) {
#pragma omp for
for (auto word_idx = 0; word_idx < (range + wordinbits - 1) / wordinbits; word_idx++) {
setWord(word_idx, 0);
}
}
size_t sizeOfWords() const { return buffer.size(); }
size_t sizeOfWordsRange(size_t range) const { return (range + wordinbits - 1) / wordinbits; }
size_t sizeInBytes() const { return buffer.size() * sizeof(uword); }
void read(std::istream &in) {
sizeinbits = 0;
in.read(reinterpret_cast<char *>(&sizeinbits), sizeof(sizeinbits));
buffer.resize(sizeinbits / wordinbits +
(sizeinbits % wordinbits == 0 ? 0 : 1));
if (buffer.size() == 0)
return;
in.read(reinterpret_cast<char *>(&buffer[0]),
static_cast<std::streamsize>(buffer.size() * sizeof(uword)));
}
void readBuffer(std::istream &in, const size_t size) {
buffer.resize(size);
sizeinbits = size * sizeof(uword) * 8;
if (buffer.empty())
return;
in.read(reinterpret_cast<char *>(&buffer[0]),
buffer.size() * sizeof(uword));
}
void setSizeInBits(const size_t sizeib) { sizeinbits = sizeib; }
void write(std::ostream &out) { write(out, sizeinbits); }
void write(std::ostream &out, const size_t numberofbits) const {
const size_t size =
numberofbits / wordinbits + (numberofbits % wordinbits == 0 ? 0 : 1);
out.write(reinterpret_cast<const char *>(&numberofbits),
sizeof(numberofbits));
if (numberofbits == 0)
return;
out.write(reinterpret_cast<const char *>(&buffer[0]),
static_cast<std::streamsize>(size * sizeof(uword)));
}
void writeBuffer(std::ostream &out, const size_t numberofbits) const {
const size_t size =
numberofbits / wordinbits + (numberofbits % wordinbits == 0 ? 0 : 1);
if (size == 0)
return;
#ifdef EWAHASSERT
assert(buffer.size() >= size);
#endif
out.write(reinterpret_cast<const char *>(&buffer[0]), size * sizeof(uword));
}
size_t sizeOnDisk() const {
size_t size =
sizeinbits / wordinbits + (sizeinbits % wordinbits == 0 ? 0 : 1);
return sizeof(sizeinbits) + size * sizeof(uword);
}
BoolArray &operator=(const BoolArray &x) {
this->buffer = x.buffer;
this->sizeinbits = x.sizeinbits;
return *this;
}
bool operator==(const BoolArray &x) const {
if (sizeinbits != x.sizeinbits)
return false;
for (size_t k = 0; k < buffer.size(); ++k)
if (buffer[k] != x.buffer[k])
return false;
return true;
}
bool operator!=(const BoolArray &x) const { return !operator==(x); }
void setWord(const size_t pos, const uword val) {
#ifdef EWAHASSERT
assert(pos < buffer.size());
#endif
buffer[pos] = val;
}
void addWord(const uword val) {
if (sizeinbits % wordinbits != 0)
throw std::invalid_argument("you probably didn't want to do this");
sizeinbits += wordinbits;
buffer.push_back(val);
}
uword getWord(const size_t pos) const {
#ifdef EWAHASSERT
assert(pos < buffer.size());
#endif
return buffer[pos];
}
/**
* set to true (whether it was already set to true or not)
*/
void set(const size_t pos) {
if (pos >= sizeinbits)
padWithZeroes(pos + 1);
buffer[pos / wordinbits] |= (static_cast<uword>(1) << (pos % wordinbits));
}
void set_atomic(const size_t pos) {
auto *addr = &buffer[(pos / wordinbits)];
uword old_val;
uword new_val;
uword or_val = (static_cast<uword>(1) << (pos % wordinbits));
do {
old_val = *addr;
new_val = (old_val | or_val);
} while (!__sync_bool_compare_and_swap(addr, old_val, new_val));
}
void unset_atomic(const size_t pos) {
auto *addr = &buffer[(pos / wordinbits)];
uword old_val;
uword new_val;
uword and_val = ~(static_cast<uword>(1) << (pos % wordinbits));
do {
old_val = *addr;
new_val = (old_val & and_val);
} while (!__sync_bool_compare_and_swap(addr, old_val, new_val));
}
/**
* set to false (whether it was already set to false or not)
*
*/
void unset(const size_t pos) {
if (pos < sizeinbits)
buffer[pos / wordinbits] &=
~(static_cast<uword>(1) << (pos % wordinbits));
}
/**
* true of false? (set or unset)
*/
bool get(const size_t pos) const {
#ifdef EWAHASSERT
assert(pos / wordinbits < buffer.size());
#endif
return (buffer[pos / wordinbits] &
(static_cast<uword>(1) << (pos % wordinbits))) != 0;
}
bool operator[](const size_t pos) const {
#ifdef EWAHASSERT
assert(pos / wordinbits < buffer.size());
#endif
return (buffer[pos / wordinbits] &
(static_cast<uword>(1) << (pos % wordinbits))) != 0;
}
/**
* set all bits to 0
*/
void reset() {
if (buffer.size() > 0)
memset(&buffer[0], 0, sizeof(uword) * buffer.size());
sizeinbits = 0;
}
size_t sizeInBits() const { return sizeinbits; }
~BoolArray() {}
/**
* Computes the logical and and writes to the provided BoolArray (out).
* The current bitmaps is unchanged.
*/
void logicaland(const BoolArray &ba, BoolArray &out) const {
if (ba.buffer.size() < buffer.size())
out.setToSize(ba);
else
out.setToSize(*this);
for (size_t i = 0; i < out.buffer.size(); ++i)
out.buffer[i] = buffer[i] & ba.buffer[i];
}
/**
* Computes the logical and and return the result.
* The current bitmaps is unchanged.
*/
BoolArray logicaland(const BoolArray &a) const {
BoolArray answer;
logicaland(a, answer);
return answer;
}
void inplace_logicaland(const BoolArray &ba) {
if (ba.buffer.size() < buffer.size())
setToSize(ba);
for (size_t i = 0; i < buffer.size(); ++i)
buffer[i] = buffer[i] & ba.buffer[i];
}
/**
* Computes the logical andnot and writes to the provided BoolArray (out).
* The current bitmaps is unchanged.
*/
void logicalandnot(const BoolArray &ba, BoolArray &out) const {
out.setToSize(*this);
size_t upto = out.buffer.size() < ba.buffer.size() ? out.buffer.size()
: ba.buffer.size();
for (size_t i = 0; i < upto; ++i)
out.buffer[i] = buffer[i] & (~ba.buffer[i]);
for (size_t i = upto; i < out.buffer.size(); ++i)
out.buffer[i] = buffer[i];
out.clearBogusBits();
}
/**
* Computes the logical andnot and return the result.
* The current bitmaps is unchanged.
*/
BoolArray logicalandnot(const BoolArray &a) const {
BoolArray answer;
logicalandnot(a, answer);
return answer;
}
void inplace_logicalandnot(const BoolArray &ba) {
size_t upto =
buffer.size() < ba.buffer.size() ? buffer.size() : ba.buffer.size();
for (size_t i = 0; i < upto; ++i)
buffer[i] = buffer[i] & (~ba.buffer[i]);
clearBogusBits();
}
/**
* Computes the logical or and writes to the provided BoolArray (out).
* The current bitmaps is unchanged.
*/
void logicalor(const BoolArray &ba, BoolArray &out) const {
const BoolArray *smallest;
const BoolArray *largest;
if (ba.buffer.size() > buffer.size()) {
smallest = this;
largest = &ba;
out.setToSize(ba);
} else {
smallest = &ba;
largest = this;
out.setToSize(*this);
}
for (size_t i = 0; i < smallest->buffer.size(); ++i)
out.buffer[i] = buffer[i] | ba.buffer[i];
for (size_t i = smallest->buffer.size(); i < largest->buffer.size(); ++i)
out.buffer[i] = largest->buffer[i];
}
/**
* Computes the logical or and return the result.
* The current bitmaps is unchanged.
*/
BoolArray logicalor(const BoolArray &a) const {
BoolArray answer;
logicalor(a, answer);
return answer;
}
void inplace_logicalor(const BoolArray &ba) { logicalor(ba, *this); }
/**
* Computes the logical xor and writes to the provided BoolArray (out).
* The current bitmaps is unchanged.
*/
void logicalxor(const BoolArray &ba, BoolArray &out) const {
const BoolArray *smallest;
const BoolArray *largest;
if (ba.buffer.size() > buffer.size()) {
smallest = this;
largest = &ba;
out.setToSize(ba);
} else {
smallest = &ba;
largest = this;
out.setToSize(*this);
}
for (size_t i = 0; i < smallest->buffer.size(); ++i)
out.buffer[i] = buffer[i] ^ ba.buffer[i];
for (size_t i = smallest->buffer.size(); i < largest->buffer.size(); ++i)
out.buffer[i] = largest->buffer[i];
}
/**
* Computes the logical xor and return the result.
* The current bitmaps is unchanged.
*/
BoolArray logicalxor(const BoolArray &a) const {
BoolArray answer;
logicalxor(a, answer);
return answer;
}
void inplace_logicalxor(const BoolArray &ba) { logicalxor(ba, *this); }
/**
* Computes the logical not and writes to the provided BoolArray (out).
* The current bitmaps is unchanged.
*/
void logicalnot(BoolArray &out) const {
out.setToSize(*this);
for (size_t i = 0; i < buffer.size(); ++i)
out.buffer[i] = ~buffer[i];
out.clearBogusBits();
}
/**
* Computes the logical not and return the result.
* The current bitmaps is unchanged.
*/
BoolArray logicalandnot() const {
BoolArray answer;
logicalnot(answer);
return answer;
}
void inplace_logicalnot() {
for (size_t i = 0; i < buffer.size(); ++i)
buffer[i] = ~buffer[i];
clearBogusBits();
}
/**
* Returns the number of bits set to the value 1.
* The running time complexity is proportional to the
* size of the bitmap.
*
* This is sometimes called the cardinality.
*/
size_t numberOfOnes() const {
size_t count = 0;
for (size_t i = 0; i < buffer.size(); ++i) {
count += countOnes(buffer[i]);
}
return count;
}
inline void printout(std::ostream &o = std::cout) {
for (size_t k = 0; k < sizeinbits; ++k)
o << get(k) << " ";
o << std::endl;
}
/**
* Make sure the two bitmaps have the same size (padding with zeroes
* if necessary). It has constant running time complexity.
*/
void makeSameSize(BoolArray &a) {
if (a.sizeinbits < sizeinbits)
a.padWithZeroes(sizeinbits);
else if (sizeinbits < a.sizeinbits)
padWithZeroes(a.sizeinbits);
}
/**
* Make sure the current bitmap has the size of the provided bitmap.
*/
void setToSize(const BoolArray &a) {
sizeinbits = a.sizeinbits;
buffer.resize(a.buffer.size());
}
/**
* make sure the size of the array is totalbits bits by padding with zeroes.
* returns the number of words added (storage cost increase)
*/
size_t padWithZeroes(const size_t totalbits) {
size_t currentwordsize = (sizeinbits + wordinbits - 1) / wordinbits;
size_t neededwordsize = (totalbits + wordinbits - 1) / wordinbits;
#ifdef EWAHASSERT
assert(neededwordsize >= currentwordsize);
#endif
buffer.resize(neededwordsize);
sizeinbits = totalbits;
return static_cast<size_t>(neededwordsize - currentwordsize);
}
void append(const BoolArray &a);
enum {
wordinbits = sizeof(uword) * 8
};
std::vector<size_t> toArray() const {
std::vector<size_t> ans;
for (size_t k = 0; k < buffer.size(); ++k) {
uword myword = buffer[k];
while (myword != 0) {
uint32_t ntz = numberOfTrailingZeros(myword);
ans.push_back(sizeof(uword) * 8 * k + ntz);
myword ^= (static_cast<uword>(1) << ntz);
}
}
return ans;
}
/**
* Transform into a string that presents a list of set bits.
* The running time is linear in the size of the bitmap.
*/
operator std::string() const {
std::stringstream ss;
ss << *this;
return ss.str();
}
friend std::ostream &operator<<(std::ostream &out, const BoolArray &a) {
std::vector<size_t> v = a.toArray();
out << "{";
for (std::vector<size_t>::const_iterator i = v.begin(); i != v.end();) {
out << *i;
++i;
if (i != v.end())
out << ",";
}
out << "}";
return out;
// return (out << static_cast<std::string>(a));
}
private:
void clearBogusBits() {
if ((sizeinbits % wordinbits) != 0) {
const uword maskbogus =
(static_cast<uword>(1) << (sizeinbits % wordinbits)) - 1;
buffer[buffer.size() - 1] &= maskbogus;
}
}
std::vector<uword> buffer;
size_t sizeinbits{};
};
/**
 * computes the logical or (union) between "n" bitmaps (referenced by a
 * pointer).
 * The answer gets written out in container. This might be faster than calling
 * logicalor n-1 times.
 */
template <class uword>
void fast_logicalor_tocontainer(size_t n, const BoolArray<uword> **inputs,
                                BoolArray<uword> &container) {
  if (n == 0) {
    container.reset();
    return;
  }
  container = *inputs[0];
  // Fix: start at 1 — inputs[0] was already copied into container, and the
  // original loop OR-ed it with itself (a wasted full pass over the bitmap).
  for (size_t i = 1; i < n; i++) {
    container.inplace_logicalor(*inputs[i]);
  }
}
/**
 * computes the logical or (union) between "n" bitmaps (referenced by a
 * pointer).
 * Returns the answer. This might be faster than calling
 * logicalor n-1 times.
 */
template <class uword>
BoolArray<uword> fast_logicalor(size_t n, const BoolArray<uword> **inputs) {
  BoolArray<uword> result;
  fast_logicalor_tocontainer<uword>(n, inputs, result);
  return result;
}
// Append another bitmap's words to this one. Only legal when the current
// logical size is word-aligned; otherwise the operation is rejected and
// this bitmap is left untouched.
template <class uword>
void BoolArray<uword>::append(const BoolArray &a) {
  if (sizeinbits % wordinbits != 0)
    throw std::invalid_argument(
        "Cannot append if parent does not meet boundary");
  buffer.insert(buffer.end(), a.buffer.begin(), a.buffer.end());
  sizeinbits += a.sizeinbits;
}
#endif
|
dds.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% DDDD DDDD SSSSS %
% D D D D SS %
% D D D D SSS %
% D D D D SS %
% DDDD DDDD SSSSS %
% %
% %
% Read/Write Microsoft Direct Draw Surface Image Format %
% %
% Software Design %
% Bianca van Schaik %
% March 2008 %
% Dirk Lemstra %
% September 2013 %
% %
% %
% Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/attribute.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/profile.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/static.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/module.h"
#include "MagickCore/transform.h"
/*
Definitions
*/
#define DDSD_CAPS 0x00000001
#define DDSD_HEIGHT 0x00000002
#define DDSD_WIDTH 0x00000004
#define DDSD_PITCH 0x00000008
#define DDSD_PIXELFORMAT 0x00001000
#define DDSD_MIPMAPCOUNT 0x00020000
#define DDSD_LINEARSIZE 0x00080000
#define DDSD_DEPTH 0x00800000
#define DDPF_ALPHAPIXELS 0x00000001
#define DDPF_FOURCC 0x00000004
#define DDPF_RGB 0x00000040
#define DDPF_LUMINANCE 0x00020000
#define FOURCC_DXT1 0x31545844
#define FOURCC_DXT3 0x33545844
#define FOURCC_DXT5 0x35545844
#define DDSCAPS_COMPLEX 0x00000008
#define DDSCAPS_TEXTURE 0x00001000
#define DDSCAPS_MIPMAP 0x00400000
#define DDSCAPS2_CUBEMAP 0x00000200
#define DDSCAPS2_CUBEMAP_POSITIVEX 0x00000400
#define DDSCAPS2_CUBEMAP_NEGATIVEX 0x00000800
#define DDSCAPS2_CUBEMAP_POSITIVEY 0x00001000
#define DDSCAPS2_CUBEMAP_NEGATIVEY 0x00002000
#define DDSCAPS2_CUBEMAP_POSITIVEZ 0x00004000
#define DDSCAPS2_CUBEMAP_NEGATIVEZ 0x00008000
#define DDSCAPS2_VOLUME 0x00200000
#ifndef SIZE_MAX
#define SIZE_MAX ((size_t) -1)
#endif
/*
  Structure declarations.
*/
/* In-memory copy of the DDS_PIXELFORMAT header block: describes how pixel
   data is stored (FourCC-compressed or raw RGB(A) bit masks). */
typedef struct _DDSPixelFormat
{
  size_t
    flags,          /* DDPF_* bits saying which fields below are valid */
    fourcc,         /* FOURCC_DXT1/3/5 when DDPF_FOURCC is set */
    rgb_bitcount,   /* bits per pixel for uncompressed data */
    r_bitmask,      /* channel extraction masks for uncompressed data */
    g_bitmask,
    b_bitmask,
    alpha_bitmask;
} DDSPixelFormat;

/* In-memory copy of the DDS surface header. */
typedef struct _DDSInfo
{
  size_t
    flags,              /* DDSD_* bits saying which fields are valid */
    height,
    width,
    pitchOrLinearSize,  /* row pitch or total size, per DDSD_PITCH/LINEARSIZE */
    depth,              /* volume depth when DDSD_DEPTH is set */
    mipmapcount,
    ddscaps1,           /* DDSCAPS_* capability bits */
    ddscaps2;           /* DDSCAPS2_* capability bits */

  DDSPixelFormat
    pixelformat;
} DDSInfo;

/* Four decoded colour entries (presumably the 4-entry palette of a DXT
   block -- confirm against the decoders), one byte per channel. */
typedef struct _DDSColors
{
  unsigned char
    r[4],
    g[4],
    b[4],
    a[4];
} DDSColors;

/* Simple float vector types used by the compression math. */
typedef struct _DDSVector4
{
  float
    x,
    y,
    z,
    w;
} DDSVector4;

typedef struct _DDSVector3
{
  float
    x,
    y,
    z;
} DDSVector3;

/* One candidate in the single-colour lookup tables below: start/end
   endpoint values and the resulting error. */
typedef struct _DDSSourceBlock
{
  unsigned char
    start,
    end,
    error;
} DDSSourceBlock;

/* Two candidate source blocks per target value (one per lookup mode). */
typedef struct _DDSSingleColourLookup
{
  DDSSourceBlock sources[2];
} DDSSingleColourLookup;

/* Function type for a per-format image decoder. */
typedef MagickBooleanType
  DDSDecoder(const ImageInfo *,Image *,DDSInfo *,const MagickBooleanType,
    ExceptionInfo *);

/* Function type for a per-format pixel decoder. */
typedef MagickBooleanType
  DDSPixelDecoder(Image *,DDSInfo *,ExceptionInfo *);
/*
  Single-colour lookup table for 5-bit channels (indexed by the 8-bit target
  value; each entry gives two candidate {start, end, error} source blocks).
  NOTE(review): generated data -- do not edit by hand.
*/
static const DDSSingleColourLookup DDSLookup_5_4[] =
{
  { { { 0, 0, 0 }, { 0, 0, 0 } } },
  { { { 0, 0, 1 }, { 0, 1, 1 } } },
  { { { 0, 0, 2 }, { 0, 1, 0 } } },
  { { { 0, 0, 3 }, { 0, 1, 1 } } },
  { { { 0, 0, 4 }, { 0, 2, 1 } } },
  { { { 1, 0, 3 }, { 0, 2, 0 } } },
  { { { 1, 0, 2 }, { 0, 2, 1 } } },
  { { { 1, 0, 1 }, { 0, 3, 1 } } },
  { { { 1, 0, 0 }, { 0, 3, 0 } } },
  { { { 1, 0, 1 }, { 1, 2, 1 } } },
  { { { 1, 0, 2 }, { 1, 2, 0 } } },
  { { { 1, 0, 3 }, { 0, 4, 0 } } },
  { { { 1, 0, 4 }, { 0, 5, 1 } } },
  { { { 2, 0, 3 }, { 0, 5, 0 } } },
  { { { 2, 0, 2 }, { 0, 5, 1 } } },
  { { { 2, 0, 1 }, { 0, 6, 1 } } },
  { { { 2, 0, 0 }, { 0, 6, 0 } } },
  { { { 2, 0, 1 }, { 2, 3, 1 } } },
  { { { 2, 0, 2 }, { 2, 3, 0 } } },
  { { { 2, 0, 3 }, { 0, 7, 0 } } },
  { { { 2, 0, 4 }, { 1, 6, 1 } } },
  { { { 3, 0, 3 }, { 1, 6, 0 } } },
  { { { 3, 0, 2 }, { 0, 8, 0 } } },
  { { { 3, 0, 1 }, { 0, 9, 1 } } },
  { { { 3, 0, 0 }, { 0, 9, 0 } } },
  { { { 3, 0, 1 }, { 0, 9, 1 } } },
  { { { 3, 0, 2 }, { 0, 10, 1 } } },
  { { { 3, 0, 3 }, { 0, 10, 0 } } },
  { { { 3, 0, 4 }, { 2, 7, 1 } } },
  { { { 4, 0, 4 }, { 2, 7, 0 } } },
  { { { 4, 0, 3 }, { 0, 11, 0 } } },
  { { { 4, 0, 2 }, { 1, 10, 1 } } },
  { { { 4, 0, 1 }, { 1, 10, 0 } } },
  { { { 4, 0, 0 }, { 0, 12, 0 } } },
  { { { 4, 0, 1 }, { 0, 13, 1 } } },
  { { { 4, 0, 2 }, { 0, 13, 0 } } },
  { { { 4, 0, 3 }, { 0, 13, 1 } } },
  { { { 4, 0, 4 }, { 0, 14, 1 } } },
  { { { 5, 0, 3 }, { 0, 14, 0 } } },
  { { { 5, 0, 2 }, { 2, 11, 1 } } },
  { { { 5, 0, 1 }, { 2, 11, 0 } } },
  { { { 5, 0, 0 }, { 0, 15, 0 } } },
  { { { 5, 0, 1 }, { 1, 14, 1 } } },
  { { { 5, 0, 2 }, { 1, 14, 0 } } },
  { { { 5, 0, 3 }, { 0, 16, 0 } } },
  { { { 5, 0, 4 }, { 0, 17, 1 } } },
  { { { 6, 0, 3 }, { 0, 17, 0 } } },
  { { { 6, 0, 2 }, { 0, 17, 1 } } },
  { { { 6, 0, 1 }, { 0, 18, 1 } } },
  { { { 6, 0, 0 }, { 0, 18, 0 } } },
  { { { 6, 0, 1 }, { 2, 15, 1 } } },
  { { { 6, 0, 2 }, { 2, 15, 0 } } },
  { { { 6, 0, 3 }, { 0, 19, 0 } } },
  { { { 6, 0, 4 }, { 1, 18, 1 } } },
  { { { 7, 0, 3 }, { 1, 18, 0 } } },
  { { { 7, 0, 2 }, { 0, 20, 0 } } },
  { { { 7, 0, 1 }, { 0, 21, 1 } } },
  { { { 7, 0, 0 }, { 0, 21, 0 } } },
  { { { 7, 0, 1 }, { 0, 21, 1 } } },
  { { { 7, 0, 2 }, { 0, 22, 1 } } },
  { { { 7, 0, 3 }, { 0, 22, 0 } } },
  { { { 7, 0, 4 }, { 2, 19, 1 } } },
  { { { 8, 0, 4 }, { 2, 19, 0 } } },
  { { { 8, 0, 3 }, { 0, 23, 0 } } },
  { { { 8, 0, 2 }, { 1, 22, 1 } } },
  { { { 8, 0, 1 }, { 1, 22, 0 } } },
  { { { 8, 0, 0 }, { 0, 24, 0 } } },
  { { { 8, 0, 1 }, { 0, 25, 1 } } },
  { { { 8, 0, 2 }, { 0, 25, 0 } } },
  { { { 8, 0, 3 }, { 0, 25, 1 } } },
  { { { 8, 0, 4 }, { 0, 26, 1 } } },
  { { { 9, 0, 3 }, { 0, 26, 0 } } },
  { { { 9, 0, 2 }, { 2, 23, 1 } } },
  { { { 9, 0, 1 }, { 2, 23, 0 } } },
  { { { 9, 0, 0 }, { 0, 27, 0 } } },
  { { { 9, 0, 1 }, { 1, 26, 1 } } },
  { { { 9, 0, 2 }, { 1, 26, 0 } } },
  { { { 9, 0, 3 }, { 0, 28, 0 } } },
  { { { 9, 0, 4 }, { 0, 29, 1 } } },
  { { { 10, 0, 3 }, { 0, 29, 0 } } },
  { { { 10, 0, 2 }, { 0, 29, 1 } } },
  { { { 10, 0, 1 }, { 0, 30, 1 } } },
  { { { 10, 0, 0 }, { 0, 30, 0 } } },
  { { { 10, 0, 1 }, { 2, 27, 1 } } },
  { { { 10, 0, 2 }, { 2, 27, 0 } } },
  { { { 10, 0, 3 }, { 0, 31, 0 } } },
  { { { 10, 0, 4 }, { 1, 30, 1 } } },
  { { { 11, 0, 3 }, { 1, 30, 0 } } },
  { { { 11, 0, 2 }, { 4, 24, 0 } } },
  { { { 11, 0, 1 }, { 1, 31, 1 } } },
  { { { 11, 0, 0 }, { 1, 31, 0 } } },
  { { { 11, 0, 1 }, { 1, 31, 1 } } },
  { { { 11, 0, 2 }, { 2, 30, 1 } } },
  { { { 11, 0, 3 }, { 2, 30, 0 } } },
  { { { 11, 0, 4 }, { 2, 31, 1 } } },
  { { { 12, 0, 4 }, { 2, 31, 0 } } },
  { { { 12, 0, 3 }, { 4, 27, 0 } } },
  { { { 12, 0, 2 }, { 3, 30, 1 } } },
  { { { 12, 0, 1 }, { 3, 30, 0 } } },
  { { { 12, 0, 0 }, { 4, 28, 0 } } },
  { { { 12, 0, 1 }, { 3, 31, 1 } } },
  { { { 12, 0, 2 }, { 3, 31, 0 } } },
  { { { 12, 0, 3 }, { 3, 31, 1 } } },
  { { { 12, 0, 4 }, { 4, 30, 1 } } },
  { { { 13, 0, 3 }, { 4, 30, 0 } } },
  { { { 13, 0, 2 }, { 6, 27, 1 } } },
  { { { 13, 0, 1 }, { 6, 27, 0 } } },
  { { { 13, 0, 0 }, { 4, 31, 0 } } },
  { { { 13, 0, 1 }, { 5, 30, 1 } } },
  { { { 13, 0, 2 }, { 5, 30, 0 } } },
  { { { 13, 0, 3 }, { 8, 24, 0 } } },
  { { { 13, 0, 4 }, { 5, 31, 1 } } },
  { { { 14, 0, 3 }, { 5, 31, 0 } } },
  { { { 14, 0, 2 }, { 5, 31, 1 } } },
  { { { 14, 0, 1 }, { 6, 30, 1 } } },
  { { { 14, 0, 0 }, { 6, 30, 0 } } },
  { { { 14, 0, 1 }, { 6, 31, 1 } } },
  { { { 14, 0, 2 }, { 6, 31, 0 } } },
  { { { 14, 0, 3 }, { 8, 27, 0 } } },
  { { { 14, 0, 4 }, { 7, 30, 1 } } },
  { { { 15, 0, 3 }, { 7, 30, 0 } } },
  { { { 15, 0, 2 }, { 8, 28, 0 } } },
  { { { 15, 0, 1 }, { 7, 31, 1 } } },
  { { { 15, 0, 0 }, { 7, 31, 0 } } },
  { { { 15, 0, 1 }, { 7, 31, 1 } } },
  { { { 15, 0, 2 }, { 8, 30, 1 } } },
  { { { 15, 0, 3 }, { 8, 30, 0 } } },
  { { { 15, 0, 4 }, { 10, 27, 1 } } },
  { { { 16, 0, 4 }, { 10, 27, 0 } } },
  { { { 16, 0, 3 }, { 8, 31, 0 } } },
  { { { 16, 0, 2 }, { 9, 30, 1 } } },
  { { { 16, 0, 1 }, { 9, 30, 0 } } },
  { { { 16, 0, 0 }, { 12, 24, 0 } } },
  { { { 16, 0, 1 }, { 9, 31, 1 } } },
  { { { 16, 0, 2 }, { 9, 31, 0 } } },
  { { { 16, 0, 3 }, { 9, 31, 1 } } },
  { { { 16, 0, 4 }, { 10, 30, 1 } } },
  { { { 17, 0, 3 }, { 10, 30, 0 } } },
  { { { 17, 0, 2 }, { 10, 31, 1 } } },
  { { { 17, 0, 1 }, { 10, 31, 0 } } },
  { { { 17, 0, 0 }, { 12, 27, 0 } } },
  { { { 17, 0, 1 }, { 11, 30, 1 } } },
  { { { 17, 0, 2 }, { 11, 30, 0 } } },
  { { { 17, 0, 3 }, { 12, 28, 0 } } },
  { { { 17, 0, 4 }, { 11, 31, 1 } } },
  { { { 18, 0, 3 }, { 11, 31, 0 } } },
  { { { 18, 0, 2 }, { 11, 31, 1 } } },
  { { { 18, 0, 1 }, { 12, 30, 1 } } },
  { { { 18, 0, 0 }, { 12, 30, 0 } } },
  { { { 18, 0, 1 }, { 14, 27, 1 } } },
  { { { 18, 0, 2 }, { 14, 27, 0 } } },
  { { { 18, 0, 3 }, { 12, 31, 0 } } },
  { { { 18, 0, 4 }, { 13, 30, 1 } } },
  { { { 19, 0, 3 }, { 13, 30, 0 } } },
  { { { 19, 0, 2 }, { 16, 24, 0 } } },
  { { { 19, 0, 1 }, { 13, 31, 1 } } },
  { { { 19, 0, 0 }, { 13, 31, 0 } } },
  { { { 19, 0, 1 }, { 13, 31, 1 } } },
  { { { 19, 0, 2 }, { 14, 30, 1 } } },
  { { { 19, 0, 3 }, { 14, 30, 0 } } },
  { { { 19, 0, 4 }, { 14, 31, 1 } } },
  { { { 20, 0, 4 }, { 14, 31, 0 } } },
  { { { 20, 0, 3 }, { 16, 27, 0 } } },
  { { { 20, 0, 2 }, { 15, 30, 1 } } },
  { { { 20, 0, 1 }, { 15, 30, 0 } } },
  { { { 20, 0, 0 }, { 16, 28, 0 } } },
  { { { 20, 0, 1 }, { 15, 31, 1 } } },
  { { { 20, 0, 2 }, { 15, 31, 0 } } },
  { { { 20, 0, 3 }, { 15, 31, 1 } } },
  { { { 20, 0, 4 }, { 16, 30, 1 } } },
  { { { 21, 0, 3 }, { 16, 30, 0 } } },
  { { { 21, 0, 2 }, { 18, 27, 1 } } },
  { { { 21, 0, 1 }, { 18, 27, 0 } } },
  { { { 21, 0, 0 }, { 16, 31, 0 } } },
  { { { 21, 0, 1 }, { 17, 30, 1 } } },
  { { { 21, 0, 2 }, { 17, 30, 0 } } },
  { { { 21, 0, 3 }, { 20, 24, 0 } } },
  { { { 21, 0, 4 }, { 17, 31, 1 } } },
  { { { 22, 0, 3 }, { 17, 31, 0 } } },
  { { { 22, 0, 2 }, { 17, 31, 1 } } },
  { { { 22, 0, 1 }, { 18, 30, 1 } } },
  { { { 22, 0, 0 }, { 18, 30, 0 } } },
  { { { 22, 0, 1 }, { 18, 31, 1 } } },
  { { { 22, 0, 2 }, { 18, 31, 0 } } },
  { { { 22, 0, 3 }, { 20, 27, 0 } } },
  { { { 22, 0, 4 }, { 19, 30, 1 } } },
  { { { 23, 0, 3 }, { 19, 30, 0 } } },
  { { { 23, 0, 2 }, { 20, 28, 0 } } },
  { { { 23, 0, 1 }, { 19, 31, 1 } } },
  { { { 23, 0, 0 }, { 19, 31, 0 } } },
  { { { 23, 0, 1 }, { 19, 31, 1 } } },
  { { { 23, 0, 2 }, { 20, 30, 1 } } },
  { { { 23, 0, 3 }, { 20, 30, 0 } } },
  { { { 23, 0, 4 }, { 22, 27, 1 } } },
  { { { 24, 0, 4 }, { 22, 27, 0 } } },
  { { { 24, 0, 3 }, { 20, 31, 0 } } },
  { { { 24, 0, 2 }, { 21, 30, 1 } } },
  { { { 24, 0, 1 }, { 21, 30, 0 } } },
  { { { 24, 0, 0 }, { 24, 24, 0 } } },
  { { { 24, 0, 1 }, { 21, 31, 1 } } },
  { { { 24, 0, 2 }, { 21, 31, 0 } } },
  { { { 24, 0, 3 }, { 21, 31, 1 } } },
  { { { 24, 0, 4 }, { 22, 30, 1 } } },
  { { { 25, 0, 3 }, { 22, 30, 0 } } },
  { { { 25, 0, 2 }, { 22, 31, 1 } } },
  { { { 25, 0, 1 }, { 22, 31, 0 } } },
  { { { 25, 0, 0 }, { 24, 27, 0 } } },
  { { { 25, 0, 1 }, { 23, 30, 1 } } },
  { { { 25, 0, 2 }, { 23, 30, 0 } } },
  { { { 25, 0, 3 }, { 24, 28, 0 } } },
  { { { 25, 0, 4 }, { 23, 31, 1 } } },
  { { { 26, 0, 3 }, { 23, 31, 0 } } },
  { { { 26, 0, 2 }, { 23, 31, 1 } } },
  { { { 26, 0, 1 }, { 24, 30, 1 } } },
  { { { 26, 0, 0 }, { 24, 30, 0 } } },
  { { { 26, 0, 1 }, { 26, 27, 1 } } },
  { { { 26, 0, 2 }, { 26, 27, 0 } } },
  { { { 26, 0, 3 }, { 24, 31, 0 } } },
  { { { 26, 0, 4 }, { 25, 30, 1 } } },
  { { { 27, 0, 3 }, { 25, 30, 0 } } },
  { { { 27, 0, 2 }, { 28, 24, 0 } } },
  { { { 27, 0, 1 }, { 25, 31, 1 } } },
  { { { 27, 0, 0 }, { 25, 31, 0 } } },
  { { { 27, 0, 1 }, { 25, 31, 1 } } },
  { { { 27, 0, 2 }, { 26, 30, 1 } } },
  { { { 27, 0, 3 }, { 26, 30, 0 } } },
  { { { 27, 0, 4 }, { 26, 31, 1 } } },
  { { { 28, 0, 4 }, { 26, 31, 0 } } },
  { { { 28, 0, 3 }, { 28, 27, 0 } } },
  { { { 28, 0, 2 }, { 27, 30, 1 } } },
  { { { 28, 0, 1 }, { 27, 30, 0 } } },
  { { { 28, 0, 0 }, { 28, 28, 0 } } },
  { { { 28, 0, 1 }, { 27, 31, 1 } } },
  { { { 28, 0, 2 }, { 27, 31, 0 } } },
  { { { 28, 0, 3 }, { 27, 31, 1 } } },
  { { { 28, 0, 4 }, { 28, 30, 1 } } },
  { { { 29, 0, 3 }, { 28, 30, 0 } } },
  { { { 29, 0, 2 }, { 30, 27, 1 } } },
  { { { 29, 0, 1 }, { 30, 27, 0 } } },
  { { { 29, 0, 0 }, { 28, 31, 0 } } },
  { { { 29, 0, 1 }, { 29, 30, 1 } } },
  { { { 29, 0, 2 }, { 29, 30, 0 } } },
  { { { 29, 0, 3 }, { 29, 30, 1 } } },
  { { { 29, 0, 4 }, { 29, 31, 1 } } },
  { { { 30, 0, 3 }, { 29, 31, 0 } } },
  { { { 30, 0, 2 }, { 29, 31, 1 } } },
  { { { 30, 0, 1 }, { 30, 30, 1 } } },
  { { { 30, 0, 0 }, { 30, 30, 0 } } },
  { { { 30, 0, 1 }, { 30, 31, 1 } } },
  { { { 30, 0, 2 }, { 30, 31, 0 } } },
  { { { 30, 0, 3 }, { 30, 31, 1 } } },
  { { { 30, 0, 4 }, { 31, 30, 1 } } },
  { { { 31, 0, 3 }, { 31, 30, 0 } } },
  { { { 31, 0, 2 }, { 31, 30, 1 } } },
  { { { 31, 0, 1 }, { 31, 31, 1 } } },
  { { { 31, 0, 0 }, { 31, 31, 0 } } }
};
/*
  Single-colour lookup table for 6-bit channels (indexed by the 8-bit target
  value; each entry gives two candidate {start, end, error} source blocks).
  NOTE(review): generated data -- do not edit by hand.
*/
static const DDSSingleColourLookup DDSLookup_6_4[] =
{
  { { { 0, 0, 0 }, { 0, 0, 0 } } },
  { { { 0, 0, 1 }, { 0, 1, 0 } } },
  { { { 0, 0, 2 }, { 0, 2, 0 } } },
  { { { 1, 0, 1 }, { 0, 3, 1 } } },
  { { { 1, 0, 0 }, { 0, 3, 0 } } },
  { { { 1, 0, 1 }, { 0, 4, 0 } } },
  { { { 1, 0, 2 }, { 0, 5, 0 } } },
  { { { 2, 0, 1 }, { 0, 6, 1 } } },
  { { { 2, 0, 0 }, { 0, 6, 0 } } },
  { { { 2, 0, 1 }, { 0, 7, 0 } } },
  { { { 2, 0, 2 }, { 0, 8, 0 } } },
  { { { 3, 0, 1 }, { 0, 9, 1 } } },
  { { { 3, 0, 0 }, { 0, 9, 0 } } },
  { { { 3, 0, 1 }, { 0, 10, 0 } } },
  { { { 3, 0, 2 }, { 0, 11, 0 } } },
  { { { 4, 0, 1 }, { 0, 12, 1 } } },
  { { { 4, 0, 0 }, { 0, 12, 0 } } },
  { { { 4, 0, 1 }, { 0, 13, 0 } } },
  { { { 4, 0, 2 }, { 0, 14, 0 } } },
  { { { 5, 0, 1 }, { 0, 15, 1 } } },
  { { { 5, 0, 0 }, { 0, 15, 0 } } },
  { { { 5, 0, 1 }, { 0, 16, 0 } } },
  { { { 5, 0, 2 }, { 1, 15, 0 } } },
  { { { 6, 0, 1 }, { 0, 17, 0 } } },
  { { { 6, 0, 0 }, { 0, 18, 0 } } },
  { { { 6, 0, 1 }, { 0, 19, 0 } } },
  { { { 6, 0, 2 }, { 3, 14, 0 } } },
  { { { 7, 0, 1 }, { 0, 20, 0 } } },
  { { { 7, 0, 0 }, { 0, 21, 0 } } },
  { { { 7, 0, 1 }, { 0, 22, 0 } } },
  { { { 7, 0, 2 }, { 4, 15, 0 } } },
  { { { 8, 0, 1 }, { 0, 23, 0 } } },
  { { { 8, 0, 0 }, { 0, 24, 0 } } },
  { { { 8, 0, 1 }, { 0, 25, 0 } } },
  { { { 8, 0, 2 }, { 6, 14, 0 } } },
  { { { 9, 0, 1 }, { 0, 26, 0 } } },
  { { { 9, 0, 0 }, { 0, 27, 0 } } },
  { { { 9, 0, 1 }, { 0, 28, 0 } } },
  { { { 9, 0, 2 }, { 7, 15, 0 } } },
  { { { 10, 0, 1 }, { 0, 29, 0 } } },
  { { { 10, 0, 0 }, { 0, 30, 0 } } },
  { { { 10, 0, 1 }, { 0, 31, 0 } } },
  { { { 10, 0, 2 }, { 9, 14, 0 } } },
  { { { 11, 0, 1 }, { 0, 32, 0 } } },
  { { { 11, 0, 0 }, { 0, 33, 0 } } },
  { { { 11, 0, 1 }, { 2, 30, 0 } } },
  { { { 11, 0, 2 }, { 0, 34, 0 } } },
  { { { 12, 0, 1 }, { 0, 35, 0 } } },
  { { { 12, 0, 0 }, { 0, 36, 0 } } },
  { { { 12, 0, 1 }, { 3, 31, 0 } } },
  { { { 12, 0, 2 }, { 0, 37, 0 } } },
  { { { 13, 0, 1 }, { 0, 38, 0 } } },
  { { { 13, 0, 0 }, { 0, 39, 0 } } },
  { { { 13, 0, 1 }, { 5, 30, 0 } } },
  { { { 13, 0, 2 }, { 0, 40, 0 } } },
  { { { 14, 0, 1 }, { 0, 41, 0 } } },
  { { { 14, 0, 0 }, { 0, 42, 0 } } },
  { { { 14, 0, 1 }, { 6, 31, 0 } } },
  { { { 14, 0, 2 }, { 0, 43, 0 } } },
  { { { 15, 0, 1 }, { 0, 44, 0 } } },
  { { { 15, 0, 0 }, { 0, 45, 0 } } },
  { { { 15, 0, 1 }, { 8, 30, 0 } } },
  { { { 15, 0, 2 }, { 0, 46, 0 } } },
  { { { 16, 0, 2 }, { 0, 47, 0 } } },
  { { { 16, 0, 1 }, { 1, 46, 0 } } },
  { { { 16, 0, 0 }, { 0, 48, 0 } } },
  { { { 16, 0, 1 }, { 0, 49, 0 } } },
  { { { 16, 0, 2 }, { 0, 50, 0 } } },
  { { { 17, 0, 1 }, { 2, 47, 0 } } },
  { { { 17, 0, 0 }, { 0, 51, 0 } } },
  { { { 17, 0, 1 }, { 0, 52, 0 } } },
  { { { 17, 0, 2 }, { 0, 53, 0 } } },
  { { { 18, 0, 1 }, { 4, 46, 0 } } },
  { { { 18, 0, 0 }, { 0, 54, 0 } } },
  { { { 18, 0, 1 }, { 0, 55, 0 } } },
  { { { 18, 0, 2 }, { 0, 56, 0 } } },
  { { { 19, 0, 1 }, { 5, 47, 0 } } },
  { { { 19, 0, 0 }, { 0, 57, 0 } } },
  { { { 19, 0, 1 }, { 0, 58, 0 } } },
  { { { 19, 0, 2 }, { 0, 59, 0 } } },
  { { { 20, 0, 1 }, { 7, 46, 0 } } },
  { { { 20, 0, 0 }, { 0, 60, 0 } } },
  { { { 20, 0, 1 }, { 0, 61, 0 } } },
  { { { 20, 0, 2 }, { 0, 62, 0 } } },
  { { { 21, 0, 1 }, { 8, 47, 0 } } },
  { { { 21, 0, 0 }, { 0, 63, 0 } } },
  { { { 21, 0, 1 }, { 1, 62, 0 } } },
  { { { 21, 0, 2 }, { 1, 63, 0 } } },
  { { { 22, 0, 1 }, { 10, 46, 0 } } },
  { { { 22, 0, 0 }, { 2, 62, 0 } } },
  { { { 22, 0, 1 }, { 2, 63, 0 } } },
  { { { 22, 0, 2 }, { 3, 62, 0 } } },
  { { { 23, 0, 1 }, { 11, 47, 0 } } },
  { { { 23, 0, 0 }, { 3, 63, 0 } } },
  { { { 23, 0, 1 }, { 4, 62, 0 } } },
  { { { 23, 0, 2 }, { 4, 63, 0 } } },
  { { { 24, 0, 1 }, { 13, 46, 0 } } },
  { { { 24, 0, 0 }, { 5, 62, 0 } } },
  { { { 24, 0, 1 }, { 5, 63, 0 } } },
  { { { 24, 0, 2 }, { 6, 62, 0 } } },
  { { { 25, 0, 1 }, { 14, 47, 0 } } },
  { { { 25, 0, 0 }, { 6, 63, 0 } } },
  { { { 25, 0, 1 }, { 7, 62, 0 } } },
  { { { 25, 0, 2 }, { 7, 63, 0 } } },
  { { { 26, 0, 1 }, { 16, 45, 0 } } },
  { { { 26, 0, 0 }, { 8, 62, 0 } } },
  { { { 26, 0, 1 }, { 8, 63, 0 } } },
  { { { 26, 0, 2 }, { 9, 62, 0 } } },
  { { { 27, 0, 1 }, { 16, 48, 0 } } },
  { { { 27, 0, 0 }, { 9, 63, 0 } } },
  { { { 27, 0, 1 }, { 10, 62, 0 } } },
  { { { 27, 0, 2 }, { 10, 63, 0 } } },
  { { { 28, 0, 1 }, { 16, 51, 0 } } },
  { { { 28, 0, 0 }, { 11, 62, 0 } } },
  { { { 28, 0, 1 }, { 11, 63, 0 } } },
  { { { 28, 0, 2 }, { 12, 62, 0 } } },
  { { { 29, 0, 1 }, { 16, 54, 0 } } },
  { { { 29, 0, 0 }, { 12, 63, 0 } } },
  { { { 29, 0, 1 }, { 13, 62, 0 } } },
  { { { 29, 0, 2 }, { 13, 63, 0 } } },
  { { { 30, 0, 1 }, { 16, 57, 0 } } },
  { { { 30, 0, 0 }, { 14, 62, 0 } } },
  { { { 30, 0, 1 }, { 14, 63, 0 } } },
  { { { 30, 0, 2 }, { 15, 62, 0 } } },
  { { { 31, 0, 1 }, { 16, 60, 0 } } },
  { { { 31, 0, 0 }, { 15, 63, 0 } } },
  { { { 31, 0, 1 }, { 24, 46, 0 } } },
  { { { 31, 0, 2 }, { 16, 62, 0 } } },
  { { { 32, 0, 2 }, { 16, 63, 0 } } },
  { { { 32, 0, 1 }, { 17, 62, 0 } } },
  { { { 32, 0, 0 }, { 25, 47, 0 } } },
  { { { 32, 0, 1 }, { 17, 63, 0 } } },
  { { { 32, 0, 2 }, { 18, 62, 0 } } },
  { { { 33, 0, 1 }, { 18, 63, 0 } } },
  { { { 33, 0, 0 }, { 27, 46, 0 } } },
  { { { 33, 0, 1 }, { 19, 62, 0 } } },
  { { { 33, 0, 2 }, { 19, 63, 0 } } },
  { { { 34, 0, 1 }, { 20, 62, 0 } } },
  { { { 34, 0, 0 }, { 28, 47, 0 } } },
  { { { 34, 0, 1 }, { 20, 63, 0 } } },
  { { { 34, 0, 2 }, { 21, 62, 0 } } },
  { { { 35, 0, 1 }, { 21, 63, 0 } } },
  { { { 35, 0, 0 }, { 30, 46, 0 } } },
  { { { 35, 0, 1 }, { 22, 62, 0 } } },
  { { { 35, 0, 2 }, { 22, 63, 0 } } },
  { { { 36, 0, 1 }, { 23, 62, 0 } } },
  { { { 36, 0, 0 }, { 31, 47, 0 } } },
  { { { 36, 0, 1 }, { 23, 63, 0 } } },
  { { { 36, 0, 2 }, { 24, 62, 0 } } },
  { { { 37, 0, 1 }, { 24, 63, 0 } } },
  { { { 37, 0, 0 }, { 32, 47, 0 } } },
  { { { 37, 0, 1 }, { 25, 62, 0 } } },
  { { { 37, 0, 2 }, { 25, 63, 0 } } },
  { { { 38, 0, 1 }, { 26, 62, 0 } } },
  { { { 38, 0, 0 }, { 32, 50, 0 } } },
  { { { 38, 0, 1 }, { 26, 63, 0 } } },
  { { { 38, 0, 2 }, { 27, 62, 0 } } },
  { { { 39, 0, 1 }, { 27, 63, 0 } } },
  { { { 39, 0, 0 }, { 32, 53, 0 } } },
  { { { 39, 0, 1 }, { 28, 62, 0 } } },
  { { { 39, 0, 2 }, { 28, 63, 0 } } },
  { { { 40, 0, 1 }, { 29, 62, 0 } } },
  { { { 40, 0, 0 }, { 32, 56, 0 } } },
  { { { 40, 0, 1 }, { 29, 63, 0 } } },
  { { { 40, 0, 2 }, { 30, 62, 0 } } },
  { { { 41, 0, 1 }, { 30, 63, 0 } } },
  { { { 41, 0, 0 }, { 32, 59, 0 } } },
  { { { 41, 0, 1 }, { 31, 62, 0 } } },
  { { { 41, 0, 2 }, { 31, 63, 0 } } },
  { { { 42, 0, 1 }, { 32, 61, 0 } } },
  { { { 42, 0, 0 }, { 32, 62, 0 } } },
  { { { 42, 0, 1 }, { 32, 63, 0 } } },
  { { { 42, 0, 2 }, { 41, 46, 0 } } },
  { { { 43, 0, 1 }, { 33, 62, 0 } } },
  { { { 43, 0, 0 }, { 33, 63, 0 } } },
  { { { 43, 0, 1 }, { 34, 62, 0 } } },
  { { { 43, 0, 2 }, { 42, 47, 0 } } },
  { { { 44, 0, 1 }, { 34, 63, 0 } } },
  { { { 44, 0, 0 }, { 35, 62, 0 } } },
  { { { 44, 0, 1 }, { 35, 63, 0 } } },
  { { { 44, 0, 2 }, { 44, 46, 0 } } },
  { { { 45, 0, 1 }, { 36, 62, 0 } } },
  { { { 45, 0, 0 }, { 36, 63, 0 } } },
  { { { 45, 0, 1 }, { 37, 62, 0 } } },
  { { { 45, 0, 2 }, { 45, 47, 0 } } },
  { { { 46, 0, 1 }, { 37, 63, 0 } } },
  { { { 46, 0, 0 }, { 38, 62, 0 } } },
  { { { 46, 0, 1 }, { 38, 63, 0 } } },
  { { { 46, 0, 2 }, { 47, 46, 0 } } },
  { { { 47, 0, 1 }, { 39, 62, 0 } } },
  { { { 47, 0, 0 }, { 39, 63, 0 } } },
  { { { 47, 0, 1 }, { 40, 62, 0 } } },
  { { { 47, 0, 2 }, { 48, 46, 0 } } },
  { { { 48, 0, 2 }, { 40, 63, 0 } } },
  { { { 48, 0, 1 }, { 41, 62, 0 } } },
  { { { 48, 0, 0 }, { 41, 63, 0 } } },
  { { { 48, 0, 1 }, { 48, 49, 0 } } },
  { { { 48, 0, 2 }, { 42, 62, 0 } } },
  { { { 49, 0, 1 }, { 42, 63, 0 } } },
  { { { 49, 0, 0 }, { 43, 62, 0 } } },
  { { { 49, 0, 1 }, { 48, 52, 0 } } },
  { { { 49, 0, 2 }, { 43, 63, 0 } } },
  { { { 50, 0, 1 }, { 44, 62, 0 } } },
  { { { 50, 0, 0 }, { 44, 63, 0 } } },
  { { { 50, 0, 1 }, { 48, 55, 0 } } },
  { { { 50, 0, 2 }, { 45, 62, 0 } } },
  { { { 51, 0, 1 }, { 45, 63, 0 } } },
  { { { 51, 0, 0 }, { 46, 62, 0 } } },
  { { { 51, 0, 1 }, { 48, 58, 0 } } },
  { { { 51, 0, 2 }, { 46, 63, 0 } } },
  { { { 52, 0, 1 }, { 47, 62, 0 } } },
  { { { 52, 0, 0 }, { 47, 63, 0 } } },
  { { { 52, 0, 1 }, { 48, 61, 0 } } },
  { { { 52, 0, 2 }, { 48, 62, 0 } } },
  { { { 53, 0, 1 }, { 56, 47, 0 } } },
  { { { 53, 0, 0 }, { 48, 63, 0 } } },
  { { { 53, 0, 1 }, { 49, 62, 0 } } },
  { { { 53, 0, 2 }, { 49, 63, 0 } } },
  { { { 54, 0, 1 }, { 58, 46, 0 } } },
  { { { 54, 0, 0 }, { 50, 62, 0 } } },
  { { { 54, 0, 1 }, { 50, 63, 0 } } },
  { { { 54, 0, 2 }, { 51, 62, 0 } } },
  { { { 55, 0, 1 }, { 59, 47, 0 } } },
  { { { 55, 0, 0 }, { 51, 63, 0 } } },
  { { { 55, 0, 1 }, { 52, 62, 0 } } },
  { { { 55, 0, 2 }, { 52, 63, 0 } } },
  { { { 56, 0, 1 }, { 61, 46, 0 } } },
  { { { 56, 0, 0 }, { 53, 62, 0 } } },
  { { { 56, 0, 1 }, { 53, 63, 0 } } },
  { { { 56, 0, 2 }, { 54, 62, 0 } } },
  { { { 57, 0, 1 }, { 62, 47, 0 } } },
  { { { 57, 0, 0 }, { 54, 63, 0 } } },
  { { { 57, 0, 1 }, { 55, 62, 0 } } },
  { { { 57, 0, 2 }, { 55, 63, 0 } } },
  { { { 58, 0, 1 }, { 56, 62, 1 } } },
  { { { 58, 0, 0 }, { 56, 62, 0 } } },
  { { { 58, 0, 1 }, { 56, 63, 0 } } },
  { { { 58, 0, 2 }, { 57, 62, 0 } } },
  { { { 59, 0, 1 }, { 57, 63, 1 } } },
  { { { 59, 0, 0 }, { 57, 63, 0 } } },
  { { { 59, 0, 1 }, { 58, 62, 0 } } },
  { { { 59, 0, 2 }, { 58, 63, 0 } } },
  { { { 60, 0, 1 }, { 59, 62, 1 } } },
  { { { 60, 0, 0 }, { 59, 62, 0 } } },
  { { { 60, 0, 1 }, { 59, 63, 0 } } },
  { { { 60, 0, 2 }, { 60, 62, 0 } } },
  { { { 61, 0, 1 }, { 60, 63, 1 } } },
  { { { 61, 0, 0 }, { 60, 63, 0 } } },
  { { { 61, 0, 1 }, { 61, 62, 0 } } },
  { { { 61, 0, 2 }, { 61, 63, 0 } } },
  { { { 62, 0, 1 }, { 62, 62, 1 } } },
  { { { 62, 0, 0 }, { 62, 62, 0 } } },
  { { { 62, 0, 1 }, { 62, 63, 0 } } },
  { { { 62, 0, 2 }, { 63, 62, 0 } } },
  { { { 63, 0, 1 }, { 63, 63, 1 } } },
  { { { 63, 0, 0 }, { 63, 63, 0 } } }
};
/*
  Per-channel single-colour lookup tables, indexed 0=red, 1=green, 2=blue.
  The 5/6/5 bit split matches the RGB565 endpoint quantization used
  elsewhere in this coder (31/63/31 grids).
*/
static const DDSSingleColourLookup*
  DDS_LOOKUP[] =
{
  DDSLookup_5_4,
  DDSLookup_6_4,
  DDSLookup_5_4
};
/*
Macros
*/
#define C565_r(x) (((x) & 0xF800) >> 11)
#define C565_g(x) (((x) & 0x07E0) >> 5)
#define C565_b(x) ((x) & 0x001F)
#define C565_red(x) ( (C565_r(x) << 3 | C565_r(x) >> 2))
#define C565_green(x) ( (C565_g(x) << 2 | C565_g(x) >> 4))
#define C565_blue(x) ( (C565_b(x) << 3 | C565_b(x) >> 2))
#define DIV2(x) ((x) > 1 ? ((x) >> 1) : 1)
#define FixRange(min, max, steps) \
if (min > max) \
min = max; \
if ((ssize_t) max - min < steps) \
max = MagickMin(min + steps, 255); \
if ((ssize_t) max - min < steps) \
min = MagickMax(0, (ssize_t) max - steps)
#define Dot(left, right) (left.x*right.x) + (left.y*right.y) + (left.z*right.z)
#define VectorInit(vector, value) vector.x = vector.y = vector.z = vector.w \
= value
#define VectorInit3(vector, value) vector.x = vector.y = vector.z = value
#define IsBitMask(mask, r, g, b, a) (mask.r_bitmask == r && mask.g_bitmask == \
g && mask.b_bitmask == b && mask.alpha_bitmask == a)
/*
  Forward declarations
*/
/* Status-returning readers, writers, and compression-fit helpers. */
static MagickBooleanType
  ConstructOrdering(const size_t,const DDSVector4 *,const DDSVector3,
    DDSVector4 *, DDSVector4 *, unsigned char *, size_t),
  ReadDDSInfo(Image *,DDSInfo *),
  ReadDXT1(const ImageInfo *,Image *,DDSInfo *,const MagickBooleanType,
    ExceptionInfo *),
  ReadDXT3(const ImageInfo *,Image *,DDSInfo *,const MagickBooleanType,
    ExceptionInfo *),
  ReadDXT5(const ImageInfo *,Image *,DDSInfo *,const MagickBooleanType,
    ExceptionInfo *),
  ReadUncompressedRGB(const ImageInfo *,Image *,DDSInfo *,
    const MagickBooleanType,ExceptionInfo *),
  ReadUncompressedRGBA(const ImageInfo *,Image *,DDSInfo *,
    const MagickBooleanType,ExceptionInfo *),
  SkipDXTMipmaps(Image *,DDSInfo *,int,ExceptionInfo *),
  SkipRGBMipmaps(Image *,DDSInfo *,int,ExceptionInfo *),
  WriteDDSImage(const ImageInfo *,Image *,ExceptionInfo *),
  WriteMipmaps(Image *,const ImageInfo*,const size_t,const size_t,const size_t,
    const MagickBooleanType,const MagickBooleanType,const MagickBooleanType,
    ExceptionInfo *);

/* Helpers with no status return. */
static void
  RemapIndices(const ssize_t *,const unsigned char *,unsigned char *),
  WriteDDSInfo(Image *,const size_t,const size_t,const size_t),
  WriteFourCC(Image *,const size_t,const MagickBooleanType,
    const MagickBooleanType,ExceptionInfo *),
  WriteImageData(Image *,const size_t,const size_t,const MagickBooleanType,
    const MagickBooleanType,ExceptionInfo *),
  WriteIndices(Image *,const DDSVector3,const DDSVector3,unsigned char *),
  WriteSingleColorFit(Image *,const DDSVector4 *,const ssize_t *),
  WriteUncompressed(Image *,ExceptionInfo *);
static inline void VectorAdd(const DDSVector4 left, const DDSVector4 right,
  DDSVector4 *destination)
{
  /* destination = left + right, component-wise. */
  DDSVector4
    sum;

  sum.x = left.x + right.x;
  sum.y = left.y + right.y;
  sum.z = left.z + right.z;
  sum.w = left.w + right.w;
  *destination = sum;
}
static inline void VectorClamp(DDSVector4 *value)
{
  /* Clamp every component of *value into the unit interval [0,1]. */
  value->x = MagickMax(0.0f,value->x);
  value->x = MagickMin(1.0f,value->x);
  value->y = MagickMax(0.0f,value->y);
  value->y = MagickMin(1.0f,value->y);
  value->z = MagickMax(0.0f,value->z);
  value->z = MagickMin(1.0f,value->z);
  value->w = MagickMax(0.0f,value->w);
  value->w = MagickMin(1.0f,value->w);
}
static inline void VectorClamp3(DDSVector3 *value)
{
  /* Clamp the three components of *value into the unit interval [0,1]. */
  value->x = MagickMax(0.0f,value->x);
  value->x = MagickMin(1.0f,value->x);
  value->y = MagickMax(0.0f,value->y);
  value->y = MagickMin(1.0f,value->y);
  value->z = MagickMax(0.0f,value->z);
  value->z = MagickMin(1.0f,value->z);
}
static inline void VectorCopy43(const DDSVector4 source,
  DDSVector3 *destination)
{
  /* Copy x/y/z into the 3-vector, discarding w. */
  destination->z = source.z;
  destination->y = source.y;
  destination->x = source.x;
}
static inline void VectorCopy44(const DDSVector4 source,
  DDSVector4 *destination)
{
  /* Whole-struct copy: all four components of source land in
     *destination. */
  *destination = source;
}
static inline void VectorNegativeMultiplySubtract(const DDSVector4 a,
  const DDSVector4 b, const DDSVector4 c, DDSVector4 *destination)
{
  /* destination = c - a*b, component-wise (a "negative multiply-subtract"). */
  DDSVector4
    result;

  result.x = c.x - (a.x * b.x);
  result.y = c.y - (a.y * b.y);
  result.z = c.z - (a.z * b.z);
  result.w = c.w - (a.w * b.w);
  *destination = result;
}
static inline void VectorMultiply(const DDSVector4 left,
  const DDSVector4 right, DDSVector4 *destination)
{
  /* destination = left * right, component-wise (Hadamard product). */
  DDSVector4
    product;

  product.x = left.x * right.x;
  product.y = left.y * right.y;
  product.z = left.z * right.z;
  product.w = left.w * right.w;
  *destination = product;
}
static inline void VectorMultiply3(const DDSVector3 left,
  const DDSVector3 right, DDSVector3 *destination)
{
  /* 3-vector component-wise (Hadamard) product. */
  destination->z = left.z * right.z;
  destination->y = left.y * right.y;
  destination->x = left.x * right.x;
}
static inline void VectorMultiplyAdd(const DDSVector4 a, const DDSVector4 b,
  const DDSVector4 c, DDSVector4 *destination)
{
  /* destination = a*b + c, component-wise (a fused multiply-add shape). */
  DDSVector4
    result;

  result.x = (a.x * b.x) + c.x;
  result.y = (a.y * b.y) + c.y;
  result.z = (a.z * b.z) + c.z;
  result.w = (a.w * b.w) + c.w;
  *destination = result;
}
static inline void VectorMultiplyAdd3(const DDSVector3 a, const DDSVector3 b,
  const DDSVector3 c, DDSVector3 *destination)
{
  /* destination = a*b + c for 3-vectors, component-wise. */
  destination->z = (a.z * b.z) + c.z;
  destination->y = (a.y * b.y) + c.y;
  destination->x = (a.x * b.x) + c.x;
}
static inline void VectorReciprocal(const DDSVector4 value,
  DDSVector4 *destination)
{
  /* destination = 1/value, component-wise (no zero guard: callers are
     expected to supply non-zero components). */
  DDSVector4
    inverse;

  inverse.x = 1.0f / value.x;
  inverse.y = 1.0f / value.y;
  inverse.z = 1.0f / value.z;
  inverse.w = 1.0f / value.w;
  *destination = inverse;
}
static inline void VectorSubtract(const DDSVector4 left,
  const DDSVector4 right, DDSVector4 *destination)
{
  /* destination = left - right, component-wise. */
  DDSVector4
    difference;

  difference.x = left.x - right.x;
  difference.y = left.y - right.y;
  difference.z = left.z - right.z;
  difference.w = left.w - right.w;
  *destination = difference;
}
static inline void VectorSubtract3(const DDSVector3 left,
  const DDSVector3 right, DDSVector3 *destination)
{
  /* 3-vector component-wise difference: destination = left - right. */
  destination->z = left.z - right.z;
  destination->y = left.y - right.y;
  destination->x = left.x - right.x;
}
static inline void VectorTruncate(DDSVector4 *value)
{
  /* Round each component toward zero: floor for positives, ceil
     otherwise. */
  if (value->x > 0.0f)
    value->x = floor(value->x);
  else
    value->x = ceil(value->x);
  if (value->y > 0.0f)
    value->y = floor(value->y);
  else
    value->y = ceil(value->y);
  if (value->z > 0.0f)
    value->z = floor(value->z);
  else
    value->z = ceil(value->z);
  if (value->w > 0.0f)
    value->w = floor(value->w);
  else
    value->w = ceil(value->w);
}
static inline void VectorTruncate3(DDSVector3 *value)
{
  /* Round the three components toward zero: floor for positives, ceil
     otherwise. */
  if (value->x > 0.0f)
    value->x = floor(value->x);
  else
    value->x = ceil(value->x);
  if (value->y > 0.0f)
    value->y = floor(value->y);
  else
    value->y = ceil(value->y);
  if (value->z > 0.0f)
    value->z = floor(value->z);
  else
    value->z = ceil(value->z);
}
static void CalculateColors(unsigned short c0, unsigned short c1,
  DDSColors *c, MagickBooleanType ignoreAlpha)
{
  /*
    Expand the two RGB565 endpoint colours into the four-entry DXT block
    palette.  When ignoreAlpha is set or c0 > c1, the block uses the
    opaque four-colour mode (2/3 and 1/3 blends of the endpoints);
    otherwise it uses three-colour mode: a midpoint plus a transparent
    black fourth entry.
  */
  c->a[0] = c->a[1] = c->a[2] = c->a[3] = 0;

  c->r[0] = (unsigned char) C565_red(c0);
  c->g[0] = (unsigned char) C565_green(c0);
  c->b[0] = (unsigned char) C565_blue(c0);

  c->r[1] = (unsigned char) C565_red(c1);
  c->g[1] = (unsigned char) C565_green(c1);
  c->b[1] = (unsigned char) C565_blue(c1);

  if ((ignoreAlpha == MagickFalse) && (c0 <= c1))
    {
      /* Three-colour + transparent mode. */
      c->r[2] = (unsigned char) ((c->r[0] + c->r[1]) / 2);
      c->g[2] = (unsigned char) ((c->g[0] + c->g[1]) / 2);
      c->b[2] = (unsigned char) ((c->b[0] + c->b[1]) / 2);

      c->r[3] = c->g[3] = c->b[3] = 0;
      c->a[3] = 255;
      return;
    }

  /* Opaque four-colour mode. */
  c->r[2] = (unsigned char) ((2 * c->r[0] + c->r[1]) / 3);
  c->g[2] = (unsigned char) ((2 * c->g[0] + c->g[1]) / 3);
  c->b[2] = (unsigned char) ((2 * c->b[0] + c->b[1]) / 3);

  c->r[3] = (unsigned char) ((c->r[0] + 2 * c->r[1]) / 3);
  c->g[3] = (unsigned char) ((c->g[0] + 2 * c->g[1]) / 3);
  c->b[3] = (unsigned char) ((c->b[0] + 2 * c->b[1]) / 3);
}
/*
  CompressAlpha() quantizes 16 alpha samples against an 8-entry codebook
  interpolated between the endpoints [min,max] (DXT5-style alpha coding).
  codes[6]/codes[7] are pre-seeded with the fixed 0/255 pair used by the
  6-interpolant mode; when steps is 7 the interpolation loop overwrites
  them, matching the 8-interpolant mode.

  A sample of -1 marks an ignored texel: it receives index 0 and adds no
  error.  Returns the accumulated squared quantization error.  (The
  unsigned subtraction below may wrap, but squaring makes the result
  equal to the true squared distance modulo the word size, and distances
  here never exceed 255^2.)
*/
static size_t CompressAlpha(const size_t min, const size_t max,
  const size_t steps, const ssize_t *alphas, unsigned char* indices)
{
  size_t
    total;

  ssize_t
    n;

  unsigned char
    codes[8];

  /* Endpoints, the fixed 0/255 pair, then the interpolated ramp. */
  codes[0] = (unsigned char) min;
  codes[1] = (unsigned char) max;
  codes[6] = 0;
  codes[7] = 255;
  for (n = 1; n < (ssize_t) steps; n++)
    codes[n+1] = (unsigned char) (((steps-n)*min + n*max) / steps);

  total = 0;
  for (n = 0; n < 16; n++)
  {
    size_t
      best,
      choice,
      k,
      sample;

    if (alphas[n] == -1)
      {
        indices[n] = 0;
        continue;
      }

    /* Pick the codebook entry with the smallest squared distance. */
    sample = alphas[n];
    best = SIZE_MAX;
    choice = 0;
    for (k = 0; k < 8; k++)
    {
      size_t
        dist;

      dist = sample - (size_t) codes[k];
      dist *= dist;
      if (dist < best)
        {
          best = dist;
          choice = k;
        }
    }
    indices[n] = (unsigned char) choice;
    total += best;
  }
  return total;
}
/*
  CompressClusterFit() searches for DXT endpoint colours by exhaustively
  trying every partition of the (ordered) points into four clusters with
  interpolation weights 0, 1/3, 2/3 and 1.  The partition boundaries are
  (i,j,k); the candidate endpoints for each partition come from a
  closed-form least-squares solve, snapped to the RGB565 grid.  The error
  metric is weighted per channel by `metric'.  Up to eight orderings
  along successively refined axes are tried; iteration stops early when
  an ordering repeats or no improvement was found in a pass.
*/
static void CompressClusterFit(const size_t count,
  const DDSVector4 *points, const ssize_t *map, const DDSVector3 principle,
  const DDSVector4 metric, DDSVector3 *start, DDSVector3* end,
  unsigned char *indices)
{
  DDSVector3
    axis;

  DDSVector4
    grid,
    gridrcp,
    half,
    onethird_onethird2,
    pointsWeights[16],
    two,
    twonineths,
    twothirds_twothirds2,
    xSumwSum;

  float
    bestError = 1e+37f;

  size_t
    bestIteration = 0,
    besti = 0,
    bestj = 0,
    bestk = 0,
    iterationIndex;

  ssize_t
    i;

  unsigned char
    *o,
    order[128],
    unordered[16];

  /* Blend constants: w holds the squared weight used in the error terms. */
  VectorInit(half,0.5f);
  VectorInit(two,2.0f);

  VectorInit(onethird_onethird2,1.0f/3.0f);
  onethird_onethird2.w = 1.0f/9.0f;
  VectorInit(twothirds_twothirds2,2.0f/3.0f);
  twothirds_twothirds2.w = 4.0f/9.0f;
  VectorInit(twonineths,2.0f/9.0f);

  /* RGB565 quantization grid (5/6/5 bits) and its reciprocal. */
  grid.x = 31.0f;
  grid.y = 63.0f;
  grid.z = 31.0f;
  grid.w = 0.0f;

  gridrcp.x = 1.0f/31.0f;
  gridrcp.y = 1.0f/63.0f;
  gridrcp.z = 1.0f/31.0f;
  gridrcp.w = 0.0f;

  xSumwSum.x = 0.0f;
  xSumwSum.y = 0.0f;
  xSumwSum.z = 0.0f;
  xSumwSum.w = 0.0f;

  /* Initial ordering along the principle axis. */
  ConstructOrdering(count,points,principle,pointsWeights,&xSumwSum,order,0);

  for (iterationIndex = 0;;)
  {
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,1) \
    num_threads(GetMagickResourceLimit(ThreadResource))
#endif
  /* Each thread owns one value of the first split point i; best-so-far
     updates are serialized in the critical section below. */
  for (i=0; i < (ssize_t) count; i++)
  {
    DDSVector4
      part0,
      part1,
      part2;

    size_t
      ii,
      j,
      k,
      kmin;

    /* part0 accumulates the weight-0 cluster [0,i). */
    VectorInit(part0,0.0f);
    for(ii=0; ii < (size_t) i; ii++)
      VectorAdd(pointsWeights[ii],part0,&part0);

    VectorInit(part1,0.0f);

    for (j=(size_t) i;;)
    {
      if (j == 0)
        {
          VectorCopy44(pointsWeights[0],&part2);
          kmin = 1;
        }
      else
        {
          VectorInit(part2,0.0f);
          kmin = j;
        }

      for (k=kmin;;)
      {
        DDSVector4
          a,
          alpha2_sum,
          alphax_sum,
          alphabeta_sum,
          b,
          beta2_sum,
          betax_sum,
          e1,
          e2,
          factor,
          part3;

        float
          error;

        /* part3 is the remaining weight-1 cluster. */
        VectorSubtract(xSumwSum,part2,&part3);
        VectorSubtract(part3,part1,&part3);
        VectorSubtract(part3,part0,&part3);

        /* Least-squares solve for endpoints a (start) and b (end). */
        VectorMultiplyAdd(part1,twothirds_twothirds2,part0,&alphax_sum);
        VectorMultiplyAdd(part2,onethird_onethird2,alphax_sum,&alphax_sum);
        VectorInit(alpha2_sum,alphax_sum.w);

        VectorMultiplyAdd(part2,twothirds_twothirds2,part3,&betax_sum);
        VectorMultiplyAdd(part1,onethird_onethird2,betax_sum,&betax_sum);
        VectorInit(beta2_sum,betax_sum.w);

        VectorAdd(part1,part2,&alphabeta_sum);
        VectorInit(alphabeta_sum,alphabeta_sum.w);
        VectorMultiply(twonineths,alphabeta_sum,&alphabeta_sum);

        VectorMultiply(alpha2_sum,beta2_sum,&factor);
        VectorNegativeMultiplySubtract(alphabeta_sum,alphabeta_sum,factor,
          &factor);
        VectorReciprocal(factor,&factor);

        VectorMultiply(alphax_sum,beta2_sum,&a);
        VectorNegativeMultiplySubtract(betax_sum,alphabeta_sum,a,&a);
        VectorMultiply(a,factor,&a);

        VectorMultiply(betax_sum,alpha2_sum,&b);
        VectorNegativeMultiplySubtract(alphax_sum,alphabeta_sum,b,&b);
        VectorMultiply(b,factor,&b);

        /* Snap both endpoints to the RGB565 grid. */
        VectorClamp(&a);
        VectorMultiplyAdd(grid,a,half,&a);
        VectorTruncate(&a);
        VectorMultiply(a,gridrcp,&a);

        VectorClamp(&b);
        VectorMultiplyAdd(grid,b,half,&b);
        VectorTruncate(&b);
        VectorMultiply(b,gridrcp,&b);

        /* Channel-weighted squared error for this partition. */
        VectorMultiply(b,b,&e1);
        VectorMultiply(e1,beta2_sum,&e1);

        VectorMultiply(a,a,&e2);
        VectorMultiplyAdd(e2,alpha2_sum,e1,&e1);

        VectorMultiply(a,b,&e2);
        VectorMultiply(e2,alphabeta_sum,&e2);
        VectorNegativeMultiplySubtract(a,alphax_sum,e2,&e2);
        VectorNegativeMultiplySubtract(b,betax_sum,e2,&e2);

        VectorMultiplyAdd(two,e2,e1,&e2);
        VectorMultiply(e2,metric,&e2);

        error = e2.x + e2.y + e2.z;

        if (error < bestError)
          {
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (DDS_CompressClusterFit)
#endif
            {
              /* Re-check under the lock: another thread may have won. */
              if (error < bestError)
                {
                  VectorCopy43(a,start);
                  VectorCopy43(b,end);
                  bestError = error;
                  besti = i;
                  bestj = j;
                  bestk = k;
                  bestIteration = iterationIndex;
                }
            }
          }

        if (k == count)
          break;

        VectorAdd(pointsWeights[k],part2,&part2);
        k++;
      }

      if (j == count)
        break;

      VectorAdd(pointsWeights[j],part1,&part1);
      j++;
    }
  }

  /* Stop when the pass produced no improvement. */
  if (bestIteration != iterationIndex)
    break;

  iterationIndex++;
  if (iterationIndex == 8)
    break;

  /* Re-order along the axis between the current best endpoints. */
  VectorSubtract3(*end,*start,&axis);
  if (ConstructOrdering(count,points,axis,pointsWeights,&xSumwSum,order,
      iterationIndex) == MagickFalse)
    break;
  }

  /* Translate the winning partition back into per-texel palette indices. */
  o = order + (16*bestIteration);

  for (i=0; i < (ssize_t) besti; i++)
    unordered[o[i]] = 0;
  for (i=besti; i < (ssize_t) bestj; i++)
    unordered[o[i]] = 2;
  for (i=bestj; i < (ssize_t) bestk; i++)
    unordered[o[i]] = 3;
  for (i=bestk; i < (ssize_t) count; i++)
    unordered[o[i]] = 1;

  RemapIndices(map,unordered,indices);
}
/*
  CompressRangeFit() picks DXT endpoints by projecting every point onto
  the principle axis and taking the two extreme points, snapping them to
  the RGB565 grid, then assigning each point to the nearest of the four
  interpolated palette colours under the per-channel `metric' weighting.
*/
static void CompressRangeFit(const size_t count,
  const DDSVector4* points, const ssize_t *map, const DDSVector3 principle,
  const DDSVector4 metric, DDSVector3 *start, DDSVector3 *end,
  unsigned char *indices)
{
  float
    d,
    bestDist,
    max,
    min,
    val;

  DDSVector3
    codes[4],
    grid,
    gridrcp,
    half,
    dist;

  register ssize_t
    i;

  size_t
    bestj,
    j;

  unsigned char
    closest[16];

  VectorInit3(half,0.5f);

  /* RGB565 quantization grid (5/6/5 bits) and its reciprocal. */
  grid.x = 31.0f;
  grid.y = 63.0f;
  grid.z = 31.0f;

  gridrcp.x = 1.0f/31.0f;
  gridrcp.y = 1.0f/63.0f;
  gridrcp.z = 1.0f/31.0f;

  if (count > 0)
    {
      /* Scan for the points with the smallest and largest projection. */
      VectorCopy43(points[0],start);
      VectorCopy43(points[0],end);

      min = max = Dot(points[0],principle);
      for (i=1; i < (ssize_t) count; i++)
      {
        val = Dot(points[i],principle);
        if (val < min)
          {
            VectorCopy43(points[i],start);
            min = val;
          }
        else if (val > max)
          {
            VectorCopy43(points[i],end);
            max = val;
          }
      }
    }

  /* Snap both endpoints to the RGB565 grid. */
  VectorClamp3(start);
  VectorMultiplyAdd3(grid,*start,half,start);
  VectorTruncate3(start);
  VectorMultiply3(*start,gridrcp,start);

  VectorClamp3(end);
  VectorMultiplyAdd3(grid,*end,half,end);
  VectorTruncate3(end);
  VectorMultiply3(*end,gridrcp,end);

  /* The four-colour palette: endpoints plus 2/3 and 1/3 blends. */
  codes[0] = *start;
  codes[1] = *end;
  codes[2].x = (start->x * (2.0f/3.0f)) + (end->x * (1.0f/3.0f));
  codes[2].y = (start->y * (2.0f/3.0f)) + (end->y * (1.0f/3.0f));
  codes[2].z = (start->z * (2.0f/3.0f)) + (end->z * (1.0f/3.0f));
  codes[3].x = (start->x * (1.0f/3.0f)) + (end->x * (2.0f/3.0f));
  codes[3].y = (start->y * (1.0f/3.0f)) + (end->y * (2.0f/3.0f));
  codes[3].z = (start->z * (1.0f/3.0f)) + (end->z * (2.0f/3.0f));

  /* Assign every point to its nearest (metric-weighted) palette colour. */
  for (i=0; i < (ssize_t) count; i++)
  {
    bestDist = 1e+37f;
    bestj = 0;
    for (j=0; j < 4; j++)
    {
      dist.x = (points[i].x - codes[j].x) * metric.x;
      dist.y = (points[i].y - codes[j].y) * metric.y;
      dist.z = (points[i].z - codes[j].z) * metric.z;

      d = Dot(dist,dist);
      if (d < bestDist)
        {
          bestDist = d;
          bestj = j;
        }
    }

    closest[i] = (unsigned char) bestj;
  }

  RemapIndices(map, closest, indices);
}
/*
  ComputeEndPoints() resolves a single-colour block via the per-channel
  lookup tables: for each of the two source-block candidates it sums the
  squared per-channel errors and keeps the candidate with the lower
  total, writing the 565-grid start/end colours and the palette index.
  Despite its name, `maxError' tracks the best (lowest) error found so
  far; seeding it with SIZE_MAX guarantees the first candidate is taken.
*/
static void ComputeEndPoints(const DDSSingleColourLookup *lookup[],
  const unsigned char *color, DDSVector3 *start, DDSVector3 *end,
  unsigned char *index)
{
  register ssize_t
    i;

  size_t
    c,
    maxError = SIZE_MAX;

  for (i=0; i < 2; i++)
  {
    const DDSSourceBlock*
      sources[3];

    size_t
      error = 0;

    for (c=0; c < 3; c++)
    {
      sources[c] = &lookup[c][color[c]].sources[i];
      error += ((size_t) sources[c]->error) * ((size_t) sources[c]->error);
    }

    if (error > maxError)
      continue;

    /* Normalize endpoints by the 5/6/5-bit channel maxima. */
    start->x = (float) sources[0]->start / 31.0f;
    start->y = (float) sources[1]->start / 63.0f;
    start->z = (float) sources[2]->start / 31.0f;

    end->x = (float) sources[0]->end / 31.0f;
    end->y = (float) sources[1]->end / 63.0f;
    end->z = (float) sources[2]->end / 31.0f;

    *index = (unsigned char) (2*i);
    maxError = error;
  }
}
/*
  ComputePrincipleComponent() estimates the dominant eigenvector of a
  symmetric 3x3 covariance matrix (packed as six floats: xx,xy,xz,yy,
  yz,zz) by eight rounds of power iteration, rescaling by the largest
  component after each multiply to keep the vector bounded.
*/
static void ComputePrincipleComponent(const float *covariance,
  DDSVector3 *principle)
{
  DDSVector4
    row0,
    row1,
    row2,
    v;

  register ssize_t
    i;

  /* Unpack the symmetric matrix into three full rows (w unused). */
  row0.x = covariance[0];
  row0.y = covariance[1];
  row0.z = covariance[2];
  row0.w = 0.0f;

  row1.x = covariance[1];
  row1.y = covariance[3];
  row1.z = covariance[4];
  row1.w = 0.0f;

  row2.x = covariance[2];
  row2.y = covariance[4];
  row2.z = covariance[5];
  row2.w = 0.0f;

  VectorInit(v,1.0f);

  for (i=0; i < 8; i++)
  {
    DDSVector4
      w;

    float
      a;

    /* w = M * v, accumulated row by row. */
    w.x = row0.x * v.x;
    w.y = row0.y * v.x;
    w.z = row0.z * v.x;
    w.w = row0.w * v.x;

    w.x = (row1.x * v.y) + w.x;
    w.y = (row1.y * v.y) + w.y;
    w.z = (row1.z * v.y) + w.z;
    w.w = (row1.w * v.y) + w.w;

    w.x = (row2.x * v.z) + w.x;
    w.y = (row2.y * v.z) + w.y;
    w.z = (row2.z * v.z) + w.z;
    w.w = (row2.w * v.z) + w.w;

    /* Normalize by the largest of x/y/z to avoid overflow/underflow. */
    a = (float) PerceptibleReciprocal(MagickMax(w.x,MagickMax(w.y,w.z)));

    v.x = w.x * a;
    v.y = w.y * a;
    v.z = w.z * a;
    v.w = w.w * a;
  }

  VectorCopy43(v,principle);
}
/*
  ComputeWeightedCovariance() computes the w-weighted 3x3 covariance of
  the point cloud about its weighted centroid, storing the symmetric
  result packed as six floats: [xx, xy, xz, yy, yz, zz].
*/
static void ComputeWeightedCovariance(const size_t count,
  const DDSVector4 *points, float *covariance)
{
  DDSVector3
    centroid;

  float
    total;

  size_t
    i;

  /* Weighted centroid; each point's w component is its weight. */
  total = 0.0f;
  VectorInit3(centroid,0.0f);

  for (i=0; i < count; i++)
  {
    total += points[i].w;
    centroid.x += (points[i].x * points[i].w);
    centroid.y += (points[i].y * points[i].w);
    centroid.z += (points[i].z * points[i].w);
  }

  /* Guard against a zero total weight (epsilon is FLT_EPSILON). */
  if( total > 1.192092896e-07F)
    {
      centroid.x /= total;
      centroid.y /= total;
      centroid.z /= total;
    }

  for (i=0; i < 6; i++)
    covariance[i] = 0.0f;

  for (i = 0; i < count; i++)
  {
    DDSVector3
      a,
      b;

    /* a = point - centroid; b = weight * a. */
    a.x = points[i].x - centroid.x;
    a.y = points[i].y - centroid.y;
    a.z = points[i].z - centroid.z;

    b.x = points[i].w * a.x;
    b.y = points[i].w * a.y;
    b.z = points[i].w * a.z;

    covariance[0] += a.x*b.x;
    covariance[1] += a.x*b.y;
    covariance[2] += a.x*b.z;
    covariance[3] += a.y*b.y;
    covariance[4] += a.y*b.z;
    covariance[5] += a.z*b.z;
  }
}
/*
  ConstructOrdering() sorts the points by their projection onto `axis'
  (insertion sort on the dot products), records the permutation in
  order[16*iteration], and rebuilds the weighted points and their sum in
  that order.  Returns MagickFalse if the new ordering duplicates one
  produced by an earlier iteration, signalling the caller to stop.
*/
static MagickBooleanType ConstructOrdering(const size_t count,
  const DDSVector4 *points, const DDSVector3 axis, DDSVector4 *pointsWeights,
  DDSVector4 *xSumwSum, unsigned char *order, size_t iteration)
{
  float
    dps[16],
    f;

  register ssize_t
    i;

  size_t
    j;

  unsigned char
    c,
    *o,
    *p;

  /* This iteration's 16-entry slot in the order table. */
  o = order + (16*iteration);

  for (i=0; i < (ssize_t) count; i++)
  {
    dps[i] = Dot(points[i],axis);
    o[i] = (unsigned char)i;
  }

  /* Insertion sort of dps[], mirroring every swap into o[]. */
  for (i=0; i < (ssize_t) count; i++)
  {
    for (j=i; j > 0 && dps[j] < dps[j - 1]; j--)
    {
      f = dps[j];
      dps[j] = dps[j - 1];
      dps[j - 1] = f;

      c = o[j];
      o[j] = o[j - 1];
      o[j - 1] = c;
    }
  }

  /* Bail out if an earlier iteration already produced this ordering. */
  for (i=0; i < (ssize_t) iteration; i++)
  {
    MagickBooleanType
      same;

    p = order + (16*i);
    same = MagickTrue;

    for (j=0; j < count; j++)
    {
      if (o[j] != p[j])
        {
          same = MagickFalse;
          break;
        }
    }

    if (same != MagickFalse)
      return MagickFalse;
  }

  /* Rebuild the weighted points (and their running sum) in sort order. */
  xSumwSum->x = 0;
  xSumwSum->y = 0;
  xSumwSum->z = 0;
  xSumwSum->w = 0;

  for (i=0; i < (ssize_t) count; i++)
  {
    DDSVector4
      v;

    j = (size_t) o[i];

    v.x = points[j].w * points[j].x;
    v.y = points[j].w * points[j].y;
    v.z = points[j].w * points[j].z;
    v.w = points[j].w * 1.0f;

    VectorCopy44(v,&pointsWeights[i]);
    VectorAdd(*xSumwSum,v,xSumwSum);
  }

  return MagickTrue;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s D D S %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsDDS() returns MagickTrue if the image format type, identified by the
% magick string, is DDS.
%
% The format of the IsDDS method is:
%
% MagickBooleanType IsDDS(const unsigned char *magick,const size_t length)
%
% A description of each parameter follows:
%
% o magick: compare image format pattern against these bytes.
%
% o length: Specifies the length of the magick string.
%
*/
static MagickBooleanType IsDDS(const unsigned char *magick, const size_t length)
{
  /*
    A DDS file begins with the four-byte magic "DDS " (note the trailing
    space).
  */
  if ((length >= 4) && (LocaleNCompare((char *) magick,"DDS ", 4) == 0))
    return(MagickTrue);
  return(MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e a d D D S I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadDDSImage() reads a DirectDraw Surface image file and returns it. It
% allocates the memory necessary for the new Image structure and returns a
% pointer to the new image.
%
% The format of the ReadDDSImage method is:
%
% Image *ReadDDSImage(const ImageInfo *image_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: The image info.
%
% o exception: return any errors or warnings in this structure.
%
*/
static Image *ReadDDSImage(const ImageInfo *image_info,ExceptionInfo *exception)
{
  const char
    *option;

  CompressionType
    compression;

  DDSInfo
    dds_info;

  DDSDecoder
    *decoder;

  Image
    *image;

  MagickBooleanType
    status,
    cubemap,
    volume,
    read_mipmaps;

  PixelTrait
    alpha_trait;

  size_t
    n,
    num_images;

  /*
    Open image file.
  */
  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  cubemap=MagickFalse,
  volume=MagickFalse,
  read_mipmaps=MagickFalse;
  image=AcquireImage(image_info,exception);
  status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
  if (status == MagickFalse)
    {
      image=DestroyImageList(image);
      return((Image *) NULL);
    }
  /*
    Initialize image structure.
  */
  if (ReadDDSInfo(image, &dds_info) != MagickTrue)
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");

  /* Cubemap and volume textures carry several surfaces per file. */
  if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP)
    cubemap = MagickTrue;

  if (dds_info.ddscaps2 & DDSCAPS2_VOLUME && dds_info.depth > 0)
    volume = MagickTrue;

  /* Pixel data begins after the 128-byte magic+header. */
  (void) SeekBlob(image, 128, SEEK_SET);

  /*
    Determine pixel format
  */
  if (dds_info.pixelformat.flags & DDPF_RGB)
    {
      compression = NoCompression;
      if (dds_info.pixelformat.flags & DDPF_ALPHAPIXELS)
        {
          alpha_trait = BlendPixelTrait;
          decoder = ReadUncompressedRGBA;
        }
      else
        {
          alpha_trait = UndefinedPixelTrait;
          decoder = ReadUncompressedRGB;
        }
    }
  else if (dds_info.pixelformat.flags & DDPF_LUMINANCE)
   {
      compression = NoCompression;
      if (dds_info.pixelformat.flags & DDPF_ALPHAPIXELS)
        {
          /* Not sure how to handle this */
          ThrowReaderException(CorruptImageError, "ImageTypeNotSupported");
        }
      else
        {
          alpha_trait = UndefinedPixelTrait;
          decoder = ReadUncompressedRGB;
        }
    }
  else if (dds_info.pixelformat.flags & DDPF_FOURCC)
    {
      switch (dds_info.pixelformat.fourcc)
      {
        case FOURCC_DXT1:
        {
          alpha_trait = UndefinedPixelTrait;
          compression = DXT1Compression;
          decoder = ReadDXT1;
          break;
        }
        case FOURCC_DXT3:
        {
          alpha_trait = BlendPixelTrait;
          compression = DXT3Compression;
          decoder = ReadDXT3;
          break;
        }
        case FOURCC_DXT5:
        {
          alpha_trait = BlendPixelTrait;
          compression = DXT5Compression;
          decoder = ReadDXT5;
          break;
        }
        default:
        {
          /* Unknown FOURCC */
          ThrowReaderException(CorruptImageError, "ImageTypeNotSupported");
        }
      }
    }
  else
    {
      /* Neither compressed nor uncompressed... thus unsupported */
      ThrowReaderException(CorruptImageError, "ImageTypeNotSupported");
    }

  num_images = 1;
  if (cubemap)
    {
      /*
        Determine number of faces defined in the cubemap
      */
      num_images = 0;
      if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_POSITIVEX) num_images++;
      if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_NEGATIVEX) num_images++;
      if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_POSITIVEY) num_images++;
      if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_NEGATIVEY) num_images++;
      if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_POSITIVEZ) num_images++;
      if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_NEGATIVEZ) num_images++;
    }

  if (volume)
    num_images = dds_info.depth;

  /* Sanity-check the surface count against the blob size and the
     configured list-length resource limit. */
  if ((num_images == 0) || (num_images > GetBlobSize(image)))
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");

  if (AcquireMagickResource(ListLengthResource,num_images) == MagickFalse)
    ThrowReaderException(ResourceLimitError,"ListLengthExceedsLimit");

  option=GetImageOption(image_info,"dds:skip-mipmaps");
  if (IsStringFalse(option) != MagickFalse)
    read_mipmaps=MagickTrue;

  for (n = 0; n < num_images; n++)
  {
    if (n != 0)
      {
        /* Start a new image */
        if (EOFBlob(image) != MagickFalse)
          ThrowReaderException(CorruptImageError,"UnexpectedEndOfFile");
        AcquireNextImage(image_info,image,exception);
        if (GetNextImageInList(image) == (Image *) NULL)
          return(DestroyImageList(image));
        image=SyncNextImageInList(image);
      }

    image->alpha_trait=alpha_trait;
    image->compression=compression;
    image->columns=dds_info.width;
    image->rows=dds_info.height;
    image->storage_class=DirectClass;
    image->endian=LSBEndian;
    image->depth=8;
    if (image_info->ping != MagickFalse)
      {
        /* Ping mode: header information only, skip pixel decoding. */
        (void) CloseBlob(image);
        return(GetFirstImageInList(image));
      }
    status=SetImageExtent(image,image->columns,image->rows,exception);
    if (status == MagickFalse)
      return(DestroyImageList(image));
    (void) SetImageBackgroundColor(image,exception);
    /* Decode this surface with the format-specific decoder chosen above. */
    status=(decoder)(image_info,image,&dds_info,read_mipmaps,exception);
    if (status == MagickFalse)
      {
        (void) CloseBlob(image);
        return(GetFirstImageInList(image));
      }
  }
  (void) CloseBlob(image);
  return(GetFirstImageInList(image));
}
/*
  ReadDDSInfo() parses the 124-byte DDS_HEADER (which follows the 4-byte
  magic) and its embedded 32-byte pixel-format block into *dds_info.
  Returns MagickFalse if the structure sizes or required flags do not
  match.  All fields are little-endian DWORDs.
*/
static MagickBooleanType ReadDDSInfo(Image *image, DDSInfo *dds_info)
{
  size_t
    hdr_size,
    required;

  /* Seek to start of header */
  (void) SeekBlob(image, 4, SEEK_SET);

  /* Check header field */
  hdr_size = ReadBlobLSBLong(image);
  if (hdr_size != 124)
    return MagickFalse;

  /* Fill in DDS info struct */
  dds_info->flags = ReadBlobLSBLong(image);

  /* Check required flags */
  required=(size_t) (DDSD_WIDTH | DDSD_HEIGHT | DDSD_PIXELFORMAT);
  if ((dds_info->flags & required) != required)
    return MagickFalse;

  dds_info->height = ReadBlobLSBLong(image);
  dds_info->width = ReadBlobLSBLong(image);
  dds_info->pitchOrLinearSize = ReadBlobLSBLong(image);
  dds_info->depth = ReadBlobLSBLong(image);
  dds_info->mipmapcount = ReadBlobLSBLong(image);

  (void) SeekBlob(image, 44, SEEK_CUR); /* reserved region of 11 DWORDs */

  /* Read pixel format structure */
  hdr_size = ReadBlobLSBLong(image);
  if (hdr_size != 32)
    return MagickFalse;

  dds_info->pixelformat.flags = ReadBlobLSBLong(image);
  dds_info->pixelformat.fourcc = ReadBlobLSBLong(image);
  dds_info->pixelformat.rgb_bitcount = ReadBlobLSBLong(image);
  dds_info->pixelformat.r_bitmask = ReadBlobLSBLong(image);
  dds_info->pixelformat.g_bitmask = ReadBlobLSBLong(image);
  dds_info->pixelformat.b_bitmask = ReadBlobLSBLong(image);
  dds_info->pixelformat.alpha_bitmask = ReadBlobLSBLong(image);

  dds_info->ddscaps1 = ReadBlobLSBLong(image);
  dds_info->ddscaps2 = ReadBlobLSBLong(image);

  (void) SeekBlob(image, 12, SEEK_CUR); /* 3 reserved DWORDs */

  return MagickTrue;
}
/*
  SetDXT1Pixels() writes one decoded 4x4 DXT1 block into the pixel cache
  at (x,y), clipping texels that fall outside the image.  Each texel's
  2-bit code selects one of the four palette colours in `colors'.
  Returns MagickFalse when a texel selects a palette entry with non-zero
  alpha while the image has no alpha trait — the caller then enables
  alpha and re-decodes the block.
*/
static MagickBooleanType SetDXT1Pixels(Image *image,ssize_t x,ssize_t y,
  DDSColors colors,size_t bits,Quantum *q)
{
  register ssize_t
    i;

  ssize_t
    j;

  unsigned char
    code;

  for (j = 0; j < 4; j++)
  {
    for (i = 0; i < 4; i++)
    {
      if ((x + i) < (ssize_t) image->columns &&
          (y + j) < (ssize_t) image->rows)
        {
          /* Two bits per texel, row-major within the block. */
          code=(unsigned char) ((bits >> ((j*4+i)*2)) & 0x3);
          SetPixelRed(image,ScaleCharToQuantum(colors.r[code]),q);
          SetPixelGreen(image,ScaleCharToQuantum(colors.g[code]),q);
          SetPixelBlue(image,ScaleCharToQuantum(colors.b[code]),q);
          SetPixelOpacity(image,ScaleCharToQuantum(colors.a[code]),q);
          if ((colors.a[code] != 0) &&
              (image->alpha_trait == UndefinedPixelTrait))
            return(MagickFalse);
          q+=GetPixelChannels(image);
        }
    }
  }
  return(MagickTrue);
}
/*
  ReadMipmaps() appends each mipmap level as an additional image in the
  list, decoding it with the supplied per-format pixel decoder.  Levels
  are only read when the caps flags mark the surface as a mipmapped
  texture or cube map; dimensions are halved per level (never below 1)
  and reading stops at the 1x1 level or at mipmapcount.
*/
static MagickBooleanType ReadMipmaps(const ImageInfo *image_info,Image *image,
  DDSInfo *dds_info,DDSPixelDecoder decoder,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  /*
    Only skip mipmaps for textures and cube maps
  */
  if (EOFBlob(image) != MagickFalse)
    {
      ThrowFileException(exception,CorruptImageWarning,"UnexpectedEndOfFile",
        image->filename);
      return(MagickFalse);
    }
  status=MagickTrue;
  if (dds_info->ddscaps1 & DDSCAPS_MIPMAP
      && (dds_info->ddscaps1 & DDSCAPS_TEXTURE
          || dds_info->ddscaps2 & DDSCAPS2_CUBEMAP))
    {
      register ssize_t
        i;

      size_t
        h,
        w;

      w=DIV2(dds_info->width);
      h=DIV2(dds_info->height);

      /*
        Mipmapcount includes the main image, so start from one
      */
      for (i = 1; (i < (ssize_t) dds_info->mipmapcount) && w && h; i++)
      {
        AcquireNextImage(image_info,image,exception);
        if (GetNextImageInList(image) == (Image *) NULL)
          return(MagickFalse);
        image=SyncNextImageInList(image);
        status=SetImageExtent(image,w,h,exception);
        if (status == MagickFalse)
          break;
        status=decoder(image,dds_info,exception);
        if (status == MagickFalse)
          break;
        if ((w == 1) && (h == 1))
          break;
        w=DIV2(w);
        h=DIV2(h);
      }
    }
  return(status);
}
/*
  ReadDXT1Pixels() decodes one DXT1 surface: for each 4x4 block it reads
  the two 565 endpoint colours and the 32-bit index word (8 bytes per
  block), expands the palette with CalculateColors(), and writes the
  texels.  If a block turns out to use the transparent three-colour mode
  on an image without alpha, the image alpha is reset to opaque and the
  block is decoded again with alpha enabled.
*/
static MagickBooleanType ReadDXT1Pixels(Image *image,
  DDSInfo *magick_unused(dds_info),ExceptionInfo *exception)
{
  DDSColors
    colors;

  register Quantum
    *q;

  register ssize_t
    x;

  size_t
    bits;

  ssize_t
    y;

  unsigned short
    c0,
    c1;

  magick_unreferenced(dds_info);
  for (y = 0; y < (ssize_t) image->rows; y += 4)
  {
    for (x = 0; x < (ssize_t) image->columns; x += 4)
    {
      /* Get 4x4 patch of pixels to write on */
      q=QueueAuthenticPixels(image,x,y,MagickMin(4,image->columns-x),
        MagickMin(4,image->rows-y),exception);
      if (q == (Quantum *) NULL)
        return(MagickFalse);

      /* Read 8 bytes of data from the image */
      c0=ReadBlobLSBShort(image);
      c1=ReadBlobLSBShort(image);
      bits=ReadBlobLSBLong(image);
      CalculateColors(c0,c1,&colors,MagickFalse);
      if (EOFBlob(image) != MagickFalse)
        return(MagickFalse);

      /* Write the pixels */
      if (SetDXT1Pixels(image,x,y,colors,bits,q) == MagickFalse)
        {
          /* Correct alpha */
          SetImageAlpha(image,QuantumRange,exception);
          q=QueueAuthenticPixels(image,x,y,MagickMin(4,image->columns-x),
            MagickMin(4,image->rows-y),exception);
          if (q != (Quantum *) NULL)
            SetDXT1Pixels(image,x,y,colors,bits,q);
        }
      if (SyncAuthenticPixels(image,exception) == MagickFalse)
        return(MagickFalse);
    }
    if (EOFBlob(image) != MagickFalse)
      return(MagickFalse);
  }
  return(MagickTrue);
}
static MagickBooleanType ReadDXT1(const ImageInfo *image_info,Image *image,
  DDSInfo *dds_info,const MagickBooleanType read_mipmaps,
  ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  /*
    Decode the top-level DXT1 surface, then either decode the mipmap
    chain as additional images or skip past it (8 bytes per block).
  */
  status=ReadDXT1Pixels(image,dds_info,exception);
  if (status == MagickFalse)
    return(MagickFalse);
  if (read_mipmaps == MagickFalse)
    return(SkipDXTMipmaps(image,dds_info,8,exception));
  return(ReadMipmaps(image_info,image,dds_info,ReadDXT1Pixels,exception));
}
/*
  ReadDXT3Pixels() decodes one DXT3 surface: each 4x4 block is 16 bytes
  — 8 bytes of explicit 4-bit alpha values followed by a DXT1-style
  colour block that is always decoded in opaque four-colour mode.
*/
static MagickBooleanType ReadDXT3Pixels(Image *image,
  DDSInfo *magick_unused(dds_info),ExceptionInfo *exception)
{
  DDSColors
    colors;

  register Quantum
    *q;

  register ssize_t
    i,
    x;

  unsigned char
    alpha;

  size_t
    a0,
    a1,
    bits,
    code;

  ssize_t
    j,
    y;

  unsigned short
    c0,
    c1;

  magick_unreferenced(dds_info);
  for (y = 0; y < (ssize_t) image->rows; y += 4)
  {
    for (x = 0; x < (ssize_t) image->columns; x += 4)
    {
      /* Get 4x4 patch of pixels to write on */
      q = QueueAuthenticPixels(image, x, y, MagickMin(4, image->columns - x),
        MagickMin(4, image->rows - y),exception);
      if (q == (Quantum *) NULL)
        return(MagickFalse);

      /* Read alpha values (8 bytes) */
      a0 = ReadBlobLSBLong(image);
      a1 = ReadBlobLSBLong(image);

      /* Read 8 bytes of data from the image */
      c0 = ReadBlobLSBShort(image);
      c1 = ReadBlobLSBShort(image);
      bits = ReadBlobLSBLong(image);

      CalculateColors(c0, c1, &colors, MagickTrue);
      if (EOFBlob(image) != MagickFalse)
        return(MagickFalse);

      /* Write the pixels */
      for (j = 0; j < 4; j++)
      {
        for (i = 0; i < 4; i++)
        {
          if ((x + i) < (ssize_t) image->columns && (y + j) < (ssize_t) image->rows)
            {
              /* Two bits per texel select the palette colour. */
              code = (bits >> ((4*j+i)*2)) & 0x3;
              SetPixelRed(image,ScaleCharToQuantum(colors.r[code]),q);
              SetPixelGreen(image,ScaleCharToQuantum(colors.g[code]),q);
              SetPixelBlue(image,ScaleCharToQuantum(colors.b[code]),q);
              /*
                Extract alpha value: multiply 0..15 by 17 to get range 0..255
              */
              if (j < 2)
                alpha = 17U * (unsigned char) ((a0 >> (4*(4*j+i))) & 0xf);
              else
                alpha = 17U * (unsigned char) ((a1 >> (4*(4*(j-2)+i))) & 0xf);
              SetPixelAlpha(image,ScaleCharToQuantum((unsigned char) alpha),q);
              q+=GetPixelChannels(image);
            }
        }
      }
      if (SyncAuthenticPixels(image,exception) == MagickFalse)
        return(MagickFalse);
    }
    if (EOFBlob(image) != MagickFalse)
      return(MagickFalse);
  }
  return(MagickTrue);
}
static MagickBooleanType ReadDXT3(const ImageInfo *image_info,Image *image,
  DDSInfo *dds_info,const MagickBooleanType read_mipmaps,
  ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  /*
    Decode the top-level DXT3 surface, then either decode the mipmap
    chain as additional images or skip past it (16 bytes per block).
  */
  status=ReadDXT3Pixels(image,dds_info,exception);
  if (status == MagickFalse)
    return(MagickFalse);
  if (read_mipmaps == MagickFalse)
    return(SkipDXTMipmaps(image,dds_info,16,exception));
  return(ReadMipmaps(image_info,image,dds_info,ReadDXT3Pixels,exception));
}
/*
  ReadDXT5Pixels() decodes one DXT5 surface: each 4x4 block is 16 bytes
  — two alpha endpoints, 48 bits of 3-bit alpha codes, then a DXT1-style
  colour block decoded in opaque four-colour mode.  Alpha codes 0/1 pick
  the endpoints; the rest interpolate (8-step mode when a0 > a1, else
  6-step mode with codes 6 and 7 pinned to 0 and 255).
*/
static MagickBooleanType ReadDXT5Pixels(Image *image,
  DDSInfo *magick_unused(dds_info),ExceptionInfo *exception)
{
  DDSColors
    colors;

  MagickSizeType
    alpha_bits;

  register Quantum
    *q;

  register ssize_t
    i,
    x;

  unsigned char
    a0,
    a1;

  size_t
    alpha,
    bits,
    code,
    alpha_code;

  ssize_t
    j,
    y;

  unsigned short
    c0,
    c1;

  magick_unreferenced(dds_info);
  for (y = 0; y < (ssize_t) image->rows; y += 4)
  {
    for (x = 0; x < (ssize_t) image->columns; x += 4)
    {
      /* Get 4x4 patch of pixels to write on */
      q = QueueAuthenticPixels(image, x, y, MagickMin(4, image->columns - x),
        MagickMin(4, image->rows - y),exception);
      if (q == (Quantum *) NULL)
        return(MagickFalse);

      /* Read alpha values (8 bytes): 2 endpoints + 48 bits of codes. */
      a0 = (unsigned char) ReadBlobByte(image);
      a1 = (unsigned char) ReadBlobByte(image);

      alpha_bits = (MagickSizeType)ReadBlobLSBLong(image);
      alpha_bits = alpha_bits | ((MagickSizeType)ReadBlobLSBShort(image) << 32);

      /* Read 8 bytes of data from the image */
      c0 = ReadBlobLSBShort(image);
      c1 = ReadBlobLSBShort(image);
      bits = ReadBlobLSBLong(image);

      CalculateColors(c0, c1, &colors, MagickTrue);
      if (EOFBlob(image) != MagickFalse)
        return(MagickFalse);

      /* Write the pixels */
      for (j = 0; j < 4; j++)
      {
        for (i = 0; i < 4; i++)
        {
          if ((x + i) < (ssize_t) image->columns &&
              (y + j) < (ssize_t) image->rows)
            {
              code = (bits >> ((4*j+i)*2)) & 0x3;
              SetPixelRed(image,ScaleCharToQuantum(colors.r[code]),q);
              SetPixelGreen(image,ScaleCharToQuantum(colors.g[code]),q);
              SetPixelBlue(image,ScaleCharToQuantum(colors.b[code]),q);
              /* Extract alpha value */
              alpha_code = (size_t) (alpha_bits >> (3*(4*j+i))) & 0x7;
              if (alpha_code == 0)
                alpha = a0;
              else if (alpha_code == 1)
                alpha = a1;
              else if (a0 > a1)
                alpha = ((8-alpha_code) * a0 + (alpha_code-1) * a1) / 7;
              else if (alpha_code == 6)
                alpha = 0;
              else if (alpha_code == 7)
                alpha = 255;
              else
                alpha = (((6-alpha_code) * a0 + (alpha_code-1) * a1) / 5);
              SetPixelAlpha(image,ScaleCharToQuantum((unsigned char) alpha),q);
              q+=GetPixelChannels(image);
            }
        }
      }
      if (SyncAuthenticPixels(image,exception) == MagickFalse)
        return(MagickFalse);
    }
    if (EOFBlob(image) != MagickFalse)
      return(MagickFalse);
  }
  return(MagickTrue);
}
static MagickBooleanType ReadDXT5(const ImageInfo *image_info,Image *image,
DDSInfo *dds_info,const MagickBooleanType read_mipmaps,
ExceptionInfo *exception)
{
if (ReadDXT5Pixels(image,dds_info,exception) == MagickFalse)
return(MagickFalse);
if (read_mipmaps != MagickFalse)
return(ReadMipmaps(image_info,image,dds_info,ReadDXT5Pixels,exception));
else
return(SkipDXTMipmaps(image,dds_info,16,exception));
}
/*
  Decode an uncompressed RGB surface row by row.  Supported layouts:
  8-bit luminance, 16-bit RGB565, 24-bit BGR and 32-bit BGRX (the fourth
  byte is read and discarded).
*/
static MagickBooleanType ReadUncompressedRGBPixels(Image *image,
  DDSInfo *dds_info,ExceptionInfo *exception)
{
  register Quantum
    *q;

  ssize_t
    x, y;

  unsigned short
    color;

  for (y = 0; y < (ssize_t) image->rows; y++)
  {
    q = QueueAuthenticPixels(image, 0, y, image->columns, 1,exception);
    if (q == (Quantum *) NULL)
      return(MagickFalse);
    for (x = 0; x < (ssize_t) image->columns; x++)
    {
      if (dds_info->pixelformat.rgb_bitcount == 8)
        SetPixelGray(image,ScaleCharToQuantum(ReadBlobByte(image)),q);
      else if (dds_info->pixelformat.rgb_bitcount == 16)
        {
          /*
            RGB565: isolate each field with shifts, then rescale from
            5/6-bit precision to the 0..255 range.
          */
          color=ReadBlobShort(image);
          SetPixelRed(image,ScaleCharToQuantum((unsigned char)
            (((color >> 11)/31.0)*255)),q);
          SetPixelGreen(image,ScaleCharToQuantum((unsigned char)
            ((((unsigned short)(color << 5) >> 10)/63.0)*255)),q);
          SetPixelBlue(image,ScaleCharToQuantum((unsigned char)
            ((((unsigned short)(color << 11) >> 11)/31.0)*255)),q);
        }
      else
        {
          /* 24/32-bit data is stored in B,G,R(,X) byte order. */
          SetPixelBlue(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
          SetPixelGreen(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
          SetPixelRed(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
          if (dds_info->pixelformat.rgb_bitcount == 32)
            (void) ReadBlobByte(image);
        }
      q+=GetPixelChannels(image);
    }
    if (SyncAuthenticPixels(image,exception) == MagickFalse)
      return(MagickFalse);
    if (EOFBlob(image) != MagickFalse)
      return(MagickFalse);
  }
  return(MagickTrue);
}
/*
  Read an uncompressed RGB DDS surface plus (optionally) its mipmap chain.
  8-bit surfaces are marked grayscale; 16-bit surfaces must carry the
  RGB565 bit masks or the image is rejected.
*/
static MagickBooleanType ReadUncompressedRGB(const ImageInfo *image_info,
  Image *image,DDSInfo *dds_info,const MagickBooleanType read_mipmaps,
  ExceptionInfo *exception)
{
  if (dds_info->pixelformat.rgb_bitcount == 8)
    (void) SetImageType(image,GrayscaleType,exception);
  else if (dds_info->pixelformat.rgb_bitcount == 16 && !IsBitMask(
    dds_info->pixelformat,0xf800,0x07e0,0x001f,0x0000))
    ThrowBinaryException(CorruptImageError,"ImageTypeNotSupported",
      image->filename);
  if (ReadUncompressedRGBPixels(image,dds_info,exception) == MagickFalse)
    return(MagickFalse);
  if (read_mipmaps != MagickFalse)
    return(ReadMipmaps(image_info,image,dds_info,ReadUncompressedRGBPixels,
      exception));
  else
    /*
      NOTE(review): the skip size is hard-coded to 3 bytes/pixel even for
      8-, 16- and 32-bit surfaces, so mipmap skipping may mis-seek for
      those bit depths -- confirm intended behavior.
    */
    return(SkipRGBMipmaps(image,dds_info,3,exception));
}
/*
  Decode an uncompressed RGBA surface row by row.  16-bit sources are
  dispatched on their channel bit masks: 1-bit alpha (A1R5G5B5), 8-bit
  luminance + 8-bit alpha, or 4 bits per channel (A4R4G4B4); any other
  16-bit mask combination is rejected.  Wider sources are read as packed
  B,G,R,A bytes.
*/
static MagickBooleanType ReadUncompressedRGBAPixels(Image *image,
  DDSInfo *dds_info,ExceptionInfo *exception)
{
  register Quantum
    *q;

  ssize_t
    alphaBits,
    x,
    y;

  unsigned short
    color;

  /* alphaBits records which 16-bit layout was recognized (0 = not 16-bit). */
  alphaBits=0;
  if (dds_info->pixelformat.rgb_bitcount == 16)
    {
      if (IsBitMask(dds_info->pixelformat,0x7c00,0x03e0,0x001f,0x8000))
        alphaBits=1;
      else if (IsBitMask(dds_info->pixelformat,0x00ff,0x00ff,0x00ff,0xff00))
        {
          /* Identical RGB masks: treat as luminance + alpha. */
          alphaBits=2;
          (void) SetImageType(image,GrayscaleAlphaType,exception);
        }
      else if (IsBitMask(dds_info->pixelformat,0x0f00,0x00f0,0x000f,0xf000))
        alphaBits=4;
      else
        ThrowBinaryException(CorruptImageError,"ImageTypeNotSupported",
          image->filename);
    }
  for (y = 0; y < (ssize_t) image->rows; y++)
  {
    q = QueueAuthenticPixels(image, 0, y, image->columns, 1,exception);
    if (q == (Quantum *) NULL)
      return(MagickFalse);
    for (x = 0; x < (ssize_t) image->columns; x++)
    {
      if (dds_info->pixelformat.rgb_bitcount == 16)
        {
          color=ReadBlobShort(image);
          if (alphaBits == 1)
            {
              /* A1R5G5B5: top bit is a binary alpha flag. */
              SetPixelAlpha(image,(color & (1 << 15)) ? QuantumRange : 0,q);
              SetPixelRed(image,ScaleCharToQuantum((unsigned char)
                ((((unsigned short)(color << 1) >> 11)/31.0)*255)),q);
              SetPixelGreen(image,ScaleCharToQuantum((unsigned char)
                ((((unsigned short)(color << 6) >> 11)/31.0)*255)),q);
              SetPixelBlue(image,ScaleCharToQuantum((unsigned char)
                ((((unsigned short)(color << 11) >> 11)/31.0)*255)),q);
            }
          else if (alphaBits == 2)
            {
              /* High byte alpha, low byte luminance. */
              SetPixelAlpha(image,ScaleCharToQuantum((unsigned char)
                (color >> 8)),q);
              SetPixelGray(image,ScaleCharToQuantum((unsigned char)color),q);
            }
          else
            {
              /* A4R4G4B4: rescale each 4-bit field to 0..255. */
              SetPixelAlpha(image,ScaleCharToQuantum((unsigned char)
                (((color >> 12)/15.0)*255)),q);
              SetPixelRed(image,ScaleCharToQuantum((unsigned char)
                ((((unsigned short)(color << 4) >> 12)/15.0)*255)),q);
              SetPixelGreen(image,ScaleCharToQuantum((unsigned char)
                ((((unsigned short)(color << 8) >> 12)/15.0)*255)),q);
              SetPixelBlue(image,ScaleCharToQuantum((unsigned char)
                ((((unsigned short)(color << 12) >> 12)/15.0)*255)),q);
            }
        }
      else
        {
          /* 32-bit data is stored in B,G,R,A byte order. */
          SetPixelBlue(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
          SetPixelGreen(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
          SetPixelRed(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
          SetPixelAlpha(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
        }
      q+=GetPixelChannels(image);
    }
    if (SyncAuthenticPixels(image,exception) == MagickFalse)
      return(MagickFalse);
    if (EOFBlob(image) != MagickFalse)
      return(MagickFalse);
  }
  return(MagickTrue);
}
static MagickBooleanType ReadUncompressedRGBA(const ImageInfo *image_info,
  Image *image,DDSInfo *dds_info,const MagickBooleanType read_mipmaps,
  ExceptionInfo *exception)
{
  /* Decode the base level, then consume or skip the mipmap chain. */
  if (ReadUncompressedRGBAPixels(image,dds_info,exception) == MagickFalse)
    return(MagickFalse);
  if (read_mipmaps == MagickFalse)
    return(SkipRGBMipmaps(image,dds_info,4,exception));
  return(ReadMipmaps(image_info,image,dds_info,ReadUncompressedRGBAPixels,
    exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e g i s t e r D D S I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RegisterDDSImage() adds attributes for the DDS image format to
% the list of supported formats. The attributes include the image format
% tag, a method to read and/or write the format, whether the format
% supports the saving of more than one frame to the same file or blob,
% whether the format supports native in-memory I/O, and a brief
% description of the format.
%
% The format of the RegisterDDSImage method is:
%
% RegisterDDSImage(void)
%
*/
ModuleExport size_t RegisterDDSImage(void)
{
  static const char
    *aliases[] = { "DDS", "DXT1", "DXT5" };

  MagickInfo
    *entry;

  ssize_t
    i;

  /*
    Register one entry per recognized format alias; all aliases share the
    same decoder, encoder and magic-detection handlers.
  */
  for (i=0; i < (ssize_t) (sizeof(aliases)/sizeof(aliases[0])); i++)
  {
    entry=AcquireMagickInfo("DDS",aliases[i],"Microsoft DirectDraw Surface");
    entry->decoder=(DecodeImageHandler *) ReadDDSImage;
    entry->encoder=(EncodeImageHandler *) WriteDDSImage;
    entry->magick=(IsImageFormatHandler *) IsDDS;
    entry->flags|=CoderDecoderSeekableStreamFlag;
    (void) RegisterMagickInfo(entry);
  }
  return(MagickImageCoderSignature);
}
static void RemapIndices(const ssize_t *map, const unsigned char *source,
  unsigned char *target)
{
  ssize_t
    i;

  /*
    Gather 16 palette indices through the remap table; unmapped slots
    (map[i] == -1) are assigned the fixed index 3.
  */
  for (i=0; i < 16; i++)
    target[i]=(map[i] == -1) ? (unsigned char) 3 : source[map[i]];
}
/*
Skip the mipmap images for compressed (DXTn) dds files
*/
/*
  Seek past the mipmap chain of a compressed (DXTn) surface without
  decoding it; texel_size is the byte size of one 4x4 compressed block.
*/
static MagickBooleanType SkipDXTMipmaps(Image *image,DDSInfo *dds_info,
  int texel_size,ExceptionInfo *exception)
{
  /*
    Only skip mipmaps for textures and cube maps
  */
  if (EOFBlob(image) != MagickFalse)
    {
      ThrowFileException(exception,CorruptImageWarning,"UnexpectedEndOfFile",
        image->filename);
      return(MagickFalse);
    }
  if (dds_info->ddscaps1 & DDSCAPS_MIPMAP
      && (dds_info->ddscaps1 & DDSCAPS_TEXTURE
      || dds_info->ddscaps2 & DDSCAPS2_CUBEMAP))
    {
      MagickOffsetType
        offset;

      register ssize_t
        i;

      size_t
        h,
        w;

      w=DIV2(dds_info->width);
      h=DIV2(dds_info->height);
      /*
        Mipmapcount includes the main image, so start from one
      */
      for (i = 1; (i < (ssize_t) dds_info->mipmapcount) && w && h; i++)
      {
        /* Each level holds ceil(w/4) x ceil(h/4) compressed blocks. */
        offset=(MagickOffsetType)((w+3)/4)*((h+3)/4)*texel_size;
        if (SeekBlob(image,offset,SEEK_CUR) < 0)
          break;
        w=DIV2(w);
        h=DIV2(h);
        if ((w == 1) && (h == 1))
          break;
      }
    }
  return(MagickTrue);
}
/*
Skip the mipmap images for uncompressed (RGB or RGBA) dds files
*/
/*
  Seek past the mipmap chain of an uncompressed (RGB/RGBA) surface;
  pixel_size is the byte size of one pixel.  Note this raises
  CorruptImageError on EOF where SkipDXTMipmaps raises a warning.
*/
static MagickBooleanType SkipRGBMipmaps(Image *image,DDSInfo *dds_info,
  int pixel_size,ExceptionInfo *exception)
{
  /*
    Only skip mipmaps for textures and cube maps
  */
  if (EOFBlob(image) != MagickFalse)
    {
      ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile",
        image->filename);
      return(MagickFalse);
    }
  if (dds_info->ddscaps1 & DDSCAPS_MIPMAP
      && (dds_info->ddscaps1 & DDSCAPS_TEXTURE
      || dds_info->ddscaps2 & DDSCAPS2_CUBEMAP))
    {
      MagickOffsetType
        offset;

      register ssize_t
        i;

      size_t
        h,
        w;

      w=DIV2(dds_info->width);
      h=DIV2(dds_info->height);
      /*
        Mipmapcount includes the main image, so start from one
      */
      for (i=1; (i < (ssize_t) dds_info->mipmapcount) && w && h; i++)
      {
        /* Each level is w*h raw pixels. */
        offset=(MagickOffsetType)w*h*pixel_size;
        if (SeekBlob(image,offset,SEEK_CUR) < 0)
          break;
        w=DIV2(w);
        h=DIV2(h);
        if ((w == 1) && (h == 1))
          break;
      }
    }
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U n r e g i s t e r D D S I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UnregisterDDSImage() removes format registrations made by the
% DDS module from the list of supported formats.
%
% The format of the UnregisterDDSImage method is:
%
% UnregisterDDSImage(void)
%
*/
ModuleExport void UnregisterDDSImage(void)
{
(void) UnregisterMagickInfo("DDS");
(void) UnregisterMagickInfo("DXT1");
(void) UnregisterMagickInfo("DXT5");
}
/*
  Emit the 8-byte DXT5 alpha block for one 4x4 tile.  Two candidate
  encodings are evaluated: a 5-interpolant coding (with implicit 0/255
  codes) over [min5,max5] and a 7-interpolant coding over [min7,max7];
  the one with the lower total error wins.  Output is two 8-bit endpoints
  followed by 16 3-bit indices (48 bits).
*/
static void WriteAlphas(Image *image, const ssize_t *alphas, size_t min5,
  size_t max5, size_t min7, size_t max7)
{
  register ssize_t
    i;

  size_t
    err5,
    err7,
    j;

  unsigned char
    indices5[16],
    indices7[16];

  /* Quantize against both candidate ranges and accumulate their error. */
  FixRange(min5,max5,5);
  err5 = CompressAlpha(min5,max5,5,alphas,indices5);
  FixRange(min7,max7,7);
  err7 = CompressAlpha(min7,max7,7,alphas,indices7);
  if (err7 < err5)
    {
      /*
        The 7-level coding won: renumber its indices into the shared
        output buffer and store the endpoints in reversed order (the
        endpoint order is what selects the decode mode in DXT5).
      */
      for (i=0; i < 16; i++)
      {
        unsigned char
          index;

        index = indices7[i];
        if( index == 0 )
          indices5[i] = 1;
        else if (index == 1)
          indices5[i] = 0;
        else
          indices5[i] = 9 - index;
      }
      min5 = max7;
      max5 = min7;
    }
  (void) WriteBlobByte(image,(unsigned char) min5);
  (void) WriteBlobByte(image,(unsigned char) max5);
  /* Pack the 16 3-bit indices into two 24-bit groups, emitted LSB first. */
  for(i=0; i < 2; i++)
  {
    size_t
      value = 0;

    for (j=0; j < 8; j++)
    {
      size_t index = (size_t) indices5[j + i*8];
      value |= ( index << 3*j );
    }
    for (j=0; j < 3; j++)
    {
      size_t byte = (value >> 8*j) & 0xff;
      (void) WriteBlobByte(image,(unsigned char) byte);
    }
  }
}
/*
  Compress one 4x4 tile's unique-color set into a DXT color block: derive
  the principal axis of the weighted color cloud, fit endpoints along it
  with the fast range fit (or the slower cluster fit when requested and
  the tile is non-empty), then emit the endpoints and per-texel indices.
*/
static void WriteCompressed(Image *image, const size_t count,
  DDSVector4 *points, const ssize_t *map, const MagickBooleanType clusterFit)
{
  float
    covariance[16];

  DDSVector3
    end,
    principle,
    start;

  DDSVector4
    metric;

  unsigned char
    indices[16];

  /* Unit metric: weight all channels equally. */
  VectorInit(metric,1.0f);
  VectorInit3(start,0.0f);
  VectorInit3(end,0.0f);
  ComputeWeightedCovariance(count,points,covariance);
  ComputePrincipleComponent(covariance,&principle);
  if ((clusterFit == MagickFalse) || (count == 0))
    CompressRangeFit(count,points,map,principle,metric,&start,&end,indices);
  else
    CompressClusterFit(count,points,map,principle,metric,&start,&end,indices);
  WriteIndices(image,start,end,indices);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W r i t e D D S I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WriteDDSImage() writes a DirectDraw Surface image file in the DXT1, DXT5,
% or uncompressed RGB(A) format.
%
% The format of the WriteDDSImage method is:
%
% MagickBooleanType WriteDDSImage(const ImageInfo *image_info,Image *image)
%
% A description of each parameter follows.
%
% o image_info: the image info.
%
% o image: The image.
%
*/
static MagickBooleanType WriteDDSImage(const ImageInfo *image_info,
  Image *image, ExceptionInfo *exception)
{
  const char
    *option;

  size_t
    compression,
    columns,
    maxMipmaps,
    mipmaps,
    pixelFormat,
    rows;

  MagickBooleanType
    clusterFit,
    fromlist,
    status,
    weightByAlpha;

  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception);
  if (status == MagickFalse)
    return(status);
  (void) TransformImageColorspace(image,sRGBColorspace,exception);
  /*
    Default to DXT5; use DXT1 when the image has no alpha channel or the
    caller asked for it via the magick name or "dds:compression";
    "dds:compression=none" selects uncompressed RGB output.
  */
  pixelFormat=DDPF_FOURCC;
  compression=FOURCC_DXT5;
  if (image->alpha_trait == UndefinedPixelTrait)
    compression=FOURCC_DXT1;
  if (LocaleCompare(image_info->magick,"dxt1") == 0)
    compression=FOURCC_DXT1;
  option=GetImageOption(image_info,"dds:compression");
  if (option != (char *) NULL)
    {
      if (LocaleCompare(option,"dxt1") == 0)
        compression=FOURCC_DXT1;
      if (LocaleCompare(option,"none") == 0)
        pixelFormat=DDPF_RGB;
    }
  /*
    Cluster fit (and alpha weighting, DXT5 only) apply only to compressed
    output.
  */
  clusterFit=MagickFalse;
  weightByAlpha=MagickFalse;
  if (pixelFormat == DDPF_FOURCC)
    {
      option=GetImageOption(image_info,"dds:cluster-fit");
      if (IsStringTrue(option) != MagickFalse)
        {
          clusterFit=MagickTrue;
          if (compression != FOURCC_DXT1)
            {
              option=GetImageOption(image_info,"dds:weight-by-alpha");
              if (IsStringTrue(option) != MagickFalse)
                weightByAlpha=MagickTrue;
            }
        }
    }
  /*
    "dds:mipmaps=fromlist" counts the remaining images in the list as
    pre-built mipmap levels; otherwise a level count is derived for
    power-of-two dimensions, optionally capped by a numeric
    "dds:mipmaps" value.
  */
  mipmaps=0;
  fromlist=MagickFalse;
  option=GetImageOption(image_info,"dds:mipmaps");
  if (option != (char *) NULL)
    {
      if (LocaleNCompare(option,"fromlist",8) == 0)
        {
          Image
            *next;

          fromlist=MagickTrue;
          next=image->next;
          while(next != (Image *) NULL)
          {
            mipmaps++;
            next=next->next;
          }
        }
    }
  if ((mipmaps == 0) &&
      ((image->columns & (image->columns - 1)) == 0) &&
      ((image->rows & (image->rows - 1)) == 0))
    {
      maxMipmaps=SIZE_MAX;
      if (option != (char *) NULL)
        maxMipmaps=StringToUnsignedLong(option);
      if (maxMipmaps != 0)
        {
          columns=image->columns;
          rows=image->rows;
          while ((columns != 1 || rows != 1) && mipmaps != maxMipmaps)
          {
            columns=DIV2(columns);
            rows=DIV2(rows);
            mipmaps++;
          }
        }
    }
  WriteDDSInfo(image,pixelFormat,compression,mipmaps);
  WriteImageData(image,pixelFormat,compression,clusterFit,weightByAlpha,
    exception);
  if ((mipmaps > 0) && (WriteMipmaps(image,image_info,pixelFormat,compression,
      mipmaps,fromlist,clusterFit,weightByAlpha,exception) == MagickFalse))
    return(MagickFalse);
  (void) CloseBlob(image);
  return(MagickTrue);
}
/*
  Emit the 128-byte DDS header: the "DDS " magic plus the 124-byte header
  structure (flags, dimensions, pitch or linear size, mipmap count, pixel
  format and capability bits), with "IMAGEMAGICK" stamped into the
  44-byte reserved area.
*/
static void WriteDDSInfo(Image *image, const size_t pixelFormat,
  const size_t compression, const size_t mipmaps)
{
  char
    software[MagickPathExtent];

  register ssize_t
    i;

  unsigned int
    format,
    caps,
    flags;

  flags=(unsigned int) (DDSD_CAPS | DDSD_WIDTH | DDSD_HEIGHT |
    DDSD_PIXELFORMAT);
  caps=(unsigned int) DDSCAPS_TEXTURE;
  format=(unsigned int) pixelFormat;
  /* Compressed surfaces report a linear size, uncompressed a row pitch. */
  if (format == DDPF_FOURCC)
    flags=flags | DDSD_LINEARSIZE;
  else
    flags=flags | DDSD_PITCH;
  if (mipmaps > 0)
    {
      flags=flags | (unsigned int) DDSD_MIPMAPCOUNT;
      caps=caps | (unsigned int) (DDSCAPS_MIPMAP | DDSCAPS_COMPLEX);
    }
  if (format != DDPF_FOURCC && image->alpha_trait != UndefinedPixelTrait)
    format=format | DDPF_ALPHAPIXELS;
  (void) WriteBlob(image,4,(unsigned char *) "DDS ");
  (void) WriteBlobLSBLong(image,124); /* DDS_HEADER structure size */
  (void) WriteBlobLSBLong(image,flags);
  (void) WriteBlobLSBLong(image,(unsigned int) image->rows);
  (void) WriteBlobLSBLong(image,(unsigned int) image->columns);
  if (pixelFormat == DDPF_FOURCC)
    {
      /* Compressed DDS requires linear compressed size of first image */
      if (compression == FOURCC_DXT1)
        (void) WriteBlobLSBLong(image,(unsigned int) (MagickMax(1,
          (image->columns+3)/4)*MagickMax(1,(image->rows+3)/4)*8));
      else /* DXT5 */
        (void) WriteBlobLSBLong(image,(unsigned int) (MagickMax(1,
          (image->columns+3)/4)*MagickMax(1,(image->rows+3)/4)*16));
    }
  else
    {
      /* Uncompressed DDS requires byte pitch of first image */
      if (image->alpha_trait != UndefinedPixelTrait)
        (void) WriteBlobLSBLong(image,(unsigned int) (image->columns * 4));
      else
        (void) WriteBlobLSBLong(image,(unsigned int) (image->columns * 3));
    }
  (void) WriteBlobLSBLong(image,0x00); /* depth (unused, 2D surfaces) */
  (void) WriteBlobLSBLong(image,(unsigned int) mipmaps+1);
  /* The 44 reserved bytes carry a software tag. */
  (void) memset(software,0,sizeof(software));
  (void) CopyMagickString(software,"IMAGEMAGICK",MagickPathExtent);
  (void) WriteBlob(image,44,(unsigned char *) software);
  (void) WriteBlobLSBLong(image,32); /* DDS_PIXELFORMAT structure size */
  (void) WriteBlobLSBLong(image,format);
  if (pixelFormat == DDPF_FOURCC)
    {
      (void) WriteBlobLSBLong(image,(unsigned int) compression);
      for(i=0;i < 5;i++) /* bitcount / masks are unused for FourCC data */
        (void) WriteBlobLSBLong(image,0x00);
    }
  else
    {
      (void) WriteBlobLSBLong(image,0x00); /* no FourCC code */
      if (image->alpha_trait != UndefinedPixelTrait)
        {
          /* 32-bit A8R8G8B8 channel masks */
          (void) WriteBlobLSBLong(image,32);
          (void) WriteBlobLSBLong(image,0xff0000);
          (void) WriteBlobLSBLong(image,0xff00);
          (void) WriteBlobLSBLong(image,0xff);
          (void) WriteBlobLSBLong(image,0xff000000);
        }
      else
        {
          /* 24-bit R8G8B8 channel masks */
          (void) WriteBlobLSBLong(image,24);
          (void) WriteBlobLSBLong(image,0xff0000);
          (void) WriteBlobLSBLong(image,0xff00);
          (void) WriteBlobLSBLong(image,0xff);
          (void) WriteBlobLSBLong(image,0x00);
        }
    }
  (void) WriteBlobLSBLong(image,caps);
  for(i=0;i < 4;i++) /* ddscaps2 + reserved region */
    (void) WriteBlobLSBLong(image,0x00);
}
/*
  Compress the image as DXT blocks one 4x4 tile at a time: collect the
  tile's unique colors (per-color weights in .w, optionally biased by
  alpha), track the DXT5 alpha extremes, then write the alpha block
  (DXT5 only) followed by either a single-color fit or a full compressed
  color block.
*/
static void WriteFourCC(Image *image, const size_t compression,
  const MagickBooleanType clusterFit, const MagickBooleanType weightByAlpha,
  ExceptionInfo *exception)
{
  register ssize_t
    x;

  ssize_t
    i,
    y,
    bx,
    by;

  register const Quantum
    *p;

  for (y=0; y < (ssize_t) image->rows; y+=4)
  {
    for (x=0; x < (ssize_t) image->columns; x+=4)
    {
      MagickBooleanType
        match;

      DDSVector4
        point,
        points[16];

      size_t
        count = 0,
        max5 = 0,
        max7 = 0,
        min5 = 255,
        min7 = 255,
        columns = 4,
        rows = 4;

      ssize_t
        alphas[16],
        map[16];

      unsigned char
        alpha;

      /* Clip the tile at the right/bottom image edges. */
      if (x + columns >= image->columns)
        columns = image->columns - x;
      if (y + rows >= image->rows)
        rows = image->rows - y;
      p=GetVirtualPixels(image,x,y,columns,rows,exception);
      if (p == (const Quantum *) NULL)
        break;
      /* -1 marks texels outside the clipped tile. */
      for (i=0; i<16; i++)
      {
        map[i] = -1;
        alphas[i] = -1;
      }
      for (by=0; by < (ssize_t) rows; by++)
      {
        for (bx=0; bx < (ssize_t) columns; bx++)
        {
          if (compression == FOURCC_DXT5)
            alpha = ScaleQuantumToChar(GetPixelAlpha(image,p));
          else
            alpha = 255;
          if (compression == FOURCC_DXT5)
            {
              /*
                Track alpha extremes for both candidate encodings:
                min7/max7 over all values, min5/max5 excluding the
                values covered by the implicit 0 and 255 codes.
              */
              if (alpha < min7)
                min7 = alpha;
              if (alpha > max7)
                max7 = alpha;
              if (alpha != 0 && alpha < min5)
                min5 = alpha;
              if (alpha != 255 && alpha > max5)
                max5 = alpha;
            }
          alphas[4*by + bx] = (size_t)alpha;
          /* Normalize the color to [0,1]; .w starts as this texel's weight. */
          point.x = (float)ScaleQuantumToChar(GetPixelRed(image,p)) / 255.0f;
          point.y = (float)ScaleQuantumToChar(GetPixelGreen(image,p)) / 255.0f;
          point.z = (float)ScaleQuantumToChar(GetPixelBlue(image,p)) / 255.0f;
          point.w = weightByAlpha ? (float)(alpha + 1) / 256.0f : 1.0f;
          p+=GetPixelChannels(image);
          /* Merge with an already-seen identical color where possible. */
          match = MagickFalse;
          for (i=0; i < (ssize_t) count; i++)
          {
            if ((points[i].x == point.x) &&
                (points[i].y == point.y) &&
                (points[i].z == point.z) &&
                (alpha >= 128 || compression == FOURCC_DXT5))
              {
                points[i].w += point.w;
                map[4*by + bx] = i;
                match = MagickTrue;
                break;
              }
          }
          if (match != MagickFalse)
            continue;
          points[count].x = point.x;
          points[count].y = point.y;
          points[count].z = point.z;
          points[count].w = point.w;
          map[4*by + bx] = count;
          count++;
        }
      }
      /* Take the square root of the accumulated weights before fitting. */
      for (i=0; i < (ssize_t) count; i++)
        points[i].w = sqrt(points[i].w);
      if (compression == FOURCC_DXT5)
        WriteAlphas(image,alphas,min5,max5,min7,max7);
      if (count == 1)
        WriteSingleColorFit(image,points,map);
      else
        WriteCompressed(image,count,points,map,clusterFit);
    }
  }
}
static void WriteImageData(Image *image, const size_t pixelFormat,
  const size_t compression,const MagickBooleanType clusterFit,
  const MagickBooleanType weightByAlpha, ExceptionInfo *exception)
{
  /* Dispatch on the pixel format chosen in WriteDDSImage(). */
  if (pixelFormat != DDPF_FOURCC)
    {
      WriteUncompressed(image,exception);
      return;
    }
  WriteFourCC(image,compression,clusterFit,weightByAlpha,exception);
}
/*
  Round `value` to the nearest integer and clamp the result to [0,limit].
*/
static inline size_t ClampToLimit(const float value, const size_t limit)
{
  size_t
    result;

  /*
    Bug fix: reject negative inputs before the unsigned conversion.  The
    old code cast first and then tested `result < 0.0f`, which is never
    true for an unsigned `result`; a negative float wrapped to a huge
    size_t and was clamped to `limit` instead of 0.
  */
  if (value < 0.0f)
    return(0);
  result=(size_t) (value+0.5f);
  if (result > limit)
    return(limit);
  return(result);
}
static inline size_t ColorTo565(const DDSVector3 point)
{
  /* Quantize a unit-range RGB triple and pack it as a 5:6:5 word. */
  const size_t
    r5 = ClampToLimit(31.0f*point.x,31),
    g6 = ClampToLimit(63.0f*point.y,63),
    b5 = ClampToLimit(31.0f*point.z,31);

  return((r5 << 11) | (g6 << 5) | b5);
}
/*
  Emit the 8-byte DXT color block: two 565 endpoints (little-endian)
  followed by 16 2-bit indices packed four per byte.  Endpoints are
  always stored with a >= b: when they must be swapped the indices are
  adjusted (0<->1, 2<->3 via ^0x1) to keep the same palette selection,
  and equal endpoints collapse every index to 0.
*/
static void WriteIndices(Image *image, const DDSVector3 start,
  const DDSVector3 end, unsigned char *indices)
{
  register ssize_t
    i;

  size_t
    a,
    b;

  unsigned char
    remapped[16];

  const unsigned char
    *ind;

  a = ColorTo565(start);
  b = ColorTo565(end);
  for (i=0; i<16; i++)
  {
    if( a < b )
      remapped[i] = (indices[i] ^ 0x1) & 0x3;
    else if( a == b )
      remapped[i] = 0;
    else
      remapped[i] = indices[i];
  }
  if( a < b )
    Swap(a,b);
  /* Little-endian 565 endpoints. */
  (void) WriteBlobByte(image,(unsigned char) (a & 0xff));
  (void) WriteBlobByte(image,(unsigned char) (a >> 8));
  (void) WriteBlobByte(image,(unsigned char) (b & 0xff));
  (void) WriteBlobByte(image,(unsigned char) (b >> 8));
  /* Four indices per byte, LSB first. */
  for (i=0; i<4; i++)
  {
    ind = remapped + 4*i;
    (void) WriteBlobByte(image,ind[0] | (ind[1] << 2) | (ind[2] << 4) |
      (ind[3] << 6));
  }
}
/*
  Write the mipmap chain that follows the base image.  Levels either come
  from the caller-supplied image list (fromlist != MagickFalse) or are
  generated by successively halving the base image with a triangle
  filter; with "dds:fast-mipmaps" each generated level is resized from
  the previous level instead of from the base image.  Returns MagickFalse
  on resize failure or when a listed level has unexpected dimensions.
*/
static MagickBooleanType WriteMipmaps(Image *image,const ImageInfo *image_info,
  const size_t pixelFormat,const size_t compression,const size_t mipmaps,
  const MagickBooleanType fromlist,const MagickBooleanType clusterFit,
  const MagickBooleanType weightByAlpha,ExceptionInfo *exception)
{
  const char
    *option;

  Image
    *mipmap_image,
    *resize_image;

  MagickBooleanType
    fast_mipmaps,
    status;

  register ssize_t
    i;

  size_t
    columns,
    rows;

  columns=DIV2(image->columns);
  rows=DIV2(image->rows);
  option=GetImageOption(image_info,"dds:fast-mipmaps");
  fast_mipmaps=IsStringTrue(option);
  mipmap_image=image;
  resize_image=image;
  status=MagickTrue;
  for (i=0; i < (ssize_t) mipmaps; i++)
  {
    if (fromlist == MagickFalse)
      {
        mipmap_image=ResizeImage(resize_image,columns,rows,TriangleFilter,
          exception);
        if (mipmap_image == (Image *) NULL)
          {
            status=MagickFalse;
            break;
          }
      }
    else
      {
        /* Take the next level from the caller-supplied image list. */
        mipmap_image=mipmap_image->next;
        if ((mipmap_image->columns != columns) || (mipmap_image->rows != rows))
          ThrowBinaryException(CoderError,"ImageColumnOrRowSizeIsNotSupported",
            image->filename);
      }
    /* Redirect the level's blob to the base image's output stream. */
    DestroyBlob(mipmap_image);
    mipmap_image->blob=ReferenceBlob(image->blob);
    /*
      Bug fix: clusterFit and weightByAlpha were previously passed in the
      wrong order -- WriteImageData() takes clusterFit before
      weightByAlpha -- so mipmap levels were compressed with the two
      options swapped (both are MagickBooleanType, so it compiled
      silently).
    */
    WriteImageData(mipmap_image,pixelFormat,compression,clusterFit,
      weightByAlpha,exception);
    if (fromlist == MagickFalse)
      {
        if (fast_mipmaps == MagickFalse)
          mipmap_image=DestroyImage(mipmap_image);
        else
          {
            /* Keep this level as the source for the next resize. */
            if (resize_image != image)
              resize_image=DestroyImage(resize_image);
            resize_image=mipmap_image;
          }
      }
    columns=DIV2(columns);
    rows=DIV2(rows);
  }
  if (resize_image != image)
    resize_image=DestroyImage(resize_image);
  return(status);
}
static void WriteSingleColorFit(Image *image, const DDSVector4 *points,
  const ssize_t *map)
{
  DDSVector3
    end,
    start;

  ssize_t
    n;

  unsigned char
    fill[16],
    index,
    quantized[3],
    remapped[16];

  /* Quantize the tile's single color to 8 bits per channel. */
  quantized[0]=(unsigned char) ClampToLimit(255.0f*points->x,255);
  quantized[1]=(unsigned char) ClampToLimit(255.0f*points->y,255);
  quantized[2]=(unsigned char) ClampToLimit(255.0f*points->z,255);
  /* Look up the best 565 endpoints and palette index for that color. */
  index=0;
  ComputeEndPoints(DDS_LOOKUP,quantized,&start,&end,&index);
  /* Every texel uses the same palette entry; remap through the tile map. */
  for (n=0; n < 16; n++)
    fill[n]=index;
  RemapIndices(map,fill,remapped);
  WriteIndices(image,start,end,remapped);
}
static void WriteUncompressed(Image *image, ExceptionInfo *exception)
{
  register const Quantum
    *q;

  ssize_t
    remaining,
    row;

  /* Emit rows top-to-bottom as packed little-endian BGR(A) bytes. */
  for (row=0; row < (ssize_t) image->rows; row++)
  {
    q=GetVirtualPixels(image,0,row,image->columns,1,exception);
    if (q == (const Quantum *) NULL)
      break;
    for (remaining=(ssize_t) image->columns; remaining > 0; remaining--)
    {
      (void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelBlue(image,q)));
      (void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelGreen(image,q)));
      (void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelRed(image,q)));
      if (image->alpha_trait != UndefinedPixelTrait)
        (void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelAlpha(image,q)));
      q+=GetPixelChannels(image);
    }
  }
}
|
kiss_fft.c | /*
* Copyright (c) 2003-2010, Mark Borgerding. All rights reserved.
* This file is part of KISS FFT - https://github.com/mborgerding/kissfft
*
* SPDX-License-Identifier: BSD-3-Clause
* See COPYING file for more information.
*/
#include "_kiss_fft_guts.h"
/* The guts header contains all the multiplication and addition macros that are defined for
fixed or floating point complex numbers. It also declares the kf_ internal functions.
*/
/*
  Radix-2 butterfly: recombine two interleaved sub-DFTs of length m held
  in Fout[0..m) and Fout[m..2m), using twiddle tw1 stepped by fstride.
*/
static void
kf_bfly2(kiss_fft_cpx* Fout, const size_t fstride, const kiss_fft_cfg st, int m) {
    kiss_fft_cpx* Fout2;
    kiss_fft_cpx* tw1 = st->twiddles;
    kiss_fft_cpx t;
    Fout2 = Fout + m;
    do {
        /* Fixed-point builds pre-scale by the radix to avoid overflow
           (no-op for floating point). */
        C_FIXDIV(*Fout, 2);
        C_FIXDIV(*Fout2, 2);
        /* t = second-half term rotated by the twiddle. */
        C_MUL(t, *Fout2, *tw1);
        tw1 += fstride;
        C_SUB(*Fout2, *Fout, t); /* output pair: Fout - t ... */
        C_ADDTO(*Fout, t);       /* ... and Fout + t */
        ++Fout2;
        ++Fout;
    } while (--m);
}
/*
  Radix-4 butterfly: recombine four interleaved sub-DFTs of length m.
  The forward and inverse transforms differ only in the sign of the
  imaginary rotation applied to the scratch[4] term.
*/
static void
kf_bfly4(kiss_fft_cpx* Fout, const size_t fstride, const kiss_fft_cfg st, const size_t m) {
    kiss_fft_cpx *tw1, *tw2, *tw3;
    kiss_fft_cpx scratch[6];
    size_t k = m;
    const size_t m2 = 2 * m;
    const size_t m3 = 3 * m;
    tw3 = tw2 = tw1 = st->twiddles;
    do {
        /* Fixed-point builds pre-scale by the radix to avoid overflow. */
        C_FIXDIV(*Fout, 4);
        C_FIXDIV(Fout[m], 4);
        C_FIXDIV(Fout[m2], 4);
        C_FIXDIV(Fout[m3], 4);
        /* Rotate the three upper sub-terms by their twiddles. */
        C_MUL(scratch[0], Fout[m], *tw1);
        C_MUL(scratch[1], Fout[m2], *tw2);
        C_MUL(scratch[2], Fout[m3], *tw3);
        C_SUB(scratch[5], *Fout, scratch[1]);
        C_ADDTO(*Fout, scratch[1]);
        C_ADD(scratch[3], scratch[0], scratch[2]);
        C_SUB(scratch[4], scratch[0], scratch[2]);
        C_SUB(Fout[m2], *Fout, scratch[3]);
        /* Twiddle k for tw1, 2k for tw2, 3k for tw3. */
        tw1 += fstride;
        tw2 += fstride * 2;
        tw3 += fstride * 3;
        C_ADDTO(*Fout, scratch[3]);
        if (st->inverse) {
            Fout[m].r = scratch[5].r - scratch[4].i;
            Fout[m].i = scratch[5].i + scratch[4].r;
            Fout[m3].r = scratch[5].r + scratch[4].i;
            Fout[m3].i = scratch[5].i - scratch[4].r;
        } else {
            Fout[m].r = scratch[5].r + scratch[4].i;
            Fout[m].i = scratch[5].i - scratch[4].r;
            Fout[m3].r = scratch[5].r - scratch[4].i;
            Fout[m3].i = scratch[5].i + scratch[4].r;
        }
        ++Fout;
    } while (--k);
}
/*
  Radix-3 butterfly: recombine three interleaved sub-DFTs of length m.
  epi3 is the twiddle one third of the way around the table; only its
  imaginary part is used (as the rotation scalar for scratch[0]).
*/
static void
kf_bfly3(kiss_fft_cpx* Fout, const size_t fstride, const kiss_fft_cfg st, size_t m) {
    size_t k = m;
    const size_t m2 = 2 * m;
    kiss_fft_cpx *tw1, *tw2;
    kiss_fft_cpx scratch[5];
    kiss_fft_cpx epi3;
    epi3 = st->twiddles[fstride * m];
    tw1 = tw2 = st->twiddles;
    do {
        /* Fixed-point builds pre-scale by the radix to avoid overflow. */
        C_FIXDIV(*Fout, 3);
        C_FIXDIV(Fout[m], 3);
        C_FIXDIV(Fout[m2], 3);
        C_MUL(scratch[1], Fout[m], *tw1);
        C_MUL(scratch[2], Fout[m2], *tw2);
        /* Sum and difference of the two rotated sub-terms. */
        C_ADD(scratch[3], scratch[1], scratch[2]);
        C_SUB(scratch[0], scratch[1], scratch[2]);
        tw1 += fstride;
        tw2 += fstride * 2;
        Fout[m].r = Fout->r - HALF_OF(scratch[3].r);
        Fout[m].i = Fout->i - HALF_OF(scratch[3].i);
        C_MULBYSCALAR(scratch[0], epi3.i);
        C_ADDTO(*Fout, scratch[3]);
        /* Split outputs 1 and 2 by the +/-j rotated difference term. */
        Fout[m2].r = Fout[m].r + scratch[0].i;
        Fout[m2].i = Fout[m].i - scratch[0].r;
        Fout[m].r -= scratch[0].i;
        Fout[m].i += scratch[0].r;
        ++Fout;
    } while (--k);
}
/*
  Radix-5 butterfly: recombine five interleaved sub-DFTs of length m.
  ya and yb are the twiddles 1/5 and 2/5 of the way around the table;
  their real and imaginary parts drive the symmetric combinations below.
*/
static void
kf_bfly5(kiss_fft_cpx* Fout, const size_t fstride, const kiss_fft_cfg st, int m) {
    kiss_fft_cpx *Fout0, *Fout1, *Fout2, *Fout3, *Fout4;
    int u;
    kiss_fft_cpx scratch[13];
    kiss_fft_cpx* twiddles = st->twiddles;
    kiss_fft_cpx* tw;
    kiss_fft_cpx ya, yb;
    ya = twiddles[fstride * m];
    yb = twiddles[fstride * 2 * m];
    /* One running pointer per output group. */
    Fout0 = Fout;
    Fout1 = Fout0 + m;
    Fout2 = Fout0 + 2 * m;
    Fout3 = Fout0 + 3 * m;
    Fout4 = Fout0 + 4 * m;
    tw = st->twiddles;
    for (u = 0; u < m; ++u) {
        /* Fixed-point builds pre-scale by the radix to avoid overflow. */
        C_FIXDIV(*Fout0, 5);
        C_FIXDIV(*Fout1, 5);
        C_FIXDIV(*Fout2, 5);
        C_FIXDIV(*Fout3, 5);
        C_FIXDIV(*Fout4, 5);
        scratch[0] = *Fout0;
        /* Rotate the four upper sub-terms by twiddles u, 2u, 3u, 4u. */
        C_MUL(scratch[1], *Fout1, tw[u * fstride]);
        C_MUL(scratch[2], *Fout2, tw[2 * u * fstride]);
        C_MUL(scratch[3], *Fout3, tw[3 * u * fstride]);
        C_MUL(scratch[4], *Fout4, tw[4 * u * fstride]);
        /* Pairwise sums/differences exploit the conjugate symmetry. */
        C_ADD(scratch[7], scratch[1], scratch[4]);
        C_SUB(scratch[10], scratch[1], scratch[4]);
        C_ADD(scratch[8], scratch[2], scratch[3]);
        C_SUB(scratch[9], scratch[2], scratch[3]);
        Fout0->r += scratch[7].r + scratch[8].r;
        Fout0->i += scratch[7].i + scratch[8].i;
        scratch[5].r = scratch[0].r + S_MUL(scratch[7].r, ya.r) + S_MUL(scratch[8].r, yb.r);
        scratch[5].i = scratch[0].i + S_MUL(scratch[7].i, ya.r) + S_MUL(scratch[8].i, yb.r);
        scratch[6].r = S_MUL(scratch[10].i, ya.i) + S_MUL(scratch[9].i, yb.i);
        scratch[6].i = -S_MUL(scratch[10].r, ya.i) - S_MUL(scratch[9].r, yb.i);
        C_SUB(*Fout1, scratch[5], scratch[6]);
        C_ADD(*Fout4, scratch[5], scratch[6]);
        scratch[11].r = scratch[0].r + S_MUL(scratch[7].r, yb.r) + S_MUL(scratch[8].r, ya.r);
        scratch[11].i = scratch[0].i + S_MUL(scratch[7].i, yb.r) + S_MUL(scratch[8].i, ya.r);
        scratch[12].r = -S_MUL(scratch[10].i, yb.i) + S_MUL(scratch[9].i, ya.i);
        scratch[12].i = S_MUL(scratch[10].r, yb.i) - S_MUL(scratch[9].r, ya.i);
        C_ADD(*Fout2, scratch[11], scratch[12]);
        C_SUB(*Fout3, scratch[11], scratch[12]);
        ++Fout0;
        ++Fout1;
        ++Fout2;
        ++Fout3;
        ++Fout4;
    }
}
/* perform the butterfly for one stage of a mixed radix FFT */
/*
  General-radix butterfly for any remaining factor p.  Each of the m
  interleaved groups is copied to scratch, then every output is computed
  as a direct p-point DFT of that copy, indexing the full-length twiddle
  table modulo Norig.
*/
static void
kf_bfly_generic(kiss_fft_cpx* Fout, const size_t fstride, const kiss_fft_cfg st, int m, int p) {
    int u, k, q1, q;
    kiss_fft_cpx* twiddles = st->twiddles;
    kiss_fft_cpx t;
    int Norig = st->nfft;
    /* Temporary workspace for one group of p inputs. */
    kiss_fft_cpx* scratch = (kiss_fft_cpx*)KISS_FFT_TMP_ALLOC(sizeof(kiss_fft_cpx) * p);
    for (u = 0; u < m; ++u) {
        /* Snapshot the p strided inputs (pre-scaled for fixed point). */
        k = u;
        for (q1 = 0; q1 < p; ++q1) {
            scratch[q1] = Fout[k];
            C_FIXDIV(scratch[q1], p);
            k += m;
        }
        k = u;
        for (q1 = 0; q1 < p; ++q1) {
            int twidx = 0;
            Fout[k] = scratch[0];
            for (q = 1; q < p; ++q) {
                /* Advance the twiddle index, wrapping at the table length. */
                twidx += fstride * k;
                if (twidx >= Norig)
                    twidx -= Norig;
                C_MUL(t, scratch[q], twiddles[twidx]);
                C_ADDTO(Fout[k], t);
            }
            k += m;
        }
    }
    KISS_FFT_TMP_FREE(scratch);
}
/*
  Recursively evaluate one stage of the mixed-radix FFT.  `factors` is
  consumed as (radix p, sub-length m) pairs; the input f is read with
  stride fstride*in_stride and the combined stage result is written to
  Fout[0..p*m).
*/
static void
kf_work(kiss_fft_cpx* Fout, const kiss_fft_cpx* f, const size_t fstride, int in_stride,
        int* factors, const kiss_fft_cfg st) {
    kiss_fft_cpx* Fout_beg = Fout;
    const int p = *factors++; /* the radix */
    const int m = *factors++; /* stage's fft length/p */
    const kiss_fft_cpx* Fout_end = Fout + p * m;
#ifdef _OPENMP
    // use openmp extensions at the
    // top-level (not recursive)
    if (fstride == 1 && p <= 5 && m != 1) {
        int k;
        // execute the p different work units in different threads
#pragma omp parallel for
        for (k = 0; k < p; ++k)
            kf_work(Fout + k * m, f + fstride * in_stride * k, fstride * p, in_stride, factors, st);
        // all threads have joined by this point
        switch (p) {
            case 2:
                kf_bfly2(Fout, fstride, st, m);
                break;
            case 3:
                kf_bfly3(Fout, fstride, st, m);
                break;
            case 4:
                kf_bfly4(Fout, fstride, st, m);
                break;
            case 5:
                kf_bfly5(Fout, fstride, st, m);
                break;
            default:
                kf_bfly_generic(Fout, fstride, st, m, p);
                break;
        }
        return;
    }
#endif
    if (m == 1) {
        /* Base case: a length-1 DFT is just a strided copy of the input. */
        do {
            *Fout = *f;
            f += fstride * in_stride;
        } while (++Fout != Fout_end);
    } else {
        do {
            // recursive call:
            // DFT of size m*p performed by doing
            // p instances of smaller DFTs of size m,
            // each one takes a decimated version of the input
            kf_work(Fout, f, fstride * p, in_stride, factors, st);
            f += fstride * in_stride;
        } while ((Fout += m) != Fout_end);
    }
    Fout = Fout_beg;
    // recombine the p smaller DFTs
    switch (p) {
        case 2:
            kf_bfly2(Fout, fstride, st, m);
            break;
        case 3:
            kf_bfly3(Fout, fstride, st, m);
            break;
        case 4:
            kf_bfly4(Fout, fstride, st, m);
            break;
        case 5:
            kf_bfly5(Fout, fstride, st, m);
            break;
        default:
            kf_bfly_generic(Fout, fstride, st, m, p);
            break;
    }
}
/* facbuf is populated by p1,m1,p2,m2, ...
where
p[i] * m[i] = m[i-1]
m0 = n */
static void
kf_factor(int n, int* facbuf) {
int p = 4;
double floor_sqrt;
floor_sqrt = floor(sqrt((double)n));
/*factor out powers of 4, powers of 2, then any remaining primes */
do {
while (n % p) {
switch (p) {
case 4:
p = 2;
break;
case 2:
p = 3;
break;
default:
p += 2;
break;
}
if (p > floor_sqrt)
p = n; /* no more factors, skip to end */
}
n /= p;
*facbuf++ = p;
*facbuf++ = n;
} while (n > 1);
}
/*
 *
 * User-callable function to allocate all necessary storage space for the fft.
 *
 * The return value is a contiguous block of memory, allocated with malloc. As such,
 * It can be freed with free(), rather than a kiss_fft-specific function.
 * */
kiss_fft_cfg
kiss_fft_alloc(int nfft, int inverse_fft, void* mem, size_t* lenmem) {
    KISS_FFT_ALIGN_CHECK(mem)
    kiss_fft_cfg st = NULL;
    /* nfft-1 extra complex slots: assumes struct kiss_fft_state ends with a
       one-element twiddles array (classic flexible-array idiom) — TODO confirm
       against the struct definition. */
    size_t memneeded = KISS_FFT_ALIGN_SIZE_UP(
        sizeof(struct kiss_fft_state) + sizeof(kiss_fft_cpx) * (nfft - 1)); /* twiddle factors*/
    if (lenmem == NULL) {
        /* caller wants us to allocate */
        st = (kiss_fft_cfg)KISS_FFT_MALLOC(memneeded);
    } else {
        /* caller provided the buffer; *lenmem always reports the needed size,
           so a first call with a too-small buffer can be used to query it */
        if (mem != NULL && *lenmem >= memneeded)
            st = (kiss_fft_cfg)mem;
        *lenmem = memneeded;
    }
    if (st) {
        int i;
        st->nfft = nfft;
        st->inverse = inverse_fft;
        /* precompute the nfft twiddle factors e^(-2*pi*i*k/nfft)
           (sign flipped for the inverse transform) */
        for (i = 0; i < nfft; ++i) {
            const double pi = 3.141592653589793238462643383279502884197169399375105820974944;
            double phase = -2 * pi * i / nfft;
            if (st->inverse)
                phase *= -1;
            kf_cexp(st->twiddles + i, phase);
        }
        kf_factor(nfft, st->factors);
    }
    return st;
}
/* Run the FFT configured in st on fin (reading every in_stride-th element),
 * writing nfft complex outputs to fout.  fin == fout is supported via a
 * temporary buffer. */
void
kiss_fft_stride(kiss_fft_cfg st, const kiss_fft_cpx* fin, kiss_fft_cpx* fout, int in_stride) {
    if (fin == fout) {
        // NOTE: this is not really an in-place FFT algorithm.
        // It just performs an out-of-place FFT into a temp buffer
        kiss_fft_cpx* tmpbuf = (kiss_fft_cpx*)KISS_FFT_TMP_ALLOC(sizeof(kiss_fft_cpx) * st->nfft);
        kf_work(tmpbuf, fin, 1, in_stride, st->factors, st);
        memcpy(fout, tmpbuf, sizeof(kiss_fft_cpx) * st->nfft);
        KISS_FFT_TMP_FREE(tmpbuf);
    } else {
        kf_work(fout, fin, 1, in_stride, st->factors, st);
    }
}
/* Convenience wrapper: FFT over a contiguous input (stride 1). */
void
kiss_fft(kiss_fft_cfg cfg, const kiss_fft_cpx* fin, kiss_fft_cpx* fout) {
    kiss_fft_stride(cfg, fin, fout, 1);
}
/* Historical API entry point: earlier kiss_fft versions cached state that
 * had to be released here.  Kept (as a no-op) for API compatibility. */
void
kiss_fft_cleanup(void) {
    // nothing needed any more
}
/* Return the smallest value >= n whose prime factors are all in {2, 3, 5},
 * i.e. a size this FFT handles with only its fast hard-coded radices.
 *
 * BUG FIX: the original looped forever for n == 0 (m stays 0, which is
 * divisible by 2 indefinitely) and returned garbage for negative n; any
 * input below 1 now yields 1, the smallest valid FFT size. */
int
kiss_fft_next_fast_size(int n) {
    if (n < 1)
        return 1;
    while (1) {
        /* strip all factors of 2, 3 and 5; if nothing is left over,
           n is composed purely of fast radices */
        int m = n;
        while ((m % 2) == 0)
            m /= 2;
        while ((m % 3) == 0)
            m /= 3;
        while ((m % 5) == 0)
            m /= 5;
        if (m <= 1)
            break; /* n is completely factorable by twos, threes, and fives */
        n++;
    }
    return n;
}
|
Features.h | /**
* Copyright (c) 2021 Darius Rückert
* Licensed under the MIT License.
* See LICENSE file for more information.
*/
#pragma once
#include "saiga/vision/VisionIncludes.h"
#include <array>
namespace Saiga
{
/**
 * Identical to OpenCV's Keypoint struct except it uses
 * Eigen Points and no 'class_id' member.
 * @brief The Keypoint struct
 */
template <typename T = float>
class KeyPoint
{
   public:
    using Vec2 = Eigen::Matrix<T, 2, 1>;
    Vec2 point;   // image position (x, y)
    T size;       // diameter of the meaningful keypoint neighborhood
    T angle;      // orientation in degrees; -1 means "not computed" (see ctor defaults)
    T response;   // detector response; used for ordering via operator<
    int octave;   // pyramid octave the keypoint was detected in
    // Default ctor intentionally leaves all members uninitialized
    // (presumably to keep device-side array construction cheap — TODO confirm).
    HD KeyPoint() {}
    HD explicit KeyPoint(const Vec2& _pt, T _size = 0, T _angle = -1, T _response = 0, int _octave = 0)
        : point(_pt), size(_size), angle(_angle), response(_response), octave(_octave)
    {
    }
    HD KeyPoint(T _x, T _y, T _size = 0, T _angle = -1, T _response = 0, int _octave = 0)
        : point(_x, _y), size(_size), angle(_angle), response(_response), octave(_octave)
    {
    }
    // Exact member-wise equality (float compare — no epsilon).
    bool operator==(const KeyPoint& other) const
    {
        return point == other.point && size == other.size && angle == other.angle && response == other.response &&
               octave == other.octave;
    }
    // Orders keypoints by detector response only.
    bool operator<(const KeyPoint& other) const { return response < other.response; }
    friend std::ostream& operator<<(std::ostream& os, const KeyPoint& kpt)
    {
        os << kpt.point.transpose() << ": size=" << kpt.size << ", angle=" << kpt.angle << ", response=" << kpt.response
           << ", octave=" << kpt.octave;
        return os;
    }
    // Converts the scalar type; non-point members go through implicit conversion.
    template <typename CastType>
    KeyPoint<CastType> cast()
    {
        return KeyPoint<CastType>(point.template cast<CastType>(), size, angle, response, octave);
    }
};
// Some common feature descriptors
using DescriptorORB  = std::array<uint64_t, 4>;
using DescriptorSIFT = std::array<float, 128>;
// Debug helper: render an ORB descriptor as a 256-character bit string
// (least-significant bit of each 64-bit word first).
inline std::string OrbDescriptorToBitString(const DescriptorORB& desc)
{
    std::string bits;
    bits.reserve(desc.size() * 64);
    for (uint64_t word : desc)
    {
        for (int bit = 0; bit < 64; ++bit)
        {
            bits.push_back(((word >> bit) & 1) ? '1' : '0');
        }
    }
    return bits;
}
#if !defined(_WIN32) && EIGEN_ARCH_i386_OR_x86_64
// use the popcnt instruction
// this will be the fastest implementation if it is available
// more here: https://github.com/kimwalisch/libpopcnt
inline uint32_t popcnt(uint32_t x)
{
    // Single POPCNT instruction; "0"(x) ties input and output to one register.
    __asm__("popcnt %1, %0" : "=r"(x) : "0"(x));
    return x;
}
inline uint64_t popcnt(uint64_t x)
{
    __asm__("popcnt %1, %0" : "=r"(x) : "0"(x));
    return x;
}
#else
// Portable fallback (SWAR bit counting), used on Windows and non-x86.
// Bit count function got from:
// http://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetKernighan
inline uint32_t popcnt(uint32_t v)
{
    // Count bits in 2-bit, then 4-bit groups, then sum bytes via multiply.
    v = v - ((v >> 1) & 0x55555555);
    v = (v & 0x33333333) + ((v >> 2) & 0x33333333);
    return (((v + (v >> 4)) & 0xF0F0F0F) * 0x1010101) >> 24;
}
inline uint64_t popcnt(uint64_t v)
{
    // Same SWAR scheme, with ~0/k constants generating the 64-bit masks.
    v = v - ((v >> 1) & (uint64_t) ~(uint64_t)0 / 3);
    v = (v & (uint64_t) ~(uint64_t)0 / 15 * 3) + ((v >> 2) & (uint64_t) ~(uint64_t)0 / 15 * 3);
    v = (v + (v >> 4)) & (uint64_t) ~(uint64_t)0 / 255 * 15;
    // Byte-sum lands in the top byte; shift it down.
    return (uint64_t)(v * ((uint64_t) ~(uint64_t)0 / 255)) >> (sizeof(uint64_t) - 1) * CHAR_BIT;
}
#endif
// Compute the hamming distance between the two descriptors
// Same implementation as ORB SLAM
// http://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel
inline int distance(const DescriptorORB& a, const DescriptorORB& b)
{
int dist = 0;
for (int i = 0; i < (int)a.size(); i++)
{
auto v = a[i] ^ b[i];
// TODO: if this is really a bottleneck we can also use AVX-2
// to gain around 25% more performance
// according to this source:
// https://github.com/kimwalisch/libpopcnt
dist += popcnt(v);
}
return dist;
}
// Compute the euclidean distance between the descriptors
inline float distance(const DescriptorSIFT& a, const DescriptorSIFT& b)
{
    float sumSqr = 0;
    for (int i = 0; i < 128; ++i)
    {
        const float diff = a[i] - b[i];
        sumSqr += diff * diff;
    }
    return sqrt(sumSqr);
}
/**
 * Utilities for condensing several observations of the same feature into one
 * representative descriptor.  Only implemented for binary ORB descriptors.
 *
 * Fixes vs. the previous version:
 *  - MeanDescriptor and MeanDescriptorp shared ~30 duplicated lines; the
 *    bit-counting and majority-vote steps are factored into private helpers.
 *  - bestDescriptorFromArray indexed a std::vector with the double
 *    expression 0.5 * (N - 1); now uses the equivalent integral (N - 1) / 2.
 *  - BestIdx narrowing (size_t -> int) is now an explicit cast.
 */
template <typename T>
struct MeanMatcher
{
    /**
     * @return index of the descriptor with the smallest median hamming
     *         distance to all others, or -1 if the array is empty.
     */
    inline int bestDescriptorFromArray(const std::vector<T>& descriptors)
    {
        static_assert(std::is_same<T, DescriptorORB>::value, "Only implemented for ORB so far.");
        if (descriptors.size() == 0) return -1;
        // Compute symmetric pairwise distances
        size_t N = descriptors.size();
        std::vector<std::vector<int>> Distances(N, std::vector<int>(N));
        for (size_t i = 0; i < N; i++)
        {
            Distances[i][i] = 0;
            for (size_t j = i + 1; j < N; j++)
            {
                int distij      = distance(descriptors[i], descriptors[j]);
                Distances[i][j] = distij;
                Distances[j][i] = distij;
            }
        }
        // Take the descriptor with least median distance to the rest
        int BestMedian = INT_MAX;
        int BestIdx    = 0;
        for (size_t i = 0; i < N; i++)
        {
            auto& vDists = Distances[i];
            sort(vDists.begin(), vDists.end());
            int median = vDists[(N - 1) / 2];
            if (median < BestMedian)
            {
                BestMedian = median;
                BestIdx    = (int)i;
            }
        }
        return BestIdx;
    }
    /**
     * Bitwise majority vote over the given descriptors: each output bit is
     * set iff it is set in at least half of the inputs (ties round up).
     */
    inline T MeanDescriptor(const std::vector<T>& descriptors)
    {
        static_assert(std::is_same<T, DescriptorORB>::value, "Only implemented for ORB so far.");
        if (descriptors.empty())
        {
            return {};
        }
        if (descriptors.size() == 1)
        {
            return descriptors[0];
        }
        std::vector<int> sum(32 * 8, 0);
        for (size_t i = 0; i < descriptors.size(); ++i) accumulateBits(descriptors[i], sum);
        return majorityDescriptor(sum, descriptors.size());
    }
    /// Same as MeanDescriptor, but takes non-owning pointers to descriptors.
    inline T MeanDescriptorp(const std::vector<const T*>& descriptors)
    {
        static_assert(std::is_same<T, DescriptorORB>::value, "Only implemented for ORB so far.");
        if (descriptors.empty())
        {
            return {};
        }
        if (descriptors.size() == 1)
        {
            return *descriptors[0];
        }
        std::vector<int> sum(32 * 8, 0);
        for (size_t i = 0; i < descriptors.size(); ++i) accumulateBits(*descriptors[i], sum);
        return majorityDescriptor(sum, descriptors.size());
    }
   private:
    // Add each of d's 256 bits (MSB-first within every byte) into the counters.
    static void accumulateBits(const T& d, std::vector<int>& sum)
    {
        const unsigned char* p = (const unsigned char*)d.data();
        for (int j = 0; j < 32; ++j, ++p)
        {
            for (int b = 0; b < 8; ++b)
            {
                if (*p & (1 << (7 - b))) ++sum[j * 8 + b];
            }
        }
    }
    // Build the descriptor whose bits won the majority vote among `count` inputs.
    static T majorityDescriptor(const std::vector<int>& sum, size_t count)
    {
        T mean;
        std::fill(mean.begin(), mean.end(), 0);
        unsigned char* p = (unsigned char*)mean.data();
        // Threshold = ceil(count / 2): a bit must be set in at least half the inputs.
        const int N2 = (int)(count / 2 + count % 2);
        for (size_t i = 0; i < sum.size(); ++i)
        {
            if (sum[i] >= N2)
            {
                // set bit (MSB-first within each byte, mirroring accumulateBits)
                *p |= 1 << (7 - (i % 8));
            }
            if (i % 8 == 7) ++p;
        }
        return mean;
    }
};
/**
 * Exhaustive (all-pairs) descriptor matcher.
 * Typical use: matchKnn2() or matchKnn2_omp(), then filterMatches().
 */
template <typename T>
struct BruteForceMatcher
{
    using DistanceType = int;
    // Debug/benchmark helper: fills the full n x m distance matrix and
    // prints its sum/average to stdout.
    template <typename _InputIterator>
    void match(_InputIterator first1, int n, _InputIterator first2, int m)
    {
        distances.resize(n, m);
        for (auto i : Range<int>(0, n))
        {
            auto d2 = first2;
            for (auto j : Range<int>(0, m))
            {
                distances(i, j) = distance(*first1, *d2);
                ++d2;
            }
            ++first1;
        }
        int sum = distances.sum();
        std::cout << "distance sum: " << sum << " avg: " << double(sum) / (n * m) << std::endl;
    }
    // For every descriptor in desc1, find its two nearest neighbors in desc2.
    // Results go to knn2: column 0 = best {distance, index}, column 1 = second best.
    void matchKnn2(Saiga::ArrayView<DescriptorORB> desc1, Saiga::ArrayView<DescriptorORB> desc2)
    {
        knn2.resize(desc1.size(), 2);
        for (int i = 0; i < (int)desc1.size(); ++i)
        {
            // init best to infinity distance
            // (1000 > 256, the maximum hamming distance of a 256-bit ORB descriptor;
            //  index -1 marks "no match found")
            knn2(i, 0) = {1000, -1};
            knn2(i, 1) = knn2(i, 0);
            for (int j = 0; j < (int)desc2.size(); ++j)
            {
                auto dis = distance(desc1[i], desc2[j]);
                if (dis < knn2(i, 0).first)
                {
                    // set second best to old best
                    knn2(i, 1) = knn2(i, 0);
                    // create new best
                    knn2(i, 0).first  = dis;
                    knn2(i, 0).second = j;
                }
                else if (dis < knn2(i, 1).first)
                {
                    // override second best
                    knn2(i, 1).first  = dis;
                    knn2(i, 1).second = j;
                }
            }
        }
    }
    // Same as matchKnn2, parallelized over desc1 rows.  Safe because each
    // iteration writes only its own row of knn2.
    void matchKnn2_omp(Saiga::ArrayView<DescriptorORB> desc1, Saiga::ArrayView<DescriptorORB> desc2, int threads)
    {
        knn2.resize(desc1.size(), 2);
#pragma omp parallel for num_threads(threads)
        for (int i = 0; i < (int)desc1.size(); ++i)
        {
            // init best to infinity distance
            knn2(i, 0) = {1000, -1};
            knn2(i, 1) = knn2(i, 0);
            for (int j = 0; j < (int)desc2.size(); ++j)
            {
                auto dis = distance(desc1[i], desc2[j]);
                if (dis < knn2(i, 0).first)
                {
                    // set second best to old best
                    knn2(i, 1) = knn2(i, 0);
                    // create new best
                    knn2(i, 0).first  = dis;
                    knn2(i, 0).second = j;
                }
                else if (dis < knn2(i, 1).first)
                {
                    // override second best
                    knn2(i, 1).first  = dis;
                    knn2(i, 1).second = j;
                }
            }
        }
    }
    /**
     * Filter matches by ratio test and threshold.
     * You must have used the knn2 method above before!
     * @return number of surviving matches (also stored in `matches`).
     */
    int filterMatches(DistanceType threshold, float ratioThreshold)
    {
        matches.clear();
        matches.reserve(knn2.rows());
        for (auto i : Range<int>(0, knn2.rows()))
        {
            // the best distance is still larger than the threshold
            if (knn2(i, 0).first > threshold) continue;
            // Lowe-style ratio test: reject if the best match is not clearly
            // better than the second best.
            if (float(knn2(i, 0).first) > float(knn2(i, 1).first) * ratioThreshold) continue;
            matches.push_back({i, knn2(i, 0).second});
        }
        return matches.size();
    }
    // Full distance matrix filled by match() (debug only).
    Eigen::Matrix<DistanceType, -1, -1, Eigen::RowMajor> distances;
    // contains the matches index + the distance
    Eigen::Matrix<std::pair<DistanceType, int>, -1, 2, Eigen::RowMajor> knn2;
    // (query index, train index) pairs produced by filterMatches().
    std::vector<std::pair<int, int>> matches;
};
} // namespace Saiga
|
cg.c | /*--------------------------------------------------------------------
NAS Parallel Benchmarks 3.0 structured OpenMP C versions - CG
This benchmark is an OpenMP C version of the NPB CG code.
The OpenMP C 2.3 versions are derived by RWCP from the serial Fortran versions
in "NPB 2.3-serial" developed by NAS. 3.0 translation is performed by the UVSQ.
Permission to use, copy, distribute and modify this software for any
purpose with or without fee is hereby granted.
This software is provided "as is" without express or implied warranty.
Information on OpenMP activities at RWCP is available at:
http://pdplab.trc.rwcp.or.jp/pdperf/Omni/
Information on NAS Parallel Benchmarks 2.3 is available at:
http://www.nas.nasa.gov/NAS/NPB/
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
Authors: M. Yarrow
C. Kuszmaul
OpenMP C version: S. Satoh
3.0 structure translation: F. Conti
--------------------------------------------------------------------*/
/*
c---------------------------------------------------------------------
c Note: please observe that in the routine conj_grad three
c implementations of the sparse matrix-vector multiply have
c been supplied. The default matrix-vector multiply is not
c loop unrolled. The alternate implementations are unrolled
c to a depth of 2 and unrolled to a depth of 8. Please
c experiment with these to find the fastest for your particular
c architecture. If reporting timing results, any of these three may
c be used without penalty.
c---------------------------------------------------------------------
*/
#include "../common/npb-C.h"
#include "npbparams.h"
#include <nautilus/nautilus.h>
#include <nautilus/shell.h>
#include "../math/nas_math.h"
#define NZ NA*(NONZER+1)*(NONZER+1)+NA*(NONZER+2)
/* global variables */
/* common /partit_size/ */
static int naa;
static int nzz;
static int firstrow;
static int lastrow;
static int firstcol;
static int lastcol;
/* common /main_int_mem/ */
static int colidx[NZ+1]; /* colidx[1:NZ] */
static int rowstr[NA+1+1]; /* rowstr[1:NA+1] */
static int iv[2*NA+1+1]; /* iv[1:2*NA+1] */
static int arow[NZ+1]; /* arow[1:NZ] */
static int acol[NZ+1]; /* acol[1:NZ] */
/* common /main_flt_mem/ */
static double v[NA+1+1]; /* v[1:NA+1] */
static double aelt[NZ+1]; /* aelt[1:NZ] */
static double a[NZ+1]; /* a[1:NZ] */
static double x[NA+2+1]; /* x[1:NA+2] */
static double z[NA+2+1]; /* z[1:NA+2] */
static double p[NA+2+1]; /* p[1:NA+2] */
static double q[NA+2+1]; /* q[1:NA+2] */
static double r[NA+2+1]; /* r[1:NA+2] */
//static double w[NA+2+1]; /* w[1:NA+2] */
/* common /urando/ */
static double amult;
static double tran;
/* function declarations */
static void conj_grad (int colidx[], int rowstr[], double x[], double z[],
double a[], double p[], double q[], double r[],
//double w[],
double *rnorm);
static void makea(int n, int nz, double a[], int colidx[], int rowstr[],
int nonzer, int firstrow, int lastrow, int firstcol,
int lastcol, double rcond, int arow[], int acol[],
double aelt[], double v[], int iv[], double shift );
static void sparse(double a[], int colidx[], int rowstr[], int n,
int arow[], int acol[], double aelt[],
int firstrow, int lastrow,
double x[], boolean mark[], int nzloc[], int nnza);
static void sprnvc(int n, int nz, double v[], int iv[], int nzloc[],
int mark[]);
static int icnvrt(double x, int ipwr2);
static void vecset(int n, double v[], int iv[], int *nzv, int i, double val);
/*--------------------------------------------------------------------
program cg
--------------------------------------------------------------------*/
static int program_CG(char *__buf, void* __priv);
int program_CG_profile(char *_, void *__);
static struct shell_cmd_impl nas_cg_impl = {
.cmd = "nas-cg",
.help_str = "NAS parallel benchmark CG",
.handler = program_CG_profile,
};
nk_register_shell_cmd(nas_cg_impl);
/*--------------------------------------------------------------------
      program CG
c-------------------------------------------------------------------*/
/* Shell-command entry point: runs the CG benchmark, wrapped in the
 * kernel's instrumentation start/stop when profiling is compiled in.
 * Arguments are the standard (unused) shell handler parameters.
 * Always returns 0. */
int program_CG_profile(char *_, void *__){
#ifdef NAUT_CONFIG_PROFILE
    nk_instrument_clear();
    nk_instrument_start();
#endif
    program_CG(_,__);
#ifdef NAUT_CONFIG_PROFILE
    nk_instrument_end();
    nk_instrument_query();
#endif
    return 0;
}
/*--------------------------------------------------------------------
c Benchmark driver: builds the sparse system, runs one untimed warm-up
c iteration, then NITER timed inverse-power-method iterations, verifies
c zeta against the class reference value, and prints the results.
c BUG FIX: the function is declared int but previously fell off the end
c without returning a value; it now returns 0.
c-------------------------------------------------------------------*/
static int program_CG(char *__buf, void* __priv) {
    int i, j, k, it;
    int nthreads = 1;
    double zeta;
    double rnorm;
    double norm_temp11;
    double norm_temp12;
    double t, mflops;
    char class;
    boolean verified;
    double zeta_verify_value, epsilon;
    firstrow = 1;
    lastrow  = NA;
    firstcol = 1;
    lastcol  = NA;
    /* Select the problem class (and its reference zeta) from the compile-time
       parameters; 'U' means unknown and skips verification. */
    if (NA == 1400 && NONZER == 7 && NITER == 15 && SHIFT == 10.0) {
        class = 'S';
        zeta_verify_value = 8.5971775078648;
    } else if (NA == 7000 && NONZER == 8 && NITER == 15 && SHIFT == 12.0) {
        class = 'W';
        zeta_verify_value = 10.362595087124;
    } else if (NA == 14000 && NONZER == 11 && NITER == 15 && SHIFT == 20.0) {
        class = 'A';
        zeta_verify_value = 17.130235054029;
    } else if (NA == 75000 && NONZER == 13 && NITER == 75 && SHIFT == 60.0) {
        class = 'B';
        zeta_verify_value = 22.712745482631;
    } else if (NA == 150000 && NONZER == 15 && NITER == 75 && SHIFT == 110.0) {
        class = 'C';
        zeta_verify_value = 28.973605592845;
    } else {
        class = 'U';
    }
    printf("\n\n NAS Parallel Benchmarks 3.0 structured OpenMP C version"
           " - CG Benchmark\n");
    printf(" Size: %10d\n", NA);
    printf(" Iterations: %5d\n", NITER);
    naa = NA;
    nzz = NZ;
    /*--------------------------------------------------------------------
    c Initialize random number generator
    c-------------------------------------------------------------------*/
    tran = 314159265.0;
    amult = 1220703125.0;
    zeta = randlc( &tran, amult );
    /*--------------------------------------------------------------------
    c Generate the sparse test matrix
    c-------------------------------------------------------------------*/
    makea(naa, nzz, a, colidx, rowstr, NONZER,
          firstrow, lastrow, firstcol, lastcol,
          RCOND, arow, acol, aelt, v, iv, SHIFT);
    /*---------------------------------------------------------------------
    c Note: as a result of the above call to makea:
    c      values of j used in indexing rowstr go from 1 --> lastrow-firstrow+1
    c      values of colidx which are col indexes go from firstcol --> lastcol
    c      So:
    c      Shift the col index vals from actual (firstcol --> lastcol )
    c      to local, i.e., (1 --> lastcol-firstcol+1)
    c---------------------------------------------------------------------*/
#pragma omp parallel default(shared) private(i,j,k)
    {
#pragma omp for nowait
        for (j = 1; j <= lastrow - firstrow + 1; j++) {
            for (k = rowstr[j]; k < rowstr[j+1]; k++) {
                colidx[k] = colidx[k] - firstcol + 1;
            }
        }
        /*--------------------------------------------------------------------
        c  set starting vector to (1, 1, .... 1)
        c-------------------------------------------------------------------*/
#pragma omp for nowait
        for (i = 1; i <= NA+1; i++) {
            x[i] = 1.0;
        }
#pragma omp for nowait
        for (j = 1; j <= lastcol-firstcol+1; j++) {
            q[j] = 0.0;
            z[j] = 0.0;
            r[j] = 0.0;
            p[j] = 0.0;
        }
    }// end omp parallel
    zeta = 0.0;
    /*-------------------------------------------------------------------
    c---->
    c  Do one iteration untimed to init all code and data page tables
    c---->                    (then reinit, start timing, to niter its)
    c-------------------------------------------------------------------*/
    for (it = 1; it <= 1; it++) {
        /* The call to the conjugate gradient routine */
        conj_grad (colidx, rowstr, x, z, a, p, q, r, &rnorm);
        /*--------------------------------------------------------------------
        c  zeta = shift + 1/(x.z); first compute (x.z) and the norm of z
        c-------------------------------------------------------------------*/
        norm_temp11 = 0.0;
        norm_temp12 = 0.0;
#pragma omp parallel for default(shared) private(j) reduction(+:norm_temp11,norm_temp12)
        for (j = 1; j <= lastcol-firstcol+1; j++) {
            norm_temp11 = norm_temp11 + x[j]*z[j];
            norm_temp12 = norm_temp12 + z[j]*z[j];
        }
        norm_temp12 = 1.0 / sqrt( norm_temp12 );
        /* Normalize z to obtain x */
#pragma omp parallel for default(shared) private(j)
        for (j = 1; j <= lastcol-firstcol+1; j++) {
            x[j] = norm_temp12*z[j];
        }
    } /* end of do one iteration untimed */
    /* Reset the starting vector to (1, 1, .... 1) for the timed run */
#pragma omp parallel for default(shared) private(i)
    for (i = 1; i <= NA+1; i++) {
        x[i] = 1.0;
    }
    zeta = 0.0;
    timer_clear( 1 );
    timer_start( 1 );
    /*--------------------------------------------------------------------
    c---->
    c  Main Iteration for inverse power method
    c---->
    c-------------------------------------------------------------------*/
    for (it = 1; it <= NITER; it++) {
        /* The call to the conjugate gradient routine */
        conj_grad(colidx, rowstr, x, z, a, p, q, r, &rnorm);
        /*--------------------------------------------------------------------
        c  zeta = shift + 1/(x.z); first compute (x.z) and the norm of z
        c-------------------------------------------------------------------*/
        norm_temp11 = 0.0;
        norm_temp12 = 0.0;
#pragma omp parallel for default(shared) private(j) reduction(+:norm_temp11,norm_temp12)
        for (j = 1; j <= lastcol-firstcol+1; j++) {
            norm_temp11 = norm_temp11 + x[j]*z[j];
            norm_temp12 = norm_temp12 + z[j]*z[j];
        }
        norm_temp12 = 1.0 / sqrt( norm_temp12 );
        zeta = SHIFT + 1.0 / norm_temp11;
        if( it == 1 ) {
            printf(" iteration ||r|| zeta\n");
        }
        printf(" %5d %20.14e%20.13e\n", it, rnorm, zeta);
        /* Normalize z to obtain x */
#pragma omp parallel for default(shared) private(j)
        for (j = 1; j <= lastcol-firstcol+1; j++) {
            x[j] = norm_temp12*z[j];
        }
    } /* end of main iter inv pow meth */
    /* Record the thread count actually used (for the results report). */
#pragma omp parallel
    {
#if defined(_OPENMP)
#pragma omp master
        nthreads = omp_get_num_threads();
#endif /* _OPENMP */
    } /* end parallel */
    timer_stop( 1 );
    /*--------------------------------------------------------------------
    c  End of timed section
    c-------------------------------------------------------------------*/
    t = timer_read( 1 );
    printf(" Benchmark completed\n");
    epsilon = 1.0e-10;
    if (class != 'U') {
        if (fabs(zeta - zeta_verify_value) <= epsilon) {
            verified = TRUE;
            printf(" VERIFICATION SUCCESSFUL\n");
            printf(" Zeta is %20.12e\n", zeta);
            printf(" Error is %20.12e\n", zeta - zeta_verify_value);
        } else {
            verified = FALSE;
            printf(" VERIFICATION FAILED\n");
            printf(" Zeta %20.12e\n", zeta);
            printf(" The correct zeta is %20.12e\n", zeta_verify_value);
        }
    } else {
        verified = FALSE;
        printf(" Problem size unknown\n");
        printf(" NO VERIFICATION PERFORMED\n");
    }
    /* MFLOPS from the NPB operation-count model; 0 if the timer read 0. */
    if ( t != 0.0 ) {
        mflops = (2.0*NITER*NA)
            * (3.0+(NONZER*(NONZER+1)) + 25.0*(5.0+(NONZER*(NONZER+1))) + 3.0 )
            / t / 1000000.0;
    } else {
        mflops = 0.0;
    }
    c_print_results("CG", class, NA, 0, 0, NITER, nthreads, t,
                    mflops, " floating point",
                    verified, NPBVERSION, COMPILETIME,
                    CS1, CS2, CS3, CS4, CS5, CS6, CS7);
    return 0; /* BUG FIX: was missing */
}
/*--------------------------------------------------------------------
c Conjugate-gradient solve of (A - shift*I) z = x, cgitmax iterations.
c Floating point arrays here are named as in the NPB1 spec discussion of
c the CG algorithm.  On return, *rnorm = ||x - A.z||.
c
c BUG FIXES vs. previous version:
c  - callcount++ was executed inside the parallel region by every thread
c    (unsynchronized read-modify-write = data race); it is now incremented
c    once per CG iteration by the master thread, outside the region.
c  - the final residual region declared private(j,d) but NOT k, so the
c    inner loop counter k was shared across threads (race corrupting the
c    computed residual); k is now private, as the old commented-out
c    "//private(d, k)" hint intended.
c  - removed the large commented-out unrolled-by-2/unrolled-by-8 variants:
c    they referenced the w[] workspace that was already deleted from this
c    file, so they could never be re-enabled as written.
c-------------------------------------------------------------------*/
static void conj_grad (
    int colidx[],  /* colidx[1:nzz] */
    int rowstr[],  /* rowstr[1:naa+1] */
    double x[],    /* x[*] */
    double z[],    /* z[*] */
    double a[],    /* a[1:nzz] */
    double p[],    /* p[*] */
    double q[],    /* q[*] */
    double r[],    /* r[*] */
    double *rnorm )
{
    static int callcount = 0;
    double d, sum, rho, rho0, alpha, beta;
    int j, k;
    int cgit, cgitmax = 25;
    rho = 0.0;
    /*--------------------------------------------------------------------
    c  Initialize the CG algorithm:  q = z = 0, r = p = x, rho = r.r
    c-------------------------------------------------------------------*/
#pragma omp parallel default(shared) private(j) shared(rho,naa)
    {
#pragma omp for
        for (j = 1; j <= naa+1; j++) {
            q[j] = 0.0;
            z[j] = 0.0;
            r[j] = x[j];
            p[j] = r[j];
        }
#pragma omp for reduction(+:rho)
        for (j = 1; j <= lastcol-firstcol+1; j++) {
            rho = rho + r[j]*r[j];
        }
    }/* end omp parallel */
    /*--------------------------------------------------------------------
    c---->
    c  The conj grad iteration loop
    c---->
    c-------------------------------------------------------------------*/
    for (cgit = 1; cgit <= cgitmax; cgit++) {
        rho0 = rho;
        d = 0.0;
        rho = 0.0;
#pragma omp parallel default(shared) private(j,k,sum,alpha,beta) shared(d,rho0,rho)
        {
            /*--------------------------------------------------------------------
            c  q = A.p  (rolled sparse matrix-vector multiply)
            c-------------------------------------------------------------------*/
#pragma omp for
            for (j = 1; j <= lastrow-firstrow+1; j++) {
                sum = 0.0;
                for (k = rowstr[j]; k < rowstr[j+1]; k++) {
                    sum = sum + a[k]*p[colidx[k]];
                }
                q[j] = sum;
            }
            /*--------------------------------------------------------------------
            c  Obtain p.q
            c-------------------------------------------------------------------*/
#pragma omp for reduction(+:d)
            for (j = 1; j <= lastcol-firstcol+1; j++) {
                d = d + p[j]*q[j];
            }
#pragma omp barrier
            /* alpha = rho / (p.q) — alpha is private, so every thread
               computes the same value redundantly (cheaper than a single +
               broadcast) */
            alpha = rho0 / d;
            /*---------------------------------------------------------------------
            c  z = z + alpha*p,  r = r - alpha*q,  and rho = r.r in one pass
            c---------------------------------------------------------------------*/
#pragma omp for reduction(+:rho)
            for (j = 1; j <= lastcol-firstcol+1; j++) {
                z[j] = z[j] + alpha*p[j];
                r[j] = r[j] - alpha*q[j];
                rho = rho + r[j]*r[j];
            }
            /* beta = rho(new) / rho(old) — private per thread, like alpha;
               the reduction loop above ends in an implicit barrier, so rho
               is complete here */
            beta = rho / rho0;
            /*--------------------------------------------------------------------
            c  p = r + beta*p
            c-------------------------------------------------------------------*/
#pragma omp for nowait
            for (j = 1; j <= lastcol-firstcol+1; j++) {
                p[j] = r[j] + beta*p[j];
            }
        } /* end omp parallel */
        callcount++;
    } /* end of do cgit=1,cgitmax */
    /*---------------------------------------------------------------------
    c  Compute residual norm explicitly:  ||r|| = ||x - A.z||
    c  First, form A.z (partition submatrix-vector multiply)
    c---------------------------------------------------------------------*/
    sum = 0.0;
#pragma omp parallel default(shared) private(j,d,k) shared(sum)
    {
#pragma omp for
        for (j = 1; j <= lastrow-firstrow+1; j++) {
            d = 0.0;
            for (k = rowstr[j]; k <= rowstr[j+1]-1; k++) {
                d = d + a[k]*z[colidx[k]];
            }
            r[j] = d;
        }
        /*--------------------------------------------------------------------
        c  At this point, r contains A.z
        c-------------------------------------------------------------------*/
#pragma omp for reduction(+:sum)
        for (j = 1; j <= lastcol-firstcol+1; j++) {
            d = x[j] - r[j];
            sum = sum + d*d;
        }
    } //end omp parallel
    (*rnorm) = sqrt(sum);
}
/*---------------------------------------------------------------------
c generate the test problem for benchmark 6
c makea generates a sparse matrix with a
c prescribed sparsity distribution
c
c parameter type usage
c
c input
c
c n i number of cols/rows of matrix
c nz i nonzeros as declared array size
c rcond r*8 condition number
c shift r*8 main diagonal shift
c
c output
c
c a r*8 array for nonzeros
c colidx i col indices
c rowstr i row pointers
c
c workspace
c
c iv, arow, acol i
c v, aelt r*8
c---------------------------------------------------------------------*/
static void makea(
    int n,
    int nz,
    double a[],      /* a[1:nz] */
    int colidx[],    /* colidx[1:nz] */
    int rowstr[],    /* rowstr[1:n+1] */
    int nonzer,
    int firstrow,
    int lastrow,
    int firstcol,
    int lastcol,
    double rcond,
    int arow[],      /* arow[1:nz] */
    int acol[],      /* acol[1:nz] */
    double aelt[],   /* aelt[1:nz] */
    double v[],      /* v[1:n+1] */
    int iv[],        /* iv[1:2*n+1] */
    double shift )
{
    int i, nnza, iouter, ivelt, ivelt1, irow, nzv;
    /*--------------------------------------------------------------------
    c      nonzer is approximately  (int(sqrt(nnza /n)));
    c-------------------------------------------------------------------*/
    double size, ratio, scale;
    int jcol;
    /* Each outer product is scaled by size, which decays geometrically from
       1.0 down toward rcond, giving the matrix the requested condition number. */
    size = 1.0;
    ratio = pow(rcond, (1.0 / (double)n));
    nnza = 0;
    /*---------------------------------------------------------------------
    c  Initialize colidx(n+1 .. 2n) to zero.
    c  Used by sprnvc to mark nonzero positions
    c---------------------------------------------------------------------*/
#pragma omp parallel for default(shared) private(i)
    for (i = 1; i <= n; i++) {
        colidx[n+i] = 0;
    }
    /* Accumulate n sparse outer products v*v' into the (arow, acol, aelt)
       triple list; duplicates are merged later by sparse(). */
    for (iouter = 1; iouter <= n; iouter++) {
        nzv = nonzer;
        /* random sparse vector; colidx[0..] / colidx[n..] serve as
           workspace and the nonzero-marker array for sprnvc */
        sprnvc(n, nzv, v, iv, &(colidx[0]), &(colidx[n]));
        /* force element iouter to 0.5 so the diagonal is populated */
        vecset(n, v, iv, &nzv, iouter, 0.5);
        for (ivelt = 1; ivelt <= nzv; ivelt++) {
            jcol = iv[ivelt];
            /* keep only columns owned by this partition */
            if (jcol >= firstcol && jcol <= lastcol) {
                scale = size * v[ivelt];
                for (ivelt1 = 1; ivelt1 <= nzv; ivelt1++) {
                    irow = iv[ivelt1];
                    /* keep only rows owned by this partition */
                    if (irow >= firstrow && irow <= lastrow) {
                        nnza = nnza + 1;
                        if (nnza > nz) {
                            printf("Space for matrix elements exceeded in"
                                   " makea\n");
                            printf("nnza, nzmax = %d, %d\n", nnza, nz);
                            printf("iouter = %d\n", iouter);
                            exit(1);
                        }
                        acol[nnza] = jcol;
                        arow[nnza] = irow;
                        aelt[nnza] = v[ivelt1] * scale;
                    }
                }
            }
        }
        size = size * ratio;
    }
    /*---------------------------------------------------------------------
    c       ... add the identity * rcond to the generated matrix to bound
    c           the smallest eigenvalue from below by rcond
    c---------------------------------------------------------------------*/
    for (i = firstrow; i <= lastrow; i++) {
        if (i >= firstcol && i <= lastcol) {
            iouter = n + i;
            nnza = nnza + 1;
            if (nnza > nz) {
                printf("Space for matrix elements exceeded in makea\n");
                printf("nnza, nzmax = %d, %d\n", nnza, nz);
                printf("iouter = %d\n", iouter);
                exit(1);
            }
            acol[nnza] = i;
            arow[nnza] = i;
            aelt[nnza] = rcond - shift;
        }
    }
    /*---------------------------------------------------------------------
    c       ... make the sparse matrix from list of elements with duplicates
    c           (v and iv are used as  workspace)
    c---------------------------------------------------------------------*/
    sparse(a, colidx, rowstr, n, arow, acol, aelt,
           firstrow, lastrow, v, &(iv[0]), &(iv[n]), nnza);
}
/*---------------------------------------------------
c generate a sparse matrix from a list of
c [col, row, element] triples
c---------------------------------------------------*/
/*
 * Assemble a CSR matrix (a, colidx, rowstr) from the unassembled triple
 * list (arow, acol, aelt), summing duplicate (row, col) entries and
 * dropping entries that cancel to exactly zero.  The algorithm is a
 * counting pass, a bucket sort into place, then a per-row de-duplication
 * using x[] as an accumulator and mark[]/nzloc[] as scratch.
 */
static void sparse(
    double a[],         /* a[1:*] */
    int colidx[],       /* colidx[1:*] */
    int rowstr[],       /* rowstr[1:*] */
    int n,
    int arow[],         /* arow[1:*] */
    int acol[],         /* acol[1:*] */
    double aelt[],      /* aelt[1:*] */
    int firstrow,
    int lastrow,
    double x[],         /* x[1:n] */
    boolean mark[],     /* mark[1:n] */
    int nzloc[],        /* nzloc[1:n] */
    int nnza)
/*---------------------------------------------------------------------
c rows range from firstrow to lastrow
c the rowstr pointers are defined for nrows = lastrow-firstrow+1 values
c---------------------------------------------------------------------*/
{
    int nrows;
    int i, j, jajp1, nza, k, nzrow;
    double xi;

    /*--------------------------------------------------------------------
    c how many rows of result
    c-------------------------------------------------------------------*/
    nrows = lastrow - firstrow + 1;

    /*--------------------------------------------------------------------
    c ...count the number of triples in each row
    c-------------------------------------------------------------------*/
#pragma omp parallel for default(shared) private(j)
    for (j = 1; j <= n; j++) {
        rowstr[j] = 0;
        mark[j] = FALSE;
    }
    rowstr[n+1] = 0;

    /* Count into rowstr[row+1] so the prefix sum below yields row starts. */
    for (nza = 1; nza <= nnza; nza++) {
        j = (arow[nza] - firstrow + 1) + 1;
        rowstr[j] = rowstr[j] + 1;
    }
    rowstr[1] = 1;
    for (j = 2; j <= nrows+1; j++) {
        rowstr[j] = rowstr[j] + rowstr[j-1];
    }

    /*---------------------------------------------------------------------
    c ... rowstr(j) now is the location of the first nonzero
    c of row j of a
    c---------------------------------------------------------------------*/

    /*---------------------------------------------------------------------
    c ... preload data pages
    c---------------------------------------------------------------------*/
    /* NOTE(review): j starts at 0 here, so rowstr[0] (outside the
       documented 1-based range) is read; the touched a[] values are all
       overwritten by the bucket sort below, so this only preloads pages —
       confirm against the NPB reference before changing. */
#pragma omp parallel for default(shared) private(k,j)
    for(j = 0;j <= nrows-1;j++) {
        for(k = rowstr[j];k <= rowstr[j+1]-1;k++)
            a[k] = 0.0;
    }

    /*--------------------------------------------------------------------
    c ... do a bucket sort of the triples on the row index
    c-------------------------------------------------------------------*/
    /* rowstr[j] is advanced as row j fills, so afterwards it points one
       row ahead; the shift loop below undoes that. */
    for (nza = 1; nza <= nnza; nza++) {
        j = arow[nza] - firstrow + 1;
        k = rowstr[j];
        a[k] = aelt[nza];
        colidx[k] = acol[nza];
        rowstr[j] = rowstr[j] + 1;
    }

    /*--------------------------------------------------------------------
    c ... rowstr(j) now points to the first element of row j+1
    c-------------------------------------------------------------------*/
    for (j = nrows; j >= 1; j--) {
        rowstr[j+1] = rowstr[j];
    }
    rowstr[1] = 1;

    /*--------------------------------------------------------------------
    c ... generate the actual output rows by adding elements
    c-------------------------------------------------------------------*/
    nza = 0;   /* count of de-duplicated nonzeros written back into a[] */
#pragma omp parallel for default(shared) private(i)
    for (i = 1; i <= n; i++) {
        x[i] = 0.0;
        mark[i] = FALSE;
    }

    jajp1 = rowstr[1];   /* start of the current (not yet compacted) row */
    for (j = 1; j <= nrows; j++) {
        nzrow = 0;

        /*--------------------------------------------------------------------
        c ...loop over the jth row of a
        c-------------------------------------------------------------------*/
        /* Accumulate duplicates of each column into x[], remembering each
           distinct column the first time it becomes nonzero. */
        for (k = jajp1; k < rowstr[j+1]; k++) {
            i = colidx[k];
            x[i] = x[i] + a[k];
            if ( mark[i] == FALSE && x[i] != 0.0) {
                mark[i] = TRUE;
                nzrow = nzrow + 1;
                nzloc[nzrow] = i;
            }
        }

        /*--------------------------------------------------------------------
        c ... extract the nonzeros of this row
        c-------------------------------------------------------------------*/
        /* Compact the accumulated values back into a[]/colidx[], clearing
           the scratch arrays for the next row as we go. */
        for (k = 1; k <= nzrow; k++) {
            i = nzloc[k];
            mark[i] = FALSE;
            xi = x[i];
            x[i] = 0.0;
            if (xi != 0.0) {
                nza = nza + 1;
                a[nza] = xi;
                colidx[nza] = i;
            }
        }
        jajp1 = rowstr[j+1];
        rowstr[j+1] = nza + rowstr[1];   /* finalized end of compacted row j */
    }
}
/*---------------------------------------------------------------------
c generate a sparse n-vector (v, iv) having nz nonzeros at distinct
c random positions in 1..n
c
c mark(i) is set to 1 while position i is occupied so duplicates are
c rejected.  mark is all zero on entry and is reset to all zero before
c exit (only the touched positions, recorded in nzloc, are cleared —
c this avoids re-initializing mark on every one of the n calls).
c---------------------------------------------------------------------*/
static void sprnvc(
    int n,
    int nz,
    double v[],     /* v[1:*] */
    int iv[],       /* iv[1:*] */
    int nzloc[],    /* nzloc[1:n] */
    int mark[] )    /* mark[1:n] */
{
    int pow2;          /* smallest power of two >= n (and >= 2) */
    int marked = 0;    /* number of mark[] entries currently set */
    int filled = 0;    /* nonzeros generated so far */
    int idx, ii;
    double value, position;

    for (pow2 = 2; pow2 < n; pow2 *= 2)
        ;

    while (filled < nz) {
        /* Draw the value first, then the position — the order of the
           randlc() calls must match the reference stream exactly. */
        value = randlc(&tran, amult);
        position = randlc(&tran, amult);

        /*--------------------------------------------------------------------
        c generate an integer between 1 and n in a portable manner
        c-------------------------------------------------------------------*/
        idx = icnvrt(position, pow2) + 1;
        if (idx > n) continue;   /* rejection sampling past n */

        /*--------------------------------------------------------------------
        c was this integer generated already?
        c-------------------------------------------------------------------*/
        if (mark[idx] == 0) {
            mark[idx] = 1;
            marked = marked + 1;
            nzloc[marked] = idx;
            filled = filled + 1;
            v[filled] = value;
            iv[filled] = idx;
        }
    }

    /* Clear only the positions we set, leaving mark[] all-zero again. */
    for (ii = 1; ii <= marked; ii++)
        mark[nzloc[ii]] = 0;
}
/*---------------------------------------------------------------------
 * Scale a double precision number x in (0,1) by a power of 2 (ipwr2)
 * and chop it, yielding an integer in [0, ipwr2).
 *---------------------------------------------------------------------*/
static int icnvrt(double x, int ipwr2)
{
    /* Truncation toward zero is intentional: the caller adds 1 to land
       in the 1..ipwr2 range. */
    double scaled = ipwr2 * x;
    return (int) scaled;
}
/*--------------------------------------------------------------------
c set element i of the sparse vector (v, iv) holding *nzv nonzeros
c to val; if position i is not yet present, append it and bump *nzv
c-------------------------------------------------------------------*/
static void vecset(
    int n,          /* unused; kept for interface symmetry */
    double v[],     /* v[1:*] */
    int iv[],       /* iv[1:*] */
    int *nzv,
    int i,
    double val)
{
    int k;
    int found = 0;

    /* Overwrite every stored entry whose index matches i (the full scan
       mirrors the reference code; duplicates would all be updated). */
    for (k = 1; k <= *nzv; k++) {
        if (iv[k] == i) {
            v[k] = val;
            found = 1;
        }
    }

    /* Position i was absent: append a fresh (index, value) pair. */
    if (!found) {
        *nzv = *nzv + 1;
        v[*nzv] = val;
        iv[*nzv] = i;
    }
}
|
main.c | #include <stdio.h>
#include <omp.h>
#include <string.h>
#include <stdlib.h>
#include <stdbool.h>
// Calculate |𝛢𝑖𝑖| > ∑ |𝐴𝑖𝑗| where j=0…N-1 i<>j
// Checks whether the matrix is strictly diagonally dominant:
// |A[i][i]| > sum_{j != i} |A[i][j]| for every row i.
//
// sddArray:     row-major N*N integer matrix
// sddArraySize: N
// chunk:        chunk size for the static OpenMP schedule
// Returns true iff every row satisfies the strict dominance condition;
// also prints the verdict.
bool strictlyDiagonallyDominant(int *sddArray, int sddArraySize, int chunk) {
    int i, j, sum;
    bool isSDD = true;
    // Each thread folds its verdict through the &&-reduction.  The original
    // had every thread write the shared flag directly (a data race), and it
    // accepted |A[i][i]| == sum as dominant, contradicting the strict '>'
    // definition — both fixed here.
    #pragma omp parallel for schedule(static, chunk) private(j, sum) \
            shared(sddArray, sddArraySize, chunk) reduction(&&:isSDD) default(none)
    for (i = 0; i < sddArraySize; i++) {
        sum = 0;
        for (j = 0; j < sddArraySize; j++) {
            if (i != j)
                sum += abs(sddArray[i * sddArraySize + j]);
        }
        if (abs(sddArray[i * sddArraySize + i]) <= sum)
            isSDD = false;
    }
    printf(isSDD ? "\nThe matrix is strictly diagonally dominant!\n"
                 : "\nThe matrix is not strictly diagonally dominant!\n");
    return isSDD;
}
// Calculate m = max(|𝛢𝑖𝑖|) where i=0…N-1
// Returns m = max(|A[i][i]|) over the main diagonal, i = 0..N-1,
// and prints the result.
int maxInDiagonal(int *sddArray, int sddArraySize) {
    int i;
    // Seed with the first diagonal element A[0][0].  The original seeded
    // with A[1][1] (index 1*N+1), which reads out of bounds for a 1x1
    // matrix and silently assumed N >= 2.
    int max = abs(sddArray[0]);
    #pragma omp parallel for private(i) shared(sddArraySize, sddArray) reduction(max:max) default(none)
    for (i = 0; i < sddArraySize; ++i) {
        if (abs(sddArray[i * sddArraySize + i]) > max)
            max = abs(sddArray[i * sddArraySize + i]);
    }
    printf("\nThe max element in the diagonal of the matrix is: %d\n", max);
    return max;
}
// Create new array where 𝐵𝑖𝑗 = m–|𝐴𝑖𝑗| for i<>j and 𝐵𝑖𝑗 = m for i=j
// Builds B from A into sddMaxArray: B[i][j] = m - |A[i][j]| off the
// diagonal and B[i][i] = m on it, where m is the supplied max.
void createNewArray(int *sddArray, int *sddMaxArray, int sddArraySize, int chunk, int max) {
    int i, j;
    // Rows are independent, so a single combined parallel-for with a
    // static schedule distributes them in chunks.
    #pragma omp parallel for schedule(static, chunk) private(j) \
            shared(sddArray, sddArraySize, chunk, sddMaxArray, max) default(none)
    for (i = 0; i < sddArraySize; i++) {
        for (j = 0; j < sddArraySize; j++) {
            int idx = i * sddArraySize + j;
            sddMaxArray[idx] = (i == j) ? max : max - abs(sddArray[idx]);
        }
    }
    printf("\n-Created new array-\n");
}
// Calculate min(|B[i][j]|) over all elements i,j=0…N-1 with the reduction clause
// Finds min(|B[i][j]|) over ALL elements of the matrix using an OpenMP
// min-reduction, and prints it.  (Despite the name, the whole matrix is
// scanned, not just the diagonal; the name is kept for compatibility
// with existing callers.)
void minInDiagonalWithReduction(int *sddMaxArray, int sddArraySize) {
    int i, j;
    // Seed with the first element B[0][0].  The original seeded with
    // B[1][1] (index 1*N+1), an out-of-bounds read for a 1x1 matrix.
    // Any matrix element is a valid seed since every |B[i][j]| is scanned.
    int min = abs(sddMaxArray[0]);
    #pragma omp parallel for private(i, j) shared(sddArraySize, sddMaxArray) reduction(min:min) default(none)
    for (i = 0; i < sddArraySize; ++i) {
        for (j = 0; j < sddArraySize; ++j) {
            if (abs(sddMaxArray[i * sddArraySize + j]) < min)
                min = abs(sddMaxArray[i * sddArraySize + j]);
        }
    }
    printf("\n-Calculated with reduction clause-\nThe min element in the matrix is: %d\n", min);
}
// Calculate min(|B[i][j]|) over all elements i,j=0…N-1 with a critical area
// Finds min(|B[i][j]|) over ALL elements of the matrix using per-thread
// partial minima merged in a critical section, and prints it.  (As with
// the reduction variant, the whole matrix is scanned, not just the
// diagonal; the name is kept for compatibility.)
void minInDiagonalWithCriticalArea(int *sddMaxArray, int sddArraySize, int chunk) {
    int i, j;
    // Seed with B[0][0]; the original's B[1][1] seed read out of bounds
    // for a 1x1 matrix.
    int min = abs(sddMaxArray[0]);
    #pragma omp parallel shared(sddArraySize, sddMaxArray, chunk, min) private(i, j) default(none)
    {
        // Seed the thread-local minimum from the (read-only) matrix rather
        // than from the shared 'min', which other threads may be updating
        // inside the critical section (that read was a data race).
        int minLocal = abs(sddMaxArray[0]);
        #pragma omp for schedule(static, chunk)
        for (i = 0; i < sddArraySize; ++i)
            for (j = 0; j < sddArraySize; ++j)
                if (abs(sddMaxArray[i * sddArraySize + j]) < minLocal)
                    minLocal = abs(sddMaxArray[i * sddArraySize + j]);
        // Merge each thread's partial minimum under mutual exclusion.
        #pragma omp critical
        {
            if (minLocal < min) min = minLocal;
        }
    }
    printf("\n-Calculated with critical area-\nThe min element in the matrix is: %d\n", min);
}
// Program entry point: reads an N x N integer matrix and a thread count
// from stdin, verifies strict diagonal dominance, derives the B matrix,
// reports its minimum element computed two ways, times the pipeline, and
// appends the measurement to results.txt.
int main() {
    int i, j, numThreads, sddArraySize, chunk, max;
    int *sddArray, *sddMaxArray;
    double start, end, timeInMilliseconds;

    printf("-------------------------------\n");
    printf("Parallel Systems - Assignment 1\n");
    printf("-------------------------------\n\n");

    printf("What array size fits you? N=");
    // Validate interactive input: the original ignored scanf failures and
    // went on to use indeterminate values.
    if (scanf("%d", &sddArraySize) != 1 || sddArraySize <= 0) {
        printf("Invalid array size!\n");
        return 1;
    }

    sddArray = (int *) malloc(sddArraySize * sddArraySize * sizeof(int));
    sddMaxArray = (int *) malloc(sddArraySize * sddArraySize * sizeof(int));
    if (sddArray == NULL || sddMaxArray == NULL) {
        printf("Error allocating memory!\n");
        free(sddArray);
        free(sddMaxArray);
        return 1;
    }

    printf("\nEnter number of threads for OpenMP: ");
    if (scanf("%d", &numThreads) != 1 || numThreads <= 0) {
        printf("Invalid number of threads!\n");
        free(sddArray);
        free(sddMaxArray);
        return 1;
    }
    printf("\nSetting %d threads", numThreads);
    omp_set_num_threads(numThreads);

    for (i = 0; i < sddArraySize; i++) {
        for (j = 0; j < sddArraySize; j++) {
            printf("\nGive element [%d][%d]=", i, j);
            if (scanf("%d", &sddArray[i * sddArraySize + j]) != 1) {
                printf("Invalid matrix element!\n");
                free(sddArray);
                free(sddMaxArray);
                return 1;
            }
        }
    }

    // At least one iteration per chunk even when N < numThreads.
    chunk = sddArraySize / numThreads;
    if (chunk == 0) chunk = 1;

    // Measure the whole processing pipeline.
    start = omp_get_wtime();
    if (!strictlyDiagonallyDominant(sddArray, sddArraySize, chunk)) {
        // Same exit status as before, but without leaking the buffers.
        free(sddArray);
        free(sddMaxArray);
        return 1;
    }
    max = maxInDiagonal(sddArray, sddArraySize);
    createNewArray(sddArray, sddMaxArray, sddArraySize, chunk, max);
    minInDiagonalWithReduction(sddMaxArray, sddArraySize);
    minInDiagonalWithCriticalArea(sddMaxArray, sddArraySize, chunk);
    end = omp_get_wtime();
    timeInMilliseconds = (end - start) * 1000;

    printf("\n-----------------------------------------\n");
    printf("Functions executed in %.4f milliseconds\n", timeInMilliseconds);
    printf("-----------------------------------------\n");

    // Append the measurement so repeated runs can be compared.
    FILE *f = fopen("results.txt", "a");
    if (f == NULL) {
        printf("Error opening file!\n");
        free(sddArray);
        free(sddMaxArray);
        return 1;
    }
    fprintf(f, "N: %d\t Threads: %d\t Time: %.4f ms\n", sddArraySize, numThreads, timeInMilliseconds);
    fclose(f);

    free(sddArray);
    free(sddMaxArray);
    return 0;
}
|
Sema.h | //===--- Sema.h - Semantic Analysis & AST Building --------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the Sema class, which performs semantic analysis and
// builds ASTs.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_SEMA_SEMA_H
#define LLVM_CLANG_SEMA_SEMA_H
#include "clang/AST/Attr.h"
#include "clang/AST/Availability.h"
#include "clang/AST/ComparisonCategories.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/DeclarationName.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ExternalASTSource.h"
#include "clang/AST/LocInfoType.h"
#include "clang/AST/MangleNumberingContext.h"
#include "clang/AST/NSAPI.h"
#include "clang/AST/PrettyPrinter.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/TypeLoc.h"
#include "clang/AST/TypeOrdering.h"
#include "clang/Basic/ExpressionTraits.h"
#include "clang/Basic/Module.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/PragmaKinds.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Basic/TemplateKinds.h"
#include "clang/Basic/TypeTraits.h"
#include "clang/Sema/AnalysisBasedWarnings.h"
#include "clang/Sema/CleanupInfo.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/ExternalSemaSource.h"
#include "clang/Sema/IdentifierResolver.h"
#include "clang/Sema/ObjCMethodList.h"
#include "clang/Sema/Ownership.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/TypoCorrection.h"
#include "clang/Sema/Weak.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/TinyPtrVector.h"
#include <deque>
#include <memory>
#include <string>
#include <vector>
namespace llvm {
class APSInt;
template <typename ValueT> struct DenseMapInfo;
template <typename ValueT, typename ValueInfoT> class DenseSet;
class SmallBitVector;
struct InlineAsmIdentifierInfo;
}
namespace clang {
class ADLResult;
class ASTConsumer;
class ASTContext;
class ASTMutationListener;
class ASTReader;
class ASTWriter;
class ArrayType;
class ParsedAttr;
class BindingDecl;
class BlockDecl;
class CapturedDecl;
class CXXBasePath;
class CXXBasePaths;
class CXXBindTemporaryExpr;
typedef SmallVector<CXXBaseSpecifier*, 4> CXXCastPath;
class CXXConstructorDecl;
class CXXConversionDecl;
class CXXDeleteExpr;
class CXXDestructorDecl;
class CXXFieldCollector;
class CXXMemberCallExpr;
class CXXMethodDecl;
class CXXScopeSpec;
class CXXTemporary;
class CXXTryStmt;
class CallExpr;
class ClassTemplateDecl;
class ClassTemplatePartialSpecializationDecl;
class ClassTemplateSpecializationDecl;
class VarTemplatePartialSpecializationDecl;
class CodeCompleteConsumer;
class CodeCompletionAllocator;
class CodeCompletionTUInfo;
class CodeCompletionResult;
class CoroutineBodyStmt;
class Decl;
class DeclAccessPair;
class DeclContext;
class DeclRefExpr;
class DeclaratorDecl;
class DeducedTemplateArgument;
class DependentDiagnostic;
class DesignatedInitExpr;
class Designation;
class EnableIfAttr;
class EnumConstantDecl;
class Expr;
class ExtVectorType;
class FormatAttr;
class FriendDecl;
class FunctionDecl;
class FunctionProtoType;
class FunctionTemplateDecl;
class ImplicitConversionSequence;
typedef MutableArrayRef<ImplicitConversionSequence> ConversionSequenceList;
class InitListExpr;
class InitializationKind;
class InitializationSequence;
class InitializedEntity;
class IntegerLiteral;
class LabelStmt;
class LambdaExpr;
class LangOptions;
class LocalInstantiationScope;
class LookupResult;
class MacroInfo;
typedef ArrayRef<std::pair<IdentifierInfo *, SourceLocation>> ModuleIdPath;
class ModuleLoader;
class MultiLevelTemplateArgumentList;
class NamedDecl;
class ObjCCategoryDecl;
class ObjCCategoryImplDecl;
class ObjCCompatibleAliasDecl;
class ObjCContainerDecl;
class ObjCImplDecl;
class ObjCImplementationDecl;
class ObjCInterfaceDecl;
class ObjCIvarDecl;
template <class T> class ObjCList;
class ObjCMessageExpr;
class ObjCMethodDecl;
class ObjCPropertyDecl;
class ObjCProtocolDecl;
class OMPThreadPrivateDecl;
class OMPRequiresDecl;
class OMPDeclareReductionDecl;
class OMPDeclareSimdDecl;
class OMPClause;
struct OMPVarListLocTy;
struct OverloadCandidate;
class OverloadCandidateSet;
class OverloadExpr;
class ParenListExpr;
class ParmVarDecl;
class Preprocessor;
class PseudoDestructorTypeStorage;
class PseudoObjectExpr;
class QualType;
class StandardConversionSequence;
class Stmt;
class StringLiteral;
class SwitchStmt;
class TemplateArgument;
class TemplateArgumentList;
class TemplateArgumentLoc;
class TemplateDecl;
class TemplateInstantiationCallback;
class TemplateParameterList;
class TemplatePartialOrderingContext;
class TemplateTemplateParmDecl;
class Token;
class TypeAliasDecl;
class TypedefDecl;
class TypedefNameDecl;
class TypeLoc;
class TypoCorrectionConsumer;
class UnqualifiedId;
class UnresolvedLookupExpr;
class UnresolvedMemberExpr;
class UnresolvedSetImpl;
class UnresolvedSetIterator;
class UsingDecl;
class UsingShadowDecl;
class ValueDecl;
class VarDecl;
class VarTemplateSpecializationDecl;
class VisibilityAttr;
class VisibleDeclConsumer;
class IndirectFieldDecl;
struct DeductionFailureInfo;
class TemplateSpecCandidateSet;
namespace sema {
class AccessedEntity;
class BlockScopeInfo;
class Capture;
class CapturedRegionScopeInfo;
class CapturingScopeInfo;
class CompoundScopeInfo;
class DelayedDiagnostic;
class DelayedDiagnosticPool;
class FunctionScopeInfo;
class LambdaScopeInfo;
class PossiblyUnreachableDiag;
class SemaPPCallbacks;
class TemplateDeductionInfo;
}
namespace threadSafety {
class BeforeSet;
void threadSafetyCleanup(BeforeSet* Cache);
}
// FIXME: No way to easily map from TemplateTypeParmTypes to
// TemplateTypeParmDecls, so we have this horrible PointerUnion.
typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType*, NamedDecl*>,
SourceLocation> UnexpandedParameterPack;
/// Describes whether we've seen any nullability information for the given
/// file.
struct FileNullability {
  /// The first pointer declarator (of any pointer kind) in the file that does
  /// not have a corresponding nullability annotation.
  SourceLocation PointerLoc;

  /// The end location for the first pointer declarator in the file. Used for
  /// placing fix-its.
  SourceLocation PointerEndLoc;

  /// Which kind of pointer declarator we saw.
  /// NOTE(review): stored narrowed to uint8_t to keep the struct small;
  /// presumably holds a pointer-declarator enum defined elsewhere — confirm.
  uint8_t PointerKind;

  /// Whether we saw any type nullability annotations in the given file.
  bool SawTypeNullability = false;
};
/// A mapping from file IDs to a record of whether we've seen nullability
/// information in that file.
class FileNullabilityMap {
  /// A mapping from file IDs to the nullability information for each file ID.
  llvm::DenseMap<FileID, FileNullability> Map;

  /// A single-element cache based on the file ID.
  struct {
    FileID File;
    FileNullability Nullability;
  } Cache;

public:
  FileNullability &operator[](FileID file) {
    // Fast path: the requested file is the one we handed out last.
    if (file == Cache.File)
      return Cache.Nullability;

    // Before evicting the cached entry, write it back into the map.  An
    // invalid FileID means the cache has never been filled yet.
    if (!Cache.File.isInvalid())
      Map[Cache.File] = Cache.Nullability;

    // Load the requested entry into the cache and hand out the cached copy.
    Cache.File = file;
    Cache.Nullability = Map[file];
    return Cache.Nullability;
  }
};
/// Keeps track of expected type during expression parsing. The type is tied to
/// a particular token, all functions that update or consume the type take a
/// start location of the token they are looking at as a parameter. This allows
/// to avoid updating the type on hot paths in the parser.
class PreferredTypeBuilder {
public:
  PreferredTypeBuilder() = default;
  explicit PreferredTypeBuilder(QualType Type) : Type(Type) {}

  void enterCondition(Sema &S, SourceLocation Tok);
  void enterReturn(Sema &S, SourceLocation Tok);
  void enterVariableInit(SourceLocation Tok, Decl *D);
  /// Computing a type for the function argument may require running
  /// overloading, so we postpone its computation until it is actually needed.
  ///
  /// Clients should be very careful when using this function, as it stores a
  /// function_ref, clients should make sure all calls to get() with the same
  /// location happen while function_ref is alive.
  void enterFunctionArgument(SourceLocation Tok,
                             llvm::function_ref<QualType()> ComputeType);

  void enterParenExpr(SourceLocation Tok, SourceLocation LParLoc);
  void enterUnary(Sema &S, SourceLocation Tok, tok::TokenKind OpKind,
                  SourceLocation OpLoc);
  void enterBinary(Sema &S, SourceLocation Tok, Expr *LHS, tok::TokenKind Op);
  void enterMemAccess(Sema &S, SourceLocation Tok, Expr *Base);
  void enterSubscript(Sema &S, SourceLocation Tok, Expr *LHS);
  /// Handles all type casts, including C-style cast, C++ casts, etc.
  void enterTypeCast(SourceLocation Tok, QualType CastType);

  /// Returns the type recorded for the token starting at \p Tok, or a null
  /// QualType when \p Tok is not the token the type was recorded for.
  QualType get(SourceLocation Tok) const {
    if (Tok != ExpectedLoc)
      return QualType();
    if (!Type.isNull())
      return Type;
    // No concrete type was recorded; fall back to the deferred computation
    // (see enterFunctionArgument), if one was provided.
    if (ComputeType)
      return ComputeType();
    return QualType();
  }

private:
  /// Start position of a token for which we store expected type.
  SourceLocation ExpectedLoc;
  /// Expected type for a token starting at ExpectedLoc.
  QualType Type;
  /// A function to compute expected type at ExpectedLoc. It is only considered
  /// if Type is null.
  llvm::function_ref<QualType()> ComputeType;
};
/// Sema - This implements semantic analysis and AST building for C.
class Sema {
Sema(const Sema &) = delete;
void operator=(const Sema &) = delete;
///Source of additional semantic information.
ExternalSemaSource *ExternalSource;
///Whether Sema has generated a multiplexer and has to delete it.
bool isMultiplexExternalSource;
static bool mightHaveNonExternalLinkage(const DeclaratorDecl *FD);
bool isVisibleSlow(const NamedDecl *D);
/// Determine whether two declarations should be linked together, given that
/// the old declaration might not be visible and the new declaration might
/// not have external linkage.
bool shouldLinkPossiblyHiddenDecl(const NamedDecl *Old,
                                  const NamedDecl *New) {
  // A visible previous declaration is always linked against.
  if (isVisible(Old))
    return true;

  // See comment in below overload for why it's safe to compute the linkage
  // of the new declaration here.
  if (New->isExternallyDeclarable()) {
    assert(Old->isExternallyDeclarable() &&
           "should not have found a non-externally-declarable previous decl");
    return true;
  }

  // Old is hidden and New is not externally declarable: keep them separate.
  return false;
}
bool shouldLinkPossiblyHiddenDecl(LookupResult &Old, const NamedDecl *New);
void setupImplicitSpecialMemberType(CXXMethodDecl *SpecialMem,
QualType ResultTy,
ArrayRef<QualType> Args);
public:
typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy;
typedef OpaquePtr<TemplateName> TemplateTy;
typedef OpaquePtr<QualType> TypeTy;
OpenCLOptions OpenCLFeatures;
FPOptions FPFeatures;
const LangOptions &LangOpts;
Preprocessor &PP;
ASTContext &Context;
ASTConsumer &Consumer;
DiagnosticsEngine &Diags;
SourceManager &SourceMgr;
/// Flag indicating whether or not to collect detailed statistics.
bool CollectStats;
/// Code-completion consumer.
CodeCompleteConsumer *CodeCompleter;
/// CurContext - This is the current declaration context of parsing.
DeclContext *CurContext;
/// Generally null except when we temporarily switch decl contexts,
/// like in \see ActOnObjCTemporaryExitContainerContext.
DeclContext *OriginalLexicalContext;
/// VAListTagName - The declaration name corresponding to __va_list_tag.
/// This is used as part of a hack to omit that class from ADL results.
DeclarationName VAListTagName;
bool MSStructPragmaOn; // True when \#pragma ms_struct on
/// Controls member pointer representation format under the MS ABI.
LangOptions::PragmaMSPointersToMembersKind
MSPointerToMemberRepresentationMethod;
/// Stack of active SEH __finally scopes. Can be empty.
SmallVector<Scope*, 2> CurrentSEHFinally;
/// Source location for newly created implicit MSInheritanceAttrs
SourceLocation ImplicitMSInheritanceAttrLoc;
/// pragma clang section kind
enum PragmaClangSectionKind {
PCSK_Invalid = 0,
PCSK_BSS = 1,
PCSK_Data = 2,
PCSK_Rodata = 3,
PCSK_Text = 4
};
enum PragmaClangSectionAction {
PCSA_Set = 0,
PCSA_Clear = 1
};
/// State for one kind of '#pragma clang section' (bss/data/rodata/text).
struct PragmaClangSection {
  std::string SectionName;        // section name supplied by the pragma
  bool Valid = false;             // NOTE(review): presumably true while a
                                  // section override is in effect — confirm
  SourceLocation PragmaLocation;  // where the most recent pragma appeared

  /// Process one pragma occurrence (PCSA_Set records \p Name as the active
  /// section, PCSA_Clear ends it).  Implemented elsewhere.
  void Act(SourceLocation PragmaLocation,
           PragmaClangSectionAction Action,
           StringLiteral* Name);
};
PragmaClangSection PragmaClangBSSSection;
PragmaClangSection PragmaClangDataSection;
PragmaClangSection PragmaClangRodataSection;
PragmaClangSection PragmaClangTextSection;
enum PragmaMsStackAction {
PSK_Reset = 0x0, // #pragma ()
PSK_Set = 0x1, // #pragma (value)
PSK_Push = 0x2, // #pragma (push[, id])
PSK_Pop = 0x4, // #pragma (pop[, id])
PSK_Show = 0x8, // #pragma (show) -- only for "pack"!
PSK_Push_Set = PSK_Push | PSK_Set, // #pragma (push[, id], value)
PSK_Pop_Set = PSK_Pop | PSK_Set, // #pragma (pop[, id], value)
};
/// Generic stack of values controlled by push/pop-style #pragmas (pack,
/// vtordisp, the MS segment pragmas, ...).  Each push saves a Slot that a
/// matching pop restores.
template<typename ValueType>
struct PragmaStack {
  /// One saved value, tagged with the label used in #pragma(push, label)
  /// plus the locations of the pragma itself and of the push.
  struct Slot {
    llvm::StringRef StackSlotLabel;
    ValueType Value;
    SourceLocation PragmaLocation;
    SourceLocation PragmaPushLocation;
    Slot(llvm::StringRef StackSlotLabel, ValueType Value,
         SourceLocation PragmaLocation, SourceLocation PragmaPushLocation)
        : StackSlotLabel(StackSlotLabel), Value(Value),
          PragmaLocation(PragmaLocation),
          PragmaPushLocation(PragmaPushLocation) {}
  };

  /// Apply one pragma action (set/push/pop/...) to this stack.
  /// Implemented elsewhere.
  void Act(SourceLocation PragmaLocation,
           PragmaMsStackAction Action,
           llvm::StringRef StackSlotLabel,
           ValueType Value);

  // MSVC seems to add artificial slots to #pragma stacks on entering a C++
  // method body to restore the stacks on exit, so it works like this:
  //
  //   struct S {
  //     #pragma <name>(push, InternalPragmaSlot, <current_pragma_value>)
  //     void Method {}
  //     #pragma <name>(pop, InternalPragmaSlot)
  //   };
  //
  // It works even with #pragma vtordisp, although MSVC doesn't support
  //   #pragma vtordisp(push [, id], n)
  // syntax.
  //
  // Push / pop a named sentinel slot.
  void SentinelAction(PragmaMsStackAction Action, StringRef Label) {
    assert((Action == PSK_Push || Action == PSK_Pop) &&
           "Can only push / pop #pragma stack sentinels!");
    Act(CurrentPragmaLocation, Action, Label, CurrentValue);
  }

  // Constructors.
  explicit PragmaStack(const ValueType &Default)
      : DefaultValue(Default), CurrentValue(Default) {}

  /// True when the current value differs from the pragma-free default.
  bool hasValue() const { return CurrentValue != DefaultValue; }

  SmallVector<Slot, 2> Stack;
  ValueType DefaultValue; // Value used for PSK_Reset action.
  ValueType CurrentValue;
  SourceLocation CurrentPragmaLocation;
};
// FIXME: We should serialize / deserialize these if they occur in a PCH (but
// we shouldn't do so if they're in a module).
/// Whether to insert vtordisps prior to virtual bases in the Microsoft
/// C++ ABI. Possible values are 0, 1, and 2, which mean:
///
/// 0: Suppress all vtordisps
/// 1: Insert vtordisps in the presence of vbase overrides and non-trivial
/// structors
/// 2: Always insert vtordisps to support RTTI on partially constructed
/// objects
PragmaStack<MSVtorDispAttr::Mode> VtorDispStack;
// #pragma pack.
// Sentinel to represent when the stack is set to mac68k alignment.
static const unsigned kMac68kAlignmentSentinel = ~0U;
PragmaStack<unsigned> PackStack;
// The current #pragma pack values and locations at each #include.
struct PackIncludeState {
unsigned CurrentValue;
SourceLocation CurrentPragmaLocation;
bool HasNonDefaultValue, ShouldWarnOnInclude;
};
SmallVector<PackIncludeState, 8> PackIncludeStack;
// Segment #pragmas.
PragmaStack<StringLiteral *> DataSegStack;
PragmaStack<StringLiteral *> BSSSegStack;
PragmaStack<StringLiteral *> ConstSegStack;
PragmaStack<StringLiteral *> CodeSegStack;
// RAII object to push / pop sentinel slots for all MS #pragma stacks.
// Actions should be performed only if we enter / exit a C++ method body.
class PragmaStackSentinelRAII {
public:
PragmaStackSentinelRAII(Sema &S, StringRef SlotLabel, bool ShouldAct);
~PragmaStackSentinelRAII();
private:
Sema &S;
StringRef SlotLabel;
bool ShouldAct;
};
/// A mapping that describes the nullability we've seen in each header file.
FileNullabilityMap NullabilityMap;
/// Last section used with #pragma init_seg.
StringLiteral *CurInitSeg;
SourceLocation CurInitSegLoc;
/// VisContext - Manages the stack for \#pragma GCC visibility.
void *VisContext; // Really a "PragmaVisStack*"
/// This an attribute introduced by \#pragma clang attribute.
struct PragmaAttributeEntry {
SourceLocation Loc;
ParsedAttr *Attribute;
SmallVector<attr::SubjectMatchRule, 4> MatchRules;
bool IsUsed;
};
/// A push'd group of PragmaAttributeEntries.
struct PragmaAttributeGroup {
/// The location of the push attribute.
SourceLocation Loc;
/// The namespace of this push group.
const IdentifierInfo *Namespace;
SmallVector<PragmaAttributeEntry, 2> Entries;
};
SmallVector<PragmaAttributeGroup, 2> PragmaAttributeStack;
/// The declaration that is currently receiving an attribute from the
/// #pragma attribute stack.
const Decl *PragmaAttributeCurrentTargetDecl;
/// This represents the last location of a "#pragma clang optimize off"
/// directive if such a directive has not been closed by an "on" yet. If
/// optimizations are currently "on", this is set to an invalid location.
SourceLocation OptimizeOffPragmaLocation;
/// Flag indicating if Sema is building a recovery call expression.
///
/// This flag is used to avoid building recovery call expressions
/// if Sema is already doing so, which would cause infinite recursions.
bool IsBuildingRecoveryCallExpr;
/// Used to control the generation of ExprWithCleanups.
CleanupInfo Cleanup;
/// ExprCleanupObjects - This is the stack of objects requiring
/// cleanup that are created by the current full expression. The
/// element type here is ExprWithCleanups::Object.
SmallVector<BlockDecl*, 8> ExprCleanupObjects;
/// Store a set of either DeclRefExprs or MemberExprs that contain a reference
/// to a variable (constant) that may or may not be odr-used in this Expr, and
/// we won't know until all lvalue-to-rvalue and discarded value conversions
/// have been applied to all subexpressions of the enclosing full expression.
/// This is cleared at the end of each full expression.
using MaybeODRUseExprSet = llvm::SmallPtrSet<Expr *, 2>;
MaybeODRUseExprSet MaybeODRUseExprs;
std::unique_ptr<sema::FunctionScopeInfo> PreallocatedFunctionScope;
/// Stack containing information about each of the nested
/// function, block, and method scopes that are currently active.
SmallVector<sema::FunctionScopeInfo *, 4> FunctionScopes;
typedef LazyVector<TypedefNameDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadExtVectorDecls, 2, 2>
ExtVectorDeclsType;
/// ExtVectorDecls - This is a list all the extended vector types. This allows
/// us to associate a raw vector type with one of the ext_vector type names.
/// This is only necessary for issuing pretty diagnostics.
ExtVectorDeclsType ExtVectorDecls;
/// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes.
std::unique_ptr<CXXFieldCollector> FieldCollector;
typedef llvm::SmallSetVector<NamedDecl *, 16> NamedDeclSetType;
/// Set containing all declared private fields that are not used.
NamedDeclSetType UnusedPrivateFields;
/// Set containing all typedefs that are likely unused.
llvm::SmallSetVector<const TypedefNameDecl *, 4>
UnusedLocalTypedefNameCandidates;
/// Delete-expressions to be analyzed at the end of translation unit
///
/// This list contains class members, and locations of delete-expressions
/// that could not be proven as to whether they mismatch with new-expression
/// used in initializer of the field.
typedef std::pair<SourceLocation, bool> DeleteExprLoc;
typedef llvm::SmallVector<DeleteExprLoc, 4> DeleteLocs;
llvm::MapVector<FieldDecl *, DeleteLocs> DeleteExprs;
typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy;
/// PureVirtualClassDiagSet - a set of class declarations which we have
/// emitted a list of pure virtual functions. Used to prevent emitting the
/// same list more than once.
std::unique_ptr<RecordDeclSetTy> PureVirtualClassDiagSet;
/// ParsingInitForAutoVars - a set of declarations with auto types for which
/// we are currently parsing the initializer.
llvm::SmallPtrSet<const Decl*, 4> ParsingInitForAutoVars;
/// Look for a locally scoped extern "C" declaration by the given name.
NamedDecl *findLocallyScopedExternCDecl(DeclarationName Name);
typedef LazyVector<VarDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadTentativeDefinitions, 2, 2>
TentativeDefinitionsType;
/// All the tentative definitions encountered in the TU.
TentativeDefinitionsType TentativeDefinitions;
typedef LazyVector<const DeclaratorDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadUnusedFileScopedDecls, 2, 2>
UnusedFileScopedDeclsType;
/// The set of file scoped decls seen so far that have not been used
/// and must warn if not used. Only contains the first declaration.
UnusedFileScopedDeclsType UnusedFileScopedDecls;
typedef LazyVector<CXXConstructorDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadDelegatingConstructors, 2, 2>
DelegatingCtorDeclsType;
/// All the delegating constructors seen so far in the file, used for
/// cycle detection at the end of the TU.
DelegatingCtorDeclsType DelegatingCtorDecls;
/// All the overriding functions seen during a class definition
/// that had their exception spec checks delayed, plus the overridden
/// function.
SmallVector<std::pair<const CXXMethodDecl*, const CXXMethodDecl*>, 2>
DelayedOverridingExceptionSpecChecks;
/// All the function redeclarations seen during a class definition that had
/// their exception spec checks delayed, plus the prior declaration they
/// should be checked against. Except during error recovery, the new decl
/// should always be a friend declaration, as that's the only valid way to
/// redeclare a special member before its class is complete.
SmallVector<std::pair<FunctionDecl*, FunctionDecl*>, 2>
DelayedEquivalentExceptionSpecChecks;
/// All the members seen during a class definition which were both
/// explicitly defaulted and had explicitly-specified exception
/// specifications, along with the function type containing their
/// user-specified exception specification. Those exception specifications
/// were overridden with the default specifications, but we still need to
/// check whether they are compatible with the default specification, and
/// we can't do that until the nesting set of class definitions is complete.
SmallVector<std::pair<CXXMethodDecl*, const FunctionProtoType*>, 2>
DelayedDefaultedMemberExceptionSpecs;
typedef llvm::MapVector<const FunctionDecl *,
std::unique_ptr<LateParsedTemplate>>
LateParsedTemplateMapT;
LateParsedTemplateMapT LateParsedTemplateMap;
/// Callback to the parser to parse templated functions when needed.
typedef void LateTemplateParserCB(void *P, LateParsedTemplate &LPT);
typedef void LateTemplateParserCleanupCB(void *P);
LateTemplateParserCB *LateTemplateParser;
LateTemplateParserCleanupCB *LateTemplateParserCleanup;
void *OpaqueParser;
/// Install the callbacks the parser registers for late (delayed)
/// template parsing, together with the opaque parser object that is
/// handed back to those callbacks.
void SetLateTemplateParser(LateTemplateParserCB *LTP,
                           LateTemplateParserCleanupCB *LTPCleanup,
                           void *P) {
  OpaqueParser = P;
  LateTemplateParser = LTP;
  LateTemplateParserCleanup = LTPCleanup;
}
class DelayedDiagnostics;
/// Opaque saved state for DelayedDiagnostics: returned by its
/// push()/pushUndelayed() and consumed by the matching pop calls.
class DelayedDiagnosticsState {
// The pool that was active before the push; restored by the pop.
sema::DelayedDiagnosticPool *SavedPool;
friend class Sema::DelayedDiagnostics;
};
typedef DelayedDiagnosticsState ParsingDeclState;
typedef DelayedDiagnosticsState ProcessingContextState;
/// Encapsulates the logic for delaying diagnostics during parsing and
/// other processing: while a pool is active, access and deprecation
/// diagnostics are collected into it instead of being emitted.
class DelayedDiagnostics {
  /// The pool currently collecting delayed diagnostics, or null when
  /// diagnostics are emitted immediately.
  sema::DelayedDiagnosticPool *CurPool;

public:
  DelayedDiagnostics() : CurPool(nullptr) {}

  /// Adds a delayed diagnostic.
  void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h

  /// True when a pool is active, i.e. diagnostics must be delayed.
  bool shouldDelayDiagnostics() { return CurPool != nullptr; }

  /// Returns the pool currently collecting diagnostics (may be null).
  sema::DelayedDiagnosticPool *getCurrentPool() const { return CurPool; }

  /// Activate \p pool: until the returned state is popped, access and
  /// deprecation diagnostics are collected into it.
  DelayedDiagnosticsState push(sema::DelayedDiagnosticPool &pool) {
    DelayedDiagnosticsState Saved;
    Saved.SavedPool = CurPool;
    CurPool = &pool;
    return Saved;
  }

  /// Leave a delayed-diagnostic state that was previously pushed,
  /// without emitting any of the collected diagnostics. This is the
  /// bookkeeping half of popping a pool "properly".
  void popWithoutEmitting(DelayedDiagnosticsState state) {
    CurPool = state.SavedPool;
  }

  /// Enter a scope in which access and deprecation diagnostics are not
  /// delayed at all.
  DelayedDiagnosticsState pushUndelayed() {
    DelayedDiagnosticsState Saved;
    Saved.SavedPool = CurPool;
    CurPool = nullptr;
    return Saved;
  }

  /// Undo a previous pushUndelayed().
  void popUndelayed(DelayedDiagnosticsState state) {
    assert(CurPool == nullptr);
    CurPool = state.SavedPool;
  }
} DelayedDiagnostics;
/// RAII object that temporarily makes \p ContextToPush the current
/// declaration context, undelaying diagnostics for the duration and
/// (optionally) resetting the C++ 'this' type override.
class ContextRAII {
private:
  Sema &S;
  DeclContext *SavedContext;           // null once pop() has run
  ProcessingContextState SavedContextState;
  QualType SavedCXXThisTypeOverride;

public:
  ContextRAII(Sema &S, DeclContext *ContextToPush, bool NewThisContext = true)
      : S(S), SavedContext(S.CurContext),
        SavedContextState(S.DelayedDiagnostics.pushUndelayed()),
        SavedCXXThisTypeOverride(S.CXXThisTypeOverride) {
    assert(ContextToPush && "pushing null context");
    S.CurContext = ContextToPush;
    if (NewThisContext)
      S.CXXThisTypeOverride = QualType();
  }

  /// Restore the saved state early; later calls (including the one
  /// from the destructor) are no-ops.
  void pop() {
    if (SavedContext) {
      S.CurContext = SavedContext;
      S.DelayedDiagnostics.popUndelayed(SavedContextState);
      S.CXXThisTypeOverride = SavedCXXThisTypeOverride;
      SavedContext = nullptr;
    }
  }

  ~ContextRAII() { pop(); }
};
/// RAII object to handle the state changes required to synthesize
/// a function body: on construction it pushes a declaration context,
/// a function scope, and a potentially-evaluated expression context;
/// the destructor pops them in reverse order.
class SynthesizedFunctionScope {
Sema &S;
// Also switches S.CurContext to DC for the lifetime of this object.
Sema::ContextRAII SavedContext;
// Set by addContextNote(); tells the destructor whether a code
// synthesis context must also be popped.
bool PushedCodeSynthesisContext = false;
public:
// \p DC must be a FunctionDecl or an ObjCMethodDecl (asserted below).
SynthesizedFunctionScope(Sema &S, DeclContext *DC)
: S(S), SavedContext(S, DC) {
S.PushFunctionScope();
S.PushExpressionEvaluationContext(
Sema::ExpressionEvaluationContext::PotentiallyEvaluated);
// Mark the function as about to receive a body; undone in the dtor.
if (auto *FD = dyn_cast<FunctionDecl>(DC))
FD->setWillHaveBody(true);
else
assert(isa<ObjCMethodDecl>(DC));
}
/// Push a DefiningSynthesizedFunction note pointing at \p UseLoc (the
/// location whose use forced this synthesis). May be called at most
/// once per scope.
void addContextNote(SourceLocation UseLoc) {
assert(!PushedCodeSynthesisContext);
Sema::CodeSynthesisContext Ctx;
Ctx.Kind = Sema::CodeSynthesisContext::DefiningSynthesizedFunction;
Ctx.PointOfInstantiation = UseLoc;
Ctx.Entity = cast<Decl>(S.CurContext);
S.pushCodeSynthesisContext(Ctx);
PushedCodeSynthesisContext = true;
}
~SynthesizedFunctionScope() {
if (PushedCodeSynthesisContext)
S.popCodeSynthesisContext();
// Undo the setWillHaveBody(true) from the constructor.
if (auto *FD = dyn_cast<FunctionDecl>(S.CurContext))
FD->setWillHaveBody(false);
S.PopExpressionEvaluationContext();
S.PopFunctionScopeInfo();
}
};
/// WeakUndeclaredIdentifiers - Identifiers contained in
/// \#pragma weak before declared. rare. may alias another
/// identifier, declared or undeclared
llvm::MapVector<IdentifierInfo *, WeakInfo> WeakUndeclaredIdentifiers;
/// ExtnameUndeclaredIdentifiers - Identifiers contained in
/// \#pragma redefine_extname before declared. Used in Solaris system headers
/// to define functions that occur in multiple standards to call the version
/// in the currently selected standard.
llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*> ExtnameUndeclaredIdentifiers;
/// Load weak undeclared identifiers from the external source.
void LoadExternalWeakUndeclaredIdentifiers();
/// WeakTopLevelDecl - Translation-unit scoped declarations generated by
/// \#pragma weak during processing of other Decls.
/// I couldn't figure out a clean way to generate these in-line, so
/// we store them here and handle separately -- which is a hack.
/// It would be best to refactor this.
SmallVector<Decl*,2> WeakTopLevelDecl;
IdentifierResolver IdResolver;
/// Translation Unit Scope - useful to Objective-C actions that need
/// to lookup file scope declarations in the "ordinary" C decl namespace.
/// For example, user-defined classes, built-in "id" type, etc.
Scope *TUScope;
/// The C++ "std" namespace, where the standard library resides.
LazyDeclPtr StdNamespace;
/// The C++ "std::bad_alloc" class, which is defined by the C++
/// standard library.
LazyDeclPtr StdBadAlloc;
/// The C++ "std::align_val_t" enum class, which is defined by the C++
/// standard library.
LazyDeclPtr StdAlignValT;
/// The C++ "std::experimental" namespace, where the experimental parts
/// of the standard library resides.
NamespaceDecl *StdExperimentalNamespaceCache;
/// The C++ "std::initializer_list" template, which is defined in
/// \<initializer_list>.
ClassTemplateDecl *StdInitializerList;
/// The C++ "std::coroutine_traits" template, which is defined in
/// \<coroutine_traits>
ClassTemplateDecl *StdCoroutineTraitsCache;
/// The C++ "type_info" declaration, which is defined in \<typeinfo>.
RecordDecl *CXXTypeInfoDecl;
/// The MSVC "_GUID" struct, which is defined in MSVC header files.
RecordDecl *MSVCGuidDecl;
/// Caches identifiers/selectors for NSFoundation APIs.
std::unique_ptr<NSAPI> NSAPIObj;
/// The declaration of the Objective-C NSNumber class.
ObjCInterfaceDecl *NSNumberDecl;
/// The declaration of the Objective-C NSValue class.
ObjCInterfaceDecl *NSValueDecl;
/// Pointer to NSNumber type (NSNumber *).
QualType NSNumberPointer;
/// Pointer to NSValue type (NSValue *).
QualType NSValuePointer;
/// The Objective-C NSNumber methods used to create NSNumber literals.
ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods];
/// The declaration of the Objective-C NSString class.
ObjCInterfaceDecl *NSStringDecl;
/// Pointer to NSString type (NSString *).
QualType NSStringPointer;
/// The declaration of the stringWithUTF8String: method.
ObjCMethodDecl *StringWithUTF8StringMethod;
/// The declaration of the valueWithBytes:objCType: method.
ObjCMethodDecl *ValueWithBytesObjCTypeMethod;
/// The declaration of the Objective-C NSArray class.
ObjCInterfaceDecl *NSArrayDecl;
/// The declaration of the arrayWithObjects:count: method.
ObjCMethodDecl *ArrayWithObjectsMethod;
/// The declaration of the Objective-C NSDictionary class.
ObjCInterfaceDecl *NSDictionaryDecl;
/// The declaration of the dictionaryWithObjects:forKeys:count: method.
ObjCMethodDecl *DictionaryWithObjectsMethod;
/// id<NSCopying> type.
QualType QIDNSCopying;
/// will hold 'respondsToSelector:'
Selector RespondsToSelectorSel;
/// A flag to remember whether the implicit forms of operator new and delete
/// have been declared.
bool GlobalNewDeleteDeclared;
/// A flag to indicate that we're in a context that permits abstract
/// references to fields. This is really a
bool AllowAbstractFieldReference;
/// Describes how the expressions currently being parsed are
/// evaluated at run-time, if at all.
///
/// Stored in the Context field of ExpressionEvaluationContextRecord,
/// which Sema keeps a stack of (ExprEvalContexts).
enum class ExpressionEvaluationContext {
/// The current expression and its subexpressions occur within an
/// unevaluated operand (C++11 [expr]p7), such as the subexpression of
/// \c sizeof, where the type of the expression may be significant but
/// no code will be generated to evaluate the value of the expression at
/// run time.
Unevaluated,
/// The current expression occurs within a braced-init-list within
/// an unevaluated operand. This is mostly like a regular unevaluated
/// context, except that we still instantiate constexpr functions that are
/// referenced here so that we can perform narrowing checks correctly.
UnevaluatedList,
/// The current expression occurs within a discarded statement.
/// This behaves largely similarly to an unevaluated operand in preventing
/// definitions from being required, but not in other ways.
DiscardedStatement,
/// The current expression occurs within an unevaluated
/// operand that unconditionally permits abstract references to
/// fields, such as a SIZE operator in MS-style inline assembly.
UnevaluatedAbstract,
/// The current context is "potentially evaluated" in C++11 terms,
/// but the expression is evaluated at compile-time (like the values of
/// cases in a switch statement).
ConstantEvaluated,
/// The current expression is potentially evaluated at run time,
/// which means that code may be generated to evaluate the value of the
/// expression at run time.
PotentiallyEvaluated,
/// The current expression is potentially evaluated, but any
/// declarations referenced inside that expression are only used if
/// in fact the current expression is used.
///
/// This value is used when parsing default function arguments, for which
/// we would like to provide diagnostics (e.g., passing non-POD arguments
/// through varargs) but do not want to mark declarations as "referenced"
/// until the default argument is used.
PotentiallyEvaluatedIfUsed
};
/// Data structure used to record current or nested
/// expression evaluation contexts.
struct ExpressionEvaluationContextRecord {
  /// The expression evaluation context.
  ExpressionEvaluationContext Context;

  /// Whether the enclosing context needed a cleanup.
  CleanupInfo ParentCleanup;

  /// Whether we are in a decltype expression.
  /// NOTE(review): ExprContext below also records EK_Decltype; confirm
  /// whether this flag is still read anywhere.
  bool IsDecltype;

  /// The number of active cleanup objects when we entered
  /// this expression evaluation context.
  unsigned NumCleanupObjects;

  /// The number of typos encountered during this expression evaluation
  /// context (i.e. the number of TypoExprs created).
  unsigned NumTypos;

  MaybeODRUseExprSet SavedMaybeODRUseExprs;

  /// The lambdas that are present within this context, if it
  /// is indeed an unevaluated context.
  SmallVector<LambdaExpr *, 2> Lambdas;

  /// The declaration that provides context for lambda expressions
  /// and block literals if the normal declaration context does not
  /// suffice, e.g., in a default function argument.
  Decl *ManglingContextDecl;

  /// The context information used to mangle lambda expressions
  /// and block literals within this context.
  ///
  /// This mangling information is allocated lazily, since most contexts
  /// do not have lambda expressions or block literals.
  std::unique_ptr<MangleNumberingContext> MangleNumbering;

  /// If we are processing a decltype type, a set of call expressions
  /// for which we have deferred checking the completeness of the return type.
  SmallVector<CallExpr *, 8> DelayedDecltypeCalls;

  /// If we are processing a decltype type, a set of temporary binding
  /// expressions for which we have deferred checking the destructor.
  SmallVector<CXXBindTemporaryExpr *, 8> DelayedDecltypeBinds;

  llvm::SmallPtrSet<const Expr *, 8> PossibleDerefs;

  /// \brief Describes whether we are in an expression context which we have
  /// to handle differently.
  enum ExpressionKind {
    EK_Decltype, EK_TemplateArgument, EK_Other
  } ExprContext;

  ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context,
                                    unsigned NumCleanupObjects,
                                    CleanupInfo ParentCleanup,
                                    Decl *ManglingContextDecl,
                                    ExpressionKind ExprContext)
      : Context(Context), ParentCleanup(ParentCleanup),
        // Previously left indeterminate; reading an uninitialized bool is
        // UB. Default to false until a caller explicitly sets it.
        IsDecltype(false),
        NumCleanupObjects(NumCleanupObjects), NumTypos(0),
        ManglingContextDecl(ManglingContextDecl), MangleNumbering(),
        ExprContext(ExprContext) {}

  /// Retrieve the mangling numbering context, used to consistently
  /// number constructs like lambdas for mangling.
  MangleNumberingContext &getMangleNumberingContext(ASTContext &Ctx);

  bool isUnevaluated() const {
    return Context == ExpressionEvaluationContext::Unevaluated ||
           Context == ExpressionEvaluationContext::UnevaluatedAbstract ||
           Context == ExpressionEvaluationContext::UnevaluatedList;
  }

  bool isConstantEvaluated() const {
    return Context == ExpressionEvaluationContext::ConstantEvaluated;
  }
};
/// A stack of expression evaluation contexts.
SmallVector<ExpressionEvaluationContextRecord, 8> ExprEvalContexts;
/// Emit a warning for all pending noderef expressions that we recorded.
void WarnOnPendingNoDerefs(ExpressionEvaluationContextRecord &Rec);
/// Compute the mangling number context for a lambda expression or
/// block literal.
///
/// \param DC - The DeclContext containing the lambda expression or
/// block literal.
/// \param[out] ManglingContextDecl - Returns the ManglingContextDecl
/// associated with the context, if relevant.
MangleNumberingContext *getCurrentMangleNumberContext(
const DeclContext *DC,
Decl *&ManglingContextDecl);
/// SpecialMemberOverloadResult - The overloading result for a special member
/// function.
///
/// This is basically a wrapper around PointerIntPair. The lowest bits of the
/// integer are used to determine whether overload resolution succeeded.
class SpecialMemberOverloadResult {
public:
enum Kind {
NoMemberOrDeleted,
Ambiguous,
Success
};
private:
// Pointer: the resolved method (may be null); 2-bit integer: the Kind.
llvm::PointerIntPair<CXXMethodDecl*, 2> Pair;
public:
SpecialMemberOverloadResult() : Pair() {}
// Intentionally non-explicit: allows implicit conversion from a method.
// A deleted method is classified as NoMemberOrDeleted, else Success.
SpecialMemberOverloadResult(CXXMethodDecl *MD)
: Pair(MD, MD->isDeleted() ? NoMemberOrDeleted : Success) {}
CXXMethodDecl *getMethod() const { return Pair.getPointer(); }
void setMethod(CXXMethodDecl *MD) { Pair.setPointer(MD); }
Kind getKind() const { return static_cast<Kind>(Pair.getInt()); }
void setKind(Kind K) { Pair.setInt(K); }
};
/// A SpecialMemberOverloadResult that can live in a FoldingSet,
/// keyed by a precomputed FoldingSetNodeID (see SpecialMemberCache).
class SpecialMemberOverloadResultEntry
: public llvm::FastFoldingSetNode,
public SpecialMemberOverloadResult {
public:
SpecialMemberOverloadResultEntry(const llvm::FoldingSetNodeID &ID)
: FastFoldingSetNode(ID)
{}
};
/// A cache of special member function overload resolution results
/// for C++ records.
llvm::FoldingSet<SpecialMemberOverloadResultEntry> SpecialMemberCache;
/// A cache of the flags available in enumerations with the flag_bits
/// attribute.
mutable llvm::DenseMap<const EnumDecl*, llvm::APInt> FlagBitsCache;
/// The kind of translation unit we are processing.
///
/// When we're processing a complete translation unit, Sema will perform
/// end-of-translation-unit semantic tasks (such as creating
/// initializers for tentative definitions in C) once parsing has
/// completed. Modules and precompiled headers perform different kinds of
/// checks.
TranslationUnitKind TUKind;
llvm::BumpPtrAllocator BumpAlloc;
/// The number of SFINAE diagnostics that have been trapped.
unsigned NumSFINAEErrors;
typedef llvm::DenseMap<ParmVarDecl *, llvm::TinyPtrVector<ParmVarDecl *>>
UnparsedDefaultArgInstantiationsMap;
/// A mapping from parameters with unparsed default arguments to the
/// set of instantiations of each parameter.
///
/// This mapping is a temporary data structure used when parsing
/// nested class templates or nested classes of class templates,
/// where we might end up instantiating an inner class before the
/// default arguments of its methods have been parsed.
UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations;
// Contains the locations of the beginning of unparsed default
// argument locations.
llvm::DenseMap<ParmVarDecl *, SourceLocation> UnparsedDefaultArgLocs;
/// UndefinedInternals - all the used, undefined objects which require a
/// definition in this translation unit.
llvm::MapVector<NamedDecl *, SourceLocation> UndefinedButUsed;
/// Determine if VD, which must be a variable or function, is an external
/// symbol that nonetheless can't be referenced from outside this translation
/// unit because its type has no linkage and it's not extern "C".
bool isExternalWithNoLinkageType(ValueDecl *VD);
/// Obtain a sorted list of functions that are undefined but ODR-used.
void getUndefinedButUsed(
SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined);
/// Retrieves list of suspicious delete-expressions that will be checked at
/// the end of translation unit.
const llvm::MapVector<FieldDecl *, DeleteLocs> &
getMismatchingDeleteExpressions() const;
typedef std::pair<ObjCMethodList, ObjCMethodList> GlobalMethods;
typedef llvm::DenseMap<Selector, GlobalMethods> GlobalMethodPool;
/// Method Pool - allows efficient lookup when typechecking messages to "id".
/// We need to maintain a list, since selectors can have differing signatures
/// across classes. In Cocoa, this happens to be extremely uncommon (only 1%
/// of selectors are "overloaded").
/// At the head of the list it is recorded whether there were 0, 1, or >= 2
/// methods inside categories with a particular selector.
GlobalMethodPool MethodPool;
/// Method selectors used in a \@selector expression. Used for implementation
/// of -Wselector.
llvm::MapVector<Selector, SourceLocation> ReferencedSelectors;
/// Kinds of C++ special members.
/// Note: packed into a 3-bit PointerIntPair (SpecialMemberDecl below),
/// so the enumerator count must not exceed 8.
enum CXXSpecialMember {
CXXDefaultConstructor,
CXXCopyConstructor,
CXXMoveConstructor,
CXXCopyAssignment,
CXXMoveAssignment,
CXXDestructor,
CXXInvalid // sentinel: not a special member
};
typedef llvm::PointerIntPair<CXXRecordDecl *, 3, CXXSpecialMember>
SpecialMemberDecl;
/// The C++ special members which we are currently in the process of
/// declaring. If this process recursively triggers the declaration of the
/// same special member, we should act as if it is not yet declared.
llvm::SmallPtrSet<SpecialMemberDecl, 4> SpecialMembersBeingDeclared;
/// The function definitions which were renamed as part of typo-correction
/// to match their respective declarations. We want to keep track of them
/// to ensure that we don't emit a "redefinition" error if we encounter a
/// correctly named definition after the renamed definition.
llvm::SmallPtrSet<const NamedDecl *, 4> TypoCorrectedFunctionDefinitions;
/// Stack of types that correspond to the parameter entities that are
/// currently being copy-initialized. Can be empty.
llvm::SmallVector<QualType, 4> CurrentParameterCopyTypes;
void ReadMethodPool(Selector Sel);
void updateOutOfDateSelector(Selector Sel);
/// Private Helper predicate to check for 'self'.
bool isSelfExpr(Expr *RExpr);
bool isSelfExpr(Expr *RExpr, const ObjCMethodDecl *Method);
/// Cause the active diagnostic on the DiagnosticsEngine to be
/// emitted. This is closely coupled to the SemaDiagnosticBuilder class and
/// should not be used elsewhere.
void EmitCurrentDiagnostic(unsigned DiagID);
/// Records and restores the FP_CONTRACT state on entry/exit of compound
/// statements.
class FPContractStateRAII {
public:
FPContractStateRAII(Sema &S) : S(S), OldFPFeaturesState(S.FPFeatures) {}
~FPContractStateRAII() { S.FPFeatures = OldFPFeaturesState; }
private:
Sema& S;
FPOptions OldFPFeaturesState;
};
void addImplicitTypedef(StringRef Name, QualType T);
public:
Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
TranslationUnitKind TUKind = TU_Complete,
CodeCompleteConsumer *CompletionConsumer = nullptr);
~Sema();
/// Perform initialization that occurs after the parser has been
/// initialized but before it parses anything.
void Initialize();
const LangOptions &getLangOpts() const { return LangOpts; }
OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; }
FPOptions &getFPOptions() { return FPFeatures; }
DiagnosticsEngine &getDiagnostics() const { return Diags; }
SourceManager &getSourceManager() const { return SourceMgr; }
Preprocessor &getPreprocessor() const { return PP; }
ASTContext &getASTContext() const { return Context; }
ASTConsumer &getASTConsumer() const { return Consumer; }
ASTMutationListener *getASTMutationListener() const;
ExternalSemaSource* getExternalSource() const { return ExternalSource; }
///Registers an external source. If an external source already exists,
/// creates a multiplex external source and appends to it.
///
///\param[in] E - A non-null external sema source.
///
void addExternalSource(ExternalSemaSource *E);
void PrintStats() const;
/// Helper class that creates diagnostics with optional
/// template instantiation stacks.
///
/// This class provides a wrapper around the basic DiagnosticBuilder
/// class that emits diagnostics. SemaDiagnosticBuilder is
/// responsible for emitting the diagnostic (as DiagnosticBuilder
/// does) and, if the diagnostic comes from inside a template
/// instantiation, printing the template instantiation stack as
/// well.
class SemaDiagnosticBuilder : public DiagnosticBuilder {
Sema &SemaRef;
// Remembered so the destructor can hand the id back to Sema.
unsigned DiagID;
public:
SemaDiagnosticBuilder(DiagnosticBuilder &DB, Sema &SemaRef, unsigned DiagID)
: DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) { }
// This is a cunning lie. DiagnosticBuilder actually performs move
// construction in its copy constructor (but due to varied uses, it's not
// possible to conveniently express this as actual move construction). So
// the default copy ctor here is fine, because the base class disables the
// source anyway, so the user-defined ~SemaDiagnosticBuilder is a safe no-op
// in that case anyway.
SemaDiagnosticBuilder(const SemaDiagnosticBuilder&) = default;
~SemaDiagnosticBuilder() {
// If we aren't active, there is nothing to do.
if (!isActive()) return;
// Otherwise, we need to emit the diagnostic. First flush the underlying
// DiagnosticBuilder data, and clear the diagnostic builder itself so it
// won't emit the diagnostic in its own destructor.
//
// This seems wasteful, in that as written the DiagnosticBuilder dtor will
// do its own needless checks to see if the diagnostic needs to be
// emitted. However, because we take care to ensure that the builder
// objects never escape, a sufficiently smart compiler will be able to
// eliminate that code.
FlushCounts();
Clear();
// Dispatch to Sema to emit the diagnostic.
SemaRef.EmitCurrentDiagnostic(DiagID);
}
/// Teach operator<< to produce an object of the correct type.
// Forwards the value to the base DiagnosticBuilder, then returns the
// derived reference so further << calls keep the Sema-aware type.
template<typename T>
friend const SemaDiagnosticBuilder &operator<<(
const SemaDiagnosticBuilder &Diag, const T &Value) {
const DiagnosticBuilder &BaseDiag = Diag;
BaseDiag << Value;
return Diag;
}
};
/// Emit the diagnostic \p DiagID at \p Loc, wrapped so that template
/// instantiation notes can be attached when the builder is destroyed.
SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID) {
  DiagnosticBuilder Builder = Diags.Report(Loc, DiagID);
  return SemaDiagnosticBuilder(Builder, *this, DiagID);
}
/// Emit a partial diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic& PD);
/// Build a partial diagnostic.
PartialDiagnostic PDiag(unsigned DiagID = 0); // in SemaInternal.h
bool findMacroSpelling(SourceLocation &loc, StringRef name);
/// Get a string to suggest for zero-initialization of a type.
std::string
getFixItZeroInitializerForType(QualType T, SourceLocation Loc) const;
std::string getFixItZeroLiteralForType(QualType T, SourceLocation Loc) const;
/// Calls \c Lexer::getLocForEndOfToken()
SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset = 0);
/// Retrieve the module loader associated with the preprocessor.
ModuleLoader &getModuleLoader() const;
void emitAndClearUnusedLocalTypedefWarnings();
void ActOnStartOfTranslationUnit();
void ActOnEndOfTranslationUnit();
void CheckDelegatingCtorCycles();
Scope *getScopeForContext(DeclContext *Ctx);
void PushFunctionScope();
void PushBlockScope(Scope *BlockScope, BlockDecl *Block);
sema::LambdaScopeInfo *PushLambdaScope();
/// This is used to inform Sema what the current TemplateParameterDepth
/// is during Parsing. Currently it is used to pass on the depth
/// when parsing generic lambda 'auto' parameters.
void RecordParsingTemplateParameterDepth(unsigned Depth);
void PushCapturedRegionScope(Scope *RegionScope, CapturedDecl *CD,
RecordDecl *RD,
CapturedRegionKind K);
void
PopFunctionScopeInfo(const sema::AnalysisBasedWarnings::Policy *WP = nullptr,
const Decl *D = nullptr,
const BlockExpr *blkExpr = nullptr);
/// Returns the innermost active function scope, or null when no
/// function, block, or method scope is currently being processed.
sema::FunctionScopeInfo *getCurFunction() const {
  if (FunctionScopes.empty())
    return nullptr;
  return FunctionScopes.back();
}
sema::FunctionScopeInfo *getEnclosingFunction() const;
void setFunctionHasBranchIntoScope();
void setFunctionHasBranchProtectedScope();
void setFunctionHasIndirectGoto();
void PushCompoundScope(bool IsStmtExpr);
void PopCompoundScope();
sema::CompoundScopeInfo &getCurCompoundScope() const;
bool hasAnyUnrecoverableErrorsInThisFunction() const;
/// Retrieve the current block, if any.
sema::BlockScopeInfo *getCurBlock();
/// Retrieve the current lambda scope info, if any.
/// \param IgnoreNonLambdaCapturingScope true if should find the top-most
/// lambda scope info ignoring all inner capturing scopes that are not
/// lambda scopes.
sema::LambdaScopeInfo *
getCurLambda(bool IgnoreNonLambdaCapturingScope = false);
/// Retrieve the current generic lambda info, if any.
sema::LambdaScopeInfo *getCurGenericLambda();
/// Retrieve the current captured region, if any.
sema::CapturedRegionScopeInfo *getCurCapturedRegion();
/// WeakTopLevelDecls - access to the \#pragma weak-generated Decls
/// stored in WeakTopLevelDecl.
SmallVectorImpl<Decl *> &WeakTopLevelDecls() { return WeakTopLevelDecl; }
void ActOnComment(SourceRange Comment);
//===--------------------------------------------------------------------===//
// Type Analysis / Processing: SemaType.cpp.
//
QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs,
const DeclSpec *DS = nullptr);
QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVRA,
const DeclSpec *DS = nullptr);
QualType BuildPointerType(QualType T,
SourceLocation Loc, DeclarationName Entity);
QualType BuildReferenceType(QualType T, bool LValueRef,
SourceLocation Loc, DeclarationName Entity);
QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
Expr *ArraySize, unsigned Quals,
SourceRange Brackets, DeclarationName Entity);
QualType BuildVectorType(QualType T, Expr *VecSize, SourceLocation AttrLoc);
QualType BuildExtVectorType(QualType T, Expr *ArraySize,
SourceLocation AttrLoc);
QualType BuildAddressSpaceAttr(QualType &T, LangAS ASIdx, Expr *AddrSpace,
SourceLocation AttrLoc);
/// Same as above, but constructs the AddressSpace index if not provided.
//===--- Type construction and exception-specification checking ---------===//

/// Build a type qualified with the address space given by \p AddrSpace.
/// NOTE(review): exact constraints are enforced in the out-of-line
/// definition — confirm there.
QualType BuildAddressSpaceAttr(QualType &T, Expr *AddrSpace,
SourceLocation AttrLoc);

/// Check that \p T is a valid function return type. Presumably returns
/// true on error (the usual Sema convention) — verify at the definition.
bool CheckFunctionReturnType(QualType T, SourceLocation Loc);

/// Build a function type.
///
/// This routine checks the function type according to C++ rules and
/// under the assumption that the result type and parameter types have
/// just been instantiated from a template. It therefore duplicates
/// some of the behavior of GetTypeForDeclarator, but in a much
/// simpler form that is only suitable for this narrow use case.
///
/// \param T The return type of the function.
///
/// \param ParamTypes The parameter types of the function. This array
/// will be modified to account for adjustments to the types of the
/// function parameters.
///
/// \param Loc The location of the entity whose type involves this
/// function type or, if there is no such entity, the location of the
/// type that will have function type.
///
/// \param Entity The name of the entity that involves the function
/// type, if known.
///
/// \param EPI Extra information about the function type. Usually this will
/// be taken from an existing function with the same prototype.
///
/// \returns A suitable function type, if there are no errors. The
/// unqualified type will always be a FunctionProtoType.
/// Otherwise, returns a NULL type.
QualType BuildFunctionType(QualType T,
MutableArrayRef<QualType> ParamTypes,
SourceLocation Loc, DeclarationName Entity,
const FunctionProtoType::ExtProtoInfo &EPI);

/// Build a pointer-to-member type for a member of \p Class.
QualType BuildMemberPointerType(QualType T, QualType Class,
SourceLocation Loc,
DeclarationName Entity);
QualType BuildBlockPointerType(QualType T,
SourceLocation Loc, DeclarationName Entity);
QualType BuildParenType(QualType T);
QualType BuildAtomicType(QualType T, SourceLocation Loc);
// OpenCL pipe types.
QualType BuildReadPipeType(QualType T,
SourceLocation Loc);
QualType BuildWritePipeType(QualType T,
SourceLocation Loc);

/// Convert a Declarator (plus its DeclSpec) into the type it denotes.
TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S);
TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy);

/// Package the given type and TSI into a ParsedType.
ParsedType CreateParsedType(QualType T, TypeSourceInfo *TInfo);
DeclarationNameInfo GetNameForDeclarator(Declarator &D);
DeclarationNameInfo GetNameFromUnqualifiedId(const UnqualifiedId &Name);
static QualType GetTypeFromParser(ParsedType Ty,
TypeSourceInfo **TInfo = nullptr);

//===--- Exception specifications ---------------------------------------===//

/// Classify whether evaluating \p E can throw.
CanThrowResult canThrow(const Expr *E);
const FunctionProtoType *ResolveExceptionSpec(SourceLocation Loc,
const FunctionProtoType *FPT);
void UpdateExceptionSpec(FunctionDecl *FD,
const FunctionProtoType::ExceptionSpecInfo &ESI);
bool CheckSpecifiedExceptionType(QualType &T, SourceRange Range);
bool CheckDistantExceptionSpec(QualType T);
bool CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New);
bool CheckEquivalentExceptionSpec(
const FunctionProtoType *Old, SourceLocation OldLoc,
const FunctionProtoType *New, SourceLocation NewLoc);
bool CheckEquivalentExceptionSpec(
const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID,
const FunctionProtoType *Old, SourceLocation OldLoc,
const FunctionProtoType *New, SourceLocation NewLoc);
bool handlerCanCatch(QualType HandlerType, QualType ExceptionType);
/// Check that \p Subset's exception specification is no broader than
/// \p Superset's, emitting \p DiagID / \p NoteID on failure.
bool CheckExceptionSpecSubset(const PartialDiagnostic &DiagID,
const PartialDiagnostic &NestedDiagID,
const PartialDiagnostic &NoteID,
const FunctionProtoType *Superset,
SourceLocation SuperLoc,
const FunctionProtoType *Subset,
SourceLocation SubLoc);
bool CheckParamExceptionSpec(const PartialDiagnostic &NestedDiagID,
const PartialDiagnostic &NoteID,
const FunctionProtoType *Target,
SourceLocation TargetLoc,
const FunctionProtoType *Source,
SourceLocation SourceLoc);

TypeResult ActOnTypeName(Scope *S, Declarator &D);

/// The parser has parsed the context-sensitive type 'instancetype'
/// in an Objective-C message declaration. Return the appropriate type.
ParsedType ActOnObjCInstanceType(SourceLocation Loc);
/// Abstract class used to diagnose incomplete types.
///
/// Subclasses implement \c diagnose to emit the actual diagnostic when a
/// type is found to be incomplete (see RequireCompleteType and friends).
struct TypeDiagnoser {
  // Idiom fix: use `= default` instead of user-provided empty bodies
  // (clang-tidy modernize-use-equals-default); behavior is identical.
  TypeDiagnoser() = default;

  /// Emit a diagnostic for the incomplete type \p T at \p Loc.
  virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) = 0;
  virtual ~TypeDiagnoser() = default;
};
// Overload set that normalizes heterogeneous values into something a
// diagnostic builder can stream (used by BoundTypeDiagnoser below).
// Most overloads are identity; SourceLocation/Expr/TypeLoc are converted
// to a SourceRange.
static int getPrintable(int I) { return I; }
static unsigned getPrintable(unsigned I) { return I; }
static bool getPrintable(bool B) { return B; }
static const char * getPrintable(const char *S) { return S; }
static StringRef getPrintable(StringRef S) { return S; }
static const std::string &getPrintable(const std::string &S) { return S; }
static const IdentifierInfo *getPrintable(const IdentifierInfo *II) {
return II;
}
static DeclarationName getPrintable(DeclarationName N) { return N; }
static QualType getPrintable(QualType T) { return T; }
static SourceRange getPrintable(SourceRange R) { return R; }
// A single location converts to a (degenerate) range.
static SourceRange getPrintable(SourceLocation L) { return L; }
static SourceRange getPrintable(const Expr *E) { return E->getSourceRange(); }
static SourceRange getPrintable(TypeLoc TL) { return TL.getSourceRange();}
/// TypeDiagnoser that emits a fixed diagnostic ID with a captured set of
/// extra arguments, followed by the offending type.
///
/// The arguments are stored by reference (std::tuple<const Ts &...>), so a
/// BoundTypeDiagnoser must not outlive the objects it was constructed from.
template <typename... Ts> class BoundTypeDiagnoser : public TypeDiagnoser {
unsigned DiagID;
std::tuple<const Ts &...> Args;
template <std::size_t... Is>
void emit(const SemaDiagnosticBuilder &DB,
llvm::index_sequence<Is...>) const {
// Apply all tuple elements to the builder in order.
// The braced-init-list guarantees left-to-right evaluation of the
// pack expansion; the leading `false` keeps the array non-empty when
// the pack is empty.
bool Dummy[] = {false, (DB << getPrintable(std::get<Is>(Args)))...};
(void)Dummy;
}
public:
BoundTypeDiagnoser(unsigned DiagID, const Ts &...Args)
: TypeDiagnoser(), DiagID(DiagID), Args(Args...) {
assert(DiagID != 0 && "no diagnostic for type diagnoser");
}
void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
const SemaDiagnosticBuilder &DB = S.Diag(Loc, DiagID);
// Stream the bound arguments first, then the incomplete type itself.
emit(DB, llvm::index_sequence_for<Ts...>());
DB << T;
}
};
private:
/// Methods for marking which expressions involve dereferencing a pointer
/// marked with the 'noderef' attribute. Expressions are checked bottom up as
/// they are parsed, meaning that a noderef pointer may not be accessed. For
/// example, in `&*p` where `p` is a noderef pointer, we will first parse the
/// `*p`, but need to check that `address of` is called on it. This requires
/// keeping a container of all pending expressions and checking if the address
/// of them are eventually taken.
void CheckSubscriptAccessOfNoDeref(const ArraySubscriptExpr *E);
void CheckAddressOfNoDeref(const Expr *E);
void CheckMemberAccessOfNoDeref(const MemberExpr *E);

/// Shared implementation behind the public RequireCompleteType overloads.
/// isCompleteType passes a null \p Diagnoser — presumably suppressing
/// diagnostics in that case; confirm in the definition.
bool RequireCompleteTypeImpl(SourceLocation Loc, QualType T,
TypeDiagnoser *Diagnoser);

/// Per-module-scope parsing state (see ModuleScopes below).
struct ModuleScope {
clang::Module *Module = nullptr;
bool ModuleInterface = false;
// Visible-module set saved on entry — presumably restored when the
// scope is popped; verify against ActOnModuleBegin/End.
VisibleModuleSet OuterVisibleModules;
};
/// The modules we're currently parsing.
llvm::SmallVector<ModuleScope, 16> ModuleScopes;
/// Get the module whose scope we are currently within.
Module *getCurrentModule() const {
  // An empty scope stack means we are not inside any module.
  if (ModuleScopes.empty())
    return nullptr;
  return ModuleScopes.back().Module;
}
// The set of modules currently visible for name lookup.
VisibleModuleSet VisibleModules;
public:
/// Get the module owning an entity.
Module *getOwningModule(Decl *Entity) { return Entity->getOwningModule(); }
/// Make a merged definition of an existing hidden definition \p ND
/// visible at the specified location.
void makeMergedDefinitionVisible(NamedDecl *ND);
bool isModuleVisible(const Module *M, bool ModulePrivate = false);
/// Determine whether a declaration is visible to name lookup.
bool isVisible(const NamedDecl *D) {
  // Common case first: a declaration that is not hidden is always
  // visible; only hidden declarations need the slow path.
  if (!D->isHidden())
    return true;
  return isVisibleSlow(D);
}
/// Determine whether any declaration of an entity is visible.
bool
hasVisibleDeclaration(const NamedDecl *D,
                      llvm::SmallVectorImpl<Module *> *Modules = nullptr) {
  // Fast path: the declaration itself is visible.
  if (isVisible(D))
    return true;
  // Otherwise scan all redeclarations (and optionally report the
  // modules that would make one visible).
  return hasVisibleDeclarationSlow(D, Modules);
}
// Out-of-line slow path for hasVisibleDeclaration above.
bool hasVisibleDeclarationSlow(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules);
bool hasVisibleMergedDefinition(NamedDecl *Def);
bool hasMergedDefinitionInCurrentModule(NamedDecl *Def);

/// Determine if \p D and \p Suggested have a structurally compatible
/// layout as described in C11 6.2.7/1.
bool hasStructuralCompatLayout(Decl *D, Decl *Suggested);

/// Determine if \p D has a visible definition. If not, suggest a declaration
/// that should be made visible to expose the definition.
bool hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested,
bool OnlyNeedComplete = false);
bool hasVisibleDefinition(const NamedDecl *D) {
NamedDecl *Hidden;
return hasVisibleDefinition(const_cast<NamedDecl*>(D), &Hidden);
}
/// Determine if the template parameter \p D has a visible default argument.
bool
hasVisibleDefaultArgument(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if there is a visible declaration of \p D that is an explicit
/// specialization declaration for a specialization of a template. (For a
/// member specialization, use hasVisibleMemberSpecialization.)
bool hasVisibleExplicitSpecialization(
const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if there is a visible declaration of \p D that is a member
/// specialization declaration (as opposed to an instantiated declaration).
bool hasVisibleMemberSpecialization(
const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if \p A and \p B are equivalent internal linkage declarations
/// from different modules, and thus an ambiguity error can be downgraded to
/// an extension warning.
bool isEquivalentInternalLinkageDeclaration(const NamedDecl *A,
const NamedDecl *B);
void diagnoseEquivalentInternalLinkageDeclarations(
SourceLocation Loc, const NamedDecl *D,
ArrayRef<const NamedDecl *> Equiv);

/// Determine whether \p FD is a usual deallocation function (C++ operator
/// delete semantics) — NOTE(review): inferred from the name; confirm.
bool isUsualDeallocationFunction(const CXXMethodDecl *FD);
bool isCompleteType(SourceLocation Loc, QualType T) {
  // Pure predicate form: a null diagnoser means no diagnostics are
  // produced; the type is complete iff the requirement does not fail.
  if (RequireCompleteTypeImpl(Loc, T, nullptr))
    return false;
  return true;
}
// Require that \p T be complete at \p Loc, diagnosing otherwise.
bool RequireCompleteType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
bool RequireCompleteType(SourceLocation Loc, QualType T,
unsigned DiagID);

/// Variadic convenience wrapper: binds \p DiagID and \p Args into a
/// BoundTypeDiagnoser and forwards to the reference overload above.
template <typename... Ts>
bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteType(Loc, T, Diagnoser);
}

void completeExprArrayBound(Expr *E);
// Same pattern as above, keyed off an expression's type.
bool RequireCompleteExprType(Expr *E, TypeDiagnoser &Diagnoser);
bool RequireCompleteExprType(Expr *E, unsigned DiagID);
template <typename... Ts>
bool RequireCompleteExprType(Expr *E, unsigned DiagID, const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteExprType(E, Diagnoser);
}

// Require that \p T be a literal type (usable in constexpr contexts).
bool RequireLiteralType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID);
template <typename... Ts>
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireLiteralType(Loc, T, Diagnoser);
}

QualType getElaboratedType(ElaboratedTypeKeyword Keyword,
const CXXScopeSpec &SS, QualType T,
TagDecl *OwnedTagDecl = nullptr);

QualType BuildTypeofExprType(Expr *E, SourceLocation Loc);
/// If AsUnevaluated is false, E is treated as though it were an evaluated
/// context, such as when building a type for decltype(auto).
QualType BuildDecltypeType(Expr *E, SourceLocation Loc,
bool AsUnevaluated = true);
QualType BuildUnaryTransformType(QualType BaseType,
UnaryTransformType::UTTKind UKind,
SourceLocation Loc);

//===--------------------------------------------------------------------===//
// Symbol table / Decl tracking callbacks: SemaDecl.cpp.
//
/// Result/state bundle used when the parser may skip a definition body
/// (see CheckForFunctionRedefinition, ActOnDuplicateDefinition, and
/// shouldSkipAnonEnumBody).
///
/// Idiom fix: use default member initializers instead of a hand-written
/// constructor init-list (clang-tidy modernize-use-default-member-init);
/// default construction produces exactly the same state as before.
struct SkipBodyInfo {
  SkipBodyInfo() = default;

  // Whether the body should be skipped.
  bool ShouldSkip = false;
  // Whether the skipped body must be compared against \c Previous —
  // presumably for the C/ObjC structural-equivalence check mentioned on
  // ActOnDuplicateDefinition; confirm there.
  bool CheckSameAsPrevious = false;
  // The prior declaration involved, if any.
  NamedDecl *Previous = nullptr;
  // The newly-seen declaration, if any.
  NamedDecl *New = nullptr;
};
DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = nullptr);

void DiagnoseUseOfUnimplementedSelectors();

bool isSimpleTypeSpecifier(tok::TokenKind Kind) const;

// Resolve an identifier to a type name in the given scope; the many
// flags tailor lookup for class names, ctor/dtor names, template
// deduction contexts, and optional typo correction (CorrectedII).
ParsedType getTypeName(const IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec *SS = nullptr,
bool isClassName = false, bool HasTrailingDot = false,
ParsedType ObjectType = nullptr,
bool IsCtorOrDtorName = false,
bool WantNontrivialTypeSourceInfo = false,
bool IsClassTemplateDeductionContext = true,
IdentifierInfo **CorrectedII = nullptr);
TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S);
bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S);
void DiagnoseUnknownTypeName(IdentifierInfo *&II,
SourceLocation IILoc,
Scope *S,
CXXScopeSpec *SS,
ParsedType &SuggestedType,
bool IsTemplateName = false);

/// Attempt to behave like MSVC in situations where lookup of an unqualified
/// type name has failed in a dependent context. In these situations, we
/// automatically form a DependentTypeName that will retry lookup in a related
/// scope during instantiation.
ParsedType ActOnMSVCUnknownTypeName(const IdentifierInfo &II,
SourceLocation NameLoc,
bool IsTemplateTypeArg);

/// Describes the result of the name lookup and resolution performed
/// by \c ClassifyName().
enum NameClassificationKind {
NC_Unknown,
NC_Error,
NC_Keyword,
NC_Type,
NC_Expression,
NC_NestedNameSpecifier,
NC_TypeTemplate,
NC_VarTemplate,
NC_FunctionTemplate
};
/// Tagged result of ClassifyName(): a kind plus at most one payload
/// (expression, type, or template name) selected by that kind.
/// Constructed either implicitly from a payload or via the static
/// factories below for payload-free kinds.
class NameClassification {
NameClassificationKind Kind;
// Only the member matching Kind is meaningful; the others are unused.
ExprResult Expr;
TemplateName Template;
ParsedType Type;

explicit NameClassification(NameClassificationKind Kind) : Kind(Kind) {}

public:
NameClassification(ExprResult Expr) : Kind(NC_Expression), Expr(Expr) {}

NameClassification(ParsedType Type) : Kind(NC_Type), Type(Type) {}

// The keyword itself is not stored; only the kind is recorded.
NameClassification(const IdentifierInfo *Keyword) : Kind(NC_Keyword) {}

static NameClassification Error() {
return NameClassification(NC_Error);
}

static NameClassification Unknown() {
return NameClassification(NC_Unknown);
}

static NameClassification NestedNameSpecifier() {
return NameClassification(NC_NestedNameSpecifier);
}

static NameClassification TypeTemplate(TemplateName Name) {
NameClassification Result(NC_TypeTemplate);
Result.Template = Name;
return Result;
}

static NameClassification VarTemplate(TemplateName Name) {
NameClassification Result(NC_VarTemplate);
Result.Template = Name;
return Result;
}

static NameClassification FunctionTemplate(TemplateName Name) {
NameClassification Result(NC_FunctionTemplate);
Result.Template = Name;
return Result;
}

NameClassificationKind getKind() const { return Kind; }

// Accessors assert that the stored kind matches the payload requested.
ParsedType getType() const {
assert(Kind == NC_Type);
return Type;
}

ExprResult getExpression() const {
assert(Kind == NC_Expression);
return Expr;
}

TemplateName getTemplateName() const {
assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate ||
Kind == NC_VarTemplate);
return Template;
}

/// Translate the template kinds into the parser-facing TemplateNameKind.
TemplateNameKind getTemplateNameKind() const {
switch (Kind) {
case NC_TypeTemplate:
return TNK_Type_template;
case NC_FunctionTemplate:
return TNK_Function_template;
case NC_VarTemplate:
return TNK_Var_template;
default:
llvm_unreachable("unsupported name classification.");
}
}
};
/// Perform name lookup on the given name, classifying it based on
/// the results of name lookup and the following token.
///
/// This routine is used by the parser to resolve identifiers and help direct
/// parsing. When the identifier cannot be found, this routine will attempt
/// to correct the typo and classify based on the resulting name.
///
/// \param S The scope in which we're performing name lookup.
///
/// \param SS The nested-name-specifier that precedes the name.
///
/// \param Name The identifier. If typo correction finds an alternative name,
/// this pointer parameter will be updated accordingly.
///
/// \param NameLoc The location of the identifier.
///
/// \param NextToken The token following the identifier. Used to help
/// disambiguate the name.
///
/// \param IsAddressOfOperand True if this name is the operand of a unary
/// address of ('&') expression, assuming it is classified as an
/// expression.
///
/// \param CCC The correction callback, if typo correction is desired.
NameClassification ClassifyName(Scope *S, CXXScopeSpec &SS,
IdentifierInfo *&Name, SourceLocation NameLoc,
const Token &NextToken,
bool IsAddressOfOperand,
CorrectionCandidateCallback *CCC = nullptr);

/// Describes the detailed kind of a template name. Used in diagnostics.
enum class TemplateNameKindForDiagnostics {
ClassTemplate,
FunctionTemplate,
VarTemplate,
AliasTemplate,
TemplateTemplateParam,
DependentTemplate
};
/// Map a resolved TemplateName to the diagnostic-oriented kind above.
TemplateNameKindForDiagnostics
getTemplateNameKindForDiagnostics(TemplateName Name);
bool mightBeIntendedToBeTemplateName(ExprResult E, bool &Dependent) {
if (!getLangOpts().CPlusPlus || E.isInvalid())
return false;
Dependent = false;
if (auto *DRE = dyn_cast<DeclRefExpr>(E.get()))
return !DRE->hasExplicitTemplateArgs();
if (auto *ME = dyn_cast<MemberExpr>(E.get()))
return !ME->hasExplicitTemplateArgs();
Dependent = true;
if (auto *DSDRE = dyn_cast<DependentScopeDeclRefExpr>(E.get()))
return !DSDRE->hasExplicitTemplateArgs();
if (auto *DSME = dyn_cast<CXXDependentScopeMemberExpr>(E.get()))
return !DSME->hasExplicitTemplateArgs();
// Any additional cases recognized here should also be handled by
// diagnoseExprIntendedAsTemplateName.
return false;
}
void diagnoseExprIntendedAsTemplateName(Scope *S, ExprResult TemplateName,
SourceLocation Less,
SourceLocation Greater);

// Entry points for turning a parsed Declarator into a declaration.
Decl *ActOnDeclarator(Scope *S, Declarator &D);

NamedDecl *HandleDeclarator(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParameterLists);
void RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S);
bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info);
bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC,
DeclarationName Name, SourceLocation Loc,
bool IsTemplateId);
// Warn about qualifiers that have no effect; the per-qualifier locations
// let the diagnostic point at the exact keyword when available.
void
diagnoseIgnoredQualifiers(unsigned DiagID, unsigned Quals,
SourceLocation FallbackLoc,
SourceLocation ConstQualLoc = SourceLocation(),
SourceLocation VolatileQualLoc = SourceLocation(),
SourceLocation RestrictQualLoc = SourceLocation(),
SourceLocation AtomicQualLoc = SourceLocation(),
SourceLocation UnalignedQualLoc = SourceLocation());

static bool adjustContextForLocalExternDecl(DeclContext *&DC);
void DiagnoseFunctionSpecifiers(const DeclSpec &DS);

// Shadowing diagnostics (-Wshadow and friends).
NamedDecl *getShadowedDeclaration(const TypedefNameDecl *D,
const LookupResult &R);
NamedDecl *getShadowedDeclaration(const VarDecl *D, const LookupResult &R);
void CheckShadow(NamedDecl *D, NamedDecl *ShadowedDecl,
const LookupResult &R);
void CheckShadow(Scope *S, VarDecl *D);

/// Warn if 'E', which is an expression that is about to be modified, refers
/// to a shadowing declaration.
void CheckShadowingDeclModification(Expr *E, SourceLocation Loc);

void DiagnoseShadowingLambdaDecls(const sema::LambdaScopeInfo *LSI);

private:
/// Map of current shadowing declarations to shadowed declarations. Warn if
/// it looks like the user is trying to modify the shadowing declaration.
llvm::DenseMap<const NamedDecl *, const NamedDecl *> ShadowingDecls;

public:
void CheckCastAlign(Expr *Op, QualType T, SourceRange TRange);
void handleTagNumbering(const TagDecl *Tag, Scope *TagScope);
void setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec,
TypedefNameDecl *NewTD);
void CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *D);

// Declarator processing: typedefs, variables, and functions.
NamedDecl* ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC,
TypeSourceInfo *TInfo,
LookupResult &Previous);
NamedDecl* ActOnTypedefNameDecl(Scope* S, DeclContext* DC, TypedefNameDecl *D,
LookupResult &Previous, bool &Redeclaration);
NamedDecl *ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC,
TypeSourceInfo *TInfo,
LookupResult &Previous,
MultiTemplateParamsArg TemplateParamLists,
bool &AddToScope,
ArrayRef<BindingDecl *> Bindings = None);
NamedDecl *
ActOnDecompositionDeclarator(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParamLists);
// Returns true if the variable declaration is a redeclaration
bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous);
void CheckVariableDeclarationType(VarDecl *NewVD);
bool DeduceVariableDeclarationType(VarDecl *VDecl, bool DirectInit,
Expr *&Init);
void CheckCompleteVariableDeclaration(VarDecl *VD);
void CheckCompleteDecompositionDeclaration(DecompositionDecl *DD);
void MaybeSuggestAddingStaticToDecl(const FunctionDecl *D);

NamedDecl* ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC,
TypeSourceInfo *TInfo,
LookupResult &Previous,
MultiTemplateParamsArg TemplateParamLists,
bool &AddToScope);
bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD);

bool CheckConstexprFunctionDecl(const FunctionDecl *FD);
bool CheckConstexprFunctionBody(const FunctionDecl *FD, Stmt *Body);

// Diagnostics for virtual methods hidden by overloads in a derived class.
void DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD);
void FindHiddenVirtualMethods(CXXMethodDecl *MD,
SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
void NoteHiddenVirtualMethods(CXXMethodDecl *MD,
SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
// Returns true if the function declaration is a redeclaration
bool CheckFunctionDeclaration(Scope *S,
FunctionDecl *NewFD, LookupResult &Previous,
bool IsMemberSpecialization);
bool shouldLinkDependentDeclWithPrevious(Decl *D, Decl *OldDecl);
bool canFullyTypeCheckRedeclaration(ValueDecl *NewD, ValueDecl *OldD,
QualType NewT, QualType OldT);
// Entry-point checks for main() and the MSVC CRT entry points.
void CheckMain(FunctionDecl *FD, const DeclSpec &D);
void CheckMSVCRTEntryPoint(FunctionDecl *FD);
Attr *getImplicitCodeSegOrSectionAttrForFunction(const FunctionDecl *FD, bool IsDefinition);

// Parameter declarations and default arguments.
Decl *ActOnParamDeclarator(Scope *S, Declarator &D);
ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC,
SourceLocation Loc,
QualType T);
ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc,
SourceLocation NameLoc, IdentifierInfo *Name,
QualType T, TypeSourceInfo *TSInfo,
StorageClass SC);
void ActOnParamDefaultArgument(Decl *param,
SourceLocation EqualLoc,
Expr *defarg);
void ActOnParamUnparsedDefaultArgument(Decl *param,
SourceLocation EqualLoc,
SourceLocation ArgLoc);
void ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc);
bool SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg,
SourceLocation EqualLoc);

// Initializers and declaration finalization.
void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit);
void ActOnUninitializedDecl(Decl *dcl);
void ActOnInitializerError(Decl *Dcl);

void ActOnPureSpecifier(Decl *D, SourceLocation PureSpecLoc);
void ActOnCXXForRangeDecl(Decl *D);
StmtResult ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc,
IdentifierInfo *Ident,
ParsedAttributes &Attrs,
SourceLocation AttrEnd);
void SetDeclDeleted(Decl *dcl, SourceLocation DelLoc);
void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc);
void CheckStaticLocalForDllExport(VarDecl *VD);
void FinalizeDeclaration(Decl *D);
DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS,
ArrayRef<Decl *> Group);
DeclGroupPtrTy BuildDeclaratorGroup(MutableArrayRef<Decl *> Group);

/// Should be called on all declarations that might have attached
/// documentation comments.
void ActOnDocumentableDecl(Decl *D);
void ActOnDocumentableDecls(ArrayRef<Decl *> Group);

// Function definitions (K&R params, bodies, skipping).
void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D,
SourceLocation LocAfterDecls);
void CheckForFunctionRedefinition(
FunctionDecl *FD, const FunctionDecl *EffectiveDefinition = nullptr,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParamLists,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D,
SkipBodyInfo *SkipBody = nullptr);
void ActOnStartOfObjCMethodDef(Scope *S, Decl *D);
// Predicate: is \p D a (non-null) Objective-C method declaration?
bool isObjCMethodDecl(Decl *D) {
  if (!D)
    return false;
  return isa<ObjCMethodDecl>(D);
}
/// Determine whether we can delay parsing the body of a function or
/// function template until it is used, assuming we don't care about emitting
/// code for that function.
///
/// This will be \c false if we may need the body of the function in the
/// middle of parsing an expression (where it's impractical to switch to
/// parsing a different function), for instance, if it's constexpr in C++11
/// or has an 'auto' return type in C++14. These cases are essentially bugs.
bool canDelayFunctionBody(const Declarator &D);

/// Determine whether we can skip parsing the body of a function
/// definition, assuming we don't care about analyzing its body or emitting
/// code for that function.
///
/// This will be \c false only if we may need the body of the function in
/// order to parse the rest of the program (for instance, if it is
/// \c constexpr in C++11 or has an 'auto' return type in C++14).
bool canSkipFunctionBody(Decl *D);

void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation);
Decl *ActOnSkippedFunctionBody(Decl *Decl);
void ActOnFinishInlineFunctionDef(FunctionDecl *D);

/// ActOnFinishDelayedAttribute - Invoked when we have finished parsing an
/// attribute for which parsing is delayed.
void ActOnFinishDelayedAttribute(Scope *S, Decl *D, ParsedAttributes &Attrs);

/// Diagnose any unused parameters in the given sequence of
/// ParmVarDecl pointers.
void DiagnoseUnusedParameters(ArrayRef<ParmVarDecl *> Parameters);

/// Diagnose whether the size of parameters or return value of a
/// function or obj-c method definition is pass-by-value and larger than a
/// specified threshold.
void
DiagnoseSizeOfParametersAndReturnValue(ArrayRef<ParmVarDecl *> Parameters,
QualType ReturnTy, NamedDecl *D);

void DiagnoseInvalidJumps(Stmt *Body);
Decl *ActOnFileScopeAsmDecl(Expr *expr,
SourceLocation AsmLoc,
SourceLocation RParenLoc);

/// Handle a C++11 empty-declaration and attribute-declaration.
Decl *ActOnEmptyDeclaration(Scope *S, const ParsedAttributesView &AttrList,
SourceLocation SemiLoc);

//===--- Modules and imports --------------------------------------------===//

enum class ModuleDeclKind {
Interface, ///< 'export module X;'
Implementation, ///< 'module X;'
Partition, ///< 'module partition X;'
};

/// The parser has processed a module-declaration that begins the definition
/// of a module interface or implementation.
DeclGroupPtrTy ActOnModuleDecl(SourceLocation StartLoc,
SourceLocation ModuleLoc, ModuleDeclKind MDK,
ModuleIdPath Path);

/// The parser has processed a module import declaration.
///
/// \param AtLoc The location of the '@' symbol, if any.
///
/// \param ImportLoc The location of the 'import' keyword.
///
/// \param Path The module access path.
DeclResult ActOnModuleImport(SourceLocation AtLoc, SourceLocation ImportLoc,
ModuleIdPath Path);

/// The parser has processed a module import translated from a
/// #include or similar preprocessing directive.
void ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod);
void BuildModuleInclude(SourceLocation DirectiveLoc, Module *Mod);

/// The parsed has entered a submodule.
void ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod);
/// The parser has left a submodule.
void ActOnModuleEnd(SourceLocation DirectiveLoc, Module *Mod);

/// Create an implicit import of the given module at the given
/// source location, for error recovery, if possible.
///
/// This routine is typically used when an entity found by name lookup
/// is actually hidden within a module that we know about but the user
/// has forgotten to import.
void createImplicitModuleImportForErrorRecovery(SourceLocation Loc,
Module *Mod);

/// Kinds of missing import. Note, the values of these enumerators correspond
/// to %select values in diagnostics.
enum class MissingImportKind {
Declaration,
Definition,
DefaultArgument,
ExplicitSpecialization,
PartialSpecialization
};

/// Diagnose that the specified declaration needs to be visible but
/// isn't, and suggest a module import that would resolve the problem.
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
MissingImportKind MIK, bool Recover = true);
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
SourceLocation DeclLoc, ArrayRef<Module *> Modules,
MissingImportKind MIK, bool Recover);

// C++ Modules TS 'export' blocks.
Decl *ActOnStartExportDecl(Scope *S, SourceLocation ExportLoc,
SourceLocation LBraceLoc);
Decl *ActOnFinishExportDecl(Scope *S, Decl *ExportDecl,
SourceLocation RBraceLoc);

/// We've found a use of a templated declaration that would trigger an
/// implicit instantiation. Check that any relevant explicit specializations
/// and partial specializations are visible, and diagnose if not.
void checkSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec);

/// We've found a use of a template specialization that would select a
/// partial specialization. Check that the partial specialization is visible,
/// and diagnose if not.
void checkPartialSpecializationVisibility(SourceLocation Loc,
NamedDecl *Spec);

/// Retrieve a suitable printing policy for diagnostics.
PrintingPolicy getPrintingPolicy() const {
  // Delegate to the static overload, supplying this Sema's AST context
  // and preprocessor.
  return Sema::getPrintingPolicy(Context, PP);
}
/// Retrieve a suitable printing policy for diagnostics.
static PrintingPolicy getPrintingPolicy(const ASTContext &Ctx,
const Preprocessor &PP);

/// Scope actions.
void ActOnPopScope(SourceLocation Loc, Scope *S);
void ActOnTranslationUnitScope(Scope *S);

// A decl-spec with no declarator, e.g. 'struct S;' or an anonymous union.
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
RecordDecl *&AnonRecord);
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
MultiTemplateParamsArg TemplateParams,
bool IsExplicitInstantiation,
RecordDecl *&AnonRecord);

Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS,
AccessSpecifier AS,
RecordDecl *Record,
const PrintingPolicy &Policy);

Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS,
RecordDecl *Record);

/// Common ways to introduce type names without a tag for use in diagnostics.
/// Keep in sync with err_tag_reference_non_tag.
enum NonTagKind {
NTK_NonStruct,
NTK_NonClass,
NTK_NonUnion,
NTK_NonEnum,
NTK_Typedef,
NTK_TypeAlias,
NTK_Template,
NTK_TypeAliasTemplate,
NTK_TemplateTemplateArgument,
};

/// Given a non-tag type declaration, returns an enum useful for indicating
/// what kind of non-tag type this is.
NonTagKind getNonTagTypeDeclKind(const Decl *D, TagTypeKind TTK);

bool isAcceptableTagRedeclaration(const TagDecl *Previous,
TagTypeKind NewTag, bool isDefinition,
SourceLocation NewTagLoc,
const IdentifierInfo *Name);

enum TagUseKind {
TUK_Reference, // Reference to a tag: 'struct foo *X;'
TUK_Declaration, // Fwd decl of a tag: 'struct foo;'
TUK_Definition, // Definition of a tag: 'struct foo { int X; } Y;'
TUK_Friend // Friend declaration: 'friend struct foo;'
};

// Main entry point for processing a struct/union/class/enum tag.
Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc, const ParsedAttributesView &Attr,
AccessSpecifier AS, SourceLocation ModulePrivateLoc,
MultiTemplateParamsArg TemplateParameterLists, bool &OwnedDecl,
bool &IsDependent, SourceLocation ScopedEnumKWLoc,
bool ScopedEnumUsesClassTag, TypeResult UnderlyingType,
bool IsTypeSpecifier, bool IsTemplateParamOrArg,
SkipBodyInfo *SkipBody = nullptr);

Decl *ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc,
unsigned TagSpec, SourceLocation TagLoc,
CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc,
const ParsedAttributesView &Attr,
MultiTemplateParamsArg TempParamLists);

TypeResult ActOnDependentTag(Scope *S,
unsigned TagSpec,
TagUseKind TUK,
const CXXScopeSpec &SS,
IdentifierInfo *Name,
SourceLocation TagLoc,
SourceLocation NameLoc);

// Fields, ObjC ivars, and properties.
void ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart,
IdentifierInfo *ClassName,
SmallVectorImpl<Decl *> &Decls);
Decl *ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth);

FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
InClassInitStyle InitStyle,
AccessSpecifier AS);
MSPropertyDecl *HandleMSProperty(Scope *S, RecordDecl *TagD,
SourceLocation DeclStart, Declarator &D,
Expr *BitfieldWidth,
InClassInitStyle InitStyle,
AccessSpecifier AS,
const ParsedAttr &MSPropertyAttr);

FieldDecl *CheckFieldDecl(DeclarationName Name, QualType T,
TypeSourceInfo *TInfo,
RecordDecl *Record, SourceLocation Loc,
bool Mutable, Expr *BitfieldWidth,
InClassInitStyle InitStyle,
SourceLocation TSSL,
AccessSpecifier AS, NamedDecl *PrevDecl,
Declarator *D = nullptr);

bool CheckNontrivialField(FieldDecl *FD);
void DiagnoseNontrivial(const CXXRecordDecl *Record, CXXSpecialMember CSM);

enum TrivialABIHandling {
/// The triviality of a method unaffected by "trivial_abi".
TAH_IgnoreTrivialABI,

/// The triviality of a method affected by "trivial_abi".
TAH_ConsiderTrivialABI
};

bool SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM,
TrivialABIHandling TAH = TAH_IgnoreTrivialABI,
bool Diagnose = false);
CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD);

void ActOnLastBitfield(SourceLocation DeclStart,
SmallVectorImpl<Decl *> &AllIvarDecls);
Decl *ActOnIvar(Scope *S, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
tok::ObjCKeywordKind visibility);

// This is used for both record definitions and ObjC interface declarations.
void ActOnFields(Scope *S, SourceLocation RecLoc, Decl *TagDecl,
ArrayRef<Decl *> Fields, SourceLocation LBrac,
SourceLocation RBrac, const ParsedAttributesView &AttrList);

/// ActOnTagStartDefinition - Invoked when we have entered the
/// scope of a tag's definition (e.g., for an enumeration, class,
/// struct, or union).
void ActOnTagStartDefinition(Scope *S, Decl *TagDecl);

/// Perform ODR-like check for C/ObjC when merging tag types from modules.
/// Differently from C++, actually parse the body and reject / error out
/// in case of a structural mismatch.
bool ActOnDuplicateDefinition(DeclSpec &DS, Decl *Prev,
SkipBodyInfo &SkipBody);

// Opaque token returned when skipping a tag definition; passed back to
// ActOnTagFinishSkippedDefinition.
typedef void *SkippedDefinitionContext;

/// Invoked when we enter a tag definition that we're skipping.
SkippedDefinitionContext ActOnTagStartSkippedDefinition(Scope *S, Decl *TD);

Decl *ActOnObjCContainerStartDefinition(Decl *IDecl);

/// ActOnStartCXXMemberDeclarations - Invoked when we have parsed a
/// C++ record definition's base-specifiers clause and are starting its
/// member declarations.
void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl,
SourceLocation FinalLoc,
bool IsFinalSpelledSealed,
SourceLocation LBraceLoc);

/// ActOnTagFinishDefinition - Invoked once we have finished parsing
/// the definition of a tag (enumeration, class, struct, or union).
void ActOnTagFinishDefinition(Scope *S, Decl *TagDecl,
SourceRange BraceRange);

void ActOnTagFinishSkippedDefinition(SkippedDefinitionContext Context);

void ActOnObjCContainerFinishDefinition();

/// Invoked when we must temporarily exit the objective-c container
/// scope for parsing/looking-up C constructs.
///
/// Must be followed by a call to \see ActOnObjCReenterContainerContext
void ActOnObjCTemporaryExitContainerContext(DeclContext *DC);
void ActOnObjCReenterContainerContext(DeclContext *DC);

/// ActOnTagDefinitionError - Invoked when there was an unrecoverable
/// error parsing the definition of a tag.
void ActOnTagDefinitionError(Scope *S, Decl *TagDecl);

// Enumerations.
EnumConstantDecl *CheckEnumConstant(EnumDecl *Enum,
EnumConstantDecl *LastEnumConst,
SourceLocation IdLoc,
IdentifierInfo *Id,
Expr *val);
bool CheckEnumUnderlyingType(TypeSourceInfo *TI);
bool CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped,
QualType EnumUnderlyingTy, bool IsFixed,
const EnumDecl *Prev);

/// Determine whether the body of an anonymous enumeration should be skipped.
/// \param II The name of the first enumerator.
SkipBodyInfo shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II,
SourceLocation IILoc);

Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant,
SourceLocation IdLoc, IdentifierInfo *Id,
const ParsedAttributesView &Attrs,
SourceLocation EqualLoc, Expr *Val);
void ActOnEnumBody(SourceLocation EnumLoc, SourceRange BraceRange,
Decl *EnumDecl, ArrayRef<Decl *> Elements, Scope *S,
const ParsedAttributesView &Attr);

// Declaration-context management.
DeclContext *getContainingDC(DeclContext *DC);

/// Set the current declaration context until it gets popped.
void PushDeclContext(Scope *S, DeclContext *DC);
void PopDeclContext();

/// EnterDeclaratorContext - Used when we must lookup names in the context
/// of a declarator's nested name specifier.
void EnterDeclaratorContext(Scope *S, DeclContext *DC);
void ExitDeclaratorContext(Scope *S);
/// Push the parameters of D, which must be a function, into scope.
void ActOnReenterFunctionContext(Scope* S, Decl* D);
void ActOnExitFunctionContext();
DeclContext *getFunctionLevelDeclContext();
/// getCurFunctionDecl - If inside of a function body, this returns a pointer
/// to the function decl for the function being parsed. If we're currently
/// in a 'block', this returns the containing context.
FunctionDecl *getCurFunctionDecl();
/// getCurMethodDecl - If inside of a method body, this returns a pointer to
/// the method decl for the method being parsed. If we're currently
/// in a 'block', this returns the containing context.
ObjCMethodDecl *getCurMethodDecl();
/// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method
/// or C function we're in, otherwise return null. If we're currently
/// in a 'block', this returns the containing context.
NamedDecl *getCurFunctionOrMethodDecl();
/// Add this decl to the scope shadowed decl chains.
void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true);
/// Make the given externally-produced declaration visible at the
/// top level scope.
///
/// \param D The externally-produced declaration to push.
///
/// \param Name The name of the externally-produced declaration.
void pushExternalDeclIntoScope(NamedDecl *D, DeclarationName Name);
/// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true
/// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns
/// true if 'D' belongs to the given declaration context.
///
/// \param AllowInlineNamespace If \c true, allow the declaration to be in the
/// enclosing namespace set of the context, rather than contained
/// directly within it.
bool isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S = nullptr,
bool AllowInlineNamespace = false);
/// Finds the scope corresponding to the given decl context, if it
/// happens to be an enclosing scope. Otherwise return NULL.
static Scope *getScopeForDeclContext(Scope *S, DeclContext *DC);
/// Subroutines of ActOnDeclarator().
TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T,
TypeSourceInfo *TInfo);
bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New);
/// Describes the kind of merge to perform for availability
/// attributes (including "deprecated", "unavailable", and "availability").
enum AvailabilityMergeKind {
/// Don't merge availability attributes at all.
AMK_None,
/// Merge availability attributes for a redeclaration, which requires
/// an exact match.
AMK_Redeclaration,
/// Merge availability attributes for an override, which requires
/// an exact match or a weakening of constraints.
AMK_Override,
/// Merge availability attributes for an implementation of
/// a protocol requirement.
AMK_ProtocolImplementation,
};
/// Describes the kind of priority given to an availability attribute.
///
/// The sum of priorities determines the final priority of the attribute.
/// The final priority determines how the attribute will be merged.
/// An attribute with a lower priority will always remove higher priority
/// attributes for the specified platform when it is being applied. An
/// attribute with a higher priority will not be applied if the declaration
/// already has an availability attribute with a lower priority for the
/// specified platform. The final priority values are not expected to match
/// the values in this enumeration, but instead should be treated as a plain
/// integer value. This enumeration just names the priority weights that are
/// used to calculate that final value.
enum AvailabilityPriority : int {
/// The availability attribute was specified explicitly next to the
/// declaration.
AP_Explicit = 0,
/// The availability attribute was applied using '#pragma clang attribute'.
AP_PragmaClangAttribute = 1,
/// The availability attribute for a specific platform was inferred from
/// an availability attribute for another platform.
AP_InferredFromOtherPlatform = 2
};
/// Attribute merging methods. Return true if a new attribute was added.
AvailabilityAttr *mergeAvailabilityAttr(
NamedDecl *D, SourceRange Range, IdentifierInfo *Platform, bool Implicit,
VersionTuple Introduced, VersionTuple Deprecated, VersionTuple Obsoleted,
bool IsUnavailable, StringRef Message, bool IsStrict,
StringRef Replacement, AvailabilityMergeKind AMK, int Priority,
unsigned AttrSpellingListIndex);
TypeVisibilityAttr *mergeTypeVisibilityAttr(Decl *D, SourceRange Range,
TypeVisibilityAttr::VisibilityType Vis,
unsigned AttrSpellingListIndex);
VisibilityAttr *mergeVisibilityAttr(Decl *D, SourceRange Range,
VisibilityAttr::VisibilityType Vis,
unsigned AttrSpellingListIndex);
UuidAttr *mergeUuidAttr(Decl *D, SourceRange Range,
unsigned AttrSpellingListIndex, StringRef Uuid);
DLLImportAttr *mergeDLLImportAttr(Decl *D, SourceRange Range,
unsigned AttrSpellingListIndex);
DLLExportAttr *mergeDLLExportAttr(Decl *D, SourceRange Range,
unsigned AttrSpellingListIndex);
MSInheritanceAttr *
mergeMSInheritanceAttr(Decl *D, SourceRange Range, bool BestCase,
unsigned AttrSpellingListIndex,
MSInheritanceAttr::Spelling SemanticSpelling);
FormatAttr *mergeFormatAttr(Decl *D, SourceRange Range,
IdentifierInfo *Format, int FormatIdx,
int FirstArg, unsigned AttrSpellingListIndex);
SectionAttr *mergeSectionAttr(Decl *D, SourceRange Range, StringRef Name,
unsigned AttrSpellingListIndex);
CodeSegAttr *mergeCodeSegAttr(Decl *D, SourceRange Range, StringRef Name,
unsigned AttrSpellingListIndex);
AlwaysInlineAttr *mergeAlwaysInlineAttr(Decl *D, SourceRange Range,
IdentifierInfo *Ident,
unsigned AttrSpellingListIndex);
MinSizeAttr *mergeMinSizeAttr(Decl *D, SourceRange Range,
unsigned AttrSpellingListIndex);
NoSpeculativeLoadHardeningAttr *
mergeNoSpeculativeLoadHardeningAttr(Decl *D,
const NoSpeculativeLoadHardeningAttr &AL);
SpeculativeLoadHardeningAttr *
mergeSpeculativeLoadHardeningAttr(Decl *D,
const SpeculativeLoadHardeningAttr &AL);
OptimizeNoneAttr *mergeOptimizeNoneAttr(Decl *D, SourceRange Range,
unsigned AttrSpellingListIndex);
InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const ParsedAttr &AL);
InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D,
const InternalLinkageAttr &AL);
CommonAttr *mergeCommonAttr(Decl *D, const ParsedAttr &AL);
CommonAttr *mergeCommonAttr(Decl *D, const CommonAttr &AL);
void mergeDeclAttributes(NamedDecl *New, Decl *Old,
AvailabilityMergeKind AMK = AMK_Redeclaration);
void MergeTypedefNameDecl(Scope *S, TypedefNameDecl *New,
LookupResult &OldDecls);
bool MergeFunctionDecl(FunctionDecl *New, NamedDecl *&Old, Scope *S,
bool MergeTypeWithOld);
bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old,
Scope *S, bool MergeTypeWithOld);
void mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old);
void MergeVarDecl(VarDecl *New, LookupResult &Previous);
void MergeVarDeclTypes(VarDecl *New, VarDecl *Old, bool MergeTypeWithOld);
void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old);
bool checkVarDeclRedefinition(VarDecl *OldDefn, VarDecl *NewDefn);
void notePreviousDefinition(const NamedDecl *Old, SourceLocation New);
bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S);
// AssignmentAction - This is used by all the assignment diagnostic functions
// to represent what is actually causing the operation
enum AssignmentAction {
AA_Assigning, ///< A plain assignment.
AA_Passing, ///< Passing an argument.
AA_Returning, ///< Returning a value.
AA_Converting, ///< A conversion.
AA_Initializing, ///< An initialization.
AA_Sending, ///< Sending an Objective-C message argument.
AA_Casting, ///< An explicit cast.
AA_Passing_CFAudited ///< Passing an argument in a CF-audited context.
};
/// C++ Overloading.
enum OverloadKind {
/// This is a legitimate overload: the existing declarations are
/// functions or function templates with different signatures.
Ovl_Overload,
/// This is not an overload because the signature exactly matches
/// an existing declaration.
Ovl_Match,
/// This is not an overload because the lookup results contain a
/// non-function.
Ovl_NonFunction
};
OverloadKind CheckOverload(Scope *S,
FunctionDecl *New,
const LookupResult &OldDecls,
NamedDecl *&OldDecl,
bool IsForUsingDecl);
bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl,
bool ConsiderCudaAttrs = true);
ImplicitConversionSequence
TryImplicitConversion(Expr *From, QualType ToType,
bool SuppressUserConversions,
bool AllowExplicit,
bool InOverloadResolution,
bool CStyle,
bool AllowObjCWritebackConversion);
bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType);
bool IsFloatingPointPromotion(QualType FromType, QualType ToType);
bool IsComplexPromotion(QualType FromType, QualType ToType);
bool IsPointerConversion(Expr *From, QualType FromType, QualType ToType,
bool InOverloadResolution,
QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCPointerConversion(QualType FromType, QualType ToType,
QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCWritebackConversion(QualType FromType, QualType ToType,
QualType &ConvertedType);
bool IsBlockPointerConversion(QualType FromType, QualType ToType,
QualType& ConvertedType);
bool FunctionParamTypesAreEqual(const FunctionProtoType *OldType,
const FunctionProtoType *NewType,
unsigned *ArgPos = nullptr);
void HandleFunctionTypeMismatch(PartialDiagnostic &PDiag,
QualType FromType, QualType ToType);
void maybeExtendBlockObject(ExprResult &E);
CastKind PrepareCastToObjCObjectPointer(ExprResult &E);
bool CheckPointerConversion(Expr *From, QualType ToType,
CastKind &Kind,
CXXCastPath& BasePath,
bool IgnoreBaseAccess,
bool Diagnose = true);
bool IsMemberPointerConversion(Expr *From, QualType FromType, QualType ToType,
bool InOverloadResolution,
QualType &ConvertedType);
bool CheckMemberPointerConversion(Expr *From, QualType ToType,
CastKind &Kind,
CXXCastPath &BasePath,
bool IgnoreBaseAccess);
bool IsQualificationConversion(QualType FromType, QualType ToType,
bool CStyle, bool &ObjCLifetimeConversion);
bool IsFunctionConversion(QualType FromType, QualType ToType,
QualType &ResultTy);
bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType);
bool isSameOrCompatibleFunctionType(CanQualType Param, CanQualType Arg);
ExprResult PerformMoveOrCopyInitialization(const InitializedEntity &Entity,
const VarDecl *NRVOCandidate,
QualType ResultType,
Expr *Value,
bool AllowNRVO = true);
bool CanPerformCopyInitialization(const InitializedEntity &Entity,
ExprResult Init);
ExprResult PerformCopyInitialization(const InitializedEntity &Entity,
SourceLocation EqualLoc,
ExprResult Init,
bool TopLevelOfInitList = false,
bool AllowExplicit = false);
ExprResult PerformObjectArgumentInitialization(Expr *From,
NestedNameSpecifier *Qualifier,
NamedDecl *FoundDecl,
CXXMethodDecl *Method);
/// Check that the lifetime of the initializer (and its subobjects) is
/// sufficient for initializing the entity, and perform lifetime extension
/// (when permitted) if not.
void checkInitializerLifetime(const InitializedEntity &Entity, Expr *Init);
ExprResult PerformContextuallyConvertToBool(Expr *From);
ExprResult PerformContextuallyConvertToObjCPointer(Expr *From);
/// Contexts in which a converted constant expression is required.
enum CCEKind {
CCEK_CaseValue, ///< Expression in a case label.
CCEK_Enumerator, ///< Enumerator value with fixed underlying type.
CCEK_TemplateArg, ///< Value of a non-type template parameter.
CCEK_NewExpr, ///< Constant expression in a noptr-new-declarator.
CCEK_ConstexprIf ///< Condition in a constexpr if statement.
};
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
llvm::APSInt &Value, CCEKind CCE);
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
APValue &Value, CCEKind CCE);
/// Abstract base class used to perform a contextual implicit
/// conversion from an expression to any type passing a filter.
class ContextualImplicitConverter {
public:
// NOTE(review): flags consulted by the conversion machinery; presumably
// Suppress silences this converter's diagnostics and SuppressConversion
// silences only the conversion-related ones -- confirm at use sites.
bool Suppress;
bool SuppressConversion;
/// \param Suppress initial value of the Suppress flag.
/// \param SuppressConversion initial value of the SuppressConversion flag.
ContextualImplicitConverter(bool Suppress = false,
bool SuppressConversion = false)
: Suppress(Suppress), SuppressConversion(SuppressConversion) {}
/// Determine whether the specified type is a valid destination type
/// for this conversion.
virtual bool match(QualType T) = 0;
/// Emits a diagnostic complaining that the expression does not have
/// integral or enumeration type.
virtual SemaDiagnosticBuilder
diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) = 0;
/// Emits a diagnostic when the expression has incomplete class type.
virtual SemaDiagnosticBuilder
diagnoseIncomplete(Sema &S, SourceLocation Loc, QualType T) = 0;
/// Emits a diagnostic when the only matching conversion function
/// is explicit.
virtual SemaDiagnosticBuilder diagnoseExplicitConv(
Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;
/// Emits a note for the explicit conversion function.
virtual SemaDiagnosticBuilder
noteExplicitConv(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;
/// Emits a diagnostic when there are multiple possible conversion
/// functions.
virtual SemaDiagnosticBuilder
diagnoseAmbiguous(Sema &S, SourceLocation Loc, QualType T) = 0;
/// Emits a note for one of the candidate conversions.
virtual SemaDiagnosticBuilder
noteAmbiguous(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;
/// Emits a diagnostic when we picked a conversion function
/// (for cases when we are not allowed to pick a conversion function).
virtual SemaDiagnosticBuilder diagnoseConversion(
Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;
// Virtual destructor: this is a polymorphic base deleted through base
// pointers by implementers.
virtual ~ContextualImplicitConverter() {}
};
/// Converter for contexts that require an integral or (possibly scoped)
/// enumeration type, routing the generic "no match" diagnostic to the
/// more specific diagnoseNotInt hook.
class ICEConvertDiagnoser : public ContextualImplicitConverter {
// Whether scoped (enum class) enumeration types also satisfy match().
bool AllowScopedEnumerations;
public:
/// \param AllowScopedEnumerations accept scoped enumeration types in match().
/// \param Suppress forwarded to ContextualImplicitConverter.
/// \param SuppressConversion forwarded to ContextualImplicitConverter.
ICEConvertDiagnoser(bool AllowScopedEnumerations,
bool Suppress, bool SuppressConversion)
: ContextualImplicitConverter(Suppress, SuppressConversion),
AllowScopedEnumerations(AllowScopedEnumerations) {}
/// Match an integral or (possibly scoped) enumeration type.
bool match(QualType T) override;
/// For this converter, "no match" always means "not an integral or
/// enumeration type", so delegate to diagnoseNotInt.
SemaDiagnosticBuilder
diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) override {
return diagnoseNotInt(S, Loc, T);
}
/// Emits a diagnostic complaining that the expression does not have
/// integral or enumeration type.
virtual SemaDiagnosticBuilder
diagnoseNotInt(Sema &S, SourceLocation Loc, QualType T) = 0;
};
/// Perform a contextual implicit conversion.
ExprResult PerformContextualImplicitConversion(
SourceLocation Loc, Expr *FromE, ContextualImplicitConverter &Converter);
/// The kind of Objective-C subscripting operation, as classified by
/// CheckSubscriptingKind.
enum ObjCSubscriptKind {
OS_Array, ///< Array-style subscripting.
OS_Dictionary, ///< Dictionary-style subscripting.
OS_Error ///< The subscripting kind could not be determined.
};
ObjCSubscriptKind CheckSubscriptingKind(Expr *FromE);
// Note that LK_String is intentionally after the other literals, as
// this is used for diagnostics logic.
enum ObjCLiteralKind {
LK_Array, ///< An array literal.
LK_Dictionary, ///< A dictionary literal.
LK_Numeric, ///< A numeric literal.
LK_Boxed, ///< A boxed expression.
LK_String, ///< A string literal.
LK_Block, ///< A block expression.
LK_None ///< Not a recognized literal kind.
};
ObjCLiteralKind CheckLiteralKind(Expr *FromE);
ExprResult PerformObjectMemberConversion(Expr *From,
NestedNameSpecifier *Qualifier,
NamedDecl *FoundDecl,
NamedDecl *Member);
// Members have to be NamespaceDecl* or TranslationUnitDecl*.
// TODO: make this a typesafe union.
typedef llvm::SmallSetVector<DeclContext *, 16> AssociatedNamespaceSet;
typedef llvm::SmallSetVector<CXXRecordDecl *, 16> AssociatedClassSet;
using ADLCallKind = CallExpr::ADLCallKind;
void AddOverloadCandidate(FunctionDecl *Function, DeclAccessPair FoundDecl,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
bool AllowExplicit = false,
ADLCallKind IsADLCandidate = ADLCallKind::NotADL,
ConversionSequenceList EarlyConversions = None);
void AddFunctionCandidates(const UnresolvedSetImpl &Functions,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
bool FirstArgumentIsBase = false);
void AddMethodCandidate(DeclAccessPair FoundDecl,
QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversion = false);
void AddMethodCandidate(CXXMethodDecl *Method,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
ConversionSequenceList EarlyConversions = None);
void AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
TemplateArgumentListInfo *ExplicitTemplateArgs,
QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false);
void AddTemplateOverloadCandidate(
FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl,
TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet, bool SuppressUserConversions = false,
bool PartialOverloading = false,
ADLCallKind IsADLCandidate = ADLCallKind::NotADL);
bool CheckNonDependentConversions(FunctionTemplateDecl *FunctionTemplate,
ArrayRef<QualType> ParamTypes,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
ConversionSequenceList &Conversions,
bool SuppressUserConversions,
CXXRecordDecl *ActingContext = nullptr,
QualType ObjectType = QualType(),
Expr::Classification
ObjectClassification = {});
void AddConversionCandidate(CXXConversionDecl *Conversion,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
Expr *From, QualType ToType,
OverloadCandidateSet& CandidateSet,
bool AllowObjCConversionOnExplicit,
bool AllowResultConversion = true);
void AddTemplateConversionCandidate(FunctionTemplateDecl *FunctionTemplate,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
Expr *From, QualType ToType,
OverloadCandidateSet &CandidateSet,
bool AllowObjCConversionOnExplicit,
bool AllowResultConversion = true);
void AddSurrogateCandidate(CXXConversionDecl *Conversion,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
const FunctionProtoType *Proto,
Expr *Object, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet);
void AddMemberOperatorCandidates(OverloadedOperatorKind Op,
SourceLocation OpLoc, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
SourceRange OpRange = SourceRange());
void AddBuiltinCandidate(QualType *ParamTys, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool IsAssignmentOperator = false,
unsigned NumContextualBoolArguments = 0);
void AddBuiltinOperatorCandidates(OverloadedOperatorKind Op,
SourceLocation OpLoc, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet);
void AddArgumentDependentLookupCandidates(DeclarationName Name,
SourceLocation Loc,
ArrayRef<Expr *> Args,
TemplateArgumentListInfo *ExplicitTemplateArgs,
OverloadCandidateSet& CandidateSet,
bool PartialOverloading = false);
// Emit as a 'note' the specific overload candidate
void NoteOverloadCandidate(NamedDecl *Found, FunctionDecl *Fn,
QualType DestType = QualType(),
bool TakingAddress = false);
// Emit as a series of 'note's all template and non-templates identified by
// the expression Expr
void NoteAllOverloadCandidates(Expr *E, QualType DestType = QualType(),
bool TakingAddress = false);
/// Check the enable_if expressions on the given function. Returns the first
/// failing attribute, or NULL if they were all successful.
EnableIfAttr *CheckEnableIf(FunctionDecl *Function, ArrayRef<Expr *> Args,
bool MissingImplicitThis = false);
/// Find the failed Boolean condition within a given Boolean
/// constant expression, and describe it with a string.
std::pair<Expr *, std::string> findFailedBooleanCondition(Expr *Cond);
/// Emit diagnostics for the diagnose_if attributes on Function, ignoring any
/// non-ArgDependent DiagnoseIfAttrs.
///
/// Argument-dependent diagnose_if attributes should be checked each time a
/// function is used as a direct callee of a function call.
///
/// Returns true if any errors were emitted.
bool diagnoseArgDependentDiagnoseIfAttrs(const FunctionDecl *Function,
const Expr *ThisArg,
ArrayRef<const Expr *> Args,
SourceLocation Loc);
/// Emit diagnostics for the diagnose_if attributes on Function, ignoring any
/// ArgDependent DiagnoseIfAttrs.
///
/// Argument-independent diagnose_if attributes should be checked on every use
/// of a function.
///
/// Returns true if any errors were emitted.
bool diagnoseArgIndependentDiagnoseIfAttrs(const NamedDecl *ND,
SourceLocation Loc);
/// Returns whether the given function's address can be taken or not,
/// optionally emitting a diagnostic if the address can't be taken.
///
/// Returns false if taking the address of the function is illegal.
bool checkAddressOfFunctionIsAvailable(const FunctionDecl *Function,
bool Complain = false,
SourceLocation Loc = SourceLocation());
// [PossiblyAFunctionType] --> [Return]
// NonFunctionType --> NonFunctionType
// R (A) --> R(A)
// R (*)(A) --> R (A)
// R (&)(A) --> R (A)
// R (S::*)(A) --> R (A)
QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType);
FunctionDecl *
ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr,
QualType TargetType,
bool Complain,
DeclAccessPair &Found,
bool *pHadMultipleCandidates = nullptr);
FunctionDecl *
resolveAddressOfOnlyViableOverloadCandidate(Expr *E,
DeclAccessPair &FoundResult);
bool resolveAndFixAddressOfOnlyViableOverloadCandidate(
ExprResult &SrcExpr, bool DoFunctionPointerConversion = false);
FunctionDecl *
ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl,
bool Complain = false,
DeclAccessPair *Found = nullptr);
bool ResolveAndFixSingleFunctionTemplateSpecialization(
ExprResult &SrcExpr,
bool DoFunctionPointerConverion = false,
bool Complain = false,
SourceRange OpRangeForComplaining = SourceRange(),
QualType DestTypeForComplaining = QualType(),
unsigned DiagIDForComplaining = 0);
Expr *FixOverloadedFunctionReference(Expr *E,
DeclAccessPair FoundDecl,
FunctionDecl *Fn);
ExprResult FixOverloadedFunctionReference(ExprResult,
DeclAccessPair FoundDecl,
FunctionDecl *Fn);
void AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
bool PartialOverloading = false);
// An enum used to represent the different possible results of building a
// range-based for loop.
enum ForRangeStatus {
FRS_Success, ///< The begin/end call was built successfully.
FRS_NoViableFunction, ///< No viable begin/end function was found.
FRS_DiagnosticIssued ///< A diagnostic has already been issued.
};
ForRangeStatus BuildForRangeBeginEndCall(SourceLocation Loc,
SourceLocation RangeLoc,
const DeclarationNameInfo &NameInfo,
LookupResult &MemberLookup,
OverloadCandidateSet *CandidateSet,
Expr *Range, ExprResult *CallExpr);
ExprResult BuildOverloadedCallExpr(Scope *S, Expr *Fn,
UnresolvedLookupExpr *ULE,
SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc,
Expr *ExecConfig,
bool AllowTypoCorrection=true,
bool CalleesAddressIsTaken=false);
bool buildOverloadedCallSet(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE,
MultiExprArg Args, SourceLocation RParenLoc,
OverloadCandidateSet *CandidateSet,
ExprResult *Result);
ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc,
UnaryOperatorKind Opc,
const UnresolvedSetImpl &Fns,
Expr *input, bool RequiresADL = true);
ExprResult CreateOverloadedBinOp(SourceLocation OpLoc,
BinaryOperatorKind Opc,
const UnresolvedSetImpl &Fns,
Expr *LHS, Expr *RHS,
bool RequiresADL = true);
ExprResult CreateOverloadedArraySubscriptExpr(SourceLocation LLoc,
SourceLocation RLoc,
Expr *Base,Expr *Idx);
ExprResult
BuildCallToMemberFunction(Scope *S, Expr *MemExpr,
SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc);
ExprResult
BuildCallToObjectOfClassType(Scope *S, Expr *Object, SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc);
ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
bool *NoArrowOperatorFound = nullptr);
/// CheckCallReturnType - Checks that a call expression's return type is
/// complete. Returns true on failure. The location passed in is the location
/// that best represents the call.
bool CheckCallReturnType(QualType ReturnType, SourceLocation Loc,
CallExpr *CE, FunctionDecl *FD);
/// Helpers for dealing with blocks and functions.
bool CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters,
bool CheckParameterNames);
void CheckCXXDefaultArguments(FunctionDecl *FD);
void CheckExtraCXXDefaultArguments(Declarator &D);
Scope *getNonFieldDeclScope(Scope *S);
/// \name Name lookup
///
/// These routines provide name lookup that is used during semantic
/// analysis to resolve the various kinds of names (identifiers,
/// overloaded operator names, constructor names, etc.) into zero or
/// more declarations within a particular scope. The major entry
/// points are LookupName, which performs unqualified name lookup,
/// and LookupQualifiedName, which performs qualified name lookup.
///
/// All name lookup is performed based on some specific criteria,
/// which specify what names will be visible to name lookup and how
/// far name lookup should work. These criteria are important both
/// for capturing language semantics (certain lookups will ignore
/// certain names, for example) and for performance, since name
/// lookup is often a bottleneck in the compilation of C++. Name
/// lookup criteria is specified via the LookupCriteria enumeration.
///
/// The results of name lookup can vary based on the kind of name
/// lookup performed, the current language, and the translation
/// unit. In C, for example, name lookup will either return nothing
/// (no entity found) or a single declaration. In C++, name lookup
/// can additionally refer to a set of overloaded functions or
/// result in an ambiguity. All of the possible results of name
/// lookup are captured by the LookupResult class, which provides
/// the ability to distinguish among them.
//@{
/// Describes the kind of name lookup to perform.
enum LookupNameKind {
/// Ordinary name lookup, which finds ordinary names (functions,
/// variables, typedefs, etc.) in C and most kinds of names
/// (functions, variables, members, types, etc.) in C++.
LookupOrdinaryName = 0,
/// Tag name lookup, which finds the names of enums, classes,
/// structs, and unions.
LookupTagName,
/// Label name lookup.
LookupLabel,
/// Member name lookup, which finds the names of
/// class/struct/union members.
LookupMemberName,
/// Look up of an operator name (e.g., operator+) for use with
/// operator overloading. This lookup is similar to ordinary name
/// lookup, but will ignore any declarations that are class members.
LookupOperatorName,
/// Look up of a name that precedes the '::' scope resolution
/// operator in C++. This lookup completely ignores operator, object,
/// function, and enumerator names (C++ [basic.lookup.qual]p1).
LookupNestedNameSpecifierName,
/// Look up a namespace name within a C++ using directive or
/// namespace alias definition, ignoring non-namespace names (C++
/// [basic.lookup.udir]p1).
LookupNamespaceName,
/// Look up all declarations in a scope with the given name,
/// including resolved using declarations. This is appropriate
/// for checking redeclarations for a using declaration.
LookupUsingDeclName,
/// Look up an ordinary name that is going to be redeclared as a
/// name with linkage. This lookup ignores any declarations that
/// are outside of the current scope unless they have linkage. See
/// C99 6.2.2p4-5 and C++ [basic.link]p6.
LookupRedeclarationWithLinkage,
/// Look up a friend of a local class. This lookup does not look
/// outside the innermost non-class scope. See C++11 [class.friend]p11.
LookupLocalFriendName,
/// Look up the name of an Objective-C protocol.
LookupObjCProtocolName,
/// Look up implicit 'self' parameter of an objective-c method.
LookupObjCImplicitSelfParam,
/// Look up the name of an OpenMP user-defined reduction operation.
LookupOMPReductionName,
/// Look up the name of an OpenMP user-defined mapper.
LookupOMPMapperName,
/// Look up any declaration with any name.
LookupAnyName
};
/// Specifies whether (or how) name lookup is being performed for a
/// redeclaration (vs. a reference).
enum RedeclarationKind {
/// The lookup is a reference to this name that is not for the
/// purpose of redeclaring the name.
NotForRedeclaration = 0,
/// The lookup results will be used for redeclaration of a name,
/// if an entity by that name already exists and is visible.
ForVisibleRedeclaration,
/// The lookup results will be used for redeclaration of a name
/// with external linkage; non-visible lookup results with external linkage
/// may also be found.
ForExternalRedeclaration
};
/// Choose the redeclaration-lookup kind appropriate for the current
/// declaration context.
///
/// A declaration with an owning module for linkage can never link against
/// anything that is not visible. We don't need to check linkage here; if
/// the context has internal linkage, redeclaration lookup won't find things
/// from other TUs, and we can't safely compute linkage yet in general.
RedeclarationKind forRedeclarationInCurContext() {
Decl *ContextDecl = cast<Decl>(CurContext);
bool HasOwningModule =
ContextDecl->getOwningModuleForLinkage(/*IgnoreLinkage*/true) != nullptr;
return HasOwningModule ? ForVisibleRedeclaration : ForExternalRedeclaration;
}
/// The possible outcomes of name lookup for a literal operator.
enum LiteralOperatorLookupResult {
/// The lookup resulted in an error.
LOLR_Error,
/// The lookup found no match but no diagnostic was issued.
LOLR_ErrorNoDiagnostic,
/// The lookup found a single 'cooked' literal operator, which
/// expects a normal literal to be built and passed to it.
LOLR_Cooked,
/// The lookup found a single 'raw' literal operator, which expects
/// a string literal containing the spelling of the literal token.
LOLR_Raw,
/// The lookup found an overload set of literal operator templates,
/// which expect the characters of the spelling of the literal token to be
/// passed as a non-type template argument pack.
LOLR_Template,
/// The lookup found an overload set of literal operator templates,
/// which expect the character type and characters of the spelling of the
/// string literal token to be passed as template arguments.
LOLR_StringTemplate
};
SpecialMemberOverloadResult LookupSpecialMember(CXXRecordDecl *D,
CXXSpecialMember SM,
bool ConstArg,
bool VolatileArg,
bool RValueThis,
bool ConstThis,
bool VolatileThis);
typedef std::function<void(const TypoCorrection &)> TypoDiagnosticGenerator;
typedef std::function<ExprResult(Sema &, TypoExpr *, TypoCorrection)>
TypoRecoveryCallback;
private:
bool CppLookupName(LookupResult &R, Scope *S);
struct TypoExprState {
std::unique_ptr<TypoCorrectionConsumer> Consumer;
TypoDiagnosticGenerator DiagHandler;
TypoRecoveryCallback RecoveryHandler;
TypoExprState();
TypoExprState(TypoExprState &&other) noexcept;
TypoExprState &operator=(TypoExprState &&other) noexcept;
};
/// The set of unhandled TypoExprs and their associated state.
llvm::MapVector<TypoExpr *, TypoExprState> DelayedTypos;
/// Creates a new TypoExpr AST node.
TypoExpr *createDelayedTypo(std::unique_ptr<TypoCorrectionConsumer> TCC,
TypoDiagnosticGenerator TDG,
TypoRecoveryCallback TRC);
// The set of known/encountered (unique, canonicalized) NamespaceDecls.
//
// The boolean value will be true to indicate that the namespace was loaded
// from an AST/PCH file, or false otherwise.
llvm::MapVector<NamespaceDecl*, bool> KnownNamespaces;
/// Whether we have already loaded known namespaces from an external
/// source.
bool LoadedExternalKnownNamespaces;
/// Helper for CorrectTypo and CorrectTypoDelayed used to create and
/// populate a new TypoCorrectionConsumer. Returns nullptr if typo correction
/// should be skipped entirely.
std::unique_ptr<TypoCorrectionConsumer>
makeTypoCorrectionConsumer(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind, Scope *S,
CXXScopeSpec *SS,
CorrectionCandidateCallback &CCC,
DeclContext *MemberContext, bool EnteringContext,
const ObjCObjectPointerType *OPT,
bool ErrorRecovery);
public:
const TypoExprState &getTypoExprState(TypoExpr *TE) const;
/// Clears the state of the given TypoExpr.
void clearDelayedTypo(TypoExpr *TE);
/// Look up a name, looking for a single declaration. Return
/// null if the results were absent, ambiguous, or overloaded.
///
/// It is preferable to use the elaborated form and explicitly handle
/// ambiguity and overloading.
NamedDecl *LookupSingleName(Scope *S, DeclarationName Name,
SourceLocation Loc,
LookupNameKind NameKind,
RedeclarationKind Redecl
= NotForRedeclaration);
bool LookupName(LookupResult &R, Scope *S,
bool AllowBuiltinCreation = false);
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
bool InUnqualifiedLookup = false);
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
CXXScopeSpec &SS);
bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS,
bool AllowBuiltinCreation = false,
bool EnteringContext = false);
ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc,
RedeclarationKind Redecl
= NotForRedeclaration);
bool LookupInSuper(LookupResult &R, CXXRecordDecl *Class);
void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S,
QualType T1, QualType T2,
UnresolvedSetImpl &Functions);
LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation IdentLoc,
SourceLocation GnuLabelLoc = SourceLocation());
DeclContextLookupResult LookupConstructors(CXXRecordDecl *Class);
CXXConstructorDecl *LookupDefaultConstructor(CXXRecordDecl *Class);
CXXConstructorDecl *LookupCopyingConstructor(CXXRecordDecl *Class,
unsigned Quals);
CXXMethodDecl *LookupCopyingAssignment(CXXRecordDecl *Class, unsigned Quals,
bool RValueThis, unsigned ThisQuals);
CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class,
unsigned Quals);
CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class, unsigned Quals,
bool RValueThis, unsigned ThisQuals);
CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class);
bool checkLiteralOperatorId(const CXXScopeSpec &SS, const UnqualifiedId &Id);
LiteralOperatorLookupResult LookupLiteralOperator(Scope *S, LookupResult &R,
ArrayRef<QualType> ArgTys,
bool AllowRaw,
bool AllowTemplate,
bool AllowStringTemplate,
bool DiagnoseMissing);
bool isKnownName(StringRef name);
void ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc,
ArrayRef<Expr *> Args, ADLResult &Functions);
void LookupVisibleDecls(Scope *S, LookupNameKind Kind,
VisibleDeclConsumer &Consumer,
bool IncludeGlobalScope = true,
bool LoadExternal = true);
void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind,
VisibleDeclConsumer &Consumer,
bool IncludeGlobalScope = true,
bool IncludeDependentBases = false,
bool LoadExternal = true);
enum CorrectTypoKind {
CTK_NonError, // CorrectTypo used in a non error recovery situation.
CTK_ErrorRecovery // CorrectTypo used in normal error recovery.
};
TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind,
Scope *S, CXXScopeSpec *SS,
CorrectionCandidateCallback &CCC,
CorrectTypoKind Mode,
DeclContext *MemberContext = nullptr,
bool EnteringContext = false,
const ObjCObjectPointerType *OPT = nullptr,
bool RecordFailure = true);
TypoExpr *CorrectTypoDelayed(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind, Scope *S,
CXXScopeSpec *SS,
CorrectionCandidateCallback &CCC,
TypoDiagnosticGenerator TDG,
TypoRecoveryCallback TRC, CorrectTypoKind Mode,
DeclContext *MemberContext = nullptr,
bool EnteringContext = false,
const ObjCObjectPointerType *OPT = nullptr);
/// Process any TypoExprs in the given Expr and its children,
/// generating diagnostics as appropriate and returning a new Expr if there
/// were typos that were all successfully corrected and ExprError if one or
/// more typos could not be corrected.
///
/// \param E The Expr to check for TypoExprs.
///
/// \param InitDecl A VarDecl to avoid because the Expr being corrected is its
/// initializer.
///
/// \param Filter A function applied to a newly rebuilt Expr to determine if
/// it is an acceptable/usable result from a single combination of typo
/// corrections. As long as the filter returns ExprError, different
/// combinations of corrections will be tried until all are exhausted.
ExprResult
CorrectDelayedTyposInExpr(Expr *E, VarDecl *InitDecl = nullptr,
llvm::function_ref<ExprResult(Expr *)> Filter =
[](Expr *E) -> ExprResult { return E; });
/// Overload of CorrectDelayedTyposInExpr taking a custom \p Filter but no
/// initialized variable to avoid (InitDecl is passed as null).
ExprResult
CorrectDelayedTyposInExpr(Expr *E,
                          llvm::function_ref<ExprResult(Expr *)> Filter) {
  return CorrectDelayedTyposInExpr(E, nullptr, Filter);
}
/// Overload of CorrectDelayedTyposInExpr for an ExprResult: an invalid
/// result is returned unchanged; otherwise the contained Expr is checked
/// for TypoExprs.
///
/// \param ER The possibly-invalid expression to process.
/// \param InitDecl A VarDecl to avoid because the Expr being corrected is
/// its initializer.
/// \param Filter Applied to each rebuilt candidate Expr; see the primary
/// Expr* overload above.
ExprResult
CorrectDelayedTyposInExpr(ExprResult ER, VarDecl *InitDecl = nullptr,
                          llvm::function_ref<ExprResult(Expr *)> Filter =
                              [](Expr *E) -> ExprResult { return E; }) {
  // Forward InitDecl to the Expr* overload. Previously it was silently
  // dropped here, so a typo correction could be rebuilt in terms of the
  // very variable being initialized.
  return ER.isInvalid()
             ? ER
             : CorrectDelayedTyposInExpr(ER.get(), InitDecl, Filter);
}
/// Overload of CorrectDelayedTyposInExpr taking an ExprResult and a custom
/// \p Filter; delegates with no initializing declaration to avoid.
ExprResult
CorrectDelayedTyposInExpr(ExprResult ER,
                          llvm::function_ref<ExprResult(Expr *)> Filter) {
  return CorrectDelayedTyposInExpr(ER, nullptr, Filter);
}
void diagnoseTypo(const TypoCorrection &Correction,
const PartialDiagnostic &TypoDiag,
bool ErrorRecovery = true);
void diagnoseTypo(const TypoCorrection &Correction,
const PartialDiagnostic &TypoDiag,
const PartialDiagnostic &PrevNote,
bool ErrorRecovery = true);
void MarkTypoCorrectedFunctionDefinition(const NamedDecl *F);
void FindAssociatedClassesAndNamespaces(SourceLocation InstantiationLoc,
ArrayRef<Expr *> Args,
AssociatedNamespaceSet &AssociatedNamespaces,
AssociatedClassSet &AssociatedClasses);
void FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S,
bool ConsiderLinkage, bool AllowInlineNamespace);
bool CheckRedeclarationModuleOwnership(NamedDecl *New, NamedDecl *Old);
void DiagnoseAmbiguousLookup(LookupResult &Result);
//@}
ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id,
SourceLocation IdLoc,
bool TypoCorrection = false);
NamedDecl *LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID,
Scope *S, bool ForRedeclaration,
SourceLocation Loc);
NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II,
Scope *S);
void AddKnownFunctionAttributes(FunctionDecl *FD);
// More parsing and symbol table subroutines.
void ProcessPragmaWeak(Scope *S, Decl *D);
// Decl attributes - this routine is the top level dispatcher.
void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD);
// Helper for delayed processing of attributes.
void ProcessDeclAttributeDelayed(Decl *D,
const ParsedAttributesView &AttrList);
void ProcessDeclAttributeList(Scope *S, Decl *D, const ParsedAttributesView &AL,
bool IncludeCXX11Attributes = true);
bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl,
const ParsedAttributesView &AttrList);
void checkUnusedDeclAttributes(Declarator &D);
/// Determine if type T is a valid subject for the nonnull and similar
/// attributes. By default, we look through references (the behavior used by
/// nonnull), but if the second parameter is true, then we treat a reference
/// type as valid.
bool isValidPointerAttrType(QualType T, bool RefOkay = false);
bool CheckRegparmAttr(const ParsedAttr &attr, unsigned &value);
bool CheckCallingConvAttr(const ParsedAttr &attr, CallingConv &CC,
const FunctionDecl *FD = nullptr);
bool CheckAttrTarget(const ParsedAttr &CurrAttr);
bool CheckAttrNoArgs(const ParsedAttr &CurrAttr);
bool checkStringLiteralArgumentAttr(const ParsedAttr &Attr, unsigned ArgNum,
StringRef &Str,
SourceLocation *ArgLocation = nullptr);
bool checkSectionName(SourceLocation LiteralLoc, StringRef Str);
bool checkTargetAttr(SourceLocation LiteralLoc, StringRef Str);
bool checkMSInheritanceAttrOnDefinition(
CXXRecordDecl *RD, SourceRange Range, bool BestCase,
MSInheritanceAttr::Spelling SemanticSpelling);
void CheckAlignasUnderalignment(Decl *D);
/// Adjust the calling convention of a method to be the ABI default if it
/// wasn't specified explicitly. This handles method types formed from
/// function type typedefs and typename template arguments.
void adjustMemberFunctionCC(QualType &T, bool IsStatic, bool IsCtorOrDtor,
SourceLocation Loc);
// Check if there is an explicit attribute, but only look through parens.
// The intent is to look for an attribute on the current declarator, but not
// one that came from a typedef.
bool hasExplicitCallingConv(QualType &T);
/// Get the outermost AttributedType node that sets a calling convention.
/// Valid types should not have multiple attributes with different CCs.
const AttributedType *getCallingConvAttributedType(QualType T) const;
/// Stmt attributes - this routine is the top level dispatcher.
StmtResult ProcessStmtAttributes(Stmt *Stmt,
const ParsedAttributesView &Attrs,
SourceRange Range);
void WarnConflictingTypedMethods(ObjCMethodDecl *Method,
ObjCMethodDecl *MethodDecl,
bool IsProtocolMethodDecl);
void CheckConflictingOverridingMethod(ObjCMethodDecl *Method,
ObjCMethodDecl *Overridden,
bool IsProtocolMethodDecl);
/// WarnExactTypedMethods - This routine issues a warning if method
/// implementation declaration matches exactly that of its declaration.
void WarnExactTypedMethods(ObjCMethodDecl *Method,
ObjCMethodDecl *MethodDecl,
bool IsProtocolMethodDecl);
typedef llvm::SmallPtrSet<Selector, 8> SelectorSet;
/// CheckImplementationIvars - This routine checks if the instance variables
/// listed in the implementation match those listed in the interface.
void CheckImplementationIvars(ObjCImplementationDecl *ImpDecl,
ObjCIvarDecl **Fields, unsigned nIvars,
SourceLocation Loc);
/// ImplMethodsVsClassMethods - This is main routine to warn if any method
/// remains unimplemented in the class or category \@implementation.
void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl,
ObjCContainerDecl* IDecl,
bool IncompleteImpl = false);
/// DiagnoseUnimplementedProperties - This routine warns on those properties
/// which must be implemented by this implementation.
void DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl,
ObjCContainerDecl *CDecl,
bool SynthesizeProperties);
/// Diagnose any null-resettable synthesized setters.
void diagnoseNullResettableSynthesizedSetters(const ObjCImplDecl *impDecl);
/// DefaultSynthesizeProperties - This routine default synthesizes all
/// properties which must be synthesized in the class's \@implementation.
void DefaultSynthesizeProperties(Scope *S, ObjCImplDecl *IMPDecl,
ObjCInterfaceDecl *IDecl,
SourceLocation AtEnd);
void DefaultSynthesizeProperties(Scope *S, Decl *D, SourceLocation AtEnd);
/// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is
/// an ivar synthesized for 'Method' and 'Method' is a property accessor
/// declared in class 'IFace'.
bool IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace,
ObjCMethodDecl *Method, ObjCIvarDecl *IV);
/// DiagnoseUnusedBackingIvarInAccessor - Issue an 'unused' warning if ivar which
/// backs the property is not used in the property's accessor.
void DiagnoseUnusedBackingIvarInAccessor(Scope *S,
const ObjCImplementationDecl *ImplD);
/// GetIvarBackingPropertyAccessor - If method is a property setter/getter and
/// its property has a backing ivar, returns this ivar; otherwise, returns NULL.
/// It also returns ivar's property on success.
ObjCIvarDecl *GetIvarBackingPropertyAccessor(const ObjCMethodDecl *Method,
const ObjCPropertyDecl *&PDecl) const;
/// Called by ActOnProperty to handle \@property declarations in
/// class extensions.
ObjCPropertyDecl *HandlePropertyInClassExtension(Scope *S,
SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD,
Selector GetterSel,
SourceLocation GetterNameLoc,
Selector SetterSel,
SourceLocation SetterNameLoc,
const bool isReadWrite,
unsigned &Attributes,
const unsigned AttributesAsWritten,
QualType T,
TypeSourceInfo *TSI,
tok::ObjCKeywordKind MethodImplKind);
/// Called by ActOnProperty and HandlePropertyInClassExtension to
/// handle creating the ObjcPropertyDecl for a category or \@interface.
ObjCPropertyDecl *CreatePropertyDecl(Scope *S,
ObjCContainerDecl *CDecl,
SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD,
Selector GetterSel,
SourceLocation GetterNameLoc,
Selector SetterSel,
SourceLocation SetterNameLoc,
const bool isReadWrite,
const unsigned Attributes,
const unsigned AttributesAsWritten,
QualType T,
TypeSourceInfo *TSI,
tok::ObjCKeywordKind MethodImplKind,
DeclContext *lexicalDC = nullptr);
/// AtomicPropertySetterGetterRules - This routine enforces the rule (via
/// warning) when atomic property has one but not the other user-declared
/// setter or getter.
void AtomicPropertySetterGetterRules(ObjCImplDecl* IMPDecl,
ObjCInterfaceDecl* IDecl);
void DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D);
void DiagnoseMissingDesignatedInitOverrides(
const ObjCImplementationDecl *ImplD,
const ObjCInterfaceDecl *IFD);
void DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID);
enum MethodMatchStrategy {
MMS_loose,
MMS_strict
};
/// MatchTwoMethodDeclarations - Checks if two methods' type match and returns
/// true, or false, accordingly.
bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method,
const ObjCMethodDecl *PrevMethod,
MethodMatchStrategy strategy = MMS_strict);
/// MatchAllMethodDeclarations - Check methods declared in an interface
/// or protocol against those declared in their implementations.
void MatchAllMethodDeclarations(const SelectorSet &InsMap,
const SelectorSet &ClsMap,
SelectorSet &InsMapSeen,
SelectorSet &ClsMapSeen,
ObjCImplDecl* IMPDecl,
ObjCContainerDecl* IDecl,
bool &IncompleteImpl,
bool ImmediateClass,
bool WarnCategoryMethodImpl=false);
/// CheckCategoryVsClassMethodMatches - Checks that methods implemented in
/// category matches with those implemented in its primary class and
/// warns each time an exact match is found.
void CheckCategoryVsClassMethodMatches(ObjCCategoryImplDecl *CatIMP);
/// Add the given method to the list of globally-known methods.
void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method);
private:
/// AddMethodToGlobalPool - Add an instance or factory method to the global
/// pool. See description of AddInstanceMethodToGlobalPool.
void AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl, bool instance);
/// LookupMethodInGlobalPool - Returns the instance or factory method and
/// optionally warns if there are multiple signatures.
ObjCMethodDecl *LookupMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass,
bool instance);
public:
/// - Returns instance or factory methods in global method pool for
/// given selector. It checks the desired kind first, if none is found, and
/// parameter checkTheOther is set, it then checks the other kind. If no such
/// method or only one method is found, function returns false; otherwise, it
/// returns true.
bool
CollectMultipleMethodsInGlobalPool(Selector Sel,
SmallVectorImpl<ObjCMethodDecl*>& Methods,
bool InstanceFirst, bool CheckTheOther,
const ObjCObjectType *TypeBound = nullptr);
bool
AreMultipleMethodsInGlobalPool(Selector Sel, ObjCMethodDecl *BestMethod,
SourceRange R, bool receiverIdOrClass,
SmallVectorImpl<ObjCMethodDecl*>& Methods);
void
DiagnoseMultipleMethodInGlobalPool(SmallVectorImpl<ObjCMethodDecl*> &Methods,
Selector Sel, SourceRange R,
bool receiverIdOrClass);
private:
/// - Returns the method which best matches the given argument list, or
/// nullptr if none could be found.
ObjCMethodDecl *SelectBestMethod(Selector Sel, MultiExprArg Args,
bool IsInstance,
SmallVectorImpl<ObjCMethodDecl*>& Methods);
/// Record that typo-correction of \p Typo at \p TypoLoc failed, and return
/// an empty correction.
///
/// \param RecordFailure When false, nothing is recorded; an empty
/// correction is still returned.
TypoCorrection FailedCorrection(IdentifierInfo *Typo, SourceLocation TypoLoc,
                                bool RecordFailure = true) {
  if (!RecordFailure)
    return TypoCorrection();
  TypoCorrectionFailures[Typo].insert(TypoLoc);
  return TypoCorrection();
}
public:
/// AddInstanceMethodToGlobalPool - All instance methods in a translation
/// unit are added to a global pool. This allows us to efficiently associate
/// a selector with a method declaration for purposes of typechecking
/// messages sent to "id" (where the class of the object is unknown).
void AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
  AddMethodToGlobalPool(Method, impl, /*instance*/true);
}
/// AddFactoryMethodToGlobalPool - Same as AddInstanceMethodToGlobalPool,
/// but adds the method to the factory (class-method) side of the pool.
void AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
  AddMethodToGlobalPool(Method, impl, /*instance*/false);
}
/// AddAnyMethodToGlobalPool - Add any method, instance or factory to global
/// pool.
void AddAnyMethodToGlobalPool(Decl *D);
/// LookupInstanceMethodInGlobalPool - Returns the instance method for the
/// given selector and warns if there are multiple signatures.
ObjCMethodDecl *LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R,
                                                 bool receiverIdOrClass=false) {
  return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
                                  /*instance*/true);
}
/// LookupFactoryMethodInGlobalPool - Returns the factory (class) method for
/// the given selector and warns if there are multiple signatures.
ObjCMethodDecl *LookupFactoryMethodInGlobalPool(Selector Sel, SourceRange R,
                                                bool receiverIdOrClass=false) {
  return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
                                  /*instance*/false);
}
const ObjCMethodDecl *SelectorsForTypoCorrection(Selector Sel,
QualType ObjectType=QualType());
/// LookupImplementedMethodInGlobalPool - Returns the method which has an
/// implementation.
ObjCMethodDecl *LookupImplementedMethodInGlobalPool(Selector Sel);
/// CollectIvarsToConstructOrDestruct - Collect those ivars which require
/// initialization.
void CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI,
SmallVectorImpl<ObjCIvarDecl*> &Ivars);
//===--------------------------------------------------------------------===//
// Statement Parsing Callbacks: SemaStmt.cpp.
public:
/// A thin wrapper around an expression that has been finished as a
/// full-expression (see MakeFullExpr below); holds a possibly-null Expr*.
class FullExprArg {
public:
  FullExprArg() : E(nullptr) { }
  FullExprArg(Sema &actions) : E(nullptr) { }
  // Hand the wrapped expression back as an ExprResult.
  ExprResult release() {
    return E;
  }
  Expr *get() const { return E; }
  Expr *operator->() {
    return E;
  }
private:
  // FIXME: No need to make the entire Sema class a friend when it's just
  // Sema::MakeFullExpr that needs access to the constructor below.
  friend class Sema;
  explicit FullExprArg(Expr *expr) : E(expr) {}
  // The wrapped expression; may be null.
  Expr *E;
};
/// Turn \p Arg into a full-expression, attributing it to the expression's
/// own location (or an invalid location when \p Arg is null).
FullExprArg MakeFullExpr(Expr *Arg) {
  SourceLocation CCLoc;
  if (Arg)
    CCLoc = Arg->getExprLoc();
  return MakeFullExpr(Arg, CCLoc);
}
/// Turn \p Arg into a full-expression at location \p CC; the value is not
/// treated as discarded.
FullExprArg MakeFullExpr(Expr *Arg, SourceLocation CC) {
  return FullExprArg(
      ActOnFinishFullExpr(Arg, CC, /*DiscardedValue*/ false).get());
}
/// Like MakeFullExpr, but the expression is finished as a discarded-value
/// expression.
FullExprArg MakeFullDiscardedValueExpr(Expr *Arg) {
  const SourceLocation Loc = Arg ? Arg->getExprLoc() : SourceLocation();
  return FullExprArg(
      ActOnFinishFullExpr(Arg, Loc, /*DiscardedValue*/ true).get());
}
StmtResult ActOnExprStmt(ExprResult Arg, bool DiscardedValue = true);
StmtResult ActOnExprStmtError();
StmtResult ActOnNullStmt(SourceLocation SemiLoc,
bool HasLeadingEmptyMacro = false);
void ActOnStartOfCompoundStmt(bool IsStmtExpr);
void ActOnFinishOfCompoundStmt();
StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R,
ArrayRef<Stmt *> Elts, bool isStmtExpr);
/// A RAII object to enter scope of a compound statement.
class CompoundScopeRAII {
public:
  // Notifies Sema that a compound-statement scope (possibly a GNU statement
  // expression, per IsStmtExpr) has been entered.
  CompoundScopeRAII(Sema &S, bool IsStmtExpr = false) : S(S) {
    S.ActOnStartOfCompoundStmt(IsStmtExpr);
  }
  ~CompoundScopeRAII() {
    S.ActOnFinishOfCompoundStmt();
  }
private:
  Sema &S;
};
/// An RAII helper that pops a function scope on exit, unless disabled
/// first.
struct FunctionScopeRAII {
  Sema &S;
  bool Active;
  FunctionScopeRAII(Sema &S) : S(S), Active(true) {}
  ~FunctionScopeRAII() {
    if (Active)
      S.PopFunctionScopeInfo();
  }
  // Call when the function scope has been (or will be) popped by other
  // means, so the destructor does not pop it again.
  void disable() { Active = false; }
};
StmtResult ActOnDeclStmt(DeclGroupPtrTy Decl,
SourceLocation StartLoc,
SourceLocation EndLoc);
void ActOnForEachDeclStmt(DeclGroupPtrTy Decl);
StmtResult ActOnForEachLValueExpr(Expr *E);
ExprResult ActOnCaseExpr(SourceLocation CaseLoc, ExprResult Val);
StmtResult ActOnCaseStmt(SourceLocation CaseLoc, ExprResult LHS,
SourceLocation DotDotDotLoc, ExprResult RHS,
SourceLocation ColonLoc);
void ActOnCaseStmtBody(Stmt *CaseStmt, Stmt *SubStmt);
StmtResult ActOnDefaultStmt(SourceLocation DefaultLoc,
SourceLocation ColonLoc,
Stmt *SubStmt, Scope *CurScope);
StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl,
SourceLocation ColonLoc, Stmt *SubStmt);
StmtResult ActOnAttributedStmt(SourceLocation AttrLoc,
ArrayRef<const Attr*> Attrs,
Stmt *SubStmt);
class ConditionResult;
StmtResult ActOnIfStmt(SourceLocation IfLoc, bool IsConstexpr,
Stmt *InitStmt,
ConditionResult Cond, Stmt *ThenVal,
SourceLocation ElseLoc, Stmt *ElseVal);
StmtResult BuildIfStmt(SourceLocation IfLoc, bool IsConstexpr,
Stmt *InitStmt,
ConditionResult Cond, Stmt *ThenVal,
SourceLocation ElseLoc, Stmt *ElseVal);
StmtResult ActOnStartOfSwitchStmt(SourceLocation SwitchLoc,
Stmt *InitStmt,
ConditionResult Cond);
StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc,
Stmt *Switch, Stmt *Body);
StmtResult ActOnWhileStmt(SourceLocation WhileLoc, ConditionResult Cond,
Stmt *Body);
StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body,
SourceLocation WhileLoc, SourceLocation CondLParen,
Expr *Cond, SourceLocation CondRParen);
StmtResult ActOnForStmt(SourceLocation ForLoc,
SourceLocation LParenLoc,
Stmt *First,
ConditionResult Second,
FullExprArg Third,
SourceLocation RParenLoc,
Stmt *Body);
ExprResult CheckObjCForCollectionOperand(SourceLocation forLoc,
Expr *collection);
StmtResult ActOnObjCForCollectionStmt(SourceLocation ForColLoc,
Stmt *First, Expr *collection,
SourceLocation RParenLoc);
StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body);
enum BuildForRangeKind {
/// Initial building of a for-range statement.
BFRK_Build,
/// Instantiation or recovery rebuild of a for-range statement. Don't
/// attempt any typo-correction.
BFRK_Rebuild,
/// Determining whether a for-range statement could be built. Avoid any
/// unnecessary or irreversible actions.
BFRK_Check
};
StmtResult ActOnCXXForRangeStmt(Scope *S, SourceLocation ForLoc,
SourceLocation CoawaitLoc,
Stmt *InitStmt,
Stmt *LoopVar,
SourceLocation ColonLoc, Expr *Collection,
SourceLocation RParenLoc,
BuildForRangeKind Kind);
StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc,
SourceLocation CoawaitLoc,
Stmt *InitStmt,
SourceLocation ColonLoc,
Stmt *RangeDecl, Stmt *Begin, Stmt *End,
Expr *Cond, Expr *Inc,
Stmt *LoopVarDecl,
SourceLocation RParenLoc,
BuildForRangeKind Kind);
StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body);
StmtResult ActOnGotoStmt(SourceLocation GotoLoc,
SourceLocation LabelLoc,
LabelDecl *TheDecl);
StmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc,
SourceLocation StarLoc,
Expr *DestExp);
StmtResult ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope);
StmtResult ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope);
void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
CapturedRegionKind Kind, unsigned NumParams);
typedef std::pair<StringRef, QualType> CapturedParamNameType;
void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
CapturedRegionKind Kind,
ArrayRef<CapturedParamNameType> Params);
StmtResult ActOnCapturedRegionEnd(Stmt *S);
void ActOnCapturedRegionError();
RecordDecl *CreateCapturedStmtRecordDecl(CapturedDecl *&CD,
SourceLocation Loc,
unsigned NumParams);
enum CopyElisionSemanticsKind {
CES_Strict = 0,
CES_AllowParameters = 1,
CES_AllowDifferentTypes = 2,
CES_AllowExceptionVariables = 4,
CES_FormerDefault = (CES_AllowParameters),
CES_Default = (CES_AllowParameters | CES_AllowDifferentTypes),
CES_AsIfByStdMove = (CES_AllowParameters | CES_AllowDifferentTypes |
CES_AllowExceptionVariables),
};
VarDecl *getCopyElisionCandidate(QualType ReturnType, Expr *E,
CopyElisionSemanticsKind CESK);
bool isCopyElisionCandidate(QualType ReturnType, const VarDecl *VD,
CopyElisionSemanticsKind CESK);
StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp,
Scope *CurScope);
StmtResult BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp);
StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp);
StmtResult ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
bool IsVolatile, unsigned NumOutputs,
unsigned NumInputs, IdentifierInfo **Names,
MultiExprArg Constraints, MultiExprArg Exprs,
Expr *AsmString, MultiExprArg Clobbers,
SourceLocation RParenLoc);
void FillInlineAsmIdentifierInfo(Expr *Res,
llvm::InlineAsmIdentifierInfo &Info);
ExprResult LookupInlineAsmIdentifier(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &Id,
bool IsUnevaluatedContext);
bool LookupInlineAsmField(StringRef Base, StringRef Member,
unsigned &Offset, SourceLocation AsmLoc);
ExprResult LookupInlineAsmVarDeclField(Expr *RefExpr, StringRef Member,
SourceLocation AsmLoc);
StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc,
ArrayRef<Token> AsmToks,
StringRef AsmString,
unsigned NumOutputs, unsigned NumInputs,
ArrayRef<StringRef> Constraints,
ArrayRef<StringRef> Clobbers,
ArrayRef<Expr*> Exprs,
SourceLocation EndLoc);
LabelDecl *GetOrCreateMSAsmLabel(StringRef ExternalLabelName,
SourceLocation Location,
bool AlwaysCreate);
VarDecl *BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType ExceptionType,
SourceLocation StartLoc,
SourceLocation IdLoc, IdentifierInfo *Id,
bool Invalid = false);
Decl *ActOnObjCExceptionDecl(Scope *S, Declarator &D);
StmtResult ActOnObjCAtCatchStmt(SourceLocation AtLoc, SourceLocation RParen,
Decl *Parm, Stmt *Body);
StmtResult ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body);
StmtResult ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try,
MultiStmtArg Catch, Stmt *Finally);
StmtResult BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw);
StmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw,
Scope *CurScope);
ExprResult ActOnObjCAtSynchronizedOperand(SourceLocation atLoc,
Expr *operand);
StmtResult ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc,
Expr *SynchExpr,
Stmt *SynchBody);
StmtResult ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body);
VarDecl *BuildExceptionDeclaration(Scope *S, TypeSourceInfo *TInfo,
SourceLocation StartLoc,
SourceLocation IdLoc,
IdentifierInfo *Id);
Decl *ActOnExceptionDeclarator(Scope *S, Declarator &D);
StmtResult ActOnCXXCatchBlock(SourceLocation CatchLoc,
Decl *ExDecl, Stmt *HandlerBlock);
StmtResult ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock,
ArrayRef<Stmt *> Handlers);
StmtResult ActOnSEHTryBlock(bool IsCXXTry, // try (true) or __try (false) ?
SourceLocation TryLoc, Stmt *TryBlock,
Stmt *Handler);
StmtResult ActOnSEHExceptBlock(SourceLocation Loc,
Expr *FilterExpr,
Stmt *Block);
void ActOnStartSEHFinallyBlock();
void ActOnAbortSEHFinallyBlock();
StmtResult ActOnFinishSEHFinallyBlock(SourceLocation Loc, Stmt *Block);
StmtResult ActOnSEHLeaveStmt(SourceLocation Loc, Scope *CurScope);
void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock);
bool ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const;
/// If it's a file scoped decl that must warn if not used, keep track
/// of it.
void MarkUnusedFileScopedDecl(const DeclaratorDecl *D);
/// DiagnoseUnusedExprResult - If the statement passed in is an expression
/// whose result is unused, warn.
void DiagnoseUnusedExprResult(const Stmt *S);
void DiagnoseUnusedNestedTypedefs(const RecordDecl *D);
void DiagnoseUnusedDecl(const NamedDecl *ND);
/// Emit \p DiagID if statement located on \p StmtLoc has a suspicious null
/// statement as a \p Body, and it is located on the same line.
///
/// This helps prevent bugs due to typos, such as:
/// if (condition);
/// do_stuff();
void DiagnoseEmptyStmtBody(SourceLocation StmtLoc,
const Stmt *Body,
unsigned DiagID);
/// Warn if a for/while loop statement \p S, which is followed by
/// \p PossibleBody, has a suspicious null statement as a body.
void DiagnoseEmptyLoopBody(const Stmt *S,
const Stmt *PossibleBody);
/// Warn if a value is moved to itself.
void DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr,
SourceLocation OpLoc);
/// Warn if we're implicitly casting from a _Nullable pointer type to a
/// _Nonnull one.
void diagnoseNullableToNonnullConversion(QualType DstType, QualType SrcType,
SourceLocation Loc);
/// Warn when implicitly casting 0 to nullptr.
void diagnoseZeroToNullptrConversion(CastKind Kind, const Expr *E);
/// Enter a new delayed-diagnostic state that collects diagnostics into
/// \p pool; paired with PopParsingDeclaration.
ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) {
  return DelayedDiagnostics.push(pool);
}
void PopParsingDeclaration(ParsingDeclState state, Decl *decl);
typedef ProcessingContextState ParsingClassState;
/// Enter a context in which diagnostics are not delayed (presumably while
/// parsing a class); paired with PopParsingClass.
ParsingClassState PushParsingClass() {
  return DelayedDiagnostics.pushUndelayed();
}
/// Restore the delayed-diagnostic state saved by PushParsingClass.
void PopParsingClass(ParsingClassState state) {
  DelayedDiagnostics.popUndelayed(state);
}
void redelayDiagnostics(sema::DelayedDiagnosticPool &pool);
void DiagnoseAvailabilityOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
const ObjCInterfaceDecl *UnknownObjCClass,
bool ObjCPropertyAccess,
bool AvoidPartialAvailabilityChecks = false,
ObjCInterfaceDecl *ClassReceiver = nullptr);
bool makeUnavailableInSystemHeader(SourceLocation loc,
UnavailableAttr::ImplicitReason reason);
/// Issue any -Wunguarded-availability warnings in \c FD
void DiagnoseUnguardedAvailabilityViolations(Decl *FD);
//===--------------------------------------------------------------------===//
// Expression Parsing Callbacks: SemaExpr.cpp.
bool CanUseDecl(NamedDecl *D, bool TreatUnavailableAsInvalid);
bool DiagnoseUseOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
const ObjCInterfaceDecl *UnknownObjCClass = nullptr,
bool ObjCPropertyAccess = false,
bool AvoidPartialAvailabilityChecks = false,
ObjCInterfaceDecl *ClassReciever = nullptr);
void NoteDeletedFunction(FunctionDecl *FD);
void NoteDeletedInheritingConstructor(CXXConstructorDecl *CD);
bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD,
ObjCMethodDecl *Getter,
SourceLocation Loc);
void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc,
ArrayRef<Expr *> Args);
void PushExpressionEvaluationContext(
ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr,
ExpressionEvaluationContextRecord::ExpressionKind Type =
ExpressionEvaluationContextRecord::EK_Other);
enum ReuseLambdaContextDecl_t { ReuseLambdaContextDecl };
void PushExpressionEvaluationContext(
ExpressionEvaluationContext NewContext, ReuseLambdaContextDecl_t,
ExpressionEvaluationContextRecord::ExpressionKind Type =
ExpressionEvaluationContextRecord::EK_Other);
void PopExpressionEvaluationContext();
void DiscardCleanupsInEvaluationContext();
ExprResult TransformToPotentiallyEvaluated(Expr *E);
ExprResult HandleExprEvaluationContextForTypeof(Expr *E);
ExprResult ActOnConstantExpression(ExprResult Res);
// Functions for marking a declaration referenced. These functions also
// contain the relevant logic for marking if a reference to a function or
// variable is an odr-use (in the C++11 sense). There are separate variants
// for expressions referring to a decl; these exist because odr-use marking
// needs to be delayed for some constant variables when we build one of the
// named expressions.
//
// MightBeOdrUse indicates whether the use could possibly be an odr-use, and
// should usually be true. This only needs to be set to false if the lack of
// odr-use cannot be determined from the current context (for instance,
// because the name denotes a virtual function and was written without an
// explicit nested-name-specifier).
void MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool MightBeOdrUse);
void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func,
bool MightBeOdrUse = true);
void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var);
void MarkDeclRefReferenced(DeclRefExpr *E, const Expr *Base = nullptr);
void MarkMemberReferenced(MemberExpr *E);
void UpdateMarkingForLValueToRValue(Expr *E);
void CleanupVarDeclMarking();
/// The kind of variable capture requested by tryCaptureVariable: an
/// implicit capture (for either a block or a lambda), or an explicit
/// by-value or by-reference lambda capture.
enum TryCaptureKind {
  TryCapture_Implicit, TryCapture_ExplicitByVal, TryCapture_ExplicitByRef
};
/// Try to capture the given variable.
///
/// \param Var The variable to capture.
///
/// \param Loc The location at which the capture occurs.
///
/// \param Kind The kind of capture, which may be implicit (for either a
/// block or a lambda), or explicit by-value or by-reference (for a lambda).
///
/// \param EllipsisLoc The location of the ellipsis, if one is provided in
/// an explicit lambda capture.
///
/// \param BuildAndDiagnose Whether we are actually supposed to add the
/// captures or diagnose errors. If false, this routine merely checks whether
/// the capture can occur, without performing the capture itself or
/// complaining if the variable cannot be captured.
///
/// \param CaptureType Will be set to the type of the field used to capture
/// this variable in the innermost block or lambda. Only valid when the
/// variable can be captured.
///
/// \param DeclRefType Will be set to the type of a reference to the capture
/// from within the current scope. Only valid when the variable can be
/// captured.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// variables that may or may not be used in certain specializations of
/// a nested generic lambda.
///
/// \returns true if an error occurred (i.e., the variable cannot be
/// captured) and false if the capture succeeded.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind,
                        SourceLocation EllipsisLoc, bool BuildAndDiagnose,
                        QualType &CaptureType,
                        QualType &DeclRefType,
                        const unsigned *const FunctionScopeIndexToStopAt);
/// Try to capture the given variable.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc,
TryCaptureKind Kind = TryCapture_Implicit,
SourceLocation EllipsisLoc = SourceLocation());
/// Checks if the variable must be captured.
bool NeedToCaptureVariable(VarDecl *Var, SourceLocation Loc);
/// Given a variable, determine the type that a reference to that
/// variable will have in the given scope.
QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc);
/// Mark all of the declarations referenced within a particular AST node as
/// referenced. Used when template instantiation instantiates a non-dependent
/// type -- entities referenced by the type are now referenced.
void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T);
void MarkDeclarationsReferencedInExpr(Expr *E,
bool SkipLocalVariables = false);
/// Try to recover by turning the given expression into a
/// call. Returns true if recovery was attempted or an error was
/// emitted; this may also leave the ExprResult invalid.
bool tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD,
bool ForceComplain = false,
bool (*IsPlausibleResult)(QualType) = nullptr);
/// Figure out if an expression could be turned into a call.
bool tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy,
UnresolvedSetImpl &NonTemplateOverloads);
/// Conditionally issue a diagnostic based on the current
/// evaluation context.
///
/// \param Statement If Statement is non-null, delay reporting the
/// diagnostic until the function body is parsed, and then do a basic
/// reachability analysis to determine if the statement is reachable.
/// If it is unreachable, the diagnostic will not be emitted.
bool DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement,
const PartialDiagnostic &PD);
// Primary Expressions.
SourceRange getExprRange(Expr *E) const;
ExprResult ActOnIdExpression(
Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
UnqualifiedId &Id, bool HasTrailingLParen, bool IsAddressOfOperand,
CorrectionCandidateCallback *CCC = nullptr,
bool IsInlineAsmIdentifier = false, Token *KeywordReplacement = nullptr);
void DecomposeUnqualifiedId(const UnqualifiedId &Id,
TemplateArgumentListInfo &Buffer,
DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *&TemplateArgs);
bool
DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R,
CorrectionCandidateCallback &CCC,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
ArrayRef<Expr *> Args = None, TypoExpr **Out = nullptr);
ExprResult LookupInObjCMethod(LookupResult &LookUp, Scope *S,
IdentifierInfo *II,
bool AllowBuiltinCreation=false);
ExprResult ActOnDependentIdExpression(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
bool isAddressOfOperand,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildDeclRefExpr(ValueDecl *D, QualType Ty,
ExprValueKind VK,
SourceLocation Loc,
const CXXScopeSpec *SS = nullptr);
ExprResult
BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
const DeclarationNameInfo &NameInfo,
const CXXScopeSpec *SS = nullptr,
NamedDecl *FoundD = nullptr,
const TemplateArgumentListInfo *TemplateArgs = nullptr);
ExprResult
BuildAnonymousStructUnionMemberReference(
const CXXScopeSpec &SS,
SourceLocation nameLoc,
IndirectFieldDecl *indirectField,
DeclAccessPair FoundDecl = DeclAccessPair::make(nullptr, AS_none),
Expr *baseObjectExpr = nullptr,
SourceLocation opLoc = SourceLocation());
ExprResult BuildPossibleImplicitMemberExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S);
ExprResult BuildImplicitMemberExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
bool IsDefiniteInstance,
const Scope *S);
bool UseArgumentDependentLookup(const CXXScopeSpec &SS,
const LookupResult &R,
bool HasTrailingLParen);
ExprResult
BuildQualifiedDeclarationNameExpr(CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
bool IsAddressOfOperand, const Scope *S,
TypeSourceInfo **RecoveryTSI = nullptr);
ExprResult BuildDependentDeclRefExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS,
LookupResult &R,
bool NeedsADL,
bool AcceptInvalidDecl = false);
ExprResult BuildDeclarationNameExpr(
const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, NamedDecl *D,
NamedDecl *FoundD = nullptr,
const TemplateArgumentListInfo *TemplateArgs = nullptr,
bool AcceptInvalidDecl = false);
ExprResult BuildLiteralOperatorCall(LookupResult &R,
DeclarationNameInfo &SuffixInfo,
ArrayRef<Expr *> Args,
SourceLocation LitEndLoc,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr);
ExprResult BuildPredefinedExpr(SourceLocation Loc,
PredefinedExpr::IdentKind IK);
ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind);
ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val);
bool CheckLoopHintExpr(Expr *E, SourceLocation Loc);
ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = nullptr);
ExprResult ActOnCharacterConstant(const Token &Tok,
Scope *UDLScope = nullptr);
ExprResult ActOnParenExpr(SourceLocation L, SourceLocation R, Expr *E);
ExprResult ActOnParenListExpr(SourceLocation L,
SourceLocation R,
MultiExprArg Val);
/// ActOnStringLiteral - The specified tokens were lexed as pasted string
/// fragments (e.g. "foo" "bar" L"baz").
ExprResult ActOnStringLiteral(ArrayRef<Token> StringToks,
Scope *UDLScope = nullptr);
ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc,
SourceLocation DefaultLoc,
SourceLocation RParenLoc,
Expr *ControllingExpr,
ArrayRef<ParsedType> ArgTypes,
ArrayRef<Expr *> ArgExprs);
ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc,
SourceLocation DefaultLoc,
SourceLocation RParenLoc,
Expr *ControllingExpr,
ArrayRef<TypeSourceInfo *> Types,
ArrayRef<Expr *> Exprs);
// Binary/Unary Operators. 'Tok' is the token for the operator.
ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc,
Expr *InputExpr);
ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc,
UnaryOperatorKind Opc, Expr *Input);
ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc,
tok::TokenKind Op, Expr *Input);
bool isQualifiedMemberAccess(Expr *E);
QualType CheckAddressOfOperand(ExprResult &Operand, SourceLocation OpLoc);
ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo,
SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind,
SourceRange R);
ExprResult CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind);
ExprResult
ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind,
bool IsType, void *TyOrEx,
SourceRange ArgRange);
ExprResult CheckPlaceholderExpr(Expr *E);
bool CheckVecStepExpr(Expr *E);
bool CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind);
bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation OpLoc,
SourceRange ExprRange,
UnaryExprOrTypeTrait ExprKind);
ExprResult ActOnSizeofParameterPackExpr(Scope *S,
SourceLocation OpLoc,
IdentifierInfo &Name,
SourceLocation NameLoc,
SourceLocation RParenLoc);
ExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc,
tok::TokenKind Kind, Expr *Input);
ExprResult ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc,
Expr *Idx, SourceLocation RLoc);
ExprResult CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc,
Expr *Idx, SourceLocation RLoc);
ExprResult ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc,
Expr *LowerBound, SourceLocation ColonLoc,
Expr *Length, SourceLocation RBLoc);
// This struct is for use by ActOnMemberAccess to allow
// BuildMemberReferenceExpr to be able to reinvoke ActOnMemberAccess after
// changing the access operator from a '.' to a '->' (to see if that is the
// change needed to fix an error about an unknown member, e.g. when the class
// defines a custom operator->).
struct ActOnMemberAccessExtraArgs {
  Scope *S;          // scope in which the member access was written
  UnqualifiedId &Id; // the member name as written (cf. ActOnMemberAccessExpr's Member)
  Decl *ObjCImpDecl; // NOTE(review): presumably the enclosing ObjC
                     // implementation decl, as in ActOnMemberAccessExpr -- confirm
};
ExprResult BuildMemberReferenceExpr(
Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow,
CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S,
ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);
ExprResult
BuildMemberReferenceExpr(Expr *Base, QualType BaseType, SourceLocation OpLoc,
bool IsArrow, const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope, LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S,
bool SuppressQualifierCheck = false,
ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);
ExprResult BuildFieldReferenceExpr(Expr *BaseExpr, bool IsArrow,
SourceLocation OpLoc,
const CXXScopeSpec &SS, FieldDecl *Field,
DeclAccessPair FoundDecl,
const DeclarationNameInfo &MemberNameInfo);
ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow);
bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType BaseType,
const CXXScopeSpec &SS,
const LookupResult &R);
ExprResult ActOnDependentMemberExpr(Expr *Base, QualType BaseType,
bool IsArrow, SourceLocation OpLoc,
const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult ActOnMemberAccessExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &Member,
Decl *ObjCImpDecl);
void ActOnDefaultCtorInitializers(Decl *CDtorDecl);
bool ConvertArgumentsForCall(CallExpr *Call, Expr *Fn,
FunctionDecl *FDecl,
const FunctionProtoType *Proto,
ArrayRef<Expr *> Args,
SourceLocation RParenLoc,
bool ExecConfig = false);
void CheckStaticArrayArgument(SourceLocation CallLoc,
ParmVarDecl *Param,
const Expr *ArgExpr);
/// ActOnCallExpr - Handle a call to Fn with the specified array of arguments.
/// This provides the location of the left/right parens and a list of comma
/// locations.
ExprResult ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
MultiExprArg ArgExprs, SourceLocation RParenLoc,
Expr *ExecConfig = nullptr,
bool IsExecConfig = false);
ExprResult
BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl, SourceLocation LParenLoc,
ArrayRef<Expr *> Arg, SourceLocation RParenLoc,
Expr *Config = nullptr, bool IsExecConfig = false,
ADLCallKind UsesADL = ADLCallKind::NotADL);
/// Handle a CUDA kernel call's execution configuration, e.g. the
/// "<<<grid, block>>>" portion of kernel<<<grid, block>>>(args).
/// LLLLoc and GGGLoc name the locations of the "<<<" and ">>>" tokens,
/// respectively -- hence the unusual parameter names.
ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc,
                                   MultiExprArg ExecConfig,
                                   SourceLocation GGGLoc);
ExprResult ActOnCastExpr(Scope *S, SourceLocation LParenLoc,
Declarator &D, ParsedType &Ty,
SourceLocation RParenLoc, Expr *CastExpr);
ExprResult BuildCStyleCastExpr(SourceLocation LParenLoc,
TypeSourceInfo *Ty,
SourceLocation RParenLoc,
Expr *Op);
CastKind PrepareScalarCast(ExprResult &src, QualType destType);
/// Build an altivec or OpenCL literal.
ExprResult BuildVectorLiteral(SourceLocation LParenLoc,
SourceLocation RParenLoc, Expr *E,
TypeSourceInfo *TInfo);
ExprResult MaybeConvertParenListExprToParenExpr(Scope *S, Expr *ME);
ExprResult ActOnCompoundLiteral(SourceLocation LParenLoc,
ParsedType Ty,
SourceLocation RParenLoc,
Expr *InitExpr);
ExprResult BuildCompoundLiteralExpr(SourceLocation LParenLoc,
TypeSourceInfo *TInfo,
SourceLocation RParenLoc,
Expr *LiteralExpr);
ExprResult ActOnInitList(SourceLocation LBraceLoc,
MultiExprArg InitArgList,
SourceLocation RBraceLoc);
ExprResult ActOnDesignatedInitializer(Designation &Desig,
SourceLocation Loc,
bool GNUSyntax,
ExprResult Init);
private:
static BinaryOperatorKind ConvertTokenKindToBinaryOpcode(tok::TokenKind Kind);
public:
ExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc,
tok::TokenKind Kind, Expr *LHSExpr, Expr *RHSExpr);
ExprResult BuildBinOp(Scope *S, SourceLocation OpLoc,
BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr);
ExprResult CreateBuiltinBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc,
Expr *LHSExpr, Expr *RHSExpr);
void DiagnoseCommaOperator(const Expr *LHS, SourceLocation Loc);
/// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null
/// in the case of the GNU conditional expr extension.
ExprResult ActOnConditionalOp(SourceLocation QuestionLoc,
                              SourceLocation ColonLoc,
                              Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr);
/// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo".
ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc,
LabelDecl *TheDecl);
void ActOnStartStmtExpr();
ExprResult ActOnStmtExpr(SourceLocation LPLoc, Stmt *SubStmt,
SourceLocation RPLoc); // "({..})"
// Handle the final expression in a statement expression.
ExprResult ActOnStmtExprResult(ExprResult E);
void ActOnStmtExprError();
// __builtin_offsetof(type, identifier(.identifier|[expr])*)
/// One component of a __builtin_offsetof designator: either a member
/// access (".ident") or an array subscript ("[expr]").
struct OffsetOfComponent {
  SourceLocation LocStart, LocEnd;
  bool isBrackets; // true if [expr], false if .ident
  union {
    IdentifierInfo *IdentInfo; // the field name, when !isBrackets
    Expr *E;                   // the index expression, when isBrackets
  } U;
};
/// __builtin_offsetof(type, a.b[123][456].c)
ExprResult BuildBuiltinOffsetOf(SourceLocation BuiltinLoc,
TypeSourceInfo *TInfo,
ArrayRef<OffsetOfComponent> Components,
SourceLocation RParenLoc);
ExprResult ActOnBuiltinOffsetOf(Scope *S,
SourceLocation BuiltinLoc,
SourceLocation TypeLoc,
ParsedType ParsedArgTy,
ArrayRef<OffsetOfComponent> Components,
SourceLocation RParenLoc);
// __builtin_choose_expr(constExpr, expr1, expr2)
ExprResult ActOnChooseExpr(SourceLocation BuiltinLoc,
Expr *CondExpr, Expr *LHSExpr,
Expr *RHSExpr, SourceLocation RPLoc);
// __builtin_va_arg(expr, type)
ExprResult ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty,
SourceLocation RPLoc);
ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E,
TypeSourceInfo *TInfo, SourceLocation RPLoc);
// __null
ExprResult ActOnGNUNullExpr(SourceLocation TokenLoc);
bool CheckCaseExpression(Expr *E);
/// Describes the result of an "if-exists" condition check, used to
/// implement the Microsoft __if_exists / __if_not_exists extension
/// (see CheckMicrosoftIfExistsSymbol below).
enum IfExistsResult {
  /// The symbol exists.
  IER_Exists,

  /// The symbol does not exist.
  IER_DoesNotExist,

  /// The name is a dependent name, so the results will differ
  /// from one instantiation to the next.
  IER_Dependent,

  /// An error occurred.
  IER_Error
};
IfExistsResult
CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS,
const DeclarationNameInfo &TargetNameInfo);
IfExistsResult
CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc,
bool IsIfExists, CXXScopeSpec &SS,
UnqualifiedId &Name);
StmtResult BuildMSDependentExistsStmt(SourceLocation KeywordLoc,
bool IsIfExists,
NestedNameSpecifierLoc QualifierLoc,
DeclarationNameInfo NameInfo,
Stmt *Nested);
StmtResult ActOnMSDependentExistsStmt(SourceLocation KeywordLoc,
bool IsIfExists,
CXXScopeSpec &SS, UnqualifiedId &Name,
Stmt *Nested);
//===------------------------- "Block" Extension ------------------------===//
/// ActOnBlockStart - This callback is invoked when a block literal is
/// started.
void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope);
/// ActOnBlockArguments - This callback allows processing of block arguments.
/// If there are no arguments, this is still invoked.
void ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo,
Scope *CurScope);
/// ActOnBlockError - If there is an error parsing a block, this callback
/// is invoked to pop the information about the block from the action impl.
void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope);
/// ActOnBlockStmtExpr - This is called when the body of a block statement
/// literal was successfully completed. ^(int x){...}
ExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body,
Scope *CurScope);
//===---------------------------- Clang Extensions ----------------------===//
/// __builtin_convertvector(...)
ExprResult ActOnConvertVectorExpr(Expr *E, ParsedType ParsedDestTy,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
//===---------------------------- OpenCL Features -----------------------===//
/// __builtin_astype(...)
ExprResult ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
//===---------------------------- C++ Features --------------------------===//
// Act on C++ namespaces
Decl *ActOnStartNamespaceDef(Scope *S, SourceLocation InlineLoc,
SourceLocation NamespaceLoc,
SourceLocation IdentLoc, IdentifierInfo *Ident,
SourceLocation LBrace,
const ParsedAttributesView &AttrList,
UsingDirectiveDecl *&UsingDecl);
void ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace);
NamespaceDecl *getStdNamespace() const;
NamespaceDecl *getOrCreateStdNamespace();
NamespaceDecl *lookupStdExperimentalNamespace();
CXXRecordDecl *getStdBadAlloc() const;
EnumDecl *getStdAlignValT() const;
private:
// A cache representing if we've fully checked the various comparison category
// types stored in ASTContext. The bit-index corresponds to the integer value
// of a ComparisonCategoryType enumerator.
llvm::SmallBitVector FullyCheckedComparisonCategories;
ValueDecl *tryLookupCtorInitMemberDecl(CXXRecordDecl *ClassDecl,
CXXScopeSpec &SS,
ParsedType TemplateTypeTy,
IdentifierInfo *MemberOrBase);
public:
/// Lookup the specified comparison category types in the standard
/// library, an check the VarDecls possibly returned by the operator<=>
/// builtins for that type.
///
/// \return The type of the comparison category type corresponding to the
/// specified Kind, or a null type if an error occurs
QualType CheckComparisonCategoryType(ComparisonCategoryType Kind,
SourceLocation Loc);
/// Tests whether Ty is an instance of std::initializer_list and, if
/// it is and Element is not NULL, assigns the element type to Element.
bool isStdInitializerList(QualType Ty, QualType *Element);
/// Looks for the std::initializer_list template and instantiates it
/// with Element, or emits an error if it's not found.
///
/// \returns The instantiated template, or null on error.
QualType BuildStdInitializerList(QualType Element, SourceLocation Loc);
/// Determine whether Ctor is an initializer-list constructor, as
/// defined in [dcl.init.list]p2.
bool isInitListConstructor(const FunctionDecl *Ctor);
Decl *ActOnUsingDirective(Scope *CurScope, SourceLocation UsingLoc,
SourceLocation NamespcLoc, CXXScopeSpec &SS,
SourceLocation IdentLoc,
IdentifierInfo *NamespcName,
const ParsedAttributesView &AttrList);
void PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir);
Decl *ActOnNamespaceAliasDef(Scope *CurScope,
SourceLocation NamespaceLoc,
SourceLocation AliasLoc,
IdentifierInfo *Alias,
CXXScopeSpec &SS,
SourceLocation IdentLoc,
IdentifierInfo *Ident);
void HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow);
bool CheckUsingShadowDecl(UsingDecl *UD, NamedDecl *Target,
const LookupResult &PreviousDecls,
UsingShadowDecl *&PrevShadow);
UsingShadowDecl *BuildUsingShadowDecl(Scope *S, UsingDecl *UD,
NamedDecl *Target,
UsingShadowDecl *PrevDecl);
bool CheckUsingDeclRedeclaration(SourceLocation UsingLoc,
bool HasTypenameKeyword,
const CXXScopeSpec &SS,
SourceLocation NameLoc,
const LookupResult &Previous);
bool CheckUsingDeclQualifier(SourceLocation UsingLoc,
bool HasTypename,
const CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
SourceLocation NameLoc);
NamedDecl *BuildUsingDeclaration(
Scope *S, AccessSpecifier AS, SourceLocation UsingLoc,
bool HasTypenameKeyword, SourceLocation TypenameLoc, CXXScopeSpec &SS,
DeclarationNameInfo NameInfo, SourceLocation EllipsisLoc,
const ParsedAttributesView &AttrList, bool IsInstantiation);
NamedDecl *BuildUsingPackDecl(NamedDecl *InstantiatedFrom,
ArrayRef<NamedDecl *> Expansions);
bool CheckInheritingConstructorUsingDecl(UsingDecl *UD);
/// Given a derived-class using shadow declaration for a constructor and the
/// corresponding base class constructor, find or create the implicit
/// synthesized derived class constructor to use for this initialization.
CXXConstructorDecl *
findInheritingConstructor(SourceLocation Loc, CXXConstructorDecl *BaseCtor,
                          ConstructorUsingShadowDecl *DerivedShadow);
Decl *ActOnUsingDeclaration(Scope *CurScope, AccessSpecifier AS,
SourceLocation UsingLoc,
SourceLocation TypenameLoc, CXXScopeSpec &SS,
UnqualifiedId &Name, SourceLocation EllipsisLoc,
const ParsedAttributesView &AttrList);
Decl *ActOnAliasDeclaration(Scope *CurScope, AccessSpecifier AS,
MultiTemplateParamsArg TemplateParams,
SourceLocation UsingLoc, UnqualifiedId &Name,
const ParsedAttributesView &AttrList,
TypeResult Type, Decl *DeclFromDeclSpec);
/// BuildCXXConstructExpr - Creates a complete call to a constructor,
/// including handling of its default argument expressions.
///
/// \param ConstructKind - a CXXConstructExpr::ConstructionKind
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
NamedDecl *FoundDecl,
CXXConstructorDecl *Constructor, MultiExprArg Exprs,
bool HadMultipleCandidates, bool IsListInitialization,
bool IsStdInitListInitialization,
bool RequiresZeroInit, unsigned ConstructKind,
SourceRange ParenRange);
/// Build a CXXConstructExpr whose constructor has already been resolved if
/// it denotes an inherited constructor.
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
CXXConstructorDecl *Constructor, bool Elidable,
MultiExprArg Exprs,
bool HadMultipleCandidates, bool IsListInitialization,
bool IsStdInitListInitialization,
bool RequiresZeroInit, unsigned ConstructKind,
SourceRange ParenRange);
// FIXME: Can we remove this and have the above BuildCXXConstructExpr check if
// the constructor can be elidable?
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
NamedDecl *FoundDecl,
CXXConstructorDecl *Constructor, bool Elidable,
MultiExprArg Exprs, bool HadMultipleCandidates,
bool IsListInitialization,
bool IsStdInitListInitialization, bool RequiresZeroInit,
unsigned ConstructKind, SourceRange ParenRange);
ExprResult BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field);
/// Instantiate or parse a C++ default argument expression as necessary.
/// Return true on error.
bool CheckCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD,
ParmVarDecl *Param);
/// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating
/// the default expr if needed.
ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc,
FunctionDecl *FD,
ParmVarDecl *Param);
/// FinalizeVarWithDestructor - Prepare for calling destructor on the
/// constructed variable.
void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType);
/// Helper class that collects exception specifications for
/// implicitly-declared special member functions.
class ImplicitExceptionSpecification {
  // Pointer to allow copying
  Sema *Self;
  // We order exception specifications thus:
  // noexcept is the most restrictive, but is only used in C++11.
  // throw() comes next.
  // Then a throw(collected exceptions)
  // Finally no specification, which is expressed as noexcept(false).
  // throw(...) is used instead if any called function uses it.
  ExceptionSpecificationType ComputedEST;
  // The exception types collected so far. NOTE(review): ExceptionsSeen is
  // presumably the dedup set for the Exceptions list (the two are always
  // cleared together) -- confirm against the out-of-line CalledDecl.
  llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen;
  SmallVector<QualType, 4> Exceptions;

  // Discard the collected exception types (set and list together).
  void ClearExceptions() {
    ExceptionsSeen.clear();
    Exceptions.clear();
  }

public:
  /// Start from the most restrictive specification for the language mode:
  /// noexcept in C++11 and later, throw() (EST_DynamicNone) otherwise.
  explicit ImplicitExceptionSpecification(Sema &Self)
      : Self(&Self), ComputedEST(EST_BasicNoexcept) {
    if (!Self.getLangOpts().CPlusPlus11)
      ComputedEST = EST_DynamicNone;
  }

  /// Get the computed exception specification type.
  ExceptionSpecificationType getExceptionSpecType() const {
    assert(!isComputedNoexcept(ComputedEST) &&
           "noexcept(expr) should not be a possible result");
    return ComputedEST;
  }

  /// The number of exceptions in the exception specification.
  unsigned size() const { return Exceptions.size(); }

  /// The set of exceptions in the exception specification.
  const QualType *data() const { return Exceptions.data(); }

  /// Integrate another called method into the collected data.
  void CalledDecl(SourceLocation CallLoc, const CXXMethodDecl *Method);

  /// Integrate an invoked expression into the collected data.
  void CalledExpr(Expr *E);

  /// Overwrite an EPI's exception specification with this
  /// computed exception specification.
  FunctionProtoType::ExceptionSpecInfo getExceptionSpec() const {
    FunctionProtoType::ExceptionSpecInfo ESI;
    ESI.Type = getExceptionSpecType();
    if (ESI.Type == EST_Dynamic) {
      // Dynamic specification: attach the collected exception list.
      ESI.Exceptions = Exceptions;
    } else if (ESI.Type == EST_None) {
      /// C++11 [except.spec]p14:
      /// The exception-specification is noexcept(false) if the set of
      /// potential exceptions of the special member function contains "any"
      ESI.Type = EST_NoexceptFalse;
      ESI.NoexceptExpr = Self->ActOnCXXBoolLiteral(SourceLocation(),
                                                   tok::kw_false).get();
    }
    return ESI;
  }
};
/// Determine what sort of exception specification a defaulted
/// default constructor of a class will have.
/// (NOTE(review): the doc comments on this function and the next were
/// swapped; they have been put on the functions they actually describe.)
ImplicitExceptionSpecification
ComputeDefaultedDefaultCtorExceptionSpec(SourceLocation Loc,
                                         CXXMethodDecl *MD);

/// Determine what sort of exception specification a defaulted
/// copy constructor of a class will have, and whether the parameter
/// will be const.
ImplicitExceptionSpecification
ComputeDefaultedCopyCtorExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted
/// copy assignment operator of a class will have, and whether the
/// parameter will be const.
ImplicitExceptionSpecification
ComputeDefaultedCopyAssignmentExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted move
/// constructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedMoveCtorExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted move
/// assignment operator of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedMoveAssignmentExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted
/// destructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedDtorExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification an inheriting
/// constructor of a class will have.
ImplicitExceptionSpecification
ComputeInheritingCtorExceptionSpec(SourceLocation Loc,
CXXConstructorDecl *CD);
/// Evaluate the implicit exception specification for a defaulted
/// special member function.
void EvaluateImplicitExceptionSpec(SourceLocation Loc, CXXMethodDecl *MD);
/// Check the given noexcept-specifier, convert its expression, and compute
/// the appropriate ExceptionSpecificationType.
ExprResult ActOnNoexceptSpec(SourceLocation NoexceptLoc, Expr *NoexceptExpr,
ExceptionSpecificationType &EST);
/// Check the given exception-specification and update the
/// exception specification information with the results.
void checkExceptionSpecification(bool IsTopLevel,
ExceptionSpecificationType EST,
ArrayRef<ParsedType> DynamicExceptions,
ArrayRef<SourceRange> DynamicExceptionRanges,
Expr *NoexceptExpr,
SmallVectorImpl<QualType> &Exceptions,
FunctionProtoType::ExceptionSpecInfo &ESI);
/// Determine if we're in a case where we need to (incorrectly) eagerly
/// parse an exception specification to work around a libstdc++ bug.
bool isLibstdcxxEagerExceptionSpecHack(const Declarator &D);
/// Add an exception-specification to the given member function
/// (or member function template). The exception-specification was parsed
/// after the method itself was declared.
void actOnDelayedExceptionSpecification(Decl *Method,
ExceptionSpecificationType EST,
SourceRange SpecificationRange,
ArrayRef<ParsedType> DynamicExceptions,
ArrayRef<SourceRange> DynamicExceptionRanges,
Expr *NoexceptExpr);
class InheritedConstructorInfo;
/// Determine if a special member function should have a deleted
/// definition when it is defaulted.
bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM,
InheritedConstructorInfo *ICI = nullptr,
bool Diagnose = false);
/// Declare the implicit default constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// default constructor will be added.
///
/// \returns The implicitly-declared default constructor.
CXXConstructorDecl *DeclareImplicitDefaultConstructor(
CXXRecordDecl *ClassDecl);
/// DefineImplicitDefaultConstructor - Checks for feasibility of
/// defining this constructor as the default constructor.
void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit destructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// destructor will be added.
///
/// \returns The implicitly-declared destructor.
CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitDestructor - Checks for feasibility of
/// defining this destructor as the default destructor.
void DefineImplicitDestructor(SourceLocation CurrentLocation,
CXXDestructorDecl *Destructor);
/// Build an exception spec for destructors that don't have one.
///
/// C++11 says that user-defined destructors with no exception spec get one
/// that looks as if the destructor was implicitly declared.
void AdjustDestructorExceptionSpec(CXXDestructorDecl *Destructor);
/// Define the specified inheriting constructor.
void DefineInheritingConstructor(SourceLocation UseLoc,
CXXConstructorDecl *Constructor);
/// Declare the implicit copy constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy constructor will be added.
///
/// \returns The implicitly-declared copy constructor.
CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitCopyConstructor - Checks for feasibility of
/// defining this constructor as the copy constructor.
void DefineImplicitCopyConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit move constructor for the given class.
///
/// \param ClassDecl The Class declaration into which the implicit
/// move constructor will be added.
///
/// \returns The implicitly-declared move constructor, or NULL if it wasn't
/// declared.
CXXConstructorDecl *DeclareImplicitMoveConstructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitMoveConstructor - Checks for feasibility of
/// defining this constructor as the move constructor.
void DefineImplicitMoveConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit copy assignment operator for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy assignment operator will be added.
///
/// \returns The implicitly-declared copy assignment operator.
CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl);
/// Defines an implicitly-declared copy assignment operator.
void DefineImplicitCopyAssignment(SourceLocation CurrentLocation,
CXXMethodDecl *MethodDecl);
/// Declare the implicit move assignment operator for the given class.
///
/// \param ClassDecl The Class declaration into which the implicit
/// move assignment operator will be added.
///
/// \returns The implicitly-declared move assignment operator, or NULL if it
/// wasn't declared.
CXXMethodDecl *DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl);
/// Defines an implicitly-declared move assignment operator.
void DefineImplicitMoveAssignment(SourceLocation CurrentLocation,
CXXMethodDecl *MethodDecl);
/// Force the declaration of any implicitly-declared members of this
/// class.
void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class);
/// Check a completed declaration of an implicit special member.
void CheckImplicitSpecialMemberDeclaration(Scope *S, FunctionDecl *FD);
/// Determine whether the given function is an implicitly-deleted
/// special member function.
bool isImplicitlyDeleted(FunctionDecl *FD);
/// Check whether 'this' shows up in the type of a static member
/// function after the (naturally empty) cv-qualifier-seq would be.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionType(CXXMethodDecl *Method);
/// Whether 'this' shows up in the exception specification of a static
/// member function.
bool checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method);
/// Check whether 'this' shows up in the attributes of the given
/// static member function.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method);
/// MaybeBindToTemporary - If the passed in expression has a record type with
/// a non-trivial destructor, this will return CXXBindTemporaryExpr. Otherwise
/// it simply returns the passed in expression.
ExprResult MaybeBindToTemporary(Expr *E);
bool CompleteConstructorCall(CXXConstructorDecl *Constructor,
MultiExprArg ArgsPtr,
SourceLocation Loc,
SmallVectorImpl<Expr*> &ConvertedArgs,
bool AllowExplicit = false,
bool IsListInitialization = false);
ParsedType getInheritingConstructorName(CXXScopeSpec &SS,
SourceLocation NameLoc,
IdentifierInfo &Name);
ParsedType getConstructorName(IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec &SS,
bool EnteringContext);
ParsedType getDestructorName(SourceLocation TildeLoc,
IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec &SS,
ParsedType ObjectType,
bool EnteringContext);
ParsedType getDestructorTypeForDecltype(const DeclSpec &DS,
ParsedType ObjectType);
/// Checks that reinterpret casts don't have undefined behavior.
void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType,
bool IsDereference, SourceRange Range);
/// ActOnCXXNamedCast - Parse {dynamic,static,reinterpret,const}_cast's.
ExprResult ActOnCXXNamedCast(SourceLocation OpLoc,
tok::TokenKind Kind,
SourceLocation LAngleBracketLoc,
Declarator &D,
SourceLocation RAngleBracketLoc,
SourceLocation LParenLoc,
Expr *E,
SourceLocation RParenLoc);
ExprResult BuildCXXNamedCast(SourceLocation OpLoc,
tok::TokenKind Kind,
TypeSourceInfo *Ty,
Expr *E,
SourceRange AngleBrackets,
SourceRange Parens);
ExprResult BuildCXXTypeId(QualType TypeInfoType,
SourceLocation TypeidLoc,
TypeSourceInfo *Operand,
SourceLocation RParenLoc);
ExprResult BuildCXXTypeId(QualType TypeInfoType,
SourceLocation TypeidLoc,
Expr *Operand,
SourceLocation RParenLoc);
/// ActOnCXXTypeid - Parse typeid( something ).
ExprResult ActOnCXXTypeid(SourceLocation OpLoc,
SourceLocation LParenLoc, bool isType,
void *TyOrExpr,
SourceLocation RParenLoc);
ExprResult BuildCXXUuidof(QualType TypeInfoType,
SourceLocation TypeidLoc,
TypeSourceInfo *Operand,
SourceLocation RParenLoc);
ExprResult BuildCXXUuidof(QualType TypeInfoType,
SourceLocation TypeidLoc,
Expr *Operand,
SourceLocation RParenLoc);
/// ActOnCXXUuidof - Parse __uuidof( something ).
ExprResult ActOnCXXUuidof(SourceLocation OpLoc,
SourceLocation LParenLoc, bool isType,
void *TyOrExpr,
SourceLocation RParenLoc);
/// Handle a C++1z fold-expression: ( expr op ... op expr ).
ExprResult ActOnCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
tok::TokenKind Operator,
SourceLocation EllipsisLoc, Expr *RHS,
SourceLocation RParenLoc);
ExprResult BuildCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
BinaryOperatorKind Operator,
SourceLocation EllipsisLoc, Expr *RHS,
SourceLocation RParenLoc);
ExprResult BuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc,
BinaryOperatorKind Operator);
/// ActOnCXXThis - Parse 'this' pointer.
ExprResult ActOnCXXThis(SourceLocation loc);
/// Try to retrieve the type of the 'this' pointer.
///
/// \returns The type of 'this', if possible. Otherwise, returns a NULL type.
QualType getCurrentThisType();
/// When non-NULL, the C++ 'this' expression is allowed despite the
/// current context not being a non-static member function. In such cases,
/// this provides the type used for 'this'.
QualType CXXThisTypeOverride;
/// RAII object used to temporarily allow the C++ 'this' expression
/// to be used, with the given qualifiers on the current class type.
class CXXThisScopeRAII {
  Sema &S;                          // Sema whose CXXThisTypeOverride is adjusted.
  QualType OldCXXThisTypeOverride;  // Saved override, restored on destruction.
  bool Enabled;                     // False => constructor made no change.
public:
  /// Introduce a new scope where 'this' may be allowed (when enabled),
  /// using the given declaration (which is either a class template or a
  /// class) along with the qualifiers placed on '*this'.
  CXXThisScopeRAII(Sema &S, Decl *ContextDecl, Qualifiers CXXThisTypeQuals,
                   bool Enabled = true);
  ~CXXThisScopeRAII();
};
/// Make sure the value of 'this' is actually available in the current
/// context, if it is a potentially evaluated context.
///
/// \param Loc The location at which the capture of 'this' occurs.
///
/// \param Explicit Whether 'this' is explicitly captured in a lambda
/// capture list.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// 'this' that may or may not be used in certain specializations of
/// a nested generic lambda (depending on whether the name resolves to
/// a non-static member function or a static function).
/// \return returns 'true' if failed, 'false' if success.
bool CheckCXXThisCapture(SourceLocation Loc, bool Explicit = false,
bool BuildAndDiagnose = true,
const unsigned *const FunctionScopeIndexToStopAt = nullptr,
bool ByCopy = false);
/// Determine whether the given type is the type of *this that is used
/// outside of the body of a member function for a type that is currently
/// being defined.
bool isThisOutsideMemberFunctionBody(QualType BaseType);
/// ActOnCXXBoolLiteral - Parse {true,false} literals.
ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);
/// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals.
ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);
ExprResult
ActOnObjCAvailabilityCheckExpr(llvm::ArrayRef<AvailabilitySpec> AvailSpecs,
SourceLocation AtLoc, SourceLocation RParen);
/// ActOnCXXNullPtrLiteral - Parse 'nullptr'.
ExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc);
/// ActOnCXXThrow - Parse throw expressions.
ExprResult ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *expr);
ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex,
bool IsThrownVarInScope);
bool CheckCXXThrowOperand(SourceLocation ThrowLoc, QualType ThrowTy, Expr *E);
/// ActOnCXXTypeConstructExpr - Parse construction of a specified type.
/// Can be interpreted either as function-style casting ("int(x)")
/// or class type construction ("ClassType(x,y,z)")
/// or creation of a value-initialized type ("int()").
ExprResult ActOnCXXTypeConstructExpr(ParsedType TypeRep,
SourceLocation LParenOrBraceLoc,
MultiExprArg Exprs,
SourceLocation RParenOrBraceLoc,
bool ListInitialization);
ExprResult BuildCXXTypeConstructExpr(TypeSourceInfo *Type,
SourceLocation LParenLoc,
MultiExprArg Exprs,
SourceLocation RParenLoc,
bool ListInitialization);
/// ActOnCXXNew - Parsed a C++ 'new' expression.
ExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal,
SourceLocation PlacementLParen,
MultiExprArg PlacementArgs,
SourceLocation PlacementRParen,
SourceRange TypeIdParens, Declarator &D,
Expr *Initializer);
ExprResult BuildCXXNew(SourceRange Range, bool UseGlobal,
SourceLocation PlacementLParen,
MultiExprArg PlacementArgs,
SourceLocation PlacementRParen,
SourceRange TypeIdParens,
QualType AllocType,
TypeSourceInfo *AllocTypeInfo,
Expr *ArraySize,
SourceRange DirectInitRange,
Expr *Initializer);
/// Determine whether \p FD is an aligned allocation or deallocation
/// function that is unavailable.
bool isUnavailableAlignedAllocationFunction(const FunctionDecl &FD) const;
/// Produce diagnostics if \p FD is an aligned allocation or deallocation
/// function that is unavailable.
void diagnoseUnavailableAlignedAllocation(const FunctionDecl &FD,
SourceLocation Loc);
bool CheckAllocatedType(QualType AllocType, SourceLocation Loc,
SourceRange R);
/// The scope in which to find allocation functions.
enum AllocationFunctionScope {
  /// Only look for allocation functions in the global scope.
  AFS_Global,
  /// Only look for allocation functions in the scope of the
  /// allocated class.
  AFS_Class,
  /// Look for allocation functions in both the global scope
  /// and in the scope of the allocated class.
  AFS_Both
};
/// Finds the overloads of operator new and delete that are appropriate
/// for the allocation.
bool FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range,
AllocationFunctionScope NewScope,
AllocationFunctionScope DeleteScope,
QualType AllocType, bool IsArray,
bool &PassAlignment, MultiExprArg PlaceArgs,
FunctionDecl *&OperatorNew,
FunctionDecl *&OperatorDelete,
bool Diagnose = true);
void DeclareGlobalNewDelete();
void DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return,
ArrayRef<QualType> Params);
bool FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD,
DeclarationName Name, FunctionDecl* &Operator,
bool Diagnose = true);
FunctionDecl *FindUsualDeallocationFunction(SourceLocation StartLoc,
bool CanProvideSize,
bool Overaligned,
DeclarationName Name);
FunctionDecl *FindDeallocationFunctionForDestructor(SourceLocation StartLoc,
CXXRecordDecl *RD);
/// ActOnCXXDelete - Parsed a C++ 'delete' expression
ExprResult ActOnCXXDelete(SourceLocation StartLoc,
bool UseGlobal, bool ArrayForm,
Expr *Operand);
void CheckVirtualDtorCall(CXXDestructorDecl *dtor, SourceLocation Loc,
bool IsDelete, bool CallCanBeVirtual,
bool WarnOnNonAbstractTypes,
SourceLocation DtorLoc);
ExprResult ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation LParen,
Expr *Operand, SourceLocation RParen);
ExprResult BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand,
SourceLocation RParen);
/// Parsed one of the type trait support pseudo-functions.
ExprResult ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
ArrayRef<ParsedType> Args,
SourceLocation RParenLoc);
ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
ArrayRef<TypeSourceInfo *> Args,
SourceLocation RParenLoc);
/// ActOnArrayTypeTrait - Parsed one of the binary type trait support
/// pseudo-functions.
ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT,
SourceLocation KWLoc,
ParsedType LhsTy,
Expr *DimExpr,
SourceLocation RParen);
ExprResult BuildArrayTypeTrait(ArrayTypeTrait ATT,
SourceLocation KWLoc,
TypeSourceInfo *TSInfo,
Expr *DimExpr,
SourceLocation RParen);
/// ActOnExpressionTrait - Parsed one of the unary type trait support
/// pseudo-functions.
ExprResult ActOnExpressionTrait(ExpressionTrait OET,
SourceLocation KWLoc,
Expr *Queried,
SourceLocation RParen);
ExprResult BuildExpressionTrait(ExpressionTrait OET,
SourceLocation KWLoc,
Expr *Queried,
SourceLocation RParen);
ExprResult ActOnStartCXXMemberReference(Scope *S,
Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
ParsedType &ObjectType,
bool &MayBePseudoDestructor);
ExprResult BuildPseudoDestructorExpr(Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
const CXXScopeSpec &SS,
TypeSourceInfo *ScopeType,
SourceLocation CCLoc,
SourceLocation TildeLoc,
PseudoDestructorTypeStorage DestroyedType);
ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
UnqualifiedId &FirstTypeName,
SourceLocation CCLoc,
SourceLocation TildeLoc,
UnqualifiedId &SecondTypeName);
ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
SourceLocation TildeLoc,
const DeclSpec& DS);
/// MaybeCreateExprWithCleanups - If the current full-expression
/// requires any cleanups, surround it with a ExprWithCleanups node.
/// Otherwise, just returns the passed-in expression.
Expr *MaybeCreateExprWithCleanups(Expr *SubExpr);
Stmt *MaybeCreateStmtWithCleanups(Stmt *SubStmt);
ExprResult MaybeCreateExprWithCleanups(ExprResult SubExpr);
MaterializeTemporaryExpr *
CreateMaterializeTemporaryExpr(QualType T, Expr *Temporary,
bool BoundToLvalueReference);
/// Convenience overload: finish a full-expression using the expression's
/// own source location (or an invalid location for a null expression),
/// delegating to the location-taking overload.
ExprResult ActOnFinishFullExpr(Expr *Expr, bool DiscardedValue) {
  SourceLocation CaretLoc = Expr ? Expr->getExprLoc() : SourceLocation();
  return ActOnFinishFullExpr(Expr, CaretLoc, DiscardedValue);
}
ExprResult ActOnFinishFullExpr(Expr *Expr, SourceLocation CC,
bool DiscardedValue, bool IsConstexpr = false);
StmtResult ActOnFinishFullStmt(Stmt *Stmt);
// Marks SS invalid if it represents an incomplete type.
bool RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC);
DeclContext *computeDeclContext(QualType T);
DeclContext *computeDeclContext(const CXXScopeSpec &SS,
bool EnteringContext = false);
bool isDependentScopeSpecifier(const CXXScopeSpec &SS);
CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS);
/// The parser has parsed a global nested-name-specifier '::'.
///
/// \param CCLoc The location of the '::'.
///
/// \param SS The nested-name-specifier, which will be updated in-place
/// to reflect the parsed nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXGlobalScopeSpecifier(SourceLocation CCLoc, CXXScopeSpec &SS);
/// The parser has parsed a '__super' nested-name-specifier.
///
/// \param SuperLoc The location of the '__super' keyword.
///
/// \param ColonColonLoc The location of the '::'.
///
/// \param SS The nested-name-specifier, which will be updated in-place
/// to reflect the parsed nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnSuperScopeSpecifier(SourceLocation SuperLoc,
SourceLocation ColonColonLoc, CXXScopeSpec &SS);
bool isAcceptableNestedNameSpecifier(const NamedDecl *SD,
bool *CanCorrect = nullptr);
NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS);
/// Keeps information about an identifier in a nested-name-spec.
///
struct NestedNameSpecInfo {
  /// The type of the object, if we're parsing nested-name-specifier in
  /// a member access expression.
  ParsedType ObjectType;
  /// The identifier preceding the '::'.
  IdentifierInfo *Identifier;
  /// The location of the identifier.
  SourceLocation IdentifierLoc;
  /// The location of the '::'.
  SourceLocation CCLoc;
  /// Creates info object for the most typical case.
  NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc,
             SourceLocation ColonColonLoc, ParsedType ObjectType = ParsedType())
    : ObjectType(ObjectType), Identifier(II), IdentifierLoc(IdLoc),
      CCLoc(ColonColonLoc) {
  }
  /// As above, but wraps a semantic QualType into a ParsedType for the
  /// object-type slot.
  NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc,
                     SourceLocation ColonColonLoc, QualType ObjectType)
    : ObjectType(ParsedType::make(ObjectType)), Identifier(II),
      IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) {
  }
};
bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS,
NestedNameSpecInfo &IdInfo);
bool BuildCXXNestedNameSpecifier(Scope *S,
NestedNameSpecInfo &IdInfo,
bool EnteringContext,
CXXScopeSpec &SS,
NamedDecl *ScopeLookupResult,
bool ErrorRecoveryLookup,
bool *IsCorrectedToColon = nullptr,
bool OnlyNamespace = false);
/// The parser has parsed a nested-name-specifier 'identifier::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param IdInfo Parser information about an identifier in the
/// nested-name-spec.
///
/// \param EnteringContext Whether we're entering the context nominated by
/// this nested-name-specifier.
///
/// \param SS The nested-name-specifier, which is both an input
/// parameter (the nested-name-specifier before this type) and an
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
///
/// \param ErrorRecoveryLookup If true, then this method is called to improve
/// error recovery. In this case do not emit error message.
///
/// \param IsCorrectedToColon If not null, suggestions to replace '::' -> ':'
/// are allowed. The bool value pointed by this parameter is set to 'true'
/// if the identifier is treated as if it was followed by ':', not '::'.
///
/// \param OnlyNamespace If true, only considers namespaces in lookup.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXNestedNameSpecifier(Scope *S,
NestedNameSpecInfo &IdInfo,
bool EnteringContext,
CXXScopeSpec &SS,
bool ErrorRecoveryLookup = false,
bool *IsCorrectedToColon = nullptr,
bool OnlyNamespace = false);
ExprResult ActOnDecltypeExpression(Expr *E);
bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS,
const DeclSpec &DS,
SourceLocation ColonColonLoc);
bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS,
NestedNameSpecInfo &IdInfo,
bool EnteringContext);
/// The parser has parsed a nested-name-specifier
/// 'template[opt] template-name < template-args >::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param SS The nested-name-specifier, which is both an input
/// parameter (the nested-name-specifier before this type) and an
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
///
/// \param TemplateKWLoc the location of the 'template' keyword, if any.
/// \param TemplateName the template name.
/// \param TemplateNameLoc The location of the template name.
/// \param LAngleLoc The location of the opening angle bracket ('<').
/// \param TemplateArgs The template arguments.
/// \param RAngleLoc The location of the closing angle bracket ('>').
/// \param CCLoc The location of the '::'.
///
/// \param EnteringContext Whether we're entering the context of the
/// nested-name-specifier.
///
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXNestedNameSpecifier(Scope *S,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
TemplateTy TemplateName,
SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc,
SourceLocation CCLoc,
bool EnteringContext);
/// Given a C++ nested-name-specifier, produce an annotation value
/// that the parser can use later to reconstruct the given
/// nested-name-specifier.
///
/// \param SS A nested-name-specifier.
///
/// \returns A pointer containing all of the information in the
/// nested-name-specifier \p SS.
void *SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS);
/// Given an annotation pointer for a nested-name-specifier, restore
/// the nested-name-specifier structure.
///
/// \param Annotation The annotation pointer, produced by
/// \c SaveNestedNameSpecifierAnnotation().
///
/// \param AnnotationRange The source range corresponding to the annotation.
///
/// \param SS The nested-name-specifier that will be updated with the contents
/// of the annotation pointer.
void RestoreNestedNameSpecifierAnnotation(void *Annotation,
SourceRange AnnotationRange,
CXXScopeSpec &SS);
bool ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS);
/// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global
/// scope or nested-name-specifier) is parsed, part of a declarator-id.
/// After this method is called, according to [C++ 3.4.3p3], names should be
/// looked up in the declarator-id's scope, until the declarator is parsed and
/// ActOnCXXExitDeclaratorScope is called.
/// The 'SS' should be a non-empty valid CXXScopeSpec.
bool ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS);
/// ActOnCXXExitDeclaratorScope - Called when a declarator that previously
/// invoked ActOnCXXEnterDeclaratorScope(), is finished. 'SS' is the same
/// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well.
/// Used to indicate that names should revert to being looked up in the
/// defining scope.
void ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS);
/// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse an
/// initializer for the declaration 'Dcl'.
/// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is a
/// static data member of class X, names should be looked up in the scope of
/// class X.
void ActOnCXXEnterDeclInitializer(Scope *S, Decl *Dcl);
/// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an
/// initializer for the declaration 'Dcl'.
void ActOnCXXExitDeclInitializer(Scope *S, Decl *Dcl);
/// Create a new lambda closure type.
CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange,
TypeSourceInfo *Info,
bool KnownDependent,
LambdaCaptureDefault CaptureDefault);
/// Start the definition of a lambda expression.
CXXMethodDecl *startLambdaDefinition(CXXRecordDecl *Class,
SourceRange IntroducerRange,
TypeSourceInfo *MethodType,
SourceLocation EndLoc,
ArrayRef<ParmVarDecl *> Params,
bool IsConstexprSpecified);
/// Endow the lambda scope info with the relevant properties.
void buildLambdaScope(sema::LambdaScopeInfo *LSI,
CXXMethodDecl *CallOperator,
SourceRange IntroducerRange,
LambdaCaptureDefault CaptureDefault,
SourceLocation CaptureDefaultLoc,
bool ExplicitParams,
bool ExplicitResultType,
bool Mutable);
/// Perform initialization analysis of the init-capture and perform
/// any implicit conversions such as an lvalue-to-rvalue conversion if
/// not being used to initialize a reference.
ParsedType actOnLambdaInitCaptureInitialization(
    SourceLocation Loc, bool ByRef, IdentifierInfo *Id,
    LambdaCaptureInitKind InitKind, Expr *&Init) {
  // Only CopyInit is copy-initialization; every other init kind is
  // treated as direct initialization.
  bool DirectInit = InitKind != LambdaCaptureInitKind::CopyInit;
  QualType CaptureTy =
      buildLambdaInitCaptureInitialization(Loc, ByRef, Id, DirectInit, Init);
  return ParsedType::make(CaptureTy);
}
QualType buildLambdaInitCaptureInitialization(SourceLocation Loc, bool ByRef,
IdentifierInfo *Id,
bool DirectInit, Expr *&Init);
/// Create a dummy variable within the declcontext of the lambda's
/// call operator, for name lookup purposes for a lambda init capture.
///
/// CodeGen handles emission of lambda captures, ignoring these dummy
/// variables appropriately.
VarDecl *createLambdaInitCaptureVarDecl(SourceLocation Loc,
QualType InitCaptureType,
IdentifierInfo *Id,
unsigned InitStyle, Expr *Init);
/// Build the implicit field for an init-capture.
FieldDecl *buildInitCaptureField(sema::LambdaScopeInfo *LSI, VarDecl *Var);
/// Note that we have finished the explicit captures for the
/// given lambda.
void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI);
/// Introduce the lambda parameters into scope.
void addLambdaParameters(
ArrayRef<LambdaIntroducer::LambdaCapture> Captures,
CXXMethodDecl *CallOperator, Scope *CurScope);
/// Deduce a block or lambda's return type based on the return
/// statements present in the body.
void deduceClosureReturnType(sema::CapturingScopeInfo &CSI);
/// ActOnStartOfLambdaDefinition - This is called just before we start
/// parsing the body of a lambda; it analyzes the explicit captures and
/// arguments, and sets up various data-structures for the body of the
/// lambda.
void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
Declarator &ParamInfo, Scope *CurScope);
/// ActOnLambdaError - If there is an error parsing a lambda, this callback
/// is invoked to pop the information about the lambda.
void ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope,
bool IsInstantiation = false);
/// ActOnLambdaExpr - This is called when the body of a lambda expression
/// was successfully completed.
ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body,
Scope *CurScope);
/// Does copying/destroying the captured variable have side effects?
bool CaptureHasSideEffects(const sema::Capture &From);
/// Diagnose if an explicit lambda capture is unused. Returns true if a
/// diagnostic is emitted.
bool DiagnoseUnusedLambdaCapture(SourceRange CaptureRange,
const sema::Capture &From);
/// Complete a lambda-expression having processed and attached the
/// lambda body.
ExprResult BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc,
sema::LambdaScopeInfo *LSI);
/// Get the return type to use for a lambda's conversion function(s) to
/// function pointer type, given the type of the call operator.
QualType
getLambdaConversionFunctionResultType(const FunctionProtoType *CallOpType);
/// Define the "body" of the conversion from a lambda object to a
/// function pointer.
///
/// This routine doesn't actually define a sensible body; rather, it fills
/// in the initialization expression needed to copy the lambda object into
/// the block, and IR generation actually generates the real body of the
/// block pointer conversion.
void DefineImplicitLambdaToFunctionPointerConversion(
SourceLocation CurrentLoc, CXXConversionDecl *Conv);
/// Define the "body" of the conversion from a lambda object to a
/// block pointer.
///
/// This routine doesn't actually define a sensible body; rather, it fills
/// in the initialization expression needed to copy the lambda object into
/// the block, and IR generation actually generates the real body of the
/// block pointer conversion.
void DefineImplicitLambdaToBlockPointerConversion(SourceLocation CurrentLoc,
CXXConversionDecl *Conv);
ExprResult BuildBlockForLambdaConversion(SourceLocation CurrentLocation,
SourceLocation ConvLocation,
CXXConversionDecl *Conv,
Expr *Src);
// ParseObjCStringLiteral - Parse Objective-C string literals.
ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs,
ArrayRef<Expr *> Strings);
ExprResult BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S);
/// BuildObjCNumericLiteral - builds an ObjCBoxedExpr AST node for the
/// numeric literal expression. Type of the expression will be "NSNumber *"
/// or "id" if NSNumber is unavailable.
ExprResult BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number);
ExprResult ActOnObjCBoolLiteral(SourceLocation AtLoc, SourceLocation ValueLoc,
bool Value);
ExprResult BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements);
/// BuildObjCBoxedExpr - builds an ObjCBoxedExpr AST node for the
/// '@' prefixed parenthesized expression. The type of the expression will
/// either be "NSNumber *", "NSString *" or "NSValue *" depending on the type
/// of ValueType, which is allowed to be a built-in numeric type, "char *",
/// "const char *" or C structure with attribute 'objc_boxable'.
ExprResult BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr);
ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr,
Expr *IndexExpr,
ObjCMethodDecl *getterMethod,
ObjCMethodDecl *setterMethod);
ExprResult BuildObjCDictionaryLiteral(SourceRange SR,
MutableArrayRef<ObjCDictionaryElement> Elements);
ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc,
TypeSourceInfo *EncodedTypeInfo,
SourceLocation RParenLoc);
ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl,
CXXConversionDecl *Method,
bool HadMultipleCandidates);
ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc,
SourceLocation EncodeLoc,
SourceLocation LParenLoc,
ParsedType Ty,
SourceLocation RParenLoc);
/// ParseObjCSelectorExpression - Build selector expression for \@selector
ExprResult ParseObjCSelectorExpression(Selector Sel,
SourceLocation AtLoc,
SourceLocation SelLoc,
SourceLocation LParenLoc,
SourceLocation RParenLoc,
bool WarnMultipleSelectors);
/// ParseObjCProtocolExpression - Build protocol expression for \@protocol
ExprResult ParseObjCProtocolExpression(IdentifierInfo * ProtocolName,
SourceLocation AtLoc,
SourceLocation ProtoLoc,
SourceLocation LParenLoc,
SourceLocation ProtoIdLoc,
SourceLocation RParenLoc);
//===--------------------------------------------------------------------===//
// C++ Declarations
//
Decl *ActOnStartLinkageSpecification(Scope *S,
SourceLocation ExternLoc,
Expr *LangStr,
SourceLocation LBraceLoc);
Decl *ActOnFinishLinkageSpecification(Scope *S,
Decl *LinkageSpec,
SourceLocation RBraceLoc);
//===--------------------------------------------------------------------===//
// C++ Classes
//
CXXRecordDecl *getCurrentClass(Scope *S, const CXXScopeSpec *SS);
bool isCurrentClassName(const IdentifierInfo &II, Scope *S,
const CXXScopeSpec *SS = nullptr);
bool isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS);
bool ActOnAccessSpecifier(AccessSpecifier Access, SourceLocation ASLoc,
SourceLocation ColonLoc,
const ParsedAttributesView &Attrs);
NamedDecl *ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS,
Declarator &D,
MultiTemplateParamsArg TemplateParameterLists,
Expr *BitfieldWidth, const VirtSpecifiers &VS,
InClassInitStyle InitStyle);
void ActOnStartCXXInClassMemberInitializer();
void ActOnFinishCXXInClassMemberInitializer(Decl *VarDecl,
SourceLocation EqualLoc,
Expr *Init);
/// Handle a constructor mem-initializer written with a parenthesized
/// argument list: 'Ctor() : MemberOrBase(Args...) ...'.
MemInitResult ActOnMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
SourceLocation LParenLoc,
ArrayRef<Expr *> Args,
SourceLocation RParenLoc,
SourceLocation EllipsisLoc);
/// Handle a constructor mem-initializer written with a braced-init-list:
/// 'Ctor() : MemberOrBase{...} ...'.
MemInitResult ActOnMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
Expr *InitList,
SourceLocation EllipsisLoc);
/// Resolve \p MemberOrBase to a member, base class, or delegating
/// target and build the corresponding initializer.
MemInitResult BuildMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
Expr *Init,
SourceLocation EllipsisLoc);
/// Build an initializer for the non-static data member \p Member.
MemInitResult BuildMemberInitializer(ValueDecl *Member,
Expr *Init,
SourceLocation IdLoc);
/// Build an initializer for the base class \p BaseType of \p ClassDecl.
MemInitResult BuildBaseInitializer(QualType BaseType,
TypeSourceInfo *BaseTInfo,
Expr *Init,
CXXRecordDecl *ClassDecl,
SourceLocation EllipsisLoc);
/// Build a delegating-constructor initializer targeting \p TInfo.
MemInitResult BuildDelegatingInitializer(TypeSourceInfo *TInfo,
Expr *Init,
CXXRecordDecl *ClassDecl);
/// Install \p Initializer as the (sole) delegating initializer of
/// \p Constructor; returns true on error.
bool SetDelegatingInitializer(CXXConstructorDecl *Constructor,
CXXCtorInitializer *Initializer);
/// Attach the given (possibly empty) set of mem-initializers to
/// \p Constructor, filling in implicit initializations as needed.
bool SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors,
ArrayRef<CXXCtorInitializer *> Initializers = None);
/// Attach ivar initializers to an Objective-C implementation.
void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation);
/// MarkBaseAndMemberDestructorsReferenced - Given a record decl,
/// mark all the non-trivial destructors of its members and bases as
/// referenced.
void MarkBaseAndMemberDestructorsReferenced(SourceLocation Loc,
CXXRecordDecl *Record);
/// The list of classes whose vtables have been used within
/// this translation unit, and the source locations at which the
/// first use occurred.
typedef std::pair<CXXRecordDecl*, SourceLocation> VTableUse;
/// The list of vtables that are required but have not yet been
/// materialized.
SmallVector<VTableUse, 16> VTableUses;
/// The set of classes whose vtables have been used within
/// this translation unit, and a bit that will be true if the vtable is
/// required to be emitted (otherwise, it should be emitted only if needed
/// by code generation).
llvm::DenseMap<CXXRecordDecl *, bool> VTablesUsed;
/// Load any externally-stored vtable uses.
void LoadExternalVTableUses();
/// Note that the vtable for the given class was used at the
/// given location.
void MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class,
bool DefinitionRequired = false);
/// Mark the exception specifications of all virtual member functions
/// in the given class as needed.
void MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc,
const CXXRecordDecl *RD);
/// MarkVirtualMembersReferenced - Will mark all members of the given
/// CXXRecordDecl referenced.
void MarkVirtualMembersReferenced(SourceLocation Loc,
const CXXRecordDecl *RD);
/// Define all of the vtables that have been used in this
/// translation unit and reference any virtual members used by those
/// vtables.
///
/// \returns true if any work was done, false otherwise.
bool DefineUsedVTables();
/// Add the implicitly-declared special member functions (default ctor,
/// copy/move operations, destructor) to \p ClassDecl as needed.
void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl);
/// Act on the full set of mem-initializers of a constructor once they
/// have all been parsed.
void ActOnMemInitializers(Decl *ConstructorDecl,
SourceLocation ColonLoc,
ArrayRef<CXXCtorInitializer*> MemInits,
bool AnyErrors);
/// Check class-level dllimport/dllexport attribute. The caller must
/// ensure that referenceDLLExportedClassMethods is called some point later
/// when all outer classes of Class are complete.
void checkClassLevelDLLAttribute(CXXRecordDecl *Class);
/// Check a class-level code_seg attribute.
void checkClassLevelCodeSegAttribute(CXXRecordDecl *Class);
/// Reference the methods of classes marked dllexport (deferred from
/// checkClassLevelDLLAttribute).
void referenceDLLExportedClassMethods();
/// Propagate a DLL attribute from \p Class to the base-class template
/// specialization \p BaseTemplateSpec it derives from.
void propagateDLLAttrToBaseClassTemplate(
CXXRecordDecl *Class, Attr *ClassAttr,
ClassTemplateSpecializationDecl *BaseTemplateSpec,
SourceLocation BaseLoc);
/// Perform the checks that apply once a class definition is complete.
void CheckCompletedCXXClass(CXXRecordDecl *Record);
/// Check that the C++ class annotated with "trivial_abi" satisfies all the
/// conditions that are needed for the attribute to have an effect.
void checkIllFormedTrivialABIStruct(CXXRecordDecl &RD);
/// Act on the closing brace of a C++ member-specification.
void ActOnFinishCXXMemberSpecification(Scope *S, SourceLocation RLoc,
Decl *TagDecl, SourceLocation LBrac,
SourceLocation RBrac,
const ParsedAttributesView &AttrList);
void ActOnFinishCXXMemberDecls();
void ActOnFinishCXXNonNestedClass(Decl *D);
/// Re-enter \p Param's scope when late-parsing a method body.
void ActOnReenterCXXMethodParameter(Scope *S, ParmVarDecl *Param);
/// Re-enter the template parameter scope of \p Template; returns the
/// number of parameter lists re-entered.
unsigned ActOnReenterTemplateScope(Scope *S, Decl *Template);
// Hooks for delayed parsing of member declarations and method bodies
// (default args, in-class initializers) after the class is complete.
void ActOnStartDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnDelayedCXXMethodParameter(Scope *S, Decl *Param);
void ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnFinishDelayedMemberInitializers(Decl *Record);
/// Record \p FD as a late-parsed template whose tokens \p Toks will be
/// parsed on demand.
void MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD,
CachedTokens &Toks);
void UnmarkAsLateParsedTemplate(FunctionDecl *FD);
bool IsInsideALocalClassWithinATemplateFunction();
bool IsInsideALocalClassWithinATemplateFunction();
Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc,
Expr *AssertExpr,
Expr *AssertMessageExpr,
SourceLocation RParenLoc);
Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc,
Expr *AssertExpr,
StringLiteral *AssertMessageExpr,
SourceLocation RParenLoc,
bool Failed);
FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart,
SourceLocation FriendLoc,
TypeSourceInfo *TSInfo);
Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS,
MultiTemplateParamsArg TemplateParams);
NamedDecl *ActOnFriendFunctionDecl(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParams);
QualType CheckConstructorDeclarator(Declarator &D, QualType R,
StorageClass& SC);
void CheckConstructor(CXXConstructorDecl *Constructor);
QualType CheckDestructorDeclarator(Declarator &D, QualType R,
StorageClass& SC);
bool CheckDestructor(CXXDestructorDecl *Destructor);
void CheckConversionDeclarator(Declarator &D, QualType &R,
StorageClass& SC);
Decl *ActOnConversionDeclarator(CXXConversionDecl *Conversion);
void CheckDeductionGuideDeclarator(Declarator &D, QualType &R,
StorageClass &SC);
void CheckDeductionGuideTemplate(FunctionTemplateDecl *TD);
void CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD);
void CheckExplicitlyDefaultedMemberExceptionSpec(CXXMethodDecl *MD,
const FunctionProtoType *T);
void CheckDelayedMemberExceptionSpecs();
//===--------------------------------------------------------------------===//
// C++ Derived Classes
//
/// ActOnBaseSpecifier - Parsed a base specifier
CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class,
SourceRange SpecifierRange,
bool Virtual, AccessSpecifier Access,
TypeSourceInfo *TInfo,
SourceLocation EllipsisLoc);
/// Handle one parsed base-specifier of class \p classdecl.
BaseResult ActOnBaseSpecifier(Decl *classdecl,
SourceRange SpecifierRange,
ParsedAttributes &Attrs,
bool Virtual, AccessSpecifier Access,
ParsedType basetype,
SourceLocation BaseLoc,
SourceLocation EllipsisLoc);
/// Attach the checked base specifiers to \p Class; returns true on error.
bool AttachBaseSpecifiers(CXXRecordDecl *Class,
MutableArrayRef<CXXBaseSpecifier *> Bases);
void ActOnBaseSpecifiers(Decl *ClassDecl,
MutableArrayRef<CXXBaseSpecifier *> Bases);
/// Determine whether \p Derived is derived from \p Base; the second
/// overload also records the inheritance paths in \p Paths.
bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base);
bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base,
CXXBasePaths &Paths);
// FIXME: I don't like this name.
void BuildBasePathArray(const CXXBasePaths &Paths, CXXCastPath &BasePath);
/// Check that a derived-to-base conversion is unambiguous and (unless
/// \p IgnoreAccess) accessible; optionally produces the cast path.
bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
SourceLocation Loc, SourceRange Range,
CXXCastPath *BasePath = nullptr,
bool IgnoreAccess = false);
bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
unsigned InaccessibleBaseID,
unsigned AmbigiousBaseConvID,
SourceLocation Loc, SourceRange Range,
DeclarationName Name,
CXXCastPath *BasePath,
bool IgnoreAccess = false);
std::string getAmbiguousPathsDisplayString(CXXBasePaths &Paths);
bool CheckOverridingFunctionAttributes(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
/// CheckOverridingFunctionReturnType - Checks whether the return types are
/// covariant, according to C++ [class.virtual]p5.
bool CheckOverridingFunctionReturnType(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
/// CheckOverridingFunctionExceptionSpec - Checks whether the exception
/// spec is a subset of base spec.
bool CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange);
/// CheckOverrideControl - Check C++11 override control semantics.
void CheckOverrideControl(NamedDecl *D);
/// DiagnoseAbsenceOfOverrideControl - Diagnose if 'override' keyword was
/// not used in the declaration of an overriding method.
void DiagnoseAbsenceOfOverrideControl(NamedDecl *D);
/// CheckForFunctionMarkedFinal - Checks whether a virtual member function
/// overrides a virtual member function marked 'final', according to
/// C++11 [class.virtual]p4.
bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
//===--------------------------------------------------------------------===//
// C++ Access Control
//
/// Outcome of an access-control check.
enum AccessResult {
/// Access is permitted.
AR_accessible,
/// Access is not permitted.
AR_inaccessible,
/// The result depends on template parameters not yet known.
AR_dependent,
/// The check has been deferred (e.g. until the class is complete).
AR_delayed
};
/// Set the access specifier of \p MemberDecl, checking consistency with
/// any previous declaration \p PrevMemberDecl.
bool SetMemberAccessSpecifier(NamedDecl *MemberDecl,
NamedDecl *PrevMemberDecl,
AccessSpecifier LexicalAS);
/// Access checks for the various kinds of member/function references.
AccessResult CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E,
DeclAccessPair FoundDecl);
AccessResult CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E,
DeclAccessPair FoundDecl);
/// Check access to an allocation/deallocation function found for a
/// new/delete expression.
AccessResult CheckAllocationAccess(SourceLocation OperatorLoc,
SourceRange PlacementRange,
CXXRecordDecl *NamingClass,
DeclAccessPair FoundDecl,
bool Diagnose = true);
AccessResult CheckConstructorAccess(SourceLocation Loc,
CXXConstructorDecl *D,
DeclAccessPair FoundDecl,
const InitializedEntity &Entity,
bool IsCopyBindingRefToTemp = false);
AccessResult CheckConstructorAccess(SourceLocation Loc,
CXXConstructorDecl *D,
DeclAccessPair FoundDecl,
const InitializedEntity &Entity,
const PartialDiagnostic &PDiag);
AccessResult CheckDestructorAccess(SourceLocation Loc,
CXXDestructorDecl *Dtor,
const PartialDiagnostic &PDiag,
QualType objectType = QualType());
AccessResult CheckFriendAccess(NamedDecl *D);
AccessResult CheckMemberAccess(SourceLocation UseLoc,
CXXRecordDecl *NamingClass,
DeclAccessPair Found);
AccessResult
CheckStructuredBindingMemberAccess(SourceLocation UseLoc,
CXXRecordDecl *DecomposedClass,
DeclAccessPair Field);
AccessResult CheckMemberOperatorAccess(SourceLocation Loc,
Expr *ObjectExpr,
Expr *ArgExpr,
DeclAccessPair FoundDecl);
AccessResult CheckAddressOfMemberAccess(Expr *OvlExpr,
DeclAccessPair FoundDecl);
/// Check access to a base class along the conversion path \p Path.
AccessResult CheckBaseClassAccess(SourceLocation AccessLoc,
QualType Base, QualType Derived,
const CXXBasePath &Path,
unsigned DiagID,
bool ForceCheck = false,
bool ForceUnprivileged = false);
/// Check access for every declaration in the lookup result \p R.
void CheckLookupAccess(const LookupResult &R);
bool IsSimplyAccessible(NamedDecl *Decl, CXXRecordDecl *NamingClass,
QualType BaseType);
bool isSpecialMemberAccessibleForDeletion(CXXMethodDecl *decl,
AccessSpecifier access,
QualType objectType);
// Handling of access checks that had to be deferred (dependent contexts
// or delayed diagnostics).
void HandleDependentAccessCheck(const DependentDiagnostic &DD,
const MultiLevelTemplateArgumentList &TemplateArgs);
void PerformDependentDiagnostics(const DeclContext *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs);
void HandleDelayedAccessCheck(sema::DelayedDiagnostic &DD, Decl *Ctx);
/// When true, access checking violations are treated as SFINAE
/// failures rather than hard errors.
bool AccessCheckingSFINAE;
/// Selector values for the "abstract type" diagnostics, identifying the
/// context in which an abstract class type was used.
enum AbstractDiagSelID {
AbstractNone = -1,
AbstractReturnType,
AbstractParamType,
AbstractVariableType,
AbstractFieldType,
AbstractIvarType,
AbstractSynthesizedIvarType,
AbstractArrayType
};
/// Determine whether \p T is an abstract class type.
bool isAbstractType(SourceLocation Loc, QualType T);
/// Require that \p T not be abstract, emitting a diagnostic via
/// \p Diagnoser otherwise; returns true on error.
bool RequireNonAbstractType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
template <typename... Ts>
bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
// Bundle the diagnostic ID with its arguments and delegate to the
// TypeDiagnoser-based overload.
BoundTypeDiagnoser<Ts...> BoundDiag{DiagID, Args...};
return RequireNonAbstractType(Loc, T, BoundDiag);
}
void DiagnoseAbstractType(const CXXRecordDecl *RD);
//===--------------------------------------------------------------------===//
// C++ Overloaded Operators [C++ 13.5]
//
/// Check the declaration of an overloaded operator; returns true on error.
bool CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl);
/// Check the declaration of a literal operator; returns true on error.
bool CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl);
//===--------------------------------------------------------------------===//
// C++ Templates [C++ 14]
//
/// Remove from \p R any declarations that cannot be interpreted as
/// template names.
void FilterAcceptableTemplateNames(LookupResult &R,
bool AllowFunctionTemplates = true,
bool AllowDependent = true);
/// Determine whether \p R contains any declaration usable as a
/// template name.
bool hasAnyAcceptableTemplateNames(LookupResult &R,
bool AllowFunctionTemplates = true,
bool AllowDependent = true);
/// Try to interpret the lookup result D as a template-name.
///
/// \param D A declaration found by name lookup.
/// \param AllowFunctionTemplates Whether function templates should be
/// considered valid results.
/// \param AllowDependent Whether unresolved using declarations (that might
/// name templates) should be considered valid results.
NamedDecl *getAsTemplateNameDecl(NamedDecl *D,
bool AllowFunctionTemplates = true,
bool AllowDependent = true);
/// Perform name lookup for a template name, filling in \p R.
bool LookupTemplateName(LookupResult &R, Scope *S, CXXScopeSpec &SS,
QualType ObjectType, bool EnteringContext,
bool &MemberOfUnknownSpecialization,
SourceLocation TemplateKWLoc = SourceLocation());
/// Determine whether \p Name is a template name in this context and,
/// if so, return it via \p Template.
TemplateNameKind isTemplateName(Scope *S,
CXXScopeSpec &SS,
bool hasTemplateKeyword,
const UnqualifiedId &Name,
ParsedType ObjectType,
bool EnteringContext,
TemplateTy &Template,
bool &MemberOfUnknownSpecialization);
/// Determine whether a particular identifier might be the name in a C++1z
/// deduction-guide declaration.
bool isDeductionGuideName(Scope *S, const IdentifierInfo &Name,
SourceLocation NameLoc,
ParsedTemplateTy *Template = nullptr);
/// Diagnose the use of \p II where a template name was expected,
/// optionally suggesting a correction.
bool DiagnoseUnknownTemplateName(const IdentifierInfo &II,
SourceLocation IILoc,
Scope *S,
const CXXScopeSpec *SS,
TemplateTy &SuggestedTemplate,
TemplateNameKind &SuggestedKind);
/// Diagnose an attempt to instantiate a template whose pattern is not
/// available (e.g. only declared); returns true if uninstantiable.
bool DiagnoseUninstantiableTemplate(SourceLocation PointOfInstantiation,
NamedDecl *Instantiation,
bool InstantiatedFromMember,
const NamedDecl *Pattern,
const NamedDecl *PatternDef,
TemplateSpecializationKind TSK,
bool Complain = true);
/// Diagnose a declaration that shadows a template parameter.
void DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl);
/// If \p Decl is a template, replace it with its templated declaration
/// and return the template; otherwise return null.
TemplateDecl *AdjustDeclIfTemplate(Decl *&Decl);
/// Handle a parsed type template parameter ('typename T' / 'class T').
NamedDecl *ActOnTypeParameter(Scope *S, bool Typename,
SourceLocation EllipsisLoc,
SourceLocation KeyLoc,
IdentifierInfo *ParamName,
SourceLocation ParamNameLoc,
unsigned Depth, unsigned Position,
SourceLocation EqualLoc,
ParsedType DefaultArg);
/// Check that a type is valid as the type of a non-type template
/// parameter, possibly adjusting it.
QualType CheckNonTypeTemplateParameterType(TypeSourceInfo *&TSI,
SourceLocation Loc);
QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc);
/// Handle a parsed non-type template parameter.
NamedDecl *ActOnNonTypeTemplateParameter(Scope *S, Declarator &D,
unsigned Depth,
unsigned Position,
SourceLocation EqualLoc,
Expr *DefaultArg);
/// Handle a parsed template template parameter.
NamedDecl *ActOnTemplateTemplateParameter(Scope *S,
SourceLocation TmpLoc,
TemplateParameterList *Params,
SourceLocation EllipsisLoc,
IdentifierInfo *ParamName,
SourceLocation ParamNameLoc,
unsigned Depth,
unsigned Position,
SourceLocation EqualLoc,
ParsedTemplateArgument DefaultArg);
/// Build a template parameter list from the parsed parameters and
/// optional requires-clause.
TemplateParameterList *
ActOnTemplateParameterList(unsigned Depth,
SourceLocation ExportLoc,
SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
ArrayRef<NamedDecl *> Params,
SourceLocation RAngleLoc,
Expr *RequiresClause);
/// The context in which we are checking a template parameter list.
enum TemplateParamListContext {
TPC_ClassTemplate,
TPC_VarTemplate,
TPC_FunctionTemplate,
TPC_ClassTemplateMember,
TPC_FriendClassTemplate,
TPC_FriendFunctionTemplate,
TPC_FriendFunctionTemplateDefinition,
TPC_TypeAliasTemplate
};
/// Check \p NewParams against a previous declaration's \p OldParams
/// (which may be null) in context \p TPC; returns true on error.
bool CheckTemplateParameterList(TemplateParameterList *NewParams,
TemplateParameterList *OldParams,
TemplateParamListContext TPC,
SkipBodyInfo *SkipBody = nullptr);
/// Match the parsed template parameter lists against the scope
/// specifier of an out-of-line declaration, returning the list that
/// applies to the declaration itself (or null).
TemplateParameterList *MatchTemplateParametersToScopeSpecifier(
SourceLocation DeclStartLoc, SourceLocation DeclLoc,
const CXXScopeSpec &SS, TemplateIdAnnotation *TemplateId,
ArrayRef<TemplateParameterList *> ParamLists,
bool IsFriend, bool &IsMemberSpecialization, bool &Invalid);
/// Check a class template declaration (or redeclaration).
DeclResult CheckClassTemplate(
Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc,
const ParsedAttributesView &Attr, TemplateParameterList *TemplateParams,
AccessSpecifier AS, SourceLocation ModulePrivateLoc,
SourceLocation FriendLoc, unsigned NumOuterTemplateParamLists,
TemplateParameterList **OuterTemplateParamLists,
SkipBodyInfo *SkipBody = nullptr);
/// Build a TemplateArgumentLoc for \p Arg with trivial (location-only)
/// source information.
TemplateArgumentLoc getTrivialTemplateArgumentLoc(const TemplateArgument &Arg,
QualType NTTPType,
SourceLocation Loc);
/// Translate parser-level template arguments into semantic form.
void translateTemplateArguments(const ASTTemplateArgsPtr &In,
TemplateArgumentListInfo &Out);
ParsedTemplateArgument ActOnTemplateTypeArgument(TypeResult ParsedType);
/// Note all declarations found for the template name \p Name.
void NoteAllFoundTemplates(TemplateName Name);
/// Form the type of a template-id, checking its arguments.
QualType CheckTemplateIdType(TemplateName Template,
SourceLocation TemplateLoc,
TemplateArgumentListInfo &TemplateArgs);
/// Handle a parsed template-id used as a type.
TypeResult
ActOnTemplateIdType(CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
TemplateTy Template, IdentifierInfo *TemplateII,
SourceLocation TemplateIILoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc,
bool IsCtorOrDtorName = false,
bool IsClassName = false);
/// Parsed an elaborated-type-specifier that refers to a template-id,
/// such as \c class T::template apply<U>.
TypeResult ActOnTagTemplateIdType(TagUseKind TUK,
TypeSpecifierType TagSpec,
SourceLocation TagLoc,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
TemplateTy TemplateD,
SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgsIn,
SourceLocation RAngleLoc);
/// Handle a declarator that declares a variable template
/// (partial) specialization.
DeclResult ActOnVarTemplateSpecialization(
Scope *S, Declarator &D, TypeSourceInfo *DI,
SourceLocation TemplateKWLoc, TemplateParameterList *TemplateParams,
StorageClass SC, bool IsPartialSpecialization);
/// Check a variable template-id, producing the specialization decl.
DeclResult CheckVarTemplateId(VarTemplateDecl *Template,
SourceLocation TemplateLoc,
SourceLocation TemplateNameLoc,
const TemplateArgumentListInfo &TemplateArgs);
/// Check a variable template-id used as an expression.
ExprResult CheckVarTemplateId(const CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
VarTemplateDecl *Template,
SourceLocation TemplateLoc,
const TemplateArgumentListInfo *TemplateArgs);
void diagnoseMissingTemplateArguments(TemplateName Name, SourceLocation Loc);
/// Build an expression referencing a template-id.
ExprResult BuildTemplateIdExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
bool RequiresADL,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
/// Handle a dependent template name ('T::template f').
TemplateNameKind ActOnDependentTemplateName(
Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext,
TemplateTy &Template, bool AllowInjectedClassName = false);
/// Handle a parsed class template (partial) specialization.
DeclResult ActOnClassTemplateSpecialization(
Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
SourceLocation ModulePrivateLoc, TemplateIdAnnotation &TemplateId,
const ParsedAttributesView &Attr,
MultiTemplateParamsArg TemplateParameterLists,
SkipBodyInfo *SkipBody = nullptr);
/// Check the template arguments of a partial specialization against the
/// primary template; returns true on error.
bool CheckTemplatePartialSpecializationArgs(SourceLocation Loc,
TemplateDecl *PrimaryTemplate,
unsigned NumExplicitArgs,
ArrayRef<TemplateArgument> Args);
void CheckTemplatePartialSpecialization(
ClassTemplatePartialSpecializationDecl *Partial);
void CheckTemplatePartialSpecialization(
VarTemplatePartialSpecializationDecl *Partial);
/// Handle a declarator preceded by template parameter lists.
Decl *ActOnTemplateDeclarator(Scope *S,
MultiTemplateParamsArg TemplateParameterLists,
Declarator &D);
/// Check a redeclaration of a specialization or instantiation against a
/// previous one; \p SuppressNew is set if the new one should be dropped.
bool
CheckSpecializationInstantiationRedecl(SourceLocation NewLoc,
TemplateSpecializationKind NewTSK,
NamedDecl *PrevDecl,
TemplateSpecializationKind PrevTSK,
SourceLocation PrevPtOfInstantiation,
bool &SuppressNew);
bool CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD,
const TemplateArgumentListInfo &ExplicitTemplateArgs,
LookupResult &Previous);
bool CheckFunctionTemplateSpecialization(
FunctionDecl *FD, TemplateArgumentListInfo *ExplicitTemplateArgs,
LookupResult &Previous, bool QualifiedFriend = false);
bool CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous);
void CompleteMemberSpecialization(NamedDecl *Member, LookupResult &Previous);
/// Handle an explicit instantiation of a class template specialization.
DeclResult ActOnExplicitInstantiation(
Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc,
unsigned TagSpec, SourceLocation KWLoc, const CXXScopeSpec &SS,
TemplateTy Template, SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc, const ParsedAttributesView &Attr);
/// Handle an explicit instantiation of a member class.
DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc,
SourceLocation TemplateLoc,
unsigned TagSpec, SourceLocation KWLoc,
CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc,
const ParsedAttributesView &Attr);
/// Handle an explicit instantiation of a function or variable template.
DeclResult ActOnExplicitInstantiation(Scope *S,
SourceLocation ExternLoc,
SourceLocation TemplateLoc,
Declarator &D);
/// Substitute into the default argument of \p Param, if it has one;
/// \p HasDefaultArg reports whether a default existed.
TemplateArgumentLoc
SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template,
SourceLocation TemplateLoc,
SourceLocation RAngleLoc,
Decl *Param,
SmallVectorImpl<TemplateArgument>
&Converted,
bool &HasDefaultArg);
/// Specifies the context in which a particular template
/// argument is being checked.
enum CheckTemplateArgumentKind {
/// The template argument was specified in the code or was
/// instantiated with some deduced template arguments.
CTAK_Specified,
/// The template argument was deduced via template argument
/// deduction.
CTAK_Deduced,
/// The template argument was deduced from an array bound
/// via template argument deduction.
CTAK_DeducedFromArrayBound
};
/// Check a single template argument against its corresponding template
/// parameter, appending the converted argument to \p Converted;
/// returns true on error.
bool CheckTemplateArgument(NamedDecl *Param,
TemplateArgumentLoc &Arg,
NamedDecl *Template,
SourceLocation TemplateLoc,
SourceLocation RAngleLoc,
unsigned ArgumentPackIndex,
SmallVectorImpl<TemplateArgument> &Converted,
CheckTemplateArgumentKind CTAK = CTAK_Specified);
/// Check that the given template arguments can be provided to
/// the given template, converting the arguments along the way.
///
/// \param Template The template to which the template arguments are being
/// provided.
///
/// \param TemplateLoc The location of the template name in the source.
///
/// \param TemplateArgs The list of template arguments. If the template is
/// a template template parameter, this function may extend the set of
/// template arguments to also include substituted, defaulted template
/// arguments.
///
/// \param PartialTemplateArgs True if the list of template arguments is
/// intentionally partial, e.g., because we're checking just the initial
/// set of template arguments.
///
/// \param Converted Will receive the converted, canonicalized template
/// arguments.
///
/// \param UpdateArgsWithConversions If \c true, update \p TemplateArgs to
/// contain the converted forms of the template arguments as written.
/// Otherwise, \p TemplateArgs will not be modified.
///
/// \returns true if an error occurred, false otherwise.
bool CheckTemplateArgumentList(TemplateDecl *Template,
SourceLocation TemplateLoc,
TemplateArgumentListInfo &TemplateArgs,
bool PartialTemplateArgs,
SmallVectorImpl<TemplateArgument> &Converted,
bool UpdateArgsWithConversions = true);
/// Check a template argument against a type template parameter.
bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param,
TemplateArgumentLoc &Arg,
SmallVectorImpl<TemplateArgument> &Converted);
bool CheckTemplateArgument(TemplateTypeParmDecl *Param,
TypeSourceInfo *Arg);
/// Check a template argument against a non-type template parameter,
/// producing the converted argument.
ExprResult CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
QualType InstantiatedParamType, Expr *Arg,
TemplateArgument &Converted,
CheckTemplateArgumentKind CTAK = CTAK_Specified);
/// Check a template argument against a template template parameter.
bool CheckTemplateTemplateArgument(TemplateParameterList *Params,
TemplateArgumentLoc &Arg);
/// Rebuild an expression from a declaration-valued template argument.
ExprResult
BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg,
QualType ParamType,
SourceLocation Loc);
/// Rebuild an expression from an integral template argument.
ExprResult
BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg,
SourceLocation Loc);
/// Enumeration describing how template parameter lists are compared
/// for equality.
enum TemplateParameterListEqualKind {
/// We are matching the template parameter lists of two templates
/// that might be redeclarations.
///
/// \code
/// template<typename T> struct X;
/// template<typename T> struct X;
/// \endcode
TPL_TemplateMatch,
/// We are matching the template parameter lists of two template
/// template parameters as part of matching the template parameter lists
/// of two templates that might be redeclarations.
///
/// \code
/// template<template<int I> class TT> struct X;
/// template<template<int Value> class Other> struct X;
/// \endcode
TPL_TemplateTemplateParmMatch,
/// We are matching the template parameter lists of a template
/// template argument against the template parameter lists of a template
/// template parameter.
///
/// \code
/// template<template<int Value> class Metafun> struct X;
/// template<int Value> struct integer_c;
/// X<integer_c> xic;
/// \endcode
TPL_TemplateTemplateArgumentMatch
};
/// Compare two template parameter lists for equality under \p Kind;
/// optionally emits diagnostics when \p Complain is true.
bool TemplateParameterListsAreEqual(TemplateParameterList *New,
TemplateParameterList *Old,
bool Complain,
TemplateParameterListEqualKind Kind,
SourceLocation TemplateArgLoc
= SourceLocation());
/// Check that a template declaration appears in a valid scope.
bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams);
/// Called when the parser has parsed a C++ typename
/// specifier, e.g., "typename T::type".
///
/// \param S The scope in which this typename type occurs.
/// \param TypenameLoc the location of the 'typename' keyword
/// \param SS the nested-name-specifier following the typename (e.g., 'T::').
/// \param II the identifier we're retrieving (e.g., 'type' in the example).
/// \param IdLoc the location of the identifier.
TypeResult
ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
const CXXScopeSpec &SS, const IdentifierInfo &II,
SourceLocation IdLoc);
/// Called when the parser has parsed a C++ typename
/// specifier that ends in a template-id, e.g.,
/// "typename MetaFun::template apply<T1, T2>".
///
/// \param S The scope in which this typename type occurs.
/// \param TypenameLoc the location of the 'typename' keyword
/// \param SS the nested-name-specifier following the typename (e.g., 'T::').
/// \param TemplateLoc the location of the 'template' keyword, if any.
/// \param TemplateName The template name.
/// \param TemplateII The identifier used to name the template.
/// \param TemplateIILoc The location of the template name.
/// \param LAngleLoc The location of the opening angle bracket ('<').
/// \param TemplateArgs The template arguments.
/// \param RAngleLoc The location of the closing angle bracket ('>').
TypeResult
ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
const CXXScopeSpec &SS,
SourceLocation TemplateLoc,
TemplateTy TemplateName,
IdentifierInfo *TemplateII,
SourceLocation TemplateIILoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc);
/// Build the type denoted by a typename specifier.
QualType CheckTypenameType(ElaboratedTypeKeyword Keyword,
SourceLocation KeywordLoc,
NestedNameSpecifierLoc QualifierLoc,
const IdentifierInfo &II,
SourceLocation IILoc);
// Rebuild types/expressions/nested-name-specifiers within the current
// instantiation so that member references resolve correctly.
TypeSourceInfo *RebuildTypeInCurrentInstantiation(TypeSourceInfo *T,
SourceLocation Loc,
DeclarationName Name);
bool RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS);
ExprResult RebuildExprInCurrentInstantiation(Expr *E);
bool RebuildTemplateParamsInCurrentInstantiation(
TemplateParameterList *Params);
/// Produce a user-readable "[with T = int, ...]" binding string for
/// diagnostics.
std::string
getTemplateArgumentBindingsText(const TemplateParameterList *Params,
const TemplateArgumentList &Args);
std::string
getTemplateArgumentBindingsText(const TemplateParameterList *Params,
const TemplateArgument *Args,
unsigned NumArgs);
//===--------------------------------------------------------------------===//
// C++ Variadic Templates (C++0x [temp.variadic])
//===--------------------------------------------------------------------===//
/// Determine whether an unexpanded parameter pack might be permitted in this
/// location. Useful for error recovery.
bool isUnexpandedParameterPackPermitted();
/// The context in which an unexpanded parameter pack is
/// being diagnosed.
///
/// Note that the values of this enumeration line up with the first
/// argument to the \c err_unexpanded_parameter_pack diagnostic.
enum UnexpandedParameterPackContext {
/// An arbitrary expression.
UPPC_Expression = 0,
/// The base type of a class type.
UPPC_BaseType,
/// The type of an arbitrary declaration.
UPPC_DeclarationType,
/// The type of a data member.
UPPC_DataMemberType,
/// The size of a bit-field.
UPPC_BitFieldWidth,
/// The expression in a static assertion.
UPPC_StaticAssertExpression,
/// The fixed underlying type of an enumeration.
UPPC_FixedUnderlyingType,
/// The enumerator value.
UPPC_EnumeratorValue,
/// A using declaration.
UPPC_UsingDeclaration,
/// A friend declaration.
UPPC_FriendDeclaration,
/// A declaration qualifier.
UPPC_DeclarationQualifier,
/// An initializer.
UPPC_Initializer,
/// A default argument.
UPPC_DefaultArgument,
/// The type of a non-type template parameter.
UPPC_NonTypeTemplateParameterType,
/// The type of an exception.
UPPC_ExceptionType,
/// Partial specialization.
UPPC_PartialSpecialization,
/// Microsoft __if_exists.
UPPC_IfExists,
/// Microsoft __if_not_exists.
UPPC_IfNotExists,
/// Lambda expression.
UPPC_Lambda,
/// Block expression.
UPPC_Block
};
/// Diagnose unexpanded parameter packs.
///
/// \param Loc The location at which we should emit the diagnostic.
///
/// \param UPPC The context in which we are diagnosing unexpanded
/// parameter packs.
///
/// \param Unexpanded the set of unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPacks(SourceLocation Loc,
UnexpandedParameterPackContext UPPC,
ArrayRef<UnexpandedParameterPack> Unexpanded);
/// If the given type contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The source location where a diagnostic should be emitted.
///
/// \param T The type that is being checked for unexpanded parameter
/// packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T,
UnexpandedParameterPackContext UPPC);
/// If the given expression contains an unexpanded parameter
/// pack, diagnose the error.
///
/// \param E The expression that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(Expr *E,
UnexpandedParameterPackContext UPPC = UPPC_Expression);
/// If the given nested-name-specifier contains an unexpanded
/// parameter pack, diagnose the error.
///
/// \param SS The nested-name-specifier that is being checked for
/// unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS,
UnexpandedParameterPackContext UPPC);
/// If the given name contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param NameInfo The name (with source location information) that
/// is being checked for unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo,
UnexpandedParameterPackContext UPPC);
/// If the given template name contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The location of the template name.
///
/// \param Template The template name that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc,
TemplateName Template,
UnexpandedParameterPackContext UPPC);
/// If the given template argument contains an unexpanded parameter
/// pack, diagnose the error.
///
/// \param Arg The template argument that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg,
UnexpandedParameterPackContext UPPC);
/// Collect the set of unexpanded parameter packs within the given
/// template argument.
///
/// \param Arg The template argument that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TemplateArgument Arg,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// template argument.
///
/// \param Arg The template argument that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TemplateArgumentLoc Arg,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// type.
///
/// \param T The type that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(QualType T,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// type.
///
/// \param TL The type that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TypeLoc TL,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// nested-name-specifier.
///
/// \param NNS The nested-name-specifier that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(NestedNameSpecifierLoc NNS,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// name.
///
/// \param NameInfo The name that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Invoked when parsing a template argument followed by an
/// ellipsis, which creates a pack expansion.
///
/// \param Arg The template argument preceding the ellipsis, which
/// may already be invalid.
///
/// \param EllipsisLoc The location of the ellipsis.
ParsedTemplateArgument ActOnPackExpansion(const ParsedTemplateArgument &Arg,
SourceLocation EllipsisLoc);
/// Invoked when parsing a type followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Type The type preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
TypeResult ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc);
/// Construct a pack expansion type from the pattern of the pack
/// expansion.
///
/// \param Pattern The pattern of the pack expansion.
/// \param EllipsisLoc The location of the ellipsis.
/// \param NumExpansions The number of expansions, if known a priori.
TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern,
SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// Construct a pack expansion type from the pattern of the pack
/// expansion.
///
/// \param Pattern The pattern of the pack expansion.
/// \param PatternRange The source range covering the pattern.
/// \param EllipsisLoc The location of the ellipsis.
/// \param NumExpansions The number of expansions, if known a priori.
QualType CheckPackExpansion(QualType Pattern,
SourceRange PatternRange,
SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// Invoked when parsing an expression followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Pattern The expression preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
ExprResult ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc);
/// Invoked when parsing an expression followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Pattern The expression preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// Determine whether we could expand a pack expansion with the
/// given set of parameter packs into separate arguments by repeatedly
/// transforming the pattern.
///
/// \param EllipsisLoc The location of the ellipsis that identifies the
/// pack expansion.
///
/// \param PatternRange The source range that covers the entire pattern of
/// the pack expansion.
///
/// \param Unexpanded The set of unexpanded parameter packs within the
/// pattern.
///
/// \param TemplateArgs The set of template arguments that will be
/// substituted into the pattern.
///
/// \param ShouldExpand Will be set to \c true if the transformer should
/// expand the corresponding pack expansions into separate arguments. When
/// set, \c NumExpansions must also be set.
///
/// \param RetainExpansion Whether the caller should add an unexpanded
/// pack expansion after all of the expanded arguments. This is used
/// when extending explicitly-specified template argument packs per
/// C++0x [temp.arg.explicit]p9.
///
/// \param NumExpansions The number of separate arguments that will be in
/// the expanded form of the corresponding pack expansion. This is both an
/// input and an output parameter, which can be set by the caller if the
/// number of expansions is known a priori (e.g., due to a prior substitution)
/// and will be set by the callee when the number of expansions is known.
/// The callee must set this value when \c ShouldExpand is \c true; it may
/// set this value in other cases.
///
/// \returns true if an error occurred (e.g., because the parameter packs
/// are to be instantiated with arguments of different lengths), false
/// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions)
/// must be set.
bool CheckParameterPacksForExpansion(SourceLocation EllipsisLoc,
SourceRange PatternRange,
ArrayRef<UnexpandedParameterPack> Unexpanded,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool &ShouldExpand,
bool &RetainExpansion,
Optional<unsigned> &NumExpansions);
/// Determine the number of arguments in the given pack expansion
/// type.
///
/// This routine assumes that the number of arguments in the expansion is
/// consistent across all of the unexpanded parameter packs in its pattern.
///
/// \param TemplateArgs The template arguments to be substituted into the
/// expansion pattern.
///
/// Returns an empty Optional if the type can't be expanded.
Optional<unsigned> getNumArgumentsInExpansion(QualType T,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Determine whether the given declarator contains any unexpanded
/// parameter packs.
///
/// This routine is used by the parser to disambiguate function declarators
/// with an ellipsis prior to the ')', e.g.,
///
/// \code
/// void f(T...);
/// \endcode
///
/// To determine whether we have an (unnamed) function parameter pack or
/// a variadic function.
///
/// \returns true if the declarator contains any unexpanded parameter packs,
/// false otherwise.
bool containsUnexpandedParameterPacks(Declarator &D);
/// Returns the pattern of the pack expansion for a template argument.
///
/// \param OrigLoc The template argument to expand.
///
/// \param Ellipsis Will be set to the location of the ellipsis.
///
/// \param NumExpansions Will be set to the number of expansions that will
/// be generated from this pack expansion, if known a priori.
TemplateArgumentLoc getTemplateArgumentPackExpansionPattern(
TemplateArgumentLoc OrigLoc,
SourceLocation &Ellipsis,
Optional<unsigned> &NumExpansions) const;
/// Given a template argument that contains an unexpanded parameter pack, but
/// which has already been substituted, attempt to determine the number of
/// elements that will be produced once this argument is fully-expanded.
///
/// This is intended for use when transforming 'sizeof...(Arg)' in order to
/// avoid actually expanding the pack where possible.
Optional<unsigned> getFullyPackExpandedSize(TemplateArgument Arg);
//===--------------------------------------------------------------------===//
// C++ Template Argument Deduction (C++ [temp.deduct])
//===--------------------------------------------------------------------===//
/// Adjust the type \p ArgFunctionType to match the calling convention,
/// noreturn, and optionally the exception specification of \p FunctionType.
/// Deduction often wants to ignore these properties when matching function
/// types.
QualType adjustCCAndNoReturn(QualType ArgFunctionType, QualType FunctionType,
bool AdjustExceptionSpec = false);
/// Describes the result of template argument deduction.
///
/// The TemplateDeductionResult enumeration describes the result of
/// template argument deduction, as returned from
/// DeduceTemplateArguments(). The separate TemplateDeductionInfo
/// structure provides additional information about the results of
/// template argument deduction, e.g., the deduced template argument
/// list (if successful) or the specific template parameters or
/// deduced arguments that were involved in the failure.
enum TemplateDeductionResult {
/// Template argument deduction was successful.
TDK_Success = 0,
/// The declaration was invalid; do nothing.
TDK_Invalid,
/// Template argument deduction exceeded the maximum template
/// instantiation depth (which has already been diagnosed).
TDK_InstantiationDepth,
/// Template argument deduction did not deduce a value
/// for every template parameter.
TDK_Incomplete,
/// Template argument deduction did not deduce a value for every
/// expansion of an expanded template parameter pack.
TDK_IncompletePack,
/// Template argument deduction produced inconsistent
/// deduced values for the given template parameter.
TDK_Inconsistent,
/// Template argument deduction failed due to inconsistent
/// cv-qualifiers on a template parameter type that would
/// otherwise be deduced, e.g., we tried to deduce T in "const T"
/// but were given a non-const "X".
TDK_Underqualified,
/// Substitution of the deduced template argument values
/// resulted in an error.
TDK_SubstitutionFailure,
/// After substituting deduced template arguments, a dependent
/// parameter type did not match the corresponding argument.
TDK_DeducedMismatch,
/// After substituting deduced template arguments, an element of
/// a dependent parameter type did not match the corresponding element
/// of the corresponding argument (when deducing from an initializer list).
TDK_DeducedMismatchNested,
/// A non-dependent component of the parameter did not match the
/// corresponding component of the argument.
TDK_NonDeducedMismatch,
/// When performing template argument deduction for a function
/// template, there were too many call arguments.
TDK_TooManyArguments,
/// When performing template argument deduction for a function
/// template, there were too few call arguments.
TDK_TooFewArguments,
/// The explicitly-specified template arguments were not valid
/// template arguments for the given template.
TDK_InvalidExplicitArguments,
/// Checking non-dependent argument conversions failed.
TDK_NonDependentConversionFailure,
/// Deduction failed; that's all we know.
TDK_MiscellaneousDeductionFailure,
/// CUDA Target attributes do not match.
TDK_CUDATargetMismatch
};
/// Perform template argument deduction to determine whether the given
/// class template partial specialization matches the given template
/// argument list.
TemplateDeductionResult
DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial,
const TemplateArgumentList &TemplateArgs,
sema::TemplateDeductionInfo &Info);
/// Perform template argument deduction to determine whether the given
/// variable template partial specialization matches the given template
/// argument list.
TemplateDeductionResult
DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial,
const TemplateArgumentList &TemplateArgs,
sema::TemplateDeductionInfo &Info);
/// Substitute the explicitly-specified template arguments into the
/// function template's signature before deduction of the remaining
/// template arguments.
TemplateDeductionResult SubstituteExplicitTemplateArguments(
FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo &ExplicitTemplateArgs,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
SmallVectorImpl<QualType> &ParamTypes, QualType *FunctionType,
sema::TemplateDeductionInfo &Info);
/// \brief A function argument from which we performed template argument
/// deduction for a call.
struct OriginalCallArg {
OriginalCallArg(QualType OriginalParamType, bool DecomposedParam,
unsigned ArgIdx, QualType OriginalArgType)
: OriginalParamType(OriginalParamType),
DecomposedParam(DecomposedParam), ArgIdx(ArgIdx),
OriginalArgType(OriginalArgType) {}
QualType OriginalParamType; // Parameter type the argument was deduced against.
bool DecomposedParam; // NOTE(review): presumably whether the parameter was decomposed for deduction — confirm with callers.
unsigned ArgIdx; // Index of the argument within the call.
QualType OriginalArgType; // Type of the original call argument.
};
/// Finish template argument deduction for a function template, checking
/// the deduced template arguments and forming the function
/// specialization \p Specialization.
TemplateDeductionResult FinishTemplateArgumentDeduction(
FunctionTemplateDecl *FunctionTemplate,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
unsigned NumExplicitlySpecified, FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs = nullptr,
bool PartialOverloading = false,
llvm::function_ref<bool()> CheckNonDependent = []{ return false; });
/// Perform template argument deduction for a call to the given function
/// template, from the given set of call arguments.
TemplateDeductionResult DeduceTemplateArguments(
FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args,
FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info,
bool PartialOverloading,
llvm::function_ref<bool(ArrayRef<QualType>)> CheckNonDependent);
/// Perform template argument deduction for the given function template
/// against the function type \p ArgFunctionType (e.g., when taking the
/// address of a function template).
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs,
QualType ArgFunctionType,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
bool IsAddressOfFunction = false);
/// Perform template argument deduction for a templated conversion
/// function, converting to the type \p ToType.
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
QualType ToType,
CXXConversionDecl *&Specialization,
sema::TemplateDeductionInfo &Info);
/// Perform template argument deduction for the given function template
/// using only the explicitly-specified template arguments.
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
bool IsAddressOfFunction = false);
/// Substitute Replacement for \p auto in \p TypeWithAuto
QualType SubstAutoType(QualType TypeWithAuto, QualType Replacement);
/// Substitute Replacement for auto in TypeWithAuto
TypeSourceInfo* SubstAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto,
QualType Replacement);
/// Completely replace the \c auto in \p TypeWithAuto by
/// \p Replacement. This does not retain any \c auto type sugar.
QualType ReplaceAutoType(QualType TypeWithAuto, QualType Replacement);
/// Result type of DeduceAutoType.
enum DeduceAutoResult {
DAR_Succeeded,
DAR_Failed,
DAR_FailedAlreadyDiagnosed
};
/// Deduce the type for an \c auto type from its initializer expression.
DeduceAutoResult
DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer, QualType &Result,
Optional<unsigned> DependentDeductionDepth = None);
/// Deduce the type for an \c auto type from its initializer expression.
DeduceAutoResult
DeduceAutoType(TypeLoc AutoTypeLoc, Expr *&Initializer, QualType &Result,
Optional<unsigned> DependentDeductionDepth = None);
/// Diagnose a failure to deduce the type of the given variable from its
/// initializer.
void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init);
/// Deduce the as-yet-undeduced return type of the function \p FD.
bool DeduceReturnType(FunctionDecl *FD, SourceLocation Loc,
bool Diagnose = true);
/// Declare implicit deduction guides for a class template if we've
/// not already done so.
void DeclareImplicitDeductionGuides(TemplateDecl *Template,
SourceLocation Loc);
QualType DeduceTemplateSpecializationFromInitializer(
TypeSourceInfo *TInfo, const InitializedEntity &Entity,
const InitializationKind &Kind, MultiExprArg Init);
/// Deduce the declared type of a variable from its initializer.
QualType deduceVarTypeFromInitializer(VarDecl *VDecl, DeclarationName Name,
QualType Type, TypeSourceInfo *TSI,
SourceRange Range, bool DirectInit,
Expr *&Init);
/// Retrieve the location of the declared return type of \p FD.
TypeLoc getReturnTypeLoc(FunctionDecl *FD) const;
/// Deduce the function type of \p FD from the expression of a return
/// statement.
bool DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD,
SourceLocation ReturnLoc,
Expr *&RetExpr, AutoType *AT);
/// Determine which of two function templates is more specialized
/// according to partial ordering of function templates.
FunctionTemplateDecl *getMoreSpecializedTemplate(FunctionTemplateDecl *FT1,
FunctionTemplateDecl *FT2,
SourceLocation Loc,
TemplatePartialOrderingContext TPOC,
unsigned NumCallArguments1,
unsigned NumCallArguments2);
UnresolvedSetIterator
getMostSpecialized(UnresolvedSetIterator SBegin, UnresolvedSetIterator SEnd,
TemplateSpecCandidateSet &FailedCandidates,
SourceLocation Loc,
const PartialDiagnostic &NoneDiag,
const PartialDiagnostic &AmbigDiag,
const PartialDiagnostic &CandidateDiag,
bool Complain = true, QualType TargetType = QualType());
ClassTemplatePartialSpecializationDecl *
getMoreSpecializedPartialSpecialization(
ClassTemplatePartialSpecializationDecl *PS1,
ClassTemplatePartialSpecializationDecl *PS2,
SourceLocation Loc);
bool isMoreSpecializedThanPrimary(ClassTemplatePartialSpecializationDecl *T,
sema::TemplateDeductionInfo &Info);
VarTemplatePartialSpecializationDecl *getMoreSpecializedPartialSpecialization(
VarTemplatePartialSpecializationDecl *PS1,
VarTemplatePartialSpecializationDecl *PS2, SourceLocation Loc);
bool isMoreSpecializedThanPrimary(VarTemplatePartialSpecializationDecl *T,
sema::TemplateDeductionInfo &Info);
bool isTemplateTemplateParameterAtLeastAsSpecializedAs(
TemplateParameterList *P, TemplateDecl *AArg, SourceLocation Loc);
void MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs,
bool OnlyDeduced,
unsigned Depth,
llvm::SmallBitVector &Used);
/// Mark which template parameters are deduced for the given function
/// template, delegating to the static overload using this Sema's
/// ASTContext.
void MarkDeducedTemplateParameters(
    const FunctionTemplateDecl *FunctionTemplate,
    llvm::SmallBitVector &Deduced) {
  MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced);
}
/// Mark which template parameters are deduced for the given function
/// template, using the given AST context.
static void MarkDeducedTemplateParameters(ASTContext &Ctx,
const FunctionTemplateDecl *FunctionTemplate,
llvm::SmallBitVector &Deduced);
//===--------------------------------------------------------------------===//
// C++ Template Instantiation
//
/// Retrieve the template argument lists that are in effect for the given
/// declaration \p D, optionally replacing the innermost list with
/// \p Innermost.
MultiLevelTemplateArgumentList
getTemplateInstantiationArgs(NamedDecl *D,
const TemplateArgumentList *Innermost = nullptr,
bool RelativeToPrimary = false,
const FunctionDecl *Pattern = nullptr);
/// A context in which code is being synthesized (where a source location
/// alone is not sufficient to identify the context). This covers template
/// instantiation and various forms of implicitly-generated functions.
struct CodeSynthesisContext {
/// The kind of template instantiation we are performing.
enum SynthesisKind {
/// We are instantiating a template declaration. The entity is
/// the declaration we're instantiating (e.g., a CXXRecordDecl).
TemplateInstantiation,
/// We are instantiating a default argument for a template
/// parameter. The Entity is the template parameter whose argument is
/// being instantiated, the Template is the template, and the
/// TemplateArgs/NumTemplateArguments provide the template arguments as
/// specified.
DefaultTemplateArgumentInstantiation,
/// We are instantiating a default argument for a function.
/// The Entity is the ParmVarDecl, and TemplateArgs/NumTemplateArgs
/// provides the template arguments as specified.
DefaultFunctionArgumentInstantiation,
/// We are substituting explicit template arguments provided for
/// a function template. The entity is a FunctionTemplateDecl.
ExplicitTemplateArgumentSubstitution,
/// We are substituting template argument determined as part of
/// template argument deduction for either a class template
/// partial specialization or a function template. The
/// Entity is either a {Class|Var}TemplatePartialSpecializationDecl or
/// a TemplateDecl.
DeducedTemplateArgumentSubstitution,
/// We are substituting prior template arguments into a new
/// template parameter. The template parameter itself is either a
/// NonTypeTemplateParmDecl or a TemplateTemplateParmDecl.
PriorTemplateArgumentSubstitution,
/// We are checking the validity of a default template argument that
/// has been used when naming a template-id.
DefaultTemplateArgumentChecking,
/// We are computing the exception specification for a defaulted special
/// member function.
ExceptionSpecEvaluation,
/// We are instantiating the exception specification for a function
/// template which was deferred until it was needed.
ExceptionSpecInstantiation,
/// We are declaring an implicit special member function.
DeclaringSpecialMember,
/// We are defining a synthesized function (such as a defaulted special
/// member).
DefiningSynthesizedFunction,
/// Added for Template instantiation observation.
/// Memoization means we are _not_ instantiating a template because
/// it is already instantiated (but we entered a context where we
/// would have had to if it was not already instantiated).
Memoization
} Kind;
/// Was the enclosing context a non-instantiation SFINAE context?
bool SavedInNonInstantiationSFINAEContext;
/// The point of instantiation or synthesis within the source code.
SourceLocation PointOfInstantiation;
/// The entity that is being synthesized.
Decl *Entity;
/// The template (or partial specialization) in which we are
/// performing the instantiation, for substitutions of prior template
/// arguments.
NamedDecl *Template;
/// The list of template arguments we are substituting, if they
/// are not part of the entity.
const TemplateArgument *TemplateArgs;
// FIXME: Wrap this union around more members, or perhaps store the
// kind-specific members in the RAII object owning the context.
union {
/// The number of template arguments in TemplateArgs.
unsigned NumTemplateArgs;
/// The special member being declared or defined.
CXXSpecialMember SpecialMember;
};
/// Retrieve the template arguments; only valid for kinds that store
/// NumTemplateArgs (not DeclaringSpecialMember, which uses the other
/// union member).
ArrayRef<TemplateArgument> template_arguments() const {
assert(Kind != DeclaringSpecialMember);
return {TemplateArgs, NumTemplateArgs};
}
/// The template deduction info object associated with the
/// substitution or checking of explicit or deduced template arguments.
sema::TemplateDeductionInfo *DeductionInfo;
/// The source range that covers the construct that causes
/// the instantiation, e.g., the template-id that causes a class
/// template instantiation.
SourceRange InstantiationRange;
CodeSynthesisContext()
: Kind(TemplateInstantiation), Entity(nullptr), Template(nullptr),
TemplateArgs(nullptr), NumTemplateArgs(0), DeductionInfo(nullptr) {}
/// Determines whether this template is an actual instantiation
/// that should be counted toward the maximum instantiation depth.
bool isInstantiationRecord() const;
};
/// List of active code synthesis contexts.
///
/// This vector is treated as a stack. As synthesis of one entity requires
/// synthesis of another, additional contexts are pushed onto the stack.
SmallVector<CodeSynthesisContext, 16> CodeSynthesisContexts;
/// Specializations whose definitions are currently being instantiated.
llvm::DenseSet<std::pair<Decl *, unsigned>> InstantiatingSpecializations;
/// Non-dependent types used in templates that have already been instantiated
/// by some template instantiation.
llvm::DenseSet<QualType> InstantiatedNonDependentTypes;
/// Extra modules inspected when performing a lookup during a template
/// instantiation. Computed lazily.
SmallVector<Module*, 16> CodeSynthesisContextLookupModules;
/// Cache of additional modules that should be used for name lookup
/// within the current template instantiation. Computed lazily; use
/// getLookupModules() to get a complete set.
llvm::DenseSet<Module*> LookupModulesCache;
/// Get the set of additional modules that should be checked during
/// name lookup. A module and its imports become visible when instantiating a
/// template defined within it.
llvm::DenseSet<Module*> &getLookupModules();
/// Map from the most recent declaration of a namespace to the most
/// recent visible declaration of that namespace.
llvm::DenseMap<NamedDecl*, NamedDecl*> VisibleNamespaceCache;
/// Whether we are in a SFINAE context that is not associated with
/// template instantiation.
///
/// This is used when setting up a SFINAE trap (see \c SFINAETrap) outside
/// of a template instantiation or template argument deduction.
bool InNonInstantiationSFINAEContext;
/// The number of \p CodeSynthesisContexts that are not template
/// instantiations and, therefore, should not be counted as part of the
/// instantiation depth.
///
/// When the instantiation depth reaches the user-configurable limit
/// \p LangOptions::InstantiationDepth we will abort instantiation.
// FIXME: Should we have a similar limit for other forms of synthesis?
unsigned NonInstantiationEntries;
/// The depth of the context stack at the point when the most recent
/// error or warning was produced.
///
/// This value is used to suppress printing of redundant context stacks
/// when there are multiple errors or warnings in the same instantiation.
// FIXME: Does this belong in Sema? It's tough to implement it anywhere else.
unsigned LastEmittedCodeSynthesisContextDepth = 0;
/// The template instantiation callbacks to trace or track
/// instantiations (objects can be chained).
///
/// These callbacks are used to print, trace or track template
/// instantiations as they are being constructed.
std::vector<std::unique_ptr<TemplateInstantiationCallback>>
TemplateInstCallbacks;
/// The current index into pack expansion arguments that will be
/// used for substitution of parameter packs.
///
/// The pack expansion index will be -1 to indicate that parameter packs
/// should be instantiated as themselves. Otherwise, the index specifies
/// which argument within the parameter pack will be used for substitution.
int ArgumentPackSubstitutionIndex;
/// RAII object used to change the argument pack substitution index
/// within a \c Sema object.
///
/// See \c ArgumentPackSubstitutionIndex for more information.
class ArgumentPackSubstitutionIndexRAII {
Sema &Self;
int OldSubstitutionIndex;
public:
ArgumentPackSubstitutionIndexRAII(Sema &Self, int NewSubstitutionIndex)
: Self(Self), OldSubstitutionIndex(Self.ArgumentPackSubstitutionIndex) {
Self.ArgumentPackSubstitutionIndex = NewSubstitutionIndex;
}
~ArgumentPackSubstitutionIndexRAII() {
Self.ArgumentPackSubstitutionIndex = OldSubstitutionIndex;
}
};
friend class ArgumentPackSubstitutionRAII;
// NOTE(review): the befriended name above differs from the
// ArgumentPackSubstitutionIndexRAII class declared earlier — confirm that
// a class with this exact name exists elsewhere in the project.
/// For each declaration that involved template argument deduction, the
/// set of diagnostics that were suppressed during that template argument
/// deduction.
///
/// FIXME: Serialize this structure to the AST file.
typedef llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> >
SuppressedDiagnosticsMap;
SuppressedDiagnosticsMap SuppressedDiagnostics;
/// A stack object to be created when performing template
/// instantiation.
///
/// Construction of an object of type \c InstantiatingTemplate
/// pushes the current instantiation onto the stack of active
/// instantiations. If the size of this stack exceeds the maximum
/// number of recursive template instantiations, construction
/// produces an error and evaluates true.
///
/// Destruction of this object will pop the named instantiation off
/// the stack.
struct InstantiatingTemplate {
/// Note that we are instantiating a class template,
/// function template, variable template, alias template,
/// or a member thereof.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
Decl *Entity,
SourceRange InstantiationRange = SourceRange());
/// Tag type used to select the exception-specification constructor below.
struct ExceptionSpecification {};
/// Note that we are instantiating an exception specification
/// of a function template.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
FunctionDecl *Entity, ExceptionSpecification,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating a default argument in a
/// template-id.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateParameter Param, TemplateDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange = SourceRange());
/// Note that we are substituting either explicitly-specified or
/// deduced template arguments during function template argument deduction.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
FunctionTemplateDecl *FunctionTemplate,
ArrayRef<TemplateArgument> TemplateArgs,
CodeSynthesisContext::SynthesisKind Kind,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating as part of template
/// argument deduction for a class template declaration.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating as part of template
/// argument deduction for a class template partial
/// specialization.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ClassTemplatePartialSpecializationDecl *PartialSpec,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating as part of template
/// argument deduction for a variable template partial
/// specialization.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
VarTemplatePartialSpecializationDecl *PartialSpec,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating a default argument for a function
/// parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ParmVarDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange = SourceRange());
/// Note that we are substituting prior template arguments into a
/// non-type parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
NamedDecl *Template,
NonTypeTemplateParmDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// Note that we are substituting prior template arguments into a
/// template template parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
NamedDecl *Template,
TemplateTemplateParmDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// Note that we are checking the default template argument
/// against the template parameter for a given template-id.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateDecl *Template,
NamedDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// Note that we have finished instantiating this template.
void Clear();
~InstantiatingTemplate() { Clear(); }
/// Determines whether we have exceeded the maximum
/// recursive template instantiations.
bool isInvalid() const { return Invalid; }
/// Determine whether we are already instantiating this
/// specialization in some surrounding active instantiation.
bool isAlreadyInstantiating() const { return AlreadyInstantiating; }
private:
Sema &SemaRef;
bool Invalid;
bool AlreadyInstantiating;
bool CheckInstantiationDepth(SourceLocation PointOfInstantiation,
SourceRange InstantiationRange);
/// Common constructor used by the public constructors above.
InstantiatingTemplate(
Sema &SemaRef, CodeSynthesisContext::SynthesisKind Kind,
SourceLocation PointOfInstantiation, SourceRange InstantiationRange,
Decl *Entity, NamedDecl *Template = nullptr,
ArrayRef<TemplateArgument> TemplateArgs = None,
sema::TemplateDeductionInfo *DeductionInfo = nullptr);
InstantiatingTemplate(const InstantiatingTemplate&) = delete;
InstantiatingTemplate&
operator=(const InstantiatingTemplate&) = delete;
};
/// Push a new entry onto the stack of active code synthesis contexts.
void pushCodeSynthesisContext(CodeSynthesisContext Ctx);
/// Pop the most recent entry from the stack of active code synthesis
/// contexts.
void popCodeSynthesisContext();
/// Determine whether we are currently performing template instantiation.
///
/// True when the synthesis-context stack holds more entries than the
/// count of non-instantiation entries.
bool inTemplateInstantiation() const {
  return NonInstantiationEntries < CodeSynthesisContexts.size();
}
/// Print the current synthesis-context stack (as diagnostic notes),
/// unless it is empty or was already printed at this depth; then note the
/// pragma-attribute instantiation point if one is active.
void PrintContextStack() {
  const auto Depth = CodeSynthesisContexts.size();
  if (Depth != 0 && Depth != LastEmittedCodeSynthesisContextDepth) {
    PrintInstantiationStack();
    // Remember the depth so repeated diagnostics at the same depth do not
    // re-print an identical stack.
    LastEmittedCodeSynthesisContextDepth = Depth;
  }
  if (PragmaAttributeCurrentTargetDecl)
    PrintPragmaAttributeInstantiationPoint();
}
/// Print the stack of active code synthesis contexts as diagnostic notes.
void PrintInstantiationStack();
/// Print a note identifying the current pragma-attribute instantiation
/// point.
void PrintPragmaAttributeInstantiationPoint();
/// Determines whether we are currently in a context where
/// template argument substitution failures are not considered
/// errors.
///
/// \returns An empty \c Optional if we're not in a SFINAE context.
/// Otherwise, contains a pointer that, if non-NULL, contains the nearest
/// template-deduction context object, which can be used to capture
/// diagnostics that will be suppressed.
Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const;
/// Determines whether we are currently in a context that
/// is not evaluated as per C++ [expr] p5.
bool isUnevaluatedContext() const {
  assert(!ExprEvalContexts.empty() &&
         "Must be in an expression evaluation context");
  const auto &CurrentContext = ExprEvalContexts.back();
  return CurrentContext.isUnevaluated();
}
/// RAII class used to determine whether SFINAE has
/// trapped any errors that occur during template argument
/// deduction.
class SFINAETrap {
Sema &SemaRef;
unsigned PrevSFINAEErrors; // Sema::NumSFINAEErrors at construction; restored on destruction.
bool PrevInNonInstantiationSFINAEContext; // Saved flag, restored on destruction.
bool PrevAccessCheckingSFINAE; // Saved flag, restored on destruction.
bool PrevLastDiagnosticIgnored; // Saved diagnostics state, restored on destruction.
public:
explicit SFINAETrap(Sema &SemaRef, bool AccessCheckingSFINAE = false)
: SemaRef(SemaRef), PrevSFINAEErrors(SemaRef.NumSFINAEErrors),
PrevInNonInstantiationSFINAEContext(
SemaRef.InNonInstantiationSFINAEContext),
PrevAccessCheckingSFINAE(SemaRef.AccessCheckingSFINAE),
PrevLastDiagnosticIgnored(
SemaRef.getDiagnostics().isLastDiagnosticIgnored())
{
// Outside any template instantiation, mark this as a
// non-instantiation SFINAE context for the trap's lifetime.
if (!SemaRef.isSFINAEContext())
SemaRef.InNonInstantiationSFINAEContext = true;
SemaRef.AccessCheckingSFINAE = AccessCheckingSFINAE;
}
~SFINAETrap() {
SemaRef.NumSFINAEErrors = PrevSFINAEErrors;
SemaRef.InNonInstantiationSFINAEContext
= PrevInNonInstantiationSFINAEContext;
SemaRef.AccessCheckingSFINAE = PrevAccessCheckingSFINAE;
SemaRef.getDiagnostics().setLastDiagnosticIgnored(
PrevLastDiagnosticIgnored);
}
/// Determine whether any SFINAE errors have been trapped.
bool hasErrorOccurred() const {
return SemaRef.NumSFINAEErrors > PrevSFINAEErrors;
}
};
/// RAII class used to indicate that we are performing provisional
/// semantic analysis to determine the validity of a construct, so
/// typo-correction and diagnostics in the immediate context (not within
/// implicitly-instantiated templates) should be suppressed.
class TentativeAnalysisScope {
  Sema &SemaRef;
  // FIXME: Using a SFINAETrap for this is a hack.
  SFINAETrap Trap;
  bool SavedDisableTypoCorrection;

public:
  explicit TentativeAnalysisScope(Sema &SemaRef)
      : SemaRef(SemaRef), Trap(SemaRef, /*AccessCheckingSFINAE=*/true),
        SavedDisableTypoCorrection(SemaRef.DisableTypoCorrection) {
    // Suppress typo correction for the duration of the tentative analysis.
    SemaRef.DisableTypoCorrection = true;
  }

  ~TentativeAnalysisScope() {
    SemaRef.DisableTypoCorrection = SavedDisableTypoCorrection;
  }
};
/// The current instantiation scope used to store local
/// variables.
LocalInstantiationScope *CurrentInstantiationScope;
/// Tracks whether we are in a context where typo correction is
/// disabled.
bool DisableTypoCorrection;
/// The number of typos corrected by CorrectTypo.
unsigned TyposCorrected;
typedef llvm::SmallSet<SourceLocation, 2> SrcLocSet;
typedef llvm::DenseMap<IdentifierInfo *, SrcLocSet> IdentifierSourceLocations;
/// A cache containing identifiers for which typo correction failed and
/// their locations, so that repeated attempts to correct an identifier in a
/// given location are ignored if typo correction already failed for it.
IdentifierSourceLocations TypoCorrectionFailures;
/// Worker object for performing CFG-based warnings.
sema::AnalysisBasedWarnings AnalysisWarnings;
threadSafety::BeforeSet *ThreadSafetyDeclCache;
/// An entity for which implicit template instantiation is required.
///
/// The source location associated with the declaration is the first place in
/// the source code where the declaration was "used". It is not necessarily
/// the point of instantiation (which will be either before or after the
/// namespace-scope declaration that triggered this implicit instantiation).
/// However, it is the location that diagnostics should generally refer to,
/// because users will need to know what code triggered the instantiation.
typedef std::pair<ValueDecl *, SourceLocation> PendingImplicitInstantiation;
/// The queue of implicit template instantiations that are required
/// but have not yet been performed.
std::deque<PendingImplicitInstantiation> PendingInstantiations;
/// Queue of implicit template instantiations that cannot be performed
/// eagerly.
SmallVector<PendingImplicitInstantiation, 1> LateParsedInstantiations;
/// RAII object that (when enabled) takes ownership of the global queues
/// of pending implicit instantiations and vtable uses, so they can be
/// flushed via perform() and handed back empty on destruction.
class GlobalEagerInstantiationScope {
public:
  GlobalEagerInstantiationScope(Sema &S, bool Enabled)
      : S(S), Enabled(Enabled) {
    // Move the queues aside; the destructor gives them back and asserts
    // that everything stashed here was actually processed.
    if (Enabled) {
      SavedPendingInstantiations.swap(S.PendingInstantiations);
      SavedVTableUses.swap(S.VTableUses);
    }
  }

  /// Flush the work that accumulated while this scope was active.
  void perform() {
    if (!Enabled)
      return;
    S.DefineUsedVTables();
    S.PerformPendingInstantiations();
  }

  ~GlobalEagerInstantiationScope() {
    if (!Enabled)
      return;

    // Restore the set of pending vtables.
    assert(S.VTableUses.empty() &&
           "VTableUses should be empty before it is discarded.");
    S.VTableUses.swap(SavedVTableUses);

    // Restore the set of pending implicit instantiations.
    assert(S.PendingInstantiations.empty() &&
           "PendingInstantiations should be empty before it is discarded.");
    S.PendingInstantiations.swap(SavedPendingInstantiations);
  }

private:
  Sema &S;
  SmallVector<VTableUse, 16> SavedVTableUses;
  std::deque<PendingImplicitInstantiation> SavedPendingInstantiations;
  bool Enabled;
};
/// The queue of implicit template instantiations that are required
/// and must be performed within the current local scope.
///
/// This queue is only used for member functions of local classes in
/// templates, which must be instantiated in the same scope as their
/// enclosing function, so that they can reference function-local
/// types, static variables, enumerators, etc.
std::deque<PendingImplicitInstantiation> PendingLocalImplicitInstantiations;
/// RAII object that isolates the queue of pending *local* implicit
/// instantiations for the duration of the scope; perform() flushes
/// whatever accumulated while the scope was active.
class LocalEagerInstantiationScope {
public:
  LocalEagerInstantiationScope(Sema &S) : S(S) {
    // Move the current queue aside; it is restored on destruction.
    S.PendingLocalImplicitInstantiations.swap(
        SavedPendingLocalImplicitInstantiations);
  }

  /// Instantiate everything queued while this scope was active.
  void perform() { S.PerformPendingInstantiations(/*LocalOnly=*/true); }

  ~LocalEagerInstantiationScope() {
    assert(S.PendingLocalImplicitInstantiations.empty() &&
           "there shouldn't be any pending local implicit instantiations");
    S.PendingLocalImplicitInstantiations.swap(
        SavedPendingLocalImplicitInstantiations);
  }

private:
  Sema &S;
  std::deque<PendingImplicitInstantiation>
      SavedPendingLocalImplicitInstantiations;
};
/// A helper class for building up ExtParameterInfos.
class ExtParameterInfoBuilder {
  SmallVector<FunctionProtoType::ExtParameterInfo, 16> Infos;
  // Whether any entry so far differs from the default-constructed info.
  bool HasInteresting = false;

public:
  /// Set the ExtParameterInfo for the parameter at the given index.
  void set(unsigned index, FunctionProtoType::ExtParameterInfo info) {
    // Entries must arrive in strictly increasing index order; any gap is
    // filled with default-constructed infos by the resize.
    assert(Infos.size() <= index);
    Infos.resize(index);
    Infos.push_back(info);
    HasInteresting |= (info != FunctionProtoType::ExtParameterInfo());
  }

  /// Return a pointer (suitable for setting in an ExtProtoInfo) to the
  /// ExtParameterInfo array we've built up.
  const FunctionProtoType::ExtParameterInfo *
  getPointerOrNull(unsigned numParams) {
    // If every entry is the default, callers want a null pointer instead
    // of an array of defaults.
    if (HasInteresting) {
      Infos.resize(numParams);
      return Infos.data();
    }
    return nullptr;
  }
};
void PerformPendingInstantiations(bool LocalOnly = false);
TypeSourceInfo *SubstType(TypeSourceInfo *T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity,
bool AllowDeducedTST = false);
QualType SubstType(QualType T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity);
TypeSourceInfo *SubstType(TypeLoc TL,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity);
TypeSourceInfo *SubstFunctionDeclType(TypeSourceInfo *T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc,
DeclarationName Entity,
CXXRecordDecl *ThisContext,
Qualifiers ThisTypeQuals);
void SubstExceptionSpec(FunctionDecl *New, const FunctionProtoType *Proto,
const MultiLevelTemplateArgumentList &Args);
bool SubstExceptionSpec(SourceLocation Loc,
FunctionProtoType::ExceptionSpecInfo &ESI,
SmallVectorImpl<QualType> &ExceptionStorage,
const MultiLevelTemplateArgumentList &Args);
ParmVarDecl *SubstParmVarDecl(ParmVarDecl *D,
const MultiLevelTemplateArgumentList &TemplateArgs,
int indexAdjustment,
Optional<unsigned> NumExpansions,
bool ExpectParameterPack);
bool SubstParmTypes(SourceLocation Loc, ArrayRef<ParmVarDecl *> Params,
const FunctionProtoType::ExtParameterInfo *ExtParamInfos,
const MultiLevelTemplateArgumentList &TemplateArgs,
SmallVectorImpl<QualType> &ParamTypes,
SmallVectorImpl<ParmVarDecl *> *OutParams,
ExtParameterInfoBuilder &ParamInfos);
ExprResult SubstExpr(Expr *E,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Substitute the given template arguments into a list of
/// expressions, expanding pack expansions if required.
///
/// \param Exprs The list of expressions to substitute into.
///
/// \param IsCall Whether this is some form of call, in which case
/// default arguments will be dropped.
///
/// \param TemplateArgs The set of template arguments to substitute.
///
/// \param Outputs Will receive all of the substituted arguments.
///
/// \returns true if an error occurred, false otherwise.
bool SubstExprs(ArrayRef<Expr *> Exprs, bool IsCall,
const MultiLevelTemplateArgumentList &TemplateArgs,
SmallVectorImpl<Expr *> &Outputs);
StmtResult SubstStmt(Stmt *S,
const MultiLevelTemplateArgumentList &TemplateArgs);
TemplateParameterList *
SubstTemplateParams(TemplateParameterList *Params, DeclContext *Owner,
const MultiLevelTemplateArgumentList &TemplateArgs);
Decl *SubstDecl(Decl *D, DeclContext *Owner,
const MultiLevelTemplateArgumentList &TemplateArgs);
ExprResult SubstInitializer(Expr *E,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool CXXDirectInit);
bool
SubstBaseSpecifiers(CXXRecordDecl *Instantiation,
CXXRecordDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool
InstantiateClass(SourceLocation PointOfInstantiation,
CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK,
bool Complain = true);
bool InstantiateEnum(SourceLocation PointOfInstantiation,
EnumDecl *Instantiation, EnumDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK);
bool InstantiateInClassInitializer(
SourceLocation PointOfInstantiation, FieldDecl *Instantiation,
FieldDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs);
/// An attribute paired with the local instantiation scope and the
/// declaration it should be applied to at a later point (see the
/// LateAttrs parameters of InstantiateAttrs and friends).
struct LateInstantiatedAttribute {
  const Attr *TmplAttr;           // The attribute to instantiate later.
  LocalInstantiationScope *Scope; // Scope to instantiate it within.
  Decl *NewDecl;                  // Declaration that receives the attribute.

  LateInstantiatedAttribute(const Attr *A, LocalInstantiationScope *S, Decl *D)
      : TmplAttr(A), Scope(S), NewDecl(D) {}
};
typedef SmallVector<LateInstantiatedAttribute, 16> LateInstantiatedAttrVec;
void InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs,
const Decl *Pattern, Decl *Inst,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *OuterMostScope = nullptr);
void
InstantiateAttrsForDecl(const MultiLevelTemplateArgumentList &TemplateArgs,
const Decl *Pattern, Decl *Inst,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *OuterMostScope = nullptr);
bool usesPartialOrExplicitSpecialization(
SourceLocation Loc, ClassTemplateSpecializationDecl *ClassTemplateSpec);
bool
InstantiateClassTemplateSpecialization(SourceLocation PointOfInstantiation,
ClassTemplateSpecializationDecl *ClassTemplateSpec,
TemplateSpecializationKind TSK,
bool Complain = true);
void InstantiateClassMembers(SourceLocation PointOfInstantiation,
CXXRecordDecl *Instantiation,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK);
void InstantiateClassTemplateSpecializationMembers(
SourceLocation PointOfInstantiation,
ClassTemplateSpecializationDecl *ClassTemplateSpec,
TemplateSpecializationKind TSK);
NestedNameSpecifierLoc
SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS,
const MultiLevelTemplateArgumentList &TemplateArgs);
DeclarationNameInfo
SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo,
const MultiLevelTemplateArgumentList &TemplateArgs);
TemplateName
SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name,
SourceLocation Loc,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool Subst(const TemplateArgumentLoc *Args, unsigned NumArgs,
TemplateArgumentListInfo &Result,
const MultiLevelTemplateArgumentList &TemplateArgs);
void InstantiateExceptionSpec(SourceLocation PointOfInstantiation,
FunctionDecl *Function);
FunctionDecl *InstantiateFunctionDeclaration(FunctionTemplateDecl *FTD,
const TemplateArgumentList *Args,
SourceLocation Loc);
void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
FunctionDecl *Function,
bool Recursive = false,
bool DefinitionRequired = false,
bool AtEndOfTU = false);
VarTemplateSpecializationDecl *BuildVarTemplateInstantiation(
VarTemplateDecl *VarTemplate, VarDecl *FromVar,
const TemplateArgumentList &TemplateArgList,
const TemplateArgumentListInfo &TemplateArgsInfo,
SmallVectorImpl<TemplateArgument> &Converted,
SourceLocation PointOfInstantiation, void *InsertPos,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *StartingScope = nullptr);
VarTemplateSpecializationDecl *CompleteVarTemplateSpecializationDecl(
VarTemplateSpecializationDecl *VarSpec, VarDecl *PatternDecl,
const MultiLevelTemplateArgumentList &TemplateArgs);
void
BuildVariableInstantiation(VarDecl *NewVar, VarDecl *OldVar,
const MultiLevelTemplateArgumentList &TemplateArgs,
LateInstantiatedAttrVec *LateAttrs,
DeclContext *Owner,
LocalInstantiationScope *StartingScope,
bool InstantiatingVarTemplate = false);
void InstantiateVariableInitializer(
VarDecl *Var, VarDecl *OldVar,
const MultiLevelTemplateArgumentList &TemplateArgs);
void InstantiateVariableDefinition(SourceLocation PointOfInstantiation,
VarDecl *Var, bool Recursive = false,
bool DefinitionRequired = false,
bool AtEndOfTU = false);
void InstantiateMemInitializers(CXXConstructorDecl *New,
const CXXConstructorDecl *Tmpl,
const MultiLevelTemplateArgumentList &TemplateArgs);
NamedDecl *FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool FindingInstantiatedContext = false);
DeclContext *FindInstantiatedContext(SourceLocation Loc, DeclContext *DC,
const MultiLevelTemplateArgumentList &TemplateArgs);
// Objective-C declarations.
/// The kind of Objective-C container context we are currently in;
/// returned by getObjCContainerKind().
enum ObjCContainerKind {
  OCK_None = -1, // Not inside any Objective-C container.
  OCK_Interface = 0,
  OCK_Protocol,
  OCK_Category,
  OCK_ClassExtension,
  OCK_Implementation,
  OCK_CategoryImplementation
};
ObjCContainerKind getObjCContainerKind() const;
DeclResult actOnObjCTypeParam(Scope *S,
ObjCTypeParamVariance variance,
SourceLocation varianceLoc,
unsigned index,
IdentifierInfo *paramName,
SourceLocation paramLoc,
SourceLocation colonLoc,
ParsedType typeBound);
ObjCTypeParamList *actOnObjCTypeParamList(Scope *S, SourceLocation lAngleLoc,
ArrayRef<Decl *> typeParams,
SourceLocation rAngleLoc);
void popObjCTypeParamList(Scope *S, ObjCTypeParamList *typeParamList);
Decl *ActOnStartClassInterface(
Scope *S, SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName,
SourceLocation ClassLoc, ObjCTypeParamList *typeParamList,
IdentifierInfo *SuperName, SourceLocation SuperLoc,
ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange,
Decl *const *ProtoRefs, unsigned NumProtoRefs,
const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
const ParsedAttributesView &AttrList);
void ActOnSuperClassOfClassInterface(Scope *S,
SourceLocation AtInterfaceLoc,
ObjCInterfaceDecl *IDecl,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *SuperName,
SourceLocation SuperLoc,
ArrayRef<ParsedType> SuperTypeArgs,
SourceRange SuperTypeArgsRange);
void ActOnTypedefedProtocols(SmallVectorImpl<Decl *> &ProtocolRefs,
SmallVectorImpl<SourceLocation> &ProtocolLocs,
IdentifierInfo *SuperName,
SourceLocation SuperLoc);
Decl *ActOnCompatibilityAlias(
SourceLocation AtCompatibilityAliasLoc,
IdentifierInfo *AliasName, SourceLocation AliasLocation,
IdentifierInfo *ClassName, SourceLocation ClassLocation);
bool CheckForwardProtocolDeclarationForCircularDependency(
IdentifierInfo *PName,
SourceLocation &PLoc, SourceLocation PrevLoc,
const ObjCList<ObjCProtocolDecl> &PList);
Decl *ActOnStartProtocolInterface(
SourceLocation AtProtoInterfaceLoc, IdentifierInfo *ProtocolName,
SourceLocation ProtocolLoc, Decl *const *ProtoRefNames,
unsigned NumProtoRefs, const SourceLocation *ProtoLocs,
SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList);
Decl *ActOnStartCategoryInterface(
SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName,
SourceLocation ClassLoc, ObjCTypeParamList *typeParamList,
IdentifierInfo *CategoryName, SourceLocation CategoryLoc,
Decl *const *ProtoRefs, unsigned NumProtoRefs,
const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
const ParsedAttributesView &AttrList);
Decl *ActOnStartClassImplementation(
SourceLocation AtClassImplLoc,
IdentifierInfo *ClassName, SourceLocation ClassLoc,
IdentifierInfo *SuperClassname,
SourceLocation SuperClassLoc);
Decl *ActOnStartCategoryImplementation(SourceLocation AtCatImplLoc,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *CatName,
SourceLocation CatLoc);
DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl,
ArrayRef<Decl *> Decls);
DeclGroupPtrTy ActOnForwardClassDeclaration(SourceLocation Loc,
IdentifierInfo **IdentList,
SourceLocation *IdentLocs,
ArrayRef<ObjCTypeParamList *> TypeParamLists,
unsigned NumElts);
DeclGroupPtrTy
ActOnForwardProtocolDeclaration(SourceLocation AtProtoclLoc,
ArrayRef<IdentifierLocPair> IdentList,
const ParsedAttributesView &attrList);
void FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer,
ArrayRef<IdentifierLocPair> ProtocolId,
SmallVectorImpl<Decl *> &Protocols);
void DiagnoseTypeArgsAndProtocols(IdentifierInfo *ProtocolId,
SourceLocation ProtocolLoc,
IdentifierInfo *TypeArgId,
SourceLocation TypeArgLoc,
bool SelectProtocolFirst = false);
/// Given a list of identifiers (and their locations), resolve the
/// names to either Objective-C protocol qualifiers or type
/// arguments, as appropriate.
void actOnObjCTypeArgsOrProtocolQualifiers(
Scope *S,
ParsedType baseType,
SourceLocation lAngleLoc,
ArrayRef<IdentifierInfo *> identifiers,
ArrayRef<SourceLocation> identifierLocs,
SourceLocation rAngleLoc,
SourceLocation &typeArgsLAngleLoc,
SmallVectorImpl<ParsedType> &typeArgs,
SourceLocation &typeArgsRAngleLoc,
SourceLocation &protocolLAngleLoc,
SmallVectorImpl<Decl *> &protocols,
SourceLocation &protocolRAngleLoc,
bool warnOnIncompleteProtocols);
/// Build an Objective-C protocol-qualified 'id' type where no
/// base type was specified.
TypeResult actOnObjCProtocolQualifierType(
SourceLocation lAngleLoc,
ArrayRef<Decl *> protocols,
ArrayRef<SourceLocation> protocolLocs,
SourceLocation rAngleLoc);
/// Build a specialized and/or protocol-qualified Objective-C type.
TypeResult actOnObjCTypeArgsAndProtocolQualifiers(
Scope *S,
SourceLocation Loc,
ParsedType BaseType,
SourceLocation TypeArgsLAngleLoc,
ArrayRef<ParsedType> TypeArgs,
SourceLocation TypeArgsRAngleLoc,
SourceLocation ProtocolLAngleLoc,
ArrayRef<Decl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc);
/// Build an Objective-C type parameter type.
QualType BuildObjCTypeParamType(const ObjCTypeParamDecl *Decl,
SourceLocation ProtocolLAngleLoc,
ArrayRef<ObjCProtocolDecl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc,
bool FailOnError = false);
/// Build an Objective-C object pointer type.
QualType BuildObjCObjectType(QualType BaseType,
SourceLocation Loc,
SourceLocation TypeArgsLAngleLoc,
ArrayRef<TypeSourceInfo *> TypeArgs,
SourceLocation TypeArgsRAngleLoc,
SourceLocation ProtocolLAngleLoc,
ArrayRef<ObjCProtocolDecl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc,
bool FailOnError = false);
/// Ensure attributes are consistent with type.
/// \param [in, out] Attributes The attributes to check; they will
/// be modified to be consistent with \p PropertyTy.
void CheckObjCPropertyAttributes(Decl *PropertyPtrTy,
SourceLocation Loc,
unsigned &Attributes,
bool propertyInPrimaryClass);
/// Process the specified property declaration and create decls for the
/// setters and getters as needed.
/// \param property The property declaration being processed
void ProcessPropertyDecl(ObjCPropertyDecl *property);
void DiagnosePropertyMismatch(ObjCPropertyDecl *Property,
ObjCPropertyDecl *SuperProperty,
const IdentifierInfo *Name,
bool OverridingProtocolProperty);
void DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT,
ObjCInterfaceDecl *ID);
Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd,
ArrayRef<Decl *> allMethods = None,
ArrayRef<DeclGroupPtrTy> allTUVars = None);
Decl *ActOnProperty(Scope *S, SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD, ObjCDeclSpec &ODS,
Selector GetterSel, Selector SetterSel,
tok::ObjCKeywordKind MethodImplKind,
DeclContext *lexicalDC = nullptr);
Decl *ActOnPropertyImplDecl(Scope *S,
SourceLocation AtLoc,
SourceLocation PropertyLoc,
bool ImplKind,
IdentifierInfo *PropertyId,
IdentifierInfo *PropertyIvar,
SourceLocation PropertyIvarLoc,
ObjCPropertyQueryKind QueryKind);
/// Kinds of 'special' Objective-C methods: the alloc/new/copy families
/// and retaining vs. non-retaining init.
enum ObjCSpecialMethodKind {
  OSMK_None,
  OSMK_Alloc,
  OSMK_New,
  OSMK_Copy,
  OSMK_RetainingInit,
  OSMK_NonRetainingInit
};
/// Per-argument information for an Objective-C method declaration,
/// consumed by ActOnMethodDeclaration() below.
struct ObjCArgInfo {
  IdentifierInfo *Name;   // The argument's name.
  SourceLocation NameLoc; // Location of the argument's name.

  // The Type is null if no type was specified, and the DeclSpec is invalid
  // in this case.
  ParsedType Type;
  ObjCDeclSpec DeclSpec;

  /// ArgAttrs - Attribute list for this argument.
  ParsedAttributesView ArgAttrs;
};
Decl *ActOnMethodDeclaration(
Scope *S,
SourceLocation BeginLoc, // location of the + or -.
SourceLocation EndLoc, // location of the ; or {.
tok::TokenKind MethodType, ObjCDeclSpec &ReturnQT, ParsedType ReturnType,
ArrayRef<SourceLocation> SelectorLocs, Selector Sel,
// optional arguments. The number of types/arguments is obtained
// from the Sel.getNumArgs().
ObjCArgInfo *ArgInfo, DeclaratorChunk::ParamInfo *CParamInfo,
unsigned CNumArgs, // c-style args
const ParsedAttributesView &AttrList, tok::ObjCKeywordKind MethodImplKind,
bool isVariadic, bool MethodDefinition);
ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel,
const ObjCObjectPointerType *OPT,
bool IsInstance);
ObjCMethodDecl *LookupMethodInObjectType(Selector Sel, QualType Ty,
bool IsInstance);
bool CheckARCMethodDecl(ObjCMethodDecl *method);
bool inferObjCARCLifetime(ValueDecl *decl);
ExprResult
HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT,
Expr *BaseExpr,
SourceLocation OpLoc,
DeclarationName MemberName,
SourceLocation MemberLoc,
SourceLocation SuperLoc, QualType SuperType,
bool Super);
ExprResult
ActOnClassPropertyRefExpr(IdentifierInfo &receiverName,
IdentifierInfo &propertyName,
SourceLocation receiverNameLoc,
SourceLocation propertyNameLoc);
ObjCMethodDecl *tryCaptureObjCSelf(SourceLocation Loc);
/// Describes the kind of message expression indicated by a message
/// send that starts with an identifier.
///
/// Returned by getObjCMessageKind() below.
enum ObjCMessageKind {
  /// The message is sent to 'super'.
  ObjCSuperMessage,
  /// The message is an instance message.
  ObjCInstanceMessage,
  /// The message is a class message, and the identifier is a type
  /// name.
  ObjCClassMessage
};
ObjCMessageKind getObjCMessageKind(Scope *S,
IdentifierInfo *Name,
SourceLocation NameLoc,
bool IsSuper,
bool HasTrailingDot,
ParsedType &ReceiverType);
ExprResult ActOnSuperMessage(Scope *S, SourceLocation SuperLoc,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo,
QualType ReceiverType,
SourceLocation SuperLoc,
Selector Sel,
ObjCMethodDecl *Method,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args,
bool isImplicit = false);
ExprResult BuildClassMessageImplicit(QualType ReceiverType,
bool isSuperReceiver,
SourceLocation Loc,
Selector Sel,
ObjCMethodDecl *Method,
MultiExprArg Args);
ExprResult ActOnClassMessage(Scope *S,
ParsedType Receiver,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildInstanceMessage(Expr *Receiver,
QualType ReceiverType,
SourceLocation SuperLoc,
Selector Sel,
ObjCMethodDecl *Method,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args,
bool isImplicit = false);
ExprResult BuildInstanceMessageImplicit(Expr *Receiver,
QualType ReceiverType,
SourceLocation Loc,
Selector Sel,
ObjCMethodDecl *Method,
MultiExprArg Args);
ExprResult ActOnInstanceMessage(Scope *S,
Expr *Receiver,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildObjCBridgedCast(SourceLocation LParenLoc,
ObjCBridgeCastKind Kind,
SourceLocation BridgeKeywordLoc,
TypeSourceInfo *TSInfo,
Expr *SubExpr);
ExprResult ActOnObjCBridgedCast(Scope *S,
SourceLocation LParenLoc,
ObjCBridgeCastKind Kind,
SourceLocation BridgeKeywordLoc,
ParsedType Type,
SourceLocation RParenLoc,
Expr *SubExpr);
void CheckTollFreeBridgeCast(QualType castType, Expr *castExpr);
void CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr);
bool CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr,
CastKind &Kind);
bool checkObjCBridgeRelatedComponents(SourceLocation Loc,
QualType DestType, QualType SrcType,
ObjCInterfaceDecl *&RelatedClass,
ObjCMethodDecl *&ClassMethod,
ObjCMethodDecl *&InstanceMethod,
TypedefNameDecl *&TDNDecl,
bool CfToNs, bool Diagnose = true);
bool CheckObjCBridgeRelatedConversions(SourceLocation Loc,
QualType DestType, QualType SrcType,
Expr *&SrcExpr, bool Diagnose = true);
bool ConversionToObjCStringLiteralCheck(QualType DstType, Expr *&SrcExpr,
bool Diagnose = true);
bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall);
/// Check whether the given new method is a valid override of the
/// given overridden method, and set any properties that should be inherited.
void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod,
const ObjCMethodDecl *Overridden);
/// Describes the compatibility of a result type with its method.
/// Used by CheckObjCMethodOverrides() below.
enum ResultTypeCompatibilityKind {
  RTC_Compatible,
  RTC_Incompatible,
  RTC_Unknown
};
void CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod,
ObjCInterfaceDecl *CurrentClass,
ResultTypeCompatibilityKind RTC);
enum PragmaOptionsAlignKind {
POAK_Native, // #pragma options align=native
POAK_Natural, // #pragma options align=natural
POAK_Packed, // #pragma options align=packed
POAK_Power, // #pragma options align=power
POAK_Mac68k, // #pragma options align=mac68k
POAK_Reset // #pragma options align=reset
};
/// ActOnPragmaClangSection - Called on well formed \#pragma clang section
void ActOnPragmaClangSection(SourceLocation PragmaLoc,
PragmaClangSectionAction Action,
PragmaClangSectionKind SecKind, StringRef SecName);
/// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align.
void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind,
SourceLocation PragmaLoc);
/// ActOnPragmaPack - Called on well formed \#pragma pack(...).
void ActOnPragmaPack(SourceLocation PragmaLoc, PragmaMsStackAction Action,
StringRef SlotLabel, Expr *Alignment);
/// The reason a \#pragma pack state is being diagnosed; passed to
/// DiagnoseNonDefaultPragmaPack() below.
enum class PragmaPackDiagnoseKind {
  NonDefaultStateAtInclude,
  ChangedStateAtExit
};
void DiagnoseNonDefaultPragmaPack(PragmaPackDiagnoseKind Kind,
SourceLocation IncludeLoc);
void DiagnoseUnterminatedPragmaPack();
/// ActOnPragmaMSStruct - Called on well formed \#pragma ms_struct [on|off].
void ActOnPragmaMSStruct(PragmaMSStructKind Kind);
/// ActOnPragmaMSComment - Called on well formed
/// \#pragma comment(kind, "arg").
void ActOnPragmaMSComment(SourceLocation CommentLoc, PragmaMSCommentKind Kind,
StringRef Arg);
/// ActOnPragmaMSPointersToMembers - called on well formed \#pragma
/// pointers_to_members(representation method[, general purpose
/// representation]).
void ActOnPragmaMSPointersToMembers(
LangOptions::PragmaMSPointersToMembersKind Kind,
SourceLocation PragmaLoc);
/// Called on well formed \#pragma vtordisp().
void ActOnPragmaMSVtorDisp(PragmaMsStackAction Action,
SourceLocation PragmaLoc,
MSVtorDispAttr::Mode Value);
/// Which section-controlling pragma a request refers to
/// (\#pragma bss_seg/data_seg/const_seg/code_seg; see ActOnPragmaMSSeg).
enum PragmaSectionKind {
  PSK_DataSeg,
  PSK_BSSSeg,
  PSK_ConstSeg,
  PSK_CodeSeg,
};
bool UnifySection(StringRef SectionName,
int SectionFlags,
DeclaratorDecl *TheDecl);
bool UnifySection(StringRef SectionName,
int SectionFlags,
SourceLocation PragmaSectionLocation);
/// Called on well formed \#pragma bss_seg/data_seg/const_seg/code_seg.
void ActOnPragmaMSSeg(SourceLocation PragmaLocation,
PragmaMsStackAction Action,
llvm::StringRef StackSlotLabel,
StringLiteral *SegmentName,
llvm::StringRef PragmaName);
/// Called on well formed \#pragma section().
void ActOnPragmaMSSection(SourceLocation PragmaLocation,
int SectionFlags, StringLiteral *SegmentName);
/// Called on well-formed \#pragma init_seg().
void ActOnPragmaMSInitSeg(SourceLocation PragmaLocation,
StringLiteral *SegmentName);
/// Called on #pragma clang __debug dump II
void ActOnPragmaDump(Scope *S, SourceLocation Loc, IdentifierInfo *II);
/// ActOnPragmaDetectMismatch - Call on well-formed \#pragma detect_mismatch
void ActOnPragmaDetectMismatch(SourceLocation Loc, StringRef Name,
StringRef Value);
/// ActOnPragmaUnused - Called on well-formed '\#pragma unused'.
void ActOnPragmaUnused(const Token &Identifier,
Scope *curScope,
SourceLocation PragmaLoc);
/// ActOnPragmaVisibility - Called on well formed \#pragma GCC visibility... .
void ActOnPragmaVisibility(const IdentifierInfo* VisType,
SourceLocation PragmaLoc);
NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II,
SourceLocation Loc);
void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W);
/// ActOnPragmaWeakID - Called on well formed \#pragma weak ident.
void ActOnPragmaWeakID(IdentifierInfo* WeakName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc);
/// ActOnPragmaRedefineExtname - Called on well formed
/// \#pragma redefine_extname oldname newname.
void ActOnPragmaRedefineExtname(IdentifierInfo* WeakName,
IdentifierInfo* AliasName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc,
SourceLocation AliasNameLoc);
/// ActOnPragmaWeakAlias - Called on well formed \#pragma weak ident = ident.
void ActOnPragmaWeakAlias(IdentifierInfo* WeakName,
IdentifierInfo* AliasName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc,
SourceLocation AliasNameLoc);
/// ActOnPragmaFPContract - Called on well formed
/// \#pragma {STDC,OPENCL} FP_CONTRACT and
/// \#pragma clang fp contract
void ActOnPragmaFPContract(LangOptions::FPContractModeKind FPC);
/// ActOnPragmaFEnvAccess - Called on well formed
/// \#pragma STDC FENV_ACCESS
void ActOnPragmaFEnvAccess(LangOptions::FEnvAccessModeKind FPC);
/// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to
/// the record decl, to handle '\#pragma pack' and '\#pragma options align'.
void AddAlignmentAttributesForRecord(RecordDecl *RD);
/// AddMsStructLayoutForRecord - Adds ms_struct layout attribute to record.
void AddMsStructLayoutForRecord(RecordDecl *RD);
/// FreePackedContext - Deallocate and null out PackContext.
void FreePackedContext();
/// PushNamespaceVisibilityAttr - Note that we've entered a
/// namespace with a visibility attribute.
void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr,
SourceLocation Loc);
/// AddPushedVisibilityAttribute - If '\#pragma GCC visibility' was used,
/// add an appropriate visibility attribute.
void AddPushedVisibilityAttribute(Decl *RD);
/// PopPragmaVisibility - Pop the top element of the visibility stack; used
/// for '\#pragma GCC visibility' and visibility attributes on namespaces.
void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc);
/// FreeVisContext - Deallocate and null out VisContext.
void FreeVisContext();
/// AddCFAuditedAttribute - Check whether we're currently within
/// '\#pragma clang arc_cf_code_audited' and, if so, consider adding
/// the appropriate attribute.
void AddCFAuditedAttribute(Decl *D);
void ActOnPragmaAttributeAttribute(ParsedAttr &Attribute,
SourceLocation PragmaLoc,
attr::ParsedSubjectMatchRuleSet Rules);
void ActOnPragmaAttributeEmptyPush(SourceLocation PragmaLoc,
const IdentifierInfo *Namespace);
/// Called on well-formed '\#pragma clang attribute pop'.
void ActOnPragmaAttributePop(SourceLocation PragmaLoc,
const IdentifierInfo *Namespace);
/// Adds the attributes that have been specified using the
/// '\#pragma clang attribute push' directives to the given declaration.
void AddPragmaAttributes(Scope *S, Decl *D);
void DiagnoseUnterminatedPragmaAttribute();
/// Called on well formed \#pragma clang optimize.
void ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc);
/// Get the location for the currently active "\#pragma clang optimize
/// off". If this location is invalid, then the state of the pragma is "on".
/// (Presumably updated by ActOnPragmaOptimize above — confirm in the
/// implementation.)
SourceLocation getOptimizeOffPragmaLocation() const {
  return OptimizeOffPragmaLocation;
}
/// Only called on function definitions; if there is a pragma in scope
/// with the effect of a range-based optnone, consider marking the function
/// with attribute optnone.
void AddRangeBasedOptnone(FunctionDecl *FD);
/// Adds the 'optnone' attribute to the function declaration if there
/// are no conflicts; Loc represents the location causing the 'optnone'
/// attribute to be added (usually because of a pragma).
void AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD, SourceLocation Loc);
/// AddAlignedAttr - Adds an aligned attribute to a particular declaration.
void AddAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E,
unsigned SpellingListIndex, bool IsPackExpansion);
void AddAlignedAttr(SourceRange AttrRange, Decl *D, TypeSourceInfo *T,
unsigned SpellingListIndex, bool IsPackExpansion);
/// AddAssumeAlignedAttr - Adds an assume_aligned attribute to a particular
/// declaration.
void AddAssumeAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E, Expr *OE,
unsigned SpellingListIndex);
/// AddAllocAlignAttr - Adds an alloc_align attribute to a particular
/// declaration.
void AddAllocAlignAttr(SourceRange AttrRange, Decl *D, Expr *ParamExpr,
unsigned SpellingListIndex);
/// AddAlignValueAttr - Adds an align_value attribute to a particular
/// declaration.
void AddAlignValueAttr(SourceRange AttrRange, Decl *D, Expr *E,
unsigned SpellingListIndex);
/// AddLaunchBoundsAttr - Adds a launch_bounds attribute to a particular
/// declaration.
void AddLaunchBoundsAttr(SourceRange AttrRange, Decl *D, Expr *MaxThreads,
Expr *MinBlocks, unsigned SpellingListIndex);
/// AddModeAttr - Adds a mode attribute to a particular declaration.
void AddModeAttr(SourceRange AttrRange, Decl *D, IdentifierInfo *Name,
unsigned SpellingListIndex, bool InInstantiation = false);
void AddParameterABIAttr(SourceRange AttrRange, Decl *D,
ParameterABI ABI, unsigned SpellingListIndex);
enum class RetainOwnershipKind {NS, CF, OS};
void AddXConsumedAttr(Decl *D, SourceRange SR, unsigned SpellingIndex,
RetainOwnershipKind K, bool IsTemplateInstantiation);
/// addAMDGPUFlatWorkGroupSizeAttr - Adds an amdgpu_flat_work_group_size
/// attribute to a particular declaration.
void addAMDGPUFlatWorkGroupSizeAttr(SourceRange AttrRange, Decl *D, Expr *Min,
Expr *Max, unsigned SpellingListIndex);
/// addAMDGPUWavesPerEUAttr - Adds an amdgpu_waves_per_eu attribute to a
/// particular declaration.
void addAMDGPUWavesPerEUAttr(SourceRange AttrRange, Decl *D, Expr *Min,
Expr *Max, unsigned SpellingListIndex);
bool checkNSReturnsRetainedReturnType(SourceLocation loc, QualType type);
//===--------------------------------------------------------------------===//
// C++ Coroutines TS
//
bool ActOnCoroutineBodyStart(Scope *S, SourceLocation KwLoc,
StringRef Keyword);
ExprResult ActOnCoawaitExpr(Scope *S, SourceLocation KwLoc, Expr *E);
ExprResult ActOnCoyieldExpr(Scope *S, SourceLocation KwLoc, Expr *E);
StmtResult ActOnCoreturnStmt(Scope *S, SourceLocation KwLoc, Expr *E);
ExprResult BuildResolvedCoawaitExpr(SourceLocation KwLoc, Expr *E,
bool IsImplicit = false);
ExprResult BuildUnresolvedCoawaitExpr(SourceLocation KwLoc, Expr *E,
UnresolvedLookupExpr* Lookup);
ExprResult BuildCoyieldExpr(SourceLocation KwLoc, Expr *E);
StmtResult BuildCoreturnStmt(SourceLocation KwLoc, Expr *E,
bool IsImplicit = false);
StmtResult BuildCoroutineBodyStmt(CoroutineBodyStmt::CtorArgs);
bool buildCoroutineParameterMoves(SourceLocation Loc);
VarDecl *buildCoroutinePromise(SourceLocation Loc);
void CheckCompletedCoroutineBody(FunctionDecl *FD, Stmt *&Body);
ClassTemplateDecl *lookupCoroutineTraits(SourceLocation KwLoc,
SourceLocation FuncLoc);
//===--------------------------------------------------------------------===//
// OpenCL extensions.
//
private:
std::string CurrOpenCLExtension;
/// Extensions required by an OpenCL type.
llvm::DenseMap<const Type*, std::set<std::string>> OpenCLTypeExtMap;
/// Extensions required by an OpenCL declaration.
llvm::DenseMap<const Decl*, std::set<std::string>> OpenCLDeclExtMap;
public:
llvm::StringRef getCurrentOpenCLExtension() const {
  // Expose a non-owning view of the currently active extension name.
  llvm::StringRef CurrExt(CurrOpenCLExtension);
  return CurrExt;
}
/// Check if a function declaration \p FD associates with any
/// extensions present in OpenCLDeclExtMap and if so return the
/// extension(s) name(s).
std::string getOpenCLExtensionsFromDeclExtMap(FunctionDecl *FD);
/// Check if a function type \p FT associates with any
/// extensions present in OpenCLTypeExtMap and if so return the
/// extension(s) name(s).
std::string getOpenCLExtensionsFromTypeExtMap(FunctionType *FT);
/// Find an extension in an appropriate extension map and return its name
template<typename T, typename MapT>
std::string getOpenCLExtensionsFromExtMap(T* FT, MapT &Map);
void setCurrentOpenCLExtension(llvm::StringRef Ext) {
  // Copy the referenced characters into the owning member string.
  CurrOpenCLExtension = Ext.str();
}
/// Set OpenCL extensions for a type which can only be used when these
/// OpenCL extensions are enabled. If \p Exts is empty, do nothing.
/// \param Exts A space separated list of OpenCL extensions.
void setOpenCLExtensionForType(QualType T, llvm::StringRef Exts);
/// Set OpenCL extensions for a declaration which can only be
/// used when these OpenCL extensions are enabled. If \p Exts is empty, do
/// nothing.
/// \param Exts A space separated list of OpenCL extensions.
void setOpenCLExtensionForDecl(Decl *FD, llvm::StringRef Exts);
/// Set current OpenCL extensions for a type which can only be used
/// when these OpenCL extensions are enabled. If current OpenCL extension is
/// empty, do nothing.
void setCurrentOpenCLExtensionForType(QualType T);
/// Set current OpenCL extensions for a declaration which
/// can only be used when these OpenCL extensions are enabled. If current
/// OpenCL extension is empty, do nothing.
void setCurrentOpenCLExtensionForDecl(Decl *FD);
bool isOpenCLDisabledDecl(Decl *FD);
/// Check if type \p T corresponding to declaration specifier \p DS
/// is disabled due to required OpenCL extensions being disabled. If so,
/// emit diagnostics.
/// \return true if type is disabled.
bool checkOpenCLDisabledTypeDeclSpec(const DeclSpec &DS, QualType T);
/// Check if declaration \p D used by expression \p E
/// is disabled due to required OpenCL extensions being disabled. If so,
/// emit diagnostics.
/// \return true if type is disabled.
bool checkOpenCLDisabledDecl(const NamedDecl &D, const Expr &E);
//===--------------------------------------------------------------------===//
// OpenMP directives and clauses.
//
private:
void *VarDataSharingAttributesStack;
/// Number of nested '#pragma omp declare target' directives.
unsigned DeclareTargetNestingLevel = 0;
/// Initialization of data-sharing attributes stack.
void InitDataSharingAttributesStack();
void DestroyDataSharingAttributesStack();
ExprResult
VerifyPositiveIntegerConstantInClause(Expr *Op, OpenMPClauseKind CKind,
bool StrictlyPositive = true);
/// Returns OpenMP nesting level for current directive.
unsigned getOpenMPNestingLevel() const;
/// Adjusts the function scopes index for the target-based regions.
void adjustOpenMPTargetScopeIndex(unsigned &FunctionScopesIndex,
unsigned Level) const;
/// Push new OpenMP function region for non-capturing function.
void pushOpenMPFunctionRegion();
/// Pop OpenMP function region for non-capturing function.
void popOpenMPFunctionRegion(const sema::FunctionScopeInfo *OldFSI);
/// Check whether we're allowed to call Callee from the current function.
void checkOpenMPDeviceFunction(SourceLocation Loc, FunctionDecl *Callee);
/// Check if the expression is allowed to be used in expressions for the
/// OpenMP devices.
void checkOpenMPDeviceExpr(const Expr *E);
/// Checks if a type or a declaration is disabled due to the owning extension
/// being disabled, and emits diagnostic messages if it is disabled.
/// \param D type or declaration to be checked.
/// \param DiagLoc source location for the diagnostic message.
/// \param DiagInfo information to be emitted for the diagnostic message.
/// \param SrcRange source range of the declaration.
/// \param Map maps type or declaration to the extensions.
/// \param Selector selects diagnostic message: 0 for type and 1 for
/// declaration.
/// \return true if the type or declaration is disabled.
template <typename T, typename DiagLocT, typename DiagInfoT, typename MapT>
bool checkOpenCLDisabledTypeOrDecl(T D, DiagLocT DiagLoc, DiagInfoT DiagInfo,
MapT &Map, unsigned Selector = 0,
SourceRange SrcRange = SourceRange());
public:
/// Return true if the provided declaration \a VD should be captured by
/// reference.
/// \param Level Relative level of nested OpenMP construct for that the check
/// is performed.
bool isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level) const;
/// Check if the specified variable is used in one of the private
/// clauses (private, firstprivate, lastprivate, reduction etc.) in OpenMP
/// constructs.
VarDecl *isOpenMPCapturedDecl(ValueDecl *D);
ExprResult getOpenMPCapturedExpr(VarDecl *Capture, ExprValueKind VK,
ExprObjectKind OK, SourceLocation Loc);
/// If the current region is a loop-based region, mark the start of the loop
/// construct.
void startOpenMPLoop();
/// Check if the specified variable is used in 'private' clause.
/// \param Level Relative level of nested OpenMP construct for that the check
/// is performed.
bool isOpenMPPrivateDecl(const ValueDecl *D, unsigned Level) const;
/// Sets OpenMP capture kind (OMPC_private, OMPC_firstprivate, OMPC_map etc.)
/// for \p FD based on DSA for the provided corresponding captured declaration
/// \p D.
void setOpenMPCaptureKind(FieldDecl *FD, const ValueDecl *D, unsigned Level);
/// Check if the specified variable is captured by 'target' directive.
/// \param Level Relative level of nested OpenMP construct for that the check
/// is performed.
bool isOpenMPTargetCapturedDecl(const ValueDecl *D, unsigned Level) const;
ExprResult PerformOpenMPImplicitIntegerConversion(SourceLocation OpLoc,
Expr *Op);
/// Called on start of new data sharing attribute block.
void StartOpenMPDSABlock(OpenMPDirectiveKind K,
const DeclarationNameInfo &DirName, Scope *CurScope,
SourceLocation Loc);
/// Start analysis of clauses.
void StartOpenMPClause(OpenMPClauseKind K);
/// End analysis of clauses.
void EndOpenMPClause();
/// Called on end of data sharing attribute block.
void EndOpenMPDSABlock(Stmt *CurDirective);
/// Check if the current region is an OpenMP loop region and if it is,
/// mark loop control variable, used in \p Init for loop initialization, as
/// private by default.
/// \param Init First part of the for loop.
void ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init);
// OpenMP directives and clauses.
/// Called on correct id-expression from the '#pragma omp
/// threadprivate'.
ExprResult ActOnOpenMPIdExpression(Scope *CurScope, CXXScopeSpec &ScopeSpec,
const DeclarationNameInfo &Id,
OpenMPDirectiveKind Kind);
/// Called on well-formed '#pragma omp threadprivate'.
DeclGroupPtrTy ActOnOpenMPThreadprivateDirective(
SourceLocation Loc,
ArrayRef<Expr *> VarList);
/// Builds a new OpenMPThreadPrivateDecl and checks its correctness.
OMPThreadPrivateDecl *CheckOMPThreadPrivateDecl(SourceLocation Loc,
ArrayRef<Expr *> VarList);
/// Called on well-formed '#pragma omp allocate'.
DeclGroupPtrTy ActOnOpenMPAllocateDirective(SourceLocation Loc,
ArrayRef<Expr *> VarList,
ArrayRef<OMPClause *> Clauses,
DeclContext *Owner = nullptr);
/// Called on well-formed '#pragma omp requires'.
DeclGroupPtrTy ActOnOpenMPRequiresDirective(SourceLocation Loc,
ArrayRef<OMPClause *> ClauseList);
/// Check restrictions on Requires directive
OMPRequiresDecl *CheckOMPRequiresDecl(SourceLocation Loc,
ArrayRef<OMPClause *> Clauses);
/// Check if the specified type is allowed to be used in 'omp declare
/// reduction' construct.
QualType ActOnOpenMPDeclareReductionType(SourceLocation TyLoc,
TypeResult ParsedType);
/// Called on start of '#pragma omp declare reduction'.
DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveStart(
Scope *S, DeclContext *DC, DeclarationName Name,
ArrayRef<std::pair<QualType, SourceLocation>> ReductionTypes,
AccessSpecifier AS, Decl *PrevDeclInScope = nullptr);
/// Initialize declare reduction construct initializer.
void ActOnOpenMPDeclareReductionCombinerStart(Scope *S, Decl *D);
/// Finish current declare reduction construct initializer.
void ActOnOpenMPDeclareReductionCombinerEnd(Decl *D, Expr *Combiner);
/// Initialize declare reduction construct initializer.
/// \return omp_priv variable.
VarDecl *ActOnOpenMPDeclareReductionInitializerStart(Scope *S, Decl *D);
/// Finish current declare reduction construct initializer.
void ActOnOpenMPDeclareReductionInitializerEnd(Decl *D, Expr *Initializer,
VarDecl *OmpPrivParm);
/// Called at the end of '#pragma omp declare reduction'.
DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveEnd(
Scope *S, DeclGroupPtrTy DeclReductions, bool IsValid);
/// Check variable declaration in 'omp declare mapper' construct.
TypeResult ActOnOpenMPDeclareMapperVarDecl(Scope *S, Declarator &D);
/// Check if the specified type is allowed to be used in 'omp declare
/// mapper' construct.
QualType ActOnOpenMPDeclareMapperType(SourceLocation TyLoc,
TypeResult ParsedType);
/// Called on start of '#pragma omp declare mapper'.
OMPDeclareMapperDecl *ActOnOpenMPDeclareMapperDirectiveStart(
Scope *S, DeclContext *DC, DeclarationName Name, QualType MapperType,
SourceLocation StartLoc, DeclarationName VN, AccessSpecifier AS,
Decl *PrevDeclInScope = nullptr);
/// Build the mapper variable of '#pragma omp declare mapper'.
void ActOnOpenMPDeclareMapperDirectiveVarDecl(OMPDeclareMapperDecl *DMD,
Scope *S, QualType MapperType,
SourceLocation StartLoc,
DeclarationName VN);
/// Called at the end of '#pragma omp declare mapper'.
DeclGroupPtrTy
ActOnOpenMPDeclareMapperDirectiveEnd(OMPDeclareMapperDecl *D, Scope *S,
ArrayRef<OMPClause *> ClauseList);
/// Called on the start of target region i.e. '#pragma omp declare target'.
bool ActOnStartOpenMPDeclareTargetDirective(SourceLocation Loc);
/// Called at the end of target region i.e. '\#pragma omp end declare target'.
void ActOnFinishOpenMPDeclareTargetDirective();
/// Called on correct id-expression from the '#pragma omp declare target'.
void ActOnOpenMPDeclareTargetName(Scope *CurScope, CXXScopeSpec &ScopeSpec,
const DeclarationNameInfo &Id,
OMPDeclareTargetDeclAttr::MapTypeTy MT,
NamedDeclSetType &SameDirectiveDecls);
/// Check declaration inside target region.
void
checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D,
SourceLocation IdLoc = SourceLocation());
/// Return true inside OpenMP declare target region.
/// Return true inside OpenMP declare target region.
bool isInOpenMPDeclareTargetContext() const {
  // Non-zero nesting depth means at least one '#pragma omp declare target'
  // is currently open.
  return DeclareTargetNestingLevel != 0;
}
/// Return true inside OpenMP target region.
bool isInOpenMPTargetExecutionDirective() const;
/// Return true if (un)supported features for the current target should be
/// diagnosed if OpenMP (offloading) is enabled.
bool shouldDiagnoseTargetSupportFromOpenMP() const {
  // When not compiling for an OpenMP device, always diagnose.
  if (!getLangOpts().OpenMPIsDevice)
    return true;
  // On the device, diagnose only inside declare-target regions or target
  // execution directives.
  return isInOpenMPDeclareTargetContext() ||
         isInOpenMPTargetExecutionDirective();
}
/// Return the number of captured regions created for an OpenMP directive.
static int getOpenMPCaptureLevels(OpenMPDirectiveKind Kind);
/// Initialization of captured region for OpenMP region.
void ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope);
/// End of OpenMP region.
///
/// \param S Statement associated with the current OpenMP region.
/// \param Clauses List of clauses for the current OpenMP region.
///
/// \returns Statement for finished OpenMP region.
StmtResult ActOnOpenMPRegionEnd(StmtResult S, ArrayRef<OMPClause *> Clauses);
StmtResult ActOnOpenMPExecutableDirective(
OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName,
OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
using VarsWithInheritedDSAType =
llvm::SmallDenseMap<const ValueDecl *, const Expr *, 4>;
/// Called on well-formed '\#pragma omp simd' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp for' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPForDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp for simd' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPForSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp sections' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp section' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPSectionDirective(Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp single' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp master' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPMasterDirective(Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp critical' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPCriticalDirective(const DeclarationNameInfo &DirName,
ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel for' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel for simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel sections' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp task' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskyield'.
StmtResult ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp barrier'.
StmtResult ActOnOpenMPBarrierDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskwait'.
StmtResult ActOnOpenMPTaskwaitDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskgroup'.
StmtResult ActOnOpenMPTaskgroupDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp flush'.
StmtResult ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp ordered' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp atomic' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target data' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTargetDataDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target enter data' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetEnterDataDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp target exit data' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetExitDataDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp target parallel' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target parallel for' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp cancellation point'.
StmtResult
ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc,
SourceLocation EndLoc,
OpenMPDirectiveKind CancelRegion);
/// Called on well-formed '\#pragma omp cancel'.
StmtResult ActOnOpenMPCancelDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
OpenMPDirectiveKind CancelRegion);
/// Called on well-formed '\#pragma omp taskloop' after parsing of the
/// associated statement.
StmtResult
ActOnOpenMPTaskLoopDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp taskloop simd' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTaskLoopSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPDistributeDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target update'.
StmtResult ActOnOpenMPTargetUpdateDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp distribute parallel for' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute parallel for simd'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target parallel for simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target simd' after parsing of
/// the associated statement.
StmtResult
ActOnOpenMPTargetSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTeamsDistributeDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute simd' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute parallel for simd'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute parallel for'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTargetTeamsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target teams distribute' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute parallel for'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute parallel for
/// simd' after parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Checks correctness of linear modifiers.
bool CheckOpenMPLinearModifier(OpenMPLinearClauseKind LinKind,
SourceLocation LinLoc);
/// Checks that the specified declaration matches requirements for the linear
/// decls.
bool CheckOpenMPLinearDecl(const ValueDecl *D, SourceLocation ELoc,
OpenMPLinearClauseKind LinKind, QualType Type);
/// Called on well-formed '\#pragma omp declare simd' after parsing of
/// the associated method/function.
DeclGroupPtrTy ActOnOpenMPDeclareSimdDirective(
DeclGroupPtrTy DG, OMPDeclareSimdDeclAttr::BranchStateTy BS,
Expr *Simdlen, ArrayRef<Expr *> Uniforms, ArrayRef<Expr *> Aligneds,
ArrayRef<Expr *> Alignments, ArrayRef<Expr *> Linears,
ArrayRef<unsigned> LinModifiers, ArrayRef<Expr *> Steps, SourceRange SR);
OMPClause *ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind,
Expr *Expr,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'allocator' clause.
OMPClause *ActOnOpenMPAllocatorClause(Expr *Allocator,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'if' clause.
OMPClause *ActOnOpenMPIfClause(OpenMPDirectiveKind NameModifier,
Expr *Condition, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation NameModifierLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc);
/// Called on well-formed 'final' clause.
OMPClause *ActOnOpenMPFinalClause(Expr *Condition, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'num_threads' clause.
OMPClause *ActOnOpenMPNumThreadsClause(Expr *NumThreads,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'safelen' clause.
OMPClause *ActOnOpenMPSafelenClause(Expr *Length,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'simdlen' clause.
OMPClause *ActOnOpenMPSimdlenClause(Expr *Length, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'collapse' clause.
OMPClause *ActOnOpenMPCollapseClause(Expr *NumForLoops,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'ordered' clause.
OMPClause *
ActOnOpenMPOrderedClause(SourceLocation StartLoc, SourceLocation EndLoc,
SourceLocation LParenLoc = SourceLocation(),
Expr *NumForLoops = nullptr);
/// Called on well-formed 'grainsize' clause.
OMPClause *ActOnOpenMPGrainsizeClause(Expr *Size, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'num_tasks' clause.
OMPClause *ActOnOpenMPNumTasksClause(Expr *NumTasks, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'hint' clause.
OMPClause *ActOnOpenMPHintClause(Expr *Hint, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
OMPClause *ActOnOpenMPSimpleClause(OpenMPClauseKind Kind,
unsigned Argument,
SourceLocation ArgumentLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'default' clause.
OMPClause *ActOnOpenMPDefaultClause(OpenMPDefaultClauseKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'proc_bind' clause.
OMPClause *ActOnOpenMPProcBindClause(OpenMPProcBindClauseKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
OMPClause *ActOnOpenMPSingleExprWithArgClause(
OpenMPClauseKind Kind, ArrayRef<unsigned> Arguments, Expr *Expr,
SourceLocation StartLoc, SourceLocation LParenLoc,
ArrayRef<SourceLocation> ArgumentsLoc, SourceLocation DelimLoc,
SourceLocation EndLoc);
/// Called on well-formed 'schedule' clause.
OMPClause *ActOnOpenMPScheduleClause(
OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2,
OpenMPScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation M1Loc, SourceLocation M2Loc,
SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc);
OMPClause *ActOnOpenMPClause(OpenMPClauseKind Kind, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'nowait' clause.
OMPClause *ActOnOpenMPNowaitClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'untied' clause.
OMPClause *ActOnOpenMPUntiedClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'mergeable' clause.
OMPClause *ActOnOpenMPMergeableClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'read' clause.
OMPClause *ActOnOpenMPReadClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'write' clause.
OMPClause *ActOnOpenMPWriteClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'update' clause.
OMPClause *ActOnOpenMPUpdateClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'capture' clause.
OMPClause *ActOnOpenMPCaptureClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'seq_cst' clause.
OMPClause *ActOnOpenMPSeqCstClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'threads' clause.
OMPClause *ActOnOpenMPThreadsClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'simd' clause.
OMPClause *ActOnOpenMPSIMDClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'nogroup' clause.
OMPClause *ActOnOpenMPNogroupClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'unified_address' clause.
OMPClause *ActOnOpenMPUnifiedAddressClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'unified_shared_memory' clause.
OMPClause *ActOnOpenMPUnifiedSharedMemoryClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'reverse_offload' clause.
OMPClause *ActOnOpenMPReverseOffloadClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'dynamic_allocators' clause.
OMPClause *ActOnOpenMPDynamicAllocatorsClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'atomic_default_mem_order' clause.
OMPClause *ActOnOpenMPAtomicDefaultMemOrderClause(
OpenMPAtomicDefaultMemOrderClauseKind Kind, SourceLocation KindLoc,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc);
/// Generic entry point for clauses that carry a variable list; dispatches on
/// \p Kind to the dedicated ActOnOpenMP*Clause handler, forwarding only the
/// parameters relevant to that clause kind.
OMPClause *ActOnOpenMPVarListClause(
OpenMPClauseKind Kind, ArrayRef<Expr *> Vars, Expr *TailExpr,
const OMPVarListLocTy &Locs, SourceLocation ColonLoc,
CXXScopeSpec &ReductionOrMapperIdScopeSpec,
DeclarationNameInfo &ReductionOrMapperId, OpenMPDependClauseKind DepKind,
OpenMPLinearClauseKind LinKind,
ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
ArrayRef<SourceLocation> MapTypeModifiersLoc, OpenMPMapClauseKind MapType,
bool IsMapTypeImplicit, SourceLocation DepLinMapLoc);
/// Called on well-formed 'allocate' clause.
OMPClause *
ActOnOpenMPAllocateClause(Expr *Allocator, ArrayRef<Expr *> VarList,
SourceLocation StartLoc, SourceLocation ColonLoc,
SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'private' clause.
OMPClause *ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'firstprivate' clause.
OMPClause *ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'lastprivate' clause.
OMPClause *ActOnOpenMPLastprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'shared' clause.
OMPClause *ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'reduction' clause.
OMPClause *ActOnOpenMPReductionClause(
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'task_reduction' clause.
OMPClause *ActOnOpenMPTaskReductionClause(
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'in_reduction' clause.
OMPClause *ActOnOpenMPInReductionClause(
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'linear' clause.
OMPClause *
ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step,
SourceLocation StartLoc, SourceLocation LParenLoc,
OpenMPLinearClauseKind LinKind, SourceLocation LinLoc,
SourceLocation ColonLoc, SourceLocation EndLoc);
/// Called on well-formed 'aligned' clause.
OMPClause *ActOnOpenMPAlignedClause(ArrayRef<Expr *> VarList,
Expr *Alignment,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc);
/// Called on well-formed 'copyin' clause.
OMPClause *ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'copyprivate' clause.
OMPClause *ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'flush' pseudo clause.
OMPClause *ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'depend' clause.
OMPClause *
ActOnOpenMPDependClause(OpenMPDependClauseKind DepKind, SourceLocation DepLoc,
SourceLocation ColonLoc, ArrayRef<Expr *> VarList,
SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'device' clause.
OMPClause *ActOnOpenMPDeviceClause(Expr *Device, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'map' clause.
OMPClause *
ActOnOpenMPMapClause(ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
ArrayRef<SourceLocation> MapTypeModifiersLoc,
CXXScopeSpec &MapperIdScopeSpec,
DeclarationNameInfo &MapperId,
OpenMPMapClauseKind MapType, bool IsMapTypeImplicit,
SourceLocation MapLoc, SourceLocation ColonLoc,
ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs,
ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'num_teams' clause.
OMPClause *ActOnOpenMPNumTeamsClause(Expr *NumTeams, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'thread_limit' clause.
OMPClause *ActOnOpenMPThreadLimitClause(Expr *ThreadLimit,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'priority' clause.
OMPClause *ActOnOpenMPPriorityClause(Expr *Priority, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'dist_schedule' clause.
OMPClause *ActOnOpenMPDistScheduleClause(
OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KindLoc,
SourceLocation CommaLoc, SourceLocation EndLoc);
/// Called on well-formed 'defaultmap' clause.
OMPClause *ActOnOpenMPDefaultmapClause(
OpenMPDefaultmapClauseModifier M, OpenMPDefaultmapClauseKind Kind,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation MLoc,
SourceLocation KindLoc, SourceLocation EndLoc);
/// Called on well-formed 'to' clause.
OMPClause *
ActOnOpenMPToClause(ArrayRef<Expr *> VarList, CXXScopeSpec &MapperIdScopeSpec,
DeclarationNameInfo &MapperId,
const OMPVarListLocTy &Locs,
ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'from' clause.
OMPClause *ActOnOpenMPFromClause(
ArrayRef<Expr *> VarList, CXXScopeSpec &MapperIdScopeSpec,
DeclarationNameInfo &MapperId, const OMPVarListLocTy &Locs,
ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'use_device_ptr' clause.
OMPClause *ActOnOpenMPUseDevicePtrClause(ArrayRef<Expr *> VarList,
const OMPVarListLocTy &Locs);
/// Called on well-formed 'is_device_ptr' clause.
OMPClause *ActOnOpenMPIsDevicePtrClause(ArrayRef<Expr *> VarList,
const OMPVarListLocTy &Locs);
/// The kind of conversion being performed.
enum CheckedConversionKind {
/// An implicit conversion.
CCK_ImplicitConversion,
/// A C-style cast.
CCK_CStyleCast,
/// A functional-style cast.
CCK_FunctionalCast,
/// A cast other than a C-style cast.
CCK_OtherCast,
/// A conversion for an operand of a builtin overloaded operator.
CCK_ForBuiltinOverloadedOp
};
/// Returns true if \p CCK denotes an explicit cast (C-style, functional,
/// or other), as opposed to an implicit conversion or a conversion for a
/// builtin overloaded operator.
static bool isCast(CheckedConversionKind CCK) {
  switch (CCK) {
  case CCK_CStyleCast:
  case CCK_FunctionalCast:
  case CCK_OtherCast:
    return true;
  default:
    return false;
  }
}
/// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit
/// cast. If there is already an implicit cast, merge into the existing one.
/// If isLvalue, the result of the cast is an lvalue.
ExprResult ImpCastExprToType(Expr *E, QualType Type, CastKind CK,
ExprValueKind VK = VK_RValue,
const CXXCastPath *BasePath = nullptr,
CheckedConversionKind CCK
= CCK_ImplicitConversion);
/// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding
/// to the conversion from scalar type ScalarTy to the Boolean type.
static CastKind ScalarTypeToBooleanCastKind(QualType ScalarTy);
/// IgnoredValueConversions - Given that an expression's result is
/// syntactically ignored, perform any conversions that are
/// required.
ExprResult IgnoredValueConversions(Expr *E);
// UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts
// functions and arrays to their respective pointers (C99 6.3.2.1).
ExprResult UsualUnaryConversions(Expr *E);
/// CallExprUnaryConversions - a special case of an unary conversion
/// performed on a function designator of a call expression.
ExprResult CallExprUnaryConversions(Expr *E);
// DefaultFunctionArrayConversion - converts functions and arrays
// to their respective pointers (C99 6.3.2.1).
ExprResult DefaultFunctionArrayConversion(Expr *E, bool Diagnose = true);
// DefaultFunctionArrayLvalueConversion - converts functions and
// arrays to their respective pointers and performs the
// lvalue-to-rvalue conversion.
ExprResult DefaultFunctionArrayLvalueConversion(Expr *E,
bool Diagnose = true);
// DefaultLvalueConversion - performs lvalue-to-rvalue conversion on
// the operand. This is DefaultFunctionArrayLvalueConversion,
// except that it assumes the operand isn't of function or array
// type.
ExprResult DefaultLvalueConversion(Expr *E);
// DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that
// do not have a prototype. Integer promotions are performed on each
// argument, and arguments that have type float are promoted to double.
ExprResult DefaultArgumentPromotion(Expr *E);
/// If \p E is a prvalue denoting an unmaterialized temporary, materialize
/// it as an xvalue. In C++98, the result will still be a prvalue, because
/// we don't have xvalues there.
ExprResult TemporaryMaterializationConversion(Expr *E);
// Used for emitting the right warning by DefaultVariadicArgumentPromotion
enum VariadicCallType {
VariadicFunction,      ///< The callee is a variadic function.
VariadicBlock,         ///< The callee is a variadic block.
VariadicMethod,        ///< The callee is a variadic Objective-C method.
VariadicConstructor,   ///< The callee is a variadic constructor.
VariadicDoesNotApply   ///< The call is not variadic.
};
/// Determine which VariadicCallType applies to a call to \p FDecl (or the
/// callee expression \p Fn) with prototype \p Proto.
VariadicCallType getVariadicCallType(FunctionDecl *FDecl,
const FunctionProtoType *Proto,
Expr *Fn);
// Used for determining in which context a type is allowed to be passed to a
// vararg function.
enum VarArgKind {
VAK_Valid,          ///< The type may always be passed to a vararg function.
VAK_ValidInCXX11,   ///< Valid only in C++11 or later.
VAK_Undefined,      ///< Passing it has undefined behavior.
VAK_MSVCUndefined,  ///< Undefined, but accepted as a Microsoft extension.
VAK_Invalid         ///< The type may never be passed to a vararg function.
};
// Determines which VarArgKind fits an expression.
VarArgKind isValidVarArgType(const QualType &Ty);
/// Check to see if the given expression is a valid argument to a variadic
/// function, issuing a diagnostic if not.
void checkVariadicArgument(const Expr *E, VariadicCallType CT);
/// Check to see if a given expression could have '.c_str()' called on it.
bool hasCStrMethod(const Expr *E);
/// GatherArgumentsForCall - Collects argument expressions for various
/// forms of call prototypes, converting each argument as needed and
/// appending the results to \p AllArgs. Returns true on error.
bool GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl,
const FunctionProtoType *Proto,
unsigned FirstParam, ArrayRef<Expr *> Args,
SmallVectorImpl<Expr *> &AllArgs,
VariadicCallType CallType = VariadicDoesNotApply,
bool AllowExplicit = false,
bool IsListInitialization = false);
// DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but
// will create a runtime trap if the resulting type is not a POD type.
ExprResult DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT,
FunctionDecl *FDecl);
// UsualArithmeticConversions - performs the UsualUnaryConversions on its
// operands and then handles various conversions that are common to binary
// operators (C99 6.3.1.8). If both operands aren't arithmetic, this
// routine returns the first non-arithmetic type found. The client is
// responsible for emitting appropriate error diagnostics.
QualType UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS,
bool IsCompAssign = false);
/// AssignConvertType - All of the 'assignment' semantic checks return this
/// enum to indicate whether the assignment was allowed. These checks are
/// done for simple assignments, as well as initialization, return from
/// function, argument passing, etc. The query is phrased in terms of a
/// source and destination type.
enum AssignConvertType {
/// Compatible - the types are compatible according to the standard.
Compatible,
/// PointerToInt - The assignment converts a pointer to an int, which we
/// accept as an extension.
PointerToInt,
/// IntToPointer - The assignment converts an int to a pointer, which we
/// accept as an extension.
IntToPointer,
/// FunctionVoidPointer - The assignment is between a function pointer and
/// void*, which the standard doesn't allow, but we accept as an extension.
FunctionVoidPointer,
/// IncompatiblePointer - The assignment is between two pointers types that
/// are not compatible, but we accept them as an extension.
IncompatiblePointer,
/// IncompatiblePointerSign - The assignment is between two pointers types
/// which point to integers which have a different sign, but are otherwise
/// identical. This is a subset of the above, but broken out because it's by
/// far the most common case of incompatible pointers.
IncompatiblePointerSign,
/// CompatiblePointerDiscardsQualifiers - The assignment discards
/// c/v/r qualifiers, which we accept as an extension.
CompatiblePointerDiscardsQualifiers,
/// IncompatiblePointerDiscardsQualifiers - The assignment
/// discards qualifiers that we don't permit to be discarded,
/// like address spaces.
IncompatiblePointerDiscardsQualifiers,
/// IncompatibleNestedPointerQualifiers - The assignment is between two
/// nested pointer types, and the qualifiers other than the first two
/// levels differ e.g. char ** -> const char **, but we accept them as an
/// extension.
IncompatibleNestedPointerQualifiers,
/// IncompatibleVectors - The assignment is between two vector types that
/// have the same size, which we accept as an extension.
IncompatibleVectors,
/// IntToBlockPointer - The assignment converts an int to a block
/// pointer. We disallow this.
IntToBlockPointer,
/// IncompatibleBlockPointer - The assignment is between two block
/// pointers types that are not compatible.
IncompatibleBlockPointer,
/// IncompatibleObjCQualifiedId - The assignment is between a qualified
/// id type and something else (that is incompatible with it). For example,
/// "id <XXX>" = "Foo *", where "Foo *" doesn't implement the XXX protocol.
IncompatibleObjCQualifiedId,
/// IncompatibleObjCWeakRef - Assigning a weak-unavailable object to an
/// object with __weak qualifier.
IncompatibleObjCWeakRef,
/// Incompatible - We reject this conversion outright, it is invalid to
/// represent it in the AST.
Incompatible
};
/// DiagnoseAssignmentResult - Emit a diagnostic, if required, for the
/// assignment conversion type specified by ConvTy. This returns true if the
/// conversion was invalid or false if the conversion was accepted.
bool DiagnoseAssignmentResult(AssignConvertType ConvTy,
SourceLocation Loc,
QualType DstType, QualType SrcType,
Expr *SrcExpr, AssignmentAction Action,
bool *Complained = nullptr);
/// IsValueInFlagEnum - Determine if a value is allowed as part of a flag
/// enum. If AllowMask is true, then we also allow the complement of a valid
/// value, to be used as a mask.
bool IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val,
bool AllowMask) const;
/// DiagnoseAssignmentEnum - Warn if assignment to enum is a constant
/// integer not in the range of enum values.
void DiagnoseAssignmentEnum(QualType DstType, QualType SrcType,
Expr *SrcExpr);
/// CheckAssignmentConstraints - Perform type checking for assignment,
/// argument passing, variable initialization, and function return values.
/// C99 6.5.16.
AssignConvertType CheckAssignmentConstraints(SourceLocation Loc,
QualType LHSType,
QualType RHSType);
/// Check assignment constraints and optionally prepare for a conversion of
/// the RHS to the LHS type. The conversion is prepared for if ConvertRHS
/// is true.
AssignConvertType CheckAssignmentConstraints(QualType LHSType,
ExprResult &RHS,
CastKind &Kind,
bool ConvertRHS = true);
/// Check assignment constraints for an assignment of RHS to LHSType.
///
/// \param LHSType The destination type for the assignment.
/// \param RHS The source expression for the assignment.
/// \param Diagnose If \c true, diagnostics may be produced when checking
/// for assignability. If a diagnostic is produced, \p RHS will be
/// set to ExprError(). Note that this function may still return
/// without producing a diagnostic, even for an invalid assignment.
/// \param DiagnoseCFAudited If \c true, the target is a function parameter
/// in an audited Core Foundation API and does not need to be checked
/// for ARC retain issues.
/// \param ConvertRHS If \c true, \p RHS will be updated to model the
/// conversions necessary to perform the assignment. If \c false,
/// \p Diagnose must also be \c false.
AssignConvertType CheckSingleAssignmentConstraints(
QualType LHSType, ExprResult &RHS, bool Diagnose = true,
bool DiagnoseCFAudited = false, bool ConvertRHS = true);
// If the lhs type is a transparent union, check whether we
// can initialize the transparent union with the given expression.
AssignConvertType CheckTransparentUnionArgumentConstraints(QualType ArgType,
ExprResult &RHS);
bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType);
bool CheckExceptionSpecCompatibility(Expr *From, QualType ToType);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
AssignmentAction Action,
bool AllowExplicit = false);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
AssignmentAction Action,
bool AllowExplicit,
ImplicitConversionSequence& ICS);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
const ImplicitConversionSequence& ICS,
AssignmentAction Action,
CheckedConversionKind CCK
= CCK_ImplicitConversion);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
const StandardConversionSequence& SCS,
AssignmentAction Action,
CheckedConversionKind CCK);
ExprResult PerformQualificationConversion(
Expr *E, QualType Ty, ExprValueKind VK = VK_RValue,
CheckedConversionKind CCK = CCK_ImplicitConversion);
/// the following "Check" methods will return a valid/converted QualType
/// or a null QualType (indicating an error diagnostic was issued).
/// type checking binary operators (subroutines of CreateBuiltinBinOp).
QualType InvalidOperands(SourceLocation Loc, ExprResult &LHS,
ExprResult &RHS);
QualType InvalidLogicalVectorOperands(SourceLocation Loc, ExprResult &LHS,
ExprResult &RHS);
QualType CheckPointerToMemberOperands( // C++ 5.5
ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK,
SourceLocation OpLoc, bool isIndirect);
QualType CheckMultiplyDivideOperands( // C99 6.5.5
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign,
bool IsDivide);
QualType CheckRemainderOperands( // C99 6.5.5
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
bool IsCompAssign = false);
QualType CheckAdditionOperands( // C99 6.5.6
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc, QualType* CompLHSTy = nullptr);
QualType CheckSubtractionOperands( // C99 6.5.6
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
QualType* CompLHSTy = nullptr);
QualType CheckShiftOperands( // C99 6.5.7
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc, bool IsCompAssign = false);
QualType CheckCompareOperands( // C99 6.5.8/9
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckBitwiseOperands( // C99 6.5.[10...12]
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckLogicalOperands( // C99 6.5.[13,14]
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
// CheckAssignmentOperands is used for both simple and compound assignment.
// For simple assignment, pass both expressions and a null converted type.
// For compound assignment, pass both expressions and the converted type.
QualType CheckAssignmentOperands( // C99 6.5.16.[1,2]
Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc, QualType CompoundType);
ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc,
UnaryOperatorKind Opcode, Expr *Op);
ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc,
BinaryOperatorKind Opcode,
Expr *LHS, Expr *RHS);
ExprResult checkPseudoObjectRValue(Expr *E);
Expr *recreateSyntacticForm(PseudoObjectExpr *E);
QualType CheckConditionalOperands( // C99 6.5.15
ExprResult &Cond, ExprResult &LHS, ExprResult &RHS,
ExprValueKind &VK, ExprObjectKind &OK, SourceLocation QuestionLoc);
QualType CXXCheckConditionalOperands( // C++ 5.16
ExprResult &cond, ExprResult &lhs, ExprResult &rhs,
ExprValueKind &VK, ExprObjectKind &OK, SourceLocation questionLoc);
QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2,
bool ConvertArgs = true);
/// Convenience overload of FindCompositePointerType that operates on
/// ExprResults: unwraps the expressions, delegates to the Expr*& overload
/// (which may rewrite them), then stores the possibly-converted
/// expressions back into \p E1 and \p E2.
QualType FindCompositePointerType(SourceLocation Loc,
                                  ExprResult &E1, ExprResult &E2,
                                  bool ConvertArgs = true) {
  Expr *LHS = E1.get();
  Expr *RHS = E2.get();
  QualType Result = FindCompositePointerType(Loc, LHS, RHS, ConvertArgs);
  E1 = LHS;
  E2 = RHS;
  return Result;
}
QualType FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS,
SourceLocation QuestionLoc);
bool DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr,
SourceLocation QuestionLoc);
void DiagnoseAlwaysNonNullPointer(Expr *E,
Expr::NullPointerConstantKind NullType,
bool IsEqual, SourceRange Range);
/// type checking for vector binary operators.
QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc, bool IsCompAssign,
bool AllowBothBool, bool AllowBoolConversion);
QualType GetSignedVectorType(QualType V);
QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc);
bool areLaxCompatibleVectorTypes(QualType srcType, QualType destType);
bool isLaxVectorConversion(QualType srcType, QualType destType);
/// type checking declaration initializers (C99 6.7.8)
bool CheckForConstantInitializer(Expr *e, QualType t);
// type checking C++ declaration initializers (C++ [dcl.init]).
/// ReferenceCompareResult - Expresses the result of comparing two
/// types (cv1 T1 and cv2 T2) to determine their compatibility for the
/// purposes of initialization by reference (C++ [dcl.init.ref]p4).
enum ReferenceCompareResult {
/// Ref_Incompatible - The two types are incompatible, so direct
/// reference binding is not possible.
Ref_Incompatible = 0,
/// Ref_Related - The two types are reference-related, which means
/// that their unqualified forms (T1 and T2) are either the same
/// or T1 is a base class of T2.
Ref_Related,
/// Ref_Compatible - The two types are reference-compatible.
Ref_Compatible
};
ReferenceCompareResult CompareReferenceRelationship(SourceLocation Loc,
QualType T1, QualType T2,
bool &DerivedToBase,
bool &ObjCConversion,
bool &ObjCLifetimeConversion);
ExprResult checkUnknownAnyCast(SourceRange TypeRange, QualType CastType,
Expr *CastExpr, CastKind &CastKind,
ExprValueKind &VK, CXXCastPath &Path);
/// Force an expression with unknown-type to an expression of the
/// given type.
ExprResult forceUnknownAnyToType(Expr *E, QualType ToType);
/// Type-check an expression that's being passed to an
/// __unknown_anytype parameter.
ExprResult checkUnknownAnyArg(SourceLocation callLoc,
Expr *result, QualType ¶mType);
// CheckVectorCast - check type constraints for vectors.
// Since vectors are an extension, there is no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size.
// Returns true if the cast is invalid.
bool CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty,
CastKind &Kind);
/// Prepare `SplattedExpr` for a vector splat operation, adding
/// implicit casts if necessary.
ExprResult prepareVectorSplat(QualType VectorTy, Expr *SplattedExpr);
// CheckExtVectorCast - check type constraints for extended vectors.
// Since vectors are an extension, there is no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size,
// or vectors and the element type of that vector.
// Returns the cast expr.
ExprResult CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr,
CastKind &Kind);
ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo, QualType Type,
SourceLocation LParenLoc,
Expr *CastExpr,
SourceLocation RParenLoc);
enum ARCConversionResult { ACR_okay, ACR_unbridged, ACR_error };
/// Checks for invalid conversions and casts between
/// retainable pointers and other pointer kinds for ARC and Weak.
ARCConversionResult CheckObjCConversion(SourceRange castRange,
QualType castType, Expr *&op,
CheckedConversionKind CCK,
bool Diagnose = true,
bool DiagnoseCFAudited = false,
BinaryOperatorKind Opc = BO_PtrMemD
);
Expr *stripARCUnbridgedCast(Expr *e);
void diagnoseARCUnbridgedCast(Expr *e);
bool CheckObjCARCUnavailableWeakConversion(QualType castType,
QualType ExprType);
/// checkRetainCycles - Check whether an Objective-C message send
/// might create an obvious retain cycle.
void checkRetainCycles(ObjCMessageExpr *msg);
void checkRetainCycles(Expr *receiver, Expr *argument);
void checkRetainCycles(VarDecl *Var, Expr *Init);
/// checkUnsafeAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained type.
bool checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS);
/// checkUnsafeExprAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained expression.
void checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS);
/// CheckMessageArgumentTypes - Check types in an Obj-C message send.
/// \param Method - May be null.
/// \param [out] ReturnType - The return type of the send.
/// \return true iff there were any incompatible types.
bool CheckMessageArgumentTypes(const Expr *Receiver, QualType ReceiverType,
MultiExprArg Args, Selector Sel,
ArrayRef<SourceLocation> SelectorLocs,
ObjCMethodDecl *Method, bool isClassMessage,
bool isSuperMessage, SourceLocation lbrac,
SourceLocation rbrac, SourceRange RecRange,
QualType &ReturnType, ExprValueKind &VK);
/// Determine the result of a message send expression based on
/// the type of the receiver, the method expected to receive the message,
/// and the form of the message send.
QualType getMessageSendResultType(const Expr *Receiver, QualType ReceiverType,
ObjCMethodDecl *Method, bool isClassMessage,
bool isSuperMessage);
/// If the given expression involves a message send to a method
/// with a related result type, emit a note describing what happened.
void EmitRelatedResultTypeNote(const Expr *E);
/// Given that we had incompatible pointer types in a return
/// statement, check whether we're in a method with a related result
/// type, and if so, emit a note describing what happened.
void EmitRelatedResultTypeNoteForReturn(QualType destType);
/// The result of semantic analysis of a condition (e.g. of an 'if' or
/// 'while' statement): an optional condition variable, the converted
/// condition expression, and — for constexpr conditions — its known
/// boolean value.
class ConditionResult {
// Variable declared in the condition, if any; null otherwise
// (see the cast_or_null in get()).
Decl *ConditionVar;
// The condition expression, wrapped as a full-expression.
FullExprArg Condition;
// True if the condition was invalid (set only by the bool constructor).
bool Invalid;
// True when the condition is constexpr, non-null, and not value-dependent,
// so its value was evaluated at compile time.
bool HasKnownValue;
// The evaluated constant value; meaningful only if HasKnownValue.
bool KnownValue;
friend class Sema;
ConditionResult(Sema &S, Decl *ConditionVar, FullExprArg Condition,
bool IsConstexpr)
: ConditionVar(ConditionVar), Condition(Condition), Invalid(false),
HasKnownValue(IsConstexpr && Condition.get() &&
!Condition.get()->isValueDependent()),
KnownValue(HasKnownValue &&
!!Condition.get()->EvaluateKnownConstInt(S.Context)) {}
explicit ConditionResult(bool Invalid)
: ConditionVar(nullptr), Condition(nullptr), Invalid(Invalid),
HasKnownValue(false), KnownValue(false) {}
public:
ConditionResult() : ConditionResult(false) {}
bool isInvalid() const { return Invalid; }
// Returns the condition variable (may be null) and the condition expression.
std::pair<VarDecl *, Expr *> get() const {
return std::make_pair(cast_or_null<VarDecl>(ConditionVar),
Condition.get());
}
// Returns the compile-time value of the condition, or None if it was not
// evaluable at compile time.
llvm::Optional<bool> getKnownValue() const {
if (!HasKnownValue)
return None;
return KnownValue;
}
};
static ConditionResult ConditionError() { return ConditionResult(true); }
enum class ConditionKind {
Boolean, ///< A boolean condition, from 'if', 'while', 'for', or 'do'.
ConstexprIf, ///< A constant boolean condition from 'if constexpr'.
Switch ///< An integral condition for a 'switch' statement.
};
ConditionResult ActOnCondition(Scope *S, SourceLocation Loc,
Expr *SubExpr, ConditionKind CK);
ConditionResult ActOnConditionVariable(Decl *ConditionVar,
SourceLocation StmtLoc,
ConditionKind CK);
DeclResult ActOnCXXConditionDeclaration(Scope *S, Declarator &D);
ExprResult CheckConditionVariable(VarDecl *ConditionVar,
SourceLocation StmtLoc,
ConditionKind CK);
ExprResult CheckSwitchCondition(SourceLocation SwitchLoc, Expr *Cond);
/// CheckBooleanCondition - Diagnose problems involving the use of
/// the given expression as a boolean condition (e.g. in an if
/// statement). Also performs the standard function and array
/// decays, possibly changing the input variable.
///
/// \param Loc - A location associated with the condition, e.g. the
/// 'if' keyword.
/// \return true iff there were any errors
ExprResult CheckBooleanCondition(SourceLocation Loc, Expr *E,
bool IsConstexpr = false);
/// DiagnoseAssignmentAsCondition - Given that an expression is
/// being used as a boolean condition, warn if it's an assignment.
void DiagnoseAssignmentAsCondition(Expr *E);
/// Redundant parentheses over an equality comparison can indicate
/// that the user intended an assignment used as condition.
void DiagnoseEqualityWithExtraParens(ParenExpr *ParenE);
/// CheckCXXBooleanCondition - Returns true if conversion to bool is invalid.
ExprResult CheckCXXBooleanCondition(Expr *CondExpr, bool IsConstexpr = false);
/// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have
/// the specified width and sign. If an overflow occurs, detect it and emit
/// the specified diagnostic.
void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal,
unsigned NewWidth, bool NewSign,
SourceLocation Loc, unsigned DiagID);
/// Checks that the Objective-C declaration is declared in the global scope.
/// Emits an error and marks the declaration as invalid if it's not declared
/// in the global scope.
bool CheckObjCDeclScope(Decl *D);
/// Abstract base class used for diagnosing integer constant
/// expression violations.
class VerifyICEDiagnoser {
public:
// If true, suppress all diagnostics from this diagnoser.
bool Suppress;
VerifyICEDiagnoser(bool Suppress = false) : Suppress(Suppress) { }
// Diagnose that the expression is not an integer constant expression.
virtual void diagnoseNotICE(Sema &S, SourceLocation Loc, SourceRange SR) =0;
// Diagnose that the expression had to be folded to a constant; the default
// implementation may be overridden for a clause-specific message.
virtual void diagnoseFold(Sema &S, SourceLocation Loc, SourceRange SR);
virtual ~VerifyICEDiagnoser() { }
};
/// VerifyIntegerConstantExpression - Verifies that an expression is an ICE,
/// and reports the appropriate diagnostics. Returns false on success.
/// Can optionally return the value of the expression.
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
VerifyICEDiagnoser &Diagnoser,
bool AllowFold = true);
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
unsigned DiagID,
bool AllowFold = true);
ExprResult VerifyIntegerConstantExpression(Expr *E,
llvm::APSInt *Result = nullptr);
/// VerifyBitField - verifies that a bit field expression is an ICE and has
/// the correct width, and that the field type is valid.
/// Returns false on success.
/// Can optionally return whether the bit-field is of width 0
ExprResult VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName,
QualType FieldTy, bool IsMsStruct,
Expr *BitWidth, bool *ZeroWidth = nullptr);
private:
// Count of pragmas currently forcing functions to be __host__ __device__;
// managed by Push/PopForceCUDAHostDevice() below.
unsigned ForceCUDAHostDeviceDepth = 0;
public:
/// Increments our count of the number of times we've seen a pragma forcing
/// functions to be __host__ __device__. So long as this count is greater
/// than zero, all functions encountered will be __host__ __device__.
void PushForceCUDAHostDevice();
/// Decrements our count of the number of times we've seen a pragma forcing
/// functions to be __host__ __device__. Returns false if the count is 0
/// before decrementing, so you can emit an error.
bool PopForceCUDAHostDevice();
/// Diagnostics that are emitted only if we discover that the given function
/// must be codegen'ed. Because handling these correctly adds overhead to
/// compilation, this is currently only enabled for CUDA compilations.
llvm::DenseMap<CanonicalDeclPtr<FunctionDecl>,
std::vector<PartialDiagnosticAt>>
DeviceDeferredDiags;
/// A pair of a canonical FunctionDecl and a SourceLocation. When used as the
/// key in a hashtable, both the FD and location are hashed.
struct FunctionDeclAndLoc {
CanonicalDeclPtr<FunctionDecl> FD;
SourceLocation Loc;
};
/// FunctionDecls and SourceLocations for which CheckCUDACall has emitted a
/// (maybe deferred) "bad call" diagnostic. We use this to avoid emitting the
/// same deferred diag twice.
llvm::DenseSet<FunctionDeclAndLoc> LocsWithCUDACallDiags;
/// An inverse call graph, mapping known-emitted functions to one of their
/// known-emitted callers (plus the location of the call).
///
/// Functions that we can tell a priori must be emitted aren't added to this
/// map.
llvm::DenseMap</* Callee = */ CanonicalDeclPtr<FunctionDecl>,
/* Caller = */ FunctionDeclAndLoc>
DeviceKnownEmittedFns;
/// A partial call graph maintained during CUDA/OpenMP device code compilation
/// to support deferred diagnostics.
///
/// Functions are only added here if, at the time they're considered, they are
/// not known-emitted. As soon as we discover that a function is
/// known-emitted, we remove it and everything it transitively calls from this
/// set and add those functions to DeviceKnownEmittedFns.
llvm::DenseMap</* Caller = */ CanonicalDeclPtr<FunctionDecl>,
/* Callees = */ llvm::MapVector<CanonicalDeclPtr<FunctionDecl>,
SourceLocation>>
DeviceCallGraph;
/// Diagnostic builder for CUDA/OpenMP devices errors which may or may not be
/// deferred.
///
/// In CUDA, there exist constructs (e.g. variable-length arrays, try/catch)
/// which are not allowed to appear inside __device__ functions and are
/// allowed to appear in __host__ __device__ functions only if the host+device
/// function is never codegen'ed.
///
/// To handle this, we use the notion of "deferred diagnostics", where we
/// attach a diagnostic to a FunctionDecl that's emitted iff it's codegen'ed.
///
/// This class lets you emit either a regular diagnostic, a deferred
/// diagnostic, or no diagnostic at all, according to an argument you pass to
/// its constructor, thus simplifying the process of creating these "maybe
/// deferred" diagnostics.
class DeviceDiagBuilder {
public:
enum Kind {
/// Emit no diagnostics.
K_Nop,
/// Emit the diagnostic immediately (i.e., behave like Sema::Diag()).
K_Immediate,
/// Emit the diagnostic immediately, and, if it's a warning or error, also
/// emit a call stack showing how this function can be reached by an a
/// priori known-emitted function.
K_ImmediateWithCallStack,
/// Create a deferred diagnostic, which is emitted only if the function
/// it's attached to is codegen'ed. Also emit a call stack as with
/// K_ImmediateWithCallStack.
K_Deferred
};
DeviceDiagBuilder(Kind K, SourceLocation Loc, unsigned DiagID,
FunctionDecl *Fn, Sema &S);
DeviceDiagBuilder(DeviceDiagBuilder &&D);
DeviceDiagBuilder(const DeviceDiagBuilder &) = default;
~DeviceDiagBuilder();
/// Convertible to bool: True if we immediately emitted an error, false if
/// we didn't emit an error or we created a deferred error.
///
/// Example usage:
///
/// if (DeviceDiagBuilder(...) << foo << bar)
/// return ExprError();
///
/// But see CUDADiagIfDeviceCode() and CUDADiagIfHostCode() -- you probably
/// want to use these instead of creating a DeviceDiagBuilder yourself.
operator bool() const { return ImmediateDiag.hasValue(); }
// Streams \p Value into whichever diagnostic this builder wraps: the
// immediate diagnostic if one exists, otherwise the deferred partial
// diagnostic stored in S.DeviceDeferredDiags (if any); no-op for K_Nop.
template <typename T>
friend const DeviceDiagBuilder &operator<<(const DeviceDiagBuilder &Diag,
const T &Value) {
if (Diag.ImmediateDiag.hasValue())
*Diag.ImmediateDiag << Value;
else if (Diag.PartialDiagId.hasValue())
Diag.S.DeviceDeferredDiags[Diag.Fn][*Diag.PartialDiagId].second
<< Value;
return Diag;
}
private:
Sema &S;             // Sema instance the diagnostic is emitted through.
SourceLocation Loc;  // Location the diagnostic points at.
unsigned DiagID;     // ID of the diagnostic to emit.
FunctionDecl *Fn;    // Function a deferred diagnostic is attached to.
bool ShowCallStack;  // Whether to also emit the device call stack.
// Invariant: At most one of these Optionals has a value.
// FIXME: Switch these to a Variant once that exists.
llvm::Optional<SemaDiagnosticBuilder> ImmediateDiag;
llvm::Optional<unsigned> PartialDiagId;
};
/// Indicate that this function (and thus everything it transitively calls)
/// will be codegen'ed, and emit any deferred diagnostics on this function and
/// its (transitive) callees.
void markKnownEmitted(
Sema &S, FunctionDecl *OrigCaller, FunctionDecl *OrigCallee,
SourceLocation OrigLoc,
const llvm::function_ref<bool(Sema &, FunctionDecl *)> IsKnownEmitted);
/// Creates a DeviceDiagBuilder that emits the diagnostic if the current context
/// is "used as device code".
///
/// - If CurContext is a __host__ function, does not emit any diagnostics.
/// - If CurContext is a __device__ or __global__ function, emits the
/// diagnostics immediately.
/// - If CurContext is a __host__ __device__ function and we are compiling for
/// the device, creates a diagnostic which is emitted if and when we realize
/// that the function will be codegen'ed.
///
/// Example usage:
///
/// // Variable-length arrays are not allowed in CUDA device code.
/// if (CUDADiagIfDeviceCode(Loc, diag::err_cuda_vla) << CurrentCUDATarget())
/// return ExprError();
/// // Otherwise, continue parsing as normal.
DeviceDiagBuilder CUDADiagIfDeviceCode(SourceLocation Loc, unsigned DiagID);
/// Creates a DeviceDiagBuilder that emits the diagnostic if the current context
/// is "used as host code".
///
/// Same as CUDADiagIfDeviceCode, with "host" and "device" switched.
DeviceDiagBuilder CUDADiagIfHostCode(SourceLocation Loc, unsigned DiagID);
/// Creates a DeviceDiagBuilder that emits the diagnostic if the current
/// context is "used as device code".
///
/// - If CurContext is a `declare target` function or it is known that the
/// function is emitted for the device, emits the diagnostics immediately.
/// - If CurContext is a non-`declare target` function and we are compiling
/// for the device, creates a diagnostic which is emitted if and when we
/// realize that the function will be codegen'ed.
///
/// Example usage:
///
/// // Variable-length arrays are not allowed in NVPTX device code.
/// if (diagIfOpenMPDeviceCode(Loc, diag::err_vla_unsupported))
/// return ExprError();
/// // Otherwise, continue parsing as normal.
DeviceDiagBuilder diagIfOpenMPDeviceCode(SourceLocation Loc, unsigned DiagID);
/// Creates a DeviceDiagBuilder appropriate for the current (CUDA or OpenMP)
/// offloading target context.
DeviceDiagBuilder targetDiag(SourceLocation Loc, unsigned DiagID);
enum CUDAFunctionTarget {
CFT_Device,        ///< __device__ function.
CFT_Global,        ///< __global__ kernel.
CFT_Host,          ///< __host__ function.
CFT_HostDevice,    ///< __host__ __device__ function.
CFT_InvalidTarget  ///< Invalid combination of target attributes.
};
/// Determines whether the given function is a CUDA device/host/kernel/etc.
/// function.
///
/// Use this rather than examining the function's attributes yourself -- you
/// will get it wrong. Returns CFT_Host if D is null.
CUDAFunctionTarget IdentifyCUDATarget(const FunctionDecl *D,
bool IgnoreImplicitHDAttr = false);
/// Determine the CUDA target from a parsed attribute list (e.g. before the
/// declaration itself has been built).
CUDAFunctionTarget IdentifyCUDATarget(const ParsedAttributesView &Attrs);
/// Gets the CUDA target for the current context.
CUDAFunctionTarget CurrentCUDATarget() {
// CurContext may not be a FunctionDecl; IdentifyCUDATarget maps the
// resulting null pointer to CFT_Host (see above).
return IdentifyCUDATarget(dyn_cast<FunctionDecl>(CurContext));
}
// CUDA function call preference. Must be ordered numerically from
// worst to best.
enum CUDAFunctionPreference {
CFP_Never, // Invalid caller/callee combination.
CFP_WrongSide, // Calls from host-device to host or device
// function that do not match current compilation
// mode.
CFP_HostDevice, // Any calls to host/device functions.
CFP_SameSide, // Calls from host-device to host or device
// function matching current compilation mode.
CFP_Native, // host-to-host or device-to-device calls.
};
/// Identifies relative preference of a given Caller/Callee
/// combination, based on their host/device attributes.
/// \param Caller function which needs address of \p Callee.
/// nullptr in case of global context.
/// \param Callee target function
///
/// \returns preference value for particular Caller/Callee combination.
CUDAFunctionPreference IdentifyCUDAPreference(const FunctionDecl *Caller,
const FunctionDecl *Callee);
/// Determines whether Caller may invoke Callee, based on their CUDA
/// host/device attributes. Returns false if the call is not allowed.
///
/// Note: Will return true for CFP_WrongSide calls. These may appear in
/// semantically correct CUDA programs, but only if they're never codegen'ed.
bool IsAllowedCUDACall(const FunctionDecl *Caller,
const FunctionDecl *Callee) {
// Every preference other than CFP_Never is allowed.
return IdentifyCUDAPreference(Caller, Callee) != CFP_Never;
}
/// May add implicit CUDAHostAttr and CUDADeviceAttr attributes to FD,
/// depending on FD and the current compilation settings.
void maybeAddCUDAHostDeviceAttrs(FunctionDecl *FD,
const LookupResult &Previous);
public:
/// Check whether we're allowed to call Callee from the current context.
///
/// - If the call is never allowed in a semantically-correct program
/// (CFP_Never), emits an error and returns false.
///
/// - If the call is allowed in semantically-correct programs, but only if
/// it's never codegen'ed (CFP_WrongSide), creates a deferred diagnostic to
/// be emitted if and when the caller is codegen'ed, and returns true.
///
/// Will only create deferred diagnostics for a given SourceLocation once,
/// so you can safely call this multiple times without generating duplicate
/// deferred errors.
///
/// - Otherwise, returns true without emitting any diagnostics.
bool CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee);
/// Set __device__ or __host__ __device__ attributes on the given lambda
/// operator() method.
///
/// CUDA lambdas declared inside __device__ or __global__ functions inherit
/// the __device__ attribute. Similarly, lambdas inside __host__ __device__
/// functions become __host__ __device__ themselves.
void CUDASetLambdaAttrs(CXXMethodDecl *Method);
/// Finds a function in \p Matches with highest calling priority
/// from \p Caller context and erases all functions with lower
/// calling priority.
void EraseUnwantedCUDAMatches(
const FunctionDecl *Caller,
SmallVectorImpl<std::pair<DeclAccessPair, FunctionDecl *>> &Matches);
/// Given an implicit special member, infer its CUDA target from the
/// calls it needs to make to underlying base/field special members.
/// \param ClassDecl the class for which the member is being created.
/// \param CSM the kind of special member.
/// \param MemberDecl the special member itself.
/// \param ConstRHS true if this is a copy operation with a const object on
/// its RHS.
/// \param Diagnose true if this call should emit diagnostics.
/// \return true if there was an error inferring.
/// The result of this call is implicit CUDA target attribute(s) attached to
/// the member declaration.
bool inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl,
CXXSpecialMember CSM,
CXXMethodDecl *MemberDecl,
bool ConstRHS,
bool Diagnose);
/// \return true if \p CD can be considered empty according to CUDA
/// (E.2.3.1 in CUDA 7.5 Programming guide).
bool isEmptyCudaConstructor(SourceLocation Loc, CXXConstructorDecl *CD);
bool isEmptyCudaDestructor(SourceLocation Loc, CXXDestructorDecl *CD);
// \brief Checks that initializers of \p Var satisfy CUDA restrictions. In
// case of error emits appropriate diagnostic and invalidates \p Var.
//
// \details CUDA allows only empty constructors as initializers for global
// variables (see E.2.3.1, CUDA 7.5). The same restriction also applies to all
// __shared__ variables whether they are local or not (they all are implicitly
// static in CUDA). One exception is that CUDA allows constant initializers
// for __constant__ and __device__ variables.
void checkAllowedCUDAInitializer(VarDecl *VD);
/// Check whether NewFD is a valid overload for CUDA. Emits
/// diagnostics and invalidates NewFD if not.
void checkCUDATargetOverload(FunctionDecl *NewFD,
const LookupResult &Previous);
/// Copies target attributes from the template TD to the function FD.
void inheritCUDATargetAttrs(FunctionDecl *FD, const FunctionTemplateDecl &TD);
/// Returns the name of the launch configuration function. This is the name
/// of the function that will be called to configure kernel call, with the
/// parameters specified via <<<>>>.
std::string getCudaConfigureFuncName() const;
/// \name Code completion
//@{
/// Describes the context in which code completion occurs.
enum ParserCompletionContext {
/// Code completion occurs at top-level or namespace context.
PCC_Namespace,
/// Code completion occurs within a class, struct, or union.
PCC_Class,
/// Code completion occurs within an Objective-C interface, protocol,
/// or category.
PCC_ObjCInterface,
/// Code completion occurs within an Objective-C implementation or
/// category implementation.
PCC_ObjCImplementation,
/// Code completion occurs within the list of instance variables
/// in an Objective-C interface, protocol, category, or implementation.
PCC_ObjCInstanceVariableList,
/// Code completion occurs following one or more template
/// headers.
PCC_Template,
/// Code completion occurs following one or more template
/// headers within a class.
PCC_MemberTemplate,
/// Code completion occurs within an expression.
PCC_Expression,
/// Code completion occurs within a statement, which may
/// also be an expression or a declaration.
PCC_Statement,
/// Code completion occurs at the beginning of the
/// initialization statement (or expression) in a for loop.
PCC_ForInit,
/// Code completion occurs within the condition of an if,
/// while, switch, or for statement.
PCC_Condition,
/// Code completion occurs within the body of a function on a
/// recovery path, where we do not have a specific handle on our position
/// in the grammar.
PCC_RecoveryInFunction,
/// Code completion occurs where only a type is permitted.
PCC_Type,
/// Code completion occurs in a parenthesized expression, which
/// might also be a type cast.
PCC_ParenthesizedExpression,
/// Code completion occurs within a sequence of declaration
/// specifiers within a function, method, or block.
PCC_LocalDeclarationSpecifiers
};
// Code completion entry points for C/C++ constructs; each is invoked by the
// parser at the corresponding grammatical position.
void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path);
void CodeCompleteOrdinaryName(Scope *S,
ParserCompletionContext CompletionContext);
void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS,
bool AllowNonIdentifiers,
bool AllowNestedNameSpecifiers);
struct CodeCompleteExpressionData;
void CodeCompleteExpression(Scope *S,
const CodeCompleteExpressionData &Data);
void CodeCompleteExpression(Scope *S, QualType PreferredType,
bool IsParenthesized = false);
void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base, Expr *OtherOpBase,
SourceLocation OpLoc, bool IsArrow,
bool IsBaseExprStatement,
QualType PreferredType);
void CodeCompletePostfixExpression(Scope *S, ExprResult LHS,
QualType PreferredType);
void CodeCompleteTag(Scope *S, unsigned TagSpec);
void CodeCompleteTypeQualifiers(DeclSpec &DS);
void CodeCompleteFunctionQualifiers(DeclSpec &DS, Declarator &D,
const VirtSpecifiers *VS = nullptr);
void CodeCompleteBracketDeclarator(Scope *S);
void CodeCompleteCase(Scope *S);
/// Reports signatures for a call to CodeCompleteConsumer and returns the
/// preferred type for the current argument. Returned type can be null.
QualType ProduceCallSignatureHelp(Scope *S, Expr *Fn, ArrayRef<Expr *> Args,
SourceLocation OpenParLoc);
QualType ProduceConstructorSignatureHelp(Scope *S, QualType Type,
SourceLocation Loc,
ArrayRef<Expr *> Args,
SourceLocation OpenParLoc);
QualType ProduceCtorInitMemberSignatureHelp(Scope *S, Decl *ConstructorDecl,
CXXScopeSpec SS,
ParsedType TemplateTypeTy,
ArrayRef<Expr *> ArgExprs,
IdentifierInfo *II,
SourceLocation OpenParLoc);
void CodeCompleteInitializer(Scope *S, Decl *D);
void CodeCompleteAfterIf(Scope *S);
void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS,
bool EnteringContext, QualType BaseType);
void CodeCompleteUsing(Scope *S);
void CodeCompleteUsingDirective(Scope *S);
void CodeCompleteNamespaceDecl(Scope *S);
void CodeCompleteNamespaceAliasDecl(Scope *S);
void CodeCompleteOperatorName(Scope *S);
void CodeCompleteConstructorInitializer(
Decl *Constructor,
ArrayRef<CXXCtorInitializer *> Initializers);
void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro,
bool AfterAmpersand);
// Objective-C code completion.
void CodeCompleteObjCAtDirective(Scope *S);
void CodeCompleteObjCAtVisibility(Scope *S);
void CodeCompleteObjCAtStatement(Scope *S);
void CodeCompleteObjCAtExpression(Scope *S);
void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS);
void CodeCompleteObjCPropertyGetter(Scope *S);
void CodeCompleteObjCPropertySetter(Scope *S);
void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS,
bool IsParameter);
void CodeCompleteObjCMessageReceiver(Scope *S);
void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression);
void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression,
bool IsSuper = false);
void CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression,
ObjCInterfaceDecl *Super = nullptr);
void CodeCompleteObjCForCollection(Scope *S,
DeclGroupPtrTy IterationVar);
void CodeCompleteObjCSelector(Scope *S,
ArrayRef<IdentifierInfo *> SelIdents);
void CodeCompleteObjCProtocolReferences(
ArrayRef<IdentifierLocPair> Protocols);
void CodeCompleteObjCProtocolDecl(Scope *S);
void CodeCompleteObjCInterfaceDecl(Scope *S);
void CodeCompleteObjCSuperclass(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCImplementationDecl(Scope *S);
void CodeCompleteObjCInterfaceCategory(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCImplementationCategory(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCPropertyDefinition(Scope *S);
void CodeCompleteObjCPropertySynthesizeIvar(Scope *S,
IdentifierInfo *PropertyName);
void CodeCompleteObjCMethodDecl(Scope *S, Optional<bool> IsInstanceMethod,
ParsedType ReturnType);
void CodeCompleteObjCMethodDeclSelector(Scope *S,
bool IsInstanceMethod,
bool AtParameterName,
ParsedType ReturnType,
ArrayRef<IdentifierInfo *> SelIdents);
void CodeCompleteObjCClassPropertyRefExpr(Scope *S, IdentifierInfo &ClassName,
SourceLocation ClassNameLoc,
bool IsBaseExprStatement);
// Preprocessor code completion.
void CodeCompletePreprocessorDirective(bool InConditional);
void CodeCompleteInPreprocessorConditionalExclusion(Scope *S);
void CodeCompletePreprocessorMacroName(bool IsDefinition);
void CodeCompletePreprocessorExpression();
void CodeCompletePreprocessorMacroArgument(Scope *S,
IdentifierInfo *Macro,
MacroInfo *MacroInfo,
unsigned Argument);
void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled);
void CodeCompleteNaturalLanguage();
void CodeCompleteAvailabilityPlatformName();
void GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator,
CodeCompletionTUInfo &CCTUInfo,
SmallVectorImpl<CodeCompletionResult> &Results);
//@}
//===--------------------------------------------------------------------===//
// Extra semantic analysis beyond the C type system
public:
SourceLocation getLocationOfStringLiteralByte(const StringLiteral *SL,
unsigned ByteNo) const;
private:
void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr,
const ArraySubscriptExpr *ASE=nullptr,
bool AllowOnePastEnd=true, bool IndexNegated=false);
void CheckArrayAccess(const Expr *E);
// Used to grab the relevant information from a FormatAttr and a
// FunctionDeclaration.
struct FormatStringInfo {
unsigned FormatIdx;    // index of the format-string argument
unsigned FirstDataArg; // index of the first data argument
bool HasVAListArg;     // whether the callee takes a va_list
};
static bool getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember,
FormatStringInfo *FSI);
// Additional per-call-kind semantic checks, run after type checking.
bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall,
const FunctionProtoType *Proto);
bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc,
ArrayRef<const Expr *> Args);
bool CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall,
const FunctionProtoType *Proto);
bool CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto);
void CheckConstructorCall(FunctionDecl *FDecl,
ArrayRef<const Expr *> Args,
const FunctionProtoType *Proto,
SourceLocation Loc);
void checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto,
const Expr *ThisArg, ArrayRef<const Expr *> Args,
bool IsMemberFunction, SourceLocation Loc, SourceRange Range,
VariadicCallType CallType);
bool CheckObjCString(Expr *Arg);
ExprResult CheckOSLogFormatStringArg(Expr *Arg);
ExprResult CheckBuiltinFunctionCall(FunctionDecl *FDecl,
unsigned BuiltinID, CallExpr *TheCall);
void checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD, CallExpr *TheCall);
// Target-specific builtin call checking.
bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall,
unsigned MaxWidth);
bool CheckNeonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckAArch64BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckHexagonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckHexagonBuiltinCpu(unsigned BuiltinID, CallExpr *TheCall);
bool CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall);
bool CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinGatherScatterScale(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckPPCBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall);
bool SemaBuiltinVAStartARMMicrosoft(CallExpr *Call);
bool SemaBuiltinUnorderedCompare(CallExpr *TheCall);
bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs);
bool SemaBuiltinVSX(CallExpr *TheCall);
bool SemaBuiltinOSLogFormat(CallExpr *TheCall);
public:
// Used by C++ template instantiation.
ExprResult SemaBuiltinShuffleVector(CallExpr *TheCall);
ExprResult SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
private:
// Semantic checks for individual generic builtins.
bool SemaBuiltinPrefetch(CallExpr *TheCall);
bool SemaBuiltinAllocaWithAlign(CallExpr *TheCall);
bool SemaBuiltinAssume(CallExpr *TheCall);
bool SemaBuiltinAssumeAligned(CallExpr *TheCall);
bool SemaBuiltinLongjmp(CallExpr *TheCall);
bool SemaBuiltinSetjmp(CallExpr *TheCall);
ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult);
ExprResult SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult);
ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult,
AtomicExpr::AtomicOp Op);
ExprResult SemaBuiltinOperatorNewDeleteOverloaded(ExprResult TheCallResult,
bool IsDelete);
// Helpers for checking constant-expression builtin arguments.
bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum,
llvm::APSInt &Result);
bool SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, int Low,
int High, bool RangeIsError = true);
bool SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum,
unsigned Multiple);
bool SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall,
int ArgNum, unsigned ExpectedFieldNum,
bool AllowName);
public:
/// The format-string flavors understood by format diagnostics.
enum FormatStringType {
FST_Scanf,
FST_Printf,
FST_NSString,
FST_Strftime,
FST_Strfmon,
FST_Kprintf,
FST_FreeBSDKPrintf,
FST_OSTrace,
FST_OSLog,
FST_Unknown
};
static FormatStringType GetFormatStringType(const FormatAttr *Format);
bool FormatStringHasSArg(const StringLiteral *FExpr);
static bool GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx);
private:
// Check the arguments of a call against a FormatAttr (first overload) or
// against explicit format/data-argument indices (second overload).
bool CheckFormatArguments(const FormatAttr *Format,
ArrayRef<const Expr *> Args,
bool IsCXXMember,
VariadicCallType CallType,
SourceLocation Loc, SourceRange Range,
llvm::SmallBitVector &CheckedVarArgs);
bool CheckFormatArguments(ArrayRef<const Expr *> Args,
bool HasVAListArg, unsigned format_idx,
unsigned firstDataArg, FormatStringType Type,
VariadicCallType CallType,
SourceLocation Loc, SourceRange range,
llvm::SmallBitVector &CheckedVarArgs);
void CheckAbsoluteValueFunction(const CallExpr *Call,
const FunctionDecl *FDecl);
void CheckMaxUnsignedZero(const CallExpr *Call, const FunctionDecl *FDecl);
void CheckMemaccessArguments(const CallExpr *Call,
unsigned BId,
IdentifierInfo *FnName);
void CheckStrlcpycatArguments(const CallExpr *Call,
IdentifierInfo *FnName);
void CheckStrncatArguments(const CallExpr *Call,
IdentifierInfo *FnName);
void CheckReturnValExpr(Expr *RetValExp, QualType lhsType,
SourceLocation ReturnLoc,
bool isObjCMethod = false,
const AttrVec *Attrs = nullptr,
const FunctionDecl *FD = nullptr);
public:
void CheckFloatComparison(SourceLocation Loc, Expr *LHS, Expr *RHS);
private:
// Whole-expression checks run when an expression is completed.
void CheckImplicitConversions(Expr *E, SourceLocation CC = SourceLocation());
void CheckBoolLikeConversion(Expr *E, SourceLocation CC);
void CheckForIntOverflow(Expr *E);
void CheckUnsequencedOperations(Expr *E);
/// Perform semantic checks on a completed expression. This will either
/// be a full-expression or a default argument expression.
void CheckCompletedExpr(Expr *E, SourceLocation CheckLoc = SourceLocation(),
bool IsConstexpr = false);
void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field,
Expr *Init);
/// Check if there is a field shadowing.
void CheckShadowInheritedFields(const SourceLocation &Loc,
DeclarationName FieldName,
const CXXRecordDecl *RD,
bool DeclIsField = true);
/// Check if the given expression contains 'break' or 'continue'
/// statement that produces control flow different from GCC.
void CheckBreakContinueBinding(Expr *E);
/// Check whether receiver is mutable ObjC container which
/// attempts to add itself into the container
void CheckObjCCircularContainer(ObjCMessageExpr *Message);
void AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE);
void AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc,
bool DeleteWasArrayForm);
public:
/// Register a magic integral constant to be used as a type tag.
void RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind,
uint64_t MagicValue, QualType Type,
bool LayoutCompatible, bool MustBeNull);
/// Type information recorded for a registered type-tag magic value.
struct TypeTagData {
TypeTagData() {}
TypeTagData(QualType Type, bool LayoutCompatible, bool MustBeNull) :
Type(Type), LayoutCompatible(LayoutCompatible),
MustBeNull(MustBeNull)
{}
QualType Type;
/// If true, \c Type should be compared with other expression's types for
/// layout-compatibility.
unsigned LayoutCompatible : 1;
// See RegisterTypeTagForDatatype's \p MustBeNull parameter.
unsigned MustBeNull : 1;
};
/// A pair of ArgumentKind identifier and magic value. This uniquely
/// identifies the magic value.
typedef std::pair<const IdentifierInfo *, uint64_t> TypeTagMagicValue;
private:
/// A map from magic value to type information.
std::unique_ptr<llvm::DenseMap<TypeTagMagicValue, TypeTagData>>
TypeTagForDatatypeMagicValues;
/// Perform checks on a call of a function with argument_with_type_tag
/// or pointer_with_type_tag attributes.
void CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr,
const ArrayRef<const Expr *> ExprArgs,
SourceLocation CallSiteLoc);
/// Check if we are taking the address of a packed field
/// as this may be a problem if the pointer value is dereferenced.
void CheckAddressOfPackedMember(Expr *rhs);
/// The parser's current scope.
///
/// The parser maintains this state here.
Scope *CurScope;
// Lazily-populated identifier caches (mutable so const getters can fill
// them in; see getSuperIdentifier()/getFloat128Identifier() below).
mutable IdentifierInfo *Ident_super;
mutable IdentifierInfo *Ident___float128;
/// Nullability type specifiers.
IdentifierInfo *Ident__Nonnull = nullptr;
IdentifierInfo *Ident__Nullable = nullptr;
IdentifierInfo *Ident__Null_unspecified = nullptr;
IdentifierInfo *Ident_NSError = nullptr;
/// The handler for the FileChanged preprocessor events.
///
/// Used for diagnostics that implement custom semantic analysis for #include
/// directives, like -Wpragma-pack.
sema::SemaPPCallbacks *SemaPPCallbackHandler;
protected:
friend class Parser;
friend class InitializationSequence;
friend class ASTReader;
friend class ASTDeclReader;
friend class ASTWriter;
public:
/// Retrieve the keyword associated with the given nullability kind.
IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability);
/// The struct behind the CFErrorRef pointer.
RecordDecl *CFError = nullptr;
/// Retrieve the identifier "NSError".
IdentifierInfo *getNSErrorIdent();
/// Retrieve the parser's current scope.
///
/// This routine must only be used when it is certain that semantic analysis
/// and the parser are in precisely the same context, which is not the case
/// when, e.g., we are performing any kind of template instantiation.
/// Therefore, the only safe places to use this scope are in the parser
/// itself and in routines directly invoked from the parser and *never* from
/// template substitution or instantiation.
Scope *getCurScope() const { return CurScope; }
void incrementMSManglingNumber() const {
return CurScope->incrementMSManglingNumber();
}
/// Retrieve the identifier "super" (presumably cached in Ident_super).
IdentifierInfo *getSuperIdentifier() const;
/// Retrieve the identifier "__float128" (presumably cached in
/// Ident___float128).
IdentifierInfo *getFloat128Identifier() const;
Decl *getObjCDeclContext() const;
DeclContext *getCurLexicalContext() const {
return OriginalLexicalContext ? OriginalLexicalContext : CurContext;
}
/// Like getCurLexicalContext(), except that an Objective-C category is
/// mapped to the interface it extends, since a category implicitly has
/// the attributes of its class interface.
const DeclContext *getCurObjCLexicalContext() const {
  const DeclContext *Ctx = getCurLexicalContext();
  if (const auto *Category = dyn_cast<ObjCCategoryDecl>(Ctx))
    return Category->getClassInterface();
  return Ctx;
}
/// To be used for checking whether the arguments being passed to
/// function exceeds the number of parameters expected for it.
static bool TooManyArguments(size_t NumParams, size_t NumArgs,
                             bool PartialOverloading = false) {
  // During code completion (partial overloading) we may be sitting just
  // after a comma, so count the in-progress argument as an extra one.
  const size_t EffectiveArgs =
      (PartialOverloading && NumArgs > 0) ? NumArgs + 1 : NumArgs;
  return EffectiveArgs > NumParams;
}
// Emitting members of dllexported classes is delayed until the class
// (including field initializers) is fully parsed.
// SavePendingParsedClassStateRAII below asserts this list is empty when it
// saves/restores pending parsed-class state.
SmallVector<CXXRecordDecl*, 4> DelayedDllExportClasses;
private:
/// RAII object that swaps out the Sema's pending parsed-class state
/// (delayed exception-spec checks, defaulted-member exception specs and
/// delayed DLL-export classes) on construction, and swaps it back on
/// destruction, asserting that no new pending work was accumulated in
/// the meantime.
class SavePendingParsedClassStateRAII {
public:
SavePendingParsedClassStateRAII(Sema &S) : S(S) { swapSavedState(); }
~SavePendingParsedClassStateRAII() {
assert(S.DelayedOverridingExceptionSpecChecks.empty() &&
"there shouldn't be any pending delayed exception spec checks");
assert(S.DelayedEquivalentExceptionSpecChecks.empty() &&
"there shouldn't be any pending delayed exception spec checks");
assert(S.DelayedDefaultedMemberExceptionSpecs.empty() &&
"there shouldn't be any pending delayed defaulted member "
"exception specs");
assert(S.DelayedDllExportClasses.empty() &&
"there shouldn't be any pending delayed DLL export classes");
swapSavedState();
}
private:
Sema &S;
// Saved copies of the corresponding Sema members, held while the RAII
// object is alive.
decltype(DelayedOverridingExceptionSpecChecks)
SavedOverridingExceptionSpecChecks;
decltype(DelayedEquivalentExceptionSpecChecks)
SavedEquivalentExceptionSpecChecks;
decltype(DelayedDefaultedMemberExceptionSpecs)
SavedDefaultedMemberExceptionSpecs;
decltype(DelayedDllExportClasses) SavedDllExportClasses;
// Exchange the saved state with the live Sema state; called from both
// the constructor and the destructor.
void swapSavedState() {
SavedOverridingExceptionSpecChecks.swap(
S.DelayedOverridingExceptionSpecChecks);
SavedEquivalentExceptionSpecChecks.swap(
S.DelayedEquivalentExceptionSpecChecks);
SavedDefaultedMemberExceptionSpecs.swap(
S.DelayedDefaultedMemberExceptionSpecs);
SavedDllExportClasses.swap(S.DelayedDllExportClasses);
}
};
/// Helper class that collects misaligned member designations and
/// their location info for delayed diagnostics.
struct MisalignedMember {
Expr *E;             ///< The expression designating the member access.
RecordDecl *RD;      ///< The record the member belongs to.
ValueDecl *MD;       ///< The member being accessed.
CharUnits Alignment; ///< Alignment involved in the access (see
///< AddPotentialMisalignedMembers).
MisalignedMember() : E(), RD(), MD(), Alignment() {}
MisalignedMember(Expr *E, RecordDecl *RD, ValueDecl *MD,
CharUnits Alignment)
: E(E), RD(RD), MD(MD), Alignment(Alignment) {}
explicit MisalignedMember(Expr *E)
: MisalignedMember(E, nullptr, nullptr, CharUnits()) {}
// Entries are identified solely by their expression.
bool operator==(const MisalignedMember &m) { return this->E == m.E; }
};
/// Small set of gathered accesses to potentially misaligned members
/// due to the packed attribute.
SmallVector<MisalignedMember, 4> MisalignedMembers;
/// Adds an expression to the set of gathered misaligned members.
void AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD,
CharUnits Alignment);
public:
/// Diagnoses the current set of gathered accesses. This typically
/// happens at full expression level. The set is cleared after emitting the
/// diagnostics.
void DiagnoseMisalignedMembers();
/// This function checks if the expression is in the sef of potentially
/// misaligned members and it is converted to some pointer type T with lower
/// or equal alignment requirements. If so it removes it. This is used when
/// we do not want to diagnose such misaligned access (e.g. in conversions to
/// void*).
void DiscardMisalignedMemberAddress(const Type *T, Expr *E);
/// This function calls Action when it determines that E designates a
/// misaligned member due to the packed attribute. This is used to emit
/// local diagnostics like in reference binding.
void RefersToMemberWithReducedAlignment(
Expr *E,
llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)>
Action);
/// Describes the reason a calling convention specification was ignored, used
/// for diagnostics.
enum class CallingConventionIgnoredReason {
ForThisTarget = 0,
VariadicFunction,
ConstructorDestructor,
BuiltinFunction
};
};
/// RAII object that enters a new expression evaluation context.
class EnterExpressionEvaluationContext {
Sema &Actions;
// Whether a context was actually pushed; only then is one popped on exit.
bool Entered = true;
public:
// Push NewContext unless ShouldEnter is false.
EnterExpressionEvaluationContext(
Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
Decl *LambdaContextDecl = nullptr,
Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
Sema::ExpressionEvaluationContextRecord::EK_Other,
bool ShouldEnter = true)
: Actions(Actions), Entered(ShouldEnter) {
if (Entered)
Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl,
ExprContext);
}
// Push NewContext, reusing the enclosing lambda's context declaration.
// Always enters (Entered keeps its default of true).
EnterExpressionEvaluationContext(
Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
Sema::ReuseLambdaContextDecl_t,
Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
Sema::ExpressionEvaluationContextRecord::EK_Other)
: Actions(Actions) {
Actions.PushExpressionEvaluationContext(
NewContext, Sema::ReuseLambdaContextDecl, ExprContext);
}
enum InitListTag { InitList };
// Variant for braced-init-lists: only pushes a context (UnevaluatedList)
// when currently in an unevaluated C++11 context; otherwise no-op.
EnterExpressionEvaluationContext(Sema &Actions, InitListTag,
bool ShouldEnter = true)
: Actions(Actions), Entered(false) {
// In C++11 onwards, narrowing checks are performed on the contents of
// braced-init-lists, even when they occur within unevaluated operands.
// Therefore we still need to instantiate constexpr functions used in such
// a context.
if (ShouldEnter && Actions.isUnevaluatedContext() &&
Actions.getLangOpts().CPlusPlus11) {
Actions.PushExpressionEvaluationContext(
Sema::ExpressionEvaluationContext::UnevaluatedList);
Entered = true;
}
}
~EnterExpressionEvaluationContext() {
if (Entered)
Actions.PopExpressionEvaluationContext();
}
};
DeductionFailureInfo
MakeDeductionFailureInfo(ASTContext &Context, Sema::TemplateDeductionResult TDK,
sema::TemplateDeductionInfo &Info);
/// Contains a late templated function.
/// Will be parsed at the end of the translation unit, used by Sema & Parser.
struct LateParsedTemplate {
// The raw token stream of the function body, replayed when the template
// is finally parsed.
CachedTokens Toks;
/// The template function declaration to be late parsed.
Decl *D;
};
} // end namespace clang
namespace llvm {
// Hash a FunctionDeclAndLoc by looking at both its FunctionDecl and its
// SourceLocation.
template <> struct DenseMapInfo<clang::Sema::FunctionDeclAndLoc> {
using FunctionDeclAndLoc = clang::Sema::FunctionDeclAndLoc;
// Delegate the FunctionDecl part to the canonical-decl-pointer traits so
// empty/tombstone sentinels come from there.
using FDBaseInfo = DenseMapInfo<clang::CanonicalDeclPtr<clang::FunctionDecl>>;
static FunctionDeclAndLoc getEmptyKey() {
return {FDBaseInfo::getEmptyKey(), clang::SourceLocation()};
}
static FunctionDeclAndLoc getTombstoneKey() {
return {FDBaseInfo::getTombstoneKey(), clang::SourceLocation()};
}
// Combine the decl hash with the raw location encoding so two entries for
// the same function at different locations hash differently.
static unsigned getHashValue(const FunctionDeclAndLoc &FDL) {
return hash_combine(FDBaseInfo::getHashValue(FDL.FD),
FDL.Loc.getRawEncoding());
}
static bool isEqual(const FunctionDeclAndLoc &LHS,
const FunctionDeclAndLoc &RHS) {
return LHS.FD == RHS.FD && LHS.Loc == RHS.Loc;
}
};
} // namespace llvm
#endif
|
reduction_team.c | #include <stdio.h>
#include <omp.h>
#define N 1000000ll
#define SUM (N * (N-1)/2)
/* Verify the host-side copy of the reduction result against the closed-form
   sum SUM.  A host mismatch is printed and counted in *errors; the overall
   verdict line reflects both the host check and the device-side gpu_error
   flag. */
void checkHost(int gpu_error, int* errors, long long a){
    int host_ok = 1;
    if (a != SUM) {
        printf ("Host - Incorrect result = %lld, expected = %lld!\n", a, SUM);
        host_ok = 0;
        ++(*errors);
    }
    /* Success only when both the host and device checks passed. */
    printf(host_ok && !gpu_error ? "-----> Success\n" : "-----> Failure\n");
}
/* Run the N-element integer sum as an OpenMP target-teams reduction with the
   requested team count and thread limit, check the result on the device,
   then re-check the mapped-back value on the host via checkHost.
   Fix: gpu_error must be mapped tofrom.  As an unmapped scalar it is
   implicitly firstprivate in the target region (OpenMP 4.5+), so the
   device-side "gpu_error = 1" assignment would never reach the host check
   and device failures would be silently reported as successes. */
void reduction(int num_teams, const int num_threads, int* errors){
    long long a = 0;
    int gpu_error = 0;
    #pragma omp target teams num_teams(num_teams) thread_limit(num_threads) map(tofrom: a, gpu_error)
    {
        long long i;
        #pragma omp parallel for reduction(+:a)
        for (i = 0; i < N; i++) {
            a += i;
        }
        if (a != SUM){
            printf ("GPU - Incorrect result = %lld, expected = %lld!\n", a, SUM);
            gpu_error = 1;
        }
    }
    checkHost(gpu_error, errors, a);
}
/* Same as reduction() but with a literal thread_limit(256): per the note in
   main(), a constant is required for the limit to exceed 256 on this
   toolchain, so num_threads is accepted but deliberately unused.
   Fix: map gpu_error tofrom; unmapped scalars are implicitly firstprivate
   in target regions, so the device's error flag was previously lost. */
void reduction_256(int num_teams, const int num_threads, int* errors){
    long long a = 0;
    int gpu_error = 0;
    #pragma omp target teams num_teams(num_teams) thread_limit(256) map(tofrom: a, gpu_error)
    {
        long long i;
        #pragma omp parallel for reduction(+:a)
        for (i = 0; i < N; i++) {
            a += i;
        }
        if (a != SUM){
            printf ("GPU - Incorrect result = %lld, expected = %lld!\n", a, SUM);
            gpu_error = 1;
        }
    }
    checkHost(gpu_error, errors, a);
}
/* Same as reduction() but with a literal thread_limit(512); num_threads is
   accepted for a uniform call shape but deliberately unused (a constant is
   required for limits above 256 -- see main()).
   Fix: map gpu_error tofrom; unmapped scalars are implicitly firstprivate
   in target regions, so the device's error flag was previously lost. */
void reduction_512(int num_teams, const int num_threads, int* errors){
    long long a = 0;
    int gpu_error = 0;
    #pragma omp target teams num_teams(num_teams) thread_limit(512) map(tofrom: a, gpu_error)
    {
        long long i;
        #pragma omp parallel for reduction(+:a)
        for (i = 0; i < N; i++) {
            a += i;
        }
        if (a != SUM){
            printf ("GPU - Incorrect result = %lld, expected = %lld!\n", a, SUM);
            gpu_error = 1;
        }
    }
    checkHost(gpu_error, errors, a);
}
/* Same as reduction() but with a literal thread_limit(1024); num_threads is
   accepted for a uniform call shape but deliberately unused (a constant is
   required for limits above 256 -- see main()).
   Fix: map gpu_error tofrom; unmapped scalars are implicitly firstprivate
   in target regions, so the device's error flag was previously lost. */
void reduction_1024(int num_teams, const int num_threads, int* errors){
    long long a = 0;
    int gpu_error = 0;
    #pragma omp target teams num_teams(num_teams) thread_limit(1024) map(tofrom: a, gpu_error)
    {
        long long i;
        #pragma omp parallel for reduction(+:a)
        for (i = 0; i < N; i++) {
            a += i;
        }
        if (a != SUM){
            printf ("GPU - Incorrect result = %lld, expected = %lld!\n", a, SUM);
            gpu_error = 1;
        }
    }
    checkHost(gpu_error, errors, a);
}
/* Driver: run the team reduction with 1 team and increasing thread limits,
   accumulating failures in `errors`, then report an overall verdict.
   Returns 0 on success, -1 if any run produced a wrong sum.
   Fixes: removed the unused local `gpu_error` (the per-run flag lives in
   the reduction functions) and corrected the "OCCURED" spelling in the
   failure message. */
int main (void)
{
    int errors = 0;
    printf("\n---------- 1 Team with Variable Threads ----------\n");
    printf("\nRunning 1 Team with 64 threads per team\n");
    reduction(1, 64, &errors);
    printf("\nRunning 1 Team with 128 threads per team\n");
    reduction(1, 128, &errors);
    /* Have to call a different function to use a constant for num_threads
       because a variable will not allow the num_threads to go above 256 */
    printf("\nRunning 1 Team with 256 threads per team\n");
    reduction_256(1, 256, &errors);
    printf("\nRunning 1 Team with 512 threads per team\n");
    reduction_512(1, 512, &errors);
    printf("\nRunning 1 Team with 1024 threads per team\n");
    reduction_1024(1, 1024, &errors);
    if (!errors) {
        printf("\nRESULT: ALL RUNS SUCCESSFUL!\n");
        return 0;
    }
    printf("\nRESULT: FAILURES OCCURRED!\n");
    return -1;
}
|
DRB104-nowait-barrier-orig-no.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
This example is based on one code snippet extracted from a paper:
Ma etc. Symbolic Analysis of Concurrency Errors in OpenMP Programs, ICPP 2013
Explicit barrier to counteract nowait
*/
#include <stdio.h>
#include <assert.h>
/* DataRaceBench DRB104 ("no race" variant): demonstrates that the implicit
   barrier at the end of a worksharing construct / parallel region makes the
   later sequential read of a[9] race-free.  This is benchmark code whose
   exact region structure is the point of the test -- do not restructure. */
int main()
{
int i,error;
int len = 1000;
int a[len], b=5;
/* Initialize a[] in parallel; the region's implicit barrier guarantees
   all writes complete before the next region begins. */
#pragma omp parallel for private(i)
for (i=0; i<len; i++)
a[i]= i;
#pragma omp parallel shared(b, error)
{
/* The omp for has no nowait, so its implicit barrier (and the region's
   own closing barrier) order every a[i] update before the region exits. */
#pragma omp for private(i)
for(i = 0; i < len; i++)
a[i] = b + a[i]*5;
}
/* Sequential code, executed after both regions: a[9] == 5 + 9*5 == 50. */
error = a[9] + 1;
assert (error == 51);
printf ("error = %d\n", error);
return 0;
}
|
mergeSort.c | #include <stdio.h>
#include <stdlib.h>
#include <omp.h>
/* Merge the two sorted halves array[start..middle] and array[middle+1..end]
   (middle = (start+end)/2) back into array[start..end], using a heap
   scratch buffer of the same length. */
void merge_array(int *array, int start, int end)
{
    int middle = (start + end) / 2;
    int *scratch = (int *)malloc(sizeof(int) * (end - start + 1));
    int out = 0;            /* next free slot in scratch        */
    int lo = start;         /* cursor into the left half        */
    int hi = middle + 1;    /* cursor into the right half       */

    /* Take the smaller head element while both halves are non-empty. */
    while (lo <= middle && hi <= end)
        scratch[out++] = (array[lo] < array[hi]) ? array[lo++] : array[hi++];

    /* Drain whichever half still has elements (at most one loop runs). */
    while (lo <= middle)
        scratch[out++] = array[lo++];
    while (hi <= end)
        scratch[out++] = array[hi++];

    /* Copy the merged run back into place and release the scratch space. */
    for (int k = start; k <= end; k++)
        array[k] = scratch[k - start];
    free(scratch);
}
/* Recursive worker for mergeSort.  Runs inside the single OpenMP parallel
   region created by the public entry point (or serially when OpenMP is
   disabled).  Sorts array[start..end] in place. */
static void mergeSort_worker(int *array, int start, int end)
{
    if (start < end)
    {
        int middle = (start + end) / 2;
        /* Sort the halves as independent tasks; the if() clause cuts off
           task creation for small ranges so task overhead does not dominate. */
        #pragma omp task shared(array) if (end - start > 1000)
        mergeSort_worker(array, start, middle);
        #pragma omp task shared(array) if (end - start > 1000)
        mergeSort_worker(array, middle + 1, end);
        /* Both halves must be fully sorted before merging them. */
        #pragma omp taskwait
        merge_array(array, start, end);
    }
}

/* Parallel merge sort of array[start..end] (in place).
   Fix: the original opened a fresh "#pragma omp parallel" region at every
   recursion level, which either spawns nested parallel regions or (the
   usual default) serializes all of them to one thread.  Create the team
   once here and let tasks carry the recursion instead. */
void mergeSort(int *array, int start, int end)
{
    #pragma omp parallel
    #pragma omp single
    mergeSort_worker(array, start, end);
}
parallelReadTiff.c | //#include "tiffio.h"
#include <stdio.h>
#include <stdint.h>
#include "tiffio.h"
#include "omp.h"
#include "mex.h"
//mex -v COPTIMFLAGS="-O3 -fwrapv -DNDEBUG" CFLAGS='$CFLAGS -O3 -fopenmp' LDFLAGS='$LDFLAGS -O3 -fopenmp' '-I/global/home/groups/software/sl-7.x86_64/modules/libtiff/4.1.0/libtiff/' '-L/global/home/groups/software/sl-7.x86_64/modules/libtiff/4.1.0/libtiff/' -ltiff /clusterfs/fiona/matthewmueller/parallelTiffTesting/main.c
//mex COMPFLAGS='$COMPFLAGS /openmp' '-IC:\Program Files (x86)\tiff\include\' '-LC:\Program Files (x86)\tiff\lib\' -ltiffd.lib C:\Users\Matt\Documents\parallelTiff\main.cpp
/* libtiff warning/error handler that deliberately discards every message.
   Installed via TIFFSetWarningHandler in mexFunction so libtiff noise does
   not reach the MATLAB console. */
void DummyHandler(const char* module, const char* fmt, va_list ap)
{
// ignore errors and warnings
}
/* Allocate an uninitialized buffer for x samples of the given bit width:
   8/16 -> uint8/uint16, 32 -> float, 64 -> double.  Any other width falls
   back to float with a diagnostic.  Caller owns the memory (free()). */
void* mallocDynamic(uint64_t x, uint64_t bits){
    if (bits == 8)
        return malloc(x*sizeof(uint8_t));
    if (bits == 16)
        return malloc(x*sizeof(uint16_t));
    if (bits == 32)
        return malloc(x*sizeof(float));
    if (bits == 64)
        return malloc(x*sizeof(double));
    printf("Image is not 8/16 bit, single, or double. Using single.");
    return malloc(x*sizeof(float));
}
/* Read z directories (slices), starting at startSlice, from a striped TIFF
   into the pre-allocated output `tiff`, transposing x/y so the layout is
   MATLAB column-major.  The slice range is split into one contiguous batch
   per OpenMP thread; every worker opens a private TIFF handle because
   libtiff handles must not be shared across threads.  The first failure is
   recorded and reported once at the end via mexErrMsgIdAndTxt.
   Fixes over the original:
   - a failed TIFFOpen now skips that worker's batch instead of falling
     through and calling TIFFSetDirectory/TIFFClose on a NULL handle;
   - the error string is built with bounded snprintf;
   - the int64_t directory index is printed with %lld instead of %d (UB). */
void readTiffParallel(uint64_t x, uint64_t y, uint64_t z, char* fileName, void* tiff, uint64_t bits, uint64_t startSlice, uint64_t stripSize){
    int32_t numWorkers = omp_get_max_threads();
    int32_t batchSize = (z-1)/numWorkers+1;   /* ceil(z / numWorkers) */
    int32_t w;
    uint8_t err = 0;
    char errString[10000];
    #pragma omp parallel for
    for(w = 0; w < numWorkers; w++){
        TIFF* tif = TIFFOpen(fileName, "r");
        if(!tif){
            #pragma omp critical
            {
                err = 1;
                snprintf(errString, sizeof(errString),
                         "Thread %d: File \"%s\" cannot be opened\n", w, fileName);
            }
            continue;   /* nothing can be read without an open handle */
        }
        void* buffer = mallocDynamic(x*stripSize, bits);
        for(int64_t dir = startSlice+(w*batchSize); dir < startSlice+((w+1)*batchSize); dir++){
            if(dir>=z+startSlice || err) break;
            /* Directory opens can fail transiently; retry up to 3 times. */
            uint8_t counter = 0;
            while(!TIFFSetDirectory(tif, (uint64_t)dir) && counter<3){
                printf("Thread %d: File \"%s\" Directory \"%lld\" failed to open. Try %d\n",
                       w, fileName, (long long)dir, counter+1);
                counter++;
            }
            for (int64_t i = 0; i*stripSize < y; i++)
            {
                /* Load one strip, then scatter it transposed into the output:
                   out[(j*y + k + i*stripSize) + slice*x*y] = buf[j + k*x]. */
                switch(bits){
                    case 8:
                        TIFFReadEncodedStrip(tif, i,(uint8_t*)buffer, stripSize*x*(bits/8));
                        for(int64_t k = 0; k < stripSize; k++){
                            if((k+(i*stripSize)) >= y) break;
                            for(int64_t j = 0; j < x; j++){
                                ((uint8_t*)tiff)[((j*y)+(k+(i*stripSize)))+((dir-startSlice)*(x*y))] = ((uint8_t*)buffer)[j+(k*x)];
                            }
                        }
                        break;
                    case 16:
                        TIFFReadEncodedStrip(tif, i,(uint16_t*)buffer, stripSize*x*(bits/8));
                        for(int64_t k = 0; k < stripSize; k++){
                            if((k+(i*stripSize)) >= y) break;
                            for(int64_t j = 0; j < x; j++){
                                ((uint16_t*)tiff)[((j*y)+(k+(i*stripSize)))+((dir-startSlice)*(x*y))] = ((uint16_t*)buffer)[j+(k*x)];
                            }
                        }
                        break;
                    case 32:
                        TIFFReadEncodedStrip(tif, i,(float*)buffer, stripSize*x*(bits/8));
                        for(int64_t k = 0; k < stripSize; k++){
                            if((k+(i*stripSize)) >= y) break;
                            for(int64_t j = 0; j < x; j++){
                                ((float*)tiff)[((j*y)+(k+(i*stripSize)))+((dir-startSlice)*(x*y))] = ((float*)buffer)[j+(k*x)];
                            }
                        }
                        break;
                    case 64:
                        TIFFReadEncodedStrip(tif, i,(double*)buffer, stripSize*x*(bits/8));
                        for(int64_t k = 0; k < stripSize; k++){
                            if((k+(i*stripSize)) >= y) break;
                            for(int64_t j = 0; j < x; j++){
                                ((double*)tiff)[((j*y)+(k+(i*stripSize)))+((dir-startSlice)*(x*y))] = ((double*)buffer)[j+(k*x)];
                            }
                        }
                        break;
                }
            }
        }
        free(buffer);
        TIFFClose(tif);
    }
    if(err) mexErrMsgIdAndTxt("tiff:threadError",errString);
}
/* MATLAB entry point: tiff = parallelReadTiff(fileName [, range]).
   Probes the TIFF for width/height/slice-count/bit-depth, allocates the
   matching MATLAB array, and delegates the bulk read to readTiffParallel.
   With a second argument [first last] (1-based, inclusive), only that
   slice range is read. */
void mexFunction(int nlhs, mxArray *plhs[],
int nrhs, const mxArray *prhs[])
{
char* fileName = mxArrayToString(prhs[0]);
/* Silence libtiff warnings for the whole session. */
TIFFSetWarningHandler(DummyHandler);
TIFF* tif = TIFFOpen(fileName, "r");
if(!tif) mexErrMsgIdAndTxt("tiff:inputError","File \"%s\" cannot be opened",fileName);
/* NOTE(review): TIFFGetField writes tag-sized values (uint32/uint16) into
   these uint64_t variables; the 1-initialization plus little-endian layout
   makes this work here, but it is endianness-sensitive -- confirm. */
uint64_t x = 1,y = 1,z = 1,bits = 1, startSlice = 0;
TIFFGetField(tif, TIFFTAG_IMAGEWIDTH, &x);
TIFFGetField(tif, TIFFTAG_IMAGELENGTH, &y);
if(nrhs == 1){
/* No range given: find the number of directories.  First grow t by 8x
   until TIFFSetDirectory fails (s = last known-good index), ... */
uint16_t s = 0, m = 0, t = 1;
while(TIFFSetDirectory(tif,t)){
s = t;
t *= 8;
if(s > t){
/* uint16 overflow: clamp the search ceiling. */
t = 65535;
printf("Number of slices > 32768");
break;
}
}
/* ...then binary-search the last valid directory between s and t. */
while(s != t){
m = (s+t+1)/2;
if(TIFFSetDirectory(tif,m)){
s = m;
}
else{
if(m > 0) t = m-1;
else t = m;
}
}
z = s+1;   /* directories are 0-based, so the count is last index + 1 */
}
else{
if(mxGetN(prhs[1]) != 2){
mexErrMsgIdAndTxt("tiff:inputError","Input range is not 2");
}
else{
/* Convert the 1-based inclusive MATLAB range to 0-based start + count. */
startSlice = (uint64_t)*(mxGetPr(prhs[1]))-1;
z = (uint64_t)*((mxGetPr(prhs[1])+1))-startSlice;
if (!TIFFSetDirectory(tif,startSlice+z-1) || !TIFFSetDirectory(tif,startSlice)){
mexErrMsgIdAndTxt("tiff:rangeOutOfBound","Range is out of bounds");
}
}
}
TIFFGetField(tif, TIFFTAG_BITSPERSAMPLE, &bits);
uint64_t stripSize = 1;
TIFFGetField(tif, TIFFTAG_ROWSPERSTRIP, &stripSize);
TIFFClose(tif);
/* MATLAB is column-major: first dimension is y (rows). */
uint64_t dim[3];
dim[0] = y;
dim[1] = x;
dim[2] = z;
/* Allocate the output with the class matching the sample bit depth and
   run the parallel read. */
if(bits == 8){
plhs[0] = mxCreateNumericArray(3,dim,mxUINT8_CLASS, mxREAL);
uint8_t* tiff = (uint8_t*)mxGetPr(plhs[0]);
readTiffParallel(x,y,z,fileName, (void*)tiff, bits, startSlice, stripSize);
}
else if(bits == 16){
plhs[0] = mxCreateNumericArray(3,dim,mxUINT16_CLASS, mxREAL);
uint16_t* tiff = (uint16_t*)mxGetPr(plhs[0]);
readTiffParallel(x,y,z,fileName, (void*)tiff, bits, startSlice, stripSize);
}
else if(bits == 32){
plhs[0] = mxCreateNumericArray(3,dim,mxSINGLE_CLASS, mxREAL);
float* tiff = (float*)mxGetPr(plhs[0]);
readTiffParallel(x,y,z,fileName, (void*)tiff, bits, startSlice, stripSize);
}
else if(bits == 64){
plhs[0] = mxCreateNumericArray(3,dim,mxDOUBLE_CLASS, mxREAL);
double* tiff = (double*)mxGetPr(plhs[0]);
readTiffParallel(x,y,z,fileName, (void*)tiff, bits, startSlice, stripSize);
}
else{
mexErrMsgIdAndTxt("tiff:dataTypeError","Data type not suppported");
}
}
|
vmult_1_save.c | #define _TH_1 2
#include <omp.h>
/* Generated CSR sparse matrix-vector kernel: for each row,
   dst[row] += sum over the row's nonzeros of val[j] * src[col[j]],
   with rows distributed across _TH_1 (= 2) OpenMP threads.
   NOTE(review): the BLAS-like parameters M, N, alpha, beta, incX and incY
   are accepted but unused by this generated variant -- presumably kept for
   signature compatibility with sibling vmult variants; confirm against the
   generator.  The @;BEGIN(...)@ markers are tool annotations and must be
   preserved verbatim. */
void vmult_1(const int M,const int N,const double alpha,const double* val,const int n_rows,const double* src,const int incX,const double beta,double* dst,const int incY,int* rowstart,int* col) {
int row;int j;
/*@;BEGIN(_decl1=DeclStmt)@*/double _dst_cp_0;
/* Team size fixed by the code generator. */
omp_set_num_threads(_TH_1);
#pragma omp parallel
{
/* _dst_cp_0 is a private scalar accumulator so the inner loop does not
   repeatedly store to dst[row]. */
/*@;BEGIN(Nest1=Nest)@*/#pragma omp for private(row,j,_dst_cp_0)
for (row=0; row<n_rows; row+=1)
{
_dst_cp_0 = dst[row];
/*@;BEGIN(Nest2=Nest)@*/for (j=rowstart[row]; j<rowstart[row+1]; j+=1)
{
_dst_cp_0 = _dst_cp_0+val[j]*src[col[j]];
}
dst[row] = _dst_cp_0;
}
}
}
|
for-15.c | /* { dg-do compile } */
/* { dg-options "-fopenmp" } */
/* GCC dg compile-only test: an OpenMP for loop whose bound has a wider
   type (long) than the int induction variable, written with both the
   "i < n" and the reversed "n > i" condition forms.  The exact loop shapes
   are what is being tested -- do not restructure. */
void foo()
{
long n = 10;
int i;
#pragma omp for
for (i=0; i < n; ++i) ;
#pragma omp for
for (i=0; n > i; ++i) ;
}
|
lud_omp.c | #include <stdio.h>
#include <omp.h>
extern int omp_num_threads;
#define BS 16
#define AA(_i,_j) a[offset*size+_i*size+_j+offset]
#define BB(_i,_j) a[_i*size+_j]
#ifdef OMP_OFFLOAD
#pragma offload_attribute(push, target(mic))
#endif
/* LU-factorize (Doolittle, no pivoting) the 16x16 diagonal tile of the
   size x size matrix `a` whose top-left corner sits at (offset, offset).
   The tile is overwritten in place with its unit-lower L (diagonal
   implicit) and upper U factors. */
void lud_diagonal_omp (float* a, int size, int offset)
{
    const int bs = 16;                      /* tile size; mirrors the file-level BS macro */
    float *blk = a + offset*size + offset;  /* top-left element of the tile */
    for (int i = 0; i < bs; i++) {
        /* Eliminate into row i of U: U(i,j) -= sum_k L(i,k)*U(k,j). */
        for (int j = i; j < bs; j++) {
            for (int k = 0; k < i; k++)
                blk[i*size + j] -= blk[i*size + k] * blk[k*size + j];
        }
        /* Build column i of L, scaled by 1/U(i,i). */
        float inv_pivot = 1.f/blk[i*size + i];
        for (int j = i+1; j < bs; j++) {
            for (int k = 0; k < i; k++)
                blk[j*size + i] -= blk[j*size + k] * blk[k*size + i];
            blk[j*size + i] *= inv_pivot;
        }
    }
}
#ifdef OMP_OFFLOAD
#pragma offload_attribute(pop)
#endif
// implements block LU factorization
/* Right-looking blocked LU (no pivoting) over BS x BS tiles of the
   size x size matrix `a`.  Per block step: factor the diagonal tile, update
   the top/left perimeter tiles in parallel, then rank-update the interior
   tiles in parallel.
   NOTE: when OMP_OFFLOAD is defined, the opening '{' after the target
   pragma is closed by the '}' inside the matching #ifdef at the end of the
   function -- braces pair across preprocessor branches, so do not
   restructure this function mechanically. */
void lud_omp(float *a, int size)
{
int offset, chunk_idx, size_inter, chunks_in_inter_row, chunks_per_inter;
#ifdef OMP_OFFLOAD
#pragma omp target map(to: size) map(a[0:size*size])
#endif
#ifdef OMP_OFFLOAD
{
omp_set_num_threads(224);
#else
printf("running OMP on host\n");
omp_set_num_threads(omp_num_threads);
#endif
/* One iteration per block column; the final (possibly partial) diagonal
   tile is factored after the loop. */
for (offset = 0; offset < size - BS ; offset += BS)
{
// lu factorization of left-top corner block diagonal matrix
//
lud_diagonal_omp(a, size, offset);
size_inter = size - offset - BS;
chunks_in_inter_row = size_inter/BS;
// calculate perimeter block matrices
//
/* NOTE(review): chunks_per_inter is listed in shared() here but is only
   assigned later; it is unused in this first region, so this is harmless
   but confusing. */
#pragma omp parallel for default(none) \
private(chunk_idx) shared(size, chunks_per_inter, chunks_in_inter_row, offset, a)
for ( chunk_idx = 0; chunk_idx < chunks_in_inter_row; chunk_idx++)
{
int i, j, k, i_global, j_global, i_here, j_here;
float sum;
/* Cache the diagonal tile (L and U factors) in an aligned local copy. */
float temp[BS*BS] __attribute__ ((aligned (64)));
for (i = 0; i < BS; i++) {
#pragma omp simd
for (j =0; j < BS; j++){
temp[i*BS + j] = a[size*(i + offset) + offset + j ];
}
}
i_global = offset;
j_global = offset;
// processing top perimeter
//
/* Solve L * X = A_top for this chunk's tile in the block row. */
j_global += BS * (chunk_idx+1);
for (j = 0; j < BS; j++) {
for (i = 0; i < BS; i++) {
sum = 0.f;
for (k=0; k < i; k++) {
sum += temp[BS*i +k] * BB((i_global+k),(j_global+j));
}
i_here = i_global + i;
j_here = j_global + j;
BB(i_here, j_here) = BB(i_here,j_here) - sum;
}
}
// processing left perimeter
//
/* Solve X * U = A_left for this chunk's tile in the block column,
   dividing by the diagonal pivot of U. */
j_global = offset;
i_global += BS * (chunk_idx + 1);
for (i = 0; i < BS; i++) {
for (j = 0; j < BS; j++) {
sum = 0.f;
for (k=0; k < j; k++) {
sum += BB((i_global+i),(j_global+k)) * temp[BS*k + j];
}
i_here = i_global + i;
j_here = j_global + j;
a[size*i_here + j_here] = ( a[size*i_here+j_here] - sum ) / a[size*(offset+j) + offset+j];
}
}
}
// update interior block matrices
//
/* Each chunk is one interior tile: A_ij -= L_i * U_j. */
chunks_per_inter = chunks_in_inter_row*chunks_in_inter_row;
#pragma omp parallel for schedule(auto) default(none) \
private(chunk_idx ) shared(size, chunks_per_inter, chunks_in_inter_row, offset, a)
for (chunk_idx =0; chunk_idx < chunks_per_inter; chunk_idx++)
{
int i, j, k, i_global, j_global;
float temp_top[BS*BS] __attribute__ ((aligned (64)));
float temp_left[BS*BS] __attribute__ ((aligned (64)));
float sum[BS] __attribute__ ((aligned (64))) = {0.f};
/* Map the linear chunk index onto the 2-D interior tile grid. */
i_global = offset + BS * (1 + chunk_idx/chunks_in_inter_row);
j_global = offset + BS * (1 + chunk_idx%chunks_in_inter_row);
/* Stage the perimeter tiles feeding this update into aligned buffers. */
for (i = 0; i < BS; i++) {
#pragma omp simd
for (j =0; j < BS; j++){
temp_top[i*BS + j] = a[size*(i + offset) + j + j_global ];
temp_left[i*BS + j] = a[size*(i + i_global) + offset + j];
}
}
/* Tile GEMM: accumulate a full row of products, then subtract. */
for (i = 0; i < BS; i++)
{
for (k=0; k < BS; k++) {
#pragma omp simd
for (j = 0; j < BS; j++) {
sum[j] += temp_left[BS*i + k] * temp_top[BS*k + j];
}
}
#pragma omp simd
for (j = 0; j < BS; j++) {
BB((i+i_global),(j+j_global)) -= sum[j];
sum[j] = 0.f;
}
}
}
}
/* Factor the final trailing diagonal tile. */
lud_diagonal_omp(a, size, offset);
#ifdef OMP_OFFLOAD
}
#endif
}
|
smecy_mcapi.h | /* SMECY low-level runtime implementation with MCAPI
Ronan.Keryell@silkan.com
*/
/* For the final pause() */
#include <unistd.h>
/* For malloc() and atexit() */
#include <stdlib.h>
/* For memcpy() */
#include <string.h>
/* For... assert() :-) */
#include <assert.h>
/* Needed to display verbose MCAPI error messages: */
#include <stdio.h>
#include <stdarg.h>
#ifdef SMECY_MCAPI_HOST
/* For SMECY_MCAPI_connection */
#include <stdbool.h>
/* To have a lock to protect against concurrent execution on the same
accelerator: */
#include <omp.h>
#endif
/* To use MCAPI from the MultiCore Association */
#include<mcapi.h>
#include<mca.h>
/* Useful for debugging :
void mcapi_set_debug_level (int d) {
mca_set_debug_level (d);
}
void mcapi_display_state (void* handle) {
mcapi_trans_display_state(handle);
}
*/
#ifdef MCAPI_STHORM
/* Use the API to deal with threads on the fabric: */
#include "mcapi_fabric_helper.h"
/* For some timing functions */
//#include "mcapi_time_helper.h"
#endif
#if defined(MCAPI_STHORM)
/* Use the STHORM tracing API... */
#include "mcapi_trace_helper.h"
/* To display strings only in tracing mode: */
#ifdef SMECY_MCAPI_CHECK_TRACE
#define MCAPI_TRACE_MODE_ONLY_C(trace) MCAPI_TRACE_C(trace)
#else
#define MCAPI_TRACE_MODE_ONLY_C(trace)
#endif
#else
/* ...or not */
#define MCAPI_TRACE(args, ...)
#define MCAPI_TRACE_C(trace)
#define MCAPI_TRACE_CI(trace,val)
#define MCAPI_TRACE_CF(trace,val)
#define MCAPI_TRACE_CC(trace,msg)
#define MCAPI_TRACE_CP(trace,add)
#define MCAPI_TRACE_CS(trace,status)
#endif
/* MCAPI initialization parameters */
#ifdef MCAPI_STHORM
/* This is how we specify the library to launch on the STHORM accelerator
fabric */
static mcapi_param_t SMECY_mcapi_param_init = "libsmecy_accel_fabric.so";
#define SMECY_MCAPI_PARAM_INIT (&SMECY_mcapi_param_init)
#else
#define SMECY_MCAPI_PARAM_INIT NULL
#endif
/* A macro to get debug information on a channel handle such as
receive_gate */
#ifdef MCAPI_STHORM
/* Peek into the implementation-specific mcapi_chan_hndl_t to get a
field: */
#define SMECY_CHAN_INFO(handle) ((intptr_t)handle.queueDesc)
#else
/* Else, guess it is a pointer or int-like type: */
#define SMECY_CHAN_INFO(handle) ((intptr_t)handle)
#endif
/* Compute the port used used on the host side to communicate for RX or TX
on MCAPI domain node*/
#define SMECY_MCAPI_PORT(TX_or_RX,domain,node) \
(SMECY_MCAPI_HOST_##TX_or_RX##_STARTING_PORT + SMECY_PE_NB*domain + node)
/* Machine description */
enum {
/* The STHORM geometry */
SMECY_CLUSTER_NB = 4,
SMECY_PE_NB = 16,
/* The localization of the host inside the MCAPI realm: */
SMECY_MCAPI_HOST_DOMAIN = 5,
SMECY_MCAPI_HOST_NODE = 0,
/* The port numbers to connect a PE to a host. Since a PE is only
connected to the host, only to endpoints and thus ports are needed. */
SMECY_MCAPI_PE_TX_PORT = 1,
SMECY_MCAPI_PE_RX_PORT = 2,
/* Since MCAPI does not allow multiple connections on a same port, use
on the host a different port to connect to each PE. */
/* There are not a lot of ports on STHORM, so use the lower number to
start: */
SMECY_MCAPI_HOST_TX_STARTING_PORT = 0x0,
/* Allocate the reception ports just after the transmission ones: */
SMECY_MCAPI_HOST_RX_STARTING_PORT =
SMECY_MCAPI_PORT(TX,SMECY_CLUSTER_NB,0),
/* Maximum byte size of a message used as a strip-mining size: */
SMECY_MCAPI_STRIPMINE_SIZE = 4050,
};
/* To get a printf-like feedback.
Since MCAPI_TRACE_C add a new line, add a "\n" to non MCAPI_STHORM case
too
*/
/* printf-style diagnostic sink taking an already-started va_list.
   On STHORM the message is routed through the MCAPI trace API (which adds
   its own newline); elsewhere it goes to stderr with an explicit trailing
   newline.
   Fix: the original omitted the return type (implicit int), which is
   invalid in C99/C11 and C++; declared void since nothing is returned. */
static void SMECY_printf_varargs(const char *format, va_list ap) {
#ifdef MCAPI_STHORM
  /* Well, cannot use asprintf on MCAPI... */
  static char big_message[1000];
  vsnprintf(big_message, sizeof(big_message), format, ap);
  MCAPI_TRACE_C(big_message);
#else
  vfprintf(stderr, format, ap);
  /* Note that the new line is not atomic because it is in another
     printf... */
  fprintf(stderr, "\n");
#endif
}
/* Variadic front-end for SMECY_printf_varargs: packages the arguments into
   a va_list and forwards them.
   Fix: the original omitted the return type (implicit int), which is
   invalid in C99/C11 and C++; declared void since nothing is returned. */
static void SMECY_printf(const char *format, ...) {
  va_list ap;
  va_start(ap, format);
  SMECY_printf_varargs(format, ap);
  va_end(ap);
}
#ifdef SMECY_VERBOSE
#include <stdio.h>
/* Prefix all the debug messages with "SMECY: " to ease filtering */
#define SMECY_PRINT_VERBOSE_RAW(...) \
SMECY_printf("SMECY: " __VA_ARGS__)
/* With a ; to allow a statement or a declaration afterards
The comment on 2 lines at the end is to force a poor-man formating of
the output when using cpp -CC (preprocess but keep the comments) to
separate better the debug message from the real code
*/
#define SMECY_PRINT_VERBOSE(...) SMECY_PRINT_VERBOSE_RAW(__VA_ARGS__);/*
*/
#define SMECY_PRINT_VERBOSE_COMMA(...) \
/* The , instead of ; is to have a single statement with the statement \
following this macro. It allows for example to have this verbose \
macro between a #pragma omp section and the real statement. Do not \
work before a declaration... */ \
SMECY_PRINT_VERBOSE_RAW(__VA_ARGS__),/*
*/
#else
#define SMECY_PRINT_VERBOSE(...)
#define SMECY_PRINT_VERBOSE_COMMA(...)
#endif
/* Test error code.
Display and exit on failure
*/
/* Check an MCAPI status value; on failure, print the call-site message and
   the decoded status, then exit (or spin forever on STHORM, which has no
   exit()).  When SMECY_MCAPI_CHECK_TRACE is defined the message is printed
   unconditionally (trace mode) and only the failure handling is gated on
   the status.
   NOTE: the "if (status != MCAPI_SUCCESS) {" brace is opened in exactly one
   of the two mutually exclusive #ifdef branches below and closed by the
   single '}' before the final return -- the brace pairing spans
   preprocessor branches, so do not restructure mechanically. */
void static SMECY_MCAPI_check_status(mcapi_status_t status,
char file[],
const char function[],
int line,
const char *format,
...) {
#ifndef SMECY_MCAPI_CHECK_TRACE
if (status != MCAPI_SUCCESS) {
/* Something went wrong */
#endif
/* If SMECY_MCAPI_CHECK_TRACE is set for asking trace mode, display
verbose status message even without any error */
if (format[0] != '\0') {
/* We have a message to display */
SMECY_printf(" - From file '%s', function '%s', line %d:",
file, function, line);
va_list ap;
va_start(ap, format);
SMECY_printf_varargs(format, ap);
va_end(ap);
}
#ifdef SMECY_MCAPI_CHECK_TRACE
if (status != MCAPI_SUCCESS) {
/* Something went wrong */
#endif
#ifdef SMECY_VERBOSE
/* Use a function from Linux MCAPI implementation to get a string
translation of the status: */
#ifndef MCAPI_MAX_STATUS_SIZE
#define MCAPI_MAX_STATUS_SIZE 250
#endif
#ifdef MCAPI_STHORM
//#define puts(...)
char *message = "";
#else
char message[MCAPI_MAX_STATUS_SIZE];
mcapi_display_status(status, message, MCAPI_MAX_STATUS_SIZE);
#endif
SMECY_printf("API call fails in file '%s', function '%s',"
" line %d with error:\n\t%s",
file, function, line, message);
#ifdef MCAPI_STHORM
/* Rely on the STHORM MCAPI tracing API to display the status */
MCAPI_TRACE_C(format);
MCAPI_TRACE_CS(function, status);
MCAPI_TRACE_CI(file, line);
#endif
#endif
#ifndef MCAPI_STHORM
/* Exit and forward the error code to the OS: */
exit(status);
#else
/* Well, there is no exit() on STHORM, so loop around instead of going
on with nasty side effects... */
MCAPI_TRACE_C("This thread is waiting for ever because of an error.\n");
for(;;) ;
#endif
}
/* Go on, no error */
return;
}
/* The wrapping macro for MCAPI_check_status to capture the call site
information */
#define SMECY_MCAPI_CHECK_STATUS(status) \
SMECY_MCAPI_check_status(status, __FILE__, __func__, __LINE__, "")
#define SMECY_MCAPI_CHECK_STATUS_MESSAGE(status, ...) \
SMECY_MCAPI_check_status(status, __FILE__, __func__, __LINE__, __VA_ARGS__)
/* SMECY_IMP_ are the real implementations doing the real work, to be
defined somewhere else. */
/* Implementation macros to deal with mapping and function executions */
// Create a unique variable name used to pass an argument to function
#define SMECY_IMP_VAR_ARG(func, arg, pe, ...) \
p4a_##pe##_##func##_##arg
// Create a unique variable name for a message
#define SMECY_IMP_VAR_MSG(func, arg, pe, ...) \
p4a_##pe##_##func##_##arg##_msg
// SMECY_CONCAT(SMECY_CONCAT(p4a_##pe##_,SMECY_CONCATENATE(__VA_ARGS__)),##_##func##_##arg)
/* Wrapper that can be used for example to launch the function in another
thread */
#define SMECY_IMP_LAUNCH_WRAPPER(func_call) func_call
// Implementations for the SMECY library on MCAPI
/*
Communications are based on MCAPI packet channels, the MCAPI
connected-mode connections.
The host open connections on-demand to the accelerator PEs and cache the
connections to avoid spending time every-time.
A PE tries to open a connection with the host and is blocked until the
host need it. The the PE enter an infinite dispatching loop so there is
no caching needed here (hopefuly, since the accelerators are not that
memory proficient...).
*/
/* Start a connection request */
/* Connect a packet-channel send endpoint to a receive endpoint and block
   until the connection completes.  Both endpoints must already exist; any
   MCAPI failure aborts via SMECY_MCAPI_CHECK_STATUS_MESSAGE. */
static void
SMECY_MCAPI_connect(mcapi_endpoint_t pkt_send, mcapi_endpoint_t pkt_receive) {
mcapi_request_t handle;
mcapi_status_t status;
size_t size;
/* Connect both ends */
mcapi_pktchan_connect_i(pkt_send, pkt_receive, &handle, &status);
SMECY_MCAPI_CHECK_STATUS_MESSAGE(status, "mcapi_pktchan_connect_i "
"on send endpoint %#tx from receive "
"endpoint %#tx with handle %#tx",
(intptr_t)pkt_send, (intptr_t)pkt_receive,
(intptr_t)handle);
/* ...and wait for its completion */
mcapi_wait(&handle, &size, MCAPI_TIMEOUT_INFINITE, &status);
SMECY_MCAPI_CHECK_STATUS_MESSAGE(status, "mcapi_wait on handle %#tx "
"returned size %#zx", (intptr_t)handle,
size);
}
/* Create a packet channel for reception listening on receive_port */
/* Builds the local receive endpoint, (on the host only) fetches the remote
   send endpoint and drives the connection, then opens the receive side of
   the channel and blocks until it is usable.  The send_* parameters are
   only consulted on the host; on the fabric side the host performs the
   connection.  Aborts via SMECY_MCAPI_CHECK_STATUS_MESSAGE on any failure. */
mcapi_pktchan_recv_hndl_t static
SMECY_MCAPI_receive_gate_create(mcapi_port_t receive_port,
mcapi_domain_t send_domain,
mcapi_node_t send_node,
mcapi_port_t send_port) {
mcapi_status_t status;
/* Create the local endpoint for reception */
mcapi_endpoint_t pkt_receive = mcapi_endpoint_create(receive_port, &status);
SMECY_MCAPI_CHECK_STATUS_MESSAGE(status, "mcapi_endpoint_create"
" with receive_port %#tx returns"
" pkt_receive %#tx",
(intptr_t)receive_port,
(intptr_t)pkt_receive);
#if defined(MCAPI_STHORM) && !defined(SMECY_MCAPI_HOST)
/* On STHORM fabric side, it looks like we need to precise this
attribute to have a connection with the host working */
mcapi_endp_attr_memory_type_t memtype = MCAPI_ENDP_ATTR_REMOTE_MEMORY;
mcapi_endpoint_set_attribute(pkt_receive,
MCAPI_ENDP_ATTR_MEMORY_TYPE,
&memtype,
sizeof(mcapi_endp_attr_memory_type_t),
&status);
SMECY_MCAPI_CHECK_STATUS_MESSAGE(status, "mcapi_endpoint_set_attribute "
"MCAPI_ENDP_ATTR_REMOTE_MEMORY");
#endif
mcapi_request_t handle;
#ifdef SMECY_MCAPI_HOST
/* Choose to do the connection request on the host size to spare
resources on the accelerator side */
/* Get the remote end point. Wait if it is not created at the receive
side */
mcapi_endpoint_t pkt_send = mcapi_endpoint_get(send_domain,
send_node,
send_port,
MCA_INFINITE, &status);
SMECY_MCAPI_CHECK_STATUS_MESSAGE(status, "mcapi_endpoint_get "
"on send domain %#tx, node %#tx and "
"port %#tx returns pkt_send %#tx",
(intptr_t)send_domain, (intptr_t)send_node,
(intptr_t)send_port, (intptr_t)pkt_send);
SMECY_MCAPI_connect(pkt_send, pkt_receive);
#endif
mcapi_pktchan_recv_hndl_t receive_gate;
/* Start a connection request... */
mcapi_pktchan_recv_open_i(&receive_gate, pkt_receive, &handle, &status);
SMECY_MCAPI_CHECK_STATUS_MESSAGE(status, "mcapi_pktchan_recv_open_i "
"on receive port %#tx on gate %#tx and "
"handle %#tx", (intptr_t)pkt_receive,
SMECY_CHAN_INFO(receive_gate),
(intptr_t)handle);
/* ...and wait for its completion */
size_t size;
mcapi_wait(&handle, &size, MCAPI_TIMEOUT_INFINITE, &status);
SMECY_MCAPI_CHECK_STATUS_MESSAGE(status, "mcapi_wait on handle %#tx "
"returned size %#zx", (intptr_t)handle, size);
return receive_gate;
}
/* Create a packet channel for transmission */
/* Returns the opened packet-channel send handle.

   Mirror image of SMECY_MCAPI_receive_gate_create(): on the host the
   remote receive endpoint is fetched and the connection requested here;
   on the fabric side only the local endpoint and open are performed. */
mcapi_pktchan_send_hndl_t static
SMECY_MCAPI_send_gate_create(mcapi_port_t send_port,
                             mcapi_domain_t receive_domain,
                             mcapi_node_t receive_node,
                             mcapi_port_t receive_port) {
  mcapi_status_t status;
  /* Create the local port to send the data */
  mcapi_endpoint_t pkt_send = mcapi_endpoint_create(send_port, &status);
  SMECY_MCAPI_CHECK_STATUS_MESSAGE(status, "mcapi_endpoint_create "
                                   "on send port %#tx returns pkt_send %#tx",
                                   (intptr_t)send_port, (intptr_t)pkt_send);
#if defined(MCAPI_STHORM) && !defined(SMECY_MCAPI_HOST)
  /* On STHORM fabric side, it looks like we need to specify this
     attribute to have a connection with the host working */
  mcapi_endp_attr_memory_type_t memtype = MCAPI_ENDP_ATTR_REMOTE_MEMORY;
  mcapi_endpoint_set_attribute(pkt_send,
                               MCAPI_ENDP_ATTR_MEMORY_TYPE,
                               &memtype,
                               sizeof(mcapi_endp_attr_memory_type_t),
                               &status);
  SMECY_MCAPI_CHECK_STATUS_MESSAGE(status, "mcapi_endpoint_set_attribute "
                                   "MCAPI_ENDP_ATTR_REMOTE_MEMORY");
#endif
  mcapi_request_t handle;
  size_t size;
#ifdef SMECY_MCAPI_HOST
  /* Choose to do the connection request on the host side to spare
     resources on the accelerator side */
  /* Get the remote end point. Wait if it is not created at the receive
     side */
  mcapi_endpoint_t pkt_receive = mcapi_endpoint_get(receive_domain,
                                                    receive_node,
                                                    receive_port,
                                                    MCA_INFINITE, &status);
  SMECY_MCAPI_CHECK_STATUS_MESSAGE(status, "mcapi_endpoint_get "
                                   "on receive domain %#tx, node %#tx and "
                                   "port %#tx returns pkt_receive %#tx",
                                   (intptr_t)receive_domain, (intptr_t)receive_node,
                                   (intptr_t)receive_port, (intptr_t)pkt_receive);
  SMECY_MCAPI_connect(pkt_send, pkt_receive);
#endif
  mcapi_pktchan_send_hndl_t send_gate;
  /* Open this side of the channel for sending... */
  mcapi_pktchan_send_open_i(&send_gate, pkt_send, &handle, &status);
  SMECY_MCAPI_CHECK_STATUS_MESSAGE(status, "mcapi_pktchan_send_open_i "
                                   "send gate %#tx, send endpoint %#tx "
                                   "with handle %#tx",
                                   SMECY_CHAN_INFO(send_gate),
                                   (intptr_t)pkt_send,
                                   (intptr_t)handle);
  /* And wait for the opening */
  mcapi_wait(&handle, &size, MCAPI_TIMEOUT_INFINITE, &status);
  SMECY_MCAPI_CHECK_STATUS_MESSAGE(status, "mcapi_wait on handle %#tx "
                                   "returned size %#zx", (intptr_t)handle,
                                   size);
  return send_gate;
}
/* Since MCAPI has some limitations on the packet size, implement
   strip-mining versions of the communication libraries to call. */
/* Send size bytes starting at addr to the recipient.

   The payload is cut into pieces of at most SMECY_MCAPI_STRIPMINE_SIZE
   bytes, each sent with one mcapi_pktchan_send(). The matching
   SMECY_MCAPI_receive() reconstructs the same piece sizes. */
void SMECY_MCAPI_send(mcapi_pktchan_send_hndl_t recipient,
                      const void *addr,
                      size_t size) {
  intptr_t remaining_size = size;
  int packet_number = 0;
  /* Iterate with a char pointer: arithmetic on "void *" is a GCC
     extension, not ISO C */
  for (const char * p = addr;
       remaining_size > 0;
       remaining_size -= SMECY_MCAPI_STRIPMINE_SIZE,
         p += SMECY_MCAPI_STRIPMINE_SIZE) {
    intptr_t packet_size = SMECY_MIN(remaining_size,
                                     SMECY_MCAPI_STRIPMINE_SIZE);
    mcapi_status_t status;
    /* Send the data packet to the PE */
    mcapi_pktchan_send(recipient, p, packet_size, &status);
    /* Count the piece here rather than as "++packet_number" inside the
       checking macro: a macro may evaluate its arguments conditionally
       (or several times), which would corrupt the counter */
    ++packet_number;
    /* Check the correct execution */
    SMECY_MCAPI_CHECK_STATUS_MESSAGE(status,
                                     "mcapi_pktchan_send to send gate %#tx %p "
                                     "of length %#zx (piece #%d of length "
                                     "%#zx starting at %p)",
                                     SMECY_CHAN_INFO(recipient), addr, size,
                                     packet_number, packet_size, p);
  }
}
/* Receive size bytes starting at the given addr from the sender.
   addr must point to a previously allocated memory zone.

   The piece sizes are recomputed with the same formula used by
   SMECY_MCAPI_send(), so both sides stay in lock-step.

   Note that, since mcapi_pktchan_recv() gives allocated memory for one
   message, for short message, it may be more interesting not to use this
   procedure that imply a redundant copy.
*/
void SMECY_MCAPI_receive(mcapi_pktchan_recv_hndl_t sender,
                         void *addr,
                         size_t size) {
  intptr_t remaining_size = size;
  int packet_number = 0;
  /* Iterate with a char pointer: arithmetic on "void *" is a GCC
     extension, not ISO C */
  for (char * p = addr;
       remaining_size > 0;
       remaining_size -= SMECY_MCAPI_STRIPMINE_SIZE,
         p += SMECY_MCAPI_STRIPMINE_SIZE) {
    /* Compute the size as set by SMECY_MCAPI_send: */
    intptr_t predicted_size = SMECY_MIN(remaining_size,
                                        SMECY_MCAPI_STRIPMINE_SIZE);
    size_t received_size;
    void * message;
    mcapi_status_t status;
    mcapi_pktchan_recv(sender, &message, &received_size, &status);
    /* Count the piece here rather than as "++packet_number" inside the
       checking macro: a macro may evaluate its arguments conditionally
       (or several times), which would corrupt the counter */
    ++packet_number;
    /* Check the correct execution */
    SMECY_MCAPI_CHECK_STATUS_MESSAGE(status, "mcapi_pktchan_recv from receive "
                                     "gate %#tx %p of length %#zx (piece #%d "
                                     "starting at %p, predicted size = %#zx, "
                                     "received size %#zx to be store at %p)",
                                     SMECY_CHAN_INFO(sender), addr, size,
                                     packet_number,
                                     message, predicted_size, received_size, p);
    assert(received_size == predicted_size);
    /* Store the received message into the destination */
    memcpy(p, message, predicted_size);
    /* Give back the memory buffer to the API for recycling */
    mcapi_pktchan_release(message, &status);
    /* Check the correct execution */
    SMECY_MCAPI_CHECK_STATUS_MESSAGE(status, "mcapi_pktchan_release %p",
                                     message);
  }
}
/* Analyze the PE type and coordinates by redirect to the function that
   knows about the "pe" accelerator */
/* Expands to SMECY_MCAPI_PARSE_PE_<pe>(coords...), which declares the
   local variables "domain" and "node" used by the other SMECY_IMP_*
   macros below */
#define SMECY_MCAPI_PARSE_PE(pe, ...)           \
  SMECY_MCAPI_PARSE_PE_##pe(__VA_ARGS__)
/* Analyze the STHORM coordinates: domain and node numbers */
#define SMECY_MCAPI_PARSE_PE_STHORM(d, n)       \
  mcapi_domain_t domain = d;/*
                            */    \
  mcapi_node_t node = n
/* Map a 1-dimension PE number on MCAPI */
#define SMECY_MCAPI_PARSE_PE_PE(p)              \
  /* Just select domain 0 */                    \
  SMECY_MCAPI_PARSE_PE_STHORM(0, p)
#if 0
/* Do not allow the map(Host...) since it leads to deadlocks (we are
   already on the host!) */
/* Analyze the Host coordinates, which are not specified in the pragma.
   Replace them with the host MCAPI node */
#define SMECY_MCAPI_PARSE_PE_Host()             \
  mcapi_domain_t domain = SMECY_MCAPI_HOST_DOMAIN;/*
                                                  */ \
  mcapi_node_t node = SMECY_MCAPI_HOST_NODE
#endif
#ifdef SMECY_MCAPI_HOST
/* Scoreboard used to keep the status of a connection between the host
   and each PE.

   Since it is a global array, it is default-initialized to 0, which is good.

   TODO: use a bitmask instead of a char array to save memory. But more
   complex to insure atomicity at the bit level...
*/
/* Indexed by [cluster/domain][PE/node]; each entry caches the pair of
   channel handles once the connection to that PE has been opened */
struct {
  /* Keep track of a connection between the host and a given
     accelerator: */
  bool opened;
  mcapi_pktchan_send_hndl_t transmit;
  mcapi_pktchan_recv_hndl_t receive;
  /* The lock to protect against concurrent execution on the same
     accelerator: */
  omp_lock_t accelerator_in_use;
} SMECY_MCAPI_connection[SMECY_CLUSTER_NB][SMECY_PE_NB];
#endif
/* Shut down the MCAPI run-time for this node, aborting with a
   diagnostic if the finalization fails */
static void SMECY_IMP_finalize() {
  mcapi_status_t mcapi_status;
  /* Tell the run-time we are done using the API */
  mcapi_finalize(&mcapi_status);
  /* Verify the finalization went well */
  SMECY_MCAPI_CHECK_STATUS_MESSAGE(mcapi_status, "Finalizing MCAPI");
}
#ifdef SMECY_MCAPI_HOST
/* Initialize all the MCAPI run-time on the host and register the
   finalizing on program exit() */
/* Called once at program start-up (smecc injects the call into main()).
   Also initializes the per-accelerator OpenMP locks of the connection
   scoreboard. Note that the finalization registration is currently
   disabled (see the comment at the end). */
static void SMECY_IMP_initialize_then_finalize() {
  //mcapi_node_attributes_t node_attributes;
  mcapi_info_t info;
  mcapi_status_t status;
#ifdef SMECY_MCA_API_DEBUG_LEVEL
  /* Set the requested debug level of the MCA API itself */
  mcapi_set_debug_level(SMECY_MCA_API_DEBUG_LEVEL);
#endif
  // init node attributes. Not clear in which MCAPI version it is needed...
  /* It looks like in the Linux MCAPI implementation reference from MCA,
     even the 2.015 version looks like a V1 interface... */
#if (MCAPI_VERSION >= 2000)
  mcapi_node_attributes_t node_attributes;
  mcapi_node_init_attributes(&node_attributes, &status);
  SMECY_MCAPI_CHECK_STATUS_MESSAGE(status, "Initializing MCAPI attributes");
  /* 6 arguments in V.2 */
  mcapi_initialize(SMECY_MCAPI_HOST_DOMAIN, SMECY_MCAPI_HOST_NODE,
                   &node_attributes, SMECY_MCAPI_PARAM_INIT, &info, &status);
  MCAPI_TRACE_MODE_ONLY_C("Host initialization V.2 done");
#else
  /* 5 arguments in V.1 */
  mcapi_initialize(SMECY_MCAPI_HOST_DOMAIN, SMECY_MCAPI_HOST_NODE,
                   SMECY_MCAPI_PARAM_INIT, &info, &status);
  MCAPI_TRACE_MODE_ONLY_C("Host initialization V.1 done");
#endif
  SMECY_MCAPI_CHECK_STATUS_MESSAGE(status, "Initializing MCAPI on domain"
                                   " %#tx and node %#tx",
                                   (intptr_t) SMECY_MCAPI_HOST_DOMAIN,
                                   (intptr_t) SMECY_MCAPI_HOST_NODE);
  /* Note that every accelerator is free for execution: */
  for(int smecy_cluster = 0;
      smecy_cluster != SMECY_CLUSTER_NB;
      ++smecy_cluster)
    for(int smecy_pe = 0; smecy_pe != SMECY_PE_NB; ++smecy_pe)
      omp_init_lock(&SMECY_MCAPI_connection[smecy_cluster][smecy_pe].accelerator_in_use);
  /* And the register the finalization for an execution at the end of the
     program execution */
  /* Well, since the execution model is with PEs waiting for the host to
     have some work, if the host call finalize, it will wait for the PEs
     waiting for it... Dead lock! So, in a first version, do not call
     finalize and just exit.

     A nasty side effect with current STHORM MCAPI is this message:

     terminate called after throwing an instance of 'SystemTrace::SystemTraceException'
     what():  Attempt to close the output file has failed

  */
  // atexit(SMECY_IMP_finalize);
}
#endif
#ifdef SMECY_MCAPI_HOST
/* Open some MCAPI connections with the requested node

   Only create a connection once by using a scoreboard to keep connection
   status to each PE.

   TODO: split this macro with sub-functions
*/
/* Host-side prologue of a remote call: takes the per-accelerator lock,
   opens (or reuses from the scoreboard) the transmit/receive channels
   and sends the function name to the remote dispatcher. Opens a scope
   (SMECY_LBRACE) that SMECY_IMP_accelerator_end() closes. */
#define SMECY_IMP_set(func, instance, pe, ...)                          \
  SMECY_LBRACE /* To have local variables
                */                                                      \
  mcapi_status_t SMECY_MCAPI_status; /*
     Analyze the PE type and coordinates into domain & node */          \
  SMECY_MCAPI_PARSE_PE(pe, __VA_ARGS__);                                \
  /* The handle to sending packets
   */                                                                   \
  mcapi_pktchan_send_hndl_t P4A_transmit;                               \
  /* The handle to receive packets
   */                                                                   \
  mcapi_pktchan_recv_hndl_t P4A_receive;                                \
  /* Do some per accelerator locking to be thread safe on the caching
     system but also to be sure there are no more than one function
     executed on an accelerator at a time.
  */                                                                    \
  omp_set_lock(&SMECY_MCAPI_connection[domain][node].accelerator_in_use); \
  SMECY_PRINT_VERBOSE("SMECY_IMP_set: got the lock for this "           \
                      "accelerator for instance %d of function "        \
                      "\"%s\" on processor \"%s\" n° \"%s\"",           \
                      instance, #func, #pe, #__VA_ARGS__);              \
  /* No need for an OpenMP flush because of the critical section */     \
  if (SMECY_MCAPI_connection[domain][node].opened) {                    \
    P4A_transmit = SMECY_MCAPI_connection[domain][node].transmit;       \
    P4A_receive = SMECY_MCAPI_connection[domain][node].receive;         \
    SMECY_PRINT_VERBOSE("SMECY_IMP_set: connection cache hit for "      \
                        "domain %d, node %d: P4A_transmit = %#tx,"      \
                        " P4A_receive = %#tx",                          \
                        domain, node, SMECY_CHAN_INFO(P4A_transmit),    \
                        SMECY_CHAN_INFO(P4A_receive));                  \
  }                                                                     \
  else {                                                                \
    /* This is not already opened, create the connections.

       Do it in this order compared with the PE
       to avoid dead-locks on opening: first open
       a connection to send data to the PE */                           \
    P4A_transmit = SMECY_MCAPI_send_gate_create(SMECY_MCAPI_PORT(TX,domain,node), \
                                                domain,                 \
                                                node,                   \
                                                SMECY_MCAPI_PE_RX_PORT); /*
       Then open a connection to receive data from the PE */            \
    P4A_receive = SMECY_MCAPI_receive_gate_create(SMECY_MCAPI_PORT(RX,domain,node), \
                                                  domain,               \
                                                  node,                 \
                                                  SMECY_MCAPI_PE_TX_PORT);/*
       No need for an OpenMP flush because of the lock around */        \
    SMECY_MCAPI_connection[domain][node].transmit = P4A_transmit;       \
    SMECY_MCAPI_connection[domain][node].receive = P4A_receive;         \
    SMECY_MCAPI_connection[domain][node].opened = true;                 \
  }                                                                     \
  /* Send the function name to run to the remode dispatcher,
     including the final '\0' */                                        \
  size_t length = strlen(#func) + 1;                                    \
  mcapi_pktchan_send(P4A_transmit,                                      \
                     #func,                                             \
                     length,                                            \
                     &SMECY_MCAPI_status);                              \
  /* Check the correct execution
   */                                                                   \
  SMECY_MCAPI_CHECK_STATUS_MESSAGE(SMECY_MCAPI_status,                  \
                                   "mcapi_pktchan_send to send gate"    \
                                   " %#tx '%s' of length %#tx",         \
                                   SMECY_CHAN_INFO(P4A_transmit),       \
                                   #func, length);                      \
  /* The size of some received data
   */                                                                   \
  size_t P4A_received_size
#else
/* This is on the accelerator side, directed here by the dispatcher.

   If it is necessary to call this PE at the same time from different
   caller threads, one need to use different ports...
*/
/* Accelerator-side prologue: the dispatcher already owns P4A_transmit /
   P4A_receive (passed via SMECY_accel_func_args), so only open the
   scope and declare the status/size locals */
#define SMECY_IMP_set(func, instance, pe, ...)                          \
  SMECY_LBRACE /* <- '{' To have local variables
                */                                                      \
  mcapi_status_t SMECY_MCAPI_status;                                    \
  /* The size of some received data
   */                                                                   \
  size_t P4A_received_size
#endif
#ifdef SMECY_MCAPI_HOST
/* Host-side epilogue: releases the per-accelerator lock taken by
   SMECY_IMP_set() and closes its scope */
#define SMECY_IMP_accelerator_end(func, instance, pe, ...)              \
  /* End of the accelerated part.

     Release the lock on this accelerator to allow another
     thread to use it.
  */                                                                    \
  omp_unset_lock(&SMECY_MCAPI_connection[domain][node].accelerator_in_use); \
  SMECY_RBRACE
#else
/* This is on the accelerator side */
#define SMECY_IMP_accelerator_end(func, instance, pe, ...)              \
  SMECY_RBRACE                                                          \
  /* End of the accelerated part: go back to the dispatcher */
#endif
#ifdef SMECY_MCAPI_HOST
/* Host side: marshal one scalar argument to the PE as a single packet */
#define SMECY_IMP_send_arg(func, arg, type, value, pe, ...)             \
  /* Use an intermediate variable to be able to have an address on it
     if literal is given */                                             \
  type SMECY_IMP_VAR_ARG(func, arg, pe, __VA_ARGS__) = value;           \
  /* Send the scalar data to the PE
   */                                                                   \
  mcapi_pktchan_send(P4A_transmit,                                      \
                     &SMECY_IMP_VAR_ARG(func, arg, pe, __VA_ARGS__),    \
                     sizeof(type),                                      \
                     &SMECY_MCAPI_status);                              \
  /* Check the correct execution
   */                                                                   \
  SMECY_MCAPI_CHECK_STATUS_MESSAGE(SMECY_MCAPI_status, "mcapi_pktchan_send " \
                                   "to send gate %#tx %p of length %#zx", \
                                   SMECY_CHAN_INFO(P4A_transmit),       \
                                   &SMECY_IMP_VAR_ARG(func, arg, pe, __VA_ARGS__), sizeof(type));
#else
/* This is on the accelerator side */
/* Accelerator side: receive the scalar into an MCAPI-owned buffer and
   copy it into the local variable passed to the user function; the
   buffer is released later by SMECY_IMP_cleanup_send_arg() */
#define SMECY_IMP_send_arg(func, arg, type, value, pe, ...)             \
  /* A pointer that will point to the received message */               \
  type* SMECY_IMP_VAR_MSG(func, arg, pe, __VA_ARGS__);                  \
  /* Receive the packet with the value
   */                                                                   \
  mcapi_pktchan_recv(P4A_receive,                                       \
                     (void **)&SMECY_IMP_VAR_MSG(func,arg,pe,__VA_ARGS__), \
                     &P4A_received_size,                                \
                     &SMECY_MCAPI_status);                              \
  /* Check the correct execution
   */                                                                   \
  SMECY_MCAPI_CHECK_STATUS_MESSAGE(SMECY_MCAPI_status, "mcapi_pktchan_recv " \
                                   "from receive gate %#tx %p of length %#zx", \
                                   SMECY_CHAN_INFO(P4A_receive),        \
                                   (void **)&SMECY_IMP_VAR_MSG(func,arg,pe,__VA_ARGS__), \
                                   P4A_received_size);                  \
  /* Store the value in the argument to be given to the function
     call */                                                            \
  type SMECY_IMP_VAR_ARG(func, arg, pe, __VA_ARGS__) =                  \
    *SMECY_IMP_VAR_MSG(func,arg,pe,__VA_ARGS__)
#endif
#ifdef SMECY_MCAPI_HOST
#define SMECY_IMP_cleanup_send_arg(func, arg, type, value, pe, ...)     \
  /* Nothing to do for SMECY_cleanup_send_arg */
#else
/* This is on the accelerator side */
/* Return the MCAPI message buffer allocated by SMECY_IMP_send_arg() */
#define SMECY_IMP_cleanup_send_arg(func, arg, type, value, pe, ...)     \
  /* Give back the memory buffer to the API for recycling
   */                                                                   \
  mcapi_pktchan_release(SMECY_IMP_VAR_MSG(func,arg,pe,__VA_ARGS__),     \
                        &SMECY_MCAPI_status);                           \
  /* Check the correct execution
   */                                                                   \
  SMECY_MCAPI_CHECK_STATUS_MESSAGE(SMECY_MCAPI_status,                  \
                                   "mcapi_pktchan_release %p",          \
                                   SMECY_IMP_VAR_MSG(func,arg,pe,__VA_ARGS__))
#endif
#ifdef SMECY_MCAPI_HOST
/* Host side: stream a whole vector argument with the strip-mining
   helper SMECY_MCAPI_send() */
#define SMECY_IMP_send_arg_vector(func, arg, type, addr, size, pe, ...) \
  SMECY_PRINT_VERBOSE("Sending vector of %zd elements of %s at address" \
                      " %p from arg #%d of function \"%s\" on "         \
                      "processor \"%s\" n° \"%s\"", (size_t) size,      \
                      #type, addr, arg, #func, #pe, #__VA_ARGS__)       \
  /* Send the vector data to the PE
   */                                                                   \
  SMECY_MCAPI_send(P4A_transmit, addr, size*sizeof(type))
#else
/* This is on the accelerator side */
/* Accelerator side: allocate a stack VLA for the argument and fill it
   from the strip-mined stream */
#define SMECY_IMP_send_arg_vector(func, arg, type, addr, size, pe, ...) \
  /* Allocate the memory argument given to the function to receive
     the data from the execution */                                     \
  type SMECY_IMP_VAR_ARG(func, arg, pe, __VA_ARGS__)[size];             \
  SMECY_PRINT_VERBOSE("Sending vector of %zd elements of %s at address" \
                      " %p from arg #%d of function \"%s\" on "         \
                      "processor \"%s\" n° \"%s\"", (size_t) size,      \
                      #type,                                            \
                      SMECY_IMP_VAR_ARG(func, arg, pe, __VA_ARGS__),    \
                      arg, #func, #pe, #__VA_ARGS__)                    \
  /* Receive the packet with the value
   */                                                                   \
  SMECY_MCAPI_receive(P4A_receive,                                      \
                      SMECY_IMP_VAR_ARG(func, arg, pe, __VA_ARGS__),    \
                      sizeof(SMECY_IMP_VAR_ARG(func, arg, pe, __VA_ARGS__)))
#endif
#ifdef SMECY_MCAPI_HOST
#define SMECY_IMP_cleanup_send_arg_vector(func, arg, type, addr, size, pe, ...) \
  SMECY_PRINT_VERBOSE("Deal with post-sending vector "                  \
                      "of %zd elements of %s at address"                \
                      " %p from arg #%d of function \"%s\" on "         \
                      "processor \"%s\" n° \"%s\"", (size_t) size,      \
                      #type, addr, arg, #func, #pe, #__VA_ARGS__)       \
  /* Nothing to do for SMECY_cleanup_send_arg_vector */
#else
/* This is on the accelerator side */
#define SMECY_IMP_cleanup_send_arg_vector(func, arg, type, addr, size, pe, ...) \
  SMECY_PRINT_VERBOSE("Deal with post-sending vector "                  \
                      "of %zd elements of %s at address"                \
                      " %p from arg #%d of function \"%s\" on "         \
                      "processor \"%s\" n° \"%s\"", (size_t) size,      \
                      #type,                                            \
                      SMECY_IMP_VAR_ARG(func, arg, pe, __VA_ARGS__),    \
                      arg, #func, #pe, #__VA_ARGS__)
#endif
/* Not implemented: expanding these produces an undefined identifier so
   any use fails loudly at compile time */
#define SMECY_IMP_update_arg_vector(func, arg, type, addr, size, pe, ...) \
  TODO_SMECY_IMP_update_arg_vector
#define SMECY_IMP_cleanup_update_arg_vector(func, arg, type, addr, size, pe, ...) \
  TODO_SMECY_IMP_cleanup_update_arg_vector
#ifdef SMECY_MCAPI_HOST
#define SMECY_IMP_launch(func, n_args, pe, ...)                         \
  /* Nothing to launch: it is done on the accelerator side */
#else
/* This is on the accelerator side */
/* Dispatch on the argument count to the SMECY_IMP_launch_<n_args>
   macros defined below */
#define SMECY_IMP_launch(func, n_args, pe, ...)                         \
  SMECY_IMP_launch_##n_args(func, pe, __VA_ARGS__)
#endif
#ifdef SMECY_MCAPI_HOST
#define SMECY_IMP_prepare_get_arg_vector(func, arg, type, addr, size, pe, ...) \
  SMECY_PRINT_VERBOSE("Preparing to receive vector of %zd elements "    \
                      "of %s at address %p from arg #%d of "            \
                      "function \"%s\" on processor \"%s\" n° \"%s\"",  \
                      (size_t) size, #type, addr, arg,                  \
                      #func, #pe, #__VA_ARGS__)                         \
  /* Nothing to do */
#else
/* This is on the accelerator side */
/* Accelerator side: allocate the output VLA the user function will
   write into; SMECY_IMP_get_arg_vector() sends it back afterwards */
#define SMECY_IMP_prepare_get_arg_vector(func, arg, type, addr, size, pe, ...) \
  /* Allocate the memory given to the function to receive the data
     from the execution */                                              \
  type SMECY_IMP_VAR_ARG(func, arg, pe, __VA_ARGS__)[size];             \
  /* Display the address only now since it is defined by the previous
     allocation. */                                                     \
  SMECY_PRINT_VERBOSE("Preparing to receive vector of %zd elements "    \
                      "of %s at address %p from arg #%d of "            \
                      "function \"%s\" on processor \"%s\" n° \"%s\"",  \
                      (size_t) size, #type,                             \
                      SMECY_IMP_VAR_ARG(func, arg, pe, __VA_ARGS__),    \
                      arg, #func, #pe, #__VA_ARGS__)
#endif
#ifdef SMECY_MCAPI_HOST
/* Host side: pull an output vector back from the accelerator */
#define SMECY_IMP_get_arg_vector(func, arg, type, addr, size, pe, ...)  \
  SMECY_PRINT_VERBOSE("Receiving vector of %zd elements of %s at address" \
                      " %p from arg #%d of function \"%s\" on "         \
                      "processor \"%s\" n° \"%s\"", (size_t) size,      \
                      #type, addr, arg, #func, #pe, #__VA_ARGS__)       \
  /* Receive the vector result from the accelerator
   */                                                                   \
  SMECY_MCAPI_receive(P4A_receive, addr, size*sizeof(type));
#else
/* This is on the accelerator side */
#define SMECY_IMP_get_arg_vector(func, arg, type, addr, size, pe, ...)  \
  SMECY_PRINT_VERBOSE("Receiving vector of %zd elements of %s at address" \
                      " %p from arg #%d of function \"%s\" on "         \
                      "processor \"%s\" n° \"%s\"", (size_t) size,      \
                      #type, SMECY_IMP_VAR_ARG(func, arg, pe, __VA_ARGS__), \
                      arg, #func, #pe, #__VA_ARGS__)                    \
  /* Send the vector data given by the function execution back to the host
   */                                                                   \
  SMECY_MCAPI_send(P4A_transmit,                                        \
                   SMECY_IMP_VAR_ARG(func, arg, pe, __VA_ARGS__),       \
                   size*sizeof(type));
#endif
/* TODO: To be implemented... */
#define SMECY_IMP_get_return(func, type, pe, ...)                       \
  TODO_SMECY_IMP_get_return
/* Implementation of the function calls themselves */

/* Call a function without parameter */
#define SMECY_IMP_launch_0(func, pe, ...)       \
  SMECY_IMP_LAUNCH_WRAPPER(func())
/* For the recurrence afterwards: no parameter when 0 parameter :-) */
#define SMECY_IMP_ARG_launch_0(func, pe, ...)
/* Call a function with 1 parameter */
#define SMECY_IMP_ARG_launch_1(func, pe, ...) SMECY_IMP_VAR_ARG(func, 1, pe, __VA_ARGS__)
#define SMECY_IMP_launch_1(func, pe, ...)                               \
  SMECY_IMP_LAUNCH_WRAPPER(func(SMECY_IMP_ARG_launch_1(func, pe, __VA_ARGS__)))
/* Declare the launchers for function calls with 2 and more parameters by
   recurrence. Each time, add a parameter to the list of parameter of the
   previous call */
/* SMECY_IMP_ARG_launch_N expands to the comma-separated list of the N
   local argument variables created by the send_arg/prepare_get macros */
#define SMECY_IMP_ARG_launch_2(func, pe, ...) SMECY_IMP_ARG_launch_1(func, pe, __VA_ARGS__),SMECY_IMP_VAR_ARG(func, 2, pe, __VA_ARGS__)
#define SMECY_IMP_launch_2(func, pe, ...)                               \
  SMECY_IMP_LAUNCH_WRAPPER(func(SMECY_IMP_ARG_launch_2(func, pe, __VA_ARGS__)))
#define SMECY_IMP_ARG_launch_3(func, pe, ...) SMECY_IMP_ARG_launch_2(func, pe, __VA_ARGS__),SMECY_IMP_VAR_ARG(func, 3, pe, __VA_ARGS__)
#define SMECY_IMP_launch_3(func, pe, ...)                               \
  SMECY_IMP_LAUNCH_WRAPPER(func(SMECY_IMP_ARG_launch_3(func, pe, __VA_ARGS__)))
#define SMECY_IMP_ARG_launch_4(func, pe, ...) SMECY_IMP_ARG_launch_3(func, pe, __VA_ARGS__),SMECY_IMP_VAR_ARG(func, 4, pe, __VA_ARGS__)
#define SMECY_IMP_launch_4(func, pe, ...)                               \
  SMECY_IMP_LAUNCH_WRAPPER(func(SMECY_IMP_ARG_launch_4(func, pe, __VA_ARGS__)))
#define SMECY_IMP_ARG_launch_5(func, pe, ...) SMECY_IMP_ARG_launch_4(func, pe, __VA_ARGS__),SMECY_IMP_VAR_ARG(func, 5, pe, __VA_ARGS__)
#define SMECY_IMP_launch_5(func, pe, ...)                               \
  SMECY_IMP_LAUNCH_WRAPPER(func(SMECY_IMP_ARG_launch_5(func, pe, __VA_ARGS__)))
#define SMECY_IMP_ARG_launch_6(func, pe, ...) SMECY_IMP_ARG_launch_5(func, pe, __VA_ARGS__),SMECY_IMP_VAR_ARG(func, 6, pe, __VA_ARGS__)
#define SMECY_IMP_launch_6(func, pe, ...)                               \
  SMECY_IMP_LAUNCH_WRAPPER(func(SMECY_IMP_ARG_launch_6(func, pe, __VA_ARGS__)))
#define SMECY_IMP_ARG_launch_7(func, pe, ...) SMECY_IMP_ARG_launch_6(func, pe, __VA_ARGS__),SMECY_IMP_VAR_ARG(func, 7, pe, __VA_ARGS__)
#define SMECY_IMP_launch_7(func, pe, ...)                               \
  SMECY_IMP_LAUNCH_WRAPPER(func(SMECY_IMP_ARG_launch_7(func, pe, __VA_ARGS__)))
#define SMECY_IMP_ARG_launch_8(func, pe, ...) SMECY_IMP_ARG_launch_7(func, pe, __VA_ARGS__),SMECY_IMP_VAR_ARG(func, 8, pe, __VA_ARGS__)
#define SMECY_IMP_launch_8(func, pe, ...)                               \
  SMECY_IMP_LAUNCH_WRAPPER(func(SMECY_IMP_ARG_launch_8(func, pe, __VA_ARGS__)))
#define SMECY_IMP_ARG_launch_9(func, pe, ...) SMECY_IMP_ARG_launch_8(func, pe, __VA_ARGS__),SMECY_IMP_VAR_ARG(func, 9, pe, __VA_ARGS__)
#define SMECY_IMP_launch_9(func, pe, ...)                               \
  SMECY_IMP_LAUNCH_WRAPPER(func(SMECY_IMP_ARG_launch_9(func, pe, __VA_ARGS__)))
#define SMECY_IMP_ARG_launch_10(func, pe, ...) SMECY_IMP_ARG_launch_9(func, pe, __VA_ARGS__),SMECY_IMP_VAR_ARG(func, 10, pe, __VA_ARGS__)
#define SMECY_IMP_launch_10(func, pe, ...)                              \
  SMECY_IMP_LAUNCH_WRAPPER(func(SMECY_IMP_ARG_launch_10(func, pe, __VA_ARGS__)))
#define SMECY_IMP_ARG_launch_11(func, pe, ...) SMECY_IMP_ARG_launch_10(func, pe, __VA_ARGS__),SMECY_IMP_VAR_ARG(func, 11, pe, __VA_ARGS__)
#define SMECY_IMP_launch_11(func, pe, ...)                              \
  SMECY_IMP_LAUNCH_WRAPPER(func(SMECY_IMP_ARG_launch_11(func, pe, __VA_ARGS__)))
#define SMECY_IMP_ARG_launch_12(func, pe, ...) SMECY_IMP_ARG_launch_11(func, pe, __VA_ARGS__),SMECY_IMP_VAR_ARG(func, 12, pe, __VA_ARGS__)
#define SMECY_IMP_launch_12(func, pe, ...)                              \
  SMECY_IMP_LAUNCH_WRAPPER(func(SMECY_IMP_ARG_launch_12(func, pe, __VA_ARGS__)))
/* Dispatching code */

/* Initialize MCAPI on an accelerator node.
 */
/* Fabric-side counterpart of SMECY_IMP_initialize_then_finalize():
   registers the (cluster, PE) pair as an MCAPI (domain, node). Note the
   NULL init parameter here versus SMECY_MCAPI_PARAM_INIT on the host. */
static void SMECY_init_mcapi_node(int smecy_cluster, int smecy_pe) {
  mcapi_info_t info;
  mcapi_status_t status;
#ifdef SMECY_MCA_API_DEBUG_LEVEL
  /* Set the requested debug level of the MCA API itself */
  mcapi_set_debug_level(SMECY_MCA_API_DEBUG_LEVEL);
#endif
  // init node attributes. Not clear in which MCAPI version it is needed...
  /* It looks like in the Linux MCAPI implementation reference from MCA,
     even the 2.015 version looks like a V1 interface... */
#if (MCAPI_VERSION >= 2000)
  mcapi_node_attributes_t node_attributes;
  mcapi_node_init_attributes(&node_attributes, &status);
  SMECY_MCAPI_CHECK_STATUS_MESSAGE(status, "Initializing MCAPI attributes");
  /* 6 arguments in V.2 */
  mcapi_initialize(smecy_cluster, smecy_pe,
                   &node_attributes, NULL, &info, &status);
  MCAPI_TRACE_MODE_ONLY_C("Fabric initialization V.2 done");
#else
  /* 5 arguments in V.1 */
  mcapi_initialize(smecy_cluster, smecy_pe,
                   NULL, &info, &status);
  MCAPI_TRACE_MODE_ONLY_C("Fabric initialization V.1 done");
#endif
  SMECY_MCAPI_CHECK_STATUS_MESSAGE(status, "Initialization of smecy_cluster"
                                   " %d, smecy_pe %d",
                                   smecy_cluster, smecy_pe);
}
/* Open the per-PE dispatcher: initialize MCAPI, create the two channels
   to the host, then loop forever waiting for a function name to run.
   Closed by SMECY_end_accel_function_dispatch.

   Fix: P4A_receive was declared mcapi_pktchan_send_hndl_t although it
   holds the mcapi_pktchan_recv_hndl_t returned by
   SMECY_MCAPI_receive_gate_create() and is consumed by
   mcapi_pktchan_recv(). */
#define SMECY_begin_accel_function_dispatch                             \
  /* The dispatch function to be run on a PE
   */                                                                   \
  void SMECY_accel_function_dispatch(int smecy_cluster, int smecy_pe) SMECY_LBRACE \
  /*
    Initialize MCAPI
  */                                                                    \
  SMECY_init_mcapi_node(smecy_cluster, smecy_pe);                       \
  /* Create the channels to communicate with the host
   */                                                                   \
  mcapi_pktchan_recv_hndl_t P4A_receive =                               \
    SMECY_MCAPI_receive_gate_create(SMECY_MCAPI_PE_RX_PORT,             \
                                    SMECY_MCAPI_HOST_DOMAIN,            \
                                    SMECY_MCAPI_HOST_NODE,              \
                                    SMECY_MCAPI_PORT(TX,smecy_cluster,smecy_pe)); /*
  */                                                                    \
  mcapi_pktchan_send_hndl_t P4A_transmit =                              \
    SMECY_MCAPI_send_gate_create(SMECY_MCAPI_PE_TX_PORT,                \
                                 SMECY_MCAPI_HOST_DOMAIN,               \
                                 SMECY_MCAPI_HOST_NODE,                 \
                                 SMECY_MCAPI_PORT(RX,smecy_cluster,smecy_pe)); /*
    Enter the infinite service loop on the PE
  */                                                                    \
  for(;;) SMECY_LBRACE                                                  \
    SMECY_PRINT_VERBOSE("PE %d %d is waiting for a job",                \
                        smecy_cluster, smecy_pe)                        \
    /* Wait for the function name to run:
     */                                                                 \
    char *function_name;                                                \
    size_t P4A_received_size;                                           \
    mcapi_status_t SMECY_MCAPI_status;                                  \
    mcapi_pktchan_recv(P4A_receive,                                     \
                       (void **)&function_name,                         \
                       &P4A_received_size,                              \
                       &SMECY_MCAPI_status);                            \
    /* Check the correct execution
     */                                                                 \
    SMECY_MCAPI_CHECK_STATUS_MESSAGE(SMECY_MCAPI_status,                \
                                     "mcapi_pktchan_recv from receive " \
                                     "gate %#tx '%s' of length %#zx",   \
                                     SMECY_CHAN_INFO(P4A_receive),      \
                                     function_name,                     \
                                     P4A_received_size);                \
    do SMECY_LBRACE                                                     \
      /* Find the right function to run
       */
/* Close the dispatcher opened by SMECY_begin_accel_function_dispatch:
   the fall-through error case, the release of the function-name buffer
   and the end of the polling loop */
#define SMECY_end_accel_function_dispatch                               \
  /* If we get here, we did not encounter a break and so nothing        \
     matches the requested function */                                  \
  SMECY_printf("No candidate function to execute \"%s\" on PE %d %d",   \
               function_name, smecy_cluster, smecy_pe);                 \
  exit(-1);                                                             \
  SMECY_RBRACE while (0);                                               \
  mcapi_pktchan_release(function_name,                                  \
                        &SMECY_MCAPI_status);                           \
  /* Check the correct execution                                        \
   */                                                                   \
  SMECY_MCAPI_CHECK_STATUS_MESSAGE(SMECY_MCAPI_status,                  \
                                   "mcapi_pktchan_release %p",          \
                                   function_name);                      \
  SMECY_MCAPI_CHECK_STATUS(SMECY_MCAPI_status);                         \
  /* End of the PE accelerator polling loop.                            \
   */                                                                   \
  SMECY_RBRACE                                                          \
  /* There is no finalize because of the infinite loop in               \
     smecy_accel_function_dispatch                                      \
  */                                                                    \
  SMECY_RBRACE
/* The parameter given to the accelerator function */
#define SMECY_accel_func_args                   \
  mcapi_pktchan_send_hndl_t P4A_transmit,       \
  mcapi_pktchan_recv_hndl_t P4A_receive
/* One dispatch-table entry: compare the received name against one
   registered accelerator function and run it on match */
#define SMECY_dispatch_accel_func(function, instance)                   \
  /* If we receive a message to activate this function
     launch it!

     Of course in a final implementation, do not use this linear search
     with string comparisons...
  */                                                                    \
  if (strcmp(function_name, #function) == 0) {                          \
    /* Call the accelerator function without any parameter
       since the parameter as indeed used as only local variables
       inside
    */                                                                  \
    SMECY_PRINT_VERBOSE("PE %d %d is executing instance " #instance     \
                        " of function \"" #function "\"",               \
                        smecy_cluster, smecy_pe)                        \
    smecy_accel_##function##_##instance(P4A_transmit, P4A_receive);     \
    /* Wait for next job to do
     */                                                                 \
    break;                                                              \
  }
#ifdef SMECY_MCAPI_HOST
/* Nothing to do here since smecc should have injected at the begining of
   the main() function a call to SMECY_initialize_then_finalize that
   initialize MCAPI on the host at the begining and finilize MCAPI at the
   end of the program. */
#else
#ifdef MCAPI_STHORM
/* STHORM fabric entry points: the run-time calls mcapi_domain_entry()
   on each cluster, which spawns one thread per node; each thread then
   reads back its own MCAPI coordinates and enters the dispatcher */
#define SMECY_start_PEs_dispatch                                        \
  /* Get the MCAPI coordinates before registering to MCAPI */           \
  void SMECY_thread_entry(void *args) {                                 \
    MCAPI_TRACE_MODE_ONLY_C("Local PE thread started");                 \
    mcapi_status_t status;                                              \
    mcapi_domain_t domain_id = mcapi_domain_id_get(&status);            \
    SMECY_MCAPI_CHECK_STATUS_MESSAGE(status, "Get the domain id");      \
    mcapi_node_t node_id = mcapi_node_id_get(&status);                  \
    SMECY_MCAPI_CHECK_STATUS_MESSAGE(status, "Get the node id");        \
    SMECY_accel_function_dispatch(domain_id, node_id);                  \
  }                                                                     \
                                                                        \
  /* This is called by the STHORM MCAPI run-time on all the fabric
     clusters (= MCAPI domains) */                                      \
  void mcapi_domain_entry() {                                           \
    MCAPI_TRACE_MODE_ONLY_C("Local cluster started");                   \
    /* Create all the threads on the nodes of the current cluster */    \
    for(int node_id = 0; node_id != SMECY_PE_NB; ++node_id)             \
      create_node(node_id, SMECY_thread_entry, NULL, NULL, NULL);       \
  }
#else
/* Generic (non-STHORM) accelerator emulation: one OpenMP thread per
   (cluster, PE) pair, each running the dispatcher forever */
#define SMECY_start_PEs_dispatch                                        \
  /* Main function that starts all the MCAPI threads that runs on the
     accelerator */                                                     \
  int main() {                                                          \
    /* Create OpenMP threads to launch all MCAPI nodes instead of running
       the old main
    */                                                                  \
    _Pragma("omp parallel for num_threads(SMECY_CLUSTER_NB)")           \
    for(int smecy_cluster = 0; smecy_cluster < SMECY_CLUSTER_NB; ++smecy_cluster) { \
      _Pragma("omp parallel for num_threads(SMECY_PE_NB)")              \
      for(int smecy_pe = 0; smecy_pe < SMECY_PE_NB; ++smecy_pe)         \
        SMECY_accel_function_dispatch(smecy_cluster, smecy_pe);         \
    }                                                                   \
    return 0;                                                           \
  }
#endif
#endif

/* Implementation macros to deal with streaming */

/* Not implemented yet in MCAPI */
|
DataGen.h | // Copyright (C) 2019-2020 Zilliz. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed under the License
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
// or implied. See the License for the specific language governing permissions and limitations under the License
#pragma once
#include "common/Schema.h"
#include <random>
#include <memory>
#include <cstring>
#include "segcore/SegmentGrowing.h"
#include "segcore/SegmentSealed.h"
#include "Constants.h"
#include <boost/algorithm/string/predicate.hpp>
#include "segcore/SegmentSealed.h"
#include <knowhere/index/vector_index/VecIndex.h>
#include <knowhere/index/vector_index/adapter/VectorAdapter.h>
#include <knowhere/index/vector_index/VecIndexFactory.h>
#include <knowhere/index/vector_index/IndexIVF.h>
#include <query/SearchOnIndex.h>
using boost::algorithm::starts_with;
namespace milvus::segcore {
// Container for synthetic segment data produced by DataGen():
// per-field column buffers plus the derived row-based layout.
struct GeneratedData {
    std::vector<uint8_t> rows_;                   // row-based copy of the data, filled by generate_rows()
    std::vector<aligned_vector<uint8_t>> cols_;   // one raw byte buffer per schema field
    std::vector<idx_t> row_ids_;                  // 0..N-1, assigned by DataGen()
    std::vector<Timestamp> timestamps_;           // 0..N-1, assigned by DataGen()
    RowBasedRawData raw_;                         // view over rows_ (raw_data points into rows_)
    // Copy column `index` out as a typed vector; T must match the
    // field's element type (size mismatch silently yields a wrong count).
    template <typename T>
    auto
    get_col(int index) const {
        auto& target = cols_.at(index);
        std::vector<T> ret(target.size() / sizeof(T));
        memcpy(ret.data(), target.data(), target.size());
        return ret;
    }
    // In-place typed view of column `index`; asserts the buffer holds
    // exactly one T per row. The pointer aliases cols_, so it is
    // invalidated if cols_ is modified.
    template <typename T>
    auto
    get_mutable_col(int index) {
        auto& target = cols_.at(index);
        assert(target.size() == row_ids_.size() * sizeof(T));
        auto ptr = reinterpret_cast<T*>(target.data());
        return ptr;
    }
 private:
    // Only DataGen() may construct instances.
    GeneratedData() = default;
    friend GeneratedData
    DataGen(SchemaPtr schema, int64_t N, uint64_t seed);
    void
    generate_rows(int64_t N, SchemaPtr schema);
};
// Interleave the column-based buffers (cols_) into the single
// row-based buffer rows_, and point raw_ at the result.
inline void
GeneratedData::generate_rows(int64_t N, SchemaPtr schema) {
    // Per-field byte offsets inside one row: exclusive prefix sum of
    // the per-field sizes; the final entry is the full row width.
    auto sizeof_infos = schema->get_sizeof_infos();
    std::vector<int> offset_infos(schema->size() + 1, 0);
    std::partial_sum(sizeof_infos.begin(), sizeof_infos.end(), offset_infos.begin() + 1);
    int64_t len_per_row = offset_infos.back();
    assert(len_per_row == schema->get_total_sizeof());
    // change column-based data to row-based data
    std::vector<uint8_t> row_data(len_per_row * N);
    for (int row = 0; row < N; ++row) {
        auto row_base = row_data.data() + row * len_per_row;
        for (int fid = 0; fid < schema->size(); ++fid) {
            auto field_len = sizeof_infos[fid];
            memcpy(row_base + offset_infos[fid],
                   cols_[fid].data() + row * field_len,
                   field_len);
        }
    }
    rows_ = std::move(row_data);
    raw_.raw_data = rows_.data();
    raw_.sizeof_per_row = schema->get_total_sizeof();
    raw_.count = N;
}
// Generate N rows of deterministic pseudo-random data matching `schema`.
//
// Field-name conventions: float-vector fields whose name starts with
// "normalized" are L2-normalized (for inner-product tests); INT64
// fields whose name starts with "counter" hold 0..N-1.
//
// Returns a GeneratedData with columns, row ids, timestamps and the
// row-based layout already built.
inline GeneratedData
DataGen(SchemaPtr schema, int64_t N, uint64_t seed = 42) {
    using std::vector;
    std::vector<aligned_vector<uint8_t>> cols;
    std::default_random_engine er(seed);
    std::normal_distribution<> distr(0, 1);
    int offset = 0;

    // Move one typed column into the raw byte-buffer list.
    auto insert_cols = [&cols](auto& data) {
        using T = std::remove_reference_t<decltype(data)>;
        auto len = sizeof(typename T::value_type) * data.size();
        auto ptr = aligned_vector<uint8_t>(len);
        memcpy(ptr.data(), data.data(), len);
        cols.emplace_back(std::move(ptr));
    };

    for (auto& field : schema->get_fields()) {
        switch (field.get_data_type()) {
            case engine::DataType::VECTOR_FLOAT: {
                auto dim = field.get_dim();
                vector<float> final(dim * N);
                bool is_ip = starts_with(field.get_name().get(), "normalized");
// Each row gets its own engine seeded with (seed + n), so the
// parallel loop is deterministic regardless of thread scheduling.
#pragma omp parallel for
                for (int n = 0; n < N; ++n) {
                    vector<float> data(dim);
                    float sum = 0;
                    std::default_random_engine er2(seed + n);
                    std::normal_distribution<> distr2(0, 1);
                    for (auto& x : data) {
                        x = distr2(er2) + offset;
                        sum += x * x;
                    }
                    if (is_ip) {
                        sum = sqrt(sum);
                        for (auto& x : data) {
                            x /= sum;
                        }
                    }
                    std::copy(data.begin(), data.end(), final.begin() + dim * n);
                }
                insert_cols(final);
                break;
            }
            case engine::DataType::VECTOR_BINARY: {
                auto dim = field.get_dim();
                Assert(dim % 8 == 0);
                vector<uint8_t> data(dim / 8 * N);
                for (auto& x : data) {
                    x = er();
                }
                insert_cols(data);
                break;
            }
            case engine::DataType::INT64: {
                vector<int64_t> data(N);
                // begin with counter
                if (starts_with(field.get_name().get(), "counter")) {
                    int64_t index = 0;
                    for (auto& x : data) {
                        x = index++;
                    }
                } else {
                    int i = 0;
                    for (auto& x : data) {
                        // NOTE(review): the random value is immediately
                        // overwritten by i below; er() is kept only for
                        // its side effect on the engine state so later
                        // fields see the same random stream — confirm
                        // this dead store is intentional.
                        x = er() % (2 * N);
                        x = i;
                        i++;
                    }
                }
                insert_cols(data);
                break;
            }
            case engine::DataType::INT32: {
                vector<int> data(N);
                for (auto& x : data) {
                    x = er() % (2 * N);
                }
                insert_cols(data);
                break;
            }
            case engine::DataType::FLOAT: {
                vector<float> data(N);
                for (auto& x : data) {
                    x = distr(er);
                }
                insert_cols(data);
                break;
            }
            case engine::DataType::DOUBLE: {
                vector<double> data(N);
                for (auto& x : data) {
                    x = distr(er);
                }
                insert_cols(data);
                break;
            }
            default: {
                throw std::runtime_error("unimplemented");
            }
        }
        ++offset;
    }
    GeneratedData res;
    res.cols_ = std::move(cols);
    // Use int64_t to match N's type and avoid overflow for huge N.
    for (int64_t i = 0; i < N; ++i) {
        res.row_ids_.push_back(i);
        res.timestamps_.push_back(i);
    }
    //    std::shuffle(res.row_ids_.begin(), res.row_ids_.end(), er);
    res.generate_rows(N, schema);
    // Plain return: "return std::move(res)" disabled NRVO/copy elision
    // (clang's -Wpessimizing-move).
    return res;
}
// Build a PlaceholderGroup with `num_queries` random float vectors of the
// given dimension, drawn from N(0,1) with a fixed default seed.
inline auto
CreatePlaceholderGroup(int64_t num_queries, int dim, int64_t seed = 42) {
    namespace ser = milvus::proto::milvus;
    ser::PlaceholderGroup raw_group;
    auto placeholder = raw_group.add_placeholders();
    placeholder->set_tag("$0");
    placeholder->set_type(ser::PlaceholderType::FloatVector);
    std::normal_distribution<double> gauss(0, 1);
    std::default_random_engine engine(seed);
    for (int64_t q = 0; q < num_queries; ++q) {
        std::vector<float> query(dim);
        for (auto& elem : query) {
            elem = gauss(engine);
        }
        placeholder->add_values(query.data(), query.size() * sizeof(float));
    }
    return raw_group;
}
// Build a PlaceholderGroup with `num_queries` float vectors copied from a
// caller-provided contiguous blob of num_queries * dim floats.
inline auto
CreatePlaceholderGroupFromBlob(int64_t num_queries, int dim, const float* src) {
    namespace ser = milvus::proto::milvus;
    ser::PlaceholderGroup raw_group;
    auto placeholder = raw_group.add_placeholders();
    placeholder->set_tag("$0");
    placeholder->set_type(ser::PlaceholderType::FloatVector);
    for (int64_t q = 0; q < num_queries; ++q) {
        // copy the q-th consecutive dim-sized slice of the blob
        std::vector<float> query(src + q * dim, src + (q + 1) * dim);
        placeholder->add_values(query.data(), query.size() * sizeof(float));
    }
    return raw_group;
}
// Build a PlaceholderGroup with `num_queries` random binary vectors of
// `dim` bits (dim must be a multiple of 8), using a fixed default seed.
inline auto
CreateBinaryPlaceholderGroup(int64_t num_queries, int64_t dim, int64_t seed = 42) {
    assert(dim % 8 == 0);
    namespace ser = milvus::proto::milvus;
    ser::PlaceholderGroup raw_group;
    auto placeholder = raw_group.add_placeholders();
    placeholder->set_tag("$0");
    placeholder->set_type(ser::PlaceholderType::BinaryVector);
    std::default_random_engine engine(seed);
    const int64_t bytes_per_query = dim / 8;
    for (int64_t q = 0; q < num_queries; ++q) {
        std::vector<uint8_t> query(bytes_per_query);
        for (auto& byte : query) {
            byte = engine();
        }
        placeholder->add_values(query.data(), query.size());
    }
    return raw_group;
}
// Build a PlaceholderGroup with `num_queries` binary vectors of `dim` bits
// (dim must be a multiple of 8), read sequentially from a caller blob.
inline auto
CreateBinaryPlaceholderGroupFromBlob(int64_t num_queries, int64_t dim, const uint8_t* ptr) {
    assert(dim % 8 == 0);
    namespace ser = milvus::proto::milvus;
    ser::PlaceholderGroup raw_group;
    auto placeholder = raw_group.add_placeholders();
    placeholder->set_tag("$0");
    placeholder->set_type(ser::PlaceholderType::BinaryVector);
    const int64_t bytes_per_query = dim / 8;
    for (int64_t q = 0; q < num_queries; ++q) {
        // append the next dim/8 bytes straight from the blob
        placeholder->add_values(ptr, bytes_per_query);
        ptr += bytes_per_query;
    }
    return raw_group;
}
// Render a SearchResult as JSON: one list per query, each entry formatted
// as "<segment offset>-><distance>" for the top-k hits.
inline json
SearchResultToJson(const SearchResult& sr) {
    const int64_t num_queries = sr.num_queries_;
    const int64_t topk = sr.topk_;
    std::vector<std::vector<std::string>> results;
    results.reserve(num_queries);
    for (int64_t q = 0; q < num_queries; ++q) {
        std::vector<std::string> per_query;
        per_query.reserve(topk);
        for (int64_t k = 0; k < topk; ++k) {
            const auto idx = q * topk + k;
            per_query.emplace_back(std::to_string(sr.internal_seg_offsets_[idx]) + "->" +
                                   std::to_string(sr.result_distances_[idx]));
        }
        results.emplace_back(std::move(per_query));
    }
    return json{results};
}
// Load a generated dataset into a sealed segment: first the reserved RowId
// (field id 0) and Timestamp (field id 1) system fields, then every schema
// field's column in schema order.
inline void
SealedLoader(const GeneratedData& dataset, SegmentSealed& seg) {
    // TODO
    auto row_count = dataset.row_ids_.size();
    // Fill a LoadFieldDataInfo and hand it to the segment.
    auto load_field = [&seg](auto field_id, const auto* blob, auto count) {
        LoadFieldDataInfo info;
        info.field_id = field_id;
        info.blob = blob;
        info.row_count = count;
        seg.LoadFieldData(info);
    };
    load_field(0, dataset.row_ids_.data(), dataset.row_ids_.size());  // RowId
    load_field(1, dataset.timestamps_.data(), dataset.timestamps_.size());  // Timestamp
    int field_offset = 0;
    for (auto& meta : seg.get_schema().get_fields()) {
        load_field(meta.get_id().get(), dataset.cols_[field_offset].data(), row_count);
        ++field_offset;
    }
}
// Create a sealed segment, populate it from `dataset`, and attach the
// prebuilt vector index described by `index_info`.
inline std::unique_ptr<SegmentSealed>
SealedCreator(SchemaPtr schema, const GeneratedData& dataset, const LoadIndexInfo& index_info) {
    auto sealed_segment = CreateSealedSegment(schema);
    SealedLoader(dataset, *sealed_segment);
    sealed_segment->LoadIndex(index_info);
    return sealed_segment;
}
// Train an IVF (L2) knowhere index over N dim-dimensional float vectors
// and add the vectors without explicit ids.
inline knowhere::VecIndexPtr
GenIndexing(int64_t N, int64_t dim, const float* vec) {
    // {knowhere::IndexParams::nprobe, 10},
    auto config = knowhere::Config{{knowhere::meta::DIM, dim},
                                   {knowhere::IndexParams::nlist, 1024},
                                   {knowhere::Metric::TYPE, milvus::knowhere::Metric::L2},
                                   {knowhere::meta::DEVICEID, 0}};
    auto dataset = knowhere::GenDataset(N, dim, vec);
    auto index = std::make_shared<knowhere::IVF>();
    index->Train(dataset, config);
    index->AddWithoutIds(dataset, config);
    return index;
}
} // namespace milvus::segcore
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.