source stringlengths 3 92 | c stringlengths 26 2.25M |
|---|---|
graphReConstruction.c | /*
Copyright 2007, 2008 Daniel Zerbino (zerbino@ebi.ac.uk)
This file is part of Velvet.
Velvet is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
Velvet is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Velvet; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <limits.h>
#include <sys/time.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "globals.h"
#include "graph.h"
#include "passageMarker.h"
#include "readSet.h"
#include "tightString.h"
#include "recycleBin.h"
#include "utility.h"
#include "kmer.h"
#include "kmerOccurenceTable.h"
#include "roadMap.h"
#define ADENINE 0
#define CYTOSINE 1
#define GUANINE 2
#define THYMINE 3
//////////////////////////////////////////////////////////
// Node Locking
//////////////////////////////////////////////////////////
#ifdef _OPENMP
/* Array of per-node locks */
static omp_lock_t *nodeLocks = NULL;
/* Allocates and initialises one OpenMP lock per graph node (slot 0 unused;
 * locks are indexed by absolute node ID, hence nodeCount + 1 slots).
 * Any previously allocated lock array is released first.
 * NOTE(review): the old locks are free()d without omp_destroy_lock() —
 * harmless on common runtimes but technically leaks lock resources. */
static void
createNodeLocks(Graph *graph)
{
	IDnum nbNodes;
	IDnum nodeIndex;
	nbNodes = nodeCount(graph) + 1;
	if (nodeLocks)
		free (nodeLocks);
	nodeLocks = mallocOrExit(nbNodes, omp_lock_t);
	/* Lock initialisation is independent per slot, so it can run in parallel */
#pragma omp parallel for
	for (nodeIndex = 0; nodeIndex < nbNodes; nodeIndex++)
		omp_init_lock(nodeLocks + nodeIndex);
}
/* Acquires the per-node lock; twin nodes (negative IDs) share the lock of
 * their positive counterpart, so the ID is made absolute first. */
static inline void lockNode(Node *node)
{
	IDnum nodeID = getNodeID(node);
	if (nodeID < 0)
		nodeID = -nodeID;
	omp_set_lock (nodeLocks + nodeID);
}
/* Assumes node is already locked */
/* Acquires node2's lock while node's lock is already held.
 * Deadlocks are avoided by always ending up with locks held in ascending
 * absolute-ID order:
 *  - if node's ID is the lower one, taking node2's lock directly preserves
 *    the ordering;
 *  - otherwise we try-lock node2; on failure we release node's lock and
 *    re-acquire both in ascending order.
 * Assumes node is already locked. */
static inline void lockTwoNodes(Node *node, Node *node2)
{
	IDnum nodeID = getNodeID(node);
	IDnum node2ID = getNodeID(node2);
	if (nodeID < 0)
		nodeID = -nodeID;
	if (node2ID < 0)
		node2ID = -node2ID;
	/* Same underlying lock: nothing more to take */
	if (nodeID == node2ID)
		return;
	/* Lock lowest ID first to avoid deadlocks */
	if (nodeID < node2ID)
	{
		omp_set_lock (nodeLocks + node2ID);
	}
	else if (!omp_test_lock (nodeLocks + node2ID))
	{
		omp_unset_lock (nodeLocks + nodeID);
		omp_set_lock (nodeLocks + node2ID);
		omp_set_lock (nodeLocks + nodeID);
	}
}
/* Releases the locks of both nodes (only one unlock if they share a lock,
 * i.e. are twins or the same node). */
static inline void unLockTwoNodes(Node *node, Node *node2)
{
	IDnum nodeID = getNodeID(node);
	IDnum node2ID = getNodeID(node2);
	if (nodeID < 0)
		nodeID = -nodeID;
	if (node2ID < 0)
		node2ID = -node2ID;
	omp_unset_lock (nodeLocks + nodeID);
	if (nodeID != node2ID)
		omp_unset_lock (nodeLocks + node2ID);
}
/* Releases the per-node lock (shared between a node and its twin). */
static inline void unLockNode(Node *node)
{
	IDnum nodeID = getNodeID(node);
	if (nodeID < 0)
		nodeID = -nodeID;
	omp_unset_lock (nodeLocks + nodeID);
}
#endif
//////////////////////////////////////////////////////////
// Node Lists
//////////////////////////////////////////////////////////
typedef struct smallNodeList_st SmallNodeList;
struct smallNodeList_st {
Node *node;
SmallNodeList *next;
} ATTRIBUTE_PACKED;
static RecycleBin *smallNodeListMemory = NULL;
#define BLOCKSIZE 1000
#ifdef _OPENMP
/* Lazily creates one recycle bin per OpenMP thread for SmallNodeList cells.
 * The critical section guards against concurrent first-time initialisation. */
static void initSmallNodeListMemory(void)
{
	int n = omp_get_max_threads();
#pragma omp critical
	{
		if (smallNodeListMemory == NULL)
			smallNodeListMemory = newRecycleBinArray(n, sizeof(SmallNodeList), BLOCKSIZE);
	}
}
#endif
/* Returns a fresh SmallNodeList cell from the recycle bin.
 * OpenMP builds use the current thread's private bin (must have been set up
 * by initSmallNodeListMemory); serial builds lazily create a single bin. */
static SmallNodeList *allocateSmallNodeList()
{
#ifdef _OPENMP
#ifdef DEBUG
	if (smallNodeListMemory == NULL)
	{
		velvetLog("The memory for small nodes seems uninitialised, "
			  "this is probably a bug, aborting.\n");
		abort();
	}
#endif
	return allocatePointer(getRecycleBinInArray(smallNodeListMemory,
						    omp_get_thread_num()));
#else
	if (smallNodeListMemory == NULL)
		smallNodeListMemory = newRecycleBin(sizeof(SmallNodeList), BLOCKSIZE);
	return (SmallNodeList*)allocatePointer(smallNodeListMemory);
#endif
}
/* Returns a SmallNodeList cell to its recycle bin (the calling thread's bin
 * in OpenMP builds — cells must be freed by the thread that allocated them). */
static void deallocateSmallNodeList(SmallNodeList * smallNodeList)
{
#ifdef _OPENMP
	deallocatePointer(getRecycleBinInArray(smallNodeListMemory,
					       omp_get_thread_num()),
			  smallNodeList);
#else
	deallocatePointer(smallNodeListMemory, smallNodeList);
#endif
}
/* Destroys the SmallNodeList recycle bin(s) and resets the global pointer.
 * (Function name keeps its historical "Memmory" spelling — it is referenced
 * elsewhere in this file.) */
static void destroySmallNodeListMemmory(void)
{
	if (smallNodeListMemory != NULL)
	{
#ifdef _OPENMP
		destroyRecycleBinArray(smallNodeListMemory);
#else
		destroyRecycleBin(smallNodeListMemory);
#endif
		smallNodeListMemory = NULL;
	}
}
/* Pushes node onto the caller's pile of already-visited nodes.
 * Serial builds additionally flag the node's status bit so membership can be
 * tested in O(1); OpenMP builds cannot (the status bit is shared state), so
 * membership is instead found by scanning the pile (see isNodeMemorized). */
static inline void memorizeNode(Node * node, SmallNodeList ** nodePile)
{
	SmallNodeList *list = allocateSmallNodeList();
	list->node = node;
	list->next = *nodePile;
	*nodePile = list;
#ifndef _OPENMP
	setSingleNodeStatus(node, true);
#endif
}
/* Tests whether node was already memorized on this pile.
 * OpenMP builds do a linear scan of the (per-read, thread-private) pile;
 * serial builds read the node's status flag set by memorizeNode. */
static inline boolean isNodeMemorized(Node * node, SmallNodeList * nodePile)
{
#ifdef _OPENMP
	/* SF TODO There must be a faster way to do this: bit mask, hash table, tree, ... ? */
	SmallNodeList * list;
	for (list = nodePile; list; list = list->next)
		if (list->node == node)
			return true;
	return false;
#else
	return getNodeStatus(node);
#endif
}
/* Empties the pile, recycling each cell and (in serial builds) clearing the
 * status flag set by memorizeNode so nodes can be memorized again later. */
static void unMemorizeNodes(SmallNodeList ** nodePile)
{
	SmallNodeList * list;
	while (*nodePile) {
		list = *nodePile;
		*nodePile = list->next;
#ifndef _OPENMP
		setSingleNodeStatus(list->node, false);
#endif
		deallocateSmallNodeList(list);
	}
}
///////////////////////////////////////////////////////////
// Reference Mappings
///////////////////////////////////////////////////////////
typedef struct referenceMapping_st ReferenceMapping;
struct referenceMapping_st {
IDnum referenceStart;
IDnum nodeStart;
IDnum length;
IDnum referenceID;
IDnum nodeID;
} ATTRIBUTE_PACKED;
static IDnum countMappings(char * preGraphFilename) {
FILE *file = fopen(preGraphFilename, "r");
const int maxline = MAXLINE;
char line[MAXLINE];
IDnum count = 0;
// Go past NODE blocks
while(fgets(line, maxline, file))
if (line[0] == 'S')
break;
// Count relevant lines
while(fgets(line, maxline, file))
if (line[0] != 'S')
count++;
fclose(file);
return count;
}
/* Parses the mapping section of a pregraph file into a freshly allocated
 * array of arrayLength ReferenceMapping records (caller frees).
 * Precondition: arrayLength == countMappings(preGraphFilename) > 0, which
 * guarantees the file contains at least one "SEQ" header before the first
 * mapping line. Each "SEQ\t<id>" line switches the current reference ID;
 * every other line is "<nodeID>\t<nodeStart>\t<refStart>\t<length>". */
static ReferenceMapping * recordReferenceMappings(char * preGraphFilename, IDnum arrayLength) {
	ReferenceMapping * mappings = callocOrExit(arrayLength, ReferenceMapping);
	FILE *file = fopen(preGraphFilename, "r");
	const int maxline = MAXLINE;
	char line[MAXLINE];
	ReferenceMapping * current = mappings;
	IDnum referenceID;
	long long_var;
	long long coord1, coord2, coord3;

	/* Bug fix: fopen() was unchecked; a vanished file would crash in
	 * fgets() below instead of producing a diagnostic. */
	if (file == NULL)
		exitErrorf(EXIT_FAILURE, true, "Could not open %s", preGraphFilename);

	// Go past NODE blocks to the first SEQ header
	while(fgets(line, maxline, file))
		if (line[0] == 'S')
			break;
	sscanf(line, "SEQ\t%li\n", &long_var);
	referenceID = long_var;

	// Record mapping lines, tracking the current reference ID
	while(fgets(line, maxline, file)) {
		if (line[0] != 'S') {
			sscanf(line, "%li\t%lli\t%lli\t%lli\n", &long_var, &coord1, &coord2, &coord3);
			current->referenceID = referenceID;
			current->nodeID = long_var;
			current->nodeStart = coord1;
			current->referenceStart = coord2;
			current->length = coord3;
			current++;
		} else {
			sscanf(line, "SEQ\t%li\n", &long_var);
			referenceID = long_var;
		}
	}
	fclose(file);
	return mappings;
}
/* qsort()/bsearch-style comparator for ReferenceMapping records.
 * Orders primarily by reference ID; within the same reference, two records
 * compare equal whenever their [referenceStart, referenceStart + length)
 * intervals overlap, which lets findReferenceMapping binary-search for the
 * mapping covering a single coordinate (a record of length 1). */
static int compareRefMaps(const void * ptrA, const void * ptrB) {
	const ReferenceMapping * mapA = (const ReferenceMapping *) ptrA;
	const ReferenceMapping * mapB = (const ReferenceMapping *) ptrB;

	if (mapA->referenceID != mapB->referenceID)
		return mapA->referenceID > mapB->referenceID ? 1 : -1;

	/* Same reference: compare the coordinate intervals */
	if (mapA->referenceStart >= mapB->referenceStart + mapB->length)
		return 1;
	if (mapA->referenceStart + mapA->length <= mapB->referenceStart)
		return -1;
	return 0;
}
/* Builds the sorted array of reference→node mappings for a pregraph file.
 * Side effects: sets *referenceCount to the number of leading REFERENCE
 * reads (category 2*CATEGORIES+2 — reference reads always come first in the
 * read set) and *referenceMappingLength to the mapping count.
 * Returns NULL when there are no reference reads or no mappings; otherwise
 * an array sorted with compareRefMaps, owned by the caller.
 * NOTE(review): assumes *referenceCount was initialised to 0 by the caller. */
static ReferenceMapping * computeReferenceMappings(char * preGraphFilename, ReadSet * reads, Coordinate * referenceMappingLength, IDnum * referenceCount) {
	IDnum index;
	ReferenceMapping * referenceMappings;
	for(index = 0; index < reads->readCount && reads->categories[index] == 2 * CATEGORIES + 2; index++)
		(*referenceCount)++;
	if (*referenceCount == 0) {
		*referenceMappingLength = 0;
		return NULL;
	}
	*referenceMappingLength = countMappings(preGraphFilename);
	if (*referenceMappingLength == 0)
		return NULL;
	referenceMappings = recordReferenceMappings(preGraphFilename, *referenceMappingLength);
	// Sort so findReferenceMapping can binary-search by (referenceID, interval)
	qsort(referenceMappings, *referenceMappingLength, sizeof(ReferenceMapping), compareRefMaps);
	return referenceMappings;
}
/* Binary-searches the sorted mapping array for the record whose reference
 * interval covers coordinate refCoord on reference |seqID|.
 * The probe is a length-1 mapping, so compareRefMaps returns 0 exactly when
 * a stored interval contains refCoord. Returns NULL if no interval matches.
 * NOTE(review): assumes referenceMappingCount >= 1 (callers only invoke this
 * when mappings exist); a count of 0 would index out of bounds. */
static ReferenceMapping * findReferenceMapping(IDnum seqID, Coordinate refCoord, ReferenceMapping * referenceMappings, Coordinate referenceMappingCount) {
	IDnum positive_seqID;
	Coordinate leftIndex = 0;
	Coordinate rightIndex = referenceMappingCount - 1;
	Coordinate middleIndex;
	ReferenceMapping refMap;
	int comparison;
	if (seqID > 0)
		positive_seqID = seqID;
	else
		positive_seqID = -seqID;
	// Build a length-1 probe mapping at (reference, coordinate)
	refMap.referenceID = positive_seqID;
	refMap.referenceStart = refCoord;
	refMap.length = 1;
	refMap.nodeStart = 0;
	refMap.nodeID = 0;
	// Check both endpoints first: the loop below never tests them directly
	if (compareRefMaps(&(referenceMappings[leftIndex]), &refMap) == 0)
		return &(referenceMappings[leftIndex]);
	if (compareRefMaps(&(referenceMappings[rightIndex]), &refMap) == 0)
		return &(referenceMappings[rightIndex]);
	// Classic bisection; terminates when the bracket can no longer shrink
	while (true) {
		middleIndex = (rightIndex + leftIndex) / 2;
		comparison = compareRefMaps(&(referenceMappings[middleIndex]), &refMap);
		if (leftIndex >= rightIndex)
			return NULL;
		else if (comparison == 0)
			return &(referenceMappings[middleIndex]);
		else if (leftIndex == middleIndex)
			return NULL;
		else if (comparison > 0)
			rightIndex = middleIndex;
		else
			leftIndex = middleIndex;
	}
}
///////////////////////////////////////////////////////////
// Node Mask
///////////////////////////////////////////////////////////
typedef struct nodeMask_st NodeMask;
struct nodeMask_st {
IDnum nodeID;
IDnum start;
IDnum finish;
} ATTRIBUTE_PACKED;
/* qsort() comparator for NodeMask records: ascending by node ID, then by
 * start coordinate within the same node. */
static int compareNodeMasks(const void * ptrA, const void * ptrB) {
	const NodeMask * maskA = (const NodeMask *) ptrA;
	const NodeMask * maskB = (const NodeMask *) ptrB;

	if (maskA->nodeID != maskB->nodeID)
		return maskA->nodeID < maskB->nodeID ? -1 : 1;
	if (maskA->start != maskB->start)
		return maskA->start < maskB->start ? -1 : 1;
	return 0;
}
/* Converts reference mappings into node-coordinate masks: for each mapping,
 * records the half-open [nodeStart, nodeStart + length) interval on the
 * absolute node ID. The result is sorted by (nodeID, start) for the linear
 * sweep in referenceGraphKmers. Returns NULL when there are no mappings;
 * otherwise a caller-owned array of arrayLength entries.
 * (The graph parameter is currently unused but kept for interface parity.) */
static NodeMask * computeNodeMasks(ReferenceMapping * referenceMappings, Coordinate arrayLength, Graph * graph) {
	NodeMask * nodeMasks;
	Coordinate index;

	if (referenceMappings == NULL)
		return NULL;

	nodeMasks = callocOrExit(arrayLength, NodeMask);
	for (index = 0; index < arrayLength; index++) {
		ReferenceMapping * mapping = referenceMappings + index;
		NodeMask * mask = nodeMasks + index;

		// Masks live on the positive strand: take the absolute node ID
		mask->nodeID = mapping->nodeID > 0 ? mapping->nodeID : -mapping->nodeID;
		mask->start = mapping->nodeStart;
		mask->finish = mapping->nodeStart + mapping->length;
	}
	qsort(nodeMasks, arrayLength, sizeof(NodeMask), compareNodeMasks);
	return nodeMasks;
}
///////////////////////////////////////////////////////////
// Process
///////////////////////////////////////////////////////////
/* Builds the k-mer → (node, position) occurrence table from the pregraph
 * file, in two passes:
 *   pass 1 counts k-mers (one per node position, minus masked positions)
 *          so the table can be allocated exactly;
 *   pass 2 re-reads the node sequences and records each unmasked k-mer.
 * In double-strand mode the canonical (lexicographically smaller) of the
 * k-mer and its reverse complement is stored, with a negated node ID and
 * mirrored position for the reverse strand.
 * nodeMasks must be sorted by (nodeID, start) — see computeNodeMasks —
 * and is swept linearly alongside the node scan. */
static KmerOccurenceTable *referenceGraphKmers(char *preGraphFilename,
short int accelerationBits, Graph * graph, boolean double_strand, NodeMask * nodeMasks, Coordinate nodeMaskCount)
{
	FILE *file = fopen(preGraphFilename, "r");
	const int maxline = MAXLINE;
	char line[MAXLINE];
	char c;
	int wordLength;
	Coordinate lineLength, kmerCount;
	Kmer word;
	Kmer antiWord;
	KmerOccurenceTable *kmerTable;
	IDnum index;
	IDnum nodeID = 0;
	Nucleotide nucleotide;
	NodeMask * nodeMask = nodeMasks;
	Coordinate nodeMaskIndex = 0;
	if (file == NULL)
		exitErrorf(EXIT_FAILURE, true, "Could not open %s", preGraphFilename);
	// Count kmers
	velvetLog("Scanning pre-graph file %s for k-mers\n",
		  preGraphFilename);
	// First line: header carries the word (k-mer) length as its third field
	if (!fgets(line, maxline, file))
		exitErrorf(EXIT_FAILURE, true, "PreGraph file incomplete");
	sscanf(line, "%*i\t%*i\t%i\n", &wordLength);
	kmerTable = newKmerOccurenceTable(accelerationBits, wordLength);
	// Read nodes
	if (!fgets(line, maxline, file))
		exitErrorf(EXIT_FAILURE, true, "PreGraph file incomplete");
	kmerCount = 0;
	// Pass 1: each NODE sequence of length L contributes L - k + 1 k-mers
	while (line[0] == 'N') {
		lineLength = 0;
		while ((c = getc(file)) != EOF && c != '\n')
			lineLength++;
		kmerCount += lineLength - wordLength + 1;
		if (fgets(line, maxline, file) == NULL)
			break;
	}
	velvetLog("%li kmers found\n", (long) kmerCount);
	// Masked (reference-covered) positions will not be recorded
	for(nodeMaskIndex = 0; nodeMaskIndex < nodeMaskCount; nodeMaskIndex++) {
		kmerCount -= nodeMasks[nodeMaskIndex].finish -
			nodeMasks[nodeMaskIndex].start;
	}
	nodeMaskIndex = 0;
	fclose(file);
	// Create table
	allocateKmerOccurences(kmerCount, kmerTable);
	// Fill table: pass 2 re-opens the file and replays the same scan
	file = fopen(preGraphFilename, "r");
	if (file == NULL)
		exitErrorf(EXIT_FAILURE, true, "Could not open %s", preGraphFilename);
	if (!fgets(line, maxline, file))
		exitErrorf(EXIT_FAILURE, true, "PreGraph file incomplete");
	// Read nodes
	if (!fgets(line, maxline, file))
		exitErrorf(EXIT_FAILURE, true, "PreGraph file incomplete");
	while (line[0] == 'N') {
		nodeID++;
		// Fill in the initial word : first k-1 nucleotides prime the window
		clearKmer(&word);
		clearKmer(&antiWord);
		for (index = 0; index < wordLength - 1; index++) {
			c = getc(file);
			if (c == 'A')
				nucleotide = ADENINE;
			else if (c == 'C')
				nucleotide = CYTOSINE;
			else if (c == 'G')
				nucleotide = GUANINE;
			else if (c == 'T')
				nucleotide = THYMINE;
			else if (c == '\n')
				exitErrorf(EXIT_FAILURE, true, "PreGraph file incomplete");
			else
				nucleotide = ADENINE; // unknown bases treated as A
			pushNucleotide(&word, nucleotide);
			if (double_strand) {
#ifdef COLOR
				reversePushNucleotide(&antiWord, nucleotide);
#else
				reversePushNucleotide(&antiWord, 3 - nucleotide);
#endif
			}
		}
		// Scan through node, sliding the k-mer window one base at a time
		index = 0;
		while((c = getc(file)) != '\n' && c != EOF) {
			if (c == 'A')
				nucleotide = ADENINE;
			else if (c == 'C')
				nucleotide = CYTOSINE;
			else if (c == 'G')
				nucleotide = GUANINE;
			else if (c == 'T')
				nucleotide = THYMINE;
			else
				nucleotide = ADENINE;
			pushNucleotide(&word, nucleotide);
			if (double_strand) {
#ifdef COLOR
				reversePushNucleotide(&antiWord, nucleotide);
#else
				reversePushNucleotide(&antiWord, 3 - nucleotide);
#endif
			}
			// Update mask if necessary: advance past masks fully behind us
			if (nodeMask) {
				if (nodeMask->nodeID < nodeID || (nodeMask->nodeID == nodeID && index >= nodeMask->finish)) {
					if (++nodeMaskIndex == nodeMaskCount)
						nodeMask = NULL;
					else
						nodeMask++;
				}
			}
			// Check if not masked!
			if (nodeMask) {
				if (nodeMask->nodeID == nodeID && index >= nodeMask->start && index < nodeMask->finish) {
					index++;
					continue;
				}
			}
			// Record the canonical strand of the k-mer
			if (!double_strand || compareKmers(&word, &antiWord) <= 0)
				recordKmerOccurence(&word, nodeID, index, kmerTable);
			else
				recordKmerOccurence(&antiWord, -nodeID, getNodeLength(getNodeInGraph(graph, nodeID)) - 1 - index, kmerTable);
			index++;
		}
		if (fgets(line, maxline, file) == NULL)
			break;
	}
	fclose(file);
	// Sort table
	sortKmerOccurenceTable(kmerTable);
	return kmerTable;
}
/* "Ghost" pass: walks a read's k-mers through the graph WITHOUT creating
 * arcs or passage markers — it only increments the read-start count of each
 * distinct node the read touches, so that per-node read-start arrays can be
 * sized before the real threading pass.
 * Only applies to tracked or paired short reads; long-category reads and
 * untracked unpaired reads return immediately.
 * Nodes are resolved either via the reference mappings (when the annotation
 * points into a reference sequence) or by k-mer table lookup. The walk stops
 * early once the read falls off the graph after having been on it. */
static void ghostThreadSequenceThroughGraph(TightString * tString,
					    KmerOccurenceTable *
					    kmerTable, Graph * graph,
					    IDnum seqID, Category category,
					    boolean readTracking,
					    boolean double_strand,
					    ReferenceMapping * referenceMappings,
					    Coordinate referenceMappingCount,
					    IDnum refCount,
					    Annotation * annotations,
					    IDnum annotationCount,
					    boolean second_in_pair)
{
	Kmer word;
	Kmer antiWord;
	Coordinate readNucleotideIndex;
	KmerOccurence *kmerOccurence;
	int wordLength = getWordLength(graph);
	Nucleotide nucleotide;
	IDnum refID;
	Coordinate refCoord;
	ReferenceMapping * refMap = NULL;
	Coordinate uniqueIndex = 0;
	Coordinate annotIndex = 0;
	IDnum annotCount = 0;
	boolean reversed;
	SmallNodeList * nodePile = NULL;
	Annotation * annotation = annotations;
	Node *node;
	Node *previousNode = NULL;
	// Neglect any read which will not be short paired
	if ((!readTracking && category % 2 == 0)
	    || category / 2 >= CATEGORIES)
		return;
	// Neglect any string shorter than WORDLENGTH :
	if (getLength(tString) < wordLength)
		return;
	// Verify that all short reads are reasonnably short
	if (getLength(tString) > USHRT_MAX) {
		/* Bug fix: the message used to print SHRT_MAX although the
		 * guard above tests against USHRT_MAX — report the actual
		 * limit that was exceeded. */
		velvetLog("Short read of length %lli, longer than limit %i\n",
			  (long long) getLength(tString), USHRT_MAX);
		velvetLog("You should better declare this sequence as long, because it genuinely is!\n");
		exit(1);
	}
	clearKmer(&word);
	clearKmer(&antiWord);
	// Fill in the initial word : prime the window with the first k-1 bases
	for (readNucleotideIndex = 0;
	     readNucleotideIndex < wordLength - 1; readNucleotideIndex++) {
		nucleotide = getNucleotide(readNucleotideIndex, tString);
		pushNucleotide(&word, nucleotide);
		if (double_strand || second_in_pair) {
#ifdef COLOR
			reversePushNucleotide(&antiWord, nucleotide);
#else
			reversePushNucleotide(&antiWord, 3 - nucleotide);
#endif
		}
	}
	// Go through sequence
	while (readNucleotideIndex < getLength(tString)) {
		// Shift word:
		nucleotide = getNucleotide(readNucleotideIndex++, tString);
		pushNucleotide(&word, nucleotide);
		if (double_strand || second_in_pair) {
#ifdef COLOR
			reversePushNucleotide(&antiWord, nucleotide);
#else
			reversePushNucleotide(&antiWord, 3 - nucleotide);
#endif
		}
		// Update annotation if necessary
		if (annotCount < annotationCount && annotIndex == getAnnotationLength(annotation)) {
			annotation = getNextAnnotation(annotation);
			annotCount++;
			annotIndex = 0;
		}
		// Search for reference mapping: annotation points into a reference read
		if (annotCount < annotationCount && uniqueIndex >= getPosition(annotation) && getAnnotSequenceID(annotation) <= refCount && getAnnotSequenceID(annotation) >= -refCount) {
			refID = getAnnotSequenceID(annotation);
			if (refID > 0)
				refCoord = getStart(annotation) + annotIndex;
			else
				refCoord = getStart(annotation) - annotIndex;
			refMap = findReferenceMapping(refID, refCoord, referenceMappings, referenceMappingCount);
			// If success
			if (refMap) {
				if (refID > 0)
					node = getNodeInGraph(graph, refMap->nodeID);
				else
					node = getNodeInGraph(graph, -refMap->nodeID);
			} else {
				node = NULL;
				// Read left the graph after being on it: stop
				if (previousNode)
					break;
			}
		}
		// if not.. look in table
		else {
			reversed = false;
			if (double_strand) {
				// Canonical strand lookup
				if (compareKmers(&word, &antiWord) <= 0) {
					kmerOccurence =
					findKmerInKmerOccurenceTable(&word,
								     kmerTable);
				} else {
					kmerOccurence =
					findKmerInKmerOccurenceTable(&antiWord,
								     kmerTable);
					reversed = true;
				}
			} else {
				if (!second_in_pair) {
					kmerOccurence =
					findKmerInKmerOccurenceTable(&word,
								     kmerTable);
				} else {
					kmerOccurence =
					findKmerInKmerOccurenceTable(&antiWord,
								     kmerTable);
					reversed = true;
				}
			}
			if (kmerOccurence) {
				if (!reversed)
					node = getNodeInGraph(graph, getKmerOccurenceNodeID(kmerOccurence));
				else
					node = getNodeInGraph(graph, -getKmerOccurenceNodeID(kmerOccurence));
			} else {
				node = NULL;
				if (previousNode)
					break;
			}
		}
		// Advance either along the annotation or along unique sequence
		if (annotCount < annotationCount && uniqueIndex >= getPosition(annotation))
			annotIndex++;
		else
			uniqueIndex++;
		previousNode = node;
		// Fill in graph: count each distinct node once per read
		if (node && !isNodeMemorized(node, nodePile))
		{
#ifdef _OPENMP
			lockNode(node);
#endif
			incrementReadStartCount(node, graph);
#ifdef _OPENMP
			unLockNode(node);
#endif
			memorizeNode(node, &nodePile);
		}
	}
	unMemorizeNodes(&nodePile);
}
/* Main threading pass: maps every k-mer of a read onto a graph node and
 * coordinate, then updates the graph accordingly:
 *   - consecutive hits on the same node extend the current passage marker
 *     (long reads) or bump virtual coverage (short reads);
 *   - a jump to a new node creates a marker / read start and an arc from
 *     the previous node.
 * Node resolution, in priority order: direct reference mapping (REFERENCE
 * reads), annotation-guided reference mapping, then k-mer table lookup.
 * As in the ghost pass, the walk aborts once the read falls off the graph
 * after having been on it. Locking mirrors the ghost pass for OpenMP. */
static void threadSequenceThroughGraph(TightString * tString,
				       KmerOccurenceTable * kmerTable,
				       Graph * graph,
				       IDnum seqID, Category category,
				       boolean readTracking,
				       boolean double_strand,
				       ReferenceMapping * referenceMappings,
				       Coordinate referenceMappingCount,
				       IDnum refCount,
				       Annotation * annotations,
				       IDnum annotationCount,
				       boolean second_in_pair)
{
	Kmer word;
	Kmer antiWord;
	Coordinate readNucleotideIndex;
	Coordinate kmerIndex;
	KmerOccurence *kmerOccurence;
	int wordLength = getWordLength(graph);
	PassageMarkerI marker = NULL_IDX;
	PassageMarkerI previousMarker = NULL_IDX;
	Node *node = NULL;
	Node *previousNode = NULL;
	Coordinate coord = 0;
	Coordinate previousCoord = 0;
	Nucleotide nucleotide;
	boolean reversed;
	IDnum refID;
	Coordinate refCoord = 0;
	ReferenceMapping * refMap;
	Annotation * annotation = annotations;
	Coordinate index = 0;
	Coordinate uniqueIndex = 0;
	Coordinate annotIndex = 0;
	IDnum annotCount = 0;
	SmallNodeList * nodePile = NULL;
	// Neglect any string shorter than WORDLENGTH :
	if (getLength(tString) < wordLength)
		return;
	clearKmer(&word);
	clearKmer(&antiWord);
	// Fill in the initial word : prime the window with the first k-1 bases
	for (readNucleotideIndex = 0;
	     readNucleotideIndex < wordLength - 1; readNucleotideIndex++) {
		nucleotide = getNucleotide(readNucleotideIndex, tString);
		pushNucleotide(&word, nucleotide);
		if (double_strand || second_in_pair) {
#ifdef COLOR
			reversePushNucleotide(&antiWord, nucleotide);
#else
			reversePushNucleotide(&antiWord, 3 - nucleotide);
#endif
		}
	}
	// Go through sequence
	while (readNucleotideIndex < getLength(tString)) {
		nucleotide = getNucleotide(readNucleotideIndex++, tString);
		pushNucleotide(&word, nucleotide);
		if (double_strand || second_in_pair) {
#ifdef COLOR
			reversePushNucleotide(&antiWord, nucleotide);
#else
			reversePushNucleotide(&antiWord, 3 - nucleotide);
#endif
		}
		// Update annotation if necessary
		if (annotCount < annotationCount && annotIndex == getAnnotationLength(annotation)) {
			annotation = getNextAnnotation(annotation);
			annotCount++;
			annotIndex = 0;
		}
		// Search for reference mapping (the read IS a reference sequence)
		if (category == REFERENCE) {
			if (referenceMappings)
				refMap = findReferenceMapping(seqID, index, referenceMappings, referenceMappingCount);
			else
				refMap = NULL;
			if (refMap) {
				node = getNodeInGraph(graph, refMap->nodeID);
				if (refMap->nodeID > 0) {
					coord = refMap->nodeStart + (index - refMap->referenceStart);
				} else {
					// Mapping on the twin: mirror the coordinate
					coord = getNodeLength(node) - refMap->nodeStart - refMap->length + (index - refMap->referenceStart);
				}
			} else {
				node = NULL;
				if (previousNode)
					break;
			}
		}
		// Search for reference-based mapping (annotation points into a reference)
		else if (annotCount < annotationCount && uniqueIndex >= getPosition(annotation) && getAnnotSequenceID(annotation) <= refCount && getAnnotSequenceID(annotation) >= -refCount) {
			refID = getAnnotSequenceID(annotation);
			if (refID > 0)
				refCoord = getStart(annotation) + annotIndex;
			else
				refCoord = getStart(annotation) - annotIndex;
			refMap = findReferenceMapping(refID, refCoord, referenceMappings, referenceMappingCount);
			// If success: four sign combinations of (refID, nodeID)
			if (refMap) {
				if (refID > 0) {
					node = getNodeInGraph(graph, refMap->nodeID);
					if (refMap->nodeID > 0) {
						coord = refMap->nodeStart + (refCoord - refMap->referenceStart);
					} else {
						coord = getNodeLength(node) - refMap->nodeStart - refMap->length + (refCoord - refMap->referenceStart);
					}
				} else {
					node = getNodeInGraph(graph, -refMap->nodeID);
					if (refMap->nodeID > 0) {
						coord = getNodeLength(node) - refMap->nodeStart - (refCoord - refMap->referenceStart) - 1;
					} else {
						coord = refMap->nodeStart + refMap->length - (refCoord - refMap->referenceStart) - 1;
					}
				}
			} else {
				node = NULL;
				if (previousNode)
					break;
			}
		}
		// Search in table
		else {
			reversed = false;
			if (double_strand) {
				// Canonical strand lookup
				if (compareKmers(&word, &antiWord) <= 0) {
					kmerOccurence =
					findKmerInKmerOccurenceTable(&word,
								     kmerTable);
				} else {
					kmerOccurence =
					findKmerInKmerOccurenceTable(&antiWord,
								     kmerTable);
					reversed = true;
				}
			} else {
				if (!second_in_pair) {
					kmerOccurence =
					findKmerInKmerOccurenceTable(&word,
								     kmerTable);
				} else {
					kmerOccurence =
					findKmerInKmerOccurenceTable(&antiWord,
								     kmerTable);
					reversed = true;
				}
			}
			if (kmerOccurence) {
				if (!reversed) {
					node = getNodeInGraph(graph, getKmerOccurenceNodeID(kmerOccurence));
					coord = getKmerOccurencePosition(kmerOccurence);
				} else {
					node = getNodeInGraph(graph, -getKmerOccurenceNodeID(kmerOccurence));
					coord = getNodeLength(node) - getKmerOccurencePosition(kmerOccurence) - 1;
				}
			} else {
				node = NULL;
				if (previousNode)
					break;
			}
		}
		// Increment positions
		if (annotCount < annotationCount && uniqueIndex >= getPosition(annotation))
			annotIndex++;
		else
			uniqueIndex++;
		// Fill in graph
		if (node)
		{
#ifdef _OPENMP
			lockNode(node);
#endif
			kmerIndex = readNucleotideIndex - wordLength;
			if (previousNode == node
			    && previousCoord == coord - 1) {
				// Contiguous hit on the same node: extend
				if (category / 2 >= CATEGORIES) {
					setPassageMarkerFinish(marker,
							       kmerIndex +
							       1);
					setFinishOffset(marker,
							getNodeLength(node)
							- coord - 1);
				} else {
#ifndef SINGLE_COV_CAT
					incrementVirtualCoverage(node, category / 2, 1);
					incrementOriginalVirtualCoverage(node, category / 2, 1);
#else
					incrementVirtualCoverage(node, 1);
#endif
				}
#ifdef _OPENMP
				unLockNode(node);
#endif
			} else {
				// Jumped onto a (new) node: start marker / read start
				if (category / 2 >= CATEGORIES) {
					marker =
					    newPassageMarker(seqID,
							     kmerIndex,
							     kmerIndex + 1,
							     coord,
							     getNodeLength
							     (node) -
							     coord - 1);
					transposePassageMarker(marker,
							       node);
					connectPassageMarkers
					    (previousMarker, marker,
					     graph);
					previousMarker = marker;
				} else {
					if (readTracking) {
						if (!isNodeMemorized(node, nodePile)) {
							addReadStart(node,
								     seqID,
								     coord,
								     graph,
								     kmerIndex);
							memorizeNode(node, &nodePile);
						} else {
							// Re-entry into a node: blur its last read start
							blurLastShortReadMarker
							    (node, graph);
						}
					}
#ifndef SINGLE_COV_CAT
					incrementVirtualCoverage(node, category / 2, 1);
					incrementOriginalVirtualCoverage(node, category / 2, 1);
#else
					incrementVirtualCoverage(node, 1);
#endif
				}
				// NOTE(review): lockTwoNodes assumes node is already locked (it is, above)
#ifdef _OPENMP
				lockTwoNodes(node, previousNode);
#endif
				createArc(previousNode, node, graph);
#ifdef _OPENMP
				unLockTwoNodes(node, previousNode);
#endif
			}
			previousNode = node;
			previousCoord = coord;
		}
		index++;
	}
	if (readTracking && category / 2 < CATEGORIES)
		unMemorizeNodes(&nodePile);
}
/* Drives the two threading passes over all reads:
 *   1. ghost pass (short paired/tracked reads only, skipping reference
 *      reads) to count read starts per node;
 *   2. real pass over every read to build markers, read starts, coverage
 *      and arcs.
 * When reference mappings exist, per-read annotations are loaded from the
 * roadmap file and addressed through the annotationOffset prefix-sum array.
 * Consumes kmerTable (destroyed on exit). OpenMP builds parallelise both
 * loops and set up the node locks / per-thread allocators beforehand. */
static void fillUpGraph(ReadSet * reads,
			KmerOccurenceTable * kmerTable,
			Graph * graph,
			boolean readTracking,
			boolean double_strand,
			ReferenceMapping * referenceMappings,
			Coordinate referenceMappingCount,
			IDnum refCount,
			char * roadmapFilename)
{
	IDnum readIndex;
	RoadMapArray *roadmap = NULL;
	Coordinate *annotationOffset = NULL;
	struct timeval start, end, diff;
	if (referenceMappings)
	{
		roadmap = importRoadMapArray(roadmapFilename);
		// annotationOffset[i] = total annotations of reads 0..i-1 ([0] stays 0 from calloc)
		annotationOffset = callocOrExit(reads->readCount, Coordinate);
		for (readIndex = 1; readIndex < reads->readCount; readIndex++)
			annotationOffset[readIndex] = annotationOffset[readIndex - 1]
						      + getAnnotationCount(getRoadMapInArray(roadmap, readIndex - 1));
	}
	resetNodeStatus(graph);
	// Allocate memory for the read pairs
	if (!readStartsAreActivated(graph))
		activateReadStarts(graph);
	gettimeofday(&start, NULL);
#ifdef _OPENMP
	initSmallNodeListMemory();
	createNodeLocks(graph);
#pragma omp parallel for
#endif
	// Pass 1: ghost threading (reference reads at the front are skipped)
	for (readIndex = refCount; readIndex < reads->readCount; readIndex++)
	{
		Annotation * annotations = NULL;
		IDnum annotationCount = 0;
		Category category;
		boolean second_in_pair;
		if (readIndex % 1000000 == 0)
			velvetLog("Ghost Threading through reads %ld / %ld\n",
				  (long) readIndex, (long) reads->readCount);
		category = reads->categories[readIndex];
		second_in_pair = reads->categories[readIndex] & 1 && isSecondInPair(reads, readIndex);
		if (referenceMappings)
		{
			annotationCount = getAnnotationCount(getRoadMapInArray(roadmap, readIndex));
			annotations = getAnnotationInArray(roadmap->annotations, annotationOffset[readIndex]);
		}
		ghostThreadSequenceThroughGraph(getTightStringInArray(reads->tSequences, readIndex),
						kmerTable,
						graph, readIndex + 1,
						category,
						readTracking, double_strand,
						referenceMappings, referenceMappingCount,
						refCount, annotations, annotationCount,
						second_in_pair);
	}
	// Size the per-node read-start arrays from the counts gathered above
	createNodeReadStartArrays(graph);
	gettimeofday(&end, NULL);
	timersub(&end, &start, &diff);
	velvetLog(" === Ghost-Threaded in %ld.%06ld s\n", diff.tv_sec, diff.tv_usec);
	gettimeofday(&start, NULL);
#ifdef _OPENMP
	// Cap thread count; NOTE(review): presumably to limit lock contention — unverified here
	int threads = omp_get_max_threads();
	if (threads > 32)
		threads = 32;
#pragma omp parallel for num_threads(threads)
#endif
	// Pass 2: real threading over every read (including references)
	for (readIndex = 0; readIndex < reads->readCount; readIndex++)
	{
		Annotation * annotations = NULL;
		IDnum annotationCount = 0;
		Category category;
		boolean second_in_pair;
		if (readIndex % 1000000 == 0)
			velvetLog("Threading through reads %li / %li\n",
				  (long) readIndex, (long) reads->readCount);
		category = reads->categories[readIndex];
		second_in_pair = reads->categories[readIndex] % 2 && isSecondInPair(reads, readIndex);
		if (referenceMappings)
		{
			annotationCount = getAnnotationCount(getRoadMapInArray(roadmap, readIndex));
			annotations = getAnnotationInArray(roadmap->annotations, annotationOffset[readIndex]);
		}
		threadSequenceThroughGraph(getTightStringInArray(reads->tSequences, readIndex),
					   kmerTable,
					   graph, readIndex + 1, category,
					   readTracking, double_strand,
					   referenceMappings, referenceMappingCount,
					   refCount, annotations, annotationCount, second_in_pair);
	}
	gettimeofday(&end, NULL);
	timersub(&end, &start, &diff);
	velvetLog(" === Threaded in %ld.%06ld s\n", diff.tv_sec, diff.tv_usec);
#ifdef _OPENMP
	free(nodeLocks);
	nodeLocks = NULL;
#endif
	if (referenceMappings)
	{
		destroyRoadMapArray(roadmap);
		free (annotationOffset);
	}
	orderNodeReadStartArrays(graph);
	destroySmallNodeListMemmory();
	destroyKmerOccurenceTable(kmerTable);
}
/* Entry point of this module: rebuilds a full Graph from a pregraph file.
 * Steps: read the pregraph, extract reference→node mappings (if reference
 * reads are present), derive node masks from them, index all unmasked
 * k-mers, then thread every read through the graph (fillUpGraph).
 * Returns the populated graph; an empty pregraph is returned as-is. */
Graph *importPreGraph(char *preGraphFilename, ReadSet * reads, char * roadmapFilename,
		      boolean readTracking, short int accelerationBits)
{
	boolean double_strand = false;
	Graph *graph = readPreGraphFile(preGraphFilename, &double_strand);
	Coordinate referenceMappingCount = 0;
	IDnum referenceCount = 0;
	if (nodeCount(graph) == 0)
		return graph;
	// If necessary compile reference -> node
	ReferenceMapping * referenceMappings = computeReferenceMappings(preGraphFilename, reads, &referenceMappingCount, &referenceCount);
	// Node -> reference maps
	NodeMask * nodeMasks = computeNodeMasks(referenceMappings, referenceMappingCount, graph);
	// Map k-mers to nodes
	KmerOccurenceTable *kmerTable =
	    referenceGraphKmers(preGraphFilename, accelerationBits, graph, double_strand, nodeMasks, referenceMappingCount);
	free(nodeMasks);
	// Map sequences -> kmers -> nodes (consumes kmerTable)
	fillUpGraph(reads, kmerTable, graph, readTracking, double_strand, referenceMappings, referenceMappingCount, referenceCount, roadmapFilename);
	free(referenceMappings);
	return graph;
}
|
config_thread_limit.c | /*******************************************************************************
* Copyright 2012-2016 Intel Corporation All Rights Reserved.
*
* The source code, information and material ("Material") contained herein is
* owned by Intel Corporation or its suppliers or licensors, and title to such
* Material remains with Intel Corporation or its suppliers or licensors. The
* Material contains proprietary information of Intel or its suppliers and
* licensors. The Material is protected by worldwide copyright laws and treaty
* provisions. No part of the Material may be used, copied, reproduced,
* modified, published, uploaded, posted, transmitted, distributed or disclosed
* in any way without Intel's prior express written permission. No license under
* any patent, copyright or other intellectual property rights in the Material
* is granted to or conferred upon you, either expressly, by implication,
* inducement, estoppel or otherwise. Any license under such intellectual
* property rights must be express and approved by Intel in writing.
*
* Unless otherwise agreed by Intel in writing, you may not remove or alter this
* notice or any other notice embedded in Materials by Intel or Intel's
* suppliers or licensors in any way.
*******************************************************************************/
/*
! Content:
! An example of using DFTI_THREAD_LIMIT configuration parameter.
! The parameter specifies maximum number of OpenMP threads FFT can use.
!
! Values:
! 0 (default) = use number of threads specified by
! mkl_[domain_]set_num_threads()
! Any positive integer N = use not more than N threads
!
!****************************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <float.h>
#include "mkl_dfti.h"
#include "mkl_service.h"
#if defined(_OPENMP)
#include <omp.h>
#endif
#define REAL double
typedef struct { REAL real,imag; } COMPLEX;
static void init(COMPLEX *x, int N1, int N2, int N3, int H1, int H2, int H3);
static int verify(COMPLEX *x, int N1, int N2, int N3, int H1, int H2, int H3);
/* Define the format to printf MKL_LONG values */
#if !defined(MKL_ILP64)
#define LI "%li"
#else
#define LI "%lli"
#endif
/*
 * Run one 3D in-place complex FFT subtest with a DFTI_THREAD_LIMIT.
 *
 * tid      - id of the calling thread (used only in log messages)
 * tlimit   - value set for DFTI_THREAD_LIMIT (max OpenMP threads the FFT may use)
 * N1,N2,N3 - sizes of the 3D transform
 * H1,H2,H3 - harmonic used by init()/verify() to check the result
 *
 * Returns 0 on success, 1 on any failure (descriptor, allocation,
 * compute or verification error).
 */
int run_dft
(
int tid, /* Id of this thread */
int tlimit, /* Thread limit */
int N1, int N2, int N3, /* Sizes of 3D transform */
int H1, int H2, int H3 /* Arbitrary harmonic used to verify FFT */
)
{
/* Execution status */
MKL_LONG status = 0;
/* Pointer to input/output data */
COMPLEX *x = 0;
DFTI_DESCRIPTOR_HANDLE hand = 0;
printf("Thread %i: 3D in-place FFT on %i threads\n", tid, tlimit);
printf("Thread %i: Create DFTI descriptor for %ix%ix%i FFT\n", tid, N1,N2,N3);
{
MKL_LONG N[3]; N[0] = N1; N[1] = N2; N[2] = N3;
status = DftiCreateDescriptor(&hand,
sizeof(REAL)==sizeof(float)
? DFTI_SINGLE : DFTI_DOUBLE,
DFTI_COMPLEX, 3, N);
if (0 != status) goto failed;
}
printf("Thread %i: Set thread limit %i\n", tid, tlimit);
status = DftiSetValue(hand, DFTI_THREAD_LIMIT, tlimit);
if (0 != status) goto failed;
/* If tlimit > 1 check if we linked with sequential MKL */
if (tlimit > 1)
{
/* Get thread limit of uncommitted descriptor */
MKL_LONG tl;
status = DftiGetValue(hand, DFTI_THREAD_LIMIT, &tl);
if (0 != status) goto failed;
/* tl == 1 here indicates the sequential MKL library was linked */
printf("Thread %i: uncommitted descriptor thread limit %i %s\n",
tid, (int)tl, tl==1 ? "(sequential MKL)" : "");
}
printf("Thread %i: commit descriptor\n", tid);
status = DftiCommitDescriptor(hand);
if (0 != status) goto failed;
/* Get thread limit of committed descriptor */
{
MKL_LONG tl;
status = DftiGetValue(hand, DFTI_THREAD_LIMIT, &tl);
if (0 != status) goto failed;
printf("Thread %i: committed descriptor thread limit %i\n", tid, (int)tl);
}
printf("Thread %i: Allocate input/output array\n", tid);
/* 64-byte aligned allocation, as recommended for MKL FFT performance */
x = (COMPLEX*)mkl_malloc(N1*N2*N3 * sizeof(COMPLEX), 64);
if (0 == x) goto failed;
printf("Thread %i: Initialize input\n", tid);
init(x, N1, N2, N3, H1, H2, H3);
printf("Thread %i: Compute forward transform\n", tid);
status = DftiComputeForward(hand, x);
if (0 != status) goto failed;
printf("Thread %i: Verify the result\n", tid);
status = verify(x, N1, N2, N3, H1, H2, H3);
if (0 != status) goto failed;
cleanup:
/* Common exit path: DftiFreeDescriptor/mkl_free are safe to call on the
* initial 0/NULL values, so this runs for both success and failure. */
printf("Thread %i: Free DFTI descriptor\n", tid);
DftiFreeDescriptor(&hand);
printf("Thread %i: Free data array\n", tid);
mkl_free(x);
printf("Thread %i: Subtest %s\n", tid, 0==status ? "Passed" : "Failed");
return status;
failed:
/* Normalize any MKL error (or the allocation failure, status still 0) to 1 */
printf("Thread %i: ERROR, status = "LI"\n", tid, status);
status = 1;
goto cleanup;
}
/*
 * Driver: run two FFT subtests concurrently (when OpenMP is available),
 * each with a different DFTI_THREAD_LIMIT, and report overall PASS/FAIL.
 * Returns 0 on success, non-zero if any subtest failed.
 */
int main()
{
/* Number of parallel user threads */
#if defined(_OPENMP)
int NUT = 2;
#endif
int failed = 0;
char version[DFTI_VERSION_LENGTH];
/* Enable nested parallel OpenMP sections (maybe oversubscribed) */
#if defined(_OPENMP)
omp_set_nested(1);
omp_set_dynamic(0);
#endif
/* Enable threading of MKL called from OpenMP parallel sections */
MKL_Set_Dynamic(0);
DftiGetValue(0, DFTI_VERSION, version);
printf("%s\n", version);
printf("Example config_thread_limit\n");
#if defined(_OPENMP)
printf("Run parallel FFTs on %i parallel threads\n",NUT);
#pragma omp parallel num_threads(NUT)
#else
printf("Run parallel FFT on a single thread\n");
#endif
{
/* Two threads running DFT on different number of threads */
int err;
#if defined(_OPENMP)
int me = omp_get_thread_num();
int team = omp_get_num_threads();
#else
int me = 0;
int team = 1;
#endif
if (me == 0)
printf("Thread %i: parallel team is %i threads\n",me,team);
if (me)
{
err = run_dft(me, 2, 100,200,300, -1,-2,-3);
}
else
{
err = run_dft(me, 3, 300,100,200, -1,-2,-3);
}
if (err)
{
/* 'failed' is shared by the whole parallel team: write it atomically
* to avoid a data race (the pragma is ignored in a serial build). */
#pragma omp atomic write
failed = err;
}
}
printf("TEST %s\n",failed ? "FAILED" : "PASSED");
return failed;
}
/* Exact value of (K*L) mod M: the product is formed in 64-bit arithmetic
 * so it never overflows for int operands, then reduced and returned as a
 * double (which holds any |value| < M exactly). */
static double moda(int K, int L, int M)
{
    long long product = (long long)K * L;
    return (double)(product % M);
}
/* Fill x (N1 x N2 x N3, row-major) with the complex exponential of harmonic
 * (H1,H2,H3), scaled by 1/(N1*N2*N3) so the forward FFT yields a unit peak
 * at index (H1,H2,H3) modulo the transform sizes. */
static void init(COMPLEX *x, int N1, int N2, int N3, int H1, int H2, int H3)
{
    const double TWOPI = 6.2831853071795864769;
    /* Row-major strides */
    const int S1 = N2*N3, S2 = N3, S3 = 1;
    int i1, i2, i3;

    for (i1 = 0; i1 < N1; i1++)
    {
        /* Per-axis phase contributions, accumulated in the same order as
         * before: (p1 + p2) + p3 */
        double p1 = moda(i1, H1, N1) / N1;
        for (i2 = 0; i2 < N2; i2++)
        {
            double p12 = p1 + moda(i2, H2, N2) / N2;
            for (i3 = 0; i3 < N3; i3++)
            {
                double phase = p12 + moda(i3, H3, N3) / N3;
                int idx = i1*S1 + i2*S2 + i3*S3;
                x[idx].real = cos( TWOPI * phase ) / (N1*N2*N3);
                x[idx].imag = sin( TWOPI * phase ) / (N1*N2*N3);
            }
        }
    }
}
/* Check that x contains (within round-off) a single unit peak at harmonic
 * (H1,H2,H3) and zeros elsewhere. Returns 0 on success, 1 on the first
 * element exceeding the error threshold. */
static int verify(COMPLEX *x, int N1, int N2, int N3, int H1, int H2, int H3)
{
    /* Row-major strides */
    const int S1 = N2*N3, S2 = N3, S3 = 1;
    double errthr, maxerr = 0;
    int i1, i2, i3;

    /*
     * Note, this simple error bound doesn't take into account error of
     * input data
     */
    errthr = 5.0 * log( (double)N1*N2*N3 ) / log(2.0)
        * (sizeof(REAL)==sizeof(float) ? FLT_EPSILON : DBL_EPSILON);
    printf(" Verify the result, errthr = %.3lg\n", errthr);

    for (i1 = 0; i1 < N1; i1++)
    {
        for (i2 = 0; i2 < N2; i2++)
        {
            for (i3 = 0; i3 < N3; i3++)
            {
                const int idx = i1*S1 + i2*S2 + i3*S3;
                const double re_got = x[idx].real;
                const double im_got = x[idx].imag;
                /* Expected: 1 at the (wrapped) harmonic, 0 everywhere else */
                const int is_peak =
                    (i1-H1)%N1==0 && (i2-H2)%N2==0 && (i3-H3)%N3==0;
                const double re_exp = is_peak ? 1.0 : 0.0;
                const double im_exp = 0.0;
                const double err =
                    fabs(re_got - re_exp) + fabs(im_got - im_exp);
                if (err > maxerr) maxerr = err;
                if (!(err <= errthr))
                {
                    printf(" x[%i][%i][%i]: ",i1,i2,i3);
                    printf(" expected (%.17lg,%.17lg), ",re_exp,im_exp);
                    printf(" got (%.17lg,%.17lg), ",re_got,im_got);
                    printf(" err %.3lg\n", err);
                    printf(" Verification FAILED\n");
                    return 1;
                }
            }
        }
    }
    printf(" Verified, maximum error was %.3lg\n", maxerr);
    return 0;
}
|
data.c | #include "../mesh.h"
#include "../params.h"
#include "../shared.h"
#include "../umesh.h"
#include "../shared_data.h"
#include "shared.h"
#include <math.h>
#include <stdlib.h>
// Allocates a double precision array of `len` elements, zero-initialises it
// and returns the number of bytes allocated (0 if len == 0).
size_t allocate_data(double** buf, size_t len) {
if(len == 0) {
return 0;
}
#ifdef RAJA_USE_CUDA
#ifdef CUDA_MANAGED_MEM
gpu_check(cudaMallocManaged((void**)buf, sizeof(double) * len));
#else
gpu_check(cudaMalloc((void**)buf, sizeof(double) * len));
#endif // CUDA_MANAGED_MEM
// Local copy so the device lambda captures the raw pointer by value
double* local_buf = *buf;
RAJA::forall<exec_policy>(RAJA::RangeSegment(0, len), [=] RAJA_DEVICE (int i) {
local_buf[i] = 0.0;
});
#else
allocate_host_data(buf, len);
#endif // RAJA_USE_CUDA
return sizeof(double) * len;
}
// Allocates a single precision array of `len` elements, zero-initialises it
// and returns the number of bytes allocated (0 if len == 0).
size_t allocate_float_data(float** buf, size_t len) {
  if (len == 0) {
    return 0;
  }
#ifdef RAJA_USE_CUDA
#ifdef CUDA_MANAGED_MEM
  // Fixed: this previously allocated sizeof(double) per element, doubling
  // the memory footprint of a float array and misreporting the byte count.
  gpu_check(cudaMallocManaged((void**)buf, sizeof(float) * len));
#else
  gpu_check(cudaMalloc((void**)buf, sizeof(float) * len));
#endif // CUDA_MANAGED_MEM
  // Local copy so the device lambda captures the raw pointer by value
  float* local_buf = *buf;
  RAJA::forall<exec_policy>(RAJA::RangeSegment(0, len), [=] RAJA_DEVICE (int i) {
    local_buf[i] = 0.0f;
  });
#else
  allocate_host_float_data(buf, len);
#endif // RAJA_USE_CUDA
  return sizeof(float) * len;
}
// Allocates a 32-bit integer array of `len` elements, zero-initialises it
// and returns the number of bytes allocated (0 if len == 0).
size_t allocate_int_data(int** buf, size_t len) {
if(len == 0) {
return 0;
}
#ifdef RAJA_USE_CUDA
#ifdef CUDA_MANAGED_MEM
gpu_check(cudaMallocManaged((void**)buf, sizeof(int) * len));
#else
gpu_check(cudaMalloc((void**)buf, sizeof(int) * len));
#endif // CUDA_MANAGED_MEM
// Local copy so the device lambda captures the raw pointer by value
int* local_buf = *buf;
RAJA::forall<exec_policy>(RAJA::RangeSegment(0, len), [=] RAJA_DEVICE (int i) {
local_buf[i] = 0;
});
#else
allocate_host_int_data(buf, len);
#endif // RAJA_USE_CUDA
return sizeof(int) * len;
}
// Allocates a 64-bit integer array of `len` elements, zero-initialises it
// and returns the number of bytes allocated (0 if len == 0).
size_t allocate_uint64_data(uint64_t** buf, const size_t len) {
if(len == 0) {
return 0;
}
#ifdef RAJA_USE_CUDA
#ifdef CUDA_MANAGED_MEM
gpu_check(cudaMallocManaged((void**)buf, sizeof(uint64_t) * len));
#else
gpu_check(cudaMalloc((void**)buf, sizeof(uint64_t) * len));
#endif // CUDA_MANAGED_MEM
#else
#ifdef INTEL
*buf = (uint64_t*)_mm_malloc(sizeof(uint64_t) * len, VEC_ALIGN);
#else
*buf = (uint64_t*)malloc(sizeof(uint64_t) * len);
#endif // INTEL
if (*buf == NULL) {
TERMINATE("Failed to allocate a data array.\n");
}
#endif // RAJA_USE_CUDA
// NOTE(review): unlike the other allocators, this zero-fill goes through
// RAJA::forall for the host allocation path as well -- confirm exec_policy
// is valid for host memory in non-CUDA builds.
uint64_t* local_buf = *buf;
RAJA::forall<exec_policy>(RAJA::RangeSegment(0, len), [=] RAJA_DEVICE (int i) {
local_buf[i] = 0;
});
return sizeof(uint64_t) * len;
}
// Allocates a complex double array. Unsupported in this backend: TERMINATE
// is expected to abort, so the missing return value is never reached
// (NOTE(review): confirm TERMINATE does not return).
size_t allocate_complex_double_data(_Complex double** buf, const size_t len) {
TERMINATE("Not implemented\n");
}
// Allocates a zero-initialised host array of `len` doubles into *buf.
// Terminates the run if the allocation fails.
void allocate_host_data(double** buf, size_t len) {
#ifdef INTEL
  *buf = (double*)_mm_malloc(sizeof(double) * len, VEC_ALIGN);
#else
  *buf = (double*)malloc(sizeof(double) * len);
#endif // INTEL
  if (*buf == NULL) {
    TERMINATE("Failed to allocate a data array.\n");
  }
  // size_t index so lengths above INT_MAX are handled correctly (the old
  // `int i` compared a signed index against a size_t); unsigned loop
  // variables are supported since OpenMP 3.0.
#pragma omp parallel for
  for (size_t i = 0; i < len; ++i) {
    (*buf)[i] = 0;
  }
}
// Allocates a zero-initialised host array of `len` ints into *buf.
// Terminates the run if the allocation fails.
void allocate_host_int_data(int** buf, size_t len) {
#ifdef INTEL
  *buf = (int*)_mm_malloc(sizeof(int) * len, VEC_ALIGN);
#else
  *buf = (int*)malloc(sizeof(int) * len);
#endif // INTEL
  if (*buf == NULL) {
    TERMINATE("Failed to allocate a data array.\n");
  }
  // size_t index so lengths above INT_MAX are handled correctly
  // (OpenMP 3.0+ supports unsigned loop variables).
#pragma omp parallel for
  for (size_t i = 0; i < len; ++i) {
    (*buf)[i] = 0;
  }
}
// Allocates a zero-initialised host array of `len` floats into *buf.
// Terminates the run if the allocation fails.
void allocate_host_float_data(float** buf, size_t len) {
#ifdef INTEL
  *buf = (float*)_mm_malloc(sizeof(float) * len, VEC_ALIGN);
#else
  *buf = (float*)malloc(sizeof(float) * len);
#endif // INTEL
  if (*buf == NULL) {
    TERMINATE("Failed to allocate a data array.\n");
  }
  // size_t index so lengths above INT_MAX are handled correctly
  // (OpenMP 3.0+ supports unsigned loop variables).
#pragma omp parallel for
  for (size_t i = 0; i < len; ++i) {
    (*buf)[i] = 0;
  }
}
// Deallocate a double array created by allocate_data: device memory under
// RAJA_USE_CUDA, otherwise host memory from _mm_malloc/malloc.
void deallocate_data(double* buf) {
#ifdef RAJA_USE_CUDA
gpu_check(cudaFree(buf));
#else
#ifdef INTEL
_mm_free(buf);
#else
free(buf);
#endif
#endif // RAJA_USE CUDA
}
// Deallocates a float array created by allocate_float_data: device memory
// under RAJA_USE_CUDA, otherwise host memory from _mm_malloc/malloc.
void deallocate_float_data(float* buf) {
#ifdef RAJA_USE_CUDA
gpu_check(cudaFree(buf));
#else
#ifdef INTEL
_mm_free(buf);
#else
free(buf);
#endif
#endif // RAJA_USE CUDA
}
// Deallocation of host data. Host buffers come from _mm_malloc/malloc in
// every build configuration (see allocate_host_data), so they must be
// released unconditionally; previously the free was compiled in only when
// RAJA_USE_CUDA was defined, leaking host memory in non-CUDA builds.
void deallocate_host_data(double* buf) {
#ifdef INTEL
  _mm_free(buf);
#else
  free(buf);
#endif
}
// Deallocates a 32-bit integer array created by allocate_int_data: device
// memory under RAJA_USE_CUDA, otherwise host memory from _mm_malloc/malloc.
void deallocate_int_data(int* buf) {
#ifdef RAJA_USE_CUDA
gpu_check(cudaFree(buf));
#else
#ifdef INTEL
_mm_free(buf);
#else
free(buf);
#endif
#endif // RAJA_USE CUDA
}
// Deallocates a 64-bit integer array created by allocate_uint64_data:
// device memory under RAJA_USE_CUDA, otherwise host memory.
void deallocate_uint64_t_data(uint64_t* buf) {
#ifdef RAJA_USE_CUDA
gpu_check(cudaFree(buf));
#else
#ifdef INTEL
_mm_free(buf);
#else
free(buf);
#endif
#endif // RAJA_USE CUDA
}
// Deallocates complex double data: device memory under RAJA_USE_CUDA,
// otherwise host memory from _mm_malloc/malloc.
void deallocate_complex_double_data(_Complex double* buf) {
#ifdef RAJA_USE_CUDA
gpu_check(cudaFree(buf));
#else
#ifdef INTEL
_mm_free(buf);
#else
free(buf);
#endif
#endif // RAJA_USE CUDA
}
// Deallocates a host-side 32-bit integer array (the previous comment,
// "Allocates a data array", was wrong).
void deallocate_host_int_data(int* buf) {
#ifdef INTEL
_mm_free(buf);
#else
free(buf);
#endif
}
// Under RAJA_USE_CUDA, copies `len` doubles between host and device
// (send != 0 means host->device, otherwise device->host); without CUDA it
// just swaps the two pointers on the host.
void copy_buffer(const size_t len, double** src, double** dst, int send) {
#ifdef RAJA_USE_CUDA
if (send) {
gpu_check(
cudaMemcpy(*dst, *src, sizeof(double) * len, cudaMemcpyHostToDevice));
} else {
gpu_check(
cudaMemcpy(*dst, *src, sizeof(double) * len, cudaMemcpyDeviceToHost));
}
gpu_check(cudaDeviceSynchronize());
#else
double* temp = *src;
*src = *dst;
*dst = temp;
#endif
}
// Under RAJA_USE_CUDA, copies `len` ints between host and device
// (send != 0 means host->device, otherwise device->host); without CUDA it
// just swaps the two pointers on the host.
void copy_int_buffer(const size_t len, int** src, int** dst, int send) {
#ifdef RAJA_USE_CUDA
if (send) {
gpu_check(
cudaMemcpy(*dst, *src, sizeof(int) * len, cudaMemcpyHostToDevice));
} else {
gpu_check(
cudaMemcpy(*dst, *src, sizeof(int) * len, cudaMemcpyDeviceToHost));
}
gpu_check(cudaDeviceSynchronize());
#else
int* temp = *src;
*src = *dst;
*dst = temp;
#endif
}
// Moves a host buffer onto the device: under CUDA this allocates device
// storage, copies the data across and releases the host copy; otherwise
// copy_buffer degenerates to a pointer swap.
void move_host_buffer_to_device(const size_t len, double** src, double** dst) {
#ifdef RAJA_USE_CUDA
allocate_data(dst, len);
copy_buffer(len, src, dst, SEND);
deallocate_host_data(*src);
#else
copy_buffer(len, src, dst, SEND);
#endif
}
// Initialises 2d mesh data in device specific manner: uniform rectilinear
// edge positions/widths and cell widths, with edge positions shifted by the
// subdomain offset and corrected for halo padding.
void mesh_data_init_2d(const int local_nx, const int local_ny,
const int global_nx, const int global_ny, const int pad,
const int x_off, const int y_off, const double width,
const double height, double* edgex, double* edgey,
double* edgedx, double* edgedy, double* celldx,
double* celldy) {
// Simple uniform rectilinear initialisation
RAJA::forall<exec_policy>(RAJA::RangeSegment(0, local_nx+1), [=] RAJA_DEVICE (int ii) {
edgedx[ii] = width / (global_nx);
// Note: correcting for padding
edgex[ii] = edgedx[ii] * (x_off + ii - pad);
});
RAJA::forall<exec_policy>(RAJA::RangeSegment(0, local_nx), [=] RAJA_DEVICE (int ii) {
celldx[ii] = width / (global_nx);
});
RAJA::forall<exec_policy>(RAJA::RangeSegment(0, local_ny+1), [=] RAJA_DEVICE (int ii) {
edgedy[ii] = height / (global_ny);
// Note: correcting for padding
edgey[ii] = edgedy[ii] * (y_off + ii - pad);
});
RAJA::forall<exec_policy>(RAJA::RangeSegment(0, local_ny), [=] RAJA_DEVICE (int ii) {
celldy[ii] = height / (global_ny);
});
}
// Initialises 3d mesh data in device specific manner: reuses the 2d
// initialisation for x/y and adds the z-axis edges and cells.
void mesh_data_init_3d(const int local_nx, const int local_ny,
const int local_nz, const int global_nx,
const int global_ny, const int global_nz, const int pad,
const int x_off, const int y_off, const int z_off,
const double width, const double height,
const double depth, double* edgex, double* edgey,
double* edgez, double* edgedx, double* edgedy,
double* edgedz, double* celldx, double* celldy,
double* celldz) {
// Initialise as in the 2d case
mesh_data_init_2d(local_nx, local_ny, global_nx, global_ny, pad, x_off, y_off,
width, height, edgex, edgey, edgedx, edgedy, celldx,
celldy);
// Simple uniform rectilinear initialisation
RAJA::forall<exec_policy>(RAJA::RangeSegment(0, local_nz+1), [=] RAJA_DEVICE (int ii) {
edgedz[ii] = depth / (global_nz);
// Note: correcting for padding, as in the 2d case
edgez[ii] = edgedz[ii] * (z_off + ii - pad);
});
RAJA::forall<exec_policy>(RAJA::RangeSegment(0, local_nz), [=] RAJA_DEVICE (int ii) {
celldz[ii] = depth / (global_nz);
});
}
// Initialise state data in device specific manner
void set_problem_2d(const int local_nx, const int local_ny, const int pad,
const double mesh_width, const double mesh_height,
const double* edgex, const double* edgey, const int ndims,
const char* problem_def_filename, double* density, double* energy,
double* temperature) {
int* h_keys;
int* d_keys;
allocate_int_data(&d_keys, MAX_KEYS);
allocate_host_int_data(&h_keys, MAX_KEYS);
double* h_values;
double* d_values;
allocate_data(&d_values, MAX_KEYS);
allocate_host_data(&h_values, MAX_KEYS);
int nentries = 0;
while (1) {
char specifier[MAX_STR_LEN];
char keys[MAX_STR_LEN * MAX_KEYS];
sprintf(specifier, "problem_%d", nentries++);
int nkeys = 0;
if (!get_key_value_parameter(specifier, problem_def_filename, keys,
h_values, &nkeys)) {
break;
}
// The last four keys are the bound specification
double xpos = h_values[nkeys - 4] * mesh_width;
double ypos = h_values[nkeys - 3] * mesh_height;
double width = h_values[nkeys - 2] * mesh_width;
double height = h_values[nkeys - 1] * mesh_height;
for (int kk = 0; kk < nkeys - (2 * ndims); ++kk) {
const char* key = &keys[kk * MAX_STR_LEN];
if (strmatch(key, "density")) {
h_keys[kk] = DENSITY_KEY;
} else if (strmatch(key, "energy")) {
h_keys[kk] = ENERGY_KEY;
} else if (strmatch(key, "temperature")) {
h_keys[kk] = TEMPERATURE_KEY;
} else {
TERMINATE("Found unrecognised key in %s : %s.\n", problem_def_filename,
key);
}
}
copy_int_buffer(MAX_KEYS, &h_keys, &d_keys, SEND);
copy_buffer(MAX_KEYS, &h_values, &d_values, SEND);
RAJA::forall<exec_policy>(RAJA::RangeSegment(0, local_nx*local_ny), [=] RAJA_DEVICE (int i) {
const int ii = i / local_nx;
const int jj = i % local_nx;
double global_xpos = edgex[jj];
double global_ypos = edgey[ii];
// Check we are in bounds of the problem entry
if (global_xpos >= xpos &&
global_ypos >= ypos &&
global_xpos < xpos + width &&
global_ypos < ypos + height) {
// The upper bound excludes the bounding box for the entry
for (int nn = 0; nn < nkeys - (2 * ndims); ++nn) {
const int key = d_keys[nn];
if (key == DENSITY_KEY) {
density[i] = d_values[nn];
} else if (key == ENERGY_KEY) {
energy[i] = d_values[nn];
} else if (key == TEMPERATURE_KEY) {
temperature[i] = d_values[nn];
}
}
}
});
}
deallocate_host_int_data(h_keys);
deallocate_host_data(h_values);
}
// Initialise state data in device specific manner
void set_problem_3d(const int local_nx, const int local_ny, const int local_nz,
const int pad, const double mesh_width,
const double mesh_height, const double mesh_depth,
const double* edgex, const double* edgey,
const double* edgez, const int ndims,
const char* problem_def_filename, double* density, double* energy,
double* temperature) {
int* h_keys;
int* d_keys;
double* h_values;
double* d_values;
allocate_host_int_data(&h_keys, MAX_KEYS);
allocate_host_data(&h_values, MAX_KEYS);
allocate_int_data(&d_keys, MAX_KEYS);
allocate_data(&d_values, MAX_KEYS);
int nentries = 0;
while (1) {
char specifier[MAX_STR_LEN];
char keys[MAX_STR_LEN * MAX_KEYS];
sprintf(specifier, "problem_%d", nentries++);
int nkeys = 0;
if (!get_key_value_parameter(specifier, problem_def_filename, keys, h_values,
&nkeys)) {
break;
}
// The last four keys are the bound specification
double xpos = h_values[nkeys - 6] * mesh_width;
double ypos = h_values[nkeys - 5] * mesh_height;
double zpos = h_values[nkeys - 4] * mesh_depth;
double width = h_values[nkeys - 3] * mesh_width;
double height = h_values[nkeys - 2] * mesh_height;
double depth = h_values[nkeys - 1] * mesh_depth;
for (int kk = 0; kk < nkeys - (2 * ndims); ++kk) {
const char* key = &keys[kk * MAX_STR_LEN];
if (strmatch(key, "density")) {
h_keys[kk] = DENSITY_KEY;
} else if (strmatch(key, "energy")) {
h_keys[kk] = ENERGY_KEY;
} else if (strmatch(key, "temperature")) {
h_keys[kk] = TEMPERATURE_KEY;
} else {
TERMINATE("Found unrecognised key in %s : %s.\n", problem_def_filename,
key);
}
}
copy_int_buffer(MAX_KEYS, &h_keys, &d_keys, SEND);
copy_buffer(MAX_KEYS, &h_values, &d_values, SEND);
RAJA::forall<exec_policy>(RAJA::RangeSegment(0, local_nx*local_ny*local_nz), [=] RAJA_DEVICE (int i) {
const int ii = (i / local_nx*local_ny);
const int jj = (i / local_nx) % local_ny;
const int kk = (i % local_nx);
double global_xpos = edgex[kk];
double global_ypos = edgey[jj];
double global_zpos = edgez[ii];
// Check we are in bounds of the problem entry
if (global_xpos >= xpos &&
global_ypos >= ypos &&
global_zpos >= zpos &&
global_xpos < xpos + width &&
global_ypos < ypos + height &&
global_zpos < zpos + depth) {
// The upper bound excludes the bounding box for the entry
for (int nn = 0; nn < nkeys - (2 * ndims); ++nn) {
const int key = d_keys[nn];
if (key == DENSITY_KEY) {
density[i] = d_values[nn];
} else if (key == ENERGY_KEY) {
energy[i] = d_values[nn];
} else if (key == TEMPERATURE_KEY) {
temperature[i] = d_values[nn];
}
}
}
});
}
deallocate_host_int_data(h_keys);
deallocate_host_data(h_values);
}
// Finds the normals for all boundary cells (not implemented in this
// backend; always terminates).
void find_boundary_normals(UnstructuredMesh* umesh, int* boundary_face_list) {
TERMINATE("%s not yet implemented.", __func__);
}
// Finds the normals for all 3d boundary cells (not implemented in this
// backend; always terminates).
void find_boundary_normals_3d(UnstructuredMesh* umesh,
int* boundary_face_list) {
TERMINATE("%s not yet implemented.", __func__);
}
|
a.36.1.c | /* { dg-do run } */
#include <omp.h>
#include <stdlib.h>
/* Worker stub: in a real application this would process the ipoints-element
   chunk of x assigned to thread iam; intentionally empty for this OpenMP
   conformance test. */
void
do_by_16 (float *x, int iam, int ipoints)
{
}
/* Distribute npoints of work on x across exactly 16 threads; aborts if the
   runtime does not deliver the requested team size. */
void
a36 (float *x, int npoints)
{
int iam, ipoints;
/* Dynamic adjustment off, so the runtime must honour the request below */
omp_set_dynamic (0);
omp_set_num_threads (16);
#pragma omp parallel shared(x, npoints) private(iam, ipoints)
{
if (omp_get_num_threads () != 16)
abort ();
iam = omp_get_thread_num ();
ipoints = npoints / 16;
do_by_16 (x, iam, ipoints);
}
}
/* Entry point: exercise the 16-thread example on a small scratch array
   (contents are never read, so it is left uninitialised). */
int main()
{
float points[10];

a36 (points, 10);
return 0;
}
|
GB_unop__identity_fc64_fc32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_fc64_fc32)
// op(A') function: GB (_unop_tran__identity_fc64_fc32)
// C type: GxB_FC64_t
// A type: GxB_FC32_t
// cast: GxB_FC64_t cij = GxB_CMPLX ((double) crealf (aij), (double) cimagf (aij))
// unaryop: cij = aij
#define GB_ATYPE \
GxB_FC32_t
#define GB_CTYPE \
GxB_FC64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
GxB_FC64_t z = GxB_CMPLX ((double) crealf (aij), (double) cimagf (aij)) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC64_t z = GxB_CMPLX ((double) crealf (aij), (double) cimagf (aij)) ; \
Cx [pC] = z ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_FC64 || GxB_NO_FC32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the identity unary op elementwise, casting each GxB_FC32_t entry of
// Ax to GxB_FC64_t in Cx. Handles both the dense case (Ab == NULL) and the
// bitmap case (entries with Ab [p] == 0 are skipped). Returns GrB_NO_VALUE
// when this kernel is disabled at compile time.
GrB_Info GB (_unop_apply__identity_fc64_fc32)
(
GxB_FC64_t *Cx, // Cx and Ax may be aliased
const GxB_FC32_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
// TODO: if OP is ONE and uniform-valued matrices are exploited, then
// do this in O(1) time
if (Ab == NULL)
{
// dense case: every one of the anz entries is present
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC32_t), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GxB_FC32_t aij = Ax [p] ;
GxB_FC64_t z = GxB_CMPLX ((double) crealf (aij), (double) cimagf (aij)) ;
Cx [p] = z ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
GxB_FC32_t aij = Ax [p] ;
GxB_FC64_t z = GxB_CMPLX ((double) crealf (aij), (double) cimagf (aij)) ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// Transpose A, casting GxB_FC32_t to GxB_FC64_t and applying the identity
// op; the actual loops come from the included GB_unop_transpose.c template,
// which uses the GB_* macros defined above. Returns GrB_NO_VALUE when this
// kernel is disabled at compile time.
GrB_Info GB (_unop_tran__identity_fc64_fc32)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
hybrid_pi.c | #include "mpi.h"
#include <stdio.h>
#include <math.h>
/*
 * Hybrid MPI+OpenMP midpoint-rule computation of pi.
 * Each MPI rank integrates a contiguous half-open slice of [0, num_steps);
 * within a rank the slice is shared among OpenMP threads by a worksharing
 * loop with a sum reduction. Rank 0 prints the result and elapsed time.
 */
int main( int argc, char **argv ) {
  const long int num_steps = 1000000000;
  double step, x, sum, total_sum, pi, start, stop, min_start, max_stop;
  int this_proc, num_procs;
  long int my_slice[2]; /* [first index, one-past-last index) */
  MPI_Init(&argc, &argv);
  MPI_Comm_size(MPI_COMM_WORLD, &num_procs);
  MPI_Comm_rank(MPI_COMM_WORLD, &this_proc);
  if (this_proc == 0) {
    printf("Calculating PI using:"
           " %ld slices\n"
           " %d MPI tasks\n"
           " %d OpenMP threads per MPI task\n"
           "Worker checkins:\n",
           num_steps, num_procs, omp_get_max_threads());
  }
  MPI_Barrier(MPI_COMM_WORLD);
  start = MPI_Wtime();
  sum = 0.0;
  step = (double) 1 / num_steps;
  /* Half-open slice [lo, hi). The previous code both subtracted 1 from the
   * upper bound and looped with a strict '<', silently dropping the last
   * step of every rank's slice. */
  my_slice[0] = this_proc * num_steps / num_procs;
  my_slice[1] = (this_proc + 1) * num_steps / num_procs;
  printf(" MPI task %d calculating slices [%ld-%ld)\n", this_proc, my_slice[0], my_slice[1]);
#pragma omp parallel default(shared) private(x) reduction(+:sum)
  {
    printf(" MPI task %d, OpenMP thread %d calculating automatic work allocation\n", this_proc, omp_get_thread_num());
    /* long int loop index: indices reach num_steps (1e9), uncomfortably
     * close to INT_MAX for a 32-bit counter. */
#pragma omp for
    for (long int i = my_slice[0]; i < my_slice[1]; i++) {
      /* Midpoint of interval i; the old (i - 0.5) placed the first sample
       * outside [0, 1] for these 0-based indices. */
      x = (i + 0.5) * step;
      sum += (double) 4 / (1 + x*x);
    }
  }
  MPI_Reduce(&sum, &total_sum, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
  pi = total_sum * step;
  stop = MPI_Wtime();
  MPI_Barrier(MPI_COMM_WORLD);
  MPI_Reduce(&start, &min_start, 1, MPI_DOUBLE, MPI_MIN, 0, MPI_COMM_WORLD);
  MPI_Reduce(&stop, &max_stop, 1, MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD);
  if (this_proc == 0) {
    printf("Obtained value of PI: %.15g\n", pi);
    printf("Time taken: %g seconds\n", max_stop - min_start);
  }
  MPI_Finalize();
  return 0;
}
|
FalseSharing.c | #include<stdlib.h>
#include<stdio.h>
#include<omp.h>
// Each element holds one float plus NUMPAD ints of padding (NUMPAD and NUMT
// are expected on the compiler command line); the padding controls whether
// neighbouring elements share a cache line, demonstrating false sharing.
struct s {
float value;
int pad[NUMPAD];
}Array[4];
// Times NUMT threads each hammering its own Array[i].value; with small
// NUMPAD the elements share cache lines and false sharing slows this down.
int main () {
  const int SomeBigNumber = 100000000; // keep less than 2B
  omp_set_num_threads(NUMT);
  double time0 = omp_get_wtime();
  // SomeBigNumber must be listed explicitly: in C a const local is not
  // predetermined shared, so under default(none) the original pragma was
  // rejected by the compiler. Loop indices declared in the for statements
  // are automatically private.
#pragma omp parallel for default(none) shared(Array, SomeBigNumber)
  for (int i = 0; i < 4; i++) {
    unsigned int seed = 0;
    for (int j = 0; j < SomeBigNumber; j++) {
      Array[ i ].value = Array[ i ].value + (float)rand_r(&seed);
    }
  }
  double time1 = omp_get_wtime();
  double execution_time = time1 - time0;
  printf("Execution time = %lf\n", execution_time);
  return 0;
}
|
GB_unop__ceil_fp64_fp64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__ceil_fp64_fp64)
// op(A') function: GB (_unop_tran__ceil_fp64_fp64)
// C type: double
// A type: double
// cast: double cij = aij
// unaryop: cij = ceil (aij)
#define GB_ATYPE \
double
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = ceil (x) ;
// casting
#define GB_CAST(z, aij) \
double z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
double aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
double z = aij ; \
Cx [pC] = ceil (z) ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_CEIL || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply ceil elementwise to the double entries of Ax, writing to Cx.
// Handles both the dense case (Ab == NULL) and the bitmap case (entries
// with Ab [p] == 0 are skipped). Returns GrB_NO_VALUE when this kernel is
// disabled at compile time.
GrB_Info GB (_unop_apply__ceil_fp64_fp64)
(
double *Cx, // Cx and Ax may be aliased
const double *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
// TODO: if OP is ONE and uniform-valued matrices are exploited, then
// do this in O(1) time
if (Ab == NULL)
{
// dense case: every one of the anz entries is present
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
GB_memcpy (Cx, Ax, anz * sizeof (double), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
double aij = Ax [p] ;
double z = aij ;
Cx [p] = ceil (z) ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
double aij = Ax [p] ;
double z = aij ;
Cx [p] = ceil (z) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// Transpose A while applying ceil to each double entry; the actual loops
// come from the included GB_unop_transpose.c template, which uses the GB_*
// macros defined above. Returns GrB_NO_VALUE when this kernel is disabled
// at compile time.
GrB_Info GB (_unop_tran__ceil_fp64_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
dtype_transfer.c | /*
* This file contains low-level loops for data type transfers.
* In particular the function PyArray_GetDTypeTransferFunction is
* implemented here.
*
* Copyright (c) 2010 by Mark Wiebe (mwwiebe@gmail.com)
* The University of British Columbia
*
* See LICENSE.txt for the license.
*/
#define PY_SSIZE_T_CLEAN
#include "Python.h"
#include "structmember.h"
#define NPY_NO_DEPRECATED_API NPY_API_VERSION
#define PY_ARRAY_UNIQUE_SYMBOL MICPY_ARRAY_API
#define NO_IMPORT_ARRAY
#include <numpy/arrayobject.h>
#include <numpy/npy_cpu.h>
#include "npy_pycompat.h"
#define _MICARRAYMODULE
#include "arrayobject.h"
#include "convert_datatype.h"
#include "creators.h"
//#include "_datetime.h"
//#include "datetime_strings.h"
#include "mpy_lowlevel_strided_loops.h"
#include "common.h"
#include "dtype_transfer.h"
#include "shape.h"
#include "lowlevel_strided_loops.h"
#define NPY_LOWLEVEL_BUFFER_BLOCKSIZE 128
/********** PRINTF DEBUG TRACING **************/
#define NPY_DT_DBG_TRACING 0
/* Tracing incref/decref can be very noisy */
#define NPY_DT_REF_DBG_TRACING 0
#if NPY_DT_REF_DBG_TRACING
#define NPY_DT_DBG_REFTRACE(msg, ref) \
printf("%-12s %20p %s%d%s\n", msg, ref, \
ref ? "(refcnt " : "", \
ref ? (int)ref->ob_refcnt : 0, \
ref ? ((ref->ob_refcnt <= 0) ? \
") <- BIG PROBLEM!!!!" : ")") : ""); \
fflush(stdout);
#else
#define NPY_DT_DBG_REFTRACE(msg, ref)
#endif
/**********************************************/
/*
* Returns a transfer function which DECREFs any references in src_type.
*
* Returns NPY_SUCCEED or NPY_FAIL.
*/
/*static int
get_decsrcref_transfer_function(int aligned,
npy_intp src_stride,
PyArray_Descr *src_dtype,
PyMicArray_StridedUnaryOp **out_stransfer,
NpyAuxData **out_transferdata,
int *out_needs_api);*/
/*
* Returns a transfer function which zeros out the dest values.
*
* Returns NPY_SUCCEED or NPY_FAIL.
*/
/*static int
get_setdstzero_transfer_function(int aligned,
npy_intp dst_stride,
PyArray_Descr *dst_dtype,
PyMicArray_StridedUnaryOp **out_stransfer,
NpyAuxData **out_transferdata,
int *out_needs_api);
*/
/*
* Returns a transfer function which sets a boolean type to ones.
*
* Returns NPY_SUCCEED or NPY_FAIL.
*/
/*NPY_NO_EXPORT int
get_bool_setdstone_transfer_function(npy_intp dst_stride,
PyMicArray_StridedUnaryOp **out_stransfer,
NpyAuxData **out_transferdata,
int *NPY_UNUSED(out_needs_api));*/
/*************************** DEST SETZERO *******************************/
/* Auxiliary data for the dest-setzero transfer functions: records the
 * destination element size so the strided/contiguous variants know how many
 * bytes to clear per item. */
typedef struct {
NpyAuxData base;
npy_intp dst_itemsize;
} _dst_memset_zero_data;
/* Duplicate the auxiliary data of a dest-setzero transfer function. A
 * shallow byte copy is sufficient because the struct owns no pointers. */
static NpyAuxData *_dst_memset_zero_data_clone(NpyAuxData *data)
{
    _dst_memset_zero_data *copy;

    copy = (_dst_memset_zero_data *)PyArray_malloc(
                                        sizeof(_dst_memset_zero_data));
    if (copy == NULL) {
        return NULL;
    }
    memcpy(copy, data, sizeof(*copy));
    return (NpyAuxData *)copy;
}
/* Zero out N strided destination elements on the given offload device; the
 * per-element byte count comes from the auxiliary data. The source
 * arguments are unused (this is a "null to dest" transfer). */
static void
_null_to_strided_memset_zero(void *_dst,
npy_intp dst_stride,
void *NPY_UNUSED(src), npy_intp NPY_UNUSED(src_stride),
npy_intp N, npy_intp NPY_UNUSED(src_itemsize),
NpyAuxData *data, int device)
{
_dst_memset_zero_data *d = (_dst_memset_zero_data *)data;
npy_intp dst_itemsize = d->dst_itemsize;
/* Run the strided memset loop on the offload target */
#pragma omp target device(device) map(to: N, _dst, dst_stride, dst_itemsize)
{
char *dst = (char *) _dst;
while (N--) {
memset(dst, 0, dst_itemsize);
dst += dst_stride;
}
}
}
/* Zero out N contiguous destination elements on the given offload device
 * with a single device-side memset (dst_stride equals the item size here,
 * see the selection logic in get_setdstzero_transfer_function). */
static void
_null_to_contig_memset_zero(void *dst,
npy_intp dst_stride,
void *NPY_UNUSED(src), npy_intp NPY_UNUSED(src_stride),
npy_intp N, npy_intp NPY_UNUSED(src_itemsize),
NpyAuxData *data, int device)
{
_dst_memset_zero_data *d = (_dst_memset_zero_data *)data;
npy_intp dst_itemsize = d->dst_itemsize;
target_memset(dst, 0, N*dst_itemsize, device);
}
/*
 * Select a transfer function that zeroes out destination values of type
 * dst_dtype with stride dst_stride, returning it in *out_stransfer with its
 * auxiliary data in *out_transferdata.
 *
 * Returns NPY_SUCCEED on success, NPY_FAIL on allocation failure or for the
 * (currently unsupported) object/subarray/field dtypes. The `aligned`
 * argument is presently unused by the memset-based paths.
 */
NPY_NO_EXPORT int
get_setdstzero_transfer_function(int aligned,
npy_intp dst_stride,
PyArray_Descr *dst_dtype,
PyMicArray_StridedUnaryOp **out_stransfer,
NpyAuxData **out_transferdata,
int *out_needs_api)
{
_dst_memset_zero_data *data;
/* If there are no references, just set the whole thing to zero */
if (!PyDataType_REFCHK(dst_dtype)) {
data = (_dst_memset_zero_data *)
PyArray_malloc(sizeof(_dst_memset_zero_data));
if (data == NULL) {
PyErr_NoMemory();
return NPY_FAIL;
}
data->base.free = (NpyAuxData_FreeFunc *)(&PyArray_free);
data->base.clone = &_dst_memset_zero_data_clone;
data->dst_itemsize = dst_dtype->elsize;
/* A stride equal to the item size means the destination is contiguous,
* so one big memset can be used instead of a per-element loop */
if (dst_stride == data->dst_itemsize) {
*out_stransfer = &_null_to_contig_memset_zero;
}
else {
*out_stransfer = &_null_to_strided_memset_zero;
}
*out_transferdata = (NpyAuxData *)data;
}
/* If it's exactly one reference, use the decref function */
else if (dst_dtype->type_num == NPY_OBJECT) {
/* Object arrays would require the API (GIL); not supported here */
if (out_needs_api) {
*out_needs_api = 1;
}
*out_stransfer = NULL;
*out_transferdata = NULL;
return NPY_FAIL;
}
/* If there are subarrays, need to wrap it */
else if (PyDataType_HASSUBARRAY(dst_dtype)) {
//TODO: implement later
*out_stransfer = NULL;
*out_transferdata = NULL;
return NPY_FAIL;
}
/* If there are fields, need to do each field */
else if (PyDataType_HASFIELDS(dst_dtype)) {
*out_stransfer = NULL;
*out_transferdata = NULL;
return NPY_FAIL;
}
return NPY_SUCCEED;
}
/*
 * No-op transfer function: used when a source "decref" is requested
 * for data that holds no references, so there is nothing to do.
 */
static void
_dec_src_ref_nop(void *NPY_UNUSED(dst),
npy_intp NPY_UNUSED(dst_stride),
void *NPY_UNUSED(src), npy_intp NPY_UNUSED(src_stride),
npy_intp NPY_UNUSED(N),
npy_intp NPY_UNUSED(src_itemsize),
NpyAuxData *NPY_UNUSED(data), int device)
{
/* NOP */
}
/***************** WRAP ALIGNED CONTIGUOUS TRANSFER FUNCTION **************/
/* Wraps a transfer function + data in alignment code */
typedef struct {
NpyAuxData base;  /* standard NpyAuxData header (free/clone hooks) */
int device;  /* device that owns bufferin/bufferout */
PyMicArray_StridedUnaryOp *wrapped,  /* the contig->contig op being wrapped */
*tobuffer, *frombuffer;  /* copy/swap into and out of the buffers */
NpyAuxData *wrappeddata, *todata, *fromdata;  /* aux data for the three ops */
npy_intp src_itemsize, dst_itemsize;
char *bufferin, *bufferout;  /* device-side staging buffers (bufferout points into the bufferin allocation) */
} _align_wrap_data;
/* transfer data free function: releases the three wrapped aux datas,
 * the device staging buffer (bufferout lives inside the bufferin
 * allocation, so only bufferin is freed), and the struct itself. */
static void _align_wrap_data_free(NpyAuxData *data)
{
_align_wrap_data *d = (_align_wrap_data *)data;
NPY_AUXDATA_FREE(d->wrappeddata);
NPY_AUXDATA_FREE(d->todata);
NPY_AUXDATA_FREE(d->fromdata);
target_free(d->bufferin, d->device);
PyArray_free(data);
}
/*
 * Transfer data clone hook: deep-copies the wrap data, including a
 * fresh device-side staging buffer and clones of the three wrapped
 * aux datas.  Returns NULL on any allocation/clone failure, with all
 * partially-created resources released.
 * (Removed the unused `datasize` local left over from the variant
 * that allocated the buffers inline after the struct.)
 */
static NpyAuxData *_align_wrap_data_clone(NpyAuxData *data)
{
    _align_wrap_data *d = (_align_wrap_data *)data;
    _align_wrap_data *newdata;
    npy_intp basedatasize, buffersize;

    /* Round up the structure size to 16-byte boundary */
    basedatasize = (sizeof(_align_wrap_data)+15)&(-0x10);
    /* Space for the two low level device-side buffers */
    buffersize = NPY_LOWLEVEL_BUFFER_BLOCKSIZE*d->src_itemsize +
                    NPY_LOWLEVEL_BUFFER_BLOCKSIZE*d->dst_itemsize;

    /* Allocate the host structure and copy the existing fields */
    newdata = (_align_wrap_data *)PyArray_malloc(basedatasize);
    if (newdata == NULL) {
        return NULL;
    }
    memcpy(newdata, data, basedatasize);

    /* The staging buffers live on the device, not inline */
    newdata->bufferin = (char *) target_alloc(buffersize, newdata->device);
    if (newdata->bufferin == NULL) {
        PyArray_free(newdata);
        return NULL;
    }
    newdata->bufferout = newdata->bufferin +
                NPY_LOWLEVEL_BUFFER_BLOCKSIZE*newdata->src_itemsize;

    /* Deep-copy the wrapped aux data, unwinding everything on failure */
    if (newdata->wrappeddata != NULL) {
        newdata->wrappeddata = NPY_AUXDATA_CLONE(d->wrappeddata);
        if (newdata->wrappeddata == NULL) {
            target_free(newdata->bufferin, newdata->device);
            PyArray_free(newdata);
            return NULL;
        }
    }
    if (newdata->todata != NULL) {
        newdata->todata = NPY_AUXDATA_CLONE(d->todata);
        if (newdata->todata == NULL) {
            NPY_AUXDATA_FREE(newdata->wrappeddata);
            target_free(newdata->bufferin, newdata->device);
            PyArray_free(newdata);
            return NULL;
        }
    }
    if (newdata->fromdata != NULL) {
        newdata->fromdata = NPY_AUXDATA_CLONE(d->fromdata);
        if (newdata->fromdata == NULL) {
            NPY_AUXDATA_FREE(newdata->wrappeddata);
            NPY_AUXDATA_FREE(newdata->todata);
            target_free(newdata->bufferin, newdata->device);
            PyArray_free(newdata);
            return NULL;
        }
    }

    return (NpyAuxData *)newdata;
}
/*
 * Strided-to-strided transfer routed through a pair of aligned,
 * contiguous device buffers:
 *   src -> bufferin -> (wrapped cast) -> bufferout -> dst
 * processed in chunks of NPY_LOWLEVEL_BUFFER_BLOCKSIZE elements.
 *
 * Fix: the cursor arithmetic is now done on char* — arithmetic on
 * void* (the old `src += ...`) is a GCC extension, not ISO C.
 */
static void
_strided_to_strided_contig_align_wrap(void *dst, npy_intp dst_stride,
                        void *src, npy_intp src_stride,
                        npy_intp N, npy_intp src_itemsize,
                        NpyAuxData *data, int device)
{
    _align_wrap_data *d = (_align_wrap_data *)data;
    PyMicArray_StridedUnaryOp *wrapped = d->wrapped,
            *tobuffer = d->tobuffer,
            *frombuffer = d->frombuffer;
    npy_intp inner_src_itemsize = d->src_itemsize,
            dst_itemsize = d->dst_itemsize;
    NpyAuxData *wrappeddata = d->wrappeddata,
            *todata = d->todata,
            *fromdata = d->fromdata;
    char *bufferin = d->bufferin, *bufferout = d->bufferout;
    char *srcp = (char *)src, *dstp = (char *)dst;

    /* The staging buffers belong to one device; ignore other devices */
    if (d->device != device) {
        return;
    }

    for(;;) {
        if (N > NPY_LOWLEVEL_BUFFER_BLOCKSIZE) {
            tobuffer(bufferin, inner_src_itemsize, srcp, src_stride,
                                    NPY_LOWLEVEL_BUFFER_BLOCKSIZE,
                                    src_itemsize, todata, device);
            wrapped(bufferout, dst_itemsize, bufferin, inner_src_itemsize,
                                    NPY_LOWLEVEL_BUFFER_BLOCKSIZE,
                                    inner_src_itemsize, wrappeddata, device);
            frombuffer(dstp, dst_stride, bufferout, dst_itemsize,
                                    NPY_LOWLEVEL_BUFFER_BLOCKSIZE,
                                    dst_itemsize, fromdata, device);
            N -= NPY_LOWLEVEL_BUFFER_BLOCKSIZE;
            srcp += NPY_LOWLEVEL_BUFFER_BLOCKSIZE*src_stride;
            dstp += NPY_LOWLEVEL_BUFFER_BLOCKSIZE*dst_stride;
        }
        else {
            /* Final (possibly partial) chunk */
            tobuffer(bufferin, inner_src_itemsize, srcp, src_stride, N,
                                    src_itemsize, todata, device);
            wrapped(bufferout, dst_itemsize, bufferin, inner_src_itemsize, N,
                                    inner_src_itemsize, wrappeddata, device);
            frombuffer(dstp, dst_stride, bufferout, dst_itemsize, N,
                                    dst_itemsize, fromdata, device);
            return;
        }
    }
}
/*
 * Same as _strided_to_strided_contig_align_wrap, but zero-fills the
 * destination staging buffer before each wrapped call (used for
 * dtypes flagged NPY_NEEDS_INIT).
 *
 * Fix: cursor arithmetic moved to char* — arithmetic on void* (the
 * old `src += ...`) is a GCC extension, not ISO C.
 */
static void
_strided_to_strided_contig_align_wrap_init_dest(void *dst, npy_intp dst_stride,
                        void *src, npy_intp src_stride,
                        npy_intp N, npy_intp src_itemsize,
                        NpyAuxData *data, int device)
{
    _align_wrap_data *d = (_align_wrap_data *)data;
    PyMicArray_StridedUnaryOp *wrapped = d->wrapped,
            *tobuffer = d->tobuffer,
            *frombuffer = d->frombuffer;
    npy_intp inner_src_itemsize = d->src_itemsize,
            dst_itemsize = d->dst_itemsize;
    NpyAuxData *wrappeddata = d->wrappeddata,
            *todata = d->todata,
            *fromdata = d->fromdata;
    char *bufferin = d->bufferin, *bufferout = d->bufferout;
    char *srcp = (char *)src, *dstp = (char *)dst;

    /* The staging buffers belong to one device; ignore other devices */
    if (d->device != device) {
        return;
    }

    for(;;) {
        if (N > NPY_LOWLEVEL_BUFFER_BLOCKSIZE) {
            tobuffer(bufferin, inner_src_itemsize, srcp, src_stride,
                                    NPY_LOWLEVEL_BUFFER_BLOCKSIZE,
                                    src_itemsize, todata, device);
            /* Clear the output staging area before the wrapped cast */
            target_memset(bufferout, 0, dst_itemsize*NPY_LOWLEVEL_BUFFER_BLOCKSIZE, device);
            wrapped(bufferout, dst_itemsize, bufferin, inner_src_itemsize,
                                    NPY_LOWLEVEL_BUFFER_BLOCKSIZE,
                                    inner_src_itemsize, wrappeddata, device);
            frombuffer(dstp, dst_stride, bufferout, dst_itemsize,
                                    NPY_LOWLEVEL_BUFFER_BLOCKSIZE,
                                    dst_itemsize, fromdata, device);
            N -= NPY_LOWLEVEL_BUFFER_BLOCKSIZE;
            srcp += NPY_LOWLEVEL_BUFFER_BLOCKSIZE*src_stride;
            dstp += NPY_LOWLEVEL_BUFFER_BLOCKSIZE*dst_stride;
        }
        else {
            /* Final (possibly partial) chunk */
            tobuffer(bufferin, inner_src_itemsize, srcp, src_stride, N,
                                    src_itemsize, todata, device);
            target_memset(bufferout, 0, dst_itemsize*N, device);
            wrapped(bufferout, dst_itemsize, bufferin, inner_src_itemsize, N,
                                    inner_src_itemsize, wrappeddata, device);
            frombuffer(dstp, dst_stride, bufferout, dst_itemsize, N,
                                    dst_itemsize, fromdata, device);
            return;
        }
    }
}
/*
 * Wraps an aligned contig to contig transfer function between either
 * copies or byte swaps to temporary buffers.
 *
 * src_itemsize/dst_itemsize - The sizes of the src and dst datatypes.
 * tobuffer - copy/swap function from src to an aligned contiguous buffer.
 * todata - data for tobuffer
 * frombuffer - copy/swap function from an aligned contiguous buffer to dst.
 * fromdata - data for frombuffer
 * wrapped - contig to contig transfer function being wrapped
 * wrappeddata - data for wrapped
 * init_dest - 1 means to memset the dest buffer to 0 before calling wrapped.
 *
 * On success the returned transfer data owns todata, fromdata and
 * wrappeddata (they are released by _align_wrap_data_free); on failure
 * ownership stays with the caller.
 * (Removed the unused `datasize` local and a commented-out line left
 * over from the variant that stored the buffers inline.)
 *
 * Returns NPY_SUCCEED or NPY_FAIL.
 */
NPY_NO_EXPORT int
wrap_aligned_contig_transfer_function(
                            int device,
                            npy_intp src_itemsize, npy_intp dst_itemsize,
                            PyMicArray_StridedUnaryOp *tobuffer, NpyAuxData *todata,
                            PyMicArray_StridedUnaryOp *frombuffer, NpyAuxData *fromdata,
                            PyMicArray_StridedUnaryOp *wrapped, NpyAuxData *wrappeddata,
                            int init_dest,
                            PyMicArray_StridedUnaryOp **out_stransfer,
                            NpyAuxData **out_transferdata)
{
    _align_wrap_data *data;
    npy_intp basedatasize, buffersize;

    /* Round up the structure size to 16-byte boundary */
    basedatasize = (sizeof(_align_wrap_data)+15)&(-0x10);
    /* Space for the two low level device-side buffers */
    buffersize = NPY_LOWLEVEL_BUFFER_BLOCKSIZE*src_itemsize +
                    NPY_LOWLEVEL_BUFFER_BLOCKSIZE*dst_itemsize;

    /* Allocate the host structure */
    data = (_align_wrap_data *)PyArray_malloc(basedatasize);
    if (data == NULL) {
        PyErr_NoMemory();
        return NPY_FAIL;
    }
    /* The staging buffers live on the device */
    data->bufferin = (char *) target_alloc(buffersize, device);
    if (data->bufferin == NULL) {
        PyArray_free(data);
        PyErr_NoMemory();
        return NPY_FAIL;
    }

    data->base.free = &_align_wrap_data_free;
    data->base.clone = &_align_wrap_data_clone;
    data->device = device;
    data->tobuffer = tobuffer;
    data->todata = todata;
    data->frombuffer = frombuffer;
    data->fromdata = fromdata;
    data->wrapped = wrapped;
    data->wrappeddata = wrappeddata;
    data->src_itemsize = src_itemsize;
    data->dst_itemsize = dst_itemsize;
    data->bufferout = data->bufferin +
                NPY_LOWLEVEL_BUFFER_BLOCKSIZE*src_itemsize;

    /* Set the function and data */
    if (init_dest) {
        *out_stransfer = &_strided_to_strided_contig_align_wrap_init_dest;
    }
    else {
        *out_stransfer = &_strided_to_strided_contig_align_wrap;
    }
    *out_transferdata = (NpyAuxData *)data;

    return NPY_SUCCEED;
}
/*************************** DTYPE CAST FUNCTIONS *************************/
/*
 * Look up a strided transfer function for a numeric -> numeric cast
 * between native-byte-order types.  Emits numpy's ComplexWarning when
 * a complex source is cast to a non-complex, non-bool destination.
 *
 * Returns NPY_SUCCEED or NPY_FAIL.
 */
static int
get_nbo_cast_numeric_transfer_function(int aligned,
npy_intp src_stride, npy_intp dst_stride,
int src_type_num, int dst_type_num,
PyMicArray_StridedUnaryOp **out_stransfer,
NpyAuxData **out_transferdata)
{
/* Emit a warning if complex imaginary is being cast away */
if (PyTypeNum_ISCOMPLEX(src_type_num) &&
!PyTypeNum_ISCOMPLEX(dst_type_num) &&
!PyTypeNum_ISBOOL(dst_type_num)) {
PyObject *cls = NULL, *obj = NULL;
int ret;
obj = PyImport_ImportModule("numpy.core");
if (obj) {
cls = PyObject_GetAttrString(obj, "ComplexWarning");
Py_DECREF(obj);
}
/* NOTE(review): if the import or attribute lookup fails, cls is
   NULL and PyErr_WarnEx falls back to its default category while
   an exception may already be pending — confirm this is intended. */
ret = PyErr_WarnEx(cls,
"Casting complex values to real discards "
"the imaginary part", 1);
Py_XDECREF(cls);
if (ret < 0) {
return NPY_FAIL;
}
}
*out_stransfer = PyMicArray_GetStridedNumericCastFn(aligned,
src_stride, dst_stride,
src_type_num, dst_type_num);
*out_transferdata = NULL;
if (*out_stransfer == NULL) {
PyErr_SetString(PyExc_ValueError,
"unexpected error in GetStridedNumericCastFn");
return NPY_FAIL;
}
return NPY_SUCCEED;
}
/*
 * Get a cast transfer function assuming native byte order on both
 * sides.  Only numeric -> numeric casts are implemented; all other
 * combinations fail.  *out_needs_wrap is set when either dtype is
 * actually non-NBO, in which case the caller must wrap the cast in
 * alignment/byte-swap buffering.
 * (Removed the unused locals `shape`, `src_itemsize`, `dst_itemsize`
 * and stale commented-out declarations.)
 *
 * Returns NPY_SUCCEED or NPY_FAIL.
 */
static int
get_nbo_cast_transfer_function(int aligned,
                            npy_intp src_stride, npy_intp dst_stride,
                            PyArray_Descr *src_dtype, PyArray_Descr *dst_dtype,
                            int move_references,
                            PyMicArray_StridedUnaryOp **out_stransfer,
                            NpyAuxData **out_transferdata,
                            int *out_needs_api,
                            int *out_needs_wrap)
{
    if (PyTypeNum_ISNUMBER(src_dtype->type_num) &&
                    PyTypeNum_ISNUMBER(dst_dtype->type_num)) {
        /* A wrap is needed unless both sides are native byte order */
        *out_needs_wrap = !PyArray_ISNBO(src_dtype->byteorder) ||
                          !PyArray_ISNBO(dst_dtype->byteorder);
        return get_nbo_cast_numeric_transfer_function(aligned,
                                    src_stride, dst_stride,
                                    src_dtype->type_num, dst_dtype->type_num,
                                    out_stransfer, out_transferdata);
    }

    /* Non-numeric casts are not implemented */
    *out_stransfer = NULL;
    *out_transferdata = NULL;
    return NPY_FAIL;
}
/*
 * Build a full cast transfer function between src_dtype and dst_dtype,
 * adding alignment/byte-swap buffering around the core NBO cast when
 * either side is non-native byte order.
 * (Removed a dead store of *out_stransfer before the wrap call — the
 * wrap function always overwrites it on success.)
 *
 * Returns NPY_SUCCEED or NPY_FAIL.
 */
static int
get_cast_transfer_function(int device, int aligned,
                            npy_intp src_stride, npy_intp dst_stride,
                            PyArray_Descr *src_dtype, PyArray_Descr *dst_dtype,
                            int move_references,
                            PyMicArray_StridedUnaryOp **out_stransfer,
                            NpyAuxData **out_transferdata,
                            int *out_needs_api)
{
    PyMicArray_StridedUnaryOp *caststransfer;
    NpyAuxData *castdata, *todata = NULL, *fromdata = NULL;
    int needs_wrap = 0;
    npy_intp src_itemsize = src_dtype->elsize,
            dst_itemsize = dst_dtype->elsize;

    /* Get the core native-byte-order cast */
    if (get_nbo_cast_transfer_function(aligned,
                            src_stride, dst_stride,
                            src_dtype, dst_dtype,
                            move_references,
                            &caststransfer,
                            &castdata,
                            out_needs_api,
                            &needs_wrap) != NPY_SUCCEED) {
        return NPY_FAIL;
    }

    /*
     * If all native byte order and doesn't need alignment wrapping,
     * return the function
     */
    if (!needs_wrap) {
        *out_stransfer = caststransfer;
        *out_transferdata = castdata;
        return NPY_SUCCEED;
    }
    /* Otherwise, we have to copy and/or swap to aligned temporaries */
    else {
        PyMicArray_StridedUnaryOp *tobuffer, *frombuffer;

        /* Get the copy/swap operation from src */
        PyMicArray_GetDTypeCopySwapFn(aligned,
                                src_stride, src_itemsize,
                                src_dtype,
                                &tobuffer, &todata);
        /* Get the copy/swap operation to dst */
        PyMicArray_GetDTypeCopySwapFn(aligned,
                                dst_itemsize, dst_stride,
                                dst_dtype,
                                &frombuffer, &fromdata);

        if (frombuffer == NULL || tobuffer == NULL) {
            NPY_AUXDATA_FREE(castdata);
            NPY_AUXDATA_FREE(todata);
            NPY_AUXDATA_FREE(fromdata);
            return NPY_FAIL;
        }

        /* Wrap it all up in a new transfer function + data */
        if (wrap_aligned_contig_transfer_function(
                            device,
                            src_itemsize, dst_itemsize,
                            tobuffer, todata,
                            frombuffer, fromdata,
                            caststransfer, castdata,
                            PyDataType_FLAGCHK(dst_dtype, NPY_NEEDS_INIT),
                            out_stransfer, out_transferdata) != NPY_SUCCEED) {
            NPY_AUXDATA_FREE(castdata);
            NPY_AUXDATA_FREE(todata);
            NPY_AUXDATA_FREE(fromdata);
            return NPY_FAIL;
        }

        return NPY_SUCCEED;
    }
}
/********************* DTYPE COPY SWAP FUNCTION ***********************/
/*
 * Select a copy (and possibly byte-swap) transfer function for a
 * single dtype.  Unsupported cases (user-defined types, unicode)
 * leave *outstransfer NULL, which makes the final check report
 * NPY_FAIL.
 *
 * Returns NPY_SUCCEED or NPY_FAIL.
 */
NPY_NO_EXPORT int
PyMicArray_GetDTypeCopySwapFn(int aligned,
npy_intp src_stride, npy_intp dst_stride,
PyArray_Descr *dtype,
PyMicArray_StridedUnaryOp **outstransfer,
NpyAuxData **outtransferdata)
{
npy_intp itemsize = dtype->elsize;
/* If it's a custom data type, wrap its copy swap function */
if (dtype->type_num >= NPY_NTYPES) {
/* user-defined dtypes: not supported here */
*outstransfer = NULL;
*outtransferdata = NULL;
}
/* A straight copy */
else if (itemsize == 1 || PyArray_ISNBO(dtype->byteorder)) {
*outstransfer = PyMicArray_GetStridedCopyFn(aligned,
src_stride, dst_stride,
itemsize);
*outtransferdata = NULL;
}
else if (dtype->kind == 'U') {
/* unicode: not supported here */
*outstransfer = NULL;
*outtransferdata = NULL;
}
/* If it's not complex, one swap */
else if (dtype->kind != 'c') {
*outstransfer = PyMicArray_GetStridedCopySwapFn(aligned,
src_stride, dst_stride,
itemsize);
*outtransferdata = NULL;
}
/* If complex, a paired swap */
else {
*outstransfer = PyMicArray_GetStridedCopySwapPairFn(aligned,
src_stride, dst_stride,
itemsize);
*outtransferdata = NULL;
}
return (*outstransfer == NULL) ? NPY_FAIL : NPY_SUCCEED;
}
/********************* MAIN DTYPE TRANSFER FUNCTION ***********************/
/*
 * Top-level dispatcher: selects the cheapest correct strided transfer
 * function to copy/cast src_dtype -> dst_dtype on the given device.
 * Either dtype may be NULL (see the first two branches below).  On
 * success *out_stransfer / *out_transferdata are filled in;
 * *out_needs_api is set to 1 only when the Python API would be needed.
 *
 * Returns NPY_SUCCEED or NPY_FAIL.
 */
NPY_NO_EXPORT int
PyMicArray_GetDTypeTransferFunction(int device, int aligned,
npy_intp src_stride, npy_intp dst_stride,
PyArray_Descr *src_dtype, PyArray_Descr *dst_dtype,
int move_references,
PyMicArray_StridedUnaryOp **out_stransfer,
NpyAuxData **out_transferdata,
int *out_needs_api)
{
npy_intp src_itemsize, dst_itemsize;
int src_type_num, dst_type_num;
#if NPY_DT_DBG_TRACING
printf("Calculating dtype transfer from ");
PyObject_Print((PyObject *)src_dtype, stdout, 0);
printf(" to ");
PyObject_Print((PyObject *)dst_dtype, stdout, 0);
printf("\n");
#endif
/*
 * If one of the dtypes is NULL, we give back either a src decref
 * function or a dst setzero function
 */
if (dst_dtype == NULL) {
/* moving references without a destination is unsupported */
if (move_references) {
return NPY_FAIL;
}
else {
*out_stransfer = &_dec_src_ref_nop;
*out_transferdata = NULL;
return NPY_SUCCEED;
}
}
else if (src_dtype == NULL) {
return get_setdstzero_transfer_function(aligned,
dst_dtype->elsize,
dst_dtype,
out_stransfer, out_transferdata,
out_needs_api);
}
src_itemsize = src_dtype->elsize;
dst_itemsize = dst_dtype->elsize;
src_type_num = src_dtype->type_num;
dst_type_num = dst_dtype->type_num;
/* Common special case - number -> number NBO cast */
if (PyTypeNum_ISNUMBER(src_type_num) &&
PyTypeNum_ISNUMBER(dst_type_num) &&
PyArray_ISNBO(src_dtype->byteorder) &&
PyArray_ISNBO(dst_dtype->byteorder)) {
if (PyArray_EquivTypenums(src_type_num, dst_type_num)) {
*out_stransfer = PyMicArray_GetStridedCopyFn(aligned,
src_stride, dst_stride,
src_itemsize);
*out_transferdata = NULL;
return (*out_stransfer == NULL) ? NPY_FAIL : NPY_SUCCEED;
}
else {
return get_nbo_cast_numeric_transfer_function (aligned,
src_stride, dst_stride,
src_type_num, dst_type_num,
out_stransfer, out_transferdata);
}
}
/*
 * If there are no references and the data types are equivalent,
 * return a simple copy
 */
if (!PyDataType_REFCHK(src_dtype) && !PyDataType_REFCHK(dst_dtype) &&
PyArray_EquivTypes(src_dtype, dst_dtype)) {
/*
 * We can't pass through the aligned flag because it's not
 * appropriate. Consider a size-8 string, it will say it's
 * aligned because strings only need alignment 1, but the
 * copy function wants to know if it's alignment 8.
 *
 * TODO: Change align from a flag to a "best power of 2 alignment"
 * which holds the strongest alignment value for all
 * the data which will be used.
 */
*out_stransfer = PyMicArray_GetStridedCopyFn(0,
src_stride, dst_stride,
src_dtype->elsize);
*out_transferdata = NULL;
return NPY_SUCCEED;
}
/* First look at the possibilities of just a copy or swap */
if (src_itemsize == dst_itemsize && src_dtype->kind == dst_dtype->kind &&
!PyDataType_HASFIELDS(src_dtype) &&
!PyDataType_HASFIELDS(dst_dtype) &&
!PyDataType_HASSUBARRAY(src_dtype) &&
!PyDataType_HASSUBARRAY(dst_dtype) &&
src_type_num != NPY_DATETIME && src_type_num != NPY_TIMEDELTA) {
/* A custom data type requires that we use its copy/swap */
if (src_type_num >= NPY_NTYPES || dst_type_num >= NPY_NTYPES) {
/*
 * If the sizes and kinds are identical, but they're different
 * custom types, then get a cast function
 */
return NPY_FAIL;
}
/* The special types, which have no or subelement byte-order */
switch (src_type_num) {
case NPY_UNICODE:
case NPY_VOID:
case NPY_STRING:
case NPY_OBJECT:
return NPY_FAIL;
}
/* This is a straight copy */
if (src_itemsize == 1 || PyArray_ISNBO(src_dtype->byteorder) ==
PyArray_ISNBO(dst_dtype->byteorder)) {
*out_stransfer = PyMicArray_GetStridedCopyFn(aligned,
src_stride, dst_stride,
src_itemsize);
*out_transferdata = NULL;
return (*out_stransfer == NULL) ? NPY_FAIL : NPY_SUCCEED;
}
/* This is a straight copy + byte swap */
else if (!PyTypeNum_ISCOMPLEX(src_type_num)) {
*out_stransfer = PyMicArray_GetStridedCopySwapFn(aligned,
src_stride, dst_stride,
src_itemsize);
*out_transferdata = NULL;
return (*out_stransfer == NULL) ? NPY_FAIL : NPY_SUCCEED;
}
/* This is a straight copy + element pair byte swap */
else {
*out_stransfer = PyMicArray_GetStridedCopySwapPairFn(aligned,
src_stride, dst_stride,
src_itemsize);
*out_transferdata = NULL;
return (*out_stransfer == NULL) ? NPY_FAIL : NPY_SUCCEED;
}
}
/* Handle subarrays */
if (PyDataType_HASSUBARRAY(src_dtype) ||
PyDataType_HASSUBARRAY(dst_dtype)) {
/* not implemented */
return NPY_FAIL;
}
/* Handle fields */
if ((PyDataType_HASFIELDS(src_dtype) || PyDataType_HASFIELDS(dst_dtype)) &&
src_type_num != NPY_OBJECT && dst_type_num != NPY_OBJECT) {
//TODO: figure out what field is
return NPY_FAIL;
}
/* Check for different-sized strings, unicodes, or voids */
if (src_type_num == dst_type_num) {
switch (src_type_num) {
case NPY_UNICODE:
case NPY_STRING:
case NPY_VOID:
return NPY_FAIL;
}
}
/* Otherwise a cast is necessary */
return get_cast_transfer_function(device, aligned,
src_stride, dst_stride,
src_dtype, dst_dtype,
move_references,
out_stransfer, out_transferdata,
out_needs_api);
}
/*
 * Masked dtype transfers are not implemented yet; always fails.
 * NOTE(review): no output pointer is set and no Python exception is
 * raised before returning NPY_FAIL — confirm callers handle that.
 */
NPY_NO_EXPORT int
PyMicArray_GetMaskedDTypeTransferFunction(int aligned,
npy_intp src_stride,
npy_intp dst_stride,
npy_intp mask_stride,
PyArray_Descr *src_dtype,
PyArray_Descr *dst_dtype,
PyArray_Descr *mask_dtype,
int move_references,
PyMicArray_MaskedStridedUnaryOp **out_stransfer,
NpyAuxData **out_transferdata,
int *out_needs_api)
{
//TODO
return NPY_FAIL;
}
/*
 * Raw-array casting is not implemented yet; always fails.
 * NOTE(review): no Python exception is raised before returning
 * NPY_FAIL — confirm callers handle that.
 */
NPY_NO_EXPORT int
PyMicArray_CastRawArrays(npy_intp count,
char *src, char *dst,
npy_intp src_stride, npy_intp dst_stride,
PyArray_Descr *src_dtype, PyArray_Descr *dst_dtype,
int move_references)
{
//TODO
return NPY_FAIL;
}
/*
 * Prepares shape and strides for a simple raw array iteration.
 * This sorts the strides into FORTRAN order, reverses any negative
 * strides, then coalesces axes where possible. The results are
 * filled in the output parameters.
 *
 * This is intended for simple, lightweight iteration over arrays
 * where no buffering of any kind is needed, and the array may
 * not be stored as a PyArrayObject.
 *
 * The arrays shape, out_shape, strides, and out_strides must all
 * point to different data.
 *
 * Returns 0 on success, -1 on failure.
 */
NPY_NO_EXPORT int
PyMicArray_PrepareOneRawArrayIter(int ndim, npy_intp *shape,
char *data, npy_intp *strides,
int *out_ndim, npy_intp *out_shape,
char **out_data, npy_intp *out_strides)
{
npy_stride_sort_item strideperm[NPY_MAXDIMS];
int i, j;
/* Special case 0 and 1 dimensions */
if (ndim == 0) {
/* a 0-d array iterates as a single element */
*out_ndim = 1;
*out_data = data;
out_shape[0] = 1;
out_strides[0] = 0;
return 0;
}
else if (ndim == 1) {
npy_intp stride_entry = strides[0], shape_entry = shape[0];
*out_ndim = 1;
out_shape[0] = shape[0];
/* Always make a positive stride */
if (stride_entry >= 0) {
*out_data = data;
out_strides[0] = stride_entry;
}
else {
/* move data to the last element so iteration can go forward */
*out_data = data + stride_entry * (shape_entry - 1);
out_strides[0] = -stride_entry;
}
return 0;
}
/* Sort the axes based on the destination strides */
PyArray_CreateSortedStridePerm(ndim, strides, strideperm);
/* apply the permutation (reversed) to shape and strides */
for (i = 0; i < ndim; ++i) {
int iperm = strideperm[ndim - i - 1].perm;
out_shape[i] = shape[iperm];
out_strides[i] = strides[iperm];
}
/* Reverse any negative strides */
for (i = 0; i < ndim; ++i) {
npy_intp stride_entry = out_strides[i], shape_entry = out_shape[i];
if (stride_entry < 0) {
data += stride_entry * (shape_entry - 1);
out_strides[i] = -stride_entry;
}
/* Detect 0-size arrays here */
if (shape_entry == 0) {
*out_ndim = 1;
*out_data = data;
out_shape[0] = 0;
out_strides[0] = 0;
return 0;
}
}
/* Coalesce any dimensions where possible */
i = 0;
for (j = 1; j < ndim; ++j) {
if (out_shape[i] == 1) {
/* Drop axis i */
out_shape[i] = out_shape[j];
out_strides[i] = out_strides[j];
}
else if (out_shape[j] == 1) {
/* Drop axis j */
}
else if (out_strides[i] * out_shape[i] == out_strides[j]) {
/* Coalesce axes i and j */
out_shape[i] *= out_shape[j];
}
else {
/* Can't coalesce, go to next i */
++i;
out_shape[i] = out_shape[j];
out_strides[i] = out_strides[j];
}
}
/* i is the index of the last surviving axis */
ndim = i+1;
#if 0
/* DEBUG */
{
printf("raw iter ndim %d\n", ndim);
printf("shape: ");
for (i = 0; i < ndim; ++i) {
printf("%d ", (int)out_shape[i]);
}
printf("\n");
printf("strides: ");
for (i = 0; i < ndim; ++i) {
printf("%d ", (int)out_strides[i]);
}
printf("\n");
}
#endif
*out_data = data;
*out_ndim = ndim;
return 0;
}
/*
 * The same as PyArray_PrepareOneRawArrayIter, but for two
 * operands instead of one. Any broadcasting of the two operands
 * should have already been done before calling this function,
 * as the ndim and shape is only specified once for both operands.
 *
 * Only the strides of the first operand are used to reorder
 * the dimensions, no attempt to consider all the strides together
 * is made, as is done in the NpyIter object.
 *
 * You can use this together with NPY_RAW_ITER_START and
 * NPY_RAW_ITER_TWO_NEXT to handle the looping boilerplate of everything
 * but the innermost loop (which is for idim == 0).
 *
 * Returns 0 on success, -1 on failure.
 */
NPY_NO_EXPORT int
PyMicArray_PrepareTwoRawArrayIter(int ndim, npy_intp *shape,
char *dataA, npy_intp *stridesA,
char *dataB, npy_intp *stridesB,
int *out_ndim, npy_intp *out_shape,
char **out_dataA, npy_intp *out_stridesA,
char **out_dataB, npy_intp *out_stridesB)
{
npy_stride_sort_item strideperm[NPY_MAXDIMS];
int i, j;
/* Special case 0 and 1 dimensions */
if (ndim == 0) {
*out_ndim = 1;
*out_dataA = dataA;
*out_dataB = dataB;
out_shape[0] = 1;
out_stridesA[0] = 0;
out_stridesB[0] = 0;
return 0;
}
else if (ndim == 1) {
npy_intp stride_entryA = stridesA[0], stride_entryB = stridesB[0];
npy_intp shape_entry = shape[0];
*out_ndim = 1;
out_shape[0] = shape[0];
/* Always make a positive stride for the first operand */
if (stride_entryA >= 0) {
*out_dataA = dataA;
*out_dataB = dataB;
out_stridesA[0] = stride_entryA;
out_stridesB[0] = stride_entryB;
}
else {
/* flip both operands so A iterates forward */
*out_dataA = dataA + stride_entryA * (shape_entry - 1);
*out_dataB = dataB + stride_entryB * (shape_entry - 1);
out_stridesA[0] = -stride_entryA;
out_stridesB[0] = -stride_entryB;
}
return 0;
}
/* Sort the axes based on the destination strides */
PyArray_CreateSortedStridePerm(ndim, stridesA, strideperm);
/* apply the permutation (reversed) to shape and both stride sets */
for (i = 0; i < ndim; ++i) {
int iperm = strideperm[ndim - i - 1].perm;
out_shape[i] = shape[iperm];
out_stridesA[i] = stridesA[iperm];
out_stridesB[i] = stridesB[iperm];
}
/* Reverse any negative strides of operand A */
for (i = 0; i < ndim; ++i) {
npy_intp stride_entryA = out_stridesA[i];
npy_intp stride_entryB = out_stridesB[i];
npy_intp shape_entry = out_shape[i];
if (stride_entryA < 0) {
dataA += stride_entryA * (shape_entry - 1);
dataB += stride_entryB * (shape_entry - 1);
out_stridesA[i] = -stride_entryA;
out_stridesB[i] = -stride_entryB;
}
/* Detect 0-size arrays here */
if (shape_entry == 0) {
*out_ndim = 1;
*out_dataA = dataA;
*out_dataB = dataB;
out_shape[0] = 0;
out_stridesA[0] = 0;
out_stridesB[0] = 0;
return 0;
}
}
/* Coalesce any dimensions where possible */
i = 0;
for (j = 1; j < ndim; ++j) {
if (out_shape[i] == 1) {
/* Drop axis i */
out_shape[i] = out_shape[j];
out_stridesA[i] = out_stridesA[j];
out_stridesB[i] = out_stridesB[j];
}
else if (out_shape[j] == 1) {
/* Drop axis j */
}
else if (out_stridesA[i] * out_shape[i] == out_stridesA[j] &&
out_stridesB[i] * out_shape[i] == out_stridesB[j]) {
/* Coalesce axes i and j */
out_shape[i] *= out_shape[j];
}
else {
/* Can't coalesce, go to next i */
++i;
out_shape[i] = out_shape[j];
out_stridesA[i] = out_stridesA[j];
out_stridesB[i] = out_stridesB[j];
}
}
/* i is the index of the last surviving axis */
ndim = i+1;
*out_dataA = dataA;
*out_dataB = dataB;
*out_ndim = ndim;
return 0;
}
/*
 * The same as PyArray_PrepareOneRawArrayIter, but for three
 * operands instead of one. Any broadcasting of the three operands
 * should have already been done before calling this function,
 * as the ndim and shape is only specified once for all operands.
 *
 * Only the strides of the first operand are used to reorder
 * the dimensions, no attempt to consider all the strides together
 * is made, as is done in the NpyIter object.
 *
 * You can use this together with NPY_RAW_ITER_START and
 * NPY_RAW_ITER_THREE_NEXT to handle the looping boilerplate of everything
 * but the innermost loop (which is for idim == 0).
 *
 * Returns 0 on success, -1 on failure.
 */
NPY_NO_EXPORT int
PyMicArray_PrepareThreeRawArrayIter(int ndim, npy_intp *shape,
char *dataA, npy_intp *stridesA,
char *dataB, npy_intp *stridesB,
char *dataC, npy_intp *stridesC,
int *out_ndim, npy_intp *out_shape,
char **out_dataA, npy_intp *out_stridesA,
char **out_dataB, npy_intp *out_stridesB,
char **out_dataC, npy_intp *out_stridesC)
{
npy_stride_sort_item strideperm[NPY_MAXDIMS];
int i, j;
/* Special case 0 and 1 dimensions */
if (ndim == 0) {
*out_ndim = 1;
*out_dataA = dataA;
*out_dataB = dataB;
*out_dataC = dataC;
out_shape[0] = 1;
out_stridesA[0] = 0;
out_stridesB[0] = 0;
out_stridesC[0] = 0;
return 0;
}
else if (ndim == 1) {
npy_intp stride_entryA = stridesA[0];
npy_intp stride_entryB = stridesB[0];
npy_intp stride_entryC = stridesC[0];
npy_intp shape_entry = shape[0];
*out_ndim = 1;
out_shape[0] = shape[0];
/* Always make a positive stride for the first operand */
if (stride_entryA >= 0) {
*out_dataA = dataA;
*out_dataB = dataB;
*out_dataC = dataC;
out_stridesA[0] = stride_entryA;
out_stridesB[0] = stride_entryB;
out_stridesC[0] = stride_entryC;
}
else {
/* flip all three operands so A iterates forward */
*out_dataA = dataA + stride_entryA * (shape_entry - 1);
*out_dataB = dataB + stride_entryB * (shape_entry - 1);
*out_dataC = dataC + stride_entryC * (shape_entry - 1);
out_stridesA[0] = -stride_entryA;
out_stridesB[0] = -stride_entryB;
out_stridesC[0] = -stride_entryC;
}
return 0;
}
/* Sort the axes based on the destination strides */
PyArray_CreateSortedStridePerm(ndim, stridesA, strideperm);
/* apply the permutation (reversed) to shape and all stride sets */
for (i = 0; i < ndim; ++i) {
int iperm = strideperm[ndim - i - 1].perm;
out_shape[i] = shape[iperm];
out_stridesA[i] = stridesA[iperm];
out_stridesB[i] = stridesB[iperm];
out_stridesC[i] = stridesC[iperm];
}
/* Reverse any negative strides of operand A */
for (i = 0; i < ndim; ++i) {
npy_intp stride_entryA = out_stridesA[i];
npy_intp stride_entryB = out_stridesB[i];
npy_intp stride_entryC = out_stridesC[i];
npy_intp shape_entry = out_shape[i];
if (stride_entryA < 0) {
dataA += stride_entryA * (shape_entry - 1);
dataB += stride_entryB * (shape_entry - 1);
dataC += stride_entryC * (shape_entry - 1);
out_stridesA[i] = -stride_entryA;
out_stridesB[i] = -stride_entryB;
out_stridesC[i] = -stride_entryC;
}
/* Detect 0-size arrays here */
if (shape_entry == 0) {
*out_ndim = 1;
*out_dataA = dataA;
*out_dataB = dataB;
*out_dataC = dataC;
out_shape[0] = 0;
out_stridesA[0] = 0;
out_stridesB[0] = 0;
out_stridesC[0] = 0;
return 0;
}
}
/* Coalesce any dimensions where possible */
i = 0;
for (j = 1; j < ndim; ++j) {
if (out_shape[i] == 1) {
/* Drop axis i */
out_shape[i] = out_shape[j];
out_stridesA[i] = out_stridesA[j];
out_stridesB[i] = out_stridesB[j];
out_stridesC[i] = out_stridesC[j];
}
else if (out_shape[j] == 1) {
/* Drop axis j */
}
else if (out_stridesA[i] * out_shape[i] == out_stridesA[j] &&
out_stridesB[i] * out_shape[i] == out_stridesB[j] &&
out_stridesC[i] * out_shape[i] == out_stridesC[j]) {
/* Coalesce axes i and j */
out_shape[i] *= out_shape[j];
}
else {
/* Can't coalesce, go to next i */
++i;
out_shape[i] = out_shape[j];
out_stridesA[i] = out_stridesA[j];
out_stridesB[i] = out_stridesB[j];
out_stridesC[i] = out_stridesC[j];
}
}
/* i is the index of the last surviving axis */
ndim = i+1;
*out_dataA = dataA;
*out_dataB = dataB;
*out_dataC = dataC;
*out_ndim = ndim;
return 0;
}
|
main.c | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <string.h>
#include <assert.h>
#define N 197770
#define GRID_LEN 128
#define MAXX 5100
#define MAXY 3400
#define MAX_DIST 7000
#define STARTING_POS 121572
/* Grid coordinate pair (row i, column j). */
struct pos
{
    int i;
    int j;
};

/* Offsets of the 8 Chebyshev (king-move) neighbors. */
int da_x[] = { 0, -1, -1, 0, 1, 1, -1, 1};
int da_y[] = {-1, -1, 0, 1, 1, 0, 1, -1};

/*
 * Collect the in-bounds Chebyshev neighbors of cell ij.
 * mx/my are the maximum valid i/j coordinates (inclusive).
 * Stores the neighbors in ret (capacity must be >= 8) and writes the
 * count to *n.  (Removed the dead store `*n = 8;` before the loop —
 * it was always overwritten at the end.)
 */
void get_chebyshev_neighbors(struct pos ij, int mx, int my, int* n, struct pos* ret)
{
    int count = 0;
    for(int k = 0; k < 8; k++)
    {
        int ni = ij.i + da_x[k];
        int nj = ij.j + da_y[k];
        if(ni < 0 || ni > mx) continue;
        if(nj < 0 || nj > my) continue;
        ret[count].i = ni;
        ret[count].j = nj;
        count++;
    }
    *n = count;
}
/*
 * Map a point (x, y) to a grid cell.
 * NOTE(review): dividing by MAXX/MAXY (the full extents) yields only
 * 0 or 1 for in-range coordinates, which looks inconsistent with a
 * GRID_LEN x GRID_LEN grid — presumably the intended divisors were
 * the cell sizes (MAXX / GRID_LEN, MAXY / GRID_LEN); confirm against
 * the callers before changing.
 */
struct pos get_pos_from_xy(double x, double y)
{
struct pos ret;
ret.i = (int)floor(x / MAXX);
ret.j = (int)floor(y / MAXY);
return ret;
}
/* Shared sieve buffer; callers must pass below <= N. */
static char is_prime[N];

/*
 * Sieve of Eratosthenes: find all primes strictly below `below`.
 * A freshly malloc'd array of the primes (owned by the caller) is
 * stored in *out and the prime count is returned.  On allocation
 * failure, *out is set to NULL and 0 is returned (the old code
 * dereferenced the unchecked malloc result).
 */
int get_all_primes(int below, int** out)
{
    int prime_count = 0;
    for(int i = 0; i < below; i++) is_prime[i] = 1;
    is_prime[0] = 0;
    is_prime[1] = 0;
    for(int i = 2; i < below; i++)
    {
        if(is_prime[i])
        {
            prime_count++;
            /* Start crossing out at i*i: smaller multiples were already
               cleared by smaller prime factors.  long long avoids int
               overflow of i*i for large i. */
            for(long long q = (long long)i * i; q < below; q += i)
                is_prime[q] = 0;
        }
    }
    int* ret = malloc(prime_count * sizeof(int));
    if(ret == NULL)
    {
        *out = NULL;
        return 0;
    }
    int current_prime_index = 0;
    for(int i = 0; i < below; i++)
    {
        if(is_prime[i])
            ret[current_prime_index++] = i;
    }
    *out = ret;
    return prime_count;
}
/*
 * Shuffle array[0..n-1] in place (Fisher-Yates) using rand().
 * (Function name typo is kept for compatibility with existing callers.)
 */
void random_permutattion(int* array, int n)
{
    for(int i = 0; i < n - 1; i++)
    {
        int pick = i + rand() % (n - i);
        int held = array[i];
        array[i] = array[pick];
        array[pick] = held;
    }
}
double euclidean_distance(double x1, double y1, double x2, double y2)
{
double dx = x1 - x2;
double dy = y1 - y2;
return sqrt(dx * dx + dy * dy);
}
/* One city from the input CSV: its id and 2-D coordinates. */
struct city
{
int id;  /* city id; also its index into cities[] below */
double x;
double y;
};
/* All cities, indexed by id — assumes input ids lie in [0, N). */
struct city cities[N];
void load_cities(const char* file_name)
{
FILE* input_file = fopen(file_name, "r");
char* buf = NULL;
size_t buf_len = 0;
while(1)
{
ssize_t read_count =
getline(&buf, &buf_len, input_file);
if(read_count == -1)
break;
int city_id;
double x, y;
sscanf(buf, "%d,%lf,%lf", &city_id, &x, &y);
cities[city_id].id = city_id;
cities[city_id].x = x;
cities[city_id].y = y;
}
free(buf);
fclose(input_file);
}
double dist_between_id(int id1, int id2)
{
double x1 = cities[id1].x;
double y1 = cities[id1].y;
double x2 = cities[id2].x;
double y2 = cities[id2].y;
return euclidean_distance(x1, y1, x2, y2);
}
/* Singly-linked list node holding one city id. */
struct bucket_node
{
    int id;
    struct bucket_node* next;
};

/* Allocate a detached (next == NULL) node carrying `id`. */
struct bucket_node* init_bucket_node(int id)
{
    struct bucket_node* node = malloc(sizeof *node);
    node->id = id;
    node->next = NULL;
    return node;
}
/* Link bn in front of `before`; the caller re-points the bucket head. */
void prepend_bucket_node(struct bucket_node* bn, struct bucket_node* before)
{
	bn->next = before;
}
/* Free bn and every node reachable through ->next.
 * Fixed: the original recursed once per node, which overflows the stack
 * on long chains; teardown is now iterative. NULL is a no-op. */
void delete_bucket_node(struct bucket_node* bn)
{
	while (bn) {
		struct bucket_node* next = bn->next;
		free(bn);
		bn = next;
	}
}
/* Square spatial hash: grid_len x grid_len buckets, each a linked list
 * of city ids whose coordinates fall into that cell. */
struct bucket_grid_2d
{
int grid_len;
struct bucket_node** buckets;
};
/* Create an empty grid_len x grid_len bucket grid (all buckets NULL).
 * Fixed: calloc failure is now detected instead of returning a grid
 * whose buckets pointer is NULL. */
struct bucket_grid_2d init_bucket_grid_2d(int grid_len)
{
	struct bucket_grid_2d ret;
	ret.grid_len = grid_len;
	ret.buckets = calloc((size_t)grid_len * grid_len, sizeof(struct bucket_node*));
	if (ret.buckets == NULL) {
		fprintf(stderr, "init_bucket_grid_2d: out of memory\n");
		exit(EXIT_FAILURE);
	}
	return ret;
}
/* Head of the bucket list at cell (i, j), or NULL when the cell is empty. */
struct bucket_node* get_at_bucket_grid_2d(struct bucket_grid_2d bg, int i, int j)
{
	return bg.buckets[bg.grid_len * i + j];
}
void insert_at_bucket_grid_2d(struct bucket_grid_2d bg, int i, int j, int id)
{
int pos = bg.grid_len * i + j;
struct bucket_node* bn = init_bucket_node(id);
struct bucket_node* before = bg.buckets[pos];
if(before) {
prepend_bucket_node(bn, before);
}
bg.buckets[pos] = bn;
}
/*
 * Unlink and free the first node carrying `id` in the bucket at (i, j).
 * Fixed: the original dereferenced the head unconditionally and crashed
 * on an empty bucket. A missing id is silently ignored.
 */
void remove_from_bucket_grid_2d(struct bucket_grid_2d bg, int i, int j, int id)
{
	int pos = bg.grid_len * i + j;
	struct bucket_node* bn = bg.buckets[pos];
	if (bn == NULL)
		return; /* empty bucket */
	if (bn->id == id) {
		bg.buckets[pos] = bn->next;
		free(bn);
		return;
	}
	struct bucket_node* prev = bn;
	for (bn = bn->next; bn != NULL; prev = bn, bn = bn->next) {
		if (bn->id == id) {
			prev->next = bn->next;
			free(bn);
			return;
		}
	}
}
/*
 * Free every node in every bucket and the bucket array itself.
 * Fixed: the original leaked bg.buckets. The grid must be re-initialized
 * with init_bucket_grid_2d() before any further use.
 */
void delete_bucket_grid_2d(struct bucket_grid_2d bg)
{
	int n = bg.grid_len;
	for (int i = 0; i < n; i++) {
		for (int j = 0; j < n; j++) {
			delete_bucket_node(get_at_bucket_grid_2d(bg, i, j));
		}
	}
	free(bg.buckets);
}
/* Shared spatial index used by the nearest-neighbour constructors. */
static struct bucket_grid_2d bucket_grid;
/* A tour over n positions; ids[] holds the city visiting order
 * (first and last entry are the anchor city). Owner frees ids. */
struct solution
{
int n;
int* ids;
};
/*
 * Read a tour of up to `n` city ids from fname (first line is a header
 * and is skipped). Fixes: fopen/malloc failures are detected (were NULL
 * derefs), unparseable lines are skipped instead of storing garbage, and
 * reading stops after n ids so ids[] can no longer be overrun.
 */
struct solution load_solution(char* fname, int n)
{
	struct solution ret;
	ret.n = n;
	ret.ids = malloc(n * sizeof(int));
	if (ret.ids == NULL) {
		fprintf(stderr, "load_solution: out of memory\n");
		exit(EXIT_FAILURE);
	}
	FILE* f = fopen(fname, "r");
	if (f == NULL) {
		fprintf(stderr, "load_solution: cannot open %s\n", fname);
		exit(EXIT_FAILURE);
	}
	char* buf = NULL;
	size_t buf_len = 0;
	int current_index = 0;
	/* ignore header line */
	getline(&buf, &buf_len, f);
	while (current_index < n) {
		if (getline(&buf, &buf_len, f) == -1)
			break;
		if (sscanf(buf, "%d", &ret.ids[current_index]) == 1)
			current_index++;
	}
	free(buf);
	fclose(f);
	return ret;
}
int get_nearest_neighbor(int id, char* visited, struct bucket_grid_2d bg, char prime_preferred)
{
struct pos ij = get_pos_from_xy(cities[id].x, cities[id].y);
int num_neighbors;
struct pos neighbors[8];
get_chebyshev_neighbors(ij, MAXX, MAXY, &num_neighbors, neighbors);
double min_dist = MAX_DIST;
int min_id = 0;
int ci = 0;
do
{
struct bucket_node* bn = get_at_bucket_grid_2d(bg, ij.i, ij.j);
while(bn)
{
if(!visited[bn->id])
{
double dist = dist_between_id(id, bn->id);
if(prime_preferred && !is_prime[bn->id])
dist *= 1.1;
if (min_dist > dist)
{
min_dist = dist;
min_id = bn->id;
}
}
bn = bn->next;
}
ij = neighbors[ci];
ci++;
if(ci == num_neighbors) break;
} while(1);
return min_id;
}
/*
 * Build a greedy nearest-neighbour tour over the cities in ids[0..n-1],
 * starting and ending at starting_pos. Populates the shared bucket_grid
 * spatial index first, then repeatedly hops to the closest unvisited city.
 * Fixes: the visited[] scratch array was leaked; malloc/calloc results
 * are now checked.
 */
struct solution nearest_neighbor_solution(const int* ids, int n, int starting_pos)
{
	/* index every city into the shared spatial grid */
	for (int j = 0; j < n; j++) {
		int i = ids[j];
		struct pos ij = get_pos_from_xy(cities[i].x, cities[i].y);
		insert_at_bucket_grid_2d(bucket_grid, ij.i, ij.j, cities[i].id);
	}
	struct solution ret;
	ret.n = n;
	ret.ids = malloc(n * sizeof(int));
	if (ret.ids == NULL) {
		fprintf(stderr, "nearest_neighbor_solution: out of memory\n");
		exit(EXIT_FAILURE);
	}
	ret.ids[0] = starting_pos;
	ret.ids[n-1] = starting_pos;
	int max_id = 0;
	for (int i = 0; i < n; i++) {
		if (ids[i] > max_id) max_id = ids[i];
	}
	char* visited = calloc(max_id + 1, 1);
	if (visited == NULL) {
		fprintf(stderr, "nearest_neighbor_solution: out of memory\n");
		exit(EXIT_FAILURE);
	}
	visited[starting_pos] = 1;
	for (int i = 1; i < n - 1; i++) {
		int next_id = get_nearest_neighbor(ret.ids[i - 1], visited, bucket_grid, 0);
		assert(next_id != starting_pos);
		visited[next_id] = 1;
		ret.ids[i] = next_id;
		if (i % 10000 == 0) printf("%lf\n", (double)i / n);
	}
	free(visited); /* was leaked */
	printf("Last Edge Cost: %lf\n", dist_between_id(ret.ids[n-1], ret.ids[n-2]));
	return ret;
}
/*
 * Greedy tour grown from both ends toward the middle, anchored at city 0:
 * position i advances from the front, position j retreats from the back.
 * Fixes: the visited[] scratch array was leaked; allocations are checked.
 */
struct solution nearest_neighbor_dual_approach_solution(const int* ids, int n)
{
	for (int j = 0; j < n; j++) {
		int i = ids[j];
		struct pos ij = get_pos_from_xy(cities[i].x, cities[i].y);
		insert_at_bucket_grid_2d(bucket_grid, ij.i, ij.j, cities[i].id);
	}
	struct solution ret;
	ret.n = n;
	ret.ids = malloc(n * sizeof(int));
	if (ret.ids == NULL) {
		fprintf(stderr, "nearest_neighbor_dual_approach_solution: out of memory\n");
		exit(EXIT_FAILURE);
	}
	ret.ids[0] = 0;
	ret.ids[n-1] = 0;
	int max_id = 0;
	for (int i = 0; i < n; i++) {
		if (ids[i] > max_id) max_id = ids[i];
	}
	char* visited = calloc(max_id + 1, 1);
	if (visited == NULL) {
		fprintf(stderr, "nearest_neighbor_dual_approach_solution: out of memory\n");
		exit(EXIT_FAILURE);
	}
	visited[0] = 1;
	int i = 1;
	int j = n - 2;
	while (i <= j) {
		/* extend from the front */
		int next_id = get_nearest_neighbor(ret.ids[i - 1], visited, bucket_grid, i % 10 == 0);
		assert(next_id != 0);
		visited[next_id] = 1;
		ret.ids[i] = next_id;
		if (i == j) break; /* fronts met */
		/* extend from the back */
		next_id = get_nearest_neighbor(ret.ids[j + 1], visited, bucket_grid, (j + 1) % 10 == 0);
		assert(next_id != 0);
		visited[next_id] = 1;
		ret.ids[j] = next_id;
		if (i % 10000 == 0) printf("%lf\n", (i * 2.0)/ n);
		i++;
		j--;
	}
	free(visited); /* was leaked */
	printf("Last Edge Cost: %lf\n", dist_between_id(ret.ids[n-1], ret.ids[n-2]));
	return ret;
}
int mutate_h3_solution(struct solution sol, int extent);
double eval_solution(struct solution sol);
struct solution generate_solution(const int* ids, int n)
{
struct solution ret;
//ret.n = n;
//ret.ids = malloc(n * sizeof(int));
//memcpy(ret.ids, ids, n * sizeof(int));
//random_permutattion(ret.ids + 1, n - 2);
//int max_step = 10000;
//while(max_step--) mutate_h3_solution(ret, 100);
ret = nearest_neighbor_solution(ids, n, STARTING_POS);
double cost = eval_solution(ret);
printf("Initial solution cost: %lf\n", cost);
return ret;
}
/* Swap two random interior positions; the anchors at 0 and n-1 are fixed. */
void mutate_solution(struct solution sol)
{
	int span = sol.n - 2;
	int a = 1 + rand() % span;
	int b = 1 + rand() % span;
	int held = sol.ids[a];
	sol.ids[a] = sol.ids[b];
	sol.ids[b] = held;
}
/*
 * Random 2-opt move: pick edges (a,b) at position i and (c,d) at position
 * i+k-1/i+k; if reconnecting as (a,c)+(b,d) is shorter, reverse the
 * segment ids[i+1 .. i+k-1]. Returns 1 when an improvement was applied.
 * Fixed: the original "k -= sol.n - i" adjustment could still leave
 * i + k past the end of ids[] (out-of-bounds read/write) for large k;
 * k is now clamped so i + k <= n - 1 (i <= n-4 keeps k >= 3).
 */
int mutate_h1_solution(struct solution sol)
{
	int ret = 0;
	int i = rand() % (sol.n - 3);
	int k = 3 + rand() % 500;
	if (i + k > sol.n - 1)
		k = sol.n - 1 - i;
	int a = sol.ids[i];
	int b = sol.ids[i + 1];
	int c = sol.ids[i + k - 1];
	int d = sol.ids[i + k];
	if (dist_between_id(a, c) + dist_between_id(b, d) <
	    dist_between_id(a, b) + dist_between_id(c, d))
	{
		/* reverse ids[i+1 .. i+k-1] */
		int lo = i + 1;
		int hi = i + k - 1;
		while (lo < hi) {
			int temp = sol.ids[lo];
			sol.ids[lo] = sol.ids[hi];
			sol.ids[hi] = temp;
			lo++;
			hi--;
		}
		ret = 1;
	}
	return ret;
}
/* Try the 2-opt move first; fall back to a random swap when it made no
 * improvement (replaces the original's empty-statement `if (...);`). */
void mutate_h2_solution(struct solution sol)
{
	if (!mutate_h1_solution(sol))
		mutate_solution(sol);
}
/*
 * Greedy local repair: sweep the tour and, within a window of `extent`
 * positions, swap a closer city next to the current one. The sweep
 * direction (forward/backward) is chosen by a coin flip.
 * Fixed: "rand() / RAND_MAX" is INTEGER division - it is 0 for every
 * rand() result except RAND_MAX itself, so the forward sweep was almost
 * never taken; the quotient is now computed in floating point.
 * Returns 1 when at least one swap was made.
 */
int mutate_h3_solution(struct solution sol, int extent)
{
	int ret = 0;
	int n = sol.n;
	if ((double)rand() / (double)RAND_MAX > 0.5)
	{
		/* forward sweep: improve the edge leaving position i */
		for (int i = 0; i < n - 2; i++)
		{
			double prev_dist = dist_between_id(sol.ids[i], sol.ids[i + 1]);
			for (int j = i + 2; j < n - 1 && j < i + extent; j++)
			{
				if (dist_between_id(sol.ids[i], sol.ids[j]) < prev_dist)
				{
					int temp = sol.ids[i + 1];
					sol.ids[i + 1] = sol.ids[j];
					sol.ids[j] = temp;
					ret = 1;
					break;
				}
			}
		}
	}
	else
	{
		/* backward sweep: improve the edge entering position i */
		for (int i = n - 1; i > 1; i--)
		{
			double prev_dist = dist_between_id(sol.ids[i], sol.ids[i - 1]);
			for (int j = i - 1; j > 0 && j > i - extent; j--)
			{
				if (dist_between_id(sol.ids[i], sol.ids[j]) < prev_dist)
				{
					int temp = sol.ids[i - 1];
					sol.ids[i - 1] = sol.ids[j];
					sol.ids[j] = temp;
					ret = 1;
					break;
				}
			}
		}
	}
	return ret;
}
/* Placeholder mutation operator - intentionally a no-op (never implemented). */
void mutate_h4_solution(struct solution sol)
{
}
/* Apply the 2-opt mutation `num` times in a row. */
void multiple_mutate_solution(struct solution sol, int num)
{
	while (num-- > 0)
		mutate_h1_solution(sol);
}
/* Deep-copy a solution (caller frees the new ids buffer).
 * Fixed: malloc failure previously fed NULL into memcpy (UB). */
struct solution copy_solution(struct solution sol)
{
	struct solution ret;
	ret.n = sol.n;
	ret.ids = malloc(sol.n * sizeof(int));
	if (ret.ids == NULL) {
		fprintf(stderr, "copy_solution: out of memory\n");
		exit(EXIT_FAILURE);
	}
	memcpy(ret.ids, sol.ids, sol.n * sizeof(int));
	return ret;
}
/* Copy src's tour into dest's existing buffer.
 * NOTE(review): assumes dest.ids holds at least src.n entries. */
void inplace_copy_solution(struct solution dest, struct solution src)
{
	memcpy(dest.ids, src.ids, (size_t)src.n * sizeof *dest.ids);
}
/* Release the id buffer owned by a solution. */
void delete_solution(struct solution sol)
{
	free(sol.ids);
}
/*
 * Total tour length with the prime rule: every 10th step costs 10% extra
 * unless the checked city is prime.
 * Fixed: "i + 1 % 10 == 0" parses as "i + (1 % 10) == 0", i.e. i == -1,
 * which is never true - the penalty was NEVER applied. Now (i + 1) % 10.
 * NOTE(review): the prime test uses the step's destination ids[i+1];
 * confirm whether the rule should instead check the source city ids[i].
 */
double eval_solution(struct solution sol)
{
	double cost = 0.0;
	int i;
	#pragma omp parallel for reduction (+:cost)
	for (i = 0; i < sol.n - 1; i++)
	{
		double d = dist_between_id(sol.ids[i], sol.ids[i + 1]);
		if ((i + 1) % 10 == 0 && !is_prime[sol.ids[i + 1]]) d *= 1.1;
		cost += d;
	}
	return cost;
}
/* Tour sanity checks; currently disabled (kept for debugging sessions). */
void assert_solution(struct solution sol)
{
/*
assert(sol.n > 0);
assert(sol.ids[0] == 0);
assert(sol.ids[sol.n - 1] == 0);
*/
}
/*
 * Steepest-ascent hill climbing.
 * Each iteration clones the incumbent into neighbor_count candidates,
 * applies ceil(multi_mut * exp(gamma * i)) 2-opt mutations to each (a
 * budget that decays over iterations when gamma < 0), and adopts the best
 * candidate only if it beats the incumbent's cost. Stops after num_iter
 * iterations or `patience` consecutive non-improving iterations.
 * Returns a newly allocated solution; caller must delete_solution() it.
 */
struct solution steepest_ascent_hc(int neighbor_count, long long num_iter, int multi_mut, double gamma, int patience, struct solution template_solution)
{
int best_sol_index = 0;
//struct solution best_solution = generate_solution(template_solution.ids, template_solution.n);
struct solution best_solution = copy_solution(template_solution);
double best_cost = eval_solution(best_solution);
printf("Initial cost: %lf\n", best_cost);
/* candidate pool, reused across iterations to avoid re-allocation */
struct solution* sols;
sols = calloc(neighbor_count, sizeof(struct solution));
for(int j = 0; j < neighbor_count; j++)
{
sols[j] = copy_solution(best_solution);
}
int no_improvement = 0;
//int mut3_extent = 10;
//int should_use_h3 = 1;
for(long long i = 0; no_improvement < patience && i < num_iter; i++)
{
// make copies of best solution
for(int j = 0; j < neighbor_count; j++)
{
inplace_copy_solution(sols[j], best_solution);
}
//puts("made copies of best sol");
// mutate each solution
for(int j = 0; j < neighbor_count; j++)
{
//mutate_solution(sols[j]);
multiple_mutate_solution(sols[j], (int)ceil(multi_mut * exp(gamma * i)));
//mutate_h2_solution(sols[j]);
//mutate_h1_solution(sols[j]);
//if(should_use_h3) should_use_h3 = mutate_h3_solution(sols[j], mut3_extent);
//else mutate_h2_solution(sols[j]);
}
// find best solution; index stays -1 if nothing beat best_cost
best_sol_index = -1;
for(int j = 0; j < neighbor_count; j++)
{
double cost = eval_solution(sols[j]);
if(cost < best_cost)
{
best_sol_index = j;
best_cost = cost;
}
}
if(best_sol_index == -1)
{
//puts("did not find better solution");
// got only worse solutions
// copy previous best solution to position 0
best_sol_index = 0;
inplace_copy_solution(sols[0], best_solution);
no_improvement++;
}
else
{
//puts("found better solution");
// copy best soluion to best_solution
inplace_copy_solution(best_solution, sols[best_sol_index]);
no_improvement = 0;
}
if(i % 100 == 0) printf("Iter: %lld Best Cost: %lf\n", i, best_cost);
}
for(int j = 0; j < neighbor_count; j++)
delete_solution(sols[j]);
free(sols);
return best_solution;
}
/*
 * Entry point. argv[1] = cities CSV, argv[2] = output path for the tour,
 * argv[3] = initial solution CSV. Runs steepest-ascent hill climbing from
 * the loaded solution and writes the resulting tour.
 * Fixes: argc is validated (argv[3] was dereferenced unconditionally) and
 * the output fopen is checked (fprintf on NULL was a crash).
 */
int main(int argc, char* argv[])
{
	if (argc < 4) {
		fprintf(stderr, "usage: %s <cities.csv> <out.csv> <initial-solution.csv>\n", argv[0]);
		return 1;
	}
	srand(time(NULL));
	load_cities(argv[1]);
	int* primes;
	/* fills the is_prime[] table read by eval_solution(); the prime list
	 * itself is currently unused beyond being freed below */
	int prime_count = get_all_primes(N, &primes);
	(void)prime_count;
	int n = N;
	printf("n = %d\n", n);
	struct solution template_solution = load_solution(argv[3], n);
	struct solution result = steepest_ascent_hc(20, 10000LL, 100000, -0.00001, 500, template_solution);
	double cost = eval_solution(result);
	printf("Cost: %lf\n", cost);
	FILE* f = fopen(argv[2], "w");
	if (f == NULL) {
		fprintf(stderr, "main: cannot open %s for writing\n", argv[2]);
		return 1;
	}
	fprintf(f, "Path\n");
	/* the tour already starts at position 0; emit it in order */
	for (int i = 0; i < result.n; i++) {
		fprintf(f, "%d\n", result.ids[i]);
	}
	fclose(f);
	delete_solution(template_solution);
	delete_solution(result);
	free(primes);
	return 0;
}
|
ccsd_pack.c | /*
*
*/
#include <stdlib.h>
#include <string.h>
#include <math.h>
//#include <omp.h>
#include "config.h"
#include "np_helper/np_helper.h"
#include "vhf/fblas.h"
/*
* a * v1 + b * v2.transpose(0,2,1,3)
*/
/*
 * out = a * v1 + b * v2.transpose(0,2,1,3) over `count` blocks of shape
 * (m, m, m): out[i,j,k,l] = a*v1[i,j,k,l] + b*v2[i,k,j,l].
 */
void CCmake_0213(double *out, double *v1, double *v2, int count, int m,
                 double a, double b)
{
#pragma omp parallel default(none) \
        shared(count, m, out, v1, v2, a, b)
{
        int i, j, k, l;
        size_t d2 = m * m;
        size_t d1 = d2 * m;
#pragma omp for schedule (static)
        for (i = 0; i < count; i++) {
                for (j = 0; j < m; j++) {
                for (k = 0; k < m; k++) {
                        double *po = out + d1*i + d2*j + m*k;
                        double *p1 = v1 + d1*i + d2*j + m*k;
                        /* j and k swapped on the v2 side */
                        double *p2 = v2 + d1*i + d2*k + m*j;
                        for (l = 0; l < m; l++) {
                                po[l] = a * p1[l] + b * p2[l];
                        }
                } }
        }
}
}
/*
* out = v1 + v2.transpose(0,2,1)
*/
void CCsum021(double *out, double *v1, double *v2, int count, int m)
{
#pragma omp parallel default(none) \
shared(count, m, out, v1, v2)
{
int i, j, k, n;
size_t mm = m * m;
double *pout, *pv1, *pv2;
#pragma omp for schedule (static)
for (i = 0; i < count; i++) {
pout = out + mm * i;
pv1 = v1 + mm * i;
pv2 = v2 + mm * i;
for (n = 0, j = 0; j < m; j++) {
for (k = 0; k < m; k++, n++) {
pout[n] = pv1[n] + pv2[k*m+j];
} }
}
}
}
/*
* g2 = a * v1 + b * v2.transpose(0,2,1)
*/
/*
 * out = a * v1 + b * v2.transpose(0,2,1) over `count` (m, m) matrices.
 * Delegates to CCsum021 for the common a == b == 1 case.
 * Fixed: "return CCsum021(...)" returned a void expression from a void
 * function - a C constraint violation (C11 6.8.6.4); now call-then-return.
 */
void CCmake_021(double *out, double *v1, double *v2, int count, int m,
                double a, double b)
{
        if (a == 1 && b == 1) {
                CCsum021(out, v1, v2, count, m);
                return;
        }
#pragma omp parallel default(none) \
        shared(count, m, out, v1, v2, a, b)
{
        int i, j, k, n;
        size_t mm = m * m;
        double *pout, *pv1, *pv2;
#pragma omp for schedule (static)
        for (i = 0; i < count; i++) {
                pout = out + mm * i;
                pv1 = v1 + mm * i;
                pv2 = v2 + mm * i;
                for (n = 0, j = 0; j < m; j++) {
                for (k = 0; k < m; k++, n++) {
                        pout[n] = pv1[n] * a + pv2[k*m+j] * b;
                } }
        }
}
}
/*
* if matrix B is symmetric for the contraction A_ij B_ij,
* Tr(AB) ~ A_ii B_ii + (A_ij + A_ji) B_ij where i > j
* This function extract the A_ii and the lower triangluar part of A_ij + A_ji
*/
void CCprecontract(double *out, double *in, int count, int m, double diagfac)
{
#pragma omp parallel default(none) \
shared(count, m, in, out, diagfac)
{
int i, j, k, n;
size_t mm = m * m;
size_t m2 = m * (m+1) / 2;
double *pout, *pin;
#pragma omp for schedule (static)
for (i = 0; i < count; i++) {
pout = out + m2 * i;
pin = in + mm * i;
for (n = 0, j = 0; j < m; j++) {
for (k = 0; k < j; k++, n++) {
pout[n] = pin[j*m+k] + pin[k*m+j];
}
pout[n] = pin[j*m+j] * diagfac;
n++;
}
}
}
}
/*
* if i1 == j1:
* eri = unpack_tril(eri, axis=0)
* unpack_tril(eri).reshape(i1-i0,j1-j0,nao,nao).transpose(0,2,1,3)
*/
/*
 * Unpack a block of triangular-packed (nao, nao) matrices and scatter them
 * so `out` is laid out as (i1-i0, nao, j1-j0, nao), i.e. the
 * reshape(...).transpose(0,2,1,3) described above.
 * orbs_slice = {i0, i1, j0, j1}; eri holds (i1-i0)*(j1-j0) packed rows of
 * nao*(nao+1)/2 doubles each.
 */
void CCload_eri(double *out, double *eri, int *orbs_slice, int nao)
{
int i0 = orbs_slice[0];
int i1 = orbs_slice[1];
int j0 = orbs_slice[2];
int j1 = orbs_slice[3];
size_t ni = i1 - i0;
size_t nj = j1 - j0;
size_t nn = nj * nao;
size_t nao_pair = nao * (nao + 1) / 2;
#pragma omp parallel default(none) \
        shared(out, eri, i1, j1, ni, nj, nn, nao, nao_pair)
{
int i, j, k, l, ij;
double *pout;
/* per-thread scratch for one unpacked square matrix;
 * NOTE(review): malloc result is not checked */
double *buf = malloc(sizeof(double) * nao*nao);
#pragma omp for schedule (static)
for (ij = 0; ij < ni*nj; ij++) {
i = ij / nj;
j = ij % nj;
/* expand the packed triangle into the full (nao, nao) buffer */
NPdunpack_tril(nao, eri+ij*nao_pair, buf, 1);
pout = out + (i*nn+j)*nao;
for (k = 0; k < nao; k++) {
for (l = 0; l < nao; l++) {
/* row k lands nn doubles apart, realizing the 0,2,1,3 transpose */
pout[k*nn+l] = buf[k*nao+l];
} }
}
free(buf);
}
}
/*
* eri put virtual orbital first
* [ v ]
* [ v . ]
* [ v . . ]
* [ o . . . ]
* [ o . . . . ]
*/
void CCsd_sort_inplace(double *eri, int nocc, int nvir, int count)
{
/* Reorder each packed-triangular (nmo, nmo) matrix in place so the
 * occupied-occupied triangle directly follows the virtual-virtual
 * triangle, with the occ-vir rectangle moved to the end (see the
 * diagram above the function). */
#pragma omp parallel default(none) \
        shared(eri, nocc, nvir, count)
{
int ic, i, j, ij;
size_t nmo = nocc + nvir;
size_t nmo_pair = nmo * (nmo+1) / 2;
size_t nocc_pair = nocc * (nocc+1) /2;
size_t nvir_pair = nvir * (nvir+1) /2;
double *peri, *pout;
/* scratch for the nocc x nvir rectangle;
 * NOTE(review): malloc result is not checked */
double *buf = malloc(sizeof(double) * nocc*nvir);
#pragma omp for schedule (static)
for (ic = 0; ic < count; ic++) {
/* save the occ-vir rectangle (first nvir entries of each occ row) */
peri = eri + ic*nmo_pair + nvir_pair;
for (i = 0; i < nocc; i++, peri+=nvir+i) {
for (j = 0; j < nvir; j++) {
buf[i*nvir+j] = peri[j];
}
}
/* compact the occ-occ triangle so it follows the vir-vir block */
pout = eri + ic*nmo_pair + nvir_pair;
peri = eri + ic*nmo_pair + nvir_pair + nvir;
for (ij = 0, i = 0; i < nocc; i++, peri+=nvir+i) {
for (j = 0; j <= i; j++, ij++) {
pout[ij] = peri[j];
}
}
/* append the saved occ-vir rectangle */
pout = eri + ic*nmo_pair + nvir_pair + nocc_pair;
memcpy(pout, buf, sizeof(double)*nocc*nvir);
}
free(buf);
}
}
|
alloc.c | #define PY_SSIZE_T_CLEAN
#include <Python.h>
#include "structmember.h"
#define NPY_NO_DEPRECATED_API NPY_API_VERSION
#define NO_IMPORT_ARRAY
#define PY_ARRAY_UNIQUE_SYMBOL MICPY_ARRAY_API
#include <numpy/ndarraytypes.h>
#include <numpy/arrayobject.h>
#include <numpy/npy_common.h>
//#include "npy_config.h"
#define _MICARRAYMODULE
#include "common.h"
#include "alloc.h"
#include <assert.h>
#define NBUCKETS 1024 /* number of buckets for data*/
#define NBUCKETS_DIM 16 /* number of buckets for dimensions/strides */
#define NCACHE 7 /* number of cache entries per bucket */
/* this structure fits neatly into a cacheline */
typedef struct {
npy_uintp available; /* number of cached pointers */
void * ptrs[NCACHE];
} cache_bucket;
/* datacache: one NBUCKETS-sized region per device; dimcache: single region. */
static cache_bucket datacache[NMAXDEVICES*NBUCKETS];
static cache_bucket dimcache[NBUCKETS_DIM];
/*
* very simplistic small memory block cache to avoid more expensive libc
* allocations
* base function for data cache with 1 byte buckets and dimension cache with
* sizeof(npy_intp) byte buckets
*/
/*
 * Pop a cached block for device `dev` when one of size `nelem` (< msz)
 * is queued; otherwise fall back to the device allocator. esz is the
 * cache unit size (1 byte for datacache, sizeof(npy_intp) for dimcache).
 * NOTE(review): the dev*msz + nelem indexing assumes one msz-sized cache
 * region per device, which matches datacache's layout; if dimcache is
 * ever passed here, dev must be 0 - confirm against call sites.
 */
static NPY_INLINE void *
_mpy_alloc_cache(int dev, npy_uintp nelem, npy_uintp esz, npy_uint msz,
cache_bucket * cache, void * (*alloc)(size_t, int))
{
assert((dev >= 0 && dev < NDEVICES) &&
((esz == 1 && cache == datacache) ||
(esz == sizeof(npy_intp) && cache == dimcache)));
if (nelem < msz) {
int i = dev*msz + nelem;
if (cache[i].available > 0) {
return cache[i].ptrs[--(cache[i].available)];
}
}
return alloc(nelem * esz, dev);
}
/*
 * Pop a pointer from the host-side small-block cache when one of size
 * `nelem` (< msz) is available; otherwise call the provided allocator.
 */
static NPY_INLINE void *
_npy_alloc_cache(npy_uintp nelem, npy_uintp esz, npy_uint msz,
                 cache_bucket * cache, void * (*alloc)(size_t))
{
    assert((esz == 1 && cache == datacache) ||
           (esz == sizeof(npy_intp) && cache == dimcache));
    if (nelem < msz && cache[nelem].available > 0) {
        return cache[nelem].ptrs[--(cache[nelem].available)];
    }
    return alloc(nelem * esz);
}
/*
* return pointer p to cache, nelem is number of elements of the cache bucket
* size (1 or sizeof(npy_intp)) of the block pointed too
*/
/*
 * Return block p (of nelem cache units) to device dev's cache when there
 * is room; otherwise release it through the device deallocator.
 */
static NPY_INLINE void
_mpy_free_cache(int dev, void * p, npy_uintp nelem, npy_uint msz,
                cache_bucket * cache, void (*dealloc)(void *, int))
{
    assert(dev >= 0 && dev < NDEVICES);
    if (p != NULL && nelem < msz) {
        cache_bucket *b = &cache[dev*msz + nelem];
        if (b->available < NCACHE) {
            b->ptrs[b->available++] = p;
            return;
        }
    }
    dealloc(p, dev);
}
/*
 * Stash host block p (of nelem cache units) in the cache when there is
 * room; otherwise release it through dealloc.
 */
static NPY_INLINE void
_npy_free_cache(void * p, npy_uintp nelem, npy_uint msz,
                cache_bucket * cache, void (*dealloc)(void *))
{
    if (p != NULL && nelem < msz) {
        cache_bucket *b = &cache[nelem];
        if (b->available < NCACHE) {
            b->ptrs[b->available++] = p;
            return;
        }
    }
    dealloc(p);
}
/*
* array data cache, sz is number of bytes to allocate
*/
/* Allocate sz bytes of device array data through the per-device cache. */
NPY_NO_EXPORT void *
mpy_alloc_cache(npy_uintp sz, int device)
{
    return _mpy_alloc_cache(device, sz, 1, NBUCKETS, datacache,
                            &PyDataMemMic_NEW);
}
/* zero initialized data, sz is number of bytes to allocate */
/* zero initialized data, sz is number of bytes to allocate */
NPY_NO_EXPORT void *
mpy_alloc_cache_zero(npy_uintp sz, int device)
{
void * p;
if (sz < NBUCKETS) {
/* small block: may come from the reuse cache, so it must be re-zeroed;
 * the memset runs inside a target region since p is device memory */
p = _mpy_alloc_cache(device, sz, 1, NBUCKETS, datacache, &PyDataMemMic_NEW);
if (p) {
#pragma omp target device(device) map(to:p,sz)
memset(p, 0, sz);
}
return p;
}
/* large block: allocate pre-zeroed directly, releasing the GIL */
Py_BEGIN_ALLOW_THREADS
p = PyDataMemMic_NEW_ZEROED(sz, 1, device);
Py_END_ALLOW_THREADS
return p;
}
/* Return sz bytes of device array data to the per-device cache. */
NPY_NO_EXPORT void
mpy_free_cache(void * p, npy_uintp sz, int device)
{
    _mpy_free_cache(device, p, sz, NBUCKETS, datacache,
                    &PyDataMemMic_FREE);
}
/*
* dimension/stride cache, uses a different allocator and is always a multiple
* of npy_intp
*/
/* Allocate sz npy_intp entries for dims/strides through the dim cache. */
NPY_NO_EXPORT void *
mpy_alloc_cache_dim(npy_uintp sz)
{
    /* dims + strides: always reserve room for at least two entries */
    if (NPY_UNLIKELY(sz < 2)) {
        sz = 2;
    }
    return _npy_alloc_cache(sz, sizeof(npy_intp), NBUCKETS_DIM, dimcache,
                            &PyArray_malloc);
}
/* Return sz npy_intp entries of dims/strides storage to the dim cache. */
NPY_NO_EXPORT void
mpy_free_cache_dim(void * p, npy_uintp sz)
{
    /* dims + strides: the matching alloc rounded sz up to 2 as well */
    if (NPY_UNLIKELY(sz < 2)) {
        sz = 2;
    }
    _npy_free_cache(p, sz, NBUCKETS_DIM, dimcache,
                    &PyArray_free);
}
/*NUMPY_API
* Allocates memory for array data.
*/
/*NUMPY_API
 * Allocates `size` bytes of array data on the given device
 * (released via PyDataMemMic_FREE).
 */
NPY_NO_EXPORT void *
PyDataMemMic_NEW(size_t size, int device)
{
    return omp_target_alloc(size, device);
}
/*NUMPY_API
* Allocates zeroed memory for array data on given device.
*/
/*NUMPY_API
 * Allocates zeroed memory for array data on given device.
 * NOTE(review): calloc executes inside the target region, so `result` is
 * a device-side pointer mapped back via map(from:result); confirm it is
 * paired with a device-side release rather than omp_target_free, since
 * it was not obtained from omp_target_alloc.
 */
NPY_NO_EXPORT void *
PyDataMemMic_NEW_ZEROED(size_t size, size_t elsize, int device)
{
void *result;
#pragma omp target device(device) map(from:result)
result = calloc(size, elsize);
return result;
}
/*NUMPY_API
* Free memory for array data on given device.
*/
/*NUMPY_API
 * Releases array data previously obtained from PyDataMemMic_NEW
 * on the given device.
 */
NPY_NO_EXPORT void
PyDataMemMic_FREE(void *ptr, int device)
{
    omp_target_free(ptr, device);
}
/*NUMPY_API
* Reallocate/resize memory for array data on given divice.
*/
/*NUMPY_API
 * Reallocate/resize memory for array data on given divice.
 * NOTE(review): realloc runs inside the target region on a device-side
 * pointer; this only works for pointers allocated the same way (e.g. by
 * PyDataMemMic_NEW_ZEROED's device-side calloc), not for omp_target_alloc
 * blocks - confirm against callers. On failure `result` is NULL and the
 * original block remains valid.
 */
NPY_NO_EXPORT void *
PyDataMemMic_RENEW(void *ptr, size_t size, int device)
{
void *result;
Py_BEGIN_ALLOW_THREADS
#pragma omp target device(device) map(from:result)
result = realloc(ptr, size);
Py_END_ALLOW_THREADS
return result;
}
|
image.c |
/*!
***********************************************************************
* \file image.c
*
* \brief
* Decode a Slice
*
* \author
* Main contributors (see contributors.h for copyright, address and affiliation details)
* - Inge Lille-Langoy <inge.lille-langoy@telenor.com>
* - Rickard Sjoberg <rickard.sjoberg@era.ericsson.se>
* - Jani Lainema <jani.lainema@nokia.com>
* - Sebastian Purreiter <sebastian.purreiter@mch.siemens.de>
* - Byeong-Moon Jeon <jeonbm@lge.com>
* - Thomas Wedi <wedi@tnt.uni-hannover.de>
* - Gabi Blaettermann
* - Ye-Kui Wang <wyk@ieee.org>
* - Antti Hallapuro <antti.hallapuro@nokia.com>
* - Alexis Tourapis <alexismt@ieee.org>
* - Jill Boyce <jill.boyce@thomson.net>
* - Saurav K Bandyopadhyay <saurav@ieee.org>
* - Zhenyu Wu <Zhenyu.Wu@thomson.net
* - Purvin Pandit <Purvin.Pandit@thomson.net>
*
***********************************************************************
*/
#include "contributors.h"
#include <math.h>
#include <limits.h>
#include "global.h"
#include "image.h"
#include "fmo.h"
#include "annexb.h"
#include "nalu.h"
#include "parset.h"
#include "header.h"
#include "sei.h"
#include "output.h"
#include "mb_access.h"
#include "memalloc.h"
#include "macroblock.h"
#include "loopfilter.h"
#include "biaridecod.h"
#include "context_ini.h"
#include "cabac.h"
#include "vlc.h"
#include "quant.h"
#include "errorconcealment.h"
#include "erc_api.h"
#include "mbuffer_mvc.h"
#include "fast_memory.h"
#include "mc_prediction.h"
extern int testEndian(void);
void reorder_lists(Slice *currSlice);
/* Mark one macroblock as unassigned/lost: no owning slice, error
 * concealment flag raised, deblocking-params flag cleared. */
static inline void reset_mbs(Macroblock *currMB)
{
  currMB->slice_nr = -1;
  currMB->ei_flag  =  1;
  currMB->dpl_flag =  0;
}
/* Clear one motion-info cell: both prediction lists get no reference
 * picture, the zero motion vector, and an invalid reference index. */
static inline void reset_mv_info(PicMotionParams *mv_info)
{
  /* list 0 */
  mv_info->ref_pic[LIST_0] = NULL;
  mv_info->mv[LIST_0] = zero_mv;
  mv_info->ref_idx[LIST_0] = -1;
  /* list 1 */
  mv_info->ref_pic[LIST_1] = NULL;
  mv_info->mv[LIST_1] = zero_mv;
  mv_info->ref_idx[LIST_1] = -1;
}
/*!
************************************************************************
* \brief
* init macroblock I and P frames
************************************************************************
*/
void init_all_macroblocks(StorablePicture *dec_picture)
{
int j;
PicMotionParams *mv_info = dec_picture->mv_info[0];
// reset vectors and pred. modes
/* one PicMotionParams cell per 4x4 block: (size_x * size_y) / 16 cells */
for(j = 0; j < ((dec_picture->size_x * dec_picture->size_y) >> 4); ++j)
{
reset_mv_info(mv_info++);
}
}
/*!
************************************************************************
* \brief
* Initializes the parameters for a new picture
************************************************************************
*/
// static void init_picture(VideoParameters *p_Vid, Slice *currSlice, InputParameters *p_Inp)
// {
// int i;
// int nplane;
// StorablePicture *dec_picture = NULL;
// seq_parameter_set_rbsp_t *active_sps = p_Vid->active_sps;
// DecodedPictureBuffer *p_Dpb = currSlice->p_Dpb;
// p_Vid->bFrameInit = 1;
// if (p_Vid->dec_picture) // && p_Vid->num_dec_mb == p_Vid->PicSizeInMbs)
// {
// // this may only happen on slice loss
// exit_picture(p_Vid, &p_Vid->dec_picture);
// }
// if (p_Vid->recovery_point)
// p_Vid->recovery_frame_num = (currSlice->frame_num + p_Vid->recovery_frame_cnt) % p_Vid->MaxFrameNum;
// if (currSlice->idr_flag)
// p_Vid->recovery_frame_num = currSlice->frame_num;
// if (p_Vid->recovery_point == 0 &&
// currSlice->frame_num != p_Vid->pre_frame_num &&
// currSlice->frame_num != (p_Vid->pre_frame_num + 1) % p_Vid->MaxFrameNum)
// {
// if (active_sps->gaps_in_frame_num_value_allowed_flag == 0)
// {
// // picture error concealment
// if(p_Inp->conceal_mode !=0)
// {
// if((currSlice->frame_num) < ((p_Vid->pre_frame_num + 1) % p_Vid->MaxFrameNum))
// {
// /* Conceal lost IDR frames and any frames immediately
// following the IDR. Use frame copy for these since
// lists cannot be formed correctly for motion copy*/
// p_Vid->conceal_mode = 1;
// p_Vid->IDR_concealment_flag = 1;
// conceal_lost_frames(p_Dpb, currSlice);
// //reset to original concealment mode for future drops
// p_Vid->conceal_mode = p_Inp->conceal_mode;
// }
// else
// {
// //reset to original concealment mode for future drops
// p_Vid->conceal_mode = p_Inp->conceal_mode;
// p_Vid->IDR_concealment_flag = 0;
// conceal_lost_frames(p_Dpb, currSlice);
// }
// }
// else
// { /* Advanced Error Concealment would be called here to combat unintentional loss of pictures. */
// error("An unintentional loss of pictures occurs! Exit\n", 100);
// }
// }
// if(p_Vid->conceal_mode == 0)
// fill_frame_num_gap(p_Vid, currSlice);
// }
// if(currSlice->nal_reference_idc)
// {
// p_Vid->pre_frame_num = currSlice->frame_num;
// }
// //p_Vid->num_dec_mb = 0;
// //calculate POC
// decode_poc(p_Vid, currSlice);
// if (p_Vid->recovery_frame_num == (int) currSlice->frame_num && p_Vid->recovery_poc == 0x7fffffff)
// p_Vid->recovery_poc = currSlice->framepoc;
// if(currSlice->nal_reference_idc)
// p_Vid->last_ref_pic_poc = currSlice->framepoc;
// // dumppoc (p_Vid);
// if (currSlice->structure==FRAME ||currSlice->structure==TOP_FIELD)
// {
// gettime (&(p_Vid->start_time)); // start time
// }
// dec_picture = p_Vid->dec_picture = alloc_storable_picture (p_Vid, currSlice->structure, p_Vid->width, p_Vid->height, p_Vid->width_cr, p_Vid->height_cr);
// dec_picture->top_poc=currSlice->toppoc;
// dec_picture->bottom_poc=currSlice->bottompoc;
// dec_picture->frame_poc=currSlice->framepoc;
// dec_picture->qp = currSlice->qp;
// dec_picture->slice_qp_delta = currSlice->slice_qp_delta;
// dec_picture->chroma_qp_offset[0] = p_Vid->active_pps->chroma_qp_index_offset;
// dec_picture->chroma_qp_offset[1] = p_Vid->active_pps->second_chroma_qp_index_offset;
// dec_picture->iCodingType = currSlice->structure==FRAME? (currSlice->mb_aff_frame_flag? FRAME_MB_PAIR_CODING:FRAME_CODING): FIELD_CODING; //currSlice->slice_type;
// #if (MVC_EXTENSION_ENABLE)
// dec_picture->view_id = currSlice->view_id;
// dec_picture->inter_view_flag = currSlice->inter_view_flag;
// dec_picture->anchor_pic_flag = currSlice->anchor_pic_flag;
// #endif
// // reset all variables of the error concealment instance before decoding of every frame.
// // here the third parameter should, if perfectly, be equal to the number of slices per frame.
// // using little value is ok, the code will allocate more memory if the slice number is larger
// ercReset(p_Vid->erc_errorVar, p_Vid->PicSizeInMbs, p_Vid->PicSizeInMbs, dec_picture->size_x);
// p_Vid->erc_mvperMB = 0;
// switch (currSlice->structure )
// {
// case TOP_FIELD:
// {
// dec_picture->poc = currSlice->toppoc;
// p_Vid->number *= 2;
// break;
// }
// case BOTTOM_FIELD:
// {
// dec_picture->poc = currSlice->bottompoc;
// p_Vid->number = p_Vid->number * 2 + 1;
// break;
// }
// case FRAME:
// {
// dec_picture->poc = currSlice->framepoc;
// break;
// }
// default:
// error("p_Vid->structure not initialized", 235);
// }
// //p_Vid->current_slice_nr=0;
// if (p_Vid->type > SI_SLICE)
// {
// set_ec_flag(p_Vid, SE_PTYPE);
// p_Vid->type = P_SLICE; // concealed element
// }
// // CAVLC init
// if (p_Vid->active_pps->entropy_coding_mode_flag == CAVLC)
// {
// memset(p_Vid->nz_coeff[0][0][0], -1, p_Vid->PicSizeInMbs * 48 *sizeof(byte)); // 3 * 4 * 4
// }
// // Set the slice_nr member of each MB to -1, to ensure correct when packet loss occurs
// // TO set Macroblock Map (mark all MBs as 'have to be concealed')
// if( (p_Vid->separate_colour_plane_flag != 0) )
// {
// for( nplane=0; nplane<MAX_PLANE; ++nplane )
// {
// Macroblock *currMB = p_Vid->mb_data_JV[nplane];
// char *intra_block = p_Vid->intra_block_JV[nplane];
// for(i=0; i<(int)p_Vid->PicSizeInMbs; ++i)
// {
// reset_mbs(currMB++);
// }
// fast_memset(p_Vid->ipredmode_JV[nplane][0], DC_PRED, 16 * p_Vid->FrameHeightInMbs * p_Vid->PicWidthInMbs * sizeof(char));
// if(p_Vid->active_pps->constrained_intra_pred_flag)
// {
// for (i=0; i<(int)p_Vid->PicSizeInMbs; ++i)
// {
// intra_block[i] = 1;
// }
// }
// }
// }
// else
// {
// #if 0 //defined(OPENMP)
// #pragma omp parallel for
// for(i=0; i<(int)p_Vid->PicSizeInMbs; ++i)
// reset_mbs(&p_Vid->mb_data[i]);
// #else
// Macroblock *currMB = p_Vid->mb_data;
// for(i=0; i<(int)p_Vid->PicSizeInMbs; ++i)
// reset_mbs(currMB++);
// #endif
// if(p_Vid->active_pps->constrained_intra_pred_flag)
// {
// for (i=0; i<(int)p_Vid->PicSizeInMbs; ++i)
// {
// p_Vid->intra_block[i] = 1;
// }
// }
// fast_memset(p_Vid->ipredmode[0], DC_PRED, 16 * p_Vid->FrameHeightInMbs * p_Vid->PicWidthInMbs * sizeof(char));
// }
// dec_picture->slice_type = p_Vid->type;
// dec_picture->used_for_reference = (currSlice->nal_reference_idc != 0);
// dec_picture->idr_flag = currSlice->idr_flag;
// dec_picture->no_output_of_prior_pics_flag = currSlice->no_output_of_prior_pics_flag;
// dec_picture->long_term_reference_flag = currSlice->long_term_reference_flag;
// dec_picture->adaptive_ref_pic_buffering_flag = currSlice->adaptive_ref_pic_buffering_flag;
// dec_picture->dec_ref_pic_marking_buffer = currSlice->dec_ref_pic_marking_buffer;
// currSlice->dec_ref_pic_marking_buffer = NULL;
// dec_picture->mb_aff_frame_flag = currSlice->mb_aff_frame_flag;
// dec_picture->PicWidthInMbs = p_Vid->PicWidthInMbs;
// p_Vid->get_mb_block_pos = dec_picture->mb_aff_frame_flag ? get_mb_block_pos_mbaff : get_mb_block_pos_normal;
// p_Vid->getNeighbour = dec_picture->mb_aff_frame_flag ? getAffNeighbour : getNonAffNeighbour;
// dec_picture->pic_num = currSlice->frame_num;
// dec_picture->frame_num = currSlice->frame_num;
// dec_picture->recovery_frame = (unsigned int) ((int) currSlice->frame_num == p_Vid->recovery_frame_num);
// dec_picture->coded_frame = (currSlice->structure==FRAME);
// dec_picture->chroma_format_idc = active_sps->chroma_format_idc;
// dec_picture->frame_mbs_only_flag = active_sps->frame_mbs_only_flag;
// dec_picture->frame_cropping_flag = active_sps->frame_cropping_flag;
// if (dec_picture->frame_cropping_flag)
// {
// dec_picture->frame_cropping_rect_left_offset = active_sps->frame_cropping_rect_left_offset;
// dec_picture->frame_cropping_rect_right_offset = active_sps->frame_cropping_rect_right_offset;
// dec_picture->frame_cropping_rect_top_offset = active_sps->frame_cropping_rect_top_offset;
// dec_picture->frame_cropping_rect_bottom_offset = active_sps->frame_cropping_rect_bottom_offset;
// }
// #if (ENABLE_OUTPUT_TONEMAPPING)
// // store the necessary tone mapping sei into StorablePicture structure
// if (p_Vid->seiToneMapping->seiHasTone_mapping)
// {
// int coded_data_bit_max = (1 << p_Vid->seiToneMapping->coded_data_bit_depth);
// dec_picture->seiHasTone_mapping = 1;
// dec_picture->tone_mapping_model_id = p_Vid->seiToneMapping->model_id;
// dec_picture->tonemapped_bit_depth = p_Vid->seiToneMapping->sei_bit_depth;
// dec_picture->tone_mapping_lut = malloc(coded_data_bit_max * sizeof(int));
// if (NULL == dec_picture->tone_mapping_lut)
// {
// no_mem_exit("init_picture: tone_mapping_lut");
// }
// memcpy(dec_picture->tone_mapping_lut, p_Vid->seiToneMapping->lut, sizeof(imgpel) * coded_data_bit_max);
// update_tone_mapping_sei(p_Vid->seiToneMapping);
// }
// else
// dec_picture->seiHasTone_mapping = 0;
// #endif
// if( (p_Vid->separate_colour_plane_flag != 0) )
// {
// p_Vid->dec_picture_JV[0] = p_Vid->dec_picture;
// p_Vid->dec_picture_JV[1] = alloc_storable_picture (p_Vid, (PictureStructure) currSlice->structure, p_Vid->width, p_Vid->height, p_Vid->width_cr, p_Vid->height_cr);
// copy_dec_picture_JV( p_Vid, p_Vid->dec_picture_JV[1], p_Vid->dec_picture_JV[0] );
// p_Vid->dec_picture_JV[2] = alloc_storable_picture (p_Vid, (PictureStructure) currSlice->structure, p_Vid->width, p_Vid->height, p_Vid->width_cr, p_Vid->height_cr);
// copy_dec_picture_JV( p_Vid, p_Vid->dec_picture_JV[2], p_Vid->dec_picture_JV[0] );
// }
// }
/*!
 ************************************************************************
 * \brief
 *    Re-order one MBAFF macroblock pair in place: the 2*height rows
 *    starting at *cur_img (columns [x0, x0+width)) are copied into the
 *    scratch buffer, then written back with the first half of the
 *    buffer on even rows and the second half on odd rows.
 *    temp must provide at least 2*height rows of 16 pels each.
 ************************************************************************
 */
static void update_mbaff_macroblock_data(imgpel **cur_img, imgpel (*temp)[16], int x0, int width, int height)
{
  int row;

  /* Stage 1: snapshot all 2*height source rows into the scratch buffer. */
  for (row = 0; row < 2 * height; ++row)
    memcpy(temp[row], cur_img[row] + x0, width * sizeof(imgpel));

  /* Stage 2: write back de-interleaved — buffer rows [0, height) land on
     even picture rows, rows [height, 2*height) on odd picture rows. */
  for (row = 0; row < height; ++row)
  {
    memcpy(cur_img[2 * row]     + x0, temp[row],          width * sizeof(imgpel));
    memcpy(cur_img[2 * row + 1] + x0, temp[row + height], width * sizeof(imgpel));
  }
}
// static void MbAffPostProc(VideoParameters *p_Vid)
// {
// imgpel temp_buffer[32][16];
// StorablePicture *dec_picture = p_Vid->dec_picture;
// imgpel ** imgY = dec_picture->imgY;
// imgpel ***imgUV = dec_picture->imgUV;
// short i, x0, y0;
// for (i=0; i<(int)dec_picture->PicSizeInMbs; i+=2)
// {
// if (dec_picture->motion.mb_field[i])
// {
// get_mb_pos(p_Vid, i, p_Vid->mb_size[IS_LUMA], &x0, &y0);
// update_mbaff_macroblock_data(imgY + y0, temp_buffer, x0, MB_BLOCK_SIZE, MB_BLOCK_SIZE);
// if (dec_picture->chroma_format_idc != YUV400)
// {
// x0 = (short) ((x0 * p_Vid->mb_cr_size_x) >> 4);
// y0 = (short) ((y0 * p_Vid->mb_cr_size_y) >> 4);
// update_mbaff_macroblock_data(imgUV[0] + y0, temp_buffer, x0, p_Vid->mb_cr_size_x, p_Vid->mb_cr_size_y);
// update_mbaff_macroblock_data(imgUV[1] + y0, temp_buffer, x0, p_Vid->mb_cr_size_x, p_Vid->mb_cr_size_y);
// }
// }
// }
// }
/*!
 ************************************************************************
 * \brief
 *    Initialize the weighted-prediction weight/offset tables for a
 *    B slice. For implicit weighted prediction (weighted_bipred_idc==2)
 *    default weights are installed first, then per reference pair
 *    bi-prediction weights are derived from POC distances; for explicit
 *    mode (==1) the signalled per-list weights are copied. When MBAFF
 *    is active the tables are duplicated for the top/bottom field lists.
 ************************************************************************
 */
static void fill_wp_params(Slice *currSlice)
{
  // Tables are only needed for B slices.
  if (currSlice->slice_type == B_SLICE)
  {
    int i, j, k;
    int comp;
    int log_weight_denom;
    int tb, td;
    int tx,DistScaleFactor;

    int max_l0_ref = currSlice->num_ref_idx_active[LIST_0];
    int max_l1_ref = currSlice->num_ref_idx_active[LIST_1];

    // Implicit WP: fixed denominators/rounding, unit weights, zero offsets.
    if (currSlice->active_pps->weighted_bipred_idc == 2)
    {
      currSlice->luma_log2_weight_denom = 5;
      currSlice->chroma_log2_weight_denom = 5;
      currSlice->wp_round_luma = 16;
      currSlice->wp_round_chroma = 16;

      for (i=0; i<MAX_REFERENCE_PICTURES; ++i)
      {
        // comp 0 is luma, 1/2 are the chroma components.
        for (comp=0; comp<3; ++comp)
        {
          log_weight_denom = (comp == 0) ? currSlice->luma_log2_weight_denom : currSlice->chroma_log2_weight_denom;
          currSlice->wp_weight[0][i][comp] = 1 << log_weight_denom;
          currSlice->wp_weight[1][i][comp] = 1 << log_weight_denom;
          currSlice->wp_offset[0][i][comp] = 0;
          currSlice->wp_offset[1][i][comp] = 0;
        }
      }
    }

    // Bi-prediction weights for every (list0 i, list1 j) reference pair.
    for (i=0; i<max_l0_ref; ++i)
    {
      for (j=0; j<max_l1_ref; ++j)
      {
        for (comp = 0; comp<3; ++comp)
        {
          log_weight_denom = (comp == 0) ? currSlice->luma_log2_weight_denom : currSlice->chroma_log2_weight_denom;
          if (currSlice->active_pps->weighted_bipred_idc == 1)
          {
            // Explicit mode: copy the per-list weights from the slice header.
            currSlice->wbp_weight[0][i][j][comp] = currSlice->wp_weight[0][i][comp];
            currSlice->wbp_weight[1][i][j][comp] = currSlice->wp_weight[1][j][comp];
          }
          else if (currSlice->active_pps->weighted_bipred_idc == 2)
          {
            // Implicit mode: derive weights from the (clipped) POC distance
            // between the two references.
            td = iClip3(-128,127,currSlice->listX[LIST_1][j]->poc - currSlice->listX[LIST_0][i]->poc);
            // Equal POC or long-term references fall back to equal 32/32 weights.
            if (td == 0 || currSlice->listX[LIST_1][j]->is_long_term || currSlice->listX[LIST_0][i]->is_long_term)
            {
              currSlice->wbp_weight[0][i][j][comp] = 32;
              currSlice->wbp_weight[1][i][j][comp] = 32;
            }
            else
            {
              tb = iClip3(-128,127,currSlice->ThisPOC - currSlice->listX[LIST_0][i]->poc);

              tx = (16384 + iabs(td/2))/td;
              DistScaleFactor = iClip3(-1024, 1023, (tx*tb + 32 )>>6);

              currSlice->wbp_weight[1][i][j][comp] = DistScaleFactor >> 2;
              currSlice->wbp_weight[0][i][j][comp] = 64 - currSlice->wbp_weight[1][i][j][comp];
              // Out-of-range scale factor: revert to default 32/32, zero offsets.
              if (currSlice->wbp_weight[1][i][j][comp] < -64 || currSlice->wbp_weight[1][i][j][comp] > 128)
              {
                currSlice->wbp_weight[0][i][j][comp] = 32;
                currSlice->wbp_weight[1][i][j][comp] = 32;
                currSlice->wp_offset[0][i][comp] = 0;
                currSlice->wp_offset[1][j][comp] = 0;
              }
            }
          }
        }
      }
    }

    // MBAFF: replicate the tables for the field reference lists
    // (k==2 -> top field lists, k==4 -> bottom field lists).
    if (currSlice->mb_aff_frame_flag)
    {
      for (i=0; i<2*max_l0_ref; ++i)
      {
        for (j=0; j<2*max_l1_ref; ++j)
        {
          for (comp = 0; comp<3; ++comp)
          {
            for (k=2; k<6; k+=2)
            {
              // Field entries i map to frame entries i>>1.
              currSlice->wp_offset[k+0][i][comp] = currSlice->wp_offset[0][i>>1][comp];
              currSlice->wp_offset[k+1][j][comp] = currSlice->wp_offset[1][j>>1][comp];

              log_weight_denom = (comp == 0) ? currSlice->luma_log2_weight_denom : currSlice->chroma_log2_weight_denom;
              if (currSlice->active_pps->weighted_bipred_idc == 1)
              {
                currSlice->wbp_weight[k+0][i][j][comp] = currSlice->wp_weight[0][i>>1][comp];
                currSlice->wbp_weight[k+1][i][j][comp] = currSlice->wp_weight[1][j>>1][comp];
              }
              else if (currSlice->active_pps->weighted_bipred_idc == 2)
              {
                // Same implicit derivation as above, but against the field lists
                // and the field POC (top or bottom depending on k).
                td = iClip3(-128, 127, currSlice->listX[k+LIST_1][j]->poc - currSlice->listX[k+LIST_0][i]->poc);

                if (td == 0 || currSlice->listX[k+LIST_1][j]->is_long_term || currSlice->listX[k+LIST_0][i]->is_long_term)
                {
                  currSlice->wbp_weight[k+0][i][j][comp] = 32;
                  currSlice->wbp_weight[k+1][i][j][comp] = 32;
                }
                else
                {
                  tb = iClip3(-128,127,((k==2)?currSlice->toppoc:currSlice->bottompoc) - currSlice->listX[k+LIST_0][i]->poc);

                  tx = (16384 + iabs(td/2))/td;
                  DistScaleFactor = iClip3(-1024, 1023, (tx*tb + 32 )>>6);

                  currSlice->wbp_weight[k+1][i][j][comp] = DistScaleFactor >> 2;
                  currSlice->wbp_weight[k+0][i][j][comp] = 64 - currSlice->wbp_weight[k+1][i][j][comp];
                  if (currSlice->wbp_weight[k+1][i][j][comp] < -64 || currSlice->wbp_weight[k+1][i][j][comp] > 128)
                  {
                    currSlice->wbp_weight[k+1][i][j][comp] = 32;
                    currSlice->wbp_weight[k+0][i][j][comp] = 32;
                    currSlice->wp_offset[k+0][i][comp] = 0;
                    currSlice->wp_offset[k+1][j][comp] = 0;
                  }
                }
              }
            }
          }
        }
      }
    }
  }
}
//static void init_picture_decoding(VideoParameters *p_Vid)
//{
// Slice *pSlice = p_Vid->ppSliceList[0];
// int j, i, iSliceNo, iDeblockMode=1;
// Macroblock *pMBData;
//
// if(p_Vid->iSliceNumOfCurrPic >= MAX_NUM_SLICES)
// {
// error ("Maximum number of supported slices exceeded. \nPlease recompile with increased value for MAX_NUM_SLICES", 200);
// }
//
// if(p_Vid->pNextPPS->Valid && (int) p_Vid->pNextPPS->pic_parameter_set_id == pSlice->pic_parameter_set_id)
// {
// pic_parameter_set_rbsp_t tmpPPS;
// memcpy(&tmpPPS, p_Vid->active_pps, sizeof (pic_parameter_set_rbsp_t));
// p_Vid->active_pps->slice_group_id = NULL;
// MakePPSavailable (p_Vid, p_Vid->pNextPPS->pic_parameter_set_id, p_Vid->pNextPPS);
// memcpy(p_Vid->pNextPPS, &tmpPPS, sizeof (pic_parameter_set_rbsp_t));
// tmpPPS.slice_group_id = NULL;
// }
//
// UseParameterSet (pSlice);
// if(pSlice->idr_flag)
// p_Vid->number=0;
//
// p_Vid->PicHeightInMbs = p_Vid->FrameHeightInMbs / ( 1 + pSlice->field_pic_flag );
// p_Vid->PicSizeInMbs = p_Vid->PicWidthInMbs * p_Vid->PicHeightInMbs;
// p_Vid->FrameSizeInMbs = p_Vid->PicWidthInMbs * p_Vid->FrameHeightInMbs;
// p_Vid->structure = pSlice->structure;
//
// fmo_init (p_Vid, pSlice);
//
//#if (MVC_EXTENSION_ENABLE)
// update_ref_list(p_Vid->p_Dpb, pSlice->view_id);
// update_ltref_list(p_Vid->p_Dpb, pSlice->view_id);
// update_pic_num(pSlice);
// i = pSlice->view_id;
//#else
// update_pic_num(pSlice);
// i = 0;
//#endif
// init_Deblock(p_Vid, pSlice->mb_aff_frame_flag);
// //init mb_data;
//
// for(j=0; j<p_Vid->iSliceNumOfCurrPic; j++)
// {
// if(p_Vid->ppSliceList[j]->DFDisableIdc == 0)
// iDeblockMode=0;
//#if (MVC_EXTENSION_ENABLE)
// assert(p_Vid->ppSliceList[j]->view_id == i);
//#endif
// }
// p_Vid->iDeblockMode = iDeblockMode;
//
// if(p_Vid->iDeblockMode == 1)
// {
// for(j=0; j<p_Vid->iSliceNumOfCurrPic; j++)
// {
// pSlice = p_Vid->ppSliceList[j];
// iSliceNo = pSlice->current_slice_nr;
// if((p_Vid->separate_colour_plane_flag != 0))
// pMBData = p_Vid->mb_data_JV[pSlice->colour_plane_id];
// else
// pMBData = p_Vid->mb_data;
// for(i = pSlice->start_mb_nr * (1 + pSlice->mb_aff_frame_flag); i < pSlice->end_mb_nr_plus1 * (1 + pSlice->mb_aff_frame_flag); i++)
// pMBData[i].slice_nr = (short) iSliceNo;
// }
// }
//}
//void init_slice(VideoParameters *p_Vid, Slice *currSlice)
//{
// int i;
// p_Vid->active_sps = currSlice->active_sps;
// p_Vid->active_pps = currSlice->active_pps;
//
//#if (MVC_EXTENSION_ENABLE)
// //update_ref_list(p_Vid->p_Dpb, currSlice->view_id);
// //update_ltref_list(p_Vid->p_Dpb, currSlice->view_id);
// //update_pic_num(currSlice);
//
// currSlice->init_lists(currSlice);
//
// if (currSlice->svc_extension_flag == 0 || currSlice->svc_extension_flag == 1)
// reorder_lists_mvc (currSlice, currSlice->ThisPOC);
// else
// reorder_lists (currSlice);
//
// if (currSlice->fs_listinterview0)
// {
// free(currSlice->fs_listinterview0);
// currSlice->fs_listinterview0 = NULL;
// }
// if (currSlice->fs_listinterview1)
// {
// free(currSlice->fs_listinterview1);
// currSlice->fs_listinterview1 = NULL;
// }
//#else
// //update_pic_num(currSlice);
// currSlice->init_lists (currSlice);
// reorder_lists (currSlice);
//#endif
//
// if (currSlice->structure==FRAME)
// {
// init_mbaff_lists(p_Vid, currSlice);
// }
// //p_Vid->recovery_point = 0;
//
// // update reference flags and set current p_Vid->ref_flag
// if(!(currSlice->redundant_pic_cnt != 0 && p_Vid->previous_frame_num == currSlice->frame_num))
// {
// for(i=16;i>0;i--)
// {
// currSlice->ref_flag[i] = currSlice->ref_flag[i-1];
// }
// }
// currSlice->ref_flag[0] = currSlice->redundant_pic_cnt==0 ? p_Vid->Is_primary_correct : p_Vid->Is_redundant_correct;
// //p_Vid->previous_frame_num = currSlice->frame_num; //p_Vid->frame_num;
//
// if((currSlice->active_sps->chroma_format_idc==0)||(currSlice->active_sps->chroma_format_idc==3))
// {
// currSlice->linfo_cbp_intra = linfo_cbp_intra_other;
// currSlice->linfo_cbp_inter = linfo_cbp_inter_other;
// }
// else
// {
// currSlice->linfo_cbp_intra = linfo_cbp_intra_normal;
// currSlice->linfo_cbp_inter = linfo_cbp_inter_normal;
// }
//}
//
//void decode_slice(Slice *currSlice, int current_header)
//{
// VideoParameters *p_Vid = currSlice->p_Vid;
// int iScale = (1+currSlice->mb_aff_frame_flag);
// if (currSlice->active_pps->entropy_coding_mode_flag)
// {
// init_contexts (currSlice);
// cabac_new_slice(currSlice);
// }
//
// if ( (currSlice->active_pps->weighted_bipred_idc > 0 && (currSlice->slice_type == B_SLICE)) || (currSlice->active_pps->weighted_pred_flag && currSlice->slice_type !=I_SLICE))
// fill_wp_params(currSlice);
//
// //printf("frame picture %d %d %d\n",currSlice->structure,currSlice->ThisPOC,currSlice->direct_spatial_mv_pred_flag);
//
// // decode main slice information
// if ((current_header == SOP || current_header == SOS) && currSlice->ei_flag == 0)
// decode_one_slice(currSlice);
//
// // setMB-Nr in case this slice was lost
// // if(currSlice->ei_flag)
// // p_Vid->current_mb_nr = currSlice->last_mb_nr + 1;
//
// //deblocking for frame or field
// if(p_Vid->iDeblockMode && (p_Vid->bDeblockEnable & (1<<(p_Vid->dec_picture->used_for_reference))))
// {
// if((p_Vid->separate_colour_plane_flag != 0) )
// {
// change_plane_JV(p_Vid, currSlice->colour_plane_id, currSlice);
// DeblockPicturePartially(p_Vid, currSlice->dec_picture, currSlice->start_mb_nr*iScale, currSlice->end_mb_nr_plus1*iScale);
// }
// else
// {
// DeblockPicturePartially(p_Vid, currSlice->dec_picture, currSlice->start_mb_nr*iScale, currSlice->end_mb_nr_plus1*iScale);
// }
// }
//}
/*!
************************************************************************
* \brief
* Error tracking: if current frame is lost or any reference frame of
* current frame is lost, current frame is incorrect.
************************************************************************
*/
/*!
 ************************************************************************
 * \brief
 *    Error tracking: if current frame is lost or any reference frame of
 *    current frame is lost, current frame is incorrect.
 *    Updates p_Vid->Is_primary_correct / Is_redundant_correct from the
 *    slice's reference flags; I slices have no references to check.
 ************************************************************************
 */
static void Error_tracking(VideoParameters *p_Vid, Slice *currSlice)
{
  int ref;

  /* A primary slice (redundant_pic_cnt == 0) resets both status flags. */
  if (currSlice->redundant_pic_cnt == 0)
    p_Vid->Is_primary_correct = p_Vid->Is_redundant_correct = 1;

  /* Intra slices reference nothing, so there is nothing left to track. */
  if (p_Vid->type == I_SLICE)
    return;

  if (currSlice->redundant_pic_cnt == 0)
  {
    /* Primary slice: any incorrect list-0 reference taints it. */
    for (ref = 0; ref < currSlice->num_ref_idx_active[LIST_0]; ++ref)
    {
      if (currSlice->ref_flag[ref] == 0)
        p_Vid->Is_primary_correct = 0;
    }
  }
  else if (currSlice->ref_flag[currSlice->redundant_slice_ref_idx] == 0)
  {
    /* Redundant slice: its single designated reference is incorrect. */
    p_Vid->Is_redundant_correct = 0;
  }
}
/*!
 ************************************************************************
 * \brief
 *    Propagate the POC fields of the picture's first slice (pSlice0)
 *    into currSlice, so every slice of the picture agrees on its POCs.
 ************************************************************************
 */
static void CopyPOC(Slice *pSlice0, Slice *currSlice)
{
  currSlice->ThisPOC   = pSlice0->ThisPOC;
  currSlice->framepoc  = pSlice0->framepoc;
  currSlice->bottompoc = pSlice0->bottompoc;
  currSlice->toppoc    = pSlice0->toppoc;
}
/*!
***********************************************************************
* \brief
* decodes one I- or P-frame
*
***********************************************************************
*/
//int decode_one_frame(DecoderParams *pDecoder)
//{
// VideoParameters *p_Vid = pDecoder->p_Vid;
// InputParameters *p_Inp = p_Vid->p_Inp;
// int current_header, iRet;
// Slice *currSlice; // = p_Vid->currentSlice;
// Slice **ppSliceList = p_Vid->ppSliceList;
// int iSliceNo;
//
// //read one picture first;
// p_Vid->iSliceNumOfCurrPic=0;
// current_header=0;
// p_Vid->iNumOfSlicesDecoded=0;
// p_Vid->num_dec_mb = 0;
// if(p_Vid->newframe)
// {
// if(p_Vid->pNextPPS->Valid && (int) p_Vid->pNextPPS->pic_parameter_set_id == p_Vid->pNextSlice->pic_parameter_set_id)
// {
// MakePPSavailable (p_Vid, p_Vid->pNextPPS->pic_parameter_set_id, p_Vid->pNextPPS);
// p_Vid->pNextPPS->Valid=0;
// }
//
// //get the first slice from currentslice;
// assert(ppSliceList[p_Vid->iSliceNumOfCurrPic]);
// currSlice = ppSliceList[p_Vid->iSliceNumOfCurrPic];
// ppSliceList[p_Vid->iSliceNumOfCurrPic] = p_Vid->pNextSlice;
// p_Vid->pNextSlice = currSlice;
// assert(ppSliceList[p_Vid->iSliceNumOfCurrPic]->current_slice_nr == 0);
//
// currSlice = ppSliceList[p_Vid->iSliceNumOfCurrPic];
//
// UseParameterSet (currSlice);
//
// init_picture(p_Vid, currSlice, p_Inp);
//
// p_Vid->iSliceNumOfCurrPic++;
// current_header = SOS;
// }
// while(current_header != SOP && current_header !=EOS)
// {
// //no pending slices;
// assert(p_Vid->iSliceNumOfCurrPic < p_Vid->iNumOfSlicesAllocated);
// if(!ppSliceList[p_Vid->iSliceNumOfCurrPic])
// {
// ppSliceList[p_Vid->iSliceNumOfCurrPic] = malloc_slice(p_Inp, p_Vid);
// }
// currSlice = ppSliceList[p_Vid->iSliceNumOfCurrPic];
//
// //p_Vid->currentSlice = currSlice;
// currSlice->p_Vid = p_Vid;
// currSlice->p_Inp = p_Inp;
// currSlice->p_Dpb = p_Vid->p_Dpb;
// currSlice->next_header = -8888;
// currSlice->num_dec_mb = 0;
// currSlice->coeff_ctr = -1;
// currSlice->pos = 0;
// currSlice->is_reset_coeff = FALSE;
// current_header = read_new_slice(currSlice);
// //init;
// currSlice->current_header = current_header;
//
// // error tracking of primary and redundant slices.
// Error_tracking(p_Vid, currSlice);
// // If primary and redundant are received and primary is correct, discard the redundant
// // else, primary slice will be replaced with redundant slice.
// if(currSlice->frame_num == p_Vid->previous_frame_num && currSlice->redundant_pic_cnt !=0
// && p_Vid->Is_primary_correct !=0 && current_header != EOS)
// {
// continue;
// }
//
// if((current_header != SOP && current_header !=EOS) || (p_Vid->iSliceNumOfCurrPic==0 && current_header == SOP))
// {
// currSlice->current_slice_nr = (short) p_Vid->iSliceNumOfCurrPic;
// p_Vid->dec_picture->max_slice_id = (short) imax(currSlice->current_slice_nr, p_Vid->dec_picture->max_slice_id);
// if(p_Vid->iSliceNumOfCurrPic >0)
// {
// CopyPOC(*ppSliceList, currSlice);
// ppSliceList[p_Vid->iSliceNumOfCurrPic-1]->end_mb_nr_plus1 = currSlice->start_mb_nr;
// }
// p_Vid->iSliceNumOfCurrPic++;
// if(p_Vid->iSliceNumOfCurrPic >= p_Vid->iNumOfSlicesAllocated)
// {
// Slice **tmpSliceList = (Slice **)realloc(p_Vid->ppSliceList, (p_Vid->iNumOfSlicesAllocated+MAX_NUM_DECSLICES)*sizeof(Slice*));
// if(!tmpSliceList)
// {
// tmpSliceList = calloc((p_Vid->iNumOfSlicesAllocated+MAX_NUM_DECSLICES), sizeof(Slice*));
// memcpy(tmpSliceList, p_Vid->ppSliceList, p_Vid->iSliceNumOfCurrPic*sizeof(Slice*));
// //free;
// free(p_Vid->ppSliceList);
// ppSliceList = p_Vid->ppSliceList = tmpSliceList;
// }
// else
// {
// //assert(tmpSliceList == p_Vid->ppSliceList);
// ppSliceList = p_Vid->ppSliceList = tmpSliceList;
// memset(p_Vid->ppSliceList+p_Vid->iSliceNumOfCurrPic, 0, sizeof(Slice*)*MAX_NUM_DECSLICES);
// }
// p_Vid->iNumOfSlicesAllocated += MAX_NUM_DECSLICES;
// }
// current_header = SOS;
// }
// else
// {
// if(ppSliceList[p_Vid->iSliceNumOfCurrPic-1]->mb_aff_frame_flag)
// ppSliceList[p_Vid->iSliceNumOfCurrPic-1]->end_mb_nr_plus1 = p_Vid->FrameSizeInMbs/2;
// else
// ppSliceList[p_Vid->iSliceNumOfCurrPic-1]->end_mb_nr_plus1 = p_Vid->FrameSizeInMbs/(1+ppSliceList[p_Vid->iSliceNumOfCurrPic-1]->field_pic_flag);
// p_Vid->newframe = 1;
// currSlice->current_slice_nr = 0;
// //keep it in currentslice;
// ppSliceList[p_Vid->iSliceNumOfCurrPic] = p_Vid->pNextSlice;
// p_Vid->pNextSlice = currSlice;
// }
//
// CopySliceInfo(currSlice, p_Vid->old_slice);
// }
// iRet = current_header;
// init_picture_decoding(p_Vid);
//
// {
// for(iSliceNo=0; iSliceNo<p_Vid->iSliceNumOfCurrPic; iSliceNo++)
// {
// currSlice = ppSliceList[iSliceNo];
// current_header = currSlice->current_header;
// //p_Vid->currentSlice = currSlice;
//
// assert(current_header != EOS);
// assert(currSlice->current_slice_nr == iSliceNo);
//
// init_slice(p_Vid, currSlice);
// decode_slice(currSlice, current_header);
//
// p_Vid->iNumOfSlicesDecoded++;
// p_Vid->num_dec_mb += currSlice->num_dec_mb;
// p_Vid->erc_mvperMB += currSlice->erc_mvperMB;
// }
// }
// exit_picture(p_Vid, &p_Vid->dec_picture);
// p_Vid->previous_frame_num = ppSliceList[0]->frame_num;
// return (iRet);
//}
/*!
************************************************************************
* \brief
* Convert file read buffer to source picture structure
* \param imgX
* Pointer to image plane
* \param buf
* Buffer for file output
* \param size_x
* horizontal image size in pixel
* \param size_y
* vertical image size in pixel
* \param symbol_size_in_bytes
* number of bytes used per pel
************************************************************************
*/
//void buffer2img (imgpel** imgX, unsigned char* buf, int size_x, int size_y, int symbol_size_in_bytes)
//{
// int i,j;
//
// uint16 tmp16, ui16;
// unsigned long tmp32, ui32;
//
// if (symbol_size_in_bytes> sizeof(imgpel))
// {
// error ("Source picture has higher bit depth than imgpel data type. \nPlease recompile with larger data type for imgpel.", 500);
// }
//
// if (( sizeof(char) == sizeof (imgpel)) && ( sizeof(char) == symbol_size_in_bytes))
// {
// // imgpel == pixel_in_file == 1 byte -> simple copy
// fast_memcpy(&imgX[0][0], buf, size_x * size_y);
// }
// else
// {
// // sizeof (imgpel) > sizeof(char)
// if (testEndian())
// {
// // big endian
// switch (symbol_size_in_bytes)
// {
// case 1:
// {
// for(j = 0; j < size_y; ++j)
// for(i = 0; i < size_x; ++i)
// {
// imgX[j][i]= buf[i+j*size_x];
// }
// break;
// }
// case 2:
// {
// for(j=0;j<size_y;++j)
// for(i=0;i<size_x;++i)
// {
// memcpy(&tmp16, buf+((i+j*size_x)*2), 2);
// ui16 = (uint16) ((tmp16 >> 8) | ((tmp16&0xFF)<<8));
// imgX[j][i] = (imgpel) ui16;
// }
// break;
// }
// case 4:
// {
// for(j=0;j<size_y;++j)
// for(i=0;i<size_x;++i)
// {
// memcpy(&tmp32, buf+((i+j*size_x)*4), 4);
// ui32 = ((tmp32&0xFF00)<<8) | ((tmp32&0xFF)<<24) | ((tmp32&0xFF0000)>>8) | ((tmp32&0xFF000000)>>24);
// imgX[j][i] = (imgpel) ui32;
// }
// }
// default:
// {
// error ("reading only from formats of 8, 16 or 32 bit allowed on big endian architecture", 500);
// break;
// }
// }
//
// }
// else
// {
// // little endian
// if (symbol_size_in_bytes == 1)
// {
// for (j=0; j < size_y; ++j)
// {
// for (i=0; i < size_x; ++i)
// {
// imgX[j][i]=*(buf++);
// }
// }
// }
// else
// {
// for (j=0; j < size_y; ++j)
// {
// int jpos = j*size_x;
// for (i=0; i < size_x; ++i)
// {
// imgX[j][i]=0;
// memcpy(&(imgX[j][i]), buf +((i+jpos)*symbol_size_in_bytes), symbol_size_in_bytes);
// }
// }
// }
//
// }
// }
//}
/*!
***********************************************************************
* \brief
* compute generic SSE
***********************************************************************
*/
/*!
 ***********************************************************************
 * \brief
 *    Compute the sum of squared errors between an xSize-by-ySize
 *    window of imgRef (starting at column xRef) and the corresponding
 *    window of imgSrc (starting at column xSrc).
 ***********************************************************************
 */
int64 compute_SSE(imgpel **imgRef, imgpel **imgSrc, int xRef, int xSrc, int ySize, int xSize)
{
  int row, col;
  int64 sse = 0;

  for (row = 0; row < ySize; row++)
  {
    imgpel *ref = imgRef[row] + xRef;
    imgpel *src = imgSrc[row] + xSrc;

    for (col = 0; col < xSize; col++)
      sse += iabs2(ref[col] - src[col]);
  }
  return sse;
}
/*!
************************************************************************
* \brief
* Calculate the value of frame_no
************************************************************************
*/
/*!
 ************************************************************************
 * \brief
 *    Calculate the value of frame_no for the picture p from its POC,
 *    and update the running PSNR frame counters in p_Vid.
 *
 * \note The original code selected between two byte-identical
 *    expressions with a ternary on mb_adaptive_frame_field_flag;
 *    the redundant ternary has been removed (behavior unchanged).
 ************************************************************************
 */
void calculate_frame_no(VideoParameters *p_Vid, StorablePicture *p)
{
  InputParameters *p_Inp = p_Vid->p_Inp;
  // POC scaled down to a frame index (integer division).
  int psnrPOC = p->poc / (p_Inp->poc_scale);

  // A zero POC marks an IDR boundary: rebase the IDR frame counter.
  if (psnrPOC == 0)// && p_Vid->psnr_number)
  {
    p_Vid->idr_psnr_number = p_Vid->number * p_Vid->ref_poc_gap / (p_Inp->poc_scale);
  }
  p_Vid->psnr_number = imax(p_Vid->psnr_number, p_Vid->idr_psnr_number + psnrPOC);

  p_Vid->frame_no = p_Vid->idr_psnr_number + psnrPOC;
}
/*!
************************************************************************
* \brief
 *    Find PSNR for all three components. Compare decoded frame with
 *    the original sequence. Read p_Inp->jumpd frames to reflect frame skipping.
 * \param p_Vid
 *      video encoding parameters for current picture
 * \param p
 *      picture to be compared
 * \param p_ref
 *      file pointer pointing to the YUV reference file
************************************************************************
*/
//void find_snr(VideoParameters *p_Vid,
// StorablePicture *p,
// int *p_ref)
//{
// InputParameters *p_Inp = p_Vid->p_Inp;
// SNRParameters *snr = p_Vid->snr;
//
// int k;
// int ret;
// int64 diff_comp[3] = {0};
// int64 status;
// int symbol_size_in_bytes = (p_Vid->pic_unit_bitsize_on_disk >> 3);
// int comp_size_x[3], comp_size_y[3];
// int64 framesize_in_bytes;
//
// unsigned int max_pix_value_sqd[3];
//
// Boolean rgb_output = (Boolean) (p_Vid->active_sps->vui_seq_parameters.matrix_coefficients==0);
// unsigned char *buf;
// imgpel **cur_ref [3];
// imgpel **cur_comp[3];
// // picture error concealment
// char yuv_types[4][6]= {"4:0:0","4:2:0","4:2:2","4:4:4"};
//
// max_pix_value_sqd[0] = iabs2(p_Vid->max_pel_value_comp[0]);
// max_pix_value_sqd[1] = iabs2(p_Vid->max_pel_value_comp[1]);
// max_pix_value_sqd[2] = iabs2(p_Vid->max_pel_value_comp[2]);
//
// cur_ref[0] = p_Vid->imgY_ref;
// cur_ref[1] = p->chroma_format_idc != YUV400 ? p_Vid->imgUV_ref[0] : NULL;
// cur_ref[2] = p->chroma_format_idc != YUV400 ? p_Vid->imgUV_ref[1] : NULL;
//
// cur_comp[0] = p->imgY;
// cur_comp[1] = p->chroma_format_idc != YUV400 ? p->imgUV[0] : NULL;
// cur_comp[2] = p->chroma_format_idc!= YUV400 ? p->imgUV[1] : NULL;
//
// comp_size_x[0] = p_Inp->source.width[0];
// comp_size_y[0] = p_Inp->source.height[0];
// comp_size_x[1] = comp_size_x[2] = p_Inp->source.width[1];
// comp_size_y[1] = comp_size_y[2] = p_Inp->source.height[1];
//
// framesize_in_bytes = (((int64) comp_size_x[0] * comp_size_y[0]) + ((int64) comp_size_x[1] * comp_size_y[1] ) * 2) * symbol_size_in_bytes;
//
// // KS: this buffer should actually be allocated only once, but this is still much faster than the previous version
// buf = malloc ( comp_size_x[0] * comp_size_y[0] * symbol_size_in_bytes );
//
// if (NULL == buf)
// {
// no_mem_exit("find_snr: buf");
// }
//
// status = lseek (*p_ref, framesize_in_bytes * p_Vid->frame_no, SEEK_SET);
// if (status == -1)
// {
// fprintf(stderr, "Warning: Could not seek to frame number %d in reference file. Shown PSNR might be wrong.\n", p_Vid->frame_no);
// free (buf);
// return;
// }
//
// if(rgb_output)
// lseek (*p_ref, framesize_in_bytes/3, SEEK_CUR);
//
// for (k = 0; k < ((p->chroma_format_idc != YUV400) ? 3 : 1); ++k)
// {
//
// if(rgb_output && k == 2)
// lseek (*p_ref, -framesize_in_bytes, SEEK_CUR);
//
// ret = read(*p_ref, buf, comp_size_x[k] * comp_size_y[k] * symbol_size_in_bytes);
// if (ret != comp_size_x[k] * comp_size_y[k] * symbol_size_in_bytes)
// {
// printf ("Warning: could not read from reconstructed file\n");
// memset (buf, 0, comp_size_x[k] * comp_size_y[k] * symbol_size_in_bytes);
// close(*p_ref);
// *p_ref = -1;
// break;
// }
// buffer2img(cur_ref[k], buf, comp_size_x[k], comp_size_y[k], symbol_size_in_bytes);
//
// // Compute SSE
// diff_comp[k] = compute_SSE(cur_ref[k], cur_comp[k], 0, 0, comp_size_y[k], comp_size_x[k]);
//
// // Collecting SNR statistics
// snr->snr[k] = psnr( max_pix_value_sqd[k], comp_size_x[k] * comp_size_y[k], (float) diff_comp[k]);
//
// if (snr->frame_ctr == 0) // first
// {
// snr->snra[k] = snr->snr[k]; // keep luma snr for first frame
// }
// else
// {
// snr->snra[k] = (float)(snr->snra[k]*(snr->frame_ctr)+snr->snr[k])/(snr->frame_ctr + 1); // average snr chroma for all frames
// }
// }
//
// if(rgb_output)
// lseek (*p_ref, framesize_in_bytes * 2 / 3, SEEK_CUR);
//
// free (buf);
//
// // picture error concealment
// if(p->concealed_pic)
// {
// fprintf(stdout,"%04d(P) %8d %5d %5d %7.4f %7.4f %7.4f %s %5d\n",
// p_Vid->frame_no, p->frame_poc, p->pic_num, p->qp,
// snr->snr[0], snr->snr[1], snr->snr[2], yuv_types[p->chroma_format_idc], 0);
// }
//}
/*!
 ************************************************************************
 * \brief
 *    Apply the slice-header reference picture list modifications to
 *    list 0 (P/B slices) and list 1 (B slices), fix the final list
 *    sizes to the active number of references, and validate that the
 *    last active entry is a real picture. Frees the reordering buffer
 *    afterwards; may print the final lists for MVC debug builds.
 ************************************************************************
 */
void reorder_lists(Slice *currSlice)
{
  VideoParameters *p_Vid = currSlice->p_Vid;

  // List 0 applies to every inter-predicted slice type.
  if ((currSlice->slice_type != I_SLICE)&&(currSlice->slice_type != SI_SLICE))
  {
    if (currSlice->ref_pic_list_reordering_flag[LIST_0])
    {
      reorder_ref_pic_list(currSlice, LIST_0);
    }
    // The last active entry must be a real picture; a placeholder there
    // means an invalid bitstream (tolerated for non-conforming streams).
    if (p_Vid->no_reference_picture == currSlice->listX[0][currSlice->num_ref_idx_active[LIST_0] - 1])
    {
      if (p_Vid->non_conforming_stream)
        printf("RefPicList0[ num_ref_idx_l0_active_minus1 ] is equal to 'no reference picture'\n");
      else
        error("RefPicList0[ num_ref_idx_l0_active_minus1 ] is equal to 'no reference picture', invalid bitstream",500);
    }
    // that's a definition
    currSlice->listXsize[0] = (char) currSlice->num_ref_idx_active[LIST_0];
  }

  // List 1 exists only for B slices.
  if (currSlice->slice_type == B_SLICE)
  {
    if (currSlice->ref_pic_list_reordering_flag[LIST_1])
    {
      reorder_ref_pic_list(currSlice, LIST_1);
    }
    if (p_Vid->no_reference_picture == currSlice->listX[1][currSlice->num_ref_idx_active[LIST_1]-1])
    {
      if (p_Vid->non_conforming_stream)
        printf("RefPicList1[ num_ref_idx_l1_active_minus1 ] is equal to 'no reference picture'\n");
      else
        error("RefPicList1[ num_ref_idx_l1_active_minus1 ] is equal to 'no reference picture', invalid bitstream",500);
    }
    // that's a definition
    currSlice->listXsize[1] = (char) currSlice->num_ref_idx_active[LIST_1];
  }

  free_ref_pic_list_reordering_buffer(currSlice);

  // Debug-only printout of the final lists (MVC/stereo profiles,
  // first slice of the picture only).
  if ( currSlice->slice_type == P_SLICE )
  {
#if PRINTREFLIST
    unsigned int i;
#if (MVC_EXTENSION_ENABLE)
    // print out for debug purpose
    if((p_Vid->profile_idc == MVC_HIGH || p_Vid->profile_idc == STEREO_HIGH) && currSlice->current_slice_nr==0)
    {
      if(currSlice->listXsize[0]>0)
      {
        printf("\n");
        printf(" ** (CurViewID:%d) %s Ref Pic List 0 ****\n", currSlice->view_id, currSlice->structure==FRAME ? "FRM":(currSlice->structure==TOP_FIELD ? "TOP":"BOT"));
        for(i=0; i<(unsigned int)(currSlice->listXsize[0]); i++)  //ref list 0
        {
          printf("   %2d -> POC: %4d PicNum: %4d ViewID: %d\n", i, currSlice->listX[0][i]->poc, currSlice->listX[0][i]->pic_num, currSlice->listX[0][i]->view_id);
        }
      }
    }
#endif
#endif
  }
  else if ( currSlice->slice_type == B_SLICE )
  {
#if PRINTREFLIST
    unsigned int i;
#if (MVC_EXTENSION_ENABLE)
    // print out for debug purpose
    if((p_Vid->profile_idc == MVC_HIGH || p_Vid->profile_idc == STEREO_HIGH) && currSlice->current_slice_nr==0)
    {
      if((currSlice->listXsize[0]>0) || (currSlice->listXsize[1]>0))
        printf("\n");
      if(currSlice->listXsize[0]>0)
      {
        printf(" ** (CurViewID:%d) %s Ref Pic List 0 ****\n", currSlice->view_id, currSlice->structure==FRAME ? "FRM":(currSlice->structure==TOP_FIELD ? "TOP":"BOT"));
        for(i=0; i<(unsigned int)(currSlice->listXsize[0]); i++)  //ref list 0
        {
          printf("   %2d -> POC: %4d PicNum: %4d ViewID: %d\n", i, currSlice->listX[0][i]->poc, currSlice->listX[0][i]->pic_num, currSlice->listX[0][i]->view_id);
        }
      }
      if(currSlice->listXsize[1]>0)
      {
        printf(" ** (CurViewID:%d) %s Ref Pic List 1 ****\n", currSlice->view_id, currSlice->structure==FRAME ? "FRM":(currSlice->structure==TOP_FIELD ? "TOP":"BOT"));
        for(i=0; i<(unsigned int)(currSlice->listXsize[1]); i++)  //ref list 1
        {
          printf("   %2d -> POC: %4d PicNum: %4d ViewID: %d\n", i, currSlice->listX[1][i]->poc, currSlice->listX[1][i]->pic_num, currSlice->listX[1][i]->view_id);
        }
      }
    }
#endif
#endif
  }
}
/*!
************************************************************************
* \brief
* Reads new slice from bit_stream
************************************************************************
*/
//int read_new_slice(Slice *currSlice)
//{
// VideoParameters *p_Vid = currSlice->p_Vid;
// InputParameters *p_Inp = currSlice->p_Inp;
//
// NALU_t *nalu = p_Vid->nalu;
// int current_header = 0;
// int BitsUsedByHeader;
// Bitstream *currStream = NULL;
//
// int slice_id_a, slice_id_b, slice_id_c;
// int redundant_pic_cnt_b, redundant_pic_cnt_c;
//
// for (;;)
// {
//#if (MVC_EXTENSION_ENABLE)
// currSlice->svc_extension_flag = -1;
//#endif
// if (0 == read_next_nalu(p_Vid, nalu))
// return EOS;
//
//#if (MVC_EXTENSION_ENABLE)
// if(p_Inp->DecodeAllLayers == 1 && (nalu->nal_unit_type == NALU_TYPE_PREFIX || nalu->nal_unit_type == NALU_TYPE_SLC_EXT))
// {
// currStream = currSlice->partArr[0].bitstream;
// currStream->ei_flag = 0;
// currStream->frame_bitoffset = currStream->read_len = 0;
// fast_memcpy (currStream->streamBuffer, &nalu->buf[1], nalu->len-1);
// currStream->code_len = currStream->bitstream_length = RBSPtoSODB(currStream->streamBuffer, nalu->len-1);
//
// currSlice->svc_extension_flag = u_1 ("svc_extension_flag" , currStream);
//
// if(currSlice->svc_extension_flag)
// {
// nal_unit_header_svc_extension();
// }
// else
// {
// nal_unit_header_mvc_extension(&currSlice->NaluHeaderMVCExt, currStream);
// currSlice->NaluHeaderMVCExt.iPrefixNALU = (nalu->nal_unit_type == NALU_TYPE_PREFIX);
// }
//
// if(nalu->nal_unit_type == NALU_TYPE_SLC_EXT)
// {
// if(currSlice->svc_extension_flag)
// {
// //to be implemented for Annex G;
// }
// else
// {
// nalu->nal_unit_type = currSlice->NaluHeaderMVCExt.non_idr_flag==0? NALU_TYPE_IDR: NALU_TYPE_SLICE;
// }
// }
// }
//#endif
//
//process_nalu:
// switch (nalu->nal_unit_type)
// {
// case NALU_TYPE_SLICE:
// case NALU_TYPE_IDR:
//
// if (p_Vid->recovery_point || nalu->nal_unit_type == NALU_TYPE_IDR)
// {
// if (p_Vid->recovery_point_found == 0)
// {
// if (nalu->nal_unit_type != NALU_TYPE_IDR)
// {
// printf("Warning: Decoding does not start with an IDR picture.\n");
// p_Vid->non_conforming_stream = 1;
// }
// else
// p_Vid->non_conforming_stream = 0;
// }
// p_Vid->recovery_point_found = 1;
// }
//
// if (p_Vid->recovery_point_found == 0)
// break;
//
// currSlice->idr_flag = (nalu->nal_unit_type == NALU_TYPE_IDR);
// currSlice->nal_reference_idc = nalu->nal_reference_idc;
// currSlice->dp_mode = PAR_DP_1;
// currSlice->max_part_nr = 1;
//#if (MVC_EXTENSION_ENABLE)
// if (currSlice->svc_extension_flag != 0)
// {
// currStream = currSlice->partArr[0].bitstream;
// currStream->ei_flag = 0;
// currStream->frame_bitoffset = currStream->read_len = 0;
// fast_memcpy (currStream->streamBuffer, &nalu->buf[1], nalu->len-1);
// currStream->code_len = currStream->bitstream_length = RBSPtoSODB(currStream->streamBuffer, nalu->len-1);
// }
//#else
// currStream = currSlice->partArr[0].bitstream;
// currStream->ei_flag = 0;
// currStream->frame_bitoffset = currStream->read_len = 0;
// memcpy (currStream->streamBuffer, &nalu->buf[1], nalu->len-1);
// currStream->code_len = currStream->bitstream_length = RBSPtoSODB(currStream->streamBuffer, nalu->len-1);
//#endif
//
// // Some syntax of the Slice Header depends on the parameter set, which depends on
// // the parameter set ID of the SLice header. Hence, read the pic_parameter_set_id
// // of the slice header first, then setup the active parameter sets, and then read
// // the rest of the slice header
// BitsUsedByHeader = FirstPartOfSliceHeader(currSlice);
// UseParameterSet (currSlice);
// currSlice->active_sps = p_Vid->active_sps;
// currSlice->active_pps = p_Vid->active_pps;
// currSlice->Transform8x8Mode = p_Vid->active_pps->transform_8x8_mode_flag;
// currSlice->is_not_independent = (p_Vid->active_sps->chroma_format_idc==YUV444)&&((p_Vid->separate_colour_plane_flag == 0));
//
// BitsUsedByHeader += RestOfSliceHeader (currSlice);
//
//#if (MVC_EXTENSION_ENABLE)
// if(currSlice->svc_extension_flag == 0)
// { //MVC
// currSlice->view_id = currSlice->NaluHeaderMVCExt.view_id;
// currSlice->inter_view_flag = currSlice->NaluHeaderMVCExt.inter_view_flag;
// currSlice->anchor_pic_flag = currSlice->NaluHeaderMVCExt.anchor_pic_flag;
// }
// else if(currSlice->svc_extension_flag == -1) //SVC and the normal AVC;
// {
// if(p_Vid->active_subset_sps == NULL)
// {
// currSlice->view_id = GetBaseViewId(p_Vid, &p_Vid->active_subset_sps);
// if(currSlice->NaluHeaderMVCExt.iPrefixNALU >0)
// {
// assert(currSlice->view_id == currSlice->NaluHeaderMVCExt.view_id);
// currSlice->inter_view_flag = currSlice->NaluHeaderMVCExt.inter_view_flag;
// currSlice->anchor_pic_flag = currSlice->NaluHeaderMVCExt.anchor_pic_flag;
// }
// else
// {
// currSlice->inter_view_flag = 1;
// currSlice->anchor_pic_flag = currSlice->idr_flag;
// }
// }
// else
// {
// assert(p_Vid->active_subset_sps->num_views_minus1 >=0);
// // prefix NALU available
// if(currSlice->NaluHeaderMVCExt.iPrefixNALU >0)
// {
// currSlice->view_id = currSlice->NaluHeaderMVCExt.view_id;
// currSlice->inter_view_flag = currSlice->NaluHeaderMVCExt.inter_view_flag;
// currSlice->anchor_pic_flag = currSlice->NaluHeaderMVCExt.anchor_pic_flag;
// }
// else
// { //no prefix NALU;
// currSlice->view_id = p_Vid->active_subset_sps->view_id[0];
// currSlice->inter_view_flag = 1;
// currSlice->anchor_pic_flag = currSlice->idr_flag;
// }
// }
// }
//#endif
//
// //fmo_init (p_Vid, currSlice);
// //currSlice->frame_num = p_Vid->frame_num;
// //currSlice->active_sps = p_Vid->active_sps;
// //currSlice->active_pps = p_Vid->active_pps;
//
// assign_quant_params (currSlice);
//
// // if primary slice is replaced with redundant slice, set the correct image type
// if(currSlice->redundant_pic_cnt && p_Vid->Is_primary_correct==0 && p_Vid->Is_redundant_correct)
// {
// p_Vid->dec_picture->slice_type = p_Vid->type;
// }
//
// if(is_new_picture(p_Vid->dec_picture, currSlice, p_Vid->old_slice))
// {
// if(p_Vid->iSliceNumOfCurrPic==0)
// init_picture(p_Vid, currSlice, p_Inp);
//
// current_header = SOP;
// //check zero_byte if it is also the first NAL unit in the access unit
// CheckZeroByteVCL(p_Vid, nalu);
// }
// else
// current_header = SOS;
//
//
// setup_slice_methods(currSlice);
//
// // From here on, p_Vid->active_sps, p_Vid->active_pps and the slice header are valid
// if (currSlice->mb_aff_frame_flag)
// currSlice->current_mb_nr = currSlice->start_mb_nr << 1;
// else
// currSlice->current_mb_nr = currSlice->start_mb_nr;
//
// if (p_Vid->active_pps->entropy_coding_mode_flag)
// {
// int ByteStartPosition = currStream->frame_bitoffset/8;
// if (currStream->frame_bitoffset%8 != 0)
// {
// ++ByteStartPosition;
// }
// arideco_start_decoding (&currSlice->partArr[0].de_cabac, currStream->streamBuffer, ByteStartPosition, &currStream->read_len);
// }
// // printf ("read_new_slice: returning %s\n", current_header == SOP?"SOP":"SOS");
// //FreeNALU(nalu);
// p_Vid->recovery_point = 0;
// return current_header;
// break;
// case NALU_TYPE_DPA:
// // read DP_A
// currSlice->dpB_NotPresent =1;
// currSlice->dpC_NotPresent =1;
//
// currSlice->idr_flag = FALSE;
// currSlice->nal_reference_idc = nalu->nal_reference_idc;
// currSlice->dp_mode = PAR_DP_3;
// currSlice->max_part_nr = 3;
// currSlice->ei_flag = 0;
// currStream = currSlice->partArr[0].bitstream;
// currStream->ei_flag = 0;
// currStream->frame_bitoffset = currStream->read_len = 0;
// memcpy (currStream->streamBuffer, &nalu->buf[1], nalu->len-1);
// currStream->code_len = currStream->bitstream_length = RBSPtoSODB(currStream->streamBuffer, nalu->len-1);
//
// BitsUsedByHeader = FirstPartOfSliceHeader(currSlice);
// UseParameterSet (currSlice);
// BitsUsedByHeader += RestOfSliceHeader (currSlice);
//
// fmo_init (p_Vid, currSlice);
//
// if(is_new_picture(p_Vid->dec_picture, currSlice, p_Vid->old_slice))
// {
// init_picture(p_Vid, currSlice, p_Inp);
// current_header = SOP;
// CheckZeroByteVCL(p_Vid, nalu);
// }
// else
// current_header = SOS;
//
// update_pic_num(currSlice);
// currSlice->init_lists(currSlice);
// reorder_lists (currSlice);
//
// if (p_Vid->structure==FRAME)
// {
// init_mbaff_lists(p_Vid, currSlice);
// }
//
// // From here on, p_Vid->active_sps, p_Vid->active_pps and the slice header are valid
// if (currSlice->mb_aff_frame_flag)
// currSlice->current_mb_nr = currSlice->start_mb_nr << 1;
// else
// currSlice->current_mb_nr = currSlice->start_mb_nr;
//
// // Now I need to read the slice ID, which depends on the value of
// // redundant_pic_cnt_present_flag
//
// slice_id_a = ue_v("NALU: DP_A slice_id", currStream);
//
// if (p_Vid->active_pps->entropy_coding_mode_flag)
// error ("received data partition with CABAC, this is not allowed", 500);
//
// // continue with reading next DP
// if (0 == read_next_nalu(p_Vid, nalu))
// return current_header;
//
// if ( NALU_TYPE_DPB == nalu->nal_unit_type)
// {
// // we got a DPB
// currStream = currSlice->partArr[1].bitstream;
// currStream->ei_flag = 0;
// currStream->frame_bitoffset = currStream->read_len = 0;
//
// memcpy (currStream->streamBuffer, &nalu->buf[1], nalu->len-1);
// currStream->code_len = currStream->bitstream_length = RBSPtoSODB(currStream->streamBuffer, nalu->len-1);
//
// slice_id_b = ue_v("NALU: DP_B slice_id", currStream);
//
// currSlice->dpB_NotPresent = 0;
//
// if ((slice_id_b != slice_id_a) || (nalu->lost_packets))
// {
// printf ("Warning: got a data partition B which does not match DP_A (DP loss!)\n");
// currSlice->dpB_NotPresent =1;
// currSlice->dpC_NotPresent =1;
// }
// else
// {
// if (p_Vid->active_pps->redundant_pic_cnt_present_flag)
// redundant_pic_cnt_b = ue_v("NALU: DP_B redudant_pic_cnt", currStream);
// else
// redundant_pic_cnt_b = 0;
//
// // we're finished with DP_B, so let's continue with next DP
// if (0 == read_next_nalu(p_Vid, nalu))
// return current_header;
// }
// }
// else
// {
// currSlice->dpB_NotPresent =1;
// }
//
// // check if we got DP_C
// if ( NALU_TYPE_DPC == nalu->nal_unit_type)
// {
// currStream = currSlice->partArr[2].bitstream;
// currStream->ei_flag = 0;
// currStream->frame_bitoffset = currStream->read_len = 0;
//
// memcpy (currStream->streamBuffer, &nalu->buf[1], nalu->len-1);
// currStream->code_len = currStream->bitstream_length = RBSPtoSODB(currStream->streamBuffer, nalu->len-1);
//
// currSlice->dpC_NotPresent = 0;
//
// slice_id_c = ue_v("NALU: DP_C slice_id", currStream);
// if ((slice_id_c != slice_id_a)|| (nalu->lost_packets))
// {
// printf ("Warning: got a data partition C which does not match DP_A(DP loss!)\n");
// //currSlice->dpB_NotPresent =1;
// currSlice->dpC_NotPresent =1;
// }
//
// if (p_Vid->active_pps->redundant_pic_cnt_present_flag)
// redundant_pic_cnt_c = ue_v("NALU:SLICE_C redudand_pic_cnt", currStream);
// else
// redundant_pic_cnt_c = 0;
// }
// else
// {
// currSlice->dpC_NotPresent =1;
// }
//
// // check if we read anything else than the expected partitions
// if ((nalu->nal_unit_type != NALU_TYPE_DPB) && (nalu->nal_unit_type != NALU_TYPE_DPC))
// {
// // we have a NALI that we can't process here, so restart processing
// goto process_nalu;
// // yes, "goto" should not be used, but it's really the best way here before we restructure the decoding loop
// // (which should be taken care of anyway)
// }
//
// //FreeNALU(nalu);
// return current_header;
//
// break;
// case NALU_TYPE_DPB:
// if (p_Inp->silent == FALSE)
// {
// printf ("found data partition B without matching DP A, discarding\n");
// }
// break;
// case NALU_TYPE_DPC:
// if (p_Inp->silent == FALSE)
// {
// printf ("found data partition C without matching DP A, discarding\n");
// }
// break;
// case NALU_TYPE_SEI:
// //printf ("read_new_slice: Found NALU_TYPE_SEI, len %d\n", nalu->len);
// InterpretSEIMessage(nalu->buf,nalu->len,p_Vid, currSlice);
// break;
// case NALU_TYPE_PPS:
// //printf ("Found NALU_TYPE_PPS\n");
// ProcessPPS(p_Vid, nalu);
// break;
// case NALU_TYPE_SPS:
// //printf ("Found NALU_TYPE_SPS\n");
// ProcessSPS(p_Vid, nalu);
// break;
// case NALU_TYPE_AUD:
// //printf ("Found NALU_TYPE_AUD\n");
// // printf ("read_new_slice: Found 'Access Unit Delimiter' NAL unit, len %d, ignored\n", nalu->len);
// break;
// case NALU_TYPE_EOSEQ:
// // printf ("read_new_slice: Found 'End of Sequence' NAL unit, len %d, ignored\n", nalu->len);
// break;
// case NALU_TYPE_EOSTREAM:
// // printf ("read_new_slice: Found 'End of Stream' NAL unit, len %d, ignored\n", nalu->len);
// break;
// case NALU_TYPE_FILL:
// if (p_Inp->silent == FALSE)
// {
// printf ("read_new_slice: Found NALU_TYPE_FILL, len %d\n", (int) nalu->len);
// printf ("Skipping these filling bits, proceeding w/ next NALU\n");
// }
// break;
//#if (MVC_EXTENSION_ENABLE)
// case NALU_TYPE_VDRD:
// //printf ("Found NALU_TYPE_VDRD\n");
// // printf ("read_new_slice: Found 'View and Dependency Representation Delimiter' NAL unit, len %d, ignored\n", nalu->len);
// break;
// case NALU_TYPE_PREFIX:
// //printf ("Found NALU_TYPE_PREFIX\n");
// if(currSlice->svc_extension_flag==1)
// prefix_nal_unit_svc();
// break;
// case NALU_TYPE_SUB_SPS:
// //printf ("Found NALU_TYPE_SUB_SPS\n");
// if (p_Inp->DecodeAllLayers== 1)
// ProcessSubsetSPS(p_Vid, nalu);
// else
// {
// if (p_Inp->silent == FALSE)
// printf ("Found Subsequence SPS NALU. Ignoring.\n");
// }
// break;
// case NALU_TYPE_SLC_EXT:
// //printf ("Found NALU_TYPE_SLC_EXT\n");
// if (p_Inp->DecodeAllLayers == 0 && (p_Inp->silent == FALSE))
// printf ("Found SVC extension NALU (%d). Ignoring.\n", (int) nalu->nal_unit_type);
// break;
//#endif
// default:
// {
// if (p_Inp->silent == FALSE)
// printf ("Found NALU type %d, len %d undefined, ignore NALU, moving on\n", (int) nalu->nal_unit_type, (int) nalu->len);
// }
// break;
// }
// }
//}
/*!
 ************************************************************************
 * \brief
 *    Pad one picture plane by border replication: iPadX columns are
 *    replicated on the left and right of every row, and iPadY whole
 *    rows are replicated above and below the plane.
 *
 * \param pImgBuf  pointer to pixel (0,0) of the plane; the padding
 *                 margin is assumed to be allocated around it inside
 *                 rows of iStride pels
 * \param iWidth   visible plane width in pels
 * \param iHeight  visible plane height in rows
 * \param iStride  distance in pels between consecutive rows
 *                 (covers iPadX + iWidth + iPadX)
 * \param iPadX    horizontal padding in pels (each side)
 * \param iPadY    vertical padding in rows (top and bottom)
 ************************************************************************
 */
void pad_buf(imgpel *pImgBuf, int iWidth, int iHeight, int iStride, int iPadX, int iPadY)
{
  int j;
  // pLine0 = start of row 0 including its left padding
  imgpel *pLine0 = pImgBuf - iPadX, *pLine;
#if (IMGTYPE==0)
  // IMGTYPE==0: memset is used to replicate a pixel value, which only
  // works for a single-byte imgpel.
  int pad_width = iPadX + iWidth;
  // Left and right padding of row 0.
  fast_memset(pImgBuf - iPadX, *pImgBuf, iPadX * sizeof(imgpel));
  fast_memset(pImgBuf + iWidth, *(pImgBuf + iWidth - 1), iPadX * sizeof(imgpel));
  // Replicate the fully padded row 0 into the iPadY rows above it.
  pLine = pLine0 - iPadY * iStride;
  for(j = -iPadY; j < 0; j++)
  {
    fast_memcpy(pLine, pLine0, iStride * sizeof(imgpel));
    pLine += iStride;
  }
  // Left/right padding of the remaining rows (1 .. iHeight-1);
  // pLine equals pLine0 when this loop starts.
  for(j = 1; j < iHeight; j++)
  {
    pLine += iStride;
    fast_memset(pLine, *(pLine + iPadX), iPadX * sizeof(imgpel));
    fast_memset(pLine + pad_width, *(pLine + pad_width - 1), iPadX * sizeof(imgpel));
  }
  // Replicate the padded last row (pLine) into the iPadY rows below.
  pLine0 = pLine + iStride;
  for(j = iHeight; j < iHeight + iPadY; j++)
  {
    fast_memcpy(pLine0, pLine, iStride * sizeof(imgpel));
    pLine0 += iStride;
  }
#else
  // Multi-byte imgpel: replicate pel by pel instead of with memset.
  int i;
  // Left and right padding of row 0.
  for(i=-iPadX; i<0; i++)
    pImgBuf[i] = *pImgBuf;
  for(i=0; i<iPadX; i++)
    pImgBuf[i+iWidth] = *(pImgBuf+iWidth-1);
  // Copy the padded row 0 upward into the top margin.
  for(j=-iPadY; j<0; j++)
    memcpy(pLine0+j*iStride, pLine0, iStride*sizeof(imgpel));
  // Left/right padding of rows 1 .. iHeight-1.
  for(j=1; j<iHeight; j++)
  {
    pLine = pLine0 + j*iStride;
    for(i=0; i<iPadX; i++)
      pLine[i] = pLine[iPadX];
    // Move to the last visible pel of the row, then replicate it
    // into the iPadX pels to its right.
    pLine += iPadX+iWidth-1;
    for(i=1; i<iPadX+1; i++)
      pLine[i] = *pLine;
  }
  // Copy the padded last row downward into the bottom margin.
  pLine = pLine0 + (iHeight-1)*iStride;
  for(j=iHeight; j<iHeight+iPadY; j++)
    memcpy(pLine0+j*iStride, pLine, iStride*sizeof(imgpel));
#endif
}
/*!
 ************************************************************************
 * \brief
 *    Pad all planes of a decoded picture by border replication, using
 *    the luma/chroma padding sizes configured in VideoParameters.
 ************************************************************************
 */
void pad_dec_picture(VideoParameters *p_Vid, StorablePicture *dec_picture)
{
  // Luma plane first.
  pad_buf(*dec_picture->imgY,
          dec_picture->size_x, dec_picture->size_y,
          dec_picture->iLumaStride,
          p_Vid->iLumaPadX, p_Vid->iLumaPadY);

  // Chroma planes exist only when the format is not monochrome (4:0:0).
  if (dec_picture->chroma_format_idc != YUV400)
  {
    int cr_pad_x  = p_Vid->iChromaPadX;
    int cr_pad_y  = p_Vid->iChromaPadY;
    int cr_width  = dec_picture->size_x_cr;
    int cr_height = dec_picture->size_y_cr;
    int cr_stride = dec_picture->iChromaStride;

    pad_buf(*dec_picture->imgUV[0], cr_width, cr_height, cr_stride, cr_pad_x, cr_pad_y);
    pad_buf(*dec_picture->imgUV[1], cr_width, cr_height, cr_stride, cr_pad_x, cr_pad_y);
  }
}
/*!
************************************************************************
* \brief
* finish decoding of a picture, conceal errors and store it
* into the DPB
************************************************************************
*/
//void exit_picture(VideoParameters *p_Vid, StorablePicture **dec_picture)
//{
// InputParameters *p_Inp = p_Vid->p_Inp;
// SNRParameters *snr = p_Vid->snr;
// char yuv_types[4][6]= {"4:0:0","4:2:0","4:2:2","4:4:4"};
// int ercStartMB;
// int ercSegment;
// frame recfr;
// int structure, frame_poc, slice_type, refpic, qp, pic_num, chroma_format_idc, is_idr, top_poc, bottom_poc;
//
// int64 tmp_time; // time used by decoding the last frame
// char yuvFormat[10];
//
//
// // return if the last picture has already been finished
// if (*dec_picture==NULL || (p_Vid->num_dec_mb != p_Vid->PicSizeInMbs && (p_Vid->yuv_format != YUV444 || !p_Vid->separate_colour_plane_flag)))
// {
// return;
// }
//
// recfr.p_Vid = p_Vid;
// recfr.yptr = &(*dec_picture)->imgY[0][0];
// if ((*dec_picture)->chroma_format_idc != YUV400)
// {
// recfr.uptr = &(*dec_picture)->imgUV[0][0][0];
// recfr.vptr = &(*dec_picture)->imgUV[1][0][0];
// }
//
// //! this is always true at the beginning of a picture
// ercStartMB = 0;
// ercSegment = 0;
//
// //! mark the start of the first segment
//#if (DISABLE_ERC == 0)
// if (!(*dec_picture)->mb_aff_frame_flag)
// {
// int i;
// ercStartSegment(0, ercSegment, 0 , p_Vid->erc_errorVar);
// //! generate the segments according to the macroblock map
// for(i = 1; i<(*dec_picture)->PicSizeInMbs; ++i)
// {
// if(p_Vid->mb_data[i].ei_flag != p_Vid->mb_data[i-1].ei_flag)
// {
// ercStopSegment(i-1, ercSegment, 0, p_Vid->erc_errorVar); //! stop current segment
//
// //! mark current segment as lost or OK
// if(p_Vid->mb_data[i-1].ei_flag)
// ercMarkCurrSegmentLost((*dec_picture)->size_x, p_Vid->erc_errorVar);
// else
// ercMarkCurrSegmentOK((*dec_picture)->size_x, p_Vid->erc_errorVar);
//
// ++ercSegment; //! next segment
// ercStartSegment(i, ercSegment, 0 , p_Vid->erc_errorVar); //! start new segment
// ercStartMB = i;//! save start MB for this segment
// }
// }
// //! mark end of the last segment
// ercStopSegment((*dec_picture)->PicSizeInMbs-1, ercSegment, 0, p_Vid->erc_errorVar);
// if(p_Vid->mb_data[i-1].ei_flag)
// ercMarkCurrSegmentLost((*dec_picture)->size_x, p_Vid->erc_errorVar);
// else
// ercMarkCurrSegmentOK((*dec_picture)->size_x, p_Vid->erc_errorVar);
//
// //! call the right error concealment function depending on the frame type.
// p_Vid->erc_mvperMB /= (*dec_picture)->PicSizeInMbs;
//
// p_Vid->erc_img = p_Vid;
//
// if((*dec_picture)->slice_type == I_SLICE || (*dec_picture)->slice_type == SI_SLICE) // I-frame
// ercConcealIntraFrame(p_Vid, &recfr, (*dec_picture)->size_x, (*dec_picture)->size_y, p_Vid->erc_errorVar);
// else
// ercConcealInterFrame(&recfr, p_Vid->erc_object_list, (*dec_picture)->size_x, (*dec_picture)->size_y, p_Vid->erc_errorVar, (*dec_picture)->chroma_format_idc);
// }
//#endif
//
// if(!p_Vid->iDeblockMode && (p_Vid->bDeblockEnable & (1<<(*dec_picture)->used_for_reference)))
// {
// //deblocking for frame or field
// if( (p_Vid->separate_colour_plane_flag != 0) )
// {
// int nplane;
// int colour_plane_id = p_Vid->ppSliceList[0]->colour_plane_id;
// for( nplane=0; nplane<MAX_PLANE; ++nplane )
// {
// p_Vid->ppSliceList[0]->colour_plane_id = nplane;
// change_plane_JV( p_Vid, nplane, NULL );
// DeblockPicture( p_Vid, *dec_picture );
// }
// p_Vid->ppSliceList[0]->colour_plane_id = colour_plane_id;
// make_frame_picture_JV(p_Vid);
// }
// else
// {
// DeblockPicture( p_Vid, *dec_picture );
// }
// }
// else
// {
// if( (p_Vid->separate_colour_plane_flag != 0) )
// {
// make_frame_picture_JV(p_Vid);
// }
// }
//
// if ((*dec_picture)->mb_aff_frame_flag)
// MbAffPostProc(p_Vid);
//
// if (p_Vid->structure == FRAME) // buffer mgt. for frame mode
// frame_postprocessing(p_Vid);
// else
// field_postprocessing(p_Vid); // reset all interlaced variables
//#if (MVC_EXTENSION_ENABLE)
// if((*dec_picture)->used_for_reference || ((*dec_picture)->inter_view_flag == 1))
// pad_dec_picture(p_Vid, *dec_picture);
//#else
// if((*dec_picture)->used_for_reference)
// pad_dec_picture(p_Vid, *dec_picture);
//#endif
// structure = (*dec_picture)->structure;
// slice_type = (*dec_picture)->slice_type;
// frame_poc = (*dec_picture)->frame_poc;
// top_poc = (*dec_picture)->top_poc;
// bottom_poc = (*dec_picture)->bottom_poc;
// refpic = (*dec_picture)->used_for_reference;
// qp = (*dec_picture)->qp;
// pic_num = (*dec_picture)->pic_num;
// is_idr = (*dec_picture)->idr_flag;
//
// chroma_format_idc = (*dec_picture)->chroma_format_idc;
//
// store_picture_in_dpb(p_Vid->p_Dpb, *dec_picture);
// *dec_picture=NULL;
//
// if (p_Vid->last_has_mmco_5)
// {
// p_Vid->pre_frame_num = 0;
// }
//
// if (p_Inp->silent == FALSE)
// {
// if (structure==TOP_FIELD || structure==FRAME)
// {
// if(slice_type == I_SLICE && is_idr) // IDR picture
// strcpy(p_Vid->cslice_type,"IDR");
// else if(slice_type == I_SLICE) // I picture
// strcpy(p_Vid->cslice_type," I ");
// else if(slice_type == P_SLICE) // P pictures
// strcpy(p_Vid->cslice_type," P ");
// else if(slice_type == SP_SLICE) // SP pictures
// strcpy(p_Vid->cslice_type,"SP ");
// else if (slice_type == SI_SLICE)
// strcpy(p_Vid->cslice_type,"SI ");
// else if(refpic) // stored B pictures
// strcpy(p_Vid->cslice_type," B ");
// else // B pictures
// strcpy(p_Vid->cslice_type," b ");
//
// if (structure==FRAME)
// {
// strncat(p_Vid->cslice_type,") ",8-strlen(p_Vid->cslice_type));
// }
// }
// else if (structure==BOTTOM_FIELD)
// {
// if(slice_type == I_SLICE && is_idr) // IDR picture
// strncat(p_Vid->cslice_type,"|IDR)",8-strlen(p_Vid->cslice_type));
// else if(slice_type == I_SLICE) // I picture
// strncat(p_Vid->cslice_type,"| I )",8-strlen(p_Vid->cslice_type));
// else if(slice_type == P_SLICE) // P pictures
// strncat(p_Vid->cslice_type,"| P )",8-strlen(p_Vid->cslice_type));
// else if(slice_type == SP_SLICE) // SP pictures
// strncat(p_Vid->cslice_type,"|SP )",8-strlen(p_Vid->cslice_type));
// else if (slice_type == SI_SLICE)
// strncat(p_Vid->cslice_type,"|SI )",8-strlen(p_Vid->cslice_type));
// else if(refpic) // stored B pictures
// strncat(p_Vid->cslice_type,"| B )",8-strlen(p_Vid->cslice_type));
// else // B pictures
// strncat(p_Vid->cslice_type,"| b )",8-strlen(p_Vid->cslice_type));
// }
// }
//
// if ((structure==FRAME)||structure==BOTTOM_FIELD)
// {
// gettime (&(p_Vid->end_time)); // end time
//
// tmp_time = timediff(&(p_Vid->start_time), &(p_Vid->end_time));
// p_Vid->tot_time += tmp_time;
// tmp_time = timenorm(tmp_time);
// sprintf(yuvFormat,"%s", yuv_types[chroma_format_idc]);
//
// if (p_Inp->silent == FALSE)
// {
// SNRParameters *snr = p_Vid->snr;
// if (p_Vid->p_ref != -1)
// fprintf(stdout,"%05d(%s%5d %5d %5d %8.4f %8.4f %8.4f %s %7d\n",
// p_Vid->frame_no, p_Vid->cslice_type, frame_poc, pic_num, qp, snr->snr[0], snr->snr[1], snr->snr[2], yuvFormat, (int) tmp_time);
// else
// fprintf(stdout,"%05d(%s%5d %5d %5d %s %7d\n",
// p_Vid->frame_no, p_Vid->cslice_type, frame_poc, pic_num, qp, yuvFormat, (int)tmp_time);
// }
// else
// fprintf(stdout,"Completed Decoding frame %05d.\r",snr->frame_ctr);
//
// fflush(stdout);
//
// if(slice_type == I_SLICE || slice_type == SI_SLICE || slice_type == P_SLICE || refpic) // I or P pictures
// {
//#if (MVC_EXTENSION_ENABLE)
// if((p_Vid->ppSliceList[0])->view_id!=0)
//#endif
// ++(p_Vid->number);
// }
// else
// ++(p_Vid->Bframe_ctr); // B pictures
// ++(snr->frame_ctr);
//
// ++(p_Vid->g_nFrame);
// }
//
// //p_Vid->currentSlice->current_mb_nr = -4712; // impossible value for debugging, StW
// //p_Vid->currentSlice->current_slice_nr = 0;
//}
/*!
************************************************************************
* \brief
* write the encoding mode and motion vectors of current
* MB to the buffer of the error concealment module.
************************************************************************
*/
void ercWriteMBMODEandMV(Macroblock *currMB)
{
  VideoParameters *p_Vid = currMB->p_Vid;
  int i, ii, jj, currMBNum = currMB->mbAddrX; //p_Vid->currentSlice->current_mb_nr;
  StorablePicture *dec_picture = p_Vid->dec_picture;
  // Macroblock position in MB units within the picture.
  int mbx = xPosMB(currMBNum, dec_picture->size_x), mby = yPosMB(currMBNum, dec_picture->size_x);
  objectBuffer_t *currRegion, *pRegion;
  // Four concealment regions (one per 8x8 partition) per macroblock.
  currRegion = p_Vid->erc_object_list + (currMBNum<<2);
  if(p_Vid->type != B_SLICE) //non-B frame
  {
    // Record mode and a LIST_0 motion vector for each 8x8 partition.
    for (i=0; i<4; ++i)
    {
      pRegion = currRegion + i;
      pRegion->regionMode = (currMB->mb_type ==I16MB ? REGMODE_INTRA :
        currMB->b8mode[i]==IBLOCK ? REGMODE_INTRA_8x8 :
        currMB->b8mode[i]==0 ? REGMODE_INTER_COPY :
        currMB->b8mode[i]==1 ? REGMODE_INTER_PRED : REGMODE_INTER_PRED_8x8);
      if (currMB->b8mode[i]==0 || currMB->b8mode[i]==IBLOCK) // INTRA OR COPY
      {
        // No motion information for intra / copy partitions.
        pRegion->mv[0] = 0;
        pRegion->mv[1] = 0;
        pRegion->mv[2] = 0;
      }
      else
      {
        // (ii,jj): 4x4-block coordinates of this 8x8 partition's
        // top-left block inside the picture-wide mv_info grid.
        ii = 4*mbx + (i & 0x01)*2;// + BLOCK_SIZE;
        jj = 4*mby + (i >> 1 )*2;
        if (currMB->b8mode[i]>=5 && currMB->b8mode[i]<=7) // SMALL BLOCKS
        {
          // Sub-8x8 partitions: store the rounded average of the four
          // 4x4 LIST_0 vectors inside this 8x8 region.
          pRegion->mv[0] = (dec_picture->mv_info[jj][ii].mv[LIST_0].mv_x + dec_picture->mv_info[jj][ii + 1].mv[LIST_0].mv_x + dec_picture->mv_info[jj + 1][ii].mv[LIST_0].mv_x + dec_picture->mv_info[jj + 1][ii + 1].mv[LIST_0].mv_x + 2)/4;
          pRegion->mv[1] = (dec_picture->mv_info[jj][ii].mv[LIST_0].mv_y + dec_picture->mv_info[jj][ii + 1].mv[LIST_0].mv_y + dec_picture->mv_info[jj + 1][ii].mv[LIST_0].mv_y + dec_picture->mv_info[jj + 1][ii + 1].mv[LIST_0].mv_y + 2)/4;
        }
        else // 16x16, 16x8, 8x16, 8x8
        {
          // One vector per 8x8 (or larger) partition: take the
          // top-left 4x4 block's vector.
          pRegion->mv[0] = dec_picture->mv_info[jj][ii].mv[LIST_0].mv_x;
          pRegion->mv[1] = dec_picture->mv_info[jj][ii].mv[LIST_0].mv_y;
          // pRegion->mv[0] = dec_picture->motion.mv[LIST_0][4*mby+(i/2)*2][4*mbx+(i%2)*2+BLOCK_SIZE][0];
          // pRegion->mv[1] = dec_picture->motion.mv[LIST_0][4*mby+(i/2)*2][4*mbx+(i%2)*2+BLOCK_SIZE][1];
        }
        // Accumulate |mv| for the per-MB motion statistic used by ERC.
        currMB->p_Slice->erc_mvperMB += iabs(pRegion->mv[0]) + iabs(pRegion->mv[1]);
        // mv[2] carries the LIST_0 reference index.
        pRegion->mv[2] = dec_picture->mv_info[jj][ii].ref_idx[LIST_0];
      }
    }
  }
  else //B-frame
  {
    for (i=0; i<4; ++i)
    {
      ii = 4*mbx + (i%2)*2;// + BLOCK_SIZE;
      jj = 4*mby + (i/2)*2;
      pRegion = currRegion + i;
      pRegion->regionMode = (currMB->mb_type ==I16MB ? REGMODE_INTRA :
        currMB->b8mode[i]==IBLOCK ? REGMODE_INTRA_8x8 : REGMODE_INTER_PRED_8x8);
      if (currMB->mb_type==I16MB || currMB->b8mode[i]==IBLOCK) // INTRA
      {
        pRegion->mv[0] = 0;
        pRegion->mv[1] = 0;
        pRegion->mv[2] = 0;
      }
      else
      {
        // Choose LIST_1 only when the LIST_0 reference is unused
        // (ref_idx < 0), otherwise prefer LIST_0.
        int idx = (dec_picture->mv_info[jj][ii].ref_idx[0] < 0) ? 1 : 0;
        // int idx = (currMB->b8mode[i]==0 && currMB->b8pdir[i]==2 ? LIST_0 : currMB->b8pdir[i]==1 ? LIST_1 : LIST_0);
        // int idx = currMB->b8pdir[i]==0 ? LIST_0 : LIST_1;
        // Rounded average of the four 4x4 vectors of the chosen list.
        pRegion->mv[0] = (dec_picture->mv_info[jj][ii].mv[idx].mv_x +
          dec_picture->mv_info[jj][ii+1].mv[idx].mv_x +
          dec_picture->mv_info[jj+1][ii].mv[idx].mv_x +
          dec_picture->mv_info[jj+1][ii+1].mv[idx].mv_x + 2)/4;
        pRegion->mv[1] = (dec_picture->mv_info[jj][ii].mv[idx].mv_y +
          dec_picture->mv_info[jj][ii+1].mv[idx].mv_y +
          dec_picture->mv_info[jj+1][ii].mv[idx].mv_y +
          dec_picture->mv_info[jj+1][ii+1].mv[idx].mv_y + 2)/4;
        currMB->p_Slice->erc_mvperMB += iabs(pRegion->mv[0]) + iabs(pRegion->mv[1]);
        pRegion->mv[2] = (dec_picture->mv_info[jj][ii].ref_idx[idx]);
        /*
        if (currMB->b8pdir[i]==0 || (currMB->b8pdir[i]==2 && currMB->b8mode[i]!=0)) // forward or bidirect
        {
          pRegion->mv[2] = (dec_picture->motion.ref_idx[LIST_0][jj][ii]);
          ///???? is it right, not only "p_Vid->fw_refFrArr[jj][ii-4]"
        }
        else
        {
          pRegion->mv[2] = (dec_picture->motion.ref_idx[LIST_1][jj][ii]);
          // pRegion->mv[2] = 0;
        }
        */
      }
    }
  }
}
/*!
 ************************************************************************
 * \brief
 *    set defaults for old_slice so that the first decoded slice is
 *    detected as the first VCL NAL unit of a new picture
 ************************************************************************
 */
void init_old_slice(OldSliceParams *p_old_slice)
{
  /* Flags: reset to "no previous picture seen". */
  p_old_slice->field_pic_flag = 0;
  p_old_slice->idr_flag       = FALSE;

  /* Out-of-range sentinels: no legal slice-header value can match them,
     so every comparison in is_new_picture() initially differs. */
  p_old_slice->pps_id      = INT_MAX;
  p_old_slice->frame_num   = INT_MAX;
  p_old_slice->nal_ref_idc = INT_MAX;

  p_old_slice->pic_oder_cnt_lsb          = UINT_MAX;
  p_old_slice->delta_pic_oder_cnt_bottom = INT_MAX;
  p_old_slice->delta_pic_order_cnt[0]    = INT_MAX;
  p_old_slice->delta_pic_order_cnt[1]    = INT_MAX;
}
/*!
 ************************************************************************
 * \brief
 *    Snapshot into p_old_slice the slice-header fields that
 *    is_new_picture() later compares against the next slice.
 ************************************************************************
 */
void CopySliceInfo(Slice *currSlice, OldSliceParams *p_old_slice,seq_parameter_set_rbsp_t *active_sps)
{
  p_old_slice->pps_id         = currSlice->pic_parameter_set_id;
  p_old_slice->frame_num      = currSlice->frame_num;
  p_old_slice->field_pic_flag = currSlice->field_pic_flag;

  /* bottom_field_flag is only meaningful for field pictures. */
  if (currSlice->field_pic_flag)
    p_old_slice->bottom_field_flag = currSlice->bottom_field_flag;

  p_old_slice->nal_ref_idc = currSlice->nal_reference_idc;
  p_old_slice->idr_flag    = (byte) currSlice->idr_flag;

  /* idr_pic_id is only coded in IDR slices. */
  if (currSlice->idr_flag)
    p_old_slice->idr_pic_id = currSlice->idr_pic_id;

  /* POC bookkeeping depends on the SPS picture-order-count type. */
  switch (active_sps->pic_order_cnt_type)
  {
  case 0:
    p_old_slice->pic_oder_cnt_lsb          = currSlice->pic_order_cnt_lsb;
    p_old_slice->delta_pic_oder_cnt_bottom = currSlice->delta_pic_order_cnt_bottom;
    break;
  case 1:
    p_old_slice->delta_pic_order_cnt[0] = currSlice->delta_pic_order_cnt[0];
    p_old_slice->delta_pic_order_cnt[1] = currSlice->delta_pic_order_cnt[1];
    break;
  default:
    break;
  }

#if (MVC_EXTENSION_ENABLE)
  p_old_slice->view_id         = currSlice->view_id;
  p_old_slice->inter_view_flag = currSlice->inter_view_flag;
  p_old_slice->anchor_pic_flag = currSlice->anchor_pic_flag;
#endif
}
/*!
************************************************************************
* \brief
* detect if current slice is "first VCL NAL unit of a picture"
************************************************************************
*/
int is_new_picture( Slice *currSlice, OldSliceParams *p_old_slice,seq_parameter_set_rbsp_t *active_sps,pic_parameter_set_rbsp_t *active_pps)
{
  /* Per H.264 subclause 7.4.1.2.4: a slice starts a new picture when any
     of the compared header fields changed. Returns 1 (new) or 0. */
  int changed = 0;

  if (p_old_slice->pps_id != currSlice->pic_parameter_set_id)
    changed = 1;
  if (p_old_slice->frame_num != currSlice->frame_num)
    changed = 1;
  if (p_old_slice->field_pic_flag != currSlice->field_pic_flag)
    changed = 1;

  /* bottom_field_flag only matters when both slices are field coded. */
  if (currSlice->field_pic_flag && p_old_slice->field_pic_flag &&
      p_old_slice->bottom_field_flag != currSlice->bottom_field_flag)
    changed = 1;

  /* nal_ref_idc change is only a picture boundary when one of the two
     values is zero (reference <-> non-reference transition). */
  if ((p_old_slice->nal_ref_idc != currSlice->nal_reference_idc) &&
      ((p_old_slice->nal_ref_idc == 0) || (currSlice->nal_reference_idc == 0)))
    changed = 1;

  if (p_old_slice->idr_flag != currSlice->idr_flag)
    changed = 1;
  if (currSlice->idr_flag && p_old_slice->idr_flag &&
      p_old_slice->idr_pic_id != currSlice->idr_pic_id)
    changed = 1;

  if (active_sps->pic_order_cnt_type == 0)
  {
    if (p_old_slice->pic_oder_cnt_lsb != currSlice->pic_order_cnt_lsb)
      changed = 1;
    if (active_pps->bottom_field_pic_order_in_frame_present_flag == 1 &&
        !currSlice->field_pic_flag &&
        p_old_slice->delta_pic_oder_cnt_bottom != currSlice->delta_pic_order_cnt_bottom)
      changed = 1;
  }

  if (active_sps->pic_order_cnt_type == 1 &&
      !active_sps->delta_pic_order_always_zero_flag)
  {
    if (p_old_slice->delta_pic_order_cnt[0] != currSlice->delta_pic_order_cnt[0])
      changed = 1;
    if (active_pps->bottom_field_pic_order_in_frame_present_flag == 1 &&
        !currSlice->field_pic_flag &&
        p_old_slice->delta_pic_order_cnt[1] != currSlice->delta_pic_order_cnt[1])
      changed = 1;
  }

#if (MVC_EXTENSION_ENABLE)
  if (currSlice->view_id != p_old_slice->view_id)
    changed = 1;
  if (currSlice->inter_view_flag != p_old_slice->inter_view_flag)
    changed = 1;
  if (currSlice->anchor_pic_flag != p_old_slice->anchor_pic_flag)
    changed = 1;
#endif

  return changed;
}
/*!
************************************************************************
* \brief
* Prepare field and frame buffer after frame decoding
************************************************************************
*/
void frame_postprocessing(VideoParameters *p_Vid)
{
  // Intentionally empty: nothing needs to be done after decoding a
  // frame picture; kept as the frame-mode counterpart of
  // field_postprocessing().
}
/*!
************************************************************************
* \brief
* Prepare field and frame buffer after field decoding
************************************************************************
*/
void field_postprocessing(VideoParameters *p_Vid)
{
  // Two decoded fields form one frame, so halve the picture counter.
  // NOTE(review): assumes p_Vid->number was incremented once per field
  // during field decoding — confirm against the counting code.
  p_Vid->number /= 2;
}
/*!
************************************************************************
* \brief
* copy StorablePicture *src -> StorablePicture *dst
* for 4:4:4 Independent mode
************************************************************************
*/
/*
 * Copy per-picture header/bookkeeping fields from src to dst for the
 * 4:4:4 independent-plane (JV) mode. Pixel data is not copied; the
 * tone-mapping LUT, when present, is duplicated into a fresh buffer
 * owned by dst.
 *
 * Fix: the original assigned dst->seiHasTone_mapping twice in a row;
 * the duplicate assignment has been removed.
 */
void copy_dec_picture_JV( VideoParameters *p_Vid, StorablePicture *dst, StorablePicture *src )
{
  /* Picture order counts and quantization state. */
  dst->top_poc          = src->top_poc;
  dst->bottom_poc       = src->bottom_poc;
  dst->frame_poc        = src->frame_poc;
  dst->qp               = src->qp;
  dst->slice_qp_delta   = src->slice_qp_delta;
  dst->chroma_qp_offset[0] = src->chroma_qp_offset[0];
  dst->chroma_qp_offset[1] = src->chroma_qp_offset[1];
  dst->poc              = src->poc;
  dst->slice_type       = src->slice_type;

  /* Reference-picture marking state. */
  dst->used_for_reference             = src->used_for_reference;
  dst->idr_flag                       = src->idr_flag;
  dst->no_output_of_prior_pics_flag   = src->no_output_of_prior_pics_flag;
  dst->long_term_reference_flag       = src->long_term_reference_flag;
  dst->adaptive_ref_pic_buffering_flag = src->adaptive_ref_pic_buffering_flag;
  /* Shallow copy: dst shares the marking buffer with src. */
  dst->dec_ref_pic_marking_buffer     = src->dec_ref_pic_marking_buffer;

  /* Geometry / coding-structure fields. */
  dst->mb_aff_frame_flag   = src->mb_aff_frame_flag;
  dst->PicWidthInMbs       = src->PicWidthInMbs;
  dst->pic_num             = src->pic_num;
  dst->frame_num           = src->frame_num;
  dst->recovery_frame      = src->recovery_frame;
  dst->coded_frame         = src->coded_frame;
  dst->chroma_format_idc   = src->chroma_format_idc;
  dst->frame_mbs_only_flag = src->frame_mbs_only_flag;

  /* Frame cropping rectangle. */
  dst->frame_cropping_flag               = src->frame_cropping_flag;
  dst->frame_cropping_rect_left_offset   = src->frame_cropping_rect_left_offset;
  dst->frame_cropping_rect_right_offset  = src->frame_cropping_rect_right_offset;
  dst->frame_cropping_rect_top_offset    = src->frame_cropping_rect_top_offset;
  dst->frame_cropping_rect_bottom_offset = src->frame_cropping_rect_bottom_offset;

#if (ENABLE_OUTPUT_TONEMAPPING)
  /* Store the necessary tone-mapping SEI into the StorablePicture. */
  dst->seiHasTone_mapping    = src->seiHasTone_mapping;
  dst->tone_mapping_model_id = src->tone_mapping_model_id;
  dst->tonemapped_bit_depth  = src->tonemapped_bit_depth;
  if( src->tone_mapping_lut )
  {
    int coded_data_bit_max = (1 << p_Vid->seiToneMapping->coded_data_bit_depth);
    dst->tone_mapping_lut = malloc(sizeof(int) * coded_data_bit_max);
    if (NULL == dst->tone_mapping_lut)
    {
      no_mem_exit("copy_dec_picture_JV: tone_mapping_lut");
    }
    /* NOTE(review): allocation uses sizeof(int) while the copy uses
       sizeof(imgpel); when imgpel is narrower than int this over-allocates
       and copies only the imgpel-sized portion. Confirm the element type
       of tone_mapping_lut and make both sizes consistent. */
    memcpy(dst->tone_mapping_lut, src->tone_mapping_lut, sizeof(imgpel) * coded_data_bit_max);
  }
#endif
}
// this is intended to make get_block_luma faster by doing this at a more appropriate level
// i.e. per slice rather than per MB
// Set up cur_imgY / no_ref on every reference picture of the slice's
// lists so get_block_luma() does not have to do it per macroblock.
static void init_cur_imgy(Slice *currSlice, VideoParameters *p_Vid)
{
  int i,j;
  if ((p_Vid->separate_colour_plane_flag != 0)) {
    StorablePicture *vidref = p_Vid->no_reference_picture;
    // A reference is flagged "no_ref" when it is the placeholder
    // no-reference picture and we are before the recovery point.
    int noref = (currSlice->framepoc < p_Vid->recovery_poc);
    // NOTE(review): cases 1 and 2 are compiled out below (#if 0), so for
    // colour_plane_id != 0 this switch intentionally does nothing —
    // confirm that is still the desired behavior.
    switch(currSlice->colour_plane_id) {
    case 0:
      // All 6 lists are walked (covers MBAFF field lists as well).
      for (j = 0; j < 6; j++) { //for (j = 0; j < (currSlice->slice_type==B_SLICE?2:1); j++) {
        for (i = 0; i < MAX_LIST_SIZE; i++) {
          StorablePicture *curr_ref = currSlice->listX[j][i];
          if (curr_ref) {
            curr_ref->no_ref = noref && (curr_ref == vidref);
            curr_ref->cur_imgY = curr_ref->imgY;
          }
        }
      }
      break;
#if 0
    case 1:
      for (j = 0; j < 6; j++) { //for (j = 0; j < (currSlice->slice_type==B_SLICE?2:1); j++) { //
        for (i = 0; i < MAX_LIST_SIZE; i++) {
          StorablePicture *curr_ref = currSlice->listX[j][i];
          if (curr_ref) {
            curr_ref->no_ref = noref && (curr_ref == vidref);
            curr_ref->cur_imgY = curr_ref->imgUV[0];
          }
        }
      }
      break;
    case 2:
      for (j = 0; j < 6; j++) { //for (j = 0; j < (currSlice->slice_type==B_SLICE?2:1); j++) { //
        for (i = 0; i < MAX_LIST_SIZE; i++) {
          StorablePicture *curr_ref = currSlice->listX[j][i];
          if (curr_ref) {
            curr_ref->no_ref = noref && (curr_ref == vidref);
            curr_ref->cur_imgY = curr_ref->imgUV[1];
          }
        }
      }
      break;
#endif
    }
  }
  else
  {
    StorablePicture *vidref = p_Vid->no_reference_picture;
    int noref = (currSlice->framepoc < p_Vid->recovery_poc);
    // 6 lists with MBAFF (frame + 2x field), otherwise 2 for B slices
    // and 1 for P/SP slices.
    int total_lists = currSlice->mb_aff_frame_flag ? 6 : (currSlice->slice_type==B_SLICE ? 2 : 1);
    // for (j = 0; j < 6; j++) { //for (j = 0; j < (currSlice->slice_type==B_SLICE?2:1); j++) {
    for (j = 0; j < total_lists; j++)
    {
      // note that if we always set this to MAX_LIST_SIZE, we avoid crashes with invalid ref_idx being set
      // since currently this is done at the slice level, it seems safe to do so.
      // Note for some reason I get now a mismatch between version 12 and this one in cabac. I wonder why.
      //for (i = 0; i < currSlice->listXsize[j]; i++)
      for (i = 0; i < MAX_LIST_SIZE; i++)
      {
        StorablePicture *curr_ref = currSlice->listX[j][i];
        if (curr_ref) {
          curr_ref->no_ref = noref && (curr_ref == vidref);
          curr_ref->cur_imgY = curr_ref->imgY;
        }
      }
    }
  }
}
/*!
************************************************************************
* \brief
* decodes one slice
************************************************************************
*/
//void decode_one_slice(Slice *currSlice)
//{
// VideoParameters *p_Vid = currSlice->p_Vid;
// Boolean end_of_slice = FALSE;
// Macroblock *currMB = NULL;
// currSlice->cod_counter=-1;
//
// if( (p_Vid->separate_colour_plane_flag != 0) )
// {
// change_plane_JV( p_Vid, currSlice->colour_plane_id, currSlice );
// }
// else
// {
// currSlice->mb_data = p_Vid->mb_data;
// currSlice->dec_picture = p_Vid->dec_picture;
// currSlice->siblock = p_Vid->siblock;
// currSlice->ipredmode = p_Vid->ipredmode;
// currSlice->intra_block = p_Vid->intra_block;
// }
//
// if (currSlice->slice_type == B_SLICE)
// {
// compute_colocated(currSlice, currSlice->listX);
// }
//
// if (currSlice->slice_type != I_SLICE && currSlice->slice_type != SI_SLICE)
// init_cur_imgy(currSlice,p_Vid);
//
// //reset_ec_flags(p_Vid);
//
// while (end_of_slice == FALSE) // loop over macroblocks
// {
//
//#if TRACE
// fprintf(p_Dec->p_trace,"\n*********** POC: %i (I/P) MB: %i Slice: %i Type %d **********\n", currSlice->ThisPOC, currSlice->current_mb_nr, currSlice->current_slice_nr, currSlice->slice_type);
//#endif
//
// // Initializes the current macroblock
// start_macroblock(currSlice, &currMB);
// // Get the syntax elements from the NAL
// currSlice->read_one_macroblock(currMB);
// decode_one_macroblock(currMB, currSlice->dec_picture);
//
// if(currSlice->mb_aff_frame_flag && currMB->mb_field)
// {
// currSlice->num_ref_idx_active[LIST_0] >>= 1;
// currSlice->num_ref_idx_active[LIST_1] >>= 1;
// }
//
//#if (DISABLE_ERC == 0)
// ercWriteMBMODEandMV(currMB);
//#endif
//
// end_of_slice = exit_macroblock(currSlice, (!currSlice->mb_aff_frame_flag|| currSlice->current_mb_nr%2));
// }
//
// //reset_ec_flags(p_Vid);
//}
#if (MVC_EXTENSION_ENABLE)
/* Map an MVC view_id to its view order index (VOIdx) using the active
 * subset SPS.  Returns -1 when no subset SPS is active or the view_id
 * does not appear in the view_id map. */
int GetVOIdx(VideoParameters *p_Vid, int iViewId)
{
  int iVOIdx = -1;

  if (p_Vid->active_subset_sps)
  {
    int *map = p_Vid->active_subset_sps->view_id;
    /* Scan from the highest index down; stops at -1 when absent. */
    for (iVOIdx = p_Vid->active_subset_sps->num_views_minus1; iVOIdx >= 0; iVOIdx--)
    {
      if (map[iVOIdx] == iViewId)
        break;
    }
  }

  return iVOIdx;
}
/* Return the number of inter-view references for VIEW_ID on reference
 * list LISTIDX (0 or 1), taken from the anchor or non-anchor tables of
 * the active subset SPS depending on ANCHOR_PIC_FLAG.  Returns 0 when
 * the view is unknown. */
int get_maxViewIdx (VideoParameters *p_Vid, int view_id, int anchor_pic_flag, int listidx)
{
  int VOIdx = GetVOIdx(p_Vid, view_id);

  if (VOIdx < 0)
    return 0;

  if (anchor_pic_flag)
    return listidx ? p_Vid->active_subset_sps->num_anchor_refs_l1[VOIdx]
                   : p_Vid->active_subset_sps->num_anchor_refs_l0[VOIdx];

  return listidx ? p_Vid->active_subset_sps->num_non_anchor_refs_l1[VOIdx]
                 : p_Vid->active_subset_sps->num_non_anchor_refs_l0[VOIdx];
}
#endif
|
factors.c | /*********************
* Time intergral KDK scheme.
* kick and drifts.
*
* This code was initially modified by Jun Koda,
* from the original serial COLA code
* by Svetlin Tassev.
*
* The kick and drift still supports a COLA compat-mode.
* Most of the nasty factors are for COLA compat-mode
* (not needed in PM)
* We also added a 2LPT mode that does just 2LPT.
*
* Yu Feng <rainwoodman@gmail.com>
*
*/
#include <math.h>
#include <string.h>
#include <assert.h>
#include <alloca.h>
#include <mpi.h>
#include <gsl/gsl_integration.h>
#include <gsl/gsl_roots.h>
#include <gsl/gsl_sf_hyperg.h>
#include <gsl/gsl_errno.h>
#include <fastpm/libfastpm.h>
#include <fastpm/logging.h>
#include "pmpfft.h"
#include "vpm.h"
static double
Sq(double ai, double af, double aRef, double nLPT, FastPMCosmology * c, int USE_NONSTDDA);
static double
Sphi(double ai, double af, double aRef, double nLPT, FastPMCosmology * c, int USE_NONSTDDA);
/* Look up the cumulative drift factors at scale factor AF by linear
 * interpolation on the uniformly sampled tables in DRIFT.
 * Raises when AF lies beyond the tabulated range [ai, af]. */
static inline void
fastpm_drift_lookup(FastPMDriftFactor * drift, double af, double * dyyy, double * da1, double * da2)
{
    const int last = drift->nsamples - 1;

    /* Exact endpoints bypass the interpolation entirely. */
    if(af == drift->af) {
        *dyyy = drift->dyyy[last];
        *da1 = drift->da1[last];
        *da2 = drift->da2[last];
        return;
    }
    if(af == drift->ai) {
        *dyyy = drift->dyyy[0];
        *da1 = drift->da1[0];
        *da2 = drift->da2[0];
        return;
    }

    double pos = (af - drift->ai) / (drift->af - drift->ai) * (drift->nsamples - 1);
    int lo = floor(pos);
    if(lo + 1 >= drift->nsamples) {
        fastpm_raise(-1, "drift beyond factor's available range. ");
    }
    double wl = lo + 1 - pos;   /* weight of the left sample  */
    double wr = pos - lo;       /* weight of the right sample */
    *dyyy = drift->dyyy[lo] * wl + drift->dyyy[lo + 1] * wr;
    *da1 = drift->da1[lo] * wl + drift->da1[lo + 1] * wr;
    *da2 = drift->da2[lo] * wl + drift->da2[lo + 1] * wr;
}
/* Drift particle I of store P from its current time p->meta.a_x to AF,
 * writing the new position into XO.  The drift factors are cumulative
 * from drift->ai, so the step is the difference of two lookups. */
inline void
fastpm_drift_one(FastPMDriftFactor * drift, FastPMStore * p, ptrdiff_t i, double xo[3], double af)
{
    double dyyy_f, da1_f, da2_f;
    double dyyy_i, da1_i, da2_i;

    fastpm_drift_lookup(drift, af, &dyyy_f, &da1_f, &da2_f);
    fastpm_drift_lookup(drift, p->meta.a_x, &dyyy_i, &da1_i, &da2_i);

    double dyyy = dyyy_f - dyyy_i;
    double da1 = da1_f - da1_i;
    double da2 = da2_f - da2_i;

    int d;
    for(d = 0; d < 3; d ++) {
        switch(drift->forcemode) {
            case FASTPM_FORCE_2LPT:
                xo[d] = p->x[i][d] + p->dx1[i][d] * da1 + p->dx2[i][d] * da2;
                break;
            case FASTPM_FORCE_ZA:
                xo[d] = p->x[i][d] + p->dx1[i][d] * da1;
                break;
            case FASTPM_FORCE_FASTPM:
            case FASTPM_FORCE_PM:
                xo[d] = p->x[i][d] + p->v[i][d] * dyyy;
                break;
            case FASTPM_FORCE_COLA: {
                /* COLA: drift only the residual velocity (LPT part removed),
                 * then add the LPT displacement growth analytically. */
                double vres = p->v[i][d] - (p->dx1[i][d]*drift->Dv1 + p->dx2[i][d]*drift->Dv2);
                xo[d] = p->x[i][d] + vres * dyyy;
                xo[d] += p->dx1[i][d] * da1 + p->dx2[i][d] * da2;
                break;
            }
        }
        /* Optional PGD correction; skipped entirely for a zero-length drift
         * to protect the division by the total dyyy. */
        if(p->pgdc && drift->ai != drift->af) {
            xo[d] += 0.5 * p->pgdc[i][d] * dyyy / drift->dyyy[drift->nsamples-1];
        }
    }
}
/* Look up the cumulative kick factors at scale factor AF by linear
 * interpolation on the uniformly sampled tables in KICK.
 * Raises when AF lies beyond the tabulated range [ai, af]. */
static inline void
fastpm_kick_lookup(FastPMKickFactor * kick, double af, double * dda, double * Dv1, double * Dv2)
{
    const int last = kick->nsamples - 1;

    /* Exact endpoints bypass the interpolation entirely. */
    if(af == kick->af) {
        *dda = kick->dda[last];
        *Dv1 = kick->Dv1[last];
        *Dv2 = kick->Dv2[last];
        return;
    }
    if(af == kick->ai) {
        *dda = kick->dda[0];
        *Dv1 = kick->Dv1[0];
        *Dv2 = kick->Dv2[0];
        return;
    }

    double pos = (af - kick->ai) / (kick->af - kick->ai) * (kick->nsamples - 1);
    int lo = floor(pos);
    if(lo + 1 >= kick->nsamples) {
        fastpm_raise(-1, "kick beyond factor's available range. ");
    }
    double wl = lo + 1 - pos;   /* weight of the left sample  */
    double wr = pos - lo;       /* weight of the right sample */
    *dda = kick->dda[lo] * wl + kick->dda[lo + 1] * wr;
    *Dv1 = kick->Dv1[lo] * wl + kick->Dv1[lo + 1] * wr;
    *Dv2 = kick->Dv2[lo] * wl + kick->Dv2[lo + 1] * wr;
}
/* Kick particle I of store P from its current time p->meta.a_v to AF,
 * writing the new velocity into VO.  The kick factors are cumulative
 * from kick->ai, so the step is the difference of two lookups. */
inline void
fastpm_kick_one(FastPMKickFactor * kick, FastPMStore * p, ptrdiff_t i, float vo[3], double af)
{
    double dda_f, Dv1_f, Dv2_f;
    double dda_i, Dv1_i, Dv2_i;

    fastpm_kick_lookup(kick, af, &dda_f, &Dv1_f, &Dv2_f);
    fastpm_kick_lookup(kick, p->meta.a_v, &dda_i, &Dv1_i, &Dv2_i);

    double dda = dda_f - dda_i;
    double Dv1 = Dv1_f - Dv1_i;
    double Dv2 = Dv2_f - Dv2_i;

    int cola = kick->forcemode == FASTPM_FORCE_COLA;

    int d;
    for(d = 0; d < 3; d++) {
        /* acc holds the force at the kick's reference time (unlike a_x,
         * which is the time the position was computed at). */
        float ax = p->acc[i][d];
        if(cola) {
            /* COLA: add back the analytic LPT acceleration ... */
            ax += (p->dx1[i][d]*kick->q1 + p->dx2[i][d]*kick->q2);
        }
        vo[d] = p->v[i][d] + ax * dda;
        if(cola) {
            /* ... and the analytic LPT velocity growth. */
            vo[d] += (p->dx1[i][d] * Dv1 + p->dx2[i][d] * Dv2);
        }
    }
}
/* Leap frog time integration: kick every particle of PI from a_v to AF
 * and store the new velocities in PO.  Accelerations are assumed to
 * already sit in pi->acc. */
void
fastpm_kick_store(FastPMKickFactor * kick,
    FastPMStore * pi, FastPMStore * po, double af)
{
    int np = pi->np;
    int i;

#pragma omp parallel for
    for(i = 0; i < np; i++) {
        float vnew[3];
        int d;
        fastpm_kick_one(kick, pi, i, vnew, af);
        for(d = 0; d < 3; d++) {
            po->v[i][d] = vnew[d];
        }
    }

    /* Velocities are now synchronized at AF. */
    po->meta.a_v = af;
}
/* G_p(a): FastPM drift "position" factor; by construction equal to the
   linear growth factor D1.  g_p below is its derivative dG_p/da. */
static double G_p(FastPMGrowthInfo * growth_info)
{
    /* integral of g_p */
    return growth_info->D1;
}
/* g_p(a) = dD1/da: the derivative whose integral is G_p above. */
static double g_p(FastPMGrowthInfo * growth_info)
{
    return DGrowthFactorDa(growth_info);
}
/* G_f(a) = a^3 E(a) dD1/da: FastPM kick "force" factor.
   g_f below is its derivative dG_f/da. */
static double G_f(FastPMGrowthInfo * growth_info)
{
    /* integral of g_f */
    double a = growth_info->a;
    return a * a * a * HubbleEa(a, growth_info->c) * g_p(growth_info);
}
/* g_f(a) = dG_f/da, expanded with the product rule on a^3 E(a) dD1/da:
   the three terms below differentiate a^3, E(a) and dD1/da in turn. */
static double g_f(FastPMGrowthInfo * growth_info)
{
    double a = growth_info->a;
    FastPMCosmology * c = growth_info->c;

    double E = HubbleEa(a, c);
    double dEda = DHubbleEaDa(a, c);
    double dDda = g_p(growth_info);       /* dD1/da */
    double d2Dda2 = D2GrowthFactorDa2(growth_info);

    double g_f = 3 * a * a * E * dDda
               + a * a * a * dEda * dDda
               + a * a * a * E * d2Dda2;
    return g_f;
}
/* Tabulate the kick factors for a step from AI to AF with the force
   evaluated at AC: fills kick->dda (velocity increment per unit force),
   kick->Dv1/Dv2 (cumulative LPT velocity growth since AI) on a uniform
   grid of nsamples scale factors, and kick->q1/q2 (COLA LPT source
   terms at AC). */
void fastpm_kick_init(FastPMKickFactor * kick, FastPMSolver * fastpm, double ai, double ac, double af)
{
    FastPMCosmology * c = fastpm->cosmology;
    kick->forcemode = fastpm->config->FORCE_TYPE;

    FastPMGrowthInfo gi_i;  /* growth info at ai */
    FastPMGrowthInfo gi_c;  /* growth info at ac */
    FastPMGrowthInfo gi_e;  /* growth info at each sample ae (reused) */
    fastpm_growth_info_init(&gi_i, ai, c);
    fastpm_growth_info_init(&gi_c, ac, c);

    double E_i = HubbleEa(ai, c);
    double E_c = HubbleEa(ac, c);
    double D1_i = gi_i.D1;
    double D2_i = gi_i.D2;
    double f1_i = gi_i.f1;
    double f2_i = gi_i.f2;
    double D1_c = gi_c.D1;
    double D2_c = gi_c.D2;

    double Omega_m0 = Omega_source(1, c);
    double Omega_mc = Omega_source(ac, c);

    // kick->q1,2 are used for the COLA force implementation.
    // growth_mode = ODE and LCDM should match for an LCDM background,
    // but neither is guaranteed accurate for a background with radiaiton.
    // We advise using LCDM mode for forcemode = FASTPM_FORCE_COLA, as in the
    // original implementation of FastPM.
    kick->q1 = D1_c;
    switch (c->growth_mode){
        case FASTPM_GROWTH_MODE_LCDM:
            kick->q2 = D1_c*D1_c * (1.0 + 7.0/3.0 * pow(Omega_mc, 1.0/143.0));
            break;
        case FASTPM_GROWTH_MODE_ODE:
            kick->q2 = D1_c*D1_c * (1 - D1_c*D1_c/D2_c);
            break;
        default:
            fastpm_raise(-1, "Please enter a valid growth mode.\n");
    }

    kick->nsamples = 32;
    int i;
    /* LPT velocity growth at ai; subtracted so Dv1/Dv2 accumulate from ai. */
    double Dv1i = D1_i * ai * ai * E_i * f1_i;
    double Dv2i = D2_i * ai * ai * E_i * f2_i;
    for(i = 0; i < kick->nsamples; i ++) {
        /* ae walks linearly from ai (i = 0) to af (i = nsamples - 1). */
        double ae = ai * (1.0 * (kick->nsamples - 1 - i) / (kick->nsamples - 1))
                  + af * (1.0 * i / (kick->nsamples - 1));

        fastpm_growth_info_init(&gi_e, ae, c);
        double D1_e = gi_e.D1;
        double f1_e = gi_e.f1;
        double D2_e = gi_e.D2;
        double f2_e = gi_e.f2;
        double E_e = HubbleEa(ae, c);

        if(kick->forcemode == FASTPM_FORCE_FASTPM) {
            /* FastPM kernel: exact integral of the growth-factor ODE. */
            kick->dda[i] = -1.5 * Omega_mc * ac
                         * E_c
                         * (G_f(&gi_e) - G_f(&gi_i)) / g_f(&gi_c);
        } else {
            /* Standard / COLA kernel via the Sphi quadrature. */
            kick->dda[i] = -1.5 * Omega_m0
                * Sphi(ai, ae, ac, fastpm->config->nLPT, c, kick->forcemode == FASTPM_FORCE_COLA);
        }
        kick->Dv1[i] = D1_e * ae * ae * E_e * f1_e - Dv1i;
        kick->Dv2[i] = D2_e * ae * ae * E_e * f2_e - Dv2i;
    }
    kick->ai = ai;
    kick->ac = ac;
    kick->af = af;

    /* Output growth and FastPM factors for reference; the values printed
       are evaluated at ai (gi_i), not af.
       This is a weird place to put this, but it's convenient because G and g are static */
    fastpm_info("Growth/FastPM factors at a = %6.4f: D1=%g, D2=%g, f1=%g, f2=%g, G_p=%g, G_f=%g, g_p=%g, g_f=%g\n",
            ai,
            gi_i.D1,
            gi_i.D2,
            gi_i.f1,
            gi_i.f2,
            G_p(&gi_i),
            G_f(&gi_i),
            g_p(&gi_i),
            g_f(&gi_i));
}
/* Tabulate the drift factors for a step from AI to AF with the kernel
   referenced at AC: fills drift->dyyy (position increment per unit
   velocity) and drift->da1/da2 (cumulative 1LPT/2LPT growth since AI)
   on a uniform grid of nsamples scale factors, plus the LPT velocity
   normalizations Dv1/Dv2 at AC used by the COLA drift. */
void
fastpm_drift_init(FastPMDriftFactor * drift, FastPMSolver * fastpm,
        double ai, double ac, double af)
{
    FastPMCosmology * c = fastpm->cosmology;
    drift->forcemode = fastpm->config->FORCE_TYPE;

    FastPMGrowthInfo gi_i;  /* growth info at ai */
    FastPMGrowthInfo gi_c;  /* growth info at ac */
    FastPMGrowthInfo gi_e;  /* growth info at each sample ae (reused) */
    fastpm_growth_info_init(&gi_i, ai, c);
    fastpm_growth_info_init(&gi_c, ac, c);

    double E_c = HubbleEa(ac, c);
    double D1_i = gi_i.D1;
    double D2_i = gi_i.D2;
    double D1_c = gi_c.D1;
    double D2_c = gi_c.D2;
    double f1_c = gi_c.f1;
    double f2_c = gi_c.f2;

    drift->nsamples = 32;
    int i;
    for(i = 0; i < drift->nsamples; i ++ ) {
        /* ae walks linearly from ai (i = 0) to af (i = nsamples - 1). */
        double ae = ai * (1.0 * (drift->nsamples - 1 - i) / (drift->nsamples - 1))
                  + af * (1.0 * i / (drift->nsamples - 1));

        fastpm_growth_info_init(&gi_e, ae, c); // overwrite each iteration

        double D1_e = gi_e.D1;
        double D2_e = gi_e.D2;

        if (drift->forcemode == FASTPM_FORCE_FASTPM) {
            /* FastPM kernel: exact integral of the growth-factor ODE. */
            drift->dyyy[i] = 1 / (ac * ac * ac * E_c)
                           * (G_p(&gi_e) - G_p(&gi_i)) / g_p(&gi_c);
        } else {
            /* Standard / COLA kernel via the Sq quadrature. */
            drift->dyyy[i] = Sq(ai, ae, ac, fastpm->config->nLPT, c, drift->forcemode == FASTPM_FORCE_COLA);
        }
        drift->da1[i] = D1_e - D1_i; // change in D_1lpt
        drift->da2[i] = D2_e - D2_i; // change in D_2lpt
    }
    drift->af = af;
    drift->ai = ai;
    drift->ac = ac;
    /* LPT velocity growth at ac; used by the COLA drift to strip the
       LPT part off the particle velocity. */
    drift->Dv1 = D1_c * ac * ac * E_c * f1_c;
    drift->Dv2 = D2_c * ac * ac * E_c * f2_c;
}
/* Drift every particle of PI from a_x to AF and store the new positions
 * in PO. */
void
fastpm_drift_store(FastPMDriftFactor * drift,
    FastPMStore * pi, FastPMStore * po,
    double af)
{
    int np = pi->np;
    int i;

#pragma omp parallel for
    for(i = 0; i < np; i++) {
        double xnew[3] = {0};
        int d;
        fastpm_drift_one(drift, pi, i, xnew, af);
        for(d = 0; d < 3; d ++) {
            po->x[i][d] = xnew[d];
        }
    }

    /* Positions are now synchronized at AF. */
    po->meta.a_x = af;
}
//
// Functions for our modified time-stepping (used when StdDA=0):
//
/* Parameter bundle threaded through GSL's void* callback interface by
   integrate() below. */
struct iparam {
    FastPMCosmology * cosmology;
    double nLPT;  /* exponent of the assumed velocity time dependence B a^nLPT */
};
double gpQ(double a, double nLPT) {
return pow(a, nLPT);
}
/* Standard drift integrand: 1 / (a^3 E(a)). */
static double stddriftfunc (double a, struct iparam * iparam) {
    return 1 / (pow(a, 3) * HubbleEa(a, iparam->cosmology));
}
/* Non-standard drift integrand: gpQ(a) / (a^3 E(a)); normalized by
   gpQ(aRef) in Sq(). */
static double nonstddriftfunc (double a, struct iparam * iparam) {
    return gpQ(a, iparam->nLPT)/(pow(a, 3) * HubbleEa(a, iparam->cosmology));
}
/* Standard kick integrand: 1 / (a^2 E(a)). */
static double stdkickfunc (double a, struct iparam * iparam) {
    return 1/ (pow(a, 2) * HubbleEa(a, iparam->cosmology));
}
/* GSL adapter: PARAMS is a two-element void* array packed by integrate()
 * holding {integrand function, struct iparam *}. */
static double integrand(double a, void * params) {
    void ** packed = (void**) params;
    double (*f)(double, struct iparam *) = packed[0];
    return f(a, (struct iparam *) packed[1]);
}
/* Integrate FUNC(a, iparam) over [AI, AF] with GSL adaptive Gauss-Kronrod
 * (61-point rule, relative tolerance 1e-8). */
double integrate(double ai, double af,
        struct iparam * iparam,
        double (*func)(double , struct iparam * )) {

    double result;
    double error;
    gsl_function F;
    gsl_integration_workspace * w = gsl_integration_workspace_alloc (5000);

    /* Pack the callback and its parameters through GSL's void* channel;
     * the compound literal stays alive until this function returns. */
    F.function = integrand;
    F.params = (void*[]){func, iparam};

    gsl_integration_qag (&F, ai, af, 0, 1e-8, 5000, 6,
            w, &result, &error);

    gsl_integration_workspace_free (w);
    return result;
}
/*
When StdDA=0, one needs to set nLPT.
assumes time dep. for velocity = B a^nLPT
nLPT is a real number. Sane values lie in the range (-4,3.5). Cannot be 0, but of course can be -> 0 (say 0.001).
See Section A.3 of TZE.
*/
/* Drift factor from AI to AF with kernel referenced at AREF.
 * Returns the standard form (integral of 1/(a^3 E)) or, when
 * USE_NONSTDDA, the modified form normalized by gpQ(aRef). */
static double
Sq(double ai, double af, double aRef, double nLPT, FastPMCosmology * c, int USE_NONSTDDA)
{
    struct iparam par[1];
    par->cosmology = c;
    par->nLPT = nLPT;

    double std = integrate(ai, af, par, stddriftfunc);
    double nonstd = integrate(ai, af, par, nonstddriftfunc);
    nonstd /= gpQ(aRef, nLPT);

    return USE_NONSTDDA ? nonstd : std;
}
double DERgpQ(double a, double nLPT) {
/* This must return d(gpQ)/da */
return nLPT*pow(a, nLPT-1);
}
/* Kick factor from AI to AF with kernel referenced at AREF.
 * Returns the standard form (integral of 1/(a^2 E)) or, when
 * USE_NONSTDDA, the closed form implied by the assumed gpQ. */
static double
Sphi(double ai, double af, double aRef, double nLPT, FastPMCosmology * c, int USE_NONSTDDA)
{
    struct iparam par[1];
    par->cosmology = c;
    par->nLPT = nLPT;

    /* Non-standard kick: closed form from gpQ and its derivative. */
    double nonstd = (gpQ(af, nLPT) - gpQ(ai, nLPT)) * aRef
        / (pow(aRef, 3) * HubbleEa(aRef, c) * DERgpQ(aRef, nLPT));

    /* Standard kick: by quadrature. */
    double std = integrate(ai, af, par, stdkickfunc);

    return USE_NONSTDDA ? nonstd : std;
}
|
tree-parloops.c | /* Loop autoparallelization.
Copyright (C) 2006, 2007 Free Software Foundation, Inc.
Contributed by Sebastian Pop <pop@cri.ensmp.fr> and
Zdenek Dvorak <dvorakz@suse.cz>.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING. If not, write to the Free
Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA. */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "rtl.h"
#include "tree-flow.h"
#include "cfgloop.h"
#include "ggc.h"
#include "tree-data-ref.h"
#include "diagnostic.h"
#include "tree-pass.h"
#include "tree-scalar-evolution.h"
#include "hashtab.h"
#include "langhooks.h"
#include "tree-vectorizer.h"
/* This pass tries to distribute iterations of loops into several threads.
The implementation is straightforward -- for each loop we test whether its
iterations are independent, and if it is the case (and some additional
conditions regarding profitability and correctness are satisfied), we
add OMP_PARALLEL and OMP_FOR codes and let omp expansion machinery do
its job.
The most of the complexity is in bringing the code into shape expected
by the omp expanders:
-- for OMP_FOR, ensuring that the loop has only one induction variable
and that the exit test is at the start of the loop body
-- for OMP_PARALLEL, replacing the references to local addressable
variables by accesses through pointers, and breaking up ssa chains
by storing the values incoming to the parallelized loop to a structure
passed to the new function as an argument (something similar is done
in omp gimplification, unfortunately only a small part of the code
can be shared).
TODO:
-- if there are several parallelizable loops in a function, it may be
possible to generate the threads just once (using synchronization to
ensure that cross-loop dependences are obeyed).
-- handling of common scalar dependence patterns (accumulation, ...)
-- handling of non-innermost loops */
/*
Reduction handling:
currently we use vect_is_simple_reduction() to detect reduction patterns.
The code transformation will be introduced by an example.
parloop
{
int sum=1;
for (i = 0; i < N; i++)
{
x[i] = i + 3;
sum+=x[i];
}
}
gimple-like code:
header_bb:
# sum_29 = PHI <sum_11(5), 1(3)>
# i_28 = PHI <i_12(5), 0(3)>
D.1795_8 = i_28 + 3;
x[i_28] = D.1795_8;
sum_11 = D.1795_8 + sum_29;
i_12 = i_28 + 1;
if (N_6(D) > i_12)
goto header_bb;
exit_bb:
# sum_21 = PHI <sum_11(4)>
printf (&"%d"[0], sum_21);
after reduction transformation (only relevant parts):
parloop
{
....
# Storing the the initial value given by the user. #
.paral_data_store.32.sum.27 = 1;
#pragma omp parallel num_threads(4)
#pragma omp for schedule(static)
# The neutral element corresponding to the particular
reduction's operation, e.g. 0 for PLUS_EXPR,
1 for MULT_EXPR, etc. replaces the user's initial value. #
# sum.27_29 = PHI <sum.27_11, 0>
sum.27_11 = D.1827_8 + sum.27_29;
OMP_CONTINUE
# Adding this reduction phi is done at create_phi_for_local_result() #
# sum.27_56 = PHI <sum.27_11, 0>
OMP_RETURN
# Creating the atomic operation is done at
create_call_for_reduction_1() #
#pragma omp atomic_load
D.1839_59 = *&.paral_data_load.33_51->reduction.23;
D.1840_60 = sum.27_56 + D.1839_59;
#pragma omp atomic_store (D.1840_60);
OMP_RETURN
# collecting the result after the join of the threads is done at
create_loads_for_reductions().
The value computed by the threads is loaded from the
shared struct. #
.paral_data_load.33_52 = &.paral_data_store.32;
sum_37 = .paral_data_load.33_52->sum.27;
sum_43 = D.1795_41 + sum_37;
exit bb:
# sum_21 = PHI <sum_43, sum_26>
printf (&"%d"[0], sum_21);
...
}
*/
/* Minimal number of iterations of a loop that should be executed in each
thread. */
#define MIN_PER_THREAD 100
/* Element of the hashtable, representing a
   reduction in the current loop.  Keyed by reduc_phi (see
   reduction_info_hash / reduction_info_eq).  */
struct reduction_info
{
  tree reduc_stmt;		/* reduction statement.  */
  tree reduc_phi;		/* The phi node defining the reduction.  */
  enum tree_code reduction_code;/* code for the reduction operation.  */
  tree keep_res;		/* The PHI_RESULT of this phi is the resulting value
				   of the reduction variable when exiting the loop.  */
  tree initial_value;		/* The initial value of the reduction var before entering the loop.  */
  tree field;			/* the name of the field in the parloop data structure intended for reduction.  */
  tree init;			/* reduction initialization value.  */
  tree new_phi;			/* (helper field) Newly created phi node whose result
				   will be passed to the atomic operation.  Represents
				   the local result each thread computed for the reduction
				   operation.  */
};
/* Equality and hash functions for hashtab code. */
/* Hashtab equality callback: two reduction_info entries are equal iff
   they describe the same reduction phi.  */
static int
reduction_info_eq (const void *aa, const void *bb)
{
  const struct reduction_info *ra = (const struct reduction_info *) aa;
  const struct reduction_info *rb = (const struct reduction_info *) bb;

  return ra->reduc_phi == rb->reduc_phi;
}
/* Hashtab hash callback: hash a reduction_info by its phi pointer.  */
static hashval_t
reduction_info_hash (const void *aa)
{
  return htab_hash_pointer (((const struct reduction_info *) aa)->reduc_phi);
}
/* Return the reduction_info recorded for PHI in REDUCTION_LIST, or NULL
   when PHI is not a known reduction.  */
static struct reduction_info *
reduction_phi (htab_t reduction_list, tree phi)
{
  struct reduction_info key;

  if (htab_elements (reduction_list) == 0)
    return NULL;

  key.reduc_phi = phi;
  return htab_find (reduction_list, &key);
}
/* Element of hashtable of names to copy.  Keyed by the SSA version
   number (see name_to_copy_elt_hash / name_to_copy_elt_eq).  */
struct name_to_copy_elt
{
  unsigned version;	/* The version of the name to copy.  */
  tree new_name;	/* The new name used in the copy.  */
  tree field;		/* The field of the structure used to pass the
			   value.  */
};
/* Equality and hash functions for hashtab code. */
/* Hashtab equality callback: entries match iff they refer to the same
   SSA name version.  */
static int
name_to_copy_elt_eq (const void *aa, const void *bb)
{
  const struct name_to_copy_elt *ea = (const struct name_to_copy_elt *) aa;
  const struct name_to_copy_elt *eb = (const struct name_to_copy_elt *) bb;

  return ea->version == eb->version;
}
/* Hashtab hash callback: the SSA version number is its own hash.  */
static hashval_t
name_to_copy_elt_hash (const void *aa)
{
  return (hashval_t) ((const struct name_to_copy_elt *) aa)->version;
}
/* Returns true if the iterations of LOOP are independent on each other (that
   is, if we can execute them in parallel), and if LOOP satisfies other
   conditions that we need to be able to parallelize it.  Description of number
   of iterations is stored to NITER.  Reduction analysis is done, if
   reductions are found, they are inserted to the REDUCTION_LIST.  */
static bool
loop_parallel_p (struct loop *loop, htab_t reduction_list, struct tree_niter_desc *niter)
{
  edge exit = single_dom_exit (loop);
  VEC (ddr_p, heap) * dependence_relations;
  VEC (data_reference_p, heap) * datarefs;
  lambda_trans_matrix trans;
  bool ret = false;
  tree phi;
  loop_vec_info simple_loop_info;

  /* Only consider innermost loops with just one exit.  The innermost-loop
     restriction is not necessary, but it makes things simpler.  */
  if (loop->inner || !exit)
    return false;

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "\nConsidering loop %d\n", loop->num);

  /* We need to know # of iterations, and there should be no uses of values
     defined inside loop outside of it, unless the values are invariants of
     the loop.  */
  if (!number_of_iterations_exit (loop, exit, niter, false))
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, " FAILED: number of iterations not known\n");
      return false;
    }

  /* Reuse the vectorizer's loop-form analysis to drive reduction
     detection below; NULL simply means no reductions will be found.  */
  simple_loop_info = vect_analyze_loop_form (loop);

  /* Walk the header phis looking for reduction patterns; record every
     detected reduction in REDUCTION_LIST.  */
  for (phi = phi_nodes (loop->header); phi; phi = PHI_CHAIN (phi))
    {
      tree reduc_stmt = NULL, operation;

      /* ??? TODO: Change this into a generic function that
         recognizes reductions.  */
      if (!is_gimple_reg (PHI_RESULT (phi)))
	continue;
      if (simple_loop_info)
	reduc_stmt = vect_is_simple_reduction (simple_loop_info, phi);

      /* Create a reduction_info struct, initialize it and insert it to
	 the reduction list.  */
      if (reduc_stmt)
	{
	  PTR *slot;
	  struct reduction_info *new_reduction;

	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file,
		       "Detected reduction. reduction stmt is: \n");
	      print_generic_stmt (dump_file, reduc_stmt, 0);
	      fprintf (dump_file, "\n");
	    }

	  new_reduction = XCNEW (struct reduction_info);

	  new_reduction->reduc_stmt = reduc_stmt;
	  new_reduction->reduc_phi = phi;
	  operation = GIMPLE_STMT_OPERAND (reduc_stmt, 1);
	  new_reduction->reduction_code = TREE_CODE (operation);
	  slot = htab_find_slot (reduction_list, new_reduction, INSERT);
	  *slot = new_reduction;
	}
    }

  /* Values used outside the loop are acceptable only when they are part
     of a detected reduction pattern; otherwise give up.  */
  for (phi = phi_nodes (exit->dest); phi; phi = PHI_CHAIN (phi))
    {
      struct reduction_info *red;
      imm_use_iterator imm_iter;
      use_operand_p use_p;
      tree reduc_phi;
      tree val = PHI_ARG_DEF_FROM_EDGE (phi, exit);

      if (is_gimple_reg (val))
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "phi is ");
	      print_generic_expr (dump_file, phi, 0);
	      fprintf (dump_file, "arg of phi to exit: value ");
	      print_generic_expr (dump_file, val, 0);
	      fprintf (dump_file, " used outside loop\n");
	      fprintf (dump_file,
		       " checking if it a part of reduction pattern: \n");
	    }
	  if (htab_elements (reduction_list) == 0)
	    {
	      if (dump_file && (dump_flags & TDF_DETAILS))
		fprintf (dump_file,
			 " FAILED: it is not a part of reduction.\n");
	      return false;
	    }
	  /* Find the in-loop phi that feeds VAL; that is the candidate
	     reduction phi to look up.  */
	  reduc_phi = NULL;
	  FOR_EACH_IMM_USE_FAST (use_p, imm_iter, val)
	    {
	      if (flow_bb_inside_loop_p (loop, bb_for_stmt (USE_STMT (use_p))))
		{
		  reduc_phi = USE_STMT (use_p);
		  break;
		}
	    }
	  red = reduction_phi (reduction_list, reduc_phi);
	  if (red == NULL)
	    {
	      if (dump_file && (dump_flags & TDF_DETAILS))
		fprintf (dump_file,
			 " FAILED: it is not a part of reduction.\n");
	      return false;
	    }
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, "reduction phi is ");
	      print_generic_expr (dump_file, red->reduc_phi, 0);
	      fprintf (dump_file, "reduction stmt is ");
	      print_generic_expr (dump_file, red->reduc_stmt, 0);
	    }
	}
    }

  /* The iterations of the loop may communicate only through bivs whose
     iteration space can be distributed efficiently.  */
  for (phi = phi_nodes (loop->header); phi; phi = PHI_CHAIN (phi))
    {
      tree def = PHI_RESULT (phi);
      affine_iv iv;

      if (is_gimple_reg (def) && !simple_iv (loop, phi, def, &iv, true))
	{
	  struct reduction_info *red;

	  red = reduction_phi (reduction_list, phi);
	  if (red == NULL)
	    {
	      if (dump_file && (dump_flags & TDF_DETAILS))
		fprintf (dump_file,
			 " FAILED: scalar dependency between iterations\n");
	      return false;
	    }
	}
    }

  /* We need to version the loop to verify assumptions in runtime.  */
  if (!can_duplicate_loop_p (loop))
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, " FAILED: cannot be duplicated\n");
      return false;
    }

  /* Check for problems with dependences.  If the loop can be reversed,
     the iterations are independent.  */
  datarefs = VEC_alloc (data_reference_p, heap, 10);
  dependence_relations = VEC_alloc (ddr_p, heap, 10 * 10);
  compute_data_dependences_for_loop (loop, true, &datarefs,
				     &dependence_relations);
  if (dump_file && (dump_flags & TDF_DETAILS))
    dump_data_dependence_relations (dump_file, dependence_relations);

  /* A 1x1 matrix with entry -1 represents reversing the single loop;
     legality of that transform proves iteration independence.  */
  trans = lambda_trans_matrix_new (1, 1);
  LTM_MATRIX (trans)[0][0] = -1;

  if (lambda_transform_legal_p (trans, 1, dependence_relations))
    {
      ret = true;
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, " SUCCESS: may be parallelized\n");
    }
  else if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file,
	     " FAILED: data dependencies exist across iterations\n");

  free_dependence_relations (dependence_relations);
  free_data_refs (datarefs);

  return ret;
}
/* Return true when LOOP contains basic blocks marked with the
   BB_IRREDUCIBLE_LOOP flag.  */
static inline bool
loop_has_blocks_with_irreducible_flag (struct loop *loop)
{
  basic_block *body = get_loop_body_in_dom_order (loop);
  bool found = false;
  unsigned i;

  for (i = 0; i < loop->num_nodes && !found; i++)
    if (body[i]->flags & BB_IRREDUCIBLE_LOOP)
      found = true;

  free (body);
  return found;
}
/* Assigns the address of OBJ in TYPE to an ssa name, and returns this name.
   The assignment statement is placed before LOOP.  DECL_ADDRESS maps decls
   to their addresses that can be reused.  The address of OBJ is known to
   be invariant in the whole function.  */
static tree
take_address_of (tree obj, tree type, struct loop *loop, htab_t decl_address)
{
  int uid;
  void **dslot;
  struct int_tree_map ielt, *nielt;
  tree *var_p, name, bvar, stmt, addr;
  edge entry = loop_preheader_edge (loop);

  /* Since the address of OBJ is invariant, the trees may be shared.
     Avoid rewriting unrelated parts of the code.  */
  obj = unshare_expr (obj);
  /* Strip component refs down to the base variable.  */
  for (var_p = &obj;
       handled_component_p (*var_p);
       var_p = &TREE_OPERAND (*var_p, 0))
    continue;
  uid = DECL_UID (*var_p);

  /* Look up (or create) the ssa name holding the base address, caching
     it in DECL_ADDRESS keyed by the decl's uid.  */
  ielt.uid = uid;
  dslot = htab_find_slot_with_hash (decl_address, &ielt, uid, INSERT);
  if (!*dslot)
    {
      /* First time: materialize `tmp = &base' on the preheader edge.  */
      addr = build_addr (*var_p, current_function_decl);
      bvar = create_tmp_var (TREE_TYPE (addr), get_name (*var_p));
      add_referenced_var (bvar);
      stmt = build_gimple_modify_stmt (bvar, addr);
      name = make_ssa_name (bvar, stmt);
      GIMPLE_STMT_OPERAND (stmt, 0) = name;
      bsi_insert_on_edge_immediate (entry, stmt);

      nielt = XNEW (struct int_tree_map);
      nielt->uid = uid;
      nielt->to = name;
      *dslot = nielt;
    }
  else
    name = ((struct int_tree_map *) *dslot)->to;

  /* OBJ had component refs: rebuild the full address on top of the
     cached base address and gimplify it on the preheader edge.  */
  if (var_p != &obj)
    {
      *var_p = build1 (INDIRECT_REF, TREE_TYPE (*var_p), name);
      name = force_gimple_operand (build_addr (obj, current_function_decl),
				   &stmt, true, NULL_TREE);
      if (stmt)
	bsi_insert_on_edge_immediate (entry, stmt);
    }

  /* Convert to the requested pointer type if necessary.  */
  if (TREE_TYPE (name) != type)
    {
      name = force_gimple_operand (fold_convert (type, name), &stmt, true,
				   NULL_TREE);
      if (stmt)
	bsi_insert_on_edge_immediate (entry, stmt);
    }

  return name;
}
/* Callback for htab_traverse.  Create the initialization statement
   for reduction described in SLOT, and place it at the preheader of
   the loop described in DATA.  Returns 1 so traversal continues.  */
static int
initialize_reductions (void **slot, void *data)
{
  tree init, c;
  tree bvar, type, arg;
  edge e;

  struct reduction_info *reduc = *slot;
  struct loop *loop = (struct loop *) data;

  /* Create initialization in preheader:
     reduction_variable = initialization value of reduction.  */

  /* In the phi node at the header, replace the argument coming
     from the preheader with the reduction initialization value.  */

  /* Create a new variable to initialize the reduction.  */
  type = TREE_TYPE (PHI_RESULT (reduc->reduc_phi));
  bvar = create_tmp_var (type, "reduction");
  add_referenced_var (bvar);

  /* Build a dummy OMP_CLAUSE_REDUCTION so omp_reduction_init can hand
     back the neutral element for this reduction's operation.  */
  c = build_omp_clause (OMP_CLAUSE_REDUCTION);
  OMP_CLAUSE_REDUCTION_CODE (c) = reduc->reduction_code;
  OMP_CLAUSE_DECL (c) =
    SSA_NAME_VAR (GIMPLE_STMT_OPERAND (reduc->reduc_stmt, 0));

  init = omp_reduction_init (c, TREE_TYPE (bvar));
  reduc->init = init;

  /* Replace the argument representing the initialization value
     with the initialization value for the reduction (neutral
     element for the particular operation, e.g. 0 for PLUS_EXPR,
     1 for MULT_EXPR, etc).

     Keep the old value in a new variable "reduction_initial",
     that will be taken in consideration after the parallel
     computing is done.  */

  e = loop_preheader_edge (loop);
  arg = PHI_ARG_DEF_FROM_EDGE (reduc->reduc_phi, e);
  /* Create new variable to hold the initial value.  */

  SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE
	   (reduc->reduc_phi, loop_preheader_edge (loop)), init);
  reduc->initial_value = arg;
  return 1;
}
/* State threaded through walk_tree by eliminate_local_variables_stmt.  */
struct elv_data
{
  struct loop *loop;	/* Loop out of which addresses are hoisted.  */
  htab_t decl_address;	/* Cache of decl uid -> ssa name holding its address.  */
  bool changed;		/* Set when a reference was rewritten, so the
			   statement needs update_stmt.  */
};
/* Eliminates references to local variables in *TP out of LOOP.  DECL_ADDRESS
   contains addresses of the references that had their address taken already.
   If the expression is changed, CHANGED is set to true.  Callback for
   walk_tree.  */
static tree
eliminate_local_variables_1 (tree *tp, int *walk_subtrees, void *data)
{
  struct elv_data *dta = data;
  tree t = *tp, var, addr, addr_type, type, obj;

  /* A direct use of a local variable: rewrite it as *(&var), with the
     address hoisted out of the loop.  */
  if (DECL_P (t))
    {
      *walk_subtrees = 0;

      if (!SSA_VAR_P (t) || DECL_EXTERNAL (t))
	return NULL_TREE;

      type = TREE_TYPE (t);
      addr_type = build_pointer_type (type);
      addr = take_address_of (t, addr_type, dta->loop, dta->decl_address);
      *tp = build1 (INDIRECT_REF, TREE_TYPE (*tp), addr);

      dta->changed = true;
      return NULL_TREE;
    }

  if (TREE_CODE (t) == ADDR_EXPR)
    {
      /* ADDR_EXPR may appear in two contexts:
	 -- as a gimple operand, when the address taken is a function invariant
	 -- as gimple rhs, when the resulting address in not a function
	    invariant
	 We do not need to do anything special in the latter case (the base of
	 the memory reference whose address is taken may be replaced in the
	 DECL_P case).  The former case is more complicated, as we need to
	 ensure that the new address is still a gimple operand.  Thus, it
	 is not sufficient to replace just the base of the memory reference --
	 we need to move the whole computation of the address out of the
	 loop.  */
      if (!is_gimple_val (t))
	return NULL_TREE;

      *walk_subtrees = 0;
      obj = TREE_OPERAND (t, 0);
      var = get_base_address (obj);
      if (!var || !SSA_VAR_P (var) || DECL_EXTERNAL (var))
	return NULL_TREE;

      addr_type = TREE_TYPE (t);
      addr = take_address_of (obj, addr_type, dta->loop, dta->decl_address);
      *tp = addr;

      dta->changed = true;
      return NULL_TREE;
    }

  /* Leaves of interest are only decls and addresses; do not descend
     into non-expression nodes.  */
  if (!EXPR_P (t) && !GIMPLE_STMT_P (t))
    *walk_subtrees = 0;

  return NULL_TREE;
}
/* Moves the references to local variables in STMT from LOOP. DECL_ADDRESS
contains addresses for the references for that we have already taken
them. */
static void
eliminate_local_variables_stmt (struct loop *loop, tree stmt,
				htab_t decl_address)
{
  struct elv_data walk_data;

  walk_data.loop = loop;
  walk_data.decl_address = decl_address;
  walk_data.changed = false;

  /* Rewrite all local-variable references inside STMT.  */
  walk_tree (&stmt, eliminate_local_variables_1, &walk_data, NULL);

  /* Operands may have changed; bring the statement's operand caches
     up to date.  */
  if (walk_data.changed)
    update_stmt (stmt);
}
/* Eliminates the references to local variables from LOOP.
This includes:
1) Taking address of a local variable -- these are moved out of the
loop (and temporary variable is created to hold the address if
necessary).
2) Dereferencing a local variable -- these are replaced with indirect
references. */
/* Body of the pass over LOOP: walks every phi-free statement of every block
   and eliminates local-variable references (see the comment above).  */

static void
eliminate_local_variables (struct loop *loop)
{
  basic_block bb, *body = get_loop_body (loop);
  unsigned i;
  block_stmt_iterator bsi;
  htab_t decl_address = htab_create (10, int_tree_map_hash, int_tree_map_eq,
				     free);

  /* Find and rename the ssa names defined outside of loop.  */
  for (i = 0; i < loop->num_nodes; i++)
    {
      bb = body[i];

      for (bsi = bsi_start (bb); !bsi_end_p (bsi); bsi_next (&bsi))
	eliminate_local_variables_stmt (loop, bsi_stmt (bsi), decl_address);
    }

  htab_delete (decl_address);
  /* get_loop_body allocates the block array; the original leaked it
     (compare separate_decls_in_loop, which frees its copy).  */
  free (body);
}
/* If COPY_NAME_P is true, creates and returns a duplicate of NAME.
The copies are stored to NAME_COPIES, if NAME was already duplicated,
its duplicate stored in NAME_COPIES is returned.
Regardless of COPY_NAME_P, the decl used as a base of the ssa name is also
duplicated, storing the copies in DECL_COPIES. */
static tree
separate_decls_in_loop_name (tree name,
			     htab_t name_copies, htab_t decl_copies,
			     bool copy_name_p)
{
  tree copy, var, var_copy;
  unsigned idx, uid, nuid;
  struct int_tree_map ielt, *nielt;
  struct name_to_copy_elt elt, *nelt;
  void **slot, **dslot;

  /* Only SSA names are processed; anything else is returned unchanged.  */
  if (TREE_CODE (name) != SSA_NAME)
    return name;

  idx = SSA_NAME_VERSION (name);
  elt.version = idx;
  slot = htab_find_slot_with_hash (name_copies, &elt, idx,
				   copy_name_p ? INSERT : NO_INSERT);
  /* Return the duplicate created on an earlier visit, if any.  */
  if (slot && *slot)
    return ((struct name_to_copy_elt *) *slot)->new_name;

  /* Duplicate the base decl of NAME, unless that was already done.  */
  var = SSA_NAME_VAR (name);
  uid = DECL_UID (var);
  ielt.uid = uid;
  dslot = htab_find_slot_with_hash (decl_copies, &ielt, uid, INSERT);
  if (!*dslot)
    {
      var_copy = create_tmp_var (TREE_TYPE (var), get_name (var));
      DECL_GIMPLE_REG_P (var_copy) = DECL_GIMPLE_REG_P (var);
      add_referenced_var (var_copy);
      nielt = XNEW (struct int_tree_map);
      nielt->uid = uid;
      nielt->to = var_copy;
      *dslot = nielt;

      /* Ensure that when we meet this decl next time, we won't duplicate
         it again.  */
      nuid = DECL_UID (var_copy);
      ielt.uid = nuid;
      dslot = htab_find_slot_with_hash (decl_copies, &ielt, nuid, INSERT);
      gcc_assert (!*dslot);
      nielt = XNEW (struct int_tree_map);
      nielt->uid = nuid;
      nielt->to = var_copy;
      *dslot = nielt;
    }
  else
    var_copy = ((struct int_tree_map *) *dslot)->to;

  if (copy_name_p)
    {
      /* Create the duplicate SSA name and record it under NAME's
	 version.  */
      copy = duplicate_ssa_name (name, NULL_TREE);
      nelt = XNEW (struct name_to_copy_elt);
      nelt->version = idx;
      nelt->new_name = copy;
      nelt->field = NULL_TREE;
      *slot = nelt;
    }
  else
    {
      /* NO_INSERT lookup must have failed (otherwise we returned above).  */
      gcc_assert (!slot);
      copy = name;
    }

  /* In either case, rebase the (possibly duplicated) name on the
     duplicated decl.  */
  SSA_NAME_VAR (copy) = var_copy;
  return copy;
}
/* Finds the ssa names used in STMT that are defined outside of LOOP and
replaces such ssa names with their duplicates. The duplicates are stored to
NAME_COPIES. Base decls of all ssa names used in STMT
(including those defined in LOOP) are replaced with the new temporary
variables; the replacement decls are stored in DECL_COPIES. */
static void
separate_decls_in_loop_stmt (struct loop *loop, tree stmt,
			     htab_t name_copies, htab_t decl_copies)
{
  use_operand_p use;
  def_operand_p def;
  ssa_op_iter oi;
  tree name, copy;
  bool copy_name_p;

  mark_virtual_ops_for_renaming (stmt);

  /* Definitions are inside the loop, so they are never duplicated --
     only their base decls are rebased onto the temporary variables.  */
  FOR_EACH_PHI_OR_STMT_DEF (def, stmt, oi, SSA_OP_DEF)
    {
      name = DEF_FROM_PTR (def);
      gcc_assert (TREE_CODE (name) == SSA_NAME);
      copy = separate_decls_in_loop_name (name, name_copies, decl_copies,
					  false);
      gcc_assert (copy == name);
    }

  /* Uses of names that are invariant in LOOP (defined outside of it)
     get a full duplicate; other uses only have their decl rebased.  */
  FOR_EACH_PHI_OR_STMT_USE (use, stmt, oi, SSA_OP_USE)
    {
      name = USE_FROM_PTR (use);
      if (TREE_CODE (name) != SSA_NAME)
	continue;

      copy_name_p = expr_invariant_in_loop_p (loop, name);
      copy = separate_decls_in_loop_name (name, name_copies, decl_copies,
					  copy_name_p);
      SET_USE (use, copy);
    }
}
/* Callback for htab_traverse. Adds a field corresponding to the reduction
specified in SLOT. The type is passed in DATA. */
static int
add_field_for_reduction (void **slot, void *data)
{
  struct reduction_info *red = *slot;
  tree type = data;
  tree reduc_var = SSA_NAME_VAR (GIMPLE_STMT_OPERAND (red->reduc_stmt, 0));
  tree new_field;

  /* The field mirrors the name and type of the reduction variable.  */
  new_field = build_decl (FIELD_DECL, DECL_NAME (reduc_var),
			  TREE_TYPE (reduc_var));
  insert_field_into_struct (type, new_field);

  red->field = new_field;

  return 1;
}
/* Callback for htab_traverse. Adds a field corresponding to a ssa name
described in SLOT. The type is passed in DATA. */
static int
add_field_for_name (void **slot, void *data)
{
  struct name_to_copy_elt *elt = *slot;
  tree type = data;
  tree ssa = ssa_name (elt->version);
  tree base_var = SSA_NAME_VAR (ssa);
  tree new_field;

  /* The field mirrors the name and type of the underlying variable.  */
  new_field = build_decl (FIELD_DECL, DECL_NAME (base_var),
			  TREE_TYPE (base_var));
  insert_field_into_struct (type, new_field);

  elt->field = new_field;

  return 1;
}
/* Callback for htab_traverse. A local result is the intermediate result
computed by a single
thread, or the initial value in case no iteration was executed.
This function creates a phi node reflecting these values.
The phi's result will be stored in NEW_PHI field of the
reduction's data structure. */
static int
create_phi_for_local_result (void **slot, void *data)
{
  struct reduction_info *reduc = *slot;
  struct loop *loop = data;
  edge e;
  tree new_phi;
  basic_block store_bb;
  tree local_res;

  /* STORE_BB is the block where the phi
     should be stored.  It is the destination of the loop exit.
     (Find the fallthru edge from OMP_CONTINUE).  */
  store_bb = FALLTHRU_EDGE (loop->latch)->dest;

  /* STORE_BB has two predecessors.  One coming from the loop
     (the reduction's result is computed at the loop),
     and another coming from a block preceding the loop,
     when no iterations
     are executed (the initial value should be taken).  */
  if (EDGE_PRED (store_bb, 0) == FALLTHRU_EDGE (loop->latch))
    e = EDGE_PRED (store_bb, 1);
  else
    e = EDGE_PRED (store_bb, 0);

  /* The new phi merges the reduction's neutral initial value (no-iterations
     path E) with the value computed in the loop (fallthru path).  */
  local_res = make_ssa_name (SSA_NAME_VAR (GIMPLE_STMT_OPERAND (reduc->reduc_stmt, 0)), NULL_TREE);
  new_phi = create_phi_node (local_res, store_bb);
  SSA_NAME_DEF_STMT (local_res) = new_phi;
  add_phi_arg (new_phi, reduc->init, e);
  add_phi_arg (new_phi, GIMPLE_STMT_OPERAND (reduc->reduc_stmt, 0),
	       FALLTHRU_EDGE (loop->latch));
  reduc->new_phi = new_phi;

  return 1;
}
/* Describes where the data shared among the threads lives and where the
   stores into it / loads out of it are emitted.  */

struct clsn_data
{
  tree store;			/* The shared structure variable itself.  */
  tree load;			/* Pointer (SSA name) it is accessed through
				   inside the outlined body.  */
  basic_block store_bb;		/* Block the stores are generated in.  */
  basic_block load_bb;		/* Block the loads are generated in.  */
};
/* Callback for htab_traverse. Create an atomic instruction for the
reduction described in SLOT.
DATA annotates the place in memory the atomic operation relates to,
and the basic block it needs to be generated in. */
static int
create_call_for_reduction_1 (void **slot, void *data)
{
  struct reduction_info *reduc = *slot;
  struct clsn_data *clsn_data = data;
  block_stmt_iterator bsi;
  tree type = TREE_TYPE (PHI_RESULT (reduc->reduc_phi));
  tree struct_type = TREE_TYPE (TREE_TYPE (clsn_data->load));
  tree load_struct;
  basic_block bb;
  basic_block new_bb;
  edge e;
  tree t, addr, addr_type, ref, x;
  tree tmp_load, load, name;

  /* ADDR is the address of the shared-structure field this reduction
     accumulates into.  */
  load_struct = fold_build1 (INDIRECT_REF, struct_type, clsn_data->load);
  t = build3 (COMPONENT_REF, type, load_struct, reduc->field, NULL_TREE);
  addr_type = build_pointer_type (type);
  addr = build_addr (t, current_function_decl);

  /* Create phi node.  */
  bb = clsn_data->load_bb;

  e = split_block (bb, t);
  new_bb = e->dest;

  /* Atomically load the current field value into TMP_LOAD ...  */
  tmp_load = create_tmp_var (TREE_TYPE (TREE_TYPE (addr)), NULL);
  add_referenced_var (tmp_load);
  tmp_load = make_ssa_name (tmp_load, NULL);
  load = build2 (OMP_ATOMIC_LOAD, void_type_node, tmp_load, addr);
  SSA_NAME_DEF_STMT (tmp_load) = load;
  bsi = bsi_start (new_bb);
  bsi_insert_after (&bsi, load, BSI_NEW_STMT);

  e = split_block (new_bb, load);
  new_bb = e->dest;
  bsi = bsi_start (new_bb);
  ref = tmp_load;

  /* ... combine it with this thread's local result (reduc->new_phi) ...  */
  x =
    fold_build2 (reduc->reduction_code,
		 TREE_TYPE (PHI_RESULT (reduc->new_phi)), ref,
		 PHI_RESULT (reduc->new_phi));

  name =
    force_gimple_operand_bsi (&bsi, x, true, NULL_TREE, true,
			      BSI_CONTINUE_LINKING);

  /* ... and store the combined value back atomically.  */
  x = build1 (OMP_ATOMIC_STORE, void_type_node, name);
  bsi_insert_after (&bsi, x, BSI_NEW_STMT);
  return 1;
}
/* Create the atomic operation at the join point of the threads.
REDUCTION_LIST describes the reductions in the LOOP.
LD_ST_DATA describes the shared data structure where
shared data is stored in and loaded from. */
static void
create_call_for_reduction (struct loop *loop, htab_t reduction_list,
			   struct clsn_data *ld_st_data)
{
  basic_block join_bb;

  /* First create the per-thread partial-result phis.  */
  htab_traverse (reduction_list, create_phi_for_local_result, loop);

  /* The atomic updates go at the join point of the threads, i.e. the
     destination of the fallthru edge from OMP_CONTINUE.  */
  join_bb = FALLTHRU_EDGE (loop->latch)->dest;
  ld_st_data->load_bb = join_bb;

  htab_traverse (reduction_list, create_call_for_reduction_1, ld_st_data);
}
/* Callback for htab_traverse. Loads the final reduction value at the
join point of all threads, and inserts it in the right place. */
static int
create_loads_for_reductions (void **slot, void *data)
{
  struct reduction_info *red = *slot;
  struct clsn_data *clsn_data = data;
  tree stmt;
  block_stmt_iterator bsi;
  tree type = TREE_TYPE (GIMPLE_STMT_OPERAND (red->reduc_stmt, 0));
  tree struct_type = TREE_TYPE (TREE_TYPE (clsn_data->load));
  tree load_struct;
  tree name;
  tree x;

  bsi = bsi_after_labels (clsn_data->load_bb);

  /* Build "load->field", the final value of this reduction.  */
  load_struct = fold_build1 (INDIRECT_REF, struct_type, clsn_data->load);
  load_struct = build3 (COMPONENT_REF, type, load_struct, red->field,
			NULL_TREE);

  x = load_struct;
  /* Re-use the result of the exit phi recorded in keep_res as the load's
     destination, then dispose of the phi itself.  */
  name = PHI_RESULT (red->keep_res);
  stmt = build_gimple_modify_stmt (name, x);
  /* NOTE(review): operand 0 was already set to NAME by
     build_gimple_modify_stmt just above; this store looks redundant --
     confirm and drop.  */
  GIMPLE_STMT_OPERAND (stmt, 0) = name;
  SSA_NAME_DEF_STMT (name) = stmt;

  bsi_insert_after (&bsi, stmt, BSI_NEW_STMT);

  remove_phi_node (red->keep_res, NULL_TREE, false);

  return 1;
}
/* Load the reduction result that was stored in LD_ST_DATA.
REDUCTION_LIST describes the list of reductions that the
loads should be generated for.  */
/* Emits "load = &store" at the head of LD_ST_DATA->load_bb, then generates
   the per-reduction final loads (create_loads_for_reductions) for every
   entry of REDUCTION_LIST.  */

static void
create_final_loads_for_reduction (htab_t reduction_list,
				  struct clsn_data *ld_st_data)
{
  block_stmt_iterator bsi;
  tree t;

  bsi = bsi_after_labels (ld_st_data->load_bb);
  /* The original computed build_fold_addr_expr into T first and then
     immediately overwrote T with the modify statement -- a dead store,
     removed here.  */
  t =
    build_gimple_modify_stmt (ld_st_data->load,
			      build_fold_addr_expr (ld_st_data->store));
  bsi_insert_before (&bsi, t, BSI_NEW_STMT);
  SSA_NAME_DEF_STMT (ld_st_data->load) = t;
  GIMPLE_STMT_OPERAND (t, 0) = ld_st_data->load;

  htab_traverse (reduction_list, create_loads_for_reductions, ld_st_data);
}
/* Callback for htab_traverse. Store the neutral value for the
particular reduction's operation, e.g. 0 for PLUS_EXPR,
1 for MULT_EXPR, etc. into the reduction field.
The reduction is specified in SLOT. The store information is
passed in DATA. */
static int
create_stores_for_reduction (void **slot, void *data)
{
  struct reduction_info *red = *slot;
  struct clsn_data *clsn_data = data;
  block_stmt_iterator bsi;
  tree type = TREE_TYPE (GIMPLE_STMT_OPERAND (red->reduc_stmt, 0));
  tree field_ref, stmt;

  /* Build "store.field = initial_value" and append it to the store
     block.  */
  field_ref = build3 (COMPONENT_REF, type, clsn_data->store,
		      red->field, NULL_TREE);
  stmt = build_gimple_modify_stmt (field_ref, red->initial_value);
  mark_virtual_ops_for_renaming (stmt);

  bsi = bsi_last (clsn_data->store_bb);
  bsi_insert_after (&bsi, stmt, BSI_NEW_STMT);

  return 1;
}
/* Callback for htab_traverse. Creates loads to a field of LOAD in LOAD_BB and
store to a field of STORE in STORE_BB for the ssa name and its duplicate
specified in SLOT. */
static int
create_loads_and_stores_for_name (void **slot, void *data)
{
  struct name_to_copy_elt *elt = *slot;
  struct clsn_data *clsn_data = data;
  tree stmt;
  block_stmt_iterator bsi;
  tree type = TREE_TYPE (elt->new_name);
  tree struct_type = TREE_TYPE (TREE_TYPE (clsn_data->load));
  tree load_struct;

  /* In STORE_BB (before the parallel region): store.field = original.  */
  bsi = bsi_last (clsn_data->store_bb);
  stmt =
    build_gimple_modify_stmt (build3
			      (COMPONENT_REF, type, clsn_data->store,
			       elt->field, NULL_TREE),
			      ssa_name (elt->version));
  mark_virtual_ops_for_renaming (stmt);
  bsi_insert_after (&bsi, stmt, BSI_NEW_STMT);

  /* In LOAD_BB (inside the outlined body): new_name = load->field.  */
  bsi = bsi_last (clsn_data->load_bb);
  load_struct = fold_build1 (INDIRECT_REF, struct_type, clsn_data->load);
  stmt = build_gimple_modify_stmt (elt->new_name,
				   build3 (COMPONENT_REF, type, load_struct,
					   elt->field, NULL_TREE));
  SSA_NAME_DEF_STMT (elt->new_name) = stmt;
  bsi_insert_after (&bsi, stmt, BSI_NEW_STMT);

  return 1;
}
/* Moves all the variables used in LOOP and defined outside of it (including
the initial values of loop phi nodes, and *PER_THREAD if it is a ssa
name) to a structure created for this purpose. The code
while (1)
{
use (a);
use (b);
}
is transformed this way:
bb0:
old.a = a;
old.b = b;
bb1:
a' = new->a;
b' = new->b;
while (1)
{
use (a');
use (b');
}
`old' is stored to *ARG_STRUCT and `new' is stored to NEW_ARG_STRUCT. The
pointer `new' is intentionally not initialized (the loop will be split to a
separate function later, and `new' will be initialized from its arguments).
LD_ST_DATA holds information about the shared data structure used to pass
information among the threads. It is initialized here, and
gen_parallel_loop will pass it to create_call_for_reduction that
needs this information. REDUCTION_LIST describes the reductions
in LOOP. */
static void
separate_decls_in_loop (struct loop *loop, htab_t reduction_list,
			tree * arg_struct, tree * new_arg_struct,
			struct clsn_data *ld_st_data)
{
  /* BB0 receives the stores into the structure, BB1 (the new preheader)
     receives the loads out of it.  */
  basic_block bb1 = split_edge (loop_preheader_edge (loop));
  basic_block bb0 = single_pred (bb1);
  htab_t name_copies = htab_create (10, name_to_copy_elt_hash,
				    name_to_copy_elt_eq, free);
  htab_t decl_copies = htab_create (10, int_tree_map_hash, int_tree_map_eq,
				    free);
  basic_block bb, *body = get_loop_body (loop);
  unsigned i;
  tree phi, type, type_name, nvar;
  block_stmt_iterator bsi;
  struct clsn_data clsn_data;

  /* Find and rename the ssa names defined outside of loop.  */
  for (i = 0; i < loop->num_nodes; i++)
    {
      bb = body[i];

      for (phi = phi_nodes (bb); phi; phi = PHI_CHAIN (phi))
	separate_decls_in_loop_stmt (loop, phi, name_copies, decl_copies);

      for (bsi = bsi_start (bb); !bsi_end_p (bsi); bsi_next (&bsi))
	separate_decls_in_loop_stmt (loop, bsi_stmt (bsi), name_copies,
				     decl_copies);
    }
  free (body);

  if (htab_elements (name_copies) == 0)
    {
      /* It may happen that there is nothing to copy (if there are only
         loop carried and external variables in the loop).  */
      *arg_struct = NULL;
      *new_arg_struct = NULL;
    }
  else
    {
      /* Create the type for the structure to store the ssa names to.  */
      type = lang_hooks.types.make_type (RECORD_TYPE);
      type_name = build_decl (TYPE_DECL, create_tmp_var_name (".paral_data"),
			      type);
      TYPE_NAME (type) = type_name;

      /* One field per duplicated name, plus one per reduction.  */
      htab_traverse (name_copies, add_field_for_name, type);
      if (htab_elements (reduction_list) > 0)
	{
	  /* Create the fields for reductions.  */
	  htab_traverse (reduction_list, add_field_for_reduction,
			 type);
	}
      layout_type (type);

      /* Create the loads and stores.  */
      *arg_struct = create_tmp_var (type, ".paral_data_store");
      add_referenced_var (*arg_struct);
      nvar = create_tmp_var (build_pointer_type (type), ".paral_data_load");
      add_referenced_var (nvar);
      *new_arg_struct = make_ssa_name (nvar, NULL_TREE);

      ld_st_data->store = *arg_struct;
      ld_st_data->load = *new_arg_struct;
      ld_st_data->store_bb = bb0;
      ld_st_data->load_bb = bb1;

      htab_traverse (name_copies, create_loads_and_stores_for_name,
		     ld_st_data);

      /* Load the calculation from memory (after the join of the threads).  */
      if (htab_elements (reduction_list) > 0)
	{
	  htab_traverse (reduction_list, create_stores_for_reduction,
			 ld_st_data);
	  clsn_data.load = make_ssa_name (nvar, NULL_TREE);
	  clsn_data.load_bb = single_dom_exit (loop)->dest;
	  clsn_data.store = ld_st_data->store;
	  create_final_loads_for_reduction (reduction_list, &clsn_data);
	}
    }

  htab_delete (decl_copies);
  htab_delete (name_copies);
}
/* Bitmap containing uids of functions created by parallelization. We cannot
allocate it from the default obstack, as it must live across compilation
of several functions; we make it gc allocated instead. */
static GTY(()) bitmap parallelized_functions;
/* Returns true if FN was created by create_loop_fn. */
static bool
parallelized_function_p (tree fn)
{
  /* Functions we emit are artificial and recorded in
     PARALLELIZED_FUNCTIONS; anything else cannot be ours.  */
  return (parallelized_functions != NULL
	  && DECL_ARTIFICIAL (fn)
	  && bitmap_bit_p (parallelized_functions, DECL_UID (fn)));
}
/* Creates and returns an empty function that will receive the body of
a parallelized loop. */
static tree
create_loop_fn (void)
{
  char buf[100];
  char *tname;
  tree decl, type, name, t;
  struct function *act_cfun = cfun;
  static unsigned loopfn_num;

  /* Build a unique private symbol "<function>.$loopfn.<N>".  */
  snprintf (buf, 100, "%s.$loopfn", current_function_name ());
  ASM_FORMAT_PRIVATE_NAME (tname, buf, loopfn_num++);
  clean_symbol_name (tname);
  name = get_identifier (tname);

  /* The outlined function takes a single pointer (the shared data
     structure) and returns nothing.  */
  type = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);

  decl = build_decl (FUNCTION_DECL, name, type);
  if (!parallelized_functions)
    parallelized_functions = BITMAP_GGC_ALLOC ();
  /* Record the decl so parallelize_loops skips functions we created.  */
  bitmap_set_bit (parallelized_functions, DECL_UID (decl));

  TREE_STATIC (decl) = 1;
  TREE_USED (decl) = 1;
  DECL_ARTIFICIAL (decl) = 1;
  DECL_IGNORED_P (decl) = 0;
  TREE_PUBLIC (decl) = 0;
  DECL_UNINLINABLE (decl) = 1;
  DECL_EXTERNAL (decl) = 0;
  DECL_CONTEXT (decl) = NULL_TREE;
  DECL_INITIAL (decl) = make_node (BLOCK);

  /* void result.  */
  t = build_decl (RESULT_DECL, NULL_TREE, void_type_node);
  DECL_ARTIFICIAL (t) = 1;
  DECL_IGNORED_P (t) = 1;
  DECL_RESULT (decl) = t;

  /* The single ".paral_data_param" pointer argument.  */
  t = build_decl (PARM_DECL, get_identifier (".paral_data_param"),
		  ptr_type_node);
  DECL_ARTIFICIAL (t) = 1;
  DECL_ARG_TYPE (t) = ptr_type_node;
  DECL_CONTEXT (t) = decl;
  TREE_USED (t) = 1;
  DECL_ARGUMENTS (decl) = t;

  allocate_struct_function (decl, false);

  /* The call to allocate_struct_function clobbers CFUN, so we need to restore
     it.  */
  set_cfun (act_cfun);

  return decl;
}
/* Bases all the induction variables in LOOP on a single induction variable
(unsigned with base 0 and step 1), whose final value is compared with
NIT. The induction variable is incremented in the loop latch.
REDUCTION_LIST describes the reductions in LOOP. */
static void
canonicalize_loop_ivs (struct loop *loop, htab_t reduction_list, tree nit)
{
  unsigned precision = TYPE_PRECISION (TREE_TYPE (nit));
  tree phi, prev, res, type, var_before, val, atype, mtype, t, next;
  block_stmt_iterator bsi;
  bool ok;
  affine_iv iv;
  edge exit = single_dom_exit (loop);
  struct reduction_info *red;

  /* Pick a precision wide enough for NIT and every gimple-reg phi
     result in the header.  */
  for (phi = phi_nodes (loop->header); phi; phi = PHI_CHAIN (phi))
    {
      res = PHI_RESULT (phi);

      if (is_gimple_reg (res) && TYPE_PRECISION (TREE_TYPE (res)) > precision)
	precision = TYPE_PRECISION (TREE_TYPE (res));
    }

  type = lang_hooks.types.type_for_size (precision, 1);

  /* Create the canonical IV (base 0, step 1), incremented in the latch.  */
  bsi = bsi_last (loop->latch);
  create_iv (build_int_cst_type (type, 0), build_int_cst (type, 1), NULL_TREE,
	     loop, &bsi, true, &var_before, NULL);

  /* Rewrite every other induction variable as an affine function of
     VAR_BEFORE and remove its phi node.  */
  bsi = bsi_after_labels (loop->header);
  prev = NULL;
  for (phi = phi_nodes (loop->header); phi; phi = next)
    {
      next = PHI_CHAIN (phi);
      res = PHI_RESULT (phi);

      /* Skip virtual operands and the canonical IV itself.  */
      if (!is_gimple_reg (res) || res == var_before)
	{
	  prev = phi;
	  continue;
	}

      ok = simple_iv (loop, phi, res, &iv, true);
      red = reduction_phi (reduction_list, phi);
      /* We preserve the reduction phi nodes.  */
      if (!ok && red)
	{
	  prev = phi;
	  continue;
	}
      else
	gcc_assert (ok);
      remove_phi_node (phi, prev, false);

      /* res = iv.base + iv.step * var_before; for pointer IVs the offset
	 is computed in sizetype and added with POINTER_PLUS_EXPR.  */
      atype = TREE_TYPE (res);
      mtype = POINTER_TYPE_P (atype) ? sizetype : atype;
      val = fold_build2 (MULT_EXPR, mtype, unshare_expr (iv.step),
			 fold_convert (mtype, var_before));
      val = fold_build2 (POINTER_TYPE_P (atype)
			 ? POINTER_PLUS_EXPR : PLUS_EXPR,
			 atype, unshare_expr (iv.base), val);
      val = force_gimple_operand_bsi (&bsi, val, false, NULL_TREE, true,
				      BSI_SAME_STMT);
      t = build_gimple_modify_stmt (res, val);
      bsi_insert_before (&bsi, t, BSI_SAME_STMT);
      SSA_NAME_DEF_STMT (res) = t;
    }

  t = last_stmt (exit->src);
  /* Make the loop exit if the control condition is not satisfied.  */
  if (exit->flags & EDGE_TRUE_VALUE)
    {
      edge te, fe;

      extract_true_false_edges_from_block (exit->src, &te, &fe);
      te->flags = EDGE_FALSE_VALUE;
      fe->flags = EDGE_TRUE_VALUE;
    }
  /* The new exit condition compares the canonical IV against NIT.  */
  COND_EXPR_COND (t) = build2 (LT_EXPR, boolean_type_node, var_before, nit);
}
/* Moves the exit condition of LOOP to the beginning of its header, and
duplicates the part of the last iteration that gets disabled to the
exit of the loop. NIT is the number of iterations of the loop
(used to initialize the variables in the duplicated part).
TODO: the common case is that latch of the loop is empty and immediately
follows the loop exit. In this case, it would be better not to copy the
body of the loop, but only move the entry of the loop directly before the
exit check and increase the number of iterations of the loop by one.
This may need some additional preconditioning in case NIT = ~0.
REDUCTION_LIST describes the reductions in LOOP. */
static void
transform_to_exit_first_loop (struct loop *loop, htab_t reduction_list, tree nit)
{
  basic_block *bbs, *nbbs, ex_bb, orig_header;
  unsigned n;
  bool ok;
  edge exit = single_dom_exit (loop), hpred;
  tree phi, nphi, cond, control, control_name, res, t, cond_stmt;
  block_stmt_iterator bsi;

  /* Split the header so the exit test can become its first statement.  */
  split_block_after_labels (loop->header);
  orig_header = single_succ (loop->header);
  hpred = single_succ_edge (loop->header);

  cond_stmt = last_stmt (exit->src);
  cond = COND_EXPR_COND (cond_stmt);
  control = TREE_OPERAND (cond, 0);
  gcc_assert (TREE_OPERAND (cond, 1) == nit);

  /* Make sure that we have phi nodes on exit for all loop header phis
     (create_parallel_loop requires that).  */
  for (phi = phi_nodes (loop->header); phi; phi = PHI_CHAIN (phi))
    {
      res = PHI_RESULT (phi);
      t = make_ssa_name (SSA_NAME_VAR (res), phi);
      SET_PHI_RESULT (phi, t);

      nphi = create_phi_node (res, orig_header);
      SSA_NAME_DEF_STMT (res) = nphi;
      add_phi_arg (nphi, t, hpred);

      if (res == control)
	{
	  /* The control name was renamed; keep the condition in sync.  */
	  TREE_OPERAND (cond, 0) = t;
	  update_stmt (cond_stmt);
	  control = t;
	}
    }

  bbs = get_loop_body_in_dom_order (loop);
  for (n = 0; bbs[n] != exit->src; n++)
    continue;
  nbbs = XNEWVEC (basic_block, n);
  ok = tree_duplicate_sese_tail (single_succ_edge (loop->header), exit,
				 bbs + 1, n, nbbs);
  gcc_assert (ok);
  free (bbs);
  ex_bb = nbbs[0];
  free (nbbs);

  /* Other than reductions, the only gimple reg that should be copied
     out of the loop is the control variable.  */

  control_name = NULL_TREE;
  for (phi = phi_nodes (ex_bb); phi; phi = PHI_CHAIN (phi))
    {
      res = PHI_RESULT (phi);
      if (!is_gimple_reg (res))
	continue;

      /* Check if it is a part of reduction.  If it is,
	 keep the phi at the reduction's keep_res field.  The
	 PHI_RESULT of this phi is the resulting value of the reduction
	 variable when exiting the loop.  */

      exit = single_dom_exit (loop);

      if (htab_elements (reduction_list) > 0)
	{
	  struct reduction_info *red;

	  tree val = PHI_ARG_DEF_FROM_EDGE (phi, exit);

	  red = reduction_phi (reduction_list, SSA_NAME_DEF_STMT (val));
	  if (red)
	    {
	      /* A reduction phi is NOT the control variable.  The original
		 code fell through here and clobbered CONTROL_NAME with the
		 reduction result, which made the cleanup below remove the
		 wrong phi node.  */
	      red->keep_res = phi;
	      continue;
	    }
	}
      /* Anything that is neither virtual nor a reduction must be the
	 (unique) control variable.  */
      gcc_assert (control_name == NULL_TREE
		  && SSA_NAME_VAR (res) == SSA_NAME_VAR (control));
      control_name = res;
    }
  gcc_assert (control_name != NULL_TREE);
  phi = SSA_NAME_DEF_STMT (control_name);
  remove_phi_node (phi, NULL_TREE, false);

  /* Initialize the control variable to NIT.  */
  bsi = bsi_after_labels (ex_bb);
  nit = force_gimple_operand_bsi (&bsi,
				  fold_convert (TREE_TYPE (control_name), nit),
				  false, NULL_TREE, false, BSI_SAME_STMT);
  t = build_gimple_modify_stmt (control_name, nit);
  bsi_insert_before (&bsi, t, BSI_NEW_STMT);
  SSA_NAME_DEF_STMT (control_name) = t;
}
/* Create the parallel constructs for LOOP as described in gen_parallel_loop.
LOOP_FN and DATA are the arguments of OMP_PARALLEL.
NEW_DATA is the variable that should be initialized from the argument
of LOOP_FN. N_THREADS is the requested number of threads. Returns the
basic block containing OMP_PARALLEL tree. */
static basic_block
create_parallel_loop (struct loop *loop, tree loop_fn, tree data,
		      tree new_data, unsigned n_threads)
{
  block_stmt_iterator bsi;
  basic_block bb, paral_bb, for_bb, ex_bb;
  tree t, param, res, for_stmt;
  tree cvar, cvar_init, initvar, cvar_next, cvar_base, cond, phi, type;
  edge exit, nexit, guard, end, e;

  /* Prepare the OMP_PARALLEL statement.  */
  bb = loop_preheader_edge (loop)->src;
  paral_bb = single_pred (bb);
  bsi = bsi_last (paral_bb);

  t = build_omp_clause (OMP_CLAUSE_NUM_THREADS);
  OMP_CLAUSE_NUM_THREADS_EXPR (t)
    = build_int_cst (integer_type_node, n_threads);
  t = build4 (OMP_PARALLEL, void_type_node, NULL_TREE, t, loop_fn, data);

  bsi_insert_after (&bsi, t, BSI_NEW_STMT);

  /* Initialize NEW_DATA.  */
  if (data)
    {
      bsi = bsi_after_labels (bb);

      /* param = &data; new_data = (cast) param;  */
      param = make_ssa_name (DECL_ARGUMENTS (loop_fn), NULL_TREE);
      t = build_gimple_modify_stmt (param, build_fold_addr_expr (data));
      bsi_insert_before (&bsi, t, BSI_SAME_STMT);
      SSA_NAME_DEF_STMT (param) = t;

      t = build_gimple_modify_stmt (new_data,
				    fold_convert (TREE_TYPE (new_data),
						  param));
      bsi_insert_before (&bsi, t, BSI_SAME_STMT);
      SSA_NAME_DEF_STMT (new_data) = t;
    }

  /* Emit OMP_RETURN for OMP_PARALLEL.  */
  bb = split_loop_exit_edge (single_dom_exit (loop));
  bsi = bsi_last (bb);
  bsi_insert_after (&bsi, make_node (OMP_RETURN), BSI_NEW_STMT);

  /* Extract data for OMP_FOR.  */
  gcc_assert (loop->header == single_dom_exit (loop)->src);
  cond = COND_EXPR_COND (last_stmt (loop->header));

  cvar = TREE_OPERAND (cond, 0);
  cvar_base = SSA_NAME_VAR (cvar);
  phi = SSA_NAME_DEF_STMT (cvar);
  cvar_init = PHI_ARG_DEF_FROM_EDGE (phi, loop_preheader_edge (loop));
  initvar = make_ssa_name (cvar_base, NULL_TREE);
  SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, loop_preheader_edge (loop)),
	   initvar);
  cvar_next = PHI_ARG_DEF_FROM_EDGE (phi, loop_latch_edge (loop));

  /* The increment of the control variable is expressed by OMP_FOR_INCR
     below; remove the original defining statement from the latch.  */
  bsi = bsi_last (loop->latch);
  gcc_assert (bsi_stmt (bsi) == SSA_NAME_DEF_STMT (cvar_next));
  bsi_remove (&bsi, true);

  /* Prepare cfg.  */
  for_bb = split_edge (loop_preheader_edge (loop));
  ex_bb = split_loop_exit_edge (single_dom_exit (loop));
  extract_true_false_edges_from_block (loop->header, &nexit, &exit);
  gcc_assert (exit == single_dom_exit (loop));

  /* GUARD skips the loop entirely; END is the fallthru from the latch.  */
  guard = make_edge (for_bb, ex_bb, 0);
  single_succ_edge (loop->latch)->flags = 0;
  end = make_edge (loop->latch, ex_bb, EDGE_FALLTHRU);
  for (phi = phi_nodes (ex_bb); phi; phi = PHI_CHAIN (phi))
    {
      res = PHI_RESULT (phi);
      gcc_assert (!is_gimple_reg (phi));
      t = SSA_NAME_DEF_STMT (PHI_ARG_DEF_FROM_EDGE (phi, exit));
      add_phi_arg (phi, PHI_ARG_DEF_FROM_EDGE (t, loop_preheader_edge (loop)),
		   guard);
      add_phi_arg (phi, PHI_ARG_DEF_FROM_EDGE (t, loop_latch_edge (loop)),
		   end);
    }
  e = redirect_edge_and_branch (exit, nexit->dest);
  PENDING_STMT (e) = NULL;

  /* Emit OMP_FOR.  */
  TREE_OPERAND (cond, 0) = cvar_base;
  type = TREE_TYPE (cvar);
  t = build_omp_clause (OMP_CLAUSE_SCHEDULE);
  OMP_CLAUSE_SCHEDULE_KIND (t) = OMP_CLAUSE_SCHEDULE_STATIC;

  for_stmt = make_node (OMP_FOR);
  TREE_TYPE (for_stmt) = void_type_node;
  OMP_FOR_CLAUSES (for_stmt) = t;
  OMP_FOR_INIT (for_stmt) = build_gimple_modify_stmt (initvar, cvar_init);
  OMP_FOR_COND (for_stmt) = cond;
  OMP_FOR_INCR (for_stmt) = build_gimple_modify_stmt (cvar_base,
						      build2 (PLUS_EXPR, type,
							      cvar_base,
							      build_int_cst
							      (type, 1)));
  OMP_FOR_BODY (for_stmt) = NULL_TREE;
  OMP_FOR_PRE_BODY (for_stmt) = NULL_TREE;

  bsi = bsi_last (for_bb);
  bsi_insert_after (&bsi, for_stmt, BSI_NEW_STMT);
  SSA_NAME_DEF_STMT (initvar) = for_stmt;

  /* Emit OMP_CONTINUE.  */
  bsi = bsi_last (loop->latch);
  t = build2 (OMP_CONTINUE, void_type_node, cvar_next, cvar);
  bsi_insert_after (&bsi, t, BSI_NEW_STMT);
  SSA_NAME_DEF_STMT (cvar_next) = t;

  /* Emit OMP_RETURN for OMP_FOR.  */
  bsi = bsi_last (ex_bb);
  bsi_insert_after (&bsi, make_node (OMP_RETURN), BSI_NEW_STMT);

  return paral_bb;
}
/* Generates code to execute the iterations of LOOP in N_THREADS threads in
parallel. NITER describes number of iterations of LOOP.
REDUCTION_LIST describes the reductions existing in the LOOP.  */
static void
gen_parallel_loop (struct loop *loop, htab_t reduction_list,
		   unsigned n_threads, struct tree_niter_desc *niter)
{
  struct loop *nloop;
  loop_iterator li;
  tree many_iterations_cond, type, nit;
  tree stmts, arg_struct, new_arg_struct;
  basic_block parallel_head;
  struct clsn_data clsn_data;
  unsigned prob;

  /* From

     ---------------------------------------------------------------------
     loop
       {
	 IV = phi (INIT, IV + STEP)
	 BODY1;
	 if (COND)
	   break;
	 BODY2;
       }
     ---------------------------------------------------------------------

     with # of iterations NITER (possibly with MAY_BE_ZERO assumption),
     we generate the following code:

     ---------------------------------------------------------------------

     if (MAY_BE_ZERO
	 || NITER < MIN_PER_THREAD * N_THREADS)
       goto original;

     BODY1;
     store all local loop-invariant variables used in body of the loop to DATA.
     OMP_PARALLEL (OMP_CLAUSE_NUM_THREADS (N_THREADS), LOOPFN, DATA);
     load the variables from DATA.
     OMP_FOR (IV = INIT; COND; IV += STEP) (OMP_CLAUSE_SCHEDULE (static))
     BODY2;
     BODY1;
     OMP_CONTINUE;
     OMP_RETURN         -- OMP_FOR
     OMP_RETURN         -- OMP_PARALLEL
     goto end;

     original:
     loop
       {
	 IV = phi (INIT, IV + STEP)
	 BODY1;
	 if (COND)
	   break;
	 BODY2;
       }

     end:

   */

  /* Create two versions of the loop -- in the old one, we know that the
     number of iterations is large enough, and we will transform it into the
     loop that will be split to loop_fn, the new one will be used for the
     remaining iterations.  */

  type = TREE_TYPE (niter->niter);
  nit = force_gimple_operand (unshare_expr (niter->niter), &stmts, true,
			      NULL_TREE);
  if (stmts)
    bsi_insert_on_edge_immediate (loop_preheader_edge (loop), stmts);

  /* Only parallelize when NITER >= MIN_PER_THREAD * N_THREADS and the
     may-be-zero assumption does not hold.  */
  many_iterations_cond =
    fold_build2 (GE_EXPR, boolean_type_node,
		 nit, build_int_cst (type, MIN_PER_THREAD * n_threads));
  many_iterations_cond
    = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
		   invert_truthvalue (unshare_expr (niter->may_be_zero)),
		   many_iterations_cond);
  many_iterations_cond
    = force_gimple_operand (many_iterations_cond, &stmts, false, NULL_TREE);
  if (stmts)
    bsi_insert_on_edge_immediate (loop_preheader_edge (loop), stmts);
  if (!is_gimple_condexpr (many_iterations_cond))
    {
      many_iterations_cond
	= force_gimple_operand (many_iterations_cond, &stmts,
				true, NULL_TREE);
      if (stmts)
	bsi_insert_on_edge_immediate (loop_preheader_edge (loop), stmts);
    }

  initialize_original_copy_tables ();

  /* We assume that the loop usually iterates a lot.  */
  prob = 4 * REG_BR_PROB_BASE / 5;
  /* NLOOP (the copy kept for the few-iterations fallback) is not
     referenced again; loop_version's side effects are what we need.  */
  nloop = loop_version (loop, many_iterations_cond, NULL,
			prob, prob, REG_BR_PROB_BASE - prob, true);
  update_ssa (TODO_update_ssa);
  free_original_copy_tables ();

  /* Base all the induction variables in LOOP on a single control one.  */
  canonicalize_loop_ivs (loop, reduction_list, nit);

  /* Ensure that the exit condition is the first statement in the loop.  */
  transform_to_exit_first_loop (loop, reduction_list, nit);

  /* Generate initializations for reductions.  */
  if (htab_elements (reduction_list) > 0)
    htab_traverse (reduction_list, initialize_reductions, loop);

  /* Eliminate the references to local variables from the loop.  */
  eliminate_local_variables (loop);

  /* In the old loop, move all variables non-local to the loop to a structure
     and back, and create separate decls for the variables used in loop.  */
  separate_decls_in_loop (loop, reduction_list, &arg_struct, &new_arg_struct, &clsn_data);

  /* Create the parallel constructs.  */
  parallel_head = create_parallel_loop (loop, create_loop_fn (), arg_struct,
					new_arg_struct, n_threads);
  if (htab_elements (reduction_list) > 0)
    create_call_for_reduction (loop, reduction_list, &clsn_data);

  scev_reset ();

  /* Cancel the loop (it is simpler to do it here rather than to teach the
     expander to do it).  */
  cancel_loop_tree (loop);

  /* Free loop bound estimations that could contain references to
     removed statements.  */
  FOR_EACH_LOOP (li, loop, 0)
    free_numbers_of_iterations_estimates_loop (loop);

  /* Expand the parallel constructs.  We do it directly here instead of running
     a separate expand_omp pass, since it is more efficient, and less likely to
     cause troubles with further analyses not being able to deal with the
     OMP trees.  */

  omp_expand_local (parallel_head);
}
/* Returns true when LOOP contains vector phi nodes.  */

static bool
loop_has_vector_phi_nodes (struct loop *loop)
{
  basic_block *body = get_loop_body_in_dom_order (loop);
  bool found = false;
  unsigned i;
  tree phi;

  /* Scan every phi node in every block of the loop body, stopping as
     soon as one with a vector result is seen.  */
  for (i = 0; i < loop->num_nodes && !found; i++)
    for (phi = phi_nodes (body[i]); phi; phi = PHI_CHAIN (phi))
      if (TREE_CODE (TREE_TYPE (PHI_RESULT (phi))) == VECTOR_TYPE)
	{
	  found = true;
	  break;
	}

  free (body);
  return found;
}
/* Detect parallel loops and generate parallel code using libgomp
primitives. Returns true if some loop was parallelized, false
otherwise. */
bool
parallelize_loops (void)
{
unsigned n_threads = flag_tree_parallelize_loops;
bool changed = false;
struct loop *loop;
struct tree_niter_desc niter_desc;
loop_iterator li;
htab_t reduction_list;
/* Do not parallelize loops in the functions created by parallelization. */
if (parallelized_function_p (cfun->decl))
return false;
/* Hash table of per-loop reduction statements; entries are released with
   free () when the table is emptied or deleted.  */
reduction_list = htab_create (10, reduction_info_hash,
reduction_info_eq, free);
FOR_EACH_LOOP (li, loop, 0)
{
/* Discard reduction data gathered for the previous loop.  */
htab_empty (reduction_list);
if (/* Do not bother with loops in cold areas. */
!maybe_hot_bb_p (loop->header)
/* Or loops that roll too little. */
|| expected_loop_iterations (loop) <= n_threads
/* And of course, the loop must be parallelizable. */
|| !can_duplicate_loop_p (loop)
|| loop_has_blocks_with_irreducible_flag (loop)
/* FIXME: the check for vector phi nodes could be removed. */
|| loop_has_vector_phi_nodes (loop)
/* NOTE: this check must stay last in the chain -- loop_parallel_p fills
   REDUCTION_LIST and NITER_DESC as a side effect, and gen_parallel_loop
   below consumes both.  */
|| !loop_parallel_p (loop, reduction_list, &niter_desc))
continue;
changed = true;
gen_parallel_loop (loop, reduction_list, n_threads, &niter_desc);
/* Consistency checks after the CFG and SSA surgery performed by
   gen_parallel_loop.  */
verify_flow_info ();
verify_dominators (CDI_DOMINATORS);
verify_loop_structure ();
verify_loop_closed_ssa ();
}
htab_delete (reduction_list);
return changed;
}
#include "gt-tree-parloops.h"
|
FLA_queue_omp.c | /*
Copyright (C) 2014, The University of Texas at Austin
This file is part of libflame and is available under the 3-Clause
BSD license, which can be found in the LICENSE file at the top-level
directory, or at http://opensource.org/licenses/BSD-3-Clause
*/
/*
Here is the usage for FLAME workqueuing:
FLA_Queue_init();
FLA_Part( ... );
while( ... )
{
FLA_Repart( ... );
ENQUEUE_FLA_Gemm( ... );
FLA_Cont_with( ... );
}
FLA_Queue_exec();
FLA_Queue_finalize();
*/
#include "FLAME.h"
#if FLA_MULTITHREADING_MODEL == FLA_OPENMP
#include "FLA_queue_omp.h"
#include "FLA_task_partitioning.h"
#include <assert.h>
#include <stdarg.h>
#include <omp.h>
FLA_Queue tq;
static int fla_queue_n_threads = 1;
static int fla_queue_initialized = 0;
void FLA_Queue_exec()
{
	// Nothing to do when no tasks have been enqueued.
	if ( tq.n_tasks == 0 ) return;

	// Execute every queued task and wait for all of them to complete.
	FLA_queue_exec_sync();
}
void FLA_queue_print_costs()
{
	int idx;

	// Dump the cost estimate of each task currently indexed by the
	// task array (requires FLA_queue_create_task_array() to have run).
	for ( idx = 0; idx < tq.n_tasks; ++idx )
	{
		fprintf( stdout, "cost of task %2d: %e\n", idx, tq.task_array[idx]->cost );
		fflush( stdout );
	}
}
void FLA_queue_exec_sync()
{
int i;
int n_tasks;
FLA_Task** task_array;
FLA_Task* t;
// The queue is full, so we may now create an array index of each task.
FLA_queue_create_task_array();
// Sort the array if it is not sorted already
FLA_queue_sort_task_array();
//FLA_queue_print_costs();
// Copy the task_array pointer and n_tasks integer locally to make the
// OpenMP compiler happy.
n_tasks = tq.n_tasks;
task_array = tq.task_array;
// Iterate over the task queue using the random-access array.
#pragma omp parallel for \
shared( task_array, n_tasks ) \
private( i, t ) \
schedule( dynamic, 1 )
for( i = n_tasks - 1; i >= 0; --i )
//for( i = 0; i < n_tasks; ++i )
{
t = task_array[i];
FLA_queue_exec_task( t );
}
// Flush the queue. To do this, we walk the task_array and free() each
// element.
FLA_queue_flush();
// Now that we're done with the task array, we can free it.
FLA_queue_free_task_array();
}
// Execute task t by matching its stored void* function pointer against the
// known FLAME external BLAS wrappers and re-invoking it through a correctly
// typed pointer. Arguments are unpacked positionally: all control enums come
// from t->int_arg[] (in declaration order), all matrix operands from
// t->fla_arg[]. An unrecognized function pointer aborts via
// FLA_Check_error_code().
void FLA_queue_exec_task( FLA_Task* t )
{
// Define local function pointer types.
typedef FLA_Error(*fla_gemm_p)(FLA_Trans transa, FLA_Trans transb, FLA_Obj alpha, FLA_Obj A, FLA_Obj B, FLA_Obj beta, FLA_Obj C);
typedef FLA_Error(*fla_symm_p)(FLA_Side side, FLA_Uplo uplo, FLA_Obj alpha, FLA_Obj A, FLA_Obj B, FLA_Obj beta, FLA_Obj C);
typedef FLA_Error(*fla_syrk_p)(FLA_Uplo uplo, FLA_Trans transa, FLA_Obj alpha, FLA_Obj A, FLA_Obj beta, FLA_Obj C);
typedef FLA_Error(*fla_syr2k_p)(FLA_Uplo uplo, FLA_Trans transa, FLA_Obj alpha, FLA_Obj A, FLA_Obj B, FLA_Obj beta, FLA_Obj C);
typedef FLA_Error(*fla_trmm_p)(FLA_Side side, FLA_Uplo uplo, FLA_Trans trans, FLA_Diag diag, FLA_Obj alpha, FLA_Obj A, FLA_Obj C);
typedef FLA_Error(*fla_trsm_p)(FLA_Side side, FLA_Uplo uplo, FLA_Trans trans, FLA_Diag diag, FLA_Obj alpha, FLA_Obj A, FLA_Obj C);
// Now "switch" between the various possible task functions. Dispatch is by
// pointer identity, so only these exact externals are recognized.
// FLA_Gemm
if ( t->func == (void*)FLA_Gemm_external )
{
fla_gemm_p func;
// Here we unpack our void* function pointer and typecast it appropriately.
func = (fla_gemm_p) t->func;
// Invoke FLA_Gemm()
func( ( FLA_Trans ) t->int_arg[0],
( FLA_Trans ) t->int_arg[1],
( FLA_Obj ) t->fla_arg[0],
( FLA_Obj ) t->fla_arg[1],
( FLA_Obj ) t->fla_arg[2],
( FLA_Obj ) t->fla_arg[3],
( FLA_Obj ) t->fla_arg[4] );
}
// FLA_Symm
else if( t->func == (void*)FLA_Symm_external )
{
fla_symm_p func;
// Here we unpack our void* function pointer and typecast it appropriately.
func = (fla_symm_p) t->func;
// Invoke FLA_Symm()
func( ( FLA_Side ) t->int_arg[0],
( FLA_Uplo ) t->int_arg[1],
( FLA_Obj ) t->fla_arg[0],
( FLA_Obj ) t->fla_arg[1],
( FLA_Obj ) t->fla_arg[2],
( FLA_Obj ) t->fla_arg[3],
( FLA_Obj ) t->fla_arg[4] );
}
// FLA_Syrk
else if( t->func == (void*)FLA_Syrk_external )
{
fla_syrk_p func;
// Here we unpack our void* function pointer and typecast it appropriately.
func = (fla_syrk_p) t->func;
// Invoke FLA_Syrk()
func( ( FLA_Uplo ) t->int_arg[0],
( FLA_Trans ) t->int_arg[1],
( FLA_Obj ) t->fla_arg[0],
( FLA_Obj ) t->fla_arg[1],
( FLA_Obj ) t->fla_arg[2],
( FLA_Obj ) t->fla_arg[3] );
}
// FLA_Syr2k
else if( t->func == (void*)FLA_Syr2k_external )
{
fla_syr2k_p func;
// Here we unpack our void* function pointer and typecast it appropriately.
func = (fla_syr2k_p) t->func;
// Invoke FLA_Syr2k()
func( ( FLA_Uplo ) t->int_arg[0],
( FLA_Trans ) t->int_arg[1],
( FLA_Obj ) t->fla_arg[0],
( FLA_Obj ) t->fla_arg[1],
( FLA_Obj ) t->fla_arg[2],
( FLA_Obj ) t->fla_arg[3],
( FLA_Obj ) t->fla_arg[4] );
}
// FLA_Trmm
else if( t->func == (void*)FLA_Trmm_external )
{
fla_trmm_p func;
// Here we unpack our void* function pointer and typecast it appropriately.
func = (fla_trmm_p) t->func;
// Invoke FLA_Trmm()
func( ( FLA_Side ) t->int_arg[0],
( FLA_Uplo ) t->int_arg[1],
( FLA_Trans ) t->int_arg[2],
( FLA_Diag ) t->int_arg[3],
( FLA_Obj ) t->fla_arg[0],
( FLA_Obj ) t->fla_arg[1],
( FLA_Obj ) t->fla_arg[2] );
}
// FLA_Trsm
else if( t->func == (void*)FLA_Trsm_external )
{
fla_trsm_p func;
// Here we unpack our void* function pointer and typecast it appropriately.
func = (fla_trsm_p) t->func;
// Invoke FLA_Trsm()
func( ( FLA_Side ) t->int_arg[0],
( FLA_Uplo ) t->int_arg[1],
( FLA_Trans ) t->int_arg[2],
( FLA_Diag ) t->int_arg[3],
( FLA_Obj ) t->fla_arg[0],
( FLA_Obj ) t->fla_arg[1],
( FLA_Obj ) t->fla_arg[2] );
}
else
{
// Unknown function pointer: report rather than invoking blindly.
FLA_Check_error_code( FLA_NOT_YET_IMPLEMENTED );
}
}
void FLA_Queue_init()
{
	// A second call is a no-op.
	if ( fla_queue_initialized ) return;

	// Start with an empty task list.
	tq.n_tasks = 0;
	tq.head    = NULL;
	tq.tail    = NULL;

	// Remember that initialization has happened.
	fla_queue_initialized = 1;
}
void FLA_Queue_finalize()
{
	// A call without a matching init is a no-op.
	if ( !fla_queue_initialized ) return;

	// Every enqueued task must have been executed and flushed by now.
	assert( tq.n_tasks == 0 );

	// Allow a future FLA_Queue_init() to take effect again.
	fla_queue_initialized = 0;
}
// Set the number of threads the queue will use for task execution.
//
// The value is pushed into the OpenMP runtime, cached locally for
// FLA_Queue_get_num_threads(), and mirrored into the OMP_NUM_THREADS
// environment variable for any code that reads it directly.
//
// Improvements over the original: the bound-checked snprintf() replaces
// sprintf(), and the redundant buffer that held the constant string
// "OMP_NUM_THREADS" is gone -- the literal is passed to setenv() directly.
void FLA_Queue_set_num_threads( int n_threads )
{
	char nth_str[32];

	// Set the number of threads using the OpenMP interface.
	omp_set_num_threads( n_threads );

	// Also keep that value locally.
	fla_queue_n_threads = n_threads;

	// Mirror the setting into the environment, overwriting any existing
	// value (third argument of setenv() nonzero).
	snprintf( nth_str, sizeof( nth_str ), "%d", n_threads );
	setenv( "OMP_NUM_THREADS", nth_str, 1 );
}
// Return the thread count most recently requested through
// FLA_Queue_set_num_threads() (1 if it was never called).
int FLA_Queue_get_num_threads()
{
return fla_queue_n_threads;
}
void FLA_Queue_push( void* func, double cost, int n_int_params, int n_fla_params, ... )
{
	FLA_Task* task;
	int       arg;
	va_list   var_args;

	// Lazily initialize the queue on first use.
	if ( !fla_queue_initialized ) FLA_Queue_init();

	// Build a task structure sized for the given argument counts.
	task = FLA_task_alloc_init( func, cost, n_int_params, n_fla_params );

	// Walk the variable arguments. va_start()'s second argument must be
	// the named parameter that immediately precedes the "...". By
	// convention all integer parameters come first, then all FLA_Obj
	// parameters.
	va_start( var_args, n_fla_params );

	for ( arg = 0; arg < n_int_params; ++arg )
		task->int_arg[arg] = va_arg( var_args, int );

	for ( arg = 0; arg < n_fla_params; ++arg )
		task->fla_arg[arg] = va_arg( var_args, FLA_Obj );

	va_end( var_args );

	// Append the task to the (unsorted) linked list. Note the
	// random-access task array does not exist yet; it is created at
	// execution time.
	FLA_queue_push_unsorted( task );
}
void FLA_queue_push_unsorted( FLA_Task* t )
{
	// Link t in at the tail; an empty queue also needs its head set.
	if ( tq.n_tasks == 0 )
		tq.head = t;
	else
		tq.tail->next_task = t;
	tq.tail = t;

	// Track the new queue length.
	++tq.n_tasks;
}
void FLA_queue_insert_sorted( FLA_Task* t )
{
	FLA_Task** link;

	// The queue is kept in descending cost order. Walk the chain of
	// next-pointers until we find the first task whose cost does not
	// exceed t's cost; t is spliced in just before it. Falling off the
	// end (a NULL link) means t is the cheapest task so far and goes
	// last. Using a pointer-to-pointer makes the empty-queue and
	// insert-at-head cases fall out of the same code path. Note that a
	// sorted queue does not maintain tq.tail.
	for ( link = &tq.head; *link != NULL; link = &(*link)->next_task )
		if ( t->cost >= (*link)->cost )
			break;

	// Splice t in front of *link (which is NULL when appending at the end).
	t->next_task = *link;
	*link        = t;

	// Track the new queue length.
	++tq.n_tasks;
}
void FLA_queue_create_task_array()
{
	int       idx;
	FLA_Task* task;

	// Allocate one pointer slot per queued task.
	tq.task_array = (FLA_Task**)malloc( tq.n_tasks * sizeof(FLA_Task*) );
	assert( tq.task_array != NULL );

	// Walk the linked list, recording each task's address in order.
	task = tq.head;
	for ( idx = 0; idx < tq.n_tasks; ++idx )
	{
		tq.task_array[idx] = task;
		task = task->next_task;
	}
}
void FLA_queue_sort_task_array()
{
	// Sort the task pointer array by cost with the C library quicksort.
	// The array must already exist (see FLA_queue_create_task_array());
	// sorting before then would pass qsort() a NULL base.
	qsort( (void*)tq.task_array,
	       (size_t)tq.n_tasks,
	       sizeof(FLA_Task*),
	       FLA_queue_task_cost_compare );
}
int FLA_queue_task_cost_compare( const void* t0, const void* t1 )
{
FLA_Task* task0;
FLA_Task* task1;
double diff;
int r_val;
task0 = *((FLA_Task**)t0);
task1 = *((FLA_Task**)t1);
diff = task0->cost - task1->cost;
r_val = (int)( diff/fabs(diff) );
return r_val;
}
void FLA_queue_free_task_array()
{
	// The array must exist before it can be released.
	assert( tq.task_array != NULL );

	// Release the pointer array (not the tasks themselves, which are
	// freed by FLA_queue_flush()) and mark it as absent.
	free( tq.task_array );
	tq.task_array = NULL;
}
void FLA_queue_flush()
{
	int idx;

	// Release every task recorded in the task array.
	for ( idx = 0; idx < tq.n_tasks; ++idx )
		FLA_task_free( tq.task_array[idx] );

	// Reset the queue to its empty state.
	tq.n_tasks = 0;
	tq.head    = NULL;
	tq.tail    = NULL;
}
// Allocate and initialize a task structure for the given function pointer,
// cost estimate, and argument counts. The caller owns the returned task and
// releases it with FLA_task_free().
//
// Fix: malloc(0) is permitted to return NULL, so the original unconditional
// assert(ptr != NULL) would abort spuriously for a task with zero integer
// or zero FLA_Obj arguments. The asserts now only fire for a genuine
// allocation failure.
FLA_Task* FLA_task_alloc_init( void* func, double cost, int n_int_args, int n_fla_args )
{
	FLA_Task* t;

	// Allocate space for the task structure t.
	t = (FLA_Task*)malloc( sizeof(FLA_Task) );
	assert( t != NULL );

	// Allocate space for the task's integer arguments.
	t->int_arg = (int*)malloc( n_int_args * sizeof(int) );
	assert( n_int_args == 0 || t->int_arg != NULL );

	// Allocate space for the task's FLA_Obj arguments.
	t->fla_arg = (FLA_Obj*)malloc( n_fla_args * sizeof(FLA_Obj) );
	assert( n_fla_args == 0 || t->fla_arg != NULL );

	// Initialize the remaining bookkeeping fields.
	t->cost       = cost;
	t->func       = func;
	t->n_int_args = n_int_args;
	t->n_fla_args = n_fla_args;
	t->next_task  = NULL;

	return t;
}
void FLA_task_free( FLA_Task* t )
{
	// Release the argument arrays first, then the task structure itself.
	free( t->int_arg );
	free( t->fla_arg );
	free( t );
}
#endif
|
ompBD3.c | #include <R.h>
#include <stdint.h>
#include <omp.h>
#define min(A,B) ((A) < (B) ? (A) : (B))
#define max(A,B) ((A) > (B) ? (A) : (B))
#define Data(i,j) data[(j) * (*row) + (i)] //R uses column-major order
static double *minVec, *maxVec, *twoMin, *twoMax;
static uint64_t *count2, *count3;
// For every observation (row) i, accumulate into the file-level arrays
// count2[i] / count3[i] how many 2-row / 3-row subsets of the data span
// observation i in every column -- i.e. i lies within the subset's
// column-wise [min, max] envelope. These are the raw counts the caller
// (ompBD3) normalizes into band-depth-style values.
//
// `data` is column-major (R layout, via the Data macro); twoMin/twoMax/
// minVec/maxVec are scratch buffers and count2/count3 the accumulators,
// all allocated by ompBD3().
//
// Fix: with *row == 0 the original unsigned comparison f1 < *row - 1
// became f1 < (unsigned)-1, spinning through ~UINT_MAX empty iterations.
// Degenerate inputs now return immediately with all counts left at zero.
static void countBD(int *row, int *col, double *data) {
	unsigned f1, f2, f3, i, j;
	unsigned nrow, ncol;

	if (*row < 2 || *col < 1)
		return;
	nrow = (unsigned)*row;
	ncol = (unsigned)*col;

	omp_set_num_threads(omp_get_max_threads());
	for (f1 = 0; f1 < nrow - 1; f1++) {
		for (f2 = f1 + 1; f2 < nrow; f2++) {
			// Column-wise envelope of the row pair (f1, f2).
			for (j = 0; j < ncol; j++) {
				twoMin[j] = min(Data(f1, j), Data(f2, j));
				twoMax[j] = max(Data(f1, j), Data(f2, j));
			}
			// Each thread owns distinct values of i, so count2[i]++ is
			// race-free; twoMin/twoMax are only read here.
			#pragma omp parallel for private(i,j)
			for (i = 0; i < nrow; i++) {
				for (j = 0; j < ncol; j++)
					if (Data(i, j) < twoMin[j] || Data(i, j) > twoMax[j])
						break;
				if (j == ncol)   // row i stayed inside the band in every column
					count2[i]++;
			}
			// Extend the pair's envelope with each third row f3 in turn.
			for (f3 = f2 + 1; f3 < nrow; f3++) {
				for (j = 0; j < ncol; j++) {
					minVec[j] = min(twoMin[j], Data(f3,j));
					maxVec[j] = max(twoMax[j], Data(f3,j));
				}
				#pragma omp parallel for private(i,j)
				for (i = 0; i < nrow; i++) {
					for (j = 0; j < ncol; j++)
						if (Data(i,j) < minVec[j] || Data(i,j) > maxVec[j])
							break;
					if (j == ncol)
						count3[i]++;
				}
			}
		}
	}
}
// Entry point called from R via the .C interface. For each row i of the
// *row x *col column-major matrix `data`, writes into depth[i]
//     count2[i] / C(*row, 2)  +  count3[i] / C(*row, 3)
// where count2/count3 are the subset counts produced by countBD().
//
// Fixes over the original: every allocation is checked (a failed malloc
// was previously dereferenced inside countBD), and the count arrays are
// zero-initialized by calloc() instead of an explicit loop. On allocation
// failure all obtained buffers are released (free(NULL) is a no-op) and
// `depth` is left untouched.
void ompBD3(int *row, int *col, double *data, double *depth) {
	unsigned i;

	count2 = (uint64_t*)calloc((size_t)*row, sizeof(uint64_t));
	count3 = (uint64_t*)calloc((size_t)*row, sizeof(uint64_t));
	twoMin = (double*)malloc(sizeof(double) * (*col));
	twoMax = (double*)malloc(sizeof(double) * (*col));
	minVec = (double*)malloc(sizeof(double) * (*col));
	maxVec = (double*)malloc(sizeof(double) * (*col));

	if (!count2 || !count3 || !twoMin || !twoMax || !minVec || !maxVec)
		goto cleanup;

	countBD(row, col, data);

	// Normalize the raw counts by the number of 2- and 3-row subsets.
	for (i = 0; i < (unsigned)*row; i++)
		depth[i] = (double)count2[i] / (*row * (*row - 1.0) / 2.0) +
		           (double)count3[i] / (*row * (*row - 1.0) * (*row - 2.0) / 6.0);

cleanup:
	free(count2);
	free(count3);
	free(twoMin);
	free(twoMax);
	free(minVec);
	free(maxVec);
}
|
Parser.h | //===--- Parser.h - C Language Parser ---------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
// And has the following additional copyright:
//
// (C) Copyright 2016-2020 Xilinx, Inc.
// All Rights Reserved.
//
//===----------------------------------------------------------------------===//
//
// This file defines the Parser interface.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_PARSE_PARSER_H
#define LLVM_CLANG_PARSE_PARSER_H
#include "clang/AST/Availability.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/OperatorPrecedence.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Lex/CodeCompletionHandler.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/LoopHint.h"
#include "clang/Sema/Sema.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/PrettyStackTrace.h"
#include "llvm/Support/SaveAndRestore.h"
#include <memory>
#include <stack>
namespace clang {
class PragmaHandler;
class Scope;
class BalancedDelimiterTracker;
class CorrectionCandidateCallback;
class DeclGroupRef;
class DiagnosticBuilder;
class Parser;
class ParsingDeclRAIIObject;
class ParsingDeclSpec;
class ParsingDeclarator;
class ParsingFieldDeclarator;
class ColonProtectionRAIIObject;
class InMessageExpressionRAIIObject;
class PoisonSEHIdentifiersRAIIObject;
class VersionTuple;
class OMPClause;
class ObjCTypeParamList;
class ObjCTypeParameter;
/// Parser - This implements a parser for the C family of languages. After
/// parsing units of the grammar, productions are invoked to handle whatever has
/// been read.
///
class Parser : public CodeCompletionHandler {
friend class ColonProtectionRAIIObject;
friend class InMessageExpressionRAIIObject;
friend class PoisonSEHIdentifiersRAIIObject;
friend class ObjCDeclContextSwitch;
friend class ParenBraceBracketBalancer;
friend class BalancedDelimiterTracker;
Preprocessor &PP;
/// Tok - The current token we are peeking ahead. All parsing methods assume
/// that this is valid.
Token Tok;
// PrevTokLocation - The location of the token we previously
// consumed. This token is used for diagnostics where we expected to
// see a token following another token (e.g., the ';' at the end of
// a statement).
SourceLocation PrevTokLocation;
unsigned short ParenCount = 0, BracketCount = 0, BraceCount = 0;
unsigned short MisplacedModuleBeginCount = 0;
/// Actions - These are the callbacks we invoke as we parse various constructs
/// in the file.
Sema &Actions;
DiagnosticsEngine &Diags;
/// ScopeCache - Cache scopes to reduce malloc traffic.
enum { ScopeCacheSize = 16 };
unsigned NumCachedScopes;
Scope *ScopeCache[ScopeCacheSize];
/// Identifiers used for SEH handling in Borland. These are only
/// allowed in particular circumstances
// __except block
IdentifierInfo *Ident__exception_code,
*Ident___exception_code,
*Ident_GetExceptionCode;
// __except filter expression
IdentifierInfo *Ident__exception_info,
*Ident___exception_info,
*Ident_GetExceptionInfo;
// __finally
IdentifierInfo *Ident__abnormal_termination,
*Ident___abnormal_termination,
*Ident_AbnormalTermination;
/// Contextual keywords for Microsoft extensions.
IdentifierInfo *Ident__except;
mutable IdentifierInfo *Ident_sealed;
/// Ident_super - IdentifierInfo for "super", to support fast
/// comparison.
IdentifierInfo *Ident_super;
/// Ident_vector, Ident_bool - cached IdentifierInfos for "vector" and
/// "bool" fast comparison. Only present if AltiVec or ZVector are enabled.
IdentifierInfo *Ident_vector;
IdentifierInfo *Ident_bool;
/// Ident_pixel - cached IdentifierInfos for "pixel" fast comparison.
/// Only present if AltiVec enabled.
IdentifierInfo *Ident_pixel;
/// Objective-C contextual keywords.
mutable IdentifierInfo *Ident_instancetype;
/// \brief Identifier for "introduced".
IdentifierInfo *Ident_introduced;
/// \brief Identifier for "deprecated".
IdentifierInfo *Ident_deprecated;
/// \brief Identifier for "obsoleted".
IdentifierInfo *Ident_obsoleted;
/// \brief Identifier for "unavailable".
IdentifierInfo *Ident_unavailable;
/// \brief Identifier for "message".
IdentifierInfo *Ident_message;
/// \brief Identifier for "strict".
IdentifierInfo *Ident_strict;
/// \brief Identifier for "replacement".
IdentifierInfo *Ident_replacement;
/// Identifiers used by the 'external_source_symbol' attribute.
IdentifierInfo *Ident_language, *Ident_defined_in,
*Ident_generated_declaration;
/// C++0x contextual keywords.
mutable IdentifierInfo *Ident_final;
mutable IdentifierInfo *Ident_GNU_final;
mutable IdentifierInfo *Ident_override;
// C++ type trait keywords that can be reverted to identifiers and still be
// used as type traits.
llvm::SmallDenseMap<IdentifierInfo *, tok::TokenKind> RevertibleTypeTraits;
std::unique_ptr<PragmaHandler> AlignHandler;
std::unique_ptr<PragmaHandler> GCCVisibilityHandler;
std::unique_ptr<PragmaHandler> OptionsHandler;
std::unique_ptr<PragmaHandler> PackHandler;
std::unique_ptr<PragmaHandler> MSStructHandler;
std::unique_ptr<PragmaHandler> UnusedHandler;
std::unique_ptr<PragmaHandler> WeakHandler;
std::unique_ptr<PragmaHandler> RedefineExtnameHandler;
std::unique_ptr<PragmaHandler> FPContractHandler;
std::unique_ptr<PragmaHandler> OpenCLExtensionHandler;
std::unique_ptr<PragmaHandler> OpenMPHandler;
std::unique_ptr<PragmaHandler> PCSectionHandler;
std::unique_ptr<PragmaHandler> XlxHLSHandler;
std::unique_ptr<PragmaHandler> XlxhlsHandler;
std::unique_ptr<PragmaHandler> XlxAPHandler;
std::unique_ptr<PragmaHandler> XlxapHandler;
std::unique_ptr<PragmaHandler> XlxAUTOPILOTHandler;
std::unique_ptr<PragmaHandler> XlxautopilotHandler;
std::unique_ptr<PragmaHandler> ModelComposerHandler;
std::unique_ptr<PragmaHandler> MSCommentHandler;
std::unique_ptr<PragmaHandler> MSDetectMismatchHandler;
std::unique_ptr<PragmaHandler> MSPointersToMembers;
std::unique_ptr<PragmaHandler> MSVtorDisp;
std::unique_ptr<PragmaHandler> MSInitSeg;
std::unique_ptr<PragmaHandler> MSDataSeg;
std::unique_ptr<PragmaHandler> MSBSSSeg;
std::unique_ptr<PragmaHandler> MSConstSeg;
std::unique_ptr<PragmaHandler> MSCodeSeg;
std::unique_ptr<PragmaHandler> MSSection;
std::unique_ptr<PragmaHandler> MSRuntimeChecks;
std::unique_ptr<PragmaHandler> MSIntrinsic;
std::unique_ptr<PragmaHandler> CUDAForceHostDeviceHandler;
std::unique_ptr<PragmaHandler> OptimizeHandler;
std::unique_ptr<PragmaHandler> LoopHintHandler;
std::unique_ptr<PragmaHandler> UnrollHintHandler;
std::unique_ptr<PragmaHandler> NoUnrollHintHandler;
std::unique_ptr<PragmaHandler> FPHandler;
std::unique_ptr<PragmaHandler> STDCFENVHandler;
std::unique_ptr<PragmaHandler> STDCCXLIMITHandler;
std::unique_ptr<PragmaHandler> STDCUnknownHandler;
std::unique_ptr<PragmaHandler> AttributePragmaHandler;
std::unique_ptr<CommentHandler> CommentSemaHandler;
/// Whether the '>' token acts as an operator or not. This will be
/// true except when we are parsing an expression within a C++
/// template argument list, where the '>' closes the template
/// argument list.
bool GreaterThanIsOperator;
/// ColonIsSacred - When this is false, we aggressively try to recover from
/// code like "foo : bar" as if it were a typo for "foo :: bar". This is not
/// safe in case statements and a few other things. This is managed by the
/// ColonProtectionRAIIObject RAII object.
bool ColonIsSacred;
/// \brief When true, we are directly inside an Objective-C message
/// send expression.
///
/// This is managed by the \c InMessageExpressionRAIIObject class, and
/// should not be set directly.
bool InMessageExpression;
/// The "depth" of the template parameters currently being parsed.
unsigned TemplateParameterDepth;
/// \brief RAII helper that records how many template parameter levels it
/// added to a depth counter and removes exactly those levels again on
/// destruction.
class TemplateParameterDepthRAII {
  unsigned &TrackedDepth;
  unsigned LevelsAdded;

public:
  explicit TemplateParameterDepthRAII(unsigned &Depth)
      : TrackedDepth(Depth), LevelsAdded(0) {}

  /// Undo every level added through this object.
  ~TemplateParameterDepthRAII() { TrackedDepth -= LevelsAdded; }

  /// Add a single template parameter level.
  void operator++() {
    ++TrackedDepth;
    ++LevelsAdded;
  }

  /// Add \p D levels at once.
  void addDepth(unsigned D) {
    TrackedDepth += D;
    LevelsAdded += D;
  }

  /// The current depth value.
  unsigned getDepth() const { return TrackedDepth; }
};
/// Factory object for creating AttributeList objects.
AttributeFactory AttrFactory;
/// \brief Gathers and cleans up TemplateIdAnnotations when parsing of a
/// top-level declaration is finished.
SmallVector<TemplateIdAnnotation *, 16> TemplateIds;
/// \brief Identifiers which have been declared within a tentative parse.
SmallVector<IdentifierInfo *, 8> TentativelyDeclaredIdentifiers;
IdentifierInfo *getSEHExceptKeyword();
/// True if we are within an Objective-C container while parsing C-like decls.
///
/// This is necessary because Sema thinks we have left the container
/// to parse the C-like decls, meaning Actions.getObjCDeclContext() will
/// be NULL.
bool ParsingInObjCContainer;
/// Whether to skip parsing of function bodies.
///
/// This option can be used, for example, to speed up searches for
/// declarations/definitions when indexing.
bool SkipFunctionBodies;
/// The location of the expression statement that is being parsed right now.
/// Used to determine if an expression that is being parsed is a statement or
/// just a regular sub-expression.
SourceLocation ExprStatementTokLoc;
public:
/// Construct a parser over \p PP's token stream, reporting into \p Actions.
Parser(Preprocessor &PP, Sema &Actions, bool SkipFunctionBodies);
~Parser() override;
// Simple forwarding accessors for the parser's collaborators and state.
const LangOptions &getLangOpts() const { return PP.getLangOpts(); }
const TargetInfo &getTargetInfo() const { return PP.getTargetInfo(); }
Preprocessor &getPreprocessor() const { return PP; }
Sema &getActions() const { return Actions; }
AttributeFactory &getAttrFactory() { return AttrFactory; }
/// The token currently being peeked at.
const Token &getCurToken() const { return Tok; }
/// The scope Sema currently considers active.
Scope *getCurScope() const { return Actions.getCurScope(); }
void incrementMSManglingNumber() const {
return Actions.incrementMSManglingNumber();
}
/// The Objective-C declaration context tracked by Sema.
Decl *getObjCDeclContext() const { return Actions.getObjCDeclContext(); }
// Type forwarding. All of these are statically 'void*', but they may all be
// different actual classes based on the actions in place.
typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy;
typedef OpaquePtr<TemplateName> TemplateTy;
typedef SmallVector<TemplateParameterList *, 4> TemplateParameterLists;
typedef Sema::FullExprArg FullExprArg;
// Parsing methods.
/// Initialize - Warm up the parser.
///
void Initialize();
/// Parse the first top-level declaration in a translation unit.
bool ParseFirstTopLevelDecl(DeclGroupPtrTy &Result);
/// ParseTopLevelDecl - Parse one top-level declaration. Returns true if
/// the EOF was encountered.
bool ParseTopLevelDecl(DeclGroupPtrTy &Result);
/// Convenience overload that parses one top-level declaration and discards
/// the resulting declaration group. Returns true on EOF.
bool ParseTopLevelDecl() {
DeclGroupPtrTy Result;
return ParseTopLevelDecl(Result);
}
/// ConsumeToken - Consume the current 'peek token' and lex the next one.
/// This does not work with special tokens: string literals, code completion,
/// annotation tokens and balanced tokens must be handled using the specific
/// consume methods.
/// Returns the location of the consumed token.
SourceLocation ConsumeToken() {
assert(!isTokenSpecial() &&
"Should consume special tokens with Consume*Token");
// Record where the consumed token started before advancing.
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return PrevTokLocation;
}
/// Consume the current token only if it is of kind \p Expected.
/// Returns true (and advances) on a match, false (no state change) otherwise.
bool TryConsumeToken(tok::TokenKind Expected) {
if (Tok.isNot(Expected))
return false;
assert(!isTokenSpecial() &&
"Should consume special tokens with Consume*Token");
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return true;
}
/// As above, but on success also stores the consumed token's location
/// into \p Loc.
bool TryConsumeToken(tok::TokenKind Expected, SourceLocation &Loc) {
if (!TryConsumeToken(Expected))
return false;
Loc = PrevTokLocation;
return true;
}
/// ConsumeAnyToken - Dispatch to the right Consume* method based on the
/// current token type. This should only be used in cases where the type of
/// the token really isn't known, e.g. in error recovery.
SourceLocation ConsumeAnyToken(bool ConsumeCodeCompletionTok = false) {
if (isTokenParen())
return ConsumeParen();
if (isTokenBracket())
return ConsumeBracket();
if (isTokenBrace())
return ConsumeBrace();
if (isTokenStringLiteral())
return ConsumeStringToken();
// Code-completion tokens are either consumed outright or routed to the
// generic completion handler, per the caller's request.
if (Tok.is(tok::code_completion))
return ConsumeCodeCompletionTok ? ConsumeCodeCompletionToken()
: handleUnexpectedCodeCompletionToken();
if (Tok.isAnnotation())
return ConsumeAnnotationToken();
return ConsumeToken();
}
/// The source location just past the end of the previously consumed token.
SourceLocation getEndOfPreviousToken() {
return PP.getLocForEndOfToken(PrevTokLocation);
}
/// Retrieve the underscored keyword (_Nonnull, _Nullable) that corresponds
/// to the given nullability kind.
IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability) {
return Actions.getNullabilityKeyword(nullability);
}
private:
//===--------------------------------------------------------------------===//
// Low-Level token peeking and consumption methods.
//
/// isTokenParen - Return true if the cur token is '(' or ')'.
bool isTokenParen() const {
return Tok.getKind() == tok::l_paren || Tok.getKind() == tok::r_paren;
}
/// isTokenBracket - Return true if the cur token is '[' or ']'.
bool isTokenBracket() const {
return Tok.getKind() == tok::l_square || Tok.getKind() == tok::r_square;
}
/// isTokenBrace - Return true if the cur token is '{' or '}'.
bool isTokenBrace() const {
return Tok.getKind() == tok::l_brace || Tok.getKind() == tok::r_brace;
}
/// isTokenStringLiteral - True if this token is a string-literal.
bool isTokenStringLiteral() const {
return tok::isStringLiteral(Tok.getKind());
}
/// isTokenSpecial - True if this token requires special consumption methods.
bool isTokenSpecial() const {
return isTokenStringLiteral() || isTokenParen() || isTokenBracket() ||
isTokenBrace() || Tok.is(tok::code_completion) || Tok.isAnnotation();
}
/// \brief Returns true if the current token is '=' or is a type of '='.
/// For typos, give a fixit to '='
bool isTokenEqualOrEqualTypo();
/// \brief Return the current token to the token stream and make the given
/// token the current token.
void UnconsumeToken(Token &Consumed) {
// Push the current token back onto the preprocessor's stream, make
// Consumed current, then re-queue the old current token so it is seen
// next. The EnterToken/Lex/EnterToken ordering is what makes this work.
Token Next = Tok;
PP.EnterToken(Consumed);
PP.Lex(Tok);
PP.EnterToken(Next);
}
/// Consume an annotation token. PrevTokLocation is set to the annotation's
/// *end* so that getEndOfPreviousToken() behaves sensibly; the annotation's
/// start location is returned.
SourceLocation ConsumeAnnotationToken() {
assert(Tok.isAnnotation() && "wrong consume method");
SourceLocation Loc = Tok.getLocation();
PrevTokLocation = Tok.getAnnotationEndLoc();
PP.Lex(Tok);
return Loc;
}
/// ConsumeParen - This consume method keeps the paren count up-to-date.
///
SourceLocation ConsumeParen() {
assert(isTokenParen() && "wrong consume method");
if (Tok.getKind() == tok::l_paren)
++ParenCount;
else if (ParenCount)
--ParenCount; // Don't let unbalanced )'s drive the count negative.
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return PrevTokLocation;
}
/// ConsumeBracket - This consume method keeps the bracket count up-to-date.
///
SourceLocation ConsumeBracket() {
assert(isTokenBracket() && "wrong consume method");
if (Tok.getKind() == tok::l_square)
++BracketCount;
else if (BracketCount)
--BracketCount; // Don't let unbalanced ]'s drive the count negative.
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return PrevTokLocation;
}
/// ConsumeBrace - This consume method keeps the brace count up-to-date.
///
SourceLocation ConsumeBrace() {
assert(isTokenBrace() && "wrong consume method");
if (Tok.getKind() == tok::l_brace)
++BraceCount;
else if (BraceCount)
--BraceCount; // Don't let unbalanced }'s drive the count negative.
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return PrevTokLocation;
}
/// ConsumeStringToken - Consume the current 'peek token', lexing a new one
/// and returning the token kind. This method is specific to strings, as it
/// handles string literal concatenation, as per C99 5.1.1.2, translation
/// phase #6.
SourceLocation ConsumeStringToken() {
assert(isTokenStringLiteral() &&
"Should only consume string literals with this method");
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return PrevTokLocation;
}
/// \brief Consume the current code-completion token.
///
/// This routine can be called to consume the code-completion token and
/// continue processing in special cases where \c cutOffParsing() isn't
/// desired, such as token caching or completion with lookahead.
SourceLocation ConsumeCodeCompletionToken() {
assert(Tok.is(tok::code_completion));
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return PrevTokLocation;
}
///\ brief When we are consuming a code-completion token without having
/// matched specific position in the grammar, provide code-completion results
/// based on context.
///
/// \returns the source location of the code-completion token.
SourceLocation handleUnexpectedCodeCompletionToken();
/// \brief Abruptly cut off parsing; mainly used when we have reached the
/// code-completion point.
///
/// Records that the completion point was reached (when code completion is
/// enabled) and replaces the current token with eof so that all subsequent
/// parsing loops terminate as if the input ended here.
void cutOffParsing() {
  if (PP.isCodeCompletionEnabled())
    PP.setCodeCompletionReached();
  // Cut off parsing by acting as if we reached the end-of-file.
  Tok.setKind(tok::eof);
}
/// \brief Determine if we're at the end of the file or at a transition
/// between modules.
bool isEofOrEom() {
tok::TokenKind Kind = Tok.getKind();
return Kind == tok::eof || Kind == tok::annot_module_begin ||
Kind == tok::annot_module_end || Kind == tok::annot_module_include;
}
/// \brief Checks if the \p Level is valid for use in a fold expression.
bool isFoldOperator(prec::Level Level) const;
/// \brief Checks if the \p Kind is a valid operator for fold expressions.
bool isFoldOperator(tok::TokenKind Kind) const;
/// \brief Initialize all pragma handlers.
void initializePragmaHandlers();
/// \brief Destroy and reset all pragma handlers.
void resetPragmaHandlers();
/// \brief Handle the annotation token produced for #pragma unused(...)
void HandlePragmaUnused();
/// \brief Handle the annotation token produced for
/// #pragma GCC visibility...
void HandlePragmaVisibility();
/// \brief Handle the annotation token produced for
/// #pragma pack...
void HandlePragmaPack();
/// \brief Handle the annotation token produced for
/// #pragma ms_struct...
void HandlePragmaMSStruct();
/// \brief Handle the annotation token produced for
/// #pragma comment...
void HandlePragmaMSComment();
void HandlePragmaMSPointersToMembers();
void HandlePragmaMSVtorDisp();
void HandlePragmaMSPragma();
bool HandlePragmaMSSection(StringRef PragmaName,
SourceLocation PragmaLocation);
bool HandlePragmaMSSegment(StringRef PragmaName,
SourceLocation PragmaLocation);
bool HandlePragmaMSInitSeg(StringRef PragmaName,
SourceLocation PragmaLocation);
/// \brief Handle the annotation token produced for
/// #pragma align...
void HandlePragmaAlign();
/// \brief Handle the annotation token produced for
/// #pragma clang __debug dump...
void HandlePragmaDump();
/// \brief Handle the annotation token produced for
/// #pragma weak id...
void HandlePragmaWeak();
/// \brief Handle the annotation token produced for
/// #pragma weak id = id...
void HandlePragmaWeakAlias();
/// \brief Handle the annotation token produced for
/// #pragma redefine_extname...
void HandlePragmaRedefineExtname();
/// \brief Handle the annotation token produced for
/// #pragma STDC FP_CONTRACT...
void HandlePragmaFPContract();
/// \brief Handle the annotation token produced for
/// #pragma clang fp ...
void HandlePragmaFP();
/// \brief Handle the annotation token produced for
/// #pragma OPENCL EXTENSION...
void HandlePragmaOpenCLExtension();
/// \brief Handle the annotation token produced for
/// #pragma HLS|AP|AUTOPILOT ...
void HandleXlxPragma();
static bool HasDataflowAttribute(AttributeList *List);
void RemoveDataflowAttribute(ParsedAttributes &From);
struct ParsedAttributesWithRange;
void SinkParsedHLSUnrollPragmas(ParsedAttributesWithRange &To, Scope *P);
void SinkLabelAttributes(ParsedAttributesWithRange &To,
ParsedAttributesWithRange &From,
const Token &IdentTok);
/// \brief Handle the annotation token produced for
/// #pragma clang __debug captured
StmtResult HandlePragmaCaptured();
/// \brief Handle the annotation token produced for
/// #pragma clang loop and #pragma unroll.
bool HandlePragmaLoopHint(LoopHint &Hint);
bool ParsePragmaAttributeSubjectMatchRuleSet(
attr::ParsedSubjectMatchRuleSet &SubjectMatchRules,
SourceLocation &AnyLoc, SourceLocation &LastMatchRuleEndLoc);
void HandlePragmaAttribute();
/// GetLookAheadToken - This peeks ahead N tokens and returns that token
/// without consuming any tokens. LookAhead(0) returns 'Tok', LookAhead(1)
/// returns the token after Tok, etc.
///
/// Note that this differs from the Preprocessor's LookAhead method, because
/// the Parser always has one token lexed that the preprocessor doesn't.
///
const Token &GetLookAheadToken(unsigned N) {
  // N == 0 is the current token; past eof we never look further ahead.
  if (N != 0 && !Tok.is(tok::eof))
    return PP.LookAhead(N-1);
  return Tok;
}
public:
/// NextToken - This peeks ahead one token and returns it without
/// consuming it. (The current token, Tok, is left untouched.)
const Token &NextToken() {
  return PP.LookAhead(0);
}
/// getTypeAnnotation - Read a parsed type out of an annotation token.
/// Assumes \p Tok carries a type in its opaque annotation value — callers
/// are expected to check the token kind first (TODO confirm at call sites).
static ParsedType getTypeAnnotation(const Token &Tok) {
  return ParsedType::getFromOpaquePtr(Tok.getAnnotationValue());
}
private:
/// \brief Store a parsed type in an annotation token's opaque value slot.
/// Inverse of getTypeAnnotation().
static void setTypeAnnotation(Token &Tok, ParsedType T) {
  Tok.setAnnotationValue(T.getAsOpaquePtr());
}
/// \brief Read an already-translated primary expression out of an annotation
/// token. Inverse of setExprAnnotation().
static ExprResult getExprAnnotation(const Token &Tok) {
  return ExprResult::getFromOpaquePointer(Tok.getAnnotationValue());
}
/// \brief Set the primary expression corresponding to the given annotation
/// token. Inverse of getExprAnnotation().
static void setExprAnnotation(Token &Tok, ExprResult ER) {
  Tok.setAnnotationValue(ER.getAsOpaquePointer());
}
public:
// If NeedType is true, then TryAnnotateTypeOrScopeToken will try harder to
// find a type name by attempting typo correction.
bool TryAnnotateTypeOrScopeToken();
bool TryAnnotateTypeOrScopeTokenAfterScopeSpec(CXXScopeSpec &SS,
bool IsNewScope);
bool TryAnnotateCXXScopeToken(bool EnteringContext = false);
private:
/// \brief Possible outcomes of an attempt to annotate a name token;
/// returned by TryAnnotateName().
enum AnnotatedNameKind {
  /// Annotation has failed and emitted an error.
  ANK_Error,
  /// The identifier is a tentatively-declared name.
  ANK_TentativeDecl,
  /// The identifier is a template name. FIXME: Add an annotation for that.
  ANK_TemplateName,
  /// The identifier can't be resolved.
  ANK_Unresolved,
  /// Annotation was successful.
  ANK_Success
};
AnnotatedNameKind
TryAnnotateName(bool IsAddressOfOperand,
std::unique_ptr<CorrectionCandidateCallback> CCC = nullptr);
/// Push a tok::annot_cxxscope token onto the token stream.
void AnnotateScopeToken(CXXScopeSpec &SS, bool IsNewAnnotation);
/// TryAltiVecToken - Check for context-sensitive AltiVec identifier tokens,
/// replacing them with the non-context-sensitive keywords. This returns
/// true if the token was replaced.
bool TryAltiVecToken(DeclSpec &DS, SourceLocation Loc,
                     const char *&PrevSpec, unsigned &DiagID,
                     bool &isInvalid) {
  const LangOptions &Opts = getLangOpts();
  if (!Opts.AltiVec && !Opts.ZVector)
    return false;
  // 'pixel' is only context-sensitive under AltiVec, not ZVector.
  IdentifierInfo *II = Tok.getIdentifierInfo();
  bool IsCandidate = II == Ident_vector || II == Ident_bool ||
                     (Opts.AltiVec && II == Ident_pixel);
  if (!IsCandidate)
    return false;
  return TryAltiVecTokenOutOfLine(DS, Loc, PrevSpec, DiagID, isInvalid);
}
/// TryAltiVecVectorToken - Check for context-sensitive AltiVec vector
/// identifier token, replacing it with the non-context-sensitive __vector.
/// This returns true if the token was replaced.
bool TryAltiVecVectorToken() {
if ((!getLangOpts().AltiVec && !getLangOpts().ZVector) ||
Tok.getIdentifierInfo() != Ident_vector) return false;
return TryAltiVecVectorTokenOutOfLine();
}
bool TryAltiVecVectorTokenOutOfLine();
bool TryAltiVecTokenOutOfLine(DeclSpec &DS, SourceLocation Loc,
const char *&PrevSpec, unsigned &DiagID,
bool &isInvalid);
/// Returns true if the current token is the identifier 'instancetype'.
///
/// Should only be used in Objective-C language modes.
bool isObjCInstancetype() {
  assert(getLangOpts().ObjC1);
  // Annotation tokens carry no identifier info.
  if (Tok.isAnnotation())
    return false;
  // Lazily look up and cache the 'instancetype' identifier on first use.
  if (!Ident_instancetype)
    Ident_instancetype = PP.getIdentifierInfo("instancetype");
  return Tok.getIdentifierInfo() == Ident_instancetype;
}
/// TryKeywordIdentFallback - For compatibility with system headers using
/// keywords as identifiers, attempt to convert the current token to an
/// identifier and optionally disable the keyword for the remainder of the
/// translation unit. This returns false if the token was not replaced,
/// otherwise emits a diagnostic and returns true.
bool TryKeywordIdentFallback(bool DisableKeyword);
/// \brief Get the TemplateIdAnnotation from the token.
TemplateIdAnnotation *takeTemplateIdAnnotation(const Token &tok);
/// TentativeParsingAction - An object that is used as a kind of "tentative
/// parsing transaction". It gets instantiated to mark the token position and
/// after the token consumption is done, Commit() or Revert() is called to
/// either "commit the consumed tokens" or revert to the previously marked
/// token position. Example:
///
///   TentativeParsingAction TPA(*this);
///   ConsumeToken();
///   ....
///   TPA.Revert();
///
class TentativeParsingAction {
  Parser &P;
  // Parser state captured at construction, restored by Revert().
  Token PrevTok;
  size_t PrevTentativelyDeclaredIdentifierCount;
  unsigned short PrevParenCount, PrevBracketCount, PrevBraceCount;
  bool isActive; // False once Commit() or Revert() has been called.
public:
  /// Capture the current parser state and tell the preprocessor to start
  /// caching tokens so we can backtrack to this position later.
  explicit TentativeParsingAction(Parser& p) : P(p) {
    PrevTok = P.Tok;
    PrevTentativelyDeclaredIdentifierCount =
        P.TentativelyDeclaredIdentifiers.size();
    PrevParenCount = P.ParenCount;
    PrevBracketCount = P.BracketCount;
    PrevBraceCount = P.BraceCount;
    P.PP.EnableBacktrackAtThisPos();
    isActive = true;
  }
  /// Accept the tokens consumed since construction; the cached tokens are
  /// released and backtracking is no longer possible.
  void Commit() {
    assert(isActive && "Parsing action was finished!");
    P.TentativelyDeclaredIdentifiers.resize(
        PrevTentativelyDeclaredIdentifierCount);
    P.PP.CommitBacktrackedTokens();
    isActive = false;
  }
  /// Rewind the preprocessor and restore all captured parser state, as if
  /// nothing had been consumed since construction.
  void Revert() {
    assert(isActive && "Parsing action was finished!");
    P.PP.Backtrack();
    P.Tok = PrevTok;
    P.TentativelyDeclaredIdentifiers.resize(
        PrevTentativelyDeclaredIdentifierCount);
    P.ParenCount = PrevParenCount;
    P.BracketCount = PrevBracketCount;
    P.BraceCount = PrevBraceCount;
    isActive = false;
  }
  ~TentativeParsingAction() {
    assert(!isActive && "Forgot to call Commit or Revert!");
  }
};
/// A TentativeParsingAction that automatically reverts in its destructor.
/// Useful for disambiguation parses that will always be reverted.
///
/// Private inheritance deliberately hides Commit(): this action can only
/// ever be reverted.
class RevertingTentativeParsingAction
    : private Parser::TentativeParsingAction {
public:
  RevertingTentativeParsingAction(Parser &P)
      : Parser::TentativeParsingAction(P) {}
  ~RevertingTentativeParsingAction() { Revert(); }
};
class UnannotatedTentativeParsingAction;
/// ObjCDeclContextSwitch - An object used to switch context from
/// an objective-c decl context to its enclosing decl context and
/// back.
class ObjCDeclContextSwitch {
  Parser &P;
  Decl *DC; // The Objective-C container we temporarily exited, or null.
  SaveAndRestore<bool> WithinObjCContainer; // Restores the flag on scope exit.
public:
  /// Temporarily exit the current Objective-C container context (if any);
  /// the destructor re-enters it.
  explicit ObjCDeclContextSwitch(Parser &p)
      : P(p), DC(p.getObjCDeclContext()),
        WithinObjCContainer(P.ParsingInObjCContainer, DC != nullptr) {
    if (DC)
      P.Actions.ActOnObjCTemporaryExitContainerContext(cast<DeclContext>(DC));
  }
  ~ObjCDeclContextSwitch() {
    if (DC)
      P.Actions.ActOnObjCReenterContainerContext(cast<DeclContext>(DC));
  }
};
/// ExpectAndConsume - The parser expects that 'ExpectedTok' is next in the
/// input. If so, it is consumed and false is returned.
///
/// If a trivial punctuator misspelling is encountered, a FixIt error
/// diagnostic is issued and false is returned after recovery.
///
/// If the input is malformed, this emits the specified diagnostic and true is
/// returned.
bool ExpectAndConsume(tok::TokenKind ExpectedTok,
unsigned Diag = diag::err_expected,
StringRef DiagMsg = "");
/// \brief The parser expects a semicolon and, if present, will consume it.
///
/// If the next token is not a semicolon, this emits the specified diagnostic,
/// or, if there's just some closing-delimiter noise (e.g., ')' or ']') prior
/// to the semicolon, consumes that extra token.
bool ExpectAndConsumeSemi(unsigned DiagID);
/// \brief The kind of extra semi diagnostic to emit.
enum ExtraSemiKind {
OutsideFunction = 0,
InsideStruct = 1,
InstanceVariableList = 2,
AfterMemberFunctionDefinition = 3
};
/// \brief Consume any extra semi-colons until the end of the line.
void ConsumeExtraSemi(ExtraSemiKind Kind, unsigned TST = TST_unspecified);
/// Return false if the next token is an identifier. An 'expected identifier'
/// error is emitted otherwise.
///
/// The parser tries to recover from the error by checking if the next token
/// is a C++ keyword when parsing Objective-C++. Return false if the recovery
/// was successful.
bool expectIdentifier();
public:
//===--------------------------------------------------------------------===//
// Scope manipulation
/// ParseScope - Introduces a new scope for parsing. The kind of
/// scope is determined by ScopeFlags. Objects of this type should
/// be created on the stack to coincide with the position where the
/// parser enters the new scope, and this object's constructor will
/// create that new scope. Similarly, once the object is destroyed
/// the parser will exit the scope.
/// ParseScope - RAII object introducing a new parser scope; see the
/// commentary above. A null Self means this object entered no scope and is
/// therefore a no-op on exit.
class ParseScope {
  Parser *Self;
  ParseScope(const ParseScope &) = delete;
  void operator=(const ParseScope &) = delete;
public:
  // ParseScope - Construct a new object to manage a scope in the
  // parser Self where the new Scope is created with the flags
  // ScopeFlags, but only when we aren't about to enter a compound statement.
  ParseScope(Parser *Self, unsigned ScopeFlags, bool EnteredScope = true,
             bool BeforeCompoundStmt = false)
      : Self(Self) {
    if (EnteredScope && !BeforeCompoundStmt)
      Self->EnterScope(ScopeFlags);
    else {
      if (BeforeCompoundStmt)
        Self->incrementMSManglingNumber();
      // Mark this object inert so Exit()/~ParseScope do nothing.
      this->Self = nullptr;
    }
  }
  // Exit - Exit the scope associated with this object now, rather
  // than waiting until the object is destroyed.
  void Exit(ParsedAttributes *ScopeAttr = nullptr) {
    if (Self) {
      Self->ExitScope(ScopeAttr);
      Self = nullptr;
    }
  }
  ~ParseScope() {
    Exit();
  }
};
/// EnterScope - Start a new scope.
void EnterScope(unsigned ScopeFlags);
/// ExitScope - Pop a scope off the scope stack.
void ExitScope(ParsedAttributes *ScopeAttr = nullptr);
private:
/// \brief RAII object used to modify the scope flags for the current scope.
/// The constructor swaps in ScopeFlags; the destructor restores OldFlags.
class ParseScopeFlags {
  Scope *CurScope;   // Scope whose flags were modified, or null if unmanaged.
  unsigned OldFlags; // Flags to restore on destruction.
  ParseScopeFlags(const ParseScopeFlags &) = delete;
  void operator=(const ParseScopeFlags &) = delete;
public:
  ParseScopeFlags(Parser *Self, unsigned ScopeFlags, bool ManageFlags = true);
  ~ParseScopeFlags();
};
//===--------------------------------------------------------------------===//
// Diagnostic Emission and Error recovery.
public:
DiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID);
DiagnosticBuilder Diag(const Token &Tok, unsigned DiagID);
/// \brief Emit a diagnostic at the location of the current token.
DiagnosticBuilder Diag(unsigned DiagID) {
  return Diag(Tok, DiagID);
}
private:
void SuggestParentheses(SourceLocation Loc, unsigned DK,
SourceRange ParenRange);
void CheckNestedObjCContexts(SourceLocation AtLoc);
public:
/// \brief Control flags for SkipUntil functions. These are bit flags and
/// may be combined with the operator| below.
enum SkipUntilFlags {
  StopAtSemi = 1 << 0, ///< Stop skipping at semicolon
  /// \brief Stop skipping at specified token, but don't skip the token itself
  StopBeforeMatch = 1 << 1,
  StopAtCodeCompletion = 1 << 2 ///< Stop at code completion
};
/// \brief Combine two SkipUntilFlags values into a single bitmask.
friend constexpr SkipUntilFlags operator|(SkipUntilFlags L,
                                          SkipUntilFlags R) {
  return static_cast<SkipUntilFlags>(static_cast<unsigned>(L) |
                                     static_cast<unsigned>(R));
}
/// SkipUntil - Read tokens until we get to the specified token, then consume
/// it (unless StopBeforeMatch is specified). Because we cannot guarantee
/// that the token will ever occur, this skips to the next token, or to some
/// likely good stopping point. If Flags has StopAtSemi flag, skipping will
/// stop at a ';' character.
///
/// If SkipUntil finds the specified token, it returns true, otherwise it
/// returns false.
bool SkipUntil(tok::TokenKind T,
               SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
  return SkipUntil(llvm::makeArrayRef(T), Flags);
}
/// \brief Skip until either of two token kinds is found.
bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2,
               SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
  tok::TokenKind TokArray[] = {T1, T2};
  return SkipUntil(TokArray, Flags);
}
/// \brief Skip until any of three token kinds is found.
bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2, tok::TokenKind T3,
               SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
  tok::TokenKind TokArray[] = {T1, T2, T3};
  return SkipUntil(TokArray, Flags);
}
/// \brief Skip until any token kind in \p Toks is found (workhorse overload,
/// defined out of line).
bool SkipUntil(ArrayRef<tok::TokenKind> Toks,
               SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0));
/// SkipMalformedDecl - Read tokens until we get to some likely good stopping
/// point for skipping past a simple-declaration.
void SkipMalformedDecl();
private:
//===--------------------------------------------------------------------===//
// Lexing and parsing of C++ inline methods.
struct ParsingClass;
/// [class.mem]p1: "... the class is regarded as complete within
/// - function bodies
/// - default arguments
/// - exception-specifications (TODO: C++0x)
/// - and brace-or-equal-initializers for non-static data members
/// (including such things in nested classes)."
/// LateParsedDeclarations build the tree of those elements so they can
/// be parsed after parsing the top-level class.
class LateParsedDeclaration {
public:
  virtual ~LateParsedDeclaration();
  // The Parse* hooks are overridden by the subclasses below; the base
  // implementations are defined out of line (presumably no-ops — confirm).
  virtual void ParseLexedMethodDeclarations();
  virtual void ParseLexedMemberInitializers();
  virtual void ParseLexedMethodDefs();
  virtual void ParseLexedAttributes();
};
/// Inner node of the LateParsedDeclaration tree that parses
/// all its members recursively.
class LateParsedClass : public LateParsedDeclaration {
public:
  LateParsedClass(Parser *P, ParsingClass *C);
  ~LateParsedClass() override;
  void ParseLexedMethodDeclarations() override;
  void ParseLexedMemberInitializers() override;
  void ParseLexedMethodDefs() override;
  void ParseLexedAttributes() override;
private:
  Parser *Self;        // The parser driving the late parsing.
  ParsingClass *Class; // The nested class whose members are late-parsed.
};
/// Contains the lexed tokens of an attribute with arguments that
/// may reference member variables and so need to be parsed at the
/// end of the class declaration after parsing all other
/// member declarations.
/// FIXME: Perhaps we should change the name of LateParsedDeclaration to
/// LateParsedTokens.
struct LateParsedAttribute : public LateParsedDeclaration {
  Parser *Self;
  CachedTokens Toks;           // Lexed tokens of the attribute's arguments.
  IdentifierInfo &AttrName;    // Name of the attribute.
  SourceLocation AttrNameLoc;  // Location of the attribute name.
  SmallVector<Decl*, 2> Decls; // Declarations this attribute appertains to.
  explicit LateParsedAttribute(Parser *P, IdentifierInfo &Name,
                               SourceLocation Loc)
    : Self(P), AttrName(Name), AttrNameLoc(Loc) {}
  void ParseLexedAttributes() override;
  /// \brief Record another declaration the attribute applies to.
  void addDecl(Decl *D) { Decls.push_back(D); }
};
// A list of late-parsed attributes. Used by ParseGNUAttributes.
class LateParsedAttrList: public SmallVector<LateParsedAttribute *, 2> {
public:
  LateParsedAttrList(bool PSoon = false) : ParseSoon(PSoon) { }
  /// \brief Whether these attributes are to be parsed shortly after
  /// creation. Const-qualified so it can be queried through const
  /// references (the original accessor was non-const for no reason).
  bool parseSoon() const { return ParseSoon; }
private:
  bool ParseSoon; // Are we planning to parse these shortly after creation?
};
/// Contains the lexed tokens of a member function definition
/// which needs to be parsed at the end of the class declaration
/// after parsing all other member declarations.
struct LexedMethod : public LateParsedDeclaration {
  Parser *Self;
  Decl *D;           // The method (or template) whose body was cached.
  CachedTokens Toks; // The cached tokens of the method body.
  /// \brief Whether this member function had an associated template
  /// scope. When true, D is a template declaration;
  /// otherwise, it is a member function declaration.
  bool TemplateScope;
  explicit LexedMethod(Parser* P, Decl *MD)
      : Self(P), D(MD), TemplateScope(false) {}
  void ParseLexedMethodDefs() override;
};
/// LateParsedDefaultArgument - Keeps track of a parameter that may
/// have a default argument that cannot be parsed yet because it
/// occurs within a member function declaration inside the class
/// (C++ [class.mem]p2).
struct LateParsedDefaultArgument {
  explicit LateParsedDefaultArgument(Decl *P,
                                     std::unique_ptr<CachedTokens> Toks = nullptr)
    : Param(P), Toks(std::move(Toks)) { }
  /// Param - The parameter declaration for this parameter.
  Decl *Param;
  /// Toks - The sequence of tokens that comprises the default
  /// argument expression, not including the '=' or the terminating
  /// ')' or ','. This will be NULL for parameters that have no
  /// default argument.
  std::unique_ptr<CachedTokens> Toks;
};
/// LateParsedMethodDeclaration - A method declaration inside a class that
/// contains at least one entity whose parsing needs to be delayed
/// until the class itself is completely-defined, such as a default
/// argument (C++ [class.mem]p2).
struct LateParsedMethodDeclaration : public LateParsedDeclaration {
  explicit LateParsedMethodDeclaration(Parser *P, Decl *M)
      : Self(P), Method(M), TemplateScope(false),
        ExceptionSpecTokens(nullptr) {}
  void ParseLexedMethodDeclarations() override;
  Parser* Self;
  /// Method - The method declaration.
  Decl *Method;
  /// \brief Whether this member function had an associated template
  /// scope. When true, D is a template declaration;
  /// otherwise, it is a member function declaration.
  bool TemplateScope;
  /// DefaultArgs - Contains the parameters of the function and
  /// their default arguments. At least one of the parameters will
  /// have a default argument, but all of the parameters of the
  /// method will be stored so that they can be reintroduced into
  /// scope at the appropriate times.
  SmallVector<LateParsedDefaultArgument, 8> DefaultArgs;
  /// \brief The set of tokens that make up an exception-specification that
  /// has not yet been parsed.
  CachedTokens *ExceptionSpecTokens;
};
/// LateParsedMemberInitializer - An initializer for a non-static class data
/// member whose parsing must be delayed until the class is completely
/// defined (C++11 [class.mem]p2).
struct LateParsedMemberInitializer : public LateParsedDeclaration {
  LateParsedMemberInitializer(Parser *P, Decl *FD)
    : Self(P), Field(FD) { }
  void ParseLexedMemberInitializers() override;
  Parser *Self;
  /// Field - The field declaration.
  Decl *Field;
  /// CachedTokens - The sequence of tokens that comprises the initializer,
  /// including any leading '='.
  CachedTokens Toks;
};
/// LateParsedDeclarationsContainer - During parsing of a top (non-nested)
/// C++ class, its method declarations that contain parts that won't be
/// parsed until after the definition is completed (C++ [class.mem]p2),
/// the method declarations and possibly attached inline definitions
/// will be stored here with the tokens that will be parsed to create those
/// entities.
typedef SmallVector<LateParsedDeclaration*,2> LateParsedDeclarationsContainer;
/// \brief Representation of a class that has been parsed, including
/// any member function declarations or definitions that need to be
/// parsed after the corresponding top-level class is complete.
struct ParsingClass {
  ParsingClass(Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface)
    : TopLevelClass(TopLevelClass), TemplateScope(false),
      IsInterface(IsInterface), TagOrTemplate(TagOrTemplate) { }
  /// \brief Whether this is a "top-level" class, meaning that it is
  /// not nested within another class.
  bool TopLevelClass : 1;
  /// \brief Whether this class had an associated template
  /// scope. When true, TagOrTemplate is a template declaration;
  /// otherwise, it is a tag declaration.
  bool TemplateScope : 1;
  /// \brief Whether this class is an __interface.
  bool IsInterface : 1;
  /// \brief The class or class template whose definition we are parsing.
  Decl *TagOrTemplate;
  /// LateParsedDeclarations - Method declarations, inline definitions and
  /// nested classes that contain pieces whose parsing will be delayed until
  /// the top-level class is fully defined.
  LateParsedDeclarationsContainer LateParsedDeclarations;
};
/// \brief The stack of classes that is currently being
/// parsed. Nested and local classes will be pushed onto this stack
/// when they are parsed, and removed afterward.
std::stack<ParsingClass *> ClassStack;
/// \brief Return the innermost class currently being parsed.
/// Must not be called with an empty ClassStack.
ParsingClass &getCurrentClass() {
  assert(!ClassStack.empty() && "No lexed method stacks!");
  return *ClassStack.top();
}
/// \brief RAII object used to manage the parsing of a class definition.
/// Pushes a ParsingClass in the constructor and pops it either explicitly
/// via Pop() or implicitly in the destructor.
class ParsingClassDefinition {
  Parser &P;
  bool Popped;                    // True once Pop() has been called.
  Sema::ParsingClassState State;  // Token returned by Sema for the push.
public:
  ParsingClassDefinition(Parser &P, Decl *TagOrTemplate, bool TopLevelClass,
                         bool IsInterface)
      : P(P), Popped(false),
        State(P.PushParsingClass(TagOrTemplate, TopLevelClass, IsInterface)) {
  }
  /// \brief Pop this class of the stack.
  void Pop() {
    assert(!Popped && "Nested class has already been popped");
    Popped = true;
    P.PopParsingClass(State);
  }
  ~ParsingClassDefinition() {
    if (!Popped)
      P.PopParsingClass(State);
  }
};
/// \brief Contains information about any template-specific
/// information that has been parsed prior to parsing declaration
/// specifiers.
struct ParsedTemplateInfo {
  /// \brief Construct as "not a template at all".
  /// Note: LastParameterListWasEmpty must be initialized here — the
  /// original constructor left it indeterminate, so any read of it on a
  /// default-constructed instance was undefined behavior.
  ParsedTemplateInfo()
    : Kind(NonTemplate), TemplateParams(nullptr), TemplateLoc(),
      LastParameterListWasEmpty(false) { }
  /// \brief Construct for a template declaration or explicit specialization.
  ParsedTemplateInfo(TemplateParameterLists *TemplateParams,
                     bool isSpecialization,
                     bool lastParameterListWasEmpty = false)
    : Kind(isSpecialization? ExplicitSpecialization : Template),
      TemplateParams(TemplateParams),
      LastParameterListWasEmpty(lastParameterListWasEmpty) { }
  /// \brief Construct for an explicit instantiation.
  explicit ParsedTemplateInfo(SourceLocation ExternLoc,
                              SourceLocation TemplateLoc)
    : Kind(ExplicitInstantiation), TemplateParams(nullptr),
      ExternLoc(ExternLoc), TemplateLoc(TemplateLoc),
      LastParameterListWasEmpty(false){ }
  /// \brief The kind of template we are parsing.
  enum {
    /// \brief We are not parsing a template at all.
    NonTemplate = 0,
    /// \brief We are parsing a template declaration.
    Template,
    /// \brief We are parsing an explicit specialization.
    ExplicitSpecialization,
    /// \brief We are parsing an explicit instantiation.
    ExplicitInstantiation
  } Kind;
  /// \brief The template parameter lists, for template declarations
  /// and explicit specializations.
  TemplateParameterLists *TemplateParams;
  /// \brief The location of the 'extern' keyword, if any, for an explicit
  /// instantiation
  SourceLocation ExternLoc;
  /// \brief The location of the 'template' keyword, for an explicit
  /// instantiation.
  SourceLocation TemplateLoc;
  /// \brief Whether the last template parameter list was empty.
  bool LastParameterListWasEmpty;
  SourceRange getSourceRange() const LLVM_READONLY;
};
void LexTemplateFunctionForLateParsing(CachedTokens &Toks);
void ParseLateTemplatedFuncDef(LateParsedTemplate &LPT);
static void LateTemplateParserCallback(void *P, LateParsedTemplate &LPT);
static void LateTemplateParserCleanupCallback(void *P);
Sema::ParsingClassState
PushParsingClass(Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface);
void DeallocateParsedClasses(ParsingClass *Class);
void PopParsingClass(Sema::ParsingClassState);
enum CachedInitKind {
CIK_DefaultArgument,
CIK_DefaultInitializer
};
NamedDecl *ParseCXXInlineMethodDef(AccessSpecifier AS,
AttributeList *AccessAttrs,
ParsingDeclarator &D,
const ParsedTemplateInfo &TemplateInfo,
const VirtSpecifiers& VS,
SourceLocation PureSpecLoc);
void ParseCXXNonStaticMemberInitializer(Decl *VarD);
void ParseLexedAttributes(ParsingClass &Class);
void ParseLexedAttributeList(LateParsedAttrList &LAs, Decl *D,
bool EnterScope, bool OnDefinition);
void ParseLexedAttribute(LateParsedAttribute &LA,
bool EnterScope, bool OnDefinition);
void ParseLexedMethodDeclarations(ParsingClass &Class);
void ParseLexedMethodDeclaration(LateParsedMethodDeclaration &LM);
void ParseLexedMethodDefs(ParsingClass &Class);
void ParseLexedMethodDef(LexedMethod &LM);
void ParseLexedMemberInitializers(ParsingClass &Class);
void ParseLexedMemberInitializer(LateParsedMemberInitializer &MI);
void ParseLexedObjCMethodDefs(LexedMethod &LM, bool parseMethod);
bool ConsumeAndStoreFunctionPrologue(CachedTokens &Toks);
bool ConsumeAndStoreInitializer(CachedTokens &Toks, CachedInitKind CIK);
bool ConsumeAndStoreConditional(CachedTokens &Toks);
/// \brief Convenience overload: consume and store tokens until a single
/// token kind \p T1 is found (forwards to the two-kind overload by passing
/// T1 twice).
bool ConsumeAndStoreUntil(tok::TokenKind T1,
                          CachedTokens &Toks,
                          bool StopAtSemi = true,
                          bool ConsumeFinalToken = true) {
  return ConsumeAndStoreUntil(T1, T1, Toks, StopAtSemi, ConsumeFinalToken);
}
bool ConsumeAndStoreUntil(tok::TokenKind T1, tok::TokenKind T2,
CachedTokens &Toks,
bool StopAtSemi = true,
bool ConsumeFinalToken = true);
//===--------------------------------------------------------------------===//
// C99 6.9: External Definitions.
/// \brief A ParsedAttributes list that also remembers the source range
/// covering the attributes.
struct ParsedAttributesWithRange : ParsedAttributes {
  ParsedAttributesWithRange(AttributeFactory &factory)
    : ParsedAttributes(factory) {}
  /// \brief Reset the attribute list and clear the remembered range.
  void clear() {
    ParsedAttributes::clear();
    Range = SourceRange();
  }
  SourceRange Range; // Source range covering the parsed attributes.
};
DeclGroupPtrTy ParseExternalDeclaration(ParsedAttributesWithRange &attrs,
ParsingDeclSpec *DS = nullptr);
bool isDeclarationAfterDeclarator();
bool isStartOfFunctionDefinition(const ParsingDeclarator &Declarator);
DeclGroupPtrTy ParseDeclarationOrFunctionDefinition(
ParsedAttributesWithRange &attrs,
ParsingDeclSpec *DS = nullptr,
AccessSpecifier AS = AS_none);
DeclGroupPtrTy ParseDeclOrFunctionDefInternal(ParsedAttributesWithRange &attrs,
ParsingDeclSpec &DS,
AccessSpecifier AS);
void SkipFunctionBody();
Decl *ParseFunctionDefinition(ParsingDeclarator &D,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
LateParsedAttrList *LateParsedAttrs = nullptr);
void ParseKNRParamDeclarations(Declarator &D);
// EndLoc, if non-NULL, is filled with the location of the last token of
// the simple-asm.
ExprResult ParseSimpleAsm(SourceLocation *EndLoc = nullptr);
ExprResult ParseAsmStringLiteral();
// Objective-C External Declarations
void MaybeSkipAttributes(tok::ObjCKeywordKind Kind);
DeclGroupPtrTy ParseObjCAtDirectives();
DeclGroupPtrTy ParseObjCAtClassDeclaration(SourceLocation atLoc);
Decl *ParseObjCAtInterfaceDeclaration(SourceLocation AtLoc,
ParsedAttributes &prefixAttrs);
class ObjCTypeParamListScope;
ObjCTypeParamList *parseObjCTypeParamList();
ObjCTypeParamList *parseObjCTypeParamListOrProtocolRefs(
ObjCTypeParamListScope &Scope, SourceLocation &lAngleLoc,
SmallVectorImpl<IdentifierLocPair> &protocolIdents,
SourceLocation &rAngleLoc, bool mayBeProtocolList = true);
void HelperActionsForIvarDeclarations(Decl *interfaceDecl, SourceLocation atLoc,
BalancedDelimiterTracker &T,
SmallVectorImpl<Decl *> &AllIvarDecls,
bool RBraceMissing);
void ParseObjCClassInstanceVariables(Decl *interfaceDecl,
tok::ObjCKeywordKind visibility,
SourceLocation atLoc);
bool ParseObjCProtocolReferences(SmallVectorImpl<Decl *> &P,
SmallVectorImpl<SourceLocation> &PLocs,
bool WarnOnDeclarations,
bool ForObjCContainer,
SourceLocation &LAngleLoc,
SourceLocation &EndProtoLoc,
bool consumeLastToken);
/// Parse the first angle-bracket-delimited clause for an
/// Objective-C object or object pointer type, which may be either
/// type arguments or protocol qualifiers.
void parseObjCTypeArgsOrProtocolQualifiers(
ParsedType baseType,
SourceLocation &typeArgsLAngleLoc,
SmallVectorImpl<ParsedType> &typeArgs,
SourceLocation &typeArgsRAngleLoc,
SourceLocation &protocolLAngleLoc,
SmallVectorImpl<Decl *> &protocols,
SmallVectorImpl<SourceLocation> &protocolLocs,
SourceLocation &protocolRAngleLoc,
bool consumeLastToken,
bool warnOnIncompleteProtocols);
/// Parse either Objective-C type arguments or protocol qualifiers; if the
/// former, also parse protocol qualifiers afterward.
void parseObjCTypeArgsAndProtocolQualifiers(
ParsedType baseType,
SourceLocation &typeArgsLAngleLoc,
SmallVectorImpl<ParsedType> &typeArgs,
SourceLocation &typeArgsRAngleLoc,
SourceLocation &protocolLAngleLoc,
SmallVectorImpl<Decl *> &protocols,
SmallVectorImpl<SourceLocation> &protocolLocs,
SourceLocation &protocolRAngleLoc,
bool consumeLastToken);
/// Parse a protocol qualifier type such as '<NSCopying>', which is
/// an anachronistic way of writing 'id<NSCopying>'.
TypeResult parseObjCProtocolQualifierType(SourceLocation &rAngleLoc);
/// Parse Objective-C type arguments and protocol qualifiers, extending the
/// current type with the parsed result.
TypeResult parseObjCTypeArgsAndProtocolQualifiers(SourceLocation loc,
ParsedType type,
bool consumeLastToken,
SourceLocation &endLoc);
void ParseObjCInterfaceDeclList(tok::ObjCKeywordKind contextKey,
Decl *CDecl);
DeclGroupPtrTy ParseObjCAtProtocolDeclaration(SourceLocation atLoc,
ParsedAttributes &prefixAttrs);
/// \brief RAII object tracking state while parsing an Objective-C
/// \@implementation; registers itself as the parser's current impl context.
struct ObjCImplParsingDataRAII {
  Parser &P;
  Decl *Dcl;         // The @implementation declaration being parsed.
  bool HasCFunction; // Whether a C function was seen inside the @implementation.
  typedef SmallVector<LexedMethod*, 8> LateParsedObjCMethodContainer;
  LateParsedObjCMethodContainer LateParsedObjCMethods;
  // Initialize Finished in the member-init list (the original assigned it
  // in the constructor body, inconsistently with the other members).
  ObjCImplParsingDataRAII(Parser &parser, Decl *D)
    : P(parser), Dcl(D), HasCFunction(false), Finished(false) {
    P.CurParsedObjCImpl = this;
  }
  ~ObjCImplParsingDataRAII();
  void finish(SourceRange AtEnd);
  bool isFinished() const { return Finished; }
private:
  bool Finished; // Set by finish(); queried via isFinished().
};
ObjCImplParsingDataRAII *CurParsedObjCImpl;
void StashAwayMethodOrFunctionBodyTokens(Decl *MDecl);
DeclGroupPtrTy ParseObjCAtImplementationDeclaration(SourceLocation AtLoc);
DeclGroupPtrTy ParseObjCAtEndDeclaration(SourceRange atEnd);
Decl *ParseObjCAtAliasDeclaration(SourceLocation atLoc);
Decl *ParseObjCPropertySynthesize(SourceLocation atLoc);
Decl *ParseObjCPropertyDynamic(SourceLocation atLoc);
IdentifierInfo *ParseObjCSelectorPiece(SourceLocation &MethodLocation);
// Definitions for Objective-c context sensitive keywords recognition.
// These are not real keywords; they are identifiers recognized by position
// (e.g. inside method parameter type specifications).
enum ObjCTypeQual {
objc_in=0, objc_out, objc_inout, objc_oneway, objc_bycopy, objc_byref,
objc_nonnull, objc_nullable, objc_null_unspecified,
objc_NumQuals
};
// Cached IdentifierInfo for each context-sensitive qualifier, indexed by
// ObjCTypeQual, used to recognize them quickly during parsing.
IdentifierInfo *ObjCTypeQuals[objc_NumQuals];
bool isTokIdentifier_in() const;
ParsedType ParseObjCTypeName(ObjCDeclSpec &DS, DeclaratorContext Ctx,
ParsedAttributes *ParamAttrs);
void ParseObjCMethodRequirement();
Decl *ParseObjCMethodPrototype(
tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword,
bool MethodDefinition = true);
Decl *ParseObjCMethodDecl(SourceLocation mLoc, tok::TokenKind mType,
tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword,
bool MethodDefinition=true);
void ParseObjCPropertyAttribute(ObjCDeclSpec &DS);
Decl *ParseObjCMethodDefinition();
public:
//===--------------------------------------------------------------------===//
// C99 6.5: Expressions.
/// TypeCastState - State whether an expression is or may be a type cast.
enum TypeCastState {
NotTypeCast = 0,
MaybeTypeCast,
IsTypeCast
};
ExprResult ParseHLSVariableExpression(StringRef optionName, bool noVoid=true);
ExprResult ParseExpression(TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseConstantExpressionInExprEvalContext(
TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseConstantExpression(TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseConstraintExpression();
// Expr that doesn't include commas.
ExprResult ParseAssignmentExpression(TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseMSAsmIdentifier(llvm::SmallVectorImpl<Token> &LineToks,
unsigned &NumLineToksConsumed,
bool IsUnevaluated);
private:
ExprResult ParseExpressionWithLeadingAt(SourceLocation AtLoc);
ExprResult ParseExpressionWithLeadingExtension(SourceLocation ExtLoc);
ExprResult ParseRHSOfBinaryExpression(ExprResult LHS,
prec::Level MinPrec);
ExprResult ParseCastExpression(bool isUnaryExpression,
bool isAddressOfOperand,
bool &NotCastExpr,
TypeCastState isTypeCast,
bool isVectorLiteral = false);
ExprResult ParseCastExpression(bool isUnaryExpression,
bool isAddressOfOperand = false,
TypeCastState isTypeCast = NotTypeCast,
bool isVectorLiteral = false);
/// Returns true if the next token cannot start an expression.
bool isNotExpressionStart();
/// Returns true if the next token would start a postfix-expression
/// suffix.
bool isPostfixExpressionSuffixStart() {
tok::TokenKind K = Tok.getKind();
return (K == tok::l_square || K == tok::l_paren ||
K == tok::period || K == tok::arrow ||
K == tok::plusplus || K == tok::minusminus);
}
bool diagnoseUnknownTemplateId(ExprResult TemplateName, SourceLocation Less);
ExprResult ParsePostfixExpressionSuffix(ExprResult LHS);
ExprResult ParseUnaryExprOrTypeTraitExpression();
ExprResult ParseBuiltinPrimaryExpression();
ExprResult ParseExprAfterUnaryExprOrTypeTrait(const Token &OpTok,
bool &isCastExpr,
ParsedType &CastTy,
SourceRange &CastRange);
typedef SmallVector<Expr*, 20> ExprListTy;
typedef SmallVector<SourceLocation, 20> CommaLocsTy;
/// ParseExpressionList - Used for C/C++ (argument-)expression-list.
bool ParseExpressionList(
SmallVectorImpl<Expr *> &Exprs,
SmallVectorImpl<SourceLocation> &CommaLocs,
llvm::function_ref<void()> Completer = llvm::function_ref<void()>());
/// ParseSimpleExpressionList - A simple comma-separated list of expressions,
/// used for misc language extensions.
bool ParseSimpleExpressionList(SmallVectorImpl<Expr*> &Exprs,
SmallVectorImpl<SourceLocation> &CommaLocs);
/// ParenParseOption - Control what ParseParenExpression will parse.
enum ParenParseOption {
SimpleExpr, // Only parse '(' expression ')'
CompoundStmt, // Also allow '(' compound-statement ')'
CompoundLiteral, // Also allow '(' type-name ')' '{' ... '}'
CastExpr // Also allow '(' type-name ')' <anything>
};
ExprResult ParseParenExpression(ParenParseOption &ExprType,
bool stopIfCastExpr,
bool isTypeCast,
ParsedType &CastTy,
SourceLocation &RParenLoc);
ExprResult ParseCXXAmbiguousParenExpression(
ParenParseOption &ExprType, ParsedType &CastTy,
BalancedDelimiterTracker &Tracker, ColonProtectionRAIIObject &ColonProt);
ExprResult ParseCompoundLiteralExpression(ParsedType Ty,
SourceLocation LParenLoc,
SourceLocation RParenLoc);
ExprResult ParseStringLiteralExpression(bool AllowUserDefinedLiteral = false);
ExprResult ParseGenericSelectionExpression();
ExprResult ParseObjCBoolLiteral();
ExprResult ParseFoldExpression(ExprResult LHS, BalancedDelimiterTracker &T);
//===--------------------------------------------------------------------===//
// C++ Expressions
ExprResult tryParseCXXIdExpression(CXXScopeSpec &SS, bool isAddressOfOperand,
Token &Replacement);
ExprResult ParseCXXIdExpression(bool isAddressOfOperand = false);
bool areTokensAdjacent(const Token &A, const Token &B);
void CheckForTemplateAndDigraph(Token &Next, ParsedType ObjectTypePtr,
bool EnteringContext, IdentifierInfo &II,
CXXScopeSpec &SS);
bool ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS,
ParsedType ObjectType,
bool EnteringContext,
bool *MayBePseudoDestructor = nullptr,
bool IsTypename = false,
IdentifierInfo **LastII = nullptr,
bool OnlyNamespace = false);
//===--------------------------------------------------------------------===//
// C++0x 5.1.2: Lambda expressions
// [...] () -> type {...}
ExprResult ParseLambdaExpression();
ExprResult TryParseLambdaExpression();
Optional<unsigned> ParseLambdaIntroducer(LambdaIntroducer &Intro,
bool *SkippedInits = nullptr);
bool TryParseLambdaIntroducer(LambdaIntroducer &Intro);
ExprResult ParseLambdaExpressionAfterIntroducer(
LambdaIntroducer &Intro);
//===--------------------------------------------------------------------===//
// C++ 5.2p1: C++ Casts
ExprResult ParseCXXCasts();
//===--------------------------------------------------------------------===//
// C++ 5.2p1: C++ Type Identification
ExprResult ParseCXXTypeid();
//===--------------------------------------------------------------------===//
// C++ : Microsoft __uuidof Expression
ExprResult ParseCXXUuidof();
//===--------------------------------------------------------------------===//
// C++ 5.2.4: C++ Pseudo-Destructor Expressions
ExprResult ParseCXXPseudoDestructor(Expr *Base, SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
ParsedType ObjectType);
//===--------------------------------------------------------------------===//
// C++ 9.3.2: C++ 'this' pointer
ExprResult ParseCXXThis();
//===--------------------------------------------------------------------===//
// C++ 15: C++ Throw Expression
ExprResult ParseThrowExpression();
ExceptionSpecificationType tryParseExceptionSpecification(
bool Delayed,
SourceRange &SpecificationRange,
SmallVectorImpl<ParsedType> &DynamicExceptions,
SmallVectorImpl<SourceRange> &DynamicExceptionRanges,
ExprResult &NoexceptExpr,
CachedTokens *&ExceptionSpecTokens);
// EndLoc is filled with the location of the last token of the specification.
ExceptionSpecificationType ParseDynamicExceptionSpecification(
SourceRange &SpecificationRange,
SmallVectorImpl<ParsedType> &Exceptions,
SmallVectorImpl<SourceRange> &Ranges);
//===--------------------------------------------------------------------===//
// C++0x 8: Function declaration trailing-return-type
TypeResult ParseTrailingReturnType(SourceRange &Range);
//===--------------------------------------------------------------------===//
// C++ 2.13.5: C++ Boolean Literals
ExprResult ParseCXXBoolLiteral();
//===--------------------------------------------------------------------===//
// C++ 5.2.3: Explicit type conversion (functional notation)
ExprResult ParseCXXTypeConstructExpression(const DeclSpec &DS);
/// ParseCXXSimpleTypeSpecifier - [C++ 7.1.5.2] Simple type specifiers.
/// This should only be called when the current token is known to be part of
/// simple-type-specifier.
void ParseCXXSimpleTypeSpecifier(DeclSpec &DS);
bool ParseCXXTypeSpecifierSeq(DeclSpec &DS);
//===--------------------------------------------------------------------===//
// C++ 5.3.4 and 5.3.5: C++ new and delete
bool ParseExpressionListOrTypeId(SmallVectorImpl<Expr*> &Exprs,
Declarator &D);
void ParseDirectNewDeclarator(Declarator &D);
ExprResult ParseCXXNewExpression(bool UseGlobal, SourceLocation Start);
ExprResult ParseCXXDeleteExpression(bool UseGlobal,
SourceLocation Start);
//===--------------------------------------------------------------------===//
// C++ if/switch/while condition expression.
Sema::ConditionResult ParseCXXCondition(StmtResult *InitStmt,
SourceLocation Loc,
Sema::ConditionKind CK);
//===--------------------------------------------------------------------===//
// C++ Coroutines
ExprResult ParseCoyieldExpression();
//===--------------------------------------------------------------------===//
// C99 6.7.8: Initialization.
/// ParseInitializer
/// initializer: [C99 6.7.8]
///   assignment-expression
///   '{' ...
///
/// Dispatches to the brace-initializer parser when the next token is '{',
/// otherwise parses a plain assignment-expression.
ExprResult ParseInitializer() {
  if (Tok.is(tok::l_brace))
    return ParseBraceInitializer();
  return ParseAssignmentExpression();
}
bool MayBeDesignationStart();
ExprResult ParseBraceInitializer();
ExprResult ParseInitializerWithPotentialDesignator();
//===--------------------------------------------------------------------===//
// clang Expressions
ExprResult ParseBlockLiteralExpression(); // ^{...}
//===--------------------------------------------------------------------===//
// Objective-C Expressions
ExprResult ParseObjCAtExpression(SourceLocation AtLocation);
ExprResult ParseObjCStringLiteral(SourceLocation AtLoc);
ExprResult ParseObjCCharacterLiteral(SourceLocation AtLoc);
ExprResult ParseObjCNumericLiteral(SourceLocation AtLoc);
ExprResult ParseObjCBooleanLiteral(SourceLocation AtLoc, bool ArgValue);
ExprResult ParseObjCArrayLiteral(SourceLocation AtLoc);
ExprResult ParseObjCDictionaryLiteral(SourceLocation AtLoc);
ExprResult ParseObjCBoxedExpr(SourceLocation AtLoc);
ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc);
ExprResult ParseObjCSelectorExpression(SourceLocation AtLoc);
ExprResult ParseObjCProtocolExpression(SourceLocation AtLoc);
bool isSimpleObjCMessageExpression();
ExprResult ParseObjCMessageExpression();
ExprResult ParseObjCMessageExpressionBody(SourceLocation LBracloc,
SourceLocation SuperLoc,
ParsedType ReceiverType,
Expr *ReceiverExpr);
ExprResult ParseAssignmentExprWithObjCMessageExprStart(
SourceLocation LBracloc, SourceLocation SuperLoc,
ParsedType ReceiverType, Expr *ReceiverExpr);
bool ParseObjCXXMessageReceiver(bool &IsExpr, void *&TypeOrExpr);
//===--------------------------------------------------------------------===//
// C99 6.8: Statements and Blocks.
/// A SmallVector of statements, with stack size 32 (as that is the only one
/// used.)
typedef SmallVector<Stmt*, 32> StmtVector;
/// A SmallVector of expressions, with stack size 12 (the maximum used.)
typedef SmallVector<Expr*, 12> ExprVector;
/// A SmallVector of types.
typedef SmallVector<ParsedType, 12> TypeVector;
StmtResult ParseStatement(SourceLocation *TrailingElseLoc = nullptr,
bool AllowOpenMPStandalone = false);
enum AllowedConstructsKind {
/// \brief Allow any declarations, statements, OpenMP directives.
ACK_Any,
/// \brief Allow only statements and non-standalone OpenMP directives.
ACK_StatementsOpenMPNonStandalone,
/// \brief Allow statements and all executable OpenMP directives
ACK_StatementsOpenMPAnyExecutable
};
StmtResult
ParseStatementOrDeclaration(StmtVector &Stmts, AllowedConstructsKind Allowed,
SourceLocation *TrailingElseLoc = nullptr);
StmtResult ParseStatementOrDeclarationAfterAttributes(
StmtVector &Stmts,
AllowedConstructsKind Allowed,
SourceLocation *TrailingElseLoc,
ParsedAttributesWithRange &Attrs);
StmtResult ParseExprStatement();
StmtResult ParseLabeledStatement(ParsedAttributesWithRange &attrs);
StmtResult ParseCaseStatement(bool MissingCase = false,
ExprResult Expr = ExprResult());
StmtResult ParseDefaultStatement();
StmtResult ParseDataflowCompoundStatement();
StmtResult
ParseCompoundStatement(bool isStmtExpr = false,
ParsedAttributesWithRange *ScopeAttr = nullptr);
StmtResult
ParseCompoundStatement(bool isStmtExpr, unsigned ScopeFlags,
ParsedAttributesWithRange *ScopeAttr = nullptr);
bool CheckLBraceForDataflowLoopBody();
void ParseCompoundStatementLeadingPragmas();
StmtResult ParseCompoundStatementBody(bool isStmtExpr = false);
bool ParseParenExprOrCondition(StmtResult *InitStmt,
Sema::ConditionResult &CondResult,
SourceLocation Loc,
Sema::ConditionKind CK);
StmtResult ParseIfStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseSwitchStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseWhileStatement(ParsedAttributesWithRange &ScopeAttr,
SourceLocation *TrailingElseLoc);
StmtResult ParseDoStatement(ParsedAttributesWithRange &ScopeAttr);
StmtResult ParseForStatement(ParsedAttributesWithRange &ScopeAttr,
SourceLocation *TrailingElseLoc);
StmtResult ParseGotoStatement();
StmtResult ParseContinueStatement();
StmtResult ParseBreakStatement();
StmtResult ParseReturnStatement();
StmtResult ParseAsmStatement(bool &msAsm);
StmtResult ParseMicrosoftAsmStatement(SourceLocation AsmLoc);
StmtResult ParsePragmaLoopHint(StmtVector &Stmts,
AllowedConstructsKind Allowed,
SourceLocation *TrailingElseLoc,
ParsedAttributesWithRange &Attrs);
/// \brief Describes the behavior that should be taken for an __if_exists
/// block.
enum IfExistsBehavior {
/// \brief Parse the block; this code is always used.
IEB_Parse,
/// \brief Skip the block entirely; this code is never used.
IEB_Skip,
/// \brief Parse the block as a dependent block, which may be used in
/// some template instantiations but not others.
IEB_Dependent
};
/// \brief Describes the condition of a Microsoft __if_exists or
/// __if_not_exists block.
struct IfExistsCondition {
/// \brief The location of the initial keyword.
SourceLocation KeywordLoc;
/// \brief Whether this is an __if_exists block (rather than an
/// __if_not_exists block).
bool IsIfExists;
/// \brief Nested-name-specifier preceding the name.
CXXScopeSpec SS;
/// \brief The name we're looking for.
UnqualifiedId Name;
/// \brief The behavior this __if_exists or __if_not_exists block
/// should have: parse it, skip it, or treat it as dependent.
IfExistsBehavior Behavior;
};
bool ParseMicrosoftIfExistsCondition(IfExistsCondition& Result);
void ParseMicrosoftIfExistsStatement(StmtVector &Stmts);
void ParseMicrosoftIfExistsExternalDeclaration();
void ParseMicrosoftIfExistsClassDeclaration(DeclSpec::TST TagType,
AccessSpecifier& CurAS);
bool ParseMicrosoftIfExistsBraceInitializer(ExprVector &InitExprs,
bool &InitExprsOk);
bool ParseAsmOperandsOpt(SmallVectorImpl<IdentifierInfo *> &Names,
SmallVectorImpl<Expr *> &Constraints,
SmallVectorImpl<Expr *> &Exprs);
//===--------------------------------------------------------------------===//
// C++ 6: Statements and Blocks
StmtResult ParseCXXTryBlock();
StmtResult ParseCXXTryBlockCommon(SourceLocation TryLoc, bool FnTry = false);
StmtResult ParseCXXCatchBlock(bool FnCatch = false);
//===--------------------------------------------------------------------===//
// MS: SEH Statements and Blocks
StmtResult ParseSEHTryBlock();
StmtResult ParseSEHExceptBlock(SourceLocation Loc);
StmtResult ParseSEHFinallyBlock(SourceLocation Loc);
StmtResult ParseSEHLeaveStatement();
//===--------------------------------------------------------------------===//
// Objective-C Statements
StmtResult ParseObjCAtStatement(SourceLocation atLoc);
StmtResult ParseObjCTryStmt(SourceLocation atLoc);
StmtResult ParseObjCThrowStmt(SourceLocation atLoc);
StmtResult ParseObjCSynchronizedStmt(SourceLocation atLoc);
StmtResult ParseObjCAutoreleasePoolStmt(SourceLocation atLoc);
//===--------------------------------------------------------------------===//
// C99 6.7: Declarations.
/// A context for parsing declaration specifiers. TODO: flesh this
/// out, there are other significant restrictions on specifiers than
/// would be best implemented in the parser.
enum class DeclSpecContext {
DSC_normal, // normal context
DSC_class, // class context, enables 'friend'
DSC_type_specifier, // C++ type-specifier-seq or C specifier-qualifier-list
DSC_trailing, // C++11 trailing-type-specifier in a trailing return type
DSC_alias_declaration, // C++11 type-specifier-seq in an alias-declaration
DSC_top_level, // top-level/namespace declaration context
DSC_template_param, // template parameter context
DSC_template_type_arg, // template type argument context
DSC_objc_method_result, // ObjC method result context, enables 'instancetype'
DSC_condition // condition declaration context
};
/// Is this a context in which we are parsing just a type-specifier (or
/// trailing-type-specifier)?
static bool isTypeSpecifier(DeclSpecContext DSC) {
  switch (DSC) {
  // Contexts restricted to a (trailing-)type-specifier.
  case DeclSpecContext::DSC_type_specifier:
  case DeclSpecContext::DSC_trailing:
  case DeclSpecContext::DSC_alias_declaration:
  case DeclSpecContext::DSC_template_type_arg:
    return true;

  // Contexts that allow full declaration specifiers.
  case DeclSpecContext::DSC_normal:
  case DeclSpecContext::DSC_class:
  case DeclSpecContext::DSC_top_level:
  case DeclSpecContext::DSC_template_param:
  case DeclSpecContext::DSC_objc_method_result:
  case DeclSpecContext::DSC_condition:
    return false;
  }
  llvm_unreachable("Missing DeclSpecContext case");
}
/// Is this a context in which we can perform class template argument
/// deduction?
static bool isClassTemplateDeductionContext(DeclSpecContext DSC) {
  switch (DSC) {
  // Deduction is permitted in these contexts.
  case DeclSpecContext::DSC_normal:
  case DeclSpecContext::DSC_class:
  case DeclSpecContext::DSC_top_level:
  case DeclSpecContext::DSC_template_param:
  case DeclSpecContext::DSC_condition:
  case DeclSpecContext::DSC_type_specifier:
    return true;

  // Deduction is not permitted in these contexts.
  case DeclSpecContext::DSC_objc_method_result:
  case DeclSpecContext::DSC_template_type_arg:
  case DeclSpecContext::DSC_trailing:
  case DeclSpecContext::DSC_alias_declaration:
    return false;
  }
  llvm_unreachable("Missing DeclSpecContext case");
}
/// Information on a C++0x for-range-initializer found while parsing a
/// declaration which turns out to be a for-range-declaration.
struct ForRangeInit {
  SourceLocation ColonLoc; ///< Location of the ':' introducing the range.
  ExprResult RangeExpr;    ///< The range expression after the colon.

  /// Whether a for-range-declaration (a ':' and range) was actually parsed;
  /// true exactly when ColonLoc was set.
  bool ParsedForRangeDecl() { return ColonLoc.isValid(); }
};
DeclGroupPtrTy ParseDeclaration(DeclaratorContext Context,
SourceLocation &DeclEnd,
ParsedAttributesWithRange &attrs);
DeclGroupPtrTy ParseSimpleDeclaration(DeclaratorContext Context,
SourceLocation &DeclEnd,
ParsedAttributesWithRange &attrs,
bool RequireSemi,
ForRangeInit *FRI = nullptr);
bool MightBeDeclarator(DeclaratorContext Context);
DeclGroupPtrTy ParseDeclGroup(ParsingDeclSpec &DS, DeclaratorContext Context,
SourceLocation *DeclEnd = nullptr,
ForRangeInit *FRI = nullptr);
Decl *ParseDeclarationAfterDeclarator(Declarator &D,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo());
bool ParseAsmAttributesAfterDeclarator(Declarator &D);
Decl *ParseDeclarationAfterDeclaratorAndAttributes(
Declarator &D,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
ForRangeInit *FRI = nullptr);
Decl *ParseFunctionStatementBody(Decl *Decl, ParseScope &BodyScope);
Decl *ParseFunctionTryBlock(Decl *Decl, ParseScope &BodyScope);
/// \brief When in code-completion, skip parsing of the function/method body
/// unless the body contains the code-completion point.
///
/// \returns true if the function body was skipped.
bool trySkippingFunctionBody();
bool ParseImplicitInt(DeclSpec &DS, CXXScopeSpec *SS,
const ParsedTemplateInfo &TemplateInfo,
AccessSpecifier AS, DeclSpecContext DSC,
ParsedAttributesWithRange &Attrs);
DeclSpecContext
getDeclSpecContextFromDeclaratorContext(DeclaratorContext Context);
void ParseDeclarationSpecifiers(
DeclSpec &DS,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
AccessSpecifier AS = AS_none,
DeclSpecContext DSC = DeclSpecContext::DSC_normal,
LateParsedAttrList *LateAttrs = nullptr);
bool DiagnoseMissingSemiAfterTagDefinition(
DeclSpec &DS, AccessSpecifier AS, DeclSpecContext DSContext,
LateParsedAttrList *LateAttrs = nullptr);
void ParseSpecifierQualifierList(
DeclSpec &DS, AccessSpecifier AS = AS_none,
DeclSpecContext DSC = DeclSpecContext::DSC_normal);
void ParseObjCTypeQualifierList(ObjCDeclSpec &DS,
DeclaratorContext Context);
void ParseEnumSpecifier(SourceLocation TagLoc, DeclSpec &DS,
const ParsedTemplateInfo &TemplateInfo,
AccessSpecifier AS, DeclSpecContext DSC);
void ParseEnumBody(SourceLocation StartLoc, Decl *TagDecl);
void ParseStructUnionBody(SourceLocation StartLoc, unsigned TagType,
Decl *TagDecl);
void ParseStructDeclaration(
ParsingDeclSpec &DS,
llvm::function_ref<void(ParsingFieldDeclarator &)> FieldsCallback);
bool isDeclarationSpecifier(bool DisambiguatingWithExpression = false);
bool isTypeSpecifierQualifier();
/// isKnownToBeTypeSpecifier - Return true if we know that the specified token
/// is definitely a type-specifier. Return false if it isn't part of a type
/// specifier or if we're not sure.
bool isKnownToBeTypeSpecifier(const Token &Tok) const;
/// \brief Return true if we know that we are definitely looking at a
/// decl-specifier, and isn't part of an expression such as a function-style
/// cast. Return false if it's not a decl-specifier, or we're not sure.
bool isKnownToBeDeclarationSpecifier() {
  if (!getLangOpts().CPlusPlus)
    return isDeclarationSpecifier(true);
  return isCXXDeclarationSpecifier() == TPResult::True;
}
/// isDeclarationStatement - Disambiguates between a declaration or an
/// expression statement, when parsing function bodies.
/// Returns true for declaration, false for expression.
bool isDeclarationStatement() {
  return getLangOpts().CPlusPlus ? isCXXDeclarationStatement()
                                 : isDeclarationSpecifier(true);
}
/// isForInitDeclaration - Disambiguates between a declaration or an
/// expression in the context of the C 'clause-1' or the C++
/// 'for-init-statement' part of a 'for' statement.
/// Returns true for declaration, false for expression.
bool isForInitDeclaration() {
  if (!getLangOpts().CPlusPlus)
    return isDeclarationSpecifier(true);
  return isCXXSimpleDeclaration(/*AllowForRangeDecl=*/true);
}
/// \brief Determine whether this is a C++1z for-range-identifier.
bool isForRangeIdentifier();
/// \brief Determine whether we are currently at the start of an Objective-C
/// class message that appears to be missing the open bracket '['.
bool isStartOfObjCClassMessageMissingOpenBracket();
/// \brief Starting with a scope specifier, identifier, or
/// template-id that refers to the current class, determine whether
/// this is a constructor declarator.
bool isConstructorDeclarator(bool Unqualified, bool DeductionGuide = false);
/// \brief Specifies the context in which type-id/expression
/// disambiguation will occur.
enum TentativeCXXTypeIdContext {
/// The candidate type-id appears inside parentheses (e.g. a cast).
TypeIdInParens,
/// A type-id is expected unambiguously in this position.
TypeIdUnambiguous,
/// The candidate type-id appears as a template argument.
TypeIdAsTemplateArgument
};
/// isTypeIdInParens - Assumes that a '(' was parsed and now we want to know
/// whether the parens contain an expression or a type-id.
/// Returns true for a type-id and false for an expression.
/// \param isAmbiguous set to true only when C++ disambiguation finds the
/// construct genuinely ambiguous; always false in C.
bool isTypeIdInParens(bool &isAmbiguous) {
  if (!getLangOpts().CPlusPlus) {
    isAmbiguous = false;
    return isTypeSpecifierQualifier();
  }
  return isCXXTypeId(TypeIdInParens, isAmbiguous);
}
/// Convenience overload that discards the ambiguity flag.
bool isTypeIdInParens() {
  bool Ambiguous;
  return isTypeIdInParens(Ambiguous);
}
/// \brief Checks if the current tokens form type-id or expression.
/// It is similar to isTypeIdInParens but does not suppose that type-id
/// is in parenthesis.
bool isTypeIdUnambiguously() {
  if (!getLangOpts().CPlusPlus)
    return isTypeSpecifierQualifier();
  bool IsAmbiguous; // Result is ignored in this context.
  return isCXXTypeId(TypeIdUnambiguous, IsAmbiguous);
}
/// isCXXDeclarationStatement - C++-specialized function that disambiguates
/// between a declaration or an expression statement, when parsing function
/// bodies. Returns true for declaration, false for expression.
bool isCXXDeclarationStatement();
/// isCXXSimpleDeclaration - C++-specialized function that disambiguates
/// between a simple-declaration or an expression-statement.
/// If during the disambiguation process a parsing error is encountered,
/// the function returns true to let the declaration parsing code handle it.
/// Returns false if the statement is disambiguated as expression.
bool isCXXSimpleDeclaration(bool AllowForRangeDecl);
/// isCXXFunctionDeclarator - Disambiguates between a function declarator or
/// a constructor-style initializer, when parsing declaration statements.
/// Returns true for function declarator and false for constructor-style
/// initializer. Sets 'IsAmbiguous' to true to indicate that this declaration
/// might be a constructor-style initializer.
/// If during the disambiguation process a parsing error is encountered,
/// the function returns true to let the declaration parsing code handle it.
bool isCXXFunctionDeclarator(bool *IsAmbiguous = nullptr);
struct ConditionDeclarationOrInitStatementState;
enum class ConditionOrInitStatement {
Expression, ///< Disambiguated as an expression (either kind).
ConditionDecl, ///< Disambiguated as the declaration form of condition.
InitStmtDecl, ///< Disambiguated as a simple-declaration init-statement.
Error ///< Can't be any of the above!
};
/// \brief Disambiguates between the different kinds of things that can happen
/// after 'if (' or 'switch ('. This could be one of two different kinds of
/// declaration (depending on whether there is a ';' later) or an expression.
ConditionOrInitStatement
isCXXConditionDeclarationOrInitStatement(bool CanBeInitStmt);
bool isCXXTypeId(TentativeCXXTypeIdContext Context, bool &isAmbiguous);
/// Convenience overload of isCXXTypeId that discards the ambiguity flag.
bool isCXXTypeId(TentativeCXXTypeIdContext Context) {
  bool Ambiguous;
  return isCXXTypeId(Context, Ambiguous);
}
/// TPResult - Used as the result value for functions whose purpose is to
/// disambiguate C++ constructs by "tentatively parsing" them.
enum class TPResult {
True, False, Ambiguous, Error
};
/// \brief Based only on the given token kind, determine whether we know that
/// we're at the start of an expression or a type-specifier-seq (which may
/// be an expression, in C++).
///
/// This routine does not attempt to resolve any of the trick cases, e.g.,
/// those involving lookup of identifiers.
///
/// \returns \c TPR_true if this token starts an expression, \c TPR_false if
/// this token starts a type-specifier-seq, or \c TPR_ambiguous if it cannot
/// tell.
TPResult isExpressionOrTypeSpecifierSimple(tok::TokenKind Kind);
/// isCXXDeclarationSpecifier - Returns TPResult::True if it is a
/// declaration specifier, TPResult::False if it is not,
/// TPResult::Ambiguous if it could be either a decl-specifier or a
/// function-style cast, and TPResult::Error if a parsing error was
/// encountered. If it could be a braced C++11 function-style cast, returns
/// BracedCastResult.
/// Doesn't consume tokens.
TPResult
isCXXDeclarationSpecifier(TPResult BracedCastResult = TPResult::False,
bool *HasMissingTypename = nullptr);
/// Given that isCXXDeclarationSpecifier returns \c TPResult::True or
/// \c TPResult::Ambiguous, determine whether the decl-specifier would be
/// a type-specifier other than a cv-qualifier.
bool isCXXDeclarationSpecifierAType();
/// \brief Determine whether an identifier has been tentatively declared as a
/// non-type. Such tentative declarations should not be found to name a type
/// during a tentative parse, but also should not be annotated as a non-type.
bool isTentativelyDeclared(IdentifierInfo *II);
// "Tentative parsing" functions, used for disambiguation. If a parsing error
// is encountered they will return TPResult::Error.
// Returning TPResult::True/False indicates that the ambiguity was
// resolved and tentative parsing may stop. TPResult::Ambiguous indicates
// that more tentative parsing is necessary for disambiguation.
// They all consume tokens, so backtracking should be used after calling them.
TPResult TryParseSimpleDeclaration(bool AllowForRangeDecl);
TPResult TryParseTypeofSpecifier();
TPResult TryParseProtocolQualifiers();
TPResult TryParsePtrOperatorSeq();
TPResult TryParseOperatorId();
TPResult TryParseInitDeclaratorList();
TPResult TryParseDeclarator(bool mayBeAbstract, bool mayHaveIdentifier=true);
TPResult
TryParseParameterDeclarationClause(bool *InvalidAsDeclaration = nullptr,
bool VersusTemplateArg = false);
TPResult TryParseFunctionDeclarator();
TPResult TryParseBracketDeclarator();
TPResult TryConsumeDeclarationSpecifier();
public:
TypeResult ParseTypeName(SourceRange *Range = nullptr,
DeclaratorContext Context
= DeclaratorContext::TypeNameContext,
AccessSpecifier AS = AS_none,
Decl **OwnedType = nullptr,
ParsedAttributes *Attrs = nullptr);
private:
void ParseBlockId(SourceLocation CaretLoc);
/// Are [[]] attributes enabled?
bool standardAttributesAllowed() const {
const LangOptions &LO = getLangOpts();
return LO.DoubleSquareBracketAttributes;
}
// Check for the start of an attribute-specifier-seq in a context where an
// attribute is not allowed. Only '[[' (two adjacent l_squares with standard
// attributes enabled) triggers a diagnostic.
bool CheckProhibitedCXX11Attribute() {
  assert(Tok.is(tok::l_square));
  if (standardAttributesAllowed() && NextToken().is(tok::l_square))
    return DiagnoseProhibitedCXX11Attribute();
  return false;
}
bool DiagnoseProhibitedCXX11Attribute();
/// Diagnose a misplaced C++11 attribute-specifier-seq or alignas keyword,
/// suggesting CorrectLocation as the right position. No-op unless standard
/// attributes are enabled and the current tokens start '[[' or 'alignas'.
void CheckMisplacedCXX11Attribute(ParsedAttributesWithRange &Attrs,
                                  SourceLocation CorrectLocation) {
  if (!standardAttributesAllowed())
    return;
  bool AtSpecifier = Tok.is(tok::l_square) && NextToken().is(tok::l_square);
  if (AtSpecifier || Tok.is(tok::kw_alignas))
    DiagnoseMisplacedCXX11Attribute(Attrs, CorrectLocation);
}
void DiagnoseMisplacedCXX11Attribute(ParsedAttributesWithRange &Attrs,
SourceLocation CorrectLocation);
void stripTypeAttributesOffDeclSpec(ParsedAttributesWithRange &Attrs,
DeclSpec &DS, Sema::TagUseKind TUK);
// FixItLoc = possible correct location for the attributes
/// Diagnose any parsed attributes as prohibited here and drop them.
/// Does nothing when no attributes were actually parsed.
void ProhibitAttributes(ParsedAttributesWithRange &attrs,
                        SourceLocation FixItLoc = SourceLocation()) {
  if (attrs.Range.isValid()) {
    DiagnoseProhibitedAttributes(attrs, FixItLoc);
    attrs.clear();
  }
}
void DiagnoseProhibitedAttributes(ParsedAttributesWithRange &attrs,
SourceLocation FixItLoc);
// Forbid C++11 and C2x attributes that appear on certain syntactic locations
// which standard permits but we don't supported yet, for example, attributes
// appertain to decl specifiers.
void ProhibitCXX11Attributes(ParsedAttributesWithRange &Attrs,
unsigned DiagID);
/// \brief Skip C++11 and C2x attributes and return the end location of the
/// last one.
/// \returns SourceLocation() if there are no attributes.
SourceLocation SkipCXX11Attributes();
/// \brief Diagnose and skip C++11 and C2x attributes that appear in syntactic
/// locations where attributes are not allowed.
void DiagnoseAndSkipCXX11Attributes();
/// \brief Parses syntax-generic attribute arguments for attributes which are
/// known to the implementation, and adds them to the given ParsedAttributes
/// list with the given attribute syntax. Returns the number of arguments
/// parsed for the attribute.
unsigned
ParseAttributeArgsCommon(IdentifierInfo *AttrName, SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc,
IdentifierInfo *ScopeName, SourceLocation ScopeLoc,
AttributeList::Syntax Syntax);
/// If the next token begins a GNU __attribute__ list, parse it and attach
/// the resulting attributes to the declarator D; otherwise do nothing.
void MaybeParseGNUAttributes(Declarator &D,
                             LateParsedAttrList *LateAttrs = nullptr) {
  if (!Tok.is(tok::kw___attribute))
    return;
  ParsedAttributes attrs(AttrFactory);
  SourceLocation endLoc;
  ParseGNUAttributes(attrs, &endLoc, LateAttrs, &D);
  D.takeAttributes(attrs, endLoc);
}
/// If the next token begins a GNU __attribute__ list, parse it into attrs;
/// otherwise do nothing.
void MaybeParseGNUAttributes(ParsedAttributes &attrs,
                             SourceLocation *endLoc = nullptr,
                             LateParsedAttrList *LateAttrs = nullptr) {
  if (!Tok.is(tok::kw___attribute))
    return;
  ParseGNUAttributes(attrs, endLoc, LateAttrs);
}
void ParseGNUAttributes(ParsedAttributes &attrs,
SourceLocation *endLoc = nullptr,
LateParsedAttrList *LateAttrs = nullptr,
Declarator *D = nullptr);
void ParseGNUAttributeArgs(IdentifierInfo *AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs,
SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
AttributeList::Syntax Syntax,
Declarator *D);
IdentifierLoc *ParseIdentifierLoc();
unsigned
ParseClangAttributeArgs(IdentifierInfo *AttrName, SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc,
IdentifierInfo *ScopeName, SourceLocation ScopeLoc,
AttributeList::Syntax Syntax);
/// If standard attributes are allowed in this language mode and a C++11/C2x
/// attribute-specifier follows, parse it and attach the attributes to
/// declarator \p D.
void MaybeParseCXX11Attributes(Declarator &D) {
if (standardAttributesAllowed() && isCXX11AttributeSpecifier()) {
ParsedAttributesWithRange attrs(AttrFactory);
SourceLocation endLoc;
ParseCXX11Attributes(attrs, &endLoc);
D.takeAttributes(attrs, endLoc);
}
}
/// Overload accumulating into a plain ParsedAttributes list: parses into a
/// temporary ParsedAttributesWithRange (the form ParseCXX11Attributes
/// requires) and then moves the results into \p attrs.
void MaybeParseCXX11Attributes(ParsedAttributes &attrs,
SourceLocation *endLoc = nullptr) {
if (standardAttributesAllowed() && isCXX11AttributeSpecifier()) {
ParsedAttributesWithRange attrsWithRange(AttrFactory);
ParseCXX11Attributes(attrsWithRange, endLoc);
attrs.takeAllFrom(attrsWithRange);
}
}
/// Overload taking a ranged attribute list directly.
/// \param OuterMightBeMessageSend passed through to the '[[' disambiguation
/// so it can account for a possible enclosing Objective-C message send.
void MaybeParseCXX11Attributes(ParsedAttributesWithRange &attrs,
SourceLocation *endLoc = nullptr,
bool OuterMightBeMessageSend = false) {
if (standardAttributesAllowed() &&
isCXX11AttributeSpecifier(false, OuterMightBeMessageSend))
ParseCXX11Attributes(attrs, endLoc);
}
void ParseCXX11AttributeSpecifier(ParsedAttributes &attrs,
SourceLocation *EndLoc = nullptr);
void ParseCXX11Attributes(ParsedAttributesWithRange &attrs,
SourceLocation *EndLoc = nullptr);
/// \brief Parses a C++11 (or C2x)-style attribute argument list. Returns true
/// if this results in adding an attribute to the ParsedAttributes list.
bool ParseCXX11AttributeArgs(IdentifierInfo *AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc);
IdentifierInfo *TryParseCXX11AttributeIdentifier(SourceLocation &Loc);
/// If Microsoft extensions are enabled and the next token is '[', parse
/// Microsoft-style square-bracket attributes into \p attrs.
void MaybeParseMicrosoftAttributes(ParsedAttributes &attrs,
SourceLocation *endLoc = nullptr) {
if (getLangOpts().MicrosoftExt && Tok.is(tok::l_square))
ParseMicrosoftAttributes(attrs, endLoc);
}
void ParseMicrosoftUuidAttributeArgs(ParsedAttributes &Attrs);
void ParseMicrosoftAttributes(ParsedAttributes &attrs,
SourceLocation *endLoc = nullptr);
/// If the '__declspec' keyword is enabled and is the next token, parse the
/// declspec attribute list into \p Attrs.
/// \param End when non-null, receives the end location of the specifiers.
void MaybeParseMicrosoftDeclSpecs(ParsedAttributes &Attrs,
SourceLocation *End = nullptr) {
const auto &LO = getLangOpts();
if (LO.DeclSpecKeyword && Tok.is(tok::kw___declspec))
ParseMicrosoftDeclSpecs(Attrs, End);
}
void ParseMicrosoftDeclSpecs(ParsedAttributes &Attrs,
SourceLocation *End = nullptr);
bool ParseMicrosoftDeclSpecArgs(IdentifierInfo *AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs);
void ParseMicrosoftTypeAttributes(ParsedAttributes &attrs);
void DiagnoseAndSkipExtendedMicrosoftTypeAttributes();
SourceLocation SkipExtendedMicrosoftTypeAttributes();
void ParseMicrosoftInheritanceClassAttributes(ParsedAttributes &attrs);
void ParseBorlandTypeAttributes(ParsedAttributes &attrs);
void ParseOpenCLKernelAttributes(ParsedAttributes &attrs);
void ParseOpenCLQualifiers(ParsedAttributes &Attrs);
/// \brief Parses opencl_unroll_hint attribute if language is OpenCL v2.0
/// or higher.
/// \return false if error happens.
bool MaybeParseOpenCLLoopAttribute(ParsedAttributes &Attrs) {
// HLSExt also enables this attribute (NOTE(review): this looks like a
// vendor language-option extension -- semantics not visible here, confirm).
if (getLangOpts().OpenCL || getLangOpts().HLSExt)
return ParseOpenCLLoopAttribute(Attrs);
// Not in an OpenCL/HLS mode: having nothing to parse is not an error.
return true;
}
/// \brief Parses opencl_unroll_hint attribute.
/// \return false if error happens.
bool ParseOpenCLLoopAttribute(ParsedAttributes &Attrs);
void ParseNullabilityTypeSpecifiers(ParsedAttributes &attrs);
void ParseXCLDependenceAttribute(IdentifierInfo &AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs,
SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
AttributeList::Syntax Syntax);
void ParseXCLArrayViewAttribute(IdentifierInfo &AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs,
SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
AttributeList::Syntax Syntax);
VersionTuple ParseVersionTuple(SourceRange &Range);
void ParseAvailabilityAttribute(IdentifierInfo &Availability,
SourceLocation AvailabilityLoc,
ParsedAttributes &attrs,
SourceLocation *endLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
AttributeList::Syntax Syntax);
Optional<AvailabilitySpec> ParseAvailabilitySpec();
ExprResult ParseAvailabilityCheckExpr(SourceLocation StartLoc);
void ParseExternalSourceSymbolAttribute(IdentifierInfo &ExternalSourceSymbol,
SourceLocation Loc,
ParsedAttributes &Attrs,
SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
AttributeList::Syntax Syntax);
void ParseObjCBridgeRelatedAttribute(IdentifierInfo &ObjCBridgeRelated,
SourceLocation ObjCBridgeRelatedLoc,
ParsedAttributes &attrs,
SourceLocation *endLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
AttributeList::Syntax Syntax);
void ParseTypeTagForDatatypeAttribute(IdentifierInfo &AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs,
SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
AttributeList::Syntax Syntax);
void ParseAttributeWithTypeArg(IdentifierInfo &AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs,
SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
AttributeList::Syntax Syntax);
void ParseTypeofSpecifier(DeclSpec &DS);
SourceLocation ParseDecltypeSpecifier(DeclSpec &DS);
void AnnotateExistingDecltypeSpecifier(const DeclSpec &DS,
SourceLocation StartLoc,
SourceLocation EndLoc);
void ParseUnderlyingTypeSpecifier(DeclSpec &DS);
void ParseAtomicSpecifier(DeclSpec &DS);
ExprResult ParseAlignArgument(SourceLocation Start,
SourceLocation &EllipsisLoc);
void ParseAlignmentSpecifier(ParsedAttributes &Attrs,
SourceLocation *endLoc = nullptr);
VirtSpecifiers::Specifier isCXX11VirtSpecifier(const Token &Tok) const;
/// Convenience overload: classify the current token (Tok) as a C++11
/// virt-specifier, delegating to the token-taking overload.
VirtSpecifiers::Specifier isCXX11VirtSpecifier() const {
return isCXX11VirtSpecifier(Tok);
}
void ParseOptionalCXX11VirtSpecifierSeq(VirtSpecifiers &VS, bool IsInterface,
SourceLocation FriendLoc);
bool isCXX11FinalKeyword() const;
/// DeclaratorScopeObj - RAII object used in Parser::ParseDirectDeclarator to
/// enter a new C++ declarator scope and exit it when the function is
/// finished.
class DeclaratorScopeObj {
Parser &P;
CXXScopeSpec &SS;    // nested-name-specifier describing the scope to enter
bool EnteredScope;   // semantic declarator scope was successfully entered
bool CreatedScope;   // P.EnterScope() was called and must be undone
public:
DeclaratorScopeObj(Parser &p, CXXScopeSpec &ss)
: P(p), SS(ss), EnteredScope(false), CreatedScope(false) {}
// Enter the declarator scope named by SS.  Must not be called twice.
void EnterDeclaratorScope() {
assert(!EnteredScope && "Already entered the scope!");
assert(SS.isSet() && "C++ scope was not set!");
CreatedScope = true;
P.EnterScope(0); // Not a decl scope.
// A 'false' return means the semantic scope was entered; only then do we
// record it so the destructor knows to exit it again.
if (!P.Actions.ActOnCXXEnterDeclaratorScope(P.getCurScope(), SS))
EnteredScope = true;
}
// Undo, in reverse order, whatever EnterDeclaratorScope accomplished.
~DeclaratorScopeObj() {
if (EnteredScope) {
assert(SS.isSet() && "C++ scope was cleared ?");
P.Actions.ActOnCXXExitDeclaratorScope(P.getCurScope(), SS);
}
if (CreatedScope)
P.ExitScope();
}
};
/// ParseDeclarator - Parse and verify a newly-initialized declarator.
void ParseDeclarator(Declarator &D);
/// A function that parses a variant of direct-declarator.
typedef void (Parser::*DirectDeclParseFunction)(Declarator&);
void ParseDeclaratorInternal(Declarator &D,
DirectDeclParseFunction DirectDeclParser);
enum AttrRequirements {
AR_NoAttributesParsed = 0, ///< No attributes are diagnosed.
AR_GNUAttributesParsedAndRejected = 1 << 0, ///< Diagnose GNU attributes.
AR_GNUAttributesParsed = 1 << 1,
AR_CXX11AttributesParsed = 1 << 2,
AR_DeclspecAttributesParsed = 1 << 3,
AR_AllAttributesParsed = AR_GNUAttributesParsed |
AR_CXX11AttributesParsed |
AR_DeclspecAttributesParsed,
AR_VendorAttributesParsed = AR_GNUAttributesParsed |
AR_DeclspecAttributesParsed
};
void ParseTypeQualifierListOpt(
DeclSpec &DS, unsigned AttrReqs = AR_AllAttributesParsed,
bool AtomicAllowed = true, bool IdentifierRequired = false,
Optional<llvm::function_ref<void()>> CodeCompletionHandler = None);
void ParseDirectDeclarator(Declarator &D);
void ParseDecompositionDeclarator(Declarator &D);
void ParseParenDeclarator(Declarator &D);
void ParseFunctionDeclarator(Declarator &D,
ParsedAttributes &attrs,
BalancedDelimiterTracker &Tracker,
bool IsAmbiguous,
bool RequiresArg = false);
bool ParseRefQualifier(bool &RefQualifierIsLValueRef,
SourceLocation &RefQualifierLoc);
bool isFunctionDeclaratorIdentifierList();
void ParseFunctionDeclaratorIdentifierList(
Declarator &D,
SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo);
void ParseParameterDeclarationClause(
Declarator &D,
ParsedAttributes &attrs,
SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo,
SourceLocation &EllipsisLoc);
void ParseBracketDeclarator(Declarator &D);
void ParseMisplacedBracketDeclarator(Declarator &D);
//===--------------------------------------------------------------------===//
// C++ 7: Declarations [dcl.dcl]
/// The kind of attribute specifier we have found.
enum CXX11AttributeKind {
/// This is not an attribute specifier.
CAK_NotAttributeSpecifier,
/// This should be treated as an attribute-specifier.
CAK_AttributeSpecifier,
/// The next tokens are '[[', but this is not an attribute-specifier. This
/// is ill-formed by C++11 [dcl.attr.grammar]p6.
CAK_InvalidAttributeSpecifier
};
CXX11AttributeKind
isCXX11AttributeSpecifier(bool Disambiguate = false,
bool OuterMightBeMessageSend = false);
void DiagnoseUnexpectedNamespace(NamedDecl *Context);
DeclGroupPtrTy ParseNamespace(DeclaratorContext Context,
SourceLocation &DeclEnd,
SourceLocation InlineLoc = SourceLocation());
void ParseInnerNamespace(std::vector<SourceLocation> &IdentLoc,
std::vector<IdentifierInfo *> &Ident,
std::vector<SourceLocation> &NamespaceLoc,
unsigned int index, SourceLocation &InlineLoc,
ParsedAttributes &attrs,
BalancedDelimiterTracker &Tracker);
Decl *ParseLinkage(ParsingDeclSpec &DS, DeclaratorContext Context);
Decl *ParseExportDeclaration();
DeclGroupPtrTy ParseUsingDirectiveOrDeclaration(
DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo,
SourceLocation &DeclEnd, ParsedAttributesWithRange &attrs);
Decl *ParseUsingDirective(DeclaratorContext Context,
SourceLocation UsingLoc,
SourceLocation &DeclEnd,
ParsedAttributes &attrs);
/// State collected while parsing a single using-declarator:
/// 'typename'[opt] nested-name-specifier unqualified-id '...'[opt].
struct UsingDeclarator {
SourceLocation TypenameLoc;   // location of 'typename', if present
CXXScopeSpec SS;              // nested-name-specifier of the used name
SourceLocation TemplateKWLoc; // location of the 'template' keyword, if any
UnqualifiedId Name;           // the name being introduced
SourceLocation EllipsisLoc;   // location of '...' for pack declarators
// Reset every field so the object can be reused for the next declarator.
void clear() {
TypenameLoc = TemplateKWLoc = EllipsisLoc = SourceLocation();
SS.clear();
Name.clear();
}
};
bool ParseUsingDeclarator(DeclaratorContext Context, UsingDeclarator &D);
DeclGroupPtrTy ParseUsingDeclaration(DeclaratorContext Context,
const ParsedTemplateInfo &TemplateInfo,
SourceLocation UsingLoc,
SourceLocation &DeclEnd,
AccessSpecifier AS = AS_none);
Decl *ParseAliasDeclarationAfterDeclarator(
const ParsedTemplateInfo &TemplateInfo, SourceLocation UsingLoc,
UsingDeclarator &D, SourceLocation &DeclEnd, AccessSpecifier AS,
ParsedAttributes &Attrs, Decl **OwnedType = nullptr);
Decl *ParseStaticAssertDeclaration(SourceLocation &DeclEnd);
Decl *ParseNamespaceAlias(SourceLocation NamespaceLoc,
SourceLocation AliasLoc, IdentifierInfo *Alias,
SourceLocation &DeclEnd);
//===--------------------------------------------------------------------===//
// C++ 9: classes [class] and C structs/unions.
bool isValidAfterTypeSpecifier(bool CouldBeBitfield);
void ParseClassSpecifier(tok::TokenKind TagTokKind, SourceLocation TagLoc,
DeclSpec &DS, const ParsedTemplateInfo &TemplateInfo,
AccessSpecifier AS, bool EnteringContext,
DeclSpecContext DSC,
ParsedAttributesWithRange &Attributes);
void SkipCXXMemberSpecification(SourceLocation StartLoc,
SourceLocation AttrFixitLoc,
unsigned TagType,
Decl *TagDecl);
void ParseCXXMemberSpecification(SourceLocation StartLoc,
SourceLocation AttrFixitLoc,
ParsedAttributesWithRange &Attrs,
unsigned TagType,
Decl *TagDecl);
ExprResult ParseCXXMemberInitializer(Decl *D, bool IsFunction,
SourceLocation &EqualLoc);
bool ParseCXXMemberDeclaratorBeforeInitializer(Declarator &DeclaratorInfo,
VirtSpecifiers &VS,
ExprResult &BitfieldSize,
LateParsedAttrList &LateAttrs);
void MaybeParseAndDiagnoseDeclSpecAfterCXX11VirtSpecifierSeq(Declarator &D,
VirtSpecifiers &VS);
DeclGroupPtrTy ParseCXXClassMemberDeclaration(
AccessSpecifier AS, AttributeList *Attr,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
ParsingDeclRAIIObject *DiagsFromTParams = nullptr);
DeclGroupPtrTy ParseCXXClassMemberDeclarationWithPragmas(
AccessSpecifier &AS, ParsedAttributesWithRange &AccessAttrs,
DeclSpec::TST TagType, Decl *Tag);
void ParseConstructorInitializer(Decl *ConstructorDecl);
MemInitResult ParseMemInitializer(Decl *ConstructorDecl);
void HandleMemberFunctionDeclDelays(Declarator& DeclaratorInfo,
Decl *ThisDecl);
//===--------------------------------------------------------------------===//
// C++ 10: Derived classes [class.derived]
TypeResult ParseBaseTypeSpecifier(SourceLocation &BaseLoc,
SourceLocation &EndLocation);
void ParseBaseClause(Decl *ClassDecl);
BaseResult ParseBaseSpecifier(Decl *ClassDecl);
AccessSpecifier getAccessSpecifierIfPresent() const;
bool ParseUnqualifiedIdTemplateId(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
IdentifierInfo *Name,
SourceLocation NameLoc,
bool EnteringContext,
ParsedType ObjectType,
UnqualifiedId &Id,
bool AssumeTemplateId);
bool ParseUnqualifiedIdOperator(CXXScopeSpec &SS, bool EnteringContext,
ParsedType ObjectType,
UnqualifiedId &Result);
//===--------------------------------------------------------------------===//
// OpenMP: Directives and clauses.
/// Parse clauses for '#pragma omp declare simd'.
DeclGroupPtrTy ParseOMPDeclareSimdClauses(DeclGroupPtrTy Ptr,
CachedTokens &Toks,
SourceLocation Loc);
/// \brief Parses declarative OpenMP directives.
DeclGroupPtrTy ParseOpenMPDeclarativeDirectiveWithExtDecl(
AccessSpecifier &AS, ParsedAttributesWithRange &Attrs,
DeclSpec::TST TagType = DeclSpec::TST_unspecified,
Decl *TagDecl = nullptr);
/// \brief Parse 'omp declare reduction' construct.
DeclGroupPtrTy ParseOpenMPDeclareReductionDirective(AccessSpecifier AS);
/// Parses initializer for provided omp_priv declaration inside the reduction
/// initializer.
void ParseOpenMPReductionInitializerForDecl(VarDecl *OmpPrivParm);
/// \brief Parses simple list of variables.
///
/// \param Kind Kind of the directive.
/// \param Callback Callback function to be called for the list elements.
/// \param AllowScopeSpecifier true, if the variables can have fully
/// qualified names.
///
bool ParseOpenMPSimpleVarList(
OpenMPDirectiveKind Kind,
const llvm::function_ref<void(CXXScopeSpec &, DeclarationNameInfo)> &
Callback,
bool AllowScopeSpecifier);
/// \brief Parses declarative or executable directive.
///
/// \param Allowed ACK_Any, if any directives are allowed,
/// ACK_StatementsOpenMPAnyExecutable - if any executable directives are
/// allowed, ACK_StatementsOpenMPNonStandalone - if only non-standalone
/// executable directives are allowed.
///
StmtResult
ParseOpenMPDeclarativeOrExecutableDirective(AllowedConstructsKind Allowed);
/// \brief Parses clause of kind \a CKind for directive of a kind \a Kind.
///
/// \param DKind Kind of current directive.
/// \param CKind Kind of current clause.
/// \param FirstClause true, if this is the first clause of a kind \a CKind
/// in current directive.
///
OMPClause *ParseOpenMPClause(OpenMPDirectiveKind DKind,
OpenMPClauseKind CKind, bool FirstClause);
/// \brief Parses clause with a single expression of a kind \a Kind.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPSingleExprClause(OpenMPClauseKind Kind,
bool ParseOnly);
/// \brief Parses simple clause of a kind \a Kind.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPSimpleClause(OpenMPClauseKind Kind, bool ParseOnly);
/// \brief Parses clause with a single expression and an additional argument
/// of a kind \a Kind.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPSingleExprWithArgClause(OpenMPClauseKind Kind,
bool ParseOnly);
/// \brief Parses clause without any additional arguments.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPClause(OpenMPClauseKind Kind, bool ParseOnly = false);
/// \brief Parses clause with the list of variables of a kind \a Kind.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPVarListClause(OpenMPDirectiveKind DKind,
OpenMPClauseKind Kind, bool ParseOnly);
public:
/// Parses simple expression in parens for single-expression clauses of OpenMP
/// constructs.
/// \param RLoc Returned location of right paren.
ExprResult ParseOpenMPParensExpr(StringRef ClauseName, SourceLocation &RLoc);
/// Data used for parsing list of variables in OpenMP clauses.
// Field meanings follow the OpenMP clause grammar; they are filled in by
// ParseOpenMPVarList and consumed by the clause actions.
struct OpenMPVarListDataTy {
Expr *TailExpr = nullptr;            // expression following ':' (if any)
SourceLocation ColonLoc;             // location of the ':' separator
CXXScopeSpec ReductionIdScopeSpec;   // scope of the reduction identifier
DeclarationNameInfo ReductionId;     // reduction operator/identifier
OpenMPDependClauseKind DepKind = OMPC_DEPEND_unknown;   // 'depend' kind
OpenMPLinearClauseKind LinKind = OMPC_LINEAR_val;       // 'linear' modifier
OpenMPMapClauseKind MapTypeModifier = OMPC_MAP_unknown; // 'map' modifier
OpenMPMapClauseKind MapType = OMPC_MAP_unknown;         // 'map' type
bool IsMapTypeImplicit = false;      // map type defaulted rather than written
SourceLocation DepLinMapLoc;         // location of the depend/linear/map kind
};
/// Parses clauses with list.
bool ParseOpenMPVarList(OpenMPDirectiveKind DKind, OpenMPClauseKind Kind,
SmallVectorImpl<Expr *> &Vars,
OpenMPVarListDataTy &Data);
bool ParseUnqualifiedId(CXXScopeSpec &SS, bool EnteringContext,
bool AllowDestructorName,
bool AllowConstructorName,
bool AllowDeductionGuide,
ParsedType ObjectType,
SourceLocation& TemplateKWLoc,
UnqualifiedId &Result);
private:
//===--------------------------------------------------------------------===//
// C++ 14: Templates [temp]
// C++ 14.1: Template Parameters [temp.param]
Decl *ParseDeclarationStartingWithTemplate(DeclaratorContext Context,
SourceLocation &DeclEnd,
AccessSpecifier AS = AS_none,
AttributeList *AccessAttrs = nullptr);
Decl *ParseTemplateDeclarationOrSpecialization(DeclaratorContext Context,
SourceLocation &DeclEnd,
AccessSpecifier AS,
AttributeList *AccessAttrs);
Decl *ParseSingleDeclarationAfterTemplate(
DeclaratorContext Context,
const ParsedTemplateInfo &TemplateInfo,
ParsingDeclRAIIObject &DiagsFromParams,
SourceLocation &DeclEnd,
AccessSpecifier AS=AS_none,
AttributeList *AccessAttrs = nullptr);
bool ParseTemplateParameters(unsigned Depth,
SmallVectorImpl<NamedDecl *> &TemplateParams,
SourceLocation &LAngleLoc,
SourceLocation &RAngleLoc);
bool ParseTemplateParameterList(unsigned Depth,
SmallVectorImpl<NamedDecl*> &TemplateParams);
bool isStartOfTemplateTypeParameter();
NamedDecl *ParseTemplateParameter(unsigned Depth, unsigned Position);
NamedDecl *ParseTypeParameter(unsigned Depth, unsigned Position);
NamedDecl *ParseTemplateTemplateParameter(unsigned Depth, unsigned Position);
NamedDecl *ParseNonTypeTemplateParameter(unsigned Depth, unsigned Position);
void DiagnoseMisplacedEllipsis(SourceLocation EllipsisLoc,
SourceLocation CorrectLoc,
bool AlreadyHasEllipsis,
bool IdentifierHasName);
void DiagnoseMisplacedEllipsisInDeclarator(SourceLocation EllipsisLoc,
Declarator &D);
// C++ 14.3: Template arguments [temp.arg]
typedef SmallVector<ParsedTemplateArgument, 16> TemplateArgList;
bool ParseGreaterThanInTemplateList(SourceLocation &RAngleLoc,
bool ConsumeLastToken,
bool ObjCGenericList);
bool ParseTemplateIdAfterTemplateName(bool ConsumeLastToken,
SourceLocation &LAngleLoc,
TemplateArgList &TemplateArgs,
SourceLocation &RAngleLoc);
bool AnnotateTemplateIdToken(TemplateTy Template, TemplateNameKind TNK,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &TemplateName,
bool AllowTypeAnnotation = true);
void AnnotateTemplateIdTokenAsType(bool IsClassName = false);
bool IsTemplateArgumentList(unsigned Skip = 0);
bool ParseTemplateArgumentList(TemplateArgList &TemplateArgs);
ParsedTemplateArgument ParseTemplateTemplateArgument();
ParsedTemplateArgument ParseTemplateArgument();
Decl *ParseExplicitInstantiation(DeclaratorContext Context,
SourceLocation ExternLoc,
SourceLocation TemplateLoc,
SourceLocation &DeclEnd,
AccessSpecifier AS = AS_none);
//===--------------------------------------------------------------------===//
// Modules
DeclGroupPtrTy ParseModuleDecl();
Decl *ParseModuleImport(SourceLocation AtLoc);
bool parseMisplacedModuleImport();
/// If the current token is one of the module annotation tokens
/// (annot_module_begin / annot_module_end / annot_module_include), dispatch
/// to parseMisplacedModuleImport() and forward its result; otherwise return
/// false without consuming anything.
bool tryParseMisplacedModuleImport() {
tok::TokenKind Kind = Tok.getKind();
if (Kind == tok::annot_module_begin || Kind == tok::annot_module_end ||
Kind == tok::annot_module_include)
return parseMisplacedModuleImport();
return false;
}
bool ParseModuleName(
SourceLocation UseLoc,
SmallVectorImpl<std::pair<IdentifierInfo *, SourceLocation>> &Path,
bool IsImport);
//===--------------------------------------------------------------------===//
// C++11/G++: Type Traits [Type-Traits.html in the GCC manual]
ExprResult ParseTypeTrait();
//===--------------------------------------------------------------------===//
// Embarcadero: Array and Expression Traits
ExprResult ParseArrayTypeTrait();
ExprResult ParseExpressionTrait();
//===--------------------------------------------------------------------===//
// Preprocessor code-completion pass-through
void CodeCompleteDirective(bool InConditional) override;
void CodeCompleteInConditionalExclusion() override;
void CodeCompleteMacroName(bool IsDefinition) override;
void CodeCompletePreprocessorExpression() override;
void CodeCompleteMacroArgument(IdentifierInfo *Macro, MacroInfo *MacroInfo,
unsigned ArgumentIndex) override;
void CodeCompleteNaturalLanguage() override;
};
} // end namespace clang
#endif
|
GB_unaryop__minv_int32_fp64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__minv_int32_fp64
// op(A') function: GB_tran__minv_int32_fp64
// C type: int32_t
// A type: double
// cast: int32_t cij ; GB_CAST_SIGNED(cij,aij,32)
// unaryop: cij = GB_IMINV_SIGNED (aij, 32)
#define GB_ATYPE \
double
#define GB_CTYPE \
int32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IMINV_SIGNED (x, 32) ;
// casting
#define GB_CASTING(z, aij) \
int32_t z ; GB_CAST_SIGNED(z,aij,32) ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_INT32 || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the unary op entry-wise: Cx [p] = op (cast (Ax [p])) for all p,
// where GB_CAST_OP (defined above) casts double to int32_t via
// GB_CAST_SIGNED and then applies GB_IMINV_SIGNED.
GrB_Info GB_unop__minv_int32_fp64
(
int32_t *Cx, // Cx and Ax may be aliased
double *Ax,
int64_t anz, // number of entries in Ax and Cx
int nthreads // number of OpenMP threads to use
)
{
#if GB_DISABLE
// this operator/type combination was disabled at compile time
return (GrB_NO_VALUE) ;
#else
int64_t p ;
// entries are independent and uniform, so a static schedule suffices
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// Transpose-and-apply kernel: the shared body in GB_unaryop_transpose.c is
// specialized for this type/operator pair by the GB_* macros defined above.
GrB_Info GB_tran__minv_int32_fp64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
// this operator/type combination was disabled at compile time
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
lda.h | #ifndef SRC_MODEL_LDA_LDA_H_
#define SRC_MODEL_LDA_LDA_H_
#include <mpi.h>
#include <omp.h>
#include <vector>
#include <random>
#include <algorithm>
#include <chrono>
#include <thread>
#include <mutex>
#include <deque>
#include <fstream>
#include <string>
#include "engine/dcm.h"
#include "engine/types.h"
#include "util/guide_table.h"
#include "util/xorshift.h"
#include "util/thread_local.h"
#include "util/hash_table.h"
#include "util/distributions.h"
#include "glog/logging.h"
using std::vector;
using std::pair;
inline bool compare(const SpEntry &x, const SpEntry &y) {
return x.v > y.v;
}
// Distributed LDA trainer.  Topic-count matrices are sharded over an MPI
// process grid: cwk (word-topic) is column-partitioned and cdk (doc-topic)
// is row-partitioned; each process additionally runs thread_size worker
// threads with thread-local scratch state.
class LDA {
public :
TTopic K;                          // number of topics
vector<TProb> alpha;               // per-topic document prior, length K
TProb beta, alphaBar, betaBar;     // word prior; alpha * K; beta * global vocab
/// notice : log_likelihood need double precision to work correctly
TLikehood log_likelihood;
vector<TLikehood> llthread;        // per-thread partial log-likelihoods
ThreadLocal<xorshift> generators;  // per-thread random number generators
ThreadLocal<vector<TProb>> phis;
// Sampler prior tables (NOTE(review): presumably guide/alias tables used by
// the sampling loop -- semantics live in the implementation file, confirm).
GuideTable prior1Table;
vector<TProb> priorCwk;
vector<TProb> prior1Prob;
TProb prior1Sum;
ThreadLocal<GuideTable> prior2Table;
ThreadLocal<vector<TTopic>> prior2NNZ;
ThreadLocal<vector<TProb>> prior2Prob;
ThreadLocal<vector<TProb>> probs;
UniformRealDistribution<TProb> u01;  // uniform(0,1), seeded from generators.Get(0)
TIter iter;                        // number of training iterations
CVA<int> &corpus;                  // token storage; referenced, not owned
// MPI
TId process_size, process_id, monitor_id;
TLen thread_size;                  // worker threads per process
TCount num_words, num_docs;        // local vocabulary and document counts
vector<TCount> word_per_doc;
TCount doc_split_size, word_split_size;  // process-grid dimensions
vector<TProb> inv_ck;
DCMSparse cwk;                     // word-topic counts (column_partition)
DCMSparse cdk;                     // doc-topic counts (row_partition)
LocalMergeStyle local_merge_style;
size_t global_token_number;        // total tokens over MPI_COMM_WORLD
TCount global_word_number;         // vocabulary size over the doc partition
// count the word frequency belong to this node
vector<TCount> word_frequency;
vector<TCount> local_word_frequency, global_word_frequency;
// Builds the sharded count matrices, derives global vocabulary/token
// counts via MPI reductions, and seeds per-thread RNG state.
LDA(TIter iter, TTopic K, TProb alpha, TProb beta, CVA<int> &corpus,
const TId process_size, const TId process_id, const TLen thread_size,
const TCount num_docs, const TCount num_words,
const TCount doc_split_size, const TCount word_split_size,
LocalMergeStyle local_merge_style)
: K(K), alpha(K, alpha), beta(beta), alphaBar(alpha * K),
iter(iter), corpus(corpus), process_size(process_size),
process_id(process_id), thread_size(thread_size),
num_docs(num_docs), num_words(num_words),
doc_split_size(doc_split_size), word_split_size(word_split_size),
local_merge_style(local_merge_style),
cwk(word_split_size, doc_split_size, num_words, K,
column_partition, process_size, process_id, thread_size,
local_merge_style, 0),
cdk(doc_split_size, word_split_size, num_docs, K, row_partition,
process_size, process_id, thread_size, local_merge_style, 0) {
/*
printf("pid %d LDA constructor row_size : %d, column_size : %d, process_size : %d, process_id : %d, thread_size : %d\n",
process_id, cwk.row_size, cwk.column_size, cwk.process_size, cwk.process_id, cwk.thread_size);
printf("pid %d LDA constructor row_head : %d, row_tail : %d\n", cwk.process_id, cwk.row_head, cwk.row_tail);
*/
// Split COMM_WORLD by process_id / word_split_size and sum the local
// vocabulary sizes within each group to get the global vocabulary size.
MPI_Comm doc_partition;
MPI_Comm_split(MPI_COMM_WORLD, process_id / word_split_size,
process_id, &doc_partition);
TCount local_word_number = num_words;
MPI_Allreduce(&local_word_number, &global_word_number, 1,
MPI_INT, MPI_SUM, doc_partition);
// betaBar depends on the global vocabulary, so it is set here rather
// than in the initializer list.
betaBar = beta * global_word_number;
word_per_doc.resize(num_docs);
llthread.resize(thread_size);
inv_ck.resize(K);
priorCwk.resize(K);
prior1Prob.resize(K);
// corpus.size() appears to be in bytes, one int per token -- hence the
// division by sizeof(int).
size_t local_token_number = corpus.size() / sizeof(int);
MPI_Allreduce(&local_token_number, &global_token_number, 1,
MPI_UNSIGNED_LONG_LONG, MPI_SUM, MPI_COMM_WORLD);
// Initialize generators
std::random_device rd;
for (auto &gen : generators) gen.seed(rd(), rd());
u01 = decltype(u01)(0, 1, generators.Get(0));
word_frequency.resize(num_words);
local_word_frequency.resize(num_words);
global_word_frequency.resize(num_words);
monitor_id = 0;
}
virtual void Estimate();   // main training loop (defined elsewhere)
virtual ~LDA() { }
void iterWord();
// Fold this node's word-topic counts into topic_word, which holds
// frequent_word_number (word, count) slots per topic; wordmap translates
// local word indices to global ids.
// NOTE(review): a larger count overwrites the first smaller slot rather
// than inserting with a shift, so the slots are not a strict top-k list
// unless already sorted descending -- confirm this is intended.
void outputTopicWord(vector<SpEntry> &topic_word,
vector<TIndex>wordmap,
int frequent_word_number) {
for (TIndex local_w = 0; local_w < num_words; ++local_w) {
auto sparse_row = cwk.row(local_w);
for (auto entry : sparse_row) {
TTopic topic = entry.k;
TCount cnt = entry.v;
for (TIndex i = 0; i < frequent_word_number; ++i) {
TTopic offset = topic * frequent_word_number + i;
if (cnt > topic_word[offset].v) {
topic_word[offset].k = wordmap[local_w];
topic_word[offset].v = cnt;
break;
}
}
}
}
/*
* code backup for debug
ofstream fout("/home/yama/btm/BigTopicModel/data/nips.wf-tail." + to_string(process_id));
for (TIndex word = 0; word < num_words; ++word) {
fout << wordmap[word] << " " << word_frequency[word] << "\n";
}
fout << endl;
for (TIndex topic = 0; topic < K; ++topic) {
std::sort(ltw[topic].begin(), ltw[topic].end(), compare);
fout << ltw[topic].size() << " : ";
for (auto entry: ltw[topic])
fout << wordmap[entry.k] << " " << entry.v << ",\t";
fout << endl;
}
fout.close();
*/
}
// Compute global per-word frequencies (summed across the communicator
// formed by process_id % word_split_size) and write them to
// "<prefix>.wf-head.<process_id>".
void corpusStat(vector<TIndex>wordmap, string prefix) {
// #pragma omp parallel for
for (TWord v = 0; v < num_words; v++) {
auto row = corpus.Get(v);
// number of occurrences of word v on this node
local_word_frequency[v] = row.size();
}
MPI_Comm word_partition;
MPI_Comm_split(MPI_COMM_WORLD, process_id % word_split_size,
process_id, &word_partition);
MPI_Allreduce(local_word_frequency.data(),
global_word_frequency.data(),
global_word_frequency.size(),
MPI_INT, MPI_SUM, word_partition);
// show the orig word frequency
ofstream fout(prefix + ".wf-head." + to_string(process_id));
for (TIndex word = 0; word < num_words; ++word) {
fout << wordmap[word] << " " << global_word_frequency[word] << "\n";
}
fout.close();
}
};
#endif // SRC_MODEL_LDA_LDA_H_
|
sageInterface_modified.h | #ifndef ROSE_SAGE_INTERFACE
#define ROSE_SAGE_INTERFACE
#include "sage3basic.hhh"
#include <stdint.h>
#include <utility>
#include "rosePublicConfig.h" // for ROSE_BUILD_JAVA_LANGUAGE_SUPPORT
#if 0 // FMZ(07/07/2010): the argument "nextErrorCode" should be call-by-reference
SgFile* determineFileType ( std::vector<std::string> argv, int nextErrorCode, SgProject* project );
#else
SgFile* determineFileType ( std::vector<std::string> argv, int& nextErrorCode, SgProject* project );
#endif
#ifndef ROSE_USE_INTERNAL_FRONTEND_DEVELOPMENT
#include "rewrite.h"
#endif
// DQ (7/20/2008): Added support for unparsing arbitrary strings in the unparser.
#include "astUnparseAttribute.h"
#include <set>
#ifndef ROSE_USE_INTERNAL_FRONTEND_DEVELOPMENT
#include "LivenessAnalysis.h"
#include "abstract_handle.h"
#include "ClassHierarchyGraph.h"
#endif
// DQ (8/19/2004): Moved from ROSE/src/midend/astRewriteMechanism/rewrite.h
//! A global function for getting the string associated with an enum (which is defined in global scope)
ROSE_DLL_API std::string getVariantName (VariantT v);
// DQ (12/9/2004): Qing, Rich and Dan have decided to start this namespace within ROSE
// This namespace is specific to interface functions that operate on the Sage III AST.
// The name was chosen so as not to conflict with other classes within ROSE.
// This will become the future home of many interface functions which operate on
// the AST and which are generally useful to users. As a namespace multiple files can be used
// to represent the compete interface and different developers may contribute interface
// functions easily.
// Constructor handling: (We have sageBuilder.h now for this purpose, Liao 2/1/2008)
// We could add simpler layers of support for construction of IR nodes by
// hiding many details in "makeSg***()" functions. Such functions would
// return pointers to the associated Sg*** objects and would be able to hide
// many IR specific details, including:
// memory handling
// optional parameter settings not often required
// use of Sg_File_Info objects (and setting them as transformations)
//
// namespace AST_Interface (this name is taken already by some of Qing's work :-)
//! An alias for Sg_File_Info::generateDefaultFileInfoForTransformationNode()
#define TRANS_FILE Sg_File_Info::generateDefaultFileInfoForTransformationNode()
//------------------------------------------------------------------------
/*! \brief This namespace is to organize functions that are useful when operating on the AST.
\defgroup frontendSageUtilityFunctions SAGE III utility functions(SageInterface)
\ingroup ROSE_FrontEndGroup
The Sage III IR design attempts to be minimalist. Thus additional functionality is
intended to be presented using separate higher level interfaces which work with the IR.
The namespace, SageInterface, collects functions that operate on the IR and are supportive of numerous types of routine operations required to support general analysis and transformation of the AST.
\internal Further organization of the functions in this namespace is required.
Major AST manipulation functions are scattered in the following directories
- src/midend/astUtil/astInterface
- src/roseSupport/utility_function.h, namespace Rose
- src/roseSupport/TransformationSupport.h, class TransformationSupport
- src/midend/astInlining/inlinerSupport.C
- src/frontend/SageIII/sageInterface
- projects: such as outliner, OpenMP_Translator
Some other utility functions not related AST can be found in
- src/util/stringSupport/string_functions.h, namespace StringUtility
- src/roseExtensions/dataStructureTraversal/helpFunctions.C
- projects/dataStructureGraphing/helpFunctions.C
\todo A number of additional things to do:
- Pull scope handling out of EDG/Sage III translation so that it is made
available to anyone else building the Sage III IR from scratch (which
when it gets non-trivial, involves the manipulation of scopes).
- Other stuff ...
*/
namespace SageInterface
{
// DQ (4/3/2014): Added general AST support separate from the AST.
// Container and API for analysis information that is outside of the AST and as a result
// prevents frequent modification of the IR.
class DeclarationSets
{
// DQ (4/3/2014): This stores all associated declarations as a map of sets.
// the key to the map is the first nondefining declaration and the elements of the set are
// all of the associated declarations (including the defining declaration).
private:
//! Map of first-nondefining declaration to all other associated declarations.
std::map<SgDeclarationStatement*,std::set<SgDeclarationStatement*>* > declarationMap;
public:
//! Record decl in the set keyed by its first nondefining declaration
//! (behavior defined in the corresponding .C file -- not visible here).
void addDeclaration(SgDeclarationStatement* decl);
//! Return the set of declarations associated with decl.
// NOTE(review): presumably returns NULL when decl was never added -- confirm in the definition.
const std::set<SgDeclarationStatement*>* getDeclarations(SgDeclarationStatement* decl);
//! Direct (mutable) access to the underlying map of declaration sets.
std::map<SgDeclarationStatement*,std::set<SgDeclarationStatement*>* > & getDeclarationMap();
//! Query whether decl is located in a defining scope (semantics in the .C file).
bool isLocatedInDefiningScope(SgDeclarationStatement* decl);
};
// DQ (4/3/2014): This constructs a data structure that holds analysis information about
// the AST that is separate from the AST. This is intended to be a general mechanism
// to support analysis information without constantly modifying the IR.
DeclarationSets* buildDeclarationSets(SgNode*);
//! An internal counter for generating unique SgName
ROSE_DLL_API extern int gensym_counter;
// tps : 28 Oct 2008 - support for finding the main interpretation
SgAsmInterpretation* getMainInterpretation(SgAsmGenericFile* file);
//! Get the unsigned value of a disassembled constant.
uint64_t getAsmConstant(SgAsmValueExpression* e);
//! Get the signed value of a disassembled constant.
int64_t getAsmSignedConstant(SgAsmValueExpression *e);
//! Function to add "C" style comment to statement.
void addMessageStatement( SgStatement* stmt, std::string message );
//! A persistent attribute to represent a unique name for an expression
class UniqueNameAttribute : public AstAttribute
{
private:
//! The unique name associated with the annotated expression.
std::string name;
public:
//! Construct the attribute, optionally seeding it with a name.
// Parameter taken by const reference (and the member initialized directly)
// to avoid the extra copy the original by-value form incurred.
UniqueNameAttribute(const std::string & n="") : name(n) {};
//! Replace the stored unique name.
void set_name (const std::string & n) {name = n;};
//! Read the stored unique name; const-qualified so it can be called on const attributes.
std::string get_name () const {return name;};
};
// DQ (3/2/2009): Added support for collecting and merging the referenced symbols in the outlined
// function into the list used to edit the outlined code subtree to fixup references (from symbols
// in the original file to the symbols in the newer separate file).
// typedef rose_hash::unordered_map<SgNode*, SgNode*, hash_nodeptr> ReplacementMapType;
// void supplementReplacementSymbolMap ( const ReplacementMapTraversal::ReplacementMapType & inputReplacementMap );
// CH (4/9/2010): Use boost::hash instead
//#ifdef _MSC_VER
#if 0
inline size_t hash_value(SgNode* t) {return (size_t)t;}
#endif
//! Hash functor for AST node pointers, usable with rose_hash::unordered_map.
// CH (4/9/2010): boost::hash is preferred elsewhere; this functor simply
// reinterprets the pointer value as the hash, which is cheap and stable for
// the lifetime of the node. (Dead #ifdef _MSC_VER branches removed.)
struct hash_nodeptr
{
public:
//! Hash an AST node pointer by its address.
size_t operator()(SgNode* node) const
{
return (size_t) node;
}
};
#ifndef SWIG
// DQ (3/10/2013): This appears to be a problem for the SWIG interface (undefined reference at link-time).
void supplementReplacementSymbolMap ( rose_hash::unordered_map<SgNode*, SgNode*, hash_nodeptr> & inputReplacementMap );
#endif
//------------------------------------------------------------------------
//@{
/*! @name Symbol tables
\brief utility functions for symbol tables
*/
// Liao 1/22/2008, used for get symbols for generating variable reference nodes
// ! Find a variable symbol in current and ancestor scopes for a given name
ROSE_DLL_API SgVariableSymbol *lookupVariableSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope=NULL);
// DQ (8/21/2013): Modified to make newest function parameters be default arguments.
// DQ (8/16/2013): For now we want to remove the use of default parameters and add the support for template parameters and template arguments.
//! Find a symbol in current and ancestor scopes for a given variable name, starting from top of ScopeStack if currentscope is not given or NULL.
// SgSymbol *lookupSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope=NULL);
// SgSymbol *lookupSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope, SgTemplateParameterPtrList* templateParameterList, SgTemplateArgumentPtrList* templateArgumentList);
ROSE_DLL_API SgSymbol *lookupSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope = NULL, SgTemplateParameterPtrList* templateParameterList = NULL, SgTemplateArgumentPtrList* templateArgumentList = NULL);
// DQ (11/24/2007): Functions moved from the Fortran support so that they could be called from within astPostProcessing.
//!look up the first matched function symbol in parent scopes given only a function name, starting from top of ScopeStack if currentscope is not given or NULL
ROSE_DLL_API SgFunctionSymbol *lookupFunctionSymbolInParentScopes (const SgName & functionName, SgScopeStatement *currentScope=NULL);
// Liao, 1/24/2008, find exact match for a function
//!look up function symbol in parent scopes given both name and function type, starting from top of ScopeStack if currentscope is not given or NULL
ROSE_DLL_API SgFunctionSymbol *lookupFunctionSymbolInParentScopes (const SgName & functionName,
const SgType* t,
SgScopeStatement *currentScope=NULL);
// DQ (8/21/2013): Modified to make newest function parameters be default arguments.
// DQ (8/16/2013): For now we want to remove the use of default parameters and add the support for template parameters and template arguments.
// DQ (5/7/2011): Added support for SgClassSymbol (used in name qualification support).
// SgClassSymbol* lookupClassSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope = NULL);
ROSE_DLL_API SgClassSymbol* lookupClassSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope = NULL, SgTemplateArgumentPtrList* templateArgumentList = NULL);
ROSE_DLL_API SgTypedefSymbol* lookupTypedefSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope = NULL);
#if 0
// DQ (8/13/2013): This function does not make sense any more, now that we have made the symbol
// table handling more precise and we have to provide template parameters for any template lookup.
// We also have to know if we want to lookup template classes, template functions, or template
// member functions (since each have specific requirements).
SgTemplateSymbol* lookupTemplateSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope = NULL);
#endif
#if 0
// DQ (8/13/2013): I am not sure if we want this functions in place of lookupTemplateSymbolInParentScopes.
// Where these are called we might not know enough information about the template parameters or function
// types, for example.
SgTemplateClassSymbol* lookupTemplateClassSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope = NULL, SgTemplateParameterPtrList* templateParameterList = NULL, SgTemplateArgumentPtrList* templateArgumentList = NULL);
SgTemplateFunctionSymbol* lookupTemplateFunctionSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope = NULL, SgTemplateParameterPtrList* templateParameterList = NULL);
SgTemplateMemberFunctionSymbol* lookupTemplateMemberFunctionSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope = NULL, SgTemplateParameterPtrList* templateParameterList = NULL);
#endif
// DQ (8/21/2013): Modified to make some of the newest function parameters be default arguments.
// DQ (8/13/2013): I am not sure if we want this functions in place of lookupTemplateSymbolInParentScopes.
ROSE_DLL_API SgTemplateClassSymbol* lookupTemplateClassSymbolInParentScopes (const SgName & name, SgTemplateParameterPtrList* templateParameterList, SgTemplateArgumentPtrList* templateArgumentList, SgScopeStatement *cscope = NULL);
ROSE_DLL_API SgEnumSymbol* lookupEnumSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope = NULL);
ROSE_DLL_API SgNamespaceSymbol* lookupNamespaceSymbolInParentScopes(const SgName & name, SgScopeStatement *currentScope = NULL);
// DQ (7/17/2011): Added function from cxx branch that I need here for the Java support.
// SgClassSymbol* lookupClassSymbolInParentScopes (const SgName & name, SgScopeStatement *cscope);
/*! \brief set_name of symbol in symbol table.
This function extracts the symbol from the relevant symbol table,
changes the name (at the declaration) and reinserts it into the
symbol table.
\internal I think this is what this function does, I need to double check.
*/
// DQ (12/9/2004): Moved this function (by Alin Jula) from being a member of SgInitializedName
// to this location where it can be a part of the interface for the Sage III AST.
ROSE_DLL_API int set_name (SgInitializedName * initializedNameNode, SgName new_name);
/*! \brief Output function type symbols in global function type symbol table.
*/
void outputGlobalFunctionTypeSymbolTable ();
// DQ (6/27/2005):
/*! \brief Output the local symbol tables.
\implementation Each symbol table is output with the file infor where it is located in the source code.
*/
ROSE_DLL_API void outputLocalSymbolTables (SgNode * node);
//! Traversal helper used by outputLocalSymbolTables() above.
class OutputLocalSymbolTables:public AstSimpleProcessing
{
public:
// Called once per AST node by the AstSimpleProcessing traversal;
// presumably emits the local symbol table of scope nodes -- defined in the .C file.
void visit (SgNode * node);
};
/*! \brief Regenerate the symbol table.
\implementation current symbol table must be NULL pointer before calling this
function (for safety, but is this a good idea?)
*/
// DQ (9/28/2005):
void rebuildSymbolTable (SgScopeStatement * scope);
/*! \brief Clear those variable symbols with unknown type (together with initialized names) which are also not referenced by any variable references or declarations under root. If root is NULL, all symbols with unknown type will be deleted.
*/
void clearUnusedVariableSymbols (SgNode* root = NULL);
// DQ (3/1/2009):
//! All the symbol table references in the copied AST need to be reset after rebuilding the copied scope's symbol table.
void fixupReferencesToSymbols( const SgScopeStatement* this_scope, SgScopeStatement* copy_scope, SgCopyHelp & help );
//@}
//------------------------------------------------------------------------
//@{
/*! @name Stringify
\brief Generate a useful string (name) to describe a SgNode
*/
/*! \brief Generate a useful name to describe the SgNode
\internal default names are used for SgNode objects that can not be associated with a name.
*/
// DQ (9/21/2005): General function for extracting the name of declarations (when they have names)
std::string get_name (const SgNode * node);
/*! \brief Generate a useful name to describe the declaration
\internal default names are used for declarations that can not be associated with a name.
*/
// DQ (6/13/2005): General function for extracting the name of declarations (when they have names)
std::string get_name (const SgStatement * stmt);
/*! \brief Generate a useful name to describe the expression
\internal default names are used for expressions that can not be associated with a name.
*/
std::string get_name (const SgExpression * expr);
/*! \brief Generate a useful name to describe the declaration
\internal default names are used for declarations that can not be associated with a name.
*/
// DQ (6/13/2005): General function for extracting the name of declarations (when they have names)
std::string get_name (const SgDeclarationStatement * declaration);
/*! \brief Generate a useful name to describe the scope
\internal default names are used for scope that cannot be associated with a name.
*/
// DQ (6/13/2005): General function for extracting the name of declarations (when they have names)
std::string get_name (const SgScopeStatement * scope);
/*! \brief Generate a useful name to describe the SgSymbol
\internal default names are used for SgSymbol objects that cannot be associated with a name.
*/
// DQ (2/11/2007): Added this function to make debugging support more complete (useful for symbol table debugging support).
std::string get_name (const SgSymbol * symbol);
/*! \brief Generate a useful name to describe the SgType
\internal default names are used for SgType objects that cannot be associated with a name.
*/
std::string get_name (const SgType * type);
/*! \brief Generate a useful name to describe the SgSupport IR node
*/
std::string get_name (const SgSupport * node);
/*! \brief Generate a useful name to describe the SgLocatedNodeSupport IR node
*/
std::string get_name (const SgLocatedNodeSupport * node);
/*! \brief Generate a useful name to describe the SgC_PreprocessorDirectiveStatement IR node
*/
std::string get_name ( const SgC_PreprocessorDirectiveStatement* directive );
/*! \brief Generate a useful name to describe the SgToken IR node
*/
std::string get_name ( const SgToken* token );
//@}
//------------------------------------------------------------------------
//@{
/*! @name Class utilities
\brief
*/
/*! \brief Get the default destructor from the class declaration
*/
// DQ (6/21/2005): Get the default destructor from the class declaration
SgMemberFunctionDeclaration *getDefaultDestructor (SgClassDeclaration *
classDeclaration);
/*! \brief Get the default constructor from the class declaration
*/
// DQ (6/22/2005): Get the default constructor from the class declaration
ROSE_DLL_API SgMemberFunctionDeclaration *getDefaultConstructor (SgClassDeclaration *
classDeclaration);
/*! \brief Return true if template definition is in the class, false if outside of class.
*/
// DQ (8/27/2005):
bool templateDefinitionIsInClass (SgTemplateInstantiationMemberFunctionDecl
* memberFunctionDeclaration);
/*! \brief Generate a non-defining (forward) declaration from a defining function declaration.
\internal should put into sageBuilder ?
*/
// DQ (9/17/2005):
SgTemplateInstantiationMemberFunctionDecl*
buildForwardFunctionDeclaration
(SgTemplateInstantiationMemberFunctionDecl * memberFunctionInstantiation);
//! Check if a SgNode is a declaration for a structure
bool isStructDeclaration(SgNode * node);
//! Check if a SgNode is a declaration for a union
bool isUnionDeclaration(SgNode * node);
#if 0
// DQ (8/28/2005): This is already a member function of the SgFunctionDeclaration
// (so that it can handle template functions and member functions)
/*! \brief Return true if member function of a template member function,
of false if a non-template member function in a templated class.
*/
// DQ (8/27/2005):
bool isTemplateMemberFunction (SgTemplateInstantiationMemberFunctionDecl *
memberFunctionDeclaration);
#endif
//@}
//------------------------------------------------------------------------
//@{
/*! @name Misc.
\brief Not sure the classifications right now
*/
// DQ (2/12/2012): Added some diagnostic support.
//! Diagnostic function for tracing back through the parent list to understand at runtime where in the AST a failure happened.
void whereAmI(SgNode* node);
//! Extract a SgPragmaDeclaration's leading keyword . For example "#pragma omp parallel" has a keyword of "omp".
std::string extractPragmaKeyword(const SgPragmaDeclaration *);
//! Check if a node is SgOmp*Statement
ROSE_DLL_API bool isOmpStatement(SgNode* );
/*! \brief Return true if function is overloaded.
*/
// DQ (8/27/2005):
bool isOverloaded (SgFunctionDeclaration * functionDeclaration);
// DQ (2/14/2012): Added support function used for variable declarations in conditionals.
//! Support function used for variable declarations in conditionals
void initializeIfStmt(SgIfStmt *ifstmt, SgStatement* conditional, SgStatement * true_body, SgStatement * false_body);
//! Support function used for variable declarations in conditionals
void initializeSwitchStatement(SgSwitchStatement* switchStatement,SgStatement *item_selector,SgStatement *body);
//! Support function used for variable declarations in conditionals
void initializeWhileStatement(SgWhileStmt* whileStatement, SgStatement * condition, SgStatement *body, SgStatement *else_body);
//! Generate unique names for expressions and attach the names as persistent attributes ("UniqueNameAttribute")
void annotateExpressionsWithUniqueNames (SgProject* project);
//! Check if a SgNode is a main() function declaration
ROSE_DLL_API bool isMain (const SgNode* node);
// DQ (6/22/2005):
/*! \brief Generate unique name from C and C++ constructs. The name may contain space.
This is support for the AST merge, but is generally useful as a more general mechanism than
name mangling which is more closely ties to the generation of names to support link-time function name
resolution. This is more general than common name mangling in that it resolves more relevant differences
between C and C++ declarations. (e.g. the type within the declaration: "struct { int:8; } foo;").
\implementation current work does not support expressions.
*/
std::string generateUniqueName ( const SgNode * node, bool ignoreDifferenceBetweenDefiningAndNondefiningDeclarations);
/** Generate a name like __temp#__ that is unique in the current scope and any parent and children scopes. # is a unique integer counter.
* @param baseName the word to be included in the variable names. */
std::string generateUniqueVariableName(SgScopeStatement* scope, std::string baseName = "temp");
// DQ (8/10/2010): Added const to first parameter.
// DQ (3/10/2007):
//! Generate a unique string from the source file position information
std::string declarationPositionString (const SgDeclarationStatement * declaration);
// DQ (1/20/2007):
//! Added mechanism to generate project name from list of file names
ROSE_DLL_API std::string generateProjectName (const SgProject * project, bool supressSuffix = false );
//! Given a SgExpression that represents a named function (or bound member
//! function), return the mentioned function
SgFunctionDeclaration* getDeclarationOfNamedFunction(SgExpression* func);
//! Get the mask expression from the header of a SgForAllStatement
SgExpression* forallMaskExpression(SgForAllStatement* stmt);
//! Find all SgPntrArrRefExp under astNode, then add SgVarRefExp (if any) of SgPntrArrRefExp's dim_info into NodeList_t
void addVarRefExpFromArrayDimInfo(SgNode * astNode, Rose_STL_Container<SgNode *>& NodeList_t);
// DQ (10/6/2006): Added support for faster mangled name generation (caching avoids recomputation).
/*! \brief Support for faster mangled name generation (caching avoids recomputation).
*/
#ifndef SWIG
// DQ (3/10/2013): This appears to be a problem for the SWIG interface (undefined reference at link-time).
void clearMangledNameCache (SgGlobal * globalScope);
void resetMangledNameCache (SgGlobal * globalScope);
#endif
std::string getMangledNameFromCache (SgNode * astNode);
std::string addMangledNameToCache (SgNode * astNode, const std::string & mangledName);
SgDeclarationStatement * getNonInstantiatonDeclarationForClass (SgTemplateInstantiationMemberFunctionDecl * memberFunctionInstantiation);
//! a better version for SgVariableDeclaration::set_baseTypeDefininingDeclaration(), handling all side effects automatically
//! Used to have a struct declaration embedded into a variable declaration
void setBaseTypeDefiningDeclaration(SgVariableDeclaration* var_decl, SgDeclarationStatement *base_decl);
// DQ (10/14/2006): This function tests the AST to see if for a non-defining declaration, the
// bool declarationPreceedsDefinition ( SgClassDeclaration* classNonDefiningDeclaration, SgClassDeclaration* classDefiningDeclaration );
//! Check if a defining declaration comes before or after the non-defining declaration.
bool declarationPreceedsDefinition (SgDeclarationStatement *nonDefiningDeclaration, SgDeclarationStatement *definingDeclaration);
// DQ (10/19/2006): Function calls have interesting context dependent rules to determine if
// they are output with a global qualifier or not. Where this is true we have to avoid global
// qualifiers, since the function's scope has not been defined. This is an example of where
// qualification of function names in function calls are context dependent; an interesting
// example of where the C++ language is not friendly to source-to-source processing :-).
bool functionCallExpressionPreceedsDeclarationWhichAssociatesScope (SgFunctionCallExp * functionCall);
/*! \brief Compute the intersection set for two ASTs.
This is part of a test done by the copy function to compute those IR nodes in the copy that still reference the original AST.
*/
ROSE_DLL_API std::vector < SgNode * >astIntersection (SgNode * original, SgNode * copy, SgCopyHelp * help = NULL);
//! Deep copy an arbitrary subtree
ROSE_DLL_API SgNode* deepCopyNode (const SgNode* subtree);
//! A template function for deep copying a subtree. It is also used to create deepcopy functions with specialized parameter and return types. e.g SgExpression* copyExpression(SgExpression* e);
template <typename NodeType>
NodeType* deepCopy (const NodeType* subtree) {
return dynamic_cast<NodeType*>(deepCopyNode(subtree));
}
//! Deep copy an expression
ROSE_DLL_API SgExpression* copyExpression(SgExpression* e);
//!Deep copy a statement
ROSE_DLL_API SgStatement* copyStatement(SgStatement* s);
// from VarSym.cc in src/midend/astOutlining/src/ASTtools
//! Get the variable symbol for the first initialized name of a declaration stmt.
ROSE_DLL_API SgVariableSymbol* getFirstVarSym (SgVariableDeclaration* decl);
//! Get the first initialized name of a declaration statement
ROSE_DLL_API SgInitializedName* getFirstInitializedName (SgVariableDeclaration* decl);
//! A special purpose statement removal function, originally from inlinerSupport.h, Need Jeremiah's attention to refine it. Please don't use it for now.
ROSE_DLL_API void myRemoveStatement(SgStatement* stmt);
ROSE_DLL_API bool isConstantTrue(SgExpression* e);
ROSE_DLL_API bool isConstantFalse(SgExpression* e);
ROSE_DLL_API bool isCallToParticularFunction(SgFunctionDeclaration* decl, SgExpression* e);
ROSE_DLL_API bool isCallToParticularFunction(const std::string& qualifiedName, size_t arity, SgExpression* e);
//! Check if a declaration has a "static' modifier
bool ROSE_DLL_API isStatic(SgDeclarationStatement* stmt);
//! Set a declaration as static
ROSE_DLL_API void setStatic(SgDeclarationStatement* stmt);
//! Check if a declaration has an "extern" modifier
ROSE_DLL_API bool isExtern(SgDeclarationStatement* stmt);
//! Set a declaration as extern
ROSE_DLL_API void setExtern(SgDeclarationStatement* stmt);
//! Interface for creating a statement whose computation writes its answer into
//! a given variable.
//! Abstract interface: implementations generate a statement whose computation
//! writes its result into a caller-supplied expression.
class StatementGenerator {
public:
// Virtual destructor so concrete generators can be destroyed through this interface.
virtual ~StatementGenerator() {};
//! Produce the statement; where_to_write_answer is the lvalue expression the
//! generated code must store its result into.
virtual SgStatement* generate(SgExpression* where_to_write_answer) = 0;
};
//! Check if a SgNode _s is an assignment statement (any of =,+=,-=,&=,/=, ^=, etc)
//!
//! Return the left hand, right hand expressions and if the left hand variable is also being read
bool isAssignmentStatement(SgNode* _s, SgExpression** lhs=NULL, SgExpression** rhs=NULL, bool* readlhs=NULL);
//! Variable references can be introduced by SgVarRef, SgPntrArrRefExp, SgInitializedName, SgMemberFunctionRef etc. This function will convert them all to a top level SgInitializedName.
ROSE_DLL_API SgInitializedName* convertRefToInitializedName(SgNode* current);
//! Build an abstract handle from an AST node, reuse previously built handle when possible
ROSE_DLL_API AbstractHandle::abstract_handle* buildAbstractHandle(SgNode*);
//! Obtain a matching SgNode from an abstract handle string
ROSE_DLL_API SgNode* getSgNodeFromAbstractHandleString(const std::string& input_string);
//! Dump information about a SgNode for debugging
ROSE_DLL_API void dumpInfo(SgNode* node, std::string desc="");
//! Reorder a list of declaration statements based on their appearance order in source files
ROSE_DLL_API std::vector<SgDeclarationStatement*>
sortSgNodeListBasedOnAppearanceOrderInSource(const std::vector<SgDeclarationStatement*>& nodevec);
// DQ (4/13/2013): We need these to support the unparing of operators defined by operator syntax or member function names.
//! Is an overloaded operator a prefix operator (e.g. address operator X * operator&(), dereference operator X & operator*(), unary plus operator X & operator+(), etc.
// bool isPrefixOperator( const SgMemberFunctionRefExp* memberFunctionRefExp );
bool isPrefixOperator( SgExpression* exp );
//! Check for proper names of possible prefix operators (used in isPrefixOperator()).
bool isPrefixOperatorName( const SgName & functionName );
//! Is an overloaded operator a postfix operator. (e.g. ).
bool isPostfixOperator( SgExpression* exp );
//! Is an overloaded operator an index operator (also referred to as call or subscript operators). (e.g. X & operator()() or X & operator[]()).
bool isIndexOperator( SgExpression* exp );
//@}
//------------------------------------------------------------------------
//@{
/*! @name AST properties
\brief version, language properties of current AST.
*/
// std::string version(); // utility_functions.h, version number
/*! Brief These traverse the memory pool of SgFile IR nodes and determine what languages are in use!
*/
ROSE_DLL_API bool is_C_language ();
ROSE_DLL_API bool is_OpenMP_language ();
ROSE_DLL_API bool is_UPC_language ();
//! Check if dynamic threads compilation is used for UPC programs
ROSE_DLL_API bool is_UPC_dynamic_threads();
ROSE_DLL_API bool is_C99_language ();
ROSE_DLL_API bool is_Cxx_language ();
ROSE_DLL_API bool is_Java_language ();
ROSE_DLL_API bool is_Fortran_language ();
ROSE_DLL_API bool is_CAF_language ();
ROSE_DLL_API bool is_PHP_language();
ROSE_DLL_API bool is_Python_language();
ROSE_DLL_API bool is_Cuda_language();
ROSE_DLL_API bool is_X10_language();
ROSE_DLL_API bool is_binary_executable();
ROSE_DLL_API bool is_mixed_C_and_Cxx_language ();
ROSE_DLL_API bool is_mixed_Fortran_and_C_language ();
ROSE_DLL_API bool is_mixed_Fortran_and_Cxx_language ();
ROSE_DLL_API bool is_mixed_Fortran_and_C_and_Cxx_language ();
//@}
//------------------------------------------------------------------------
//@{
/*! @name Scope
\brief
*/
// DQ (10/5/2006): Added support for faster (non-quadratic) computation of unique
// labels for scopes in a function (as required for name mangling).
/*! \brief Assigns unique numbers to each SgScopeStatement of a function.
This is used to provide unique names for variables and types defined in
different nested scopes of a function (used in mangled name generation).
*/
void resetScopeNumbers (SgFunctionDefinition * functionDeclaration);
// DQ (10/5/2006): Added support for faster (non-quadratic) computation of unique
// labels for scopes in a function (as required for name mangling).
/*! \brief Clears the cache of scope,integer pairs for the input function.
This is used to clear the cache of computed unique labels for scopes in a function.
This function should be called after any transformation on a function that might effect
the allocation of scopes and cause the existing unique numbers to be incorrect.
This is part of support to provide unique names for variables and types defined is
different nested scopes of a function (used in mangled name generation).
*/
void clearScopeNumbers (SgFunctionDefinition * functionDefinition);
//!Find the enclosing namespace of a declaration
SgNamespaceDefinitionStatement * enclosingNamespaceScope (SgDeclarationStatement * declaration);
// SgNamespaceDefinitionStatement * getEnclosingNamespaceScope (SgNode * node);
bool isPrototypeInScope (SgScopeStatement * scope,
SgFunctionDeclaration * functionDeclaration,
SgDeclarationStatement * startingAtDeclaration);
//!check if node1 is a strict ancestor of node 2. (a node is not considered its own ancestor)
bool ROSE_DLL_API isAncestor(SgNode* node1, SgNode* node2);
//@}
//------------------------------------------------------------------------
//@{
/*! @name Preprocessing Information
\brief #if-#else-#end, comments, #include, etc
*/
//! Dumps a located node's preprocessing information.
void dumpPreprocInfo (SgLocatedNode* locatedNode);
//! Insert #include "filename" or #include <filename> (system header) into the global scope containing the current scope, right after other #include XXX.
ROSE_DLL_API PreprocessingInfo* insertHeader(const std::string& filename, PreprocessingInfo::RelativePositionType position=PreprocessingInfo::after, bool isSystemHeader=false, SgScopeStatement* scope=NULL);
//! Identical to movePreprocessingInfo(), except for the stale name and confusing order of parameters. It will be deprecated soon.
ROSE_DLL_API void moveUpPreprocessingInfo (SgStatement* stmt_dst, SgStatement* stmt_src, PreprocessingInfo::RelativePositionType src_position=PreprocessingInfo::undef, PreprocessingInfo::RelativePositionType dst_position=PreprocessingInfo::undef, bool usePrepend= false);
//! Move preprocessing information of stmt_src to stmt_dst, Only move preprocessing information from the specified source-relative position to a specified target position, otherwise move all preprocessing information with position information intact. The preprocessing information is appended to the existing preprocessing information list of the target node by default. Prepending is used if usePreprend is set to true. Optionally, the relative position can be adjust after the moving using dst_position.
ROSE_DLL_API void movePreprocessingInfo (SgStatement* stmt_src, SgStatement* stmt_dst, PreprocessingInfo::RelativePositionType src_position=PreprocessingInfo::undef,
PreprocessingInfo::RelativePositionType dst_position=PreprocessingInfo::undef, bool usePrepend= false);
//!Cut preprocessing information from a source node and save it into a buffer. Used in combination of pastePreprocessingInfo(). The cut-paste operation is similar to moveUpPreprocessingInfo() but it is more flexible in that the destination node can be unknown during the cut operation.
ROSE_DLL_API void cutPreprocessingInfo (SgLocatedNode* src_node, PreprocessingInfo::RelativePositionType pos, AttachedPreprocessingInfoType& save_buf);
//!Paste preprocessing information from a buffer to a destination node. Used in combination of cutPreprocessingInfo()
ROSE_DLL_API void pastePreprocessingInfo (SgLocatedNode* dst_node, PreprocessingInfo::RelativePositionType pos, AttachedPreprocessingInfoType& saved_buf);
//! Attach an arbitrary string to a located node. A workaround to insert irregular statements or vendor-specific attributes.
ROSE_DLL_API PreprocessingInfo* attachArbitraryText(SgLocatedNode* target,
const std::string & text,
PreprocessingInfo::RelativePositionType position=PreprocessingInfo::before);
//!Check if a pragma declaration node has macro calls attached, if yes, replace macro calls within the pragma string with expanded strings. This only works if -rose:wave is turned on.
ROSE_DLL_API void replaceMacroCallsWithExpandedStrings(SgPragmaDeclaration* target);
//@}
//------------------------------------------------------------------------
//@{
/*! @name Source File Position
\brief set Sg_File_Info for a SgNode
*/
//! Build and attach comment, comment style is inferred from the language type of the target node if not provided
ROSE_DLL_API PreprocessingInfo* attachComment(SgLocatedNode* target, const std::string & content,
PreprocessingInfo::RelativePositionType position=PreprocessingInfo::before,
PreprocessingInfo::DirectiveType dtype= PreprocessingInfo::CpreprocessorUnknownDeclaration);
// DQ (11/25/2009): Added matching support for adding comments to SgAsm nodes.
// Build and attach comment
// void attachComment(SgAsmStatement* target, const std::string & content );
// DQ (7/20/2008): I am not clear where I should put this function, candidates include: SgLocatedNode or SgInterface
//! Add a string to be unparsed to support code generation for back-end specific tools or compilers.
ROSE_DLL_API void addTextForUnparser ( SgNode* astNode, std::string s, AstUnparseAttribute::RelativePositionType inputlocation );
// ************************************************************************
// Newer versions of now deprecated functions
// ************************************************************************
// DQ (5/1/2012): This function queries the SageBuilder::SourcePositionClassification mode (stored in the SageBuilder
// interface) and used the specified mode to initialize the source position data (Sg_File_Info objects). This
// function is the only function that should be called directly (though in a namespace we can't define permissions).
//! Set the source code position for the current (input) node.
ROSE_DLL_API void setSourcePosition(SgNode* node);
// A better name might be "setSourcePositionForSubTree"
//! Set the source code position for the subtree (including the root).
ROSE_DLL_API void setSourcePositionAtRootAndAllChildren(SgNode *root);
// DQ (5/1/2012): New function with improved name (still preserving the previous interface).
// This function is not required once the new mechanism defining a source position mode is complete (shortly).
//! Set subtree as a transformation.
// void setSourcePositionAtRootAndAllChildrenAsTransformation(SgNode *root);
// void setSourcePositionAtRootAndAllChildrenAsDefault(SgNode *root);
// Removed to force use of the API and permit flexibility in the lower level implementation.
//! DQ (5/1/2012): New function with improved name.
// void setSourcePositionToDefault( SgLocatedNode* locatedNode );
template<class T> void setSourcePositionToDefault( T* node );
//! DQ (5/1/2012): New function with improved name.
void setSourcePositionAsTransformation(SgNode *node);
// DQ (5/1/2012): Newly renamed function (previous name preserved for backward compatibility).
void setSourcePositionPointersToNull(SgNode *node);
// ************************************************************************
// ************************************************************************
// Older deprecated functions
// ************************************************************************
// Liao, 1/8/2007, set file info. for a whole subtree as transformation generated
//! Set current node's source position as transformation generated
ROSE_DLL_API void setOneSourcePositionForTransformation(SgNode *node);
//! Set current node's source position as NULL
ROSE_DLL_API void setOneSourcePositionNull(SgNode *node);
//! Recursively set source position info(Sg_File_Info) as transformation generated
ROSE_DLL_API void setSourcePositionForTransformation (SgNode * root);
//! Set source position info(Sg_File_Info) as transformation generated for all SgNodes in memory pool
ROSE_DLL_API void setSourcePositionForTransformation_memoryPool();
//! Set the source position of SgLocatedNode to Sg_File_Info::generateDefaultFileInfo(). These nodes WILL be unparsed. Not for transformation usage.
// ROSE_DLL_API void setSourcePosition (SgLocatedNode * locatedNode);
// ************************************************************************
//@}
//------------------------------------------------------------------------
//@{
/*! @name Data types
\brief
*/
// from src/midend/astInlining/typeTraits.h
// src/midend/astUtil/astInterface/AstInterface.h
//! Get the right bool type according to C or C++ language input
SgType* getBoolType(SgNode* n);
//! Check if a type is an integral type, only allowing signed/unsigned short, int, long, long long.
//!
//! There is another similar function named SgType::isIntegerType(), which allows additional types char, wchar, and bool to be treated as integer types
ROSE_DLL_API bool isStrictIntegerType(SgType* t);
//!Get the data type of the first initialized name of a declaration statement
ROSE_DLL_API SgType* getFirstVarType(SgVariableDeclaration* decl);
//! Is a type default constructible? This may not quite work properly.
ROSE_DLL_API bool isDefaultConstructible(SgType* type);
//! Is a type copy constructible? This may not quite work properly.
ROSE_DLL_API bool isCopyConstructible(SgType* type);
//! Is a type assignable? This may not quite work properly.
ROSE_DLL_API bool isAssignable(SgType* type);
#ifndef ROSE_USE_INTERNAL_FRONTEND_DEVELOPMENT
//! Check if a class type is a pure virtual class. True means that there is at least
//! one pure virtual function that has not been overridden.
//! In the case of an incomplete class type (forward declaration), this function returns false.
ROSE_DLL_API bool isPureVirtualClass(SgType* type, const ClassHierarchyWrapper& classHierarchy);
#endif
//! Does a type have a trivial (built-in) destructor?
ROSE_DLL_API bool hasTrivialDestructor(SgType* t);
//! Is this type a non-constant reference type? (Handles typedefs correctly)
ROSE_DLL_API bool isNonconstReference(SgType* t);
//! Is this type a const or non-const reference type? (Handles typedefs correctly)
ROSE_DLL_API bool isReferenceType(SgType* t);
//! Is this type a pointer type? (Handles typedefs correctly)
ROSE_DLL_API bool isPointerType(SgType* t);
//! Is this a pointer to a non-const type? Note that this function will return true for const pointers pointing to
//! non-const types. For example, (int* const y) points to a modifiable int, so this function returns true. Meanwhile,
//! it returns false for (int const * x) and (int const * const x) because these types point to a const int.
//! Also, only the outer layer of nested pointers is unwrapped. So the function returns true for (const int ** y), but returns
//! false for const (int * const * x)
ROSE_DLL_API bool isPointerToNonConstType(SgType* type);
//! Is this a const type?
/* const char* p = "aa"; is not treated as having a const type. It is a pointer to const char.
* Similarly, neither for const int b[10]; or const int & c =10;
* The standard says, "A compound type is not cv-qualified by the cv-qualifiers (if any) of
the types from which it is compounded. Any cv-qualifiers applied to an array type affect the array element type, not the array type".
*/
ROSE_DLL_API bool isConstType(SgType* t);
//! Remove const (if present) from a type. stripType() cannot do this because it removes all modifiers.
SgType* removeConst(SgType* t);
//! Is this a volatile type?
ROSE_DLL_API bool isVolatileType(SgType* t);
//! Is this a restrict type?
ROSE_DLL_API bool isRestrictType(SgType* t);
//! Is this a scalar type?
/*! We define the following SgType as scalar types: char, short, int, long , void, Wchar, Float, double, long long, string, bool, complex, imaginary
*/
ROSE_DLL_API bool isScalarType(SgType* t);
//! Check if a type is an integral type, only allowing signed/unsigned short, int, long, long long.
//!
//! There is another similar function named SgType::isIntegerType(), which allows additional types char, wchar, and bool.
ROSE_DLL_API bool isStrictIntegerType(SgType* t);
//! Check if a type is a struct type (a special SgClassType in ROSE)
ROSE_DLL_API bool isStructType(SgType* t);
//! Generate a mangled string for a given type based on Itanium C++ ABI
ROSE_DLL_API std::string mangleType(SgType* type);
//! Generate mangled scalar type names according to Itanium C++ ABI, the input type should pass isScalarType() in ROSE
ROSE_DLL_API std::string mangleScalarType(SgType* type);
//! Generate mangled modifier types, including const and volatile, according to Itanium C++ ABI, with extension to handle UPC shared types.
ROSE_DLL_API std::string mangleModifierType(SgModifierType* type);
//! Calculate the number of elements of an array type: dim1* dim2*... , assume element count is 1 for int a[]; Strip off THREADS if it is a UPC array.
ROSE_DLL_API size_t getArrayElementCount(SgArrayType* t);
//! Get the number of dimensions of an array type
ROSE_DLL_API int getDimensionCount(SgType* t);
//! Get the element type of an array
ROSE_DLL_API SgType* getArrayElementType(SgType* t);
//! Get the element type of an array, pointer or string, or NULL if not applicable
ROSE_DLL_API SgType* getElementType(SgType* t);
/// \brief returns the array dimensions in an array as defined for arrtype
/// \param arrtype the type of a C/C++ array
/// \return an array that contains an expression indicating each dimension's size.
/// OWNERSHIP of the expressions is TRANSFERED TO the CALLER (which
/// becomes responsible for freeing the expressions).
/// Note, the first entry of the array is a SgNullExpression, iff the
/// first array dimension was not specified.
/// \code
/// int x[] = { 1, 2, 3 };
/// \endcode
/// note, the expression does not have to be a constant
/// \code
/// int x[i*5];
/// \endcode
/// \post return-value.empty() == false
/// \post return-value[*] != NULL (no nullptr in the returned vector)
std::vector<SgExpression*>
get_C_array_dimensions(const SgArrayType& arrtype);
/// \brief returns the array dimensions in an array as defined for arrtype
/// \param arrtype the type of a C/C++ array
/// \param varref a reference to an array variable (the variable of type arrtype)
/// \return an array that contains an expression indicating each dimension's size.
/// OWNERSHIP of the expressions is TRANSFERED TO the CALLER (which
/// becomes responsible for freeing the expressions).
/// If the first array dimension was not specified an expression
/// that indicates that size is generated.
/// \code
/// int x[][3] = { 1, 2, 3, 4, 5, 6 };
/// \endcode
/// the entry for the first dimension will be:
/// \code
/// // 3 ... size of 2nd dimension
/// sizeof(x) / (sizeof(int) * 3)
/// \endcode
/// \pre arrtype is the array-type of varref
/// \post return-value.empty() == false
/// \post return-value[*] != NULL (no nullptr in the returned vector)
/// \post !isSgNullExpression(return-value[*])
std::vector<SgExpression*>
get_C_array_dimensions(const SgArrayType& arrtype, const SgVarRefExp& varref);
/// \overload
/// \note see get_C_array_dimensions for SgVarRefExp for details.
/// \todo make initname const
std::vector<SgExpression*>
get_C_array_dimensions(const SgArrayType& arrtype, SgInitializedName& initname);
//! Check if an expression is an array access (SgPntrArrRefExp). If so, return its name expression and subscripts if requested. Users can use convertRefToInitializedName() to get the possible name. It does not check if the expression is a top level SgPntrArrRefExp.
ROSE_DLL_API bool isArrayReference(SgExpression* ref, SgExpression** arrayNameExp=NULL, std::vector<SgExpression*>** subscripts=NULL);
//! Has a UPC shared type of any kinds (shared-to-shared, private-to-shared, shared-to-private, shared scalar/array)? An optional parameter, mod_type_out, stores the first SgModifierType with UPC access information.
/*!
* Note: we classify private-to-shared as 'has shared' type for convenience here. It is indeed a private type in strict sense.
AST graph for some examples:
- shared scalar: SgModifierType -->base type
- shared array: SgArrayType --> SgModiferType --> base type
- shared to shared: SgModifierType --> SgPointerType --> SgModifierType ->SgTypeInt
- shared to private: SgModifierType --> SgPointerType --> base type
- private to shared: SgPointerType --> SgModifierType --> base type
*/
ROSE_DLL_API bool hasUpcSharedType(SgType* t, SgModifierType ** mod_type_out = NULL );
//! Check if a type is a UPC shared type, including shared array, shared pointers etc. Exclude private pointers to shared types. Optionally return the modifier type with the UPC shared property.
/*!
* ROSE uses SgArrayType of SgModifierType to represent shared arrays, not SgModifierType points to SgArrayType. Also typedef may cause a chain of nodes before reach the actual SgModifierType with UPC shared property.
*/
ROSE_DLL_API bool isUpcSharedType(SgType* t, SgModifierType ** mod_type_out = NULL);
//! Check if a modifier type is a UPC shared type.
ROSE_DLL_API bool isUpcSharedModifierType (SgModifierType* mod_type);
//! Check if an array type is a UPC shared type. ROSE AST represents a UPC shared array as regular array of elements of UPC shared Modifier Type. Not directly a UPC shared Modifier Type of an array.
ROSE_DLL_API bool isUpcSharedArrayType (SgArrayType* array_type);
//! Check if a shared UPC type is strict memory consistency or not. Return false if it is relaxed. (So isUpcRelaxedSharedModifierType() is not necessary.)
ROSE_DLL_API bool isUpcStrictSharedModifierType(SgModifierType* mode_type);
//! Get the block size of a UPC shared modifier type
ROSE_DLL_API size_t getUpcSharedBlockSize(SgModifierType* mod_type);
//! Get the block size of a UPC shared type, including Modifier types and array of modifier types (shared arrays)
ROSE_DLL_API size_t getUpcSharedBlockSize(SgType* t);
//! Is UPC phase-less shared type? Phase-less means block size of the first SgModifierType with UPC information is 1 or 0/unspecified. Also return false if the type is not a UPC shared type.
ROSE_DLL_API bool isUpcPhaseLessSharedType (SgType* t);
//! Is a UPC private-to-shared pointer? SgPointerType comes first compared to SgModifierType with UPC information. Input type must be any of UPC shared types first.
ROSE_DLL_API bool isUpcPrivateToSharedType(SgType* t);
//! Is a UPC array with dimension of X*THREADS
ROSE_DLL_API bool isUpcArrayWithThreads(SgArrayType* t);
//! Lookup a named type based on its name, searching bottom-up from a specified scope. Note name collision might be allowed for C (not C++) between typedef and enum/struct. Only the first matched named type will be returned in this case. typedef is returned as it is, not the base type it actually refers to.
ROSE_DLL_API SgType* lookupNamedTypeInParentScopes(const std::string& type_name, SgScopeStatement* scope=NULL);
// DQ (7/22/2014): Added support for comparing expression types in actual arguments with those expected from the formal function parameter types.
//! Get the type of the associated argument expression from the function type.
ROSE_DLL_API SgType* getAssociatedTypeFromFunctionTypeList(SgExpression* actual_argument_expression);
//@}
//------------------------------------------------------------------------
//@{
/*! @name Loop handling
\brief
*/
// by Jeremiah
//! Add a step statement to the end of a loop body
//! Add a new label to the end of the loop, with the step statement after
//! it; then change all continue statements in the old loop body into
//! jumps to the label
//!
//! For example:
//! while (a < 5) {if (a < -3) continue;} (adding "a++" to end) becomes
//! while (a < 5) {if (a < -3) goto label; label: a++;}
ROSE_DLL_API void addStepToLoopBody(SgScopeStatement* loopStmt, SgStatement* step);
ROSE_DLL_API void moveForStatementIncrementIntoBody(SgForStatement* f);
ROSE_DLL_API void convertForToWhile(SgForStatement* f);
ROSE_DLL_API void convertAllForsToWhiles(SgNode* top);
//! Change continue statements in a given block of code to gotos to a label
ROSE_DLL_API void changeContinuesToGotos(SgStatement* stmt, SgLabelStatement* label);
//!Return the loop index variable for a for loop
ROSE_DLL_API SgInitializedName* getLoopIndexVariable(SgNode* loop);
//!Check if a SgInitializedName is used as a loop index within a AST subtree
//! This function will use a bottom-up traverse starting from the subtree_root to find all enclosing loops and check if ivar is used as an index for either of them.
ROSE_DLL_API bool isLoopIndexVariable(SgInitializedName* ivar, SgNode* subtree_root);
//! Routines to get and set the body of a loop
ROSE_DLL_API SgStatement* getLoopBody(SgScopeStatement* loop);
ROSE_DLL_API void setLoopBody(SgScopeStatement* loop, SgStatement* body);
//! Routines to get the condition of a loop. It recognize While-loop, For-loop, and Do-While-loop
ROSE_DLL_API SgStatement* getLoopCondition(SgScopeStatement* loop);
//! Set the condition statement of a loop, including While-loop, For-loop, and Do-While-loop.
ROSE_DLL_API void setLoopCondition(SgScopeStatement* loop, SgStatement* cond);
//! Check if a for-loop has a canonical form, return loop index, bounds, step, and body if requested
//!
//! A canonical form is defined as : one initialization statement, a test expression, and an increment expression , loop index variable should be of an integer type. IsInclusiveUpperBound is true when <= or >= is used for loop condition
ROSE_DLL_API bool isCanonicalForLoop(SgNode* loop, SgInitializedName** ivar=NULL, SgExpression** lb=NULL, SgExpression** ub=NULL, SgExpression** step=NULL, SgStatement** body=NULL, bool *hasIncrementalIterationSpace = NULL, bool* isInclusiveUpperBound = NULL);
//! Check if a Fortran Do loop has a complete canonical form: Do I=1, 10, 1
ROSE_DLL_API bool isCanonicalDoLoop(SgFortranDo* loop,SgInitializedName** ivar/*=NULL*/, SgExpression** lb/*=NULL*/, SgExpression** ub/*=NULL*/, SgExpression** step/*=NULL*/, SgStatement** body/*=NULL*/, bool *hasIncrementalIterationSpace/*= NULL*/, bool* isInclusiveUpperBound/*=NULL*/);
//! Set the lower bound of a loop header for (i=lb; ...)
ROSE_DLL_API void setLoopLowerBound(SgNode* loop, SgExpression* lb);
//! Set the upper bound of a loop header,regardless the condition expression type. for (i=lb; i op up, ...)
ROSE_DLL_API void setLoopUpperBound(SgNode* loop, SgExpression* ub);
//! Set the stride(step) of a loop 's incremental expression, regardless the expression types (i+=s; i= i+s, etc)
ROSE_DLL_API void setLoopStride(SgNode* loop, SgExpression* stride);
//! Normalize loop init stmt by promoting the single variable declaration statement outside of the for loop header's init statement, e.g. for (int i=0;) becomes int i_x; for (i_x=0;..) and rewrite the loop with the new index variable, if necessary
ROSE_DLL_API bool normalizeForLoopInitDeclaration(SgForStatement* loop);
//! Normalize a for loop, return true if successful. Generated constants will be fold by default.
//!
//! Translations are :
//! For the init statement: for (int i=0;... ) becomes int i; for (i=0;..)
//! For test expression:
//! i<x is normalized to i<= (x-1) and
//! i>x is normalized to i>= (x+1)
//! For increment expression:
//! i++ is normalized to i+=1 and
//! i-- is normalized to i+=-1
//! i-=s is normalized to i+= -s
ROSE_DLL_API bool forLoopNormalization(SgForStatement* loop, bool foldConstant = true);
//!Normalize a Fortran Do loop. Make the default increment expression (1) explicit
ROSE_DLL_API bool doLoopNormalization(SgFortranDo* loop);
//! Unroll a target loop with a specified unrolling factor. It handles steps larger than 1 and adds a fringe loop if the iteration count is not evenly divisible by the unrolling factor.
ROSE_DLL_API bool loopUnrolling(SgForStatement* loop, size_t unrolling_factor);
//! Interchange/permutate a n-level perfectly-nested loop rooted at 'loop' using a lexicographical order number within (0,depth!).
ROSE_DLL_API bool loopInterchange(SgForStatement* loop, size_t depth, size_t lexicoOrder);
//! Tile the n-level (starting from 1) loop of a perfectly nested loop nest using tiling size s
ROSE_DLL_API bool loopTiling(SgForStatement* loopNest, size_t targetLevel, size_t tileSize);
//Winnie Loop Collapsing
SgExprListExp * loopCollapsing(SgForStatement* target_loop, size_t collapsing_factor);
//@}
//------------------------------------------------------------------------
//@{
/*! @name Topdown search
\brief Top-down traversal from current node to find a node of a specified type
*/
//! Query a subtree to get all nodes of a given type, with an appropriate downcast.
//! \param top      root of the subtree to search
//! \param variant  IR variant to query for; defaults to NodeType's own variant
//! \return all matching nodes, downcast to NodeType*
template <typename NodeType>
std::vector<NodeType*> querySubTree(SgNode* top, VariantT variant = (VariantT)NodeType::static_variant)
{
  Rose_STL_Container<SgNode*> matches = NodeQuery::querySubTree(top,variant);
  std::vector<NodeType*> typedNodes;
  typedNodes.reserve(matches.size());
  for (Rose_STL_Container<SgNode*>::const_iterator it = matches.begin();
       it != matches.end(); ++it) {
    // Every node returned for this variant must downcast cleanly.
    NodeType* casted = dynamic_cast<NodeType*>(*it);
    ROSE_ASSERT (casted);
    typedNodes.push_back(casted);
  }
  return typedNodes;
}
/*! \brief Returns STL vector of SgFile IR node pointers.
Demonstrates use of restricted traversal over just SgFile IR nodes.
*/
std::vector < SgFile * >generateFileList ();
/** Get the current SgProject IR Node.
*
* The library should never have more than one project and it asserts such. If no project has been created yet then this
* function returns the null pointer. */
ROSE_DLL_API SgProject * getProject();
//! Query memory pools to grab SgNode of a specified type.
//! Uses a memory-pool traversal restricted to NodeType's representative IR nodes.
//! \return all live nodes of type NodeType currently in the memory pool
template <typename NodeType>
static std::vector<NodeType*> getSgNodeListFromMemoryPool()
{
  // Local traversal that collects each visited pool node after downcasting.
  class PoolCollector : public ROSE_VisitTraversal
  {
    public:
      std::vector<NodeType*> resultlist;
      virtual ~PoolCollector() {}
      void visit ( SgNode* node)
      {
        // Nodes in this pool should already be of the requested type.
        NodeType* typedNode = dynamic_cast<NodeType* > (node);
        ROSE_ASSERT(typedNode != NULL);
        if (typedNode != NULL)
          resultlist.push_back(typedNode);
      }
  };

  PoolCollector collector;
  NodeType::visitRepresentativeNode(collector);
  return collector.resultlist;
}
/*! \brief top-down traversal from current node to find the main() function declaration
*/
ROSE_DLL_API SgFunctionDeclaration* findMain(SgNode* currentNode);
//! Find the last declaration statement within a scope (if any). This is often useful to decide where to insert another declaration statement
SgStatement* findLastDeclarationStatement(SgScopeStatement * scope);
//midend/programTransformation/partialRedundancyElimination/pre.h
//! Find referenced symbols within an expression
std::vector<SgVariableSymbol*> getSymbolsUsedInExpression(SgExpression* expr);
//! Find break statements inside a particular statement, stopping at nested loops or switches
/*! loops or switch statements defines their own contexts for break
statements. The function will stop immediately if run on a loop or switch
statement. If fortranLabel is non-empty, breaks (EXITs) to that label within
nested loops are included in the returned list.
*/
std::vector<SgBreakStmt*> findBreakStmts(SgStatement* code, const std::string& fortranLabel = "");
//! Find all continue statements inside a particular statement, stopping at nested loops
/*! Nested loops define their own contexts for continue statements. The
function will stop immediately if run on a loop
statement. If fortranLabel is non-empty, continues (CYCLEs) to that label
within nested loops are included in the returned list.
*/
std::vector<SgContinueStmt*> findContinueStmts(SgStatement* code, const std::string& fortranLabel = "");
std::vector<SgGotoStatement*> findGotoStmts(SgStatement* scope, SgLabelStatement* l);
std::vector<SgStatement*> getSwitchCases(SgSwitchStatement* sw);
//! Topdown traverse a subtree from root to find the first declaration given its name, scope (optional, can be NULL), and defining or nondefining flag.
//! \param root       subtree root to search; NULL returns NULL
//! \param name       symbol name the declaration must have
//! \param scope      required scope of the declaration, or NULL to match any scope
//! \param isDefining if true, return the defining declaration of the match (asserted non-NULL)
//! \return the first matching declaration in traversal order, or NULL if none found
template <typename T>
T* findDeclarationStatement(SgNode* root, std::string name, SgScopeStatement* scope, bool isDefining)
{
  if (!root) return 0;

  // Check whether the current node itself is a matching declaration.
  T* decl = dynamic_cast<T*>(root);
  if (decl != NULL)
  {
    // Guard against declarations without an associated symbol:
    // search_for_symbol_from_symbol_table() may return NULL, and the
    // original code dereferenced it unconditionally (crash).
    SgSymbol* symbol = decl->search_for_symbol_from_symbol_table();
    if (symbol != NULL && symbol->get_name() == name)
    {
      // Liao 2/9/2010. We should allow NULL scope: match any scope when none is given.
      if (scope == NULL || decl->get_scope() == scope)
      {
        if (isDefining)
        {
          ROSE_ASSERT (decl->get_definingDeclaration() != NULL);
          return dynamic_cast<T*> (decl->get_definingDeclaration());
        }
        else
          return decl;
      }
    }
  }

  // Recurse into children in traversal order and return the first match.
  std::vector<SgNode*> children = root->get_traversalSuccessorContainer();
  for (std::vector<SgNode*>::const_iterator i = children.begin();
       i != children.end(); ++i)
  {
    T* target = findDeclarationStatement<T> (*i, name, scope, isDefining);
    if (target)
      return target;
  }
  return 0;
}
//! Topdown traverse a subtree from root to find the first function declaration matching the given name, scope (optional, can be NULL), and defining or nondefining flag. This is an instantiation of findDeclarationStatement<T>.
SgFunctionDeclaration* findFunctionDeclaration(SgNode* root, std::string name, SgScopeStatement* scope, bool isDefining);
#if 0 //TODO
// 1. preorder traversal from current SgNode till find next SgNode of type V_SgXXX
// until reach the end node
SgNode* getNextSgNode( const SgNode* astSourceNode, VariantT=V_SgNode, SgNode* astEndNode=NULL);
// 2. return all nodes of type VariantT following the source node
std::vector<SgNode*> getAllNextSgNode( const SgNode* astSourceNode, VariantT=V_SgNode, SgNode* astEndNode=NULL);
#endif
//@}
//------------------------------------------------------------------------
//@{
/*! @name Bottom up search
\brief Backwards traverse through the AST to find a node, findEnclosingXXX()
*/
// remember to put const to all arguments.
/** Find a node by type using upward traversal.
*
* Traverse backward through a specified node's ancestors, starting with the node's parent and progressing to more distant
* ancestors, to find the first node matching the specified or derived type. If @p includingSelf is true then the
* starting node, @p astNode, is returned if its type matches, otherwise the search starts at the parent of @p astNode.
*
* For the purposes of this function, the parent (P) of an SgDeclarationStatement node (N) is considered to be the first
* non-defining declaration of N if N has both a defining declaration and a first non-defining declaration and the defining
* declaration is different than the first non-defining declaration.
*
* If no ancestor of the requisite type of subtypes is found then this function returns a null pointer.
*
* If @p astNode is the null pointer, then the return value is a null pointer. That is, if there is no node, then there cannot
* be an enclosing node of the specified type. */
template <typename NodeType>
NodeType* getEnclosingNode(const SgNode* astNode, const bool includingSelf = false)
{
#if 1
// DQ (10/20/2012): This is the older version of this implementation. Until I am sure that
// the newer version (below) is what we want to use I will resolve this conflict by keeping
// the previousl version in place.
if (NULL == astNode)
{
return NULL;
}
if ( (includingSelf ) && (dynamic_cast<const NodeType*>(astNode)) )
{
return const_cast<NodeType*>(dynamic_cast<const NodeType*> (astNode));
}
// DQ (3/5/2012): Check for reference to self...
ROSE_ASSERT(astNode->get_parent() != astNode);
SgNode* parent = astNode->get_parent();
// DQ (3/5/2012): Check for loops that will cause infinite loops.
SgNode* previouslySeenParent = parent;
bool foundCycle = false;
while ( (foundCycle == false) && (parent != NULL) && (!dynamic_cast<const NodeType*>(parent)) )
{
ROSE_ASSERT(parent->get_parent() != parent);
#if 0
printf ("In getEnclosingNode(): parent = %p = %s \n",parent,parent->class_name().c_str());
#endif
parent = parent->get_parent();
// DQ (3/5/2012): Check for loops that will cause infinite loops.
// ROSE_ASSERT(parent != previouslySeenParent);
if (parent == previouslySeenParent)
{
foundCycle = true;
}
}
#if 0
printf ("previouslySeenParent = %p = %s \n",previouslySeenParent,previouslySeenParent->class_name().c_str());
#endif
parent = previouslySeenParent;
SgDeclarationStatement* declarationStatement = isSgDeclarationStatement(parent);
if (declarationStatement != NULL)
{
#if 0
printf ("Found a SgDeclarationStatement \n");
#endif
SgDeclarationStatement* definingDeclaration = declarationStatement->get_definingDeclaration();
SgDeclarationStatement* firstNondefiningDeclaration = declarationStatement->get_firstNondefiningDeclaration();
#if 0
printf (" --- declarationStatement = %p \n",declarationStatement);
printf (" --- definingDeclaration = %p \n",definingDeclaration);
if (definingDeclaration != NULL && definingDeclaration->get_parent() != NULL)
printf (" --- definingDeclaration ->get_parent() = %p = %s \n",definingDeclaration->get_parent(),definingDeclaration->get_parent()->class_name().c_str());
printf (" --- firstNondefiningDeclaration = %p \n",firstNondefiningDeclaration);
if (firstNondefiningDeclaration != NULL && firstNondefiningDeclaration->get_parent() != NULL)
printf (" --- firstNondefiningDeclaration ->get_parent() = %p = %s \n",firstNondefiningDeclaration->get_parent(),firstNondefiningDeclaration->get_parent()->class_name().c_str());
#endif
if (definingDeclaration != NULL && declarationStatement != firstNondefiningDeclaration)
{
#if 0
printf ("Found a nondefining declaration so use the non-defining declaration instead \n");
#endif
// DQ (10/19/2012): Use the defining declaration instead.
// parent = firstNondefiningDeclaration;
parent = definingDeclaration;
}
}
#if 0
printf ("reset: previouslySeenParent = %p = %s \n",previouslySeenParent,previouslySeenParent->class_name().c_str());
#endif
// DQ (10/19/2012): This branch is just to document the cycle that was previously detected, it is for
// debugging only. Thus it only makes sense for it to be executed when "(foundCycle == true)". However,
// this will have to be revisited later since it appears clear that it is a problem for the binary analysis
// work when it is visited for this case. Since the cycle is detected, but there is no assertion on the
// cycle, we don't exit when a cycle is identified (which is the point of the code below).
// Note also that I have fixed the code (above and below) to only chase pointers through defining
// declarations (where they exist), this is important since non-defining declarations can be almost
// anywhere (and thus chasing them can make it appear that there are cycles where there are none
// (I think); test2012_234.C demonstrates an example of this.
// DQ (10/9/2012): Robb has suggested this change to fix the binary analysis work.
// if (foundCycle == true)
if (foundCycle == false)
{
while ( (parent != NULL) && (!dynamic_cast<const NodeType*>(parent)) )
{
ROSE_ASSERT(parent->get_parent() != parent);
#if 0
printf ("In getEnclosingNode() (2nd try): parent = %p = %s \n",parent,parent->class_name().c_str());
if (parent->get_file_info() != NULL)
parent->get_file_info()->display("In getEnclosingNode() (2nd try): debug");
#endif
SgDeclarationStatement* declarationStatement = isSgDeclarationStatement(parent);
if (declarationStatement != NULL)
{
#if 0
printf ("Found a SgDeclarationStatement \n");
#endif
SgDeclarationStatement* definingDeclaration = declarationStatement->get_definingDeclaration();
SgDeclarationStatement* firstNondefiningDeclaration = declarationStatement->get_firstNondefiningDeclaration();
#if 0
printf (" --- declarationStatement = %p = %s \n",declarationStatement,(declarationStatement != NULL) ? declarationStatement->class_name().c_str() : "null");
printf (" --- definingDeclaration = %p \n",definingDeclaration);
if (definingDeclaration != NULL && definingDeclaration->get_parent() != NULL)
printf (" --- definingDeclaration ->get_parent() = %p = %s \n",definingDeclaration->get_parent(),definingDeclaration->get_parent()->class_name().c_str());
printf (" --- firstNondefiningDeclaration = %p \n",firstNondefiningDeclaration);
if (firstNondefiningDeclaration != NULL && firstNondefiningDeclaration->get_parent() != NULL)
printf (" --- firstNondefiningDeclaration ->get_parent() = %p = %s \n",firstNondefiningDeclaration->get_parent(),firstNondefiningDeclaration->get_parent()->class_name().c_str());
#endif
if (definingDeclaration != NULL && declarationStatement != firstNondefiningDeclaration)
{
#if 0
printf ("Found a nondefining declaration so use the firstNondefining declaration instead \n");
#endif
// DQ (10/19/2012): Use the defining declaration instead.
// parent = firstNondefiningDeclaration;
parent = definingDeclaration;
}
}
parent = parent->get_parent();
#if 1
// DQ (3/5/2012): Check for loops that will cause infinite loops.
ROSE_ASSERT(parent != previouslySeenParent);
#else
printf ("WARNING::WARNING::WARNING commented out assertion for parent != previouslySeenParent \n");
if (parent == previouslySeenParent)
break;
#endif
}
}
return const_cast<NodeType*>(dynamic_cast<const NodeType*> (parent));
#else
// DQ (10/20/2012): Using Robb's newer version with my modification to use the definingDeclaration rather than firstNondefiningDeclaration (below).
// Find the parent of specified type, but watch out for cycles in the ancestry (which would cause an infinite loop).
// Cast away const because isSg* functions aren't defined for const node pointers; and our return is not const.
SgNode *node = const_cast<SgNode*>(!astNode || includingSelf ? astNode : astNode->get_parent());
std::set<const SgNode*> seen; // nodes we've seen, in order to detect cycles
while (node) {
if (NodeType *found = dynamic_cast<NodeType*>(node))
return found;
// FIXME: Cycle detection could be moved elsewhere so we don't need to do it on every call. [RPM 2012-10-09]
ROSE_ASSERT(seen.insert(node).second);
// Traverse to parent (declaration statements are a special case)
if (SgDeclarationStatement *declarationStatement = isSgDeclarationStatement(node)) {
SgDeclarationStatement *definingDeclaration = declarationStatement->get_definingDeclaration();
SgDeclarationStatement *firstNondefiningDeclaration = declarationStatement->get_firstNondefiningDeclaration();
if (definingDeclaration && firstNondefiningDeclaration && declarationStatement != firstNondefiningDeclaration) {
// DQ (10/19/2012): Use the defining declaration instead.
// node = firstNondefiningDeclaration;
node = definingDeclaration;
}
} else {
node = node->get_parent();
}
}
return NULL;
#endif
}
//! Find enclosing source file node
ROSE_DLL_API SgSourceFile* getEnclosingSourceFile(SgNode* n, const bool includingSelf=false);
//! Get the closest scope from astNode. Return astNode if it is already a scope.
ROSE_DLL_API SgScopeStatement* getScope(const SgNode* astNode);
//! Get the enclosing scope from a node n
ROSE_DLL_API SgScopeStatement* getEnclosingScope(SgNode* n, const bool includingSelf=false);
//! Traverse back through a node's parents to find the enclosing global scope
ROSE_DLL_API SgGlobal* getGlobalScope( const SgNode* astNode);
//! Find the function definition
ROSE_DLL_API SgFunctionDefinition* getEnclosingProcedure(SgNode* n, const bool includingSelf=false);
ROSE_DLL_API SgFunctionDefinition* getEnclosingFunctionDefinition(SgNode* astNode, const bool includingSelf=false);
//! Find the closest enclosing statement, including the given node
ROSE_DLL_API SgStatement* getEnclosingStatement(SgNode* n);
//! Find the closest switch outside a given statement (normally used for case and default statements)
ROSE_DLL_API SgSwitchStatement* findEnclosingSwitch(SgStatement* s);
//! Find the closest loop outside the given statement; if fortranLabel is not empty, the Fortran label of the loop must be equal to it
ROSE_DLL_API SgScopeStatement* findEnclosingLoop(SgStatement* s, const std::string& fortranLabel = "", bool stopOnSwitches = false);
//! Find the enclosing function declaration, including its derived instances like isSgProcedureHeaderStatement, isSgProgramHeaderStatement, and isSgMemberFunctionDeclaration.
ROSE_DLL_API SgFunctionDeclaration * getEnclosingFunctionDeclaration (SgNode * astNode, const bool includingSelf=false);
//roseSupport/utility_functions.h
//! get the SgFile node from current node
ROSE_DLL_API SgFile* getEnclosingFileNode (SgNode* astNode );
//! Get the initializer containing an expression if it is within an initializer.
ROSE_DLL_API SgInitializer* getInitializerOfExpression(SgExpression* n);
//! Get the closest class definition enclosing the specified AST node,
ROSE_DLL_API SgClassDefinition* getEnclosingClassDefinition(SgNode* astnode, const bool includingSelf=false);
// TODO
#if 0
SgNode * getEnclosingSgNode(SgNode* source,VariantT, SgNode* endNode=NULL);
std::vector<SgNode *> getAllEnclosingSgNode(SgNode* source,VariantT, SgNode* endNode=NULL);
SgVariableDeclaration* findVariableDeclaratin( const string& varname)
SgClassDeclaration* getEnclosingClassDeclaration( const SgNode* astNode);
// e.g. for some expression, find its parent statement
SgStatement* getEnclosingStatement(const SgNode* astNode);
SgSwitchStatement* getEnclosingSwitch(SgStatement* s);
SgModuleStatement* getEnclosingModuleStatement( const SgNode* astNode);
// used to build a variable reference for compiler generated code in current scope
SgSymbol * findReachingDefinition (SgScopeStatement* startScope, SgName &name);
#endif
//@}
//------------------------------------------------------------------------
//@{
/*! @name AST Walk and Traversal
\brief
*/
// Liao, 1/9/2008
/*!
\brief return the first global scope under current project
*/
ROSE_DLL_API SgGlobal * getFirstGlobalScope(SgProject *project);
/*!
\brief get the last statement within a scope, return NULL if it does not exist
*/
ROSE_DLL_API SgStatement* getLastStatement(SgScopeStatement *scope);
//! Get the first statement within a scope, return NULL if it does not exist. Skip compiler-generated statement by default. Count transformation-generated ones, but excluding those which are not to be outputted in unparsers.
ROSE_DLL_API SgStatement* getFirstStatement(SgScopeStatement *scope,bool includingCompilerGenerated=false);
//!Find the first defining function declaration statement in a scope
ROSE_DLL_API SgFunctionDeclaration* findFirstDefiningFunctionDecl(SgScopeStatement* scope);
//! Get next statement within the same scope of current statement
ROSE_DLL_API SgStatement* getNextStatement(SgStatement * currentStmt);
//! Get previous statement within the same scope of current statement
ROSE_DLL_API SgStatement* getPreviousStatement(SgStatement * currentStmt);
#if 0 //TODO
// preorder traversal from current SgNode till find next SgNode of type V_SgXXX
SgNode* getNextSgNode( const SgNode* currentNode, VariantT=V_SgNode);
#endif
//@}
//------------------------------------------------------------------------
//@{
/*! @name AST Comparison
\brief Compare AST nodes, subtree, etc
*/
//! Check if a SgIntVal node has a given value
ROSE_DLL_API bool isEqualToIntConst(SgExpression* e, int value);
//! Check if two function declarations refer to the same one. Two function declarations are the same when they are a) identical, b) same name in C c) same qualified named and mangled name in C++. A nondefining (prototype) declaration and a defining declaration of a same function are treated as the same.
/*!
* There is a similar function bool compareFunctionDeclarations(SgFunctionDeclaration *f1, SgFunctionDeclaration *f2) from Classhierarchy.C
*/
ROSE_DLL_API bool isSameFunction(SgFunctionDeclaration* func1, SgFunctionDeclaration* func2);
//! Check if a statement is the last statement within its closed scope
ROSE_DLL_API bool isLastStatement(SgStatement* stmt);
//@}
//------------------------------------------------------------------------
//@{
/*! @name AST insert, removal, and replacement
\brief Add, remove,and replace AST
scope->append_statement(), exprListExp->append_expression() etc. are not enough to handle side effect of parent pointers, symbol tables, preprocessing info, defining/nondefining pointers etc.
*/
// DQ (2/24/2009): Simple function to delete an AST subtree (used in outlining).
//! Function to delete AST subtree's nodes only, users must take care of any dangling pointers, symbols or types that result.
ROSE_DLL_API void deleteAST(SgNode* node);
//! Special purpose function for deleting AST expression tress containing valid original expression trees in constant folded expressions (for internal use only).
ROSE_DLL_API void deleteExpressionTreeWithOriginalExpressionSubtrees(SgNode* root);
// DQ (2/25/2009): Added new function to support outliner.
//! Move statements in first block to the second block (preserves order and rebuilds the symbol table).
ROSE_DLL_API void moveStatementsBetweenBlocks ( SgBasicBlock* sourceBlock, SgBasicBlock* targetBlock );
//! Move a variable declaration to a new scope, handle symbol, special scopes like For loop, etc.
ROSE_DLL_API void moveVariableDeclaration(SgVariableDeclaration* decl, SgScopeStatement* target_scope);
//! Append a statement to the end of the current scope, handle side effect of appending statements, e.g. preprocessing info, defining/nondefining pointers etc.
ROSE_DLL_API void appendStatement(SgStatement *stmt, SgScopeStatement* scope=NULL);
//! Append a list of statements to the end of the current scope, handle side effect of appending statements, e.g. preprocessing info, defining/nondefining pointers etc.
ROSE_DLL_API void appendStatementList(const std::vector<SgStatement*>& stmt, SgScopeStatement* scope=NULL);
// DQ (2/6/2009): Added function to support outlining into separate file.
//! Append a copy ('decl') of a function ('original_statement') into a 'scope', include any referenced declarations required if the scope is within a compiler generated file. All referenced declarations, including those from headers, are inserted if excludeHeaderFiles is set to true (the new file will not have any headers).
ROSE_DLL_API void appendStatementWithDependentDeclaration( SgDeclarationStatement* decl, SgGlobal* scope, SgStatement* original_statement, bool excludeHeaderFiles );
//! Prepend a statement to the beginning of the current scope, handling side
//! effects as appropriate
ROSE_DLL_API void prependStatement(SgStatement *stmt, SgScopeStatement* scope=NULL);
//! prepend a list of statements to the beginning of the current scope,
//! handling side effects as appropriate
ROSE_DLL_API void prependStatementList(const std::vector<SgStatement*>& stmt, SgScopeStatement* scope=NULL);
//! Check if a scope statement has a simple children statement list
//! so inserting additional statements under the scope is straightforward and unambiguous.
//! for example, SgBasicBlock has a simple statement list while IfStmt does not.
ROSE_DLL_API bool hasSimpleChildrenList (SgScopeStatement* scope);
//! Insert a statement before or after the target statement within the target's scope. Move around preprocessing info automatically
ROSE_DLL_API void insertStatement(SgStatement *targetStmt, SgStatement* newStmt, bool insertBefore= true, bool autoMovePreprocessingInfo = true);
//! Insert a list of statements before or after the target statement within the
//target's scope
ROSE_DLL_API void insertStatementList(SgStatement *targetStmt, const std::vector<SgStatement*>& newStmts, bool insertBefore= true);
//! Insert a statement before a target statement
ROSE_DLL_API void insertStatementBefore(SgStatement *targetStmt, SgStatement* newStmt, bool autoMovePreprocessingInfo = true);
//! Insert a list of statements before a target statement
ROSE_DLL_API void insertStatementListBefore(SgStatement *targetStmt, const std::vector<SgStatement*>& newStmts);
//! Insert a statement after a target statement, Move around preprocessing info automatically by default
ROSE_DLL_API void insertStatementAfter(SgStatement *targetStmt, SgStatement* newStmt, bool autoMovePreprocessingInfo = true);
//! Insert a list of statements after a target statement
ROSE_DLL_API void insertStatementListAfter(SgStatement *targetStmt, const std::vector<SgStatement*>& newStmt);
//! Insert a statement after the last declaration within a scope. The statement will be prepended to the scope if there is no declaration statement found
ROSE_DLL_API void insertStatementAfterLastDeclaration(SgStatement* stmt, SgScopeStatement* scope);
//! Insert a list of statements after the last declaration within a scope. The statement will be prepended to the scope if there is no declaration statement found
ROSE_DLL_API void insertStatementAfterLastDeclaration(std::vector<SgStatement*> stmt_list, SgScopeStatement* scope);
//! Insert a statement before the first non-declaration statement in a scope. If the scope has no non-declaration statements
// then the statement is inserted at the end of the scope.
ROSE_DLL_API void insertStatementBeforeFirstNonDeclaration(SgStatement *newStmt, SgScopeStatement *scope,
bool movePreprocessingInfo=true);
//! Insert statements before the first non-declaration statement in a scope. If the scope has no non-declaration statements
//then the new statements are inserted at the end of the scope.
ROSE_DLL_API void insertStatementListBeforeFirstNonDeclaration(const std::vector<SgStatement*> &newStmts,
SgScopeStatement *scope);
//! Remove a statement from its attach point of the AST. Automatically keep its associated preprocessing information at the original place after the removal. The statement is still in memory and it is up to the users to decide if the removed one will be inserted somewhere else or released from memory (deleteAST()).
ROSE_DLL_API void removeStatement(SgStatement* stmt, bool autoRelocatePreprocessingInfo = true);
//! Deep delete a sub AST tree. It uses postorder traversal to delete each child node. Users must take care of any dangling pointers, symbols or types that result. This is identical to deleteAST()
ROSE_DLL_API void deepDelete(SgNode* root);
//! Replace a statement with another. Move preprocessing information from oldStmt to newStmt if requested.
ROSE_DLL_API void replaceStatement(SgStatement* oldStmt, SgStatement* newStmt, bool movePreprocessinInfo = false);
//! Replace an anchor node with a specified pattern subtree with optional SgVariantExpression. All SgVariantExpression in the pattern will be replaced with copies of the anchor node.
ROSE_DLL_API SgNode* replaceWithPattern (SgNode * anchor, SgNode* new_pattern);
//! Replace all variable references to an old symbol in a scope to being references to a new symbol.
// Essentially replace variable a with b.
ROSE_DLL_API void replaceVariableReferences(SgVariableSymbol* old_sym, SgVariableSymbol* new_sym, SgScopeStatement * scope );
/** Given an expression, generates a temporary variable whose initializer optionally evaluates
* that expression. Then, the var reference expression returned can be used instead of the original
* expression. The temporary variable created can be reassigned to the expression by the returned SgAssignOp;
* this can be used when the expression the variable represents needs to be evaluated. NOTE: This handles
* reference types correctly by using pointer types for the temporary.
* @param expression Expression which will be replaced by a variable
* @param scope scope in which the temporary variable will be generated
* @param reEvaluate an assignment op to reevaluate the expression. Leave NULL if not needed
* @return declaration of the temporary variable, and a a variable reference expression to use instead of
* the original expression. */
std::pair<SgVariableDeclaration*, SgExpression* > createTempVariableForExpression(SgExpression* expression,
SgScopeStatement* scope, bool initializeInDeclaration, SgAssignOp** reEvaluate = NULL);
/* This function creates a temporary variable for a given expression in the given scope
This is different from SageInterface::createTempVariableForExpression in that it does not
try to be smart to create pointers to reference types and so on. The temp is initialized to the expression.
The caller is responsible for setting the parent of SgVariableDeclaration since buildVariableDeclaration
may not set_parent() when the scope stack is empty. See programTransformation/extractFunctionArgumentsNormalization/ExtractFunctionArguments.C for sample usage.
@param expression Expression which will be replaced by a variable
@param scope scope in which the temporary variable will be generated
*/
std::pair<SgVariableDeclaration*, SgExpression*> createTempVariableAndReferenceForExpression
(SgExpression* expression, SgScopeStatement* scope);
//! Append an argument to SgFunctionParameterList, transparently set parent,scope, and symbols for arguments when possible
/*! We recommend to build SgFunctionParameterList before building a function declaration
However, it is still allowed to append new arguments for existing function declarations.
\todo function type , function symbol also need attention.
*/
ROSE_DLL_API SgVariableSymbol* appendArg(SgFunctionParameterList *, SgInitializedName*);
//!Prepend an argument to SgFunctionParameterList
ROSE_DLL_API SgVariableSymbol* prependArg(SgFunctionParameterList *, SgInitializedName*);
//! Append an expression to a SgExprListExp, set the parent pointer also
ROSE_DLL_API void appendExpression(SgExprListExp *, SgExpression*);
//! Append an expression list to a SgExprListExp, set the parent pointers also
ROSE_DLL_API void appendExpressionList(SgExprListExp *, const std::vector<SgExpression*>&);
//! Set parameter list for a function declaration, considering existing parameter list etc.
// void setParameterList(SgFunctionDeclaration *func,SgFunctionParameterList *paralist);
template <class actualFunction>
ROSE_DLL_API void setParameterList(actualFunction *func,SgFunctionParameterList *paralist);
# if 1
// DQ (11/25/2011): Moved to the header file so that it could be seen as a template function.
// TODO consider the difference between C++ and Fortran
// fixup the scope of arguments,no symbols for nondefining function declaration's arguments
template <class actualFunction>
void
// SageInterface::setParameterList(SgFunctionDeclaration * func,SgFunctionParameterList * paralist)
setParameterList(actualFunction* func, SgFunctionParameterList* paralist)
{
// DQ (11/25/2011): Modified this to be a templated function so that we can handle both
// SgFunctionDeclaration and SgTemplateFunctionDeclaration (and their associated member
// function derived classes).
ROSE_ASSERT(func != NULL);
ROSE_ASSERT(paralist != NULL);
#if 0
// At this point we don't have cerr and endl defined, so comment this code out.
// Warn to users if a paralist is being shared
if (paralist->get_parent() !=NULL)
{
cerr << "Waring! Setting a used SgFunctionParameterList to function: "
<< (func->get_name()).getString()<<endl
<< " Sharing parameter lists can corrupt symbol tables!"<<endl
<< " Please use deepCopy() to get an exclusive parameter list for each function declaration!"<<endl;
// ROSE_ASSERT(false);
}
#endif
// Liao,2/5/2008 constructor of SgFunctionDeclaration will automatically generate SgFunctionParameterList, so be cautious when set new paralist!!
if (func->get_parameterList() != NULL)
{
if (func->get_parameterList() != paralist)
{
delete func->get_parameterList();
}
}
func->set_parameterList(paralist);
paralist->set_parent(func);
// DQ (5/15/2012): Need to set the declptr in each SgInitializedName IR node.
// This is needed to support the AST Copy mechanism (at least). The files: test2005_150.C,
// test2012_81.C and testcode2012_82.C demonstrate this problem.
SgInitializedNamePtrList & args = paralist->get_args();
for (SgInitializedNamePtrList::iterator i = args.begin(); i != args.end(); i++)
{
(*i)->set_declptr(func);
}
}
#endif
//! Set a pragma of a pragma declaration. handle memory release for preexisting pragma, and set parent pointer.
ROSE_DLL_API void setPragma(SgPragmaDeclaration* decl, SgPragma *pragma);
//! Replace an expression with another, used for variable reference substitution and others. the old expression can be deleted (default case) or kept.
ROSE_DLL_API void replaceExpression(SgExpression* oldExp, SgExpression* newExp, bool keepOldExp=false);
//! Replace a given expression with a list of statements produced by a generator
ROSE_DLL_API void replaceExpressionWithStatement(SgExpression* from,
SageInterface::StatementGenerator* to);
//! Similar to replaceExpressionWithStatement, but with more restrictions.
//! Assumptions: from is not within the test of a loop or ifStmt, not currently traversing from or the statement it is in
ROSE_DLL_API void replaceSubexpressionWithStatement(SgExpression* from,
SageInterface::StatementGenerator* to);
//! Set operands for expressions with single operand, such as unary expressions. handle file info, lvalue, pointer downcasting, parent pointer etc.
ROSE_DLL_API void setOperand(SgExpression* target, SgExpression* operand);
//!set left hand operand for binary expressions, transparently downcasting target expressions when necessary
ROSE_DLL_API void setLhsOperand(SgExpression* target, SgExpression* lhs);
//!set left hand operand for binary expression
ROSE_DLL_API void setRhsOperand(SgExpression* target, SgExpression* rhs);
//! Set original expression trees to NULL for SgValueExp or SgCastExp expressions, so you can change the value and have it unparsed correctly.
ROSE_DLL_API void removeAllOriginalExpressionTrees(SgNode* top);
// DQ (1/25/2010): Added support for directories
//! Move file to be generated in a subdirectory (will be generated by the unparser).
ROSE_DLL_API void moveToSubdirectory ( std::string directoryName, SgFile* file );
//! Supporting function to comment relocation in insertStatement() and removeStatement().
ROSE_DLL_API SgStatement* findSurroundingStatementFromSameFile(SgStatement* targetStmt, bool & surroundingStatementPreceedsTargetStatement);
//! Relocate comments and CPP directives from one statement to another.
ROSE_DLL_API void moveCommentsToNewStatement(SgStatement* sourceStatement, const std::vector<int> & indexList, SgStatement* targetStatement, bool surroundingStatementPreceedsTargetStatement);
//@}
//------------------------------------------------------------------------
//@{
/*! @name AST repair, fix, and postprocessing.
\brief Mostly used internally when some AST pieces are built without knowing their target
scope/parent, especially during bottom-up construction of AST. The associated symbols,
parent and scope pointers cannot be set on construction then.
A set of utility functions are provided to
   patch up scope, parent, symbol for them when the target scope/parent becomes known.
*/
//! Connect variable reference to the right variable symbols when feasible, return the number of references being fixed.
/*! In AST translation, it is possible to build a variable reference before the variable
is being declared. buildVarRefExp() will use fake initialized name and symbol as placeholders
to get the work done. Users should call fixVariableReference() when AST is complete and all
variable declarations are in place.
*/
ROSE_DLL_API int fixVariableReferences(SgNode* root);
//!Patch up symbol, scope, and parent information when a SgVariableDeclaration's scope is known.
/*!
It is possible to build a variable declaration without knowing its scope information during bottom-up construction of AST, though top-down construction is recommended in general.
In this case, we have to patch up symbol table, scope and parent information when the scope is known. This function is usually used internally within appendStatment(), insertStatement().
*/
ROSE_DLL_API void fixVariableDeclaration(SgVariableDeclaration* varDecl, SgScopeStatement* scope);
//! Fix symbols, parent and scope pointers. Used internally within appendStatment(), insertStatement() etc when a struct declaration was built without knowing its target scope.
ROSE_DLL_API void fixStructDeclaration(SgClassDeclaration* structDecl, SgScopeStatement* scope);
//! Fix symbols, parent and scope pointers. Used internally within appendStatment(), insertStatement() etc when a class declaration was built without knowing its target scope.
ROSE_DLL_API void fixClassDeclaration(SgClassDeclaration* classDecl, SgScopeStatement* scope);
//! Fix symbols, parent and scope pointers. Used internally within appendStatment(), insertStatement() etc when a namespace declaration was built without knowing its target scope.
ROSE_DLL_API void fixNamespaceDeclaration(SgNamespaceDeclarationStatement* structDecl, SgScopeStatement* scope);
//! Fix symbol table for SgLabelStatement. Used Internally when the label is built without knowing its target scope. Both parameters cannot be NULL.
ROSE_DLL_API void fixLabelStatement(SgLabelStatement* label_stmt, SgScopeStatement* scope);
//! Set a numerical label for a Fortran statement. The statement should have a enclosing function definition already. SgLabelSymbol and SgLabelRefExp are created transparently as needed.
ROSE_DLL_API void setFortranNumericLabel(SgStatement* stmt, int label_value);
//! Suggest next usable (non-conflicting) numeric label value for a Fortran function definition scope
ROSE_DLL_API int suggestNextNumericLabel(SgFunctionDefinition* func_def);
//! Fix the symbol table and set scope (only if scope in declaration is not already set).
ROSE_DLL_API void fixFunctionDeclaration(SgFunctionDeclaration* stmt, SgScopeStatement* scope);
//! Fix the symbol table and set scope (only if scope in declaration is not already set).
ROSE_DLL_API void fixTemplateDeclaration(SgTemplateDeclaration* stmt, SgScopeStatement* scope);
//! A wrapper containing fixes (fixVariableDeclaration(),fixStructDeclaration(), fixLabelStatement(), etc) for all kinds statements. Should be used before attaching the statement into AST.
ROSE_DLL_API void fixStatement(SgStatement* stmt, SgScopeStatement* scope);
//@}
//! Update defining and nondefining links due to a newly introduced function declaration. Should be used after inserting the function into a scope.
/*! This function not only set the defining and nondefining links of the newly introduced
* function declaration inside a scope, but also update other same function declarations' links
* accordingly if there are any.
* Assumption: The function has already inserted/appended/prepended into the scope before calling this function.
*/
ROSE_DLL_API void updateDefiningNondefiningLinks(SgFunctionDeclaration* func, SgScopeStatement* scope);
//------------------------------------------------------------------------
//@{
/*! @name Advanced AST transformations, analyses, and optimizations
\brief Some complex but commonly used AST transformations.
*/
//! Collect all read and write references within stmt, which can be a function, a scope statement, or a single statement. Note that a reference can be both read and written, like i++
ROSE_DLL_API bool
collectReadWriteRefs(SgStatement* stmt, std::vector<SgNode*>& readRefs, std::vector<SgNode*>& writeRefs, bool useCachedDefUse=false);
//!Collect unique variables which are read or written within a statement. Note that a variable can be both read and written. The statement can be either of a function, a scope, or a single line statement.
ROSE_DLL_API bool collectReadWriteVariables(SgStatement* stmt, std::set<SgInitializedName*>& readVars, std::set<SgInitializedName*>& writeVars);
//!Collect read only variables within a statement. The statement can be either of a function, a scope, or a single line statement.
ROSE_DLL_API void collectReadOnlyVariables(SgStatement* stmt, std::set<SgInitializedName*>& readOnlyVars);
//!Collect read only variable symbols within a statement. The statement can be either of a function, a scope, or a single line statement.
ROSE_DLL_API void collectReadOnlySymbols(SgStatement* stmt, std::set<SgVariableSymbol*>& readOnlySymbols);
//! Check if a variable reference is used by its address: including &a expression and foo(a) when type2 foo(Type& parameter) in C++
ROSE_DLL_API bool isUseByAddressVariableRef(SgVarRefExp* ref);
//! Collect variable references involving use by address: including &a expression and foo(a) when type2 foo(Type& parameter) in C++
ROSE_DLL_API void collectUseByAddressVariableRefs (const SgStatement* s, std::set<SgVarRefExp* >& varSetB);
#ifndef ROSE_USE_INTERNAL_FRONTEND_DEVELOPMENT
//!Call liveness analysis on an entire project
ROSE_DLL_API LivenessAnalysis * call_liveness_analysis(SgProject* project, bool debug=false);
//!get liveIn and liveOut variables for a for loop from liveness analysis result liv.
ROSE_DLL_API void getLiveVariables(LivenessAnalysis * liv, SgForStatement* loop, std::set<SgInitializedName*>& liveIns, std::set<SgInitializedName*> & liveOuts);
#endif
//!Recognize and collect reduction variables and operations within a C/C++ loop, following OpenMP 3.0 specification for allowed reduction variable types and operation types.
ROSE_DLL_API void ReductionRecognition(SgForStatement* loop, std::set< std::pair <SgInitializedName*, VariantT> > & results);
//! Constant folding an AST subtree rooted at 'r' (replacing its children with their constant values, if applicable). Please be advised that constant folding on floating point computation may decrease the accuracy of floating point computations!
/*! It is a wrapper function for ConstantFolding::constantFoldingOptimization(). Note that only r's children are replaced with their corresponding constant values, not the input SgNode r itself. You have to call this upon an expression's parent node if you want to fold the expression. */
ROSE_DLL_API void constantFolding(SgNode* r);
//!Instrument(Add a statement, often a function call) into a function right before the return points, handle multiple return statements and return expressions with side effects. Return the number of statements inserted.
/*! Useful when adding a runtime library call to terminate the runtime system right before the end of a program, especially for OpenMP and UPC runtime systems. Return with complex expressions with side effects are rewritten using an additional assignment statement.
*/
ROSE_DLL_API int instrumentEndOfFunction(SgFunctionDeclaration * func, SgStatement* s);
//! Remove jumps whose label is immediately after the jump. Used to clean up inlined code fragments.
ROSE_DLL_API void removeJumpsToNextStatement(SgNode*);
//! Remove labels which are not targets of any goto statements
ROSE_DLL_API void removeUnusedLabels(SgNode* top);
//! Remove consecutive labels
ROSE_DLL_API void removeConsecutiveLabels(SgNode* top);
//! Replace an expression with a temporary variable and an assignment statement
/*!
Add a new temporary variable to contain the value of 'from'
Change reference to 'from' to use this new variable
Assumptions: 'from' is not within the test of a loop or 'if'
not currently traversing 'from' or the statement it is in
*/
ROSE_DLL_API SgAssignInitializer* splitExpression(SgExpression* from, std::string newName = "");
//! Split long expressions into blocks of statements
ROSE_DLL_API void splitExpressionIntoBasicBlock(SgExpression* expr);
//! Remove labeled goto statements
ROSE_DLL_API void removeLabeledGotos(SgNode* top);
//! If the given statement contains any break statements in its body, add a new label below the statement and change the breaks into gotos to that new label.
ROSE_DLL_API void changeBreakStatementsToGotos(SgStatement* loopOrSwitch);
//! Check if the body of a 'for' statement is a SgBasicBlock, create one if not.
ROSE_DLL_API SgBasicBlock* ensureBasicBlockAsBodyOfFor(SgForStatement* fs);
//! Check if the body of a 'upc_forall' statement is a SgBasicBlock, create one if not.
ROSE_DLL_API SgBasicBlock* ensureBasicBlockAsBodyOfUpcForAll(SgUpcForAllStatement* fs);
//! Check if the body of a 'while' statement is a SgBasicBlock, create one if not.
ROSE_DLL_API SgBasicBlock* ensureBasicBlockAsBodyOfWhile(SgWhileStmt* ws);
//! Check if the body of a 'do .. while' statement is a SgBasicBlock, create one if not.
ROSE_DLL_API SgBasicBlock* ensureBasicBlockAsBodyOfDoWhile(SgDoWhileStmt* ws);
//! Check if the body of a 'switch' statement is a SgBasicBlock, create one if not.
ROSE_DLL_API SgBasicBlock* ensureBasicBlockAsBodyOfSwitch(SgSwitchStatement* ws);
//! Check if the body of a 'case option' statement is a SgBasicBlock, create one if not.
SgBasicBlock* ensureBasicBlockAsBodyOfCaseOption(SgCaseOptionStmt* cs);
//! Check if the body of a 'default option' statement is a SgBasicBlock, create one if not.
SgBasicBlock* ensureBasicBlockAsBodyOfDefaultOption(SgDefaultOptionStmt * cs);
//! Check if the true body of a 'if' statement is a SgBasicBlock, create one if not.
ROSE_DLL_API SgBasicBlock* ensureBasicBlockAsTrueBodyOfIf(SgIfStmt* ifs);
//! Check if the false body of a 'if' statement is a SgBasicBlock, create one if not when the flag is true.
ROSE_DLL_API SgBasicBlock* ensureBasicBlockAsFalseBodyOfIf(SgIfStmt* ifs, bool createEmptyBody = true);
//! Check if the body of a 'catch' statement is a SgBasicBlock, create one if not.
ROSE_DLL_API SgBasicBlock* ensureBasicBlockAsBodyOfCatch(SgCatchOptionStmt* cos);
//! Check if the body of a SgOmpBodyStatement is a SgBasicBlock, create one if not
ROSE_DLL_API SgBasicBlock* ensureBasicBlockAsBodyOfOmpBodyStmt(SgOmpBodyStatement* ompbodyStmt);
//! Check if a statement is a (true or false) body of a container-like parent, such as For, Upc_forall, Do-while,
//! switch, If, Catch, OmpBodyStmt, etc
bool isBodyStatement (SgStatement* s);
//! Fix up ifs, loops, while, switch, Catch, OmpBodyStatement, etc. to have blocks as body components. It also adds an empty else body to if statements that don't have them.
void changeAllBodiesToBlocks(SgNode* top, bool createEmptyBody = true);
//! The same as changeAllBodiesToBlocks(SgNode* top). To be phased out.
void changeAllLoopBodiesToBlocks(SgNode* top);
//! Make a single statement body to be a basic block. Its parent is if, while, catch, or upc_forall etc.
SgBasicBlock * makeSingleStatementBodyToBlock(SgStatement* singleStmt);
#if 0
/** If s is the body of a loop, catch, or if statement and is already a basic block,
* s is returned unmodified. Otherwise generate a SgBasicBlock between s and its parent
* (a loop, catch, or if statement, etc). */
SgLocatedNode* ensureBasicBlockAsParent(SgStatement* s);
#endif
//! Get the constant value from a constant integer expression; abort on
//! everything else. Note that signed long longs are converted to unsigned.
unsigned long long getIntegerConstantValue(SgValueExp* expr);
//! Get a statement's dependent declarations which declares the types used in the statement. The returned vector of declaration statements are sorted according to their appearance order in the original AST. Any reference to a class or template class from a namespace will treated as a reference to the enclosing namespace.
std::vector<SgDeclarationStatement*> getDependentDeclarations (SgStatement* stmt );
//! Insert an expression (new_exp )before another expression (anchor_exp) has possible side effects, without changing the original semantics. This is achieved by using a comma operator: (new_exp, anchor_exp). The comma operator is returned.
SgCommaOpExp *insertBeforeUsingCommaOp (SgExpression* new_exp, SgExpression* anchor_exp);
//! Insert an expression (new_exp ) after another expression (anchor_exp) has possible side effects, without changing the original semantics. This is done by using two comma operators: type T1; ... ((T1 = anchor_exp, new_exp),T1) )... , where T1 is a temp variable saving the possible side effect of anchor_exp. The top level comma op exp is returned. The reference to T1 in T1 = anchor_exp is saved in temp_ref.
SgCommaOpExp *insertAfterUsingCommaOp (SgExpression* new_exp, SgExpression* anchor_exp, SgStatement** temp_decl = NULL, SgVarRefExp** temp_ref = NULL);
/// \brief moves the body of a function f to a new function f`;
/// f's body is replaced with code that forwards the call to f`.
/// \return a pair indicating the statement containing the call of f`
/// and an initialized name refering to the temporary variable
/// holding the result of f`. In case f returns void
/// the initialized name is NULL.
/// \param definingDeclaration the defining function declaration of f
/// \param newName the name of function f`
/// \details f's new body becomes { f`(...); } and { int res = f`(...); return res; }
/// for functions returning void and a value, respectively.
/// two function declarations are inserted in f's enclosing scope
/// \code
/// result_type f`(...); <--- (1)
/// result_type f (...) { forward call to f` }
/// result_type f`(...) { original code } <--- (2)
/// \endcode
/// Calls to f are not updated, thus in the transformed code all
/// calls will continue calling f (this is also true for
/// recursive function calls from within the body of f`).
/// After the function has created the wrapper,
/// definingDeclaration becomes the wrapper function
/// The definition of f` is the next entry in the
/// statement list; the forward declaration of f` is the previous
/// entry in the statement list.
/// \pre definingDeclaration must be a defining declaration of a
/// free standing function.
/// typeid(SgFunctionDeclaration) == typeid(definingDeclaration)
/// i.e., this function is NOT implemented for class member functions,
/// template functions, procedures, etc.
std::pair<SgStatement*, SgInitializedName*>
wrapFunction(SgFunctionDeclaration& definingDeclaration, SgName newName);
/// \overload
/// \tparam NameGen functor that generates a new name based on the old name.
/// interface: SgName nameGen(const SgName&)
/// \param nameGen name generator
/// \brief see wrapFunction for details
/// \brief Convenience overload of wrapFunction: derives the wrapper's new
///        name by applying the \p nameGen functor to the function's
///        current name, then forwards to the SgName-based overload.
template <class NameGen>
std::pair<SgStatement*, SgInitializedName*>
wrapFunction(SgFunctionDeclaration& definingDeclaration, NameGen nameGen)
{
  SgName const generatedName = nameGen(definingDeclaration.get_name());

  return wrapFunction(definingDeclaration, generatedName);
}
/// \brief convenience function that returns the first initialized name in a
/// list of variable declarations.
SgInitializedName& getFirstVariable(SgVariableDeclaration& vardecl);
//@}
// DQ (6/7/2012): Unclear where this function should go...
bool hasTemplateSyntax( const SgName & name );
//! Move a declaration to a scope which is the closest to the declaration's use places
bool moveDeclarationToInnermostScope(SgDeclarationStatement* decl, bool debug/*= false */);
#if 0
//------------------------AST dump, stringify-----------------------------
//------------------------------------------------------------------------
std::string buildOperatorString ( SgNode* astNode ); //transformationSupport.h
// do we need these?
std::string dump_node(const SgNode* astNode);
std::string dump_tree(const SgNode* astNode);
// or a friendly version of unparseToString(), as a memeber function
std::string SgNode::toString(bool asSubTree=true); // dump node or subtree
//----------------------------AST comparison------------------------------
//------------------------------------------------------------------------
// How to get generic functions for comparison?
bool isNodeEqual(SgNode* node1, SgNode* node2); //?
bool isTreeEqual(SgNode* tree1, SgNode* tree2);
//! Are two expressions equal (using a deep comparison)?
bool expressionTreeEqual(SgExpression*, SgExpression*);
//! Are corresponding expressions in two lists equal (using a deep comparison)?
bool expressionTreeEqualStar(const SgExpressionPtrList&,
const SgExpressionPtrList&);
//----------------------AST verfication/repair----------------------------
//------------------------------------------------------------------------
// sanity check of AST subtree, any suggestions?
// TODO
verifySgNode(SgNode* node, bool subTree=true);
//src/midend/astDiagnostics/AstConsistencyTests.h
// AstTests::runAllTests(SgProject * )
//src/midend/astUtil/astInterface/AstInterface.h.C
//FixSgProject(SgProject &project)
//FixSgTree(SgNode* r)
//src/frontend/SageIII/astPostProcessing
//AstPostProcessing(SgNode * node)
//--------------------------AST modification------------------------------
//------------------------------------------------------------------------
// any operations changing AST tree, including
// insert, copy, delete(remove), replace
// insert before or after some point, argument list is consistent with LowLevelRewrite
void insertAst(SgNode* targetPosition, SgNode* newNode, bool insertBefore=true);
// previous examples
//void myStatementInsert(SgStatement* target,...)
// void AstInterfaceBase::InsertStmt(AstNodePtr const & orig, AstNodePtr const &n, bool insertbefore, bool extractfromBasicBlock)
// copy
// copy children of one basic block to another basic block
//void appendStatementCopy (const SgBasicBlock* a, SgBasicBlock* b);
void copyStatements (const SgBasicBlock* src, SgBasicBlock* dst);
// delete (remove) a node or a whole subtree
void removeSgNode(SgNode* targetNode); // need this?
void removeSgNodeTree(SgNode* subtree); // need this?
void removeStatement( SgStatement* targetStmt);
//Move = delete + insert
void moveAst (SgNode* src, SgNode* target); // need this?
// similar to
void moveStatements (SgBasicBlock* src, SgBasicBlock* target);
// replace= delete old + insert new (via building or copying)
// DQ (1/25/2010): This does not appear to exist as a definition anywhere in ROSE.
// void replaceAst(SgNode* oldNode, SgNode* newNode);
//void replaceChild(SgNode* parent, SgNode* from, SgNode* to);
//bool AstInterface::ReplaceAst( const AstNodePtr& orig, const AstNodePtr& n)
//--------------------------AST transformations---------------------------
//------------------------------------------------------------------------
// Advanced AST modifications through basic AST modifications
// Might not be included in AST utitlity list, but listed here for the record.
// extract statements/content from a scope
void flattenBlocks(SgNode* n);
//src/midend/astInlining/inlinerSupport.h
void renameVariables(SgNode* n);
void renameLabels(SgNode* n, SgFunctionDefinition* enclosingFunctionDefinition);
void simpleCopyAndConstantPropagation(SgNode* top);
void changeAllMembersToPublic(SgNode* n);
void removeVariableDeclaration(SgInitializedName* initname);
//! Convert something like "int a = foo();" into "int a; a = foo();"
SgAssignOp* convertInitializerIntoAssignment(SgAssignInitializer* init);
//! Rewrites a while or for loop so that the official test is changed to
//! "true" and what had previously been the test is now an if-break
//! combination (with an inverted condition) at the beginning of the loop
//! body
void pushTestIntoBody(LoopStatement* loopStmt);
//programTransformation/finiteDifferencing/finiteDifferencing.h
//! Move variables declared in a for statement to just outside that statement.
void moveForDeclaredVariables(SgNode* root);
//------------------------ Is/Has functions ------------------------------
//------------------------------------------------------------------------
// misc. boolean functions
// some of them could moved to SgXXX class as a member function
bool isOverloaded (SgFunctionDeclaration * functionDeclaration);
bool isSwitchCond (const SgStatement* s);
bool isIfCond (const SgStatement* s);
bool isWhileCond (const SgStatement* s);
bool isStdNamespace (const SgScopeStatement* scope);
bool isTemplateInst (const SgDeclarationStatement* decl);
bool isCtor (const SgFunctionDeclaration* func);
bool isDtor (const SgFunctionDeclaration* func);
// src/midend/astInlining/typeTraits.h
bool hasTrivialDestructor(SgType* t);
ROSE_DLL_API bool isNonconstReference(SgType* t);
ROSE_DLL_API bool isReferenceType(SgType* t);
// generic ones, or move to the SgXXX class as a member function
bool isConst(SgNode* node); // const type, variable, function, etc.
// .... and more
bool isConstType (const SgType* type);
bool isConstFunction (const SgFunctionDeclaration* decl);
bool isMemberVariable(const SgInitializedName & var);
//bool isMemberVariable(const SgNode& in);
bool isPrototypeInScope (SgScopeStatement * scope,
SgFunctionDeclaration * functionDeclaration,
SgDeclarationStatement * startingAtDeclaration);
bool MayRedefined(SgExpression* expr, SgNode* root);
// bool isPotentiallyModified(SgExpression* expr, SgNode* root); // inlinderSupport.h
bool hasAddressTaken(SgExpression* expr, SgNode* root);
//src/midend/astInlining/inlinerSupport.C
// can also classified as topdown search
bool containsVariableReference(SgNode* root, SgInitializedName* var);
bool isDeclarationOf(SgVariableDeclaration* decl, SgInitializedName* var);
bool isPotentiallyModifiedDuringLifeOf(SgBasicBlock* sc,
SgInitializedName* toCheck,
SgInitializedName* lifetime)
//src/midend/programTransformation/partialRedundancyElimination/pre.h
bool anyOfListPotentiallyModifiedIn(const std::vector<SgVariableSymbol*>& syms, SgNode* n);
//------------------------ loop handling ---------------------------------
//------------------------------------------------------------------------
//get and set loop control expressions
// 0: init expr, 1: condition expr, 2: stride expr
SgExpression* getForLoopTripleValues(int valuetype,SgForStatement* forstmt );
int setForLoopTripleValues(int valuetype,SgForStatement* forstmt, SgExpression* exp);
bool isLoopIndexVarRef(SgForStatement* forstmt, SgVarRefExp *varref);
SgInitializedName * getLoopIndexVar(SgForStatement* forstmt);
//------------------------expressions-------------------------------------
//------------------------------------------------------------------------
//src/midend/programTransformation/partialRedundancyElimination/pre.h
int countComputationsOfExpressionIn(SgExpression* expr, SgNode* root);
//src/midend/astInlining/replaceExpressionWithStatement.h
void replaceAssignmentStmtWithStatement(SgExprStatement* from, StatementGenerator* to);
void replaceSubexpressionWithStatement(SgExpression* from,
StatementGenerator* to);
SgExpression* getRootOfExpression(SgExpression* n);
//--------------------------preprocessing info. -------------------------
//------------------------------------------------------------------------
//! Removes all preprocessing information at a given position.
void cutPreprocInfo (SgBasicBlock* b,
PreprocessingInfo::RelativePositionType pos,
AttachedPreprocessingInfoType& save_buf);
//! Pastes preprocessing information at the front of a statement.
void pastePreprocInfoFront (AttachedPreprocessingInfoType& save_buf,
SgStatement* s);
//! Pastes preprocessing information at the back of a statement.
void pastePreprocInfoBack (AttachedPreprocessingInfoType& save_buf,
SgStatement* s);
/*!
* \brief Moves 'before' preprocessing information.
* Moves all preprocessing information attached 'before' the source
* statement to the front of the destination statement.
*/
// a generic one for all
/// void movePreprocessingInfo(src, dest, RelativePositionType);
void moveBeforePreprocInfo (SgStatement* src, SgStatement* dest);
void moveInsidePreprocInfo (SgBasicBlock* src, SgBasicBlock* dest);
void moveAfterPreprocInfo (SgStatement* src, SgStatement* dest);
//--------------------------------operator--------------------------------
//------------------------------------------------------------------------
from transformationSupport.h, not sure if they should be included here
/* return enum code for SAGE operators */
operatorCodeType classifyOverloadedOperator(); // transformationSupport.h
/*! \brief generates a source code string from operator name.
This function returns a string representing the elementwise operator (for primative types)
that would be match that associated with the overloaded operator for a user-defined
abstractions (e.g. identifyOperator("operator+()") returns "+").
*/
std::string stringifyOperator (std::string name);
//--------------------------------macro ----------------------------------
//------------------------------------------------------------------------
std::string buildMacro ( std::string s ); //transformationSupport.h
//--------------------------------access functions---------------------------
//----------------------------------get/set sth.-----------------------------
// several categories:
* get/set a direct child/grandchild node or fields
* get/set a property flag value
* get a descendent child node using preorder searching
* get an ancestor node using bottomup/reverse searching
// SgName or string?
std::string getFunctionName (SgFunctionCallExp* functionCallExp);
std::string getFunctionTypeName ( SgFunctionCallExp* functionCallExpression );
// do we need them anymore? or existing member functions are enought?
// a generic one:
std::string get_name (const SgNode* node);
std::string get_name (const SgDeclarationStatement * declaration);
// get/set some property: should moved to SgXXX as an inherent memeber function?
// access modifier
void setExtern (SgFunctionDeclartion*)
void clearExtern()
// similarly for other declarations and other properties
void setExtern (SgVariableDeclaration*)
void setPublic()
void setPrivate()
#endif
// DQ (1/23/2013): Added support for generated a set of source sequence entries.
std::set<unsigned int> collectSourceSequenceNumbers( SgNode* astNode );
//--------------------------------Type Traits (C++)---------------------------
bool HasNoThrowAssign(const SgType * const inputType);
bool HasNoThrowCopy(const SgType * const inputType);
bool HasNoThrowConstructor(const SgType * const inputType);
bool HasTrivialAssign(const SgType * const inputType);
bool HasTrivialCopy(const SgType * const inputType);
bool HasTrivialConstructor(const SgType * const inputType);
bool HasTrivialDestructor(const SgType * const inputType);
bool HasVirtualDestructor(const SgType * const inputType);
bool IsBaseOf(const SgType * const inputBaseType, const SgType * const inputDerivedType);
bool IsAbstract(const SgType * const inputType);
bool IsClass(const SgType * const inputType);
bool IsEmpty(const SgType * const inputType);
bool IsEnum(const SgType * const inputType);
bool IsPod(const SgType * const inputType);
bool IsPolymorphic(const SgType * const inputType);
bool IsStandardLayout(const SgType * const inputType);
bool IsLiteralType(const SgType * const inputType);
bool IsTrivial(const SgType * const inputType);
bool IsUnion(const SgType * const inputType);
SgType * UnderlyingType(SgType *type);
// DQ (3/2/2014): Added a new interface function (used in the snippet insertion support).
void supportForInitializedNameLists ( SgScopeStatement* scope, SgInitializedNamePtrList & variableList );
// DQ (3/4/2014): Added support for testing two trees for equivalents using the AST iterators.
bool isStructurallyEquivalentAST( SgNode* tree1, SgNode* tree2 );
// JP (10/14/24): Moved code to evaluate a const integer expression (like in array size definitions) to SageInterface
/*! The datastructure is used as the return type for SageInterface::evaluateConstIntegerExpression(). One needs to always check whether hasValue_ is true before accessing value_ */
//! Result type of SageInterface::evaluateConstIntegerExpression().
//! Always check hasValue_ before reading value_.
struct const_int_expr_t {
  size_t value_;    // the folded value; meaningful only when hasValue_ is true
  bool hasValue_;   // true iff the expression could be evaluated to a constant
};
//! Result type of evaluateConstNumericExpression(); like const_int_expr_t
//! but with a floating-point payload.
struct const_numeric_expr_t {
  bool hasValue_;   // true iff the expression could be evaluated to a constant
  bool isIntOnly_;  // presumably: expression involved only integer operands -- TODO confirm against implementation
  double value_;    // the folded value; meaningful only when hasValue_ is true
};
/*! \brief The function tries to evaluate const integer expressions (such as are used in array dimension sizes). It follows variable symbols, and requires constness. */
struct const_int_expr_t evaluateConstIntegerExpression(SgExpression *expr);
struct const_numeric_expr_t evaluateConstNumericExpression(SgExpression *expr);
// JP (9/17/14): Added function to test whether two SgType* are equivalent or not
bool checkTypesAreEqual(SgType *typeA, SgType *typeB);
//--------------------------------Java interface functions ---------------------
#ifdef ROSE_BUILD_JAVA_LANGUAGE_SUPPORT
ROSE_DLL_API std::string getTempDirectory(SgProject *project);
ROSE_DLL_API void destroyTempDirectory(std::string);
ROSE_DLL_API SgFile *processFile(SgProject *, std::string, bool unparse = false);
ROSE_DLL_API std::string preprocessPackage(SgProject *, std::string);
ROSE_DLL_API std::string preprocessImport(SgProject *, std::string);
ROSE_DLL_API SgFile* preprocessCompilationUnit(SgProject *, std::string, std::string, bool unparse = true);
ROSE_DLL_API SgClassDefinition *findJavaPackage(SgScopeStatement *, std::string);
ROSE_DLL_API SgClassDefinition *findOrInsertJavaPackage(SgProject *, std::string, bool create_directory = false);
ROSE_DLL_API SgClassDeclaration *findOrImportJavaClass(SgProject *, SgClassDefinition *package_definition, std::string);
ROSE_DLL_API SgClassDeclaration *findOrImportJavaClass(SgProject *, std::string, std::string);
ROSE_DLL_API SgClassDeclaration *findOrImportJavaClass(SgProject *, SgClassType *);
ROSE_DLL_API SgMemberFunctionDeclaration *findJavaMain(SgClassDefinition *);
ROSE_DLL_API SgMemberFunctionDeclaration *findJavaMain(SgClassType *);
#endif // ROSE_BUILD_JAVA_LANGUAGE_SUPPORT
}// end of namespace
#endif
|
GB_AxB_colscale_meta.c | //------------------------------------------------------------------------------
// GB_AxB_colscale_meta: C=A*D where D is a square diagonal matrix
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// All entries in C=A*D are computed fully in parallel, using the same kind of
// parallelism as Template/GB_reduce_each_vector.c.
{
    // Template body: computes C = A*D where D is a square diagonal matrix.
    // A_is_pattern / D_is_pattern, nthreads, ntasks, and the slice arrays
    // are expected to be in scope at the #include site.

    // Dx, j, and Ah are unused if the operator is FIRST or PAIR
    #include "GB_unused.h"

    //--------------------------------------------------------------------------
    // get C, A, and D
    //--------------------------------------------------------------------------

    const int64_t *GB_RESTRICT Ap = A->p ;   // vector start/end offsets of A
    const int64_t *GB_RESTRICT Ah = A->h ;   // non-NULL: maps slice index k to vector j
    // pattern-only inputs carry no numeric values; leave the arrays NULL
    const GB_ATYPE *GB_RESTRICT Ax = A_is_pattern ? NULL : A->x ;
    const GB_BTYPE *GB_RESTRICT Dx = D_is_pattern ? NULL : D->x ;

    //--------------------------------------------------------------------------
    // C=A*D
    //--------------------------------------------------------------------------

    int tid ;
    #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
    for (tid = 0 ; tid < ntasks ; tid++)
    {
        // each task owns vectors kfirst..klast (inclusive)
        // if kfirst > klast then task tid does no work at all
        int64_t kfirst = kfirst_slice [tid] ;
        int64_t klast  = klast_slice [tid] ;

        //----------------------------------------------------------------------
        // C(:,kfirst:klast) = A(:,kfirst:klast)*D(kfirst:klast,kfirst:klast)
        //----------------------------------------------------------------------

        for (int64_t k = kfirst ; k <= klast ; k++)
        {
            //------------------------------------------------------------------
            // find the part of A(:,k) and C(:,k) to be operated on by this task
            //------------------------------------------------------------------

            int64_t j = (Ah == NULL) ? k : Ah [k] ;
            int64_t pA_start, pA_end ;
            GB_get_pA_and_pC (&pA_start, &pA_end, NULL,
                tid, k, kfirst, klast, pstart_slice, NULL, NULL, Ap) ;

            //------------------------------------------------------------------
            // C(:,j) = A(:,j)*D(j,j)
            //------------------------------------------------------------------

            GB_GETB (djj, Dx, j) ;                  // djj = D (j,j)
            GB_PRAGMA_VECTORIZE
            for (int64_t p = pA_start ; p < pA_end ; p++)
            {
                GB_GETA (aij, Ax, p) ;              // aij = A(i,j)
                GB_BINOP (GB_CX (p), aij, djj) ;    // C(i,j) = aij * djj
            }
        }
    }
}
|
a2.c | #include "omp.h"
/* SAXPY: Y[i] += a * X[i] for i in [0, N), offloaded to the default device.
 *
 * Bug fix: the original mapped X and Y into a "target data" region but ran
 * a plain host "#pragma omp parallel for" inside it.  The computation then
 * happened on the host, and at region exit the untouched device copy of Y
 * (mapped tofrom) was copied back, clobbering the host results.  Adding
 * "#pragma omp target" makes the loop execute on the device, so the
 * copy-back returns the real results.  Without offload support (or with
 * OpenMP disabled) the pragmas degrade gracefully to host execution. */
void axpy(int N, float *Y, float *X, float a) {
  int i;
#pragma omp target data map(to:X[0:N]) map(tofrom:Y[0:N])
#pragma omp target
#pragma omp parallel for
  for (i = 0; i < N; ++i)
    Y[i] += a * X[i];
}
|
6191.c | /* POLYBENCH/GPU-OPENMP
*
* This file is a part of the Polybench/GPU-OpenMP suite
*
* Contact:
* William Killian <killian@udel.edu>
*
* Copyright 2013, The University of Delaware
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
/* Default data type is double, default size is 4000. */
#include "atax.h"
/* Array initialization. */
/* Fill the inputs: x[i] = i*pi, A[i][j] = i*(j+1)/nx. */
static
void init_array (int nx, int ny,
		 DATA_TYPE POLYBENCH_2D(A,NX,NY,nx,ny),
		 DATA_TYPE POLYBENCH_1D(x,NY,ny))
{
  int i, j;

  for (i = 0; i < ny; i++)
      x[i] = i * M_PI;
  for (i = 0; i < nx; i++)
    for (j = 0; j < ny; j++)
      A[i][j] = ((DATA_TYPE) i*(j+1)) / nx;
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
/* Dump the live-out vector y to stderr (one value per DATA_PRINTF_MODIFIER,
   newline every 20 values) so the compiler cannot dead-code-eliminate the
   kernel; output can also be diffed for correctness checks. */
static
void print_array(int nx,
		 DATA_TYPE POLYBENCH_1D(y,NX,nx))
{
  int i;

  for (i = 0; i < nx; i++) {
    fprintf (stderr, DATA_PRINTF_MODIFIER, y[i]);
    if (i % 20 == 0) fprintf (stderr, "\n");
  }
  fprintf (stderr, "\n");
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
/* Main computational kernel: y = A' * (A * x).
 * tmp = A*x (row-parallel), then y accumulates A'*tmp.
 *
 * Bug fixes vs. the original:
 *  - The loops were wrapped in an extra "#pragma omp parallel", nesting
 *    parallel regions: every thread of the outer team re-executed both
 *    "parallel for" loops, so the work was done redundantly AND y was
 *    updated concurrently by multiple teams.  The outer region is removed.
 *  - Even without nesting, the scatter y[j] += A[i][j]*tmp[i] races across
 *    the i-parallel iterations (y[j] is shared); the update is now atomic. */
static
void kernel_atax(int nx, int ny,
		 DATA_TYPE POLYBENCH_2D(A,NX,NY,nx,ny),
		 DATA_TYPE POLYBENCH_1D(x,NY,ny),
		 DATA_TYPE POLYBENCH_1D(y,NY,ny),
		 DATA_TYPE POLYBENCH_1D(tmp,NX,nx))
{
  int i, j;

#pragma scop
  /* y = 0 */
  #pragma omp parallel for schedule(static, 1) num_threads(4)
  for (i = 0; i < _PB_NY; i++)
    y[i] = 0;

  #pragma omp parallel for private (j) schedule(static, 1) num_threads(4)
  for (i = 0; i < _PB_NX; i++)
    {
      /* tmp[i] = A(i,:) . x  -- private to this iteration, no race */
      tmp[i] = 0;
      for (j = 0; j < _PB_NY; j++)
	tmp[i] = tmp[i] + A[i][j] * x[j];
      /* y += A(i,:)' * tmp[i] -- y is shared, so update atomically */
      for (j = 0; j < _PB_NY; j++)
	{
	  #pragma omp atomic
	  y[j] = y[j] + A[i][j] * tmp[i];
	}
    }
#pragma endscop
}
/* Driver: allocate, initialize, time the kernel, print live-out data. */
int main(int argc, char** argv)
{
  /* Retrieve problem size (compile-time defaults NX x NY). */
  int nx = NX;
  int ny = NY;

  /* Variable declaration/allocation (polybench-managed arrays). */
  POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NX, NY, nx, ny);
  POLYBENCH_1D_ARRAY_DECL(x, DATA_TYPE, NY, ny);
  POLYBENCH_1D_ARRAY_DECL(y, DATA_TYPE, NY, ny);
  POLYBENCH_1D_ARRAY_DECL(tmp, DATA_TYPE, NX, nx);

  /* Initialize array(s). */
  init_array (nx, ny, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(x));

  /* Start timer. */
  polybench_start_instruments;

  /* Run kernel. */
  kernel_atax (nx, ny,
	       POLYBENCH_ARRAY(A),
	       POLYBENCH_ARRAY(x),
	       POLYBENCH_ARRAY(y),
	       POLYBENCH_ARRAY(tmp));

  /* Stop and print timer. */
  polybench_stop_instruments;
  polybench_print_instruments;

  /* Prevent dead-code elimination. All live-out data must be printed
     by the function call in argument. */
  polybench_prevent_dce(print_array(nx, POLYBENCH_ARRAY(y)));

  /* Be clean. */
  POLYBENCH_FREE_ARRAY(A);
  POLYBENCH_FREE_ARRAY(x);
  POLYBENCH_FREE_ARRAY(y);
  POLYBENCH_FREE_ARRAY(tmp);

  return 0;
}
|
perftest.c | /**
* Copyright (C) Mellanox Technologies Ltd. 2001-2014. ALL RIGHTS RESERVED.
* Copyright (C) The University of Tennessee and The University
* of Tennessee Research Foundation. 2015. ALL RIGHTS RESERVED.
* Copyright (C) UT-Battelle, LLC. 2015. ALL RIGHTS RESERVED.
*
* See file LICENSE for terms.
*/
#ifdef HAVE_CONFIG_H
# include "config.h"
#endif
#include "api/libperf.h"
#include "lib/libperf_int.h"
#include <ucs/sys/string.h>
#include <ucs/sys/sys.h>
#include <ucs/debug/log.h>
#include <sys/socket.h>
#include <arpa/inet.h>
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <netdb.h>
#include <getopt.h>
#include <string.h>
#include <sys/types.h>
#include <sys/poll.h>
#include <locale.h>
#if HAVE_MPI
# include <mpi.h>
#elif HAVE_RTE
# include<rte.h>
#endif
#define MAX_BATCH_FILES 32
#define TL_RESOURCE_NAME_NONE "<none>"
#define TEST_PARAMS_ARGS "t:n:s:W:O:w:D:i:H:oSCqM:r:T:d:x:A:BUm:"
/* Bit flags controlling what gets reported and how. */
enum {
    TEST_FLAG_PRINT_RESULTS = UCS_BIT(0),   /* print measurement rows at all */
    TEST_FLAG_PRINT_TEST    = UCS_BIT(1),   /* print the test-description header */
    TEST_FLAG_SET_AFFINITY  = UCS_BIT(8),   /* presumably pins to ctx->cpu -- usage not in view */
    TEST_FLAG_NUMERIC_FMT   = UCS_BIT(9),   /* thousands-grouped numeric output (%' formats) */
    TEST_FLAG_PRINT_FINAL   = UCS_BIT(10),  /* suppress intermediate rows; final report only */
    TEST_FLAG_PRINT_CSV     = UCS_BIT(11)   /* machine-readable CSV output */
};

/* State for the socket-based runtime environment (client/server pair). */
typedef struct sock_rte_group {
    int                          is_server;  /* nonzero on the listening side */
    int                          connfd;     /* connected socket descriptor */
} sock_rte_group_t;

/* One selectable benchmark: maps a command-line name to an API level,
 * operation, and traffic pattern (see the tests[] table). */
typedef struct test_type {
    const char                   *name;       /* command-line identifier */
    ucx_perf_api_t               api;         /* UCT or UCP */
    ucx_perf_cmd_t               command;     /* operation under test */
    ucx_perf_test_type_t         test_type;   /* ping-pong or unidirectional stream */
    const char                   *desc;       /* human-readable description */
} test_type_t;

/* Aggregated run configuration parsed from the command line. */
struct perftest_context {
    ucx_perf_params_t            params;                        /* test parameters passed to libperf */
    const char                   *server_addr;                  /* NULL when acting as server */
    int                          port;                          /* TCP port for the sock RTE */
    int                          mpi;                           /* nonzero when running under MPI */
    unsigned                     cpu;                           /* CPU for affinity (with TEST_FLAG_SET_AFFINITY) */
    unsigned                     flags;                         /* TEST_FLAG_* bits */

    unsigned                     num_batch_files;               /* entries used in batch_files[] */
    char                         *batch_files[MAX_BATCH_FILES]; /* batch-mode input files */
    char                         *test_names[MAX_BATCH_FILES];  /* names printed per batch test */

    sock_rte_group_t             sock_rte_group;                /* socket RTE state */
};
/* Registry of selectable tests: command-line name, API level (UCT/UCP),
 * operation, traffic pattern, and description.  NULL name terminates. */
test_type_t tests[] = {
    {"am_lat", UCX_PERF_API_UCT, UCX_PERF_CMD_AM, UCX_PERF_TEST_TYPE_PINGPONG,
     "active message latency"},

    {"put_lat", UCX_PERF_API_UCT, UCX_PERF_CMD_PUT, UCX_PERF_TEST_TYPE_PINGPONG,
     "put latency"},

    {"add_lat", UCX_PERF_API_UCT, UCX_PERF_CMD_ADD, UCX_PERF_TEST_TYPE_PINGPONG,
     "atomic add latency"},

    {"get", UCX_PERF_API_UCT, UCX_PERF_CMD_GET, UCX_PERF_TEST_TYPE_STREAM_UNI,
     "get latency / bandwidth / message rate"},

    {"fadd", UCX_PERF_API_UCT, UCX_PERF_CMD_FADD, UCX_PERF_TEST_TYPE_STREAM_UNI,
     "atomic fetch-and-add latency / rate"},

    {"swap", UCX_PERF_API_UCT, UCX_PERF_CMD_SWAP, UCX_PERF_TEST_TYPE_STREAM_UNI,
     "atomic swap latency / rate"},

    {"cswap", UCX_PERF_API_UCT, UCX_PERF_CMD_CSWAP, UCX_PERF_TEST_TYPE_STREAM_UNI,
     "atomic compare-and-swap latency / rate"},

    {"am_bw", UCX_PERF_API_UCT, UCX_PERF_CMD_AM, UCX_PERF_TEST_TYPE_STREAM_UNI,
     "active message bandwidth / message rate"},

    {"put_bw", UCX_PERF_API_UCT, UCX_PERF_CMD_PUT, UCX_PERF_TEST_TYPE_STREAM_UNI,
     "put bandwidth / message rate"},

    {"add_mr", UCX_PERF_API_UCT, UCX_PERF_CMD_ADD, UCX_PERF_TEST_TYPE_STREAM_UNI,
     "atomic add message rate"},

    {"tag_lat", UCX_PERF_API_UCP, UCX_PERF_CMD_TAG, UCX_PERF_TEST_TYPE_PINGPONG,
     "tag match latency"},

    {"tag_bw", UCX_PERF_API_UCP, UCX_PERF_CMD_TAG, UCX_PERF_TEST_TYPE_STREAM_UNI,
     "tag match bandwidth"},

    {"tag_sync_lat", UCX_PERF_API_UCP, UCX_PERF_CMD_TAG_SYNC, UCX_PERF_TEST_TYPE_PINGPONG,
     "tag sync match latency"},

    {"tag_sync_bw", UCX_PERF_API_UCP, UCX_PERF_CMD_TAG_SYNC, UCX_PERF_TEST_TYPE_STREAM_UNI,
     "tag sync match bandwidth"},

    {"ucp_put_lat", UCX_PERF_API_UCP, UCX_PERF_CMD_PUT, UCX_PERF_TEST_TYPE_PINGPONG,
     "put latency"},

    {"ucp_put_bw", UCX_PERF_API_UCP, UCX_PERF_CMD_PUT, UCX_PERF_TEST_TYPE_STREAM_UNI,
     "put bandwidth"},

    {"ucp_get", UCX_PERF_API_UCP, UCX_PERF_CMD_GET, UCX_PERF_TEST_TYPE_STREAM_UNI,
     "get latency / bandwidth / message rate"},

    {"ucp_add", UCX_PERF_API_UCP, UCX_PERF_CMD_ADD, UCX_PERF_TEST_TYPE_STREAM_UNI,
     "atomic add bandwidth / message rate"},

    {"ucp_fadd", UCX_PERF_API_UCP, UCX_PERF_CMD_FADD, UCX_PERF_TEST_TYPE_STREAM_UNI,
     "atomic fetch-and-add latency / bandwidth / rate"},

    {"ucp_swap", UCX_PERF_API_UCP, UCX_PERF_CMD_SWAP, UCX_PERF_TEST_TYPE_STREAM_UNI,
     "atomic swap latency / bandwidth / rate"},

    {"ucp_cswap", UCX_PERF_API_UCP, UCX_PERF_CMD_CSWAP, UCX_PERF_TEST_TYPE_STREAM_UNI,
     "atomic compare-and-swap latency / bandwidth / rate"},

    {"stream_bw", UCX_PERF_API_UCP, UCX_PERF_CMD_STREAM, UCX_PERF_TEST_TYPE_STREAM_UNI,
     "stream bandwidth"},

    {"stream_lat", UCX_PERF_API_UCP, UCX_PERF_CMD_STREAM, UCX_PERF_TEST_TYPE_PINGPONG,
     "stream latency"},

    {NULL}
};
/* Transfer exactly 'size' bytes over 'sock' via 'sock_call' (send or recv),
 * polling in 1ms slices and driving the user 'progress' callback between
 * slices so the perf context keeps advancing while blocked on the socket.
 * Returns 0 on success, -1 on error.
 *
 * Bug fixes vs. the original:
 *  - A zero return from sock_call (recv() reports the peer closed the
 *    connection) left 'total' unchanged, so the loop would spin forever;
 *    it is now reported as a premature-close error.
 *  - The ssize_t result was stored in an int, truncating large transfers;
 *    it is now kept in a ssize_t. */
static int sock_io(int sock, ssize_t (*sock_call)(int, void *, size_t, int),
                   int poll_events, void *data, size_t size,
                   void (*progress)(void *arg), void *arg, const char *name)
{
    size_t total = 0;
    struct pollfd pfd;
    ssize_t nbytes;
    int ret;

    while (total < size) {
        pfd.fd      = sock;
        pfd.events  = poll_events;
        pfd.revents = 0;

        ret = poll(&pfd, 1, 1); /* poll for 1ms */
        if (ret > 0) {
            ucs_assert(ret == 1);
            ucs_assert(pfd.revents & poll_events);

            nbytes = sock_call(sock, (char*)data + total, size - total, 0);
            if (nbytes < 0) {
                ucs_error("%s() failed: %m", name);
                return -1;
            }
            if (nbytes == 0) {
                /* recv() returns 0 when the peer closed the connection */
                ucs_error("%s() connection closed before transferring %zu bytes",
                          name, size);
                return -1;
            }
            total += nbytes;
        } else if ((ret < 0) && (errno != EINTR)) {
            ucs_error("poll(fd=%d) failed: %m", sock);
            return -1;
        }

        /* progress user context */
        if (progress != NULL) {
            progress(arg);
        }
    }
    return 0;
}
/* Send exactly 'size' bytes, blocking via sock_io() and driving 'progress'
 * while waiting.  The (void*) cast bridges the const-qualification mismatch
 * between send()'s prototype and sock_io's callback signature. */
static int safe_send(int sock, void *data, size_t size,
                     void (*progress)(void *arg), void *arg)
{
    return sock_io(sock, (void*)send, POLLOUT, data, size, progress, arg, "send");
}
/* Receive exactly 'size' bytes, blocking via sock_io() and driving
 * 'progress' (if non-NULL) while waiting. */
static int safe_recv(int sock, void *data, size_t size,
                     void (*progress)(void *arg), void *arg)
{
    return sock_io(sock, recv, POLLIN, data, size, progress, arg, "recv");
}
/* Print one measurement row (iterations, latencies in usec, bandwidths in
 * MB/s, message rates) in CSV, locale-grouped numeric, or plain format,
 * depending on 'flags'.  In CSV mode the test names are emitted first as
 * leading columns.  Nothing is printed unless results were requested, and
 * when only a final report is wanted, intermediate (non-final) calls are
 * skipped. */
static void print_progress(char **test_names, unsigned num_names,
                           const ucx_perf_result_t *result, unsigned flags,
                           int final)
{
    static const char *fmt_csv     = "%.0f,%.3f,%.3f,%.3f,%.2f,%.2f,%.0f,%.0f\n";
    static const char *fmt_numeric = "%'14.0f %9.3f %9.3f %9.3f %10.2f %10.2f %'11.0f %'11.0f\n";
    static const char *fmt_plain   = "%14.0f %9.3f %9.3f %9.3f %10.2f %10.2f %11.0f %11.0f\n";
    const char *fmt;
    unsigned idx;

    if (!(flags & TEST_FLAG_PRINT_RESULTS)) {
        return;
    }
    if (!final && (flags & TEST_FLAG_PRINT_FINAL)) {
        return;
    }

    if (flags & TEST_FLAG_PRINT_CSV) {
        for (idx = 0; idx < num_names; ++idx) {
            printf("%s,", test_names[idx]);
        }
        fmt = fmt_csv;
    } else if (flags & TEST_FLAG_NUMERIC_FMT) {
        fmt = fmt_numeric;
    } else {
        fmt = fmt_plain;
    }

    printf(fmt,
           (double)result->iters,
           result->latency.typical * 1000000.0,
           result->latency.moment_average * 1000000.0,
           result->latency.total_average * 1000000.0,
           result->bandwidth.moment_average / (1024.0 * 1024.0),
           result->bandwidth.total_average / (1024.0 * 1024.0),
           result->msgrate.moment_average,
           result->msgrate.total_average);
    fflush(stdout);
}
/*
 * Print the banner before a test run: the test description box (on the
 * side with TEST_FLAG_PRINT_TEST) and the results table header or CSV
 * column names (on the side with TEST_FLAG_PRINT_RESULTS).
 */
static void print_header(struct perftest_context *ctx)
{
    const char *test_api_str;
    const char *test_data_str;
    test_type_t *test;
    unsigned i;

    if (ctx->flags & TEST_FLAG_PRINT_TEST) {
        /* Find the tests[] entry matching the selected command/type pair */
        for (test = tests; test->name; ++test) {
            if ((test->command == ctx->params.command) && (test->test_type == ctx->params.test_type)) {
                break;
            }
        }
        if (test->name != NULL) {
            if (test->api == UCX_PERF_API_UCT) {
                test_api_str = "transport layer";
                switch (ctx->params.uct.data_layout) {
                case UCT_PERF_DATA_LAYOUT_SHORT:
                    test_data_str = "short";
                    break;
                case UCT_PERF_DATA_LAYOUT_BCOPY:
                    test_data_str = "bcopy";
                    break;
                case UCT_PERF_DATA_LAYOUT_ZCOPY:
                    test_data_str = "zcopy";
                    break;
                default:
                    test_data_str = "(undefined)";
                    break;
                }
            } else if (test->api == UCX_PERF_API_UCP) {
                test_api_str = "protocol layer";
                test_data_str = "(automatic)"; /* TODO contig/stride/stream */
            } else {
                return; /* unknown API: print nothing */
            }

            printf("+------------------------------------------------------------------------------------------+\n");
            printf("| API:          %-60s               |\n", test_api_str);
            printf("| Test:         %-60s               |\n", test->desc);
            printf("| Data layout:  %-60s               |\n", test_data_str);
            printf("| Message size: %-60zu               |\n", ucx_perf_get_message_size(&ctx->params));
        }
    }

    if (ctx->flags & TEST_FLAG_PRINT_CSV) {
        if (ctx->flags & TEST_FLAG_PRINT_RESULTS) {
            /* Leading CSV columns: one per batch file (its basename) */
            for (i = 0; i < ctx->num_batch_files; ++i) {
                printf("%s,", basename(ctx->batch_files[i]));
            }
            printf("iterations,typical_lat,avg_lat,overall_lat,avg_bw,overall_bw,avg_mr,overall_mr\n");
        }
    } else {
        if (ctx->flags & TEST_FLAG_PRINT_RESULTS) {
            printf("+--------------+-----------------------------+---------------------+-----------------------+\n");
            printf("|              |       latency (usec)        |   bandwidth (MB/s)  |  message rate (msg/s) |\n");
            printf("+--------------+---------+---------+---------+----------+----------+-----------+-----------+\n");
            printf("| # iterations | typical | average | overall |  average |  overall |  average  |  overall  |\n");
            printf("+--------------+---------+---------+---------+----------+----------+-----------+-----------+\n");
        } else if (ctx->flags & TEST_FLAG_PRINT_TEST) {
            /* close the description box opened above */
            printf("+------------------------------------------------------------------------------------------+\n");
        }
    }
}
/*
 * Print a table separator row with the current batch test names overlaid
 * on it (e.g. "+test1/test2----+---...").  Only used in non-CSV mode when
 * batch files are in use.
 *
 * Fixes a buffer overflow in the original: 'pos' was advanced by the full
 * strlen() even when the memcpy was truncated by ucs_min(), so a long name
 * could push 'pos' past the buffer and make 'sizeof(buf) - pos - 1'
 * underflow to a huge size_t on the next iteration.
 */
static void print_test_name(struct perftest_context *ctx)
{
    char buf[200];
    size_t tmpl_len, len;
    unsigned i, pos;

    if (!(ctx->flags & TEST_FLAG_PRINT_CSV) && (ctx->num_batch_files > 0)) {
        strcpy(buf, "+--------------+---------+---------+---------+----------+----------+-----------+-----------+");
        tmpl_len = strlen(buf);

        pos = 1;
        for (i = 0; i < ctx->num_batch_files; ++i) {
            if ((i != 0) && (pos < sizeof(buf) - 1)) {
                buf[pos++] = '/';
            }
            if (pos >= sizeof(buf) - 1) {
                break; /* no room left in the buffer */
            }
            /* copy at most the remaining space, and advance by the amount
             * actually copied */
            len = ucs_min(strlen(ctx->test_names[i]), sizeof(buf) - 1 - pos);
            memcpy(&buf[pos], ctx->test_names[i], len);
            pos += len;
        }

        /* If the names ran past the template's own terminator, re-terminate */
        if (pos > tmpl_len) {
            buf[pos] = '\0';
        }

        if (ctx->flags & TEST_FLAG_PRINT_RESULTS) {
            printf("%s\n", buf);
        }
    }
}
/*
 * Print the command-line help message, with current defaults taken from
 * 'ctx'.  Under MPI, only rank 0 prints.
 */
static void usage(const struct perftest_context *ctx, const char *program)
{
    static const char* api_names[] = {
        [UCX_PERF_API_UCT] = "UCT",
        [UCX_PERF_API_UCP] = "UCP"
    };
    test_type_t *test;
    int UCS_V_UNUSED rank;

#if HAVE_MPI
    /* only rank 0 prints usage in MPI mode */
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    if (ctx->mpi && (rank != 0)) {
        return;
    }
#endif

#if HAVE_MPI
    printf("  Note: test can be also launched as an MPI application\n");
    printf("\n");
#elif HAVE_RTE
    printf("  Note: this test can be also launched as an libRTE application\n");
    printf("\n");
#endif
    printf("  Usage: %s [ server-hostname ] [ options ]\n", program);
    printf("\n");
    printf("  Common options:\n");
    printf("     -t <test>      test to run:\n");
    for (test = tests; test->name; ++test) {
        printf("    %13s - %s %s\n", test->name,
               api_names[test->api], test->desc);
    }
    printf("\n");
    printf("     -s <size>      list of scatter-gather sizes for single message (%zu)\n",
           ctx->params.msg_size_list[0]);
    printf("                    for example: \"-s 16,48,8192,8192,14\"\n");
    printf("     -n <iters>     number of iterations to run (%ld)\n", ctx->params.max_iter);
    printf("     -w <iters>     number of warm-up iterations (%zu)\n",
           ctx->params.warmup_iter);
    printf("     -c <cpu>       set affinity to this CPU (off)\n");
    printf("     -O <count>     maximal number of uncompleted outstanding sends (%u)\n",
           ctx->params.max_outstanding);
    printf("     -i <offset>    distance between consecutive scatter-gather entries (%zu)\n",
           ctx->params.iov_stride);
    printf("     -T <threads>   number of threads in the test (%d), if >1 implies \"-M multi\"\n",
           ctx->params.thread_count);
    printf("     -B             register memory with NONBLOCK flag\n");
    printf("     -b <file>      read and execute tests from a batch file: every line in the\n");
    printf("                    file is a test to run, first word is test name, the rest of\n");
    printf("                    the line is command-line arguments for the test.\n");
    printf("     -p <port>      TCP port to use for data exchange (%d)\n", ctx->port);
#if HAVE_MPI
    printf("     -P <0|1>       disable/enable MPI mode (%d)\n", ctx->mpi);
#endif
    printf("     -h             show this help message\n");
    printf("\n");
    printf("  Output format:\n");
    printf("     -N             use numeric formatting (thousands separator)\n");
    printf("     -f             print only final numbers\n");
    printf("     -v             print CSV-formatted output\n");
    printf("\n");
    printf("  UCT only:\n");
    printf("     -d <device>    device to use for testing\n");
    printf("     -x <tl>        transport to use for testing\n");
    printf("     -D <layout>    data layout for sender side:\n");
    printf("                        short - short messages (default, cannot be used for get)\n");
    printf("                        bcopy - copy-out (cannot be used for atomics)\n");
    printf("                        zcopy - zero-copy (cannot be used for atomics)\n");
    printf("                        iov - scatter-gather list (iovec)\n");
    printf("     -W <count>     flow control window size, for active messages (%u)\n",
           ctx->params.uct.fc_window);
    printf("     -H <size>      active message header size (%zu)\n",
           ctx->params.am_hdr_size);
    printf("     -A <mode>      asynchronous progress mode (thread_spinlock)\n");
    printf("                        thread_spinlock - separate progress thread with spin locking\n");
    printf("                        thread_mutex - separate progress thread with mutex locking\n");
    printf("                        signal - signal-based timer\n");
    printf("\n");
    printf("  UCP only:\n");
    printf("     -M <thread>    thread support level for progress engine (single)\n");
    printf("                        single     - only the master thread can access\n");
    printf("                        serialized - one thread can access at a time\n");
    printf("                        multi      - multiple threads can access\n");
    printf("     -D <layout>[,<layout>]\n");
    printf("                    data layout for sender and receiver side (contig)\n");
    printf("                        contig - Continuous datatype\n");
    printf("                        iov    - Scatter-gather list\n");
    printf("     -C             use wild-card tag for tag tests\n");
    printf("     -U             force unexpected flow by using tag probe\n");
    printf("     -r <mode>      receive mode for stream tests (recv)\n");
    printf("                        recv       : Use ucp_stream_recv_nb\n");
    printf("                        recv_data  : Use ucp_stream_recv_data_nb\n");
    printf("     -m <mem type>  memory type of messages\n");
    printf("                        host - system memory(default)\n");
    /* CUDA options are only advertised when the allocators are available */
    if (ucx_perf_mem_type_allocators[UCT_MD_MEM_TYPE_CUDA] != NULL) {
        printf("                        cuda - NVIDIA GPU memory\n");
    }
    if (ucx_perf_mem_type_allocators[UCT_MD_MEM_TYPE_CUDA_MANAGED] != NULL) {
        printf("                        cuda-managed - NVIDIA cuda managed/unified memory\n");
    }
    printf("\n");
    printf("   NOTE: When running UCP tests, transport and device should be specified by\n");
    printf("         environment variables: UCX_TLS and UCX_[SELF|SHM|NET]_DEVICES.\n");
    printf("\n");
}
/* Return the final path component of 'path' without modifying the input
 * (unlike POSIX basename(), which may write to its argument). */
static const char *__basename(const char *path)
{
    const char *last_slash = strrchr(path, '/');

    if (last_slash == NULL) {
        return path;
    }
    return last_slash + 1;
}
/*
 * Parse one UCP datatype keyword ("iov" or "contig") into *datatype.
 * Matching is by prefix so the first component of a comma-separated
 * "-D <send>,<recv>" argument (e.g. "contig,iov") parses correctly.
 *
 * @return UCS_OK on a match, UCS_ERR_INVALID_PARAM otherwise.
 */
static ucs_status_t parse_ucp_datatype_params(const char *optarg,
                                              ucp_perf_datatype_t *datatype)
{
    if (strncmp(optarg, "iov", strlen("iov")) == 0) {
        *datatype = UCP_PERF_DATATYPE_IOV;
        return UCS_OK;
    }
    if (strncmp(optarg, "contig", strlen("contig")) == 0) {
        *datatype = UCP_PERF_DATATYPE_CONTIG;
        return UCS_OK;
    }
    return UCS_ERR_INVALID_PARAM;
}
/*
 * Parse a comma-separated list of message sizes ("-s 16,48,8192") into
 * params->msg_size_list / msg_size_cnt, replacing any previous list.
 *
 * Fixes vs. original:
 *  - realloc() result is assigned to a temporary, so the existing list is
 *    not leaked when the reallocation fails;
 *  - errno is reset before every strtoul() call, so a stale value from an
 *    earlier token cannot trigger a false parse error.
 *
 * @return UCS_OK, UCS_ERR_NO_MEMORY, or UCS_ERR_INVALID_PARAM.
 */
static ucs_status_t parse_message_sizes_params(const char *optarg,
                                               ucx_perf_params_t *params)
{
    char *optarg_ptr, *optarg_ptr2;
    size_t *new_list;
    size_t token_num, token_it;
    const char delim = ',';

    optarg_ptr = (char *)optarg;
    token_num  = 0;
    /* count the number of given message sizes */
    while ((optarg_ptr = strchr(optarg_ptr, delim)) != NULL) {
        ++optarg_ptr;
        ++token_num;
    }
    ++token_num; /* N delimiters means N+1 tokens */

    new_list = realloc(params->msg_size_list,
                       sizeof(*params->msg_size_list) * token_num);
    if (NULL == new_list) {
        return UCS_ERR_NO_MEMORY; /* old list still owned by params */
    }
    params->msg_size_list = new_list;

    optarg_ptr = (char *)optarg;
    for (token_it = 0; token_it < token_num; ++token_it) {
        errno = 0; /* strtoul only sets errno on failure; reset per token */
        params->msg_size_list[token_it] = strtoul(optarg_ptr, &optarg_ptr2, 10);
        if (((ERANGE == errno) && (ULONG_MAX == params->msg_size_list[token_it])) ||
            ((errno != 0) && (params->msg_size_list[token_it] == 0)) ||
            (optarg_ptr == optarg_ptr2)) {
            free(params->msg_size_list);
            params->msg_size_list = NULL; /* prevent double free */
            ucs_error("Invalid option substring argument at position %lu", token_it);
            return UCS_ERR_INVALID_PARAM;
        }
        optarg_ptr = optarg_ptr2 + 1; /* skip past the delimiter */
    }

    params->msg_size_cnt = token_num;
    return UCS_OK;
}
/*
 * Fill 'params' with the default test configuration used before any
 * command-line or batch-file options are applied.
 *
 * NOTE(review): the malloc() for msg_size_list is not checked before the
 * element write below — on OOM this dereferences NULL. Callers assume
 * allocation cannot fail here.
 */
static void init_test_params(ucx_perf_params_t *params)
{
    memset(params, 0, sizeof(*params));
    /* "LAST" sentinels mean "not selected yet"; -t must set them */
    params->api             = UCX_PERF_API_LAST;
    params->command         = UCX_PERF_CMD_LAST;
    params->test_type       = UCX_PERF_TEST_TYPE_LAST;
    params->thread_mode     = UCS_THREAD_MODE_SINGLE;
    params->thread_count    = 1;
    params->async_mode      = UCS_ASYNC_THREAD_LOCK_TYPE;
    params->wait_mode       = UCX_PERF_WAIT_MODE_LAST;
    params->max_outstanding = 1;
    params->warmup_iter     = 10000;
    params->am_hdr_size     = 8;
    params->alignment       = ucs_get_page_size();
    params->max_iter        = 1000000l;
    params->max_time        = 0.0;
    params->report_interval = 1.0;
    params->flags           = UCX_PERF_TEST_FLAG_VERBOSE;
    params->uct.fc_window   = UCT_PERF_TEST_MAX_FC_WINDOW;
    params->uct.data_layout = UCT_PERF_DATA_LAYOUT_SHORT;
    params->mem_type        = UCT_MD_MEM_TYPE_HOST;
    params->msg_size_cnt    = 1;
    params->iov_stride      = 0;
    params->ucp.send_datatype = UCP_PERF_DATATYPE_CONTIG;
    params->ucp.recv_datatype = UCP_PERF_DATATYPE_CONTIG;
    strcpy(params->uct.dev_name, TL_RESOURCE_NAME_NONE);
    strcpy(params->uct.tl_name,  TL_RESOURCE_NAME_NONE);
    /* default message size list: a single 8-byte message */
    params->msg_size_list = malloc(sizeof(*params->msg_size_list) *
                                   params->msg_size_cnt);
    params->msg_size_list[0] = 8;
}
/*
 * Apply a single test-parameter option ('opt' with argument 'optarg') to
 * 'params'.  Shared between the main command line and batch-file lines.
 *
 * Fix vs. original: the two 'return -1' paths in the -D case now return
 * UCS_ERR_INVALID_PARAM, consistent with every other error path in this
 * function (the return type is ucs_status_t, not a plain int).
 *
 * @return UCS_OK on success, a UCS error status otherwise.
 */
static ucs_status_t parse_test_params(ucx_perf_params_t *params, char opt, const char *optarg)
{
    test_type_t *test;
    char *optarg2 = NULL;

    switch (opt) {
    case 'd': /* UCT device name */
        ucs_snprintf_zero(params->uct.dev_name, sizeof(params->uct.dev_name),
                          "%s", optarg);
        return UCS_OK;
    case 'x': /* UCT transport name */
        ucs_snprintf_zero(params->uct.tl_name, sizeof(params->uct.tl_name),
                          "%s", optarg);
        return UCS_OK;
    case 't': /* test name: look it up in the tests[] table */
        for (test = tests; test->name; ++test) {
            if (!strcmp(optarg, test->name)) {
                params->api       = test->api;
                params->command   = test->command;
                params->test_type = test->test_type;
                break;
            }
        }
        if (test->name == NULL) {
            ucs_error("Invalid option argument for -t");
            return UCS_ERR_INVALID_PARAM;
        }
        return UCS_OK;
    case 'D': /* data layout: UCT keyword, or UCP "<send>[,<recv>]" */
        if (!strcmp(optarg, "short")) {
            params->uct.data_layout   = UCT_PERF_DATA_LAYOUT_SHORT;
        } else if (!strcmp(optarg, "bcopy")) {
            params->uct.data_layout   = UCT_PERF_DATA_LAYOUT_BCOPY;
        } else if (!strcmp(optarg, "zcopy")) {
            params->uct.data_layout   = UCT_PERF_DATA_LAYOUT_ZCOPY;
        } else if (UCS_OK == parse_ucp_datatype_params(optarg,
                                                       &params->ucp.send_datatype)) {
            optarg2 = strchr(optarg, ',');
            if (optarg2) {
                if (UCS_OK != parse_ucp_datatype_params(optarg2 + 1,
                                                        &params->ucp.recv_datatype)) {
                    return UCS_ERR_INVALID_PARAM; /* was 'return -1' */
                }
            }
        } else {
            ucs_error("Invalid option argument for -D");
            return UCS_ERR_INVALID_PARAM; /* was 'return -1' */
        }
        return UCS_OK;
    case 'i':
        params->iov_stride = atol(optarg);
        return UCS_OK;
    case 'n':
        params->max_iter = atol(optarg);
        return UCS_OK;
    case 's':
        return parse_message_sizes_params(optarg, params);
    case 'H':
        params->am_hdr_size = atol(optarg);
        return UCS_OK;
    case 'W':
        params->uct.fc_window = atoi(optarg);
        return UCS_OK;
    case 'O':
        params->max_outstanding = atoi(optarg);
        return UCS_OK;
    case 'w':
        params->warmup_iter = atol(optarg);
        return UCS_OK;
    case 'o':
        params->flags |= UCX_PERF_TEST_FLAG_ONE_SIDED;
        return UCS_OK;
    case 'B':
        params->flags |= UCX_PERF_TEST_FLAG_MAP_NONBLOCK;
        return UCS_OK;
    case 'q':
        params->flags &= ~UCX_PERF_TEST_FLAG_VERBOSE;
        return UCS_OK;
    case 'C':
        params->flags |= UCX_PERF_TEST_FLAG_TAG_WILDCARD;
        return UCS_OK;
    case 'U':
        params->flags |= UCX_PERF_TEST_FLAG_TAG_UNEXP_PROBE;
        return UCS_OK;
    case 'M': /* progress-engine thread support level */
        if (!strcmp(optarg, "single")) {
            params->thread_mode = UCS_THREAD_MODE_SINGLE;
            return UCS_OK;
        } else if (!strcmp(optarg, "serialized")) {
            params->thread_mode = UCS_THREAD_MODE_SERIALIZED;
            return UCS_OK;
        } else if (!strcmp(optarg, "multi")) {
            params->thread_mode = UCS_THREAD_MODE_MULTI;
            return UCS_OK;
        } else {
            ucs_error("Invalid option argument for -M");
            return UCS_ERR_INVALID_PARAM;
        }
    case 'T': /* thread count; >1 implies multi-threaded mode */
        params->thread_count = atoi(optarg);
        params->thread_mode  = UCS_THREAD_MODE_MULTI;
        return UCS_OK;
    case 'A': /* asynchronous progress mode */
        if (!strcmp(optarg, "thread") || !strcmp(optarg, "thread_spinlock")) {
            params->async_mode = UCS_ASYNC_MODE_THREAD_SPINLOCK;
            return UCS_OK;
        } else if (!strcmp(optarg, "thread_mutex")) {
            params->async_mode = UCS_ASYNC_MODE_THREAD_MUTEX;
            return UCS_OK;
        } else if (!strcmp(optarg, "signal")) {
            params->async_mode = UCS_ASYNC_MODE_SIGNAL;
            return UCS_OK;
        } else {
            ucs_error("Invalid option argument for -A");
            return UCS_ERR_INVALID_PARAM;
        }
    case 'r': /* stream receive mode */
        if (!strcmp(optarg, "recv_data")) {
            params->flags |= UCX_PERF_TEST_FLAG_STREAM_RECV_DATA;
            return UCS_OK;
        } else if (!strcmp(optarg, "recv")) {
            params->flags &= ~UCX_PERF_TEST_FLAG_STREAM_RECV_DATA;
            return UCS_OK;
        }
        return UCS_ERR_INVALID_PARAM;
    case 'm': /* message memory type; CUDA only if an allocator is present */
        if (!strcmp(optarg, "host")) {
            params->mem_type = UCT_MD_MEM_TYPE_HOST;
            return UCS_OK;
        } else if (!strcmp(optarg, "cuda") &&
                   (ucx_perf_mem_type_allocators[UCT_MD_MEM_TYPE_CUDA] != NULL)) {
            params->mem_type = UCT_MD_MEM_TYPE_CUDA;
            return UCS_OK;
        } else if (!strcmp(optarg, "cuda-managed") &&
                   (ucx_perf_mem_type_allocators[UCT_MD_MEM_TYPE_CUDA_MANAGED] != NULL)) {
            params->mem_type = UCT_MD_MEM_TYPE_CUDA_MANAGED;
            return UCS_OK;
        }
        return UCS_ERR_INVALID_PARAM;
    default:
        return UCS_ERR_INVALID_PARAM;
    }
}
/*
 * Read and parse the next non-empty, non-comment line of a batch file.
 * The first word of a line is the test name (returned in *test_name_p as
 * a strdup'ed string the caller must free); the rest are getopt-style
 * options applied to 'params'.
 *
 * @return UCS_OK on success, UCS_ERR_NO_ELEM at end of file, or the error
 *         from parse_test_params().
 */
static ucs_status_t read_batch_file(FILE *batch_file, const char *file_name,
                                    int *line_num, ucx_perf_params_t *params,
                                    char** test_name_p)
{
#define MAX_SIZE 256
#define MAX_ARG_SIZE 2048
    ucs_status_t status;
    char buf[MAX_ARG_SIZE];
    int argc;
    char *argv[MAX_SIZE + 1];
    int c;
    char *p;

    do {
        if (fgets(buf, sizeof(buf) - 1, batch_file) == NULL) {
            return UCS_ERR_NO_ELEM; /* end of file */
        }
        ++(*line_num);

        /* tokenize the line in place; argv[] points into buf */
        argc = 0;
        p = strtok(buf, " \t\n\r");
        while (p && (argc < MAX_SIZE)) {
            argv[argc++] = p;
            p = strtok(NULL, " \t\n\r");
        }
        argv[argc] = NULL;
    } while ((argc == 0) || (argv[0][0] == '#')); /* skip blank and comment lines */

    optind = 1; /* restart getopt() for this line's argument vector */
    while ((c = getopt (argc, argv, TEST_PARAMS_ARGS)) != -1) {
        status = parse_test_params(params, c, optarg);
        if (status != UCS_OK) {
            ucs_error("in batch file '%s' line %d: -%c %s: %s",
                      file_name, *line_num, c, optarg, ucs_status_string(status));
            return status;
        }
    }

    *test_name_p = strdup(argv[0]); /* ownership passes to the caller */
    return UCS_OK;
}
/*
 * Initialize 'ctx' to defaults and parse the main command line.
 * Unrecognized perftest options fall through to parse_test_params().
 * A trailing non-option argument is taken as the server hostname
 * (client mode); without it this process becomes the server.
 *
 * @return UCS_OK, UCS_ERR_CANCELED after printing help, or a parse error.
 */
static ucs_status_t parse_opts(struct perftest_context *ctx, int mpi_initialized,
                               int argc, char **argv)
{
    ucs_status_t status;
    int c;

    ucs_trace_func("");

    ucx_perf_global_init(); /* initialize memory types */

    init_test_params(&ctx->params);
    ctx->server_addr     = NULL;
    ctx->num_batch_files = 0;
    ctx->port            = 13337;
    ctx->flags           = 0;
    ctx->mpi             = mpi_initialized;

    optind = 1;
    while ((c = getopt (argc, argv, "p:b:Nfvc:P:h" TEST_PARAMS_ARGS)) != -1) {
        switch (c) {
        case 'p':
            ctx->port = atoi(optarg);
            break;
        case 'b':
            /* extra batch files beyond the limit are silently ignored */
            if (ctx->num_batch_files < MAX_BATCH_FILES) {
                ctx->batch_files[ctx->num_batch_files++] = optarg;
            }
            break;
        case 'N':
            ctx->flags |= TEST_FLAG_NUMERIC_FMT;
            break;
        case 'f':
            ctx->flags |= TEST_FLAG_PRINT_FINAL;
            break;
        case 'v':
            ctx->flags |= TEST_FLAG_PRINT_CSV;
            break;
        case 'c':
            ctx->flags |= TEST_FLAG_SET_AFFINITY;
            ctx->cpu = atoi(optarg);
            break;
        case 'P':
#if HAVE_MPI
            ctx->mpi = atoi(optarg) && mpi_initialized;
            break;
#endif
            /* without MPI support, -P deliberately falls through to help */
        case 'h':
            usage(ctx, __basename(argv[0]));
            return UCS_ERR_CANCELED;
        default:
            /* not a perftest option: try the shared test-parameter set */
            status = parse_test_params(&ctx->params, c, optarg);
            if (status != UCS_OK) {
                usage(ctx, __basename(argv[0]));
                return status;
            }
            break;
        }
    }

    if (optind < argc) {
        ctx->server_addr = argv[optind]; /* client mode */
    }

    return UCS_OK;
}
/* The TCP runtime environment always connects exactly one server and one
 * client, so the group size is a constant. */
static unsigned sock_rte_group_size(void *rte_group)
{
    (void)rte_group;
    return 2;
}
/* Rank within the 2-member TCP group: server is 0, client is 1. */
static unsigned sock_rte_group_index(void *rte_group)
{
    const sock_rte_group_t *grp = rte_group;

    return grp->is_server ? 0 : 1;
}
/*
 * Barrier over the TCP connection: the master thread exchanges a magic
 * value with the peer (send then receive), while invoking the user
 * 'progress' callback; all OpenMP threads then synchronize on the
 * trailing barrier.
 */
static void sock_rte_barrier(void *rte_group, void (*progress)(void *arg),
                             void *arg)
{
#pragma omp master
  {
    sock_rte_group_t *group = rte_group;
    const unsigned magic = 0xdeadbeef;
    unsigned sync;

    sync = magic;
    safe_send(group->connfd, &sync, sizeof(unsigned), progress, arg);

    sync = 0;
    safe_recv(group->connfd, &sync, sizeof(unsigned), progress, arg);

    /* peer must have echoed the same magic value back */
    ucs_assert(sync == magic);
  }
#pragma omp barrier
}
/* Post a scatter-gather buffer to the peer over TCP: first announce the
 * total payload size, then stream every fragment in order. */
static void sock_rte_post_vec(void *rte_group, const struct iovec *iovec,
                              int iovcnt, void **req)
{
    sock_rte_group_t *group = rte_group;
    size_t total = 0;
    int idx;

    for (idx = 0; idx < iovcnt; ++idx) {
        total += iovec[idx].iov_len;
    }
    safe_send(group->connfd, &total, sizeof(total), NULL, NULL);

    for (idx = 0; idx < iovcnt; ++idx) {
        safe_send(group->connfd, iovec[idx].iov_base, iovec[idx].iov_len,
                  NULL, NULL);
    }
}
/* Receive a message posted by the peer via sock_rte_post_vec(): the
 * payload length arrives first, then the payload itself.  A receive from
 * our own rank is a no-op. */
static void sock_rte_recv(void *rte_group, unsigned src, void *buffer,
                          size_t max, void *req)
{
    sock_rte_group_t *group = rte_group;
    unsigned self;
    size_t size;

    self = sock_rte_group_index(rte_group);
    if (src == self) {
        return;
    }

    /* with exactly two ranks, the sender must be the other one */
    ucs_assert_always(src == (1 - self));

    safe_recv(group->connfd, &size, sizeof(size), NULL, NULL);
    ucs_assert_always(size <= max);

    safe_recv(group->connfd, buffer, size, NULL, NULL);
}
/* Report callback for the TCP runtime environment: forward the result to
 * the shared progress printer. */
static void sock_rte_report(void *rte_group, const ucx_perf_result_t *result,
                            void *arg, int is_final)
{
    struct perftest_context *context = arg;

    print_progress(context->test_names, context->num_batch_files, result,
                   context->flags, is_final);
}
/* Runtime-environment vtable for the plain TCP (server/client) mode.
 * exchange_vec is a no-op: sock_rte_post_vec sends data eagerly. */
static ucx_perf_rte_t sock_rte = {
    .group_size    = sock_rte_group_size,
    .group_index   = sock_rte_group_index,
    .barrier       = sock_rte_barrier,
    .post_vec      = sock_rte_post_vec,
    .recv          = sock_rte_recv,
    .exchange_vec  = (void*)ucs_empty_function,
    .report        = sock_rte_report,
};
/*
 * Establish the TCP runtime environment.
 *
 * Server side (no server_addr): bind/listen/accept one connection, then
 * receive the test parameters (and message size list) chosen by the
 * client.  Client side: connect and send its parameters.  The server
 * prints the test description; the client prints the results.
 *
 * NOTE(review): the safe_recv()/safe_send() return values in the handshake
 * are ignored — a failed exchange proceeds with stale parameters; confirm
 * this is acceptable.  gethostbyname() is the legacy resolver (IPv4 only).
 *
 * @return UCS_OK or an error status; on error all sockets are closed.
 */
static ucs_status_t setup_sock_rte(struct perftest_context *ctx)
{
    struct sockaddr_in inaddr;
    struct hostent *he;
    ucs_status_t status;
    int optval = 1;
    int sockfd, connfd;
    int ret;

    sockfd = socket(AF_INET, SOCK_STREAM, 0);
    if (sockfd < 0) {
        ucs_error("socket() failed: %m");
        status = UCS_ERR_IO_ERROR;
        goto err;
    }

    if (ctx->server_addr == NULL) {
        /* ---- server side ---- */
        optval = 1;
        ret = setsockopt(sockfd, SOL_SOCKET, SO_REUSEADDR, &optval, sizeof(optval));
        if (ret < 0) {
            ucs_error("setsockopt(SO_REUSEADDR) failed: %m");
            status = UCS_ERR_INVALID_PARAM;
            goto err_close_sockfd;
        }

        inaddr.sin_family      = AF_INET;
        inaddr.sin_port        = htons(ctx->port);
        inaddr.sin_addr.s_addr = INADDR_ANY;
        memset(inaddr.sin_zero, 0, sizeof(inaddr.sin_zero));
        ret = bind(sockfd, (struct sockaddr*)&inaddr, sizeof(inaddr));
        if (ret < 0) {
            ucs_error("bind() failed: %m");
            status = UCS_ERR_INVALID_ADDR;
            goto err_close_sockfd;
        }

        ret = listen(sockfd, 10);
        if (ret < 0) {
            ucs_error("listen() failed: %m");
            status = UCS_ERR_IO_ERROR;
            goto err_close_sockfd;
        }

        printf("Waiting for connection...\n");

        /* Accept next connection */
        connfd = accept(sockfd, NULL, NULL);
        if (connfd < 0) {
            ucs_error("accept() failed: %m");
            status = UCS_ERR_IO_ERROR;
            goto err_close_sockfd;
        }
        close(sockfd); /* listening socket no longer needed */

        /* receive the client's test parameters; the embedded list pointer
         * is invalid on this side, so re-receive the list separately */
        safe_recv(connfd, &ctx->params, sizeof(ctx->params), NULL, NULL);
        if (ctx->params.msg_size_cnt) {
            ctx->params.msg_size_list = malloc(sizeof(*ctx->params.msg_size_list) *
                                               ctx->params.msg_size_cnt);
            if (NULL == ctx->params.msg_size_list) {
                status = UCS_ERR_NO_MEMORY;
                goto err_close_connfd;
            }

            safe_recv(connfd, ctx->params.msg_size_list,
                      sizeof(*ctx->params.msg_size_list) * ctx->params.msg_size_cnt,
                      NULL, NULL);
        }

        ctx->sock_rte_group.connfd    = connfd;
        ctx->sock_rte_group.is_server = 1;
    } else {
        /* ---- client side ---- */
        he = gethostbyname(ctx->server_addr);
        if (he == NULL || he->h_addr_list == NULL) {
            ucs_error("host %s not found: %s", ctx->server_addr,
                      hstrerror(h_errno));
            status = UCS_ERR_INVALID_ADDR;
            goto err_close_sockfd;
        }

        inaddr.sin_family = he->h_addrtype;
        inaddr.sin_port   = htons(ctx->port);
        ucs_assert(he->h_length == sizeof(inaddr.sin_addr));
        memcpy(&inaddr.sin_addr, he->h_addr_list[0], he->h_length);
        memset(inaddr.sin_zero, 0, sizeof(inaddr.sin_zero));

        ret = connect(sockfd, (struct sockaddr*)&inaddr, sizeof(inaddr));
        if (ret < 0) {
            ucs_error("connect() failed: %m");
            status = UCS_ERR_UNREACHABLE;
            goto err_close_sockfd;
        }

        /* push our parameters (and size list) to the server */
        safe_send(sockfd, &ctx->params, sizeof(ctx->params), NULL, NULL);
        if (ctx->params.msg_size_cnt) {
            safe_send(sockfd, ctx->params.msg_size_list,
                      sizeof(*ctx->params.msg_size_list) * ctx->params.msg_size_cnt,
                      NULL, NULL);
        }

        ctx->sock_rte_group.connfd    = sockfd;
        ctx->sock_rte_group.is_server = 0;
    }

    /* server prints the test banner; client prints the result rows */
    if (ctx->sock_rte_group.is_server) {
        ctx->flags |= TEST_FLAG_PRINT_TEST;
    } else {
        ctx->flags |= TEST_FLAG_PRINT_RESULTS;
    }
    ctx->params.rte_group  = &ctx->sock_rte_group;
    ctx->params.rte        = &sock_rte;
    ctx->params.report_arg = ctx;
    return UCS_OK;

err_close_connfd:
    close(connfd);
    goto err;
err_close_sockfd:
    close(sockfd);
err:
    return status;
}
/* Tear down the TCP runtime environment: both server and client stored
 * their data-exchange fd in connfd. */
static ucs_status_t cleanup_sock_rte(struct perftest_context *ctx)
{
    close(ctx->sock_rte_group.connfd);
    return UCS_OK;
}
#if HAVE_MPI
/* Group size in MPI mode is the size of MPI_COMM_WORLD. */
static unsigned mpi_rte_group_size(void *rte_group)
{
    int nranks;

    MPI_Comm_size(MPI_COMM_WORLD, &nranks);
    return nranks;
}
/* Our index in MPI mode is the rank in MPI_COMM_WORLD. */
static unsigned mpi_rte_group_index(void *rte_group)
{
    int my_rank;

    MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
    return my_rank;
}
/*
 * Barrier across MPI_COMM_WORLD that keeps calling the user 'progress'
 * callback while waiting (ping from every rank to rank 0, pong back).
 *
 * Fix vs. original: '#pragma omp master' without braces applies only to
 * the single following statement, so every OpenMP thread was executing
 * the MPI protocol below (only MPI_Comm_rank was master-only).  The whole
 * barrier body is now a structured block under the master construct,
 * matching sock_rte_barrier/ext_rte_barrier.
 */
static void mpi_rte_barrier(void *rte_group, void (*progress)(void *arg),
                            void *arg)
{
    int group_size, my_rank, i;
    MPI_Request *reqs;
    int nreqs = 0;
    int dummy;
    int flag;

#pragma omp master
  {
    /*
     * Naive non-blocking barrier implementation over send/recv, to call user
     * progress while waiting for completion.
     * Not using MPI_Ibarrier to be compatible with MPI-1.
     */
    MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
    MPI_Comm_size(MPI_COMM_WORLD, &group_size);

    /* allocate maximal possible number of requests */
    reqs = (MPI_Request*)alloca(sizeof(*reqs) * group_size);

    if (my_rank == 0) {
        /* root gathers "ping" from all other ranks */
        for (i = 1; i < group_size; ++i) {
            MPI_Irecv(&dummy, 0, MPI_INT,
                      i /* source */,
                      1 /* tag */,
                      MPI_COMM_WORLD,
                      &reqs[nreqs++]);
        }
    } else {
        /* every non-root rank sends "ping" and waits for "pong" */
        MPI_Send(&dummy, 0, MPI_INT,
                 0 /* dest */,
                 1 /* tag */,
                 MPI_COMM_WORLD);
        MPI_Irecv(&dummy, 0, MPI_INT,
                  0 /* source */,
                  2 /* tag */,
                  MPI_COMM_WORLD,
                  &reqs[nreqs++]);
    }

    /* Waiting for receive requests while driving user progress */
    do {
        MPI_Testall(nreqs, reqs, &flag, MPI_STATUSES_IGNORE);
        progress(arg);
    } while (!flag);

    if (my_rank == 0) {
        /* root sends "pong" to all ranks */
        for (i = 1; i < group_size; ++i) {
            MPI_Send(&dummy, 0, MPI_INT,
                     i /* dest */,
                     2 /* tag */,
                     MPI_COMM_WORLD);
        }
    }
  }
#pragma omp barrier
}
/*
 * Broadcast a scatter-gather buffer to every other rank: each iov entry
 * is a separate MPI_Send; the last entry carries tag 1 so the receiver
 * (mpi_rte_recv) knows where the message ends.
 *
 * NOTE(review): iov_len (size_t) is passed as the int 'count' argument of
 * MPI_Send — entries larger than INT_MAX would be truncated.
 */
static void mpi_rte_post_vec(void *rte_group, const struct iovec *iovec,
                             int iovcnt, void **req)
{
    int group_size;
    int my_rank;
    int dest, i;

    MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
    MPI_Comm_size(MPI_COMM_WORLD, &group_size);

    for (dest = 0; dest < group_size; ++dest) {
        if (dest == my_rank) {
            continue; /* don't send to ourselves */
        }
        for (i = 0; i < iovcnt; ++i) {
            MPI_Send(iovec[i].iov_base, iovec[i].iov_len, MPI_BYTE, dest,
                     i == (iovcnt - 1), /* Send last iov with tag == 1 */
                     MPI_COMM_WORLD);
        }
    }

    /* dummy non-NULL request handle; nothing to complete later */
    *req = (void*)(uintptr_t)1;
}
/*
 * Receive the fragments posted by mpi_rte_post_vec() from rank 'src',
 * appending them into 'buffer' until the fragment tagged 1 (the last
 * iov entry) arrives.  A receive from our own rank is a no-op.
 *
 * Fix vs. original: pointer arithmetic was performed on a void* buffer
 * ('buffer + offset'), which is a GNU extension and a constraint
 * violation in ISO C; the offset is now applied to a char* cursor.
 */
static void mpi_rte_recv(void *rte_group, unsigned src, void *buffer, size_t max,
                         void *req)
{
    MPI_Status status;
    size_t offset;
    int my_rank;
    int count;

    MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
    if (src == my_rank) {
        return;
    }

    offset = 0;
    do {
        ucs_assert_always(offset < max);
        MPI_Recv((char*)buffer + offset, max - offset, MPI_BYTE, src,
                 MPI_ANY_TAG, MPI_COMM_WORLD, &status);
        MPI_Get_count(&status, MPI_BYTE, &count);
        offset += count;
    } while (status.MPI_TAG != 1); /* tag 1 marks the last fragment */
}
/* Report callback for the MPI runtime environment: forward the result to
 * the shared progress printer. */
static void mpi_rte_report(void *rte_group, const ucx_perf_result_t *result,
                           void *arg, int is_final)
{
    struct perftest_context *context = arg;

    print_progress(context->test_names, context->num_batch_files, result,
                   context->flags, is_final);
}
/* Runtime-environment vtable for MPI mode.
 * exchange_vec is a no-op: mpi_rte_post_vec sends data eagerly. */
static ucx_perf_rte_t mpi_rte = {
    .group_size    = mpi_rte_group_size,
    .group_index   = mpi_rte_group_index,
    .barrier       = mpi_rte_barrier,
    .post_vec      = mpi_rte_post_vec,
    .recv          = mpi_rte_recv,
    .exchange_vec  = (void*)ucs_empty_function,
    .report        = mpi_rte_report,
};
#elif HAVE_RTE
static unsigned ext_rte_group_size(void *rte_group)
{
rte_group_t group = (rte_group_t)rte_group;
return rte_group_size(group);
}
static unsigned ext_rte_group_index(void *rte_group)
{
rte_group_t group = (rte_group_t)rte_group;
return rte_group_rank(group);
}
/*
 * Barrier in libRTE mode: the master thread delegates to rte_barrier()
 * (the user 'progress' callback is not driven here), then all OpenMP
 * threads synchronize on the trailing barrier.
 */
static void ext_rte_barrier(void *rte_group, void (*progress)(void *arg),
                            void *arg)
{
#pragma omp master
  {
    rte_group_t group = (rte_group_t)rte_group;
    int rc;

    rc = rte_barrier(group);
    if (RTE_SUCCESS != rc) {
        ucs_error("Failed to rte_barrier");
    }
  }
#pragma omp barrier
}
/*
 * Stage a scatter-gather buffer for exchange via an RTE store/retrieve
 * session.  The session handle is returned in *req; the actual transfer
 * happens in ext_rte_exchange_vec(), and the peer fetches it in
 * ext_rte_recv() (which also destroys the session).
 *
 * Fix vs. original: on calloc() failure the function returned without
 * setting *req, leaking the freshly created session and leaving the
 * caller's request handle uninitialized.  The session is now always
 * published so the caller's later session teardown can still run.
 */
static void ext_rte_post_vec(void *rte_group, const struct iovec* iovec,
                             int iovcnt, void **req)
{
    rte_group_t group = (rte_group_t)rte_group;
    rte_srs_session_t session;
    rte_iovec_t *r_vec;
    int i, rc;

    rc = rte_srs_session_create(group, 0, &session);
    if (RTE_SUCCESS != rc) {
        ucs_error("Failed to rte_srs_session_create");
    }

    r_vec = calloc(iovcnt, sizeof(rte_iovec_t));
    if (r_vec == NULL) {
        ucs_error("Failed to allocate iovec copy");
        *req = session; /* publish the session so it is not leaked */
        return;
    }

    /* translate struct iovec entries into RTE's iovec representation */
    for (i = 0; i < iovcnt; ++i) {
        r_vec[i].iov_base = iovec[i].iov_base;
        r_vec[i].type     = rte_datatype_uint8_t;
        r_vec[i].count    = iovec[i].iov_len;
    }
    rc = rte_srs_set_data(session, "KEY_PERF", r_vec, iovcnt);
    if (RTE_SUCCESS != rc) {
        ucs_error("Failed to rte_srs_set_data");
    }
    *req = session;
    free(r_vec); /* rte_srs_set_data copied what it needs */
}
/*
 * Fetch the data staged by rank 'src' under "KEY_PERF", unpack it into
 * 'buffer' (at most 'max' bytes), and destroy the exchange session that
 * ext_rte_post_vec() created and passed through 'req'.
 */
static void ext_rte_recv(void *rte_group, unsigned src, void *buffer,
                         size_t max, void *req)
{
    rte_group_t group         = (rte_group_t)rte_group;
    rte_srs_session_t session = (rte_srs_session_t)req;
    void *rte_buffer = NULL;
    rte_iovec_t r_vec;
    uint32_t offset;
    int size;
    int rc;

    rc = rte_srs_get_data(session, rte_group_index_to_ec(group, src),
                          "KEY_PERF", &rte_buffer, &size);
    if (RTE_SUCCESS != rc) {
        ucs_error("Failed to rte_srs_get_data");
        return; /* NOTE(review): session is not destroyed on this path */
    }

    /* unpack the raw exchange buffer into the caller's buffer */
    r_vec.iov_base = buffer;
    r_vec.type     = rte_datatype_uint8_t;
    r_vec.count    = max;

    offset = 0;
    rte_unpack(&r_vec, rte_buffer, &offset);

    rc = rte_srs_session_destroy(session);
    if (RTE_SUCCESS != rc) {
        ucs_error("Failed to rte_srs_session_destroy");
    }
    free(rte_buffer); /* allocated by rte_srs_get_data */
}
/* Perform the actual data exchange for a session staged by
 * ext_rte_post_vec(). */
static void ext_rte_exchange_vec(void *rte_group, void * req)
{
    rte_srs_session_t session = (rte_srs_session_t)req;

    if (rte_srs_exchange_data(session) != RTE_SUCCESS) {
        ucs_error("Failed to rte_srs_exchange_data");
    }
}
/* Report callback for the libRTE runtime environment: forward the result
 * to the shared progress printer. */
static void ext_rte_report(void *rte_group, const ucx_perf_result_t *result,
                           void *arg, int is_final)
{
    struct perftest_context *context = arg;

    print_progress(context->test_names, context->num_batch_files, result,
                   context->flags, is_final);
}
/* Runtime-environment vtable for libRTE mode; unlike the socket/MPI
 * variants, exchange_vec is a real operation here. */
static ucx_perf_rte_t ext_rte = {
    .group_size    = ext_rte_group_size,
    .group_index   = ext_rte_group_index,
    .barrier       = ext_rte_barrier,
    .report        = ext_rte_report,
    .post_vec      = ext_rte_post_vec,
    .recv          = ext_rte_recv,
    .exchange_vec  = ext_rte_exchange_vec,
};
#endif
/*
 * Set up the MPI (or libRTE) runtime environment and select which rank
 * prints the results (rank 1 in both cases).  Compiled to a no-op body
 * when neither HAVE_MPI nor HAVE_RTE is defined.
 */
static ucs_status_t setup_mpi_rte(struct perftest_context *ctx)
{
    ucs_trace_func("");

#if HAVE_MPI
    int size, rank;

    /* the send/recv protocol in mpi_rte assumes exactly 2 peers */
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    if (size != 2) {
        ucs_error("This test should run with exactly 2 processes (actual: %d)", size);
        return UCS_ERR_INVALID_PARAM;
    }

    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    if (rank == 1) {
        ctx->flags |= TEST_FLAG_PRINT_RESULTS;
    }

    ctx->params.rte_group  = NULL; /* mpi_rte uses MPI_COMM_WORLD directly */
    ctx->params.rte        = &mpi_rte;
    ctx->params.report_arg = ctx;
#elif HAVE_RTE
    rte_group_t group;

    rte_init(NULL, NULL, &group);
    if (1 == rte_group_rank(group)) {
        ctx->flags |= TEST_FLAG_PRINT_RESULTS;
    }

    ctx->params.rte_group  = group;
    ctx->params.rte        = &ext_rte;
    ctx->params.report_arg = ctx;
#endif
    return UCS_OK;
}
/* Tear down the MPI/libRTE runtime environment.  MPI_Finalize() is
 * deliberately left to main(); only libRTE needs explicit finalization. */
static ucs_status_t cleanup_mpi_rte(struct perftest_context *ctx)
{
#if HAVE_RTE
    rte_finalize();
#endif
    return UCS_OK;
}
/*
 * Validate the CPU configuration for the test: if -c was given, pin the
 * process to that CPU (failing on an out-of-range index); otherwise warn
 * when the current affinity mask spans more than 2 CPUs, since an
 * unpinned benchmark gives noisier numbers.
 *
 * Fix vs. original: corrected the error-message typo "ot of range".
 *
 * @return UCS_OK, or UCS_ERR_INVALID_PARAM on any failure.
 */
static ucs_status_t check_system(struct perftest_context *ctx)
{
    cpu_set_t cpuset;
    unsigned i, count, nr_cpus;
    int ret;

    ucs_trace_func("");

    ret = sysconf(_SC_NPROCESSORS_CONF);
    if (ret < 0) {
        ucs_error("failed to get local cpu count: %m");
        return UCS_ERR_INVALID_PARAM;
    }
    nr_cpus = ret;

    memset(&cpuset, 0, sizeof(cpuset));
    if (ctx->flags & TEST_FLAG_SET_AFFINITY) {
        if (ctx->cpu >= nr_cpus) {
            ucs_error("cpu (%u) out of range (0..%u)", ctx->cpu, nr_cpus - 1);
            return UCS_ERR_INVALID_PARAM;
        }
        CPU_SET(ctx->cpu, &cpuset);

        ret = sched_setaffinity(0, sizeof(cpuset), &cpuset);
        if (ret) {
            ucs_warn("sched_setaffinity() failed: %m");
            return UCS_ERR_INVALID_PARAM;
        }
    } else {
        ret = sched_getaffinity(0, sizeof(cpuset), &cpuset);
        if (ret) {
            ucs_warn("sched_getaffinity() failed: %m");
            return UCS_ERR_INVALID_PARAM;
        }

        /* count how many CPUs the process may currently run on */
        count = 0;
        for (i = 0; i < CPU_SETSIZE; ++i) {
            if (CPU_ISSET(i, &cpuset)) {
                ++count;
            }
        }
        if (count > 2) {
            ucs_warn("CPU affinity is not set (bound to %u cpus)."
                     " Performance may be impacted.", count);
        }
    }

    return UCS_OK;
}
/*
 * Deep-copy test parameters: the message size list is heap-allocated and
 * owned per-instance, so it must be duplicated rather than shared.
 * NOTE(review): the malloc() result is unchecked — on OOM the memcpy
 * dereferences NULL; callers assume allocation cannot fail here.
 */
static void clone_params(ucx_perf_params_t *dest, const ucx_perf_params_t *src)
{
    size_t msg_size_list_size;

    *dest = *src;
    msg_size_list_size  = dest->msg_size_cnt * sizeof(*dest->msg_size_list);
    dest->msg_size_list = malloc(msg_size_list_size);
    memcpy(dest->msg_size_list, src->msg_size_list, msg_size_list_size);
}
/*
 * Run the configured test, recursing through the batch files: level
 * 'depth' iterates the lines of batch file #depth, applying each line's
 * options on top of 'parent_params' and recursing one level deeper.  At
 * the bottom (all batch files consumed) the test itself is executed.
 *
 * NOTE(review): the status of the recursive call is intentionally
 * discarded and the function returns UCS_OK after iterating a batch
 * file, so one failing batch line does not abort the remaining lines.
 */
static ucs_status_t run_test_recurs(struct perftest_context *ctx,
                                    ucx_perf_params_t *parent_params,
                                    unsigned depth)
{
    ucx_perf_params_t params;
    ucx_perf_result_t result;
    ucs_status_t status;
    FILE *batch_file;
    int line_num;

    ucs_trace_func("depth=%u, num_files=%u", depth, ctx->num_batch_files);

    if (parent_params->api == UCX_PERF_API_UCP) {
        /* -d / -x are UCT-only options; warn when they were set for UCP */
        if (strcmp(parent_params->uct.dev_name, TL_RESOURCE_NAME_NONE)) {
            ucs_warn("-d '%s' ignored for UCP test; see NOTES section in help message",
                     parent_params->uct.dev_name);
        }
        if (strcmp(parent_params->uct.tl_name, TL_RESOURCE_NAME_NONE)) {
            ucs_warn("-x '%s' ignored for UCP test; see NOTES section in help message",
                     parent_params->uct.tl_name);
        }
    }

    if (depth >= ctx->num_batch_files) {
        /* recursion bottom: all batch options applied, run the test */
        print_test_name(ctx);
        return ucx_perf_run(parent_params, &result);
    }

    batch_file = fopen(ctx->batch_files[depth], "r");
    if (batch_file == NULL) {
        ucs_error("Failed to open batch file '%s': %m", ctx->batch_files[depth]);
        return UCS_ERR_IO_ERROR;
    }

    /* each batch line starts from a fresh copy of the parent's params */
    clone_params(&params, parent_params);
    line_num = 0;
    while ((status = read_batch_file(batch_file, ctx->batch_files[depth],
                                     &line_num, &params,
                                     &ctx->test_names[depth])) == UCS_OK) {
        status = run_test_recurs(ctx, &params, depth + 1);
        free(params.msg_size_list);
        free(ctx->test_names[depth]);
        ctx->test_names[depth] = NULL;

        clone_params(&params, parent_params);
    }
    free(params.msg_size_list);
    fclose(batch_file);

    return UCS_OK;
}
/* Top-level test driver: print the banner, then run the (possibly
 * batch-expanded) test tree starting at depth 0. */
static ucs_status_t run_test(struct perftest_context *ctx)
{
    ucs_status_t status;

    ucs_trace_func("");

    /* enable thousands separators for the %'... printf formats */
    setlocale(LC_ALL, "en_US");

    print_header(ctx);

    status = run_test_recurs(ctx, &ctx->params, 0);
    if (status != UCS_OK) {
        ucs_error("Failed to run test: %s", ucs_status_string(status));
    }

    return status;
}
/*
 * Entry point: optionally initialize MPI, parse options, validate CPU
 * affinity, create the runtime environment (MPI/libRTE vs. TCP sockets),
 * run the test, and clean everything up.
 *
 * Exit codes: 0 on success (or after -h), -127 on a command-line error,
 * -1 on any runtime failure.
 */
int main(int argc, char **argv)
{
    struct perftest_context ctx;
    ucs_status_t status;
    int mpi_initialized;
    int mpi_rte;
    int ret;

#if HAVE_MPI
    /* Only attempt MPI_Init when stdin is not a terminal (i.e. launched
     * under mpirun); MPI_SUCCESS is 0. */
    mpi_initialized = !isatty(0) && (MPI_Init(&argc, &argv) == 0);
#else
    mpi_initialized = 0;
#endif

    /* Parse command line */
    status = parse_opts(&ctx, mpi_initialized, argc, argv);
    if (status != UCS_OK) {
        ret = (status == UCS_ERR_CANCELED) ? 0 : -127;
        goto out;
    }

#ifdef __COVERITY__
    /* coverity[dont_call] */
    mpi_rte = rand(); /* Shut up deadcode error */
#endif

    /* Choose the runtime environment flavor */
    if (ctx.mpi) {
        mpi_rte = 1;
    } else {
#if HAVE_RTE
        mpi_rte = 1;
#else
        mpi_rte = 0;
#endif
    }

    status = check_system(&ctx);
    if (status != UCS_OK) {
        ret = -1;
        goto out;
    }

    /* Create RTE */
    status = (mpi_rte) ? setup_mpi_rte(&ctx) : setup_sock_rte(&ctx);
    if (status != UCS_OK) {
        ret = -1;
        goto out;
    }

    /* Run the test */
    status = run_test(&ctx);
    if (status != UCS_OK) {
        ret = -1;
        goto out_cleanup_rte;
    }

    ret = 0;

out_cleanup_rte:
    (mpi_rte) ? cleanup_mpi_rte(&ctx) : cleanup_sock_rte(&ctx);
out:
    /* msg_size_list was allocated by init_test_params()/parse_opts() */
    if (ctx.params.msg_size_list) {
        free(ctx.params.msg_size_list);
    }
    if (mpi_initialized) {
#if HAVE_MPI
        MPI_Finalize();
#endif
    }
    return ret;
}
|
omp2.c | // RUN: if [ %llvmver -ge 9 ]; then %clang -fopenmp -std=c11 -O1 -fno-vectorize -fno-unroll-loops -disable-llvm-optzns %s -S -emit-llvm -o - | %opt - %loadEnzyme -enzyme -S | %clang -fopenmp -x ir - -o %s.out && %s.out; fi
// RUN: if [ %llvmver -ge 9 ]; then %clang -fopenmp -std=c11 -O1 -fno-vectorize -fno-unroll-loops %s -S -emit-llvm -o - | %opt - %loadEnzyme -enzyme -S | %clang -fopenmp -x ir - -o %s.out && %s.out ; fi
// RUN: if [ %llvmver -ge 9 ]; then %clang -fopenmp -std=c11 -O2 -fno-vectorize -fno-unroll-loops %s -S -emit-llvm -o - | %opt - %loadEnzyme -enzyme -S | %clang -fopenmp -x ir - -o %s.out && %s.out ; fi
// RUN: if [ %llvmver -ge 9 ]; then %clang -fopenmp -std=c11 -O3 -fno-vectorize -fno-unroll-loops %s -S -emit-llvm -o - | %opt - %loadEnzyme -enzyme -S | %clang -fopenmp -x ir - -o %s.out && %s.out ; fi
// note not doing O0 below as to ensure we get tbaa
// RUN: if [ %llvmver -ge 9 ]; then %clang -fopenmp -std=c11 -O1 -fno-vectorize -fno-unroll-loops -Xclang -disable-llvm-optzns %s -S -emit-llvm -o - | %opt - %loadEnzyme -enzyme -enzyme-inline=1 -S | %clang -fopenmp -x ir - -o %s.out && %s.out; fi
// RUN: if [ %llvmver -ge 9 ]; then %clang -fopenmp -std=c11 -O1 -fno-vectorize -fno-unroll-loops %s -S -emit-llvm -o - | %opt - %loadEnzyme -enzyme -enzyme-inline=1 -S | %clang -fopenmp -x ir - -o %s.out && %s.out ; fi
// RUN: if [ %llvmver -ge 9 ]; then %clang -fopenmp -std=c11 -O2 -fno-vectorize -fno-unroll-loops %s -S -emit-llvm -o - | %opt - %loadEnzyme -enzyme -enzyme-inline=1 -S | %clang -fopenmp -x ir - -o %s.out && %s.out ; fi
// RUN: if [ %llvmver -ge 9 ]; then %clang -fopenmp -std=c11 -O3 -fno-vectorize -fno-unroll-loops %s -S -emit-llvm -o - | %opt - %loadEnzyme -enzyme -enzyme-inline=1 -S | %clang -fopenmp -x ir - -o %s.out && %s.out ; fi
#include <stdio.h>
#include <math.h>
#include <assert.h>
#include "test_utils.h"
double __enzyme_autodiff(void*, ...);
/*
void omp(float& a, int N) {
#define N 20
#pragma omp parallel for
for (int i=0; i<N; i++) {
//a[i] *= a[i];
(&a)[i] *= (&a)[i];
}
#undef N
(&a)[0] = 0;
}
*/
// Squares each element a[M..N-1] inside an OpenMP parallel loop, then zeroes
// a[0]. This file is an Enzyme autodiff regression test: the pragmas and the
// unsigned loop induction variable are part of what is being tested, so the
// loop must stay in exactly this shape.
void omp(float* a, int N, int M) {
  #pragma omp parallel for
  #pragma nounroll
  for (unsigned int i=M; i<N; i++) {
    //a[i] *= a[i];
    a[i] *= a[i];
  }
  a[0] = 0;
}
// Driver for the Enzyme OpenMP reverse-mode test: differentiates omp() and
// checks the gradient entries produced for the ranges [1,M) (untouched) and
// [M,N) (squared), plus the zeroed element a[0].
int main(int argc, char** argv) {
  int N = 20;
  int M = 10;
  float a[N];
  for(int i=0; i<N; i++) {
    a[i] = i+1;
  }
  // Shadow (gradient) buffer, seeded with ones.
  float d_a[N];
  for(int i=0; i<N; i++)
    d_a[i] = 1.0f;
  //omp(*a, N);
  printf("ran omp\n");
  // Expected gradients: d(a[i]^2)/da[i] = 2*a[i] = 2*(i+1) for i in [M,N);
  // entries below M keep their 1.0 seed; a[0] is overwritten with 0, so its
  // gradient must be 0.
  __enzyme_autodiff((void*)omp, a, d_a, N, M);
  for(int i=0; i<N; i++) {
    printf("a[%d]=%f d_a[%d]=%f\n", i, a[i], i, d_a[i]);
  }
  //APPROX_EQ(da, 17711.0*2, 1e-10);
  //APPROX_EQ(db, 17711.0*2, 1e-10);
  //printf("hello! %f, res2 %f, da: %f, db: %f\n", ret, ret, da,db);
  APPROX_EQ(d_a[0], 0.0f, 1e-10);
  for(int i=1; i<N; i++) {
    if (i < M) {
      APPROX_EQ(d_a[i], 1.0f, 1e-10);
    } else {
      APPROX_EQ(d_a[i], 2.0f*(i+1), 1e-10);
    }
  }
  return 0;
}
|
karman-par.c | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <getopt.h>
#include <errno.h>
#include <mpi.h>
#include <immintrin.h>
#include "alloc.h"
#include "boundary.h"
#include "datadef.h"
#include "init.h"
#include "simulation-par.h"
#include "tiles.h"
void write_bin(float **u, float **v, float **p, char **flag,
int imax, int jmax, float xlength, float ylength, char *file);
int read_bin(float **u, float **v, float **p, char **flag,
int imax, int jmax, float xlength, float ylength, char *file);
static void print_usage(void);
static void print_version(void);
static void print_help(void);
static char *progname;
int proc = 0; /* Rank of the current process */
int nprocs = 0; /* Number of processes in communicator */
int *ileft, *iright; /* Array bounds for each processor */
#define PACKAGE "karman"
#define VERSION "1.0"
/* Command line options */
/* Command line options. has_arg must agree with GETOPTS: options listed there
 * with a trailing ':' take a required argument (1), the rest take none (0).
 * Fix: "version" previously declared has_arg=1, so `--version` demanded an
 * argument although `-V` (no colon in GETOPTS) takes none. */
static struct option long_opts[] = {
    { "del-t",   1, NULL, 'd' },
    { "help",    0, NULL, 'h' },
    { "imax",    1, NULL, 'x' },
    { "infile",  1, NULL, 'i' },
    { "jmax",    1, NULL, 'y' },
    { "outfile", 1, NULL, 'o' },
    { "t-end",   1, NULL, 't' },
    { "verbose", 1, NULL, 'v' },
    { "version", 0, NULL, 'V' },
    { 0, 0, 0, 0 }
};
#define GETOPTS "d:hi:o:t:v:Vx:y:"
/*
 * Program entry point: parses options, initialises MPI and the tile
 * decomposition, runs the time-stepping loop and writes the final state.
 *
 * Fixes relative to the previous revision:
 *  - MPI_Init() now precedes MPI_Wtime() (the MPI standard forbids calling
 *    MPI_Wtime before initialisation);
 *  - `res` is initialised, since it is printed even when poisson() is skipped;
 *  - failure paths after MPI_Init no longer abandon an initialised MPI;
 *  - the strdup'd infile/outfile buffers are freed;
 *  - the final timing printf ends with a newline.
 */
int main(int argc, char *argv[])
{
    /* Flush denormals to zero to avoid slow subnormal float arithmetic */
    _MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_ON);
    int verbose = 1;          /* Verbosity level */
    float xlength = 22.0;     /* Width of simulated domain */
    float ylength = 4.1;      /* Height of simulated domain */
    int imax = 660;           /* Number of cells horizontally */
    int jmax = 120;           /* Number of cells vertically */
    char *infile;             /* Input raw initial conditions */
    char *outfile;            /* Output raw simulation results */
    float t_end = 2.1;        /* Simulation runtime */
    float del_t = 0.003;      /* Duration of each timestep */
    float tau = 0.5;          /* Safety factor for timestep control */
    int itermax = 100;        /* Maximum number of iterations in SOR */
    float eps = 0.001;        /* Stopping error threshold for SOR */
    float omega = 1.7;        /* Relaxation parameter for SOR */
    float gamma = 0.9;        /* Upwind differencing factor in PDE
                                 discretisation */
    float Re = 150.0;         /* Reynolds number */
    float ui = 1.0;           /* Initial X velocity */
    float vi = 0.0;           /* Initial Y velocity */
    float t, delx, dely;
    int i, j, itersor = 0, ifluid = 0, ibound = 0;
    float res = 0.0;          /* fix: printed below even when poisson() is
                                 never called, so must not stay uninitialised */
    float **u, **v, **p, **rhs, **f, **g;
    char **flag;
    int init_case, iters = 0;
    int show_help = 0, show_usage = 0, show_version = 0;
    progname = argv[0];
    infile = strdup("karman.bin");
    outfile = strdup("karman.bin");
    int optc;
    while ((optc = getopt_long(argc, argv, GETOPTS, long_opts, NULL)) != -1) {
        switch (optc) {
            case 'h':
                show_help = 1;
                break;
            case 'V':
                show_version = 1;
                break;
            case 'v':
                verbose = atoi(optarg);
                break;
            case 'x':
                imax = atoi(optarg);
                break;
            case 'y':
                jmax = atoi(optarg);
                break;
            case 'i':
                free(infile);
                infile = strdup(optarg);
                break;
            case 'o':
                free(outfile);
                outfile = strdup(optarg);
                break;
            case 'd':
                del_t = atof(optarg);
                break;
            case 't':
                t_end = atof(optarg);
                break;
            default:
                show_usage = 1;
        }
    }
    if (show_usage || optind < argc) {
        print_usage();
        return 1;
    }
    if (show_version) {
        print_version();
        if (!show_help) {
            return 0;
        }
    }
    if (show_help) {
        print_help();
        return 0;
    }
    /* fix: MPI must be initialised before any other MPI call, including
     * MPI_Wtime() — previously the timer was read before MPI_Init. */
    MPI_Init(&argc, &argv);
    double program_start = MPI_Wtime();
    MPI_Comm_size(MPI_COMM_WORLD, &nprocs);
    MPI_Comm_rank(MPI_COMM_WORLD, &proc);
    // test_halo_sync(proc, nprocs);
    delx = xlength/imax;
    dely = ylength/jmax;
    /* Allocate arrays (each imax+2 x jmax+2 to hold a 1-cell halo) */
    u    = alloc_floatmatrix(imax+2, jmax+2);
    v    = alloc_floatmatrix(imax+2, jmax+2);
    f    = alloc_floatmatrix(imax+2, jmax+2);
    g    = alloc_floatmatrix(imax+2, jmax+2);
    p    = alloc_floatmatrix(imax+2, jmax+2);
    rhs  = alloc_floatmatrix(imax+2, jmax+2);
    flag = alloc_charmatrix(imax+2, jmax+2);
    // Initialise the tile decomposition of the mesh across ranks
    struct TileData tile_data;
    init_tile_data(proc, nprocs, imax + 2, jmax + 2, &tile_data);
    // Output the info for debug purposes
    if (proc == 0) {
        printf("I am process %d. The mesh has been split into a shape of %dx%d. Tiles sizes are %dx%d\n. My pos is %dx%d. Array access is [%d:%d, %d:%d]\n"
            , proc,
            tile_data.num_x,
            tile_data.num_y,
            tile_data.width,
            tile_data.height,
            tile_data.pos_x,
            tile_data.pos_y,
            tile_data.start_x,
            tile_data.end_x,
            tile_data.start_y,
            tile_data.end_y);
    }
    if (!u || !v || !f || !g || !p || !rhs || !flag) {
        fprintf(stderr, "Couldn't allocate memory for matrices.\n");
        /* fix: allocation may fail on only some ranks, so a collective
         * MPI_Finalize could deadlock; abort the whole job instead. */
        MPI_Abort(MPI_COMM_WORLD, 1);
        return 1;
    }
    /* Read in initial values from a file if it exists */
    init_case = read_bin(u, v, p, flag, imax, jmax, xlength, ylength, infile);
    if (init_case > 0) {
        /* Error while reading file. Every rank reads the same file, so all
         * ranks take this branch together and Finalize is safe. */
        MPI_Finalize();
        return 1;
    }
    if (init_case < 0) {
        /* Set initial values if file doesn't exist */
#pragma omp parallel for private(i, j) firstprivate(ui, vi, u, v, p, imax, jmax) default(none)
        for (i=0;i<=imax+1;i++) {
            for (j=0;j<=jmax+1;j++) {
                u[i][j] = ui;
                v[i][j] = vi;
                p[i][j] = 0.0;
            }
        }
        init_flag(flag, imax, jmax, delx, dely, &ibound);
        apply_tile_boundary_conditions(u, v, flag, imax, jmax, ui, vi, &tile_data);
        double sync_time_taken = 0.0;
        halo_sync(proc, u, &tile_data, &sync_time_taken);
        halo_sync(proc, v, &tile_data, &sync_time_taken);
        halo_sync(proc, p, &tile_data, &sync_time_taken);
    }
    // Variables to keep track of time taken for each part
    double start, timestep_time_taken, compute_velocity_time_taken, rhs_time_taken, possion_time_taken, update_velocity_time_taken, boundary_time_taken;
    double sync_time_taken, possion_p_loop_time_taken, possion_res_loop_time_taken = 0.0;
    /* Main loop */
    for (t = 0.0; t < t_end; t += del_t, iters++) {
        // Reset the cumulative time variables for each new loop iteration
        sync_time_taken = 0.0;
        possion_p_loop_time_taken = 0.0;
        possion_res_loop_time_taken = 0.0;
        start = MPI_Wtime();
        set_timestep_interval(&del_t, imax, jmax, delx, dely, u, v, Re, tau, &tile_data, &sync_time_taken);
        timestep_time_taken = MPI_Wtime() - start;
        ifluid = (imax * jmax) - ibound;
        start = MPI_Wtime();
        compute_tentative_velocity(u, v, f, g, flag, imax, jmax,
            del_t, delx, dely, gamma, Re, &tile_data, &sync_time_taken);
        compute_velocity_time_taken = MPI_Wtime() - start;
        start = MPI_Wtime();
        compute_rhs(f, g, rhs, flag, imax, jmax, del_t, delx, dely, &tile_data);
        rhs_time_taken = MPI_Wtime() - start;
        start = MPI_Wtime();
        if (ifluid > 0) {
            itersor = poisson(p, rhs, flag, imax, jmax, delx, dely,
                eps, itermax, omega, &res, ifluid, &tile_data, &sync_time_taken, &possion_p_loop_time_taken, &possion_res_loop_time_taken);
            //screw_it_sync_everything(proc, u, &tile_data, &sync_time_taken);
        } else {
            itersor = 0;
        }
        possion_time_taken = MPI_Wtime() - start;
        if (proc == 0 && verbose > 1) {
            printf("%d t:%g, del_t:%g, SOR iters:%3d, res:%e, bcells:%d\n",
                iters, t+del_t, del_t, itersor, res, ibound);
        }
        start = MPI_Wtime();
        update_velocity(u, v, f, g, p, flag, imax, jmax, del_t, delx, dely, &tile_data, &sync_time_taken);
        update_velocity_time_taken = MPI_Wtime() - start;
        start = MPI_Wtime();
        apply_tile_boundary_conditions(u, v, flag, imax, jmax, ui, vi, &tile_data);
        halo_sync(proc, u, &tile_data, &sync_time_taken);
        halo_sync(proc, v, &tile_data, &sync_time_taken);
        boundary_time_taken = MPI_Wtime() - start;
        // Calculate the average time spent syncing across all ranks
        double avg_sync_time_taken = 1.0;
        MPI_Reduce(&sync_time_taken, &avg_sync_time_taken, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
        avg_sync_time_taken = avg_sync_time_taken / (double) nprocs;
        if (proc == 0) {
            // Output timing data
            printf("\n --- Timestep %f of %f ---\n", t, t_end);
            printf("timestep_time_taken: %f\n", timestep_time_taken);
            printf("compute_velocity_time_taken: %f\n", compute_velocity_time_taken);
            printf("rhs_time_taken: %f\n", rhs_time_taken);
            printf("possion_time_taken: %f\n", possion_time_taken);
            printf("update_velocity_time_taken: %f\n", update_velocity_time_taken);
            printf("boundary_time_taken: %f\n", boundary_time_taken);
            printf("sync_time_taken: %f\n", avg_sync_time_taken);
            printf("possion_p_loop_time_taken: %f\n", possion_p_loop_time_taken);
            printf("possion_res_loop_time_taken: %f\n", possion_res_loop_time_taken);
        }
    } /* End of main loop */
    // Copy all the tiles to the root node (0) to save the output
    sync_tile_to_root(proc, u, &tile_data);
    sync_tile_to_root(proc, v, &tile_data);
    sync_tile_to_root(proc, f, &tile_data);
    sync_tile_to_root(proc, g, &tile_data);
    sync_tile_to_root(proc, p, &tile_data);
    if (outfile != NULL && strcmp(outfile, "") != 0 && proc == 0) {
        write_bin(u, v, p, flag, imax, jmax, xlength, ylength, outfile);
    }
    free_matrix(u);
    free_matrix(v);
    free_matrix(f);
    free_matrix(g);
    free_matrix(p);
    free_matrix(rhs);
    free_matrix(flag);
    free_tile_data(&tile_data);
    /* fix: release the strdup'd option strings (previously leaked) */
    free(infile);
    free(outfile);
    printf("Program total time: %f\n", MPI_Wtime() - program_start);
    MPI_Finalize();
    return 0;
}
/* Save the simulation state to a file */
/* Save the simulation state to a binary file.
 * Layout: imax, jmax (int), xlength, ylength (float), then imax+2 rows of
 * u, v, p (jmax+2 floats each) and flag (jmax+2 chars).
 * Fixes: write errors are now detected and reported, and the leftover debug
 * printf that read v[170][20]/v[171][20] (out of bounds for small grids) has
 * been removed. */
void write_bin(float **u, float **v, float **p, char **flag,
    int imax, int jmax, float xlength, float ylength, char* file)
{
    int i;
    int ok = 1;   /* tracks whether every write succeeded */
    FILE *fp;

    fp = fopen(file, "wb");
    if (fp == NULL) {
        fprintf(stderr, "Could not open file '%s': %s\n", file,
            strerror(errno));
        return;
    }

    /* Header: grid dimensions and physical domain size */
    ok &= (fwrite(&imax, sizeof(int), 1, fp) == 1);
    ok &= (fwrite(&jmax, sizeof(int), 1, fp) == 1);
    ok &= (fwrite(&xlength, sizeof(float), 1, fp) == 1);
    ok &= (fwrite(&ylength, sizeof(float), 1, fp) == 1);

    /* Body: one row of each field per grid row, halo included */
    for (i = 0; i < imax+2; i++) {
        ok &= (fwrite(u[i], sizeof(float), jmax+2, fp) == (size_t)(jmax+2));
        ok &= (fwrite(v[i], sizeof(float), jmax+2, fp) == (size_t)(jmax+2));
        ok &= (fwrite(p[i], sizeof(float), jmax+2, fp) == (size_t)(jmax+2));
        ok &= (fwrite(flag[i], sizeof(char), jmax+2, fp) == (size_t)(jmax+2));
    }

    /* fclose flushes buffered data, so its result matters for writes */
    if (fclose(fp) != 0) {
        ok = 0;
    }
    if (!ok) {
        fprintf(stderr, "Error writing '%s': %s\n", file, strerror(errno));
    }
}
/* Read the simulation state from a file */
/* Read the simulation state from a binary file (format of write_bin).
 * Returns 0 on success, -1 if the file is missing (caller generates a
 * default state), 1 on a read error or header mismatch.
 * Fixes: the file handle is now closed on every error path (it leaked on
 * both mismatch returns), and fread results are checked. */
int read_bin(float **u, float **v, float **p, char **flag,
    int imax, int jmax, float xlength, float ylength, char* file)
{
    int i, j;
    FILE *fp;

    if (file == NULL) return -1;

    if ((fp = fopen(file, "rb")) == NULL) {
        fprintf(stderr, "Could not open file '%s': %s\n", file,
            strerror(errno));
        fprintf(stderr, "Generating default state instead.\n");
        return -1;
    }

    float xl, yl;
    if (fread(&i,  sizeof(int),   1, fp) != 1 ||
        fread(&j,  sizeof(int),   1, fp) != 1 ||
        fread(&xl, sizeof(float), 1, fp) != 1 ||
        fread(&yl, sizeof(float), 1, fp) != 1) {
        fprintf(stderr, "Could not read header from '%s'\n", file);
        fclose(fp);
        return 1;
    }

    if (i != imax || j != jmax) {
        fprintf(stderr, "Warning: imax/jmax have wrong values in %s\n", file);
        fprintf(stderr, "%s's imax = %d, jmax = %d\n", file, i, j);
        fprintf(stderr, "Program's imax = %d, jmax = %d\n", imax, jmax);
        fclose(fp);
        return 1;
    }
    if (xl != xlength || yl != ylength) {
        fprintf(stderr, "Warning: xlength/ylength have wrong values in %s\n", file);
        fprintf(stderr, "%s's xlength = %g, ylength = %g\n", file, xl, yl);
        fprintf(stderr, "Program's xlength = %g, ylength = %g\n", xlength,
            ylength);
        fclose(fp);
        return 1;
    }

    /* Body: one row of each field per grid row, halo included */
    for (i = 0; i < imax+2; i++) {
        if (fread(u[i], sizeof(float), jmax+2, fp) != (size_t)(jmax+2) ||
            fread(v[i], sizeof(float), jmax+2, fp) != (size_t)(jmax+2) ||
            fread(p[i], sizeof(float), jmax+2, fp) != (size_t)(jmax+2) ||
            fread(flag[i], sizeof(char), jmax+2, fp) != (size_t)(jmax+2)) {
            fprintf(stderr, "Unexpected end of data in '%s'\n", file);
            fclose(fp);
            return 1;
        }
    }
    fclose(fp);
    return 0;
}
/* Print a short hint pointing the user at the --help output. */
static void print_usage(void)
{
    fprintf(stderr, "Try '%s --help' for more information.\n", progname);
}
/* Print the package name and version to stderr. */
static void print_version(void)
{
    fprintf(stderr, "%s %s\n", PACKAGE, VERSION);
}
/* Print the full option summary to stderr. The option descriptions live in a
 * table so the output stays easy to extend; the emitted text is unchanged. */
static void print_help(void)
{
    static const char *const option_lines[] = {
        " -h, --help Print a summary of the options\n",
        " -V, --version Print the version number\n",
        " -v, --verbose=LEVEL Set the verbosity level. 0 is silent\n",
        " -x, --imax=IMAX Set the number of interior cells in the X direction\n",
        " -y, --jmax=JMAX Set the number of interior cells in the Y direction\n",
        " -t, --t-end=TEND Set the simulation end time\n",
        " -d, --del-t=DELT Set the simulation timestep size\n",
        " -i, --infile=FILE Read the initial simulation state from this file\n",
        " (default is 'karman.bin')\n",
        " -o, --outfile=FILE Write the final simulation state to this file\n",
        " (default is 'karman.bin')\n",
    };
    int k;

    fprintf(stderr, "%s. A simple computational fluid dynamics tutorial.\n\n",
        PACKAGE);
    fprintf(stderr, "Usage: %s [OPTIONS]...\n\n", progname);
    for (k = 0; k < (int)(sizeof option_lines / sizeof option_lines[0]); k++) {
        fputs(option_lines[k], stderr);
    }
}
|
render.h | #ifndef _RENDER_H_
#define _RENDER_H_
#include <iostream>
#include "cameras.h"
#include "ppm.h"
#include "radiance.h"
#include "random.h"
namespace edupt
{
int render_dof(const int width, const int height, const int samples, const int supersamples, double focus,
double aperture)
{
// カメラ位置
const Vec camera_position = Vec(50.0, 52.0, 220.0);
const Vec camera_dir = normalize(Vec(0.0, -0.04, -1.0));
const Vec camera_up = Vec(0.0, 1.0, 0.0);
// ワールド座標系でのスクリーンの大きさ
const double screen_width = 30.0 * width / height;
const double screen_height = 30.0;
// スクリーンまでの距離
const double screen_dist = 40.0;
// スクリーンを張るベクトル
const Vec screen_x = normalize(cross(camera_dir, camera_up)) * screen_width;
const Vec screen_y = normalize(cross(screen_x, camera_dir)) * screen_height;
const Vec screen_center = camera_position + camera_dir * screen_dist;
Color *image = new Color[width * height];
std::random_device seed_gen_;
std::mt19937 engine_ = std::mt19937(seed_gen_());
std::cout << width << "x" << height << " " << samples * (supersamples * supersamples) << " spp" << std::endl;
DoFCamera camera(width, height, screen_height, screen_dist, camera_position, camera_dir, camera_up,
supersamples, aperture, focus, engine_);
// PinholeCamera camera(width, height, screen_height, screen_dist, camera_position, camera_dir, camera_up,
// supersamples);
// OpenMP
// #pragma omp parallel for schedule(dynamic, 1) num_threads(4)
for (int y = 0; y < height; y++)
{
std::cerr << "Rendering (y = " << y << ") " << (100.0 * y / (height - 1)) << "%" << std::endl;
// Random rnd(y + 1);
ValueSampler<double> rnd(0, 1);
for (int x = 0; x < width; x++)
{
const int image_index = (height - y - 1) * width + x;
// supersamples x supersamples のスーパーサンプリング
for (int sy = 0; sy < supersamples; sy++)
{
for (int sx = 0; sx < supersamples; sx++)
{
Color accumulated_radiance = Color();
// 一つのサブピクセルあたりsamples回サンプリングする
for (int s = 0; s < samples; s++)
{
// accumulated_radiance =
// accumulated_radiance + radiance(camera.get_ray(x, y, sx, sy), &rnd, 0) / samples /
// (supersamples * supersamples);
accumulated_radiance =
accumulated_radiance + radiance_loop(camera.get_ray(x, y, sx, sy), &rnd) / samples /
(supersamples * supersamples);
}
image[image_index] = image[image_index] + accumulated_radiance;
}
}
}
}
// 出力
save_ppm_file(std::string("image" + std::to_string(focus) + ".ppm"), image, width, height);
return 0;
}
int render(const int width, const int height, const int samples, const int supersamples)
{
// カメラ位置
const Vec camera_position = Vec(50.0, 52.0, 220.0);
const Vec camera_dir = normalize(Vec(0.0, -0.04, -1.0));
const Vec camera_up = Vec(0.0, 1.0, 0.0);
// ワールド座標系でのスクリーンの大きさ
const double screen_width = 30.0 * width / height;
const double screen_height = 30.0;
// スクリーンまでの距離
const double screen_dist = 40.0;
// スクリーンを張るベクトル
const Vec screen_x = normalize(cross(camera_dir, camera_up)) * screen_width;
const Vec screen_y = normalize(cross(screen_x, camera_dir)) * screen_height;
const Vec screen_center = camera_position + camera_dir * screen_dist;
Color *image = new Color[width * height];
std::cout << width << "x" << height << " " << samples * (supersamples * supersamples) << " spp" << std::endl;
// OpenMP
// #pragma omp parallel for schedule(dynamic, 1) num_threads(4)
for (int y = 0; y < height; y++)
{
std::cerr << "Rendering (y = " << y << ") " << (100.0 * y / (height - 1)) << "%" << std::endl;
// Random rnd(y + 1);
ValueSampler<double> rnd(0, 1);
for (int x = 0; x < width; x++)
{
const int image_index = (height - y - 1) * width + x;
// supersamples x supersamples のスーパーサンプリング
for (int sy = 0; sy < supersamples; sy++)
{
for (int sx = 0; sx < supersamples; sx++)
{
Color accumulated_radiance = Color();
// 一つのサブピクセルあたりsamples回サンプリングする
for (int s = 0; s < samples; s++)
{
const double rate = (1.0 / supersamples);
const double r1 = sx * rate + rate / 2.0;
const double r2 = sy * rate + rate / 2.0;
// スクリーン上の位置
const Vec screen_position = screen_center + screen_x * ((r1 + x) / width - 0.5) +
screen_y * ((r2 + y) / height - 0.5);
// レイを飛ばす方向
const Vec dir = normalize(screen_position - camera_position);
accumulated_radiance = accumulated_radiance + radiance_loop(Ray(camera_position, dir), &rnd) /
samples / (supersamples * supersamples);
}
image[image_index] = image[image_index] + accumulated_radiance;
}
}
}
}
// 出力
save_ppm_file(std::string("image_1.ppm"), image, width, height);
return 0;
}
}; // namespace edupt
#endif
|
jacobi-ompacc-multiGPU.c | /*
* Rectangular matrix multiplication, started from MIT Cilk matmul.cilk example
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <assert.h>
#include "omp.h"
#include <pthread.h>
#include <string.h>
#define REAL double
/* in second */
#define read_timer() omp_get_wtime()
/* in ms */
#define read_timer_ms() (omp_get_wtime()*1000.0)
#define MAX_GPU_COUNT 4
/* Solver configuration shared by all Jacobi variants in this file. */
int mits=5000;                                /* maximum Jacobi iterations */
REAL tol=0.0000000001,relax=1.0,alpha=0.0543; /* convergence tolerance, relaxation factor, equation coefficient */
REAL error_ref= 9.213041E-04, resid_ref = 2.355794E-08; // depending on MSIZE and precision (double vs. float) !!
/* Zero-fill u and uold and build the right-hand side f for an n x m grid over
 * the domain [-1,1]^2; also computes the grid spacings *dx and *dy. */
void initialize(REAL *u, REAL *uold, REAL *f, REAL* dx, REAL* dy, int m, int n)
{
    int i,j, xx,yy;
    //double PI=3.1415926;

    *dx = 2.0 / (n-1);
    *dy = 2.0 / (m-1);

    /* Initialize initial condition and RHS */
#pragma omp parallel for private(xx,yy,j,i)
    for (i=0;i<n;i++)
        for (j=0;j<m;j++)
        {
            /* NOTE(review): xx and yy are truncated to int, collapsing the
             * coordinates onto a handful of integer values. The reference
             * residuals (resid_ref) asserted elsewhere in this file were
             * produced with exactly this behaviour, so the casts must not
             * be "fixed" without regenerating the references. */
            xx =(int)( -1.0 + *dx * (i-1));
            yy = (int)(-1.0 + *dy * (j-1)) ;
            u[j + i * m] = 0.0;
            uold[j + i * m] = 0.0;
            f[j + i * m] = -1.0*alpha *(1.0-xx*xx)*(1.0-yy*yy)\
                - 2.0*(1.0-xx*xx)-2.0*(1.0-yy*yy);
        }
}
/* Largest element-wise relative difference |A-B| / A over an n x n matrix.
 * Behaviour matches the original exactly: the divisor is the entry of A
 * itself, so a zero entry in A still yields inf/nan, and traversal is the
 * same row-major order. */
double maxerror(REAL *A, REAL *B, int n)
{
    double worst = 0.0;
    int idx;

    for (idx = 0; idx < n * n; idx++) {
        double rel = (A[idx] - B[idx]) / A[idx];
        if (rel < 0)
            rel = -rel;
        if (rel > worst)
            worst = rel;
    }
    return worst;
}
/* One Jacobi/SOR sweep over the row slab [offset+1, offset+n-1) of an m-wide
 * grid: reads uold, writes u, and accumulates the squared residual of the
 * slab into *error. Uses the file-scope globals relax (relaxation factor)
 * and alpha (equation coefficient).
 *
 * Fix: the worksharing pragma carried a `nowait` clause, which the OpenMP
 * specification does not permit on the combined `parallel for` construct
 * (GCC rejects it outright); it has been removed. The unused locals
 * (k, tid) and dead commented-out target pragma were dropped as well. */
void jacobi_GPU(REAL *u, REAL *uold, REAL *f, REAL dx, REAL dy, int offset, int m, int n, double* error)
{
    double omega;
    int i, j;
    double resid, ax, ay, b;

    omega = relax;
    /* Coefficients of the 5-point stencil */
    ax = 1.0/(dx*dx);                       /* X-direction coef */
    ay = 1.0/(dy*dy);                       /* Y-direction coef */
    b = -2.0/(dx*dx)-2.0/(dy*dy) - alpha;   /* Central coeff */

    double err_tmp = 0.0;

#pragma omp parallel for shared(uold,u,f, offset, ax,ay,b,omega,n) private(resid,j,i) reduction(+:err_tmp)
    for (i=offset+1;i<offset+(n-1);i++)
    {
        for (j=1;j<(m-1);j++)
        {
            resid = (ax*(uold[j + (i-1)*m] + uold[j + (i+1)*m])\
                + ay*(uold[j-1 + i*m] + uold[j+1 + i*m])+ b * uold[j + i*m] - f[j+i*m])/b;
            u[j+i*m] = uold[j+i*m] - omega * resid;
            err_tmp = err_tmp + resid*resid ;
        }
    }

    /* Fold this slab's contribution into the caller's accumulator */
    *error += err_tmp;
}
/* CPU OpenMP Jacobi/SOR reference solver: iterates until the residual drops
 * below tol or mits iterations are reached, then checks the residual against
 * the hard-coded reference value resid_ref (asserts on mismatch).
 * NOTE(review): u and uold are swapped locally each iteration, so after an
 * odd number of iterations the newest solution lives in the buffer the
 * caller passed as uold — callers must not assume u holds the result. */
void jacobi_omp(REAL *u, REAL *uold, REAL *f, REAL dx, REAL dy, int m, int n)
{
    double omega;
    int i,j,k;
    double error,resid,ax,ay,b;
    // double error_local;
    // float ta,tb,tc,td,te,ta1,ta2,tb1,tb2,tc1,tc2,td1,td2;
    // float te1,te2;
    // float second;

    omega=relax;
    /*
     * Initialize coefficients of the 5-point stencil */
    ax = 1.0/(dx*dx); /* X-direction coef */
    ay = 1.0/(dy*dy); /* Y-direction coef */
    b = -2.0/(dx*dx)-2.0/(dy*dy) - alpha; /* Central coeff */
    error = 10.0 * tol;  /* force at least one iteration */
    k = 1;
    REAL* tmp;
    while ((k<=mits)&&(error>tol))
    {
        error = 0.0;
        /* Copy new solution into old (by pointer swap, not memcpy) */
        tmp = u;
        u = uold;
        uold = tmp;
#pragma omp parallel
        {
            /* nowait is legal here: the for is inside a separate parallel
             * region, and nothing follows it before the region ends */
#pragma omp for private(resid,j,i) reduction(+:error) nowait
            for (i=1;i<(n-1);i++)
                for (j=1;j<(m-1);j++)
                {
                    resid = (ax*(uold[j + (i-1)*m] + uold[j + (i+1)*m])\
                        + ay*(uold[j-1 + i*m] + uold[j+1 + i*m])+ b * uold[j + i*m] - f[j+i*m])/b;
                    u[j+i*m] = uold[j+i*m] - omega * resid;
                    error = error + resid*resid ;
                }
        }
        /* omp end parallel */
        /* Error check */
        k = k + 1;
        if (k%500==0)
            printf("OMP_run: finished %d iteration.\n",k);
        /* RMS-style normalisation of the accumulated squared residual */
        error = sqrt(error)/(n*m);
        // printf("%d %e %e\n",k,error,tol);
    } /* End iteration loop */
    printf("Total Number of Iterations:%d\n",k);
    printf("Residual:%E\n", error);
    printf("Residual_ref :%E\n", resid_ref);
    printf ("Diff ref=%E\n", fabs(error-resid_ref));
    assert (fabs(error-resid_ref)/resid_ref < 1E-5);
}
/* Runs the Jacobi solver twice on a 512x512 grid: once with the plain OpenMP
 * CPU version, then with a multi-GPU variant where each OpenMP thread drives
 * one CUDA device on a row slab (with 1-row halos), and prints timings.
 * NOTE(review): cudaGetDeviceCount is called with no CUDA header included,
 * relying on an implicit declaration — confirm how this file is compiled. */
int main(int argc, char *argv[])
{
    int m,n, idev;
    int num_threads;
    REAL *u, *uold, *f;
    REAL dx,dy;
    double seq_elapsed, omp_for_elapsed, acc_elapsed;
    int halosize = 1;   /* rows of overlap exchanged between slabs */
    // if (argc != 3) {
    // fprintf(stderr, "Usage: matmul <m> <n>\n");
    // exit(1);
    // }
    // m = atoi(argv[1]);
    // n = atoi(argv[2]);
    m = 512;
    n = 512;
    u = (REAL*)malloc(m * n * sizeof(REAL));
    uold = (REAL*)malloc(m * n * sizeof(REAL));
    f = (REAL*)malloc(m * n * sizeof(REAL));
    /* openmp parallel for version */
    initialize(u, uold, f, &dx, &dy, m, n);
    omp_for_elapsed = omp_get_wtime();
    jacobi_omp(u, uold, f, dx, dy, m,n);
    omp_for_elapsed = omp_get_wtime() - omp_for_elapsed;
    /* Re-initialise the fields before the GPU run */
    initialize(u, uold, f, &dx, &dy, m, n);
    int GPU_N = 0;
    cudaGetDeviceCount(&GPU_N);
    if (GPU_N > MAX_GPU_COUNT)
    {
        GPU_N = MAX_GPU_COUNT;
    }
    printf("CUDA-capable device count: %i\n", GPU_N);
    /* One OpenMP thread per GPU */
    omp_set_num_threads(GPU_N);
#pragma omp parallel
    {
#pragma omp master
        { num_threads = omp_get_num_threads();
        }
    }
    double ompacc_time = read_timer();
    acc_elapsed = omp_get_wtime();
    REAL* tmp;
    double* error;       /* per-thread partial residuals */
    double error_sum;    /* shared global residual */
    error = (double*)malloc(sizeof(double)*GPU_N);
#pragma omp parallel shared (GPU_N, u, uold, f, m, n, error,error_sum) private(idev)
    // for (idev = 0; idev < GPU_N; idev++)
    {
        int tid = omp_get_thread_num();
        cudaSetDevice(tid);
        /* Compute this thread's slab: base size plus remainder distribution,
         * then widen by the halo rows shared with neighbouring slabs. */
        int size = n / GPU_N;
        int offset = size * tid;
        if(tid < n%GPU_N)
        {
            size++;
        }
        if(tid >= n%GPU_N)
            offset += n%GPU_N;
        else
            offset += tid;
        if(tid != 0)
            offset = offset - halosize;
        size = size + halosize;
        if(tid != GPU_N-1 && tid != 0)
            size = size + halosize;
        printf("thread %d working on GPU devices %d with size %d copying data from y_ompacc with offset %d\n",tid, tid, size,offset);
        int i, j, k;
        k = 1;
        error_sum = 10.0 * tol;  /* force at least one iteration */
        /* Copy new solution into old */
        while ((k<=mits)&&(error_sum>tol))
        {
#pragma omp barrier
#pragma omp master
            {
                /* Swap buffers and reset the shared residual (master only) */
                tmp = u;
                u = uold;
                uold = tmp;
                error_sum = 0.0;
            }
            error[tid] = 0.0;
#pragma omp barrier
            jacobi_GPU(u, uold, f, dx, dy, offset, m,size,&error[tid]);
            /* Error check */
            k = k + 1;
#pragma omp master
            {
                if (k%500==0)
                    printf("GPU_run: finished %d iteration.\n",k);
            }
#pragma omp critical
            {
                error_sum += error[tid];
                // printf("thread %d error = %f\n",tid, error_sum);
            }
#pragma omp barrier
#pragma omp master
            {
                /* NOTE(review): no barrier separates this write from the
                 * other threads' read of error_sum in the while-condition of
                 * the next iteration — possible data race; confirm. */
                error_sum = sqrt(error_sum)/(n*m);
                // printf("%d %e %e\n",k,error_sum,tol);
            }
        } /* End iteration loop */
        /* NOTE(review): critical immediately followed by master means only
         * the master thread prints; the critical is effectively redundant. */
#pragma omp critical
#pragma omp master
        {
            printf("Total Number of Iterations:%d\n",k);
            printf("Residual:%E\n", error_sum);
            printf("Residual_ref :%E\n", resid_ref);
            printf ("Diff ref=%E\n", fabs(error_sum-resid_ref));
            assert (fabs(error_sum-resid_ref)/resid_ref < 1E-5);
        }
    } // end of idev loop
    acc_elapsed = omp_get_wtime() - acc_elapsed;
    free(error);
    printf("=======================================================================\n");
    printf("\t\tmatmul(%dx%d) example on %d threads(cores)\n", n, n, num_threads);
    printf("-----------------------------------------------------------------------\n");
    printf("Performance: Runtime (s)\t MFLOPS\t\t\t Error\n");
    printf("-----------------------------------------------------------------------\n");
    // printf("Sequential : %4f \t\t %4f\t\t%g\n", seq_elapsed, 2.0 * n * n * n / (1.0e6 * (seq_elapsed)), maxerror(C_seq, C_seq, n));
    printf("OMP For : %4f \t\t %4f\t\t\n", omp_for_elapsed, 2.0 * n * n * n / (1.0e6 * (omp_for_elapsed)));
    printf("ACC For : %4f \t\t %4f\t\t\n", acc_elapsed, 2.0 * n * n * n / (1.0e6 * (acc_elapsed)));
    free(u);
    free(uold);
    free(f);
    return 0;
}
|
GB_unop__identity_uint32_uint32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__(none))
// op(A') function: GB (_unop_tran__identity_uint32_uint32)
// C type: uint32_t
// A type: uint32_t
// cast: uint32_t cij = aij
// unaryop: cij = aij
#define GB_ATYPE \
uint32_t
#define GB_CTYPE \
uint32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
uint32_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
uint32_t z = aij ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_UINT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
#if 0
GrB_Info GB (_unop_apply__(none))
(
uint32_t *Cx, // Cx and Ax may be aliased
const uint32_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
uint32_t aij = Ax [p] ;
uint32_t z = aij ;
Cx [p] = z ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
uint32_t aij = Ax [p] ;
uint32_t z = aij ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
/* C = op (cast (A')): transpose, typecast, and apply a unary operator.
 * For the identity operator on uint32 this is effectively a pure transpose;
 * the actual loop lives in the shared template GB_unop_transpose.c, which
 * consumes the GB_* macros defined above in this auto-generated file.
 * Returns GrB_NO_VALUE when the operator/type is compiled out (GB_DISABLE). */
GrB_Info GB (_unop_tran__identity_uint32_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
cmpflx.c | /*
A simple 2D hydro code
(C) Romain Teyssier : CEA/IRFU -- original F90 code
(C) Pierre-Francois Lavallee : IDRIS -- original F90 code
(C) Guillaume Colin de Verdiere : CEA/DAM -- for the C version
*/
/*
This software is governed by the CeCILL license under French law and
abiding by the rules of distribution of free software. You can use,
modify and/ or redistribute the software under the terms of the CeCILL
license as circulated by CEA, CNRS and INRIA at the following URL
"http://www.cecill.info".
As a counterpart to the access to the source code and rights to copy,
modify and redistribute granted by the license, users are provided only
with a limited warranty and the software's author, the holder of the
economic rights, and the successive licensors have only limited
liability.
In this respect, the user's attention is drawn to the risks associated
with loading, using, modifying and/or developing or reproducing the
software by the user in light of its specific status of free software,
that may mean that it is complicated to manipulate, and that also
therefore means that it is reserved for developers and experienced
professionals having in-depth computer knowledge. Users are therefore
encouraged to load and test the software's suitability as regards their
requirements in conditions enabling the security of their systems and/or
data to be ensured and, more generally, to use and operate it in the
same conditions as regards security.
The fact that you are presently reading this means that you have had
knowledge of the CeCILL license and that you accept its terms.
*/
#include <math.h>
#include <malloc.h>
// #include <unistd.h>
// #include <stdlib.h>
#include <string.h>
#include <stdio.h>
#ifndef HMPP
#include "parametres.h"
#include "utils.h"
#include "cmpflx.h"
#include "perfcnt.h"
/* Compute the conservative fluxes (mass, normal/transverse momentum, total
 * energy, plus any extra advected quantities) from the Godunov states qgdnv
 * produced by the Riemann solver, one row of `narray` interfaces per slice. */
void
cmpflx(const int narray,
       const int Hnxyt,
       const int Hnvar,
       const real_t Hgamma,
       const int slices,
       const int Hstep,
       real_t qgdnv[Hnvar][Hstep][Hnxyt],
       real_t flux[Hnvar][Hstep][Hnxyt]) {
    int nface, i, IN;
    real_t entho, ekin, etot;
    WHERE("cmpflx");
    int s;

    nface = narray;
    /* 1/(gamma-1): converts pressure to internal energy density */
    entho = one / (Hgamma - one);
    FLOPS(1, 1, 0, 0);

    // Compute fluxes; slices are independent, so parallelise over s
#pragma omp parallel for private(s, i, ekin, etot), shared(flux)
    for (s = 0; s < slices; s++) {
        for (i = 0; i < nface; i++) {
            real_t qgdnvID = qgdnv[ID][s][i];
            real_t qgdnvIU = qgdnv[IU][s][i];
            real_t qgdnvIP = qgdnv[IP][s][i];
            real_t qgdnvIV = qgdnv[IV][s][i];
            // Mass flux: density times normal velocity
            real_t massDensity = qgdnvID * qgdnvIU;
            flux[ID][s][i] = massDensity;
            // Normal momentum flux (advection plus pressure)
            flux[IU][s][i] = massDensity * qgdnvIU + qgdnvIP;
            // Transverse momentum 1
            flux[IV][s][i] = massDensity * qgdnvIV;
            // Total energy flux: (E + P) * u with E = P/(gamma-1) + kinetic
            ekin = half * qgdnvID * (Square(qgdnvIU) + Square(qgdnvIV));
            etot = qgdnvIP * entho + ekin;
            flux[IP][s][i] = qgdnvIU * (etot + qgdnvIP);
        }
    }
    {
        int nops = slices * nface;
        FLOPS(13 * nops, 0 * nops, 0 * nops, 0 * nops);
    }
    // Other advected quantities: passively carried by the mass flux
    if (Hnvar > IP) {
        for (s = 0; s < slices; s++) {
            for (IN = IP + 1; IN < Hnvar; IN++) {
                for (i = 0; i < nface; i++) {
                    flux[IN][s][i] = flux[IN][s][i] * qgdnv[IN][s][i];
                }
            }
        }
    }
}                               // cmpflx
#endif
//EOF
|
threadpool.h | /* Copyright 2015 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
/* Modifications Copyright (c) Microsoft. */
#pragma once
#include <string>
#include <vector>
#include <functional>
#include <memory>
#include "core/common/common.h"
#include "core/platform/env.h"
#include "core/common/optional.h"
#include <functional>
#include <memory>
// This file use PIMPL to avoid having eigen headers here
namespace Eigen {
class Allocator;
class ThreadPoolInterface;
} // namespace Eigen
namespace onnxruntime {
// Cost estimate for one unit of tensor work; used by ParallelFor overloads
// to decide how finely to shard a loop across threads.
struct TensorOpCost {
  double bytes_loaded;    // estimated bytes read per unit of work
  double bytes_stored;    // estimated bytes written per unit of work
  double compute_cycles;  // estimated CPU cycles of computation per unit of work
};
template <typename Environment>
class ThreadPoolTempl;
namespace concurrency {
class ExtendedThreadPoolInterface;
class LoopCounter;
class ThreadPool {
 public:
#ifdef _WIN32
  using NAME_CHAR_TYPE = wchar_t;
#else
  using NAME_CHAR_TYPE = char;
#endif
  // Constructs a pool for running with "degree_of_parallelism" threads with
  // specified "name". env->StartThread() is used to create individual threads
  // with the given ThreadOptions. If "low_latency_hint" is true the thread pool
  // implementation may use it as a hint that lower latency is preferred at the
  // cost of higher CPU usage, e.g. by letting one or more idle threads spin
  // wait. Conversely, if the threadpool is used to schedule high-latency
  // operations like I/O the hint should be set to false.
  //
  // REQUIRES: degree_of_parallelism > 0
  // The allocator parameter is only used for creating a Eigen::ThreadPoolDevice to be used with Eigen Tensor classes.
  ThreadPool(Env* env,
             const ThreadOptions& thread_options,
             const NAME_CHAR_TYPE* name,
             int degree_of_parallelism,
             bool low_latency_hint);

  // Waits until all scheduled work has finished and then destroy the
  // set of threads.
  ~ThreadPool();

  // Schedules fn() for execution in the pool of threads. The function may run
  // synchronously if it cannot be enqueued. This will occur if the thread pool's
  // degree-of-parallelism is 1, but it may also occur for implementation-dependent
  // reasons such as if queues used for buffering work are full.
  void Schedule(std::function<void()> fn);

  // Returns the number of shards used by ParallelForFixedBlockSizeScheduling
  // with these parameters.
  int NumShardsUsedByFixedBlockSizeScheduling(std::ptrdiff_t total,
                                              std::ptrdiff_t block_size) const;

  // ParallelFor shards the "total" units of work assuming each unit of work
  // having roughly "cost_per_unit" cost, in cycles. Each unit of work is
  // indexed 0, 1, ..., total - 1. Each shard contains 1 or more units of work
  // and the total cost of each shard is roughly the same.
  //
  // "cost_per_unit" is an estimate of the number of CPU cycles (or nanoseconds
  // if not CPU-bound) to complete a unit of work. Overestimating creates too
  // many shards and CPU time will be dominated by per-shard overhead, such as
  // Context creation. Underestimating may not fully make use of the specified
  // parallelism, and may also cause inefficiencies due to load balancing
  // issues and stragglers.
  void ParallelFor(std::ptrdiff_t total, double cost_per_unit,
                   const std::function<void(std::ptrdiff_t first, std::ptrdiff_t last)>& fn);

  // Convenience wrapper: forwards a plain cycle-count cost estimate to the
  // TensorOpCost overload. Safe to call with tp == nullptr (runs sequentially).
  static void TryParallelFor(concurrency::ThreadPool* tp, std::ptrdiff_t total, double cost_per_unit,
                             const std::function<void(std::ptrdiff_t first, std::ptrdiff_t last)>& fn) {
    TryParallelFor(tp, total, TensorOpCost{0, 0, static_cast<double>(cost_per_unit)}, fn);
  }

  // Same sharded loop as above, but cost is described by a TensorOpCost
  // (bytes loaded/stored and compute cycles) instead of a single number.
  void ParallelFor(std::ptrdiff_t total, const TensorOpCost& cost_per_unit,
                   const std::function<void(std::ptrdiff_t first, std::ptrdiff_t last)>& fn);

  static void TryParallelFor(concurrency::ThreadPool* tp, std::ptrdiff_t total, const TensorOpCost& cost_per_unit,
                             const std::function<void(std::ptrdiff_t first, std::ptrdiff_t last)>& fn);

  // Return the degree of parallelism that code should assume when using the thread pool.
  // This API takes into account if OpenMP is enabled/disabled, and if the thread pool ptr is
  // nullptr. It decouples the degree of parallelism for use with the thread pool from
  // the implementation choice of whether this matches the number of threads created in
  // the pool.
  //
  // Currently, a loop with degree-of-parallelism N is supported by a pool of N-1 threads
  // working in combination with the thread initiating the loop.
  static int DegreeOfParallelism(const concurrency::ThreadPool* tp);

  // Directly schedule the 'total' tasks to the underlying threadpool, without
  // cutting them by halves
  void SimpleParallelFor(std::ptrdiff_t total, const std::function<void(std::ptrdiff_t)>& fn);

  // Runs fn(i) for i in [0, total); uses OpenMP when built with it, the pool
  // when tp is non-null, and a plain sequential loop otherwise.
  inline static void TrySimpleParallelFor(ThreadPool* tp, std::ptrdiff_t total,
                                          const std::function<void(std::ptrdiff_t)>& fn) {
#ifdef _OPENMP
    ORT_UNUSED_PARAMETER(tp);
#pragma omp parallel for
    for (std::ptrdiff_t i = 0; i < total; ++i) {
      fn(i);
    }
#else
    if (tp != nullptr) {
      tp->SimpleParallelFor(total, fn);
    } else {
      for (std::ptrdiff_t i = 0; i < total; ++i) {
        // In many cases, fn can be inlined here.
        fn(i);
      }
    }
#endif
  }

  /**
   * Tries to call the given function in parallel, with calls split into (num_batches) batches.
   *\param num_batches If it is zero, it will be replaced to the value of DegreeOfParallelism().
   *\param fn A std::function or STL style functor with signature of "void f(int32_t);"
   * Pitfall: Caller should cap `num_batches` to a reasonable value based on the cost of `fn` and the value of `total`.
   *For example, if fn is as simple as: int sum=0; fn = [&](int i){sum +=i;} and `total` is 100, then num_batches should
   *be just 1.
   **/
  template <typename F>
  inline static void TryBatchParallelFor(ThreadPool* tp, std::ptrdiff_t total, F&& fn, std::ptrdiff_t num_batches) {
#ifdef _OPENMP
    ORT_UNUSED_PARAMETER(tp);
    ORT_UNUSED_PARAMETER(num_batches);
#pragma omp parallel for
    for (std::ptrdiff_t i = 0; i < total; ++i) {
      fn(i);
    }
#else
    if (tp == nullptr) {
      for (std::ptrdiff_t i = 0; i < total; ++i) {
        // In many cases, fn can be inlined here.
        fn(i);
      }
      return;
    }
    if (total <= 0)
      return;
    if (total == 1) {
      fn(0);
      return;
    }
    if (num_batches <= 0) {
      // Fully qualify std::ptrdiff_t; the unqualified name relied on a
      // transitive global-namespace declaration.
      num_batches = std::min<std::ptrdiff_t>(total, DegreeOfParallelism(tp));
    }
    if (num_batches <= 1) {
      // BUG FIX: the loop counter was 'int', which truncates/overflows when
      // total > INT_MAX; use std::ptrdiff_t to match 'total'.
      for (std::ptrdiff_t i = 0; i < total; i++) {
        fn(i);
      }
      return;
    }
    tp->SimpleParallelFor(num_batches, [&](std::ptrdiff_t batch_index) {
      auto work = PartitionWork(batch_index, num_batches, total);
      for (std::ptrdiff_t i = work.start; i < work.end; i++) {
        fn(i);
      }
    });
#endif
  }

  // Half-open index range [start, end) assigned to one batch.
  struct WorkInfo {
    std::ptrdiff_t start;
    std::ptrdiff_t end;
  };

  /** Calculate the start and end offsets for a batch.
      The first (total_work % num_batches) batches get one extra unit each.
      @remarks Based on MlasPartitionWork
  */
  static WorkInfo PartitionWork(std::ptrdiff_t batch_idx, std::ptrdiff_t num_batches, std::ptrdiff_t total_work) {
    const std::ptrdiff_t work_per_batch = total_work / num_batches;
    const std::ptrdiff_t work_per_batch_extra = total_work % num_batches;

    WorkInfo info;
    if (batch_idx < work_per_batch_extra) {
      info.start = (work_per_batch + 1) * batch_idx;
      info.end = info.start + work_per_batch + 1;
    } else {
      info.start = work_per_batch * batch_idx + work_per_batch_extra;
      info.end = info.start + work_per_batch;
    }
    return info;
  }

  ORT_DISALLOW_COPY_AND_ASSIGNMENT(ThreadPool);

 private:
  friend class LoopCounter;

  // Returns the number of threads created in the pool. This may be different from the
  // value returned by DegreeOfParallelism to code using the pool.
  int NumThreads() const;

  // Returns current thread id between 0 and NumThreads() - 1, if called from a
  // thread in the pool. Returns -1 otherwise.
  int CurrentThreadId() const;

  // Run fn with up to n degree-of-parallelism enlisting the thread pool for
  // help. The degree-of-parallelism includes the caller, and so if n==1
  // then the function will run directly in the caller. The fork-join
  // synchronization is handled in the thread pool, and so any state captured
  // by fn() is safe from concurrent access once RunWithHelp returns.
  void RunInParallel(std::function<void()> fn, int n);

  // Divides the work represented by the range [0, total) into k shards.
  // Calls fn(i*block_size, (i+1)*block_size) from the ith shard (0 <= i < k).
  // Each shard may be executed on a different thread in parallel, depending on
  // the number of threads available in the pool.
  // When (i+1)*block_size > total, fn(i*block_size, total) is called instead.
  // Here, k = NumShardsUsedByFixedBlockSizeScheduling(total, block_size).
  // Requires 0 < block_size <= total.
  void ParallelForFixedBlockSizeScheduling(std::ptrdiff_t total, std::ptrdiff_t block_size,
                                           const std::function<void(std::ptrdiff_t, std::ptrdiff_t)>& fn);

  // Return whether or not the calling thread should run a loop of
  // num_iterations divided in chunks of block_size in parallel. If not,
  // the caller should run the loop sequentially.
  bool ShouldParallelizeLoop(const std::ptrdiff_t num_iterations,
                             const std::ptrdiff_t block_size = 1) const;

  ThreadOptions thread_options_;

  // If a thread pool is created with degree_of_parallelism != 1 then an underlying
  // EigenThreadPool is used to create OS threads and handle work distribution to them.
  // If degree_of_parallelism == 1 then underlying_threadpool_ is left as nullptr
  // and parallel work is run directly by the caller.
  ExtendedThreadPoolInterface* underlying_threadpool_ = nullptr;

  // If used, underlying_threadpool_ is instantiated and owned by the ThreadPool.
  std::unique_ptr<ThreadPoolTempl<Env> > extended_eigen_threadpool_;
};
} // namespace concurrency
} // namespace onnxruntime
|
CrossValidationMMD.h | /*
* Copyright (c) The Shogun Machine Learning Toolbox
* Written (w) 2016 - 2017 Soumyajit De
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* The views and conclusions contained in the software and documentation are those
* of the authors and should not be interpreted as representing official policies,
* either expressed or implied, of the Shogun Development Team.
*/
#ifndef CROSS_VALIDATION_MMD_H_
#define CROSS_VALIDATION_MMD_H_
#include <memory>
#include <algorithm>
#include <numeric>
#include <shogun/lib/SGMatrix.h>
#include <shogun/lib/SGVector.h>
#include <shogun/labels/BinaryLabels.h>
#include <shogun/features/SubsetStack.h>
#include <shogun/evaluation/CrossValidationSplitting.h>
#include <shogun/statistical_testing/internals/mmd/PermutationMMD.h>
using std::unique_ptr;
namespace shogun
{
namespace internal
{
namespace mmd
{
#ifndef DOXYGEN_SHOULD_SKIP_THIS
struct CrossValidationMMD : PermutationMMD
{
CrossValidationMMD(index_t n_x, index_t n_y, index_t num_folds, index_t num_null_samples)
{
ASSERT(n_x>0 && n_y>0);
ASSERT(num_folds>0);
ASSERT(num_null_samples>0);
m_n_x=n_x;
m_n_y=n_y;
m_num_folds=num_folds;
m_num_null_samples=num_null_samples;
m_num_runs=DEFAULT_NUM_RUNS;
m_alpha=DEFAULT_ALPHA;
init();
}
void operator()(const KernelManager& kernel_mgr)
{
REQUIRE(m_rejections.num_rows==m_num_runs*m_num_folds,
"Number of rows in the measure matrix (was %d), has to be >= %d*%d = %d!\n",
m_rejections.num_rows, m_num_runs, m_num_folds, m_num_runs*m_num_folds);
REQUIRE(m_rejections.num_cols==kernel_mgr.num_kernels(),
"Number of columns in the measure matrix (was %d), has to equal to the nunber of kernels (%d)!\n",
m_rejections.num_cols, kernel_mgr.num_kernels());
const index_t size=m_n_x+m_n_y;
const index_t orig_n_x=m_n_x;
const index_t orig_n_y=m_n_y;
SGVector<float64_t> null_samples(m_num_null_samples);
SGVector<float32_t> precomputed_km(size*(size+1)/2);
for (auto k=0; k<kernel_mgr.num_kernels(); ++k)
{
auto kernel=kernel_mgr.kernel_at(k);
for (auto i=0; i<size; ++i)
{
for (auto j=i; j<size; ++j)
{
auto index=i*size-i*(i+1)/2+j;
precomputed_km[index]=kernel->kernel(i, j);
}
}
for (auto current_run=0; current_run<m_num_runs; ++current_run)
{
m_kfold_x->build_subsets();
m_kfold_y->build_subsets();
for (auto current_fold=0; current_fold<m_num_folds; ++current_fold)
{
generate_inds(current_fold);
std::fill(m_inverted_inds.data(), m_inverted_inds.data()+m_inverted_inds.size(), -1);
for (index_t idx=0; idx<m_xy_inds.size(); ++idx)
m_inverted_inds[m_xy_inds[idx]]=idx;
m_stack->add_subset(m_xy_inds);
if (m_permuted_inds.size()!=m_xy_inds.size())
m_permuted_inds=SGVector<index_t>(m_xy_inds.size());
m_inverted_permuted_inds.set_const(-1);
for (auto n=0; n<m_num_null_samples; ++n)
{
std::iota(m_permuted_inds.data(), m_permuted_inds.data()+m_permuted_inds.size(), 0);
CMath::permute(m_permuted_inds);
m_stack->add_subset(m_permuted_inds);
SGVector<index_t> inds=m_stack->get_last_subset()->get_subset_idx();
m_stack->remove_subset();
for (int idx=0; idx<inds.size(); ++idx)
m_inverted_permuted_inds(inds[idx], n)=idx;
}
m_stack->remove_subset();
terms_t terms;
for (auto i=0; i<size; ++i)
{
auto inverted_row=m_inverted_inds[i];
auto idx_base=i*size-i*(i+1)/2;
for (auto j=i; j<size; ++j)
{
auto inverted_col=m_inverted_inds[j];
if (inverted_row!=-1 && inverted_col!=-1)
{
auto idx=idx_base+j;
add_term_upper(terms, precomputed_km[idx], inverted_row, inverted_col);
}
}
}
auto statistic=compute(terms);
#pragma omp parallel for
for (auto n=0; n<m_num_null_samples; ++n)
{
terms_t null_terms;
for (auto i=0; i<size; ++i)
{
auto inverted_row=m_inverted_permuted_inds(i, n);
auto idx_base=i*size-i*(i+1)/2;
for (auto j=i; j<size; ++j)
{
auto inverted_col=m_inverted_permuted_inds(j, n);
if (inverted_row!=-1 && inverted_col!=-1)
{
auto idx=idx_base+j;
if (inverted_row<=inverted_col)
add_term_upper(null_terms, precomputed_km[idx], inverted_row, inverted_col);
else
add_term_upper(null_terms, precomputed_km[idx], inverted_col, inverted_row);
}
}
}
null_samples[n]=compute(null_terms);
}
std::sort(null_samples.data(), null_samples.data()+null_samples.size());
SG_SDEBUG("statistic=%f\n", statistic);
float64_t idx=null_samples.find_position_to_insert(statistic);
SG_SDEBUG("index=%f\n", idx);
auto p_value=1.0-idx/m_num_null_samples;
bool rejected=p_value<m_alpha;
SG_SDEBUG("p-value=%f, alpha=%f, rejected=%d\n", p_value, m_alpha, rejected);
m_rejections(current_run*m_num_folds+current_fold, k)=rejected;
m_n_x=orig_n_x;
m_n_y=orig_n_y;
}
}
}
}
void init()
{
SGVector<int64_t> dummy_labels_x(m_n_x);
SGVector<int64_t> dummy_labels_y(m_n_y);
auto instance_x=new CCrossValidationSplitting(new CBinaryLabels(dummy_labels_x), m_num_folds);
auto instance_y=new CCrossValidationSplitting(new CBinaryLabels(dummy_labels_y), m_num_folds);
m_kfold_x=unique_ptr<CCrossValidationSplitting>(instance_x);
m_kfold_y=unique_ptr<CCrossValidationSplitting>(instance_y);
m_stack=unique_ptr<CSubsetStack>(new CSubsetStack());
const index_t size=m_n_x+m_n_y;
m_inverted_inds=SGVector<index_t>(size);
m_inverted_permuted_inds=SGMatrix<index_t>(size, m_num_null_samples);
}
void generate_inds(index_t current_fold)
{
SGVector<index_t> x_inds=m_kfold_x->generate_subset_inverse(current_fold);
SGVector<index_t> y_inds=m_kfold_y->generate_subset_inverse(current_fold);
std::for_each(y_inds.data(), y_inds.data()+y_inds.size(), [this](index_t& val) { val += m_n_x; });
m_n_x=x_inds.size();
m_n_y=y_inds.size();
if (m_xy_inds.size()!=m_n_x+m_n_y)
m_xy_inds=SGVector<index_t>(m_n_x+m_n_y);
std::copy(x_inds.data(), x_inds.data()+x_inds.size(), m_xy_inds.data());
std::copy(y_inds.data(), y_inds.data()+y_inds.size(), m_xy_inds.data()+x_inds.size());
}
index_t m_num_runs;
index_t m_num_folds;
static constexpr index_t DEFAULT_NUM_RUNS=10;
float64_t m_alpha;
static constexpr float64_t DEFAULT_ALPHA=0.05;
unique_ptr<CCrossValidationSplitting> m_kfold_x;
unique_ptr<CCrossValidationSplitting> m_kfold_y;
unique_ptr<CSubsetStack> m_stack;
SGVector<index_t> m_xy_inds;
SGVector<index_t> m_inverted_inds;
SGMatrix<float64_t> m_rejections;
};
#endif // DOXYGEN_SHOULD_SKIP_THIS
}
}
}
#endif // CROSS_VALIDATION_MMD_H_
|
game.h | #pragma once
#ifndef GAME_H
#define GAME_H
#include <mana/core/memoryallocator.h>
//
#include <chaos/chaos.h>
#include <core/dimensions.h>
#include <core/entity.h>
#include <core/gamestate.h>
#include <core/position.h>
#include <core/scenery.h>
#include <mana/audio/audiomanager.h>
#include <mana/core/inputmanager.h>
#include <mana/graphics/entities/manifoldplanet.h>
#include <mana/graphics/entities/sprite.h>
#include <mana/graphics/shaders/fxaashader.h>
#include <mana/graphics/shaders/manifolddualcontouringshader.h>
#include <mana/graphics/shaders/spriteanimationshader.h>
#include <mana/graphics/shaders/spriteshader.h>
#include <mana/graphics/utilities/camera.h>
#include <mana/graphics/utilities/spriteanimation.h>
#include <mana/mana.h>
#include "core/jobsystem.h"
#include "core/render.h"
#include "utilities/playercamera.h"
#include "utilities/resourcemanager.h"
// Top-level game context created by game_init(): holds the window reference,
// every shader pipeline, the player camera, gameplay and render state, and
// the job/resource subsystems.
struct Game {
  struct Window* window;                                // target window (NOTE(review): presumably owned by Mana, not freed here -- confirm)
  struct SpriteShader sprite_shader;                    // pipeline for static sprites
  struct SpriteAnimationShader sprite_animation_shader; // pipeline for animated sprites
  struct FXAAShader fxaa_shader;                        // FXAA post-process pass (can be toggled via .on)
  struct PlayerCamera player_camera;                    // camera driven by player input
  struct GameState* game_state;                         // gameplay ECS state: entities plus position/dimensions/scenery registries
  struct ComponentRegistry render_registry;             // per-entity Render components drawn by game_render_entities()
  struct JobSystem job_system;                          // worker threads for entity-update jobs
  struct ResourceManager resource_manager;              // caches (e.g. texture_cache used when loading scenery)
  struct ManifoldDualContouringShader planet_shader;    // pipeline for the procedural planet mesh
  struct ManifoldPlanet planet;                         // procedural planet instance
};
void game_init(struct Game* game, struct Mana* mana, struct Window* window);
void game_delete(struct Game* game, struct Mana* mana);
void game_update(struct Game* game, struct Mana* mana, double delta_time);
void game_update_camera(struct Game* game, struct Engine* engine);
void game_update_input(struct Game* game, struct Engine* engine);
/* Recreates all Vulkan-backed shader objects after the swapchain was
 * invalidated (window resize). The reset flag is consumed, the device is
 * drained, and each shader is torn down and rebuilt.
 * TODO: entity-owned GPU resources are not yet recreated here. */
static inline void game_check_for_window_resize(struct Game* game, struct GPUAPI* gpu_api) {
  if (!gpu_api->vulkan_state->reset_shaders)
    return;

  gpu_api->vulkan_state->reset_shaders = 0;
  /* Nothing may be in flight while pipelines are destroyed. */
  vkDeviceWaitIdle(gpu_api->vulkan_state->device);

  fxaa_shader_delete(&game->fxaa_shader, gpu_api);
  fxaa_shader_init(&game->fxaa_shader, gpu_api);
  game->fxaa_shader.on = 0;

  sprite_shader_delete(&game->sprite_shader, gpu_api);
  sprite_shader_init(&game->sprite_shader, gpu_api, 0);

  sprite_animation_shader_delete(&game->sprite_animation_shader, gpu_api);
  sprite_animation_shader_init(&game->sprite_animation_shader, gpu_api, 0);
}
// Currently a no-op stub: the job-system based entity update below is fully
// commented out, so this function does nothing at runtime. Both parameters
// are intentionally unused until the job dispatch is re-enabled.
static inline void game_update_jobs(struct Game* game, struct GPUAPI* gpu_api) {
  // Update sprites
  /*struct EntityUpdateData* sprites_update_data_pool = malloc(sizeof(struct EntityUpdateData) * array_list_size(&game->stage_entity_render_list));
  struct Job* sprites_update_job_pool = malloc(sizeof(struct Job) * array_list_size(&game->stage_entity_render_list));
  for (int entity_num = 0; entity_num < array_list_size(&game->stage_entity_render_list); entity_num++) {
    struct Entity* entity = array_list_get(&game->stage_entity_render_list, entity_num);
    sprites_update_data_pool[entity_num] = (struct EntityUpdateData){.game_handle = game, .entity_handle = entity, .delta_time = delta_time};
    sprites_update_job_pool[entity_num] = (struct Job){.job_func = entity_update_job, .job_data = &sprites_update_data_pool[entity_num]};
    job_system_enqueue(game->job_system, &sprites_update_job_pool[entity_num]);
  }
  job_system_start_threads(game->job_system);
  job_system_wait(game->job_system);
  free(scenery_update_data_pool);
  free(scenery_update_job_pool);
  free(sprites_update_data_pool);
  free(sprites_update_job_pool);*/
}
// Collects every entity id from the render registry into sorted_render_list
// and orders the list by each entity's Position.z (entities with smaller z end
// up earlier, so draw order goes low-z to high-z -- presumably back-to-front;
// TODO confirm against camera convention). gpu_api is currently unused.
static inline void game_sort_render_entites(struct Game* game, struct GPUAPI* gpu_api, struct ArrayList* sorted_render_list) {
  char* sorted_render_list_key = NULL;
  struct MapIter sorted_render_list_iter = map_iter();
  // Build list of entities that need to be rendered
  while ((sorted_render_list_key = map_next(&game->render_registry.registry, &sorted_render_list_iter)))
    array_list_add(sorted_render_list, sorted_render_list_key);
  // TODO: Look into multithreaded merge sort
  // Sort render list for draw order (insertion-sort style: bubble each new
  // element toward the front while it compares <= its predecessor)
  for (int render_num = 0; render_num < array_list_size(sorted_render_list); render_num++) {
    for (int other_render_num = render_num; other_render_num > 0; other_render_num--) {
      struct Position* position_one = component_registry_get(&game->game_state->position_registry, array_list_get(sorted_render_list, other_render_num));
      struct Position* position_two = component_registry_get(&game->game_state->position_registry, array_list_get(sorted_render_list, other_render_num - 1));
      // TODO: Check within float range
      // NOTE(review): a conventional insertion sort would `break` once the
      // pair is already in order; `continue` keeps scanning earlier pairs
      // without having inserted -- confirm this is intended.
      if (position_one->z > position_two->z)
        continue;
      // TODO: Add if within z range then check y range/implicit function
      //if (entity_one->position.y - entity_one->height / 2.0f < entity_two->position.y - entity_two->height / 2.0f)
      //  continue;
      array_list_swap(sorted_render_list, other_render_num, other_render_num - 1);
    }
  }
}
/* Draws every entity in sorted_render_list, in list order: for each entity id
 * the Render component is looked up and its sprite's uniforms are refreshed
 * before the sprite is submitted for rendering. */
static inline void game_render_entities(struct Game* game, struct GPUAPI* gpu_api, struct ArrayList* sorted_render_list) {
  for (int idx = 0; idx < array_list_size(sorted_render_list); ++idx) {
    char* entity_key = array_list_get(sorted_render_list, idx);
    struct Render* render_component = component_registry_get(&game->render_registry, entity_key);
    sprite_update_uniforms(&render_component->sprite, gpu_api);
    sprite_render(&render_component->sprite, gpu_api);
  }
}
/* Reloads all scenery from ./assets/stages/gamestage.xml: tears down every
 * existing scenery entity (sprite, position, dimensions, render components,
 * and its entry in the entity vector), then rebuilds entities from the XML,
 * honoring each node's scale/repeat/offset settings. */
static inline void game_hotswap_scenery(struct Game* game, struct GPUAPI* gpu_api) {
  // Clear any existing scenery
  if (game->game_state->scenery_registry.registry.num_nodes > 0) {
    char* scenery_key = NULL;
    struct MapIter scenery_iter = map_iter();
    while ((scenery_key = map_next(&game->game_state->scenery_registry.registry, &scenery_iter))) {
      sprite_delete(&((struct Render*)map_get(&game->render_registry.registry, scenery_key))->sprite, gpu_api);
      component_registry_remove(&game->game_state->position_registry, scenery_key);
      component_registry_remove(&game->game_state->dimensions_registry, scenery_key);
      component_registry_remove(&game->render_registry, scenery_key);
      // TODO: Write better way to remove single entity
      for (int entity_num = 0; entity_num < vector_size(&game->game_state->entities); entity_num++) {
        // BUG FIX: strcmp() returns 0 on a match, so the previous
        // `if (strcmp(...))` removed the first NON-matching entity.
        if (strcmp(scenery_key, ((struct Entity*)vector_get(&game->game_state->entities, entity_num))->entity_id) == 0) {
          vector_remove(&game->game_state->entities, entity_num);
          break;
        }
      }
    }
    component_registry_delete(&game->game_state->scenery_registry);
    component_registry_init(&game->game_state->scenery_registry, sizeof(struct Scenery));
  }

  // Load scenery from xml file
  struct XmlNode* game_stage_node = xml_parser_load_xml_file("./assets/stages/gamestage.xml");
  struct XmlNode* scenery_node = xml_node_get_child(game_stage_node, "scenery");

  const char* scenery_list_key = NULL;
  struct MapIter scenery_list_iter = map_iter();
  enum { MAX_SCENERY_NODES = 64 };
  struct XmlNode* scenery_nodes[MAX_SCENERY_NODES] = {0};
  // Find each piece of unique scenery
  int total_scenery_num = 0;
  while ((scenery_list_key = map_next(scenery_node->child_nodes, &scenery_list_iter))) {
    // Guard against overrunning the fixed-size node array (previously an
    // unchecked write past 64 entries was possible).
    if (total_scenery_num >= MAX_SCENERY_NODES)
      break;
    scenery_nodes[total_scenery_num++] = array_list_get(*((struct ArrayList**)map_get(scenery_node->child_nodes, scenery_list_key)), 0);
  }

  // Build components from xml data
  //#pragma omp parallel for schedule(dynamic)
  for (int scenery_num = 0; scenery_num < total_scenery_num; scenery_num++) {
    struct XmlNode* position_node = xml_node_get_child(scenery_nodes[scenery_num], "position");
    float scale = atof(xml_node_get_data(xml_node_get_child(scenery_nodes[scenery_num], "scale")));
    int repeat_factor = atoi(xml_node_get_data(xml_node_get_child(scenery_nodes[scenery_num], "repeat")));
    float offset = atof(xml_node_get_data(xml_node_get_child(scenery_nodes[scenery_num], "offset")));
    // Instantiate the scenery repeat_factor times, offsetting each copy in x.
    for (int repeat_num = 0; repeat_num < repeat_factor; repeat_num++) {
      struct Entity new_scenery_entity = {0};
      entity_init(&new_scenery_entity);
      vector_push_back(&game->game_state->entities, &new_scenery_entity);
      struct Scenery new_scenery = (struct Scenery){.texture_path = xml_node_get_attribute(position_node, "path"), .repeat_factor = repeat_factor, .offset = offset};
      component_registry_set(&game->game_state->scenery_registry, &new_scenery_entity, &new_scenery);
      struct Render new_scenery_render = {0};
      sprite_init(&new_scenery_render.sprite, gpu_api, &game->sprite_shader.shader, texture_cache_get(&game->resource_manager.texture_cache, new_scenery.texture_path));
      struct Dimensions new_scenery_dimensions = {.direction = 1.0f, .scale = scale, .height = new_scenery_render.sprite.height * scale, .width = new_scenery_render.sprite.width * scale};
      component_registry_set(&game->game_state->dimensions_registry, &new_scenery_entity, &new_scenery_dimensions);
      struct Position new_scenery_position = (struct Position){.x = atof(xml_node_get_data(xml_node_get_child(position_node, "x"))) + (new_scenery_dimensions.width * repeat_num) * new_scenery.offset, .y = atof(xml_node_get_data(xml_node_get_child(position_node, "y"))), .z = atof(xml_node_get_data(xml_node_get_child(position_node, "z")))};
      component_registry_set(&game->game_state->position_registry, &new_scenery_entity, &new_scenery_position);
      new_scenery_render.sprite.position = (vec3){.x = new_scenery_position.x, .y = new_scenery_position.y, .z = new_scenery_position.z};
      new_scenery_render.sprite.scale = (vec3){.x = scale, .y = scale, .z = scale};
      component_registry_set(&game->render_registry, &new_scenery_entity, &new_scenery_render);
    }
  }
  xml_parser_delete(game_stage_node);
}
#endif //GAME_H
|
rar_fmt_plug.c | /* RAR 3.x cracker patch for JtR. Hacked together during
* April of 2011 by Dhiru Kholia <dhiru.kholia at gmail.com> for GSoC.
* magnum added -p mode support, using code based on libclamav
* and OMP, AES-NI and OpenCL support.
* jimf added dyna_salt support, Oct 2014.
*
* This software is Copyright (c) 2011, Dhiru Kholia <dhiru.kholia at gmail.com>
* and Copyright (c) 2012, magnum and it is hereby released to the general public
* under the following terms:
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*
* This code is based on the work of Alexander L. Roshal (C)
*
* The unRAR sources may be used in any software to handle RAR
* archives without limitations free of charge, but cannot be used
* to re-create the RAR compression algorithm, which is proprietary.
* Distribution of modified unRAR sources in separate form or as a
* part of other software is permitted, provided that it is clearly
* stated in the documentation and source comments that the code may
* not be used to develop a RAR (WinRAR) compatible archiver.
*
* Huge thanks to Marc Bevand <m.bevand (at) gmail.com> for releasing unrarhp
* (http://www.zorinaq.com/unrarhp/) and documenting the RAR encryption scheme.
* This patch is made possible by unrarhp's documentation.
*
* http://anrieff.net/ucbench/technical_qna.html is another useful reference
* for RAR encryption scheme.
*
* Thanks also to Pavel Semjanov for crucial help with Huffman table checks.
*
* For type = 0 for files encrypted with "rar -hp ..." option
* archive_name:$RAR3$*type*hex(salt)*hex(partial-file-contents):type::::archive_name
*
* For type = 1 for files encrypted with "rar -p ..." option
* archive_name:$RAR3$*type*hex(salt)*hex(crc)*PACK_SIZE*UNP_SIZE*archive_name*offset-for-ciphertext*method:type::file_name
*
* or (inlined binary)
*
* archive_name:$RAR3$*type*hex(salt)*hex(crc)*PACK_SIZE*UNP_SIZE*1*hex(full encrypted file)*method:type::file_name
*
*/
#include "arch.h"
#if ARCH_ALLOWS_UNALIGNED
#if FMT_EXTERNS_H
extern struct fmt_main fmt_rar;
#elif FMT_REGISTERS_H
john_register_one(&fmt_rar);
#else
#include <string.h>
#include <errno.h>
#if AC_BUILT
#include "autoconfig.h"
#endif
#if _MSC_VER || __MINGW32__ || __MINGW64__ || __CYGWIN__ || HAVE_WINDOWS_H
#include "win32_memmap.h"
#if !defined(__CYGWIN__) && !defined(__MINGW64__) && !defined(__MINGW32__)
#include "mmap-windows.c"
#elif defined HAVE_MMAP
#include <sys/mman.h>
#endif
#elif defined(HAVE_MMAP)
#include <sys/mman.h>
#endif
#include "sha.h"
#include "crc32.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "dyna_salt.h"
#include "memory.h"
#include "params.h"
#include "options.h"
#include "unicode.h"
#include "johnswap.h"
#include "unrar.h"
#include "config.h"
#include "jumbo.h"
#define FORMAT_LABEL "rar"
#define FORMAT_NAME "RAR3"
#ifdef DEBUG
#define BENCHMARK_COMMENT " (1-16 characters)"
#else
#define BENCHMARK_COMMENT " (4 characters)"
#endif
#define BENCHMARK_LENGTH -1
#define UNICODE_LENGTH (2 * PLAINTEXT_LENGTH)
#define BINARY_SIZE 0
#define BINARY_ALIGN MEM_ALIGN_NONE
#define SALT_SIZE sizeof(rarfile*)
#define SALT_ALIGN sizeof(rarfile*)
#ifdef SIMD_COEF_32
#include "simd-intrinsics.h"
#define NBKEYS (SIMD_COEF_32*SIMD_PARA_SHA1)
#if ARCH_LITTLE_ENDIAN==1
#define GETPOS(i,idx) ( (idx&(SIMD_COEF_32-1))*4 + ((i)&(0xffffffff-3))*SIMD_COEF_32 + (3-((i)&3)) + (unsigned int)idx/SIMD_COEF_32*SHA_BUF_SIZ*4*SIMD_COEF_32 )
#else
#define GETPOS(i,idx) ( (idx&(SIMD_COEF_32-1))*4 + ((i)&(0xffffffff-3))*SIMD_COEF_32 + ((i)&3) + (unsigned int)idx/SIMD_COEF_32*SHA_BUF_SIZ*4*SIMD_COEF_32 )
#endif
#define HASH_IDX(idx) (((unsigned int)idx&(SIMD_COEF_32-1))+(unsigned int)idx/SIMD_COEF_32*5*SIMD_COEF_32)
#define ALGORITHM_NAME "SHA1 " SHA1_ALGORITHM_NAME " AES"
#define PLAINTEXT_LENGTH 26
#define MIN_KEYS_PER_CRYPT NBKEYS
#define MAX_KEYS_PER_CRYPT NBKEYS
#else
#define ALGORITHM_NAME "SHA1 AES 32/" ARCH_BITS_STR
#define PLAINTEXT_LENGTH 125
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif
#define ROUNDS 0x40000
#ifdef _MSC_VER
#undef _OPENMP
#endif
#ifdef _OPENMP
#include <omp.h>
#endif
#include "rar_common.c"
#include "memdbg.h"
// these are supposed to be stack arrays; however gcc cannot correctly align
// stack arrays so we have to use global arrays; we may switch back to stack
// arrays (which take less space) when gcc fixes this issue
#ifdef SIMD_COEF_32
static uint8_t (*vec_in)[2][NBKEYS*64];
static uint32_t (*vec_out)[NBKEYS*5];
static uint8_t (*tmp_in)[NBKEYS*64];
static uint32_t (*tmp_out)[NBKEYS*5];
#endif
/*
 * Format init hook: scales key counts for OpenMP, allocates every
 * per-candidate buffer (UTF-16 keys, lengths, cracked flags, AES
 * key/IV slots) plus the SIMD staging buffers, and primes the CRC-32
 * table before any worker threads start.
 */
static void init(struct fmt_main *self)
{
#if defined (_OPENMP)
	omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	self->params.max_keys_per_crypt *= omp_t;
#endif /* _OPENMP */
	// Length is a cost. We sort in buckets but we need them to be mostly full
	self->params.max_keys_per_crypt *= PLAINTEXT_LENGTH;
	if (options.target_enc == UTF_8)
		self->params.plaintext_length = MIN(125, 3 * PLAINTEXT_LENGTH);
	/* NOTE(review): omp_t is referenced here even when _OPENMP is not
	   defined — presumably rar_common.c defaults it to 1; confirm. */
	unpack_data = mem_calloc(omp_t, sizeof(unpack_data_t));
	cracked = mem_calloc(self->params.max_keys_per_crypt,
	                     sizeof(*cracked));
	// allocate 1 more slot to handle the tail of vector buffer
	saved_key = mem_calloc(self->params.max_keys_per_crypt + 1,
	                       UNICODE_LENGTH);
	saved_len = mem_calloc(self->params.max_keys_per_crypt + 1,
	                       sizeof(*saved_len));
	if (!saved_salt)
		saved_salt = mem_calloc(8, 1);
	/* one 16-byte AES key and IV per candidate (+1 tail slot, as above) */
	aes_key = mem_calloc(self->params.max_keys_per_crypt + 1, 16);
	aes_iv = mem_calloc(self->params.max_keys_per_crypt + 1, 16);
#ifdef SIMD_COEF_32
	/* cache-aligned double-buffered SHA-1 input vectors and digests */
	vec_in  = mem_calloc_align(self->params.max_keys_per_crypt,
	                           sizeof(*vec_in), MEM_ALIGN_CACHE);
	vec_out = mem_calloc_align(self->params.max_keys_per_crypt,
	                           sizeof(*vec_out), MEM_ALIGN_CACHE);
	tmp_in  = mem_calloc_align(self->params.max_keys_per_crypt,
	                           sizeof(*tmp_in), MEM_ALIGN_CACHE);
	tmp_out = mem_calloc_align(self->params.max_keys_per_crypt,
	                           sizeof(*tmp_out), MEM_ALIGN_CACHE);
#endif
#ifdef DEBUG
	self->params.benchmark_comment = " (1-16 characters)";
#endif
	/* CRC-32 table init, do it before we start multithreading */
	{
		CRC32_t crc;
		CRC32_Init(&crc);
	}
}
/* Release every buffer obtained in init(), in allocation order. */
static void done(void)
{
	MEM_FREE(unpack_data);
	MEM_FREE(cracked);
	MEM_FREE(saved_key);
	MEM_FREE(saved_len);
	MEM_FREE(saved_salt);
	MEM_FREE(aes_key);
	MEM_FREE(aes_iv);
#ifdef SIMD_COEF_32
	MEM_FREE(vec_in);
	MEM_FREE(vec_out);
	MEM_FREE(tmp_in);
	MEM_FREE(tmp_out);
#endif
}
/*
 * RAR3 KDF for all candidates: iterated SHA-1 over
 * password(UTF-16) + 8-byte salt + 3-byte round counter, 0x40000
 * rounds. Every ROUNDS/16 iterations one digest byte becomes part of
 * the AES IV; the final digest (byte-swapped) is the AES key.
 * SIMD path buckets passwords by length so each vector batch shares
 * one block layout. Returns count unchanged; check_rar() tests keys.
 */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;
#ifdef SIMD_COEF_32
	int len;
	int *indices;
	int tot_todo = 0;
	/* Tricky formula, see GitHub #1692 :-) */
	indices = mem_calloc(count + MIN(PLAINTEXT_LENGTH + 1, count) *
	                     (NBKEYS - 1), sizeof(*indices));
	// sort passwords by length
	for (len = 0; len <= PLAINTEXT_LENGTH*2; len += 2) {
		for (index = 0; index < count; ++index) {
			if (saved_len[index] == len)
				indices[tot_todo++] = index;
		}
		/* pad each bucket to a full SIMD batch with dummy index
		   'count' (the extra tail slot allocated in init()) */
		while (tot_todo % NBKEYS)
			indices[tot_todo++] = count;
	}
#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < tot_todo; index += NBKEYS) {
		unsigned int i, j, k;
		/* two interleaved 64-byte blocks per lane (double buffer) */
		uint8_t (*RawPsw)[NBKEYS*64] = vec_in[index/NBKEYS];
		uint32_t *digest = vec_out[index/NBKEYS];
		// all passwords in one batch has the same length
		int pw_len = saved_len[indices[index]];
		int RawLength = pw_len + 8 + 3;
		int cur_len = 0;
		int fst_blk = 1;
		int cur_buf = 0;
		/* high counter bytes; only change when low byte wraps to 0 */
		unsigned char tmp1 = 0, tmp2 = 0;
		for (i = 0; i < ROUNDS; ++i) {
			// copy passwords to vector buffer
			for (j = 0; j < NBKEYS; ++j) {
				int idx = indices[index + j];
				int len = cur_len;
				for (k = 0; k < pw_len; ++k) {
					/* (len & 64) >> 6 picks which of the two
					   64-byte blocks this byte lands in */
					RawPsw[(len & 64)>>6][GETPOS(len%64, j)] =
						saved_key[UNICODE_LENGTH*idx + k];
					len++;
				}
				for (k = 0; k < 8; ++k) {
					RawPsw[(len & 64)>>6][GETPOS(len%64, j)] = saved_salt[k];
					len++;
				}
				RawPsw[(len & 64)>>6][GETPOS(len%64, j)] = (unsigned char)i;
				len++;
				if ( ((unsigned char) i) == 0) {
					tmp1 = (unsigned char)(i >> 8);
					tmp2 = (unsigned char)(i >> 16);
				}
				RawPsw[(len & 64)>>6][GETPOS(len%64, j)] = tmp1;
				len++;
				RawPsw[(len & 64)>>6][GETPOS(len%64, j)] = tmp2;
			}
			cur_len += RawLength;
			/* every ROUNDS/16 rounds: hash a padded snapshot and
			   harvest one IV byte per candidate */
			if (i % (ROUNDS / 16) == 0) {
				uint8_t *tempin = tmp_in[index/NBKEYS];
				uint32_t *tempout = tmp_out[index/NBKEYS];
				memcpy(tempin, RawPsw[cur_buf], NBKEYS*64);
				for (j = 0; j < NBKEYS; ++j) { // padding
					uint32_t *tail;
					for (k = RawLength; k < 64; ++k)
						tempin[GETPOS(k, j)] = 0;
					tempin[GETPOS(RawLength, j)] = 0x80;
#if ARCH_LITTLE_ENDIAN==1
					tail = (uint32_t*)&tempin[GETPOS(64 - 1, j)];
#else
					tail = (uint32_t*)&tempin[GETPOS(64 - 1 - 3, j)];
#endif
					*tail = cur_len*8;	/* SHA-1 bit length */
				}
				if (i == 0)
					SIMDSHA1body(tempin, tempout, NULL, SSEi_MIXED_IN);
				else
					SIMDSHA1body(tempin, tempout, digest,
					             SSEi_MIXED_IN | SSEi_RELOAD);
				for (j = 0; j < NBKEYS; ++j) {
					int idx = indices[index + j];
					/* last byte of the intermediate digest */
					aes_iv[idx*16 + i/(ROUNDS/16)] =
						(uint8_t)tempout[HASH_IDX(j) + 4*SIMD_COEF_32];
				}
			}
			// swap out and compute digests on the filled buffer
			if ((cur_len & 64) != (cur_buf << 6)) {
				if (fst_blk)
					SIMDSHA1body(RawPsw[cur_buf], digest, NULL, SSEi_MIXED_IN);
				else
					SIMDSHA1body(RawPsw[cur_buf], digest, digest,
					             SSEi_MIXED_IN | SSEi_RELOAD);
				fst_blk = 0;
				cur_buf = 1 - cur_buf;
			}
		}
		// padding
		memset(RawPsw[0], 0, sizeof(RawPsw[0]));
		for (j = 0; j < NBKEYS; ++j) {
			uint32_t *tail;
			RawPsw[0][GETPOS(0, j)] = 0x80;
#if ARCH_LITTLE_ENDIAN==1
			tail = (uint32_t*)&RawPsw[0][GETPOS(64 - 1, j)];
#else
			tail = (uint32_t*)&RawPsw[0][GETPOS(64 - 1 - 3, j)];
#endif
			*tail = cur_len*8;
		}
		SIMDSHA1body(RawPsw[0], digest, digest, SSEi_MIXED_IN | SSEi_RELOAD);
		/* first 16 digest bytes become the AES-128 key */
		for (j = 0; j < NBKEYS; ++j) {
			for (i = 0; i < 4; ++i) {
				int idx = indices[index + j];
				uint32_t *dst = (uint32_t*)&aes_key[idx*16];
#if ARCH_LITTLE_ENDIAN==1
				dst[i] = digest[HASH_IDX(j) + i*SIMD_COEF_32];
#else
				dst[i] = JOHNSWAP(digest[HASH_IDX(j) + i*SIMD_COEF_32]);
#endif
			}
		}
	}
	MEM_FREE(indices);
#else
#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < count; index++) {
		int i16 = index*16;
		unsigned int i;
		unsigned char RawPsw[UNICODE_LENGTH + 8 + 3];
		int RawLength;
		SHA_CTX ctx, tempctx;
		unsigned int digest[5];
		unsigned char *PswNum, tempout[20];
		RawLength = saved_len[index] + 8 + 3;
		/* 3-byte little-endian round counter lives after pw+salt */
		PswNum = (unsigned char*) &RawPsw[saved_len[index] + 8];
		PswNum[1] = PswNum[2] = 0;
		/* derive IV and key for AES from saved_key and
		   saved_salt, this code block is based on unrarhp's
		   and unrar's sources */
		memcpy(RawPsw, &saved_key[UNICODE_LENGTH * index], saved_len[index]);
		memcpy(RawPsw + saved_len[index], saved_salt, 8);
		SHA1_Init(&ctx);
		for (i = 0; i < ROUNDS; i++) {
			PswNum[0] = (unsigned char) i;
			if ( ((unsigned char) i) == 0) {
				PswNum[1] = (unsigned char) (i >> 8);
				PswNum[2] = (unsigned char) (i >> 16);
			}
			SHA1_Update(&ctx, RawPsw, RawLength);
			if (i % (ROUNDS / 16) == 0) {
				/* snapshot the running hash for one IV byte */
				tempctx = ctx;
				SHA1_Final(tempout, &tempctx);
				aes_iv[i16 + i / (ROUNDS / 16)] = tempout[19];
			}
		}
		SHA1_Final((unsigned char*)digest, &ctx);
		for (i = 0; i < 4; i++) /* reverse byte order */
			digest[i] = JOHNSWAP(digest[i]);
		memcpy(&aes_key[i16], (unsigned char*)digest, 16);
	}
#endif
	check_rar(count);
	return count;
}
/* John the Ripper format descriptor binding the RAR3 parameters and
   method table registered by john_register_one(&fmt_rar). */
struct fmt_main fmt_rar = {
	{ /* params */
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_UNICODE | FMT_UTF8 | FMT_OMP | FMT_DYNA_SALT | FMT_HUGE_INPUT,
		{ NULL },
		{ FORMAT_TAG },
		cpu_tests
	},{ /* methods; defaults come from formats.h, the rest from rar_common.c */
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		fmt_default_binary,
		get_salt,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash
		},
		fmt_default_dyna_salt_hash,
		NULL,
		set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			fmt_default_get_hash
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif /* plugin stanza */
#else
#ifdef __GNUC__
#pragma message(": target system requires aligned memory access, rar format disabled:")
#elif _MSC_VER
#warning ": target system requires aligned memory access, rar format disabled:"
#endif
#endif
|
udr-4.c | /* { dg-do compile } */
/* Compiler testcase: a user-defined OpenMP reduction over an incomplete
   struct type must be rejected; the same declaration after the struct
   definition is valid. The dg-error annotation below is checked by the
   test harness and must stay on that line. */
struct S;
#pragma omp declare reduction (+:struct S:omp_out.s += omp_in.s) /* { dg-error "invalid use of undefined type" } */
struct S { int s; };
#pragma omp declare reduction (*:struct S:omp_out.s *= omp_in.s)
|
LM.h | /**
* Copyright (c) 2017 Darius Rückert
* Licensed under the MIT License.
* See LICENSE file for more information.
*/
#pragma once
#include "../recursive/Recursive.h"
namespace Saiga
{
/**
 * Scales each diagonal entry of a fixed-size matrix by (1 + lambda)
 * and clamps the result into [min_lm_diagonal, max_lm_diagonal].
 */
template <typename T>
void applyLMDiagonalInner(T& diag, double lambda = 1.00e-04, double min_lm_diagonal = 1e-6,
                          double max_lm_diagonal = 1e32)
{
    for (int row = 0; row < diag.RowsAtCompileTime; ++row)
    {
        auto& entry = diag.diagonal()(row);
        entry       = entry + lambda * entry;
        entry       = clamp(entry, min_lm_diagonal, max_lm_diagonal);
    }
}
/**
 * Applies the Levenberg-Marquardt diagonal update to a recursive
 * diagonal matrix:
 *
 * U = U + clamp(diag(U) * lambda, min, max)
 */
template <typename T>
void applyLMDiagonal(Eigen::DiagonalMatrix<T, -1>& U, double lambda = 1.00e-04, double min_lm_diagonal = 1e-6,
                     double max_lm_diagonal = 1e32)
{
    for (int row = 0; row < U.rows(); ++row)
    {
        auto& block = U.diagonal()(row).get();
        applyLMDiagonalInner(block, lambda, min_lm_diagonal, max_lm_diagonal);
    }
}
/**
 * OpenMP variant of applyLMDiagonal.
 * NOTE: this uses '#pragma omp for' (a worksharing construct, not
 * 'parallel for'), so it is meant to be called from inside an existing
 * '#pragma omp parallel' region; outside one it runs sequentially.
 */
template <typename T>
void applyLMDiagonal_omp(Eigen::DiagonalMatrix<T, -1>& U, double lambda = 1.00e-04, double min_lm_diagonal = 1e-6,
                         double max_lm_diagonal = 1e32)
{
#pragma omp for
    for (int i = 0; i < U.rows(); ++i)
    {
        auto& diag = U.diagonal()(i).get();
        applyLMDiagonalInner(diag, lambda, min_lm_diagonal, max_lm_diagonal);
    }
}
/**
 * Simplified LM diagonal update, as used by the g2o framework:
 *
 * U = U + I * lambda
 *
 * (additive damping, no clamping).
 */
template <typename T>
void applyLMDiagonalG2O(Eigen::DiagonalMatrix<T, -1>& U, double lambda = 1.00e-04)
{
    for (int row = 0; row < U.rows(); ++row)
    {
        auto& block = U.diagonal()(row).get();
        for (int col = 0; col < block.RowsAtCompileTime; ++col)
        {
            auto& entry = block.diagonal()(col);
            entry       = entry + lambda;
        }
    }
}
// Classic LM damping schedule: halve lambda after a successful step,
// double it after a rejected one.
inline void updateLambda(double& lambda, bool success)
{
    lambda = success ? lambda / 2.0 : lambda * 2.0;
}
} // namespace Saiga
|
GrB_Semiring_wait.c | //------------------------------------------------------------------------------
// GrB_Semiring_wait: wait for a user-defined GrB_Semiring to complete
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// In SuiteSparse:GraphBLAS, a user-defined GrB_Semiring has no pending
// operations to wait for. All this method does is verify that the semiring is
// properly initialized, and then it does an OpenMP flush.
#include "GB.h"
GrB_Info GrB_Semiring_wait  // no work, just check if the GrB_Semiring is valid
(
    GrB_Semiring semiring,
    GrB_WaitMode waitmode       // unused: semirings never have pending work
)
{
    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------
    GB_WHERE1 ("GrB_Semiring_wait (semiring, mode)") ;
    GB_RETURN_IF_NULL_OR_FAULTY (semiring) ;
    //--------------------------------------------------------------------------
    // return result
    //--------------------------------------------------------------------------
    // flush so any prior writes to the semiring are visible to all threads
    #pragma omp flush
    return (GrB_SUCCESS) ;
}
|
trans_gain.c | /* Daala video codec
Copyright (c) 2013 Daala project contributors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
- Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS “AS IS”
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.*/
/* 1D coding gain (dB) **********************
AR p=.95 4x4 8x8 16x16
------------------------------------------
KLT 7.5825 8.8462 9.4781
DCT 7.5701 8.8259 9.4555
CDF(9/7) 8.4687 9.4592 9.7866
LappedKLT 8.5633 9.4908 9.8951
LappedDCT 8.5523 9.4871 9.8929
Subset 1 4x4 8x8 16x16
------------------------------------------
KLT original 8.7714 10.2588 11.0039
collapsed 8.7714 10.2588 11.0039
monty 8.7654 10.2628 11.0292
DCT 8.7620 10.2427 10.9861
8.7620 10.2427 10.9861
8.7561 10.2467 11.0115
CDF(9/7) 9.3794 10.5932 11.0685
9.3845 10.5957 11.0825
9.4155 10.6576 11.1965
LappedKLT 9.6276 10.7860 11.3254
9.6277 10.7867 11.3296
9.6295 10.8056 11.3722
LappedDCT 9.6213 10.7832 11.3230
9.6214 10.7839 11.3272
9.6232 10.8028 11.3698
Subset 3 4x4 8x8 16x16
------------------------------------------
KLT original 10.5669 12.3711 13.2694
collapsed 10.5669 12.3711 13.2694
monty 10.5495 12.3573 13.2729
DCT 10.5546 12.3532 13.2535
10.5547 12.3532 13.2535
10.5373 12.3395 13.2572
CDF(9/7) 11.3102 12.6838 13.1845
11.3106 12.6871 13.2009
11.3389 12.7764 13.4084
LappedKLT 11.6048 13.0138 13.6488
11.6046 13.0136 13.6491
11.5922 13.0126 13.6790
LappedDCT 11.5970 13.0111 13.6464
11.5968 13.0110 13.6467
11.5844 13.0099 13.6766
*/
/* 2D coding gain (dB) **********************
AR p=.95 4x4 8x8 16x16
------------------------------------------
KLT 15.1649 17.6924 18.9562
DCT 15.1403 17.6518 18.9109
CDF(9/7) 16.9374 18.9183 19.5731
LappedKLT 17.1265 18.9816 19.7902
LappedDCT 17.1047 18.9741 19.7858
Subset 1 4x4 8x8 16x16
------------------------------------------
KLT original 12.4432 ------- -------
collapsed 12.4428 ------- -------
monty 12.4732 13.6167 14.1170
DCT 12.3695 ------- -------
12.3698 ------- -------
12.4182 13.5473 14.0536
CDF(9/7) ------- ------- -------
------- ------- -------
13.1425 13.8184 14.0110
LappedKLT 13.2807 ------- -------
13.2808 ------- -------
13.3452 14.1273 14.4041
LappedDCT 13.2682 ------- -------
13.2685 ------- -------
13.3330 14.1215 14.3981
Subset 3 4x4 8x8 16x16
------------------------------------------
KLT monty 14.9078 16.2416 16.7839
DCT 14.8313 16.1578 16.7221
CDF(9/7) 15.7553 16.4760 16.6656
LappedKLT 15.9763 16.8549 17.1181
LappedDCT 15.9627 16.8507 17.1152
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include <stdlib.h>
#include "od_defs.h"
#include "od_filter.h"
#include "trans_tools.h"
#define BLOCKSIZE_LOG (4)
#define USE_LAPPING (1)
#define USE_KLT (1)
#define USE_DCT (0)
#define USE_WAVELET (0)
#define USE_2D (1)
#define USE_FILES (1)
#define USE_AR95 (0)
#define COMPUTE_NATHAN (1)
#define PRINT_COV (0)
#define BLOCKSIZE (1<<BLOCKSIZE_LOG)
#if USE_WAVELET
#if BLOCKSIZE_LOG==1
# define SUPPORT (20)
#else
# if BLOCKSIZE_LOG==2
# define SUPPORT (40)
# else
# if BLOCKSIZE_LOG==3
# define SUPPORT (80)
# else
# if BLOCKSIZE_LOG==4
# define SUPPORT (160)
# else
# error "no support configuration for transform size"
# endif
# endif
# endif
#endif
#else
#if USE_LAPPING||COMPUTE_NATHAN
/* larger than needed for 'new' covariance code, but it won't alter
the answer, just produce a larger than needed covariance matrix.
It is needed to make the boundary conditions of the 'old'
covariance code match the trans and trans2d utils */
#define SUPPORT (BLOCKSIZE*2)
#else
#define SUPPORT (BLOCKSIZE)
#endif
#endif
const int *f;
typedef void (*ne_fdct_func_1d)(double *_out,const double *_in,int _in_stride);
typedef void (*ne_idct_func_1d)(double *_out,int _out_stride,const double *_in);
extern const ne_idct_func_1d OD_IDCT_1D_DOUBLE[OD_NBSIZES];
extern const ne_fdct_func_1d OD_FDCT_1D_DOUBLE[OD_NBSIZES];
#if USE_FILES
typedef struct {
int sz;
u_int64_t *n;
u_int64_t *acc_i;
u_int64_t *acc_j;
u_int64_t *acc_ij;
double *cov;
} cov_state;
/*
 * Initialize a covariance accumulator with _sz bins, zero-filled.
 * Aborts on allocation failure instead of leaving NULL members that
 * the accumulation loops would dereference later (CERT MEM32-C).
 */
static void cov_init(cov_state *_this, int _sz){
  _this->sz     = _sz;
  _this->n      = (u_int64_t *)calloc(_sz,sizeof(*_this->n));
  _this->acc_i  = (u_int64_t *)calloc(_sz,sizeof(*_this->acc_i));
  _this->acc_j  = (u_int64_t *)calloc(_sz,sizeof(*_this->acc_j));
  _this->acc_ij = (u_int64_t *)calloc(_sz,sizeof(*_this->acc_ij));
  _this->cov    = (double *)calloc(_sz,sizeof(*_this->cov));
  if(!_this->n||!_this->acc_i||!_this->acc_j||!_this->acc_ij||!_this->cov){
    fprintf(stderr,"Out of memory allocating covariance state.\n");
    exit(EXIT_FAILURE);
  }
}
/*
 * Free all accumulator arrays of a covariance collector.
 * free(NULL) is a defined no-op, so no per-pointer guards are needed;
 * pointers are reset to NULL so a double cov_clear() is harmless.
 * The cov_state struct itself is owned by the caller.
 */
static void cov_clear(cov_state *_this){
  if(_this){
    free(_this->n);
    free(_this->acc_i);
    free(_this->acc_j);
    free(_this->acc_ij);
    free(_this->cov);
    _this->n = NULL;
    _this->acc_i = NULL;
    _this->acc_j = NULL;
    _this->acc_ij = NULL;
    _this->cov = NULL;
  }
}
#if USE_2D
/* 1D and 2D could both use the same generalized code, but it would be
harder to read */
/*
 * Accumulate collapsed 2D covariance statistics over one image plane.
 * _this->sz must be a perfect square (sz x sz offsets); for each
 * vertical/horizontal offset pair (i,j) we count pixel pairs and sum
 * values and cross-products so cov_compute() can form the covariance.
 */
static void cov_accumulate_2d(cov_state *_this,
                              const unsigned char *_data,
                              int _stride, int _w, int _h){
  int x,y,i,j;
  /* Round to nearest: a bare (int)sqrt() truncates, and floating-point
     error can turn sqrt(k*k) into k-epsilon, yielding sz = k-1. */
  int sz = (int)(sqrt((double)_this->sz)+0.5);
  for(i=0;i<sz;i++){
    for(j=0;j<sz;j++){
      int ij = i*sz+j;
      for(y=0;y<_h-i;y++){
        const unsigned char *di=_data+y*_stride;
        const unsigned char *dj=_data+(y+i)*_stride+j;
        for(x=0;x<_w-j;x++){
          ++_this->n[ij];
          _this->acc_i[ij]  += di[x];
          _this->acc_j[ij]  += dj[x];
          _this->acc_ij[ij] += di[x]*dj[x];
        }
      }
    }
  }
}
#else
/*
 * Accumulate collapsed 1D covariance statistics along one row or
 * column (_stride selects direction). Bin i collects statistics for
 * all sample pairs that are i positions apart.
 */
static void cov_accumulate_1d(cov_state *_this,
                              const unsigned char *_data,
                              int _stride, int _n){
  int i,j;
  for(i=0;i<_this->sz;i++){
    const unsigned char *di=_data;
    const unsigned char *dj=_data+i*_stride;  /* lagged partner */
    for(j=0;j<_n-i;j++){
      ++_this->n[i];
      _this->acc_i[i] += di[j*_stride];
      _this->acc_j[i] += dj[j*_stride];
      _this->acc_ij[i] += di[j*_stride]*dj[j*_stride];
    }
  }
}
#endif
/* Merge two covariance accumulators (e.g. per-thread partials):
   add every counter of _b into _a. Both must have the same sz. */
static void cov_combine(cov_state *_a,const cov_state *_b){
  int bin;
  for(bin=0;bin<_a->sz;bin++){
    _a->n[bin]      += _b->n[bin];
    _a->acc_i[bin]  += _b->acc_i[bin];
    _a->acc_j[bin]  += _b->acc_j[bin];
    _a->acc_ij[bin] += _b->acc_ij[bin];
  }
}
/*
 * Convert raw accumulators into a covariance vector normalized so that
 * cov[0] == 1. Per bin: cov = (E[ij] - E[i]E[j]) computed as
 * (acc_ij - acc_i*acc_j/n)/n. Bins with no samples (n == 0) would
 * divide by zero in the original; they are now set to 0, and the
 * normalization is skipped when the variance bin itself is 0.
 */
static void cov_compute(cov_state *_this){
  int i;
  for(i=0;i<_this->sz;i++){
    if(_this->n[i]>0)
      _this->cov[i] =
       ((double)_this->acc_ij[i] -
        (double)_this->acc_i[i]*
        _this->acc_j[i]/_this->n[i])/_this->n[i];
    else
      _this->cov[i] = 0.;  /* no samples at this lag */
  }
  if(_this->cov[0]!=0.)
    for(i=1;i<_this->sz;i++)
      _this->cov[i] /= _this->cov[0];
  _this->cov[0]=1.;
}
/*
 * Walk the input video files (argv[1..]) in parallel, accumulating
 * covariance statistics from the luma plane of the first frame of
 * each. Each OpenMP thread writes only to its own _cov[tid] (and
 * _ctx[tid]) slot, so no locking is needed; the caller combines them
 * afterwards.
 */
static void process_files(trans_ctx *_ctx,
                          cov_state *_cov,
                          int _argc,
                          const char *_argv[]){
  int ai;
#pragma omp parallel for schedule(dynamic)
  for(ai=1;ai<_argc;ai++){
    FILE *fin;
    video_input vid;
    video_input_info info;
    video_input_ycbcr ycbcr;
    int tid;
    cov_state *cov;
    int x0,y0,x1,y1;
    fin=fopen(_argv[ai],"rb");
    if(fin==NULL){
      fprintf(stderr,"Could not open '%s' for reading.\n",_argv[ai]);
      continue;
    }
    /* NOTE(review): if video_input_open fails, fin is never fclosed
       here — verify whether video_input_open closes it on failure,
       otherwise this leaks one FILE per bad input. */
    if(video_input_open(&vid,fin)<0){
      fprintf(stderr,"Error reading video info from '%s'.\n",_argv[ai]);
      continue;
    }
    video_input_get_info(&vid,&info);
    /* NOTE(review): on fetch failure the opened vid is not closed via
       video_input_close — confirm intended. */
    if(video_input_fetch_frame(&vid,ycbcr,NULL)<0){
      fprintf(stderr,"Error reading first frame from '%s'.\n",_argv[ai]);
      continue;
    }
    tid=OD_OMP_GET_THREAD;
    cov=_cov+tid;
    /* active picture region within the padded frame */
    x0 = info.pic_x;
    y0 = info.pic_y;
    x1 = x0 + info.pic_w;
    y1 = y0 + info.pic_h;
    fprintf(stderr,"%s\n",_argv[ai]);
    /* map */
    {
      int stride=ycbcr[0].stride;
      const unsigned char *data=ycbcr[0].data;
#if COMPUTE_NATHAN
      /* block-based full covariance computation (unlord style) */
      int nxblocks=info.pic_w>>BLOCKSIZE_LOG;
      int nyblocks=info.pic_h>>BLOCKSIZE_LOG;
      trans_ctx *ctx=_ctx+tid;
# if USE_2D
      unsigned char buf[SUPPORT][SUPPORT];
      int x,y,i,j;
      image_ctx_init(&ctx->img,_argv[ai],nxblocks,nyblocks);
      /* slide a SUPPORT x SUPPORT window over the picture region */
      for(y=0;y<nyblocks*BLOCKSIZE-SUPPORT+1;y++){
        for(x=0;x<nxblocks*BLOCKSIZE-SUPPORT+1;x++){
          for(j=0;j<SUPPORT;j++){
            for(i=0;i<SUPPORT;i++){
              buf[j][i]=data[(y0+y+j)*stride+(x0+x+i)];
            }
          }
          trans_data_add(&ctx->td,(unsigned char *)buf);
        }
      }
# else
      unsigned char buf[SUPPORT];
      int x,y,z;
      image_ctx_init(&ctx->img,_argv[ai],nxblocks,nyblocks);
      /* add the rows */
      for(y=0;y<nyblocks*BLOCKSIZE;y++){
        for(x=0;x<nxblocks*BLOCKSIZE-SUPPORT+1;x++){
          for(z=0;z<SUPPORT;z++){
            buf[z]=data[(y+y0)*stride+x+x0+z];
          }
          trans_data_add(&ctx->td,buf);
        }
      }
      /* add the columns */
      for(y=0;y<nyblocks*BLOCKSIZE-SUPPORT+1;y++){
        for(x=0;x<nxblocks*BLOCKSIZE;x++){
          for(z=0;z<SUPPORT;z++){
            buf[z]=data[(y0+y+z)*stride+x+x0];
          }
          trans_data_add(&ctx->td,buf);
        }
      }
# endif
#endif
      /* Direct computation of collapsed covariance matrix (monty style) */
#if USE_2D
      cov_accumulate_2d(cov,data+y0*stride+x0,stride,x1-x0,y1-y0);
#else
      {
        int x,y;
        for(y=y0;y<y1;y++)
          cov_accumulate_1d(cov,data+y*stride+x0,1,x1-x0);
        for(x=x0;x<x1;x++)
          cov_accumulate_1d(cov,data+y0*stride+x,stride,y1-y0);
      }
#endif
    }
    video_input_close(&vid);
  }
}
#endif
#if USE_WAVELET
/* some lifting CDF (9/7) wavelet code from Google Code's axonlib */
/* http://code.google.com/p/axonlib/source/browse/trunk/extern/dwt97.c?spec=svn19&r=19 */
/* single stage of decomposition */
/* single stage of CDF(9/7) decomposition via lifting: predict/update
   pairs, scaling, then deinterleave into low band | high band.
   Boundary terms (x[n-1] += 2*a*x[n-2], x[0] += 2*a*x[1]) implement
   symmetric extension. n must be even and <= SUPPORT. */
static void fwt97_i(double* x,int n){
  double temp[SUPPORT];
  double a;
  int i;
  /* Predict 1 */
  a=-1.586134342;
  for (i=1;i<n-2;i+=2)
    x[i]+=a*(x[i-1]+x[i+1]);
  x[n-1]+=2*a*x[n-2];
  /* Update 1 */
  a=-0.05298011854;
  for (i=2;i<n;i+=2)
    x[i]+=a*(x[i-1]+x[i+1]);
  x[0]+=2*a*x[1];
  /* Predict 2 */
  a=0.8829110762;
  for (i=1;i<n-2;i+=2)
    x[i]+=a*(x[i-1]+x[i+1]);
  x[n-1]+=2*a*x[n-2];
  /* Update 2 */
  a=0.4435068522;
  for (i=2;i<n;i+=2)
    x[i]+=a*(x[i-1]+x[i+1]);
  x[0]+=2*a*x[1];
  /* Scale */
  a=1/1.149604398;
  for (i=0;i<n;i++)
  {
    if (i%2) x[i]*=a;
    else x[i]/=a;
  }
  /* Pack: evens (lowpass) to the front, odds (highpass) to the back */
  for (i=0;i<n;i++){
    if (i%2==0)
      temp[i/2]=x[i];
    else
      temp[n/2+i/2]=x[i];
  }
  for (i=0;i<n;i++) x[i]=temp[i];
}
/* single stage of reconstruction */
/* single stage of CDF(9/7) reconstruction: exact inverse of fwt97_i —
   interleave bands, undo scaling, then run the lifting steps in
   reverse order with negated coefficients. */
void iwt97_i(double* x,int n){
  double temp[SUPPORT];
  double a;
  int i;
  /* Unpack: interleave lowpass (front half) with highpass (back half) */
  for (i=0;i<n/2;i++){
    temp[i*2]=x[i];
    temp[i*2+1]=x[i+n/2];
  }
  for (i=0;i<n;i++) x[i]=temp[i];
  /* Undo scale */
  a=1.149604398;
  for (i=0;i<n;i++)
  {
    if (i%2) x[i]*=a;
    else x[i]/=a;
  }
  /* Undo update 2 */
  a=-0.4435068522;
  for (i=2;i<n;i+=2)
    x[i]+=a*(x[i-1]+x[i+1]);
  x[0]+=2*a*x[1];
  /* Undo predict 2 */
  a=-0.8829110762;
  for (i=1;i<n-2;i+=2)
    x[i]+=a*(x[i-1]+x[i+1]);
  x[n-1]+=2*a*x[n-2];
  /* Undo update 1 */
  a=0.05298011854;
  for (i=2;i<n;i+=2)
    x[i]+=a*(x[i-1]+x[i+1]);
  x[0]+=2*a*x[1];
  /* Undo predict 1 */
  a=1.586134342;
  for (i=1;i<n-2;i+=2)
    x[i]+=a*(x[i-1]+x[i+1]);
  x[n-1]+=2*a*x[n-2];
}
/* multistage decomposition */
void fwt97(double *out, int n, double *in, int support){
int i=n,j=support,k;
while((i&1)==0){
fwt97_i(in,j);
i>>=1;
for(k=0;k<i;k++)
out[i+k] = in[((((j*3)>>1)-i)>>1) + k];
j>>=1;
}
for(k=0;k<i;k++)
out[k] = in[((j-i)>>1) + k];
}
/* multistage reconstruction */
/* multistage reconstruction: inverse of fwt97. Scatters the n band
   coefficients of 'in' back to their centered positions in 'out'
   (length 'support', zero elsewhere), then applies iwt97_i at each
   doubling scale. */
void iwt97(double *out, int support, double *in, int n){
  int i=n,j=support,k;
  for(k=0;k<support;k++)
    out[k]=0;
  while((i&1)==0){
    i>>=1;
    for(k=0;k<i;k++)
      out[((((j*3)>>1)-i)>>1) + k]=in[i+k];
    j>>=1;
  }
  for(k=0;k<i;k++)
    out[((j-i)>>1) + k]=in[k];
  i<<=1;
  j<<=1;
  while(j<=support){
    iwt97_i(out,j);
    i<<=1;
    j<<=1;
  }
}
#endif
#if USE_KLT
/*
 * Cyclic Jacobi eigensolver for a symmetric support x support matrix.
 * 'cov' is diagonalized in place; 'out' accumulates the rotations and
 * ends up with the eigenvectors as its rows. Each sweep zeroes the
 * largest off-diagonal element until all are below 1e-11.
 */
void symeigen(double *out,
              double *cov,
              int support){
  int i;
  int j;
  int k;
  /* start from the identity */
  for(i=0;i<support;i++)
    for(j=0;j<support;j++)
      out[i*support+j]=i==j;
  for(;;){
    double mod=0.;
    /* find the largest off-diagonal element (i,j) */
    for(i=0,j=0,k=0;k<support;k++){
      int m;
      for(m=k+1;m<support;m++){
        double q;
        q=fabs(cov[k*support+m]);
        if(q>mod){
          mod=q;
          i=k;
          j=m;
        }
      }
    }
    if(mod<1E-11)break;
    {
      /* Givens rotation angle annihilating cov[i][j] */
      double th=0.5*atan2(2*cov[i*support+j],cov[i*support+i]-cov[j*support+j]);
      double c=cos(th);
      double s=sin(th);
      /* rotate columns i,j */
      for(k=0;k<support;k++){
        double t;
        t=c*cov[k*support+i]+s*cov[k*support+j];
        cov[k*support+j]=-s*cov[k*support+i]+c*cov[k*support+j];
        cov[k*support+i]=t;
      }
      /* rotate rows i,j */
      for(k=0;k<support;k++){
        double t;
        t=c*cov[i*support+k]+s*cov[j*support+k];
        cov[j*support+k]=-s*cov[i*support+k]+c*cov[j*support+k];
        cov[i*support+k]=t;
      }
      /* accumulate the same rotation into the eigenvector matrix */
      for(k=0;k<support;k++){
        double t;
        t=c*out[i*support+k]+s*out[j*support+k];
        out[j*support+k]=-s*out[i*support+k]+c*out[j*support+k];
        out[i*support+k]=t;
      }
    }
  }
  /* for(j=0;j<BLOCKSIZE;j++)eigenvalue[j]=cov[j][j]; don't need eigenvalues */
}
/*
 * Forward lapping in 2D: applies the pre-filter to the columns then
 * the rows of the central 2*BLOCKSIZE region of 'in' (modified in
 * place), then copies the central BLOCKSIZE x BLOCKSIZE block to
 * 'out'. _f selects the lapping filter parameters.
 */
void flap_2d(double out[BLOCKSIZE][BLOCKSIZE],
             double in[SUPPORT][SUPPORT],
             const int _f[]){
  int i,j;
#if USE_LAPPING
#if BLOCKSIZE_LOG>=OD_LOG_BSIZE0&&BLOCKSIZE_LOG<OD_LOG_BSIZE0+OD_NBSIZES
  /* columns */
  for(i=SUPPORT/2-BLOCKSIZE;i<SUPPORT/2+BLOCKSIZE;i++){
    double work[BLOCKSIZE*2];
    for(j=0;j<BLOCKSIZE*2;j++)
      work[j]=in[j+SUPPORT/2-BLOCKSIZE][i];
    (*NE_PRE_FILTER_DOUBLE[BLOCKSIZE_LOG-OD_LOG_BSIZE0])
     (&work[0],&work[0],_f);
    (*NE_PRE_FILTER_DOUBLE[BLOCKSIZE_LOG-OD_LOG_BSIZE0])
     (&work[BLOCKSIZE],&work[BLOCKSIZE],_f);
    for(j=0;j<BLOCKSIZE*2;j++)
      in[j+SUPPORT/2-BLOCKSIZE][i]=work[j];
  }
  /* rows */
  for(i=SUPPORT/2-BLOCKSIZE;i<SUPPORT/2+BLOCKSIZE;i++){
    (*NE_PRE_FILTER_DOUBLE[BLOCKSIZE_LOG-OD_LOG_BSIZE0])
     (&in[i][SUPPORT/2-BLOCKSIZE],&in[i][SUPPORT/2-BLOCKSIZE],_f);
    (*NE_PRE_FILTER_DOUBLE[BLOCKSIZE_LOG-OD_LOG_BSIZE0])
     (&in[i][SUPPORT/2],&in[i][SUPPORT/2],_f);
  }
#else
# error "Need a prefilter implementation for this block size."
#endif
#endif
  /* extract the centered BLOCKSIZE x BLOCKSIZE core */
  for(i=0;i<BLOCKSIZE;i++)
    for(j=0;j<BLOCKSIZE;j++)
      out[i][j]=in[i+SUPPORT/2-BLOCKSIZE/2][j+SUPPORT/2-BLOCKSIZE/2];
}
/*
 * Inverse lapping in 2D: zero-pads 'in' into the center of 'out',
 * then applies the post-filter to columns and rows of the central
 * 2*BLOCKSIZE region. Inverse of flap_2d.
 * Fix: the #error message said "prefilter" although this function
 * requires a POST-filter (consistent with b_synthesis_1d's message).
 */
void ilap_2d(double out[SUPPORT][SUPPORT],
             double in[BLOCKSIZE][BLOCKSIZE],
             const int _f[]){
  int i,j;
  for(i=0;i<SUPPORT;i++)
    for(j=0;j<SUPPORT;j++)
      out[i][j]=0;
  for(i=0;i<BLOCKSIZE;i++)
    for(j=0;j<BLOCKSIZE;j++)
      out[i+SUPPORT/2-BLOCKSIZE/2][j+SUPPORT/2-BLOCKSIZE/2]=in[i][j];
#if USE_LAPPING
#if BLOCKSIZE_LOG>=OD_LOG_BSIZE0&&BLOCKSIZE_LOG<OD_LOG_BSIZE0+OD_NBSIZES
  /* columns */
  for(i=SUPPORT/2-BLOCKSIZE;i<SUPPORT/2+BLOCKSIZE;i++){
    double work[BLOCKSIZE*2];
    for(j=0;j<BLOCKSIZE*2;j++)
      work[j]=out[j+SUPPORT/2-BLOCKSIZE][i];
    (*NE_POST_FILTER_DOUBLE[BLOCKSIZE_LOG-OD_LOG_BSIZE0])
     (&work[0],&work[0],_f);
    (*NE_POST_FILTER_DOUBLE[BLOCKSIZE_LOG-OD_LOG_BSIZE0])
     (&work[BLOCKSIZE],&work[BLOCKSIZE],_f);
    for(j=0;j<BLOCKSIZE*2;j++)
      out[j+SUPPORT/2-BLOCKSIZE][i]=work[j];
  }
  /* rows */
  for(i=SUPPORT/2-BLOCKSIZE;i<SUPPORT/2+BLOCKSIZE;i++){
    (*NE_POST_FILTER_DOUBLE[BLOCKSIZE_LOG-OD_LOG_BSIZE0])
     (&out[i][SUPPORT/2-BLOCKSIZE],&out[i][SUPPORT/2-BLOCKSIZE],_f);
    (*NE_POST_FILTER_DOUBLE[BLOCKSIZE_LOG-OD_LOG_BSIZE0])
     (&out[i][SUPPORT/2],&out[i][SUPPORT/2],_f);
  }
#else
# error "Need a postfilter implementation for this block size."
#endif
#endif
}
/*
 * Forward lapping in 4D: applies the pre-filter along each of the four
 * axes of the SUPPORT^4 covariance tensor in turn (one pass per axis,
 * in place), then extracts the centered BLOCKSIZE^4 core reshaped as a
 * (BLOCKSIZE^2) x (BLOCKSIZE^2) matrix for the 2D KLT.
 */
void flap_4d(double out[BLOCKSIZE*BLOCKSIZE][BLOCKSIZE*BLOCKSIZE],
             double in[SUPPORT][SUPPORT][SUPPORT][SUPPORT],
             const int _f[]){
  int i,j,k,l;
#if USE_LAPPING
#if BLOCKSIZE_LOG>=OD_LOG_BSIZE0&&BLOCKSIZE_LOG<OD_LOG_BSIZE0+OD_NBSIZES
  /* axis 0 */
  for(i=SUPPORT/2-BLOCKSIZE;i<SUPPORT/2+BLOCKSIZE;i++){
    for(j=SUPPORT/2-BLOCKSIZE;j<SUPPORT/2+BLOCKSIZE;j++){
      for(k=SUPPORT/2-BLOCKSIZE;k<SUPPORT/2+BLOCKSIZE;k++){
        double work[BLOCKSIZE*2];
        /* [ ][i][j][k] */
        for(l=0;l<BLOCKSIZE*2;l++)
          work[l]=in[l+SUPPORT/2-BLOCKSIZE][i][j][k];
        (*NE_PRE_FILTER_DOUBLE[BLOCKSIZE_LOG-OD_LOG_BSIZE0])
         (&work[0],&work[0],_f);
        (*NE_PRE_FILTER_DOUBLE[BLOCKSIZE_LOG-OD_LOG_BSIZE0])
         (&work[BLOCKSIZE],&work[BLOCKSIZE],_f);
        for(l=0;l<BLOCKSIZE*2;l++)
          in[l+SUPPORT/2-BLOCKSIZE][i][j][k]=work[l];
      }
    }
  }
  /* axis 1 */
  for(i=SUPPORT/2-BLOCKSIZE;i<SUPPORT/2+BLOCKSIZE;i++){
    for(j=SUPPORT/2-BLOCKSIZE;j<SUPPORT/2+BLOCKSIZE;j++){
      for(k=SUPPORT/2-BLOCKSIZE;k<SUPPORT/2+BLOCKSIZE;k++){
        double work[BLOCKSIZE*2];
        /* [i][ ][j][k] */
        for(l=0;l<BLOCKSIZE*2;l++)
          work[l]=in[i][l+SUPPORT/2-BLOCKSIZE][j][k];
        (*NE_PRE_FILTER_DOUBLE[BLOCKSIZE_LOG-OD_LOG_BSIZE0])
         (&work[0],&work[0],_f);
        (*NE_PRE_FILTER_DOUBLE[BLOCKSIZE_LOG-OD_LOG_BSIZE0])
         (&work[BLOCKSIZE],&work[BLOCKSIZE],_f);
        for(l=0;l<BLOCKSIZE*2;l++)
          in[i][l+SUPPORT/2-BLOCKSIZE][j][k]=work[l];
      }
    }
  }
  /* axis 2 */
  for(i=SUPPORT/2-BLOCKSIZE;i<SUPPORT/2+BLOCKSIZE;i++){
    for(j=SUPPORT/2-BLOCKSIZE;j<SUPPORT/2+BLOCKSIZE;j++){
      for(k=SUPPORT/2-BLOCKSIZE;k<SUPPORT/2+BLOCKSIZE;k++){
        double work[BLOCKSIZE*2];
        /* [i][j][ ][k] */
        for(l=0;l<BLOCKSIZE*2;l++)
          work[l]=in[i][j][l+SUPPORT/2-BLOCKSIZE][k];
        (*NE_PRE_FILTER_DOUBLE[BLOCKSIZE_LOG-OD_LOG_BSIZE0])
         (&work[0],&work[0],_f);
        (*NE_PRE_FILTER_DOUBLE[BLOCKSIZE_LOG-OD_LOG_BSIZE0])
         (&work[BLOCKSIZE],&work[BLOCKSIZE],_f);
        for(l=0;l<BLOCKSIZE*2;l++)
          in[i][j][l+SUPPORT/2-BLOCKSIZE][k]=work[l];
      }
    }
  }
  /* axis 3: contiguous in memory, filter runs in place */
  for(i=SUPPORT/2-BLOCKSIZE;i<SUPPORT/2+BLOCKSIZE;i++){
    for(j=SUPPORT/2-BLOCKSIZE;j<SUPPORT/2+BLOCKSIZE;j++){
      for(k=SUPPORT/2-BLOCKSIZE;k<SUPPORT/2+BLOCKSIZE;k++){
        /* [i][j][k][ ] */
        (*NE_PRE_FILTER_DOUBLE[BLOCKSIZE_LOG-OD_LOG_BSIZE0])
         (&in[i][j][k][SUPPORT/2-BLOCKSIZE],&in[i][j][k][SUPPORT/2-BLOCKSIZE],_f);
        (*NE_PRE_FILTER_DOUBLE[BLOCKSIZE_LOG-OD_LOG_BSIZE0])
         (&in[i][j][k][SUPPORT/2],&in[i][j][k][SUPPORT/2],_f);
      }
    }
  }
#else
# error "Need a prefilter implementation for this block size."
#endif
#endif
  /* extract the centered core as a flattened 2D matrix */
  for(i=0;i<BLOCKSIZE;i++)
    for(j=0;j<BLOCKSIZE;j++)
      for(k=0;k<BLOCKSIZE;k++)
        for(l=0;l<BLOCKSIZE;l++)
          out[i*BLOCKSIZE+j][k*BLOCKSIZE+l]=in
           [i+SUPPORT/2-BLOCKSIZE/2]
           [j+SUPPORT/2-BLOCKSIZE/2]
           [k+SUPPORT/2-BLOCKSIZE/2]
           [l+SUPPORT/2-BLOCKSIZE/2];
}
/*
 * Build a 1D lapped KLT from a full SUPPORT x SUPPORT covariance:
 * lap the covariance, then eigendecompose the central block.
 * NOTE: uses static work buffers, so this is not reentrant — do not
 * call from parallel regions.
 */
void gklt_1d(double klt[BLOCKSIZE][BLOCKSIZE],
             double cov[SUPPORT][SUPPORT],
             const int *_f){
  static double workA[SUPPORT][SUPPORT];
  static double workB[BLOCKSIZE][BLOCKSIZE];
  int i,j;
  for(i=0;i<SUPPORT;i++)
    for(j=0;j<SUPPORT;j++)
      workA[i][j]=cov[i][j];
  flap_2d(workB,workA,_f);
  symeigen(&klt[0][0],&workB[0][0],BLOCKSIZE);
}
/*
 * Build a 2D (non-separable) lapped KLT from a full 4D covariance
 * tensor. Not reentrant: static work buffers.
 */
void gklt_2d(double klt[BLOCKSIZE*BLOCKSIZE][BLOCKSIZE*BLOCKSIZE],
             double cov[SUPPORT][SUPPORT][SUPPORT][SUPPORT],
             const int *_f){
  static double workA[SUPPORT][SUPPORT][SUPPORT][SUPPORT];
  static double workB[BLOCKSIZE*BLOCKSIZE][BLOCKSIZE*BLOCKSIZE];
  int i,j,k,l;
  for(i=0;i<SUPPORT;i++)
    for(j=0;j<SUPPORT;j++)
      for(k=0;k<SUPPORT;k++)
        for(l=0;l<SUPPORT;l++)
          workA[i][j][k][l]=cov[i][j][k][l];
  flap_4d(workB,workA,_f);
  symeigen(&klt[0][0],&workB[0][0],BLOCKSIZE*BLOCKSIZE);
}
/*
 * Build a 1D lapped KLT from a collapsed (Toeplitz) covariance: entry
 * (i,j) of the full matrix is reconstructed as cov[|i-j|].
 * Not reentrant: static work buffers.
 */
void gklt_1d_collapsed(double klt[BLOCKSIZE][BLOCKSIZE],
                       double cov[SUPPORT],
                       const int *_f){
  static double workA[SUPPORT][SUPPORT];
  static double workB[BLOCKSIZE][BLOCKSIZE];
  int i,j;
  for(i=0;i<SUPPORT;i++)
    for(j=0;j<SUPPORT;j++)
      workA[i][j]=cov[abs(i-j)];
  flap_2d(workB,workA,_f);
  symeigen(&klt[0][0],&workB[0][0],BLOCKSIZE);
}
/*
 * Build a 2D lapped KLT from a collapsed covariance: the 4D tensor
 * entry (i,j,k,l) is reconstructed as cov[|i-k|][|j-l|] (stationarity
 * in both directions). Not reentrant: static work buffers.
 */
void gklt_2d_collapsed(double klt[BLOCKSIZE*BLOCKSIZE][BLOCKSIZE*BLOCKSIZE],
                       double cov[SUPPORT][SUPPORT],
                       const int *_f){
  static double workA[SUPPORT][SUPPORT][SUPPORT][SUPPORT];
  static double workB[BLOCKSIZE*BLOCKSIZE][BLOCKSIZE*BLOCKSIZE];
  int i,j,k,l;
  for(i=0;i<SUPPORT;i++)
    for(j=0;j<SUPPORT;j++)
      for(k=0;k<SUPPORT;k++)
        for(l=0;l<SUPPORT;l++)
          workA[i][j][k][l]=cov[abs(i-k)][abs(j-l)];
  flap_4d(workB,workA,_f);
  symeigen(&klt[0][0],&workB[0][0],BLOCKSIZE*BLOCKSIZE);
}
/* Forward KLT: out = klt * in, with klt a row-major
   support x support matrix (rows are the analysis basis vectors). */
void fklt(double *out,
          double *in,
          double *klt,
          int support){
  int row,col;
  for(row=0;row<support;row++){
    const double *basis=klt+row*support;
    double sum=0.;
    for(col=0;col<support;col++)
      sum+=basis[col]*in[col];
    out[row]=sum;
  }
}
/* Inverse KLT: out = klt^T * in. Because symeigen() produces an
   orthonormal basis, the transpose is the inverse of fklt's matrix. */
void iklt(double *out,
          double *in,
          double *klt,
          int support){
  int row,col;
  for(col=0;col<support;col++)
    out[col]=0.;
  for(row=0;row<support;row++)
    for(col=0;col<support;col++)
      out[col]+=klt[row*support+col]*in[row];
}
#endif
/*
 * 1D analysis of one SUPPORT-sample lapped region: gather strided
 * input, apply the configured transform (wavelet / lapped KLT / DCT /
 * none, selected at compile time), and write BLOCKSIZE coefficients
 * with the given output stride. _klt is only used when USE_KLT.
 */
void b_analysis_1d(double *_out,int _out_stride,const double *_in,int _in_stride,
                   const int *_f, double _klt[BLOCKSIZE][BLOCKSIZE]){
  int j;
  double t[SUPPORT];
  double w[BLOCKSIZE];
  for(j=0;j<SUPPORT;j++)
    t[j]=_in[j*_in_stride];
#if USE_WAVELET
  fwt97(w,BLOCKSIZE,t,SUPPORT);
  for(j=0;j<BLOCKSIZE;j++){
    _out[j*_out_stride]=w[j];
  }
#else
# if USE_LAPPING
# if BLOCKSIZE_LOG>=OD_LOG_BSIZE0&&BLOCKSIZE_LOG<OD_LOG_BSIZE0+OD_NBSIZES
  /* pre-filter the two BLOCKSIZE-wide halves around the center */
  (*NE_PRE_FILTER_DOUBLE[BLOCKSIZE_LOG-OD_LOG_BSIZE0])
   (&t[SUPPORT/2-BLOCKSIZE],&t[SUPPORT/2-BLOCKSIZE],_f);
  (*NE_PRE_FILTER_DOUBLE[BLOCKSIZE_LOG-OD_LOG_BSIZE0])
   (&t[SUPPORT/2],&t[SUPPORT/2],_f);
# else
#  error "Need a prefilter implementation for this block size."
# endif
# endif
# if USE_KLT
  fklt(&w[0],&t[SUPPORT/2-BLOCKSIZE/2],&_klt[0][0],BLOCKSIZE);
# elif USE_DCT
# if BLOCKSIZE_LOG>=OD_LOG_BSIZE0&&BLOCKSIZE_LOG<OD_LOG_BSIZE0+OD_NBSIZES
  (*OD_FDCT_1D_DOUBLE[BLOCKSIZE_LOG-OD_LOG_BSIZE0])
   (w,&t[SUPPORT/2-BLOCKSIZE/2],1);
# else
#  error "Need an fDCT implementation for this block size."
# endif
# else
  /* no transform: pass the central block through */
  for(j=0;j<BLOCKSIZE;j++)
    w[j]=t[j+SUPPORT/2-BLOCKSIZE/2];
# endif
  for(j=0;j<BLOCKSIZE;j++)
    _out[j*_out_stride]=w[j];
#endif
}
/*
 * 2D analysis of one lapped region. The KLT path is a full
 * non-separable 2D transform (lap then one big matrix multiply);
 * the DCT/wavelet paths apply b_analysis_1d separably on rows then
 * columns.
 */
void b_analysis_2d(double *_out,int _out_stride_i,int _out_stride_j,
                   const double *_in,int _in_stride_i,int _in_stride_j,
                   const int *_f, double _klt[BLOCKSIZE*BLOCKSIZE][BLOCKSIZE*BLOCKSIZE]){
#if USE_KLT
  /* KLT is a non-separable 2D transform */
  double lap[SUPPORT][SUPPORT];
  double work[BLOCKSIZE][BLOCKSIZE];
  double temp[BLOCKSIZE][BLOCKSIZE];
  int i,j;
  for(i=0;i<SUPPORT;i++)
    for(j=0;j<SUPPORT;j++)
      lap[i][j]=*(_in+i*_in_stride_i+j*_in_stride_j);
  flap_2d(work,lap,_f);
  fklt(&temp[0][0],&work[0][0],&_klt[0][0],BLOCKSIZE*BLOCKSIZE);
  for(i=0;i<BLOCKSIZE;i++)
    for(j=0;j<BLOCKSIZE;j++)
      *(_out+i*_out_stride_i+j*_out_stride_j)=temp[i][j];
#else
  double work[SUPPORT][BLOCKSIZE];
  int i;
  /* DCT and DWT are separable 1D transforms */
  /* lapping performed inside b_analysis */
  for(i=0;i<SUPPORT;i++)
    b_analysis_1d(&work[i][0],1,_in+i*_in_stride_i,_in_stride_j,_f,NULL);
  for(i=0;i<BLOCKSIZE;i++)
    b_analysis_1d(_out+_out_stride_i*i,_out_stride_j,&work[0][i],BLOCKSIZE,_f,NULL);
#endif
}
/*Inverse 1D block transform (inverse of b_analysis_1d).
  Reads BLOCKSIZE coefficients from _in (stride _in_stride) and writes
  SUPPORT reconstructed samples to _out (stride _out_stride); samples
  outside the transformed window come out as zero.
  _f:   lapping filter parameters (only read when USE_LAPPING).
  _klt: KLT basis (only read when USE_KLT; callers pass NULL otherwise).*/
void b_synthesis_1d(double *_out,int _out_stride,const double *_in,int _in_stride,
 const int *_f, double _klt[BLOCKSIZE][BLOCKSIZE]){
  int j;
  double w[SUPPORT];
  double t[SUPPORT];
  /*Zero the full window: only BLOCKSIZE of SUPPORT entries are filled.*/
  for(j=0;j<SUPPORT;j++){
    t[j]=0;
    w[j]=0;
  }
#if USE_WAVELET
  for(j=0;j<BLOCKSIZE;j++)
    w[j]=_in[j*_in_stride];
  iwt97(t,SUPPORT,w,BLOCKSIZE);
#else
  /*Place the coefficients in the centered BLOCKSIZE window.*/
  for(j=0;j<BLOCKSIZE;j++){
    w[SUPPORT/2-BLOCKSIZE/2+j]=_in[j*_in_stride];
  }
# if USE_KLT
  iklt(&t[SUPPORT/2-BLOCKSIZE/2],&w[SUPPORT/2-BLOCKSIZE/2],&_klt[0][0],BLOCKSIZE);
# elif USE_DCT
# if BLOCKSIZE_LOG>=OD_LOG_BSIZE0&&BLOCKSIZE_LOG<OD_LOG_BSIZE0+OD_NBSIZES
  (*OD_IDCT_1D_DOUBLE[BLOCKSIZE_LOG-OD_LOG_BSIZE0])
   (&t[SUPPORT/2-BLOCKSIZE/2],1,&w[SUPPORT/2-BLOCKSIZE/2]);
# else
# error "Need an iDCT implementation for this block size."
# endif
# else
  /*No transform configured: copy straight through.*/
  for(j=0;j<SUPPORT;j++)
    t[j]=w[j];
# endif
# if USE_LAPPING
# if BLOCKSIZE_LOG>=OD_LOG_BSIZE0&&BLOCKSIZE_LOG<OD_LOG_BSIZE0+OD_NBSIZES
  /*Post-filter the two block boundaries, mirroring the analysis side.*/
  (*NE_POST_FILTER_DOUBLE[BLOCKSIZE_LOG-OD_LOG_BSIZE0])
   (&t[SUPPORT/2-BLOCKSIZE],&t[SUPPORT/2-BLOCKSIZE],_f);
  (*NE_POST_FILTER_DOUBLE[BLOCKSIZE_LOG-OD_LOG_BSIZE0])
   (&t[SUPPORT/2],&t[SUPPORT/2],_f);
# else
# error "Need a postfilter implementation for this block size."
# endif
# endif
#endif
  for(j=0;j<SUPPORT;j++)
    _out[j*_out_stride]=t[j];
}
/*Inverse 2D block transform (inverse of b_analysis_2d).
  Reads BLOCKSIZE x BLOCKSIZE coefficients from _in (strides
  _in_stride_i/_j) and writes a SUPPORT x SUPPORT reconstruction to _out
  (strides _out_stride_i/_j).
  _f:   lapping filter parameters.
  _klt: 2D KLT basis (only read when USE_KLT).*/
void b_synthesis_2d(double *_out,int _out_stride_i,int _out_stride_j,
 const double *_in,int _in_stride_i,int _in_stride_j,
 const int *_f, double _klt[BLOCKSIZE*BLOCKSIZE][BLOCKSIZE*BLOCKSIZE]){
#if USE_KLT
  /* KLT is a non-separable 2D transform */
  double temp[BLOCKSIZE][BLOCKSIZE];
  double work[BLOCKSIZE][BLOCKSIZE];
  double lap[SUPPORT][SUPPORT];
  int i,j;
  /*Inverse KLT on the flattened block, then un-lap to full support.*/
  for(i=0;i<BLOCKSIZE;i++)
    for(j=0;j<BLOCKSIZE;j++)
      temp[i][j]=*(_in+i*_in_stride_i+j*_in_stride_j);
  iklt(&work[0][0],&temp[0][0],&_klt[0][0],BLOCKSIZE*BLOCKSIZE);
  ilap_2d(lap,work,_f);
  for(i=0;i<SUPPORT;i++)
    for(j=0;j<SUPPORT;j++)
      *(_out+i*_out_stride_i+j*_out_stride_j)=lap[i][j];
#else
  double work[SUPPORT][BLOCKSIZE];
  int i;
  /* DCT and DWT are separable 1D transforms */
  /* lapping performed inside b_analysis */
  /*Column pass into work[], then row pass into _out.*/
  for(i=0;i<BLOCKSIZE;i++)
    b_synthesis_1d(&work[0][i],BLOCKSIZE,_in+i*_in_stride_i,_in_stride_j,_f,NULL);
  for(i=0;i<SUPPORT;i++)
    b_synthesis_1d(_out+_out_stride_i*i,_out_stride_j,&work[i][0],1,_f,NULL);
#endif
}
#if USE_2D
/*Core 2D coding-gain computation (dB per coefficient).
  rggt: on entry holds R*(G2*P*G1)^T as built by the caller; it is reused
        here as scratch for the synthesis basis rows, so its input
        contents are consumed by the first analysis pass only.
  _f:   lapping filter parameters, forwarded to analysis/synthesis.
  _klt: 2D KLT basis (only read when USE_KLT).
  Fix: the first b_analysis_2d call previously passed the file-scope
  global 'f' instead of the _f parameter (the synthesis call below
  already used _f); both now consistently use the parameter.*/
static double cg_2d_i(double rggt[SUPPORT][SUPPORT][BLOCKSIZE][BLOCKSIZE],
 const int *_f,
 double _klt[BLOCKSIZE*BLOCKSIZE][BLOCKSIZE*BLOCKSIZE]){
  double r[BLOCKSIZE][BLOCKSIZE][BLOCKSIZE][BLOCKSIZE];
  double s[SUPPORT][BLOCKSIZE];
  double ggrggt[BLOCKSIZE][BLOCKSIZE][BLOCKSIZE][BLOCKSIZE];
  double cg=0;
  int i;
  int j;
  int v;
  int u;
  int k;
  int l;
  /* G1*P*G2*R*(G2*P*G1)^T */
  for(v=0;v<BLOCKSIZE;v++)
    for(j=0;j<BLOCKSIZE;j++)
      b_analysis_2d(&ggrggt[v][j][0][0],
       1,BLOCKSIZE,
       &rggt[0][0][v][j],
       BLOCKSIZE*BLOCKSIZE*SUPPORT,
       BLOCKSIZE*BLOCKSIZE,
       _f,_klt);
  /* H1*P*H2 */
  /*Start from the identity on the flattened block index...*/
  for(i=0;i<BLOCKSIZE;i++)
    for(j=0;j<BLOCKSIZE;j++)
      for(k=0;k<BLOCKSIZE;k++)
        for(l=0;l<BLOCKSIZE;l++)
          r[i][j][k][l] = (i*BLOCKSIZE+j==k*BLOCKSIZE+l)?1:0;
  /*...and synthesize each basis vector into rggt (reused as scratch).*/
  for(i=0;i<BLOCKSIZE;i++)
    for(j=0;j<BLOCKSIZE;j++)
      b_synthesis_2d(&rggt[0][0][i][j],
       BLOCKSIZE*BLOCKSIZE,
       SUPPORT*BLOCKSIZE*BLOCKSIZE,
       &r[i][j][0][0],
       BLOCKSIZE,1,
       _f,_klt);
  /* ((H1*P*H2)^T*H1*P*H2)_ii */
  for(i=0;i<BLOCKSIZE;i++){
    for(j=0;j<BLOCKSIZE;j++){
      s[i][j]=0;
      for(u=0;u<SUPPORT;u++){
        for(v=0;v<SUPPORT;v++){
          s[i][j]+=rggt[u][v][i][j]*rggt[u][v][i][j];
        }
      }
    }
  }
  /* (G1*P*G2*R*(G1*P*G2)^T)_ii * ((H1*P*H2)^T*H1*P*H2)_ii */
  for(i=0;i<BLOCKSIZE;i++)
    for(j=0;j<BLOCKSIZE;j++)
      cg-=10*log10(ggrggt[i][j][i][j]*s[i][j]);
  return cg/(BLOCKSIZE*BLOCKSIZE);
}
/*Coding gain of the configured 2D transform from the full 4D
  autocorrelation _in.
  _f: lapping filter parameters.
  Fix: the cg_2d_i call previously passed the file-scope global 'f'
  instead of the _f parameter; it now forwards _f.
  NOTE(review): the malloc result is used unchecked, as in the rest of
  this tool — confirm an abort-on-OOM policy is acceptable here.*/
double cg_2d(double _in[SUPPORT][SUPPORT][SUPPORT][SUPPORT],
 const int *_f){
  int v;
  int j;
  double ret;
  /*rggt is too large for the stack; allocate it on the heap.*/
  double (*rggt)[SUPPORT][BLOCKSIZE][BLOCKSIZE] =
   (double (*)[SUPPORT][BLOCKSIZE][BLOCKSIZE])
   malloc(SUPPORT*SUPPORT*BLOCKSIZE*BLOCKSIZE*sizeof(****rggt));
  double klt[BLOCKSIZE*BLOCKSIZE][BLOCKSIZE*BLOCKSIZE];
#if USE_KLT
  gklt_2d(klt,_in,_f);
#endif
  /* R*(G2*P*G1)^T */
  for(v=0;v<SUPPORT;v++)
    for(j=0;j<SUPPORT;j++)
      b_analysis_2d(&rggt[v][j][0][0],
       1,BLOCKSIZE,
       &_in[0][0][v][j],
       SUPPORT*SUPPORT*SUPPORT,
       SUPPORT*SUPPORT,
       _f,klt);
  ret = cg_2d_i(rggt,_f,klt);
  free(rggt);
  return ret;
}
/*Coding gain of the configured 2D transform from a collapsed
  (stationarity-assuming) autocorrelation: the full correlation between
  positions (u,i) and (v,j) is reconstructed as _in[|u-v|][|i-j|].
  _f: lapping filter parameters.
  Fix: the cg_2d_i call previously passed the file-scope global 'f'
  instead of the _f parameter; it now forwards _f.*/
double cg_2d_collapsed(double _in[SUPPORT][SUPPORT],const int *_f){
  int v;
  int u;
  int j;
  int i;
  double ret;
  double r[SUPPORT][SUPPORT];
  /*rggt is too large for the stack; allocate it on the heap.*/
  double (*rggt)[SUPPORT][BLOCKSIZE][BLOCKSIZE] =
   (double (*)[SUPPORT][BLOCKSIZE][BLOCKSIZE])
   malloc(SUPPORT*SUPPORT*BLOCKSIZE*BLOCKSIZE*sizeof(****rggt));
  double klt[BLOCKSIZE*BLOCKSIZE][BLOCKSIZE*BLOCKSIZE];
#if USE_KLT
  gklt_2d_collapsed(klt,_in,_f);
#endif
  /* R*(G2*P*G1)^T */
  for(v=0;v<SUPPORT;v++){
    for(j=0;j<SUPPORT;j++){
      /*Expand the collapsed correlation for this (v,j) anchor.*/
      for(u=0;u<SUPPORT;u++)
        for(i=0;i<SUPPORT;i++)
          r[u][i]=_in[abs(u-v)][abs(i-j)];
      b_analysis_2d(&rggt[v][j][0][0],
       1,BLOCKSIZE,
       &r[0][0],SUPPORT,1,_f,klt);
    }
  }
  ret = cg_2d_i(rggt,_f,klt);
  free(rggt);
  return ret;
}
#else
/*Core 1D coding-gain computation (dB per coefficient).
  rgt: on entry holds R*G^T as built by the caller; NOTE that it is
       overwritten below and reused as scratch for the synthesis basis
       columns (H), so the G*R*G^T pass must run first.
  _f:  lapping filter parameters, forwarded to analysis/synthesis.
  klt: KLT basis (only read when USE_KLT).*/
static double cg_1d_i(double rgt[SUPPORT][BLOCKSIZE],
 const int *_f,
 double klt[BLOCKSIZE][BLOCKSIZE]){
  int j;
  int i;
  double r[BLOCKSIZE];
  double grgt[BLOCKSIZE][BLOCKSIZE];
  double cg=0;
  /* G*R*G^T */
  for(i=0;i<BLOCKSIZE;i++)
    b_analysis_1d(&grgt[0][i],BLOCKSIZE,&rgt[0][i],BLOCKSIZE,_f,klt);
  /* H */
  /*Synthesize each unit vector; columns of H land in rgt (scratch).*/
  for(j=0;j<BLOCKSIZE;j++){
    for(i=0;i<BLOCKSIZE;i++){
      r[i]=i==j?1:0;
    }
    b_synthesis_1d(&rgt[0][j],BLOCKSIZE,r,1,_f,klt);
  }
  /* (G*R*G^T)_ii * (H^T*H)_ii */
  for(j=0;j<BLOCKSIZE;j++){
    double h=0;
    for(i=0;i<SUPPORT;i++){
      h+=rgt[i][j]*rgt[i][j];
    }
    cg-=10*log10(grgt[j][j]*h);
  }
  return cg/BLOCKSIZE;
}
/*Coding gain of the configured 1D transform from the full SUPPORT x
  SUPPORT autocorrelation 'in'.
  _f: lapping filter parameters.
  Fix: the cg_1d_i call previously passed the file-scope global 'f'
  instead of the _f parameter (the analysis loop already used _f);
  it now forwards _f.*/
double cg_1d(double in[SUPPORT][SUPPORT],const int *_f){
  int j;
  double rgt[SUPPORT][BLOCKSIZE];
  double klt[BLOCKSIZE][BLOCKSIZE];
#if USE_KLT
  gklt_1d(klt,in,_f);
#endif
  /* R*G^T */
  for(j=0;j<SUPPORT;j++){
    b_analysis_1d(&rgt[j][0],1,in[j],1,_f,klt);
  }
  return cg_1d_i(rgt,_f,klt);
}
/*Coding gain of the configured 1D transform from a collapsed
  autocorrelation: the correlation between positions i and j is
  reconstructed as in[|i-j|].
  _f: lapping filter parameters.
  Fix: the cg_1d_i call previously passed the file-scope global 'f'
  instead of the _f parameter; it now forwards _f.*/
double cg_1d_collapsed(double in[SUPPORT],const int *_f){
  int j;
  int i;
  double r[SUPPORT];
  double rgt[SUPPORT][BLOCKSIZE];
  double klt[BLOCKSIZE][BLOCKSIZE];
#if USE_KLT
  gklt_1d_collapsed(klt,in,_f);
#endif
  /* R*G^T */
  for(j=0;j<SUPPORT;j++){
    /*Expand the collapsed correlation for anchor position j.*/
    for(i=0;i<SUPPORT;i++){
      r[i]=in[abs(i-j)];
    }
    b_analysis_1d(&rgt[j][0],1,r,1,_f,klt);
  }
  return cg_1d_i(rgt,_f,klt);
}
#endif
#if USE_FILES
/*Driver (USE_FILES build): accumulate covariance statistics from the
  input files named on the command line, then report the coding gain of
  the configured transform.  With COMPUTE_NATHAN, a second statistics
  path (trans_data) is also accumulated for comparison.*/
int main(int _argc,const char *_argv[]){
  cov_state cvs[NUM_PROCS];
#if COMPUTE_NATHAN
  trans_ctx ctx[NUM_PROCS];
  double r[SUPPORT*SUPPORT]; /* maximum for 2d */
#else
  trans_ctx *ctx=NULL;
#endif
  int i;
  /*Select the lapping filter parameters for the compiled block size
    (global 'f' is consumed by the cg_* routines below).*/
#if BLOCKSIZE==4
  f=OD_FILTER_PARAMS4;
#elif BLOCKSIZE==8
  f=OD_FILTER_PARAMS8;
#elif BLOCKSIZE==16
  f=OD_FILTER_PARAMS16;
#else
# error "Need filter params for this block size."
#endif
  /*One covariance accumulator per worker thread.*/
  for(i=0;i<NUM_PROCS;i++){
#if USE_2D
    cov_init(&cvs[i],SUPPORT*SUPPORT);
#else
    cov_init(&cvs[i],SUPPORT);
#endif
  }
#if COMPUTE_NATHAN
  for(i=0;i<NUM_PROCS;i++){
#if USE_2D
    trans_data_init(&ctx[i].td,SUPPORT*SUPPORT);
#else
    trans_data_init(&ctx[i].td,SUPPORT);
#endif
  }
#endif
  OD_OMP_SET_THREADS(NUM_PROCS);
  process_files(ctx,cvs,_argc,_argv);
  /*Reduce all per-thread accumulators into slot 0.*/
  for(i=1;i<NUM_PROCS;i++)
    cov_combine(&cvs[0],&cvs[i]);
  cov_compute(&cvs[0]);
#if COMPUTE_NATHAN
  for(i=1;i<NUM_PROCS;i++)
    trans_data_combine(&ctx[0].td,&ctx[i].td);
  trans_data_normalize(&ctx[0].td);
#endif
#if PRINT_COV
  {
    int i,j;
    fprintf(stdout,"collapsed_cov=\n");
    for(j=0;j<cvs[0].sz/SUPPORT;j++){
      for(i=0;i<SUPPORT;i++){
        fprintf(stdout,"%s %- 12.6G",i>0?",":"",cvs[0].cov[j*SUPPORT+i]);
      }
      fprintf(stdout,"\n");
    }
  }
#endif
  /*Report coding gains from the accumulated statistics.*/
#if USE_2D
#if COMPUTE_NATHAN
  fprintf(stdout,"original cg=%-24.16G\n",
   cg_2d((double(*)[SUPPORT][SUPPORT][SUPPORT])ctx[0].td.cov,f));
  trans_data_collapse(&ctx[0].td,SUPPORT,r);
  fprintf(stdout,"collapse cg=%-24.16G\n",
   cg_2d_collapsed((double(*)[SUPPORT])r,f));
#endif
  fprintf(stdout,"monty cg=%-24.16G\n",
   cg_2d_collapsed((double(*)[SUPPORT])cvs[0].cov,f));
#else
#if COMPUTE_NATHAN
  fprintf(stdout,"original cg=%-24.16G\n",
   cg_1d((double (*)[SUPPORT])ctx[0].td.cov,f));
  trans_data_collapse(&ctx[0].td,1,r);
  fprintf(stdout,"collapse cg=%-24.16G\n",
   cg_1d_collapsed(r,f));
#endif
  fprintf(stdout,"monty cg=%-24.16G\n",
   cg_1d_collapsed(cvs[0].cov,f));
#endif
  /*Release all accumulators.*/
  for(i=0;i<NUM_PROCS;i++)
    cov_clear(&cvs[i]);
#if COMPUTE_NATHAN
  for(i=0;i<NUM_PROCS;i++)
    trans_data_clear(&ctx[i].td);
#endif
  return EXIT_SUCCESS;
}
#else
/*Driver (non-USE_FILES build): instead of measuring statistics from
  input files, synthesize a collapsed AR(1) autocorrelation with
  rho=0.95 and report the coding gain of the configured transform.
  Command-line arguments are accepted but unused in this build.*/
int main(int _argc,const char *_argv[]){
#if USE_2D
  double cov[SUPPORT][SUPPORT];
  double *r=&cov[0][0];
#else
  double cov[SUPPORT];
  double *r=&cov[0];
#endif
  /*Select the lapping filter parameters for the compiled block size
    (global 'f' is consumed by the cg_* routines below).*/
#if BLOCKSIZE==4
  f=OD_FILTER_PARAMS4;
#elif BLOCKSIZE==8
  f=OD_FILTER_PARAMS8;
#elif BLOCKSIZE==16
  f=OD_FILTER_PARAMS16;
#else
# error "Need filter params for this block size."
#endif
# if USE_2D
  auto_regressive_collapsed(r,SUPPORT*SUPPORT,SUPPORT,0.95);
  fprintf(stdout,"AR p=.95 cg=%-24.18G\n",cg_2d_collapsed(cov,f));
# else
  auto_regressive_collapsed(r,SUPPORT,1,0.95);
  fprintf(stdout,"AR p=.95 cg=%-24.18G\n",cg_1d_collapsed(cov,f));
# endif
  return EXIT_SUCCESS;
}
#endif
|
DRB006-indirectaccess2-orig-yes.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
Two pointers have a distance of 12 (p1 - p2 = 12).
They are used as base addresses for indirect array accesses using an index set (another array).
The index set has two indices with a distance of 12 :
indexSet[5]- indexSet[0] = 533 - 521 = 12
So there is loop carried dependence (e.g. between loops with index values of 0 and 5).
We use the default loop scheduling (static even) in OpenMP.
It is possible that two dependent iterations will be scheduled
within the same chunk to the same thread, in which case no data race
manifests at runtime.
When N is 180, the two iterations with i=0 and i=5 have a loop-carried dependence.
For static even scheduling, we must have at least 36 threads (180/36=5 iterations)
so iteration 0 and 5 will be scheduled to two different threads.
Data race pair: xa1[idx]@128:5 vs. xa2[idx]@129:5
*/
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#define N 180
/* Indirect-access index set. It contains pairs of indices 12 apart
 * (e.g. 521 and 533 below), which alias through xa1/xa2 in main()
 * because xa2 = xa1 + 12. */
int indexSet[N] = {
521, 523, 525, 527, 529, 533, // 521+12=533
547, 549, 551, 553, 555, 557,
573, 575, 577, 579, 581, 583,
599, 601, 603, 605, 607, 609,
625, 627, 629, 631, 633, 635,
651, 653, 655, 657, 659, 661,
859, 861, 863, 865, 867, 869,
885, 887, 889, 891, 893, 895,
911, 913, 915, 917, 919, 921,
937, 939, 941, 943, 945, 947,
963, 965, 967, 969, 971, 973,
989, 991, 993, 995, 997, 999,
1197, 1199, 1201, 1203, 1205, 1207,
1223, 1225, 1227, 1229, 1231, 1233,
1249, 1251, 1253, 1255, 1257, 1259,
1275, 1277, 1279, 1281, 1283, 1285,
1301, 1303, 1305, 1307, 1309, 1311,
1327, 1329, 1331, 1333, 1335, 1337,
1535, 1537, 1539, 1541, 1543, 1545,
1561, 1563, 1565, 1567, 1569, 1571,
1587, 1589, 1591, 1593, 1595, 1597,
1613, 1615, 1617, 1619, 1621, 1623,
1639, 1641, 1643, 1645, 1647, 1649,
1665, 1667, 1669, 1671, 1673, 1675,
1873, 1875, 1877, 1879, 1881, 1883,
1899, 1901, 1903, 1905, 1907, 1909,
1925, 1927, 1929, 1931, 1933, 1935,
1951, 1953, 1955, 1957, 1959, 1961,
1977, 1979, 1981, 1983, 1985, 1987,
2003, 2005, 2007, 2009, 2011, 2013};
/* DataRaceBench kernel: two pointers into the same allocation, offset
 * by 12, are updated through an indirect index set that itself contains
 * index pairs 12 apart, creating a loop-carried dependence between
 * iterations (e.g. i=0 and i=5).
 * NOTE(review): the file's header documents a data race pair on
 * xa1[idx]/xa2[idx], which implies the update loop below is intended
 * to run under an OpenMP parallel for; no pragma is visible on it in
 * this copy — confirm against the upstream benchmark before relying
 * on this file's race classification. Do not "fix" the aliasing: it
 * is the deliberate subject of the benchmark. */
int main (int argc, char* argv[])
{
  double * base = (double*) malloc(sizeof(double)* (2013+12+1));
  if (base == 0)
  {
    printf ("Error in malloc(). Aborting ...\n");
    return 1;
  }
  /* xa1 and xa2 alias: xa2[i] is the same element as xa1[i+12]. */
  double * xa1 = base;
  double * xa2 = xa1 + 12;
  int i;
  // initialize segments touched by indexSet
#pragma omp parallel for
  for (i =521; i<= 2025; ++i)
  {
    base[i]=0.5*i;
  }
  /* Indirect updates; index pairs 12 apart make iterations dependent. */
  for (i =0; i< N; ++i)
  {
    int idx = indexSet[i];
    xa1[idx]+= 1.0;
    xa2[idx]+= 3.0;
  }
  printf("x1[999]=%lf xa2[1285]=%lf\n", xa1[999], xa2[1285]);
  free (base);
  return 0;
}
|
t_cholmod_super_numeric.c | /* ========================================================================== */
/* === Supernodal/t_cholmod_super_numeric =================================== */
/* ========================================================================== */
/* -----------------------------------------------------------------------------
* CHOLMOD/Supernodal Module. Copyright (C) 2005-2012, Timothy A. Davis
* The CHOLMOD/Supernodal Module is licensed under Version 2.0 of the GNU
* General Public License. See gpl.txt for a text of the license.
* CHOLMOD is also available under other licenses; contact authors for details.
* http://www.suitesparse.com
* -------------------------------------------------------------------------- */
/* Template routine for cholmod_super_numeric. All xtypes supported, except
* that a zomplex A and F result in a complex L (there is no supernodal
* zomplex L).
*/
/* ========================================================================== */
/* === complex arithmetic =================================================== */
/* ========================================================================== */
#include "cholmod_template.h"
#undef L_ENTRY
#undef L_CLEAR
#undef L_ASSIGN
#undef L_MULTADD
#undef L_ASSEMBLE
#undef L_ASSEMBLESUB
#ifdef REAL
/* -------------------------------------------------------------------------- */
/* A, F, and L are all real */
/* -------------------------------------------------------------------------- */
#define L_ENTRY 1
#define L_CLEAR(Lx,p) Lx [p] = 0
#define L_ASSIGN(Lx,q, Ax,Az,p) Lx [q] = Ax [p]
#define L_MULTADD(Lx,q, Ax,Az,p, f) Lx [q] += Ax [p] * f [0]
#define L_ASSEMBLE(Lx,q,b) Lx [q] += b [0]
#define L_ASSEMBLESUB(Lx,q,C,p) Lx [q] -= C [p]
#else
/* -------------------------------------------------------------------------- */
/* A and F are complex or zomplex, L and C are complex */
/* -------------------------------------------------------------------------- */
#define L_ENTRY 2
#define L_CLEAR(Lx,p) Lx [2*(p)] = 0 ; Lx [2*(p)+1] = 0
#define L_ASSEMBLE(Lx,q,b) Lx [2*(q)] += b [0] ;
#define L_ASSEMBLESUB(Lx,q,C,p) \
Lx [2*(q) ] -= C [2*(p) ] ; \
Lx [2*(q)+1] -= C [2*(p)+1] ;
#ifdef COMPLEX
/* -------------------------------------------------------------------------- */
/* A, F, L, and C are all complex */
/* -------------------------------------------------------------------------- */
#define L_ASSIGN(Lx,q, Ax,Az,p) \
Lx [2*(q) ] = Ax [2*(p) ] ; \
Lx [2*(q)+1] = Ax [2*(p)+1]
#define L_MULTADD(Lx,q, Ax,Az,p, f) \
Lx [2*(q) ] += Ax [2*(p) ] * f [0] - Ax [2*(p)+1] * f [1] ; \
Lx [2*(q)+1] += Ax [2*(p)+1] * f [0] + Ax [2*(p) ] * f [1]
#else
/* -------------------------------------------------------------------------- */
/* A and F are zomplex, L and C is complex */
/* -------------------------------------------------------------------------- */
#define L_ASSIGN(Lx,q, Ax,Az,p) \
Lx [2*(q) ] = Ax [p] ; \
Lx [2*(q)+1] = Az [p] ;
#define L_MULTADD(Lx,q, Ax,Az,p, f) \
Lx [2*(q) ] += Ax [p] * f [0] - Az [p] * f [1] ; \
Lx [2*(q)+1] += Az [p] * f [0] + Ax [p] * f [1]
#endif
#endif
/* ========================================================================== */
/* === t_cholmod_super_numeric ============================================== */
/* ========================================================================== */
/* This function returns FALSE only if integer overflow occurs in the BLAS.
* It returns TRUE otherwise whether or not the matrix is positive definite. */
static int TEMPLATE (cholmod_super_numeric)
(
/* ---- input ---- */
cholmod_sparse *A, /* matrix to factorize */
cholmod_sparse *F, /* F = A' or A(:,f)' */
double beta [2], /* beta*I is added to diagonal of matrix to factorize */
/* ---- in/out --- */
cholmod_factor *L, /* factorization */
/* -- workspace -- */
cholmod_dense *Cwork, /* size (L->maxcsize)-by-1 */
/* --------------- */
cholmod_common *Common
)
{
double one [2], zero [2], tstart ;
double *Lx, *Ax, *Fx, *Az, *Fz, *C ;
Int *Super, *Head, *Ls, *Lpi, *Lpx, *Map, *SuperMap, *RelativeMap, *Next,
*Lpos, *Fp, *Fi, *Fnz, *Ap, *Ai, *Anz, *Iwork, *Next_save, *Lpos_save,
*Previous;
Int nsuper, n, j, i, k, s, p, pend, k1, k2, nscol, psi, psx, psend, nsrow,
pj, d, kd1, kd2, info, ndcol, ndrow, pdi, pdx, pdend, pdi1, pdi2, pdx1,
ndrow1, ndrow2, px, dancestor, sparent, dnext, nsrow2, ndrow3, pk, pf,
pfend, stype, Apacked, Fpacked, q, imap, repeat_supernode, nscol2, ss,
tail, nscol_new = 0;
/* ---------------------------------------------------------------------- */
/* declarations for the GPU */
/* ---------------------------------------------------------------------- */
/* these variables are not used if the GPU module is not installed */
#ifdef GPU_BLAS
Int ndescendants, mapCreatedOnGpu, supernodeUsedGPU,
idescendant, dlarge, dsmall, skips ;
int iHostBuff, iDevBuff, useGPU, GPUavailable ;
cholmod_gpu_pointers *gpu_p, gpu_pointer_struct ;
gpu_p = &gpu_pointer_struct ;
#endif
/* ---------------------------------------------------------------------- */
/* guard against integer overflow in the BLAS */
/* ---------------------------------------------------------------------- */
/* If integer overflow occurs in the BLAS, Common->status is set to
* CHOLMOD_TOO_LARGE, and the contents of Lx are undefined. */
Common->blas_ok = TRUE ;
/* ---------------------------------------------------------------------- */
/* get inputs */
/* ---------------------------------------------------------------------- */
nsuper = L->nsuper ;
n = L->n ;
C = Cwork->x ; /* workspace of size L->maxcsize */
one [0] = 1.0 ; /* ALPHA for *syrk, *herk, *gemm, and *trsm */
one [1] = 0. ;
zero [0] = 0. ; /* BETA for *syrk, *herk, and *gemm */
zero [1] = 0. ;
/* ensure there is sufficient integer workspace */
CHOLMOD(allocate_work)(0, 2*n+5*nsuper, 0, Common);
/* allocate integer workspace */
Iwork = Common->Iwork ;
SuperMap = Iwork ; /* size n (i/i/l) */
RelativeMap = Iwork + n ; /* size n (i/i/l) */
Next = Iwork + 2*((size_t) n) ; /* size nsuper*/
Lpos = Iwork + 2*((size_t) n) + nsuper ; /* size nsuper*/
Next_save = Iwork + 2*((size_t) n) + 2*((size_t) nsuper) ;/* size nsuper*/
Lpos_save = Iwork + 2*((size_t) n) + 3*((size_t) nsuper) ;/* size nsuper*/
Previous = Iwork + 2*((size_t) n) + 4*((size_t) nsuper) ;/* size nsuper*/
Map = Common->Flag ; /* size n, use Flag as workspace for Map array */
Head = Common->Head ; /* size n+1, only Head [0..nsuper-1] used */
Ls = L->s ;
Lpi = L->pi ;
Lpx = L->px ;
Super = L->super ;
Lx = L->x ;
#ifdef GPU_BLAS
/* local copy of useGPU */
if ( (Common->useGPU == 1) && L->useGPU)
{
/* Initialize the GPU. If not found, don't use it. */
useGPU = TEMPLATE2 (CHOLMOD (gpu_init))
(C, L, Common, nsuper, n, Lpi[nsuper]-Lpi[0], gpu_p) ;
}
else
{
useGPU = 0;
}
/* fprintf (stderr, "local useGPU %d\n", useGPU) ; */
#endif
#ifndef NTIMER
/* clear GPU / CPU statistics */
Common->CHOLMOD_CPU_GEMM_CALLS = 0 ;
Common->CHOLMOD_CPU_SYRK_CALLS = 0 ;
Common->CHOLMOD_CPU_TRSM_CALLS = 0 ;
Common->CHOLMOD_CPU_POTRF_CALLS = 0 ;
Common->CHOLMOD_GPU_GEMM_CALLS = 0 ;
Common->CHOLMOD_GPU_SYRK_CALLS = 0 ;
Common->CHOLMOD_GPU_TRSM_CALLS = 0 ;
Common->CHOLMOD_GPU_POTRF_CALLS = 0 ;
Common->CHOLMOD_CPU_GEMM_TIME = 0 ;
Common->CHOLMOD_CPU_SYRK_TIME = 0 ;
Common->CHOLMOD_CPU_TRSM_TIME = 0 ;
Common->CHOLMOD_CPU_POTRF_TIME = 0 ;
Common->CHOLMOD_GPU_GEMM_TIME = 0 ;
Common->CHOLMOD_GPU_SYRK_TIME = 0 ;
Common->CHOLMOD_GPU_TRSM_TIME = 0 ;
Common->CHOLMOD_GPU_POTRF_TIME = 0 ;
Common->CHOLMOD_ASSEMBLE_TIME = 0 ;
Common->CHOLMOD_ASSEMBLE_TIME2 = 0 ;
#endif
stype = A->stype ;
if (stype != 0)
{
/* F not accessed */
Fp = NULL ;
Fi = NULL ;
Fx = NULL ;
Fz = NULL ;
Fnz = NULL ;
Fpacked = TRUE ;
}
else
{
Fp = F->p ;
Fi = F->i ;
Fx = F->x ;
Fz = F->z ;
Fnz = F->nz ;
Fpacked = F->packed ;
}
Ap = A->p ;
Ai = A->i ;
Ax = A->x ;
Az = A->z ;
Anz = A->nz ;
Apacked = A->packed ;
/* clear the Map so that changes in the pattern of A can be detected */
#pragma omp parallel for num_threads(CHOLMOD_OMP_NUM_THREADS) \
if ( n > 128 ) schedule (static)
for (i = 0 ; i < n ; i++)
{
Map [i] = EMPTY ;
}
/* If the matrix is not positive definite, the supernode s containing the
* first zero or negative diagonal entry of L is repeated (but factorized
* only up to just before the problematic diagonal entry). The purpose is
* to provide MATLAB with [R,p]=chol(A); columns 1 to p-1 of L=R' are
* required, where L(p,p) is the problematic diagonal entry. The
* repeat_supernode flag tells us whether this is the repeated supernode.
* Once supernode s is repeated, the factorization is terminated. */
repeat_supernode = FALSE ;
#ifdef GPU_BLAS
if ( useGPU )
{
/* Case of GPU, zero all supernodes at one time for better performance*/
TEMPLATE2 (CHOLMOD (gpu_clear_memory))(Lx, L->xsize,
CHOLMOD_OMP_NUM_THREADS);
}
#endif
/* ---------------------------------------------------------------------- */
/* supernodal numerical factorization */
/* ---------------------------------------------------------------------- */
for (s = 0 ; s < nsuper ; s++)
{
/* ------------------------------------------------------------------ */
/* get the size of supernode s */
/* ------------------------------------------------------------------ */
k1 = Super [s] ; /* s contains columns k1 to k2-1 of L */
k2 = Super [s+1] ;
nscol = k2 - k1 ; /* # of columns in all of s */
psi = Lpi [s] ; /* pointer to first row of s in Ls */
psx = Lpx [s] ; /* pointer to first row of s in Lx */
psend = Lpi [s+1] ; /* pointer just past last row of s in Ls */
nsrow = psend - psi ; /* # of rows in all of s */
PRINT1 (("====================================================\n"
"S "ID" k1 "ID" k2 "ID" nsrow "ID" nscol "ID" psi "ID" psend "
""ID" psx "ID"\n", s, k1, k2, nsrow, nscol, psi, psend, psx)) ;
/* ------------------------------------------------------------------ */
/* zero the supernode s */
/* ------------------------------------------------------------------ */
ASSERT ((size_t) (psx + nsrow*nscol) <= L->xsize) ;
pend = psx + nsrow * nscol ; /* s is nsrow-by-nscol */
#ifdef GPU_BLAS
if ( !useGPU )
#endif
{
/* Case of no GPU, zero individual supernodes */
#pragma omp parallel for num_threads(CHOLMOD_OMP_NUM_THREADS) \
schedule (static) if ( pend - psx > 1024 )
for (p = psx ; p < pend ; p++) {
L_CLEAR (Lx,p);
}
}
/* ------------------------------------------------------------------ */
/* construct the scattered Map for supernode s */
/* ------------------------------------------------------------------ */
/* If row i is the kth row in s, then Map [i] = k. Similarly, if
* column j is the kth column in s, then Map [j] = k. */
#pragma omp parallel for num_threads(CHOLMOD_OMP_NUM_THREADS) \
if ( nsrow > 128 )
for (k = 0 ; k < nsrow ; k++)
{
PRINT1 ((" "ID" map "ID"\n", Ls [psi+k], k)) ;
Map [Ls [psi + k]] = k ;
}
/* ------------------------------------------------------------------ */
/* when using GPU, reorder supernodes by levels.*/
/* (all supernodes in a level are independent) */
/* ------------------------------------------------------------------ */
#ifdef GPU_BLAS
if ( useGPU )
{
TEMPLATE2 (CHOLMOD (gpu_reorder_descendants))
( Common, Super, &s, Lpi, Lpos, Head, Next, Previous,
&ndescendants, &tail, &mapCreatedOnGpu, gpu_p ) ;
}
#endif
/* ------------------------------------------------------------------ */
/* copy matrix into supernode s (lower triangular part only) */
/* ------------------------------------------------------------------ */
pk = psx ;
#pragma omp parallel for private ( p, pend, pfend, pf, i, j, imap, q ) \
num_threads(CHOLMOD_OMP_NUM_THREADS) if ( k2-k1 > 64 )
for (k = k1 ; k < k2 ; k++)
{
if (stype != 0)
{
/* copy the kth column of A into the supernode */
p = Ap [k] ;
pend = (Apacked) ? (Ap [k+1]) : (p + Anz [k]) ;
for ( ; p < pend ; p++)
{
/* row i of L is located in row Map [i] of s */
i = Ai [p] ;
if (i >= k)
{
/* This test is here simply to avoid a segfault. If
* the test is false, the numeric factorization of A
* is undefined. It does not detect all invalid
* entries, only some of them (when debugging is
* enabled, and Map is cleared after each step, then
* all entries not in the pattern of L are detected). */
imap = Map [i] ;
if (imap >= 0 && imap < nsrow)
{
/* Lx [Map [i] + pk] = Ax [p] ; */
L_ASSIGN (Lx,(imap+(psx+(k-k1)*nsrow)), Ax,Az,p) ;
}
}
}
}
else
{
double fjk[2];
/* copy the kth column of A*F into the supernode */
pf = Fp [k] ;
pfend = (Fpacked) ? (Fp [k+1]) : (p + Fnz [k]) ;
for ( ; pf < pfend ; pf++)
{
j = Fi [pf] ;
/* fjk = Fx [pf] ; */
L_ASSIGN (fjk,0, Fx,Fz,pf) ;
p = Ap [j] ;
pend = (Apacked) ? (Ap [j+1]) : (p + Anz [j]) ;
for ( ; p < pend ; p++)
{
i = Ai [p] ;
if (i >= k)
{
/* See the discussion of imap above. */
imap = Map [i] ;
if (imap >= 0 && imap < nsrow)
{
/* Lx [Map [i] + pk] += Ax [p] * fjk ; */
L_MULTADD (Lx,(imap+(psx+(k-k1)*nsrow)),
Ax,Az,p, fjk) ;
}
}
}
}
}
}
/* add beta to the diagonal of the supernode, if nonzero */
if (beta [0] != 0.0)
{
/* note that only the real part of beta is used */
pk = psx ;
for (k = k1 ; k < k2 ; k++)
{
/* Lx [pk] += beta [0] ; */
L_ASSEMBLE (Lx,pk, beta) ;
pk += nsrow + 1 ; /* advance to the next diagonal entry */
}
}
PRINT1 (("Supernode with just A: repeat: "ID"\n", repeat_supernode)) ;
DEBUG (CHOLMOD(dump_super) (s, Super, Lpi, Ls, Lpx, Lx, L_ENTRY,
Common)) ;
PRINT1 (("\n\n")) ;
/* ------------------------------------------------------------------ */
/* save/restore the list of supernodes */
/* ------------------------------------------------------------------ */
if (!repeat_supernode)
{
/* Save the list of pending descendants in case s is not positive
* definite. Also save Lpos for each descendant d, so that we can
* find which part of d is used to update s. */
for (d = Head [s] ; d != EMPTY ; d = Next [d])
{
Lpos_save [d] = Lpos [d] ;
Next_save [d] = Next [d] ;
}
}
else
{
for (d = Head [s] ; d != EMPTY ; d = Next [d])
{
Lpos [d] = Lpos_save [d] ;
Next [d] = Next_save [d] ;
}
}
/* ------------------------------------------------------------------ */
/* update supernode s with each pending descendant d */
/* ------------------------------------------------------------------ */
#ifndef NDEBUG
for (d = Head [s] ; d != EMPTY ; d = Next [d])
{
PRINT1 (("\nWill update "ID" with Child: "ID"\n", s, d)) ;
DEBUG (CHOLMOD(dump_super) (d, Super, Lpi, Ls, Lpx, Lx, L_ENTRY,
Common)) ;
}
PRINT1 (("\nNow factorizing supernode "ID":\n", s)) ;
#endif
#ifdef GPU_BLAS
/* initialize the buffer counter */
if ( useGPU ) {
Common->ibuffer = 0;
supernodeUsedGPU = 0;
idescendant = 0;
d = Head[s];
dnext = d;
dlarge = Next[d];
dsmall = tail;
GPUavailable = 1;
skips = 0;
}
else
{
dnext = Head[s];
}
#else
/* GPU module not installed */
dnext = Head[s];
#endif
while
#ifdef GPU_BLAS
( (!useGPU && (dnext != EMPTY))
|| (useGPU && (idescendant < ndescendants)))
#else
( dnext != EMPTY )
#endif
{
#ifdef GPU_BLAS
if ( useGPU ) {
/* Conditionally select the next descendant supernode to
* assemble.
* + first, select the largest descendant
* + subsequently, if gpu host buffers are available, select
* the largest remaining descendant for assembly on the GPU
* + otherwise select the smallest remaining descendant for
* assembly on the CPU
*
* The objective is to keep the GPU busy assembling the largest
* descendants, and simultaneously keep the CPU busy assembling
* the smallest descendants.
*
* As this is called for every descendent supernode, moving
* this code to t_cholmod_gpu incurs substantial overhead -
* ~20 GF/s on audikw_1 - so it is being left here.
*/
iHostBuff =
(Common->ibuffer) % CHOLMOD_HOST_SUPERNODE_BUFFERS;
cudaError_t cuErr;
if ( idescendant > 0 ) {
if ( GPUavailable == -1 || skips > 0) {
d = dsmall;
dsmall = Previous[dsmall];
skips--;
}
else {
cuErr = cudaEventQuery
( Common->updateCBuffersFree[iHostBuff] );
if ( cuErr == cudaSuccess ) {
/* buffers are available, so assemble a large
* descendant (anticipating that this will be
* assembled on the GPU) */
d = dlarge;
dlarge = Next[dlarge];
GPUavailable = 1;
skips = 0;
}
else {
/* buffers are not available, so the GPU is busy,
* so assemble a small descendant (anticipating
* that it will be assembled on the host) */
d = dsmall;
dsmall = Previous[dsmall];
GPUavailable = 0;
/* if the GPUs are busy, then do this many
* supernodes on the CPU before querying GPUs
* again. */
skips = CHOLMOD_GPU_SKIP;
}
}
}
idescendant++;
}
else
{
d = dnext;
}
#else
/* GPU module not installed at compile time */
d = dnext ;
#endif
/* -------------------------------------------------------------- */
/* get the size of supernode d */
/* -------------------------------------------------------------- */
kd1 = Super [d] ; /* d contains cols kd1 to kd2-1 of L */
kd2 = Super [d+1] ;
ndcol = kd2 - kd1 ; /* # of columns in all of d */
pdi = Lpi [d] ; /* pointer to first row of d in Ls */
pdx = Lpx [d] ; /* pointer to first row of d in Lx */
pdend = Lpi [d+1] ; /* pointer just past last row of d in Ls */
ndrow = pdend - pdi ; /* # rows in all of d */
PRINT1 (("Child: ")) ;
DEBUG (CHOLMOD(dump_super) (d, Super, Lpi, Ls, Lpx, Lx, L_ENTRY,
Common)) ;
/* -------------------------------------------------------------- */
/* find the range of rows of d that affect rows k1 to k2-1 of s */
/* -------------------------------------------------------------- */
p = Lpos [d] ; /* offset of 1st row of d affecting s */
pdi1 = pdi + p ; /* ptr to 1st row of d affecting s in Ls */
pdx1 = pdx + p ; /* ptr to 1st row of d affecting s in Lx */
/* there must be at least one row remaining in d to update s */
ASSERT (pdi1 < pdend) ;
PRINT1 (("Lpos[d] "ID" pdi1 "ID" Ls[pdi1] "ID"\n",
Lpos[d], pdi1, Ls [pdi1])) ;
ASSERT (Ls [pdi1] >= k1 && Ls [pdi1] < k2) ;
for (pdi2 = pdi1 ; pdi2 < pdend && Ls [pdi2] < k2 ; pdi2++) ;
ndrow1 = pdi2 - pdi1 ; /* # rows in first part of d */
ndrow2 = pdend - pdi1 ; /* # rows in remaining d */
/* rows Ls [pdi1 ... pdi2-1] are in the range k1 to k2-1. Since d
* affects s, this set cannot be empty. */
ASSERT (pdi1 < pdi2 && pdi2 <= pdend) ;
PRINT1 (("ndrow1 "ID" ndrow2 "ID"\n", ndrow1, ndrow2)) ;
DEBUG (for (p = pdi1 ; p < pdi2 ; p++)
PRINT1 (("Ls["ID"] "ID"\n", p, Ls[p]))) ;
/* -------------------------------------------------------------- */
/* construct the update matrix C for this supernode d */
/* -------------------------------------------------------------- */
/* C = L (k1:n-1, kd1:kd2-1) * L (k1:k2-1, kd1:kd2-1)', except
* that k1:n-1 refers to all of the rows in L, but many of the
* rows are all zero. Supernode d holds columns kd1 to kd2-1 of L.
* Nonzero rows in the range k1:k2-1 are in the list
* Ls [pdi1 ... pdi2-1], of size ndrow1. Nonzero rows in the range
* k2:n-1 are in the list Ls [pdi2 ... pdend], of size ndrow2. Let
* L1 = L (Ls [pdi1 ... pdi2-1], kd1:kd2-1), and let
* L2 = L (Ls [pdi2 ... pdend], kd1:kd2-1). C is ndrow2-by-ndrow1.
* Let C1 be the first ndrow1 rows of C and let C2 be the last
* ndrow2-ndrow1 rows of C. Only the lower triangular part of C1
* needs to be computed since C1 is symmetric.
*/
/* maxcsize is the largest size of C for all pairs (d,s) */
ASSERT (ndrow2 * ndrow1 <= ((Int) L->maxcsize)) ;
/* compute leading ndrow1-by-ndrow1 lower triangular block of C,
* C1 = L1*L1' */
ndrow3 = ndrow2 - ndrow1 ; /* number of rows of C2 */
ASSERT (ndrow3 >= 0) ;
#ifdef GPU_BLAS
if ( useGPU ) {
/* set up GPU to assemble new supernode */
if ( GPUavailable == 1) {
if ( ndrow2 * L_ENTRY >= CHOLMOD_ND_ROW_LIMIT &&
ndcol * L_ENTRY >= CHOLMOD_ND_COL_LIMIT ) {
if ( ! mapCreatedOnGpu ) {
TEMPLATE2 ( CHOLMOD (gpu_initialize_supernode))
( Common, nscol, nsrow, psi, gpu_p );
mapCreatedOnGpu = 1;
}
}
else {
/* we've reached the limit of GPU-eligible descendants
* flag to stop stop performing cudaEventQueries */
GPUavailable = -1;
}
}
}
#endif
#ifdef GPU_BLAS
if ( !useGPU
|| GPUavailable!=1
|| !TEMPLATE2 (CHOLMOD (gpu_updateC)) (ndrow1, ndrow2, ndrow,
ndcol, nsrow, pdx1, pdi1, Lx, C, Common, gpu_p))
#endif
{
/* GPU not installed, or not used */
#ifndef NTIMER
Common->CHOLMOD_CPU_SYRK_CALLS++ ;
tstart = SuiteSparse_time () ;
#endif
#ifdef REAL
BLAS_dsyrk ("L", "N",
ndrow1, ndcol, /* N, K: L1 is ndrow1-by-ndcol*/
one, /* ALPHA: 1 */
Lx + L_ENTRY*pdx1, ndrow, /* A, LDA: L1, ndrow */
zero, /* BETA: 0 */
C, ndrow2) ; /* C, LDC: C1 */
#else
BLAS_zherk ("L", "N",
ndrow1, ndcol, /* N, K: L1 is ndrow1-by-ndcol*/
one, /* ALPHA: 1 */
Lx + L_ENTRY*pdx1, ndrow, /* A, LDA: L1, ndrow */
zero, /* BETA: 0 */
C, ndrow2) ; /* C, LDC: C1 */
#endif
#ifndef NTIMER
Common->CHOLMOD_CPU_SYRK_TIME += SuiteSparse_time () - tstart ;
#endif
/* compute remaining (ndrow2-ndrow1)-by-ndrow1 block of C,
* C2 = L2*L1' */
if (ndrow3 > 0)
{
#ifndef NTIMER
Common->CHOLMOD_CPU_GEMM_CALLS++ ;
tstart = SuiteSparse_time () ;
#endif
#ifdef REAL
BLAS_dgemm ("N", "C",
ndrow3, ndrow1, ndcol, /* M, N, K */
one, /* ALPHA: 1 */
Lx + L_ENTRY*(pdx1 + ndrow1), /* A, LDA: L2 */
ndrow, /* ndrow */
Lx + L_ENTRY*pdx1, /* B, LDB: L1 */
ndrow, /* ndrow */
zero, /* BETA: 0 */
C + L_ENTRY*ndrow1, /* C, LDC: C2 */
ndrow2) ;
#else
BLAS_zgemm ("N", "C",
ndrow3, ndrow1, ndcol, /* M, N, K */
one, /* ALPHA: 1 */
Lx + L_ENTRY*(pdx1 + ndrow1), /* A, LDA: L2 */
ndrow, /* ndrow */
Lx + L_ENTRY*pdx1, /* B, LDB: L1, ndrow */
ndrow,
zero, /* BETA: 0 */
C + L_ENTRY*ndrow1, /* C, LDC: C2 */
ndrow2) ;
#endif
#ifndef NTIMER
Common->CHOLMOD_CPU_GEMM_TIME +=
SuiteSparse_time () - tstart ;
#endif
}
/* ---------------------------------------------------------- */
/* construct relative map to assemble d into s */
/* ---------------------------------------------------------- */
DEBUG (CHOLMOD(dump_real) ("C", C, ndrow2, ndrow1, TRUE,
L_ENTRY, Common)) ;
#pragma omp parallel for num_threads(CHOLMOD_OMP_NUM_THREADS) \
if ( ndrow2 > 64 )
for (i = 0 ; i < ndrow2 ; i++)
{
RelativeMap [i] = Map [Ls [pdi1 + i]] ;
ASSERT (RelativeMap [i] >= 0 && RelativeMap [i] < nsrow) ;
}
/* ---------------------------------------------------------- */
/* assemble C into supernode s using the relative map */
/* ---------------------------------------------------------- */
#pragma omp parallel for private ( j, i, px, q ) \
num_threads(CHOLMOD_OMP_NUM_THREADS) if (ndrow1 > 64 )
for (j = 0 ; j < ndrow1 ; j++) /* cols k1:k2-1 */
{
ASSERT (RelativeMap [j] == Map [Ls [pdi1 + j]]) ;
ASSERT (RelativeMap [j] >= 0 && RelativeMap [j] < nscol) ;
px = psx + RelativeMap [j] * nsrow ;
for (i = j ; i < ndrow2 ; i++) /* rows k1:n-1 */
{
ASSERT (RelativeMap [i] == Map [Ls [pdi1 + i]]) ;
ASSERT (RelativeMap [i] >= j && RelativeMap[i] < nsrow);
/* Lx [px + RelativeMap [i]] -= C [i + pj] ; */
q = px + RelativeMap [i] ;
L_ASSEMBLESUB (Lx,q, C, i+ndrow2*j) ;
}
}
}
#ifdef GPU_BLAS
else
{
supernodeUsedGPU = 1; /* GPU was used for this supernode*/
Common->ibuffer++; /* gpu_updateC is asynchronous, so use
* the next host buffer for the next
* supernode */
Common->ibuffer = Common->ibuffer%
(CHOLMOD_HOST_SUPERNODE_BUFFERS*CHOLMOD_DEVICE_STREAMS);
}
#endif
/* -------------------------------------------------------------- */
/* prepare this supernode d for its next ancestor */
/* -------------------------------------------------------------- */
dnext = Next [d] ;
if (!repeat_supernode)
{
/* If node s is being repeated, Head [dancestor] has already
* been cleared (set to EMPTY). It must remain EMPTY. The
* dancestor will not be factorized since the factorization
* terminates at node s. */
Lpos [d] = pdi2 - pdi ;
if (Lpos [d] < ndrow)
{
dancestor = SuperMap [Ls [pdi2]] ;
ASSERT (dancestor > s && dancestor < nsuper) ;
/* place d in the link list of its next ancestor */
Next [d] = Head [dancestor] ;
Head [dancestor] = d ;
}
}
} /* end of descendant supernode loop */
#ifdef GPU_BLAS
if ( useGPU ) {
iHostBuff = (Common->ibuffer)%CHOLMOD_HOST_SUPERNODE_BUFFERS;
iDevBuff = (Common->ibuffer)%CHOLMOD_DEVICE_STREAMS;
/* combine updates assembled on the GPU with updates
* assembled on the CPU */
TEMPLATE2 ( CHOLMOD (gpu_final_assembly ))
( Common, Lx, psx, nscol, nsrow, supernodeUsedGPU,
&iHostBuff, &iDevBuff, gpu_p );
}
#endif
PRINT1 (("\nSupernode with contributions A: repeat: "ID"\n",
repeat_supernode)) ;
DEBUG (CHOLMOD(dump_super) (s, Super, Lpi, Ls, Lpx, Lx, L_ENTRY,
Common)) ;
PRINT1 (("\n\n")) ;
/* ------------------------------------------------------------------ */
/* factorize diagonal block of supernode s in LL' */
/* ------------------------------------------------------------------ */
/* The current supernode s is ready to factorize. It has been updated
* by all descendant supernodes. Let S = the current supernode, which
* holds rows k1:n-1 and columns k1:k2-1 of the updated matrix. It
* splits into two parts: the square diagonal block S1, and the
* rectangular part S2. Here, S1 is factorized into L1*L1' and
* overwritten by L1.
*
* If supernode s is being repeated, only factorize it up to but not
* including the column containing the problematic entry.
*/
nscol2 = (repeat_supernode) ? (nscol_new) : (nscol) ;
#ifdef GPU_BLAS
if ( !useGPU
|| !supernodeUsedGPU
|| !TEMPLATE2 (CHOLMOD (gpu_lower_potrf))(nscol2, nsrow, psx, Lx,
&info, Common, gpu_p))
#endif
{
/* Note that the GPU will not be used for the triangular solve */
#ifdef GPU_BLAS
supernodeUsedGPU = 0;
#endif
#ifndef NTIMER
Common->CHOLMOD_CPU_POTRF_CALLS++ ;
tstart = SuiteSparse_time () ;
#endif
#ifdef REAL
LAPACK_dpotrf ("L",
nscol2, /* N: nscol2 */
Lx + L_ENTRY*psx, nsrow, /* A, LDA: S1, nsrow */
info) ; /* INFO */
#else
LAPACK_zpotrf ("L",
nscol2, /* N: nscol2 */
Lx + L_ENTRY*psx, nsrow, /* A, LDA: S1, nsrow */
info) ; /* INFO */
#endif
#ifndef NTIMER
Common->CHOLMOD_CPU_POTRF_TIME += SuiteSparse_time ()- tstart ;
#endif
}
/* ------------------------------------------------------------------ */
/* check if the matrix is not positive definite */
/* ------------------------------------------------------------------ */
if (repeat_supernode)
{
/* the leading part has been refactorized; it must have succeeded */
info = 0 ;
/* zero out the rest of this supernode */
p = psx + nsrow * nscol_new ;
pend = psx + nsrow * nscol ; /* s is nsrow-by-nscol */
for ( ; p < pend ; p++)
{
/* Lx [p] = 0 ; */
L_CLEAR (Lx,p) ;
}
}
/* info is set to one in LAPACK_*potrf if blas_ok is FALSE. It is
* set to zero in dpotrf/zpotrf if the factorization was successful. */
if (CHECK_BLAS_INT && !Common->blas_ok)
{
ERROR (CHOLMOD_TOO_LARGE, "problem too large for the BLAS") ;
}
if (info != 0)
{
/* Matrix is not positive definite. dpotrf/zpotrf do NOT report an
* error if the diagonal of L has NaN's, only if it has a zero. */
if (Common->status == CHOLMOD_OK)
{
ERROR (CHOLMOD_NOT_POSDEF, "matrix not positive definite") ;
}
/* L->minor is the column of L that contains a zero or negative
* diagonal term. */
L->minor = k1 + info - 1 ;
/* clear the link lists of all subsequent supernodes */
for (ss = s+1 ; ss < nsuper ; ss++)
{
Head [ss] = EMPTY ;
}
/* zero this supernode, and all remaining supernodes */
pend = L->xsize ;
for (p = psx ; p < pend ; p++)
{
/* Lx [p] = 0. ; */
L_CLEAR (Lx,p) ;
}
/* If L is indefinite, it still contains useful information.
* Supernodes 0 to s-1 are valid, similar to MATLAB [R,p]=chol(A),
* where the 1-based p is identical to the 0-based L->minor. Since
* L->minor is in the current supernode s, it and any columns to the
* left of it in supernode s are also all zero. This differs from
* [R,p]=chol(A), which contains nonzero rows 1 to p-1. Fix this
* by setting repeat_supernode to TRUE, and repeating supernode s.
*
* If Common->quick_return_if_not_posdef is true, then the entire
* supernode s is not factorized; it is left as all zero.
*/
if (info == 1 || Common->quick_return_if_not_posdef)
{
/* If the first column of supernode s contains a zero or
* negative diagonal entry, then it is already properly set to
* zero. Also, info will be 1 if integer overflow occured in
* the BLAS. */
Head [s] = EMPTY ;
#ifdef GPU_BLAS
if ( useGPU ) {
CHOLMOD (gpu_end) (Common) ;
}
#endif
return (Common->status >= CHOLMOD_OK) ;
}
else
{
/* Repeat supernode s, but only factorize it up to but not
* including the column containing the problematic diagonal
* entry. */
repeat_supernode = TRUE ;
s-- ;
nscol_new = info - 1 ;
continue ;
}
}
/* ------------------------------------------------------------------ */
/* compute the subdiagonal block and prepare supernode for its parent */
/* ------------------------------------------------------------------ */
nsrow2 = nsrow - nscol2 ;
if (nsrow2 > 0)
{
/* The current supernode is columns k1 to k2-1 of L. Let L1 be the
* diagonal block (factorized by dpotrf/zpotrf above; rows/cols
* k1:k2-1), and L2 be rows k2:n-1 and columns k1:k2-1 of L. The
* triangular system to solve is L2*L1' = S2, where S2 is
* overwritten with L2. More precisely, L2 = S2 / L1' in MATLAB
* notation.
*/
#ifdef GPU_BLAS
if ( !useGPU
|| !supernodeUsedGPU
|| !TEMPLATE2 (CHOLMOD(gpu_triangular_solve))
(nsrow2, nscol2, nsrow, psx, Lx, Common, gpu_p))
#endif
{
#ifndef NTIMER
Common->CHOLMOD_CPU_TRSM_CALLS++ ;
tstart = SuiteSparse_time () ;
#endif
#ifdef REAL
BLAS_dtrsm ("R", "L", "C", "N",
nsrow2, nscol2, /* M, N */
one, /* ALPHA: 1 */
Lx + L_ENTRY*psx, nsrow, /* A, LDA: L1, nsrow */
Lx + L_ENTRY*(psx + nscol2), /* B, LDB, L2, nsrow */
nsrow) ;
#else
BLAS_ztrsm ("R", "L", "C", "N",
nsrow2, nscol2, /* M, N */
one, /* ALPHA: 1 */
Lx + L_ENTRY*psx, nsrow, /* A, LDA: L1, nsrow */
Lx + L_ENTRY*(psx + nscol2), /* B, LDB, L2, nsrow */
nsrow) ;
#endif
#ifndef NTIMER
Common->CHOLMOD_CPU_TRSM_TIME += SuiteSparse_time () - tstart ;
#endif
}
if (CHECK_BLAS_INT && !Common->blas_ok)
{
ERROR (CHOLMOD_TOO_LARGE, "problem too large for the BLAS") ;
}
if (!repeat_supernode)
{
/* Lpos [s] is offset of first row of s affecting its parent */
Lpos [s] = nscol ;
sparent = SuperMap [Ls [psi + nscol]] ;
ASSERT (sparent != EMPTY) ;
ASSERT (Ls [psi + nscol] >= Super [sparent]) ;
ASSERT (Ls [psi + nscol] < Super [sparent+1]) ;
ASSERT (SuperMap [Ls [psi + nscol]] == sparent) ;
ASSERT (sparent > s && sparent < nsuper) ;
/* place s in link list of its parent */
Next [s] = Head [sparent] ;
Head [sparent] = s ;
}
}
else
{
#ifdef GPU_BLAS
TEMPLATE2 ( CHOLMOD (gpu_copy_supernode) )
( Common, Lx, psx, nscol, nscol2, nsrow,
supernodeUsedGPU, iHostBuff, gpu_p);
#endif
}
Head [s] = EMPTY ; /* link list for supernode s no longer needed */
/* clear the Map (debugging only, to detect changes in pattern of A) */
DEBUG (for (k = 0 ; k < nsrow ; k++) Map [Ls [psi + k]] = EMPTY) ;
DEBUG (CHOLMOD(dump_super) (s, Super, Lpi, Ls, Lpx, Lx, L_ENTRY,
Common)) ;
if (repeat_supernode)
{
/* matrix is not positive definite; finished clean-up for supernode
* containing negative diagonal */
#ifdef GPU_BLAS
if ( useGPU )
{
CHOLMOD (gpu_end) (Common) ;
}
#endif
return (Common->status >= CHOLMOD_OK) ;
}
}
/* success; matrix is positive definite */
L->minor = n ;
#ifdef GPU_BLAS
if ( useGPU )
{
CHOLMOD (gpu_end) (Common) ;
}
#endif
return (Common->status >= CHOLMOD_OK) ;
}
#undef PATTERN
#undef REAL
#undef COMPLEX
#undef ZOMPLEX
|
GB_unop__minv_fp32_fp32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__minv_fp32_fp32
// op(A') function: GB_unop_tran__minv_fp32_fp32
// C type: float
// A type: float
// cast: float cij = aij
// unaryop: cij = (1.0F)/aij
// Type and operator macros defining z = 1/x on fp32; consumed by the
// generated kernels below and by the included transpose template.
#define GB_ATYPE \
float
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
// access the pth entry of the output array
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = (1.0F)/x ;
// casting
#define GB_CAST(z, aij) \
float z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
float aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
float z = aij ; \
Cx [pC] = (1.0F)/z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Elementwise multiplicative inverse: Cx [k] = 1/Ax [k] for k = 0..anz-1.
// Aliasing Cx == Ax is safe since each entry is read once before it is
// written. Returns GrB_NO_VALUE when this kernel is compiled out.
GrB_Info GB_unop_apply__minv_fp32_fp32
(
    float *Cx,          // Cx and Ax may be aliased
    const float *Ax,
    int64_t anz,        // number of entries to process
    int nthreads        // OpenMP thread count
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        // cij = 1/aij
        Cx [k] = (1.0F) / Ax [k] ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// The actual work is performed by the shared template GB_unop_transpose.c,
// which expands using the GB_* macros defined above for this type/operator
// pair. Returns GrB_NO_VALUE when this kernel is compiled out.
GrB_Info GB_unop_tran__minv_fp32_fp32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
FriendAlignFinder.h | ///////////////////////////////////////////////////////////////////////////////
// SOFTWARE COPYRIGHT NOTICE AGREEMENT //
// This software and its documentation are copyright (2012) by the //
// Broad Institute. All rights are reserved. This software is supplied //
// without any warranty or guaranteed support whatsoever. The Broad //
// Institute is not responsible for its use, misuse, or functionality. //
///////////////////////////////////////////////////////////////////////////////
#ifndef FRIEND_ALIGNS_FINDER3_H
#define FRIEND_ALIGNS_FINDER3_H
// MakeDepend: library OMP
// MakeDepend: cflags OMP_FLAGS
#include "paths/long/FriendAligns.h"
#include "paths/long/MakeAlignments.h"
#include "kmers/ReadPather.h"
#include "kmers/ReadPatherDefs.h"
#include "Vec.h"
#include "ParallelVecUtilities.h"
// ====================== FriendAlignFinder implementations =============================
// FriendAlignFinder: a FriendAlignerImpl that discovers read-to-read
// ("friend") alignments by building a K-mer dictionary and unipath graph from
// the reads, indexing each read's placement on the unipaths, and reporting
// pairs of reads that co-locate on a unipath with a compatible offset.
template <int K>
class FriendAlignFinder : public FriendAlignerImpl {
public:
    // Explicitly check whether a read alignment is actually valid; used to
    // remove some false positives. Accepts the alignment iff the overlap
    // between read1 and (possibly reverse-complemented) read2 contains at
    // least one exact match run of length >= K.
    bool ValidateAlign( const simple_align_data& a ) const {
        typedef bvec::const_iterator Itr;
        bvec const& read1 = mReads[a.id1];
        bvec read2RC;
        // read2 binds either to the stored read or to its reverse complement,
        // depending on the alignment orientation flag.
        bvec const& read2 = a.rc2 ?
                                read2RC.ReverseComplement(mReads[a.id2]) :
                                mReads[a.id2];
        Itr it1 = read1.begin();
        Itr it2 = read2.begin();
        // Advance one of the two iterators so they point at the first
        // overlapping position implied by a.offset.
        if ( a.offset > 0 )
        {
            ForceAssertLt(static_cast<unsigned>(a.offset),read1.size());
            it1 += a.offset;
        }
        else
        {
            ForceAssertLt(static_cast<unsigned>(-a.offset),read2.size());
            it2 -= a.offset;
        }
        // End of the common overlap region on read1.
        Itr end = it1 + std::min(std::distance(it1,read1.end()),
                                 std::distance(it2,read2.end()));
        // Scan mismatch to mismatch, looking for a match run of >= K bases.
        bool find_match = false;
        while ( !find_match ) {
            std::pair<Itr,Itr> mis_locs = mismatch( it1, end, it2 );
            if ( mis_locs.first - it1 >= K )
                find_match = true;
            if ( mis_locs.first == end )
                break;
            it1 = mis_locs.first + 1;
            it2 = mis_locs.second + 1;
        }
        return find_match;
    }

    // Data structure to store a read location on a unipath.
    struct ReadLocOnUnipath {
        EdgeID uid;        // unipath (graph edge) id
        short int start;   // read start relative to the unipath (may be negative)
        unsigned int rid;  // read id
        bool rc;           // true if the read lies reverse-complemented on the unipath
        friend std::ostream& operator<<( std::ostream& out, const ReadLocOnUnipath& a ) {
            char dir = ( a.rc ? '-' : '+' );
            // NOTE(review): the "(start,)" output has an empty second slot --
            // a second field was probably intended. Debug printing only.
            out << "loc " << a.rid<< "(" << a.start << "," << ")" << dir << "@" << a.uid.val() ;
            return out;
        }
        // Order by unipath id, then by start position on the unipath.
        friend bool operator<( const ReadLocOnUnipath& l, const ReadLocOnUnipath& r) {
            if ( l.uid != r.uid ) return l.uid < r.uid;
            return l.start < r.start;
        }
    };

    // We could use more efficient containers, but just use vec of vec for now.
    typedef vec<ReadLocOnUnipath> PathSegVec;
    typedef vec<ReadLocOnUnipath> ReadULocVec;

    // Builds the dictionary, graph, and read-location index at construction.
    FriendAlignFinder (const vecbvec& reads, const int max_freq = 1000, Bool use_down_sampling = False, int verb = 1 )
        : mReads(reads), mpDict(NULL), mpGraph(NULL),
          mCopyNumberMax( max_freq ), mUseDownSampling(use_down_sampling), mVerbose(verb)
    { Init(); }

    FriendAlignFinder( const FriendAlignFinder& )=delete;
    FriendAlignFinder& operator= ( const FriendAlignFinder& )=delete;

    virtual ~FriendAlignFinder () { delete mpGraph; delete mpDict; }

    // Find all alignments of one read.
    virtual void getAligns( size_t readId, Friends* pFriends )
    {   vec<ReadLocOnUnipath> locvec;
        PathOneRead( readId, &locvec );
        // Check for bad alignments
        return GetAlignsOneReadUnsorted( readId, locvec, pFriends ); }

private:
    // Build the K-mer dictionary and unipath graph from the reads, then index
    // read locations. `coverage` is the assumed per-base read coverage used
    // to presize the dictionary.
    void Init( unsigned int coverage = 5, unsigned int nThreads = 0) {
        // ========= build the kmer dictionary =========
        if ( mVerbose >= 1 )
            std::cout << Date() << ": creating dictionary" << std::endl;
        size_t dictSize = mReads.SizeSum() / coverage;
        // 25% headroom over the expected number of distinct kmers.
        mpDict = new KmerDict<K> ( 5*dictSize/4 );
        mpDict->process(mReads,mVerbose,false,nThreads,100);
        if ( mVerbose >= 1 ) {
            std::cout << Date( ) << ": there are " << mpDict->size()
                      << " kmers (expected ~" << dictSize << ")" << std::endl;
            ReportMemUsage();
        }
        size_t old_dict_size = mpDict->size();
        //mpDict->clean( typename KmerDict<K>::BadKmerCountFunctor(2, mCopyNumberMax));
        // the result seems better without using mCopyNumberMax at this stage
        mpDict->clean( typename KmerDict<K>::BadKmerCountFunctor(2));
        if ( mVerbose >= 1 )
            std::cout << Date() << ": Cleaning bad kmers, keeping " << mpDict->size()
                      << "(" << ( mpDict->size() * 100 / old_dict_size ) << "%)" << std::endl;
        // ========= build the unipath graph =========
        mpGraph = new UnipathGraph<K>(*mpDict, mVerbose);
        if ( mVerbose >= 1 ) ReportMemUsage();
        // ======== Index the read locs on unipaths
        GenerateReadLocs();
        if ( mVerbose >= 1 ) ReportMemUsage();
    }

    // Path every read on the unipath graph and bucket the resulting locations
    // by unipath id into mULocs; optionally down-samples locations per read.
    void GenerateReadLocs( ) {
        int64_t total_locs_deleted = 0;
        mULocs.clear_and_resize( mpGraph->getNEdges() );
        #pragma omp parallel for schedule(dynamic, 100)
        for( size_t iread = 0; iread < mReads.size(); iread++ ) {
            vec<ReadLocOnUnipath> locvec;
            PathOneRead( iread, &locvec );
            int num_locs_deleted = 0;
            if ( mUseDownSampling )
                num_locs_deleted = DownSampleLocsOfOneRead( &locvec );
            // mULocs and the counter are shared; serialize the insertions.
            #pragma omp critical
            {
                total_locs_deleted += num_locs_deleted;
                for ( size_t j = 0; j < locvec.size(); ++j )
                    mULocs[ locvec[j].uid.val() ].push_back( locvec[j] );
            }
        }
        // Drop short, highly repetitive unipaths; sort the rest by position.
        #pragma omp parallel for schedule(dynamic, 100)
        for ( size_t i = 0; i < mULocs.size(); ++i ) {
            int ulen = mpGraph->getEdge( EdgeID(i) ).getLength();
            if ( ulen < 5 && mULocs[i].isize() > mCopyNumberMax )
                mULocs[i].clear();
            else
                Sort(mULocs[i]);
        }
        uint64_t total = SizeSum( mULocs );
        if ( mVerbose >= 1 )
            std::cout << Date() << ": Found " << ToStringAddCommas( total ) << " locs"
                      << " after deleting " << ToStringAddCommas(total_locs_deleted) << std::endl;
    }

    // For each unipath location of read_id, scan the other reads indexed on
    // the same unipath, derive candidate (offset, orientation) alignments,
    // deduplicate them, validate, and emit them into *pFriends (unsorted).
    void GetAlignsOneReadUnsorted( size_t read_id,
                                   const vec<ReadLocOnUnipath>& locvec,
                                   Friends *pFriends ) const {
        std::set<simple_align_data> uniq_aligns;
        for( size_t i = 0; i < locvec.size(); ++i ) {
            int nkmer1 = mReads[read_id].size() - K + 1;
            const ReadLocOnUnipath& loc1 = locvec[i];
            int stop1 = loc1.start + nkmer1;
            const ReadULocVec& ulocvec = mULocs[ loc1.uid.val() ];
            bool isPalindrome = mpGraph->getEdge(loc1.uid).isPalindrome();
            for ( size_t x2 = 0; x2 < ulocvec.size(); ++x2 ) {
                const ReadLocOnUnipath& loc2 = ulocvec[x2];
                int nkmer2 = mReads[loc2.rid].size() - K + 1;
                int stop2 = loc2.start + nkmer2;
                // Skip self-alignments and pairs with no kmer-range overlap.
                if ( loc2.rid == loc1.rid ) continue;
                if ( stop2 <= loc1.start ) continue;
                if ( loc2.start >= stop1 ) continue;
                { // for all cases
                    Bool rc = loc2.rc ^ loc1.rc;
                    int offset2 = ( loc1.rc ? stop1 - stop2 : loc2.start - loc1.start );
                    simple_align_data a(loc1.rid, loc2.rid, offset2, rc);
                    uniq_aligns.insert( a );
                    //if ( ! ValidateAlign(a) ) {
                    //   #pragma omp critical
                    //   {
                    //       std::cout << "Could not validate alignment ";
                    //       std::cout << "read1 on " << loc1.start << "," << stop1
                    //           << " read2 on " << loc2.start << "," << stop2 << std::endl;
                    //       int ulen = mpGraph->getEdge( loc1.uid ).getLength();
                    //       std::cout << "ulen= " << ulen << std::endl;
                    //   }
                    //}
                }
                // Special treatment of palindrome cases, where the edge consists of
                // only one kmer, and both orientations of the kmer are the same and
                // should all be considered!
                if ( isPalindrome ) {
                    Bool rc = loc2.rc ^ loc1.rc ^ 1;
                    int stop2p = - loc2.start + 1;
                    int start2p = - stop2 + 1;
                    int offset2 = ( loc1.rc ? stop1 - stop2p : start2p - loc1.start );
                    simple_align_data a(loc1.rid, loc2.rid, offset2, rc);
                    uniq_aligns.insert( simple_align_data(loc1.rid, loc2.rid, offset2, rc) );
                    //if ( ! ValidateAlign(a) ) {
                    //   #pragma omp critical
                    //   {
                    //       std::cout << "Could not validate alignment " << a.rc2 << std::endl;
                    //       std::cout << "read1 on " << loc1.start << "," << stop1
                    //           << " read2(palindrom) on " << loc2.start << "," << stop2
                    //           << " reverted to " << start2p << "," << stop2p
                    //           << std::endl;
                    //       int ulen = mpGraph->getEdge( loc1.uid ).getLength();
                    //       std::cout << "ulen= " << ulen << std::endl;
                    //   }
                    //}
                }
            }
        }
        pFriends->clear();
        // n_false_align is tallied but not reported; kept for debugging.
        int n_false_align = 0;
        for( std::set<simple_align_data>::iterator it = uniq_aligns.begin(), end = uniq_aligns.end();
             it != end; it++ ) {
            if ( ValidateAlign( *it ) )
                pFriends->push_back( Friend(it->id2,it->offset,it->rc2) );
            else
                n_false_align++;
        }
    }

    // Path one read on the unipath graph: for each kmer of the read found in
    // the dictionary, record the implied (unipath, start, orientation)
    // placement of the whole read. Results are deduplicated via a set.
    void PathOneRead ( size_t read_id, PathSegVec *loc_vec ) const {
        std::set<ReadLocOnUnipath> locs;
        const bvec& read = mReads[ read_id ];
        int readLen = read.size();
        int nkmers = readLen - K + 1;
        if ( nkmers < 0 ) return;
        // pathing
        for ( int rpos = 0; rpos < nkmers; rpos++ ) {
            KMer<K> kmer( read.begin() + rpos );
            KDef const* pDef = mpDict->lookup(kmer);
            if ( !pDef ) { continue; }
            EdgeID edgeID = pDef->getEdgeID();
            const UnipathEdge *pEdge = &mpGraph->getEdge(edgeID);
            KmerID kmerID = pEdge->getKmerID( pDef->getEdgeOffset() );
            bool rc = IsRC( kmer,kmerID );
            // number of skipped bases from the unipath
            int skipped = kmerID.val() - pEdge->getInitialKmerID().val();
            // Project the kmer hit back to the read's start on the unipath.
            short ustart = ( rc ? skipped - (nkmers-1 - rpos) : skipped - rpos );
            ReadLocOnUnipath the_loc =
                { edgeID, ustart, static_cast<unsigned>(read_id), rc };
            locs.insert( the_loc );
        }
        (*loc_vec).assign( locs.begin(), locs.end() );
    }

    // Reduce the number of unipath locations kept for one read, preferring
    // long unipaths, until every 10-base division of the read is covered.
    // Returns the number of locations deleted.
    int DownSampleLocsOfOneRead( PathSegVec *loc_vec ) const {
        size_t nsegs = (*loc_vec).size();
        if ( nsegs < 2 ) return 0;
        int nkmers = mReads[ (*loc_vec)[0].rid ].size() -K + 1;
        vec< std::pair<int,int> > seg_coverage(nsegs);
        vec<int> seg_lens(nsegs);
        // Compute, for each segment, the [rstart, rstop) kmer range of the
        // read it covers, and the unipath length.
        for ( size_t i = 0; i < nsegs; ++i ) {
            int ulen = mpGraph->getEdge( (*loc_vec)[i].uid ).getLength();
            int rstart = -1, rstop = -1;
            if ( ! (*loc_vec)[i].rc ) {
                rstart = std::max( -(*loc_vec)[i].start, 0 );
                rstop = std::min( rstart + ulen , nkmers );
            }
            else {
                rstart = std::max( (*loc_vec)[i].start + nkmers - ulen, 0 );
                rstop = std::min( rstart + ulen , nkmers );
            }
            seg_coverage[i] = std::make_pair(rstart, rstop);
            seg_lens[i] = ulen;
        }
        // Select the segments, starting from the largest, until every 10-base
        // division in the read has enough coverage. Long unipaths are always
        // kept.
        const int kDivisionSize = 10;
        const int kTargetDivCoverage = 1;
        const int kGoodUnipathLen = 5;
        vec<Bool> todel( nsegs, true);
        vec<int> seg_indices( nsegs, vec<int>::IDENTITY );
        ReverseSortSync( seg_lens, seg_indices );
        vec<int> times_covered( (nkmers-1)/kDivisionSize + 1, 0);
        bool ignore_tail_division = ( times_covered.size() * kDivisionSize - nkmers < 10 ) ;
        for ( size_t i = 0; i < nsegs; ++i ) {
            size_t seg_index = seg_indices[i];
            //// discard redundant segments ( this division it covers all has enough segments )
            //bool is_redundant = true;
            //for( int j = seg_coverage[seg_index].first / kDivisionSize;
            //        j <= (seg_coverage[seg_index].second-1) / kDivisionSize; ++j )
            //   if ( times_covered[j] < kTargetDivCoverage ) {
            //       is_redundant = false;
            //       break;
            //   }
            //if ( seg_lens[i] < kGoodUnipathLen && ! is_redundant
            //        || seg_lens[i] >= kGoodUnipathLen ) {
            for( int j = seg_coverage[seg_index].first / kDivisionSize;
                    j <= (seg_coverage[seg_index].second-1) / kDivisionSize; ++j )
                times_covered[j]++;
            todel[seg_index] = false;
            //}
            // Are all divisions covered?
            bool is_well_covered = true;
            size_t div_end = ( ignore_tail_division ? times_covered.size() -1 : times_covered.size() );
            for ( size_t j = 0; j < div_end; ++j ) {
                if ( times_covered[j] < kTargetDivCoverage) {
                    is_well_covered = false;
                    break;
                }
            }
            // exit conditions: stop once covered and remaining unipaths are short.
            if ( is_well_covered && seg_lens[i] < kGoodUnipathLen ) { break; }
        }
        // return values
        EraseIf( *loc_vec, todel );
        return nsegs - (*loc_vec).size();
    }

    // True iff the kmer matches the graph sequence at kmerID only in
    // reverse-complement orientation.
    bool IsRC( KMer<K> const& kmer, KmerID const& kmerID ) const {
        using std::equal;
        HugeBVec::const_iterator seqItr( mpGraph->getBases( kmerID ) );
        bool result = ! equal( kmer.begin(),kmer.end(),seqItr );
        Assert( !result || equal( kmer.rcbegin(),kmer.rcend(),seqItr ) );
        return result;
    }

    // Print peak memory use so far (GB) to stdout.
    void ReportMemUsage() {
        std::cout << Date() << ": Peak memory use = "
                  << PeakMemUsageBytes( ) / 1000000000.0 << std::resetiosflags(std::ios::fixed)
                  << " GB" << std::endl;
    }

private:
    const vecbvec &mReads;
    KmerDict<K> *mpDict;
    UnipathGraph<K> *mpGraph;
    vec<ReadULocVec> mULocs;  // read path seg on unipaths, indexed by unipath id,
                              // sorted by the starting position on the unipath
    int mCopyNumberMax;       // ignore short unipaths with high copy number
    Bool mUseDownSampling;
    int mVerbose;
};
#endif
|
aux_interp.c | /******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
#include "_hypre_parcsr_ls.h"
#include "aux_interp.h"
#include "hypre_hopscotch_hash.h"
/*---------------------------------------------------------------------------
* Auxilary routines for the long range interpolation methods.
* Implemented: "standard", "extended", "multipass", "FF"
*--------------------------------------------------------------------------*/
/* AHB 11/06: Modification of the original - takes two communication
   packages and inserts nodes at the position expected for OUT_marker.

   Offd nodes from comm_pkg take up the first chunk of OUT_marker; offd
   nodes from extend_comm_pkg take up the second chunk.

   Note: full_off_procNodes is unused here; it is kept for interface
   compatibility with callers. The single send buffer is sized for the
   larger of the two communication packages and reused for both exchanges. */
HYPRE_Int hypre_alt_insert_new_nodes(hypre_ParCSRCommPkg *comm_pkg,
                          hypre_ParCSRCommPkg *extend_comm_pkg,
                          HYPRE_Int *IN_marker,
                          HYPRE_Int full_off_procNodes,
                          HYPRE_Int *OUT_marker)
{
   hypre_ParCSRCommHandle *comm_handle;
   HYPRE_Int i, index, shift;

   HYPRE_Int num_sends, num_recvs;

   HYPRE_Int *recv_vec_starts;

   HYPRE_Int e_num_sends;

   HYPRE_Int *int_buf_data;
   HYPRE_Int *e_out_marker;

   num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
   num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg);
   recv_vec_starts = hypre_ParCSRCommPkgRecvVecStarts(comm_pkg);

   e_num_sends = hypre_ParCSRCommPkgNumSends(extend_comm_pkg);

   /* size the buffer for the larger of the two send maps */
   index = hypre_max(hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),
                     hypre_ParCSRCommPkgSendMapStart(extend_comm_pkg, e_num_sends));

   int_buf_data = hypre_CTAlloc(HYPRE_Int, index, HYPRE_MEMORY_HOST);

   /* orig commpkg data: pack and exchange into the first chunk */
   HYPRE_Int begin = hypre_ParCSRCommPkgSendMapStart(comm_pkg, 0);
   HYPRE_Int end = hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for HYPRE_SMP_SCHEDULE
#endif
   for (i = begin; i < end; ++i) {
      int_buf_data[i - begin] =
         IN_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, i)];
   }

   comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
                                               OUT_marker);
   hypre_ParCSRCommHandleDestroy(comm_handle);

   /* now do the extend commpkg */

   /* first we need to shift our position in the OUT_marker */
   shift = recv_vec_starts[num_recvs];
   e_out_marker = OUT_marker + shift;

   begin = hypre_ParCSRCommPkgSendMapStart(extend_comm_pkg, 0);
   end = hypre_ParCSRCommPkgSendMapStart(extend_comm_pkg, e_num_sends);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for HYPRE_SMP_SCHEDULE
#endif
   for (i = begin; i < end; ++i) {
      int_buf_data[i - begin] =
         IN_marker[hypre_ParCSRCommPkgSendMapElmt(extend_comm_pkg, i)];
   }

   comm_handle = hypre_ParCSRCommHandleCreate( 11, extend_comm_pkg, int_buf_data,
                                               e_out_marker);
   hypre_ParCSRCommHandleDestroy(comm_handle);
   comm_handle = NULL;

   hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);

   return hypre_error_flag;
}
/* BigInt variant of hypre_alt_insert_new_nodes: exchanges IN_marker values
   (shifted by `offset`) through two communication packages; values received
   via comm_pkg fill the first chunk of OUT_marker, values received via
   extend_comm_pkg fill the second chunk.

   Note: full_off_procNodes is unused here; it is kept for interface
   compatibility with callers. */
HYPRE_Int hypre_big_insert_new_nodes(hypre_ParCSRCommPkg *comm_pkg,
                          hypre_ParCSRCommPkg *extend_comm_pkg,
                          HYPRE_Int *IN_marker,
                          HYPRE_Int full_off_procNodes,
                          HYPRE_BigInt offset,
                          HYPRE_BigInt *OUT_marker)
{
   hypre_ParCSRCommHandle *comm_handle;
   HYPRE_Int i, index, shift;

   HYPRE_Int num_sends, num_recvs;

   HYPRE_Int *recv_vec_starts;

   HYPRE_Int e_num_sends;

   HYPRE_BigInt *int_buf_data;
   HYPRE_BigInt *e_out_marker;

   num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
   num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg);
   recv_vec_starts = hypre_ParCSRCommPkgRecvVecStarts(comm_pkg);

   e_num_sends = hypre_ParCSRCommPkgNumSends(extend_comm_pkg);

   /* size the buffer for the larger of the two send maps */
   index = hypre_max(hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),
                     hypre_ParCSRCommPkgSendMapStart(extend_comm_pkg, e_num_sends));

   int_buf_data = hypre_CTAlloc(HYPRE_BigInt, index, HYPRE_MEMORY_HOST);

   /* orig commpkg data: pack (with global offset) and exchange */
   HYPRE_Int begin = hypre_ParCSRCommPkgSendMapStart(comm_pkg, 0);
   HYPRE_Int end = hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for HYPRE_SMP_SCHEDULE
#endif
   for (i = begin; i < end; ++i) {
      int_buf_data[i - begin] = offset +
         (HYPRE_BigInt) IN_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, i)];
   }

   comm_handle = hypre_ParCSRCommHandleCreate( 21, comm_pkg, int_buf_data,
                                               OUT_marker);
   hypre_ParCSRCommHandleDestroy(comm_handle);

   /* now do the extend commpkg */

   /* first we need to shift our position in the OUT_marker */
   shift = recv_vec_starts[num_recvs];
   e_out_marker = OUT_marker + shift;

   begin = hypre_ParCSRCommPkgSendMapStart(extend_comm_pkg, 0);
   end = hypre_ParCSRCommPkgSendMapStart(extend_comm_pkg, e_num_sends);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for HYPRE_SMP_SCHEDULE
#endif
   for (i = begin; i < end; ++i) {
      int_buf_data[i - begin] = offset +
         (HYPRE_BigInt) IN_marker[hypre_ParCSRCommPkgSendMapElmt(extend_comm_pkg, i)];
   }

   comm_handle = hypre_ParCSRCommHandleCreate( 21, extend_comm_pkg, int_buf_data,
                                               e_out_marker);
   hypre_ParCSRCommHandleDestroy(comm_handle);
   comm_handle = NULL;

   hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);

   return hypre_error_flag;
}
/* Selection sort for small, non-ordered arrays. Sorts data[0..n-1] in place
   (ascending, via hypre_index_of_minimum / hypre_swap_int) and returns 1 if
   any element was moved, 0 otherwise. */
HYPRE_Int hypre_ssort(HYPRE_BigInt *data, HYPRE_Int n)
{
   HYPRE_Int pos, sel;
   HYPRE_Int swapped = 0;

   if (n <= 0)
   {
      return 0;
   }

   /* Shrink the unsorted prefix from the right, moving the selected
      element of data[0..pos] into slot pos each pass. */
   for (pos = n - 1; pos > 0; pos--)
   {
      sel = hypre_index_of_minimum(data, pos + 1);
      if (sel != pos)
      {
         hypre_swap_int(data, pos, sel);
         swapped = 1;
      }
   }

   return swapped;
}
/* Auxiliary function for hypre_ssort.
 *
 * NOTE(review): despite its name, the comparison `data[answer] < data[i]`
 * returns the index of the LARGEST element in data[0..n-1]. hypre_ssort
 * places that element at the end of the active range, so the overall sort
 * comes out ascending. Do not "fix" this comparison in isolation without
 * also adjusting hypre_ssort. */
HYPRE_Int hypre_index_of_minimum(HYPRE_BigInt *data, HYPRE_Int n)
{
  HYPRE_Int answer;
  HYPRE_Int i;

  answer = 0;
  for(i = 1; i < n; i++)
    if(data[answer] < data[i])
      answer = i;

  return answer;
}
/* Exchange entries a and b of data in place. */
void hypre_swap_int(HYPRE_BigInt *data, HYPRE_Int a, HYPRE_Int b)
{
   HYPRE_BigInt tmp = data[a];

   data[a] = data[b];
   data[b] = tmp;
}
/* Initialize CF_marker_offd, CF_marker, P_marker, P_marker_offd, tmp */
/* Fill the interpolation work vectors with -1:
 *   diag_ftc and (optionally) diag_pm over [0, diag_n),
 *   offd_ftc, tmp_CF and (optionally) offd_pm over [0, offd_n).
 * The shared prefix [0, min(diag_n, offd_n)) is written in one parallel
 * pass; whichever range is longer gets its own tail loop (the other
 * tail loop is empty).  This performs exactly the same writes as
 * branching on offd_n < diag_n. */
void hypre_initialize_vecs(HYPRE_Int diag_n, HYPRE_Int offd_n, HYPRE_Int *diag_ftc, HYPRE_BigInt *offd_ftc,
                           HYPRE_Int *diag_pm, HYPRE_Int *offd_pm, HYPRE_Int *tmp_CF)
{
   HYPRE_Int i;
   const HYPRE_Int common = (offd_n < diag_n) ? offd_n : diag_n;

   /* prefix covered by every array */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for HYPRE_SMP_SCHEDULE
#endif
   for (i = 0; i < common; i++)
   {
      diag_ftc[i] = -1;
      offd_ftc[i] = -1;
      tmp_CF[i]   = -1;
      if (diag_pm != NULL)
      {
         diag_pm[i] = -1;
      }
      if (offd_pm != NULL)
      {
         offd_pm[i] = -1;
      }
   }

   /* diag-only tail (empty unless diag_n > offd_n) */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for HYPRE_SMP_SCHEDULE
#endif
   for (i = common; i < diag_n; i++)
   {
      diag_ftc[i] = -1;
      if (diag_pm != NULL)
      {
         diag_pm[i] = -1;
      }
   }

   /* offd-only tail (empty unless offd_n > diag_n) */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for HYPRE_SMP_SCHEDULE
#endif
   for (i = common; i < offd_n; i++)
   {
      offd_ftc[i] = -1;
      tmp_CF[i]   = -1;
      if (offd_pm != NULL)
      {
         offd_pm[i] = -1;
      }
   }
}
/* Find nodes that are offd and are not contained in original offd
* (neighbors of neighbors) */
/* Scan the external rows (A_ext, Sop) belonging to the off-diagonal
 * columns and collect global column indices that fall outside this
 * processor's owned range [col_1, col_n) and are not already present in
 * col_map_offd — i.e. "neighbors of neighbors".  The sorted,
 * de-duplicated list is returned through *found and its length is the
 * return value (newoff).  As a side effect, every off-processor entry
 * in A_ext_j / Sop_j is overwritten with a negative code
 * (-local_index - 1) so later passes can decode it; indices already in
 * col_map_offd map to [0, num_cols_A_offd), new ones to
 * [num_cols_A_offd, num_cols_A_offd + newoff).
 * Two implementations: a concurrent one built on hypre's hopscotch
 * hash containers, and a serial binary-search fallback. */
static HYPRE_Int hypre_new_offd_nodes(HYPRE_BigInt **found, HYPRE_Int num_cols_A_offd,
HYPRE_Int *A_ext_i, HYPRE_BigInt *A_ext_j,
HYPRE_Int num_cols_S_offd, HYPRE_BigInt *col_map_offd, HYPRE_BigInt col_1,
HYPRE_BigInt col_n, HYPRE_Int *Sop_i, HYPRE_BigInt *Sop_j,
HYPRE_Int *CF_marker_offd)
{
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_RENUMBER_COLIDX] -= hypre_MPI_Wtime();
#endif
HYPRE_BigInt big_i1, big_k1;
HYPRE_Int i, j, kk;
HYPRE_Int got_loc, loc_col;
/*HYPRE_Int min;*/
HYPRE_Int newoff = 0;
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
/* inverse map: global column id -> position in col_map_offd */
hypre_UnorderedBigIntMap col_map_offd_inverse;
hypre_UnorderedBigIntMapCreate(&col_map_offd_inverse, 2*num_cols_A_offd, 16*hypre_NumThreads());
#pragma omp parallel for HYPRE_SMP_SCHEDULE
for (i = 0; i < num_cols_A_offd; i++)
{
hypre_UnorderedBigIntMapPutIfAbsent(&col_map_offd_inverse, col_map_offd[i], i);
}
/* Find nodes that will be added to the off diag list */
HYPRE_Int size_offP = A_ext_i[num_cols_A_offd];
hypre_UnorderedBigIntSet set;
hypre_UnorderedBigIntSetCreate(&set, size_offP, 16*hypre_NumThreads());
#pragma omp parallel private(i,j,big_i1)
{
#pragma omp for HYPRE_SMP_SCHEDULE
for (i = 0; i < num_cols_A_offd; i++)
{
/* only fine rows contribute new off-processor nodes */
if (CF_marker_offd[i] < 0)
{
for (j = A_ext_i[i]; j < A_ext_i[i+1]; j++)
{
big_i1 = A_ext_j[j];
if(big_i1 < col_1 || big_i1 >= col_n)
{
if (!hypre_UnorderedBigIntSetContains(&set, big_i1))
{
HYPRE_Int k = hypre_UnorderedBigIntMapGet(&col_map_offd_inverse, big_i1);
if (-1 == k)
{
/* genuinely new node: remember it for later numbering */
hypre_UnorderedBigIntSetPut(&set, big_i1);
}
else
{
/* already a known offd column: encode its local index now */
A_ext_j[j] = -k - 1;
}
}
}
}
for (j = Sop_i[i]; j < Sop_i[i+1]; j++)
{
big_i1 = Sop_j[j];
if(big_i1 < col_1 || big_i1 >= col_n)
{
if (!hypre_UnorderedBigIntSetContains(&set, big_i1))
{
Sop_j[j] = -hypre_UnorderedBigIntMapGet(&col_map_offd_inverse, big_i1) - 1;
}
}
}
} /* CF_marker_offd[i] < 0 */
} /* for each row */
} /* omp parallel */
hypre_UnorderedBigIntMapDestroy(&col_map_offd_inverse);
HYPRE_BigInt *tmp_found = hypre_UnorderedBigIntSetCopyToArray(&set, &newoff);
hypre_UnorderedBigIntSetDestroy(&set);
/* Put found in monotone increasing order */
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_MERGE] -= hypre_MPI_Wtime();
#endif
hypre_UnorderedBigIntMap tmp_found_inverse;
if (newoff > 0)
{
hypre_big_sort_and_create_inverse_map(tmp_found, newoff, &tmp_found, &tmp_found_inverse);
}
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_MERGE] += hypre_MPI_Wtime();
#endif
/* Set column indices for Sop and A_ext such that offd nodes are
 * negatively indexed */
#pragma omp parallel for private(kk,big_k1,got_loc,loc_col) HYPRE_SMP_SCHEDULE
for(i = 0; i < num_cols_A_offd; i++)
{
if (CF_marker_offd[i] < 0)
{
for(kk = Sop_i[i]; kk < Sop_i[i+1]; kk++)
{
big_k1 = Sop_j[kk];
/* big_k1 > -1 skips entries already encoded in the first pass */
if(big_k1 > -1 && (big_k1 < col_1 || big_k1 >= col_n))
{
got_loc = hypre_UnorderedBigIntMapGet(&tmp_found_inverse, big_k1);
loc_col = got_loc + num_cols_A_offd;
Sop_j[kk] = (HYPRE_BigInt)(-loc_col - 1);
}
}
for (kk = A_ext_i[i]; kk < A_ext_i[i+1]; kk++)
{
big_k1 = A_ext_j[kk];
if(big_k1 > -1 && (big_k1 < col_1 || big_k1 >= col_n))
{
got_loc = hypre_UnorderedBigIntMapGet(&tmp_found_inverse, big_k1);
loc_col = got_loc + num_cols_A_offd;
A_ext_j[kk] = (HYPRE_BigInt)(-loc_col - 1);
}
}
}
}
if (newoff)
{
hypre_UnorderedBigIntMapDestroy(&tmp_found_inverse);
}
#else /* !HYPRE_CONCURRENT_HOPSCOTCH */
HYPRE_Int size_offP;
HYPRE_BigInt *tmp_found;
HYPRE_Int min;
HYPRE_Int ifound;
/* worst case: every external entry is a new node */
size_offP = A_ext_i[num_cols_A_offd]+Sop_i[num_cols_A_offd];
tmp_found = hypre_CTAlloc(HYPRE_BigInt, size_offP, HYPRE_MEMORY_HOST);
/* Find nodes that will be added to the off diag list */
for (i = 0; i < num_cols_A_offd; i++)
{
if (CF_marker_offd[i] < 0)
{
for (j = A_ext_i[i]; j < A_ext_i[i+1]; j++)
{
big_i1 = A_ext_j[j];
if(big_i1 < col_1 || big_i1 >= col_n)
{
ifound = hypre_BigBinarySearch(col_map_offd,big_i1,num_cols_A_offd);
if(ifound == -1)
{
tmp_found[newoff]=big_i1;
newoff++;
}
else
{
A_ext_j[j] = (HYPRE_BigInt)(-ifound-1);
}
}
}
for (j = Sop_i[i]; j < Sop_i[i+1]; j++)
{
big_i1 = Sop_j[j];
if(big_i1 < col_1 || big_i1 >= col_n)
{
ifound = hypre_BigBinarySearch(col_map_offd,big_i1,num_cols_A_offd);
if(ifound == -1)
{
tmp_found[newoff]=big_i1;
newoff++;
}
else
{
Sop_j[j] = (HYPRE_BigInt)(-ifound-1);
}
}
}
}
}
/* Put found in monotone increasing order */
if (newoff > 0)
{
hypre_BigQsort0(tmp_found,0,newoff-1);
/* in-place de-duplication of the sorted list */
ifound = tmp_found[0];
min = 1;
for (i=1; i < newoff; i++)
{
if (tmp_found[i] > ifound)
{
ifound = tmp_found[i];
tmp_found[min++] = ifound;
}
}
newoff = min;
}
/* Set column indices for Sop and A_ext such that offd nodes are
 * negatively indexed */
for(i = 0; i < num_cols_A_offd; i++)
{
if (CF_marker_offd[i] < 0)
{
for(kk = Sop_i[i]; kk < Sop_i[i+1]; kk++)
{
big_k1 = Sop_j[kk];
if(big_k1 > -1 && (big_k1 < col_1 || big_k1 >= col_n))
{
got_loc = hypre_BigBinarySearch(tmp_found,big_k1,newoff);
if(got_loc > -1)
loc_col = got_loc + num_cols_A_offd;
/* NOTE(review): if got_loc == -1 here, loc_col carries a stale
 * value into the assignment below.  This looks unreachable
 * because every such entry was placed in tmp_found above (and
 * the A_ext loop below does not guard at all) — confirm. */
Sop_j[kk] = (HYPRE_BigInt)(-loc_col - 1);
}
}
for (kk = A_ext_i[i]; kk < A_ext_i[i+1]; kk++)
{
big_k1 = A_ext_j[kk];
if(big_k1 > -1 && (big_k1 < col_1 || big_k1 >= col_n))
{
got_loc = hypre_BigBinarySearch(tmp_found,big_k1,newoff);
loc_col = got_loc + num_cols_A_offd;
A_ext_j[kk] = (HYPRE_BigInt)(-loc_col - 1);
}
}
}
}
#endif /* !HYPRE_CONCURRENT_HOPSCOTCH */
/* ownership of tmp_found transfers to the caller via *found */
*found = tmp_found;
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_RENUMBER_COLIDX] += hypre_MPI_Wtime();
#endif
return newoff;
}
/* Gather IN_marker values at the positions listed in the comm package's
 * send map into a temporary buffer, then communicate them to the
 * neighboring processors; received values land in OUT_marker.
 * Returns hypre_error_flag. */
HYPRE_Int hypre_exchange_marker(hypre_ParCSRCommPkg *comm_pkg,
                                HYPRE_Int *IN_marker,
                                HYPRE_Int *OUT_marker)
{
   HYPRE_Int nsends = hypre_ParCSRCommPkgNumSends(comm_pkg);
   HYPRE_Int first  = hypre_ParCSRCommPkgSendMapStart(comm_pkg, 0);
   HYPRE_Int last   = hypre_ParCSRCommPkgSendMapStart(comm_pkg, nsends);
   HYPRE_Int *send_buf = hypre_CTAlloc(HYPRE_Int, last, HYPRE_MEMORY_HOST);
   HYPRE_Int j;

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for HYPRE_SMP_SCHEDULE
#endif
   for (j = first; j < last; ++j)
   {
      send_buf[j - first] = IN_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)];
   }

   /* job 11: integer exchange; blocking — destroy completes it */
   hypre_ParCSRCommHandle *handle =
      hypre_ParCSRCommHandleCreate(11, comm_pkg, send_buf, OUT_marker);
   hypre_ParCSRCommHandleDestroy(handle);

   hypre_TFree(send_buf, HYPRE_MEMORY_HOST);
   return hypre_error_flag;
}
/* Fetch from neighboring processors everything needed to build
 * long-range interpolation: the CF markers of the offd columns, the
 * external rows of A (*A_ext) and S (*Sop), and — via
 * hypre_new_offd_nodes — the "neighbor of neighbor" columns, for which
 * a dedicated communication package (*extend_comm_pkg) is created.
 * On exit *full_off_procNodes = num_cols_A_offd + #new nodes, and
 * *CF_marker_offd (plus *dof_func_offd when num_functions > 1) covers
 * the full extended set.  Caller owns the returned allocations. */
HYPRE_Int hypre_exchange_interp_data(
HYPRE_Int **CF_marker_offd,
HYPRE_Int **dof_func_offd,
hypre_CSRMatrix **A_ext,
HYPRE_Int *full_off_procNodes,
hypre_CSRMatrix **Sop,
hypre_ParCSRCommPkg **extend_comm_pkg,
hypre_ParCSRMatrix *A,
HYPRE_Int *CF_marker,
hypre_ParCSRMatrix *S,
HYPRE_Int num_functions,
HYPRE_Int *dof_func,
HYPRE_Int skip_fine_or_same_sign) // skip_fine_or_same_sign if we want to skip fine points in S and nnz with the same sign as diagonal in A
{
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_EXCHANGE_INTERP_DATA] -= hypre_MPI_Wtime();
#endif
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
HYPRE_BigInt *col_map_offd = hypre_ParCSRMatrixColMapOffd(A);
HYPRE_BigInt col_1 = hypre_ParCSRMatrixFirstRowIndex(A);
HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(A_diag);
HYPRE_BigInt col_n = col_1 + (HYPRE_BigInt)local_numrows;
HYPRE_BigInt *found = NULL;
/*----------------------------------------------------------------------
 * Get the off processors rows for A and S, associated with columns in
 * A_offd and S_offd.
 *---------------------------------------------------------------------*/
*CF_marker_offd = hypre_TAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
hypre_exchange_marker(comm_pkg, CF_marker, *CF_marker_offd);
/* overlapped extraction: communication is completed further below */
hypre_ParCSRCommHandle *comm_handle_a_idx, *comm_handle_a_data;
*A_ext = hypre_ParCSRMatrixExtractBExt_Overlap(A,A,1,&comm_handle_a_idx,&comm_handle_a_data,CF_marker,*CF_marker_offd,skip_fine_or_same_sign,skip_fine_or_same_sign);
HYPRE_Int *A_ext_i = hypre_CSRMatrixI(*A_ext);
HYPRE_BigInt *A_ext_j = hypre_CSRMatrixBigJ(*A_ext);
HYPRE_Int A_ext_rows = hypre_CSRMatrixNumRows(*A_ext);
hypre_ParCSRCommHandle *comm_handle_s_idx;
*Sop = hypre_ParCSRMatrixExtractBExt_Overlap(S,A,0,&comm_handle_s_idx,NULL,CF_marker,*CF_marker_offd,skip_fine_or_same_sign,0);
HYPRE_Int *Sop_i = hypre_CSRMatrixI(*Sop);
HYPRE_BigInt *Sop_j = hypre_CSRMatrixBigJ(*Sop);
HYPRE_Int Soprows = hypre_CSRMatrixNumRows(*Sop);
/* complete the index exchanges and release their send buffers */
HYPRE_Int *send_idx = (HYPRE_Int *)comm_handle_s_idx->send_data;
hypre_ParCSRCommHandleDestroy(comm_handle_s_idx);
hypre_TFree(send_idx, HYPRE_MEMORY_HOST);
send_idx = (HYPRE_Int *)comm_handle_a_idx->send_data;
hypre_ParCSRCommHandleDestroy(comm_handle_a_idx);
hypre_TFree(send_idx, HYPRE_MEMORY_HOST);
/* Find nodes that are neighbors of neighbors, not found in offd */
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_EXCHANGE_INTERP_DATA] += hypre_MPI_Wtime();
#endif
HYPRE_Int newoff = hypre_new_offd_nodes(&found, A_ext_rows, A_ext_i, A_ext_j,
Soprows, col_map_offd, col_1, col_n,
Sop_i, Sop_j, *CF_marker_offd);
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_EXCHANGE_INTERP_DATA] -= hypre_MPI_Wtime();
#endif
if(newoff >= 0)
*full_off_procNodes = newoff + num_cols_A_offd;
else
{
/* NOTE(review): this early return leaves comm_handle_a_data pending
 * and found/buffers unreleased; newoff appears to always be >= 0 —
 * confirm. */
return hypre_error_flag;
}
/* Possibly add new points and new processors to the comm_pkg, all
 * processors need new_comm_pkg */
/* AHB - create a new comm package just for extended info -
 this will work better with the assumed partition*/
hypre_ParCSRFindExtendCommPkg(hypre_ParCSRMatrixComm(A),
hypre_ParCSRMatrixGlobalNumCols(A),
hypre_ParCSRMatrixFirstColDiag(A),
hypre_CSRMatrixNumCols(A_diag),
hypre_ParCSRMatrixColStarts(A),
hypre_ParCSRMatrixAssumedPartition(A),
newoff,
found,
extend_comm_pkg);
/* grow the CF marker to cover the new nodes; the extended exchange
 * writes after the first A_ext_rows entries.  NOTE(review): the offset
 * uses A_ext_rows where num_cols_A_offd seems intended — they appear to
 * be equal (one external row per offd column); confirm. */
*CF_marker_offd = hypre_TReAlloc(*CF_marker_offd, HYPRE_Int, *full_off_procNodes, HYPRE_MEMORY_HOST);
hypre_exchange_marker(*extend_comm_pkg, CF_marker, *CF_marker_offd + A_ext_rows);
if(num_functions > 1)
{
if (*full_off_procNodes > 0)
*dof_func_offd = hypre_CTAlloc(HYPRE_Int, *full_off_procNodes, HYPRE_MEMORY_HOST);
hypre_alt_insert_new_nodes(comm_pkg, *extend_comm_pkg, dof_func,
*full_off_procNodes, *dof_func_offd);
}
hypre_TFree(found, HYPRE_MEMORY_HOST);
/* finally complete the overlapped A-data exchange */
HYPRE_Real *send_data = (HYPRE_Real *)comm_handle_a_data->send_data;
hypre_ParCSRCommHandleDestroy(comm_handle_a_data);
hypre_TFree(send_data, HYPRE_MEMORY_HOST);
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_EXCHANGE_INTERP_DATA] += hypre_MPI_Wtime();
#endif
return hypre_error_flag;
}
/* Compress and renumber the off-diagonal part of the interpolation
 * matrix P: keep only the columns that actually appear in P_offd_j AND
 * are coarse (tmp_CF_marker_offd >= 0), build the sorted global column
 * map col_map_offd_P from fine_to_coarse_offd, and rewrite P_offd_j in
 * terms of the new compressed local indices.  Installs the map and the
 * new column count into P. */
void hypre_build_interp_colmap(hypre_ParCSRMatrix *P, HYPRE_Int full_off_procNodes, HYPRE_Int *tmp_CF_marker_offd, HYPRE_BigInt *fine_to_coarse_offd)
{
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_RENUMBER_COLIDX] -= hypre_MPI_Wtime();
#endif
HYPRE_Int i, index;
HYPRE_Int n_fine = hypre_CSRMatrixNumRows(P->diag);
HYPRE_Int P_offd_size = P->offd->i[n_fine];
HYPRE_Int *P_offd_j = P->offd->j;
HYPRE_BigInt *col_map_offd_P = NULL;
HYPRE_Int *P_marker = NULL;
if (full_off_procNodes)
P_marker = hypre_TAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < full_off_procNodes; i++)
P_marker[i] = 0;
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
/* These two loops set P_marker[i] to 1 if it appears in P_offd_j and if
 * tmp_CF_marker_offd has i marked. num_cols_P_offd is then set to the
 * total number of times P_marker is set */
#pragma omp parallel for private(i,index) HYPRE_SMP_SCHEDULE
for (i=0; i < P_offd_size; i++)
{
index = P_offd_j[i];
if(tmp_CF_marker_offd[index] >= 0)
{ P_marker[index] = 1; }
}
/* VLA sized by the thread count; used by hypre_prefix_sum below */
HYPRE_Int prefix_sum_workspace[hypre_NumThreads() + 1];
HYPRE_Int num_cols_P_offd = 0;
#pragma omp parallel private(i)
{
HYPRE_Int i_begin, i_end;
hypre_GetSimpleThreadPartition(&i_begin, &i_end, full_off_procNodes);
/* count marked columns in this thread's slice, then prefix-sum so
 * each thread knows its write offset into col_map_offd_P */
HYPRE_Int local_num_cols_P_offd = 0;
for (i = i_begin; i < i_end; i++)
{
if (P_marker[i] == 1) local_num_cols_P_offd++;
}
hypre_prefix_sum(&local_num_cols_P_offd, &num_cols_P_offd, prefix_sum_workspace);
#pragma omp master
{
if (num_cols_P_offd)
col_map_offd_P = hypre_TAlloc(HYPRE_BigInt, num_cols_P_offd, HYPRE_MEMORY_HOST);
}
#pragma omp barrier
for (i = i_begin; i < i_end; i++)
{
if (P_marker[i] == 1)
{
col_map_offd_P[local_num_cols_P_offd++] = fine_to_coarse_offd[i];
}
}
}
hypre_UnorderedBigIntMap col_map_offd_P_inverse;
hypre_big_sort_and_create_inverse_map(col_map_offd_P, num_cols_P_offd, &col_map_offd_P, &col_map_offd_P_inverse);
// find old idx -> new idx map
#pragma omp parallel for
for (i = 0; i < full_off_procNodes; i++)
P_marker[i] = hypre_UnorderedBigIntMapGet(&col_map_offd_P_inverse, fine_to_coarse_offd[i]);
/* NOTE(review): destroy is guarded by num_cols_P_offd while the create
 * call above is not — presumably the create no-ops when the count is
 * zero; confirm. */
if (num_cols_P_offd)
{
hypre_UnorderedBigIntMapDestroy(&col_map_offd_P_inverse);
}
#pragma omp parallel for
for(i = 0; i < P_offd_size; i++)
P_offd_j[i] = P_marker[P_offd_j[i]];
#else /* HYPRE_CONCURRENT_HOPSCOTCH */
HYPRE_Int num_cols_P_offd = 0;
HYPRE_Int j;
/* serial fallback: mark used coarse columns and count them */
for (i=0; i < P_offd_size; i++)
{
index = P_offd_j[i];
if (!P_marker[index])
{
if(tmp_CF_marker_offd[index] >= 0)
{
num_cols_P_offd++;
P_marker[index] = 1;
}
}
}
if (num_cols_P_offd)
{
HYPRE_Int *tmp_map_offd = hypre_CTAlloc(HYPRE_Int, num_cols_P_offd, HYPRE_MEMORY_HOST);
HYPRE_BigInt *tmp_marker = hypre_CTAlloc(HYPRE_BigInt, num_cols_P_offd, HYPRE_MEMORY_HOST);
col_map_offd_P = hypre_CTAlloc(HYPRE_BigInt, num_cols_P_offd, HYPRE_MEMORY_HOST);
/* tmp_map_offd[k] = k-th marked old index, in increasing order */
index = 0;
for(i = 0; i < num_cols_P_offd; i++)
{
while( P_marker[index] == 0) index++;
tmp_map_offd[i] = index++;
}
/* renumber P_offd_j: old index -> its rank among marked indices */
for(i = 0; i < P_offd_size; i++)
P_offd_j[i] = hypre_BinarySearch(tmp_map_offd,
P_offd_j[i],
num_cols_P_offd);
index = 0;
for(i = 0; i < num_cols_P_offd; i++)
{
while (P_marker[index] == 0) index++;
col_map_offd_P[i] = fine_to_coarse_offd[index];
index++;
}
/* Sort the col_map_offd_P and P_offd_j correctly */
for(i = 0; i < num_cols_P_offd; i++)
tmp_marker[i] = col_map_offd_P[i];
/* Check if sort actually changed anything */
if(hypre_ssort(col_map_offd_P,num_cols_P_offd))
{
for(i = 0; i < P_offd_size; i++)
for(j = 0; j < num_cols_P_offd; j++)
if(tmp_marker[P_offd_j[i]] == col_map_offd_P[j])
{
P_offd_j[i] = j;
j = num_cols_P_offd;
}
}
hypre_TFree(tmp_marker, HYPRE_MEMORY_HOST);
hypre_TFree(tmp_map_offd, HYPRE_MEMORY_HOST);
}
#endif /* HYPRE_CONCURRENT_HOPSCOTCH */
hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
if (num_cols_P_offd)
{
/* P takes ownership of col_map_offd_P */
hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P;
hypre_CSRMatrixNumCols(P->offd) = num_cols_P_offd;
}
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_RENUMBER_COLIDX] += hypre_MPI_Wtime();
#endif
}
|
test.c | #include <stdio.h>
#include <omp.h>
#include "../utilities/check.h"
#include "../utilities/utilities.h"
// enable tests
#define CHECK 1
#define DEBUG 0
#define N (992)
#define INIT() INIT_LOOP(N, {A[i] = 0; C[i] = 1; D[i] = i; E[i] = -i;})
/* Exercises the OpenMP device-memory runtime API: default-device
 * get/set (including inside a task), omp_target_alloc /
 * omp_target_memcpy with offsets, is_device_ptr, and
 * omp_target_associate_ptr / disassociate_ptr.  Results are checked on
 * the host via VERIFY; the printed lines are the test's expected
 * output, so they must not change. */
int main(void){
#if CHECK
check_offloading();
#endif
/*
 * Default device
 */
printf("Is%s initial device\n", omp_is_initial_device() ? "" : " not");
printf("Initial device: %d\n", omp_get_initial_device());
omp_set_default_device(1);
printf("Default device before task: %d\n", omp_get_default_device());
#pragma omp task
{
printf("Default device inside task: %d\n", omp_get_default_device());
omp_set_default_device(2);
printf("Default device inside task after resetting: %d\n",
omp_get_default_device());
}
#pragma omp taskwait
printf("Default device outside task: %d\n", omp_get_default_device());
// default device can set to whatever, if target fails, it goes to the host
const int default_device = 0;
omp_set_default_device(default_device);
// default device for omp target call MUST be >= 0 and <omp_get_num_devices() or
// the initial device. So when there are no devices, it must be the initial device
int default_device_omp_target_call = default_device;
if (omp_get_num_devices() == 0) {
default_device_omp_target_call = omp_get_initial_device();
}
#if DEBUG
printf("test on machine with %d devices\n", omp_get_num_devices());
#endif
/*
 * Target alloc & target memcpy
 */
double A[N], B[N], C[N], D[N], E[N];
double *pA, *pB, *pC, *pD, *pE;
// map ptrs
pA = &A[0];
pB = &B[0];
pC = &C[0];
pD = &D[0];
pE = &E[0];
INIT();
/* shift the host pointers so the later memcpy offsets (10/20/30
 * elements) land back at the array starts */
pA = pA - 10;
pC = pC - 20;
pD = pD - 30;
void *device_A = omp_target_alloc(N*sizeof(double), default_device_omp_target_call);
void *device_C = omp_target_alloc(N*sizeof(double), default_device_omp_target_call);
void *device_D = omp_target_alloc(N*sizeof(double), default_device_omp_target_call);
/* same trick on the device side with offsets 100/200/300 */
double *dpA = (double *) device_A - 100;
double *dpC = (double *) device_C - 200;
double *dpD = (double *) device_D - 300;
printf("omp_target_alloc %s\n", device_A && device_C && device_D ?
"succeeded" : "failed");
/* omp_target_memcpy(dst, src, length, dst_offset, src_offset,
 *                   dst_device, src_device) */
omp_target_memcpy(dpC, pC, N*sizeof(double), 200*sizeof(double),
20*sizeof(double), default_device_omp_target_call, omp_get_initial_device());
omp_target_memcpy(dpD, pD, N*sizeof(double), 300*sizeof(double),
30*sizeof(double), default_device_omp_target_call, omp_get_initial_device());
#pragma omp target is_device_ptr(dpA, dpC, dpD) device(default_device)
{
#pragma omp parallel for schedule(static,1)
for (int i = 0; i < 992; i++)
dpA[i+100] = dpC[i+200] + dpD[i+300] + 1;
}
omp_target_memcpy(pA, dpA, N*sizeof(double), 10*sizeof(double),
100*sizeof(double), omp_get_initial_device(), default_device_omp_target_call);
/* A[i] = C[i] + D[i] + 1 = 1 + i + 1 = i + 2 */
int fail = 0;
VERIFY(0, N, A[i], (double)(i+2));
if (fail) {
printf ("Test omp_target_memcpy: Failed\n");
} else {
printf ("Test omp_target_memcpy: Succeeded\n");
}
/*
 * target_is_present and target_associate/disassociate_ptr
 */
INIT();
if (offloading_disabled()) {
// If offloading is disabled just recreate the messages so that this can
// also be tested with no device.
printf("C is not present, associating it...\n");
printf("omp_target_associate_ptr C %s\n", 1 ? "succeeded" : "failed");
} else if (!omp_target_is_present(C, default_device_omp_target_call)) {
printf("C is not present, associating it...\n");
int rc = omp_target_associate_ptr(C, dpC, N*sizeof(double),
200*sizeof(double), default_device_omp_target_call);
printf("omp_target_associate_ptr C %s\n", !rc ? "succeeded" : "failed");
}
if (offloading_disabled()) {
// If offloading is disabled just recreate the messages so that this can
// also be tested with no device.
printf("D is not present, associating it...\n");
printf("omp_target_associate_ptr D %s\n", 1 ? "succeeded" : "failed");
} else if (!omp_target_is_present(D, default_device_omp_target_call)) {
printf("D is not present, associating it...\n");
int rc = omp_target_associate_ptr(D, dpD, N*sizeof(double),
300*sizeof(double), default_device_omp_target_call);
printf("omp_target_associate_ptr D %s\n", !rc ? "succeeded" : "failed");
}
#pragma omp target data map(from: C, D) device(default_device)
{
printf("Inside target data: A is%s present\n",
(omp_target_is_present(A, default_device_omp_target_call) && !offloading_disabled()) ? "" : " not");
printf("Inside target data: C is%s present\n",
omp_target_is_present(C, default_device_omp_target_call) ? "" : " not");
printf("Inside target data: D is%s present\n",
omp_target_is_present(D, default_device_omp_target_call) ? "" : " not");
// C and D are mapped "from", so there is no copy from host to device.
// If the association was successful, their corresponding device arrays
// are already populated from previous omp_target_memcpy with the correct
// values and the following target for-loop must yield the correct results.
#pragma omp target map(from: A) device(default_device)
{
#pragma omp parallel for schedule(static,1)
for (int i = 0; i < 992; i++)
A[i] = C[i] + D[i] + 1;
}
}
if (offloading_disabled()) {
printf("C is present, disassociating it...\n");
printf("omp_target_disassociate_ptr C %s\n", 1 ? "succeeded" : "failed");
} else if (omp_target_is_present(C, default_device_omp_target_call)) {
printf("C is present, disassociating it...\n");
int rc = omp_target_disassociate_ptr(C, default_device_omp_target_call);
printf("omp_target_disassociate_ptr C %s\n", !rc ? "succeeded" : "failed");
}
if (offloading_disabled()) {
printf("D is present, disassociating it...\n");
printf("omp_target_disassociate_ptr D %s\n", 1 ? "succeeded" : "failed");
} else if (omp_target_is_present(D, default_device_omp_target_call)) {
printf("D is present, disassociating it...\n");
int rc = omp_target_disassociate_ptr(D, default_device_omp_target_call);
printf("omp_target_disassociate_ptr D %s\n", !rc ? "succeeded" : "failed");
}
fail = 0;
VERIFY(0, N, A[i], (double)(i+2));
if (fail) {
printf ("Test omp_target_associate_ptr: Failed\n");
} else {
printf ("Test omp_target_associate_ptr: Succeeded\n");
}
omp_target_free(device_A, default_device_omp_target_call);
omp_target_free(device_C, default_device_omp_target_call);
omp_target_free(device_D, default_device_omp_target_call);
return 0;
}
|
SE3P_Stokes_direct.c | #include <math.h>
#include "SE_Stokes_direct.h"
#ifdef HASIMOTO
#include "hasimoto_decomp.h"
#elif BEENAKKER
#include "beenakker_decomp.h"
#else
#error "Must provide -D<decomposition> to compiler"
#endif
#ifdef _OPENMP
#include <omp.h>
#endif
/* Real-space (short-range) part of the triply-periodic stokeslet Ewald
 * sum, evaluated directly over all periodic image boxes within
 * opt.layers.  Velocities for the nidx points listed in idx are written
 * to u, stored component-major: u[0..nidx) = x, then y, then z.
 * The self term (same particle, home box) is omitted. */
void SE3P_Stokes_direct_real(double* restrict u,
                             const int* restrict idx, int nidx,
                             const double* restrict x,
                             const double* restrict f, int N,
                             const ewald_opts opt)
{
   const int layers = opt.layers;
   double rvec[3];
   double xm[3];
   double A[3][3];
   int p0, p1, p2, m, n;

   for (m = 0; m < nidx; m++)   /* each evaluation point */
   {
      u[m       ] = 0;
      u[m+nidx  ] = 0;
      u[m+2*nidx] = 0;

      /* hoist the indirect load out of the inner loops */
      xm[0] = x[idx[m]    ];
      xm[1] = x[idx[m]+N  ];
      xm[2] = x[idx[m]+2*N];

      for (p0 = -layers; p0 <= layers; p0++)     /* image boxes */
         for (p1 = -layers; p1 <= layers; p1++)
            for (p2 = -layers; p2 <= layers; p2++)
               for (n = 0; n < N; n++)           /* each particle */
               {
                  if (p0 == 0 && p1 == 0 && p2 == 0 && n == idx[m])
                     continue;                   /* skip self */

                  rvec[0] = xm[0]-x[n    ]+opt.box[0]*p0;
                  rvec[1] = xm[1]-x[n+  N]+opt.box[1]*p1;
                  rvec[2] = xm[2]-x[n+2*N]+opt.box[2]*p2;

                  op_A(A, rvec, opt.xi);         /* u += A*f */
                  u[m       ] += A[0][0]*f[n]+A[0][1]*f[n+N]+A[0][2]*f[n+2*N];
                  u[m+nidx  ] += A[1][0]*f[n]+A[1][1]*f[n+N]+A[1][2]*f[n+2*N];
                  u[m+2*nidx] += A[2][0]*f[n]+A[2][1]*f[n+N]+A[2][2]*f[n+2*N];
               }
   }
}
/* Same as SE3P_Stokes_direct_real, but pairs farther apart than the
 * cutoff opt.rc (Euclidean distance, including the image shift) are
 * skipped.  Output layout is component-major as in the uncut version. */
void SE3P_Stokes_direct_real_rc(double* restrict u,
                                const int* restrict idx, int nidx,
                                const double* restrict x,
                                const double* restrict f, int N,
                                const ewald_opts opt)
{
   const int layers = opt.layers;
   double rvec[3];
   double xm[3];
   double A[3][3];
   int p0, p1, p2, m, n;

   for (m = 0; m < nidx; m++)   /* each evaluation point */
   {
      u[m       ] = 0;
      u[m+nidx  ] = 0;
      u[m+2*nidx] = 0;

      xm[0] = x[idx[m]    ];
      xm[1] = x[idx[m]+N  ];
      xm[2] = x[idx[m]+2*N];

      for (p0 = -layers; p0 <= layers; p0++)     /* image boxes */
         for (p1 = -layers; p1 <= layers; p1++)
            for (p2 = -layers; p2 <= layers; p2++)
               for (n = 0; n < N; n++)           /* each particle */
               {
                  if (p0 == 0 && p1 == 0 && p2 == 0 && n == idx[m])
                     continue;                   /* skip self */

                  rvec[0] = xm[0]-x[n    ]+opt.box[0]*p0;
                  rvec[1] = xm[1]-x[n+  N]+opt.box[1]*p1;
                  rvec[2] = xm[2]-x[n+2*N]+opt.box[2]*p2;

                  /* same sqrt-based test as the original: keeps FP
                     behavior identical at the cutoff boundary */
                  if (sqrt(rvec[0]*rvec[0] + rvec[1]*rvec[1] + rvec[2]*rvec[2]) > opt.rc)
                     continue;                   /* outside cutoff */

                  op_A(A, rvec, opt.xi);         /* u += A*f */
                  u[m       ] += A[0][0]*f[n]+A[0][1]*f[n+N]+A[0][2]*f[n+2*N];
                  u[m+nidx  ] += A[1][0]*f[n]+A[1][1]*f[n+N]+A[1][2]*f[n+2*N];
                  u[m+2*nidx] += A[2][0]*f[n]+A[2][1]*f[n+N]+A[2][2]*f[n+2*N];
               }
   }
}
/* Real-space Ewald sum with cutoff opt.rc, evaluated at Nt external
 * target points xt (not at the particles themselves), parallelized over
 * targets with OpenMP.  Each thread accumulates into a private um[3]
 * and stores once per target, so no two threads write the same u entry.
 * NOTE(review): the parallel for uses default(none) but does not list
 * opt, nbox, Nt or N; this relies on const-qualified objects being
 * predetermined shared in the OpenMP C binding — confirm against the
 * OpenMP version targeted by the build. */
void SE3P_Stokes_direct_real_ext_rc(double* restrict u,
const double* restrict xt, const int Nt,
const double* restrict x,
const double* restrict f, const int N,
const ewald_opts opt)
{
const int nbox = opt.layers;
double r[3];
double xm[3];
double A[3][3];
int i1, i2, i3, m, n;
#ifdef _OPENMP
#pragma omp parallel for \
private(r,xm,A,i1,i2,i3,m,n) \
shared(u,xt,x,f) \
default(none)
#endif
for(m=0; m<Nt; m++) // for all evaluation points
{
double um[3] = {0.0, 0.0, 0.0};
xm[0] = xt[m ];
xm[1] = xt[m+Nt ];
xm[2] = xt[m+2*Nt];
for(i1 = -nbox; i1<=nbox; i1++) // image boxes
for(i2 = -nbox; i2<=nbox; i2++)
for(i3 = -nbox; i3<=nbox; i3++)
{
for(n=0; n<N; n++) // for all particles
{
// Assuming that r != 0 in home box
r[0] = xm[0]-x[n ]+opt.box[0]*i1;
r[1] = xm[1]-x[n+ N]+opt.box[1]*i2;
r[2] = xm[2]-x[n+2*N]+opt.box[2]*i3;
if(sqrt(r[0]*r[0] + r[1]*r[1] + r[2]*r[2]) > opt.rc)
continue; // skip outside rc
op_A(A,r,opt.xi); // u += A*f
um[0] +=
A[0][0]*f[n]+A[0][1]*f[n+N]+A[0][2]*f[n+2*N];
um[1] +=
A[1][0]*f[n]+A[1][1]*f[n+N]+A[1][2]*f[n+2*N];
um[2] +=
A[2][0]*f[n]+A[2][1]*f[n+N]+A[2][2]*f[n+2*N];
}
}
/* single store per target; component-major layout */
u[m ] = um[0];
u[m+Nt ] = um[1];
u[m+2*Nt] = um[2];
}
}
/* Fourier-space (k-space) part of the triply-periodic stokeslet Ewald
 * sum, evaluated directly over the k-space cube |k_i| <= opt.layers,
 * excluding k = 0.  For each k the structure factor z = sum_n cos(k.r)f
 * is formed, multiplied by the decomposition kernel B, and accumulated;
 * the result is divided by the box volume.  Output is component-major. */
void SE3P_Stokes_direct_fd(double* restrict u,
                           const int* restrict idx, int nidx,
                           const double* restrict x,
                           const double* restrict f, int N,
                           const ewald_opts opt)
{
   double B[3][3];
   double z[3];
   double k[3];
   double xm[3];
   int j1, j2, j3, m, n;
   double q, k_dot_r;
   const double vol  = opt.box[0]*opt.box[1]*opt.box[2];
   const int kmax    = opt.layers;
   const double kc0  = 2.0*PI/opt.box[0];
   const double kc1  = 2.0*PI/opt.box[1];
   const double kc2  = 2.0*PI/opt.box[2];

   for (m = 0; m < nidx; m++)   /* each evaluation point */
   {
      u[m       ] = 0;
      u[m+ nidx ] = 0;
      u[m+2*nidx] = 0;

      xm[0] = x[idx[m]    ];   /* indirect indexing OK in outer loop */
      xm[1] = x[idx[m]+N  ];
      xm[2] = x[idx[m]+2*N];

      for (j1 = -kmax; j1 <= kmax; j1++)   /* k-space cube */
         for (j2 = -kmax; j2 <= kmax; j2++)
            for (j3 = -kmax; j3 <= kmax; j3++)
            {
               if (j1 == 0 && j2 == 0 && j3 == 0)
                  continue;                 /* k = 0 excluded */

               z[0] = 0; z[1] = 0; z[2] = 0;
               k[0] = kc0*j1;
               k[1] = kc1*j2;
               k[2] = kc2*j3;

               for (n = 0; n < N; n++)      /* structure factor */
               {
                  k_dot_r =
                     k[0]*(xm[0]-x[n    ])+
                     k[1]*(xm[1]-x[n+N  ])+
                     k[2]*(xm[2]-x[n+2*N]);
                  q = cos(k_dot_r);
                  z[0] += q*f[n    ];
                  z[1] += q*f[n+  N];
                  z[2] += q*f[n+2*N];
               }

               op_B(B, k, opt.xi);          /* multiplication */
               u[m       ] += B[0][0]*z[0]+B[0][1]*z[1]+B[0][2]*z[2];
               u[m+nidx  ] += B[1][0]*z[0]+B[1][1]*z[1]+B[1][2]*z[2];
               u[m+2*nidx] += B[2][0]*z[0]+B[2][1]*z[1]+B[2][2]*z[2];
            }

      u[m       ] /= vol;
      u[m+ nidx ] /= vol;
      u[m+2*nidx] /= vol;
   }
}
/* Self-interaction term of the Ewald decomposition: each listed
 * particle's own force scaled by self_coeff(xi).  Output layout is
 * component-major, as in the other direct-sum routines. */
void SE3P_Stokes_direct_self(double* restrict u,
                             const int* restrict idx, int nidx,
                             const double* restrict f, int N,
                             const ewald_opts opt)
{
   const double scale = self_coeff(opt.xi);
   int m;

   for (m = 0; m < nidx; m++)
   {
      const int p = idx[m];
      u[m       ] = scale*f[p    ];
      u[m+ nidx ] = scale*f[p+N  ];
      u[m+2*nidx] = scale*f[p+2*N];
   }
}
|
blas_server_omp.c | /*********************************************************************/
/* Copyright 2009, 2010 The University of Texas at Austin. */
/* All rights reserved. */
/* */
/* Redistribution and use in source and binary forms, with or */
/* without modification, are permitted provided that the following */
/* conditions are met: */
/* */
/* 1. Redistributions of source code must retain the above */
/* copyright notice, this list of conditions and the following */
/* disclaimer. */
/* */
/* 2. Redistributions in binary form must reproduce the above */
/* copyright notice, this list of conditions and the following */
/* disclaimer in the documentation and/or other materials */
/* provided with the distribution. */
/* */
/* THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY OF TEXAS AT */
/* AUSTIN ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, */
/* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF */
/* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE */
/* DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OF TEXAS AT */
/* AUSTIN OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */
/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES */
/* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE */
/* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR */
/* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF */
/* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT */
/* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT */
/* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */
/* POSSIBILITY OF SUCH DAMAGE. */
/* */
/* The views and conclusions contained in the software and */
/* documentation are those of the authors and should not be */
/* interpreted as representing official policies, either expressed */
/* or implied, of The University of Texas at Austin. */
/*********************************************************************/
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
//#include <sys/mman.h>
#include "common.h"
#ifndef USE_OPENMP
#include "blas_server.c"
#else
#ifndef OMP_SCHED
#define OMP_SCHED static
#endif
int blas_server_avail = 0;
static void * blas_thread_buffer[MAX_PARALLEL_NUMBER][MAX_CPU_NUMBER];
#ifdef HAVE_C11
static atomic_bool blas_buffer_inuse[MAX_PARALLEL_NUMBER];
#else
static _Bool blas_buffer_inuse[MAX_PARALLEL_NUMBER];
#endif
/* Set the number of threads OpenBLAS uses under OpenMP, clamped to
 * [1, MAX_CPU_NUMBER] (values < 1 fall back to the current
 * blas_num_threads), then (re)size the per-thread work buffers:
 * threads that may now run get a buffer, surplus buffers are freed. */
void goto_set_num_threads(int num_threads) {

  int slot, t;

  if (num_threads < 1) num_threads = blas_num_threads;
  if (num_threads > MAX_CPU_NUMBER) num_threads = MAX_CPU_NUMBER;

  /* blas_num_threads only ever grows */
  if (num_threads > blas_num_threads) {
    blas_num_threads = num_threads;
  }

  blas_cpu_number = num_threads;

  omp_set_num_threads(blas_cpu_number);

  /* ensure each active thread in every parallel slot owns a buffer */
  for (slot = 0; slot < MAX_PARALLEL_NUMBER; slot++) {
    for (t = 0; t < blas_cpu_number; t++) {
      if (blas_thread_buffer[slot][t] == NULL) {
        blas_thread_buffer[slot][t] = blas_memory_alloc(2);
      }
    }
    /* release buffers of threads no longer in use */
    for (; t < MAX_CPU_NUMBER; t++) {
      if (blas_thread_buffer[slot][t] != NULL) {
        blas_memory_free(blas_thread_buffer[slot][t]);
        blas_thread_buffer[slot][t] = NULL;
      }
    }
  }

#if defined(ARCH_MIPS64)
  /* MIPS64 retunes kernel parameters per thread count */
  blas_set_parameter();
#endif
}
/* Public alias for goto_set_num_threads(). */
void openblas_set_num_threads(int num_threads) {
  goto_set_num_threads(num_threads);
}
/* Mark the OpenMP BLAS server available and allocate work buffers for
 * the initial thread count; unused slots stay NULL.  Returns 0. */
int blas_thread_init(void){

  int slot, t;

  blas_get_cpu_number();

  blas_server_avail = 1;

  for (slot = 0; slot < MAX_PARALLEL_NUMBER; slot++) {
    for (t = 0; t < blas_num_threads; t++) {
      blas_thread_buffer[slot][t] = blas_memory_alloc(2);
    }
    while (t < MAX_CPU_NUMBER) {
      blas_thread_buffer[slot][t++] = NULL;
    }
  }

  return 0;
}
/* Mark the server unavailable and free every allocated per-thread work
 * buffer, NULLing the slots.  Returns 0. */
int BLASFUNC(blas_thread_shutdown)(void){

  int slot, t;

  blas_server_avail = 0;

  for (slot = 0; slot < MAX_PARALLEL_NUMBER; slot++) {
    for (t = 0; t < MAX_CPU_NUMBER; t++) {
      void *buf = blas_thread_buffer[slot][t];
      if (buf != NULL) {
        blas_memory_free(buf);
        blas_thread_buffer[slot][t] = NULL;
      }
    }
  }

  return 0;
}
/* Invoke a legacy (single-call) BLAS kernel: decode the mode bits
 * (BLAS_COMPLEX, BLAS_XDOUBLE, BLAS_DOUBLE) to recover the kernel's
 * true signature — real kernels take one alpha scalar, complex kernels
 * take two (re, im) — then call it with the arguments packed in *args
 * and the work buffer sb. */
static void legacy_exec(void *func, int mode, blas_arg_t *args, void *sb){

      if (!(mode & BLAS_COMPLEX)){
#ifdef EXPRECISION
	if (mode & BLAS_XDOUBLE){
	  /* REAL / Extended Double */
	  void (*afunc)(BLASLONG, BLASLONG, BLASLONG, xdouble,
			xdouble *, BLASLONG, xdouble *, BLASLONG,
			xdouble *, BLASLONG, void *) = func;

	  afunc(args -> m, args -> n, args -> k,
		((xdouble *)args -> alpha)[0],
		args -> a, args -> lda,
		args -> b, args -> ldb,
		args -> c, args -> ldc, sb);
	} else
#endif
	  if (mode & BLAS_DOUBLE){
	    /* REAL / Double */
	    void (*afunc)(BLASLONG, BLASLONG, BLASLONG, double,
			  double *, BLASLONG, double *, BLASLONG,
			  double *, BLASLONG, void *) = func;

	    afunc(args -> m, args -> n, args -> k,
		  ((double *)args -> alpha)[0],
		  args -> a, args -> lda,
		  args -> b, args -> ldb,
		  args -> c, args -> ldc, sb);
	  } else {
	    /* REAL / Single */
	    void (*afunc)(BLASLONG, BLASLONG, BLASLONG, float,
			  float *, BLASLONG, float *, BLASLONG,
			  float *, BLASLONG, void *) = func;

	    afunc(args -> m, args -> n, args -> k,
		  ((float *)args -> alpha)[0],
		  args -> a, args -> lda,
		  args -> b, args -> ldb,
		  args -> c, args -> ldc, sb);
	  }
      } else {
#ifdef EXPRECISION
	if (mode & BLAS_XDOUBLE){
	  /* COMPLEX / Extended Double */
	  void (*afunc)(BLASLONG, BLASLONG, BLASLONG, xdouble, xdouble,
			xdouble *, BLASLONG, xdouble *, BLASLONG,
			xdouble *, BLASLONG, void *) = func;

	  afunc(args -> m, args -> n, args -> k,
		((xdouble *)args -> alpha)[0],
		((xdouble *)args -> alpha)[1],
		args -> a, args -> lda,
		args -> b, args -> ldb,
		args -> c, args -> ldc, sb);
	} else
#endif
	  if (mode & BLAS_DOUBLE){
	    /* COMPLEX / Double */
	    void (*afunc)(BLASLONG, BLASLONG, BLASLONG, double, double,
			  double *, BLASLONG, double *, BLASLONG,
			  double *, BLASLONG, void *) = func;

	    afunc(args -> m, args -> n, args -> k,
		  ((double *)args -> alpha)[0],
		  ((double *)args -> alpha)[1],
		  args -> a, args -> lda,
		  args -> b, args -> ldb,
		  args -> c, args -> ldc, sb);
	  } else {
	    /* COMPLEX / Single */
	    void (*afunc)(BLASLONG, BLASLONG, BLASLONG, float, float,
			  float *, BLASLONG, float *, BLASLONG,
			  float *, BLASLONG, void *) = func;

	    afunc(args -> m, args -> n, args -> k,
		  ((float *)args -> alpha)[0],
		  ((float *)args -> alpha)[1],
		  args -> a, args -> lda,
		  args -> b, args -> ldb,
		  args -> c, args -> ldc, sb);
	  }
      }
}
/* Run one queued BLAS task on the calling OpenMP thread.
 *
 * If the task did not bring its own sa/sb scratch areas (and is not a
 * pthread-compat task), a per-thread buffer from blas_thread_buffer
 * [buf_index][thread] is used; sa and sb are carved out of that single
 * buffer, with sb placed after an sa region sized by the precision's
 * GEMM_P * GEMM_Q tile (doubled for complex), aligned via GEMM_ALIGN.
 * If the table slot is empty a temporary buffer is allocated and freed
 * at the end (release_flag). Finally the routine is dispatched as a
 * legacy kernel, a pthread-style callback, or a blas_arg_t kernel. */
static void exec_threads(blas_queue_t *queue, int buf_index){
void *buffer, *sa, *sb;
int pos=0, release_flag=0;
buffer = NULL;
sa = queue -> sa;
sb = queue -> sb;
#ifdef CONSISTENT_FPCSR
/* Reproduce the submitting thread's SSE and x87 control words so all
 * workers round/compute identically (captured in exec_blas). */
__asm__ __volatile__ ("ldmxcsr %0" : : "m" (queue -> sse_mode));
__asm__ __volatile__ ("fldcw %0" : : "m" (queue -> x87_mode));
#endif
if ((sa == NULL) && (sb == NULL) && ((queue -> mode & BLAS_PTHREAD) == 0)) {
pos = omp_get_thread_num();
buffer = blas_thread_buffer[buf_index][pos];
//fallback
if(buffer==NULL) {
buffer = blas_memory_alloc(2);
release_flag=1;
}
if (sa == NULL) {
sa = (void *)((BLASLONG)buffer + GEMM_OFFSET_A);
queue->sa=sa;
}
if (sb == NULL) {
/* sb starts after sa's tile region, rounded up with GEMM_ALIGN; the
 * tile size depends on precision and on real vs complex (factor 2). */
if (!(queue -> mode & BLAS_COMPLEX)){
#ifdef EXPRECISION
if (queue -> mode & BLAS_XDOUBLE){
sb = (void *)(((BLASLONG)sa + ((QGEMM_P * QGEMM_Q * sizeof(xdouble)
+ GEMM_ALIGN) & ~GEMM_ALIGN)) + GEMM_OFFSET_B);
} else
#endif
if (queue -> mode & BLAS_DOUBLE){
sb = (void *)(((BLASLONG)sa + ((DGEMM_P * DGEMM_Q * sizeof(double)
+ GEMM_ALIGN) & ~GEMM_ALIGN)) + GEMM_OFFSET_B);
} else {
sb = (void *)(((BLASLONG)sa + ((SGEMM_P * SGEMM_Q * sizeof(float)
+ GEMM_ALIGN) & ~GEMM_ALIGN)) + GEMM_OFFSET_B);
}
} else {
#ifdef EXPRECISION
if (queue -> mode & BLAS_XDOUBLE){
sb = (void *)(((BLASLONG)sa + ((XGEMM_P * XGEMM_Q * 2 * sizeof(xdouble)
+ GEMM_ALIGN) & ~GEMM_ALIGN)) + GEMM_OFFSET_B);
} else
#endif
if (queue -> mode & BLAS_DOUBLE){
sb = (void *)(((BLASLONG)sa + ((ZGEMM_P * ZGEMM_Q * 2 * sizeof(double)
+ GEMM_ALIGN) & ~GEMM_ALIGN)) + GEMM_OFFSET_B);
} else {
sb = (void *)(((BLASLONG)sa + ((CGEMM_P * CGEMM_Q * 2 * sizeof(float)
+ GEMM_ALIGN) & ~GEMM_ALIGN)) + GEMM_OFFSET_B);
}
}
queue->sb=sb;
}
}
/* Dispatch according to the task's calling convention. */
if (queue -> mode & BLAS_LEGACY) {
legacy_exec(queue -> routine, queue -> mode, queue -> args, sb);
} else
if (queue -> mode & BLAS_PTHREAD) {
void (*pthreadcompat)(void *) = queue -> routine;
(pthreadcompat)(queue -> args);
} else {
int (*routine)(blas_arg_t *, void *, void *, void *, void *, BLASLONG) = queue -> routine;
(routine)(queue -> args, queue -> range_m, queue -> range_n, sa, sb, queue -> position);
}
if (release_flag) blas_memory_free(buffer);
}
/* Execute `num` queued BLAS tasks concurrently using an OpenMP parallel-for
 * with one thread per task. A free buffer-set index (0..MAX_PARALLEL_NUMBER-1)
 * is claimed first, so concurrent exec_blas calls use disjoint scratch
 * buffers; it is released again when all tasks finish. Returns 0. */
int exec_blas(BLASLONG num, blas_queue_t *queue){
BLASLONG i, buf_index;
if ((num <= 0) || (queue == NULL)) return 0;
#ifdef CONSISTENT_FPCSR
/* Record the caller's x87/SSE control words into every task; exec_threads
 * restores them so all workers use the caller's FP environment. */
for (i = 0; i < num; i ++) {
__asm__ __volatile__ ("fnstcw %0" : "=m" (queue[i].x87_mode));
__asm__ __volatile__ ("stmxcsr %0" : "=m" (queue[i].sse_mode));
}
#endif
/* Spin until one of the MAX_PARALLEL_NUMBER buffer sets can be claimed. */
while(true) {
for(i=0; i < MAX_PARALLEL_NUMBER; i++) {
#ifdef HAVE_C11
_Bool inuse = false;
if(atomic_compare_exchange_weak(&blas_buffer_inuse[i], &inuse, true)) {
#else
/* NOTE(review): without C11 atomics this test-and-set is not atomic;
 * presumably acceptable upstream, but two racing callers could claim
 * the same buffer set -- confirm against the project's threading model. */
if(blas_buffer_inuse[i] == false) {
blas_buffer_inuse[i] = true;
#endif
buf_index = i;
break;
}
}
/* Early break above leaves i < MAX_PARALLEL_NUMBER when a set was won. */
if(i != MAX_PARALLEL_NUMBER)
break;
}
#pragma omp parallel for num_threads(num) schedule(OMP_SCHED)
for (i = 0; i < num; i ++) {
#ifndef USE_SIMPLE_THREADED_LEVEL3
queue[i].position = i;
#endif
exec_threads(&queue[i], buf_index);
}
/* Release the buffer set for the next exec_blas call. */
#ifdef HAVE_C11
atomic_store(&blas_buffer_inuse[buf_index], false);
#else
blas_buffer_inuse[buf_index] = false;
#endif
return 0;
}
#endif
|
dgemm.c | #include <assert.h>
#include <errno.h>
#include <getopt.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#include <unistd.h>
#include <sched.h>
#ifdef USE_CBLAS
#include "cblas.h"
#elif defined(USE_NVBLAS)
#include "nvblas.h"
#elif defined(USE_MKL)
#include "mkl.h"
#endif
#include "dgemm.h"
/*
 * Perform matrixC = alpha * (matrixA x matrixB) + beta * matrixC for square
 * N x N row-major matrices, `repeats` times back to back.
 *
 * Depending on build flags the multiplication is delegated to CBLAS/MKL,
 * NVBLAS, or the portable reference triple loop below.
 */
static void do_dgemm(
double *matrixA,
double *matrixB,
double *matrixC,
int N,
double alpha,
double beta,
int repeats)
{
// ------------------------------------------------------- //
// VENDOR NOTIFICATION: START MODIFIABLE REGION
//
// Vendor is able to change the lines below to call optimized
// DGEMM or other matrix multiplication routines. Do *NOT*
// change any lines above this statement.
// ------------------------------------------------------- //

for (int r = 0; r < repeats; r++) {
#if defined( USE_MKL ) || defined (USE_CBLAS)
cblas_dgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans,
N, N, N, alpha, matrixA, N, matrixB, N, beta, matrixC, N);
#elif defined( USE_NVBLAS )
char transA = 'N';
char transB = 'N';
dgemm(&transA, &transB, &N, &N, &N, &alpha, matrixA, &N,
matrixB, &N, &beta, matrixC, &N);
#else
/* Reference implementation: plain i-j-k triple loop, row-major layout. */
for (int row = 0; row < N; row++) {
for (int col = 0; col < N; col++) {
double acc = 0.0;
for (int inner = 0; inner < N; inner++) {
acc += matrixA[row * N + inner] * matrixB[inner * N + col];
}
matrixC[row * N + col] = (alpha * acc) + (beta * matrixC[row * N + col]);
}
}
#endif
}

// ------------------------------------------------------- //
// VENDOR NOTIFICATION: END MODIFIABLE REGION
// ------------------------------------------------------- //
}
/* Per-thread benchmark workload: three N x N row-major operand matrices
 * (heap-owned by init_argument, released by destroy_argument) plus the
 * DGEMM scalars and the number of multiply rounds to run. */
typedef struct {
double *restrict matrixA;
double *restrict matrixB;
double *restrict matrixC; /* in/out: accumulates alpha*A*B + beta*C */
double alpha;
double beta;
int N;
int repeats;
} dgemm_thread_args_t;
/* Matrix dimension N (matrices are N x N); set by tune_size() in dgemm_init. */
static unsigned N;
/* Multiply rounds per benchmark call; overridable via --dgemm-rounds. */
static unsigned repeats = 8192;
/* Parse benchmark options and size the problem.
 *
 * Chooses the matrix dimension N via tune_size() and scans argv for the
 * --dgemm-rounds option, which overrides the default repeat count.
 *
 * Fix: previously a failed strtoul() parse was reported to stderr but the
 * bogus value was still assigned to `repeats`; invalid input now leaves the
 * previous value untouched. Trailing garbage ("12x") is also rejected via
 * the end pointer, which strtoul does not flag through errno. */
static void dgemm_init(int argc, char *argv[],
const benchmark_config_t *const config) {
N = tune_size(dgemm_ops.name, config, sizeof(double), 3, 2);
static struct option longopts[] = {
{"dgemm-rounds", required_argument, NULL, 'r'}, {NULL, 0, NULL, 0}};
while (1) {
int c = getopt_long(argc, argv, "-", longopts, NULL);
if (c == -1)
break;
switch (c) {
case 'r': {
char *end = NULL;
errno = 0;
unsigned long tmp = strtoul(optarg, &end, 0);
if (errno == ERANGE || end == optarg || *end != '\0' || tmp > INT_MAX) {
fprintf(stderr, "Could not parse --dgemm-rounds argument '%s': %s\n", optarg,
strerror(errno != 0 ? errno : EINVAL));
break; /* keep the previous value of repeats */
}
repeats = (unsigned)tmp;
} break;
case ':':
default:;
}
}
}
/* Allocate and initialise one thread's workload.
 *
 * Builds a dgemm_thread_args_t with A filled with 2.0, B with 0.5 and C with
 * 1.0, sized from the module-level N and repeats. Ownership of the returned
 * struct (and its matrices) passes to the caller; destroy_argument frees it.
 *
 * Fix: all four malloc() results were previously used unchecked (a failure
 * would segfault inside the fill loop); allocation failure now aborts with a
 * diagnostic. The element count is also computed in size_t so N*N cannot
 * overflow unsigned arithmetic before the multiply by sizeof(double). */
static void *init_argument(void *arg_) {
assert(arg_ == NULL);
dgemm_thread_args_t *arg = malloc(sizeof *arg);
if (arg == NULL) {
fprintf(stderr, "dgemm: out of memory allocating thread arguments\n");
exit(EXIT_FAILURE);
}
arg->N = (int)N;
arg->repeats = (int)repeats;
arg->alpha = 1.0;
arg->beta = 1.0;
const size_t elems = (size_t)N * (size_t)N;
arg->matrixA = malloc(sizeof(double) * elems);
arg->matrixB = malloc(sizeof(double) * elems);
arg->matrixC = malloc(sizeof(double) * elems);
if (arg->matrixA == NULL || arg->matrixB == NULL || arg->matrixC == NULL) {
fprintf(stderr, "dgemm: out of memory allocating %ux%u matrices\n", N, N);
exit(EXIT_FAILURE);
}
for (unsigned j = 0; j < N; j++) {
for (unsigned k = 0; k < N; k++) {
arg->matrixA[j * N + k] = 2.0;
arg->matrixB[j * N + k] = 0.5;
arg->matrixC[j * N + k] = 1.0;
}
}
return arg;
}
/* Release one workload created by init_argument: the three matrices, then
 * the argument struct itself. */
static void destroy_argument(void *arg_) {
dgemm_thread_args_t *arg = (dgemm_thread_args_t *)arg_;
free(arg->matrixA);
free(arg->matrixB);
free(arg->matrixC);
free(arg);
}
/* Benchmark body: run do_dgemm once with the thread's prepared matrices and
 * parameters. Always returns NULL (result accumulates in arg->matrixC). */
static void *call_work(void *arg_) {
dgemm_thread_args_t *arg = (dgemm_thread_args_t *)arg_;
do_dgemm(arg->matrixA, arg->matrixB, arg->matrixC, arg->N, arg->alpha,
arg->beta, arg->repeats);
return NULL;
}
/* Registration record wiring this benchmark into the harness: lifecycle
 * hooks for per-thread setup/teardown and the timed call itself. */
benchmark_t dgemm_ops = {
.name = "dgemm",
.init = dgemm_init,
.init_arg = init_argument,
.reset_arg = NULL,
.free_arg = destroy_argument,
.call = call_work,
.state = NULL,
};
/* The remainder of this file is an older standalone per-CPU benchmark driver
 * (warm-up, checksum verification, CSV output, per-CPU affinity sweep). It is
 * compiled out with #if 0 and kept for reference only. */
#if 0
double init_and_do_dgemm(
char *hostname,
double *matrixA,
double *matrixB,
double *matrixC,
int N,
double alpha,
double beta,
int repeats)
{
int i, j, k, r;
int cpu;
cpu = sched_getcpu();
#pragma omp parallel for
for(i = 0; i < N; i++) {
for(j = 0; j < N; j++) {
matrixA[i*N + j] = 2.0;
matrixB[i*N + j] = 0.5;
matrixC[i*N + j] = 1.0;
}
}
// Do a warm up round
do_dgemm(matrixA, matrixB, matrixC, N, alpha, beta, 1);
printf("Performing multiplication...\n");
const double start = get_seconds();
do_dgemm(matrixA, matrixB, matrixC, N, alpha, beta, repeats);
// ------------------------------------------------------- //
// DO NOT CHANGE CODE BELOW
// ------------------------------------------------------- //
const double end = get_seconds();
// Account for warm up round
++repeats;
printf("Calculating matrix check...\n");
double final_sum = 0;
long long int count = 0;
#pragma omp parallel for reduction(+:final_sum, count)
for(i = 0; i < N; i++) {
for(j = 0; j < N; j++) {
final_sum += matrixC[i*N + j];
count++;
}
}
double N_dbl = (double) N;
double matrix_memory = (3 * N_dbl * N_dbl) * ((double) sizeof(double));
printf("\n");
printf("===============================================================\n");
const double count_dbl = (double) count;
const double scaled_result = (final_sum / (count_dbl * repeats));
printf("Final Sum is: %f\n", scaled_result);
/* With A=2.0, B=0.5 and C=1.0, each round adds N to every C entry, so the
 * per-entry-per-round average should equal N + 1/repeats. */
const double check_sum = N_dbl + (1.0 / (double) (repeats));
const double allowed_margin = 1.0e-8;
if( (check_sum >= (scaled_result - allowed_margin)) &&
(check_sum <= (scaled_result + allowed_margin)) ) {
printf(" -> Solution check PASSED successfully.\n");
} else {
printf(" -> Solution check FAILED.\n");
}
printf("Memory for Matrices: %f MB\n",
(matrix_memory / (1024 * 1024)));
const double time_taken = (end - start);
printf("Multiply time: %f seconds\n", time_taken);
const double flops_computed = (N_dbl * N_dbl * N_dbl * 2.0 * (double)(repeats)) +
(N_dbl * N_dbl * 2 * (double)(repeats));
printf("FLOPs computed: %f\n", flops_computed);
double gflopsps = (flops_computed / time_taken) / 1000000000.0;
printf("%s, CPU: %d, GFLOP/s rate: %f GF/s\n",
hostname, cpu, gflopsps);
printf("===============================================================\n");
printf("\n");
return gflopsps;
}
// ------------------------------------------------------- //
// Function: main
//
// Modify only in permitted regions (see comments in the
// function)
// ------------------------------------------------------- //
int main(int argc, char* argv[]) {
// ------------------------------------------------------- //
// DO NOT CHANGE CODE BELOW
// ------------------------------------------------------- //
int N = 256;
int repeats = 30;
double alpha = 1.0;
double beta = 1.0;
char hostname[1024];
char filename[1024];
cpu_set_t cpuset;
int cpu, nr_cpus;
FILE *lf;
int rank;
#if 0
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
#endif
if (gethostname(hostname, sizeof(hostname)) < 0) {
perror("error: obtaining hostname\n");
exit(-1);
}
memset(&cpuset, 0, sizeof(cpuset));
if ((sched_getaffinity(0, sizeof(cpu_set_t), &cpuset)) < 0) {
perror("error: sched_getaffinity");
exit(-1);
}
/* Count the CPUs this process is allowed to run on. NOTE(review): the
 * int-vs-size_t comparison in the loop condition draws -Wsign-compare. */
nr_cpus = 0;
for (cpu = 0; cpu < (8 * sizeof(cpuset)); cpu++) {
if (!CPU_ISSET(cpu, &cpuset)) {
continue;
}
++nr_cpus;
}
sprintf(filename, "%s-rank_%d-nr_cpus_%d.csv", hostname, rank, nr_cpus);
lf = fopen(filename, "wb+");
if (!lf) {
perror("error: fopen");
exit(-1);
}
if(argc > 1) {
N = atoi(argv[1]);
printf("Matrix size input by command line: %d\n", N);
if(argc > 2) {
repeats = atoi(argv[2]);
if(repeats < 30) {
fprintf(stderr, "Error: repeats must be at least 30, setting is: %d\n", repeats);
exit(-1);
}
printf("Repeat multiply %d times.\n", repeats);
if(argc > 3) {
alpha = (double) atof(argv[3]);
if(argc > 4) {
beta = (double) atof(argv[4]);
}
}
} else {
printf("Repeat multiply defaulted to %d\n", repeats);
}
} else {
printf("Matrix size defaulted to %d\n", N);
}
printf("Alpha = %f\n", alpha);
printf("Beta = %f\n", beta);
/*
if(N < 128) {
printf("Error: N (%d) is less than 128, the matrix is too small.\n", N);
exit(-1);
}
*/
printf("Allocating Matrices...\n");
double* DGEMM_RESTRICT matrixA = (double*) malloc(sizeof(double) * N * N);
double* DGEMM_RESTRICT matrixB = (double*) malloc(sizeof(double) * N * N);
double* DGEMM_RESTRICT matrixC = (double*) malloc(sizeof(double) * N * N);
//printf("Allocation complete, populating with values...\n");
/* Pin to each allowed CPU in turn: 5 warm-up runs, then 10 recorded runs
 * written as one CSV row per CPU. */
for (cpu = 0; cpu < (8 * sizeof(cpuset)); cpu++) {
cpu_set_t target_cpu;
double gflopsps;
int iter;
if (!CPU_ISSET(cpu, &cpuset)) {
continue;
}
memset(&target_cpu, 0, sizeof(cpu_set_t));
CPU_SET(cpu, &target_cpu);
if (sched_setaffinity(0, sizeof(cpu_set_t), &target_cpu) < 0) {
perror("error: sched_setaffinity");
exit(-1);
}
fprintf(lf, "%d", cpu);
for (iter = 0; iter < 5; ++iter) {
gflopsps = init_and_do_dgemm(hostname,
matrixA, matrixB, matrixC, N, alpha, beta, repeats);
}
for (iter = 0; iter < 10; ++iter) {
gflopsps = init_and_do_dgemm(hostname,
matrixA, matrixB, matrixC, N, alpha, beta, repeats);
fprintf(lf, ",%g", gflopsps);
}
fprintf(lf, "\n");
fflush(lf);
fflush(stdout);
}
fclose(lf);
free(matrixA);
free(matrixB);
free(matrixC);
#if 0
MPI_Barrier(MPI_COMM_WORLD);
MPI_Finalize();
#endif
return 0;
}
#endif
|
depth-metrics.h | // License: Apache 2.0. See LICENSE file in root directory.
// Copyright(c) 2017 Intel Corporation. All Rights Reserved.
//
// Plane Fit implementation follows http://www.ilikebigbits.com/blog/2015/3/2/plane-from-points algorithm
#pragma once
#include <vector>
#include <mutex>
#include <array>
#include <imgui.h>
#include <librealsense2/rs.hpp>
#include "rendering.h"
namespace rs2
{
namespace depth_quality
{
// Aggregated results of a single depth-frame analysis pass.
struct snapshot_metrics
{
int width;   // frame width in pixels
int height;  // frame height in pixels
rs2::region_of_interest roi;  // analysed sub-rectangle of the frame
float distance;  // distance from camera to the fitted plane, in mm
float angle;     // tilt of the fitted plane vs the optical axis, degrees
float angle_x;   // x component of the in-plane tilt direction
float angle_y;   // y component of the in-plane tilt direction
plane p;         // fitted plane (a*x + b*y + c*z + d = 0)
std::array<float3, 4> plane_corners;  // plane/ray intersections at ROI corners
};
// One named scalar sample recorded by a metrics callback.
struct single_metric_data
{
single_metric_data(std::string name, float val) :
val(val), name(name) {}
float val;
std::string name;
};
// Signature of the per-frame metrics callback invoked by analyze_depth_image:
// receives the deprojected ROI points, the fitted plane, camera parameters,
// ground-truth/plane-fit offsets, and appends results into `samples` when
// `record` is set.
using callback_type = std::function<void(
const std::vector<rs2::float3>& points,
const plane p,
const rs2::region_of_interest roi,
const float baseline_mm,
const float focal_length_pixels,
const int ground_thruth_mm,
const bool plane_fit,
const float plane_fit_to_ground_truth_mm,
const float distance_mm,
bool record,
std::vector<single_metric_data>& samples)>;
// Build the implicit plane a*x + b*y + c*z + d = 0 from a point lying on the
// plane and the plane's normal vector (d = -(normal . point)).
inline plane plane_from_point_and_normal(const rs2::float3& point, const rs2::float3& normal)
{
const float d = -(normal.x * point.x + normal.y * point.y + normal.z * point.z);
return{ normal.x, normal.y, normal.z, d };
}
//Based on: http://www.ilikebigbits.com/blog/2015/3/2/plane-from-points
//Based on: http://www.ilikebigbits.com/blog/2015/3/2/plane-from-points
// Least-squares plane fit: builds the 3x3 covariance of the centred points and
// solves for the normal using the axis with the largest determinant (the most
// numerically stable choice). Throws if fewer than 3 points are supplied;
// returns the zero plane {0,0,0,0} when the points are degenerate (collinear).
inline plane plane_from_points(const std::vector<rs2::float3> points)
{
if (points.size() < 3) throw std::runtime_error("Not enough points to calculate plane");
rs2::float3 sum = { 0,0,0 };
for (auto point : points) sum = sum + point;
rs2::float3 centroid = sum / float(points.size());
// Accumulate the upper triangle of the covariance matrix of centred points.
double xx = 0, xy = 0, xz = 0, yy = 0, yz = 0, zz = 0;
for (auto point : points) {
rs2::float3 temp = point - centroid;
xx += temp.x * temp.x;
xy += temp.x * temp.y;
xz += temp.x * temp.z;
yy += temp.y * temp.y;
yz += temp.y * temp.z;
zz += temp.z * temp.z;
}
// 2x2 minors: each is the determinant obtained by deleting one axis.
double det_x = yy*zz - yz*yz;
double det_y = xx*zz - xz*xz;
double det_z = xx*yy - xy*xy;
double det_max = std::max({ det_x, det_y, det_z });
if (det_max <= 0) return{ 0, 0, 0, 0 };
// Solve for the normal with the dominant axis' component fixed to 1.
rs2::float3 dir{};
if (det_max == det_x)
{
float a = static_cast<float>((xz*yz - xy*zz) / det_x);
float b = static_cast<float>((xy*yz - xz*yy) / det_x);
dir = { 1, a, b };
}
else if (det_max == det_y)
{
float a = static_cast<float>((yz*xz - xy*zz) / det_y);
float b = static_cast<float>((xy*xz - yz*xx) / det_y);
dir = { a, 1, b };
}
else
{
float a = static_cast<float>((yz*xy - xz*yy) / det_z);
float b = static_cast<float>((xz*xy - yz*xx) / det_z);
dir = { a, b, 1 };
}
return plane_from_point_and_normal(centroid, dir.normalize());
}
// Deproject pixel (x,y) at the given depth into a 3D point (written to
// `output`) and return the signed evaluation of plane p at that point
// (positive/negative indicates which side of the plane the point lies on).
inline double evaluate_pixel(const plane& p, const rs2_intrinsics* intrin, float x, float y, float distance, float3& output)
{
float pixel[2] = { x, y };
rs2_deproject_pixel_to_point(&output.x, intrin, pixel, distance);
return evaluate_plane(p, output);
}
// Find the 3D point where the viewing ray through pixel (x,y) crosses plane p,
// by bisecting the depth interval [min, max]. Returns the zero vector as a
// sentinel when the plane evaluations at both interval ends share a sign
// (i.e. no crossing inside the interval).
inline float3 approximate_intersection(const plane& p, const rs2_intrinsics* intrin, float x, float y, float min, float max)
{
float3 point;
auto f = evaluate_pixel(p, intrin, x, y, max, point);
// Interval shrunk below the 1e-3 tolerance (depth units): accept the estimate.
if (fabs(max - min) < 1e-3) return point;
auto n = evaluate_pixel(p, intrin, x, y, min, point);
if (f*n > 0) return{ 0, 0, 0 };
auto avg = (max + min) / 2;
auto mid = evaluate_pixel(p, intrin, x, y, avg, point);
// Recurse into whichever half straddles the sign change.
if (mid*n < 0) return approximate_intersection(p, intrin, x, y, min, avg);
return approximate_intersection(p, intrin, x, y, avg, max);
}
// Convenience overload: search the full working depth range [0, 1000]
// (units match the deprojection distance used above).
inline float3 approximate_intersection(const plane& p, const rs2_intrinsics* intrin, float x, float y)
{
return approximate_intersection(p, intrin, x, y, 0.f, 1000.f);
}
// Analyse one depth frame: deproject all valid ROI pixels to 3D, fit a plane,
// derive distance/angle metrics and the plane's ROI-corner intersections, and
// forward everything to `callback`. Returns a partially-filled result (width,
// height, roi only) when too few valid pixels exist or the fit is degenerate.
inline snapshot_metrics analyze_depth_image(
const rs2::video_frame& frame,
float units, float baseline_mm,
const rs2_intrinsics * intrin,
rs2::region_of_interest roi,
const int ground_truth_mm,
bool plane_fit_present,
std::vector<single_metric_data>& samples,
bool record,
callback_type callback)
{
auto pixels = (const uint16_t*)frame.get_data();
const auto w = frame.get_width();
const auto h = frame.get_height();
snapshot_metrics result{ w, h, roi, {} };
// Mutex guards roi_pixels; kept for the envisaged parallel loop below.
std::mutex m;
std::vector<rs2::float3> roi_pixels;
//#pragma omp parallel for - TODO optimization envisaged
for (int y = roi.min_y; y < roi.max_y; ++y)
for (int x = roi.min_x; x < roi.max_x; ++x)
{
// Raw value 0 means "no depth data" and is skipped.
auto depth_raw = pixels[y*w + x];
if (depth_raw)
{
// units is float
float pixel[2] = { float(x), float(y) };
float point[3];
auto distance = depth_raw * units;
rs2_deproject_pixel_to_point(point, intrin, pixel, distance);
std::lock_guard<std::mutex> lock(m);
roi_pixels.push_back({ point[0], point[1], point[2] });
}
}
if (roi_pixels.size() < 3) { // Not enough pixels in RoI to fit a plane
return result;
}
plane p = plane_from_points(roi_pixels);
if (p == plane{ 0, 0, 0, 0 }) { // The points in RoI don't span a valid plane
return result;
}
// Calculate intersection of the plane fit with a ray along the center of ROI
// that by design coincides with the center of the frame
float3 plane_fit_pivot = approximate_intersection(p, intrin, intrin->width / 2.f, intrin->height / 2.f);
// Offset of the plane fit from ground truth (mm); 0 when no ground truth given.
float plane_fit_to_gt_offset_mm = (ground_truth_mm > 0.f) ? (plane_fit_pivot.z * 1000 - ground_truth_mm) : 0;
result.p = p;
result.plane_corners[0] = approximate_intersection(p, intrin, float(roi.min_x), float(roi.min_y));
result.plane_corners[1] = approximate_intersection(p, intrin, float(roi.max_x), float(roi.min_y));
result.plane_corners[2] = approximate_intersection(p, intrin, float(roi.max_x), float(roi.max_y));
result.plane_corners[3] = approximate_intersection(p, intrin, float(roi.min_x), float(roi.max_y));
// Distance of origin (the camera) from the plane is encoded in parameter D of the plane
// The parameter represents the euclidian distance (along plane normal) from camera to the plane
result.distance = static_cast<float>(-p.d * 1000);
// Angle can be calculated from param C
result.angle = static_cast<float>(std::acos(std::abs(p.c)) / M_PI * 180.);
callback(roi_pixels, p, roi, baseline_mm, intrin->fx, ground_truth_mm, plane_fit_present,
plane_fit_to_gt_offset_mm, result.distance, record, samples);
// Calculate normal
auto n = float3{ p.a, p.b, p.c };
auto cam = float3{ 0.f, 0.f, -1.f };
auto dot = n * cam;
// Project the camera axis onto the plane to get the in-plane tilt direction.
auto u = cam - n * dot;
result.angle_x = u.x;
result.angle_y = u.y;
return result;
}
}
}
|
omp_strsm_batch.c | /**
* @file omp_strsm_batch.c
*
* @brief BBLAS omp_strsm_batch float routine.
*
* BBLAS is a software package provided by Univ. of Manchester,
* Univ. of Tennessee.
*
* @version 1.0.0
* @author Samuel D. Relton
* @author Pedro V. Lara
* @author Mawussi Zounon
* @date 2016-02-20
*
**/
#ifndef DOXYGEN_SHOULD_SKIP_THIS
/**
* Code generation
* @generated from ./bblas_omp/omp_ztrsm_batch.c normal z -> s, Mon Jun 6 09:44:14 2016
**/
#endif
#include<cblas.h>
#include "bblas_omp.h"
#include "bblas.h"
#include <omp.h>
#define REAL
/**
Purpose
-------
<b>strsm_batch</b> is an OpenMP version of strsm_batch.
It solves for X in one of the matrix equations
op( arrayA[i] )*X = alpha*arrayB[i], or
X*op( arrayA[i] ) = alpha[i]*arrayB[i],
where op( X ) is one of
- op( X ) = X
or
- op( X ) = X**T
or
- op( X ) = X**H,
alpha[i] is a scalar, X and B are M[i] by N[i] matrices,
and arrayA[i] is a unit or non-unit, upper or lower triangular matrix.
The solution matrix X overwrites arrayB[i] on exit.
Fixed and Variable Batch Operations
-----------------------------------
Two types of batch operation are supported depending upon the value of batch_opts.
When <tt>batch_opts = BBLAS_VARIABLE</tt>
- all parameters that are arrays must have length at least batch_count.
- all parameters that are arrays must have all values set.
When <tt>batch_opts = BBLAS_FIXED</tt>
- all parameters that are arrays (except for arrayA, arrayB, and info)
must have length at least one.
- all parameters that are arrays (except for arrayA, arrayB, and info)
need only to have their first value set.
This means that for a <tt>BBLAS_FIXED</tt> batch,
the values of side[0], uplo[0], transA[0], diag[0], M[0], N[0],
alpha[0], lda[0], and ldb[0] are used for all computations.
Parameters
----------
@param[in]
side Array of <tt>enum BBLAS_SIDE</tt>.
Each element side[i] specifies whether op( arrayA[i] )
appears on the left or right side of the operation as follows:
- = 'BblasLeft' op( arrayA[i] )*X = alpha[i]*arrayB[i].
- = 'BblasRight' X*op( arrayA[i] ) = alpha[i]*arrayB[i].
@param[in]
uplo Array of <tt>enum BBLAS_UPLO</tt>.
On entry, uplo[i] specifies whether the matrix arrayA[i]
is upper or lower triangular as follows:
- = 'BblasUpper' arrayA[i] is an upper triangular matrix.
- = 'BblasLower' arrayA[i] is a lower triangular matrix.
@param[in]
transA Array of <tt>enum BBLAS_TRANS</tt>.
On entry, trans[i] specifies the form of op( arrayA[i] ) to be
used in the operation as follows:
- = 'BblasNoTrans' op( arrayA[i] ) = arrayA[i].
- = 'BblasTrans' op( arrayA[i] ) = arrayA[i]**T.
- = 'BblasConjTrans' op( arrayA[i] ) = arrayA'[i]**H.
@param[in]
diag - Array of <tt>enum BBLAS_DIAG</tt>.
On entry, diag[i] specifies whether or not arrayA[i] is unit
triangular as follows:
- = 'BblasUnit' arrayA[i] is assumed to be unit triangular.
- = 'BblasNonUnit' arrayA[i] is not assumed to be unit triangular.
@param[in]
M Array of <tt>int</tt>.
Each element M[i] specifies the number of rows of the matrix arrayB[i].
M[i] must be greater than zero.
@param[in]
N Array of <tt>int</tt>.
Each element N[i] specifies the number of columns of the matrix arrayB[i].
N[i] must be greater than zero.
@param[in]
alpha Array of REAL
When alpha[i] is set to zero arrayA[i] is not referenced and arrayB[i] need
not be set before entry.
@param[in]
arrayA Array of pointers.
Each element arrayA[i] is a pointer to a REAL matrix of
dimension lda[i] by Ka[i],
where Ka[i] = M[i] when side[i] = BblasLeft and is N[i] otherwise.
When using side[i] = BblasLeft the M[i] by M[i] part of arrayA[i]
must contain the triangular matrix:
when uplo[i] = BblasUpper, the upper triangular part of arrayA[i]
must contain the matrix whilst the strictly lower triangular part is not used;
similarly when uplo[i] = BblasLower, the lower triangular part of arrayA[i]
must contain the matrix whilst the strictly upper triangular part is not used.
When using side[i] = BblasRight the N[i] by N[i] part of arrayA[i] must
contain the symmetric matrix:
when uplo[i] = BblasUpper, the upper triangular part of arrayA[i]
must contain the matrix whilst the strictly lower triangular part is not used;
similarly when uplo[i] = BblasLower, the lower triangular part of arrayA[i]
must contain the matrix whilst the strictly upper triangular part is not used.
Note that when diag = BblasUnit the diagonal elements of arrayA[i] are
not used either, they are assumed to be equal to one.
@param[in]
lda Array of <tt>int</tt>.
On entry, lda[i] specifies the first dimension of arrayA[i] as declared
in the calling (sub) program. When side[i] = BblasLeft
then lda[i] must be at least max( 1, M[i] ),
otherwise lda[i] must be at least max( 1, N[i] ).
@param[in,out]
arrayB Array of pointers.
Each element arrayB[i] is a pointer to a REAL matrix of
dimension ldb[i] by N[i].
The leading M[i] by N[i] part of arrayB[i] must contain the matrix elements.
On exit is arrayB[i] overwritten by the solution matrix X.
@param[in]
ldb Array of <tt>int</tt>.
Each element ldb[i] specifies the first dimension of arrayB[i] as declared
in the calling (sub) program. Each element ldb[i] must be at least max( 1, M[i] ).
@param[in]
batch_count <tt>int</tt>
The number of matrices to operate on.
@param[in]
batch_opts <tt>enum BBLAS_OPTS</tt>
One of BBLAS_FIXED or BBLAS_VARIABLE depending upon the type of
batch operation required.
@param[out]
info Array of <tt>int</tt>.
Each element info[i] is the error return code of the ith strsm in the batch,
these need not be set on entry.
The error codes can be found in bblas_macros.h.
**/
/* OpenMP batched STRSM (see the doxygen block above for the full contract).
 *
 * Fixes relative to the previous revision:
 *  - the variable-batch ldb check reported BBLAS_ERR_LDC (copy-paste from the
 *    gemm-style routines); it now reports BBLAS_ERR_LDB, matching the fixed
 *    branch and the xerbla message;
 *  - the variable-batch branch was missing the `diag` validity check that the
 *    fixed branch performs; it is now validated per problem. */
void omp_strsm_batch(
    const enum BBLAS_SIDE *side, const enum BBLAS_UPLO *uplo,
    const enum BBLAS_TRANS *transA, const enum BBLAS_DIAG *diag,
    const int *M, const int *N, const float *alpha,
    const float **arrayA, const int *lda,
    float **arrayB, const int *ldb,
    const int batch_count, enum BBLAS_OPTS batch_opts, int *info)
{
    /* Local variables */
    int first_index = 0;
    int batch_iter;
    int LDA;
    char func_name[15] = "strsm_batch";

    /* Check input arguments */
    if (batch_count < 0)
    {
        xerbla_batch(func_name, BBLAS_ERR_BATCH_COUNT, -1);
    }
    if (batch_opts == BBLAS_FIXED)
    {
        /* Fixed batch: validate the shared parameters once (index 0);
         * on any error, flag every problem in info[] and bail out. */
        if ((side[first_index] != BblasLeft) &&
            (side[first_index] != BblasRight))
        {
            xerbla_batch(func_name, BBLAS_ERR_SIDE, first_index);
            for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
            {
                info[batch_iter] = BBLAS_ERR_SIDE;
            }
            return;
        }
        if ((uplo[first_index] != BblasUpper) &&
            (uplo[first_index] != BblasLower))
        {
            xerbla_batch(func_name, BBLAS_ERR_UPLO, first_index);
            for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
            {
                info[batch_iter] = BBLAS_ERR_UPLO;
            }
            return;
        }
        if ((transA[first_index] != BblasNoTrans) &&
            (transA[first_index] != BblasTrans) &&
            (transA[first_index] != BblasConjTrans))
        {
            xerbla_batch(func_name, BBLAS_ERR_TRANSA, first_index);
            for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
            {
                info[batch_iter] = BBLAS_ERR_TRANSA;
            }
            return;
        }
        if ((diag[first_index] != BblasNonUnit) &&
            (diag[first_index] != BblasUnit))
        {
            xerbla_batch(func_name, BBLAS_ERR_DIAG, first_index);
            for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
            {
                info[batch_iter] = BBLAS_ERR_DIAG;
            }
            return;
        }
        if (M[first_index] < 0)
        {
            xerbla_batch(func_name, BBLAS_ERR_M, first_index);
            for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
            {
                info[batch_iter] = BBLAS_ERR_M;
            }
            return;
        }
        if (N[first_index] < 0)
        {
            xerbla_batch(func_name, BBLAS_ERR_N, first_index);
            for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
            {
                info[batch_iter] = BBLAS_ERR_N;
            }
            return;
        }
        /* A is M x M when it multiplies from the left, N x N otherwise. */
        if (side[first_index] == BblasLeft)
        {
            LDA = M[first_index];
        } else
        {
            LDA = N[first_index];
        }
        if (lda[first_index] < max(1, LDA))
        {
            xerbla_batch(func_name, BBLAS_ERR_LDA, first_index);
            for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
            {
                info[batch_iter] = BBLAS_ERR_LDA;
            }
            return;
        }
        if (ldb[first_index] < max(1, M[first_index])) {
            xerbla_batch(func_name, BBLAS_ERR_LDB, first_index);
            for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
            {
                info[batch_iter] = BBLAS_ERR_LDB;
            }
            return;
        }
        /* particular case: empty matrices, nothing to compute */
        if (min(M[first_index], N[first_index]) == 0)
        {
            for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
            {
                info[batch_iter] = BBLAS_SUCCESS;
            }
            return;
        }
#pragma omp parallel for private(batch_iter)
        for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
        {
            /* Call to cblas_strsm */
            cblas_strsm(
                BblasColMajor,
                side[first_index],
                uplo[first_index],
                transA[first_index],
                diag[first_index],
                M[first_index],
                N[first_index],
                (alpha[first_index]),
                arrayA[batch_iter],
                lda[first_index],
                arrayB[batch_iter],
                ldb[first_index]);
            /* Successful */
            info[batch_iter] = BBLAS_SUCCESS;
        } /* END FIXED SIZE FOR LOOP */
    } else if (batch_opts == BBLAS_VARIABLE)
    {
        /* Variable batch: each problem carries its own parameters and is
         * validated independently; a bad problem is skipped (info set),
         * the rest of the batch still runs. */
#pragma omp parallel for private(batch_iter,LDA)
        for (batch_iter = 0; batch_iter < batch_count; batch_iter++)
        {
            /* Check input arguments */
            if ((side[batch_iter] != BblasLeft) &&
                (side[batch_iter] != BblasRight))
            {
                xerbla_batch(func_name, BBLAS_ERR_SIDE, batch_iter);
                info[batch_iter] = BBLAS_ERR_SIDE;
                continue;
            }
            if ((uplo[batch_iter] != BblasUpper) &&
                (uplo[batch_iter] != BblasLower))
            {
                xerbla_batch(func_name, BBLAS_ERR_UPLO, batch_iter);
                info[batch_iter] = BBLAS_ERR_UPLO;
                continue;
            }
            if ((transA[batch_iter] != BblasNoTrans) &&
                (transA[batch_iter] != BblasTrans) &&
                (transA[batch_iter] != BblasConjTrans))
            {
                xerbla_batch(func_name, BBLAS_ERR_TRANSA, batch_iter);
                info[batch_iter] = BBLAS_ERR_TRANSA;
                continue;
            }
            /* Previously missing: validate diag, as the fixed branch does. */
            if ((diag[batch_iter] != BblasNonUnit) &&
                (diag[batch_iter] != BblasUnit))
            {
                xerbla_batch(func_name, BBLAS_ERR_DIAG, batch_iter);
                info[batch_iter] = BBLAS_ERR_DIAG;
                continue;
            }
            if (M[batch_iter] < 0)
            {
                xerbla_batch(func_name, BBLAS_ERR_M, batch_iter);
                info[batch_iter] = BBLAS_ERR_M;
                continue;
            }
            if (N[batch_iter] < 0)
            {
                xerbla_batch(func_name, BBLAS_ERR_N, batch_iter);
                info[batch_iter] = BBLAS_ERR_N;
                continue;
            }
            if (side[batch_iter] == BblasLeft)
            {
                LDA = M[batch_iter];
            } else
            {
                LDA = N[batch_iter];
            }
            if (lda[batch_iter] < max(1, LDA))
            {
                xerbla_batch(func_name, BBLAS_ERR_LDA, batch_iter);
                info[batch_iter] = BBLAS_ERR_LDA;
                continue;
            }
            /* Was BBLAS_ERR_LDC: strsm has no C matrix, report LDB. */
            if (ldb[batch_iter] < max(1, M[batch_iter]))
            {
                xerbla_batch(func_name, BBLAS_ERR_LDB, batch_iter);
                info[batch_iter] = BBLAS_ERR_LDB;
                continue;
            }
            /* particular case: empty matrices, nothing to compute */
            if (min(M[batch_iter], N[batch_iter]) == 0)
            {
                info[batch_iter] = BBLAS_SUCCESS;
                continue;
            }
            cblas_strsm(
                BblasColMajor,
                side[batch_iter],
                uplo[batch_iter],
                transA[batch_iter],
                diag[batch_iter],
                M[batch_iter],
                N[batch_iter],
                (alpha[batch_iter]),
                arrayA[batch_iter],
                lda[batch_iter],
                arrayB[batch_iter],
                ldb[batch_iter]);
            /* Successful */
            info[batch_iter] = BBLAS_SUCCESS;
        }
    } else
    {
        /* batch_opts is neither BBLAS_FIXED nor BBLAS_VARIABLE */
        xerbla_batch(func_name, BBLAS_ERR_BATCH_OPTS, -1);
    }
}
#undef REAL
|
pgas_addr.h | #pragma once
#include <utility>
#include <cassert>
#include "adabs/adabs.h"
#include "adabs/allocator.h"
#include "adabs/tools/ptr_divider.h"
#include "adabs/tools/alignment.h"
#include "adabs/impl/pgas_addr.h"
namespace adabs {
template <typename T>
struct allocator;
namespace pgas {
inline void pgas_addr_remote_get (gasnet_token_t token,
gasnet_handlerarg_t arg0, // data pointer
gasnet_handlerarg_t arg1, // data pointer
gasnet_handlerarg_t arg2, // batch_mem size
gasnet_handlerarg_t arg3, // return data pointer
gasnet_handlerarg_t arg4, // return data pointer
gasnet_handlerarg_t arg5 // flag diff for remote pointer
);
inline void pgas_addr_remote_set (gasnet_token_t token, void *buf, size_t nbytes,
gasnet_handlerarg_t arg0, // data pointer
gasnet_handlerarg_t arg1 // data pointer
);
inline void pgas_addr_set_uninit(gasnet_token_t token,
gasnet_handlerarg_t arg0, // flag pointer
gasnet_handlerarg_t arg1, // flag pointer
gasnet_handlerarg_t arg2, // stride between flags
gasnet_handlerarg_t arg3, // nb of flags
gasnet_handlerarg_t arg4, // done marker pointer
gasnet_handlerarg_t arg5 // done marker pointer
);
inline void pgas_addr_check_get_all(gasnet_token_t token,
gasnet_handlerarg_t arg0, // flag pointer
gasnet_handlerarg_t arg1, // flag pointer
gasnet_handlerarg_t arg2, // stride between flags
gasnet_handlerarg_t arg3, // nb of flags
gasnet_handlerarg_t arg4, // done marker pointer
gasnet_handlerarg_t arg5 // done marker pointer
);
// TODO move in different file (incl. implementation)
inline void done_marker(gasnet_token_t token,
gasnet_handlerarg_t arg0, // done marker pointer
gasnet_handlerarg_t arg1 // done marker pointer
);
}
/**
 * An address in our PGAS world.
 *
 * Wraps a (node, pointer) pair addressing one batch of T elements that is
 * followed in memory by an int synchronisation flag. On the owning node the
 * storage is used directly; on remote nodes a local cache buffer is
 * allocated lazily and filled via GASNet active messages.
 */
template <typename T>
class pgas_addr {
	/******************* TYPEDEFS ******************/
	public:
		typedef T value_type;
		// Per-batch flag state machine. NOTE: the "WRITTING" spelling is part
		// of the public interface (used by the AM handlers below) and is kept.
		enum {EMPTY, WRITTING, REQUESTED, FULL};

	/******************* VARIABLES *****************/
	private:
		int _orig_node;    // GASNet node that owns the storage
		int _batch_size;   // number of T elements per batch
		void* _orig_ptr;   // just the plain start pointer (on _orig_node)
		mutable T* _cache; // local mirror; equals _orig_ptr when local

	/**************** CON/DESTRUCTORS ***************/
	public:
		// Local-address constructor: owner is the calling node.
		// (Init list ordered to match declaration order; members are always
		// initialised in declaration order regardless.)
		pgas_addr (void* ptr, const int batch_size)
		   : _orig_node(adabs::me),
		     _batch_size(batch_size),
		     _orig_ptr(ptr),
		     _cache(0) {
			if (is_local()) {
				set_cache();
			}
		}

		// General constructor: storage may live on another node.
		pgas_addr (void* ptr, const int batch_size, const int orig_node)
		   : _orig_node(orig_node),
		     _batch_size(batch_size),
		     _orig_ptr(ptr),
		     _cache(0) {
			if (is_local())
				set_cache();
		}

		// Copy constructor. NOTE(review): the parameter name was HTML-garbled
		// to "&copy;" in the extracted source; restored to a plain reference.
		// The remote cache is deliberately NOT shared between copies.
		pgas_addr (const pgas_addr<T> &copy)
		   : _orig_node(copy._orig_node),
		     _batch_size(copy._batch_size),
		     _orig_ptr(copy._orig_ptr),
		     _cache(0) {
			if (is_local())
				set_cache();
		}

		~pgas_addr() {
			// frees the remote-side cache only; local data is not owned here
			clear_cache();
		}

	/***************** FUNCTIONS *********************/
	public:
		/**
		 * Blocks until the batch data is available locally and returns a
		 * pointer to it. For remote addresses the first caller that wins the
		 * EMPTY->REQUESTED transition sends the GASNet get request; everyone
		 * spins until the flag becomes FULL.
		 */
		T* get_data() const {
			using namespace adabs::tools;
			// must be called before is_available is called
			set_cache();

			if (!is_local()) {
				const bool r = request();
				if (r) {
					int a = tools::alignment<T>::val();
					if (a<sizeof(int)) a = sizeof(int);
					GASNET_CALL(gasnet_AMRequestShort6(_orig_node,
					                                   adabs::impl::PGAS_ADDR_GET,
					                                   get_low(_orig_ptr),
					                                   get_high(_orig_ptr),
					                                   _batch_size * sizeof(T),
					                                   get_low(_cache),
					                                   get_high(_cache),
					                                   a
					                                  )
					           )
				}
			}

			// busy-wait until the flag switches to FULL
			while (!is_available()) {
			}

			return _cache;
		}

		/**
		 * Returns a writable pointer to an uninitialised batch. The caller
		 * must fill it and then call set_data(). (Name kept for interface
		 * compatibility, including the original "unitialized" spelling.)
		 */
		T* get_data_unitialized() {
			set_cache();
			const bool w = writing();
			assert(w);
			return _cache;
		}

		/**
		 * Publishes a batch previously obtained via get_data_unitialized().
		 * Marks the local flag FULL and, for remote addresses, pushes the
		 * payload plus flag update to the owner via a long active message.
		 */
		void set_data(T const * const data) {
			assert (data == _cache);

			using namespace adabs::tools;

			__sync_synchronize();
			const bool a = available();
			assert (a);

			if (!is_local()) {
				GASNET_CALL(gasnet_AMRequestLong2(_orig_node,
				                                  adabs::impl::PGAS_ADDR_SET,
				                                  _cache,
				                                  sizeof(T)*_batch_size,
				                                  (void*)(_orig_ptr),
				                                  get_low(get_orig_flag()),
				                                  get_high(get_orig_flag())
				                                 )
				           )
			}
		}

		// true iff the storage lives on the calling node
		bool is_local() const {
			return (_orig_node == adabs::me);
		}

		// Pointer-style arithmetic: one step spans a batch payload plus its
		// aligned flag word (stride = alignment + sizeof(T)*_batch_size).
		pgas_addr<T>& operator+= (const int rhs) {
			int a = tools::alignment<T>::val();
			if (a<sizeof(int)) a = sizeof(int);

			_orig_ptr = (void*)(
			             (char*)_orig_ptr
			             + (a + sizeof(T)*_batch_size)*rhs
			            );
			clear_cache();
			set_cache();

			return *this;
		}

		pgas_addr<T> operator+(const int rhs) const {
			int a = tools::alignment<T>::val();
			if (a<sizeof(int)) a = sizeof(int);

			char* add = (char*)_orig_ptr;
			add += (a + sizeof(T)*_batch_size)*rhs;

			return pgas_addr<T>(add, _batch_size, _orig_node);
		}

		pgas_addr<T>& operator-= (const int rhs) {
			int a = tools::alignment<T>::val();
			if (a<sizeof(int)) a = sizeof(int);

			_orig_ptr = (void*)(
			             (char*)_orig_ptr
			             - (a + sizeof(T)*_batch_size)*rhs
			            );
			clear_cache();
			set_cache();

			return *this;
		}

		pgas_addr<T> operator-(const int rhs) const {
			int a = tools::alignment<T>::val();
			if (a<sizeof(int)) a = sizeof(int);

			char* add = (char*)_orig_ptr;
			add -= (a + sizeof(T)*_batch_size)*rhs;

			return pgas_addr<T>(add, _batch_size, _orig_node);
		}

		/**
		 * This function call is unsafe, only use with care
		 * We'll have to check on which occasions we use it and than
		 * offer better solutions for these.
		 */
		void* get_raw_pointer() const {
			return _orig_ptr;
		}

		int get_node() const {
			return _orig_node;
		}

		int get_batch_size() const {
			return _batch_size;
		}

		// Flag address on the OWNING node (payload is followed by the flag).
		void* get_orig_flag() const {
			char* temp = (char*) _orig_ptr + sizeof(T)*_batch_size;
			return (void*)(temp);
		}

		// Flag address inside the LOCAL cache buffer.
		int* get_flag() const {
			assert(_cache!=0);
			int a = tools::alignment<T>::val();
			if (a<sizeof(int)) a = sizeof(int);
			return (int*)((char*)_cache + sizeof(T)*_batch_size);
		}

	private:
		// Frees the remote-side cache buffer (no-op for local addresses).
		void clear_cache() {
			if (!is_local()) {
				#pragma omp critical (pgas_cache)
				if (_cache!=0) {
					// not totally clean, allocated with adabs::allocator
					// but currently fine
					free(_cache);
					_cache = 0;
				}
			}
		}

		// Ensures _cache points at usable storage: the original pointer when
		// local, otherwise a lazily-allocated local buffer.
		void set_cache() const {
			if (is_local()) {
				_cache = (T*)((char*)(_orig_ptr));
			} else {
				#pragma omp critical (pgas_cache)
				{
					if (_cache == 0)
						_cache = allocator<T>::allocate(_batch_size, _batch_size).get_data_ptr();
				}
			}
		}

		T* get_data_ptr() const {
			return _cache;
		}

		// check if flag is set to 1
		bool is_writing() const {
			volatile int *reader = get_flag();
			return (*reader == WRITTING);
		}

		// check if flag is set to 2
		bool is_requested() const {
			volatile int *reader = get_flag();
			return (*reader == REQUESTED);
		}

		// check if flag is set to 3
		bool is_available() const {
			volatile int *reader = get_flag();
			return (*reader == FULL);
		}

		// check and set flag to 1 (succeeds only from EMPTY)
		bool writing() const {
			volatile int *ptr = get_flag();
			return __sync_bool_compare_and_swap(ptr, EMPTY, WRITTING);
		}

		// check and set flag to 2 (succeeds only from EMPTY)
		bool request() const {
			volatile int *ptr = get_flag();
			return __sync_bool_compare_and_swap(ptr, EMPTY, REQUESTED);
		}

		// check and set flag to 3; returns true if a writer/requester was
		// pending (i.e. the transition was expected)
		bool available() const {
			volatile int *ptr = get_flag();
			int val = __sync_lock_test_and_set(ptr, FULL);
			return (val == WRITTING || val == REQUESTED);
		}

	/****************** FRIEND CLASS **********************/
	friend class allocator<T>;
};
namespace pgas {

// AM handler: pre-marks a strided run of `arg3` batch flags as WRITTING.
// Each flag must currently be EMPTY (asserted via CAS). Optionally replies
// with SET_RETURN_MARKER so the requester can observe completion.
inline void pgas_addr_set_uninit(gasnet_token_t token,
gasnet_handlerarg_t arg0, // flag pointer
gasnet_handlerarg_t arg1, // flag pointer
gasnet_handlerarg_t arg2, // stride between flags
gasnet_handlerarg_t arg3, // nb of flags
gasnet_handlerarg_t arg4, // done marker pointer
gasnet_handlerarg_t arg5 // done marker pointer
) {
using namespace adabs::tools;
// flag address arrives split into two 32-bit handler args
int *flag = get_ptr<int>(arg0, arg1);
for (int i=0; i<arg3; ++i) {
// CAS EMPTY -> WRITTING; any other prior value is a protocol violation
int writting = __sync_val_compare_and_swap(flag, adabs::pgas_addr<void>::EMPTY, adabs::pgas_addr<void>::WRITTING);
assert(writting==adabs::pgas_addr<void>::EMPTY);
flag = (int*)((char*)flag + arg2);
}
int *return_marker = get_ptr<int>(arg4, arg5);
if (return_marker != 0) {
// notify the requester that all flags were marked
GASNET_CALL(gasnet_AMReplyShort2(token,
adabs::impl::SET_RETURN_MARKER,
arg4,
arg5
)
)
}
}

/**
 * Pthread argument class
 */
struct pgas_addr_check_get_all_thread_arg {
volatile int *flag; // first flag to watch
gasnet_handlerarg_t arg2; // stride between flags
gasnet_handlerarg_t arg3; // number of flags
gasnet_handlerarg_t arg4; // done marker pointer (low)
gasnet_handlerarg_t arg5; // done marker pointer (high)
gasnet_node_t dest; // node to notify when all flags are FULL
pgas_addr_check_get_all_thread_arg(volatile int *_flag,
gasnet_handlerarg_t _arg2,
gasnet_handlerarg_t _arg3,
gasnet_handlerarg_t _arg4,
gasnet_handlerarg_t _arg5,
gasnet_node_t _dest
) : flag(_flag),
arg2(_arg2),
arg3(_arg3),
arg4(_arg4),
arg5(_arg5),
dest(_dest) {}
};

// Worker thread body: busy-waits until every watched flag reaches FULL,
// then sends SET_RETURN_MARKER to the originating node. Takes ownership of
// (and deletes) its heap-allocated argument struct.
inline void* pgas_addr_check_get_all_thread(void *threadarg) {
using namespace adabs::tools;
pgas_addr_check_get_all_thread_arg* arg = (pgas_addr_check_get_all_thread_arg*)threadarg;
for (int i=0; i<arg->arg3; ++i) {
// wait until flag is set
volatile int* reader = arg->flag;
while (*reader != adabs::pgas_addr<void>::FULL) {}
arg->flag = (volatile int*)((char*)arg->flag + arg->arg2);
}
int *return_marker = get_ptr<int>(arg->arg4, arg->arg5);
if (return_marker != 0) {
GASNET_CALL(gasnet_AMRequestShort2(arg->dest,
adabs::impl::SET_RETURN_MARKER,
arg->arg4,
arg->arg5
)
)
}
delete arg;
pthread_exit(0);
}

// AM handler: may not block inside the handler context, so it spawns a
// detached pthread that performs the (potentially long) flag polling.
inline void pgas_addr_check_get_all(gasnet_token_t token,
gasnet_handlerarg_t arg0, // flag pointer
gasnet_handlerarg_t arg1, // flag pointer
gasnet_handlerarg_t arg2, // stride between flags
gasnet_handlerarg_t arg3, // nb of flags
gasnet_handlerarg_t arg4, // done marker pointer
gasnet_handlerarg_t arg5 // done marker pointer
) {
using namespace adabs::tools;
volatile int *flag = get_ptr<volatile int>(arg0, arg1);
gasnet_node_t src;
GASNET_CALL( gasnet_AMGetMsgSource(token, &src) )
// ownership of `para` passes to the detached worker thread
pgas_addr_check_get_all_thread_arg *para = new pgas_addr_check_get_all_thread_arg(flag, arg2, arg3, arg4, arg5, src);
pthread_t thread_id;
pthread_attr_t attrb;
pthread_attr_init(&attrb);
pthread_attr_setdetachstate(&attrb, PTHREAD_CREATE_DETACHED);
pthread_create(&thread_id, &attrb, pgas_addr_check_get_all_thread, (void*) para);
}

// AM handler: sets a done-marker word to 1; must not already be set.
inline void done_marker(gasnet_token_t token,
gasnet_handlerarg_t arg0, // done marker pointer
gasnet_handlerarg_t arg1 // done marker pointer
) {
using namespace adabs::tools;
int *return_marker = get_ptr<int>(arg0, arg1);
int val = __sync_lock_test_and_set(return_marker, 1);
assert (val == 0);
}

// AM handler (long message): payload has already been deposited at the
// destination buffer by GASNet; publish it by flipping the flag to FULL.
inline void pgas_addr_remote_set (gasnet_token_t token, void *buf, size_t nbytes,
gasnet_handlerarg_t arg0, // data pointer
gasnet_handlerarg_t arg1 // data pointer
) {
using namespace adabs::tools;
int *flag = get_ptr<int>(arg0, arg1);
// fence so the payload writes are visible before the flag flips
__sync_synchronize();
int val = __sync_lock_test_and_set(flag, adabs::pgas_addr<void>::FULL);
assert(val != adabs::pgas_addr<void>::FULL);
}

// AM handler: records a remote get request in the global request list;
// actual data transfer is performed later outside the handler context.
inline void pgas_addr_remote_get (gasnet_token_t token,
gasnet_handlerarg_t arg0, // data pointer
gasnet_handlerarg_t arg1, // data pointer
gasnet_handlerarg_t arg2, // batch_mem size
gasnet_handlerarg_t arg3, // return data pointer
gasnet_handlerarg_t arg4, // return data pointer
gasnet_handlerarg_t arg5 // flag diff for remote pointer
) {
using namespace adabs::tools;
using namespace adabs::impl;

void *local = get_ptr<void>(arg0, arg1);
void *remote = get_ptr<void>(arg3, arg4);

gasnet_node_t src;
GASNET_CALL( gasnet_AMGetMsgSource(token, &src) )

#pragma omp critical (global_requests)
global_requests.insert(global_requests.begin(), remote_get_thread_arg(local, arg2, remote, src, arg5));
}

}
}
|
GB_unop__sin_fc64_fc64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__sin_fc64_fc64)
// op(A') function: GB (_unop_tran__sin_fc64_fc64)
// C type: GxB_FC64_t
// A type: GxB_FC64_t
// cast: GxB_FC64_t cij = aij
// unaryop: cij = csin (aij)
#define GB_ATYPE \
GxB_FC64_t
#define GB_CTYPE \
GxB_FC64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = csin (x) ;
// casting
#define GB_CAST(z, aij) \
GxB_FC64_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC64_t z = aij ; \
Cx [pC] = csin (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_SIN || GxB_NO_FC64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = csin (Ax [p]) for all entries; auto-generated, do not hand-edit
// the logic (see the Generator/ template). Cx and Ax may be aliased.
GrB_Info GB (_unop_apply__sin_fc64_fc64)
(
GxB_FC64_t *Cx, // Cx and Ax may be aliased
const GxB_FC64_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
// dense/full case: every position 0..anz-1 holds an entry
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GxB_FC64_t aij = Ax [p] ;
GxB_FC64_t z = aij ;
Cx [p] = csin (z) ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// skip positions whose bitmap entry marks them as absent
if (!Ab [p]) continue ;
GxB_FC64_t aij = Ax [p] ;
GxB_FC64_t z = aij ;
Cx [p] = csin (z) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = csin (A'): transpose and apply; the loop body is shared boilerplate
// pulled in from GB_unop_transpose.c, parameterised by the GB_* macros
// defined above. Auto-generated, do not hand-edit.
GrB_Info GB (_unop_tran__sin_fc64_fc64)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
compatibility.h | // -*- C++ -*-
// Copyright (C) 2007-2018 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the terms
// of the GNU General Public License as published by the Free Software
// Foundation; either version 3, or (at your option) any later
// version.
// This library is distributed in the hope that it will be useful, but
// WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// General Public License for more details.
// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.
// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
// <http://www.gnu.org/licenses/>.
/** @file parallel/compatibility.h
* @brief Compatibility layer, mostly concerned with atomic operations.
*
* This file is a GNU parallel extension to the Standard C++ Library
* and contains implementation details for the library's internal use.
*/
// Written by Felix Putze.
#ifndef _GLIBCXX_PARALLEL_COMPATIBILITY_H
#define _GLIBCXX_PARALLEL_COMPATIBILITY_H 1
#include <parallel/types.h>
#include <parallel/base.h>
#if !defined(_WIN32) || defined (__CYGWIN__)
#include <sched.h>
#endif
#ifdef __MINGW32__
// Including <windows.h> will drag in all the windows32 names. Since
// that can cause user code portability problems, we just declare the
// one needed function here.
extern "C"
__attribute((dllimport)) void __attribute__((stdcall)) Sleep (unsigned long);
#endif
namespace __gnu_parallel
{
template<typename _Tp>
inline _Tp
__add_omp(volatile _Tp* __ptr, _Tp __addend)
{
int64_t __res;
#pragma omp critical
{
__res = *__ptr;
*(__ptr) += __addend;
}
return __res;
}
/** @brief Add a value to a variable, atomically.
*
* @param __ptr Pointer to a signed integer.
* @param __addend Value to add.
*/
template<typename _Tp>
inline _Tp
__fetch_and_add(volatile _Tp* __ptr, _Tp __addend)
{
if (__atomic_always_lock_free(sizeof(_Tp), __ptr))
return __atomic_fetch_add(__ptr, __addend, __ATOMIC_ACQ_REL);
return __add_omp(__ptr, __addend);
}
template<typename _Tp>
inline bool
__cas_omp(volatile _Tp* __ptr, _Tp __comparand, _Tp __replacement)
{
bool __res = false;
#pragma omp critical
{
if (*__ptr == __comparand)
{
*__ptr = __replacement;
__res = true;
}
}
return __res;
}
/** @brief Compare-and-swap
*
* Compare @c *__ptr and @c __comparand. If equal, let @c
* *__ptr=__replacement and return @c true, return @c false otherwise.
*
* @param __ptr Pointer to signed integer.
* @param __comparand Compare value.
* @param __replacement Replacement value.
*/
template<typename _Tp>
inline bool
__compare_and_swap(volatile _Tp* __ptr, _Tp __comparand, _Tp __replacement)
{
if (__atomic_always_lock_free(sizeof(_Tp), __ptr))
return __atomic_compare_exchange_n(__ptr, &__comparand, __replacement,
false, __ATOMIC_ACQ_REL,
__ATOMIC_RELAXED);
return __cas_omp(__ptr, __comparand, __replacement);
}
/** @brief Yield control to another thread, without waiting for
* the end of the time slice.
*/
  // Yield the processor: Sleep(0) on native Windows, sched_yield elsewhere
  // (the matching declarations/includes are guarded at the top of the file).
  inline void
  __yield()
  {
#if defined (_WIN32) && !defined (__CYGWIN__)
    Sleep(0);
#else
    sched_yield();
#endif
  }
} // end namespace
#endif /* _GLIBCXX_PARALLEL_COMPATIBILITY_H */
|
StaticRTree.h | /*
Copyright (c) 2013, Project OSRM, Dennis Luxen, others
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list
of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef STATICRTREE_H
#define STATICRTREE_H
#include "DeallocatingVector.h"
#include "HilbertValue.h"
#include "PhantomNodes.h"
#include "QueryNode.h"
#include "SharedMemoryFactory.h"
#include "SharedMemoryVectorWrapper.h"
#include "../Util/MercatorUtil.h"
#include "../Util/OSRMException.h"
#include "../Util/SimpleLogger.h"
#include "../typedefs.h"
#include <osrm/Coordinate.h>
#include <boost/assert.hpp>
#include <boost/filesystem.hpp>
#include <boost/filesystem/fstream.hpp>
#include <boost/thread.hpp>
#include <algorithm>
#include <array>
#include <chrono>
#include <limits>
#include <memory>
#include <queue>
#include <string>
#include <vector>
// tuning parameters
const static uint32_t RTREE_BRANCHING_FACTOR = 64;
const static uint32_t RTREE_LEAF_NODE_SIZE = 1024;
static boost::thread_specific_ptr<boost::filesystem::ifstream> thread_local_rtree_stream;
// Implements a static, i.e. packed, R-tree
template <class DataT,
class CoordinateListT = std::vector<FixedPointCoordinate>,
bool UseSharedMemory = false>
class StaticRTree
{
public:
// Axis-aligned bounding rectangle in fixed-point lat/lon coordinates.
// Starts "inverted" (min=INT_MAX, max=INT_MIN) so the first Augment/Init
// call establishes a valid extent.
struct RectangleInt2D
{
RectangleInt2D() : min_lon(INT_MAX), max_lon(INT_MIN), min_lat(INT_MAX), max_lat(INT_MIN) {}
int32_t min_lon, max_lon;
int32_t min_lat, max_lat;
// Computes the minimum bounding rectangle of the first element_count
// edges, looking both endpoints (u, v) up in coordinate_list.
inline void InitializeMBRectangle(const std::array<DataT, RTREE_LEAF_NODE_SIZE> &objects,
const uint32_t element_count,
const std::vector<NodeInfo> &coordinate_list)
{
for (uint32_t i = 0; i < element_count; ++i)
{
min_lon = std::min(min_lon,
std::min(coordinate_list.at(objects[i].u).lon,
coordinate_list.at(objects[i].v).lon));
max_lon = std::max(max_lon,
std::max(coordinate_list.at(objects[i].u).lon,
coordinate_list.at(objects[i].v).lon));
min_lat = std::min(min_lat,
std::min(coordinate_list.at(objects[i].u).lat,
coordinate_list.at(objects[i].v).lat));
max_lat = std::max(max_lat,
std::max(coordinate_list.at(objects[i].u).lat,
coordinate_list.at(objects[i].v).lat));
}
}
// Grows this rectangle to also cover `other`.
inline void AugmentMBRectangle(const RectangleInt2D &other)
{
min_lon = std::min(min_lon, other.min_lon);
max_lon = std::max(max_lon, other.max_lon);
min_lat = std::min(min_lat, other.min_lat);
max_lat = std::max(max_lat, other.max_lat);
}
inline FixedPointCoordinate Centroid() const
{
FixedPointCoordinate centroid;
// The coordinates of the midpoints are given by:
// x = (x1 + x2) /2 and y = (y1 + y2) /2.
centroid.lon = (min_lon + max_lon) / 2;
centroid.lat = (min_lat + max_lat) / 2;
return centroid;
}
// NOTE(review): only tests whether a corner of `other` lies inside this
// rectangle; it misses the case where `other` fully contains this one or
// they cross without containing corners — presumably acceptable for the
// callers here, but worth confirming before reuse.
inline bool Intersects(const RectangleInt2D &other) const
{
FixedPointCoordinate upper_left(other.max_lat, other.min_lon);
FixedPointCoordinate upper_right(other.max_lat, other.max_lon);
FixedPointCoordinate lower_right(other.min_lat, other.max_lon);
FixedPointCoordinate lower_left(other.min_lat, other.min_lon);
return (Contains(upper_left) || Contains(upper_right) || Contains(lower_right) ||
Contains(lower_left));
}
// Lower bound on the distance from `location` to anything inside this
// rectangle: 0 if contained, otherwise the distance to the nearest corner.
inline float GetMinDist(const FixedPointCoordinate &location) const
{
bool is_contained = Contains(location);
if (is_contained)
{
return 0.;
}
float min_dist = std::numeric_limits<float>::max();
min_dist = std::min(min_dist,
FixedPointCoordinate::ApproximateEuclideanDistance(
location.lat, location.lon, max_lat, min_lon));
min_dist = std::min(min_dist,
FixedPointCoordinate::ApproximateEuclideanDistance(
location.lat, location.lon, max_lat, max_lon));
min_dist = std::min(min_dist,
FixedPointCoordinate::ApproximateEuclideanDistance(
location.lat, location.lon, min_lat, max_lon));
min_dist = std::min(min_dist,
FixedPointCoordinate::ApproximateEuclideanDistance(
location.lat, location.lon, min_lat, min_lon));
return min_dist;
}
// MinMax distance (R-tree pruning bound): for each of the four sides take
// the farther corner, then take the minimum over the sides.
inline float GetMinMaxDist(const FixedPointCoordinate &location) const
{
float min_max_dist = std::numeric_limits<float>::max();
// Get minmax distance to each of the four sides
FixedPointCoordinate upper_left(max_lat, min_lon);
FixedPointCoordinate upper_right(max_lat, max_lon);
FixedPointCoordinate lower_right(min_lat, max_lon);
FixedPointCoordinate lower_left(min_lat, min_lon);
min_max_dist = std::min(
min_max_dist,
std::max(
FixedPointCoordinate::ApproximateEuclideanDistance(location, upper_left),
FixedPointCoordinate::ApproximateEuclideanDistance(location, upper_right)));
min_max_dist = std::min(
min_max_dist,
std::max(
FixedPointCoordinate::ApproximateEuclideanDistance(location, upper_right),
FixedPointCoordinate::ApproximateEuclideanDistance(location, lower_right)));
min_max_dist = std::min(
min_max_dist,
std::max(FixedPointCoordinate::ApproximateEuclideanDistance(location, lower_right),
FixedPointCoordinate::ApproximateEuclideanDistance(location, lower_left)));
min_max_dist = std::min(
min_max_dist,
std::max(FixedPointCoordinate::ApproximateEuclideanDistance(location, lower_left),
FixedPointCoordinate::ApproximateEuclideanDistance(location, upper_left)));
return min_max_dist;
}
// Strict (exclusive) containment test — boundary points do not count.
inline bool Contains(const FixedPointCoordinate &location) const
{
const bool lats_contained = (location.lat > min_lat) && (location.lat < max_lat);
const bool lons_contained = (location.lon > min_lon) && (location.lon < max_lon);
return lats_contained && lons_contained;
}
inline friend std::ostream &operator<<(std::ostream &out, const RectangleInt2D &rect)
{
out << rect.min_lat / COORDINATE_PRECISION << "," << rect.min_lon / COORDINATE_PRECISION
<< " " << rect.max_lat / COORDINATE_PRECISION << ","
<< rect.max_lon / COORDINATE_PRECISION;
return out;
}
};
typedef RectangleInt2D RectangleT;
// Inner tree node; written to / read from disk verbatim, so the layout is
// part of the on-disk format. When child_is_on_disk is set, children[0]
// holds the index of a LeafNode in the leaf file instead of child indices.
struct TreeNode
{
TreeNode() : child_count(0), child_is_on_disk(false) {}
RectangleT minimum_bounding_rectangle;
uint32_t child_count : 31;
bool child_is_on_disk : 1;
uint32_t children[RTREE_BRANCHING_FACTOR];
};
private:
// Pairs an input element's index with its Hilbert value so elements can be
// sorted along the Hilbert curve before packing.
struct WrappedInputElement
{
explicit WrappedInputElement(const uint32_t _array_index, const uint64_t _hilbert_value)
: m_array_index(_array_index), m_hilbert_value(_hilbert_value)
{
}
WrappedInputElement() : m_array_index(UINT_MAX), m_hilbert_value(0) {}
uint32_t m_array_index;
uint64_t m_hilbert_value;
// sort ascending by Hilbert value
inline bool operator<(const WrappedInputElement &other) const
{
return m_hilbert_value < other.m_hilbert_value;
}
};
// Fixed-size leaf record as stored in the leaf file.
struct LeafNode
{
LeafNode() : object_count(0) {}
uint32_t object_count;
std::array<DataT, RTREE_LEAF_NODE_SIZE> objects;
};
// Priority-queue entry for best-first search, ordered by min distance.
// NOTE(review): with std::priority_queue (a max-heap) this operator< yields
// largest-min_dist-first; the query code presumably compensates — verify
// against the search routines before changing.
struct QueryCandidate
{
explicit QueryCandidate(const uint32_t n_id, const float dist)
: node_id(n_id), min_dist(dist)
{
}
QueryCandidate() : node_id(UINT_MAX), min_dist(std::numeric_limits<float>::max()) {}
uint32_t node_id;
float min_dist;
inline bool operator<(const QueryCandidate &other) const
{
return min_dist < other.min_dist;
}
};
typename ShM<TreeNode, UseSharedMemory>::vector m_search_tree; // inner nodes, root at index 0
uint64_t m_element_count; // total number of indexed edges
const std::string m_leaf_node_filename; // path of the on-disk leaf file
std::shared_ptr<CoordinateListT> m_coordinate_list; // node-id -> coordinate lookup
public:
StaticRTree() = delete;
StaticRTree(const StaticRTree &) = delete;
// Construct a packed Hilbert-R-Tree with Kamel-Faloutsos algorithm [1]
explicit StaticRTree(std::vector<DataT> &input_data_vector,
const std::string tree_node_filename,
const std::string leaf_node_filename,
const std::vector<NodeInfo> &coordinate_list)
: m_element_count(input_data_vector.size()), m_leaf_node_filename(leaf_node_filename)
{
SimpleLogger().Write() << "constructing r-tree of " << m_element_count
<< " edge elements build on-top of " << coordinate_list.size()
<< " coordinates";
std::chrono::time_point<std::chrono::steady_clock> time0 = std::chrono::steady_clock::now();
std::vector<WrappedInputElement> input_wrapper_vector(m_element_count);
HilbertCode get_hilbert_number;
// generate auxiliary vector of hilbert-values
#pragma omp parallel for schedule(guided)
for (uint64_t element_counter = 0; element_counter < m_element_count; ++element_counter)
{
input_wrapper_vector[element_counter].m_array_index = element_counter;
// Get Hilbert-Value for centroid in mercartor projection
DataT const ¤t_element = input_data_vector[element_counter];
FixedPointCoordinate current_centroid =
DataT::Centroid(FixedPointCoordinate(coordinate_list.at(current_element.u).lat,
coordinate_list.at(current_element.u).lon),
FixedPointCoordinate(coordinate_list.at(current_element.v).lat,
coordinate_list.at(current_element.v).lon));
current_centroid.lat =
COORDINATE_PRECISION * lat2y(current_centroid.lat / COORDINATE_PRECISION);
uint64_t current_hilbert_value = get_hilbert_number(current_centroid);
input_wrapper_vector[element_counter].m_hilbert_value = current_hilbert_value;
}
// open leaf file
boost::filesystem::ofstream leaf_node_file(leaf_node_filename, std::ios::binary);
leaf_node_file.write((char *)&m_element_count, sizeof(uint64_t));
// sort the hilbert-value representatives
std::sort(input_wrapper_vector.begin(), input_wrapper_vector.end());
std::vector<TreeNode> tree_nodes_in_level;
// pack M elements into leaf node and write to leaf file
uint64_t processed_objects_count = 0;
while (processed_objects_count < m_element_count)
{
LeafNode current_leaf;
TreeNode current_node;
// SimpleLogger().Write() << "reading " << tree_size << " tree nodes in " <<
// (sizeof(TreeNode)*tree_size) << " bytes";
for (uint32_t current_element_index = 0; RTREE_LEAF_NODE_SIZE > current_element_index;
++current_element_index)
{
if (m_element_count > (processed_objects_count + current_element_index))
{
uint32_t index_of_next_object =
input_wrapper_vector[processed_objects_count + current_element_index]
.m_array_index;
current_leaf.objects[current_element_index] =
input_data_vector[index_of_next_object];
++current_leaf.object_count;
}
}
// generate tree node that resemble the objects in leaf and store it for next level
current_node.minimum_bounding_rectangle.InitializeMBRectangle(
current_leaf.objects, current_leaf.object_count, coordinate_list);
current_node.child_is_on_disk = true;
current_node.children[0] = tree_nodes_in_level.size();
tree_nodes_in_level.emplace_back(current_node);
// write leaf_node to leaf node file
leaf_node_file.write((char *)¤t_leaf, sizeof(current_leaf));
processed_objects_count += current_leaf.object_count;
}
// close leaf file
leaf_node_file.close();
uint32_t processing_level = 0;
while (1 < tree_nodes_in_level.size())
{
std::vector<TreeNode> tree_nodes_in_next_level;
uint32_t processed_tree_nodes_in_level = 0;
while (processed_tree_nodes_in_level < tree_nodes_in_level.size())
{
TreeNode parent_node;
// pack RTREE_BRANCHING_FACTOR elements into tree_nodes each
for (uint32_t current_child_node_index = 0;
RTREE_BRANCHING_FACTOR > current_child_node_index;
++current_child_node_index)
{
if (processed_tree_nodes_in_level < tree_nodes_in_level.size())
{
TreeNode ¤t_child_node =
tree_nodes_in_level[processed_tree_nodes_in_level];
// add tree node to parent entry
parent_node.children[current_child_node_index] = m_search_tree.size();
m_search_tree.emplace_back(current_child_node);
// augment MBR of parent
parent_node.minimum_bounding_rectangle.AugmentMBRectangle(
current_child_node.minimum_bounding_rectangle);
// increase counters
++parent_node.child_count;
++processed_tree_nodes_in_level;
}
}
tree_nodes_in_next_level.emplace_back(parent_node);
}
tree_nodes_in_level.swap(tree_nodes_in_next_level);
++processing_level;
}
BOOST_ASSERT_MSG(1 == tree_nodes_in_level.size(), "tree broken, more than one root node");
// last remaining entry is the root node, store it
m_search_tree.emplace_back(tree_nodes_in_level[0]);
// reverse and renumber tree to have root at index 0
std::reverse(m_search_tree.begin(), m_search_tree.end());
#pragma omp parallel for schedule(guided)
for (uint32_t i = 0; i < m_search_tree.size(); ++i)
{
TreeNode ¤t_tree_node = m_search_tree[i];
for (uint32_t j = 0; j < current_tree_node.child_count; ++j)
{
const uint32_t old_id = current_tree_node.children[j];
const uint32_t new_id = m_search_tree.size() - old_id - 1;
current_tree_node.children[j] = new_id;
}
}
// open tree file
boost::filesystem::ofstream tree_node_file(tree_node_filename, std::ios::binary);
uint32_t size_of_tree = m_search_tree.size();
BOOST_ASSERT_MSG(0 < size_of_tree, "tree empty");
tree_node_file.write((char *)&size_of_tree, sizeof(uint32_t));
tree_node_file.write((char *)&m_search_tree[0], sizeof(TreeNode) * size_of_tree);
// close tree node file.
tree_node_file.close();
std::chrono::time_point<std::chrono::steady_clock> time1 = std::chrono::steady_clock::now();
std::chrono::duration<double> elapsed_seconds = time1 - time0;
SimpleLogger().Write() << "finished r-tree construction in " << (elapsed_seconds.count())
<< " seconds";
}
// Read-only operation for queries
// Loads the inner tree from `node_file` into RAM and reads only the element
// count from `leaf_file`; leaf contents are fetched lazily from disk later.
explicit StaticRTree(const boost::filesystem::path &node_file,
const boost::filesystem::path &leaf_file,
const std::shared_ptr<CoordinateListT> coordinate_list)
: m_leaf_node_filename(leaf_file.string())
{
// open tree node file and load into RAM.
m_coordinate_list = coordinate_list;
if (!boost::filesystem::exists(node_file))
{
throw OSRMException("ram index file does not exist");
}
if (0 == boost::filesystem::file_size(node_file))
{
throw OSRMException("ram index file is empty");
}
boost::filesystem::ifstream tree_node_file(node_file, std::ios::binary);
uint32_t tree_size = 0;
// file layout: uint32_t node count, then the raw TreeNode array
tree_node_file.read((char *)&tree_size, sizeof(uint32_t));
m_search_tree.resize(tree_size);
tree_node_file.read((char *)&m_search_tree[0], sizeof(TreeNode) * tree_size);
tree_node_file.close();
// open leaf node file and store thread specific pointer
if (!boost::filesystem::exists(leaf_file))
{
throw OSRMException("mem index file does not exist");
}
if (0 == boost::filesystem::file_size(leaf_file))
{
throw OSRMException("mem index file is empty");
}
boost::filesystem::ifstream leaf_node_file(leaf_file, std::ios::binary);
// leaf file layout: uint64_t element count, then the LeafNode records
leaf_node_file.read((char *)&m_element_count, sizeof(uint64_t));
leaf_node_file.close();
// SimpleLogger().Write() << tree_size << " nodes in search tree";
// SimpleLogger().Write() << m_element_count << " elements in leafs";
}
// Shared-memory variant: the inner tree is already resident at
// `tree_node_ptr` (e.g. in a shared-memory segment) and is wrapped rather
// than copied; only the element count is read from the leaf file.
explicit StaticRTree(TreeNode *tree_node_ptr,
const uint32_t number_of_nodes,
const boost::filesystem::path &leaf_file,
std::shared_ptr<CoordinateListT> coordinate_list)
: m_search_tree(tree_node_ptr, number_of_nodes), m_leaf_node_filename(leaf_file.string()),
m_coordinate_list(coordinate_list)
{
// open leaf node file and store thread specific pointer
if (!boost::filesystem::exists(leaf_file))
{
throw OSRMException("mem index file does not exist");
}
if (0 == boost::filesystem::file_size(leaf_file))
{
throw OSRMException("mem index file is empty");
}
boost::filesystem::ifstream leaf_node_file(leaf_file, std::ios::binary);
leaf_node_file.read((char *)&m_element_count, sizeof(uint64_t));
leaf_node_file.close();
// drop any stale per-thread leaf stream from a previous tree instance
if (thread_local_rtree_stream.get())
{
thread_local_rtree_stream->close();
}
// SimpleLogger().Write() << tree_size << " nodes in search tree";
// SimpleLogger().Write() << m_element_count << " elements in leafs";
}
// Read-only operation for queries
bool LocateClosestEndPointForCoordinate(const FixedPointCoordinate &input_coordinate,
FixedPointCoordinate &result_coordinate,
const unsigned zoom_level)
{
bool ignore_tiny_components = (zoom_level <= 14);
DataT nearest_edge;
float min_dist = std::numeric_limits<float>::max();
float min_max_dist = std::numeric_limits<float>::max();
bool found_a_nearest_edge = false;
// initialize queue with root element
std::priority_queue<QueryCandidate> traversal_queue;
float current_min_dist =
m_search_tree[0].minimum_bounding_rectangle.GetMinDist(input_coordinate);
traversal_queue.emplace(0, current_min_dist);
while (!traversal_queue.empty())
{
const QueryCandidate current_query_node = traversal_queue.top();
traversal_queue.pop();
const bool prune_downward = (current_query_node.min_dist >= min_max_dist);
const bool prune_upward = (current_query_node.min_dist >= min_dist);
if (!prune_downward && !prune_upward)
{ // downward pruning
TreeNode ¤t_tree_node = m_search_tree[current_query_node.node_id];
if (current_tree_node.child_is_on_disk)
{
LeafNode current_leaf_node;
LoadLeafFromDisk(current_tree_node.children[0], current_leaf_node);
for (uint32_t i = 0; i < current_leaf_node.object_count; ++i)
{
DataT const ¤t_edge = current_leaf_node.objects[i];
if (ignore_tiny_components && current_edge.is_in_tiny_cc)
{
continue;
}
float current_minimum_distance =
FixedPointCoordinate::ApproximateEuclideanDistance(
input_coordinate.lat,
input_coordinate.lon,
m_coordinate_list->at(current_edge.u).lat,
m_coordinate_list->at(current_edge.u).lon);
if (current_minimum_distance < min_dist)
{
// found a new minimum
min_dist = current_minimum_distance;
result_coordinate.lat = m_coordinate_list->at(current_edge.u).lat;
result_coordinate.lon = m_coordinate_list->at(current_edge.u).lon;
found_a_nearest_edge = true;
}
current_minimum_distance =
FixedPointCoordinate::ApproximateEuclideanDistance(
input_coordinate.lat,
input_coordinate.lon,
m_coordinate_list->at(current_edge.v).lat,
m_coordinate_list->at(current_edge.v).lon);
if (current_minimum_distance < min_dist)
{
// found a new minimum
min_dist = current_minimum_distance;
result_coordinate.lat = m_coordinate_list->at(current_edge.v).lat;
result_coordinate.lon = m_coordinate_list->at(current_edge.v).lon;
found_a_nearest_edge = true;
}
}
}
else
{
// traverse children, prune if global mindist is smaller than local one
for (uint32_t i = 0; i < current_tree_node.child_count; ++i)
{
const int32_t child_id = current_tree_node.children[i];
const TreeNode &child_tree_node = m_search_tree[child_id];
const RectangleT &child_rectangle =
child_tree_node.minimum_bounding_rectangle;
const float current_min_dist =
child_rectangle.GetMinDist(input_coordinate);
const float current_min_max_dist =
child_rectangle.GetMinMaxDist(input_coordinate);
if (current_min_max_dist < min_max_dist)
{
min_max_dist = current_min_max_dist;
}
if (current_min_dist > min_max_dist)
{
continue;
}
if (current_min_dist > min_dist)
{ // upward pruning
continue;
}
traversal_queue.emplace(child_id, current_min_dist);
}
}
}
}
return found_a_nearest_edge;
}
bool FindPhantomNodeForCoordinate(const FixedPointCoordinate &input_coordinate,
PhantomNode &result_phantom_node,
const unsigned zoom_level)
{
// SimpleLogger().Write() << "searching for coordinate " << input_coordinate;
const bool ignore_tiny_components = (zoom_level <= 14);
DataT nearest_edge;
float min_dist = std::numeric_limits<float>::max();
float min_max_dist = std::numeric_limits<float>::max();
bool found_a_nearest_edge = false;
FixedPointCoordinate nearest, current_start_coordinate, current_end_coordinate;
// initialize queue with root element
std::priority_queue<QueryCandidate> traversal_queue;
float current_min_dist =
m_search_tree[0].minimum_bounding_rectangle.GetMinDist(input_coordinate);
traversal_queue.emplace(0, current_min_dist);
BOOST_ASSERT_MSG(std::numeric_limits<float>::epsilon() >
(0. - traversal_queue.top().min_dist),
"Root element in NN Search has min dist != 0.");
LeafNode current_leaf_node;
while (!traversal_queue.empty())
{
const QueryCandidate current_query_node = traversal_queue.top();
traversal_queue.pop();
const bool prune_downward = (current_query_node.min_dist >= min_max_dist);
const bool prune_upward = (current_query_node.min_dist >= min_dist);
if (!prune_downward && !prune_upward)
{ // downward pruning
const TreeNode ¤t_tree_node = m_search_tree[current_query_node.node_id];
if (current_tree_node.child_is_on_disk)
{
LoadLeafFromDisk(current_tree_node.children[0], current_leaf_node);
for (uint32_t i = 0; i < current_leaf_node.object_count; ++i)
{
DataT ¤t_edge = current_leaf_node.objects[i];
if (ignore_tiny_components && current_edge.is_in_tiny_cc)
{
continue;
}
float current_ratio = 0.;
const float current_perpendicular_distance =
FixedPointCoordinate::ComputePerpendicularDistance(
m_coordinate_list->at(current_edge.u),
m_coordinate_list->at(current_edge.v),
input_coordinate,
nearest,
current_ratio);
BOOST_ASSERT(0. <= current_perpendicular_distance);
if ((current_perpendicular_distance < min_dist) &&
!EpsilonCompare(current_perpendicular_distance, min_dist))
{ // found a new minimum
min_dist = current_perpendicular_distance;
// TODO: use assignment c'tor in PhantomNode
result_phantom_node.forward_node_id =
current_edge.forward_edge_based_node_id;
result_phantom_node.reverse_node_id =
current_edge.reverse_edge_based_node_id;
result_phantom_node.name_id = current_edge.name_id;
result_phantom_node.forward_weight = current_edge.forward_weight;
result_phantom_node.reverse_weight = current_edge.reverse_weight;
result_phantom_node.forward_offset = current_edge.forward_offset;
result_phantom_node.reverse_offset = current_edge.reverse_offset;
result_phantom_node.packed_geometry_id =
current_edge.packed_geometry_id;
result_phantom_node.fwd_segment_position =
current_edge.fwd_segment_position;
result_phantom_node.location = nearest;
current_start_coordinate.lat =
m_coordinate_list->at(current_edge.u).lat;
current_start_coordinate.lon =
m_coordinate_list->at(current_edge.u).lon;
current_end_coordinate.lat = m_coordinate_list->at(current_edge.v).lat;
current_end_coordinate.lon = m_coordinate_list->at(current_edge.v).lon;
nearest_edge = current_edge;
found_a_nearest_edge = true;
}
}
}
else
{
// traverse children, prune if global mindist is smaller than local one
for (uint32_t i = 0; i < current_tree_node.child_count; ++i)
{
const int32_t child_id = current_tree_node.children[i];
TreeNode &child_tree_node = m_search_tree[child_id];
RectangleT &child_rectangle = child_tree_node.minimum_bounding_rectangle;
const float current_min_dist =
child_rectangle.GetMinDist(input_coordinate);
const float current_min_max_dist =
child_rectangle.GetMinMaxDist(input_coordinate);
if (current_min_max_dist < min_max_dist)
{
min_max_dist = current_min_max_dist;
}
if (current_min_dist > min_max_dist)
{
continue;
}
if (current_min_dist > min_dist)
{ // upward pruning
continue;
}
traversal_queue.emplace(child_id, current_min_dist);
}
}
}
}
// Hack to fix rounding errors and wandering via nodes.
if (1 == std::abs(input_coordinate.lon - result_phantom_node.location.lon))
{
result_phantom_node.location.lon = input_coordinate.lon;
}
if (1 == std::abs(input_coordinate.lat - result_phantom_node.location.lat))
{
result_phantom_node.location.lat = input_coordinate.lat;
}
float ratio = 0.f;
if (found_a_nearest_edge)
{
const float distance_1 = FixedPointCoordinate::ApproximateEuclideanDistance(
current_start_coordinate, result_phantom_node.location);
const float distance_2 = FixedPointCoordinate::ApproximateEuclideanDistance(
current_start_coordinate, current_end_coordinate);
ratio = distance_1 / distance_2;
ratio = std::min(1.f, ratio);
if (SPECIAL_NODEID != result_phantom_node.forward_node_id)
{
result_phantom_node.forward_weight *= ratio;
}
if (SPECIAL_NODEID != result_phantom_node.reverse_node_id)
{
result_phantom_node.reverse_weight *= (1. - ratio);
}
}
return found_a_nearest_edge;
}
private:
// Fetch the leaf node with the given id from the leaf file into result_node.
// Each thread owns its own ifstream: it is opened lazily on first use and
// revived (error flags cleared) if a previous read left it in a bad state.
inline void LoadLeafFromDisk(const uint32_t leaf_id, LeafNode &result_node)
{
    const bool stream_missing =
        !thread_local_rtree_stream.get() || !thread_local_rtree_stream->is_open();
    if (stream_missing)
    {
        thread_local_rtree_stream.reset(new boost::filesystem::ifstream(
            m_leaf_node_filename, std::ios::in | std::ios::binary));
    }
    if (!thread_local_rtree_stream->good())
    {
        thread_local_rtree_stream->clear(std::ios::goodbit);
        SimpleLogger().Write(logDEBUG) << "Resetting stale filestream";
    }
    // Leaves are stored back-to-back after the leading uint64_t element count.
    const uint64_t leaf_offset = sizeof(uint64_t) + leaf_id * sizeof(LeafNode);
    thread_local_rtree_stream->seekg(leaf_offset);
    thread_local_rtree_stream->read(reinterpret_cast<char *>(&result_node),
                                    sizeof(LeafNode));
}
// True when segment (a,b) and segment (c,d) connect the same pair of points,
// regardless of orientation or how the endpoints were paired up.
inline bool EdgesAreEquivalent(const FixedPointCoordinate &a,
                               const FixedPointCoordinate &b,
                               const FixedPointCoordinate &c,
                               const FixedPointCoordinate &d) const
{
    const bool degenerate_pairs = (a == b && c == d);
    const bool matched_forward = (a == c && b == d);
    const bool matched_reversed = (a == d && b == c);
    return degenerate_pairs || matched_forward || matched_reversed;
}
// Approximate floating-point equality: true when the two values differ by
// less than the machine epsilon of FloatT.
template <typename FloatT>
inline bool EpsilonCompare(const FloatT d1, const FloatT d2) const
{
    const FloatT difference = std::abs(d1 - d2);
    return difference < std::numeric_limits<FloatT>::epsilon();
}
};
//[1] "On Packing R-Trees"; I. Kamel, C. Faloutsos; 1993; DOI: 10.1145/170088.170403
//[2] "Nearest Neighbor Queries", N. Roussopulos et al; 1995; DOI: 10.1145/223784.223794
#endif // STATICRTREE_H
|
ast-dump-openmp-parallel-sections.c | // RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s
void test_zero() {
#pragma omp parallel sections
{}
}
void test_one() {
#pragma omp parallel sections
{ ; }
}
// CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK: |-FunctionDecl {{.*}} <{{.*}}ast-dump-openmp-parallel-sections.c:3:1, line:6:1> line:3:6 test_zero 'void ()'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:18, line:6:1>
// CHECK-NEXT: `-FunctionDecl {{.*}} <line:8:1, line:11:1> line:8:6 test_one 'void ()'
// CHECK-NEXT: `-CompoundStmt {{.*}} <col:17, line:11:1>
// CHECK-NEXT: `-OMPParallelSectionsDirective {{.*}} <line:9:1, col:30>
// CHECK-NEXT: `-CapturedStmt {{.*}} <line:10:3, col:7>
// CHECK-NEXT: `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK-NEXT: |-CompoundStmt {{.*}} <col:3, col:7> openmp_structured_block
// CHECK-NEXT: | `-NullStmt {{.*}} <col:5>
// CHECK-NEXT: |-ImplicitParamDecl {{.*}} <line:9:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: `-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-parallel-sections.c:9:1) *const restrict'
|
vadd.orio.c | #include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <omp.h>
double getClock()
{
struct timezone tzp;
struct timeval tp;
gettimeofday (&tp, &tzp);
return (tp.tv_sec + tp.tv_usec*1.0e-6);
}
/*
 * Orio-generated vector-add benchmark driver: y[i] = x1[i] + x2[i] + x3[i],
 * timed over REPS repetitions for problem size N (both compile-time macros,
 * as in the original).
 *
 * Cleaned up relative to the generated code:
 *  - The hand-unrolled variants were selected by an alignment test that cast
 *    pointers to `int` ("(int)(x1)|..."), which truncates addresses on LP64
 *    platforms and so tested garbage; that dispatch and the XL-C-only
 *    __alignx() hints are removed, leaving plain loops the compiler can
 *    unroll/vectorize itself.  The computed y[] is identical for every
 *    branch of the original.
 *  - `#elif ORIO_PAR` is now `#elif defined(ORIO_PAR)`, consistent with
 *    `#ifdef ORIO_SEQ`, so defining the macro with no value selects the
 *    branch instead of breaking the build.
 *  - malloc() results are checked, and all buffers are freed before exit.
 *
 * Returns (int)y[0], matching the original's implicit double-to-int exit
 * code so scripted checks keep working.
 */
int main(int argc, char *argv[])
{
  double *y;
  double *x1;
  double *x2;
  double *x3;
#pragma disjoint (*x1,*x2,*x3,*y)
  int n = N;
  {
    /* Allocate and deterministically initialize the input vectors. */
    int i1;
    y = (double*) malloc((n) * sizeof(double));
    x1 = (double*) malloc((n) * sizeof(double));
    x2 = (double*) malloc((n) * sizeof(double));
    x3 = (double*) malloc((n) * sizeof(double));
    if (y == NULL || x1 == NULL || x2 == NULL || x3 == NULL) {
      fprintf(stderr, "allocation failed\n");
      exit(1);
    }
    for (i1=0; i1<n; i1++) {
      x1[i1] = (i1+1) % 4 + 1;
      x2[i1] = (i1+5) % 10 + 1;
      x3[i1] = (i1+3) % 6 + 1;
      y[i1] = 0;
    }
  }
  double orio_t_start, orio_t_end, orio_t_total=0;
  int orio_i;
  int reps = REPS;
#ifdef TEST
  reps = 1;
#endif
  orio_t_start = getClock();
  for (orio_i=0; orio_i<reps; orio_i++)
  {
    int i;
    if (n <= 65) {
      /* Small problem size: always sequential (Orio tuning decision). */
      for (i=0; i<n; i++)
        y[i] = x1[i] + x2[i] + x3[i];
    } else {
#ifdef ORIO_SEQ
      /* Sequential variant for all remaining sizes. */
      for (i=0; i<n; i++)
        y[i] = x1[i] + x2[i] + x3[i];
#elif defined(ORIO_PAR)
      if (n <= 835) {
        /* Mid-size: parallelization overhead not worthwhile (Orio tuning). */
        for (i=0; i<n; i++)
          y[i] = x1[i] + x2[i] + x3[i];
      } else {
#pragma omp parallel for
        for (i=0; i<n; i++)
          y[i] = x1[i] + x2[i] + x3[i];
      }
#else
      /* Neither ORIO_SEQ nor ORIO_PAR was selected at build time: no code
         was generated for this problem size. */
      printf("error\n");
      exit(1);
#endif
    }
  }
  orio_t_end = getClock();
  orio_t_total = orio_t_end - orio_t_start;
  orio_t_total = orio_t_total / REPS;
  /* 8 flop-equivalents per element per rep in the original accounting —
     presumably counts loads as well as the two adds; kept as-is. */
  double mflops = (8.0*N)/(orio_t_total*1000000);
#ifdef TEST
  {
    /* Dump the result vector, ten values per row, for correctness checks. */
    int i;
    for (i=0; i<=n-1; i++) {
      if (i%10 == 0)
        printf("\n");
      printf("%f ",y[i]);
    }
  }
#else
  printf("%f\t%f\n", orio_t_total, mflops);
#endif
  {
    int result = (int) y[0];
    free(x3);
    free(x2);
    free(x1);
    free(y);
    return result;
  }
}
|
if-1.c | /* { dg-do compile } */
/* Compile-only test body: exercises the OpenMP `if` clause, both in its
   plain form and with a directive-name modifier, on every construct that
   accepts one.  Each directive pair below repeats the same construct with
   and without the modifier. */
void
foo (int a, int b, int *p, int *q)
{
int i;
/* parallel: plain and parallel-modified if clauses */
#pragma omp parallel if (a)
;
#pragma omp parallel if (parallel:a)
;
/* combined parallel-for-simd: modifier names the parallel part */
#pragma omp parallel for simd if (a)
for (i = 0; i < 16; i++)
;
#pragma omp parallel for simd if (parallel : a)
for (i = 0; i < 16; i++)
;
/* task and taskloop */
#pragma omp task if (a)
;
#pragma omp task if (task: a)
;
#pragma omp taskloop if (a)
for (i = 0; i < 16; i++)
;
#pragma omp taskloop if (taskloop : a)
for (i = 0; i < 16; i++)
;
/* target constructs, including two modifiers on one combined directive */
#pragma omp target if (a)
;
#pragma omp target if (target: a)
;
#pragma omp target teams distribute parallel for simd if (a)
for (i = 0; i < 16; i++)
;
#pragma omp target teams distribute parallel for simd if (parallel : a) if (target: b)
for (i = 0; i < 16; i++)
;
/* target data / enter data / exit data / update with multi-word modifiers */
#pragma omp target data if (a) map (p[0:2])
;
#pragma omp target data if (target data: a) map (p[0:2])
;
#pragma omp target enter data if (a) map (to: p[0:2])
#pragma omp target enter data if (target enter data: a) map (to: p[0:2])
#pragma omp target exit data if (a) map (from: p[0:2])
#pragma omp target exit data if (target exit data: a) map (from: p[0:2])
#pragma omp target update if (a) to (q[0:3])
#pragma omp target update if (target update:a) to (q[0:3])
}
|
GB_unop__minv_bool_bool.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__minv_bool_bool)
// op(A') function: GB (_unop_tran__minv_bool_bool)
// C type: bool
// A type: bool
// cast: ;
// unaryop: cij = true
#define GB_ATYPE \
bool
#define GB_CTYPE \
bool
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
;
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = true ;
// casting
#define GB_CAST(z, aij) \
; ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
; ; \
/* Cx [pC] = op (cast (aij)) */ \
; ; \
Cx [pC] = true ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_BOOL)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx = minv (Ax) for bool: the boolean multiplicative inverse is the
// constant true (see GB_OP above: z = true), so no values are read from Ax.
// The stray "; ;" statements are the expanded no-op GB_GETA / GB_CAST
// macros of this type combination.
GrB_Info GB (_unop_apply__minv_bool_bool)
(
bool *Cx, // Cx and Ax may be aliased
const bool *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
// TODO: if OP is ONE and uniform-valued matrices are exploited, then
// do this in O(1) time
if (Ab == NULL)
{
// dense / sparse / hypersparse: every entry is present
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
GB_memcpy (Cx, Ax, anz * sizeof (bool), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
; ;
; ;
Cx [p] = true ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
; ;
; ;
Cx [p] = true ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = minv (A') for bool: transpose and apply the unary operator.  The
// actual transpose kernel is the shared template GB_unop_transpose.c,
// instantiated here via the GB_* macros defined at the top of this file.
GrB_Info GB (_unop_tran__minv_bool_bool)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
shear.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% SSSSS H H EEEEE AAA RRRR %
% SS H H E A A R R %
% SSS HHHHH EEE AAAAA RRRR %
% SS H H E A A R R %
% SSSSS H H EEEEE A A R R %
% %
% %
% MagickCore Methods to Shear or Rotate an Image by an Arbitrary Angle %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% The XShearImage() and YShearImage() methods are based on the paper "A Fast
% Algorithm for General Raster Rotation" by Alan W. Paeth, Graphics
% Interface '86 (Vancouver). ShearRotateImage() is adapted from a similar
% method based on the Paeth paper written by Michael Halle of the Spatial
% Imaging Group, MIT Media Lab.
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/channel.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/decorate.h"
#include "MagickCore/distort.h"
#include "MagickCore/draw.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/geometry.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/matrix.h"
#include "MagickCore/memory_.h"
#include "MagickCore/list.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/nt-base-private.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/quantum.h"
#include "MagickCore/resource_.h"
#include "MagickCore/shear.h"
#include "MagickCore/statistic.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/threshold.h"
#include "MagickCore/transform.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C r o p T o F i t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CropToFitImage() crops the sheared image as determined by the bounding box
% as defined by width and height and shearing angles.
%
% The format of the CropToFitImage method is:
%
% MagickBooleanType CropToFitImage(Image **image,
% const double x_shear,const double y_shear,
% const double width,const double height,
% const MagickBooleanType rotate,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o x_shear, y_shear, width, height: Defines a region of the image to crop.
%
% o exception: return any errors or warnings in this structure.
%
*/
/* Crop the sheared image *image in place to the bounding box obtained by
   shearing a width x height rectangle centered on the image.  Replaces
   *image with the cropped image (restoring the original page geometry) and
   returns MagickTrue, or returns MagickFalse if the crop fails. */
static MagickBooleanType CropToFitImage(Image **image,
const double x_shear,const double y_shear,
const double width,const double height,
const MagickBooleanType rotate,ExceptionInfo *exception)
{
Image
*crop_image;
PointInfo
extent[4],
min,
max;
RectangleInfo
geometry,
page;
register ssize_t
i;
/*
Calculate the rotated image size.
*/
/* Corners of the width x height box, centered on the origin. */
extent[0].x=(double) (-width/2.0);
extent[0].y=(double) (-height/2.0);
extent[1].x=(double) width/2.0;
extent[1].y=(double) (-height/2.0);
extent[2].x=(double) (-width/2.0);
extent[2].y=(double) height/2.0;
extent[3].x=(double) width/2.0;
extent[3].y=(double) height/2.0;
/* Shear each corner (x, then y using the updated x — intentional Paeth
   shear composition; a third x-shear applies for the rotate case) and
   translate to image-centered coordinates. */
for (i=0; i < 4; i++)
{
extent[i].x+=x_shear*extent[i].y;
extent[i].y+=y_shear*extent[i].x;
if (rotate != MagickFalse)
extent[i].x+=x_shear*extent[i].y;
extent[i].x+=(double) (*image)->columns/2.0;
extent[i].y+=(double) (*image)->rows/2.0;
}
/* Axis-aligned bounding box of the sheared corners. */
min=extent[0];
max=extent[0];
for (i=1; i < 4; i++)
{
if (min.x > extent[i].x)
min.x=extent[i].x;
if (min.y > extent[i].y)
min.y=extent[i].y;
if (max.x < extent[i].x)
max.x=extent[i].x;
if (max.y < extent[i].y)
max.y=extent[i].y;
}
/* Round the box to pixels, crop with a neutral page, then restore the
   original page geometry on the result. */
geometry.x=(ssize_t) ceil(min.x-0.5);
geometry.y=(ssize_t) ceil(min.y-0.5);
geometry.width=(size_t) floor(max.x-min.x+0.5);
geometry.height=(size_t) floor(max.y-min.y+0.5);
page=(*image)->page;
(void) ParseAbsoluteGeometry("0x0+0+0",&(*image)->page);
crop_image=CropImage(*image,&geometry,exception);
if (crop_image == (Image *) NULL)
return(MagickFalse);
crop_image->page=page;
*image=DestroyImage(*image);
*image=crop_image;
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s k e w I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DeskewImage() removes skew from the image. Skew is an artifact that
% occurs in scanned images because of the camera being misaligned,
% imperfections in the scanning or surface, or simply because the paper was
% not placed completely flat when scanned.
%
% The result will be auto-cropped if the artifact "deskew:auto-crop" is
% defined, while the amount the image is to be deskewed, in degrees is also
% saved as the artifact "deskew:angle".
%
% The format of the DeskewImage method is:
%
% Image *DeskewImage(const Image *image,const double threshold,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o threshold: separate background from foreground.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  RadonProjection() folds the per-byte bit-count matrix into discrete Radon
  projections using log2(columns) "butterfly" passes, then measures the
  strength of each projection as the sum of squared differences between
  vertically adjacent accumulated values.  'sign' selects which half of the
  'projection' array receives the result (-1 writes indices below
  GetMatrixColumns(p)-1, +1 writes at and above it), so the caller can probe
  both skew directions with two invocations.
*/
static void RadonProjection(const Image *image,MatrixInfo *source_matrixs,
  MatrixInfo *destination_matrixs,const ssize_t sign,size_t *projection)
{
  MatrixInfo
    *swap;

  register MatrixInfo
    *p,
    *q;

  register ssize_t
    x;

  size_t
    step;

  /*
    The two matrices ping-pong: each pass reads p and writes q, then the
    pointers are swapped.  The final results therefore live in whichever
    buffer p points to after the loop.
  */
  p=source_matrixs;
  q=destination_matrixs;
  for (step=1; step < GetMatrixColumns(p); step*=2)
  {
    for (x=0; x < (ssize_t) GetMatrixColumns(p); x+=2*(ssize_t) step)
    {
      register ssize_t
        i;

      ssize_t
        y;

      unsigned short
        element,
        neighbor;

      for (i=0; i < (ssize_t) step; i++)
      {
        /*
          Interior rows: combine column x+i with its partner column x+i+step
          shifted down by i (and i+1) rows, producing the two interleaved
          output columns x+2*i and x+2*i+1.  Out-of-range accesses are simply
          skipped (GetMatrixElement/SetMatrixElement return MagickFalse).
        */
        for (y=0; y < (ssize_t) (GetMatrixRows(p)-i-1); y++)
        {
          if (GetMatrixElement(p,x+i,y,&element) == MagickFalse)
            continue;
          if (GetMatrixElement(p,x+i+step,y+i,&neighbor) == MagickFalse)
            continue;
          neighbor+=element;
          if (SetMatrixElement(q,x+2*i,y,&neighbor) == MagickFalse)
            continue;
          if (GetMatrixElement(p,x+i+step,y+i+1,&neighbor) == MagickFalse)
            continue;
          neighbor+=element;
          if (SetMatrixElement(q,x+2*i+1,y,&neighbor) == MagickFalse)
            continue;
        }
        /*
          Second-to-last band: the y+i+1 partner row no longer exists, so the
          odd output column receives the unshifted element alone.
        */
        for ( ; y < (ssize_t) (GetMatrixRows(p)-i); y++)
        {
          if (GetMatrixElement(p,x+i,y,&element) == MagickFalse)
            continue;
          if (GetMatrixElement(p,x+i+step,y+i,&neighbor) == MagickFalse)
            continue;
          neighbor+=element;
          if (SetMatrixElement(q,x+2*i,y,&neighbor) == MagickFalse)
            continue;
          if (SetMatrixElement(q,x+2*i+1,y,&element) == MagickFalse)
            continue;
        }
        /*
          Bottom band: no partner rows at all; copy the element through to
          both output columns.
        */
        for ( ; y < (ssize_t) GetMatrixRows(p); y++)
        {
          if (GetMatrixElement(p,x+i,y,&element) == MagickFalse)
            continue;
          if (SetMatrixElement(q,x+2*i,y,&element) == MagickFalse)
            continue;
          if (SetMatrixElement(q,x+2*i+1,y,&element) == MagickFalse)
            continue;
        }
      }
    }
    swap=p;
    p=q;
    q=swap;
  }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) \
    magick_number_threads(image,image,GetMatrixColumns(p),1)
#endif
  for (x=0; x < (ssize_t) GetMatrixColumns(p); x++)
  {
    register ssize_t
      y;

    size_t
      sum;

    /*
      Projection strength: sum of squared differences of vertically adjacent
      values.  A projection aligned with the text/skew direction yields
      sharper transitions and therefore a larger sum.
    */
    sum=0;
    for (y=0; y < (ssize_t) (GetMatrixRows(p)-1); y++)
    {
      ssize_t
        delta;

      unsigned short
        element,
        neighbor;

      if (GetMatrixElement(p,x,y,&element) == MagickFalse)
        continue;
      if (GetMatrixElement(p,x,y+1,&neighbor) == MagickFalse)
        continue;
      delta=(ssize_t) element-(ssize_t) neighbor;
      sum+=delta*delta;
    }
    projection[GetMatrixColumns(p)+sign*x-1]=sum;
  }
}
/*
  RadonTransform() thresholds the image into a bilevel bitmap, packs it eight
  pixels per byte, replaces each byte by its population count, and runs
  RadonProjection() twice -- once with the bytes stored in reversed order
  (sign -1) and once in forward order (sign +1) -- so that 'projection'
  (of size 2*width-1) covers both positive and negative skew angles.

  Returns MagickFalse on allocation failure or when any pixel row could not
  be read from the cache; MagickTrue otherwise.
*/
static MagickBooleanType RadonTransform(const Image *image,
  const double threshold,size_t *projection,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MatrixInfo
    *destination_matrixs,
    *source_matrixs;

  MagickBooleanType
    status;

  size_t
    count,
    width;

  ssize_t
    j,
    y;

  unsigned char
    c;

  unsigned short
    bits[256];

  /*
    Round the number of bytes per row up to the next power of two so the
    butterfly passes in RadonProjection() divide evenly.
  */
  for (width=1; width < ((image->columns+7)/8); width<<=1) ;
  source_matrixs=AcquireMatrixInfo(width,image->rows,sizeof(unsigned short),
    exception);
  destination_matrixs=AcquireMatrixInfo(width,image->rows,
    sizeof(unsigned short),exception);
  if ((source_matrixs == (MatrixInfo *) NULL) ||
      (destination_matrixs == (MatrixInfo *) NULL))
    {
      if (destination_matrixs != (MatrixInfo *) NULL)
        destination_matrixs=DestroyMatrixInfo(destination_matrixs);
      if (source_matrixs != (MatrixInfo *) NULL)
        source_matrixs=DestroyMatrixInfo(source_matrixs);
      return(MagickFalse);
    }
  if (NullMatrix(source_matrixs) == MagickFalse)
    {
      destination_matrixs=DestroyMatrixInfo(destination_matrixs);
      source_matrixs=DestroyMatrixInfo(source_matrixs);
      return(MagickFalse);
    }
  /*
    Precompute a byte population-count (number of set bits) lookup table.
  */
  for (j=0; j < 256; j++)
  {
    c=(unsigned char) j;
    for (count=0; c != 0; c>>=1)
      count+=c & 0x01;
    bits[j]=(unsigned short) count;
  }
  status=MagickTrue;
  image_view=AcquireVirtualCacheView(image,exception);
  /*
    First pass: pack the thresholded bitmap with bytes stored right-to-left
    (index i counts down), feeding the sign=-1 projection.
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      i,
      x;

    size_t
      bit,
      byte;

    unsigned short
      value;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    bit=0;
    byte=0;
    i=(ssize_t) (image->columns+7)/8;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      byte<<=1;
      /*
        A pixel darker than the threshold in any channel is foreground.
      */
      if (((MagickRealType) GetPixelRed(image,p) < threshold) ||
          ((MagickRealType) GetPixelGreen(image,p) < threshold) ||
          ((MagickRealType) GetPixelBlue(image,p) < threshold))
        byte|=0x01;
      bit++;
      if (bit == 8)
        {
          value=bits[byte];
          (void) SetMatrixElement(source_matrixs,--i,y,&value);
          bit=0;
          byte=0;
        }
      p+=GetPixelChannels(image);
    }
    if (bit != 0)
      {
        byte<<=(8-bit);
        value=bits[byte];
        (void) SetMatrixElement(source_matrixs,--i,y,&value);
      }
  }
  RadonProjection(image,source_matrixs,destination_matrixs,-1,projection);
  (void) NullMatrix(source_matrixs);
  /*
    Second pass: same packing, but bytes stored left-to-right (index i counts
    up), feeding the sign=+1 projection.
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      i,
      x;

    size_t
      bit,
      byte;

    unsigned short
      value;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    bit=0;
    byte=0;
    i=0;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      byte<<=1;
      if (((MagickRealType) GetPixelRed(image,p) < threshold) ||
          ((MagickRealType) GetPixelGreen(image,p) < threshold) ||
          ((MagickRealType) GetPixelBlue(image,p) < threshold))
        byte|=0x01;
      bit++;
      if (bit == 8)
        {
          value=bits[byte];
          (void) SetMatrixElement(source_matrixs,i++,y,&value);
          bit=0;
          byte=0;
        }
      p+=GetPixelChannels(image);
    }
    if (bit != 0)
      {
        byte<<=(8-bit);
        value=bits[byte];
        (void) SetMatrixElement(source_matrixs,i++,y,&value);
      }
  }
  RadonProjection(image,source_matrixs,destination_matrixs,1,projection);
  image_view=DestroyCacheView(image_view);
  destination_matrixs=DestroyMatrixInfo(destination_matrixs);
  source_matrixs=DestroyMatrixInfo(source_matrixs);
  /*
    Bug fix: propagate pixel-cache failures.  The original returned
    MagickTrue unconditionally, so callers never learned that 'projection'
    may be based on incomplete data.
  */
  return(status);
}
/*
  GetImageBackgroundColor() estimates the image background color by averaging
  the pixels in a border frame 'offset' pixels deep around the image edge and
  stores the result in image->background_color.  A non-positive offset is a
  no-op.
*/
static void GetImageBackgroundColor(Image *image,const ssize_t offset,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  PixelInfo
    background;

  double
    count;

  ssize_t
    y;

  /*
    Compute average background color.
  */
  if (offset <= 0)
    return;
  GetPixelInfo(image,&background);
  count=0.0;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    /*
      Interior rows contribute only their left/right margins; rows fully in
      the top/bottom margin contribute their border columns as well.
    */
    if ((y >= offset) && (y < ((ssize_t) image->rows-offset)))
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      continue;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((x >= offset) && (x < ((ssize_t) image->columns-offset)))
        {
          /*
            Bug fix: advance the pixel pointer even for skipped interior
            columns.  The original 'continue'd without advancing p, so the
            right-margin samples were read from the wrong (left-side) pixel
            positions.
          */
          p+=GetPixelChannels(image);
          continue;
        }
      background.red+=QuantumScale*GetPixelRed(image,p);
      background.green+=QuantumScale*GetPixelGreen(image,p);
      background.blue+=QuantumScale*GetPixelBlue(image,p);
      if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
        background.alpha+=QuantumScale*GetPixelAlpha(image,p);
      count++;
      p+=GetPixelChannels(image);
    }
  }
  image_view=DestroyCacheView(image_view);
  if (count == 0.0)
    return;  /* no border pixels readable; leave background_color untouched */
  image->background_color.red=(double) ClampToQuantum(QuantumRange*
    background.red/count);
  image->background_color.green=(double) ClampToQuantum(QuantumRange*
    background.green/count);
  image->background_color.blue=(double) ClampToQuantum(QuantumRange*
    background.blue/count);
  if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
    image->background_color.alpha=(double) ClampToQuantum(QuantumRange*
      background.alpha/count);
}
MagickExport Image *DeskewImage(const Image *image,const double threshold,
  ExceptionInfo *exception)
{
  AffineMatrix
    affine_matrix;

  const char
    *artifact;

  double
    degrees;

  Image
    *clone_image,
    *crop_image,
    *deskew_image,
    *median_image;

  MagickBooleanType
    status;

  RectangleInfo
    geometry;

  register ssize_t
    i;

  size_t
    max_projection,
    *projection,
    width;

  ssize_t
    skew;

  /*
    Compute deskew angle.  'projection' holds 2*width-1 Radon projection
    strengths; the strongest one identifies the skew.
  */
  for (width=1; width < ((image->columns+7)/8); width<<=1) ;
  projection=(size_t *) AcquireQuantumMemory((size_t) (2*width-1),
    sizeof(*projection));
  if (projection == (size_t *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  status=RadonTransform(image,threshold,projection,exception);
  if (status == MagickFalse)
    {
      projection=(size_t *) RelinquishMagickMemory(projection);
      /*
        NOTE(review): RadonTransform() can also fail on a pixel-cache read,
        so "MemoryAllocationFailed" may mislabel the actual error -- confirm
        whether a cache-error exception would be more accurate here.
      */
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Locate the strongest projection; its index (relative to 'width') gives
    the skew in eighth-of-a-byte units.
  */
  max_projection=0;
  skew=0;
  for (i=0; i < (ssize_t) (2*width-1); i++)
  {
    if (projection[i] > max_projection)
      {
        skew=i-(ssize_t) width+1;
        max_projection=projection[i];
      }
  }
  projection=(size_t *) RelinquishMagickMemory(projection);
  degrees=RadiansToDegrees(-atan((double) skew/width/8));
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TransformEvent,GetMagickModule(),
      "  Deskew angle: %g",degrees);
  /*
    Deskew image: rotate by -skew via an affine transform, recording the
    detected angle in the "deskew:angle" artifact.
  */
  clone_image=CloneImage(image,0,0,MagickTrue,exception);
  if (clone_image == (Image *) NULL)
    return((Image *) NULL);
  {
    char
      angle[MagickPathExtent];

    (void) FormatLocaleString(angle,MagickPathExtent,"%.20g",degrees);
    (void) SetImageArtifact(clone_image,"deskew:angle",angle);
  }
  (void) SetImageVirtualPixelMethod(clone_image,BackgroundVirtualPixelMethod,
    exception);
  affine_matrix.sx=cos(DegreesToRadians(fmod((double) degrees,360.0)));
  affine_matrix.rx=sin(DegreesToRadians(fmod((double) degrees,360.0)));
  affine_matrix.ry=(-sin(DegreesToRadians(fmod((double) degrees,360.0))));
  affine_matrix.sy=cos(DegreesToRadians(fmod((double) degrees,360.0)));
  affine_matrix.tx=0.0;
  affine_matrix.ty=0.0;
  artifact=GetImageArtifact(image,"deskew:auto-crop");
  if (IsStringTrue(artifact) == MagickFalse)
    {
      /*
        No auto-crop requested: return the rotated image as-is.
      */
      deskew_image=AffineTransformImage(clone_image,&affine_matrix,exception);
      clone_image=DestroyImage(clone_image);
      return(deskew_image);
    }
  /*
    Auto-crop image: sample the border (artifact value = border depth) for
    the background color, rotate, median-filter to suppress noise, and crop
    to the median image's bounding box.
  */
  GetImageBackgroundColor(clone_image,(ssize_t) StringToLong(artifact),
    exception);
  deskew_image=AffineTransformImage(clone_image,&affine_matrix,exception);
  clone_image=DestroyImage(clone_image);
  if (deskew_image == (Image *) NULL)
    return((Image *) NULL);
  median_image=StatisticImage(deskew_image,MedianStatistic,3,3,exception);
  if (median_image == (Image *) NULL)
    {
      deskew_image=DestroyImage(deskew_image);
      return((Image *) NULL);
    }
  geometry=GetImageBoundingBox(median_image,exception);
  median_image=DestroyImage(median_image);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TransformEvent,GetMagickModule(),"  Deskew geometry: "
      "%.20gx%.20g%+.20g%+.20g",(double) geometry.width,(double)
      geometry.height,(double) geometry.x,(double) geometry.y);
  crop_image=CropImage(deskew_image,&geometry,exception);
  deskew_image=DestroyImage(deskew_image);
  return(crop_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I n t e g r a l R o t a t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IntegralRotateImage() rotates the image an integral of 90 degrees. It
% allocates the memory necessary for the new Image structure and returns a
% pointer to the rotated image.
%
% The format of the IntegralRotateImage method is:
%
% Image *IntegralRotateImage(const Image *image,size_t rotations,
% ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o rotations: Specifies the number of 90 degree rotations.
%
*/
MagickExport Image *IntegralRotateImage(const Image *image,size_t rotations,
  ExceptionInfo *exception)
{
#define RotateImageTag  "Rotate/Image"

  CacheView
    *image_view,
    *rotate_view;

  Image
    *rotate_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RectangleInfo
    page;

  /*
    Initialize rotated image attributes.  0 and 180 degree rotations keep the
    image dimensions; 90 and 270 swap columns and rows.
  */
  assert(image != (Image *) NULL);
  page=image->page;
  rotations%=4;
  switch (rotations)
  {
    case 0:
    {
      rotate_image=CloneImage(image,0,0,MagickTrue,exception);
      break;
    }
    case 2:
    {
      rotate_image=CloneImage(image,image->columns,image->rows,MagickTrue,
        exception);
      break;
    }
    case 1:
    case 3:
    {
      rotate_image=CloneImage(image,image->rows,image->columns,MagickTrue,
        exception);
      break;
    }
  }
  if (rotate_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Integral rotate the image.
  */
  status=MagickTrue;
  progress=0;
  if (rotations != 0)
    {
      image_view=AcquireVirtualCacheView(image,exception);
      rotate_view=AcquireAuthenticCacheView(rotate_image,exception);
    }
  switch (rotations)
  {
    case 1:
    {
      size_t
        tile_height,
        tile_width;

      ssize_t
        tile_y;

      /*
        Rotate 90 degrees: copy row-major tiles of the source into columns of
        the destination.  Tiling keeps the cache access pattern local.
      */
      GetPixelCacheTileSize(image,&tile_width,&tile_height);
      tile_width=image->columns;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static) shared(status) \
        magick_number_threads(image,rotate_image,image->rows/tile_height,1)
#endif
      for (tile_y=0; tile_y < (ssize_t) image->rows; tile_y+=(ssize_t) tile_height)
      {
        register ssize_t
          tile_x;

        if (status == MagickFalse)
          continue;
        tile_x=0;
        for ( ; tile_x < (ssize_t) image->columns; tile_x+=(ssize_t) tile_width)
        {
          MagickBooleanType
            sync;

          register const Quantum
            *magick_restrict p;

          register Quantum
            *magick_restrict q;

          register ssize_t
            y;

          size_t
            height,
            width;

          /*
            Clip the tile to the image boundary.
          */
          width=tile_width;
          if ((tile_x+(ssize_t) tile_width) > (ssize_t) image->columns)
            width=(size_t) (tile_width-(tile_x+tile_width-image->columns));
          height=tile_height;
          if ((tile_y+(ssize_t) tile_height) > (ssize_t) image->rows)
            height=(size_t) (tile_height-(tile_y+tile_height-image->rows));
          p=GetCacheViewVirtualPixels(image_view,tile_x,tile_y,width,height,
            exception);
          if (p == (const Quantum *) NULL)
            {
              status=MagickFalse;
              break;
            }
          for (y=0; y < (ssize_t) width; y++)
          {
            register const Quantum
              *magick_restrict tile_pixels;

            register ssize_t
              x;

            if (status == MagickFalse)
              continue;
            q=QueueCacheViewAuthenticPixels(rotate_view,(ssize_t)
              (rotate_image->columns-(tile_y+height)),y+tile_x,height,1,
              exception);
            if (q == (Quantum *) NULL)
              {
                status=MagickFalse;
                continue;
              }
            /*
              Walk the tile column bottom-to-top to emit one destination row.
            */
            tile_pixels=p+((height-1)*width+y)*GetPixelChannels(image);
            for (x=0; x < (ssize_t) height; x++)
            {
              register ssize_t
                i;

              for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
              {
                PixelChannel channel = GetPixelChannelChannel(image,i);
                PixelTrait traits = GetPixelChannelTraits(image,channel);
                PixelTrait rotate_traits = GetPixelChannelTraits(rotate_image,
                  channel);

                if ((traits == UndefinedPixelTrait) ||
                    (rotate_traits == UndefinedPixelTrait))
                  continue;
                SetPixelChannel(rotate_image,channel,tile_pixels[i],q);
              }
              tile_pixels-=width*GetPixelChannels(image);
              q+=GetPixelChannels(rotate_image);
            }
            /*
              Consistency/race fix: serialize the sync on the shared rotate
              view, matching the 270-degree case below.
            */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
            #pragma omp critical (MagickCore_IntegralRotateImage)
#endif
            sync=SyncCacheViewAuthenticPixels(rotate_view,exception);
            if (sync == MagickFalse)
              status=MagickFalse;
          }
        }
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

            proceed=SetImageProgress(image,RotateImageTag,progress+=tile_height,
              image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      (void) SetImageProgress(image,RotateImageTag,(MagickOffsetType)
        image->rows-1,image->rows);
      /*
        Rotate the page geometry along with the pixels.
      */
      Swap(page.width,page.height);
      Swap(page.x,page.y);
      if (page.width != 0)
        page.x=(ssize_t) (page.width-rotate_image->columns-page.x);
      break;
    }
    case 2:
    {
      register ssize_t
        y;

      /*
        Rotate 180 degrees: copy each source row reversed into the mirrored
        destination row.
      */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static) shared(status) \
        magick_number_threads(image,rotate_image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        MagickBooleanType
          sync;

        register const Quantum
          *magick_restrict p;

        register Quantum
          *magick_restrict q;

        register ssize_t
          x;

        if (status == MagickFalse)
          continue;
        p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
        q=QueueCacheViewAuthenticPixels(rotate_view,0,(ssize_t) (image->rows-y-
          1),image->columns,1,exception);
        if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
          {
            status=MagickFalse;
            continue;
          }
        q+=GetPixelChannels(rotate_image)*image->columns;
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          register ssize_t
            i;

          q-=GetPixelChannels(rotate_image);
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            PixelTrait rotate_traits = GetPixelChannelTraits(rotate_image,
              channel);

            if ((traits == UndefinedPixelTrait) ||
                (rotate_traits == UndefinedPixelTrait))
              continue;
            SetPixelChannel(rotate_image,channel,p[i],q);
          }
          p+=GetPixelChannels(image);
        }
        sync=SyncCacheViewAuthenticPixels(rotate_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

            proceed=SetImageProgress(image,RotateImageTag,progress++,
              image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      (void) SetImageProgress(image,RotateImageTag,(MagickOffsetType)
        image->rows-1,image->rows);
      if (page.width != 0)
        page.x=(ssize_t) (page.width-rotate_image->columns-page.x);
      if (page.height != 0)
        page.y=(ssize_t) (page.height-rotate_image->rows-page.y);
      break;
    }
    case 3:
    {
      size_t
        tile_height,
        tile_width;

      ssize_t
        tile_y;

      /*
        Rotate 270 degrees: tile-based, mirror of the 90-degree case.
      */
      GetPixelCacheTileSize(image,&tile_width,&tile_height);
      tile_width=image->columns;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static) shared(status) \
        magick_number_threads(image,rotate_image,image->rows/tile_height,1)
#endif
      for (tile_y=0; tile_y < (ssize_t) image->rows; tile_y+=(ssize_t) tile_height)
      {
        register ssize_t
          tile_x;

        if (status == MagickFalse)
          continue;
        tile_x=0;
        for ( ; tile_x < (ssize_t) image->columns; tile_x+=(ssize_t) tile_width)
        {
          MagickBooleanType
            sync;

          register const Quantum
            *magick_restrict p;

          register Quantum
            *magick_restrict q;

          register ssize_t
            y;

          size_t
            height,
            width;

          /*
            Clip the tile to the image boundary.
          */
          width=tile_width;
          if ((tile_x+(ssize_t) tile_width) > (ssize_t) image->columns)
            width=(size_t) (tile_width-(tile_x+tile_width-image->columns));
          height=tile_height;
          if ((tile_y+(ssize_t) tile_height) > (ssize_t) image->rows)
            height=(size_t) (tile_height-(tile_y+tile_height-image->rows));
          p=GetCacheViewVirtualPixels(image_view,tile_x,tile_y,width,height,
            exception);
          if (p == (const Quantum *) NULL)
            {
              status=MagickFalse;
              break;
            }
          for (y=0; y < (ssize_t) width; y++)
          {
            register const Quantum
              *magick_restrict tile_pixels;

            register ssize_t
              x;

            if (status == MagickFalse)
              continue;
            q=QueueCacheViewAuthenticPixels(rotate_view,tile_y,(ssize_t) (y+
              rotate_image->rows-(tile_x+width)),height,1,exception);
            if (q == (Quantum *) NULL)
              {
                status=MagickFalse;
                continue;
              }
            /*
              Walk the tile column top-to-bottom to emit one destination row.
            */
            tile_pixels=p+((width-1)-y)*GetPixelChannels(image);
            for (x=0; x < (ssize_t) height; x++)
            {
              register ssize_t
                i;

              for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
              {
                PixelChannel channel = GetPixelChannelChannel(image,i);
                PixelTrait traits = GetPixelChannelTraits(image,channel);
                PixelTrait rotate_traits = GetPixelChannelTraits(rotate_image,
                  channel);

                if ((traits == UndefinedPixelTrait) ||
                    (rotate_traits == UndefinedPixelTrait))
                  continue;
                SetPixelChannel(rotate_image,channel,tile_pixels[i],q);
              }
              tile_pixels+=width*GetPixelChannels(image);
              q+=GetPixelChannels(rotate_image);
            }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
            #pragma omp critical (MagickCore_IntegralRotateImage)
#endif
            sync=SyncCacheViewAuthenticPixels(rotate_view,exception);
            if (sync == MagickFalse)
              status=MagickFalse;
          }
        }
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

            proceed=SetImageProgress(image,RotateImageTag,progress+=tile_height,
              image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      (void) SetImageProgress(image,RotateImageTag,(MagickOffsetType)
        image->rows-1,image->rows);
      Swap(page.width,page.height);
      Swap(page.x,page.y);
      if (page.height != 0)
        page.y=(ssize_t) (page.height-rotate_image->rows-page.y);
      break;
    }
    default:
      break;
  }
  if (rotations != 0)
    {
      rotate_view=DestroyCacheView(rotate_view);
      image_view=DestroyCacheView(image_view);
    }
  rotate_image->type=image->type;
  rotate_image->page=page;
  if (status == MagickFalse)
    rotate_image=DestroyImage(rotate_image);
  return(rotate_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ X S h e a r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% XShearImage() shears the image in the X direction with a shear angle of
% 'degrees'. Positive angles shear counter-clockwise (right-hand rule), and
% negative angles shear clockwise. Angles are measured relative to a vertical
% Y-axis. X shears will widen an image creating 'empty' triangles on the left
% and right sides of the source image.
%
% The format of the XShearImage method is:
%
% MagickBooleanType XShearImage(Image *image,const double degrees,
% const size_t width,const size_t height,
% const ssize_t x_offset,const ssize_t y_offset,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o degrees: A double representing the shearing angle along the X
% axis.
%
% o width, height, x_offset, y_offset: Defines a region of the image
% to shear.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType XShearImage(Image *image,const double degrees,
  const size_t width,const size_t height,const ssize_t x_offset,
  const ssize_t y_offset,ExceptionInfo *exception)
{
#define XShearImageTag  "XShear/Image"

  typedef enum
  {
    LEFT,
    RIGHT
  } ShearDirection;

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelInfo
    background;

  ssize_t
    y;

  /*
    X shear image in place (Paeth shear): each row of the region is shifted
    horizontally by an amount proportional to its distance from the vertical
    center, with the fractional part handled by area-weighted blending of
    adjacent pixels.  NOTE(review): despite its name, 'degrees' appears to be
    the shear slope (tan of the angle) supplied by the caller -- confirm
    against the call sites in ShearImage()/ShearRotateImage().
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  status=MagickTrue;
  background=image->background_color;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,height,1)
#endif
  for (y=0; y < (ssize_t) height; y++)
  {
    PixelInfo
      pixel,
      source,
      destination;

    double
      area,
      displacement;

    register Quantum
      *magick_restrict p,
      *magick_restrict q;

    register ssize_t
      i;

    ShearDirection
      direction;

    ssize_t
      step;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewAuthenticPixels(image_view,0,y_offset+y,image->columns,1,
      exception);
    if (p == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    p+=x_offset*GetPixelChannels(image);
    /*
      Displacement grows linearly from the region's vertical midline; its
      sign selects the shear direction.
    */
    displacement=degrees*(double) (y-height/2.0);
    if (displacement == 0.0)
      continue;
    if (displacement > 0.0)
      direction=RIGHT;
    else
      {
        displacement*=(-1.0);
        direction=LEFT;
      }
    /*
      Split the displacement into a whole-pixel step and a fractional 'area'
      used to blend neighboring pixels.
    */
    step=(ssize_t) floor((double) displacement);
    area=(double) (displacement-step);
    step++;
    pixel=background;
    GetPixelInfo(image,&source);
    GetPixelInfo(image,&destination);
    switch (direction)
    {
      case LEFT:
      {
        /*
          Transfer pixels left-to-right (reading ahead of the write cursor),
          then fill the vacated right end with the background color.
        */
        if (step > x_offset)
          break;
        q=p-step*GetPixelChannels(image);
        for (i=0; i < (ssize_t) width; i++)
        {
          if ((x_offset+i) < step)
            {
              /*
                Destination would fall outside the region; just prime the
                blend carry ('pixel') and advance.
              */
              p+=GetPixelChannels(image);
              GetPixelInfoPixel(image,p,&pixel);
              q+=GetPixelChannels(image);
              continue;
            }
          GetPixelInfoPixel(image,p,&source);
          CompositePixelInfoAreaBlend(&pixel,(double) pixel.alpha,
            &source,(double) GetPixelAlpha(image,p),area,&destination);
          SetPixelViaPixelInfo(image,&destination,q);
          GetPixelInfoPixel(image,p,&pixel);
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(image);
        }
        CompositePixelInfoAreaBlend(&pixel,(double) pixel.alpha,
          &background,(double) background.alpha,area,&destination);
        SetPixelViaPixelInfo(image,&destination,q);
        q+=GetPixelChannels(image);
        for (i=0; i < (step-1); i++)
        {
          SetPixelViaPixelInfo(image,&background,q);
          q+=GetPixelChannels(image);
        }
        break;
      }
      case RIGHT:
      {
        /*
          Transfer pixels right-to-left (so un-read pixels are never
          overwritten), then fill the vacated left end with background.
        */
        p+=width*GetPixelChannels(image);
        q=p+step*GetPixelChannels(image);
        for (i=0; i < (ssize_t) width; i++)
        {
          p-=GetPixelChannels(image);
          q-=GetPixelChannels(image);
          if ((size_t) (x_offset+width+step-i) > image->columns)
            continue;
          GetPixelInfoPixel(image,p,&source);
          CompositePixelInfoAreaBlend(&pixel,(double) pixel.alpha,
            &source,(double) GetPixelAlpha(image,p),area,&destination);
          SetPixelViaPixelInfo(image,&destination,q);
          GetPixelInfoPixel(image,p,&pixel);
        }
        CompositePixelInfoAreaBlend(&pixel,(double) pixel.alpha,
          &background,(double) background.alpha,area,&destination);
        q-=GetPixelChannels(image);
        SetPixelViaPixelInfo(image,&destination,q);
        for (i=0; i < (step-1); i++)
        {
          q-=GetPixelChannels(image);
          SetPixelViaPixelInfo(image,&background,q);
        }
        break;
      }
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,XShearImageTag,progress,height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ Y S h e a r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% YShearImage shears the image in the Y direction with a shear angle of
% 'degrees'. Positive angles shear counter-clockwise (right-hand rule), and
% negative angles shear clockwise. Angles are measured relative to a
% horizontal X-axis. Y shears will increase the height of an image creating
% 'empty' triangles on the top and bottom of the source image.
%
% The format of the YShearImage method is:
%
% MagickBooleanType YShearImage(Image *image,const double degrees,
% const size_t width,const size_t height,
% const ssize_t x_offset,const ssize_t y_offset,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o degrees: A double representing the shearing angle along the Y
% axis.
%
% o width, height, x_offset, y_offset: Defines a region of the image
% to shear.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType YShearImage(Image *image,const double degrees,
  const size_t width,const size_t height,const ssize_t x_offset,
  const ssize_t y_offset,ExceptionInfo *exception)
{
#define YShearImageTag  "YShear/Image"

  typedef enum
  {
    UP,
    DOWN
  } ShearDirection;

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelInfo
    background;

  ssize_t
    x;

  /*
    Y Shear image in place (Paeth shear): the vertical analogue of
    XShearImage() -- each column of the region is shifted vertically by an
    amount proportional to its distance from the horizontal center, with the
    fractional part handled by area-weighted blending.  NOTE(review): as in
    XShearImage(), 'degrees' appears to be the shear slope (tan of the
    angle), not an angle in degrees -- confirm against the call sites.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  status=MagickTrue;
  progress=0;
  background=image->background_color;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,width,1)
#endif
  for (x=0; x < (ssize_t) width; x++)
  {
    double
      area,
      displacement;

    PixelInfo
      pixel,
      source,
      destination;

    register Quantum
      *magick_restrict p,
      *magick_restrict q;

    register ssize_t
      i;

    ShearDirection
      direction;

    ssize_t
      step;

    if (status == MagickFalse)
      continue;
    /*
      Fetch one full column; p/q then walk it like a 1-D array.
    */
    p=GetCacheViewAuthenticPixels(image_view,x_offset+x,0,1,image->rows,
      exception);
    if (p == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    p+=y_offset*GetPixelChannels(image);
    displacement=degrees*(double) (x-width/2.0);
    if (displacement == 0.0)
      continue;
    if (displacement > 0.0)
      direction=DOWN;
    else
      {
        displacement*=(-1.0);
        direction=UP;
      }
    /*
      Split the displacement into a whole-pixel step and a fractional 'area'
      used to blend neighboring pixels.
    */
    step=(ssize_t) floor((double) displacement);
    area=(double) (displacement-step);
    step++;
    pixel=background;
    GetPixelInfo(image,&source);
    GetPixelInfo(image,&destination);
    switch (direction)
    {
      case UP:
      {
        /*
          Transfer pixels top-to-bottom (reading ahead of the write cursor),
          then fill the vacated bottom end with the background color.
        */
        if (step > y_offset)
          break;
        q=p-step*GetPixelChannels(image);
        for (i=0; i < (ssize_t) height; i++)
        {
          if ((y_offset+i) < step)
            {
              /*
                Destination would fall outside the region; just prime the
                blend carry ('pixel') and advance.
              */
              p+=GetPixelChannels(image);
              GetPixelInfoPixel(image,p,&pixel);
              q+=GetPixelChannels(image);
              continue;
            }
          GetPixelInfoPixel(image,p,&source);
          CompositePixelInfoAreaBlend(&pixel,(double) pixel.alpha,
            &source,(double) GetPixelAlpha(image,p),area,
            &destination);
          SetPixelViaPixelInfo(image,&destination,q);
          GetPixelInfoPixel(image,p,&pixel);
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(image);
        }
        CompositePixelInfoAreaBlend(&pixel,(double) pixel.alpha,
          &background,(double) background.alpha,area,&destination);
        SetPixelViaPixelInfo(image,&destination,q);
        q+=GetPixelChannels(image);
        for (i=0; i < (step-1); i++)
        {
          SetPixelViaPixelInfo(image,&background,q);
          q+=GetPixelChannels(image);
        }
        break;
      }
      case DOWN:
      {
        /*
          Transfer pixels bottom-to-top (so un-read pixels are never
          overwritten), then fill the vacated top end with background.
        */
        p+=height*GetPixelChannels(image);
        q=p+step*GetPixelChannels(image);
        for (i=0; i < (ssize_t) height; i++)
        {
          p-=GetPixelChannels(image);
          q-=GetPixelChannels(image);
          if ((size_t) (y_offset+height+step-i) > image->rows)
            continue;
          GetPixelInfoPixel(image,p,&source);
          CompositePixelInfoAreaBlend(&pixel,(double) pixel.alpha,
            &source,(double) GetPixelAlpha(image,p),area,
            &destination);
          SetPixelViaPixelInfo(image,&destination,q);
          GetPixelInfoPixel(image,p,&pixel);
        }
        CompositePixelInfoAreaBlend(&pixel,(double) pixel.alpha,
          &background,(double) background.alpha,area,&destination);
        q-=GetPixelChannels(image);
        SetPixelViaPixelInfo(image,&destination,q);
        for (i=0; i < (step-1); i++)
        {
          q-=GetPixelChannels(image);
          SetPixelViaPixelInfo(image,&background,q);
        }
        break;
      }
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        /*
          NOTE(review): progress is reported against image->rows here but
          against 'height' in XShearImage() -- confirm which denominator is
          intended.
        */
        proceed=SetImageProgress(image,YShearImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S h e a r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ShearImage() creates a new image that is a shear_image copy of an existing
% one. Shearing slides one edge of an image along the X or Y axis, creating
% a parallelogram. An X direction shear slides an edge along the X axis,
% while a Y direction shear slides an edge along the Y axis. The amount of
% the shear is controlled by a shear angle. For X direction shears, x_shear
% is measured relative to the Y axis, and similarly, for Y direction shears
% y_shear is measured relative to the X axis. Empty triangles left over from
% shearing the image are filled with the background color defined by member
% 'background_color' of the image. ShearImage() allocates the memory
% necessary for the new Image structure and returns a pointer to the new image.
%
% ShearImage() is based on the paper "A Fast Algorithm for General Raster
% Rotation" by Alan W. Paeth.
%
% The format of the ShearImage method is:
%
% Image *ShearImage(const Image *image,const double x_shear,
% const double y_shear,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o x_shear, y_shear: Specifies the number of degrees to shear the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
 * ShearImage(): returns a newly allocated image sheared by x_shear/y_shear
 * degrees using Paeth's two-pass (X then Y) shear decomposition, or NULL on
 * error (the error is recorded in `exception`).  Empty triangles created by
 * the shear are filled with the image's background color.  The caller owns
 * and must destroy the returned image.
 */
MagickExport Image *ShearImage(const Image *image,const double x_shear,
const double y_shear,ExceptionInfo *exception)
{
Image
*integral_image,
*shear_image;
MagickBooleanType
status;
PointInfo
shear;
RectangleInfo
border_info,
bounds;
/* Validate the public-API contract. */
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
/* Shears of +/-90, +/-270, ... degrees have an infinite tangent: reject. */
if ((x_shear != 0.0) && (fmod(x_shear,90.0) == 0.0))
ThrowImageException(ImageError,"AngleIsDiscontinuous");
if ((y_shear != 0.0) && (fmod(y_shear,90.0) == 0.0))
ThrowImageException(ImageError,"AngleIsDiscontinuous");
/*
Initialize shear angle.
*/
integral_image=CloneImage(image,0,0,MagickTrue,exception);
if (integral_image == (Image *) NULL)
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
shear.x=(-tan(DegreesToRadians(fmod(x_shear,360.0))));
shear.y=tan(DegreesToRadians(fmod(y_shear,360.0)));
/* Degenerate shear: the clone is already the result. */
if ((shear.x == 0.0) && (shear.y == 0.0))
return(integral_image);
if (SetImageStorageClass(integral_image,DirectClass,exception) == MagickFalse)
{
/* Storage-class change failed: free the clone and return NULL. */
integral_image=DestroyImage(integral_image);
return(integral_image);
}
if (integral_image->alpha_trait == UndefinedPixelTrait)
(void) SetImageAlphaChannel(integral_image,OpaqueAlphaChannel,exception);
/*
Compute image size.  The sheared parallelogram needs extra width/offsets
proportional to the shear magnitudes.
*/
bounds.width=image->columns+(ssize_t) floor(fabs(shear.x)*image->rows+0.5);
bounds.x=(ssize_t) ceil((double) image->columns+((fabs(shear.x)*image->rows)-
image->columns)/2.0-0.5);
bounds.y=(ssize_t) ceil((double) image->rows+((fabs(shear.y)*bounds.width)-
image->rows)/2.0-0.5);
/*
Surround image with border so sheared pixels have room to land.
*/
integral_image->border_color=integral_image->background_color;
integral_image->compose=CopyCompositeOp;
border_info.width=(size_t) bounds.x;
border_info.height=(size_t) bounds.y;
shear_image=BorderImage(integral_image,&border_info,image->compose,exception);
integral_image=DestroyImage(integral_image);
if (shear_image == (Image *) NULL)
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
/*
Shear the image: X pass first, then Y pass.
*/
if (shear_image->alpha_trait == UndefinedPixelTrait)
(void) SetImageAlphaChannel(shear_image,OpaqueAlphaChannel,exception);
status=XShearImage(shear_image,shear.x,image->columns,image->rows,bounds.x,
(ssize_t) (shear_image->rows-image->rows)/2,exception);
if (status == MagickFalse)
{
shear_image=DestroyImage(shear_image);
return((Image *) NULL);
}
status=YShearImage(shear_image,shear.y,bounds.width,image->rows,(ssize_t)
(shear_image->columns-bounds.width)/2,bounds.y,exception);
if (status == MagickFalse)
{
shear_image=DestroyImage(shear_image);
return((Image *) NULL);
}
/* Trim the canvas back to the sheared result's bounding box. */
status=CropToFitImage(&shear_image,shear.x,shear.y,(MagickRealType)
image->columns,(MagickRealType) image->rows,MagickFalse,exception);
shear_image->alpha_trait=image->alpha_trait;
shear_image->compose=image->compose;
shear_image->page.width=0;
shear_image->page.height=0;
if (status == MagickFalse)
shear_image=DestroyImage(shear_image);
return(shear_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S h e a r R o t a t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ShearRotateImage() creates a new image that is a rotated copy of an existing
% one. Positive angles rotate counter-clockwise (right-hand rule), while
% negative angles rotate clockwise. Rotated images are usually larger than
% the originals and have 'empty' triangular corners. Empty
% triangles left over from shearing the image are filled with the background
% color defined by member 'background_color' of the image. ShearRotateImage
% allocates the memory necessary for the new Image structure and returns a
% pointer to the new image.
%
% ShearRotateImage() is based on the paper "A Fast Algorithm for General
% Raster Rotation" by Alan W. Paeth. ShearRotateImage is adapted from a
% similar method based on the Paeth paper written by Michael Halle of the
% Spatial Imaging Group, MIT Media Lab.
%
% The format of the ShearRotateImage method is:
%
% Image *ShearRotateImage(const Image *image,const double degrees,
% ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o degrees: Specifies the number of degrees to rotate the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
 * ShearRotateImage(): returns a new image rotated by `degrees`
 * (counter-clockwise for positive angles) using the Paeth three-shear
 * decomposition (X shear, Y shear, X shear) on top of an integral
 * quarter-turn rotation.  Returns NULL on error, with details recorded in
 * `exception`.  The caller owns and must destroy the returned image.
 */
MagickExport Image *ShearRotateImage(const Image *image,const double degrees,
ExceptionInfo *exception)
{
Image
*integral_image,
*rotate_image;
MagickBooleanType
status;
MagickRealType
angle;
PointInfo
shear;
RectangleInfo
border_info,
bounds;
size_t
height,
rotations,
shear_width,
width;
/*
Adjust rotation angle: reduce to the range (-45, 45] plus a whole number
of 90-degree quarter turns handled by IntegralRotateImage().
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
angle=fmod(degrees,360.0);
if (angle < -45.0)
angle+=360.0;
for (rotations=0; angle > 45.0; rotations++)
angle-=90.0;
rotations%=4;
/*
Calculate shear equations for the remaining small-angle rotation.
*/
integral_image=IntegralRotateImage(image,rotations,exception);
if (integral_image == (Image *) NULL)
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
shear.x=(-tan((double) DegreesToRadians(angle)/2.0));
shear.y=sin((double) DegreesToRadians(angle));
/* Residual angle is zero: the quarter-turn image is the result. */
if ((shear.x == 0.0) && (shear.y == 0.0))
return(integral_image);
if (SetImageStorageClass(integral_image,DirectClass,exception) == MagickFalse)
{
/* Storage-class change failed: free the intermediate and return NULL. */
integral_image=DestroyImage(integral_image);
return(integral_image);
}
if (integral_image->alpha_trait == UndefinedPixelTrait)
(void) SetImageAlphaChannel(integral_image,OpaqueAlphaChannel,exception);
/*
Compute maximum bounds for 3 shear operations.
*/
width=integral_image->columns;
height=integral_image->rows;
bounds.width=(size_t) floor(fabs((double) height*shear.x)+width+0.5);
bounds.height=(size_t) floor(fabs((double) bounds.width*shear.y)+height+0.5);
shear_width=(size_t) floor(fabs((double) bounds.height*shear.x)+
bounds.width+0.5);
bounds.x=(ssize_t) floor((double) ((shear_width > bounds.width) ? width :
bounds.width-shear_width+2)/2.0+0.5);
bounds.y=(ssize_t) floor(((double) bounds.height-height+2)/2.0+0.5);
/*
Surround image with a border so rotated pixels have room to land.
*/
integral_image->border_color=integral_image->background_color;
integral_image->compose=CopyCompositeOp;
border_info.width=(size_t) bounds.x;
border_info.height=(size_t) bounds.y;
rotate_image=BorderImage(integral_image,&border_info,image->compose,
exception);
integral_image=DestroyImage(integral_image);
if (rotate_image == (Image *) NULL)
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
/*
Rotate the image: X shear, Y shear, X shear (Paeth decomposition).
*/
status=XShearImage(rotate_image,shear.x,width,height,bounds.x,(ssize_t)
(rotate_image->rows-height)/2,exception);
if (status == MagickFalse)
{
rotate_image=DestroyImage(rotate_image);
return((Image *) NULL);
}
status=YShearImage(rotate_image,shear.y,bounds.width,height,(ssize_t)
(rotate_image->columns-bounds.width)/2,bounds.y,exception);
if (status == MagickFalse)
{
rotate_image=DestroyImage(rotate_image);
return((Image *) NULL);
}
status=XShearImage(rotate_image,shear.x,bounds.width,bounds.height,(ssize_t)
(rotate_image->columns-bounds.width)/2,(ssize_t) (rotate_image->rows-
bounds.height)/2,exception);
if (status == MagickFalse)
{
rotate_image=DestroyImage(rotate_image);
return((Image *) NULL);
}
/* Trim the canvas back to the rotated result's bounding box. */
status=CropToFitImage(&rotate_image,shear.x,shear.y,(MagickRealType) width,
(MagickRealType) height,MagickTrue,exception);
rotate_image->alpha_trait=image->alpha_trait;
rotate_image->compose=image->compose;
rotate_image->page.width=0;
rotate_image->page.height=0;
if (status == MagickFalse)
rotate_image=DestroyImage(rotate_image);
return(rotate_image);
}
|
lbfgsbsolver.h | // CppNumericalSolver
// based on:
// L-BFGS-B: A LIMITED MEMORY ALGORITHM FOR BOUND CONSTRAINED OPTIMIZATION
// Richard H. Byrd, Peihuang Lu, Jorge Nocedal and Ciyou Zhu
#include <iostream>
#include <list>
#include <Eigen/LU>
#include "isolver.h"
#include "../boundedproblem.h"
#include "../linesearch/morethuente.h"
#ifndef LBFGSBSOLVER_H
#define LBFGSBSOLVER_H
namespace cppoptlib {
template<typename TProblem>
class LbfgsbSolver : public ISolver<TProblem, 1> {
public:
using Superclass = ISolver<TProblem, 1>;
using typename Superclass::Scalar;
using typename Superclass::TVector;
using MatrixType = Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic>;
using VariableTVector = Eigen::Matrix<Scalar, Eigen::Dynamic, 1>;
protected:
// workspace matrices
MatrixType W, M;
Scalar theta;
int DIM;
int m_historySize = 5;
/**
 * @brief Returns the pair indices ordered by ascending pair value.
 *
 * The input holds (index, value) pairs; the result lists each pair's
 * first element, reordered so the associated values are ascending.
 *
 * @param v vector of (index, value) pairs; not modified
 * @return the indices, sorted by their values
 */
std::vector<int> sort_indexes(const std::vector< std::pair<int, Scalar> > &v) {
  std::vector<int> order(v.size());
  for (size_t k = 0; k < order.size(); ++k) {
    order[k] = v[k].first;
  }
  std::sort(order.begin(), order.end(), [&v](size_t lhs, size_t rhs) {
    return v[lhs].second < v[rhs].second;
  });
  return order;
}
/**
 * @brief Algorithm CP: computation of the generalized Cauchy point (paper p. 8).
 *
 * Walks the piecewise-linear projected-gradient path, fixing one variable at
 * its bound per breakpoint, until the minimizer of the 1-D quadratic model
 * falls inside the current segment.
 *
 * @param problem  bound-constrained problem (supplies lower/upper bounds)
 * @param x        current iterate
 * @param g        gradient at x
 * @param x_cauchy output: the generalized Cauchy point
 * @param c        output: W^T (x_cauchy - x), reused by the subspace step
 */
void getGeneralizedCauchyPoint(const TProblem &problem, const TVector &x, const TVector &g, TVector &x_cauchy, VariableTVector &c) {
const int DIM = x.rows();
// Given x,l,u,g, and B = \theta I-WMW
// {all t_i} = { (idx,value), ... } -- breakpoint times along -g
// TODO: use "std::set" ?
std::vector<std::pair<int, Scalar> > SetOfT;
// the feasible set is implicitly given by "SetOfT - {t_i==0}"
TVector d = -g;
// n operations: compute the breakpoint time for each coordinate
for (int j = 0; j < DIM; j++) {
if (g(j) == 0) {
// zero gradient component: this coordinate never hits a bound
SetOfT.push_back(std::make_pair(j, std::numeric_limits<Scalar>::max()));
} else {
Scalar tmp = 0;
if (g(j) < 0) {
tmp = (x(j) - problem.upperBound()(j)) / g(j);
} else {
tmp = (x(j) - problem.lowerBound()(j)) / g(j);
}
SetOfT.push_back(std::make_pair(j, tmp));
// already at a bound: freeze the search direction in this coordinate
if (tmp == 0) d(j) = 0;
}
}
// sortedindices [1,0,2] means the minimal element is on the 1-st entry
std::vector<int> sortedIndices = sort_indexes(SetOfT);
x_cauchy = x;
// Initialize
// p := W^Scalar*p
VariableTVector p = (W.transpose() * d); // (2mn operations)
// c := 0
c = VariableTVector::Zero(W.cols());
// f' := g^Scalar*d = -d^Td
Scalar f_prime = -d.dot(d); // (n operations)
// f'' := \theta*d^Scalar*d-d^Scalar*W*M*W^Scalar*d = -\theta*f' - p^Scalar*M*p
Scalar f_doubleprime = (Scalar)(-1.0 * theta) * f_prime - p.dot(M * p); // (O(m^2) operations)
// guard against a non-positive curvature estimate
f_doubleprime = std::max(std::numeric_limits<Scalar>::epsilon(), f_doubleprime);
Scalar f_dp_orig = f_doubleprime;
// \delta t_min := -f'/f''
Scalar dt_min = -f_prime / f_doubleprime;
// t_old := 0
Scalar t_old = 0;
// b := argmin {t_i , t_i >0}  -- first strictly positive breakpoint
int i = 0;
for (int j = 0; j < DIM; j++) {
i = j;
if (SetOfT[sortedIndices[j]].second > 0)
break;
}
int b = sortedIndices[i];
// see below
// t := min{t_i : i in F}
Scalar t = SetOfT[b].second;
// \delta Scalar := t - 0
Scalar dt = t ;
// examination of subsequent segments: advance breakpoint by breakpoint
// while the model minimizer lies beyond the current segment
while ((dt_min >= dt) && (i < DIM)) {
// variable b hits its bound; clamp it there
if (d(b) > 0)
x_cauchy(b) = problem.upperBound()(b);
else if (d(b) < 0)
x_cauchy(b) = problem.lowerBound()(b);
// z_b = x_p^{cp} - x_b
Scalar zb = x_cauchy(b) - x(b);
// c := c +\delta t*p
c += dt * p;
// cache row b of W for the derivative updates below
VariableTVector wbt = W.row(b);
f_prime += dt * f_doubleprime + (Scalar) g(b) * g(b) + (Scalar) theta * g(b) * zb - (Scalar) g(b) *
wbt.transpose() * (M * c);
f_doubleprime += (Scalar) - 1.0 * theta * g(b) * g(b)
- (Scalar) 2.0 * (g(b) * (wbt.dot(M * p)))
- (Scalar) g(b) * g(b) * wbt.transpose() * (M * wbt);
// keep curvature bounded away from zero (relative to its initial value)
f_doubleprime = std::max(std::numeric_limits<Scalar>::epsilon() * f_dp_orig, f_doubleprime);
p += g(b) * wbt.transpose();
d(b) = 0;
dt_min = -f_prime / f_doubleprime;
t_old = t;
++i;
if (i < DIM) {
b = sortedIndices[i];
t = SetOfT[b].second;
dt = t - t_old;
}
}
// step inside the final segment (never backwards)
dt_min = std::max(dt_min, (Scalar)0.0);
t_old += dt_min;
// move every still-free variable to its Cauchy value
#pragma omp parallel for
for (int ii = i; ii < x_cauchy.rows(); ii++) {
x_cauchy(sortedIndices[ii]) = x(sortedIndices[ii]) + t_old * d(sortedIndices[ii]);
}
c += dt_min * p;
}
/**
 * @brief find alpha* = max {a : a <= 1 and l_i-xc_i <= a*d_i <= u_i-xc_i}
 *
 * Computes the longest feasible step length (capped at 1) along the
 * subspace direction `du` restricted to the free variables, so that no
 * free variable leaves its box.
 *
 * @param problem        supplies lower/upper bounds
 * @param x_cp           generalized Cauchy point
 * @param du             search direction over the free variables
 * @param FreeVariables  indices of the free variables (du(i) pairs with
 *                       FreeVariables[i])
 * @return the largest feasible step length in (however small) [..., 1]
 */
Scalar findAlpha(const TProblem &problem, TVector &x_cp, VariableTVector &du, std::vector<int> &FreeVariables) {
  Scalar alphastar = 1;
  const unsigned int n = FreeVariables.size();
  assert(du.rows() == (int) n);
  for (unsigned int i = 0; i < n; i++) {
    if (du(i) > 0) {
      alphastar = std::min(alphastar, (problem.upperBound()(FreeVariables[i]) - x_cp(FreeVariables[i])) / du(i));
    } else if (du(i) < 0) {
      alphastar = std::min(alphastar, (problem.lowerBound()(FreeVariables[i]) - x_cp(FreeVariables[i])) / du(i));
    }
    // du(i) == 0 imposes no constraint.  The previous code divided by zero
    // here; with x_cp strictly inside the box that produced -inf (or NaN)
    // and collapsed alphastar, zeroing out the whole subspace step.
  }
  return alphastar;
}
/**
 * @brief Direct primal method for the subspace problem (unbounded inner solve).
 *
 * Minimizes the quadratic model over the variables left free at the
 * generalized Cauchy point, then scales the step back (via findAlpha) so the
 * result stays inside the box.
 *
 * @param problem     supplies lower/upper bounds
 * @param x_cauchy    generalized Cauchy point
 * @param x           current iterate
 * @param c           W^T (x_cauchy - x) from getGeneralizedCauchyPoint
 * @param g           gradient at x
 * @param SubspaceMin output: the subspace minimizer (feasible)
 */
void SubspaceMinimization(const TProblem &problem, TVector &x_cauchy, TVector &x, VariableTVector &c, TVector &g,
TVector &SubspaceMin) {
Scalar theta_inverse = 1 / theta;
// collect variables NOT fixed at a bound by the Cauchy point
std::vector<int> FreeVariablesIndex;
for (int i = 0; i < x_cauchy.rows(); i++) {
if ((x_cauchy(i) != problem.upperBound()(i)) && (x_cauchy(i) != problem.lowerBound()(i))) {
FreeVariablesIndex.push_back(i);
}
}
const int FreeVarCount = FreeVariablesIndex.size();
// WZ = W^T restricted to the free rows (W^T * Z in the paper)
MatrixType WZ = MatrixType::Zero(W.cols(), FreeVarCount);
for (int i = 0; i < FreeVarCount; i++)
WZ.col(i) = W.row(FreeVariablesIndex[i]);
// reduced gradient of the model at the Cauchy point
TVector rr = (g + theta * (x_cauchy - x) - W * (M * c));
// r=r(FreeVariables);
MatrixType r = MatrixType::Zero(FreeVarCount, 1);
for (int i = 0; i < FreeVarCount; i++)
r.row(i) = rr.row(FreeVariablesIndex[i]);
// STEP 2: "v = w^T*Z*r" and STEP 3: "v = M*v"
VariableTVector v = M * (WZ * r);
// STEP 4: N = 1/theta*W^T*Z*(W^T*Z)^T
MatrixType N = theta_inverse * WZ * WZ.transpose();
// N = I - MN
N = MatrixType::Identity(N.rows(), N.rows()) - M * N;
// STEP: 5
// v = N^{-1}*v  (skip when there are no corrections yet)
if (v.size() > 0)
v = N.lu().solve(v);
// STEP: 6
// HERE IS A MISTAKE IN THE ORIGINAL PAPER!
VariableTVector du = -theta_inverse * r - theta_inverse * theta_inverse * WZ.transpose() * v;
// STEP: 7 -- longest feasible step along du
Scalar alpha_star = findAlpha(problem, x_cauchy, du, FreeVariablesIndex);
// STEP: 8 -- apply the scaled step to the free variables only
VariableTVector dStar = alpha_star * du;
SubspaceMin = x_cauchy;
for (int i = 0; i < FreeVarCount; i++) {
SubspaceMin(FreeVariablesIndex[i]) = SubspaceMin(FreeVariablesIndex[i]) + dStar(i);
}
}
public:
/// Sets the number of (s, y) correction pairs kept for the limited-memory
/// Hessian approximation (default 5).
void setHistorySize(const int hs) { m_historySize = hs; }
/**
 * @brief Runs L-BFGS-B on `problem` starting from `x0`.
 *
 * Iterates: generalized Cauchy point -> subspace minimization ->
 * More-Thuente line search -> limited-memory (W, M, theta) update, until the
 * projected gradient is small, the function value stalls, or the configured
 * stop criteria trigger.  On return x0 holds the final iterate.
 *
 * @param problem bound-constrained problem (value/gradient/bounds/callback)
 * @param x0      in: starting point; out: the solution found
 */
void minimize(TProblem &problem, TVector &x0) {
if(!problem.isValid(x0))
std::cerr << "start with invalid x0" << std::endl;
DIM = x0.rows();
// start with the identity model: B = theta*I, no corrections stored
theta = 1.0;
W = MatrixType::Zero(DIM, 0);
M = MatrixType::Zero(0, 0);
MatrixType yHistory = MatrixType::Zero(DIM, 0);
MatrixType sHistory = MatrixType::Zero(DIM, 0);
TVector x = x0, g = x0;
Scalar f = problem.value(x);
problem.gradient(x, g);
// conv. crit.: infinity norm of the projected gradient step
auto noConvergence =
[&](TVector &x, TVector &g)->bool {
return (((x - g).cwiseMax(problem.lowerBound()).cwiseMin(problem.upperBound()) - x).template lpNorm<Eigen::Infinity>() >= 1e-4);
};
this->m_current.reset();
this->m_status = Status::Continue;
while (problem.callback(this->m_current, x) && noConvergence(x, g) && (this->m_status == Status::Continue)) {
Scalar f_old = f;
TVector x_old = x;
TVector g_old = g;
// STEP 2: compute the cauchy point
TVector CauchyPoint = TVector::Zero(DIM);
VariableTVector c = VariableTVector::Zero(W.cols());
getGeneralizedCauchyPoint(problem, x, g, CauchyPoint, c);
// STEP 3: compute a search direction d_k by the primal method for the sub-problem
TVector SubspaceMin;
SubspaceMinimization(problem, CauchyPoint, x, c, g, SubspaceMin);
// STEP 4: perform linesearch and STEP 5: compute gradient
Scalar alpha_init = 1.0;
const Scalar rate = MoreThuente<TProblem, 1>::linesearch(x, SubspaceMin-x , problem, alpha_init);
// update current guess and function information
x = x - rate*(x-SubspaceMin);
f = problem.value(x);
problem.gradient(x, g);
// prepare for next iteration
TVector newY = g - g_old;
TVector newS = x - x_old;
// STEP 6: curvature condition |s^T y| > eps*||y||^2 guards the update
Scalar test = newS.dot(newY);
test = (test < 0) ? -1.0 * test : test;
if (test > 1e-7 * newY.squaredNorm()) {
if (yHistory.cols() < m_historySize) {
yHistory.conservativeResize(DIM, yHistory.cols() + 1);
sHistory.conservativeResize(DIM, sHistory.cols() + 1);
} else {
// history full: drop the oldest column, shift the rest left
yHistory.leftCols(m_historySize - 1) = yHistory.rightCols(m_historySize - 1).eval();
sHistory.leftCols(m_historySize - 1) = sHistory.rightCols(m_historySize - 1).eval();
}
yHistory.rightCols(1) = newY;
sHistory.rightCols(1) = newS;
// STEP 7: rebuild theta, W = [Y  theta*S] and the middle matrix M
theta = (Scalar)(newY.transpose() * newY) / (newY.transpose() * newS);
W = MatrixType::Zero(yHistory.rows(), yHistory.cols() + sHistory.cols());
W << yHistory, (theta * sHistory);
MatrixType A = sHistory.transpose() * yHistory;
MatrixType L = A.template triangularView<Eigen::StrictlyLower>();
MatrixType MM(A.rows() + L.rows(), A.rows() + L.cols());
MatrixType D = -1 * A.diagonal().asDiagonal();
MM << D, L.transpose(), L, ((sHistory.transpose() * sHistory) * theta);
M = MM.inverse();
}
if (fabs(f_old - f) < 1e-8) {
// successive function values too similar
break;
}
++this->m_current.iterations;
this->m_current.gradNorm = g.norm();
this->m_status = checkConvergence(this->m_stop, this->m_current);
}
x0 = x;
if (this->m_debug > DebugLevel::None) {
std::cout << "Stop status was: " << this->m_status << std::endl;
std::cout << "Stop criteria were: " << std::endl << this->m_stop << std::endl;
std::cout << "Current values are: " << std::endl << this->m_current << std::endl;
}
}
};
}
/* namespace cppoptlib */
#endif /* LBFGSBSOLVER_H_ */
|
hsi_matrix.h | #ifndef HSI_MATRIX_H
#define HSI_MATRIX_H
#include <stdio.h>
#include <math.h>
#include <float.h> /* FLT_EPSILON, used by rgb2HSIMatrixVector */
#ifndef RGB_MAX
#define RGB_MAX 255.0
#endif
#ifndef INTENSITY_ALPHA
#define INTENSITY_ALPHA 10
#endif
#ifndef INTENSITY_FACTOR
#define INTENSITY_FACTOR ((T) RGB_MAX / INTENSITY_ALPHA)
#endif
#ifndef HUE_TRESHOLD
#define HUE_TRESHOLD 0.5
#endif
#ifndef PI
#define PI 3.141592653589793
#endif
/*
 * Represents an HSI (hue, saturation, intensity) pixel value. Conceptually it
 * stands for a 2x2 symmetric matrix with the hue value as the top-left
 * element, the saturation value on both antidiagonal elements and the
 * intensity value as the bottom-right element; only the three distinct values
 * are stored.
 */
template<typename T>
struct HSIMatrix {
T h; // hue; rgb2HSIMatrixVector normalizes it to [0, 1)
T s; // saturation
T i; // intensity; rgb2HSIMatrixVector normalizes it to [0, 1]
/*
 * Default constructor of HSIMatrix object. Each element is set to 0.
 */
__host__ __device__ HSIMatrix() : HSIMatrix(0, 0, 0) {}
/*
 * Constructs HSIMatrix from given values representing H, S and I values.
 */
__host__ __device__ HSIMatrix(T h, T s, T i) : h(h), s(s), i(i) {}
/*
 * Converts an RGB image to a vector containing HSIMatrix elements.
 */
template<typename S>
static void __host__ __device__ rgb2HSIMatrixVector(S *r, S *g, S *b, HSIMatrix<T> *vector, int size);
/*
 * Converts a vector containing HSIMatrix elements to RGB image values. R
 * values will be stored at memory location r, G values at memory location g
 * and B values at memory location b. These buffers must be allocated with at
 * least `size` elements.
 */
template<typename S>
static void __host__ __device__ HSIMatrixVector2rgb(HSIMatrix<T> *vector, S *r, S *g, S *b, int size);
/*
 * Prints this HSIMatrix object to the standard output.
 */
void __host__ __device__ print();
/*
 * Prints every HSIMatrix object (all three channels) of the given matrix.
 */
static void __host__ __device__ printMatrix(HSIMatrix<T> *matrix, int width, int height, int lda);
/*
 * Prints only H values of the HSIMatrix objects of the given matrix.
 */
static void __host__ __device__ printMatrixH(HSIMatrix<T> *matrix, int width, int height, int lda);
/*
 * Prints only S values of the HSIMatrix objects of the given matrix.
 */
static void __host__ __device__ printMatrixS(HSIMatrix<T> *matrix, int width, int height, int lda);
/*
 * Prints only I values of the HSIMatrix objects of the given matrix.
 */
static void __host__ __device__ printMatrixI(HSIMatrix<T> *matrix, int width, int height, int lda);
/*
 * Utility method for finding the minimum of three values of type T.
 */
static T __host__ __device__ min3(T a, T b , T c);
/*
 * Returns the maximum HSIMatrix in terms of lexicographic ordering.
 */
static HSIMatrix __host__ __device__ max() { return HSIMatrix(0.0, 1.0, 1.0); }
/*
 * Returns the minimum HSIMatrix in terms of lexicographic ordering.
 */
static HSIMatrix __host__ __device__ min() { return HSIMatrix(1.0, 0.0, 0.0); }
/*
 * Comparison operator < implementing the ordering by lexicographical cascades
 * on the HSI space with parameter alpha equal to 10.
 */
template<typename S>
friend bool __host__ __device__ operator<(HSIMatrix<S> &a, HSIMatrix<S> &b);
/*
 * Comparison operator > implementing the ordering by lexicographical cascades
 * on the HSI space with parameter alpha equal to 10.
 */
template<typename S>
friend bool __host__ __device__ operator>(HSIMatrix<S> &a, HSIMatrix<S> &b);
};
/*
 * Lexicographic cascade "less than": compare quantized intensity first
 * (RGB_MAX / INTENSITY_ALPHA buckets), then saturation, then circular hue
 * distance (closer to the reference hue ranks higher, hence the inverted
 * final comparison).
 */
template<typename T>
bool __host__ __device__ operator<(HSIMatrix<T> &a, HSIMatrix<T> &b) {
    // Use the named constants instead of the magic 255.0 / 10.0 they duplicate.
    T val1 = ceil(a.i * RGB_MAX / INTENSITY_ALPHA);
    T val2 = ceil(b.i * RGB_MAX / INTENSITY_ALPHA);
    if (val1 < val2) {
        return true;
    } else if (val1 == val2) {
        if (a.s < b.s) {
            return true;
        } else if (a.s == b.s) {
            // fabs: the plain abs() may bind the integer overload and
            // truncate the fractional hue to 0.
            val1 = fabs(a.h);
            val2 = fabs(b.h);
            // fold hue onto its circular distance from 0
            val1 = (val1 < HUE_TRESHOLD) ? val1 : 1 - val1;
            val2 = (val2 < HUE_TRESHOLD) ? val2 : 1 - val2;
            return (val1 > val2);
        }
    }
    return false;
}
/*
 * Lexicographic cascade "greater than": the exact mirror of operator<;
 * quantized intensity, then saturation, then circular hue distance.
 */
template<typename T>
bool __host__ __device__ operator>(HSIMatrix<T> &a, HSIMatrix<T> &b) {
    // Use the named constants instead of the magic 255.0 / 10.0 they duplicate.
    T val1 = ceil(a.i * RGB_MAX / INTENSITY_ALPHA);
    T val2 = ceil(b.i * RGB_MAX / INTENSITY_ALPHA);
    if (val1 > val2) {
        return true;
    } else if (val1 == val2) {
        if (a.s > b.s) {
            return true;
        } else if (a.s == b.s) {
            // fabs: the plain abs() may bind the integer overload and
            // truncate the fractional hue to 0.
            val1 = fabs(a.h);
            val2 = fabs(b.h);
            // fold hue onto its circular distance from 0
            val1 = (val1 < HUE_TRESHOLD) ? val1 : 1 - val1;
            val2 = (val2 < HUE_TRESHOLD) ? val2 : 1 - val2;
            return (val1 < val2);
        }
    }
    return false;
}
/*
 * Returns the smallest of the three given values.
 */
template<typename T>
T __host__ __device__ HSIMatrix<T>::min3(T a, T b, T c) {
    T smallest = (a < b) ? a : b;
    return (c < smallest) ? c : smallest;
}
/*
 * Prints this matrix's (h, s, i) triple to stdout with 16 decimal places.
 */
template<typename T>
void __host__ __device__ HSIMatrix<T>::print() {
printf("(%.16f, %.16f, %.16f)", h, s, i);
}
/*
 * Prints every element (all three channels) of the matrix, one text row per
 * matrix row; `lda` is the row stride (leading dimension) in elements.
 */
template<typename T>
void __host__ __device__ HSIMatrix<T>::printMatrix(HSIMatrix<T> *matrix, int width, int height, int lda) {
    for (int row = 0; row < height; row++) {
        for (int col = 0; col < width; col++) {
            matrix[row * lda + col].print();
        }
        printf("\n");
    }
}
/*
 * Prints only the hue channel, one text row per matrix row; `lda` is the row
 * stride (leading dimension) in elements.
 */
template<typename T>
void __host__ __device__ HSIMatrix<T>::printMatrixH(HSIMatrix<T> *matrix, int width, int height, int lda) {
    for (int row = 0; row < height; row++) {
        for (int col = 0; col < width; col++) {
            printf("%g ", matrix[row * lda + col].h);
        }
        printf("\n");
    }
}
/*
 * Prints only the saturation channel, one text row per matrix row; `lda` is
 * the row stride (leading dimension) in elements.
 */
template<typename T>
void __host__ __device__ HSIMatrix<T>::printMatrixS(HSIMatrix<T> *matrix, int width, int height, int lda) {
    for (int row = 0; row < height; row++) {
        for (int col = 0; col < width; col++) {
            printf("%g ", matrix[row * lda + col].s);
        }
        printf("\n");
    }
}
/*
 * Prints only the intensity channel, one text row per matrix row; `lda` is
 * the row stride (leading dimension) in elements.
 */
template<typename T>
void __host__ __device__ HSIMatrix<T>::printMatrixI(HSIMatrix<T> *matrix, int width, int height, int lda) {
    for (int row = 0; row < height; row++) {
        for (int col = 0; col < width; col++) {
            printf("%g ", matrix[row * lda + col].i);
        }
        printf("\n");
    }
}
/*
 * Converts `size` RGB samples to HSI triples using the standard geometric
 * HSI conversion; hue is normalized to [0, 1) and intensity to [0, 1].
 */
template<typename T>
template<typename S>
void __host__ __device__ HSIMatrix<T>::rgb2HSIMatrixVector(S *r, S *g, S *b, HSIMatrix<T> *vector, int size) {
#pragma omp parallel for
for (int i = 0; i < size; i++) {
    T r_val = r[i];
    T g_val = g[i];
    T b_val = b[i];
    // theta = acos(0.5*((R-G)+(R-B)) / sqrt((R-G)^2 + (R-B)(G-B)))
    T num = 0.5 * ((r_val - g_val) + (r_val - b_val));
    T den = sqrt((r_val - g_val) * (r_val - g_val) + (r_val - b_val) * (g_val - b_val));
    if (den == 0) den = FLT_EPSILON; // achromatic pixel: avoid 0/0
    // Rounding can push |num/den| slightly past 1, which would make acos()
    // return NaN; clamp the ratio into acos's valid domain first.
    T ratio = num / den;
    if (ratio > 1) ratio = 1;
    else if (ratio < -1) ratio = -1;
    T theta = (T) acos(ratio);
    num = min3(r_val, g_val, b_val);
    den = r_val + g_val + b_val;
    if (den == 0) den = FLT_EPSILON; // black pixel: avoid division by zero
    vector[i].i = den / 3;
    vector[i].s = 1 - 3 * (num / den);
    // Saturation 0 leaves hue undefined: pick 0. Otherwise fold theta onto
    // the full circle depending on whether B > G, then normalize by 2*PI.
    vector[i].h = (vector[i].s == 0) ? 0 : (((b_val > g_val) ? (2 * PI - theta) : theta) / (2 * PI));
    vector[i].i /= RGB_MAX;
}
}
/*
 * Converts `size` HSI triples back to RGB.  The hue circle is split into
 * three 120-degree sectors; in each sector one channel is (1 - s)*I, one
 * comes from the cosine formula, and the third from the intensity identity
 * R + G + B = 3*I.
 */
template<typename T>
template<typename S>
void __host__ __device__ HSIMatrix<T>::HSIMatrixVector2rgb(HSIMatrix<T> *vector, S *r, S *g, S *b, int size) {
T c = PI / 3; // 60 degrees: half of one hue sector
#pragma omp parallel for
for (int i = 0; i < size; i++) {
T h_val = vector[i].h * 2 * PI;   // denormalize hue back to radians
T s_val = vector[i].s;
T i_val = vector[i].i * RGB_MAX;  // denormalize intensity to [0, RGB_MAX]
if (0 <= h_val && h_val < 2 * c) {
// RG sector: 0 <= h < 120 degrees
b[i] = i_val * (1 - s_val);
r[i] = i_val * (1 + s_val * cos(h_val) / cos(c - h_val));
g[i] = 3 * i_val - (r[i] + b[i]);
} else if (2 * c <= h_val && h_val <= 4 * c) {
// GB sector: 120 <= h <= 240 degrees (note cos(PI - h) == cos(c - (h - 2c)))
r[i] = i_val * (1 - s_val);
g[i] = i_val * (1 + s_val * cos(h_val - 2 * c) / cos(PI - h_val));
b[i] = 3 * i_val - (r[i] + g[i]);
} else {
// BR sector: 240 < h < 360 degrees
g[i] = i_val * (1 - s_val);
b[i] = i_val * (1 + s_val * cos(h_val - 4 * c) / cos(5 * c - h_val));
r[i] = 3 * i_val - (b[i] + g[i]);
}
}
}
#endif
|
kernel_cpu.c | // #ifdef __cplusplus
// extern "C" {
// #endif
//========================================================================================================================================================================================================200
// DEFINE/INCLUDE
//========================================================================================================================================================================================================200
//======================================================================================================================================================150
// LIBRARIES
//======================================================================================================================================================150
#include <omp.h> // (in directory known to compiler) needed by openmp
#include <stdlib.h> // (in directory known to compiler) needed by malloc
#include <stdio.h> // (in directory known to compiler) needed by printf, stderr
//======================================================================================================================================================150
// COMMON
//======================================================================================================================================================150
#include "../common.h" // (in directory provided here)
//======================================================================================================================================================150
// UTILITIES
//======================================================================================================================================================150
#include "../util/timer/timer.h" // (in directory provided here)
//========================================================================================================================================================================================================200
// KERNEL_CPU FUNCTION
//========================================================================================================================================================================================================200
/*
 * B+-tree point-query kernel (CPU/OpenMP version).
 *
 * For each of `count` queries, descends the tree from the root for
 * `maxheight` levels by scanning every key slot of the current node, then
 * scans the candidate leaf for an exact key match and copies the matching
 * record's value into ans[].
 *
 * records     record store indexed by the leaves' `indices` entries
 * knodes      flat array of tree nodes; knodes_elem is its element count
 * order       B-tree order; bounds the number of key slots scanned per node
 * maxheight   number of levels to descend
 * count       number of queries
 * currKnode   per-query current-node cursor (initialized by the caller)
 * offset      per-query next-node cursor (initialized by the caller)
 * keys        per-query search keys
 * ans         per-query result records; .value is filled on a hit
 */
void kernel_cpu(record *records, knode *knodes, long knodes_elem,
                int order, long maxheight, int count,
                long *currKnode, long *offset, int *keys, record *ans) {
    /* timing checkpoints */
    long long time0;
    long long time1;
    long long time2;
    time0 = get_time();

    /* MCPU setup */
    int max_nthreads;
    max_nthreads = omp_get_max_threads();
    // printf("max # of threads = %d\n", max_nthreads);
    (void) max_nthreads; /* kept for the debug printf above */
    int threadsPerBlock;
    threadsPerBlock = order < 1024 ? order : 1024;
    time1 = get_time();

    /* process queries in parallel; i/thid are per-thread scratch */
    int thid;
    int bid;
    int i;
#pragma omp parallel for private(i, thid)
    for (bid = 0; bid < count; bid++) {
        /* descend one tree level per iteration */
        for (i = 0; i < maxheight; i++) {
            /* scan all key slots of the current node */
            for (thid = 0; thid < threadsPerBlock; thid++) {
                /* if the query key falls between two adjacent keys */
                if ((knodes[currKnode[bid]].keys[thid]) <= keys[bid] &&
                    (knodes[currKnode[bid]].keys[thid + 1] > keys[bid])) {
                    /* Guard against out-of-range child indices produced by
                       the tree construction in main(); following them would
                       address outside knodes[] and segfault. */
                    if (knodes[offset[bid]].indices[thid] < knodes_elem) {
                        offset[bid] = knodes[offset[bid]].indices[thid];
                    }
                }
            }
            /* move to the selected child for the next level */
            currKnode[bid] = offset[bid];
        }
        /* candidate leaf reached: look for an exact key match */
        for (thid = 0; thid < threadsPerBlock; thid++) {
            if (knodes[currKnode[bid]].keys[thid] == keys[bid]) {
                ans[bid].value =
                    records[knodes[currKnode[bid]].indices[thid]].value;
            }
        }
    }
    time2 = get_time();

    /* Timing report.  Note "%%": the original format strings contained a
       bare "% :" which is an invalid printf conversion specification and
       yields undefined output. */
    printf("Time spent in different stages of CPU/MCPU KERNEL:\n");
    printf("%15.12f s, %15.12f %% : MCPU: SET DEVICE\n",
           (float)(time1 - time0) / 1000000,
           (float)(time1 - time0) / (float)(time2 - time0) * 100);
    printf("%15.12f s, %15.12f %% : CPU/MCPU: KERNEL\n",
           (float)(time2 - time1) / 1000000,
           (float)(time2 - time1) / (float)(time2 - time0) * 100);
    printf("Total time:\n");
    printf("%.12f s\n", (float)(time2 - time0) / 1000000);
}
//========================================================================================================================================================================================================200
// END
//========================================================================================================================================================================================================200
// #ifdef __cplusplus
// }
// #endif
|
GB_subassign_03.c | //------------------------------------------------------------------------------
// GB_subassign_03: C(I,J) += scalar ; using S
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// Method 03: C(I,J) += scalar ; using S
// M: NULL
// Mask_comp: false
// C_replace: false
// accum: present
// A: scalar
// S: constructed
// C is not bitmap: use GB_bitmap_assign instead
#include "GB_subassign_methods.h"
GrB_Info GB_subassign_03            // C(I,J) += scalar, with S; no mask
(
    GrB_Matrix C,
    // input:
    const GrB_Index *I,             // row index list (interpretation per Ikind)
    const int64_t ni,               // length of I
    const int64_t nI,               // number of rows addressed by I
    const int Ikind,
    const int64_t Icolon [3],
    const GrB_Index *J,             // column index list (interpretation per Jkind)
    const int64_t nj,               // length of J
    const int64_t nJ,               // number of columns addressed by J
    const int Jkind,
    const int64_t Jcolon [3],
    const GrB_BinaryOp accum,       // accumulator; always present for Method 03
    const void *scalar,             // the scalar to accumulate into C(I,J)
    const GrB_Type atype,           // type of the scalar
    GB_Context Context
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    ASSERT (!GB_IS_BITMAP (C)) ;    // bitmap case handled by GB_bitmap_assign

    //--------------------------------------------------------------------------
    // S = C(I,J)
    //--------------------------------------------------------------------------

    GB_EMPTY_TASKLIST ;
    GB_OK (GB_subassign_symbolic (&S, C, I, ni, J, nj, true, Context)) ;

    //--------------------------------------------------------------------------
    // get inputs
    //--------------------------------------------------------------------------

    GB_GET_C ;      // C must not be bitmap
    const int64_t *GB_RESTRICT Ch = C->h ;
    const int64_t *GB_RESTRICT Cp = C->p ;
    const bool C_is_hyper = (Ch != NULL) ;
    const int64_t Cnvec = C->nvec ;
    GB_GET_S ;
    GB_GET_ACCUM_SCALAR ;

    //--------------------------------------------------------------------------
    // Method 03: C(I,J) += scalar ; using S
    //--------------------------------------------------------------------------

    // Time: Optimal; must visit all IxJ, so Omega(|I|*|J|) is required.

    // Entries in S are found and the corresponding entry in C replaced with
    // the scalar.

    // Method 01 and Method 03 are very similar.

    //--------------------------------------------------------------------------
    // Parallel: all IxJ (Methods 01, 03, 13, 15, 17, 19)
    //--------------------------------------------------------------------------

    GB_SUBASSIGN_IXJ_SLICE ;

    //--------------------------------------------------------------------------
    // phase 1: create zombies, update entries, and count pending tuples
    //--------------------------------------------------------------------------

    // Existing entries (present in S) are updated in place; positions with no
    // entry only contribute to the per-task pending-tuple count here, and are
    // inserted in phase 2.

    #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
        reduction(+:nzombies)
    for (taskid = 0 ; taskid < ntasks ; taskid++)
    {

        //----------------------------------------------------------------------
        // get the task descriptor
        //----------------------------------------------------------------------

        GB_GET_IXJ_TASK_DESCRIPTOR_PHASE1 (iA_start, iA_end) ;

        //----------------------------------------------------------------------
        // compute all vectors in this task
        //----------------------------------------------------------------------

        for (int64_t j = kfirst ; j <= klast ; j++)
        {

            //------------------------------------------------------------------
            // get jC, the corresponding vector of C
            //------------------------------------------------------------------

            int64_t jC = GB_ijlist (J, j, Jkind, Jcolon) ;

            //------------------------------------------------------------------
            // get S(iA_start:end,j)
            //------------------------------------------------------------------

            GB_GET_VECTOR_FOR_IXJ (S, iA_start) ;

            //------------------------------------------------------------------
            // C(I(iA_start,iA_end-1),jC) += scalar
            //------------------------------------------------------------------

            for (int64_t iA = iA_start ; iA < iA_end ; iA++)
            {
                bool found = (pS < pS_end) && (GBI (Si, pS, Svlen) == iA) ;
                if (!found)
                { 
                    // ----[. A 1]----------------------------------------------
                    // S (i,j) is not present, the scalar is present
                    // [. A 1]: action: ( insert )
                    task_pending++ ;
                }
                else
                { 
                    // ----[C A 1] or [X A 1]-----------------------------------
                    // both S (i,j) and A (i,j) present
                    // [C A 1]: action: ( =C+A ): apply accum
                    // [X A 1]: action: ( undelete ): zombie lives
                    GB_C_S_LOOKUP ;
                    GB_withaccum_C_A_1_scalar ;
                    GB_NEXT (S) ;
                }
            }
        }
        GB_PHASE1_TASK_WRAPUP ;
    }

    //--------------------------------------------------------------------------
    // phase 2: insert pending tuples
    //--------------------------------------------------------------------------

    GB_PENDING_CUMSUM ;

    #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
        reduction(&&:pending_sorted)
    for (taskid = 0 ; taskid < ntasks ; taskid++)
    {

        //----------------------------------------------------------------------
        // get the task descriptor
        //----------------------------------------------------------------------

        GB_GET_IXJ_TASK_DESCRIPTOR_PHASE2 (iA_start, iA_end) ;

        //----------------------------------------------------------------------
        // compute all vectors in this task
        //----------------------------------------------------------------------

        for (int64_t j = kfirst ; j <= klast ; j++)
        {

            //------------------------------------------------------------------
            // get jC, the corresponding vector of C
            //------------------------------------------------------------------

            int64_t jC = GB_ijlist (J, j, Jkind, Jcolon) ;

            //------------------------------------------------------------------
            // get S(iA_start:end,j)
            //------------------------------------------------------------------

            GB_GET_VECTOR_FOR_IXJ (S, iA_start) ;

            //------------------------------------------------------------------
            // C(I(iA_start,iA_end-1),jC) += scalar
            //------------------------------------------------------------------

            for (int64_t iA = iA_start ; iA < iA_end ; iA++)
            {
                bool found = (pS < pS_end) && (GBI (Si, pS, Svlen) == iA) ;
                if (!found)
                { 
                    // ----[. A 1]----------------------------------------------
                    // S (i,j) is not present, the scalar is present
                    // [. A 1]: action: ( insert )
                    int64_t iC = GB_ijlist (I, iA, Ikind, Icolon) ;
                    GB_PENDING_INSERT (scalar) ;
                }
                else
                { 
                    // both S (i,j) and A (i,j) present
                    GB_NEXT (S) ;
                }
            }
        }
        GB_PHASE2_TASK_WRAPUP ;
    }

    //--------------------------------------------------------------------------
    // finalize the matrix and return result
    //--------------------------------------------------------------------------

    GB_SUBASSIGN_WRAPUP ;
}
|
gt_gtf.c | #include "gt_gtf.h"
/**
 * Allocate a new annotation entry covering [start, end] on the given strand.
 * The entry does not own `type`; id/type strings are attached later by the
 * parser. NOTE(review): malloc result is not checked — consistent with the
 * rest of this file, but a failed allocation will crash on the first store.
 */
GT_INLINE gt_gtf_entry* gt_gtf_entry_new(const uint64_t start, const uint64_t end, const gt_strand strand, gt_string* const type){
  gt_gtf_entry* entry = malloc(sizeof(gt_gtf_entry));
  entry->uid = 0;
  entry->start = start;
  entry->end = end;
  entry->num_children = 0;
  entry->type = type;
  entry->strand = strand;
  entry->gene_type = NULL;
  entry->gene_id = NULL;
  entry->transcript_id = NULL;
  entry->length = 0;
  return entry;
}
// Free an entry struct only; the gt_strings it points to are owned by the
// gtf-level hash tables and freed there.
GT_INLINE void gt_gtf_entry_delete(gt_gtf_entry* const entry){
  free(entry);
}
// Allocate a reference-sequence container with an empty entry vector.
GT_INLINE gt_gtf_ref* gt_gtf_ref_new(void){
  gt_gtf_ref* ref = malloc(sizeof(gt_gtf_ref));
  ref->entries = gt_vector_new(GTF_DEFAULT_ENTRIES, sizeof(gt_gtf_entry*));
  return ref;
}
/**
 * Free a reference container: every stored entry, the entry vector, and the
 * container itself.
 * FIX: the vector holds gt_gtf_entry* elements (inserted with type
 * gt_gtf_entry*, and read everywhere else as *gt_vector_get_elm(...,
 * gt_gtf_entry*)). The previous code fetched with element type gt_gtf_entry,
 * which walks the buffer with the wrong stride and passes pointers into the
 * vector's own storage to free().
 */
GT_INLINE void gt_gtf_ref_delete(gt_gtf_ref* const ref){
  const uint64_t s = gt_vector_get_used(ref->entries);
  uint64_t i = 0;
  for(i=0; i<s; i++){
    gt_gtf_entry_delete(*gt_vector_get_elm(ref->entries, i, gt_gtf_entry*));
  }
  gt_vector_delete(ref->entries);
  free(ref);
}
/**
 * Allocate an empty GTF container: per-reference entry lists plus string
 * intern tables (types, gene/transcript ids, gene types) and id->entry
 * indexes for genes and transcripts.
 */
GT_INLINE gt_gtf* gt_gtf_new(void){
  gt_gtf* gtf = malloc(sizeof(gt_gtf));
  gtf->refs = gt_shash_new();
  gtf->types = gt_shash_new();
  gtf->gene_ids = gt_shash_new();
  gtf->transcript_ids = gt_shash_new();
  gtf->gene_types = gt_shash_new();
  gtf->genes = gt_shash_new();
  gtf->transcripts = gt_shash_new();
  return gtf;
}
/**
 * Free a GTF container. The intern tables own their values (freed with
 * `true`); genes/transcripts only index entries owned by the per-ref entry
 * vectors, so their values are not freed here (`false`).
 */
GT_INLINE void gt_gtf_delete(gt_gtf* const gtf){
  gt_shash_delete(gtf->refs, true);
  gt_shash_delete(gtf->types, true);
  gt_shash_delete(gtf->gene_ids, true);
  gt_shash_delete(gtf->transcript_ids, true);
  gt_shash_delete(gtf->gene_types, true);
  gt_shash_delete(gtf->genes, false);
  gt_shash_delete(gtf->transcripts, false);
  free(gtf);
}
/**
 * Allocate an empty hit accumulator.
 * FIX: junction_hit_ration was left uninitialized here even though
 * gt_gtf_hits_clear() resets it — a fresh, never-cleared hits struct exposed
 * an indeterminate value.
 */
GT_INLINE gt_gtf_hits* gt_gtf_hits_new(void){
  gt_gtf_hits* hits = malloc(sizeof(gt_gtf_hits));
  hits->exon_hits = gt_vector_new(16, sizeof(gt_gtf_hit*));
  hits->num_genes = 0;
  hits->num_protein_coding =0;
  hits->num_paired_genes =0;
  hits->junction_hit_ration = 0.0;
  return hits;
}
// Free the accumulator: clear releases the individual hits, then the vector
// and the struct are freed.
GT_INLINE void gt_gtf_hits_delete(gt_gtf_hits* const hits){
  gt_gtf_hits_clear(hits);
  gt_vector_delete(hits->exon_hits);
  free(hits);
}
/**
 * Release all stored hits and reset counters so the accumulator can be
 * reused for the next template.
 */
GT_INLINE void gt_gtf_hits_clear(gt_gtf_hits* const hits){
  uint64_t i = 0;
  for(i=0; i<gt_vector_get_used(hits->exon_hits); i++){
    gt_gtf_hit* hit = *gt_vector_get_elm(hits->exon_hits, i, gt_gtf_hit*);
    gt_gtf_hit_delete(hit);
  }
  hits->num_genes = 0;
  hits->num_protein_coding =0;
  hits->num_paired_genes =0;
  hits->junction_hit_ration = 0.0;
  gt_vector_clear(hits->exon_hits);
}
/**
 * Allocate counting parameters with default settings. Coverage arrays are
 * only allocated when `coverage` is requested; otherwise they stay NULL and
 * gt_gtf_count_params_delete skips them.
 */
GT_INLINE gt_gtf_count_parms* gt_gtf_count_params_new(bool coverage){
  gt_gtf_count_parms* p = gt_malloc_(1, sizeof(gt_gtf_count_parms), false, false);
  p->num_maps = 0;
  p->exon_overlap = 0;
  p->unweighted_counts = true;
  p->single_pair_counts = false;
  p->num_junctions = 0;
  p->count_bases = false;
  p->num_annotated_junctions = 0;
  if(coverage){
    p->single_transcript_coverage = GT_GTF_INIT_COVERAGE();
    p->gene_body_coverage = GT_GTF_INIT_COVERAGE();
  }else{
    p->single_transcript_coverage = NULL;
    p->gene_body_coverage = NULL;
  }
  return p;
}
/**
 * Free counting parameters, including the optional coverage arrays.
 * free(NULL) is a well-defined no-op, so the previous NULL guards were
 * redundant and have been dropped.
 */
GT_INLINE void gt_gtf_count_params_delete(gt_gtf_count_parms* params){
  free(params->single_transcript_coverage);
  free(params->gene_body_coverage);
  free(params);
}
// Intern lookup: return the canonical gt_string for `type`, creating and
// registering it on first use.
GT_INLINE gt_string* gt_gtf_get_type(const gt_gtf* const gtf, char* const type){
  if(!gt_gtf_contains_type(gtf, type)){
    gt_string* s = gt_string_set_new(type);
    gt_shash_insert_string(gtf->types, type, s);
  }
  return gt_shash_get(gtf->types, type, gt_string);
}
// True if `name` is an already-registered entry type.
GT_INLINE bool gt_gtf_contains_type(const gt_gtf* const gtf, char* const name){
  return gt_shash_is_contained(gtf->types, name);
}
// Return the container for reference sequence `name`, creating it on demand.
GT_INLINE gt_gtf_ref* gt_gtf_get_ref(const gt_gtf* const gtf, char* const name){
  if(!gt_gtf_contains_ref(gtf, name)){
    gt_gtf_ref* rr = gt_gtf_ref_new();
    gt_shash_insert(gtf->refs, name, rr, gt_gtf_ref*);
  }
  return gt_shash_get(gtf->refs, name, gt_gtf_ref);
}
// True if a container for reference sequence `name` exists.
GT_INLINE bool gt_gtf_contains_ref(const gt_gtf* const gtf, char* const name){
  return gt_shash_is_contained(gtf->refs, name);
}
// Intern lookup for gene ids: canonical gt_string, created on first use.
GT_INLINE gt_string* gt_gtf_get_gene_id(const gt_gtf* const gtf, char* const name){
  if(!gt_gtf_contains_gene_id(gtf, name)){
    gt_string* const gene_id = gt_string_set_new(name);
    gt_shash_insert(gtf->gene_ids, name, gene_id, gt_string*);
  }
  return gt_shash_get(gtf->gene_ids, name, gt_string);
}
// True if `name` is an already-interned gene id.
GT_INLINE bool gt_gtf_contains_gene_id(const gt_gtf* const gtf, char* const name){
  return gt_shash_is_contained(gtf->gene_ids, name);
}
// Intern lookup for transcript ids: canonical gt_string, created on first
// use. (Local renamed from the copy-pasted `gene_id` for clarity.)
GT_INLINE gt_string* gt_gtf_get_transcript_id(const gt_gtf* const gtf, char* const name){
  if(!gt_gtf_contains_transcript_id(gtf, name)){
    gt_string* const transcript_id = gt_string_set_new(name);
    gt_shash_insert(gtf->transcript_ids, name, transcript_id, gt_string*);
  }
  return gt_shash_get(gtf->transcript_ids, name, gt_string);
}
// True if `name` is an already-interned transcript id.
GT_INLINE bool gt_gtf_contains_transcript_id(const gt_gtf* const gtf, char* const name){
  return gt_shash_is_contained(gtf->transcript_ids, name);
}
// Intern lookup for gene types: canonical gt_string, created on first use.
GT_INLINE gt_string* gt_gtf_get_gene_type(const gt_gtf* const gtf, char* const name){
  if(!gt_gtf_contains_gene_type(gtf, name)){
    gt_string* const gene_type = gt_string_set_new(name);
    gt_shash_insert(gtf->gene_types, name, gene_type, gt_string*);
  }
  return gt_shash_get(gtf->gene_types, name, gt_string);
}
// True if `name` is an already-interned gene type.
GT_INLINE bool gt_gtf_contains_gene_type(const gt_gtf* const gtf, char* const name){
  return gt_shash_is_contained(gtf->gene_types, name);
}
// Direct index lookup: the gene entry for `key`, or NULL if unknown.
GT_INLINE gt_gtf_entry* gt_gtf_get_gene_by_id(const gt_gtf* const gtf, char* const key){
  return gt_shash_is_contained(gtf->genes, key)
      ? gt_shash_get_element(gtf->genes, key)
      : NULL;
}
// Direct index lookup: the transcript entry for `key`, or NULL if unknown.
GT_INLINE gt_gtf_entry* gt_gtf_get_transcript_by_id(const gt_gtf* const gtf, char* const key){
  return gt_shash_is_contained(gtf->transcripts, key)
      ? gt_shash_get_element(gtf->transcripts, key)
      : NULL;
}
/**
* Comparator that compares two gtf_entries by starting position
*/
/**
 * qsort comparator: order entries by start position, breaking ties by the
 * lexicographic order of their type string.
 */
GT_INLINE int gt_gtf_sort_by_start_cmp_(const gt_gtf_entry** a, const gt_gtf_entry** b){
  uint64_t p1 = (*a)->start;
  uint64_t p2 = (*b)->start;
  return p1 < p2 ? -1 : (p1>p2 ? 1 : gt_string_cmp( (*a)->type, (*b)->type ));
}
/**
* Comparator that compares two gtf_entries by ending position
*/
/**
 * qsort comparator: order entries by end position, breaking ties by the
 * lexicographic order of their type string.
 */
GT_INLINE int gt_gtf_sort_by_end_cmp_(const gt_gtf_entry** a, const gt_gtf_entry** b){
  uint64_t p1 = (*a)->end;
  uint64_t p2 = (*b)->end;
  return p1 < p2 ? -1 : (p1>p2 ? 1 : gt_string_cmp( (*a)->type, (*b)->type ));
}
/**
* Sort vector of gt_gtf_entries by starting position
*/
/**
 * Sort a vector of gt_gtf_entry* in place by start position.
 * FIX: the element size is sizeof(gt_gtf_entry*) — the vector stores
 * pointers to entries. The previous sizeof(gt_gtf_entry**) happened to match
 * on common ABIs but described the wrong type.
 */
GT_INLINE void gt_gtf_sort_by_start(gt_vector* entries) {
  qsort(gt_vector_get_mem(entries, gt_gtf_entry*),
        gt_vector_get_used(entries),
        sizeof(gt_gtf_entry*),
        (int (*)(const void *,const void *))gt_gtf_sort_by_start_cmp_);
}
/**
* Sort vector of gt_gtf_entries by ending position
*/
/**
 * Sort a vector of gt_gtf_entry* in place by end position.
 * FIX: element size corrected to sizeof(gt_gtf_entry*) to match the stored
 * element type (see gt_gtf_sort_by_start).
 */
GT_INLINE void gt_gtf_sort_by_end( gt_vector* entries) {
  qsort(gt_vector_get_mem(entries, gt_gtf_entry*),
        gt_vector_get_used(entries),
        sizeof(gt_gtf_entry*),
        (int (*)(const void *,const void *))gt_gtf_sort_by_end_cmp_);
}
/**
 * Recursively build a centered interval-tree node from `entries`.
 * The median entry's midpoint splits the set: entries entirely left/right of
 * it recurse into child nodes; entries overlapping the midpoint are kept at
 * this node in two lists (sorted by start and by end) for stabbing queries.
 * Takes ownership of `entries` and deletes the vector before returning.
 * Returns NULL for an empty input.
 */
GT_INLINE gt_gtf_node* gt_gtf_create_node(gt_vector* entries){
  const uint64_t len = gt_vector_get_used(entries);
  if(len == 0){
    return NULL;
  }
  gt_gtf_node* const node = malloc(sizeof(gt_gtf_node));
  const gt_gtf_entry* mid = *gt_vector_get_elm(entries, len/2, gt_gtf_entry*);
  node->midpoint = mid->start + ((mid->end - mid->start)/2);
  node->entries_by_end = gt_vector_new(16, sizeof(gt_gtf_entry*));
  node->entries_by_start = gt_vector_new(16, sizeof(gt_gtf_entry*));
  gt_vector* to_left = gt_vector_new(16, sizeof(gt_gtf_entry*));
  gt_vector* to_right = gt_vector_new(16, sizeof(gt_gtf_entry*));
  GT_VECTOR_ITERATE(entries, element, counter, gt_gtf_entry*){
    if((*element)->end < node->midpoint){
      gt_vector_insert(to_left, (*element), gt_gtf_entry*);
    }else if((*element)->start > node->midpoint){
      gt_vector_insert(to_right, (*element), gt_gtf_entry*);
    }else{
      // overlaps the midpoint: stays on this node in both orderings
      gt_vector_insert(node->entries_by_end, (*element), gt_gtf_entry*);
      gt_vector_insert(node->entries_by_start, (*element), gt_gtf_entry*);
    }
  }
  // sort the start and end lists
  gt_gtf_sort_by_start(node->entries_by_start);
  gt_gtf_sort_by_end(node->entries_by_end);
  // delete incoming entry list (ownership was transferred to this call)
  gt_vector_delete(entries);
  if(gt_vector_get_used(to_left) > 0){
    // create left node (recursion takes ownership of to_left)
    node->left = gt_gtf_create_node(to_left);
  }else{
    node->left = NULL;
    gt_vector_delete(to_left);
  }
  if(gt_vector_get_used(to_right) > 0){
    // create right node (recursion takes ownership of to_right)
    node->right = gt_gtf_create_node(to_right);
  }else{
    node->right = NULL;
    gt_vector_delete(to_right);
  }
  return node;
}
/*
* Read next tab separated field from line or return NULL if no such field exists
*/
/*
 * Read next tab separated field from line or return NULL if no such field
 * exists. Destructive: terminates the field with EOS in place and advances
 * *line past the tab.
 */
GT_INLINE char* gt_gtf_read_gtf_field_(char** line){
  char* current = *line;
  GT_READ_UNTIL(line, **line=='\t');
  if(GT_IS_EOL(line)) return NULL;
  **line = EOS;
  GT_NEXT_CHAR(line);
  return current;
}
/**
 * Parse the GTF attribute column (`key "value"; key "value"; ...`) into
 * `attrs` (name -> value, pointers into the line buffer; destructive).
 * Stops early once gene_id, gene_type and transcript_id are all present.
 * FIX: the scan for the closing ';' was unbounded — on a malformed line with
 * a missing ';' it walked past the end of the buffer, and the end-of-line
 * check after it was unreachable. The scan now stops at end of line and
 * reports the malformed input.
 */
GT_INLINE gt_status gt_gtf_read_attributes_(char** line, gt_shash* attrs){
  gt_shash_clear(attrs, false);
  while(!GT_IS_EOL(line)){
    while(**line == ' ') GT_NEXT_CHAR(line);
    if(**line == EOL || **line == EOS) return GT_STATUS_OK;
    // get the attribute name
    char* name = *line;
    GT_READ_UNTIL(line, **line==' ')
    if(GT_IS_EOL(line)){
      gt_error_msg("Error parsing GTF attributes. Expected space but found end of line");
      return GT_GTF_INVALID_LINE;
    }
    **line = EOS;
    GT_NEXT_CHAR(line);
    // skip to attribute start
    while(**line == ' ') GT_NEXT_CHAR(line);
    // remove starting quote
    if(**line == '"') GT_NEXT_CHAR(line);
    char* attr = *line;
    // skip until the closing ';' but never past the end of the line
    while(**line != ';' && !GT_IS_EOL(line)) GT_NEXT_CHAR(line);
    if(GT_IS_EOL(line)) return GT_GTF_INVALID_LINE;
    // remove trailing quotes and add EOS
    if(*(*line-1) == '"') *(*line-1) = EOS;
    else **line = EOS;
    GT_NEXT_CHAR(line);
    // add attribute (last occurrence wins)
    if(gt_shash_is_contained(attrs, name)){
      gt_shash_remove(attrs, name, false);
    }
    gt_shash_insert(attrs, name, attr, char*);
    // early out once all fields used downstream are collected
    if(gt_shash_is_contained(attrs, "gene_id") &&
       gt_shash_is_contained(attrs, "gene_type") &&
       gt_shash_is_contained(attrs, "transcript_id")){
      return GT_STATUS_OK;
    }
  }
  return GT_STATUS_OK;
}
/**
* Parse a single GTF line
*/
/**
 * Parse a single GTF line (destructively) and register the resulting entry
 * in `gtf`. `counter` becomes the entry uid; `attrs` is a reusable scratch
 * table for the attribute parser. Returns GT_STATUS_OK, or
 * GT_GTF_INVALID_LINE / GT_GTF_INVALID_ATTRIBUTES on malformed input.
 * NOTE(review): start/end use atol, so non-numeric fields silently parse
 * as 0 — confirm this is acceptable for the expected input.
 * FIX: removed a duplicated, dead NULL-check on the strand field (the first
 * copy returned without an error message, shadowing the reported one).
 */
GT_INLINE gt_status gt_gtf_read_line(char* line, gt_gtf* const gtf, uint64_t counter, gt_shash* attrs){
  // skip comments
  if(line[0] == '#'){
    return GT_STATUS_OK;
  }
  char* ref = NULL;
  char* type = NULL;
  uint64_t start = 0;
  uint64_t end = 0;
  gt_strand strand = UNKNOWN;
  char* current = line;
  ref = gt_gtf_read_gtf_field_(&line);
  if(ref == NULL){
    gt_error_msg("Unable to parse name: '%s'", line);
    return GT_GTF_INVALID_LINE;
  }
  // SKIP source
  current = gt_gtf_read_gtf_field_(&line);
  if(current == NULL){
    gt_error_msg("Unable to parse source: '%s'", line);
    return GT_GTF_INVALID_LINE;
  }
  // type
  type = gt_gtf_read_gtf_field_(&line);
  if(type == NULL){
    gt_error_msg("Unable to parse type: '%s'", line);
    return GT_GTF_INVALID_LINE;
  }
  // start
  current = gt_gtf_read_gtf_field_(&line);
  if(current == NULL){
    gt_error_msg("Unable to parse start: '%s'", line);
    return GT_GTF_INVALID_LINE;
  }
  start = atol(current);
  // end
  current = gt_gtf_read_gtf_field_(&line);
  if(current == NULL){
    gt_error_msg("Unable to parse end: '%s'", line);
    return GT_GTF_INVALID_LINE;
  }
  end = atol(current);
  // SKIP score
  current = gt_gtf_read_gtf_field_(&line);
  if(current == NULL){
    gt_error_msg("Unable to parse score: '%s'", line);
    return GT_GTF_INVALID_LINE;
  }
  // strand (anything other than '+'/'-' remains UNKNOWN)
  current = gt_gtf_read_gtf_field_(&line);
  if(current == NULL){
    gt_error_msg("Unable to parse strand: '%s'", line);
    return GT_GTF_INVALID_LINE;
  }
  if(*current == '+'){
    strand = FORWARD;
  }else if(*current == '-'){
    strand = REVERSE;
  }
  // SKIP the frame field
  current = gt_gtf_read_gtf_field_(&line);
  if(current == NULL){
    gt_error_msg("Unable to parse last: '%s'", line);
    return GT_GTF_INVALID_LINE;
  }
  // WARNING >>> the attribute parser stops after
  // the currently used fields are found. If you want
  // to add a field, also update the attribute parser
  if(gt_gtf_read_attributes_(&line, attrs) != GT_STATUS_OK){
    gt_error_msg("Unable to parse attributes: '%s'", line);
    return GT_GTF_INVALID_ATTRIBUTES;
  }
  // get the type or create it
  gt_string* tp = gt_gtf_get_type(gtf, type);
  gt_gtf_entry* e = gt_gtf_entry_new(start, end, strand, tp);
  e->uid = counter;
  if(gt_shash_is_contained(attrs, "gene_id")){
    e->gene_id = gt_gtf_get_gene_id(gtf, gt_shash_get(attrs, "gene_id", char));
  }
  if(gt_shash_is_contained(attrs, "gene_type")){
    e->gene_type = gt_gtf_get_gene_type(gtf, gt_shash_get(attrs, "gene_type", char));
  }
  if(gt_shash_is_contained(attrs, "transcript_id")){
    e->transcript_id = gt_gtf_get_transcript_id(gtf, gt_shash_get(attrs, "transcript_id", char));
  }
  // get the ref or create it
  gt_gtf_ref* gtref = gt_gtf_get_ref(gtf, ref);
  gt_vector_insert(gtref->entries, e, gt_gtf_entry*);
  // index genes and transcripts by id for direct lookup
  if(strcmp(e->type->buffer, "gene") == 0){
    gt_shash_insert(gtf->genes, e->gene_id->buffer, e, gt_gtf_entry*);
  }
  if(strcmp(e->type->buffer, "transcript") == 0){
    gt_shash_insert(gtf->transcripts, e->transcript_id->buffer, e, gt_gtf_entry*);
  }
  return GT_STATUS_OK;
}
// True when either mapped boundary of `map` coincides with either boundary
// of the annotated entry `e` (i.e. the map touches an annotated junction).
bool gt_gtf_hits_junction(gt_map* map, gt_gtf_entry* e){
  const uint64_t map_start = gt_map_get_begin_mapping_position(map);
  const uint64_t map_end = gt_map_get_end_mapping_position(map);
  return map_start == e->start || map_start == e->end
      || map_end == e->start || map_end == e->end;
}
// Begin position of the map, skipping any left trim.
GT_INLINE uint64_t gt_gtf_get_map_begin(gt_map* const map){
  return gt_map_get_begin_mapping_position(map) + gt_map_get_left_trim_length(map);
}
// End position of the map (no trim adjustment applied).
GT_INLINE uint64_t gt_gtf_get_map_end(gt_map* const map){
  return gt_map_get_end_mapping_position(map);
}
/**
* Iterate over the map blocks and count exon-exon junctions that are annotated
*/
/**
 * Iterate over the map blocks and count exon-exon junctions that are
 * annotated: for each split, a junction counts when the end of the previous
 * block and the start of the current block both hit an exon of the same
 * transcript. Returns 0 for single-block (unsplit) maps. `last_hits` holds
 * the transcript ids seen at the previous block's end.
 */
GT_INLINE uint64_t gt_gtf_count_junction(const gt_gtf* const gtf, gt_map* const map){
  uint64_t blocks = gt_map_get_num_blocks(map);
  if(blocks <= 1) return 0; // single block map
  uint64_t num_junctions = 0;
  char* seq_name = gt_map_get_seq_name(map);
  gt_vector* hits = gt_vector_new(16, sizeof(gt_gtf_entry*));
  gt_shash* last_hits = NULL;
  GT_MAP_ITERATE(map, block){
    uint64_t start = gt_map_get_begin_mapping_position(block);
    uint64_t end = gt_map_get_end_mapping_position(block);
    if(last_hits != NULL){
      // there was a block before, check if we found an annotated junction
      gt_gtf_search(gtf, hits, seq_name, start, start, true);
      GT_VECTOR_ITERATE(hits, e, c, gt_gtf_entry*){
        gt_gtf_entry* hit = *e;
        if(hit->transcript_id != NULL && hit->type != NULL && strcmp(hit->type->buffer, "exon") == 0){
          if(gt_shash_is_contained(last_hits, hit->transcript_id->buffer)){
            // same transcript on both sides of the split: annotated junction
            num_junctions++;
            break;
          }
        }
      }
    }
    if(last_hits == NULL) last_hits = gt_shash_new();
    else gt_shash_clear(last_hits, true);
    // search for the overlaps with the end of the block
    gt_gtf_search(gtf, hits, seq_name, end, end, true);
    GT_VECTOR_ITERATE(hits, e, c, gt_gtf_entry*){
      gt_gtf_entry* hit = *e;
      if(hit->transcript_id != NULL && hit->type != NULL && strcmp(hit->type->buffer, "exon") == 0){
        gt_gtf_count_(last_hits, hit->transcript_id->buffer);
      }
    }
  }
  // blocks >= 2, so the loop ran and last_hits was allocated
  gt_vector_delete(hits);
  gt_shash_delete(last_hits, true);
  return num_junctions;
}
/**
 * Debug/report printer: dump one annotation entry (and optionally the map
 * that hit it) in a human-readable single line to `target`.
 */
void gt_gtf_print_entry_(FILE* target, gt_gtf_entry* e, gt_map* map){
  if(map != NULL){
    gt_output_map_fprint_map(target, map, NULL);
    fprintf(target, " ==> ");
  }
  if(e->type != NULL){
    fprintf(target, "%s : %"PRIu64" - %"PRIu64" (%c)", e->type->buffer, e->start, e->end, (e->strand==FORWARD?'+':'-') );
  }
  if(e->gene_id != NULL){
    fprintf(target, " GID:%s", e->gene_id->buffer);
  }
  if(e->transcript_id != NULL){
    fprintf(target, " TID:%s", e->transcript_id->buffer);
  }
  if(e->type != NULL){
    fprintf(target, " [%s]", e->type->buffer);
  }
  if(e->gene_type != NULL){
    fprintf(target, " [%s]", e->gene_type->buffer);
  }
  fprintf(target, " [#transcripts: %"PRIu64"]", e->num_children);
  if(map != NULL && gt_gtf_hits_junction(map, e)){
    fprintf(target, " [Hits JS]");
  }
  fprintf(target, "\n");
}
/**
 * Allocate a hit record with all fields zeroed/NULLed.
 * FIX: the mmap and map_attributes fields (assigned only on the paired
 * template path, see gt_gtf_search_template_hits) were previously left
 * uninitialized here.
 */
GT_INLINE gt_gtf_hit* gt_gtf_hit_new(void){
  gt_gtf_hit* hit = malloc(sizeof(gt_gtf_hit));
  hit->exon_overlap = 0.0;
  hit->intron_length = 0.0;
  hit->is_protein_coding = false;
  hit->junction_hits = 0.0;
  hit->map = NULL;
  hit->mmap = NULL;
  hit->map_attributes = NULL;
  hit->num_junctions = 0;
  hit->pairs_transcript = false;
  hit->pairs_splits = false;
  hit->pairs_gene = false;
  hit->num_junctions_hits =0;
  hit->num_template_blocks = 0;
  hit->transcripts = NULL;
  hit->genes = NULL;
  hit->hits_exon = false;
  return hit;
}
// Free a hit record and the transcript/gene count tables it owns (values
// freed with `true`); the map pointers are not owned and left untouched.
GT_INLINE void gt_gtf_hit_delete(gt_gtf_hit* hit){
  if(hit->transcripts != NULL){
    gt_shash_delete(hit->transcripts, true);
  }
  if(hit->genes != NULL){
    gt_shash_delete(hit->genes, true);
  }
  free(hit);
}
/**
 * Refill the buffered input with the next block of lines (up to
 * GT_NUM_LINES_50K), flushing any attached output buffers first.
 * Returns GT_INPUT_FILE_EOF when no more lines are available.
 */
GT_INLINE gt_status gt_gtf_reload_buffer(gt_buffered_input_file* const buffered_fasta_input) {
  GT_BUFFERED_INPUT_FILE_CHECK(buffered_fasta_input);
  // Dump buffer if BOF it attached to input, and get new out block (always FIRST)
  gt_buffered_input_file_dump_attached_buffers(buffered_fasta_input->attached_buffered_output_file);
  // Read new input block
  const uint64_t read_lines = gt_buffered_input_file_get_block(buffered_fasta_input, GT_NUM_LINES_50K);
  if (gt_expect_false(read_lines==0)) return GT_INPUT_FILE_EOF;
  // Assign block ID
  gt_buffered_input_file_set_id_attached_buffers(buffered_fasta_input->attached_buffered_output_file,buffered_fasta_input->block_id);
  return GT_STATUS_OK;
}
/**
 * Fetch the next input line into `line` (static view into the input buffer,
 * no copy), reloading the buffer when exhausted. Returns GT_IMP_OK on
 * success; propagates the reload error (e.g. GT_INPUT_FILE_EOF) otherwise.
 */
GT_INLINE gt_status gt_gtf_get_line(gt_buffered_input_file* const buffered_input, gt_string* const line) {
  GT_BUFFERED_INPUT_FILE_CHECK(buffered_input);
  GT_STRING_CHECK(line);
  gt_status error_code;
  // Check the end_of_block. Reload buffer if needed
  if (gt_buffered_input_file_eob(buffered_input)) {
    if ((error_code=gt_gtf_reload_buffer(buffered_input))!=GT_IMP_OK) return error_code;
  }
  // Prepare the template
  char* const line_start = buffered_input->cursor;
  gt_string_clear(line);
  GT_INPUT_FILE_SKIP_LINE(buffered_input);
  gt_string_set_nstring_static(line, line_start, (buffered_input->cursor - line_start));
  return GT_IMP_OK;
}
/**
 * Merge a per-thread `source` gtf into `target`: entries are moved (pointer
 * transfer) into the target's per-ref vectors, their string references are
 * re-interned against the target's tables, uids are renumbered from
 * `counter`, and gene/transcript indexes are extended. Returns the next
 * free uid. Entry ownership moves to target; source still owns its own
 * (now stale) string tables and must be deleted by the caller.
 */
GT_INLINE uint64_t gt_gtf_merge_(const gt_gtf* const target, gt_gtf* source, uint64_t counter){
  // get the type or create it
  GT_SHASH_BEGIN_KEY_ITERATE(source->refs, key){
    gt_gtf_ref* source_ref = gt_gtf_get_ref(source, key);
    gt_gtf_ref* target_ref = gt_gtf_get_ref(target, key);
    GT_VECTOR_ITERATE(source_ref->entries, value, c, gt_gtf_entry*){
      gt_gtf_entry* e = *value;
      e->uid = counter++;
      // re-intern all string references against the target's tables
      if(e->gene_id != NULL){
        e->gene_id = gt_gtf_get_gene_id(target, gt_string_get_string(e->gene_id));
      }
      if(e->transcript_id != NULL){
        e->transcript_id = gt_gtf_get_transcript_id(target, gt_string_get_string(e->transcript_id));
      }
      if(e->type != NULL)e->type = gt_gtf_get_type(target, gt_string_get_string(e->type));
      if(e->gene_type != NULL)e->gene_type = gt_gtf_get_gene_type(target, gt_string_get_string(e->gene_type));
      gt_vector_insert(target_ref->entries, e, gt_gtf_entry*);
      if(strcmp(e->type->buffer, GT_GTF_TYPE_GENE) == 0 && !gt_shash_is_contained(target->genes, e->gene_id->buffer)){
        gt_shash_insert(target->genes, e->gene_id->buffer, e, gt_gtf_entry*);
      }
      if(strcmp(e->type->buffer, GT_GTF_TYPE_TRANSCRIPT) == 0 && !gt_shash_is_contained(target->transcripts, e->transcript_id->buffer)){
        gt_shash_insert(target->transcripts, e->transcript_id->buffer, e, gt_gtf_entry*);
      }
    }
  }GT_SHASH_END_ITERATE;
  return counter;
}
// Convenience wrapper: parse a GTF from an open stdio stream.
GT_INLINE gt_gtf* gt_gtf_read_from_stream(FILE* input, uint64_t threads){
  gt_input_file* input_file = gt_input_stream_open(input);
  return gt_gtf_read(input_file, threads);
}
// Convenience wrapper: parse a GTF from a file path (non-mmapped open).
GT_INLINE gt_gtf* gt_gtf_read_from_file(char* input, uint64_t threads){
  gt_input_file* input_file = gt_input_file_open(input, false);
  return gt_gtf_read(input_file, threads);
}
/**
 * Parse a GTF file with `threads` workers (threads must be >= 1).
 * Each worker parses line blocks into its own gt_gtf (thread 0 uses the
 * result gtf directly); the per-thread gtfs are then merged sequentially.
 * Post-processing per reference: sort entries by start, derive intron
 * entries between consecutive exons of the same transcript, accumulate
 * exon counts/lengths on transcripts and transcript counts on genes, and
 * finally build the interval-tree node used by gt_gtf_search.
 * Takes ownership of `input_file` (closed before returning).
 */
GT_INLINE gt_gtf* gt_gtf_read(gt_input_file* input_file, const uint64_t threads){
  GT_NULL_CHECK(input_file);
  GT_ZERO_CHECK(threads);
  uint64_t counter = 0;
  uint64_t i = 0;
  gt_gtf* const gtf = gt_gtf_new();
  // one extra gtf per additional worker; thread 0 writes into gtf itself
  gt_gtf** gtfs = gt_calloc(threads-1, gt_gtf*, true);
  for(i=0; i<threads-1; i++){
    gtfs[i] = gt_gtf_new();
  }
#ifdef HAVE_OPENMP
  #pragma omp parallel num_threads(threads)
#endif
  {
#ifdef HAVE_OPENMP
    uint64_t tid = omp_get_thread_num();
#else
    uint64_t tid=0;
#endif
    gt_buffered_input_file* buffered_input = gt_buffered_input_file_new(input_file);
    gt_string* buffered_line = gt_string_new(GTF_MAX_LINE_LENGTH);
    gt_gtf* thread_gtf;
    if(tid == 0){
      thread_gtf = gtf;
    }else{
      thread_gtf = gtfs[tid-1];
    }
    gt_shash* attrs = gt_shash_new();
    while(gt_gtf_get_line(buffered_input, buffered_line)){
      if(gt_gtf_read_line(buffered_line->buffer, thread_gtf, buffered_input->current_line_num, attrs) != GT_STATUS_OK){
        // raise error
        gt_fatal_error_msg("Failed to parse GTF line '%s'", buffered_line->buffer);
      }
      counter++;
    }
    gt_shash_delete(attrs, false);
    gt_buffered_input_file_close(buffered_input);
    gt_string_delete(buffered_line);
  }
  gt_input_file_close(input_file);
  counter = 0;
  // merge all the thread gtfs into a single one
  for(i=0; i<threads-1; i++){
    counter = gt_gtf_merge_(gtf, gtfs[i], counter);
    gt_gtf_delete(gtfs[i]);
  }
  free(gtfs);
  gt_string* const exon_t = gt_string_set_new("exon");
  gt_string* const transcript_t = gt_string_set_new("transcript");
  gt_string* const intron_t = gt_string_set_new("intron");
  // sort the refs
  GT_SHASH_BEGIN_ELEMENT_ITERATE(gtf->refs,shash_element,gt_gtf_ref) {
    // sort by start position
    gt_gtf_sort_by_start(shash_element->entries);
    uint64_t size = gt_vector_get_used(shash_element->entries);
    uint64_t i = 0;
    gt_shash* last_exons = gt_shash_new();   // transcript id -> last seen exon
    gt_shash* exons_counts = gt_shash_new(); // transcript id -> exon ordinal
    for(i=0; i<size; i++){
      gt_gtf_entry* entry = *gt_vector_get_elm(shash_element->entries, i, gt_gtf_entry*);
      if(entry->type != NULL && gt_string_equals(exon_t, entry->type)){
        gt_string* transcript_id = entry->transcript_id;
        if(transcript_id != NULL){
          // set exon id and count the exon for the transcript
          entry->num_children = gt_gtf_get_count_(exons_counts, transcript_id->buffer);
          gt_gtf_count_(exons_counts, transcript_id->buffer);
          if(!gt_shash_is_contained(last_exons, gt_string_get_string(transcript_id))){
            gt_shash_insert(last_exons, gt_string_get_string(transcript_id), entry, gt_gtf_entry*);
          }else{
            // consecutive exon of a known transcript: synthesize the intron
            // between the previous exon's end and this exon's start
            gt_gtf_entry* prev_exon = gt_shash_get_element(last_exons, gt_string_get_string(transcript_id));
            gt_gtf_entry* intron = gt_gtf_entry_new(prev_exon->end+1,
                entry->start-1,
                prev_exon->strand,
                intron_t);
            intron->transcript_id = transcript_id;
            intron->gene_id = prev_exon->gene_id;
            intron->uid = counter++;
            gt_vector_insert(shash_element->entries, intron, gt_gtf_entry*);
            gt_shash_remove(last_exons, gt_string_get_string(transcript_id),false);
            gt_shash_insert(last_exons, gt_string_get_string(transcript_id), entry, gt_gtf_entry*);
          }
          // add exon counts
          gt_gtf_entry* transcript = gt_gtf_get_transcript_by_id(gtf, gt_string_get_string(entry->transcript_id));
          if(transcript != NULL){
            transcript->num_children++;
            entry->length = transcript->length;
            transcript->length += (entry->end - entry->start) + 1;
          }
        }
      }else if(entry->type != NULL && gt_string_equals(transcript_t, entry->type)){
        // sum transcript counts for gene id
        if(entry->gene_id != NULL){
          gt_gtf_entry* gene = gt_gtf_get_gene_by_id(gtf, gt_string_get_string(entry->gene_id));
          gene->num_children++;
        }
      }
    }
    gt_shash_delete(last_exons, false);
    gt_shash_delete(exons_counts, true);
    // create a interval tree node for each ref (consumes the entry vector's
    // ordering but keeps the vector/entries owned by the ref)
    shash_element->node = gt_gtf_create_node(shash_element->entries);
  } GT_SHASH_END_ITERATE
  return gtf;
}
/*
* Binary search for start position
*/
/*
 * Binary search for start position: index of the first entry whose start is
 * >= t (lower bound) in a start-sorted vector.
 * NOTE(review): the `end` parameter is unused, and the final `else return m`
 * branch is unreachable (the loop only exits when l == h) — presumably
 * leftovers from an earlier version; confirm before relying on them.
 */
GT_INLINE uint64_t gt_gtf_bin_search(gt_vector* const entries, const uint64_t t, const uint64_t end){
  uint64_t used = gt_vector_get_used(entries);
  uint64_t l = 0;
  uint64_t h = used - 1;
  uint64_t m = 0;
  register gt_gtf_entry* e = *gt_vector_get_elm(entries, h, gt_gtf_entry*);
  while(l < h ){
    m = (l + h) / 2;
    e = *gt_vector_get_elm(entries, m, gt_gtf_entry*);
    if(e->start < t){
      l = m + 1;
    }else{
      h = m;
    }
  }
  e = *gt_vector_get_elm(entries, l, gt_gtf_entry*);
  if (h == l){
    return l;
  }else{
    return m;
  }
}
/**
 * Recursive interval-tree query: append to `target` every entry stored at
 * this node (or below) that overlaps [start, end]. Entries at a node are
 * start-sorted, so the scan breaks off once entry->start > end; children
 * are visited only when the query range can reach their side of the
 * midpoint.
 */
GT_INLINE void gt_gtf_search_node_(gt_gtf_node* node, const uint64_t start, const uint64_t end, gt_vector* const target){
  if(node == NULL) return;
  // add overlapping intervals from this node
  GT_VECTOR_ITERATE(node->entries_by_start, element, counter, gt_gtf_entry*){
    if((*element)->start > end){
      break;
    }
    gt_gtf_entry* e = *element;
    //if((*element)->start <= start && (*element)->end >= end){
    // any form of overlap or containment between [start,end] and [e->start,e->end]
    if((start < e->end && end > e->start)
        || (start >= e->start && end <=e->end)
        || (start < e->end && end >= e->end)
        || (start < e->start && end > e->end)){
      gt_vector_insert(target, (*element), gt_gtf_entry*);
    }
  }
  if(end < node->midpoint || start < node->midpoint){
    // search left tree
    gt_gtf_search_node_(node->left, start, end, target);
  }
  if (start > node->midpoint || end > node->midpoint){
    gt_gtf_search_node_(node->right, start, end, target);
  }
}
/**
 * Query all annotation entries on reference `ref` overlapping [start, end].
 * Results are appended to `target` (cleared first when `clear_target`).
 * Returns the number of elements in `target`; 0 if the reference is unknown.
 */
GT_INLINE uint64_t gt_gtf_search(const gt_gtf* const gtf, gt_vector* const target, char* const ref, const uint64_t start, const uint64_t end, const bool clear_target){
  if(clear_target)gt_vector_clear(target);
  // make sure the target ref is contained
  if (! gt_shash_is_contained(gtf->refs, ref)){
    return 0;
  }
  const gt_gtf_ref* const source_ref = gt_gtf_get_ref(gtf, ref);
  gt_gtf_search_node_(source_ref->node, start, end, target);
  return gt_vector_get_used(target);
}
// Increment the uint64 counter stored under `element`, creating it with
// value 1 on first sight.
GT_INLINE void gt_gtf_count_(gt_shash* const table, char* const element){
  if(gt_shash_is_contained(table, element)){
    uint64_t* count = gt_shash_get(table,element,uint64_t);
    (*count)++;
  }else{
    uint64_t* count = gt_malloc_uint64();
    *count = 1;
    gt_shash_insert(table, element, count, uint64_t);
  }
}
// Add `c` to the uint64 counter stored under `element`, creating it with
// value c on first sight.
// NOTE(review): identical to gt_gtf_count_sum_ below — candidates for
// consolidation.
GT_INLINE void gt_gtf_count_custom_(gt_shash* const table, char* const element, uint64_t c){
  if(!gt_shash_is_contained(table, element)){
    uint64_t* v = gt_malloc_uint64();
    *v = c;
    gt_shash_insert(table, element, v, uint64_t);
  }else{
    uint64_t* v = gt_shash_get(table,element,uint64_t);
    *v += c;
  }
}
// Add `value` to the uint64 counter stored under `element`, creating it
// with `value` on first sight.
GT_INLINE void gt_gtf_count_sum_(gt_shash* const table, char* const element, uint64_t value){
  if(!gt_shash_is_contained(table, element)){
    uint64_t* v = gt_malloc_uint64();
    *v = value;
    gt_shash_insert(table, element, v, uint64_t);
  }else{
    uint64_t* v = gt_shash_get(table,element,uint64_t);
    *v += value;
  }
}
/**
 * Add `weight` to the double accumulator stored under `element`, creating
 * it with `weight` on first sight.
 * FIX: the allocation used sizeof(double*) — the size of a pointer, not of
 * a double. On LP64 both are 8 bytes by coincidence; on 32-bit targets the
 * buffer was 4 bytes short of the 8-byte double written into it.
 */
GT_INLINE void gt_gtf_count_weight_(gt_shash* const table, char* const element, double weight){
  if(!gt_shash_is_contained(table, element)){
    double* v = malloc(sizeof *v);
    *v = weight;
    gt_shash_insert(table, element, v, double);
  }else{
    double* v = gt_shash_get(table,element,double);
    *v += weight;
  }
}
// Current value of the uint64 counter under `element`, or 0 if absent.
GT_INLINE uint64_t gt_gtf_get_count_(gt_shash* const table, char* const element){
  if(!gt_shash_is_contained(table, element)){
    return 0;
  }
  uint64_t* v = gt_shash_get(table,element,uint64_t);
  return *v;
}
// Current value of the double accumulator under `element`, or 0.0 if
// absent. NOTE(review): return type is float, so the stored double is
// narrowed here — callers apparently accept the precision loss.
GT_INLINE float gt_gtf_get_count_weight(gt_shash* const table, char* const element){
  if(!gt_shash_is_contained(table, element)){
    return 0.0;
  }
  double* v = gt_shash_get(table,element,double);
  return *v;
}
/**
 * Build per-template hit statistics from the raw search results in
 * `search_hits` and the per-gene counts in `all_genes`, then append the
 * hit record to `hits->exon_hits` (which takes ownership of template_hit).
 * Sets transcript counts, exon/protein-coding flags and the paired-gene /
 * single-transcript indicators on template_hit.
 */
GT_INLINE void gt_gtf_create_hit(gt_vector* search_hits, gt_shash* all_genes, gt_gtf_hits* hits, gt_gtf_hit* template_hit){
  template_hit->transcripts = gt_shash_new();
  template_hit->genes = gt_shash_new();
  template_hit->is_protein_coding = false;
  template_hit->hits_exon = false;
  bool counted_protein = false;
  // set gene count
  GT_SHASH_BEGIN_ITERATE(all_genes, gene_id, c, uint64_t){
    gt_gtf_count_sum_(template_hit->genes, gene_id, *c);
  }GT_SHASH_END_ITERATE;
  GT_VECTOR_ITERATE(search_hits, v, c, gt_gtf_entry*){
    gt_gtf_entry* e = *v;
    // count transcript
    if(e->transcript_id != NULL){
      gt_gtf_count_(template_hit->transcripts, gt_string_get_string(e->transcript_id));
    }
    if(!template_hit->hits_exon && strcmp(e->type->buffer, "exon") == 0){
      template_hit->hits_exon = true;
    }
    // protein-coding is decided by the first entry carrying a gene type
    if(!counted_protein && e->gene_type != NULL){
      template_hit->is_protein_coding |= (strcmp(e->gene_type->buffer, "protein_coding") == 0);
      hits->num_protein_coding++;
      counted_protein = true;
    }
  }
  template_hit->pairs_gene = (gt_shash_get_num_elements(all_genes) > 1); // more than one gene hit
  template_hit->pairs_transcript = (gt_shash_get_num_elements(template_hit->transcripts) == 1); // single transcript
  hits->num_paired_genes += (template_hit->pairs_gene ? 1 : 0);
  gt_vector_insert(hits->exon_hits, template_hit, gt_gtf_hit*);
}
// For every paired mmap of the template: gather annotation overlaps for both
// ends, derive junction statistics, and record one gt_gtf_hit in `hits`.
// `hits` is cleared first, so it ends up describing only this template.
GT_INLINE void gt_gtf_search_template_hits(const gt_gtf* const gtf, gt_gtf_hits* const hits, gt_template* const template_src){
gt_vector* const search_hits = gt_vector_new(32, sizeof(gt_gtf_entry*));
// reset the hits
gt_gtf_hits_clear(hits);
gt_shash* all_genes = gt_shash_new();
// process paired alignment
GT_TEMPLATE_ITERATE_MMAP__ATTR_(template_src,mmap,mmap_attr) {
gt_gtf_hit* template_hit = gt_gtf_hit_new();
template_hit->num_template_blocks = gt_template_get_num_blocks(template_src);
template_hit->mmap = mmap;
template_hit->map = NULL;
template_hit->map_attributes = mmap_attr;
// junctions: each end contributes (blocks - 1) splice junctions
template_hit->num_junctions = (gt_map_get_num_blocks(mmap[0]) + gt_map_get_num_blocks(mmap[1])) - 2;
template_hit->num_junctions_hits = gt_gtf_count_junction(gtf, mmap[0]) + gt_gtf_count_junction(gtf, mmap[1]);
// track the best annotated-junction ratio seen over all mmaps (-1.0 when
// the mmap has no junctions at all)
double junction_ratio = template_hit->num_junctions == 0 ? -1.0 : (double)template_hit->num_junctions_hits/(double)template_hit->num_junctions;
if(junction_ratio > 0 && junction_ratio > hits->junction_hit_ration) hits->junction_hit_ration = junction_ratio;
// reuse the gene-count table for this mmap (true frees the old values)
gt_shash_clear(all_genes, true);
gt_gtf_count_map(gtf, mmap[0], mmap[1], NULL, all_genes, NULL, NULL);
// first search clears search_hits, second one accumulates into it
gt_gtf_search_map(gtf, search_hits, mmap[0], true);
gt_gtf_search_map(gtf, search_hits, mmap[1], false);
gt_gtf_create_hit(search_hits, all_genes, hits, template_hit);
hits->num_genes += gt_shash_get_num_elements(all_genes);
}
gt_shash_delete(all_genes, true);
gt_vector_delete(search_hits);
}
/*
 * Single-end counterpart of gt_gtf_search_template_hits: for every map of the
 * alignment, gather annotation overlaps, derive junction statistics, and
 * record one gt_gtf_hit in `hits`. `hits` is cleared first.
 *
 * BUG FIX: the original passed `false` to gt_shash_clear/gt_shash_delete, so
 * the heap-allocated uint64_t counter values created by gt_gtf_count_map were
 * never freed (the table is their sole owner). The paired-end version already
 * frees them with `true`; this now does the same.
 */
GT_INLINE void gt_gtf_search_alignment_hits(const gt_gtf* const gtf, gt_gtf_hits* const hits, gt_alignment* const alignment){
  gt_vector* const search_hits = gt_vector_new(32, sizeof(gt_gtf_entry*));
  // reset the hits
  gt_gtf_hits_clear(hits);
  gt_shash* all_genes = gt_shash_new();
  // process single-end alignment
  GT_ALIGNMENT_ITERATE(alignment, map){
    gt_gtf_hit* template_hit = gt_gtf_hit_new();
    template_hit->map = map;
    template_hit->mmap = NULL;
    // a split map with N blocks spans N-1 junctions
    template_hit->num_junctions = gt_map_get_num_blocks(map) - 1;
    template_hit->num_junctions_hits = gt_gtf_count_junction(gtf, map);
    template_hit->num_template_blocks = 1;
    // track the best annotated-junction ratio over all maps (-1.0 = no junctions)
    double junction_ratio = template_hit->num_junctions == 0 ? -1.0 : (double)template_hit->num_junctions_hits/(double)template_hit->num_junctions;
    if(junction_ratio > 0 && junction_ratio > hits->junction_hit_ration) hits->junction_hit_ration = junction_ratio;
    gt_shash_clear(all_genes, true);
    gt_gtf_count_map(gtf, map, NULL, NULL, all_genes, NULL, NULL);
    gt_gtf_search_map(gtf, search_hits, map, true);
    gt_gtf_create_hit(search_hits, all_genes, hits, template_hit);
    hits->num_genes += gt_shash_get_num_elements(all_genes);
  }
  gt_shash_delete(all_genes, true);
  gt_vector_delete(search_hits);
}
GT_INLINE void gt_gtf_count_add_(gt_shash* const source, gt_shash* const target){
  // Merge all counters from `source` into `target`, summing values for keys
  // already present and copying (fresh allocation) those that are not.
  GT_SHASH_BEGIN_ITERATE(source, key, value, uint64_t){
    if(gt_shash_is_contained(target, key)){
      uint64_t* existing = gt_shash_get(target,key,uint64_t);
      *existing += (*value);
    }else{
      uint64_t* fresh = gt_malloc_uint64();
      *fresh = *value;
      gt_shash_insert(target, key, fresh, uint64_t);
    }
  }GT_SHASH_END_ITERATE;
}
GT_INLINE void gt_gtf_add_coverage(uint64_t* store, const uint64_t transcript_length, const uint64_t bucket){
  // Register one hit at position `bucket`: always in the "all lengths"
  // histogram, plus in exactly one transcript-length class below.
  store[GT_GTF_COVERGAGE_GET_BUCKET(GT_GTF_COVERAGE_LENGTH_ALL, bucket)] += 1;
  // The original used independent if-statements over disjoint ranges; an
  // else-if ladder selects the same single class with one comparison chain.
  if(transcript_length <= 150){
    store[GT_GTF_COVERGAGE_GET_BUCKET(GT_GTF_COVERAGE_LENGTH_150, bucket)] += 1;
  }else if(transcript_length <= 250){
    store[GT_GTF_COVERGAGE_GET_BUCKET(GT_GTF_COVERAGE_LENGTH_250, bucket)] += 1;
  }else if(transcript_length <= 500){
    store[GT_GTF_COVERGAGE_GET_BUCKET(GT_GTF_COVERAGE_LENGTH_500, bucket)] += 1;
  }else if(transcript_length <= 1000){
    store[GT_GTF_COVERGAGE_GET_BUCKET(GT_GTF_COVERAGE_LENGTH_1000, bucket)] += 1;
  }else if(transcript_length <= 2500){
    store[GT_GTF_COVERGAGE_GET_BUCKET(GT_GTF_COVERAGE_LENGTH_2500, bucket)] += 1;
  }else if(transcript_length <= 5000){
    store[GT_GTF_COVERGAGE_GET_BUCKET(GT_GTF_COVERAGE_LENGTH_5000, bucket)] += 1;
  }else if(transcript_length <= 7500){
    store[GT_GTF_COVERGAGE_GET_BUCKET(GT_GTF_COVERAGE_LENGTH_7500, bucket)] += 1;
  }else if(transcript_length <= 10000){
    store[GT_GTF_COVERGAGE_GET_BUCKET(GT_GTF_COVERAGE_LENGTH_10000, bucket)] += 1;
  }else if(transcript_length <= 15000){
    store[GT_GTF_COVERGAGE_GET_BUCKET(GT_GTF_COVERAGE_LENGTH_15000, bucket)] += 1;
  }else if(transcript_length <= 20000){
    store[GT_GTF_COVERGAGE_GET_BUCKET(GT_GTF_COVERAGE_LENGTH_20000, bucket)] += 1;
  }
  // transcripts longer than 20000 bases contribute only to the ALL histogram
}
// Accumulate gene-body coverage for one continuous map block: every exon of a
// transcript (>100bp) that overlaps the block contributes to the percent-wise
// coverage buckets of params->gene_body_coverage (and, for single-transcript
// genes, params->single_transcript_coverage). When `gene_id` is non-NULL only
// hits on that gene are counted.
GT_INLINE void gt_gtf_count_coverage_(const gt_gtf* const gtf, gt_map* const map, char* gene_id,
gt_gtf_count_parms* params){
// get coordinates
uint64_t start = gt_gtf_get_map_begin(map);
uint64_t end = gt_gtf_get_map_end(map);
if(start > end){
return; // happens for (1)>123*... trim followed by split
}
uint64_t map_length = (end-start)+1;
if(map_length <= 1){
// count only maps with at least 2 bases in length
return;
}
// store the search hits and search
gt_vector* const hits = gt_vector_new(32, sizeof(gt_gtf_entry*));
gt_gtf_search(gtf, hits, gt_map_get_seq_name(map), start, end, true);
GT_VECTOR_ITERATE(hits, e, i, gt_gtf_entry*){
gt_gtf_entry* hit = *e;
if(hit->transcript_id == NULL) continue; // no transcript id
if(hit->type == NULL || strcmp("exon", hit->type->buffer) != 0) continue; // no exon or no type
if(gene_id != NULL && (hit->gene_id == NULL || strcmp(hit->gene_id->buffer, gene_id) != 0)) continue; // we are looking for a specific gene_id
// short transcripts (<=100bp) are excluded from the coverage profile
gt_gtf_entry* transcript = gt_gtf_get_transcript_by_id(gtf, hit->transcript_id->buffer);
if(transcript == NULL || transcript->length <= 100){
continue;
}
if(hit->gene_id == NULL) continue; // no gene id on the hit
gt_gtf_entry* gene = gt_gtf_get_gene_by_id(gtf,hit->gene_id->buffer);
if(gene == NULL) continue; // no gene found
if(gene_id != NULL && strcmp(gene_id, gene->gene_id->buffer) != 0) continue; // we are looking for a specific hit
// map coordinates relative to the exon, clamped to the exon boundaries
uint64_t exon_length = (hit->end - hit->start) + 1;
int64_t rel_start = start - hit->start;
int64_t rel_end = (rel_start + map_length) - 1;
if(rel_start < 0){
rel_start = 0;
}
if(rel_end > exon_length){
rel_end = exon_length;
}
if(rel_start >= 0 && rel_end <= exon_length){
// contained in range
// count for exon count: translate relative positions into 0..99 buckets
uint64_t start_bucket = (((rel_start/(double)exon_length) * 100.0) + 0.5) - 1;
uint64_t end_bucket = (((rel_end/(double)exon_length) * 100.0) + 0.5) - 1;
uint64_t s = 0;
if(start_bucket < 100 && end_bucket >= start_bucket && end_bucket < 100){
// handle reverse strand and flip coordinates
if(hit->strand == REVERSE){
uint64_t tmp = start_bucket;
start_bucket = (GT_GTF_COVERAGE_BUCKETS - 1) - end_bucket;
end_bucket = (GT_GTF_COVERAGE_BUCKETS - 1) - tmp;
}
// scale up
// count for global count and make exon coordinates relative to transcript
// coordinate range
uint64_t hit_start_on_transcript = hit->length;
if(hit->strand == REVERSE){
// flip the bucket start if this is a gene on reverse strand
// the exon start/end is already flipped
// so we just flip the order of the exons here
hit_start_on_transcript = (transcript->length - hit_start_on_transcript) - exon_length;
}
// rescale exon-local buckets into transcript-wide buckets
uint64_t trans_start_bucket = ((((double)hit_start_on_transcript / (double)transcript->length) * 100.0) + 0.5) - 1;
double scale = (double)exon_length / (double) transcript->length;
start_bucket = (scale * (double)start_bucket) + trans_start_bucket;
end_bucket = (scale * (double)end_bucket) + trans_start_bucket;
if(start_bucket < 100 && end_bucket >= start_bucket && end_bucket < 100){
for(s=start_bucket;s<=end_bucket; s++){
//fprintf(stderr, ">>>GLOBAL COUNT %s : %"PRIu64" S/E: %"PRIu64" %"PRIu64" (%"PRIu64") Exon: %"PRIu64" %"PRIu64"\n", transcript->transcript_id->buffer, s, start, end, map_length, hit->start, hit->end);
// count gene body coverage
gt_gtf_add_coverage(params->gene_body_coverage, transcript->length, s);
// count single transcript
if( gene->num_children == 1){
gt_gtf_add_coverage(params->single_transcript_coverage, transcript->length, s);
}
}
}
}else{
gt_fatal_error_msg("Coverage overlap out of range %"PRIu64" %"PRIu64, start_bucket, end_bucket);
}
}
}
gt_vector_delete(hits);
}
/**
* This counts a single continuous block and takes the. Note that we do not perform any checks on
* splits/pairs here and simply count for this single continuous map
*
* @param gt_gtf* gtf the gtf reference
* @param gt_map* continuous map block
* @param gt_shash* type_counts the type counts, i.e exon/intron etc
* @param gt_shash* gene_counts the gene counts with the gene_id's hit by the map.
* @param gt_shash* exon_counts the exon counts with the gene_id's hit by the map.
* @param gt_shash* junction_counts the number of annotated junctions that are hit per gene
* @param float* overlap float pointer that is set to the maximum exon overlap of this block
* @return uint64_t num_gene_exons number of unique gene_ids hit by exons
*/
GT_INLINE uint64_t gt_gtf_count_map_(const gt_gtf* const gtf, gt_map* const map,
gt_shash* const type_counts,
gt_shash* const gene_counts,
gt_shash* const exon_counts,
gt_shash* const junction_counts,
float* overlap, uint64_t total_map_length,
gt_gtf_count_parms* params){
// get coordinates
uint64_t start = gt_gtf_get_map_begin(map);
uint64_t end = gt_gtf_get_map_end(map);
if(start > end){
// degenerate block: record it and bail out
gt_gtf_count_(type_counts, GT_GTF_TYPE_EMPTY_BLOCK);
return 0; // happens for (1)>123*... where map starts with trim followed by split
}
uint64_t map_length = (end-start)+1;
// store the search hits and search
gt_vector* const hits = gt_vector_new(32, sizeof(gt_gtf_entry*));
gt_gtf_search(gtf, hits, gt_map_get_seq_name(map), start, end, true);
// we do a complete local count for this block
// and then merge the local count with the global count
// to be able to resolve genes/gene_types that are
// through wither the pair information or split information,
// assuming that the counts for the other pair and/or the other split
// are already contained in the globally presented count maps
gt_shash* const local_type_counts = gt_shash_new();
gt_shash* local_gene_counts = gt_shash_new();
gt_shash* local_exon_gene_counts = gt_shash_new();
float max_overlap = 0.0;
GT_VECTOR_ITERATE(hits, e, i, gt_gtf_entry*){
gt_gtf_entry* hit = *e;
// count type
gt_gtf_count_(local_type_counts, gt_string_get_string(hit->type));
// count gene id
if(hit->gene_id != NULL){
gt_gtf_count_(local_gene_counts, gt_string_get_string(hit->gene_id));
}
// count gene_id from exons
if(hit->type != NULL && hit->gene_id != NULL && strcmp("exon", hit->type->buffer) == 0){
if(gt_gtf_hits_junction(map, hit)){
gt_gtf_count_(junction_counts, gt_string_get_string(hit->gene_id));
}
gt_gtf_count_(local_exon_gene_counts, gt_string_get_string(hit->gene_id));
gt_gtf_count_(exon_counts, gt_string_get_string(hit->gene_id));
// fraction of this block covered by the exon (intersection / block length)
int64_t o = ((hit->end < end ? hit-> end : end) - (hit->start > start ? hit->start : start)) + 1;
float block_overlap = o <= 0 ? 0.0 : ((float)o)/((float)(map_length));
if(block_overlap > max_overlap) max_overlap = block_overlap;
if(block_overlap > 1.0){
gt_fatal_error_msg("Block overlap > 1.0\nMap : %"PRIu64" %"PRIu64" (%"PRIu64")\nExon :%"PRIu64" %"PRIu64" ", start, end, map_length, hit->start, hit->end);
}
}
}
// fold this block's best overlap into the caller's overall overlap,
// weighted by the block's share of the full map length
*overlap += (max_overlap * ( (float)map_length / (float) total_map_length));
if(*overlap > 1.000001){
gt_output_map_fprint_map(stderr, map, NULL); fprintf(stderr, "\n");
gt_fatal_error_msg("Block overlap > 1.0 :: %.10f\nMap length : %"PRIu64" Total length: %"PRIu64" max overlap: %.10f", *overlap, map_length, total_map_length, max_overlap);
}
uint64_t num_gene_hit_exons = gt_shash_get_num_elements(local_exon_gene_counts);
// count types and merge them with the global
// counts. NOTE that the order matters here, so
// we:
// 1. check for NA hits where nothing is found
// 2. count exon hits
// 3. count intron hits
// 4. count unknown if the hit was neither an intron nor exon hit
// all counting steps are exclusive, thats why the order matters!
if(gt_vector_get_used(hits) == 0){
// count 'NA' type if we did not hit anything
gt_gtf_count_(type_counts, GT_GTF_TYPE_NA);
}else if(gt_gtf_get_count_(local_type_counts, GT_GTF_TYPE_EXON) > 0){
gt_gtf_count_(type_counts, GT_GTF_TYPE_EXON);
}else if(gt_gtf_get_count_(local_type_counts, GT_GTF_TYPE_INTRON) > 0){
gt_gtf_count_(type_counts, GT_GTF_TYPE_INTRON);
}else{
gt_gtf_count_(type_counts, GT_GTF_TYPE_UNKNOWN);
}
// make gene counts based on exon hits if we found at least one
if(num_gene_hit_exons > 0){
GT_SHASH_BEGIN_KEY_ITERATE(local_exon_gene_counts, key){
gt_gtf_count_(gene_counts, key);
}GT_SHASH_END_ITERATE;
}else{
// add all gene counts
GT_SHASH_BEGIN_KEY_ITERATE(local_gene_counts, key){
gt_gtf_count_(gene_counts, key);
}GT_SHASH_END_ITERATE;
}
// if(params->single_transcript_coverage != NULL){
// gt_gtf_count_coverage_(gtf, map, NULL, params);
// }
gt_shash_delete(local_gene_counts, true);
gt_shash_delete(local_type_counts, true);
gt_shash_delete(local_exon_gene_counts, true);
gt_vector_delete(hits);
return num_gene_hit_exons;
}
GT_INLINE uint64_t gt_gtf_join_(gt_string* buf, char* base, bool multi_gene, uint64_t blocks){
  // Append `base` to `buf` `blocks` times, '^'-separated; each repetition gets
  // a "_mg" suffix when `multi_gene` is set. Returns the number of blocks.
  if(blocks == 0) return 0;
  const uint64_t base_len = strlen(base);
  uint64_t block_idx;
  for(block_idx = 0; block_idx < blocks; block_idx++){
    gt_string_right_append_string(buf, base, base_len);
    if(multi_gene){
      gt_string_right_append_string(buf, "_mg", 3);
    }
    // separator between repetitions, but not after the last one
    if(block_idx + 1 < blocks){
      gt_string_append_char(buf, '^');
    }
  }
  return blocks;
}
GT_INLINE double gt_gtf_count_get_sum_(gt_shash* table){
  // Sum all uint64_t counter values stored in the table (as a double).
  double total = 0;
  GT_SHASH_BEGIN_ELEMENT_ITERATE(table, value, uint64_t){
    total += *value;
  }GT_SHASH_END_ITERATE;
  return total;
}
GT_INLINE uint64_t gt_gtf_get_map_length(gt_map* const maps){
  // Sum the genomic span of every block of a (possibly split) map.
  uint64_t total_length = 0;
  GT_MAP_ITERATE(maps, block){
    const uint64_t block_start = gt_gtf_get_map_begin(block);
    const uint64_t block_end = gt_gtf_get_map_end(block);
    // skip degenerate blocks such as (1)>231*... where the map starts with a
    // trim followed by a split
    if(block_start <= block_end){
      total_length += (block_end - block_start) + 1;
    }
  }
  return total_length;
}
/**
* Count a map. This respects split maps and unifies gene_id's based on the
* the split. If the both sides of the split match multiple gene_ids but there is
* a common gene_id on both side, only that id is counted. Otherwise a count is set
* for all gene_ids.
* In addition to the counts, if a pattern string is given, it is filled with the type
* pattern with respect to split maps. For example:
*
* exon -> exon
* exon and intron (split map) -> exon^intron
* exon in multiple genes -> exon_mg
*
* The function returns the number of gene_ids hit by the map.
*
* The first map has to be specified, but the second one is options. If it is set,
* the second map block is also checked and counted.
*
*
* @param gt_gtf* gtf the gtf reference
* @param gt_map* map1 the first map
* @param gt_map* map2 the scond map
* @param gt_shash* type_counts the type counts
* @param gt_shash* gene_counts the gene counts
* @param gt_string pattern the pattern string filled based on the types
* @return uint64_t num_gene_hits the number of gene_ids hit by the map
*/
GT_INLINE uint64_t gt_gtf_count_map(const gt_gtf* const gtf, gt_map* const map1, gt_map* const map2,
gt_shash* const pattern_counts, gt_shash* const gene_counts,
gt_string* pattern, gt_gtf_count_parms* params){
// clear patterns
if(pattern != NULL)gt_string_clear(pattern);
// get number of blocks and ensure we have at least one
uint64_t blocks = gt_map_get_num_blocks(map1);
if(map2 != NULL){
blocks += gt_map_get_num_blocks(map2);
}
if(blocks == 0) return 0;
// local counts for all blocks
// and store the number of multi gene exon hits for each block
// in addition we create the base pattern per block here
gt_shash* const local_type_counts = gt_shash_new();
gt_shash* local_gene_counts = gt_shash_new();
gt_shash* local_gene_counts_1 = gt_shash_new();
gt_shash* local_gene_counts_2 = gt_shash_new();
gt_shash* local_junction_counts_1 = gt_shash_new();
gt_shash* local_junction_counts_2 = gt_shash_new();
gt_shash* local_exon_counts_1 = gt_shash_new();
gt_shash* local_exon_counts_2 = gt_shash_new();
uint64_t* const local_exon_gene_hits = malloc(blocks * sizeof(uint64_t));
gt_vector* const local_type_patterns = gt_vector_new(2, sizeof(char*));
uint64_t exons, introns, unknown, not_annotated, empty_blocks;
exons = introns = unknown = not_annotated = empty_blocks =0;
uint64_t i = 0;
float block_1_overlap = 0.0;
float block_2_overlap = 0.0;
uint64_t map_1_length = gt_gtf_get_map_length(map1);
// --- pass 1: count every block of map1 and record which type counter
// changed (by diffing running totals) to build the per-block pattern ---
GT_MAP_ITERATE(map1, map_block){
local_exon_gene_hits[i++] = gt_gtf_count_map_(gtf, map_block, local_type_counts, local_gene_counts_1, local_exon_counts_1,local_junction_counts_1, &block_1_overlap, map_1_length, params);
uint64_t _exons = exons + gt_gtf_get_count_(local_type_counts, GT_GTF_TYPE_EXON);
uint64_t _introns = introns + gt_gtf_get_count_(local_type_counts, GT_GTF_TYPE_INTRON);
uint64_t _unknown = unknown + gt_gtf_get_count_(local_type_counts, GT_GTF_TYPE_UNKNOWN);
uint64_t _not_annotated = not_annotated + gt_gtf_get_count_(local_type_counts, GT_GTF_TYPE_NA);
uint64_t _empty_block = empty_blocks + gt_gtf_get_count_(local_type_counts, GT_GTF_TYPE_EMPTY_BLOCK);
// add the pattern string based in the count value that changed
if(_exons > exons) gt_vector_insert(local_type_patterns, GT_GTF_TYPE_EXON, char*);
if(_introns > introns) gt_vector_insert(local_type_patterns, GT_GTF_TYPE_INTRON, char*);
if(_unknown > unknown) gt_vector_insert(local_type_patterns, GT_GTF_TYPE_UNKNOWN, char*);
if(_not_annotated > not_annotated) gt_vector_insert(local_type_patterns, GT_GTF_TYPE_NA, char*);
if(_empty_block > empty_blocks) gt_vector_insert(local_type_patterns, GT_GTF_TYPE_EMPTY_BLOCK, char*);
exons = _exons;
introns = _introns;
unknown = _unknown;
not_annotated = _not_annotated;
empty_blocks = _empty_block;
}
// if we hit more than one gene,
// try to unify the gene by checking the other blocks for
// overlaps. If we find genes that are covered by all the
// blocks we count only them.
if(gt_shash_get_num_elements(local_gene_counts_1) > 1){
gt_shash* merged_counts = gt_shash_new();
uint64_t blocks1 = gt_map_get_num_blocks(map1);
// search for the best junction hit
uint64_t hits_junctions = 0;
GT_SHASH_BEGIN_ITERATE(local_gene_counts_1, gene_id, count, uint64_t){
uint64_t m = gt_gtf_get_count_(local_junction_counts_1,gene_id);
if(*count == blocks1 && m > 0){
if(m > hits_junctions) hits_junctions = m;
}
}GT_SHASH_END_ITERATE;
// keep only genes covered by every block and (when junction evidence
// exists) with the maximal junction support
GT_SHASH_BEGIN_ITERATE(local_gene_counts_1, gene_id, count, uint64_t){
if(*count == blocks1 && (hits_junctions == 0 || gt_gtf_get_count_(local_junction_counts_1,gene_id) == hits_junctions)){
gt_gtf_count_sum_(merged_counts, gene_id, blocks1);
}
}GT_SHASH_END_ITERATE;
// if we found some unique ids that are covered by both
// we flip over to the merged counts
gt_shash_delete(local_gene_counts_1, true);
local_gene_counts_1 = merged_counts;
// we fliped so we reset the exon gene hit counts to ones as well
if(gt_shash_get_num_elements(merged_counts) > 0){
for(i=0;i<blocks1;i++){
if(local_exon_gene_hits[i] > 0) local_exon_gene_hits[i] = 1;
}
}
}
// --- pass 2: same per-block counting and unification for map2 (if paired) ---
if(map2 != NULL){
uint64_t map_2_length = gt_gtf_get_map_length(map2);
GT_MAP_ITERATE(map2, map_block){
local_exon_gene_hits[i++] = gt_gtf_count_map_(gtf, map_block, local_type_counts, local_gene_counts_2, local_exon_counts_2, local_junction_counts_2, &block_2_overlap, map_2_length, params);
uint64_t _exons = exons + gt_gtf_get_count_(local_type_counts, GT_GTF_TYPE_EXON);
uint64_t _introns = introns + gt_gtf_get_count_(local_type_counts, GT_GTF_TYPE_INTRON);
uint64_t _unknown = unknown + gt_gtf_get_count_(local_type_counts, GT_GTF_TYPE_UNKNOWN);
uint64_t _not_annotated = not_annotated + gt_gtf_get_count_(local_type_counts, GT_GTF_TYPE_NA);
uint64_t _empty_block = empty_blocks + gt_gtf_get_count_(local_type_counts, GT_GTF_TYPE_EMPTY_BLOCK);
// add the pattern string based in the count value that changed
if(_exons > exons) gt_vector_insert(local_type_patterns, GT_GTF_TYPE_EXON, char*);
if(_introns > introns) gt_vector_insert(local_type_patterns, GT_GTF_TYPE_INTRON, char*);
if(_unknown > unknown) gt_vector_insert(local_type_patterns, GT_GTF_TYPE_UNKNOWN, char*);
if(_not_annotated > not_annotated) gt_vector_insert(local_type_patterns, GT_GTF_TYPE_NA, char*);
if(_empty_block > empty_blocks) gt_vector_insert(local_type_patterns, GT_GTF_TYPE_EMPTY_BLOCK, char*);
exons = _exons;
introns = _introns;
unknown = _unknown;
not_annotated = _not_annotated;
empty_blocks = _empty_block;
}
// unify the gene counts based on the number of blocks.
// the gene_counts are reduced to either the ones that are found in
// all blocks or they are kept as they are
if(gt_shash_get_num_elements(local_gene_counts_2) > 1){
gt_shash* merged_counts = gt_shash_new();
uint64_t blocks2 = gt_map_get_num_blocks(map2);
// search for the best junction hit
uint64_t hits_junctions = 0;
GT_SHASH_BEGIN_ITERATE(local_gene_counts_2, gene_id, count, uint64_t){
uint64_t m = gt_gtf_get_count_(local_junction_counts_2,gene_id);
if(*count == blocks2 && m > 0){
if(m > hits_junctions) hits_junctions = m;
}
}GT_SHASH_END_ITERATE;
GT_SHASH_BEGIN_ITERATE(local_gene_counts_2, gene_id, count, uint64_t){
if(*count == blocks2 && (hits_junctions == 0 || gt_gtf_get_count_(local_junction_counts_2,gene_id) == hits_junctions)){
gt_gtf_count_sum_(merged_counts, gene_id, blocks2);
}
}GT_SHASH_END_ITERATE;
// if we found some unique ids that are covered by both
// we flip over to the merged counts
gt_shash_delete(local_gene_counts_2, true);
local_gene_counts_2 = merged_counts;
if(gt_shash_get_num_elements(merged_counts) > 0){
uint64_t blocks1 = gt_map_get_num_blocks(map1);
// we flipped so we reset the exon gene hit counts to ones as well
for(i=blocks1;i<(blocks1+blocks2);i++){
if(local_exon_gene_hits[i] > 0) local_exon_gene_hits[i] = 1;
}
}
}
}
/**
 * Merge everything into a single merged map
 */
gt_shash* merged_counts = gt_shash_new();
uint64_t blocks1 = gt_map_get_num_blocks(map1);
uint64_t blocks2 = 0;
if(map2 != NULL){
blocks2 = gt_map_get_num_blocks(map2);
}
// average exon overlap over the two ends (or just end 1 when unpaired)
float overlap = (block_1_overlap + block_2_overlap) / (float) (map2==NULL?1.0:2.0);
uint64_t map2_hits = map2 != NULL ? gt_shash_get_num_elements(local_gene_counts_2) : 0;
// intersect end-1 genes with end-2 genes (or take all of end 1 when end 2
// hit nothing), subject to the optional minimum exon-overlap threshold
GT_SHASH_BEGIN_ITERATE(local_gene_counts_1, gene_id, count, uint64_t){
if( (gt_shash_is_contained(local_gene_counts_2, gene_id) || map2_hits == 0) && (params == NULL || params->exon_overlap <= 0.0 || overlap >= params->exon_overlap)){
uint64_t nv =*count + gt_gtf_get_count_(local_gene_counts_2, gene_id);
gt_gtf_count_sum_(merged_counts, gene_id, nv);
if(overlap > 1.000001){
gt_fatal_error_msg("Exon Overlap %.10f > 1.0 from %.10f %.10f!", overlap, block_1_overlap, block_2_overlap);
}
}
}GT_SHASH_END_ITERATE;
uint64_t unique_genes_between_pairs = gt_shash_get_num_elements(merged_counts);
// we found unique genes through the pair, so we can use
// the merged map to do the final counts
if(unique_genes_between_pairs > 0){
// we flip the exon gene hit counts in case
if(unique_genes_between_pairs == 1){
for(i=0;i<blocks;i++){
if(local_exon_gene_hits[i] > 0) local_exon_gene_hits[i] = 1;
}
}
// merge the gene counts weighted to a single map
GT_SHASH_BEGIN_KEY_ITERATE(merged_counts, gene_id){
double v = 0.0;
if(gt_shash_is_contained(local_exon_counts_1, gene_id) || ((params == NULL || params->exon_overlap <= 0.0) && gt_shash_is_contained(local_gene_counts_1, gene_id))){
v+= 1.0;
}
if(gt_shash_is_contained(local_exon_counts_2, gene_id) || ((params == NULL || params->exon_overlap <= 0.0 )&& gt_shash_is_contained(local_gene_counts_2, gene_id))){
v+=1.0;
}
if(v > 0.0) gt_gtf_count_weight_(local_gene_counts, gene_id, v);
}GT_SHASH_END_ITERATE;
}
// get the number of hits of this map
uint64_t num_gene_hits = gt_shash_get_num_elements(local_gene_counts);
if(pattern_counts != NULL){
// now iterate the blocks and construct final pattern
for(i=0; i<blocks; i++){
char* p = *(gt_vector_get_elm(local_type_patterns, i, char*));
if(strcmp(p, GT_GTF_TYPE_EMPTY_BLOCK) == 0) continue;
// for exons check that in case we have a single gene hit, its exons, in case of a multi-gene hit, append _mg if
// the multi gene hit comes from the current block
gt_gtf_join_(pattern, p, (strcmp("exon",p) == 0) ? ((num_gene_hits == 1) ? false : (local_exon_gene_hits[i] > 1)) : false, 1);
// add paired end spacer
if(map2 != NULL && i == (blocks1-1)){
gt_string_append_char(pattern, '|');
}else{
if(i<blocks-1){
gt_string_append_char(pattern, '^');
}
}
}
gt_string_append_eos(pattern);
// count global type based on the constructed pattern
gt_gtf_count_(pattern_counts, gt_string_get_string(pattern));
}
if(params != NULL && params->num_maps == 1){
// count junctions for single mapping reads
if(blocks1 > 1){
params->num_junctions += blocks1 - 1;
params->num_annotated_junctions += gt_gtf_count_junction(gtf, map1);
}
if(blocks2 > 1){
params->num_junctions += blocks2 - 1;
params->num_annotated_junctions += gt_gtf_count_junction(gtf, map2);
}
}
if(gene_counts != NULL){
// count the gene ids: keep the maximum weight seen per gene
GT_SHASH_BEGIN_ITERATE(local_gene_counts, key, e, double){
if(gt_shash_is_contained(gene_counts, key)){
double current = gt_gtf_get_count_weight(gene_counts, key);
if(current < *e){
// set to max count
gt_gtf_count_weight_(gene_counts, key, (*e)-current);
}
}else{
gt_gtf_count_weight_(gene_counts, key, *e);
}
}GT_SHASH_END_ITERATE;
}
if(params != NULL && params->single_transcript_coverage != NULL){
// do coverage counts for merged genes
GT_SHASH_BEGIN_KEY_ITERATE(local_gene_counts, key){
// count map1
GT_MAP_ITERATE(map1, map_block){
gt_gtf_count_coverage_(gtf, map_block, key, params);
}
if(map2 != NULL){
GT_MAP_ITERATE(map2, map_block){
gt_gtf_count_coverage_(gtf, map_block, key, params);
}
}
}GT_SHASH_END_ITERATE;
}
// cleanup
gt_vector_delete(local_type_patterns);
gt_shash_delete(local_gene_counts, true);
// cleanup
gt_shash_delete(local_gene_counts_1, true);
gt_shash_delete(local_gene_counts_2, true);
gt_shash_delete(local_exon_counts_1, true);
gt_shash_delete(local_exon_counts_2, true);
gt_shash_delete(local_junction_counts_1, true);
gt_shash_delete(local_junction_counts_2, true);
gt_shash_delete(local_type_counts, true);
gt_shash_delete(merged_counts, true);
free(local_exon_gene_hits);
return gt_shash_get_num_elements(gene_counts);
}
GT_INLINE uint64_t gt_gtf_count_alignment(gt_gtf* const gtf, gt_alignment* const alignment, gt_shash* const pattern_count, gt_shash* const gene_counts, gt_gtf_count_parms* params){
  // Count every map of a single-end alignment. Returns the gene-hit count
  // reported for the last processed map (0 when there are no maps).
  uint64_t num_hits = 0;
  gt_string* const type_pattern = gt_string_new(16);
  params->num_maps = gt_alignment_get_num_maps(alignment);
  GT_ALIGNMENT_ITERATE(alignment,map) {
    num_hits = gt_gtf_count_map(gtf, map, NULL, pattern_count, gene_counts, type_pattern, params);
    gt_string_clear(type_pattern);
  }
  gt_string_delete(type_pattern);
  return num_hits;
}
GT_INLINE uint64_t gt_gtf_count_template(gt_gtf* const gtf, gt_template* const template, gt_shash* const pattern_count, gt_shash* const gene_counts, gt_gtf_count_parms* params){
  // Count every paired mmap of the template. Returns the gene-hit count
  // reported for the last processed mmap (0 when there are no mmaps).
  uint64_t num_hits = 0;
  gt_string* const type_pattern = gt_string_new(16);
  params->num_maps = gt_template_get_num_mmaps(template);
  GT_TEMPLATE_ITERATE_MMAP__ATTR_(template,mmap,mmap_attr) {
    num_hits = gt_gtf_count_map(gtf, mmap[0], mmap[1], pattern_count, gene_counts, type_pattern, params);
    gt_string_clear(type_pattern);
  }
  gt_string_delete(type_pattern);
  return num_hits;
}
/*
 * Search the annotation for every block of a (possibly split) map and collect
 * the overlapping entries into `hits`.
 *
 * BUG FIX: the original iterated the blocks but queried `map` instead of the
 * iteration variable `block`, so every iteration searched the coordinates of
 * the whole map's first mapping position. It also forwarded `clean_target` to
 * every search, which — when true — wiped the hits of the previous blocks on
 * each iteration. We now query each block's coordinates and clear the target
 * vector only for the first search, accumulating afterwards.
 */
GT_INLINE void gt_gtf_search_map(const gt_gtf* const gtf, gt_vector* const hits, gt_map* const map, const bool clean_target){
  bool clear_hits = clean_target;
  GT_MAP_ITERATE(map, block){
    uint64_t start = gt_map_get_begin_mapping_position(block);
    uint64_t end = gt_map_get_end_mapping_position(block);
    gt_gtf_search(gtf, hits, gt_map_get_seq_name(map), start, end, clear_hits);
    clear_hits = false;
  }
}
GT_INLINE void gt_gtf_search_alignment(const gt_gtf* const gtf, gt_vector* const hits, gt_alignment* const alignment){
  // Run an annotation search for every map of the alignment. Each call passes
  // clean_target=true, i.e. each map's search starts from a cleared `hits`.
  GT_ALIGNMENT_ITERATE(alignment, current_map){
    gt_gtf_search_map(gtf, hits, current_map, true);
  }
}
// Search the annotation for all maps of a template. When the template reduces
// to a single-end alignment, the reduction macro runs the alignment search and
// RETURNS from this function; otherwise both ends (blocks 0 and 1) are searched.
GT_INLINE void gt_gtf_search_template(const gt_gtf* const gtf, gt_vector* const hits, gt_template* const template){
GT_TEMPLATE_IF_REDUCES_TO_ALIGNMENT(template, alignment){
gt_gtf_search_alignment(gtf,hits, alignment);
}GT_TEMPLATE_END_REDUCTION__RETURN;
gt_gtf_search_alignment(gtf,hits, gt_template_get_block(template, 0));
gt_gtf_search_alignment(gtf,hits, gt_template_get_block(template, 1));
}
|
mssql12_fmt_plug.c | /* Modified in August, 2012 by Dhiru Kholia (dhiru at openwall.com) for MS SQL 2012
*
* This software is Copyright (c) 2010 bartavelle, <bartavelle at bandecon.com>,
* and it is hereby released to the general public under the following terms:
* Redistribution and use in source and binary forms, with or without modification, are permitted.
*
* Modified by Mathieu Perrin (mathieu at tpfh.org) 09/06
* Microsoft MS-SQL05 password cracker
*
* UTF-8 support by magnum 2011, same terms as above
*
* Creating MS SQL 2012 hashes:
*
* sqlcmd -L
* sqlcmd -S <server> -U sa -P <password>
* 1> select pwdencrypt("openwall")
* 2> go
*
* Dumping hashes from MS SQL server 2012:
*
* sqlcmd -S <server> -U sa -P <password>
* 1> select * from sys.sql_logins
* 2> go
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_mssql12;
#elif FMT_REGISTERS_H
john_register_one(&fmt_mssql12);
#else
#include <string.h>
#include "arch.h"
//#undef _OPENMP
//#undef SIMD_COEF_32
//#undef SIMD_COEF_64
//#undef SIMD_PARA_SHA512
/*
* Only effective for SIMD.
* Undef to disable reversing steps for benchmarking.
*/
#define REVERSE_STEPS
#include "misc.h"
#include "params.h"
#include "common.h"
#include "formats.h"
#include "options.h"
#include "unicode.h"
#include "sha2.h"
#include "johnswap.h"
#include "simd-intrinsics.h"
#include "memdbg.h"
#ifdef _OPENMP
#include <omp.h>
#ifdef SIMD_COEF_64
#ifndef OMP_SCALE
#define OMP_SCALE 2048
#endif
#else
#ifndef OMP_SCALE
#define OMP_SCALE 1024 // tuned K8-dual HT
#endif
#endif
#endif
#define FORMAT_LABEL "mssql12"
#define FORMAT_NAME "MS SQL 2012/2014"
#define ALGORITHM_NAME "SHA512 " SHA512_ALGORITHM_NAME
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH 0
#define PLAINTEXT_LENGTH ((111 - SALT_SIZE) / 2)
#define CIPHERTEXT_LENGTH 54 + 44 * 2
#define BINARY_SIZE 8
#define DIGEST_SIZE 64
#define BINARY_ALIGN 8
#define SALT_SIZE 4
#define SALT_ALIGN 4
#ifdef SIMD_COEF_64
#define MIN_KEYS_PER_CRYPT (SIMD_COEF_64*SIMD_PARA_SHA512)
#define MAX_KEYS_PER_CRYPT (SIMD_COEF_64*SIMD_PARA_SHA512)
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif
#ifndef SHA_BUF_SIZ
#define SHA_BUF_SIZ 16
#endif
/* Self-test vectors: MS SQL 2012/2014 hash strings ("0x0200" + 8 hex salt
 * digits + 128 hex SHA-512 digits) paired with their known plaintexts. */
static struct fmt_tests tests[] = {
	{"0x0200F733058A07892C5CACE899768F89965F6BD1DED7955FE89E1C9A10E27849B0B213B5CE92CC9347ECCB34C3EFADAF2FD99BFFECD8D9150DD6AACB5D409A9D2652A4E0AF16", "Password1!"},
	{"0x0200AB3E1F9028A739EEF62ABF672427276A32D5EDD349E638E7F2CD81DAA247CFE20EE4E3B0A30B2D0AE3C3FA010E61752F1BF45E045041F1B988C083C7F118527E3E5F0562", "openwall"},
	/* hashes from https://hashcat.net/forum */
	{"0x02006BF4AB05873FF0C8A4AFD1DC5912CBFDEF62E0520A3353B04E1184F05C873C9C76BBADDEAAC1E9948C7B6ABFFD62BFEFD7139F17F6AFE10BE0FEE7A178644623067C2423", "carlos"},
	{"0x0200935819BA20F1C7289CFF2F8FF9F0E40DA5E6D04986F988CFE6603DA0D2BC0160776614763198967D603FBD8C103151A15E70D18E7B494C7F13F16804A7A4EB206084E632", "test"},
	{"0x0200570AC969EF7C6CCB3312E8BEDE1D635EB852C06496957F0FA845B20FCD1C7C457474A5B948B68C47C2CB704D08978871F532C9EB11199BB5F56A06AC915C3799DB8A64C1", "test1"},
	{"0x0200A56045DBCD848E297FA8D06E7579D62B7129928CA0BC5D232A7320972EF5A5455C01411B8D3A7FF3D18A55058A12FAEE5DA410AFE6CE61FF5C39E5FF57CD3EDD57DB1C3B", "test2"},
	{"0x020059799F1B6D897BE2C5A76D3FFDC52B308190E82FA01F2FA51129B4863A7EE21B3FF6FE9F7850976045237805F338DD36DC9345B429F47A402614C6F2F2B02C56DF14C4F4", "Paul"},
	{"0x0200881E2999DD8E3583695F405696257B99559953705A34D774C15AC1D42699BB77BC56DB5F657751335C1B350890E643790553B60329CAE7A2E7D3C04CF8856C4DB0058723", "DBAmaster"},
	{"0x0200D648446E70180A6DFB6DF14DB38623EBFE490FE445751900FD5DC45A2B5D20D7AFFE8C6FFC2890BAE1AF34430A21F2F1E4DE50E25757FDB4789716D8D85C6985A00BC454", "database"},
	{"0x02008AC3B9DC7B67EF9D3C1D25D8007A4B957D5BD61D71E5E9DA08D9F8F012EDDAD168E1CADD93D4627433FBFEE8BCF6CBB42D5B9A31886FC5FF7F970B164F4B5815E03D6DE7", "jhl9mqe5"},
	{"0x020094C4D05A082DB1362B1A972C5D5F1C04C527090A7427E93C13AFEC705A011D8980E994FA647C7D44E25A427246218E25674571DB1710E49C713FB17129549C29E303086A", "coldfusion"},
	{"0x0200B9BD5C85918D9BEE84417957618FBA1CB80B71E81550FAE09AD027B4089017CD6461D8EC9509873C2D5096CDBE8F16E4EFA9035C35F9F4917CE58DB99DC6836CEA7483A7", "sql2005"},
	{NULL}
};

/* Salt currently under test; installed by set_salt(). */
static unsigned char cursalt[SALT_SIZE];
#ifdef SIMD_COEF_64
/* One flat 16x64-bit SHA-512 input block per candidate key. */
static uint64_t (*saved_key)[SHA_BUF_SIZ];
/* Interleaved SIMD output state (see HASH_IDX/BASE_IDX for layout). */
static uint64_t (*crypt_out);
static int max_keys;
/* Nonzero when key buffers changed and salt/padding must be re-appended. */
static int new_keys;
#else
/* UTF-16 key followed in-place by the 4-byte salt. */
static char (*saved_key)[(PLAINTEXT_LENGTH + 1) * 2 + SALT_SIZE];
static uint64_t (*crypt_out)[DIGEST_SIZE / 8];
/* Byte length of each stored UTF-16 key. */
static int *saved_len;
#endif
/*
 * Accept only hashes of the exact form "0x0200" followed by
 * CIPHERTEXT_LENGTH-6 hex digits.  Only upper-case hex digits are
 * accepted (the lower-case test was deliberately disabled upstream),
 * matching the form in which MS SQL dumps these hashes.
 */
static int valid(char *ciphertext, struct fmt_main *self)
{
	int pos;

	if (strncmp(ciphertext, "0x0200", 6) != 0)
		return 0;
	if (strnlen(ciphertext, CIPHERTEXT_LENGTH + 1) != CIPHERTEXT_LENGTH)
		return 0;
	for (pos = 6; pos < CIPHERTEXT_LENGTH; pos++) {
		char c = ciphertext[pos];
		int is_digit = (c >= '0' && c <= '9');
		/* lower-case a-f intentionally rejected */
		int is_upper_hex = (c >= 'A' && c <= 'F');

		if (!is_digit && !is_upper_hex)
			return 0;
	}
	return 1;
}
/*
 * Install the salt used by subsequent crypt_all() calls.  In the SIMD
 * build the salt is baked into the flat key buffers, so changing it
 * forces them to be rebuilt (new_keys).
 */
static void set_salt(void *salt)
{
	memcpy(cursalt, salt, SALT_SIZE);
#ifdef SIMD_COEF_64
	new_keys = 1;
#endif
}
/*
 * Decode the 4-byte binary salt from hex positions 6..13 of the
 * ciphertext.  Returns a pointer to a lazily-allocated static buffer
 * (callers copy it immediately, so the shared buffer is acceptable).
 */
static void *get_salt(char *ciphertext)
{
	static unsigned char *salt_buf;
	int i;

	if (!salt_buf)
		salt_buf = mem_alloc_tiny(SALT_SIZE, MEM_ALIGN_WORD);
	for (i = 0; i < SALT_SIZE; i++) {
		int hi = atoi16[ARCH_INDEX(ciphertext[2 * i + 6])];
		int lo = atoi16[ARCH_INDEX(ciphertext[2 * i + 7])];

		salt_buf[i] = (unsigned char)(hi * 16 + lo);
	}
	return salt_buf;
}
static void set_key_enc(char *_key, int index);
/*
 * One-time format setup: scales the key counts for OpenMP, allocates
 * key/result buffers (SIMD-aligned in the SIMD build), and swaps in the
 * encoding-aware key setter when the target encoding is not plain
 * ASCII/ISO-8859-1.
 */
static void init(struct fmt_main *self)
{
#if defined (_OPENMP)
	int omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
#ifdef SIMD_COEF_64
	saved_key = mem_calloc_align(self->params.max_keys_per_crypt,
	                             sizeof(*saved_key),
	                             MEM_ALIGN_SIMD);
	/* 8 x 64-bit words of SHA-512 state kept per candidate */
	crypt_out = mem_calloc_align(self->params.max_keys_per_crypt,
	                             8 * sizeof(uint64_t),
	                             MEM_ALIGN_SIMD);
	max_keys = self->params.max_keys_per_crypt;
#else
	saved_key = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*saved_key));
	crypt_out = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*crypt_out));
	saved_len = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*saved_len));
#endif
	/* UTF-8 input may need up to 3 bytes per UTF-16 code unit */
	if (options.target_enc == UTF_8)
		self->params.plaintext_length = MIN(125, PLAINTEXT_LENGTH * 3);
	if (options.target_enc != ISO_8859_1 &&
	    options.target_enc != ASCII)
		self->methods.set_key = set_key_enc;
}
/* Release everything allocated by init(). */
static void done(void)
{
#ifndef SIMD_COEF_64
	MEM_FREE(saved_len);
#endif
	MEM_FREE(crypt_out);
	MEM_FREE(saved_key);
}
#ifdef SIMD_COEF_64
/* Zero all flat key buffers so stale key/salt/padding bytes from a
 * previous batch cannot leak into the next one (only the SIMD build
 * hashes in place and needs this). */
static void clear_keys(void)
{
	memset(saved_key, 0, sizeof(*saved_key) * max_keys);
}
#endif
/*
 * Fast-path key setter for ASCII/ISO-8859-1 input: widens each byte to
 * a UTF-16 code unit while copying.  The SIMD variant writes straight
 * into the flat SHA-512 input block and stores the final message bit
 * length (UTF-16 key + salt) in word 15, as SIMDSHA512body expects
 * with SSEi_FLAT_IN.
 */
static void set_key(char *_key, int index)
{
#ifndef SIMD_COEF_64
	/* ASCII or ISO-8859-1 to UCS-2 */
	UTF8 *s = (UTF8*)_key;
	UTF16 *d = (UTF16*)saved_key[index];

	for (saved_len[index] = 0; s[saved_len[index]]; saved_len[index]++)
#if ARCH_LITTLE_ENDIAN
		d[saved_len[index]] = s[saved_len[index]];
#else
		/* big-endian host: keep UTF-16LE byte order in memory */
		d[saved_len[index]] = s[saved_len[index]] << 8;
#endif
	d[saved_len[index]] = 0;
	/* byte length of the UTF-16 key, not character count */
	saved_len[index] <<= 1;
#else
	uint64_t *keybuffer = saved_key[index];
	unsigned short *w16 = (unsigned short*)keybuffer;
	UTF8 *key = (UTF8*)_key;
	int len = 0;

	/* copy (including the terminating NUL) while counting characters */
	while ((*w16++ = *key++))
		len++;
	/* SHA-512 message length in bits: (UTF-16 key + salt) * 8 */
	keybuffer[15] = ((len << 1) + SALT_SIZE) << 3;
	new_keys = 1;
#endif
}
/*
 * Encoding-aware key setter, installed by init() when the target
 * encoding is not ASCII/ISO-8859-1: converts the candidate to UTF-16,
 * falling back to the length of the converted prefix when conversion
 * reports an error (negative return).
 */
static void set_key_enc(char *_key, int index)
{
#ifndef SIMD_COEF_64
	/* Any encoding -> UTF-16 */
	saved_len[index] = enc_to_utf16((UTF16*)saved_key[index],
	                                PLAINTEXT_LENGTH,
	                                (unsigned char*)_key, strlen(_key));
	if (saved_len[index] < 0)
		saved_len[index] = strlen16((UTF16*)saved_key[index]);
	/* byte length of the UTF-16 key */
	saved_len[index] <<= 1;
#else
	uint64_t *keybuffer = saved_key[index];
	UTF16 *w16 = (UTF16*)keybuffer;
	UTF8 *key = (UTF8*)_key;
	int len;

	len = enc_to_utf16(w16, PLAINTEXT_LENGTH, key, strlen(_key));
	if (len < 0)
		len = strlen16(w16);
	/* message bit length (UTF-16 key + salt), stored in word 15 */
	keybuffer[15] = ((len << 1) + SALT_SIZE) << 3;
	new_keys = 1;
#endif
}
/*
 * Return candidate `index` converted back from UTF-16 to the target
 * encoding.  The SIMD build recovers the key length from the stored
 * message bit-length word and copies out of the flat buffer first.
 * Returns a pointer to a static buffer.
 */
static char *get_key(int index)
{
#ifndef SIMD_COEF_64
	((UTF16*)saved_key[index])[saved_len[index]>>1] = 0;
	return (char*)utf16_to_enc((UTF16*)saved_key[index]);
#else
	uint64_t *keybuffer = saved_key[index];
	UTF16 *w16 = (UTF16*)keybuffer;
	static UTF16 out[PLAINTEXT_LENGTH + 1];
	unsigned int i, len;

	/* bits -> bytes, minus salt, -> UTF-16 code units */
	len = ((keybuffer[15] >> 3) - SALT_SIZE) >> 1;
	for (i = 0; i < len; i++)
		out[i] = w16[i];
	out[i] = 0;
	return (char*)utf16_to_enc(out);
#endif
}
/*
 * Decode the 128 hex digits following "0x0200" + salt into the raw
 * 64-byte SHA-512 digest.  In the SIMD build the digest is byte-swapped
 * to big-endian and (with REVERSE_STEPS) partially unwound by
 * sha512_reverse() so it compares directly against the reversed state
 * crypt_all() produces.  Returns a pointer to a static buffer.
 */
static void *get_binary(char *ciphertext)
{
	static uint64_t out[SHA_BUF_SIZ];
	char *realcipher = (char*)out;
	int i;

	for (i = 0;i<DIGEST_SIZE;i++)
		realcipher[i] = atoi16[ARCH_INDEX(ciphertext[i*2+14])]*16 +
			atoi16[ARCH_INDEX(ciphertext[i*2+15])];
#ifdef SIMD_COEF_64
	alter_endianity_to_BE64 (realcipher, DIGEST_SIZE/8);
#ifdef REVERSE_STEPS
	sha512_reverse(out);
#endif
#endif
	return (void *)realcipher;
}
#define BASE_IDX (((unsigned int)index&(SIMD_COEF_64-1))+(unsigned int)index/SIMD_COEF_64*8*SIMD_COEF_64)
#ifndef REVERSE_STEPS
#undef SSEi_REVERSE_STEPS
#define SSEi_REVERSE_STEPS 0
#endif
/*
 * Hash every candidate with the current salt: SHA512(UTF-16(key) . salt).
 * SIMD build: when keys changed, append the salt and the 0x80 padding
 * marker in-place to each flat key buffer, then hash whole vectors at
 * once.  Scalar build: one SHA512 per key.
 * NOTE(review): new_keys is read inside the parallel loop and cleared
 * after it without synchronization — presumably safe because its value
 * is constant across one crypt_all() call; confirm.
 */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;

#ifdef _OPENMP
#pragma omp parallel for
#endif
#if defined(_OPENMP) || PLAINTEXT_LENGTH > 1
	for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT)
#endif
	{
#ifdef SIMD_COEF_64
		if (new_keys) {
			int i;
			for (i = 0; i < MAX_KEYS_PER_CRYPT; i++) {
				uint64_t *keybuffer = saved_key[index + i];
				unsigned char *wucp = (unsigned char*)keybuffer;
				/* stored bit length -> byte length of the key alone */
				int j, len = (keybuffer[15] >> 3) - SALT_SIZE;
				if (len >= 0)
					for (j = 0; j < SALT_SIZE; j++)
						wucp[len + j] = cursalt[j];
				/* SHA-512 padding start marker after key+salt */
				wucp[len + 4] = 0x80;
			}
		}
		SIMDSHA512body(&saved_key[index], &crypt_out[BASE_IDX], NULL, SSEi_REVERSE_STEPS | SSEi_FLAT_IN);
#else
		SHA512_CTX ctx;
		/* salt sits right after the UTF-16 key in the same buffer */
		memcpy(saved_key[index]+saved_len[index], cursalt, SALT_SIZE);
		SHA512_Init(&ctx );
		SHA512_Update(&ctx, saved_key[index], saved_len[index]+SALT_SIZE );
		SHA512_Final((unsigned char *)crypt_out[index], &ctx);
#endif
	}
#ifdef SIMD_COEF_64
	new_keys = 0;
#endif
	return count;
}
/* Locate candidate `index` in the interleaved SIMD output: lane within
 * the vector plus the base of its 8-word-per-lane vector group. */
#define HASH_IDX (((unsigned int)index&(SIMD_COEF_64-1))+(unsigned int)index/SIMD_COEF_64*8*SIMD_COEF_64)

/* Partial-hash accessors over the first 64-bit output word, used by the
 * cracker's hash tables at increasing resolutions. */
#ifdef SIMD_COEF_64
static int get_hash_0 (int index) { return crypt_out[HASH_IDX] & PH_MASK_0; }
static int get_hash_1 (int index) { return crypt_out[HASH_IDX] & PH_MASK_1; }
static int get_hash_2 (int index) { return crypt_out[HASH_IDX] & PH_MASK_2; }
static int get_hash_3 (int index) { return crypt_out[HASH_IDX] & PH_MASK_3; }
static int get_hash_4 (int index) { return crypt_out[HASH_IDX] & PH_MASK_4; }
static int get_hash_5 (int index) { return crypt_out[HASH_IDX] & PH_MASK_5; }
static int get_hash_6 (int index) { return crypt_out[HASH_IDX] & PH_MASK_6; }
#else
static int get_hash_0(int index) { return (crypt_out[index])[0] & PH_MASK_0; }
static int get_hash_1(int index) { return (crypt_out[index])[0] & PH_MASK_1; }
static int get_hash_2(int index) { return (crypt_out[index])[0] & PH_MASK_2; }
static int get_hash_3(int index) { return (crypt_out[index])[0] & PH_MASK_3; }
static int get_hash_4(int index) { return (crypt_out[index])[0] & PH_MASK_4; }
static int get_hash_5(int index) { return (crypt_out[index])[0] & PH_MASK_5; }
static int get_hash_6(int index) { return (crypt_out[index])[0] & PH_MASK_6; }
#endif

/* Same partial hashes computed over a decoded binary (from get_binary). */
static int binary_hash_0(void *binary) { return ((uint64_t*)binary)[0] & PH_MASK_0; }
static int binary_hash_1(void *binary) { return ((uint64_t*)binary)[0] & PH_MASK_1; }
static int binary_hash_2(void *binary) { return ((uint64_t*)binary)[0] & PH_MASK_2; }
static int binary_hash_3(void *binary) { return ((uint64_t*)binary)[0] & PH_MASK_3; }
static int binary_hash_4(void *binary) { return ((uint64_t*)binary)[0] & PH_MASK_4; }
static int binary_hash_5(void *binary) { return ((uint64_t*)binary)[0] & PH_MASK_5; }
static int binary_hash_6(void *binary) { return ((uint64_t*)binary)[0] & PH_MASK_6; }
/* Quick scan over the batch: does any candidate's first 64 output bits
 * match the target binary's first 64 bits? */
static int cmp_all(void *binary, int count)
{
	unsigned int index;

	for (index = 0; index < count; index++)
#ifdef SIMD_COEF_64
		if (((uint64_t*)binary)[0] == crypt_out[HASH_IDX])
			return 1;
#else
		if ( ((uint64_t*)binary)[0] == crypt_out[index][0] )
			return 1;
#endif
	return 0;
}
/* Compare one candidate's partial (BINARY_SIZE = 8 byte) hash against
 * the target; full confirmation happens in cmp_exact(). */
static int cmp_one(void *binary, int index)
{
#ifdef SIMD_COEF_64
	return (((uint64_t*)binary)[0] == crypt_out[HASH_IDX]);
#else
	return !memcmp(binary, crypt_out[index], BINARY_SIZE);
#endif
}
/*
 * Full verification of one candidate.  The SIMD build keeps only 64
 * bits of (reversed) state per key, so the complete digest is
 * recomputed here from the recovered plaintext and transformed the same
 * way get_binary() transforms the target (BE swap + sha512_reverse)
 * before the full-length compare.
 */
static int cmp_exact(char *source, int index)
{
	uint64_t *binary = get_binary(source);
#if SIMD_COEF_64
	char *key = get_key(index);
	UTF16 wkey[PLAINTEXT_LENGTH];
	SHA512_CTX ctx;
	/* local shadow of the file-scope crypt_out, deliberate */
	uint64_t crypt_out[DIGEST_SIZE / sizeof(uint64_t)];
	int len;

	len = enc_to_utf16(wkey, PLAINTEXT_LENGTH, (UTF8*)key, strlen(key));
	if (len < 0)
		len = strlen16(wkey);
	/* UTF-16 byte length */
	len *= 2;
	SHA512_Init(&ctx);
	SHA512_Update(&ctx, wkey, len);
	SHA512_Update(&ctx, cursalt, SALT_SIZE);
	SHA512_Final((unsigned char*)crypt_out, &ctx);
	alter_endianity_to_BE64(crypt_out, DIGEST_SIZE/8);
#ifdef REVERSE_STEPS
	sha512_reverse(crypt_out);
#endif
	return !memcmp(binary, crypt_out, DIGEST_SIZE);
#else
	return !memcmp(binary, crypt_out[index], DIGEST_SIZE);
#endif
}
/*
 * Bucket a 4-byte salt into SALT_HASH_SIZE bins.  Skipping the lowest
 * byte (>> 8) gave a much better distribution on a huge real-world set
 * originally analysed for mssql05.
 */
static int salt_hash(void *salt)
{
	uint32_t s;

	/* memcpy load: same value as *(uint32_t *)salt, without the
	 * type-punning cast (salt is SALT_ALIGN=4 aligned anyway) */
	memcpy(&s, salt, sizeof(s));
	return (int)((s >> 8) & (SALT_HASH_SIZE - 1));
}
/*
 * John the Ripper format descriptor for MS SQL 2012/2014 hashes:
 * SHA-512 over UTF-16LE(password) followed by a 4-byte salt.
 */
struct fmt_main fmt_mssql12 = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		/* FMT_UNICODE/FMT_UTF8: keys are converted to UTF-16 internally */
		FMT_CASE | FMT_8_BIT | FMT_UNICODE | FMT_UTF8 | FMT_OMP,
		{ NULL },
		{ NULL },
		tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		get_binary,
		get_salt,
		{ NULL },
		fmt_default_source,
		{
			binary_hash_0,
			binary_hash_1,
			binary_hash_2,
			binary_hash_3,
			binary_hash_4,
			binary_hash_5,
			binary_hash_6
		},
		salt_hash,
		NULL,
		set_salt,
		set_key,
		get_key,
		/* SIMD hashes in place, so stale buffers must be cleared */
#ifdef SIMD_COEF_64
		clear_keys,
#else
		fmt_default_clear_keys,
#endif
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif /* plugin stanza */
|
utils.h | //
// Copyright (c) 2020 xinyan. All rights reserved.
// Created on 2020/4/2.
//
#ifndef PROGRESS_BAR_H_
#define PROGRESS_BAR_H_
#pragma once
#include <algorithm>
#include <cassert>
#include <chrono>
#include <cmath>
#include <cstddef>
#include <fstream>
#include <iostream>
#include <numeric>
#include <random>
#include <sstream>
#include <stdexcept>
#include <string>
#include <vector>
#define Debug
#define verbose
//#define MULTI_THREAD
#define DELTA 0
//#define DELTA 0.001
// Debug-only bounds check: verifies a <= v < b and throws
// std::runtime_error otherwise.  Compiled out unless Debug is defined.
// Fixes the original condition `a >= b`, which compared the two bounds
// against each other instead of testing v against the upper bound b.
template <typename T >
inline void check_range(T v, T a, T b) {
#ifdef Debug
    if (v < a || v >= b) {
        if (v < a) {
            std::cerr << " v_" << v << " is smaller than " << a;
        } else {
            std::cerr << " v_" << v << " is bigger than " << b;
        }
        throw std::runtime_error("exceed range");
    }
#endif
}
using std::string;
using std::vector;
using std::chrono::steady_clock;
using std::chrono::microseconds;
using std::chrono::duration_cast;
typedef float ValueType;
typedef int int_t;
// Microsecond stopwatch backed by std::chrono::steady_clock.
class StopW {
    std::chrono::steady_clock::time_point time_begin;
 public:
    StopW() { reset(); }

    // Microseconds elapsed since construction or the last reset();
    // optionally restarts the clock afterwards.
    long getElapsedTimeMicro(bool reset_time) {
        const auto now = std::chrono::steady_clock::now();
        const long elapsed_us = static_cast<long>(
            std::chrono::duration_cast<std::chrono::microseconds>(
                now - time_begin).count());
        if (reset_time) {
            reset();
        }
        return elapsed_us;
    }

    // Restart timing from "now".
    void reset() { time_begin = std::chrono::steady_clock::now(); }
};
// Console progress bar: prints a header once, then emits up to 51 '*'
// characters as progress advances from 0 to `len` units.
class ProgressBar {
 public:
    ProgressBar(int len, string message): len_(len), cur_(0), star_(0) {
        std::cout << "0% 10 20 30 40 50 60 70 80 90 100%\t"
                  << message
                  << std::endl
                  << "|----|----|----|----|----|----|----|----|----|----|"
                  << std::endl;
    }

    // Advance by `i` units and print any newly earned stars; a newline
    // is emitted once the bar is complete (51st star).
    ProgressBar& update(int i) {
        cur_ += i;
        const int target = static_cast<int >(1.0 * cur_ / len_ * 50 + 1);
        if (target > star_) {
            while (star_ < target) {
                std::cout << '*';
                ++star_;
            }
            if (target == 51) {
                std::cout << std::endl;
            }
            std::cout << std::flush;
        }
        return *this;
    }

    ProgressBar& operator++() { return update(1); }
    ProgressBar& operator+=(int i) { return update(i); }

 private:
    int len_;   // total number of units
    int cur_;   // units consumed so far
    int star_;  // stars already printed
};
// Cheap existence probe: true when `name` can be opened for reading.
bool exists_test(const char* name) {
    return std::ifstream(name).good();
}
// Load a binary vector dataset (fvecs/ivecs-style layout: each record is
// a 4-byte int dimension header followed by `dimension` DataType values).
// Allocates `data` with new[] (caller owns it) and fills in `dimension`
// and `cardinality`.  Exits the process on any I/O or format error.
template <typename DataType>
void load_data(DataType*& data, int_t& dimension, int_t &cardinality, std::string input_path)
{
    // open positioned at the end so tellg() yields the file size
    std::ifstream fin(input_path.c_str(), std::ios::binary | std::ios::ate);
    if (!fin) {
        std::cout << "cannot open file " << input_path << std::endl;
        exit(1);
    }
    size_t fileSize = fin.tellg();
    fin.seekg(0, fin.beg);
    if (fileSize == 0) {
        std::cout << "file size is 0 " << input_path << std::endl;
        exit(1);
    }
    int dim;
    // dimensionality is taken from the first record's header
    fin.read(reinterpret_cast<char*>(&dim), sizeof(int));
    dimension = (size_t)dim;
    size_t bytesPerRecord = dimension * sizeof(DataType) + 4;  // +4: per-record header
    // the file must consist of whole records of identical size
    if (fileSize % bytesPerRecord != 0) {
        std::cout << "File not aligned" << std::endl;
        exit(1);
    }
    cardinality = fileSize / bytesPerRecord;
    data = new DataType[cardinality * dimension];
    // first record: its header was already consumed, read payload only
    fin.read((char*)data, sizeof(DataType) * dimension);
    for (int i = 1; i < cardinality; ++i) {
        fin.read((char*)&dim, 4);
        assert(dim == dimension);  // all records must share one dimension
        fin.read((char*)(data + i * dimension), sizeof(DataType) * dimension);
    }
    fin.close();
}
template<typename T>
bool interval_check(const T* xb_, const T* xq_,
const T* rg_, const int_t d) {
bool should_in = true;
for (int di = 0; di < d; ++di) {
T diff = *xb_++ - *xq_++;
T range = *rg_++;
if (diff > range || diff < -range) {
should_in = false;
break;
}
}
return should_in;
}
template<typename T>
bool interval_check(const T* xb_, const T* x_rg_,
const T* xq_, const T* q_rg_, const int_t d) {
bool should_in = true;
for (int di = 0; di < d; ++di) {
T diff = *xb_++ - *xq_++;
T range = *x_rg_++ + *q_rg_++;
if (diff > range || diff < -range) {
should_in = false;
break;
}
}
return should_in;
}
template<typename T, class Container>
std::vector<int_t >
interval_prune(const T* xb_, const T* xq_, const T* rg_,
const Container& xs_id, const int_t d) {
std::vector<int > res;
for (int xi : xs_id) {
if (interval_check(xb_ + xi * d, xq_, rg_, d)) {
res.push_back(xi);
}
}
return res;
}
// Brute-force range search: for each query, collect ids of all base
// points inside the per-query axis-aligned box xq +/- rg.  O(nq*nx*d).
// With MULTI_THREAD the query loop is parallelised; the progress-bar
// update stays inside `omp critical` because ProgressBar is not
// thread-safe.
template<typename T>
std::vector<std::vector<int_t > >
interval_search(const T* xb_, const T* xq_, const T* rg_,
                const int_t nx, const int_t nq, const int_t d) {
    std::vector<std::vector<int_t > > res(nq);
#ifdef verbose
    ProgressBar progress_bar(nq, "interval search");
#endif
#ifdef MULTI_THREAD
#pragma omp parallel for
#endif
    for (int qi = 0; qi < nq; ++qi) {
        for (int xi = 0; xi < nx; ++xi) {
            bool should_in = true;
            for (int di = 0; di < d; ++di) {
                T diff = xb_[xi * d + di] - xq_[qi * d + di];
                T range = rg_[qi * d + di];
                if (diff > range || diff < -range) {
                    should_in = false;
                    break;  // one out-of-range axis rejects the point
                }
            }
            if (should_in) {
                res[qi].push_back(xi);
            }
        }
#ifdef verbose
#pragma omp critical
        {
            ++progress_bar;
        }
#endif
    }
    return res;
}
// |t|^p for the supported exponents {1, 2, 3, 4, 8, 16}, computed by
// repeated squaring.  Any other p is now a compile-time error: the
// original fell off the end of the function for unsupported exponents,
// which is undefined behavior when the result is used.
template <size_t p>
float int_power(float t) {
    static_assert(p == 1 || p == 2 || p == 3 || p == 4 || p == 8 || p == 16,
                  "int_power: unsupported exponent");
    if constexpr (p == 1) {
        return std::abs(t);
    } else if constexpr (p == 2) {
        return t * t;
    } else if constexpr (p == 3) {
        t = std::abs(t);
        return t * t * t;
    } else if constexpr (p == 4) {
        t = t * t;
        return t * t;
    } else if constexpr (p == 8) {
        t = t * t;
        t = t * t;
        return t * t;
    } else {  // p == 16
        t = t * t;
        t = t * t;
        t = t * t;
        return t * t;
    }
}
template<size_t p = 2, bool WEIGHTED = true>
float weighted_dist(const float * q, const float* w, const float* x, const int_t d) {
float dist = 0.;
for (int i = 0; i < d; ++i) {
float t = (*q++ - *x++);
if constexpr (WEIGHTED) {
t /= (DELTA + *w++);
}
dist += int_power<p>(t);
}
return dist;
}
template <typename T>
vector<int_t> arg_sort(T* v, int_t n) {
// initialize original index locations
vector<int_t> idx(n);
iota(idx.begin(), idx.end(), 0);
// sort indexes based on comparing values in v
// using std::stable_sort instead of std::sort
// to avoid unnecessary index re-orderings
// when v contains elements of equal values
stable_sort(idx.begin(), idx.end(),
[&v](size_t i1, size_t i2) {return v[i1] < v[i2];});
return idx;
}
// Fill ptr[0..n) with i.i.d. draws from N(mean, std).  The engine is
// seeded once per process so successive calls continue one stream; the
// distribution is now constructed per call, so the requested mean/std
// are honoured every time (the original kept a static distribution that
// silently reused the parameters of the very first call).
template<typename T>
void normal_random_fill(T* ptr, int n, T mean, T std) {
    static std::random_device rd;
    static std::mt19937_64 eng(rd());
    std::normal_distribution<T > dist(mean, std);
    for (int i = 0; i < n; ++i) {
        ptr[i] = dist(eng);
    }
}
// Fill ptr[0..n) with i.i.d. draws from U[lower, upper).  The engine is
// seeded once per process; the distribution is now constructed per call
// so the requested bounds apply every time (the original kept a static
// distribution frozen with the first call's bounds).
template<typename T>
void uniform_random_fill(T* ptr, int n, T lower, T upper) {
    static std::random_device rd;
    static std::mt19937_64 eng(rd());
    std::uniform_real_distribution<T > dist(lower, upper);
    for (int i = 0; i < n; ++i) {
        ptr[i] = dist(eng);
    }
}
vector<vector<int_t > > read_list(const char* file_name) {
string line;
std::ifstream file_stream(file_name);
vector<vector<int_t > > res;
while(getline(file_stream, line)) {
std::istringstream iss(line);
res.emplace_back();
int_t id;
while (iss >> id) {
res.back().push_back(id);
}
}
return res;
}
void write_list(const char* file_name, vector<vector<int_t > > lists) {
std::ofstream file_stream(file_name);
if (file_stream.is_open()) {
for (vector<int_t> ids : lists) {
if (!ids.empty()) {
file_stream << ids[0];
}
for (int i = 1; i < ids.size(); ++i) {
file_stream << " " << ids[i];
}
file_stream << "\n";
}
file_stream.close();
}
else {
std::cout << "Unable to open file : " << file_name << std::endl;
}
}
template <bool Compute=false>
vector<vector<int_t > >
load_gt(const char* fg, const float* xb,
const float* xq, const float* rg,
int_t nb, int_t nq, int_t dimension) {
if constexpr (Compute) {
return interval_search<float >(xb, xq, rg, nb, nq, dimension);
}
if (exists_test(fg)) {
return read_list(fg);
} else {
StopW stop_w;
auto ground_truth = interval_search<float >(xb, xq, rg, nb, nq, dimension);
std::cout << "average brute force search time : "
<< stop_w.getElapsedTimeMicro(true) / nq << std::endl;
write_list(fg, ground_truth);
return ground_truth;
}
}
#endif //PROGRESS_BAR_H_
|
l8t1qa.c | #include<stdio.h>
#include "gdal.h"
#include<omp.h>
/* L8 QA bit [4]: cloud flag.
 * Returns 0 (not cloud) or 1 (cloud). */
unsigned int L8QA_cloud(unsigned int pixel) {
	return (pixel >> 4) & 0x01u;
}
/* L8 QA bits [5-6]: cloud confidence class.
 * 0 = not determined, 1 = no cloud (0-33%),
 * 2 = maybe cloud (34-66%), 3 = cloud (66-100%). */
unsigned int L8QA_cloud_confidence(unsigned int pixel) {
	return (pixel >> 5) & 0x03u;
}
/* L8 QA bits [7-8]: cloud-shadow confidence class.
 * 0 = not determined, 1 = no shadow (0-33%),
 * 2 = maybe shadow (34-66%), 3 = shadow (66-100%). */
unsigned int L8QA_cloud_shadow(unsigned int pixel) {
	return (pixel >> 7) & 0x03u;
}
/* L8 QA bits [11-12]: cirrus confidence class.
 * 0 = not determined, 1 = no cirrus (0-33%),
 * 2 = maybe cirrus (34-66%), 3 = cirrus (66-100%). */
unsigned int L8QA_cirrus_confidence(unsigned int pixel) {
	return (pixel >> 11) & 0x03u;
}
/* Print the command-line help text, one line per entry. */
void usage()
{
	static const char *help[] = {
		"-----------------------------------------",
		"--L8 Processing chain--OpenMP code----",
		"-----------------------------------------",
		"./L8 inL8b4 inL8b5 inL8b6 inL8B7 inL8_QA",
		"\toutL8vi outL8wi outL8LSWI outL8NBR2",
		"-----------------------------------------",
		"inL8b4\t\tL8 Band 4 UInt16 (Red)",
		"inL8b5\t\tL8 Band 5 UInt16 (NIR)",
		"inL8b6\t\tL8 Band 6 UInt16 (SWIR1)",
		"inL8b7\t\tL8 Band 7 UInt16 (SWIR2)",
		"inL8_QA\t\tL8_Qa UInt16",
		"outL8vi\tCloud removed L8 NDVI output [0-10000]",
		"outL8wi\tCloud removed L8 NDWI output [0-10000]",
		"outL8lswi\tCloud removed L8 LSWI output [0-10000]",
		"outL8nbr2\tCloud removed L8 NBR2 output [0-10000]",
		"-----------------------------------------",
	};
	size_t i;

	for (i = 0; i < sizeof(help) / sizeof(help[0]); i++)
		printf("%s\n", help[i]);
	return;
}
/*
 * Scaled normalized-difference index (10000*a - b) / (a + b), computed
 * entirely in double (the original cast only the numerator to int,
 * truncating it before the division) and clamped at 0 so the result
 * fits the documented [0-10000] range — converting a negative double
 * to an unsigned integer is undefined behavior.  Caller must ensure
 * a + b != 0.
 */
static unsigned int scaled_index(unsigned int a, unsigned int b)
{
	double v = (10000.0 * a - b) / (1.0 * a + b);
	return (v < 0.0) ? 0u : (unsigned int)v;
}

/*
 * Reads L8 bands 4/5/6/7 plus the QA band, masks cloud-affected pixels
 * using the QA bits, and writes four index rasters (NDVI, NDWI, LSWI,
 * NBR2) scaled to [0-10000].  Output sentinels:
 *   32768 = no data (QA == 1) or zero denominator
 *   32767 = masked (cloud flag, or confidence class 3 for
 *           cloud/shadow/cirrus)
 */
int main( int argc, char *argv[] )
{
	/* argv[1]..argv[9] are all required: the old check (argc < 9)
	 * allowed argv[9] to be read past the argument list. */
	if( argc < 10 ) {
		usage();
		return 1;
	}
	char *inB4 = argv[1]; //L8 band 4 (Red)
	char *inB5 = argv[2]; //L8 band 5 (NIR)
	char *inB6 = argv[3]; //L8 band 6 (SWIR1)
	char *inB7 = argv[4]; //L8 band 7 (SWIR2)
	char *inB8 = argv[5]; //L8_QA
	char *L8viF = argv[6]; //OUT NDVI
	char *L8wiF = argv[7]; //OUT NDWI
	char *L8lswiF= argv[8]; //OUT LSWI
	char *L8nbr2F= argv[9]; //OUT NBR2
	GDALAllRegister();
	GDALDatasetH hD4 = GDALOpen(inB4,GA_ReadOnly);//L8 band 4 (Red)
	GDALDatasetH hD5 = GDALOpen(inB5,GA_ReadOnly);//L8 band 5 (NIR)
	GDALDatasetH hD6 = GDALOpen(inB6,GA_ReadOnly);//L8 band 6 (SWIR1)
	GDALDatasetH hD7 = GDALOpen(inB7,GA_ReadOnly);//L8 band 7 (SWIR2)
	GDALDatasetH hD8 = GDALOpen(inB8,GA_ReadOnly);//L8_QA
	if(hD4==NULL||hD5==NULL||hD6==NULL||hD7==NULL||hD8==NULL){
		printf("One or more input files ");
		printf("could not be loaded\n");
		exit(1);
	}
	/* clone band-4's geometry/projection/type for every output */
	GDALDriverH hDr4 = GDALGetDatasetDriver(hD4);
	char **options = NULL;
	options = CSLSetNameValue( options, "TILED", "YES" );
	options = CSLSetNameValue( options, "COMPRESS", "DEFLATE" );
	options = CSLSetNameValue( options, "PREDICTOR", "2" );
	GDALDatasetH hDOutVI = GDALCreateCopy(hDr4,L8viF,hD4,FALSE,options,NULL,NULL);
	GDALDatasetH hDOutWI = GDALCreateCopy(hDr4,L8wiF,hD4,FALSE,options,NULL,NULL);
	GDALDatasetH hDOutLSWI = GDALCreateCopy(hDr4,L8lswiF,hD4,FALSE,options,NULL,NULL);
	GDALDatasetH hDOutNBR2 = GDALCreateCopy(hDr4,L8nbr2F,hD4,FALSE,options,NULL,NULL);
	CSLDestroy(options); /* was leaked before */
	GDALRasterBandH hBOutVI = GDALGetRasterBand(hDOutVI,1);
	GDALRasterBandH hBOutWI = GDALGetRasterBand(hDOutWI,1);
	GDALRasterBandH hBOutLSWI = GDALGetRasterBand(hDOutLSWI,1);
	GDALRasterBandH hBOutNBR2 = GDALGetRasterBand(hDOutNBR2,1);
	GDALRasterBandH hB4 = GDALGetRasterBand(hD4,1);//L8 band 4 (Red)
	GDALRasterBandH hB5 = GDALGetRasterBand(hD5,1);//L8 band 5 (NIR)
	GDALRasterBandH hB6 = GDALGetRasterBand(hD6,1);//L8 band 6 (SWIR1)
	GDALRasterBandH hB7 = GDALGetRasterBand(hD7,1);//L8 band 7 (SWIR2)
	GDALRasterBandH hB8 = GDALGetRasterBand(hD8,1);//L8_QA
	int nX = GDALGetRasterBandXSize(hB4);
	int nY = GDALGetRasterBandYSize(hB4);
	int N = nX*nY;
	unsigned int *l4 = malloc(sizeof(unsigned int)*N);
	unsigned int *l5 = malloc(sizeof(unsigned int)*N);
	unsigned int *l6 = malloc(sizeof(unsigned int)*N);
	unsigned int *l7 = malloc(sizeof(unsigned int)*N);
	unsigned int *l8 = malloc(sizeof(unsigned int)*N);
	unsigned int *lOutVI = malloc(sizeof(unsigned int)*N);
	unsigned int *lOutWI = malloc(sizeof(unsigned int)*N);
	unsigned int *lOutLSWI = malloc(sizeof(unsigned int)*N);
	unsigned int *lOutNBR2 = malloc(sizeof(unsigned int)*N);
	if (!l4 || !l5 || !l6 || !l7 || !l8 ||
	    !lOutVI || !lOutWI || !lOutLSWI || !lOutNBR2) {
		printf("Out of memory\n");
		exit(1);
	}
	int rc, qac, qacc, qacs, qaci;
	//L8 band 4/5/6/7 (red/NIR/SWIR1/SWIR2)
	GDALRasterIO(hB4,GF_Read,0,0,nX,nY,l4,nX,nY,GDT_UInt32,0,0);
	GDALRasterIO(hB5,GF_Read,0,0,nX,nY,l5,nX,nY,GDT_UInt32,0,0);
	GDALRasterIO(hB6,GF_Read,0,0,nX,nY,l6,nX,nY,GDT_UInt32,0,0);
	GDALRasterIO(hB7,GF_Read,0,0,nX,nY,l7,nX,nY,GDT_UInt32,0,0);
	//L8_QA
	GDALRasterIO(hB8,GF_Read,0,0,nX,nY,l8,nX,nY,GDT_UInt32,0,0);
	/* omp_get_num_threads() returns 1 outside a parallel region, so
	 * the old code ended up calling omp_set_num_threads(0).  Use the
	 * real thread budget and leave one core free when possible. */
	int n = omp_get_max_threads();
	if (n > 1)
		omp_set_num_threads(n - 1);
#pragma omp parallel for default(none) \
	private (rc, qac, qacc, qacs, qaci) \
	shared (N, l4, l5, l6, l7, l8, lOutVI, lOutWI, lOutLSWI, lOutNBR2)
	for(rc=0;rc<N;rc++){
		qac = L8QA_cloud(l8[rc]);              /* bit 4 */
		qacc = L8QA_cloud_confidence(l8[rc]);  /* bits 5-6 */
		qacs = L8QA_cloud_shadow(l8[rc]);      /* bits 7-8 */
		qaci = L8QA_cirrus_confidence(l8[rc]); /* bits 11-12 */
		if(l8[rc]==1){
			/* QA value 1 marks fill / no-data pixels */
			lOutVI[rc] = 32768;
			lOutWI[rc] = 32768;
			lOutLSWI[rc] = 32768;
			lOutNBR2[rc] = 32768;
		}else if(qac == 1 || qacc > 2 || qacs > 2 || qaci > 2){
			/* cloud flag set, or high-confidence (class 3)
			 * cloud/shadow/cirrus: mask the pixel */
			lOutVI[rc] = 32767;
			lOutWI[rc] = 32767;
			lOutLSWI[rc] = 32767;
			lOutNBR2[rc] = 32767;
		}else{
			/* clear pixel: scaled indices, 32768 on zero denominator */
			lOutVI[rc]   = (l5[rc]+l4[rc])==0 ? 32768 : scaled_index(l5[rc], l4[rc]); /* NDVI */
			lOutWI[rc]   = (l6[rc]+l5[rc])==0 ? 32768 : scaled_index(l6[rc], l5[rc]); /* NDWI */
			lOutLSWI[rc] = (l5[rc]+l6[rc])==0 ? 32768 : scaled_index(l5[rc], l6[rc]); /* LSWI */
			lOutNBR2[rc] = (l6[rc]+l7[rc])==0 ? 32768 : scaled_index(l6[rc], l7[rc]); /* NBR2 */
		}
	}
	/* (the orphaned "#pragma omp barrier" that stood here was a no-op:
	 * the parallel for above already ends with an implicit barrier) */
	GDALRasterIO(hBOutVI,GF_Write,0,0,nX,nY,lOutVI,nX,nY,GDT_UInt32,0,0);
	GDALRasterIO(hBOutWI,GF_Write,0,0,nX,nY,lOutWI,nX,nY,GDT_UInt32,0,0);
	GDALRasterIO(hBOutLSWI,GF_Write,0,0,nX,nY,lOutLSWI,nX,nY,GDT_UInt32,0,0);
	GDALRasterIO(hBOutNBR2,GF_Write,0,0,nX,nY,lOutNBR2,nX,nY,GDT_UInt32,0,0);
	/* free(NULL) is a no-op, so no guards needed; the output buffers
	 * were previously leaked */
	free(l4);
	free(l5);
	free(l6);
	free(l7);
	free(l8);
	free(lOutVI);
	free(lOutWI);
	free(lOutLSWI);
	free(lOutNBR2);
	GDALClose(hD4);
	GDALClose(hD5);
	GDALClose(hD6);
	GDALClose(hD7);
	GDALClose(hD8);
	GDALClose(hDOutVI);
	GDALClose(hDOutWI);
	GDALClose(hDOutLSWI);
	GDALClose(hDOutNBR2);
	return(EXIT_SUCCESS);
}
|
Parser.h | //===--- Parser.h - C Language Parser ---------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the Parser interface.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_PARSE_PARSER_H
#define LLVM_CLANG_PARSE_PARSER_H
#include "clang/AST/OpenMPClause.h"
#include "clang/AST/Availability.h"
#include "clang/Basic/BitmaskEnum.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/OperatorPrecedence.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Lex/CodeCompletionHandler.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/Sema.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/PrettyStackTrace.h"
#include "llvm/Support/SaveAndRestore.h"
#include <memory>
#include <stack>
namespace clang {
class PragmaHandler;
class Scope;
class BalancedDelimiterTracker;
class CorrectionCandidateCallback;
class DeclGroupRef;
class DiagnosticBuilder;
struct LoopHint;
class Parser;
class ParsingDeclRAIIObject;
class ParsingDeclSpec;
class ParsingDeclarator;
class ParsingFieldDeclarator;
class ColonProtectionRAIIObject;
class InMessageExpressionRAIIObject;
class PoisonSEHIdentifiersRAIIObject;
class OMPClause;
class ObjCTypeParamList;
class ObjCTypeParameter;
/// Parser - This implements a parser for the C family of languages. After
/// parsing units of the grammar, productions are invoked to handle whatever has
/// been read.
///
class Parser : public CodeCompletionHandler {
friend class ColonProtectionRAIIObject;
friend class InMessageExpressionRAIIObject;
friend class PoisonSEHIdentifiersRAIIObject;
friend class ObjCDeclContextSwitch;
friend class ParenBraceBracketBalancer;
friend class BalancedDelimiterTracker;
Preprocessor &PP;
/// Tok - The current token we are peeking ahead. All parsing methods assume
/// that this is valid.
Token Tok;
// PrevTokLocation - The location of the token we previously
// consumed. This token is used for diagnostics where we expected to
// see a token following another token (e.g., the ';' at the end of
// a statement).
SourceLocation PrevTokLocation;
/// Tracks an expected type for the current token when parsing an expression.
/// Used by code completion for ranking.
PreferredTypeBuilder PreferredType;
unsigned short ParenCount = 0, BracketCount = 0, BraceCount = 0;
unsigned short MisplacedModuleBeginCount = 0;
/// Actions - These are the callbacks we invoke as we parse various constructs
/// in the file.
Sema &Actions;
DiagnosticsEngine &Diags;
/// ScopeCache - Cache scopes to reduce malloc traffic.
enum { ScopeCacheSize = 16 };
unsigned NumCachedScopes;
Scope *ScopeCache[ScopeCacheSize];
/// Identifiers used for SEH handling in Borland. These are only
/// allowed in particular circumstances
// __except block
IdentifierInfo *Ident__exception_code,
*Ident___exception_code,
*Ident_GetExceptionCode;
// __except filter expression
IdentifierInfo *Ident__exception_info,
*Ident___exception_info,
*Ident_GetExceptionInfo;
// __finally
IdentifierInfo *Ident__abnormal_termination,
*Ident___abnormal_termination,
*Ident_AbnormalTermination;
/// Contextual keywords for Microsoft extensions.
IdentifierInfo *Ident__except;
mutable IdentifierInfo *Ident_sealed;
/// Ident_super - IdentifierInfo for "super", to support fast
/// comparison.
IdentifierInfo *Ident_super;
/// Ident_vector, Ident_bool - cached IdentifierInfos for "vector" and
/// "bool" fast comparison. Only present if AltiVec or ZVector are enabled.
IdentifierInfo *Ident_vector;
IdentifierInfo *Ident_bool;
/// Ident_pixel - cached IdentifierInfos for "pixel" fast comparison.
/// Only present if AltiVec enabled.
IdentifierInfo *Ident_pixel;
/// Objective-C contextual keywords.
IdentifierInfo *Ident_instancetype;
/// Identifier for "introduced".
IdentifierInfo *Ident_introduced;
/// Identifier for "deprecated".
IdentifierInfo *Ident_deprecated;
/// Identifier for "obsoleted".
IdentifierInfo *Ident_obsoleted;
/// Identifier for "unavailable".
IdentifierInfo *Ident_unavailable;
/// Identifier for "message".
IdentifierInfo *Ident_message;
/// Identifier for "strict".
IdentifierInfo *Ident_strict;
/// Identifier for "replacement".
IdentifierInfo *Ident_replacement;
/// Identifiers used by the 'external_source_symbol' attribute.
IdentifierInfo *Ident_language, *Ident_defined_in,
*Ident_generated_declaration;
/// C++11 contextual keywords.
mutable IdentifierInfo *Ident_final;
mutable IdentifierInfo *Ident_GNU_final;
mutable IdentifierInfo *Ident_override;
// C++2a contextual keywords.
mutable IdentifierInfo *Ident_import;
mutable IdentifierInfo *Ident_module;
// C++ type trait keywords that can be reverted to identifiers and still be
// used as type traits.
llvm::SmallDenseMap<IdentifierInfo *, tok::TokenKind> RevertibleTypeTraits;
std::unique_ptr<PragmaHandler> AlignHandler;
std::unique_ptr<PragmaHandler> GCCVisibilityHandler;
std::unique_ptr<PragmaHandler> OptionsHandler;
std::unique_ptr<PragmaHandler> PackHandler;
std::unique_ptr<PragmaHandler> MSStructHandler;
std::unique_ptr<PragmaHandler> UnusedHandler;
std::unique_ptr<PragmaHandler> WeakHandler;
std::unique_ptr<PragmaHandler> RedefineExtnameHandler;
std::unique_ptr<PragmaHandler> FPContractHandler;
std::unique_ptr<PragmaHandler> OpenCLExtensionHandler;
std::unique_ptr<PragmaHandler> OpenMPHandler;
std::unique_ptr<PragmaHandler> PCSectionHandler;
std::unique_ptr<PragmaHandler> MSCommentHandler;
std::unique_ptr<PragmaHandler> MSDetectMismatchHandler;
std::unique_ptr<PragmaHandler> MSPointersToMembers;
std::unique_ptr<PragmaHandler> MSVtorDisp;
std::unique_ptr<PragmaHandler> MSInitSeg;
std::unique_ptr<PragmaHandler> MSDataSeg;
std::unique_ptr<PragmaHandler> MSBSSSeg;
std::unique_ptr<PragmaHandler> MSConstSeg;
std::unique_ptr<PragmaHandler> MSCodeSeg;
std::unique_ptr<PragmaHandler> MSSection;
std::unique_ptr<PragmaHandler> MSRuntimeChecks;
std::unique_ptr<PragmaHandler> MSIntrinsic;
std::unique_ptr<PragmaHandler> MSOptimize;
std::unique_ptr<PragmaHandler> CUDAForceHostDeviceHandler;
std::unique_ptr<PragmaHandler> OptimizeHandler;
std::unique_ptr<PragmaHandler> LoopHintHandler;
std::unique_ptr<PragmaHandler> UnrollHintHandler;
std::unique_ptr<PragmaHandler> NoUnrollHintHandler;
std::unique_ptr<PragmaHandler> UnrollAndJamHintHandler;
std::unique_ptr<PragmaHandler> NoUnrollAndJamHintHandler;
std::unique_ptr<PragmaHandler> FPHandler;
std::unique_ptr<PragmaHandler> STDCFENVHandler;
std::unique_ptr<PragmaHandler> STDCCXLIMITHandler;
std::unique_ptr<PragmaHandler> STDCUnknownHandler;
std::unique_ptr<PragmaHandler> AttributePragmaHandler;
std::unique_ptr<CommentHandler> CommentSemaHandler;
/// Whether the '>' token acts as an operator or not. This will be
/// true except when we are parsing an expression within a C++
/// template argument list, where the '>' closes the template
/// argument list.
bool GreaterThanIsOperator;
/// ColonIsSacred - When this is false, we aggressively try to recover from
/// code like "foo : bar" as if it were a typo for "foo :: bar". This is not
/// safe in case statements and a few other things. This is managed by the
/// ColonProtectionRAIIObject RAII object.
bool ColonIsSacred;
/// When true, we are directly inside an Objective-C message
/// send expression.
///
/// This is managed by the \c InMessageExpressionRAIIObject class, and
/// should not be set directly.
bool InMessageExpression;
/// Gets set to true after calling ProduceSignatureHelp, it is for a
/// workaround to make sure ProduceSignatureHelp is only called at the deepest
/// function call.
bool CalledSignatureHelp = false;
/// The "depth" of the template parameters currently being parsed.
unsigned TemplateParameterDepth;
/// RAII helper that tracks how many template-parameter depth levels were
/// entered during its lifetime and automatically undoes them on destruction.
class TemplateParameterDepthRAII {
  unsigned &Depth;      // The parser's live template-parameter depth counter.
  unsigned LevelsAdded; // Levels this object has contributed so far.

public:
  explicit TemplateParameterDepthRAII(unsigned &Depth)
      : Depth(Depth), LevelsAdded(0) {}

  /// Undo every level this object added.
  ~TemplateParameterDepthRAII() { Depth -= LevelsAdded; }

  /// Enter one additional level.
  void operator++() {
    ++Depth;
    ++LevelsAdded;
  }

  /// Enter \p D additional levels at once.
  void addDepth(unsigned D) {
    Depth += D;
    LevelsAdded += D;
  }

  /// Make the number of levels added by this object exactly \p D,
  /// adjusting the underlying depth accordingly.
  void setAddedDepth(unsigned D) {
    Depth -= LevelsAdded;
    Depth += D;
    LevelsAdded = D;
  }

  /// Current depth, including this object's contribution.
  unsigned getDepth() const { return Depth; }
  /// Depth as it was before this object added anything.
  unsigned getOriginalDepth() const { return Depth - LevelsAdded; }
};
/// Factory object for creating ParsedAttr objects.
AttributeFactory AttrFactory;
/// Gathers and cleans up TemplateIdAnnotations when parsing of a
/// top-level declaration is finished.
SmallVector<TemplateIdAnnotation *, 16> TemplateIds;
/// Identifiers which have been declared within a tentative parse.
SmallVector<IdentifierInfo *, 8> TentativelyDeclaredIdentifiers;
/// Tracker for '<' tokens that might have been intended to be treated as an
/// angle bracket instead of a less-than comparison.
///
/// This happens when the user intends to form a template-id, but typoes the
/// template-name or forgets a 'template' keyword for a dependent template
/// name.
///
/// We track these locations from the point where we see a '<' with a
/// name-like expression on its left until we see a '>' or '>>' that might
/// match it.
struct AngleBracketTracker {
  /// Flags used to rank candidate template names when there is more than one
  /// '<' in a scope.
  enum Priority : unsigned short {
    /// A non-dependent name that is a potential typo for a template name.
    PotentialTypo = 0x0,
    /// A dependent name that might instantiate to a template-name.
    DependentName = 0x2,

    /// A space appears before the '<' token.
    SpaceBeforeLess = 0x0,
    /// No space before the '<' token.
    NoSpaceBeforeLess = 0x1,

    LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue*/ DependentName)
  };

  /// One recorded '<' candidate, together with the bracket nesting counts
  /// at the moment it was seen.
  struct Loc {
    Expr *TemplateName;
    SourceLocation LessLoc;
    AngleBracketTracker::Priority Priority;
    unsigned short ParenCount, BracketCount, BraceCount;

    /// True when the parser is back at exactly the bracket nesting this
    /// candidate was recorded at.
    bool isActive(Parser &P) const {
      return P.BraceCount == BraceCount && P.BracketCount == BracketCount &&
             P.ParenCount == ParenCount;
    }

    /// True when the parser is at, or nested somewhere inside, the bracket
    /// nesting this candidate was recorded at.
    bool isActiveOrNested(Parser &P) const {
      if (isActive(P))
        return true;
      return P.ParenCount > ParenCount || P.BracketCount > BracketCount ||
             P.BraceCount > BraceCount;
    }
  };

  SmallVector<Loc, 8> Locs;

  /// Add an expression that might have been intended to be a template name.
  /// In the case of ambiguity, we arbitrarily select the innermost such
  /// expression, for example in 'foo < bar < baz', 'bar' is the current
  /// candidate. No attempt is made to track that 'foo' is also a candidate
  /// for the case where we see a second suspicious '>' token.
  void add(Parser &P, Expr *TemplateName, SourceLocation LessLoc,
           Priority Prio) {
    if (Locs.empty() || !Locs.back().isActive(P)) {
      // No live candidate at the current nesting level; record a new one.
      Locs.push_back({TemplateName, LessLoc, Prio,
                      P.ParenCount, P.BracketCount, P.BraceCount});
      return;
    }
    // Replace the existing candidate only if the new one ranks at least as
    // high.
    Loc &Innermost = Locs.back();
    if (Innermost.Priority <= Prio) {
      Innermost.TemplateName = TemplateName;
      Innermost.LessLoc = LessLoc;
      Innermost.Priority = Prio;
    }
  }

  /// Mark the current potential missing template location as having been
  /// handled (this happens if we pass a "corresponding" '>' or '>>' token
  /// or leave a bracket scope).
  void clear(Parser &P) {
    while (!Locs.empty()) {
      if (!Locs.back().isActiveOrNested(P))
        break;
      Locs.pop_back();
    }
  }

  /// Get the current enclosing expression that might have been intended to
  /// be a template name, or null if there is no active candidate.
  Loc *getCurrent(Parser &P) {
    if (Locs.empty() || !Locs.back().isActive(P))
      return nullptr;
    return &Locs.back();
  }
};
AngleBracketTracker AngleBrackets;
IdentifierInfo *getSEHExceptKeyword();
/// True if we are within an Objective-C container while parsing C-like decls.
///
/// This is necessary because Sema thinks we have left the container
/// to parse the C-like decls, meaning Actions.getObjCDeclContext() will
/// be NULL.
bool ParsingInObjCContainer;
/// Whether to skip parsing of function bodies.
///
/// This option can be used, for example, to speed up searches for
/// declarations/definitions when indexing.
bool SkipFunctionBodies;
/// The location of the expression statement that is being parsed right now.
/// Used to determine if an expression that is being parsed is a statement or
/// just a regular sub-expression.
SourceLocation ExprStatementTokLoc;
/// Flags describing a context in which we're parsing a statement.
/// Values are bit flags and may be combined (see LLVM_MARK_AS_BITMASK_ENUM).
enum class ParsedStmtContext {
  /// This context permits declarations in language modes where declarations
  /// are not statements.
  AllowDeclarationsInC = 0x1,
  /// This context permits standalone OpenMP directives.
  AllowStandaloneOpenMPDirectives = 0x2,
  /// This context is at the top level of a GNU statement expression.
  InStmtExpr = 0x4,

  /// The context of a regular substatement (no flags set).
  SubStmt = 0,
  /// The context of a compound-statement.
  Compound = AllowDeclarationsInC | AllowStandaloneOpenMPDirectives,

  LLVM_MARK_AS_BITMASK_ENUM(InStmtExpr)
};
/// Act on an expression statement that might be the last statement in a
/// GNU statement expression. Checks whether we are actually at the end of
/// a statement expression and builds a suitable expression statement.
StmtResult handleExprStmt(ExprResult E, ParsedStmtContext StmtCtx);
public:
Parser(Preprocessor &PP, Sema &Actions, bool SkipFunctionBodies);
~Parser() override;
const LangOptions &getLangOpts() const { return PP.getLangOpts(); }
const TargetInfo &getTargetInfo() const { return PP.getTargetInfo(); }
Preprocessor &getPreprocessor() const { return PP; }
Sema &getActions() const { return Actions; }
AttributeFactory &getAttrFactory() { return AttrFactory; }
const Token &getCurToken() const { return Tok; }
Scope *getCurScope() const { return Actions.getCurScope(); }
/// Bump the Microsoft mangling number for the current scope by forwarding
/// to Sema.
void incrementMSManglingNumber() const {
  Actions.incrementMSManglingNumber();
}
Decl *getObjCDeclContext() const { return Actions.getObjCDeclContext(); }
// Type forwarding. All of these are statically 'void*', but they may all be
// different actual classes based on the actions in place.
typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy;
typedef OpaquePtr<TemplateName> TemplateTy;
typedef SmallVector<TemplateParameterList *, 4> TemplateParameterLists;
typedef Sema::FullExprArg FullExprArg;
// Parsing methods.
/// Initialize - Warm up the parser.
///
void Initialize();
/// Parse the first top-level declaration in a translation unit.
bool ParseFirstTopLevelDecl(DeclGroupPtrTy &Result);
/// ParseTopLevelDecl - Parse one top-level declaration. Returns true if
/// the EOF was encountered.
bool ParseTopLevelDecl(DeclGroupPtrTy &Result, bool IsFirstDecl = false);
bool ParseTopLevelDecl() {
DeclGroupPtrTy Result;
return ParseTopLevelDecl(Result);
}
/// ConsumeToken - Consume the current 'peek token' and lex the next one.
/// This does not work with special tokens: string literals, code completion,
/// annotation tokens and balanced tokens must be handled using the specific
/// consume methods.
/// Returns the location of the consumed token.
SourceLocation ConsumeToken() {
  assert(!isTokenSpecial() &&
         "Should consume special tokens with Consume*Token");
  // Record the consumed token's start location before advancing the lexer,
  // so callers can refer back to it via PrevTokLocation.
  PrevTokLocation = Tok.getLocation();
  PP.Lex(Tok);
  return PrevTokLocation;
}
/// Consume the current token iff its kind matches \p Expected.
/// \returns true (with the token stream advanced) on a match, false with no
/// side effects otherwise. Must not be used for special tokens
/// (see isTokenSpecial()).
bool TryConsumeToken(tok::TokenKind Expected) {
  if (Tok.isNot(Expected))
    return false;
  assert(!isTokenSpecial() &&
         "Should consume special tokens with Consume*Token");
  PrevTokLocation = Tok.getLocation();
  PP.Lex(Tok);
  return true;
}
/// Variant of TryConsumeToken that, on success, also returns the consumed
/// token's location in \p Loc. \p Loc is left untouched on failure.
bool TryConsumeToken(tok::TokenKind Expected, SourceLocation &Loc) {
  if (!TryConsumeToken(Expected))
    return false;
  Loc = PrevTokLocation;
  return true;
}
/// ConsumeAnyToken - Dispatch to the right Consume* method based on the
/// current token type. This should only be used in cases where the type of
/// the token really isn't known, e.g. in error recovery.
///
/// If \p ConsumeCodeCompletionTok is true, a code-completion token is
/// consumed directly; otherwise handleUnexpectedCodeCompletionToken() is
/// invoked to produce context-based completion results.
SourceLocation ConsumeAnyToken(bool ConsumeCodeCompletionTok = false) {
  if (isTokenParen())
    return ConsumeParen();
  if (isTokenBracket())
    return ConsumeBracket();
  if (isTokenBrace())
    return ConsumeBrace();
  if (isTokenStringLiteral())
    return ConsumeStringToken();
  if (Tok.is(tok::code_completion))
    return ConsumeCodeCompletionTok ? ConsumeCodeCompletionToken()
                                    : handleUnexpectedCodeCompletionToken();
  if (Tok.isAnnotation())
    return ConsumeAnnotationToken();
  return ConsumeToken();
}
/// Return the source location just past the end of the most recently
/// consumed token.
SourceLocation getEndOfPreviousToken() {
  return PP.getLocForEndOfToken(PrevTokLocation);
}
/// Retrieve the underscored keyword (_Nonnull, _Nullable) that corresponds
/// to the given nullability kind. Forwards to Sema.
IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability) {
  return Actions.getNullabilityKeyword(nullability);
}
private:
//===--------------------------------------------------------------------===//
// Low-Level token peeking and consumption methods.
//
/// isTokenParen - Return true if the cur token is '(' or ')'.
bool isTokenParen() const {
return Tok.isOneOf(tok::l_paren, tok::r_paren);
}
/// isTokenBracket - Return true if the cur token is '[' or ']'.
bool isTokenBracket() const {
return Tok.isOneOf(tok::l_square, tok::r_square);
}
/// isTokenBrace - Return true if the cur token is '{' or '}'.
bool isTokenBrace() const {
return Tok.isOneOf(tok::l_brace, tok::r_brace);
}
/// isTokenStringLiteral - True if this token is a string-literal.
bool isTokenStringLiteral() const {
return tok::isStringLiteral(Tok.getKind());
}
/// isTokenSpecial - True if this token requires special consumption methods.
bool isTokenSpecial() const {
return isTokenStringLiteral() || isTokenParen() || isTokenBracket() ||
isTokenBrace() || Tok.is(tok::code_completion) || Tok.isAnnotation();
}
/// Returns true if the current token is '=' or is a type of '='.
/// For typos, give a fixit to '='
bool isTokenEqualOrEqualTypo();
/// Return the current token to the token stream and make the given
/// token the current token.
void UnconsumeToken(Token &Consumed) {
  Token Next = Tok;
  // Re-inject \p Consumed so the following Lex makes it the current token,
  // then re-inject the saved old current token so it is lexed next.
  PP.EnterToken(Consumed, /*IsReinject*/true);
  PP.Lex(Tok);
  PP.EnterToken(Next, /*IsReinject*/true);
}
/// Consume an annotation token (produced by the parser itself).
/// \returns the annotation's *start* location; PrevTokLocation is set to
/// the annotation's *end* location, since an annotation covers a range.
SourceLocation ConsumeAnnotationToken() {
  assert(Tok.isAnnotation() && "wrong consume method");
  SourceLocation Loc = Tok.getLocation();
  PrevTokLocation = Tok.getAnnotationEndLoc();
  PP.Lex(Tok);
  return Loc;
}
/// ConsumeParen - This consume method keeps the paren count up-to-date.
///
SourceLocation ConsumeParen() {
  assert(isTokenParen() && "wrong consume method");
  if (Tok.getKind() == tok::l_paren)
    ++ParenCount;
  else if (ParenCount) {
    // Leaving a paren scope: any '<' candidates recorded inside it can no
    // longer match a closing '>'.
    AngleBrackets.clear(*this);
    --ParenCount; // Don't let unbalanced )'s drive the count negative.
  }
  PrevTokLocation = Tok.getLocation();
  PP.Lex(Tok);
  return PrevTokLocation;
}
/// ConsumeBracket - This consume method keeps the bracket count up-to-date.
///
SourceLocation ConsumeBracket() {
  assert(isTokenBracket() && "wrong consume method");
  if (Tok.getKind() == tok::l_square)
    ++BracketCount;
  else if (BracketCount) {
    // Leaving a bracket scope invalidates any '<' candidates inside it.
    AngleBrackets.clear(*this);
    --BracketCount; // Don't let unbalanced ]'s drive the count negative.
  }
  PrevTokLocation = Tok.getLocation();
  PP.Lex(Tok);
  return PrevTokLocation;
}
/// ConsumeBrace - This consume method keeps the brace count up-to-date.
///
SourceLocation ConsumeBrace() {
  assert(isTokenBrace() && "wrong consume method");
  if (Tok.getKind() == tok::l_brace)
    ++BraceCount;
  else if (BraceCount) {
    // Leaving a brace scope invalidates any '<' candidates inside it.
    AngleBrackets.clear(*this);
    --BraceCount; // Don't let unbalanced }'s drive the count negative.
  }
  PrevTokLocation = Tok.getLocation();
  PP.Lex(Tok);
  return PrevTokLocation;
}
/// ConsumeStringToken - Consume the current 'peek token', lexing a new one
/// and returning the consumed token's location. This method is specific to
/// strings, as it handles string literal concatenation, as per C99 5.1.1.2,
/// translation phase #6.
SourceLocation ConsumeStringToken() {
  assert(isTokenStringLiteral() &&
         "Should only consume string literals with this method");
  PrevTokLocation = Tok.getLocation();
  PP.Lex(Tok);
  return PrevTokLocation;
}
/// Consume the current code-completion token.
///
/// This routine can be called to consume the code-completion token and
/// continue processing in special cases where \c cutOffParsing() isn't
/// desired, such as token caching or completion with lookahead.
///
/// \returns the location of the consumed code-completion token.
SourceLocation ConsumeCodeCompletionToken() {
  assert(Tok.is(tok::code_completion));
  PrevTokLocation = Tok.getLocation();
  PP.Lex(Tok);
  return PrevTokLocation;
}
/// \brief When we are consuming a code-completion token without having
/// matched specific position in the grammar, provide code-completion results
/// based on context.
///
/// \returns the source location of the code-completion token.
SourceLocation handleUnexpectedCodeCompletionToken();
/// Abruptly cut off parsing; mainly used when we have reached the
/// code-completion point.
/// \note After this call, Tok reads as tok::eof, so parsing loops that
/// check the current token will terminate.
void cutOffParsing() {
  if (PP.isCodeCompletionEnabled())
    PP.setCodeCompletionReached();
  // Cut off parsing by acting as if we reached the end-of-file.
  Tok.setKind(tok::eof);
}
/// Determine if we're at the end of the file or at a transition
/// between modules.
bool isEofOrEom() {
tok::TokenKind Kind = Tok.getKind();
return Kind == tok::eof || Kind == tok::annot_module_begin ||
Kind == tok::annot_module_end || Kind == tok::annot_module_include;
}
/// Checks if the \p Level is valid for use in a fold expression.
bool isFoldOperator(prec::Level Level) const;
/// Checks if the \p Kind is a valid operator for fold expressions.
bool isFoldOperator(tok::TokenKind Kind) const;
/// Initialize all pragma handlers.
void initializePragmaHandlers();
/// Destroy and reset all pragma handlers.
void resetPragmaHandlers();
/// Handle the annotation token produced for #pragma unused(...)
void HandlePragmaUnused();
/// Handle the annotation token produced for
/// #pragma GCC visibility...
void HandlePragmaVisibility();
/// Handle the annotation token produced for
/// #pragma pack...
void HandlePragmaPack();
/// Handle the annotation token produced for
/// #pragma ms_struct...
void HandlePragmaMSStruct();
/// Handle the annotation token produced for
/// #pragma comment...
void HandlePragmaMSComment();
void HandlePragmaMSPointersToMembers();
void HandlePragmaMSVtorDisp();
void HandlePragmaMSPragma();
bool HandlePragmaMSSection(StringRef PragmaName,
SourceLocation PragmaLocation);
bool HandlePragmaMSSegment(StringRef PragmaName,
SourceLocation PragmaLocation);
bool HandlePragmaMSInitSeg(StringRef PragmaName,
SourceLocation PragmaLocation);
/// Handle the annotation token produced for
/// #pragma align...
void HandlePragmaAlign();
/// Handle the annotation token produced for
/// #pragma clang __debug dump...
void HandlePragmaDump();
/// Handle the annotation token produced for
/// #pragma weak id...
void HandlePragmaWeak();
/// Handle the annotation token produced for
/// #pragma weak id = id...
void HandlePragmaWeakAlias();
/// Handle the annotation token produced for
/// #pragma redefine_extname...
void HandlePragmaRedefineExtname();
/// Handle the annotation token produced for
/// #pragma STDC FP_CONTRACT...
void HandlePragmaFPContract();
/// Handle the annotation token produced for
/// #pragma STDC FENV_ACCESS...
void HandlePragmaFEnvAccess();
/// \brief Handle the annotation token produced for
/// #pragma clang fp ...
void HandlePragmaFP();
/// Handle the annotation token produced for
/// #pragma OPENCL EXTENSION...
void HandlePragmaOpenCLExtension();
/// Handle the annotation token produced for
/// #pragma clang __debug captured
StmtResult HandlePragmaCaptured();
/// Handle the annotation token produced for
/// #pragma clang loop and #pragma unroll.
bool HandlePragmaLoopHint(LoopHint &Hint);
bool ParsePragmaAttributeSubjectMatchRuleSet(
attr::ParsedSubjectMatchRuleSet &SubjectMatchRules,
SourceLocation &AnyLoc, SourceLocation &LastMatchRuleEndLoc);
void HandlePragmaAttribute();
/// GetLookAheadToken - This peeks ahead N tokens and returns that token
/// without consuming any tokens. LookAhead(0) returns 'Tok', LookAhead(1)
/// returns the token after Tok, etc.
///
/// Note that this differs from the Preprocessor's LookAhead method, because
/// the Parser always has one token lexed that the preprocessor doesn't.
///
const Token &GetLookAheadToken(unsigned N) {
  // At end-of-file there is nothing further to peek at; return Tok itself.
  if (N == 0 || Tok.is(tok::eof)) return Tok;
  return PP.LookAhead(N-1);
}
public:
/// NextToken - This peeks ahead one token and returns it without
/// consuming it.
const Token &NextToken() {
return PP.LookAhead(0);
}
/// getTypeAnnotation - Read a parsed type out of an annotation token.
static ParsedType getTypeAnnotation(const Token &Tok) {
return ParsedType::getFromOpaquePtr(Tok.getAnnotationValue());
}
private:
static void setTypeAnnotation(Token &Tok, ParsedType T) {
Tok.setAnnotationValue(T.getAsOpaquePtr());
}
static NamedDecl *getNonTypeAnnotation(const Token &Tok) {
return static_cast<NamedDecl*>(Tok.getAnnotationValue());
}
static void setNonTypeAnnotation(Token &Tok, NamedDecl *ND) {
Tok.setAnnotationValue(ND);
}
static IdentifierInfo *getIdentifierAnnotation(const Token &Tok) {
return static_cast<IdentifierInfo*>(Tok.getAnnotationValue());
}
static void setIdentifierAnnotation(Token &Tok, IdentifierInfo *ND) {
Tok.setAnnotationValue(ND);
}
/// Read an already-translated primary expression out of an annotation
/// token.
static ExprResult getExprAnnotation(const Token &Tok) {
return ExprResult::getFromOpaquePointer(Tok.getAnnotationValue());
}
/// Set the primary expression corresponding to the given annotation
/// token.
static void setExprAnnotation(Token &Tok, ExprResult ER) {
Tok.setAnnotationValue(ER.getAsOpaquePointer());
}
public:
// If NeedType is true, then TryAnnotateTypeOrScopeToken will try harder to
// find a type name by attempting typo correction.
bool TryAnnotateTypeOrScopeToken();
bool TryAnnotateTypeOrScopeTokenAfterScopeSpec(CXXScopeSpec &SS,
bool IsNewScope);
bool TryAnnotateCXXScopeToken(bool EnteringContext = false);
private:
enum AnnotatedNameKind {
/// Annotation has failed and emitted an error.
ANK_Error,
/// The identifier is a tentatively-declared name.
ANK_TentativeDecl,
/// The identifier is a template name. FIXME: Add an annotation for that.
ANK_TemplateName,
/// The identifier can't be resolved.
ANK_Unresolved,
/// Annotation was successful.
ANK_Success
};
AnnotatedNameKind TryAnnotateName(CorrectionCandidateCallback *CCC = nullptr);
/// Push a tok::annot_cxxscope token onto the token stream.
void AnnotateScopeToken(CXXScopeSpec &SS, bool IsNewAnnotation);
/// TryAltiVecToken - Check for context-sensitive AltiVec identifier tokens,
/// replacing them with the non-context-sensitive keywords. This returns
/// true if the token was replaced.
bool TryAltiVecToken(DeclSpec &DS, SourceLocation Loc,
                     const char *&PrevSpec, unsigned &DiagID,
                     bool &isInvalid) {
  // Fast path: nothing to do unless AltiVec or ZVector is enabled.
  if (!getLangOpts().AltiVec && !getLangOpts().ZVector)
    return false;
  // Candidates are 'vector' and 'bool'; 'pixel' is contextual only under
  // AltiVec (not ZVector).
  if (Tok.getIdentifierInfo() != Ident_vector &&
      Tok.getIdentifierInfo() != Ident_bool &&
      (!getLangOpts().AltiVec || Tok.getIdentifierInfo() != Ident_pixel))
    return false;
  return TryAltiVecTokenOutOfLine(DS, Loc, PrevSpec, DiagID, isInvalid);
}
/// TryAltiVecVectorToken - Check for context-sensitive AltiVec vector
/// identifier token, replacing it with the non-context-sensitive __vector.
/// This returns true if the token was replaced.
bool TryAltiVecVectorToken() {
  // Only the cached 'vector' identifier is a candidate; the actual
  // replacement work is done out of line.
  if ((!getLangOpts().AltiVec && !getLangOpts().ZVector) ||
      Tok.getIdentifierInfo() != Ident_vector) return false;
  return TryAltiVecVectorTokenOutOfLine();
}
bool TryAltiVecVectorTokenOutOfLine();
bool TryAltiVecTokenOutOfLine(DeclSpec &DS, SourceLocation Loc,
const char *&PrevSpec, unsigned &DiagID,
bool &isInvalid);
/// Returns true if the current token is the identifier 'instancetype'.
///
/// Should only be used in Objective-C language modes.
bool isObjCInstancetype() {
  assert(getLangOpts().ObjC);
  if (Tok.isAnnotation())
    return false;
  // Lazily cache the IdentifierInfo so subsequent checks reduce to a
  // pointer comparison.
  if (!Ident_instancetype)
    Ident_instancetype = PP.getIdentifierInfo("instancetype");
  return Tok.getIdentifierInfo() == Ident_instancetype;
}
/// TryKeywordIdentFallback - For compatibility with system headers using
/// keywords as identifiers, attempt to convert the current token to an
/// identifier and optionally disable the keyword for the remainder of the
/// translation unit. This returns false if the token was not replaced,
/// otherwise emits a diagnostic and returns true.
bool TryKeywordIdentFallback(bool DisableKeyword);
/// Get the TemplateIdAnnotation from the token.
TemplateIdAnnotation *takeTemplateIdAnnotation(const Token &tok);
/// TentativeParsingAction - An object that is used as a kind of "tentative
/// parsing transaction". It gets instantiated to mark the token position and
/// after the token consumption is done, Commit() or Revert() is called to
/// either "commit the consumed tokens" or revert to the previously marked
/// token position. Example:
///
///   TentativeParsingAction TPA(*this);
///   ConsumeToken();
///   ....
///   TPA.Revert();
///
class TentativeParsingAction {
  Parser &P;
  // Snapshots of the parser state taken at construction time, restored by
  // Revert().
  PreferredTypeBuilder PrevPreferredType;
  Token PrevTok;
  size_t PrevTentativelyDeclaredIdentifierCount;
  unsigned short PrevParenCount, PrevBracketCount, PrevBraceCount;
  bool isActive; // False once Commit() or Revert() has run.

public:
  explicit TentativeParsingAction(Parser& p) : P(p) {
    // Snapshot all parser state that token consumption can change.
    PrevPreferredType = P.PreferredType;
    PrevTok = P.Tok;
    PrevTentativelyDeclaredIdentifierCount =
        P.TentativelyDeclaredIdentifiers.size();
    PrevParenCount = P.ParenCount;
    PrevBracketCount = P.BracketCount;
    PrevBraceCount = P.BraceCount;
    // Have the preprocessor start caching tokens so we can rewind.
    P.PP.EnableBacktrackAtThisPos();
    isActive = true;
  }
  void Commit() {
    assert(isActive && "Parsing action was finished!");
    // Keep the consumed tokens, but truncate the tentatively-declared
    // identifier list back to its pre-transaction size.
    P.TentativelyDeclaredIdentifiers.resize(
        PrevTentativelyDeclaredIdentifierCount);
    P.PP.CommitBacktrackedTokens();
    isActive = false;
  }
  void Revert() {
    assert(isActive && "Parsing action was finished!");
    // Rewind the preprocessor, then restore every saved piece of state.
    P.PP.Backtrack();
    P.PreferredType = PrevPreferredType;
    P.Tok = PrevTok;
    P.TentativelyDeclaredIdentifiers.resize(
        PrevTentativelyDeclaredIdentifierCount);
    P.ParenCount = PrevParenCount;
    P.BracketCount = PrevBracketCount;
    P.BraceCount = PrevBraceCount;
    isActive = false;
  }
  ~TentativeParsingAction() {
    assert(!isActive && "Forgot to call Commit or Revert!");
  }
};
/// A TentativeParsingAction that automatically reverts in its destructor.
/// Useful for disambiguation parses that will always be reverted.
/// Uses private inheritance so callers cannot Commit().
class RevertingTentativeParsingAction
    : private Parser::TentativeParsingAction {
public:
  RevertingTentativeParsingAction(Parser &P)
      : Parser::TentativeParsingAction(P) {}
  ~RevertingTentativeParsingAction() { Revert(); }
};
class UnannotatedTentativeParsingAction;
/// ObjCDeclContextSwitch - An object used to switch context from
/// an objective-c decl context to its enclosing decl context and
/// back.
class ObjCDeclContextSwitch {
  Parser &P;
  Decl *DC; // The ObjC container we left, or null if there was none.
  SaveAndRestore<bool> WithinObjCContainer; // Restores the flag on destroy.
public:
  explicit ObjCDeclContextSwitch(Parser &p)
      : P(p), DC(p.getObjCDeclContext()),
        WithinObjCContainer(P.ParsingInObjCContainer, DC != nullptr) {
    // Temporarily exit the container so decls are processed in the
    // enclosing context; re-entered in the destructor.
    if (DC)
      P.Actions.ActOnObjCTemporaryExitContainerContext(cast<DeclContext>(DC));
  }
  ~ObjCDeclContextSwitch() {
    if (DC)
      P.Actions.ActOnObjCReenterContainerContext(cast<DeclContext>(DC));
  }
};
/// ExpectAndConsume - The parser expects that 'ExpectedTok' is next in the
/// input. If so, it is consumed and false is returned.
///
/// If a trivial punctuator misspelling is encountered, a FixIt error
/// diagnostic is issued and false is returned after recovery.
///
/// If the input is malformed, this emits the specified diagnostic and true is
/// returned.
bool ExpectAndConsume(tok::TokenKind ExpectedTok,
unsigned Diag = diag::err_expected,
StringRef DiagMsg = "");
/// The parser expects a semicolon and, if present, will consume it.
///
/// If the next token is not a semicolon, this emits the specified diagnostic,
/// or, if there's just some closing-delimiter noise (e.g., ')' or ']') prior
/// to the semicolon, consumes that extra token.
bool ExpectAndConsumeSemi(unsigned DiagID);
/// The kind of extra semi diagnostic to emit.
enum ExtraSemiKind {
OutsideFunction = 0,
InsideStruct = 1,
InstanceVariableList = 2,
AfterMemberFunctionDefinition = 3
};
/// Consume any extra semi-colons until the end of the line.
void ConsumeExtraSemi(ExtraSemiKind Kind, DeclSpec::TST T = TST_unspecified);
/// Return false if the next token is an identifier. An 'expected identifier'
/// error is emitted otherwise.
///
/// The parser tries to recover from the error by checking if the next token
/// is a C++ keyword when parsing Objective-C++. Return false if the recovery
/// was successful.
bool expectIdentifier();
public:
//===--------------------------------------------------------------------===//
// Scope manipulation
/// ParseScope - Introduces a new scope for parsing. The kind of
/// scope is determined by ScopeFlags. Objects of this type should
/// be created on the stack to coincide with the position where the
/// parser enters the new scope, and this object's constructor will
/// create that new scope. Similarly, once the object is destroyed
/// the parser will exit the scope.
class ParseScope {
  Parser *Self; // Null once the scope is exited (or was never entered).
  ParseScope(const ParseScope &) = delete;
  void operator=(const ParseScope &) = delete;

public:
  // ParseScope - Construct a new object to manage a scope in the
  // parser Self where the new Scope is created with the flags
  // ScopeFlags, but only when we aren't about to enter a compound statement.
  ParseScope(Parser *Self, unsigned ScopeFlags, bool EnteredScope = true,
             bool BeforeCompoundStmt = false)
      : Self(Self) {
    if (EnteredScope && !BeforeCompoundStmt)
      Self->EnterScope(ScopeFlags);
    else {
      // No scope is entered here; keep MS mangling numbers in sync when a
      // compound statement will enter its own scope, and disarm this object.
      if (BeforeCompoundStmt)
        Self->incrementMSManglingNumber();
      this->Self = nullptr;
    }
  }
  // Exit - Exit the scope associated with this object now, rather
  // than waiting until the object is destroyed.
  void Exit() {
    if (Self) {
      Self->ExitScope();
      Self = nullptr; // Makes Exit() idempotent and disarms the destructor.
    }
  }
  ~ParseScope() {
    Exit();
  }
};
/// EnterScope - Start a new scope.
void EnterScope(unsigned ScopeFlags);
/// ExitScope - Pop a scope off the scope stack.
void ExitScope();
private:
/// RAII object used to modify the scope flags for the current scope.
class ParseScopeFlags {
Scope *CurScope;
unsigned OldFlags;
ParseScopeFlags(const ParseScopeFlags &) = delete;
void operator=(const ParseScopeFlags &) = delete;
public:
ParseScopeFlags(Parser *Self, unsigned ScopeFlags, bool ManageFlags = true);
~ParseScopeFlags();
};
//===--------------------------------------------------------------------===//
// Diagnostic Emission and Error recovery.
public:
DiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID);
DiagnosticBuilder Diag(const Token &Tok, unsigned DiagID);
DiagnosticBuilder Diag(unsigned DiagID) {
return Diag(Tok, DiagID);
}
private:
void SuggestParentheses(SourceLocation Loc, unsigned DK,
SourceRange ParenRange);
void CheckNestedObjCContexts(SourceLocation AtLoc);
public:
/// Control flags for SkipUntil functions.
enum SkipUntilFlags {
StopAtSemi = 1 << 0, ///< Stop skipping at semicolon
/// Stop skipping at specified token, but don't skip the token itself
StopBeforeMatch = 1 << 1,
StopAtCodeCompletion = 1 << 2 ///< Stop at code completion
};
/// Combine two skip-flag values, enabling e.g. StopAtSemi | StopBeforeMatch.
friend constexpr SkipUntilFlags operator|(SkipUntilFlags L,
                                          SkipUntilFlags R) {
  return static_cast<SkipUntilFlags>(static_cast<unsigned>(L) |
                                     static_cast<unsigned>(R));
}
/// SkipUntil - Read tokens until we get to the specified token, then consume
/// it (unless StopBeforeMatch is specified). Because we cannot guarantee
/// that the token will ever occur, this skips to the next token, or to some
/// likely good stopping point. If Flags has StopAtSemi flag, skipping will
/// stop at a ';' character.
///
/// If SkipUntil finds the specified token, it returns true, otherwise it
/// returns false.
bool SkipUntil(tok::TokenKind T,
               SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
  // Convenience overload: a single stop token.
  const tok::TokenKind Kinds[] = {T};
  return SkipUntil(Kinds, Flags);
}
bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2,
               SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
  // Convenience overload: two stop tokens.
  const tok::TokenKind Kinds[] = {T1, T2};
  return SkipUntil(Kinds, Flags);
}
bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2, tok::TokenKind T3,
               SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
  // Convenience overload: three stop tokens.
  const tok::TokenKind Kinds[] = {T1, T2, T3};
  return SkipUntil(Kinds, Flags);
}
bool SkipUntil(ArrayRef<tok::TokenKind> Toks,
SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0));
/// SkipMalformedDecl - Read tokens until we get to some likely good stopping
/// point for skipping past a simple-declaration.
void SkipMalformedDecl();
private:
//===--------------------------------------------------------------------===//
// Lexing and parsing of C++ inline methods.
struct ParsingClass;
/// [class.mem]p1: "... the class is regarded as complete within
/// - function bodies
/// - default arguments
/// - exception-specifications (TODO: C++0x)
/// - and brace-or-equal-initializers for non-static data members
/// (including such things in nested classes)."
/// LateParsedDeclarations build the tree of those elements so they can
/// be parsed after parsing the top-level class.
class LateParsedDeclaration {
public:
  virtual ~LateParsedDeclaration();

  // Hooks overridden by subclasses for the kinds of late-parsed pieces
  // they carry; base-class definitions are out of line.
  virtual void ParseLexedMethodDeclarations();
  virtual void ParseLexedMemberInitializers();
  virtual void ParseLexedMethodDefs();
  virtual void ParseLexedAttributes();
};
/// Inner node of the LateParsedDeclaration tree that parses
/// all its members recursively.
class LateParsedClass : public LateParsedDeclaration {
public:
LateParsedClass(Parser *P, ParsingClass *C);
~LateParsedClass() override;
// Per the class comment above, each hook recurses into the members of
// \c Class (definitions are out of line).
void ParseLexedMethodDeclarations() override;
void ParseLexedMemberInitializers() override;
void ParseLexedMethodDefs() override;
void ParseLexedAttributes() override;
private:
Parser *Self;          // The parser that will re-lex the cached tokens.
ParsingClass *Class;   // The nested class whose members are re-parsed.
};
/// Contains the lexed tokens of an attribute with arguments that
/// may reference member variables and so need to be parsed at the
/// end of the class declaration after parsing all other
/// member declarations.
/// FIXME: Perhaps we should change the name of LateParsedDeclaration to
/// LateParsedTokens.
struct LateParsedAttribute : public LateParsedDeclaration {
Parser *Self;
/// The cached tokens making up the attribute's arguments.
CachedTokens Toks;
/// The attribute's name.
IdentifierInfo &AttrName;
// NOTE(review): presumably the macro this attribute was expanded from,
// if any — confirm against the code that sets it.
IdentifierInfo *MacroII = nullptr;
/// Source location of \c AttrName.
SourceLocation AttrNameLoc;
/// Declarations the attribute will be attached to once its arguments
/// have been parsed.
SmallVector<Decl*, 2> Decls;
explicit LateParsedAttribute(Parser *P, IdentifierInfo &Name,
SourceLocation Loc)
: Self(P), AttrName(Name), AttrNameLoc(Loc) {}
void ParseLexedAttributes() override;
/// Record another declaration this attribute appertains to.
void addDecl(Decl *D) { Decls.push_back(D); }
};
// A list of late-parsed attributes. Used by ParseGNUAttributes.
// A list of late-parsed attributes.  Used by ParseGNUAttributes.
class LateParsedAttrList: public SmallVector<LateParsedAttribute *, 2> {
public:
  /// \param PSoon Whether the attributes are expected to be parsed
  /// shortly after the list is created rather than deferred to the end
  /// of the enclosing class.
  LateParsedAttrList(bool PSoon = false) : ParseSoon(PSoon) { }

  /// Whether we plan to parse these attributes shortly after creation.
  /// Const-qualified: it only reads \c ParseSoon, and this allows the
  /// query through const references (backward-compatible change).
  bool parseSoon() const { return ParseSoon; }

private:
  bool ParseSoon;  // Are we planning to parse these shortly after creation?
};
/// Contains the lexed tokens of a member function definition
/// which needs to be parsed at the end of the class declaration
/// after parsing all other member declarations.
struct LexedMethod : public LateParsedDeclaration {
Parser *Self;
/// The declaration whose body tokens are cached in \c Toks.
Decl *D;
/// The cached tokens of the member function's definition.
CachedTokens Toks;
/// Whether this member function had an associated template
/// scope. When true, D is a template declaration;
/// otherwise, it is a member function declaration.
bool TemplateScope;
explicit LexedMethod(Parser* P, Decl *MD)
: Self(P), D(MD), TemplateScope(false) {}
void ParseLexedMethodDefs() override;
};
/// LateParsedDefaultArgument - Keeps track of a parameter that may
/// have a default argument that cannot be parsed yet because it
/// occurs within a member function declaration inside the class
/// (C++ [class.mem]p2).
struct LateParsedDefaultArgument {
/// Takes ownership of \p Toks (may be null when the parameter has no
/// default argument).
explicit LateParsedDefaultArgument(Decl *P,
std::unique_ptr<CachedTokens> Toks = nullptr)
: Param(P), Toks(std::move(Toks)) { }
/// Param - The parameter declaration for this parameter.
Decl *Param;
/// Toks - The sequence of tokens that comprises the default
/// argument expression, not including the '=' or the terminating
/// ')' or ','. This will be NULL for parameters that have no
/// default argument.
std::unique_ptr<CachedTokens> Toks;
};
/// LateParsedMethodDeclaration - A method declaration inside a class that
/// contains at least one entity whose parsing needs to be delayed
/// until the class itself is completely-defined, such as a default
/// argument (C++ [class.mem]p2).
struct LateParsedMethodDeclaration : public LateParsedDeclaration {
explicit LateParsedMethodDeclaration(Parser *P, Decl *M)
: Self(P), Method(M), TemplateScope(false),
ExceptionSpecTokens(nullptr) {}
void ParseLexedMethodDeclarations() override;
Parser* Self;
/// Method - The method declaration.
Decl *Method;
/// Whether this member function had an associated template
/// scope. When true, Method is a template declaration;
/// otherwise, it is a member function declaration.
bool TemplateScope;
/// DefaultArgs - Contains the parameters of the function and
/// their default arguments. At least one of the parameters will
/// have a default argument, but all of the parameters of the
/// method will be stored so that they can be reintroduced into
/// scope at the appropriate times.
SmallVector<LateParsedDefaultArgument, 8> DefaultArgs;
/// The set of tokens that make up an exception-specification that
/// has not yet been parsed.
// NOTE(review): raw pointer — ownership/release happens in the
// out-of-line re-parsing code; confirm before changing lifetime.
CachedTokens *ExceptionSpecTokens;
};
/// LateParsedMemberInitializer - An initializer for a non-static class data
/// member whose parsing must be delayed until the class is completely
/// defined (C++11 [class.mem]p2).
struct LateParsedMemberInitializer : public LateParsedDeclaration {
LateParsedMemberInitializer(Parser *P, Decl *FD)
: Self(P), Field(FD) { }
void ParseLexedMemberInitializers() override;
Parser *Self;
/// Field - The field declaration.
Decl *Field;
/// CachedTokens - The sequence of tokens that comprises the initializer,
/// including any leading '='.
CachedTokens Toks;
};
/// LateParsedDeclarationsContainer - During parsing of a top (non-nested)
/// C++ class, its method declarations that contain parts that won't be
/// parsed until after the definition is completed (C++ [class.mem]p2),
/// the method declarations and possibly attached inline definitions
/// will be stored here with the tokens that will be parsed to create those
/// entities.
typedef SmallVector<LateParsedDeclaration*,2> LateParsedDeclarationsContainer;
/// Representation of a class that has been parsed, including
/// any member function declarations or definitions that need to be
/// parsed after the corresponding top-level class is complete.
struct ParsingClass {
ParsingClass(Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface)
: TopLevelClass(TopLevelClass), TemplateScope(false),
IsInterface(IsInterface), TagOrTemplate(TagOrTemplate) { }
/// Whether this is a "top-level" class, meaning that it is
/// not nested within another class.
bool TopLevelClass : 1;
/// Whether this class had an associated template
/// scope. When true, TagOrTemplate is a template declaration;
/// otherwise, it is a tag declaration.
/// Starts false; set later during parsing (not by the constructor).
bool TemplateScope : 1;
/// Whether this class is an __interface.
bool IsInterface : 1;
/// The class or class template whose definition we are parsing.
Decl *TagOrTemplate;
/// LateParsedDeclarations - Method declarations, inline definitions and
/// nested classes that contain pieces whose parsing will be delayed until
/// the top-level class is fully defined.
LateParsedDeclarationsContainer LateParsedDeclarations;
};
/// The stack of classes that is currently being
/// parsed. Nested and local classes will be pushed onto this stack
/// when they are parsed, and removed afterward.
std::stack<ParsingClass *> ClassStack;
/// Return the innermost class currently being parsed.
/// Precondition: ClassStack must be non-empty (asserted).
ParsingClass &getCurrentClass() {
assert(!ClassStack.empty() && "No lexed method stacks!");
return *ClassStack.top();
}
/// RAII object used to manage the parsing of a class definition.
class ParsingClassDefinition {
Parser &P;
// Whether Pop() has already been called explicitly.
bool Popped;
Sema::ParsingClassState State;
public:
// The constructor pushes a parsing-class record via
// Parser::PushParsingClass and remembers the Sema state needed to pop it.
ParsingClassDefinition(Parser &P, Decl *TagOrTemplate, bool TopLevelClass,
bool IsInterface)
: P(P), Popped(false),
State(P.PushParsingClass(TagOrTemplate, TopLevelClass, IsInterface)) {
}
/// Pop this class off the stack.
void Pop() {
assert(!Popped && "Nested class has already been popped");
Popped = true;
P.PopParsingClass(State);
}
// RAII: pop automatically unless Pop() was already called.
~ParsingClassDefinition() {
if (!Popped)
P.PopParsingClass(State);
}
};
/// Contains information about any template-specific
/// information that has been parsed prior to parsing declaration
/// specifiers.
struct ParsedTemplateInfo {
  /// Build a non-template context.
  ///
  /// Fix: the previous version left \c LastParameterListWasEmpty
  /// uninitialized here, so reading it after default construction was an
  /// indeterminate-value read; initialize it to false like the other
  /// constructors do.
  ParsedTemplateInfo()
    : Kind(NonTemplate), TemplateParams(nullptr), TemplateLoc(),
      LastParameterListWasEmpty(false) { }

  /// Build a context for a template declaration or (when
  /// \p isSpecialization) an explicit specialization.
  ParsedTemplateInfo(TemplateParameterLists *TemplateParams,
                     bool isSpecialization,
                     bool lastParameterListWasEmpty = false)
    : Kind(isSpecialization? ExplicitSpecialization : Template),
      TemplateParams(TemplateParams),
      LastParameterListWasEmpty(lastParameterListWasEmpty) { }

  /// Build a context for an explicit instantiation, optionally preceded
  /// by 'extern' at \p ExternLoc.
  explicit ParsedTemplateInfo(SourceLocation ExternLoc,
                              SourceLocation TemplateLoc)
    : Kind(ExplicitInstantiation), TemplateParams(nullptr),
      ExternLoc(ExternLoc), TemplateLoc(TemplateLoc),
      LastParameterListWasEmpty(false){ }

  /// The kind of template we are parsing.
  enum {
    /// We are not parsing a template at all.
    NonTemplate = 0,
    /// We are parsing a template declaration.
    Template,
    /// We are parsing an explicit specialization.
    ExplicitSpecialization,
    /// We are parsing an explicit instantiation.
    ExplicitInstantiation
  } Kind;

  /// The template parameter lists, for template declarations
  /// and explicit specializations.
  TemplateParameterLists *TemplateParams;

  /// The location of the 'extern' keyword, if any, for an explicit
  /// instantiation.
  SourceLocation ExternLoc;

  /// The location of the 'template' keyword, for an explicit
  /// instantiation.
  SourceLocation TemplateLoc;

  /// Whether the last template parameter list was empty.
  bool LastParameterListWasEmpty;

  SourceRange getSourceRange() const LLVM_READONLY;
};
void LexTemplateFunctionForLateParsing(CachedTokens &Toks);
void ParseLateTemplatedFuncDef(LateParsedTemplate &LPT);
static void LateTemplateParserCallback(void *P, LateParsedTemplate &LPT);
static void LateTemplateParserCleanupCallback(void *P);
Sema::ParsingClassState
PushParsingClass(Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface);
void DeallocateParsedClasses(ParsingClass *Class);
void PopParsingClass(Sema::ParsingClassState);
enum CachedInitKind {
CIK_DefaultArgument,
CIK_DefaultInitializer
};
NamedDecl *ParseCXXInlineMethodDef(AccessSpecifier AS,
ParsedAttributes &AccessAttrs,
ParsingDeclarator &D,
const ParsedTemplateInfo &TemplateInfo,
const VirtSpecifiers &VS,
SourceLocation PureSpecLoc);
void ParseCXXNonStaticMemberInitializer(Decl *VarD);
void ParseLexedAttributes(ParsingClass &Class);
void ParseLexedAttributeList(LateParsedAttrList &LAs, Decl *D,
bool EnterScope, bool OnDefinition);
void ParseLexedAttribute(LateParsedAttribute &LA,
bool EnterScope, bool OnDefinition);
void ParseLexedMethodDeclarations(ParsingClass &Class);
void ParseLexedMethodDeclaration(LateParsedMethodDeclaration &LM);
void ParseLexedMethodDefs(ParsingClass &Class);
void ParseLexedMethodDef(LexedMethod &LM);
void ParseLexedMemberInitializers(ParsingClass &Class);
void ParseLexedMemberInitializer(LateParsedMemberInitializer &MI);
void ParseLexedObjCMethodDefs(LexedMethod &LM, bool parseMethod);
bool ConsumeAndStoreFunctionPrologue(CachedTokens &Toks);
bool ConsumeAndStoreInitializer(CachedTokens &Toks, CachedInitKind CIK);
bool ConsumeAndStoreConditional(CachedTokens &Toks);
/// Consume tokens into \p Toks until \p T1 is found.
/// Single-sentinel convenience wrapper: forwards to the two-token form
/// with both sentinels set to \p T1.
bool ConsumeAndStoreUntil(tok::TokenKind T1,
CachedTokens &Toks,
bool StopAtSemi = true,
bool ConsumeFinalToken = true) {
return ConsumeAndStoreUntil(T1, T1, Toks, StopAtSemi, ConsumeFinalToken);
}
bool ConsumeAndStoreUntil(tok::TokenKind T1, tok::TokenKind T2,
CachedTokens &Toks,
bool StopAtSemi = true,
bool ConsumeFinalToken = true);
//===--------------------------------------------------------------------===//
// C99 6.9: External Definitions.
/// ParsedAttributes augmented with the source range the attributes spanned.
struct ParsedAttributesWithRange : ParsedAttributes {
ParsedAttributesWithRange(AttributeFactory &factory)
: ParsedAttributes(factory) {}
/// Clear the attributes and reset the range to an invalid one.
/// Note: hides (non-virtual) ParsedAttributes::clear.
void clear() {
ParsedAttributes::clear();
Range = SourceRange();
}
SourceRange Range;
};
/// ParsedAttributesView augmented with the source range it spans.
struct ParsedAttributesViewWithRange : ParsedAttributesView {
ParsedAttributesViewWithRange() : ParsedAttributesView() {}
/// Clear only the view's list and reset the range to an invalid one.
void clearListOnly() {
ParsedAttributesView::clearListOnly();
Range = SourceRange();
}
SourceRange Range;
};
DeclGroupPtrTy ParseExternalDeclaration(ParsedAttributesWithRange &attrs,
ParsingDeclSpec *DS = nullptr);
bool isDeclarationAfterDeclarator();
bool isStartOfFunctionDefinition(const ParsingDeclarator &Declarator);
DeclGroupPtrTy ParseDeclarationOrFunctionDefinition(
ParsedAttributesWithRange &attrs,
ParsingDeclSpec *DS = nullptr,
AccessSpecifier AS = AS_none);
DeclGroupPtrTy ParseDeclOrFunctionDefInternal(ParsedAttributesWithRange &attrs,
ParsingDeclSpec &DS,
AccessSpecifier AS);
void SkipFunctionBody();
Decl *ParseFunctionDefinition(ParsingDeclarator &D,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
LateParsedAttrList *LateParsedAttrs = nullptr);
void ParseKNRParamDeclarations(Declarator &D);
// EndLoc, if non-NULL, is filled with the location of the last token of
// the simple-asm.
ExprResult ParseSimpleAsm(SourceLocation *EndLoc = nullptr);
ExprResult ParseAsmStringLiteral();
// Objective-C External Declarations
void MaybeSkipAttributes(tok::ObjCKeywordKind Kind);
DeclGroupPtrTy ParseObjCAtDirectives(ParsedAttributesWithRange &Attrs);
DeclGroupPtrTy ParseObjCAtClassDeclaration(SourceLocation atLoc);
Decl *ParseObjCAtInterfaceDeclaration(SourceLocation AtLoc,
ParsedAttributes &prefixAttrs);
class ObjCTypeParamListScope;
ObjCTypeParamList *parseObjCTypeParamList();
ObjCTypeParamList *parseObjCTypeParamListOrProtocolRefs(
ObjCTypeParamListScope &Scope, SourceLocation &lAngleLoc,
SmallVectorImpl<IdentifierLocPair> &protocolIdents,
SourceLocation &rAngleLoc, bool mayBeProtocolList = true);
void HelperActionsForIvarDeclarations(Decl *interfaceDecl, SourceLocation atLoc,
BalancedDelimiterTracker &T,
SmallVectorImpl<Decl *> &AllIvarDecls,
bool RBraceMissing);
void ParseObjCClassInstanceVariables(Decl *interfaceDecl,
tok::ObjCKeywordKind visibility,
SourceLocation atLoc);
bool ParseObjCProtocolReferences(SmallVectorImpl<Decl *> &P,
SmallVectorImpl<SourceLocation> &PLocs,
bool WarnOnDeclarations,
bool ForObjCContainer,
SourceLocation &LAngleLoc,
SourceLocation &EndProtoLoc,
bool consumeLastToken);
/// Parse the first angle-bracket-delimited clause for an
/// Objective-C object or object pointer type, which may be either
/// type arguments or protocol qualifiers.
void parseObjCTypeArgsOrProtocolQualifiers(
ParsedType baseType,
SourceLocation &typeArgsLAngleLoc,
SmallVectorImpl<ParsedType> &typeArgs,
SourceLocation &typeArgsRAngleLoc,
SourceLocation &protocolLAngleLoc,
SmallVectorImpl<Decl *> &protocols,
SmallVectorImpl<SourceLocation> &protocolLocs,
SourceLocation &protocolRAngleLoc,
bool consumeLastToken,
bool warnOnIncompleteProtocols);
/// Parse either Objective-C type arguments or protocol qualifiers; if the
/// former, also parse protocol qualifiers afterward.
void parseObjCTypeArgsAndProtocolQualifiers(
ParsedType baseType,
SourceLocation &typeArgsLAngleLoc,
SmallVectorImpl<ParsedType> &typeArgs,
SourceLocation &typeArgsRAngleLoc,
SourceLocation &protocolLAngleLoc,
SmallVectorImpl<Decl *> &protocols,
SmallVectorImpl<SourceLocation> &protocolLocs,
SourceLocation &protocolRAngleLoc,
bool consumeLastToken);
/// Parse a protocol qualifier type such as '<NSCopying>', which is
/// an anachronistic way of writing 'id<NSCopying>'.
TypeResult parseObjCProtocolQualifierType(SourceLocation &rAngleLoc);
/// Parse Objective-C type arguments and protocol qualifiers, extending the
/// current type with the parsed result.
TypeResult parseObjCTypeArgsAndProtocolQualifiers(SourceLocation loc,
ParsedType type,
bool consumeLastToken,
SourceLocation &endLoc);
void ParseObjCInterfaceDeclList(tok::ObjCKeywordKind contextKey,
Decl *CDecl);
DeclGroupPtrTy ParseObjCAtProtocolDeclaration(SourceLocation atLoc,
ParsedAttributes &prefixAttrs);
struct ObjCImplParsingDataRAII {
  Parser &P;
  Decl *Dcl;
  bool HasCFunction;
  typedef SmallVector<LexedMethod*, 8> LateParsedObjCMethodContainer;
  LateParsedObjCMethodContainer LateParsedObjCMethods;

  /// Registers this object as the parser's current Objective-C
  /// implementation context.
  /// Fix: initialize \c Finished in the member-initializer list instead
  /// of assigning it in the constructor body (same behavior, but the
  /// member is never momentarily indeterminate).
  ObjCImplParsingDataRAII(Parser &parser, Decl *D)
      : P(parser), Dcl(D), HasCFunction(false), Finished(false) {
    P.CurParsedObjCImpl = this;
  }
  ~ObjCImplParsingDataRAII();

  void finish(SourceRange AtEnd);
  bool isFinished() const { return Finished; }

private:
  // NOTE(review): presumably set by finish() — see its out-of-line
  // definition.
  bool Finished;
};
ObjCImplParsingDataRAII *CurParsedObjCImpl;
void StashAwayMethodOrFunctionBodyTokens(Decl *MDecl);
DeclGroupPtrTy ParseObjCAtImplementationDeclaration(SourceLocation AtLoc,
ParsedAttributes &Attrs);
DeclGroupPtrTy ParseObjCAtEndDeclaration(SourceRange atEnd);
Decl *ParseObjCAtAliasDeclaration(SourceLocation atLoc);
Decl *ParseObjCPropertySynthesize(SourceLocation atLoc);
Decl *ParseObjCPropertyDynamic(SourceLocation atLoc);
IdentifierInfo *ParseObjCSelectorPiece(SourceLocation &MethodLocation);
// Definitions for Objective-c context sensitive keywords recognition.
enum ObjCTypeQual {
objc_in=0, objc_out, objc_inout, objc_oneway, objc_bycopy, objc_byref,
objc_nonnull, objc_nullable, objc_null_unspecified,
objc_NumQuals
};
IdentifierInfo *ObjCTypeQuals[objc_NumQuals];
bool isTokIdentifier_in() const;
ParsedType ParseObjCTypeName(ObjCDeclSpec &DS, DeclaratorContext Ctx,
ParsedAttributes *ParamAttrs);
void ParseObjCMethodRequirement();
Decl *ParseObjCMethodPrototype(
tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword,
bool MethodDefinition = true);
Decl *ParseObjCMethodDecl(SourceLocation mLoc, tok::TokenKind mType,
tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword,
bool MethodDefinition=true);
void ParseObjCPropertyAttribute(ObjCDeclSpec &DS);
Decl *ParseObjCMethodDefinition();
public:
//===--------------------------------------------------------------------===//
// C99 6.5: Expressions.
/// TypeCastState - State whether an expression is or may be a type cast.
enum TypeCastState {
NotTypeCast = 0,
MaybeTypeCast,
IsTypeCast
};
ExprResult ParseExpression(TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseConstantExpressionInExprEvalContext(
TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseConstantExpression(TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseCaseExpression(SourceLocation CaseLoc);
ExprResult ParseConstraintExpression();
// Expr that doesn't include commas.
ExprResult ParseAssignmentExpression(TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseMSAsmIdentifier(llvm::SmallVectorImpl<Token> &LineToks,
unsigned &NumLineToksConsumed,
bool IsUnevaluated);
private:
ExprResult ParseExpressionWithLeadingAt(SourceLocation AtLoc);
ExprResult ParseExpressionWithLeadingExtension(SourceLocation ExtLoc);
ExprResult ParseRHSOfBinaryExpression(ExprResult LHS,
prec::Level MinPrec);
ExprResult ParseCastExpression(bool isUnaryExpression,
bool isAddressOfOperand,
bool &NotCastExpr,
TypeCastState isTypeCast,
bool isVectorLiteral = false);
ExprResult ParseCastExpression(bool isUnaryExpression,
bool isAddressOfOperand = false,
TypeCastState isTypeCast = NotTypeCast,
bool isVectorLiteral = false);
/// Returns true if the next token cannot start an expression.
bool isNotExpressionStart();
/// Returns true if the next token would start a postfix-expression
/// suffix.
bool isPostfixExpressionSuffixStart() {
tok::TokenKind K = Tok.getKind();
return (K == tok::l_square || K == tok::l_paren ||
K == tok::period || K == tok::arrow ||
K == tok::plusplus || K == tok::minusminus);
}
bool diagnoseUnknownTemplateId(ExprResult TemplateName, SourceLocation Less);
void checkPotentialAngleBracket(ExprResult &PotentialTemplateName);
bool checkPotentialAngleBracketDelimiter(const AngleBracketTracker::Loc &,
const Token &OpToken);
bool checkPotentialAngleBracketDelimiter(const Token &OpToken) {
  // Only meaningful while we are tracking a potential template-argument
  // list; otherwise there is nothing to check.
  auto *Info = AngleBrackets.getCurrent(*this);
  return Info && checkPotentialAngleBracketDelimiter(*Info, OpToken);
}
ExprResult ParsePostfixExpressionSuffix(ExprResult LHS);
ExprResult ParseUnaryExprOrTypeTraitExpression();
ExprResult ParseBuiltinPrimaryExpression();
ExprResult ParseExprAfterUnaryExprOrTypeTrait(const Token &OpTok,
bool &isCastExpr,
ParsedType &CastTy,
SourceRange &CastRange);
typedef SmallVector<Expr*, 20> ExprListTy;
typedef SmallVector<SourceLocation, 20> CommaLocsTy;
/// ParseExpressionList - Used for C/C++ (argument-)expression-list.
bool ParseExpressionList(SmallVectorImpl<Expr *> &Exprs,
SmallVectorImpl<SourceLocation> &CommaLocs,
llvm::function_ref<void()> ExpressionStarts =
llvm::function_ref<void()>());
/// ParseSimpleExpressionList - A simple comma-separated list of expressions,
/// used for misc language extensions.
bool ParseSimpleExpressionList(SmallVectorImpl<Expr*> &Exprs,
SmallVectorImpl<SourceLocation> &CommaLocs);
/// ParenParseOption - Control what ParseParenExpression will parse.
enum ParenParseOption {
SimpleExpr, // Only parse '(' expression ')'
FoldExpr, // Also allow fold-expression <anything>
CompoundStmt, // Also allow '(' compound-statement ')'
CompoundLiteral, // Also allow '(' type-name ')' '{' ... '}'
CastExpr // Also allow '(' type-name ')' <anything>
};
ExprResult ParseParenExpression(ParenParseOption &ExprType,
bool stopIfCastExpr,
bool isTypeCast,
ParsedType &CastTy,
SourceLocation &RParenLoc);
ExprResult ParseCXXAmbiguousParenExpression(
ParenParseOption &ExprType, ParsedType &CastTy,
BalancedDelimiterTracker &Tracker, ColonProtectionRAIIObject &ColonProt);
ExprResult ParseCompoundLiteralExpression(ParsedType Ty,
SourceLocation LParenLoc,
SourceLocation RParenLoc);
ExprResult ParseStringLiteralExpression(bool AllowUserDefinedLiteral = false);
ExprResult ParseGenericSelectionExpression();
ExprResult ParseObjCBoolLiteral();
ExprResult ParseFoldExpression(ExprResult LHS, BalancedDelimiterTracker &T);
//===--------------------------------------------------------------------===//
// C++ Expressions
ExprResult tryParseCXXIdExpression(CXXScopeSpec &SS, bool isAddressOfOperand,
Token &Replacement);
ExprResult ParseCXXIdExpression(bool isAddressOfOperand = false);
bool areTokensAdjacent(const Token &A, const Token &B);
void CheckForTemplateAndDigraph(Token &Next, ParsedType ObjectTypePtr,
bool EnteringContext, IdentifierInfo &II,
CXXScopeSpec &SS);
bool ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS, ParsedType ObjectType,
bool EnteringContext,
bool *MayBePseudoDestructor = nullptr,
bool IsTypename = false,
IdentifierInfo **LastII = nullptr,
bool OnlyNamespace = false,
bool InUsingDeclaration = false);
//===--------------------------------------------------------------------===//
// C++11 5.1.2: Lambda expressions
/// Result of tentatively parsing a lambda-introducer.
enum class LambdaIntroducerTentativeParse {
/// This appears to be a lambda-introducer, which has been fully parsed.
Success,
/// This is a lambda-introducer, but has not been fully parsed, and this
/// function needs to be called again to parse it.
Incomplete,
/// This is definitely an Objective-C message send expression, rather than
/// a lambda-introducer, attribute-specifier, or array designator.
MessageSend,
/// This is not a lambda-introducer.
Invalid,
};
// [...] () -> type {...}
ExprResult ParseLambdaExpression();
ExprResult TryParseLambdaExpression();
bool
ParseLambdaIntroducer(LambdaIntroducer &Intro,
LambdaIntroducerTentativeParse *Tentative = nullptr);
ExprResult ParseLambdaExpressionAfterIntroducer(LambdaIntroducer &Intro);
//===--------------------------------------------------------------------===//
// C++ 5.2p1: C++ Casts
ExprResult ParseCXXCasts();
/// Parse a __builtin_bit_cast(T, E), used to implement C++2a std::bit_cast.
ExprResult ParseBuiltinBitCast();
//===--------------------------------------------------------------------===//
// C++ 5.2p1: C++ Type Identification
ExprResult ParseCXXTypeid();
//===--------------------------------------------------------------------===//
// C++ : Microsoft __uuidof Expression
ExprResult ParseCXXUuidof();
//===--------------------------------------------------------------------===//
// C++ 5.2.4: C++ Pseudo-Destructor Expressions
ExprResult ParseCXXPseudoDestructor(Expr *Base, SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
ParsedType ObjectType);
//===--------------------------------------------------------------------===//
// C++ 9.3.2: C++ 'this' pointer
ExprResult ParseCXXThis();
//===--------------------------------------------------------------------===//
// C++ 15: C++ Throw Expression
ExprResult ParseThrowExpression();
ExceptionSpecificationType tryParseExceptionSpecification(
bool Delayed,
SourceRange &SpecificationRange,
SmallVectorImpl<ParsedType> &DynamicExceptions,
SmallVectorImpl<SourceRange> &DynamicExceptionRanges,
ExprResult &NoexceptExpr,
CachedTokens *&ExceptionSpecTokens);
// EndLoc is filled with the location of the last token of the specification.
ExceptionSpecificationType ParseDynamicExceptionSpecification(
SourceRange &SpecificationRange,
SmallVectorImpl<ParsedType> &Exceptions,
SmallVectorImpl<SourceRange> &Ranges);
//===--------------------------------------------------------------------===//
// C++0x 8: Function declaration trailing-return-type
TypeResult ParseTrailingReturnType(SourceRange &Range,
bool MayBeFollowedByDirectInit);
//===--------------------------------------------------------------------===//
// C++ 2.13.5: C++ Boolean Literals
ExprResult ParseCXXBoolLiteral();
//===--------------------------------------------------------------------===//
// C++ 5.2.3: Explicit type conversion (functional notation)
ExprResult ParseCXXTypeConstructExpression(const DeclSpec &DS);
/// ParseCXXSimpleTypeSpecifier - [C++ 7.1.5.2] Simple type specifiers.
/// This should only be called when the current token is known to be part of
/// simple-type-specifier.
void ParseCXXSimpleTypeSpecifier(DeclSpec &DS);
bool ParseCXXTypeSpecifierSeq(DeclSpec &DS);
//===--------------------------------------------------------------------===//
// C++ 5.3.4 and 5.3.5: C++ new and delete
bool ParseExpressionListOrTypeId(SmallVectorImpl<Expr*> &Exprs,
Declarator &D);
void ParseDirectNewDeclarator(Declarator &D);
ExprResult ParseCXXNewExpression(bool UseGlobal, SourceLocation Start);
ExprResult ParseCXXDeleteExpression(bool UseGlobal,
SourceLocation Start);
//===--------------------------------------------------------------------===//
// C++ if/switch/while/for condition expression.
struct ForRangeInfo;
Sema::ConditionResult ParseCXXCondition(StmtResult *InitStmt,
SourceLocation Loc,
Sema::ConditionKind CK,
ForRangeInfo *FRI = nullptr);
//===--------------------------------------------------------------------===//
// C++ Coroutines
ExprResult ParseCoyieldExpression();
//===--------------------------------------------------------------------===//
// C99 6.7.8: Initialization.
/// ParseInitializer
/// initializer: [C99 6.7.8]
/// assignment-expression
/// '{' ...
ExprResult ParseInitializer() {
  // A '{' introduces a braced initializer; anything else must be an
  // assignment-expression.
  return Tok.is(tok::l_brace) ? ParseBraceInitializer()
                              : ParseAssignmentExpression();
}
bool MayBeDesignationStart();
ExprResult ParseBraceInitializer();
ExprResult ParseInitializerWithPotentialDesignator();
//===--------------------------------------------------------------------===//
// clang Expressions
ExprResult ParseBlockLiteralExpression(); // ^{...}
//===--------------------------------------------------------------------===//
// Objective-C Expressions
ExprResult ParseObjCAtExpression(SourceLocation AtLocation);
ExprResult ParseObjCStringLiteral(SourceLocation AtLoc);
ExprResult ParseObjCCharacterLiteral(SourceLocation AtLoc);
ExprResult ParseObjCNumericLiteral(SourceLocation AtLoc);
ExprResult ParseObjCBooleanLiteral(SourceLocation AtLoc, bool ArgValue);
ExprResult ParseObjCArrayLiteral(SourceLocation AtLoc);
ExprResult ParseObjCDictionaryLiteral(SourceLocation AtLoc);
ExprResult ParseObjCBoxedExpr(SourceLocation AtLoc);
ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc);
ExprResult ParseObjCSelectorExpression(SourceLocation AtLoc);
ExprResult ParseObjCProtocolExpression(SourceLocation AtLoc);
bool isSimpleObjCMessageExpression();
ExprResult ParseObjCMessageExpression();
ExprResult ParseObjCMessageExpressionBody(SourceLocation LBracloc,
SourceLocation SuperLoc,
ParsedType ReceiverType,
Expr *ReceiverExpr);
ExprResult ParseAssignmentExprWithObjCMessageExprStart(
SourceLocation LBracloc, SourceLocation SuperLoc,
ParsedType ReceiverType, Expr *ReceiverExpr);
bool ParseObjCXXMessageReceiver(bool &IsExpr, void *&TypeOrExpr);
//===--------------------------------------------------------------------===//
// C99 6.8: Statements and Blocks.
/// A SmallVector of statements, with stack size 32 (as that is the only one
/// used.)
typedef SmallVector<Stmt*, 32> StmtVector;
/// A SmallVector of expressions, with stack size 12 (the maximum used.)
typedef SmallVector<Expr*, 12> ExprVector;
/// A SmallVector of types.
typedef SmallVector<ParsedType, 12> TypeVector;
StmtResult
ParseStatement(SourceLocation *TrailingElseLoc = nullptr,
ParsedStmtContext StmtCtx = ParsedStmtContext::SubStmt);
StmtResult ParseStatementOrDeclaration(
StmtVector &Stmts, ParsedStmtContext StmtCtx,
SourceLocation *TrailingElseLoc = nullptr);
StmtResult ParseStatementOrDeclarationAfterAttributes(
StmtVector &Stmts,
ParsedStmtContext StmtCtx,
SourceLocation *TrailingElseLoc,
ParsedAttributesWithRange &Attrs);
StmtResult ParseExprStatement(ParsedStmtContext StmtCtx);
StmtResult ParseLabeledStatement(ParsedAttributesWithRange &attrs,
ParsedStmtContext StmtCtx);
StmtResult ParseCaseStatement(ParsedStmtContext StmtCtx,
bool MissingCase = false,
ExprResult Expr = ExprResult());
StmtResult ParseDefaultStatement(ParsedStmtContext StmtCtx);
StmtResult ParseCompoundStatement(bool isStmtExpr = false);
StmtResult ParseCompoundStatement(bool isStmtExpr,
unsigned ScopeFlags);
void ParseCompoundStatementLeadingPragmas();
bool ConsumeNullStmt(StmtVector &Stmts);
StmtResult ParseCompoundStatementBody(bool isStmtExpr = false);
bool ParseParenExprOrCondition(StmtResult *InitStmt,
Sema::ConditionResult &CondResult,
SourceLocation Loc,
Sema::ConditionKind CK);
StmtResult ParseIfStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseSwitchStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseWhileStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseDoStatement();
StmtResult ParseForStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseGotoStatement();
StmtResult ParseContinueStatement();
StmtResult ParseBreakStatement();
StmtResult ParseReturnStatement();
StmtResult ParseAsmStatement(bool &msAsm);
StmtResult ParseMicrosoftAsmStatement(SourceLocation AsmLoc);
StmtResult ParsePragmaLoopHint(StmtVector &Stmts,
ParsedStmtContext StmtCtx,
SourceLocation *TrailingElseLoc,
ParsedAttributesWithRange &Attrs);
/// Describes the behavior that should be taken for an __if_exists
/// block.
enum IfExistsBehavior {
/// Parse the block; this code is always used.
IEB_Parse,
/// Skip the block entirely; this code is never used.
IEB_Skip,
/// Parse the block as a dependent block, which may be used in
/// some template instantiations but not others.
IEB_Dependent
};
/// Describes the condition of a Microsoft __if_exists or
/// __if_not_exists block.
struct IfExistsCondition {
/// The location of the initial keyword.
SourceLocation KeywordLoc;
/// Whether this is an __if_exists block (rather than an
/// __if_not_exists block).
bool IsIfExists;
/// Nested-name-specifier preceding the name.
CXXScopeSpec SS;
/// The name we're looking for.
UnqualifiedId Name;
/// The behavior that this __if_exists or __if_not_exists block
/// should follow.
IfExistsBehavior Behavior;
};
bool ParseMicrosoftIfExistsCondition(IfExistsCondition& Result);
void ParseMicrosoftIfExistsStatement(StmtVector &Stmts);
void ParseMicrosoftIfExistsExternalDeclaration();
void ParseMicrosoftIfExistsClassDeclaration(DeclSpec::TST TagType,
ParsedAttributes &AccessAttrs,
AccessSpecifier &CurAS);
bool ParseMicrosoftIfExistsBraceInitializer(ExprVector &InitExprs,
bool &InitExprsOk);
bool ParseAsmOperandsOpt(SmallVectorImpl<IdentifierInfo *> &Names,
SmallVectorImpl<Expr *> &Constraints,
SmallVectorImpl<Expr *> &Exprs);
//===--------------------------------------------------------------------===//
// C++ 6: Statements and Blocks
StmtResult ParseCXXTryBlock();
StmtResult ParseCXXTryBlockCommon(SourceLocation TryLoc, bool FnTry = false);
StmtResult ParseCXXCatchBlock(bool FnCatch = false);
//===--------------------------------------------------------------------===//
// MS: SEH Statements and Blocks
StmtResult ParseSEHTryBlock();
StmtResult ParseSEHExceptBlock(SourceLocation Loc);
StmtResult ParseSEHFinallyBlock(SourceLocation Loc);
StmtResult ParseSEHLeaveStatement();
//===--------------------------------------------------------------------===//
// Objective-C Statements
StmtResult ParseObjCAtStatement(SourceLocation atLoc,
ParsedStmtContext StmtCtx);
StmtResult ParseObjCTryStmt(SourceLocation atLoc);
StmtResult ParseObjCThrowStmt(SourceLocation atLoc);
StmtResult ParseObjCSynchronizedStmt(SourceLocation atLoc);
StmtResult ParseObjCAutoreleasePoolStmt(SourceLocation atLoc);
//===--------------------------------------------------------------------===//
// C99 6.7: Declarations.
/// A context for parsing declaration specifiers. TODO: flesh this
/// out, there are other significant restrictions on specifiers that
/// would be best implemented in the parser.
enum class DeclSpecContext {
DSC_normal, // normal context
DSC_class, // class context, enables 'friend'
DSC_type_specifier, // C++ type-specifier-seq or C specifier-qualifier-list
DSC_trailing, // C++11 trailing-type-specifier in a trailing return type
DSC_alias_declaration, // C++11 type-specifier-seq in an alias-declaration
DSC_top_level, // top-level/namespace declaration context
DSC_template_param, // template parameter context
DSC_template_type_arg, // template type argument context
DSC_objc_method_result, // ObjC method result context, enables 'instancetype'
DSC_condition // condition declaration context (e.g. 'if (int x = f())')
};
/// Is this a context in which we are parsing just a type-specifier (or
/// trailing-type-specifier)?
static bool isTypeSpecifier(DeclSpecContext DSC) {
// List the type-specifier-only contexts first; everything else is a
// full declaration context. The exhaustive switch (no default) lets the
// compiler flag any newly added DeclSpecContext enumerator.
switch (DSC) {
case DeclSpecContext::DSC_type_specifier:
case DeclSpecContext::DSC_trailing:
case DeclSpecContext::DSC_alias_declaration:
case DeclSpecContext::DSC_template_type_arg:
return true;
case DeclSpecContext::DSC_normal:
case DeclSpecContext::DSC_class:
case DeclSpecContext::DSC_top_level:
case DeclSpecContext::DSC_template_param:
case DeclSpecContext::DSC_objc_method_result:
case DeclSpecContext::DSC_condition:
return false;
}
llvm_unreachable("Missing DeclSpecContext case");
}
/// Is this a context in which we can perform class template argument
/// deduction?
static bool isClassTemplateDeductionContext(DeclSpecContext DSC) {
// Deduction is disallowed in the contexts listed first; the exhaustive
// switch (no default) forces an update when DeclSpecContext grows.
switch (DSC) {
case DeclSpecContext::DSC_objc_method_result:
case DeclSpecContext::DSC_template_type_arg:
case DeclSpecContext::DSC_trailing:
case DeclSpecContext::DSC_alias_declaration:
return false;
case DeclSpecContext::DSC_normal:
case DeclSpecContext::DSC_template_param:
case DeclSpecContext::DSC_class:
case DeclSpecContext::DSC_top_level:
case DeclSpecContext::DSC_condition:
case DeclSpecContext::DSC_type_specifier:
return true;
}
llvm_unreachable("Missing DeclSpecContext case");
}
/// Information on a C++0x for-range-initializer found while parsing a
/// declaration which turns out to be a for-range-declaration.
struct ForRangeInit {
SourceLocation ColonLoc;
ExprResult RangeExpr;
/// True if a for-range declaration (and hence its ':') was parsed.
bool ParsedForRangeDecl() { return ColonLoc.isValid(); }
};
/// A ForRangeInit plus the statement for the parsed loop variable.
struct ForRangeInfo : ForRangeInit {
StmtResult LoopVar;
};
DeclGroupPtrTy ParseDeclaration(DeclaratorContext Context,
SourceLocation &DeclEnd,
ParsedAttributesWithRange &attrs,
SourceLocation *DeclSpecStart = nullptr);
DeclGroupPtrTy
ParseSimpleDeclaration(DeclaratorContext Context, SourceLocation &DeclEnd,
ParsedAttributesWithRange &attrs, bool RequireSemi,
ForRangeInit *FRI = nullptr,
SourceLocation *DeclSpecStart = nullptr);
bool MightBeDeclarator(DeclaratorContext Context);
DeclGroupPtrTy ParseDeclGroup(ParsingDeclSpec &DS, DeclaratorContext Context,
SourceLocation *DeclEnd = nullptr,
ForRangeInit *FRI = nullptr);
Decl *ParseDeclarationAfterDeclarator(Declarator &D,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo());
bool ParseAsmAttributesAfterDeclarator(Declarator &D);
Decl *ParseDeclarationAfterDeclaratorAndAttributes(
Declarator &D,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
ForRangeInit *FRI = nullptr);
Decl *ParseFunctionStatementBody(Decl *Decl, ParseScope &BodyScope);
Decl *ParseFunctionTryBlock(Decl *Decl, ParseScope &BodyScope);
/// When in code-completion, skip parsing of the function/method body
/// unless the body contains the code-completion point.
///
/// \returns true if the function body was skipped.
bool trySkippingFunctionBody();
bool ParseImplicitInt(DeclSpec &DS, CXXScopeSpec *SS,
const ParsedTemplateInfo &TemplateInfo,
AccessSpecifier AS, DeclSpecContext DSC,
ParsedAttributesWithRange &Attrs);
DeclSpecContext
getDeclSpecContextFromDeclaratorContext(DeclaratorContext Context);
void ParseDeclarationSpecifiers(
DeclSpec &DS,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
AccessSpecifier AS = AS_none,
DeclSpecContext DSC = DeclSpecContext::DSC_normal,
LateParsedAttrList *LateAttrs = nullptr);
bool DiagnoseMissingSemiAfterTagDefinition(
DeclSpec &DS, AccessSpecifier AS, DeclSpecContext DSContext,
LateParsedAttrList *LateAttrs = nullptr);
void ParseSpecifierQualifierList(
DeclSpec &DS, AccessSpecifier AS = AS_none,
DeclSpecContext DSC = DeclSpecContext::DSC_normal);
void ParseObjCTypeQualifierList(ObjCDeclSpec &DS,
DeclaratorContext Context);
void ParseEnumSpecifier(SourceLocation TagLoc, DeclSpec &DS,
const ParsedTemplateInfo &TemplateInfo,
AccessSpecifier AS, DeclSpecContext DSC);
void ParseEnumBody(SourceLocation StartLoc, Decl *TagDecl);
void ParseStructUnionBody(SourceLocation StartLoc, DeclSpec::TST TagType,
Decl *TagDecl);
void ParseStructDeclaration(
ParsingDeclSpec &DS,
llvm::function_ref<void(ParsingFieldDeclarator &)> FieldsCallback);
bool isDeclarationSpecifier(bool DisambiguatingWithExpression = false);
bool isTypeSpecifierQualifier();
/// isKnownToBeTypeSpecifier - Return true if we know that the specified token
/// is definitely a type-specifier. Return false if it isn't part of a type
/// specifier or if we're not sure.
bool isKnownToBeTypeSpecifier(const Token &Tok) const;
/// Return true if we know that we are definitely looking at a
/// decl-specifier, and isn't part of an expression such as a function-style
/// cast. Return false if it's not a decl-specifier, or we're not sure.
bool isKnownToBeDeclarationSpecifier() {
return getLangOpts().CPlusPlus
? isCXXDeclarationSpecifier() == TPResult::True
: isDeclarationSpecifier(true);
}
/// isDeclarationStatement - Disambiguates between a declaration or an
/// expression statement, when parsing function bodies.
/// Returns true for declaration, false for expression.
bool isDeclarationStatement() {
return getLangOpts().CPlusPlus ? isCXXDeclarationStatement()
: isDeclarationSpecifier(true);
}
/// isForInitDeclaration - Disambiguates between a declaration or an
/// expression in the context of the C 'clause-1' or the C++
/// 'for-init-statement' part of a 'for' statement.
/// Returns true for declaration, false for expression.
bool isForInitDeclaration() {
// Let Sema know a loop is starting before disambiguating, so OpenMP
// loop-specific handling is active during the tentative parse.
if (getLangOpts().OpenMP)
Actions.startOpenMPLoop();
return getLangOpts().CPlusPlus
? isCXXSimpleDeclaration(/*AllowForRangeDecl=*/true)
: isDeclarationSpecifier(true);
}
/// Determine whether this is a C++1z for-range-identifier.
bool isForRangeIdentifier();
/// Determine whether we are currently at the start of an Objective-C
/// class message that appears to be missing the open bracket '['.
bool isStartOfObjCClassMessageMissingOpenBracket();
/// Starting with a scope specifier, identifier, or
/// template-id that refers to the current class, determine whether
/// this is a constructor declarator.
bool isConstructorDeclarator(bool Unqualified, bool DeductionGuide = false);
/// Specifies the context in which type-id/expression
/// disambiguation will occur.
enum TentativeCXXTypeIdContext {
TypeIdInParens, ///< The candidate type-id appears inside parentheses.
TypeIdUnambiguous, ///< The type-id is not assumed to be parenthesized.
TypeIdAsTemplateArgument ///< presumably a template-argument position — confirm at use sites
};
/// isTypeIdInParens - Assumes that a '(' was parsed and now we want to know
/// whether the parens contain an expression or a type-id.
/// Returns true for a type-id and false for an expression.
bool isTypeIdInParens(bool &isAmbiguous) {
if (!getLangOpts().CPlusPlus) {
// In C, type-ids are never ambiguous with expressions here.
isAmbiguous = false;
return isTypeSpecifierQualifier();
}
return isCXXTypeId(TypeIdInParens, isAmbiguous);
}
/// Convenience overload that discards the ambiguity flag.
bool isTypeIdInParens() {
bool Ambiguous;
return isTypeIdInParens(Ambiguous);
}
/// Checks if the current tokens form a type-id or an expression.
/// Similar to isTypeIdInParens, but does not assume the type-id
/// is parenthesized.
bool isTypeIdUnambiguously() {
if (!getLangOpts().CPlusPlus)
return isTypeSpecifierQualifier();
bool Ambiguous; // result flag required by the API; value unused here
return isCXXTypeId(TypeIdUnambiguous, Ambiguous);
}
/// isCXXDeclarationStatement - C++-specialized function that disambiguates
/// between a declaration or an expression statement, when parsing function
/// bodies. Returns true for declaration, false for expression.
bool isCXXDeclarationStatement();
/// isCXXSimpleDeclaration - C++-specialized function that disambiguates
/// between a simple-declaration or an expression-statement.
/// If during the disambiguation process a parsing error is encountered,
/// the function returns true to let the declaration parsing code handle it.
/// Returns false if the statement is disambiguated as expression.
bool isCXXSimpleDeclaration(bool AllowForRangeDecl);
/// isCXXFunctionDeclarator - Disambiguates between a function declarator or
/// a constructor-style initializer, when parsing declaration statements.
/// Returns true for function declarator and false for constructor-style
/// initializer. Sets 'IsAmbiguous' to true to indicate that this declaration
/// might be a constructor-style initializer.
/// If during the disambiguation process a parsing error is encountered,
/// the function returns true to let the declaration parsing code handle it.
bool isCXXFunctionDeclarator(bool *IsAmbiguous = nullptr);
// Helper state for condition/init-statement disambiguation (defined
// elsewhere).
struct ConditionDeclarationOrInitStatementState;
/// Result of disambiguating what follows 'if (' or 'switch ('.
enum class ConditionOrInitStatement {
Expression, ///< Disambiguated as an expression (either kind).
ConditionDecl, ///< Disambiguated as the declaration form of condition.
InitStmtDecl, ///< Disambiguated as a simple-declaration init-statement.
ForRangeDecl, ///< Disambiguated as a for-range declaration.
Error ///< Can't be any of the above!
};
/// Disambiguates between the different kinds of things that can happen
/// after 'if (' or 'switch ('. This could be one of two different kinds of
/// declaration (depending on whether there is a ';' later) or an expression.
ConditionOrInitStatement
isCXXConditionDeclarationOrInitStatement(bool CanBeInitStmt,
bool CanBeForRangeDecl);
bool isCXXTypeId(TentativeCXXTypeIdContext Context, bool &isAmbiguous);
/// Convenience overload that discards the ambiguity flag.
bool isCXXTypeId(TentativeCXXTypeIdContext Context) {
bool Ambiguous;
return isCXXTypeId(Context, Ambiguous);
}
/// TPResult - Used as the result value for functions whose purpose is to
/// disambiguate C++ constructs by "tentatively parsing" them.
/// True/False mean the ambiguity was resolved; Ambiguous means more
/// tentative parsing is needed; Error means a parse error was hit.
enum class TPResult {
True, False, Ambiguous, Error
};
/// Based only on the given token kind, determine whether we know that
/// we're at the start of an expression or a type-specifier-seq (which may
/// be an expression, in C++).
///
/// This routine does not attempt to resolve any of the trick cases, e.g.,
/// those involving lookup of identifiers.
///
/// \returns \c TPR_true if this token starts an expression, \c TPR_false if
/// this token starts a type-specifier-seq, or \c TPR_ambiguous if it cannot
/// tell.
TPResult isExpressionOrTypeSpecifierSimple(tok::TokenKind Kind);
/// isCXXDeclarationSpecifier - Returns TPResult::True if it is a
/// declaration specifier, TPResult::False if it is not,
/// TPResult::Ambiguous if it could be either a decl-specifier or a
/// function-style cast, and TPResult::Error if a parsing error was
/// encountered. If it could be a braced C++11 function-style cast, returns
/// BracedCastResult.
/// Doesn't consume tokens.
TPResult
isCXXDeclarationSpecifier(TPResult BracedCastResult = TPResult::False,
bool *InvalidAsDeclSpec = nullptr);
/// Given that isCXXDeclarationSpecifier returns \c TPResult::True or
/// \c TPResult::Ambiguous, determine whether the decl-specifier would be
/// a type-specifier other than a cv-qualifier.
bool isCXXDeclarationSpecifierAType();
/// Determine whether the current token sequence might be
/// '<' template-argument-list '>'
/// rather than a less-than expression.
TPResult isTemplateArgumentList(unsigned TokensToSkip);
/// Determine whether an identifier has been tentatively declared as a
/// non-type. Such tentative declarations should not be found to name a type
/// during a tentative parse, but also should not be annotated as a non-type.
bool isTentativelyDeclared(IdentifierInfo *II);
// "Tentative parsing" functions, used for disambiguation. If a parsing error
// is encountered they will return TPResult::Error.
// Returning TPResult::True/False indicates that the ambiguity was
// resolved and tentative parsing may stop. TPResult::Ambiguous indicates
// that more tentative parsing is necessary for disambiguation.
// They all consume tokens, so backtracking should be used after calling them.
TPResult TryParseSimpleDeclaration(bool AllowForRangeDecl);
TPResult TryParseTypeofSpecifier();
TPResult TryParseProtocolQualifiers();
TPResult TryParsePtrOperatorSeq();
TPResult TryParseOperatorId();
TPResult TryParseInitDeclaratorList();
TPResult TryParseDeclarator(bool mayBeAbstract, bool mayHaveIdentifier = true,
bool mayHaveDirectInit = false);
TPResult
TryParseParameterDeclarationClause(bool *InvalidAsDeclaration = nullptr,
bool VersusTemplateArg = false);
TPResult TryParseFunctionDeclarator();
TPResult TryParseBracketDeclarator();
TPResult TryConsumeDeclarationSpecifier();
public:
TypeResult ParseTypeName(SourceRange *Range = nullptr,
DeclaratorContext Context
= DeclaratorContext::TypeNameContext,
AccessSpecifier AS = AS_none,
Decl **OwnedType = nullptr,
ParsedAttributes *Attrs = nullptr);
private:
void ParseBlockId(SourceLocation CaretLoc);
/// Are [[]] attributes enabled?
bool standardAttributesAllowed() const {
const LangOptions &LO = getLangOpts();
return LO.DoubleSquareBracketAttributes;
}
// Check for the start of an attribute-specifier-seq in a context where an
// attribute is not allowed. Returns true if a prohibited attribute was
// diagnosed.
bool CheckProhibitedCXX11Attribute() {
assert(Tok.is(tok::l_square));
// Only a '[[' sequence with standard attributes enabled can start an
// attribute-specifier here.
if (standardAttributesAllowed() && NextToken().is(tok::l_square))
return DiagnoseProhibitedCXX11Attribute();
return false;
}
bool DiagnoseProhibitedCXX11Attribute();
/// Diagnose a C++11 attribute (or alignas) that appears at the current
/// token but belongs at \p CorrectLocation instead.
void CheckMisplacedCXX11Attribute(ParsedAttributesWithRange &Attrs,
SourceLocation CorrectLocation) {
if (!standardAttributesAllowed())
return;
bool StartsAttrSpecifier =
Tok.is(tok::l_square) && NextToken().is(tok::l_square);
if (StartsAttrSpecifier || Tok.is(tok::kw_alignas))
DiagnoseMisplacedCXX11Attribute(Attrs, CorrectLocation);
}
void DiagnoseMisplacedCXX11Attribute(ParsedAttributesWithRange &Attrs,
SourceLocation CorrectLocation);
void stripTypeAttributesOffDeclSpec(ParsedAttributesWithRange &Attrs,
DeclSpec &DS, Sema::TagUseKind TUK);
// Diagnose and drop any attributes that were parsed where attributes are
// not permitted. FixItLoc = possible correct location for the attributes.
void ProhibitAttributes(ParsedAttributesWithRange &Attrs,
SourceLocation FixItLoc = SourceLocation()) {
if (Attrs.Range.isValid()) {
DiagnoseProhibitedAttributes(Attrs.Range, FixItLoc);
Attrs.clear();
}
}
/// Overload for attribute views: diagnoses prohibited attributes but only
/// clears the list, leaving the underlying attributes alive.
void ProhibitAttributes(ParsedAttributesViewWithRange &Attrs,
SourceLocation FixItLoc = SourceLocation()) {
if (Attrs.Range.isValid()) {
DiagnoseProhibitedAttributes(Attrs.Range, FixItLoc);
Attrs.clearListOnly();
}
}
void DiagnoseProhibitedAttributes(const SourceRange &Range,
SourceLocation FixItLoc);
// Forbid C++11 and C2x attributes that appear in certain syntactic locations
// which the standard permits but we don't support yet — for example,
// attributes that appertain to decl specifiers.
void ProhibitCXX11Attributes(ParsedAttributesWithRange &Attrs,
unsigned DiagID);
/// Skip C++11 and C2x attributes and return the end location of the
/// last one.
/// \returns SourceLocation() if there are no attributes.
SourceLocation SkipCXX11Attributes();
/// Diagnose and skip C++11 and C2x attributes that appear in syntactic
/// locations where attributes are not allowed.
void DiagnoseAndSkipCXX11Attributes();
/// Parses syntax-generic attribute arguments for attributes which are
/// known to the implementation, and adds them to the given ParsedAttributes
/// list with the given attribute syntax. Returns the number of arguments
/// parsed for the attribute.
unsigned
ParseAttributeArgsCommon(IdentifierInfo *AttrName, SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc,
IdentifierInfo *ScopeName, SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
/// If the current token is '__attribute__', parse the GNU attribute list
/// and attach it to declarator \p D; otherwise do nothing.
void MaybeParseGNUAttributes(Declarator &D,
LateParsedAttrList *LateAttrs = nullptr) {
if (Tok.isNot(tok::kw___attribute))
return;
ParsedAttributes attrs(AttrFactory);
SourceLocation endLoc;
ParseGNUAttributes(attrs, &endLoc, LateAttrs, &D);
D.takeAttributes(attrs, endLoc);
}
/// If the current token is '__attribute__', parse the GNU attribute list
/// into \p attrs; otherwise do nothing.
void MaybeParseGNUAttributes(ParsedAttributes &attrs,
SourceLocation *endLoc = nullptr,
LateParsedAttrList *LateAttrs = nullptr) {
if (Tok.isNot(tok::kw___attribute))
return;
ParseGNUAttributes(attrs, endLoc, LateAttrs);
}
void ParseGNUAttributes(ParsedAttributes &attrs,
SourceLocation *endLoc = nullptr,
LateParsedAttrList *LateAttrs = nullptr,
Declarator *D = nullptr);
void ParseGNUAttributeArgs(IdentifierInfo *AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc,
IdentifierInfo *ScopeName, SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax, Declarator *D);
IdentifierLoc *ParseIdentifierLoc();
unsigned
ParseClangAttributeArgs(IdentifierInfo *AttrName, SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc,
IdentifierInfo *ScopeName, SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
/// If a C++11 attribute-specifier starts here (and [[]] attributes are
/// enabled), parse it and attach the attributes to declarator \p D.
void MaybeParseCXX11Attributes(Declarator &D) {
if (!standardAttributesAllowed() || !isCXX11AttributeSpecifier())
return;
ParsedAttributesWithRange attrs(AttrFactory);
SourceLocation endLoc;
ParseCXX11Attributes(attrs, &endLoc);
D.takeAttributes(attrs, endLoc);
}
/// If a C++11 attribute-specifier starts here (and [[]] attributes are
/// enabled), parse it and move the attributes into \p attrs.
void MaybeParseCXX11Attributes(ParsedAttributes &attrs,
SourceLocation *endLoc = nullptr) {
if (!standardAttributesAllowed() || !isCXX11AttributeSpecifier())
return;
// ParseCXX11Attributes needs the ranged container; transfer afterwards.
ParsedAttributesWithRange Ranged(AttrFactory);
ParseCXX11Attributes(Ranged, endLoc);
attrs.takeAllFrom(Ranged);
}
/// If a C++11 attribute-specifier starts here (and [[]] attributes are
/// enabled), parse it into \p attrs. \p OuterMightBeMessageSend is passed
/// through to disambiguation against ObjC message sends.
void MaybeParseCXX11Attributes(ParsedAttributesWithRange &attrs,
SourceLocation *endLoc = nullptr,
bool OuterMightBeMessageSend = false) {
if (!standardAttributesAllowed())
return;
if (isCXX11AttributeSpecifier(false, OuterMightBeMessageSend))
ParseCXX11Attributes(attrs, endLoc);
}
void ParseCXX11AttributeSpecifier(ParsedAttributes &attrs,
SourceLocation *EndLoc = nullptr);
void ParseCXX11Attributes(ParsedAttributesWithRange &attrs,
SourceLocation *EndLoc = nullptr);
/// Parses a C++11 (or C2x)-style attribute argument list. Returns true
/// if this results in adding an attribute to the ParsedAttributes list.
bool ParseCXX11AttributeArgs(IdentifierInfo *AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc);
IdentifierInfo *TryParseCXX11AttributeIdentifier(SourceLocation &Loc);
/// In Microsoft-extension mode, parse a '[...]' attribute block at the
/// current token into \p attrs; otherwise do nothing.
void MaybeParseMicrosoftAttributes(ParsedAttributes &attrs,
SourceLocation *endLoc = nullptr) {
if (!getLangOpts().MicrosoftExt || Tok.isNot(tok::l_square))
return;
ParseMicrosoftAttributes(attrs, endLoc);
}
void ParseMicrosoftUuidAttributeArgs(ParsedAttributes &Attrs);
void ParseMicrosoftAttributes(ParsedAttributes &attrs,
SourceLocation *endLoc = nullptr);
/// If '__declspec' is enabled and present at the current token, parse its
/// attribute list into \p Attrs; otherwise do nothing.
void MaybeParseMicrosoftDeclSpecs(ParsedAttributes &Attrs,
SourceLocation *End = nullptr) {
if (getLangOpts().DeclSpecKeyword && Tok.is(tok::kw___declspec))
ParseMicrosoftDeclSpecs(Attrs, End);
}
void ParseMicrosoftDeclSpecs(ParsedAttributes &Attrs,
SourceLocation *End = nullptr);
bool ParseMicrosoftDeclSpecArgs(IdentifierInfo *AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs);
void ParseMicrosoftTypeAttributes(ParsedAttributes &attrs);
void DiagnoseAndSkipExtendedMicrosoftTypeAttributes();
SourceLocation SkipExtendedMicrosoftTypeAttributes();
void ParseMicrosoftInheritanceClassAttributes(ParsedAttributes &attrs);
void ParseBorlandTypeAttributes(ParsedAttributes &attrs);
void ParseOpenCLKernelAttributes(ParsedAttributes &attrs);
void ParseOpenCLQualifiers(ParsedAttributes &Attrs);
/// Parses the opencl_unroll_hint attribute when compiling OpenCL;
/// a no-op (reporting success) otherwise.
/// \return false if an error happens.
bool MaybeParseOpenCLUnrollHintAttribute(ParsedAttributes &Attrs) {
if (!getLangOpts().OpenCL)
return true;
return ParseOpenCLUnrollHintAttribute(Attrs);
}
/// Parses opencl_unroll_hint attribute.
/// \return false if error happens.
bool ParseOpenCLUnrollHintAttribute(ParsedAttributes &Attrs);
void ParseNullabilityTypeSpecifiers(ParsedAttributes &attrs);
VersionTuple ParseVersionTuple(SourceRange &Range);
void ParseAvailabilityAttribute(IdentifierInfo &Availability,
SourceLocation AvailabilityLoc,
ParsedAttributes &attrs,
SourceLocation *endLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
Optional<AvailabilitySpec> ParseAvailabilitySpec();
ExprResult ParseAvailabilityCheckExpr(SourceLocation StartLoc);
void ParseExternalSourceSymbolAttribute(IdentifierInfo &ExternalSourceSymbol,
SourceLocation Loc,
ParsedAttributes &Attrs,
SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
void ParseObjCBridgeRelatedAttribute(IdentifierInfo &ObjCBridgeRelated,
SourceLocation ObjCBridgeRelatedLoc,
ParsedAttributes &attrs,
SourceLocation *endLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
void ParseTypeTagForDatatypeAttribute(IdentifierInfo &AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs,
SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
void ParseSwiftNewtypeAttribute(IdentifierInfo &SwiftNewtype,
SourceLocation SwiftNewtypeLoc,
ParsedAttributes &attrs,
SourceLocation *endLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
void
ParseAttributeWithTypeArg(IdentifierInfo &AttrName,
SourceLocation AttrNameLoc, ParsedAttributes &Attrs,
SourceLocation *EndLoc, IdentifierInfo *ScopeName,
SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax);
void ParseTypeofSpecifier(DeclSpec &DS);
SourceLocation ParseDecltypeSpecifier(DeclSpec &DS);
void AnnotateExistingDecltypeSpecifier(const DeclSpec &DS,
SourceLocation StartLoc,
SourceLocation EndLoc);
void ParseUnderlyingTypeSpecifier(DeclSpec &DS);
void ParseAtomicSpecifier(DeclSpec &DS);
ExprResult ParseAlignArgument(SourceLocation Start,
SourceLocation &EllipsisLoc);
void ParseAlignmentSpecifier(ParsedAttributes &Attrs,
SourceLocation *endLoc = nullptr);
void ParsePtrauthQualifier(ParsedAttributes &Attrs);
/// Classify \p Tok as a C++11 virt-specifier ('override'/'final' family).
VirtSpecifiers::Specifier isCXX11VirtSpecifier(const Token &Tok) const;
/// Convenience overload that classifies the current token.
VirtSpecifiers::Specifier isCXX11VirtSpecifier() const {
return isCXX11VirtSpecifier(Tok);
}
void ParseOptionalCXX11VirtSpecifierSeq(VirtSpecifiers &VS, bool IsInterface,
SourceLocation FriendLoc);
bool isCXX11FinalKeyword() const;
/// DeclaratorScopeObj - RAII object used in Parser::ParseDirectDeclarator to
/// enter a new C++ declarator scope and exit it when the function is
/// finished.
class DeclaratorScopeObj {
Parser &P;
CXXScopeSpec &SS;
bool EnteredScope = false; // ActOnCXXEnterDeclaratorScope succeeded
bool CreatedScope = false; // EnterScope was called
public:
DeclaratorScopeObj(Parser &p, CXXScopeSpec &ss) : P(p), SS(ss) {}
void EnterDeclaratorScope() {
assert(!EnteredScope && "Already entered the scope!");
assert(SS.isSet() && "C++ scope was not set!");
// The parser scope is created unconditionally; the semantic declarator
// scope is only recorded as entered when Sema succeeds (it returns
// true on error), so the destructor knows exactly what to undo.
CreatedScope = true;
P.EnterScope(0); // Not a decl scope.
if (!P.Actions.ActOnCXXEnterDeclaratorScope(P.getCurScope(), SS))
EnteredScope = true;
}
~DeclaratorScopeObj() {
if (EnteredScope) {
assert(SS.isSet() && "C++ scope was cleared ?");
P.Actions.ActOnCXXExitDeclaratorScope(P.getCurScope(), SS);
}
if (CreatedScope)
P.ExitScope();
}
};
/// ParseDeclarator - Parse and verify a newly-initialized declarator.
void ParseDeclarator(Declarator &D);
/// A function that parses a variant of direct-declarator.
typedef void (Parser::*DirectDeclParseFunction)(Declarator&);
void ParseDeclaratorInternal(Declarator &D,
DirectDeclParseFunction DirectDeclParser);
/// Bitmask describing which attribute syntaxes may be parsed (and whether
/// they are accepted or rejected) in a given position.
enum AttrRequirements {
AR_NoAttributesParsed = 0, ///< No attributes are diagnosed.
AR_GNUAttributesParsedAndRejected = 1 << 0, ///< Diagnose GNU attributes.
AR_GNUAttributesParsed = 1 << 1, ///< GNU __attribute__ is parsed and kept.
AR_CXX11AttributesParsed = 1 << 2, ///< C++11 [[...]] attributes are parsed.
AR_DeclspecAttributesParsed = 1 << 3, ///< __declspec attributes are parsed.
AR_AllAttributesParsed = AR_GNUAttributesParsed |
AR_CXX11AttributesParsed |
AR_DeclspecAttributesParsed,
AR_VendorAttributesParsed = AR_GNUAttributesParsed |
AR_DeclspecAttributesParsed
};
void ParseTypeQualifierListOpt(
DeclSpec &DS, unsigned AttrReqs = AR_AllAttributesParsed,
bool AtomicAllowed = true, bool IdentifierRequired = false,
Optional<llvm::function_ref<void()>> CodeCompletionHandler = None);
void ParseDirectDeclarator(Declarator &D);
void ParseDecompositionDeclarator(Declarator &D);
void ParseParenDeclarator(Declarator &D);
void ParseFunctionDeclarator(Declarator &D,
ParsedAttributes &attrs,
BalancedDelimiterTracker &Tracker,
bool IsAmbiguous,
bool RequiresArg = false);
bool ParseRefQualifier(bool &RefQualifierIsLValueRef,
SourceLocation &RefQualifierLoc);
bool isFunctionDeclaratorIdentifierList();
void ParseFunctionDeclaratorIdentifierList(
Declarator &D,
SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo);
void ParseParameterDeclarationClause(
Declarator &D,
ParsedAttributes &attrs,
SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo,
SourceLocation &EllipsisLoc);
void ParseBracketDeclarator(Declarator &D);
void ParseMisplacedBracketDeclarator(Declarator &D);
//===--------------------------------------------------------------------===//
// C++ 7: Declarations [dcl.dcl]
/// The kind of attribute specifier we have found; produced by
/// isCXX11AttributeSpecifier.
enum CXX11AttributeKind {
/// This is not an attribute specifier.
CAK_NotAttributeSpecifier,
/// This should be treated as an attribute-specifier.
CAK_AttributeSpecifier,
/// The next tokens are '[[', but this is not an attribute-specifier. This
/// is ill-formed by C++11 [dcl.attr.grammar]p6.
CAK_InvalidAttributeSpecifier
};
CXX11AttributeKind
isCXX11AttributeSpecifier(bool Disambiguate = false,
bool OuterMightBeMessageSend = false);
void DiagnoseUnexpectedNamespace(NamedDecl *Context);
DeclGroupPtrTy ParseNamespace(DeclaratorContext Context,
SourceLocation &DeclEnd,
SourceLocation InlineLoc = SourceLocation());
/// One component of a (possibly nested) namespace declarator, collected
/// while parsing; consumed by ParseInnerNamespace.
struct InnerNamespaceInfo {
SourceLocation NamespaceLoc; ///< Location of the 'namespace' keyword.
SourceLocation InlineLoc; ///< Location of 'inline', if present.
SourceLocation IdentLoc; ///< Location of the namespace identifier.
IdentifierInfo *Ident; ///< The namespace identifier itself.
};
using InnerNamespaceInfoList = llvm::SmallVector<InnerNamespaceInfo, 4>;
void ParseInnerNamespace(const InnerNamespaceInfoList &InnerNSs,
unsigned int index, SourceLocation &InlineLoc,
ParsedAttributes &attrs,
BalancedDelimiterTracker &Tracker);
Decl *ParseLinkage(ParsingDeclSpec &DS, DeclaratorContext Context);
Decl *ParseExportDeclaration();
DeclGroupPtrTy ParseUsingDirectiveOrDeclaration(
DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo,
SourceLocation &DeclEnd, ParsedAttributesWithRange &attrs);
Decl *ParseUsingDirective(DeclaratorContext Context,
SourceLocation UsingLoc,
SourceLocation &DeclEnd,
ParsedAttributes &attrs);
/// The pieces of a single using-declarator: optional 'typename', the
/// qualified name, and an optional trailing ellipsis.
struct UsingDeclarator {
SourceLocation TypenameLoc;
CXXScopeSpec SS;
UnqualifiedId Name;
SourceLocation EllipsisLoc;
/// Reset every field to its empty/invalid state so the declarator can be
/// reused for the next entry in a using-declaration list.
void clear() {
SS.clear();
Name.clear();
TypenameLoc = SourceLocation();
EllipsisLoc = SourceLocation();
}
};
bool ParseUsingDeclarator(DeclaratorContext Context, UsingDeclarator &D);
DeclGroupPtrTy ParseUsingDeclaration(DeclaratorContext Context,
const ParsedTemplateInfo &TemplateInfo,
SourceLocation UsingLoc,
SourceLocation &DeclEnd,
AccessSpecifier AS = AS_none);
Decl *ParseAliasDeclarationAfterDeclarator(
const ParsedTemplateInfo &TemplateInfo, SourceLocation UsingLoc,
UsingDeclarator &D, SourceLocation &DeclEnd, AccessSpecifier AS,
ParsedAttributes &Attrs, Decl **OwnedType = nullptr);
Decl *ParseStaticAssertDeclaration(SourceLocation &DeclEnd);
Decl *ParseNamespaceAlias(SourceLocation NamespaceLoc,
SourceLocation AliasLoc, IdentifierInfo *Alias,
SourceLocation &DeclEnd);
//===--------------------------------------------------------------------===//
// C++ 9: classes [class] and C structs/unions.
bool isValidAfterTypeSpecifier(bool CouldBeBitfield);
void ParseClassSpecifier(tok::TokenKind TagTokKind, SourceLocation TagLoc,
DeclSpec &DS, const ParsedTemplateInfo &TemplateInfo,
AccessSpecifier AS, bool EnteringContext,
DeclSpecContext DSC,
ParsedAttributesWithRange &Attributes);
void SkipCXXMemberSpecification(SourceLocation StartLoc,
SourceLocation AttrFixitLoc,
unsigned TagType,
Decl *TagDecl);
void ParseCXXMemberSpecification(SourceLocation StartLoc,
SourceLocation AttrFixitLoc,
ParsedAttributesWithRange &Attrs,
unsigned TagType,
Decl *TagDecl);
ExprResult ParseCXXMemberInitializer(Decl *D, bool IsFunction,
SourceLocation &EqualLoc);
bool ParseCXXMemberDeclaratorBeforeInitializer(Declarator &DeclaratorInfo,
VirtSpecifiers &VS,
ExprResult &BitfieldSize,
LateParsedAttrList &LateAttrs);
void MaybeParseAndDiagnoseDeclSpecAfterCXX11VirtSpecifierSeq(Declarator &D,
VirtSpecifiers &VS);
DeclGroupPtrTy ParseCXXClassMemberDeclaration(
AccessSpecifier AS, ParsedAttributes &Attr,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
ParsingDeclRAIIObject *DiagsFromTParams = nullptr);
DeclGroupPtrTy ParseCXXClassMemberDeclarationWithPragmas(
AccessSpecifier &AS, ParsedAttributesWithRange &AccessAttrs,
DeclSpec::TST TagType, Decl *Tag);
void ParseConstructorInitializer(Decl *ConstructorDecl);
MemInitResult ParseMemInitializer(Decl *ConstructorDecl);
void HandleMemberFunctionDeclDelays(Declarator& DeclaratorInfo,
Decl *ThisDecl);
//===--------------------------------------------------------------------===//
// C++ 10: Derived classes [class.derived]
TypeResult ParseBaseTypeSpecifier(SourceLocation &BaseLoc,
SourceLocation &EndLocation);
void ParseBaseClause(Decl *ClassDecl);
BaseResult ParseBaseSpecifier(Decl *ClassDecl);
AccessSpecifier getAccessSpecifierIfPresent() const;
bool ParseUnqualifiedIdTemplateId(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
IdentifierInfo *Name,
SourceLocation NameLoc,
bool EnteringContext,
ParsedType ObjectType,
UnqualifiedId &Id,
bool AssumeTemplateId);
bool ParseUnqualifiedIdOperator(CXXScopeSpec &SS, bool EnteringContext,
ParsedType ObjectType,
UnqualifiedId &Result);
//===--------------------------------------------------------------------===//
// OpenMP: Directives and clauses.
/// Parse clauses for '#pragma omp declare simd'.
DeclGroupPtrTy ParseOMPDeclareSimdClauses(DeclGroupPtrTy Ptr,
CachedTokens &Toks,
SourceLocation Loc);
/// Parses OpenMP context selectors and calls \p Callback for each
/// successfully parsed context selector.
bool parseOpenMPContextSelectors(
SourceLocation Loc,
llvm::function_ref<
void(SourceRange, const Sema::OpenMPDeclareVariantCtsSelectorData &)>
Callback);
/// Parse clauses for '#pragma omp declare variant'.
void ParseOMPDeclareVariantClauses(DeclGroupPtrTy Ptr, CachedTokens &Toks,
SourceLocation Loc);
/// Parse clauses for '#pragma omp declare target'.
DeclGroupPtrTy ParseOMPDeclareTargetClauses();
/// Parse '#pragma omp end declare target'.
void ParseOMPEndDeclareTargetDirective(OpenMPDirectiveKind DKind,
SourceLocation Loc);
/// Parses declarative OpenMP directives.
DeclGroupPtrTy ParseOpenMPDeclarativeDirectiveWithExtDecl(
AccessSpecifier &AS, ParsedAttributesWithRange &Attrs,
DeclSpec::TST TagType = DeclSpec::TST_unspecified,
Decl *TagDecl = nullptr);
/// Parse 'omp declare reduction' construct.
DeclGroupPtrTy ParseOpenMPDeclareReductionDirective(AccessSpecifier AS);
/// Parses initializer for provided omp_priv declaration inside the reduction
/// initializer.
void ParseOpenMPReductionInitializerForDecl(VarDecl *OmpPrivParm);
/// Parses 'omp declare mapper' directive.
DeclGroupPtrTy ParseOpenMPDeclareMapperDirective(AccessSpecifier AS);
/// Parses variable declaration in 'omp declare mapper' directive.
TypeResult parseOpenMPDeclareMapperVarDecl(SourceRange &Range,
DeclarationName &Name,
AccessSpecifier AS = AS_none);
/// Parses simple list of variables.
///
/// \param Kind Kind of the directive.
/// \param Callback Callback function to be called for the list elements.
/// \param AllowScopeSpecifier true, if the variables can have fully
/// qualified names.
///
bool ParseOpenMPSimpleVarList(
OpenMPDirectiveKind Kind,
const llvm::function_ref<void(CXXScopeSpec &, DeclarationNameInfo)> &
Callback,
bool AllowScopeSpecifier);
/// Parses declarative or executable directive.
///
/// \param StmtCtx The context in which we're parsing the directive.
StmtResult
ParseOpenMPDeclarativeOrExecutableDirective(ParsedStmtContext StmtCtx);
/// Parses clause of kind \a CKind for directive of a kind \a Kind.
///
/// \param DKind Kind of current directive.
/// \param CKind Kind of current clause.
/// \param FirstClause true, if this is the first clause of a kind \a CKind
/// in current directive.
///
OMPClause *ParseOpenMPClause(OpenMPDirectiveKind DKind,
OpenMPClauseKind CKind, bool FirstClause);
/// Parses clause with a single expression of a kind \a Kind.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPSingleExprClause(OpenMPClauseKind Kind,
bool ParseOnly);
/// Parses simple clause of a kind \a Kind.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPSimpleClause(OpenMPClauseKind Kind, bool ParseOnly);
/// Parses clause with a single expression and an additional argument
/// of a kind \a Kind.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPSingleExprWithArgClause(OpenMPClauseKind Kind,
bool ParseOnly);
/// Parses clause without any additional arguments.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPClause(OpenMPClauseKind Kind, bool ParseOnly = false);
/// Parses clause with the list of variables of a kind \a Kind.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPVarListClause(OpenMPDirectiveKind DKind,
OpenMPClauseKind Kind, bool ParseOnly);
public:
/// Parses simple expression in parens for single-expression clauses of OpenMP
/// constructs.
/// \param RLoc Returned location of right paren.
ExprResult ParseOpenMPParensExpr(StringRef ClauseName, SourceLocation &RLoc,
bool IsAddressOfOperand = false);
/// Data used for parsing list of variables in OpenMP clauses.
struct OpenMPVarListDataTy {
Expr *TailExpr = nullptr;
SourceLocation ColonLoc;
SourceLocation RLoc;
CXXScopeSpec ReductionOrMapperIdScopeSpec;
DeclarationNameInfo ReductionOrMapperId;
OpenMPDependClauseKind DepKind = OMPC_DEPEND_unknown;
OpenMPLinearClauseKind LinKind = OMPC_LINEAR_val;
SmallVector<OpenMPMapModifierKind, OMPMapClause::NumberOfModifiers>
MapTypeModifiers;
SmallVector<SourceLocation, OMPMapClause::NumberOfModifiers>
MapTypeModifiersLoc;
OpenMPMapClauseKind MapType = OMPC_MAP_unknown;
bool IsMapTypeImplicit = false;
SourceLocation DepLinMapLoc;
};
/// Parses clauses with list.
bool ParseOpenMPVarList(OpenMPDirectiveKind DKind, OpenMPClauseKind Kind,
SmallVectorImpl<Expr *> &Vars,
OpenMPVarListDataTy &Data);
bool ParseUnqualifiedId(CXXScopeSpec &SS, bool EnteringContext,
bool AllowDestructorName,
bool AllowConstructorName,
bool AllowDeductionGuide,
ParsedType ObjectType,
SourceLocation *TemplateKWLoc,
UnqualifiedId &Result);
/// Parses the mapper modifier in map, to, and from clauses.
bool parseMapperModifier(OpenMPVarListDataTy &Data);
/// Parses map-type-modifiers in map clause.
/// map([ [map-type-modifier[,] [map-type-modifier[,] ...] map-type : ] list)
/// where, map-type-modifier ::= always | close | mapper(mapper-identifier)
bool parseMapTypeModifiers(OpenMPVarListDataTy &Data);
private:
//===--------------------------------------------------------------------===//
// C++ 14: Templates [temp]
// C++ 14.1: Template Parameters [temp.param]
Decl *ParseDeclarationStartingWithTemplate(DeclaratorContext Context,
SourceLocation &DeclEnd,
ParsedAttributes &AccessAttrs,
AccessSpecifier AS = AS_none);
Decl *ParseTemplateDeclarationOrSpecialization(DeclaratorContext Context,
SourceLocation &DeclEnd,
ParsedAttributes &AccessAttrs,
AccessSpecifier AS);
Decl *ParseSingleDeclarationAfterTemplate(
DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo,
ParsingDeclRAIIObject &DiagsFromParams, SourceLocation &DeclEnd,
ParsedAttributes &AccessAttrs, AccessSpecifier AS = AS_none);
bool ParseTemplateParameters(unsigned Depth,
SmallVectorImpl<NamedDecl *> &TemplateParams,
SourceLocation &LAngleLoc,
SourceLocation &RAngleLoc);
bool ParseTemplateParameterList(unsigned Depth,
SmallVectorImpl<NamedDecl*> &TemplateParams);
bool isStartOfTemplateTypeParameter();
NamedDecl *ParseTemplateParameter(unsigned Depth, unsigned Position);
NamedDecl *ParseTypeParameter(unsigned Depth, unsigned Position);
NamedDecl *ParseTemplateTemplateParameter(unsigned Depth, unsigned Position);
NamedDecl *ParseNonTypeTemplateParameter(unsigned Depth, unsigned Position);
void DiagnoseMisplacedEllipsis(SourceLocation EllipsisLoc,
SourceLocation CorrectLoc,
bool AlreadyHasEllipsis,
bool IdentifierHasName);
void DiagnoseMisplacedEllipsisInDeclarator(SourceLocation EllipsisLoc,
Declarator &D);
// C++ 14.3: Template arguments [temp.arg]
typedef SmallVector<ParsedTemplateArgument, 16> TemplateArgList;
bool ParseGreaterThanInTemplateList(SourceLocation &RAngleLoc,
bool ConsumeLastToken,
bool ObjCGenericList);
bool ParseTemplateIdAfterTemplateName(bool ConsumeLastToken,
SourceLocation &LAngleLoc,
TemplateArgList &TemplateArgs,
SourceLocation &RAngleLoc);
bool AnnotateTemplateIdToken(TemplateTy Template, TemplateNameKind TNK,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &TemplateName,
bool AllowTypeAnnotation = true);
void AnnotateTemplateIdTokenAsType(bool IsClassName = false);
bool ParseTemplateArgumentList(TemplateArgList &TemplateArgs);
ParsedTemplateArgument ParseTemplateTemplateArgument();
ParsedTemplateArgument ParseTemplateArgument();
Decl *ParseExplicitInstantiation(DeclaratorContext Context,
SourceLocation ExternLoc,
SourceLocation TemplateLoc,
SourceLocation &DeclEnd,
ParsedAttributes &AccessAttrs,
AccessSpecifier AS = AS_none);
// C++2a: Template, concept definition [temp]
Decl *
ParseConceptDefinition(const ParsedTemplateInfo &TemplateInfo,
SourceLocation &DeclEnd);
//===--------------------------------------------------------------------===//
// Modules
DeclGroupPtrTy ParseModuleDecl(bool IsFirstDecl);
Decl *ParseModuleImport(SourceLocation AtLoc);
bool parseMisplacedModuleImport();
/// Check whether the current token is a module annotation token
/// (annot_module_begin / annot_module_end / annot_module_include); if so,
/// delegate to parseMisplacedModuleImport() to diagnose/recover and return
/// its result. Returns false when no module annotation is present.
bool tryParseMisplacedModuleImport() {
tok::TokenKind Kind = Tok.getKind();
// These annotation tokens are injected by the preprocessor at module
// boundaries; seeing one here means the module content is misplaced.
if (Kind == tok::annot_module_begin || Kind == tok::annot_module_end ||
Kind == tok::annot_module_include)
return parseMisplacedModuleImport();
return false;
}
bool ParseModuleName(
SourceLocation UseLoc,
SmallVectorImpl<std::pair<IdentifierInfo *, SourceLocation>> &Path,
bool IsImport);
//===--------------------------------------------------------------------===//
// C++11/G++: Type Traits [Type-Traits.html in the GCC manual]
ExprResult ParseTypeTrait();
/// Parse the given string as a type.
///
/// This is a dangerous utility function currently employed only by API notes.
/// It is not a general entry-point for safely parsing types from strings.
///
/// \param typeStr The string to be parsed as a type.
/// \param context The name of the context in which this string is being
/// parsed, which will be used in diagnostics.
/// \param includeLoc The location at which this parse was triggered.
TypeResult parseTypeFromString(StringRef typeStr, StringRef context,
SourceLocation includeLoc);
//===--------------------------------------------------------------------===//
// Embarcadero: Array and Expression Traits
ExprResult ParseArrayTypeTrait();
ExprResult ParseExpressionTrait();
ExprResult ParseBuiltinPtrauthTypeDiscriminator();
//===--------------------------------------------------------------------===//
// Preprocessor code-completion pass-through
void CodeCompleteDirective(bool InConditional) override;
void CodeCompleteInConditionalExclusion() override;
void CodeCompleteMacroName(bool IsDefinition) override;
void CodeCompletePreprocessorExpression() override;
void CodeCompleteMacroArgument(IdentifierInfo *Macro, MacroInfo *MacroInfo,
unsigned ArgumentIndex) override;
void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled) override;
void CodeCompleteNaturalLanguage() override;
};
} // end namespace clang
#endif
|
main.c | void foo(int N, double *A) {
/* Fill A[0..N-1] with A[I] = (double) I, splitting iterations over the
   OpenMP team. Assumes A points to at least N doubles -- TODO confirm
   at call sites. */
#pragma omp parallel default(shared)
{
#pragma omp for
for (int I = 0; I < N; ++I) {
A[I] = I;
}
}
}
|
c_timers.c | #include "wtime.h"
#include <stdlib.h>
#ifdef _OPENMP
#include <omp.h>
#endif
/* Prototype */
void wtime( double * );
/*****************************************************************/
/****** E L A P S E D _ T I M E ******/
/*****************************************************************/
/* Return the current wall-clock time in seconds. Uses omp_get_wtime()
 * when compiled with OpenMP, otherwise falls back to the project's
 * wtime() helper. Only differences between calls are meaningful. */
double elapsed_time( void )
{
double t;
#ifdef _OPENMP
/* Use the OpenMP timer if we can */
t = omp_get_wtime();
#else
wtime( &t );
#endif
return( t );
}
static double start[64], elapsed[64];
#ifdef _OPENMP
#pragma omp threadprivate(start, elapsed)
#endif
/*****************************************************************/
/****** T I M E R _ C L E A R ******/
/*****************************************************************/
/* Reset the accumulated elapsed time of timer n to zero.
 * n must be in [0, 64) -- the size of the timer arrays above. */
void timer_clear( int n )
{
elapsed[n] = 0.0;
}
/*****************************************************************/
/****** T I M E R _ S T A R T ******/
/*****************************************************************/
/* Record the current wall-clock time as the start point of timer n.
 * A later timer_stop(n) accumulates the interval since this call. */
void timer_start( int n )
{
start[n] = elapsed_time();
}
/*****************************************************************/
/****** T I M E R _ S T O P ******/
/*****************************************************************/
/* Stop timer n: add the wall-clock time elapsed since the matching
 * timer_start(n) to the timer's running total in elapsed[n]. */
void timer_stop( int n )
{
double delta = elapsed_time() - start[n];
elapsed[n] += delta;
}
/*****************************************************************/
/****** T I M E R _ R E A D ******/
/*****************************************************************/
/* Return the total time (in seconds) accumulated by timer n across all
 * completed start/stop intervals since the last timer_clear(n). */
double timer_read( int n )
{
return( elapsed[n] );
}
|
binarytrees.c | // The Computer Language Benchmarks Game
// http://benchmarksgame.alioth.debian.org/
//
// Contributed by Jeremy Zerfas
// Based on the C++ program from Jon Harrop, Alex Mizrahi, and Bruno Coutinho.
// This controls the width of lines that are output by this program.
#define MAXIMUM_LINE_WIDTH 60
#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>
typedef off_t off64_t; // This is needed to keep APR happy on 32 bit systems.
#include <apr_pools.h>
// intptr_t should be the native integer type on most sane systems.
typedef intptr_t intnative_t;
typedef struct tree_node{
struct tree_node * left_Node, * right_Node;
int32_t value;
} tree_node;
// Create a binary tree of depth tree_Depth in memory_Pool, set the root node's
// value to root_Node_Value, and finally return a pointer to the created binary
// tree.
// Allocate (from memory_Pool) a complete binary tree of depth tree_Depth,
// store root_Node_Value in the root, and return the root node. A node
// holding value v gets children with values 2*v-1 (left) and 2*v (right);
// at depth 0 the node is a leaf with both child pointers NULL.
static inline tree_node * create_Tree(const intnative_t root_Node_Value,
const intnative_t tree_Depth, apr_pool_t * const memory_Pool){
tree_node * const node=apr_palloc(memory_Pool, sizeof(tree_node));
node->value=root_Node_Value;
if(tree_Depth<=0){
// Leaf: no subtrees.
node->left_Node=NULL;
node->right_Node=NULL;
}else{
// Interior node: recursively build both subtrees one level shallower.
node->left_Node=create_Tree(2*root_Node_Value-1, tree_Depth-1,
memory_Pool);
node->right_Node=create_Tree(2*root_Node_Value, tree_Depth-1,
memory_Pool);
}
return node;
}
// Compute and return the checksum for the binary tree that has root_Node as the
// root node.
// Return the checksum of the binary tree rooted at root_Node:
// value + checksum(left) - checksum(right) for an interior node, or just
// the node's value for a leaf. Trees built by create_Tree() have either
// two children or none, so testing left_Node alone is sufficient.
static inline intnative_t compute_Tree_Checksum(
const tree_node * const root_Node){
intnative_t checksum=root_Node->value;
if(root_Node->left_Node){
checksum+=compute_Tree_Checksum(root_Node->left_Node);
checksum-=compute_Tree_Checksum(root_Node->right_Node);
}
return checksum;
}
// Run the binary-trees benchmark for the requested maximum depth `input`:
// build and checksum a "stretch" tree of depth input+1, keep a long-lived
// tree of depth input alive while many short-lived trees of increasing
// depth are built/checksummed in parallel, then print all statistics in
// depth order.
void run(int input){
// Set minimum_Tree_Depth to 4 and maximum_Tree_Depth to the maximum of what
// was specified as the argument to the program and minimum_Tree_Depth+2.
const intnative_t minimum_Tree_Depth=4;
intnative_t maximum_Tree_Depth=input;
if(maximum_Tree_Depth < minimum_Tree_Depth+2)
maximum_Tree_Depth=minimum_Tree_Depth+2;
apr_initialize();
apr_pool_t * memory_Pool;
// Create a memory pool, create a binary tree of depth maximum_Tree_Depth+1,
// compute the checksum of the binary tree, print the statistics, and then
// delete the memory pool.
apr_pool_create_unmanaged(&memory_Pool);
tree_node * stretch_Tree=create_Tree(0, maximum_Tree_Depth+1, memory_Pool);
printf("stretch tree of depth %jd\t check: %jd\n",
(intmax_t)maximum_Tree_Depth+1,
(intmax_t)compute_Tree_Checksum(stretch_Tree));
apr_pool_destroy(memory_Pool);
// Create a memory pool and then create a long-lived binary tree of depth
// maximum_Tree_Depth which will be left alone for a while while
// more binary trees get allocated and deallocated as required by the
// rules. We'll finish working with this later.
apr_pool_create_unmanaged(&memory_Pool);
tree_node * long_Lived_Tree=create_Tree(0, maximum_Tree_Depth, memory_Pool);
// Create a lot of binary trees in parallel of depths ranging from
// minimum_Tree_Depth to maximum_Tree_Depth, compute and tally up all their
// checksums, destroy the trees, and then record the statistics to
// output_Buffer[] so they can be displayed in order later.
// output_Buffer is a VLA with one fixed-width row per depth.
char output_Buffer[maximum_Tree_Depth+1][MAXIMUM_LINE_WIDTH+1];
intnative_t current_Tree_Depth;
// Iterations (one per even depth) are independent: each uses its own APR
// pool and writes a distinct row of output_Buffer, and OpenMP privatizes
// the loop variable, so no explicit synchronization is needed.
#pragma omp parallel for
for(current_Tree_Depth=minimum_Tree_Depth;
current_Tree_Depth<=maximum_Tree_Depth; current_Tree_Depth+=2){
// Deeper trees get exponentially fewer iterations so each depth does
// comparable total work.
intnative_t iterations=1<<(maximum_Tree_Depth-current_Tree_Depth+
minimum_Tree_Depth);
// Create a memory pool for this thread to use.
apr_pool_t * thread_Memory_Pool;
apr_pool_create_unmanaged(&thread_Memory_Pool);
intnative_t i=1, total_Trees_Checksum=0;
for(; i<=iterations; ++i){
// Create two binary trees of depth current_Tree_Depth but with one
// having a root node value of i and the other a root node value of
// -1.
tree_node * const tree_1=create_Tree(i, current_Tree_Depth,
thread_Memory_Pool);
tree_node * const tree_2=create_Tree(-i, current_Tree_Depth,
thread_Memory_Pool);
// Compute the checksums for both trees and add them to
// total_Trees_Checksum.
total_Trees_Checksum+=compute_Tree_Checksum(tree_1)+
compute_Tree_Checksum(tree_2);
// Release both trees at once; the pool is reused next iteration.
apr_pool_clear(thread_Memory_Pool);
}
apr_pool_destroy(thread_Memory_Pool);
// Record the statistics for the trees of depth current_Tree_Depth.
sprintf(output_Buffer[current_Tree_Depth],
"%jd\t trees of depth %jd\t check: %jd\n", (intmax_t)2*iterations,
(intmax_t)current_Tree_Depth, (intmax_t)total_Trees_Checksum);
}
// Print the statistics for all of the various tree depths.
for(current_Tree_Depth=minimum_Tree_Depth;
current_Tree_Depth<=maximum_Tree_Depth; current_Tree_Depth+=2)
printf("%s", output_Buffer[current_Tree_Depth]);
// Compute the checksum of the long-lived binary tree that we created
// earlier, print the statistics, and then delete the memory pool.
printf("long lived tree of depth %jd\t check: %jd\n",
(intmax_t)maximum_Tree_Depth,
(intmax_t)compute_Tree_Checksum(long_Lived_Tree));
apr_pool_destroy(memory_Pool);
apr_terminate();
}
/*
notes, command-line, and program output
NOTES:
64-bit Ubuntu quad core
gcc (Ubuntu 5.4.0-6ubuntu1~16.04.1) 5.4.0 20160609
Sun, 24 Apr 2016 15:53:29 GMT
MAKE:
/usr/bin/gcc -pipe -Wall -O3 -fomit-frame-pointer -march=native -fopenmp -D_FILE_OFFSET_BITS=64 -I/usr/include/apr-1.0 binarytrees.gcc-3.c -o binarytrees.gcc-3.gcc_run -lapr-1 -lgomp -lm
rm binarytrees.gcc-3.c
4.64s to complete and log all make actions
COMMAND LINE:
./binarytrees.gcc-3.gcc_run 20
PROGRAM OUTPUT:
stretch tree of depth 21 check: -1
2097152 trees of depth 4 check: -2097152
524288 trees of depth 6 check: -524288
131072 trees of depth 8 check: -131072
32768 trees of depth 10 check: -32768
8192 trees of depth 12 check: -8192
2048 trees of depth 14 check: -2048
512 trees of depth 16 check: -512
128 trees of depth 18 check: -128
32 trees of depth 20 check: -32
long lived tree of depth 20 check: -1
*/
|
GB_unop__identity_int32_uint64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__identity_int32_uint64
// op(A') function: GB_unop_tran__identity_int32_uint64
// C type: int32_t
// A type: uint64_t
// cast: int32_t cij = (int32_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
uint64_t
#define GB_CTYPE \
int32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
int32_t z = (int32_t) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int32_t z = (int32_t) aij ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_INT32 || GxB_NO_UINT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the identity operator entrywise: Cx [k] = (int32_t) Ax [k] for
// k = 0..anz-1, using up to nthreads OpenMP threads with a static
// schedule. Cx and Ax may be aliased because each entry is read and
// written independently at the same index.
GrB_Info GB_unop_apply__identity_int32_uint64
(
    int32_t *Cx,                // Cx and Ax may be aliased
    const uint64_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        // the uint64_t -> int32_t cast is the entire "identity" op here
        Cx [k] = (int32_t) Ax [k] ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = (int32_t) A': transpose A, casting each entry from uint64_t to
// int32_t. The actual loop lives in the shared template
// GB_unop_transpose.c, specialized here via the GB_* macros defined at
// the top of this file; this is phase 2 (the numeric phase) of the
// two-phase transpose.
GrB_Info GB_unop_tran__identity_int32_uint64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
power.c | #include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <stdint.h>
#include <math.h>
#include "nb/memory_bot.h"
#include "nb/solver_bot/vector.h"
#include "nb/solver_bot/sparse/sparse.h"
#include "nb/solver_bot/sparse/eigen/power.h"
#include "../sparse_struct.h"
#define POW2(a) ((a)*(a))
/* Deflated power iteration: approximate the h dominant eigenpairs of the
 * sparse matrix A. For each eigenpair the classic power method is run,
 * with previously-found eigenvectors projected out each iteration
 * (deflation) so successive runs converge to the next eigenpair.
 * Convergence is declared when the squared residual ||A*q - lambda*q||^2
 * drops to tolerance^2. Inner products, projections, and the sparse
 * mat-vec are parallelized with OpenMP using omp_parallel_threads. */
void nb_sparse_eigen_power(const nb_sparse_t* const A, int h,
			   double **_eigenvecs,/* Out */
			   double *_eigenvals, /* Out */
			   int *it,            /* Out */
			   double tolerance,
			   uint32_t omp_parallel_threads){
	/* The program must receive all the pointers allocated, where
	 * > A is a nb_sparse_t matrix
	 * > _eigenvecs is an array of size h to store h eigenvectors.
	 * > _eigenvals is an array of size h to store the h greatest
	 *   eigenvalues approximated.
	 * > h is the number of eigenvalues to be computed.
	 * > '*it' will store (after computation) the iterations needed
	 *   to compute each eigenvalue (is a return value).
	 */
	/* Declare structures and variables to be used */
	uint32_t i, j, c, d; /* Iterative variables */
	double pnorm, rnorm2;
	/* Allocate memory for structures */
	double *p = nb_allocate_zero_mem(A->N * sizeof(double));
	/* Deflation power method */
	for (i = 0; i < h; i++) {
		it[i] = 0;
		rnorm2 = 1; /* force at least one iteration of the while loop */
		/* Initialize q0 such that ||qk||=1 */
		_eigenvecs[i][0] = 1;
		for (j = 1; j < A->N; j++)
			_eigenvecs[i][j] = 0;
		/* p = A * q0 = A * e_0, i.e. column 0 of A.
		 * NOTE(review): assumes rows_index[c][0] is the smallest column
		 * index of row c (rows sorted) -- confirm in nb_sparse_t. */
		for (c = 0; c < A->N; c++) {
			p[c] = 0;
			if(A->rows_index[c][0] == 0)
				p[c] = A->rows_values[c][0];
		}
		/* Start loop */
		while (rnorm2 > POW2(tolerance)) {
			/* Step 1: normalize, qk = p / ||p|| */
			pnorm = nb_vector_get_norm(p, A->N);
			for (c = 0; c < A->N; c++)
				_eigenvecs[i][c] = p[c]/pnorm;
			/* Step 2: deflation -- project out each previously found
			 * eigenvector (Gram-Schmidt against _eigenvecs[0..i-1]) */
			for (j = 0; j < i; j++) {
				double alpha = 0;
				/* alpha = <q_i, q_j> */
#pragma omp parallel for reduction(+:alpha) num_threads(omp_parallel_threads) schedule(guided) private(c)
				for(c=0; c < A->N; c++)
					alpha += _eigenvecs[i][c]*_eigenvecs[j][c];
				/* q_i -= alpha * q_j */
#pragma omp parallel for num_threads(omp_parallel_threads) private(c)
				for(c=0; c < A->N; c++)
					_eigenvecs[i][c] -= alpha*_eigenvecs[j][c];
			}
			/* Step 3 */
			/* Parallelize the operation pk = A*qk (sparse mat-vec,
			 * one row per iteration) */
#pragma omp parallel for schedule(guided) num_threads(omp_parallel_threads) private(c, d)
			for(c=0; c < A->N; c++){
				p[c] = 0;
				for(d=0; d < A->rows_size[c]; d++)
					p[c] += A->rows_values[c][d]
						*_eigenvecs[i][A->rows_index[c][d]];
			}
			/* Step 4: Rayleigh quotient lambda = <qk, A*qk>
			 * (qk has unit norm after Step 1) */
			double lambda = 0;
			for(c=0; c < A->N; c++)
				lambda += _eigenvecs[i][c]*p[c];
			_eigenvals[i] = lambda;
			/* Step 5 and 6: squared residual ||A*qk - lambda*qk||^2,
			 * compared against tolerance^2 by the loop condition */
			rnorm2 = 0;
			for(c=0; c < A->N; c++)
				rnorm2 += POW2(p[c]-lambda*_eigenvecs[i][c]);
			it[i]++;
		}
	}
	/* Free memory */
	nb_free_mem(p);
}
|
DRB003-antidep2-orig-yes.c | /*
Copyright (C) 1991-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>.
*/
/*
This header is separate from features.h so that the compiler can
include it implicitly at the start of every compilation. It must
not itself include <features.h> or any other header that includes
<features.h> because the implicit include comes before any feature
test macros that may be defined in a source file before it first
explicitly includes a system header. GCC knows the name of this
header in order to preinclude it.
*/
/*
glibc's intent is to support the IEC 559 math functionality, real
and complex. If the GCC (4.9 and later) predefined macros
specifying compiler intent are available, use them to determine
whether the overall intent is to support these features; otherwise,
presume an older compiler has intent to support these features and
define these macros by default.
*/
/*
wchar_t uses Unicode 10.0.0. Version 10.0 of the Unicode Standard is
synchronized with ISO/IEC 10646:2017, fifth edition, plus
the following additions from Amendment 1 to the fifth edition:
- 56 emoji characters
- 285 hentaigana
- 3 additional Zanabazar Square characters
*/
/*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
A two-level loop nest with loop carried anti-dependence on the outer level.
Data race pair: a[i][j]@67:7 vs. a[i+1][j]@67:18
*/
#include <stdio.h>
#include <stdlib.h>
/* DataRaceBench kernel (DRB003-antidep2): a two-level loop nest with a
 * loop-carried anti-dependence on the outer level. The race documented
 * in the file header is a[i][j] (write) vs a[i+1][j] (read).
 * NOTE(review): in this Cetus-translated form only the inner j loop of
 * the second nest carries an "omp parallel for"; confirm against the
 * original benchmark where the race is intended to manifest. Do NOT
 * "fix" the dependence -- exhibiting it is the purpose of this file. */
int main(int argc, char * argv[])
{
	int i, j;
	int len = 20;
	double a[20][20];
	int _ret_val_0;
	/* Initialization: a[i][j] = i*len + j + 0.5 (fully parallel). */
	#pragma cetus private(i, j)
	#pragma loop name main#0
	#pragma cetus parallel
	#pragma omp parallel for private(i, j)
	for (i=0; i<len; i ++ )
	{
		#pragma cetus private(j)
		#pragma loop name main#0#0
		#pragma cetus parallel
		#pragma omp parallel for private(j)
		for (j=0; j<len; j ++ )
		{
			a[i][j]=(((i*len)+j)+0.5);
		}
	}
	/* Update sweep with the anti-dependence: row i is overwritten using
	 * row i+1, so iterations of the outer i loop must not run out of
	 * order with respect to each other. */
	#pragma cetus private(i, j)
	#pragma loop name main#1
	for (i=0; i<(len-1); i+=1)
	{
		#pragma cetus private(j)
		#pragma loop name main#1#0
		#pragma cetus parallel
		#pragma omp parallel for private(j)
		for (j=0; j<len; j+=1)
		{
			a[i][j]+=a[i+1][j];
		}
	}
	/* Print the whole array (serial) so results are observable. */
	#pragma cetus private(i, j)
	#pragma loop name main#2
	for (i=0; i<len; i ++ )
	{
		#pragma cetus private(j)
		#pragma loop name main#2#0
		for (j=0; j<len; j ++ )
		{
			printf("%lf", a[i][j]);
		}
	}
	printf("a[10][10]=%f\n", a[10][10]);
	_ret_val_0=0;
	return _ret_val_0;
}
|
a.29.1.c | /* { dg-do run } */
#include <assert.h>
int A[2][2] = { 1, 2, 3, 4 };
/* OpenMP conformance test for firstprivate on array-typed variables.
 * B (a VLA parameter) and C decay to pointers, so firstprivate copies
 * the POINTER values -- the private copies still point into the original
 * global A. D (fixed array) and E (true VLA object) are arrays, so
 * firstprivate copies their ELEMENTS into per-thread storage. */
void
f (int n, int B[n][n], int C[])
{
  int D[2][2] = { 1, 2, 3, 4 };
  int E[n][n];
  assert (n >= 2);
  E[1][1] = 4;
#pragma omp parallel firstprivate(B, C, D, E)
  {
    /* B is really int (*)[n] and C is int *: sizeof reflects the
       pointer types, not array sizes. */
    assert (sizeof (B) == sizeof (int (*)[n]));
    assert (sizeof (C) == sizeof (int *));
    /* D and E are genuine (V)LA objects with full array sizes. */
    assert (sizeof (D) == 4 * sizeof (int));
    assert (sizeof (E) == n * n * sizeof (int));
    /* Private B and C have values of original B and C. */
    assert (&B[1][1] == &A[1][1]);
    assert (&C[3] == &A[1][1]);
    /* Element values of D and E were copied in by firstprivate. */
    assert (D[1][1] == 4);
    assert (E[1][1] == 4);
  }
}
int
main ()
{
  /* Pass the global 2x2 array A both as the matrix argument and, via its
     first row, as the flat int[] argument; f() asserts the firstprivate
     copies alias it correctly. */
  f (2, A, A[0]);
  return 0;
}
|
owl_aeos_tuner_map_impl.h | /*
* OWL - OCaml Scientific Computing
* Copyright (c) 2016-2022 Liang Wang <liang@ocaml.xyz>
*/
#ifdef FUN4
/* Sequential element-wise map: Y[i] = MAPFN(X[i]) for i = 0..N-1, over
 * the bigarrays behind vX and vY. The OCaml runtime lock is released
 * around the loop so other OCaml threads may run while the map executes.
 * NUMBER/NUMBER1/MAPFN are macros supplied by the including file. */
CAMLprim value BASE_FUN4(value vN, value vX, value vY) {
  CAMLparam3(vN, vX, vY);

  int N = Long_val(vN);
  struct caml_ba_array *X = Caml_ba_array_val(vX);
  NUMBER *X_data = (NUMBER *) X->data;
  struct caml_ba_array *Y = Caml_ba_array_val(vY);
  NUMBER1 *Y_data = (NUMBER1 *) Y->data;

  caml_release_runtime_system();  /* Allow other threads */

  for (int i = 0; i < N; i++) {
    /* Take the element into a local first: MAPFN may expand its
       argument more than once. */
    NUMBER x = X_data[i];
    Y_data[i] = MAPFN(x);
  }

  caml_acquire_runtime_system();  /* Disallow other threads */

  CAMLreturn(Val_unit);
}
/* OpenMP element-wise map: Y[i] = (MAPFN(X[i])) for i = 0..N-1, computed
 * by a statically-scheduled parallel for. The OCaml runtime lock is
 * released while the worker threads run (no OCaml values are touched
 * inside the loop). NUMBER/NUMBER1/MAPFN are macros supplied by the
 * including file. */
CAMLprim value OMP_FUN4(value vN, value vX, value vY) {
  CAMLparam3(vN, vX, vY);

  int N = Long_val(vN);
  struct caml_ba_array *X = Caml_ba_array_val(vX);
  NUMBER *X_data = (NUMBER *) X->data;
  struct caml_ba_array *Y = Caml_ba_array_val(vY);
  NUMBER1 *Y_data = (NUMBER1 *) Y->data;

  caml_release_runtime_system();  /* Allow other threads */

  #pragma omp parallel for schedule(static)
  for (int idx = 0; idx < N; idx++) {
    /* Local copy guards against multiple expansion of MAPFN's arg. */
    NUMBER x = X_data[idx];
    Y_data[idx] = (MAPFN(x));
  }

  caml_acquire_runtime_system();  /* Disallow other threads */

  CAMLreturn(Val_unit);
}
#endif /* FUN4 */
#ifdef BASE_FUN15
/* Sequential three-array map: for each i in 0..N-1, invoke
 * MAPFN(&X[i], &Y[i], &Z[i]). MAPFN receives POINTERS to the i-th
 * elements -- presumably it reads/writes through them; the exact
 * contract is defined by the including file. The OCaml runtime lock is
 * released around the loop so other OCaml threads may run. */
CAMLprim value BASE_FUN15(value vN, value vX, value vY, value vZ)
{
  CAMLparam4(vN, vX, vY, vZ);
  int N = Long_val(vN);
  struct caml_ba_array *X = Caml_ba_array_val(vX);
  NUMBER *X_data = (NUMBER *) X->data;
  struct caml_ba_array *Y = Caml_ba_array_val(vY);
  NUMBER1 *Y_data = (NUMBER1 *) Y->data;
  struct caml_ba_array *Z = Caml_ba_array_val(vZ);
  NUMBER2 *Z_data = (NUMBER2 *) Z->data;
  /* NOTE(review): stop_x is computed but unused by the indexed loop
     below (kept for symmetry with the pointer-walk variants). */
  NUMBER *start_x, *stop_x;
  NUMBER1 *start_y;
  NUMBER2 *start_z;
  caml_release_runtime_system();  /* Allow other threads */
  start_x = X_data;
  stop_x = start_x + N;
  start_y = Y_data;
  start_z = Z_data;
  for (int i = 0; i < N; i++) {
    MAPFN((start_x + i), (start_y + i), (start_z + i));
  }
  caml_acquire_runtime_system();  /* Disallow other threads */
  CAMLreturn(Val_unit);
}
/* OpenMP three-array map: for each i in 0..N-1, invoke
 * MAPFN(&X[i], &Y[i], &Z[i]) from a statically-scheduled parallel for.
 * MAPFN receives POINTERS to the i-th elements; iterations must be
 * independent for the parallelization to be valid. The OCaml runtime
 * lock is released while the worker threads run. */
CAMLprim value OMP_FUN15(value vN, value vX, value vY, value vZ)
{
  CAMLparam4(vN, vX, vY, vZ);
  int N = Long_val(vN);
  struct caml_ba_array *X = Caml_ba_array_val(vX);
  NUMBER *X_data = (NUMBER *) X->data;
  struct caml_ba_array *Y = Caml_ba_array_val(vY);
  NUMBER1 *Y_data = (NUMBER1 *) Y->data;
  struct caml_ba_array *Z = Caml_ba_array_val(vZ);
  NUMBER2 *Z_data = (NUMBER2 *) Z->data;
  /* NOTE(review): stop_x is computed but unused by the indexed loop
     below (kept for symmetry with the pointer-walk variants). */
  NUMBER *start_x, *stop_x;
  NUMBER1 *start_y;
  NUMBER2 *start_z;
  caml_release_runtime_system();  /* Allow other threads */
  start_x = X_data;
  stop_x = start_x + N;
  start_y = Y_data;
  start_z = Z_data;
  #pragma omp parallel for schedule(static)
  for (int i = 0; i < N; i++) {
    MAPFN((start_x + i), (start_y + i), (start_z + i));
  }
  caml_acquire_runtime_system();  /* Disallow other threads */
  CAMLreturn(Val_unit);
}
#endif /* FUN15 */
#undef NUMBER
#undef NUMBER1
#undef NUMBER2
#undef MAPFN
#undef FUN4
#undef FUN15
#undef OMP_FUN4
#undef OMP_FUN15
#undef BASE_FUN4
#undef BASE_FUN15
|
omp_dr.h | /*
* OpenMP + dag_recorder
*/
/*
this file provides macros with which users can
easily turn on/off dag recorder for your OpenMP
task parallel programs.
provided macros are:
(i) pragma_omp_task(option, statement)
(ii) pragma_omp_taskc(option, callable)
(iii) pragma_omp_taskwait
they are respectively translated into
#pragma omp task option
statement
#pragma omp task option
callable()
#pragma omp taskwait
when DAG_RECORDER is set to a number >= 2,
they insert instrumentation code for dag
recorder.
ideally we like to instrument OpenMP
programs written with the regular
pragma's, but I don't know how to do
it. so we ask the programmer to write
OpenMP fragment such as
#pragma omp task shared(x)
x = foo();
as
pragma_omp_task(shared(x),
x = foo());
*/
#pragma once
#include <omp.h>
#include <dag_recorder.h>
/* do_pragma(x) emits `#pragma x` from inside a macro (C99 _Pragma operator). */
#define do_pragma(x) _Pragma( #x )
/* pragma_omp(x) emits `#pragma omp x`. */
#define pragma_omp(x) do_pragma(omp x)
/* ------- non-profiling variants: plain OpenMP constructs ------- */
#define pragma_omp_task_no_prof(options, statement) \
pragma_omp(task options) do { statement; } while(0)
#define pragma_omp_taskc_no_prof(options, callable) \
pragma_omp_task_no_prof(options, callable())
#define pragma_omp_taskwait_no_prof pragma_omp(taskwait)
/* ------- profiling variants: same constructs, bracketed by
   dag_recorder enter/return hooks so the task DAG is recorded ------- */
#define pragma_omp_task_with_prof(options, statement) do { \
dr_dag_node * __c__ = 0; \
dr_dag_node * __t__ = dr_enter_create_task(&__c__); \
pragma_omp(task options) do { \
dr_start_task(__c__); \
statement; \
dr_end_task(); \
} while(0); \
dr_return_from_create_task(__t__); \
} while (0)
/* Same as above, but also records the source file/line of the task. */
#define pragma_omp_task_with_prof_(options, statement, file, line) do { \
dr_dag_node * __c__ = 0; \
dr_dag_node * __t__ = dr_enter_create_task_(&__c__, file, line); \
pragma_omp(task options) do { \
dr_start_task_(__c__, file, line); \
statement; \
dr_end_task_(file, line); \
} while(0); \
dr_return_from_create_task_(__t__, file, line); \
} while (0)
#define pragma_omp_taskc_with_prof(options, callable) \
pragma_omp_task_with_prof(options, callable())
/* taskwait, instrumented with dag_recorder wait hooks. */
#define pragma_omp_taskwait_with_prof do { \
dr_dag_node * __t__ = dr_enter_wait_tasks(); \
pragma_omp(taskwait); \
dr_return_from_wait_tasks(__t__); \
} while(0)
#define pragma_omp_taskwait_with_prof_(file, line) do { \
dr_dag_node * __t__ = dr_enter_wait_tasks_(file, line); \
pragma_omp(taskwait); \
dr_return_from_wait_tasks_(__t__, file, line); \
} while(0)
/* Select profiled or plain variants depending on DAG_RECORDER level. */
#if DAG_RECORDER>=2
#define pragma_omp_task(options, statement) \
pragma_omp_task_with_prof(options, statement)
#define pragma_omp_task_(options, statement, file, line) \
pragma_omp_task_with_prof_(options, statement, file, line)
#define pragma_omp_taskc(options, callable) \
pragma_omp_taskc_with_prof(options, callable)
#define pragma_omp_taskwait pragma_omp_taskwait_with_prof
#define pragma_omp_taskwait_(file, line) pragma_omp_taskwait_with_prof_(file, line)
#define dr_get_max_workers() (omp_in_parallel() ? omp_get_num_threads() : omp_get_max_threads())
#define dr_get_worker() omp_get_thread_num()
/* when using DAG Recorder with OpenMP task parallelism,
   the following usual sequence needs to be instrumented
   #pragma omp parallel
   #pragma omp single
   S;
   to the following
   {
   dr_dag_node * __t__ = dr_enter_other();
   #pragma omp parallel
   #pragma omp single
   {
   dr_return_from_other(__t__);
   S;
   __t__ = dr_enter_other();
   }
   dr_return_from_other(__t__);
   }
*/
#define pragma_omp_parallel_single(clause, S) \
do { \
dr_dag_node * __t__ = dr_enter_other(); \
pragma_omp(parallel) { \
pragma_omp(single clause) { \
dr_return_from_other(__t__); \
S \
__t__ = dr_enter_other(); \
} \
} \
dr_return_from_other(__t__); \
} while(0)
#else
/* DAG recorder off: all macros reduce to the plain OpenMP forms. */
#define pragma_omp_task(options, statement) \
pragma_omp_task_no_prof(options, statement)
#define pragma_omp_task_(options, statement, file, line) \
pragma_omp_task_no_prof(options, statement)
#define pragma_omp_taskc(options, callable) \
pragma_omp_taskc_no_prof(options, callable)
#define pragma_omp_taskwait pragma_omp_taskwait_no_prof
#define pragma_omp_taskwait_(file, line) pragma_omp_taskwait_no_prof
#define pragma_omp_parallel_single(clause, S) \
do { \
pragma_omp(parallel) { \
pragma_omp(single clause) { \
S \
} \
} \
} while(0)
#endif
|
GB_binop__div_fp32.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__div_fp32)
// A.*B function (eWiseMult): GB (_AemultB_08__div_fp32)
// A.*B function (eWiseMult): GB (_AemultB_02__div_fp32)
// A.*B function (eWiseMult): GB (_AemultB_04__div_fp32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__div_fp32)
// A*D function (colscale): GB (_AxD__div_fp32)
// D*A function (rowscale): GB (_DxB__div_fp32)
// C+=B function (dense accum): GB (_Cdense_accumB__div_fp32)
// C+=b function (dense accum): GB (_Cdense_accumb__div_fp32)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__div_fp32)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__div_fp32)
// C=scalar+B GB (_bind1st__div_fp32)
// C=scalar+B' GB (_bind1st_tran__div_fp32)
// C=A+scalar GB (_bind2nd__div_fp32)
// C=A'+scalar GB (_bind2nd_tran__div_fp32)
// C type: float
// A type: float
// A pattern? 0
// B type: float
// B pattern? 0
// BinaryOp: cij = (aij / bij)
// Type and operator plumbing for DIV_FP32: C, A, and B are all float,
// and the binary op is cij = aij / bij.  These macros drive the included
// templates below.
#define GB_ATYPE \
float
#define GB_BTYPE \
float
#define GB_CTYPE \
float
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
float aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
// (stray trailing backslash removed: it line-spliced the comment that
// followed into the macro definition)
#define GB_A_IS_PATTERN \
0
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
float bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
// (stray trailing backslash removed here as well)
#define GB_B_IS_PATTERN \
0
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
float t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x / y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_DIV || GxB_NO_FP32 || GxB_NO_DIV_FP32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B where C, A, and B are all dense; the elementwise op here is DIV.
// All of the work is in the included template, driven by the GB_* macros.
void GB (_Cdense_ewise3_accum__div_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense (no accumulation into C);
// the elementwise op here is DIV.  Work is in the included template.
void GB (_Cdense_ewise3_noaccum__div_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense matrix C.
// B_ek_slicing / B_ntasks / B_nthreads describe the parallel slicing of B
// consumed by the included template (presumably produced by GB_ek_slice —
// confirm against callers).
GrB_Info GB (_Cdense_accumB__div_fp32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
// operator compiled out: caller must fall back to the generic method
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b (passed as untyped GB_void*, actual type
// float) into a dense matrix C.  Work is in the included template.
GrB_Info GB (_Cdense_accumb__div_fp32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
// operator compiled out: caller must fall back to the generic method
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type float
float bwork = (*((float *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
}
// single exit point; the original also returned inside the block above,
// which left this statement as unreachable dead code
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D.
// A_ek_slicing / A_ntasks / A_nthreads describe the parallel slicing of A
// used by the included template.
GrB_Info GB (_AxD__div_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// Cx is written directly by the template
float *restrict Cx = (float *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D.
GrB_Info GB (_DxB__div_fp32)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// Cx is written directly by the template
float *restrict Cx = (float *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, or C<!M>=A+B with op DIV.  The mask M may be
// absent, structural, and/or complemented.  All numerical work is in the
// included template.
GrB_Info GB (_AaddB__div_fp32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// workspaces freed by GB_FREE_WORKSPACE below
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
// alpha/beta are only read by the template in the eWiseUnion case,
// so they are deliberately left uninitialized otherwise
float alpha_scalar ;
float beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((float *) alpha_scalar_in)) ;
beta_scalar = (*((float *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult (method 08): C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is
// sparse or hypersparse; op is DIV.  Work is in the included meta file.
GrB_Info GB (_AemultB_08__div_fp32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 02): C<#> = A.*B when A is sparse/hyper and B is
// bitmap/full.  The flipxy handling is resolved at compile time via
// GB_BINOP_FLIP (0 for DIV, since z=div(y,x) is rewritten as rdiv).
GrB_Info GB (_AemultB_02__div_fp32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 04): C<M> = A.*B with M sparse/hyper and both A and B
// bitmap/full; op is DIV.  Work is in the included template.
GrB_Info GB (_AemultB_04__div_fp32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is bitmap; op is DIV.
GrB_Info GB (_AemultB_bitmap__div_fp32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = op (x, Bx): bind the scalar x as the FIRST operand of DIV, so that
// Cx [p] = x / Bx [p] for every entry present per the bitmap Bb.
// Cx and Bx may be aliased; entries absent from Bb are left untouched.
GrB_Info GB (_bind1st__div_fp32)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float *Cx = (float *) Cx_output ;
float *Bx = (float *) Bx_input ;
float x = (*((float *) x_input)) ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
// only compute entries present in the bitmap
if (GBB (Bb, p))
{
float denom = GBX (Bx, p, false) ;
Cx [p] = (x / denom) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = op (Ax, y): bind the scalar y as the SECOND operand of DIV, so that
// Cx [p] = Ax [p] / y for every entry present per the bitmap Ab.
// Cx and Ax may be aliased; entries absent from Ab are left untouched.
GrB_Info GB (_bind2nd__div_fp32)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float *Cx = (float *) Cx_output ;
float *Ax = (float *) Ax_input ;
float y = (*((float *) y_input)) ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// only compute entries present in the bitmap
if (GBB (Ab, p))
{
float numer = GBX (Ax, p, false) ;
Cx [p] = (numer / y) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// C = op (x, A'): transpose A and apply DIV with scalar x bound first,
// i.e. cij = x / aij.  GB_CAST_OP is the per-entry kernel used by the
// transpose template (no typecasting, despite the macro name).
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x / aij) ; \
}
GrB_Info GB (_bind1st_tran__div_fp32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
float
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float x = (*((const float *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code following this function; note this is a
// preprocessor action, so it runs even though it sits after the returns
#undef GB_ATYPE
#define GB_ATYPE \
float
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// C = op (A', y): transpose A and apply DIV with scalar y bound second,
// i.e. cij = aij / y.  GB_CAST_OP is the per-entry kernel used by the
// transpose template (no typecasting, despite the macro name).
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij / y) ; \
}
GrB_Info GB (_bind2nd_tran__div_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float y = (*((const float *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
setround_sse4.c | #include "mex.h"
#include <float.h>
#include <omp.h>
#pragma STDC FENV_ACCESS ON
/*
 * setround: set the floating-point rounding mode on every OpenMP thread.
 * prhs[0] selects the mode: -1 = downward, 0 = nearest, 1 = upward,
 * 2 = toward zero (chop); anything else falls back to nearest.
 * Uses the MSVC-specific _controlfp_s interface (_RC_* / _MCW_RC), so this
 * translation unit is Windows-only.
 */
void mexFunction(int nlhs, mxArray *plhs[],
                 int nrhs, const mxArray *prhs[])
{
    int rnd, n, i;

    rnd = (int) mxGetScalar(prhs[0]);
    n = omp_get_max_threads();
    /* one loop iteration per thread so each thread's FP control word is set */
    #pragma omp parallel
    {
        #pragma omp for private(i)
        for (i = 0; i < n; i++)
        {
            /* control_word and err are declared inside the loop body so each
               thread has its own copy; in the original they were shared
               across the parallel region, making the concurrent writes a
               data race (undefined behavior under OpenMP/C11). */
            unsigned int control_word;
            int err;
            switch (rnd) {
            case -1 :
                err = _controlfp_s(&control_word, _RC_DOWN, _MCW_RC);
                break;
            case 0 :
                err = _controlfp_s(&control_word, _RC_NEAR, _MCW_RC);
                break;
            case 1 :
                err = _controlfp_s(&control_word, _RC_UP, _MCW_RC);
                break;
            case 2 :
                err = _controlfp_s(&control_word, _RC_CHOP, _MCW_RC);
                break;
            default :
                err = _controlfp_s(&control_word, _RC_NEAR, _MCW_RC);
                break;
            }
            (void) err;  /* result intentionally ignored, as in the original */
        }
    }
} /* setround */
|
convolution_sgemm_pack8to1_int8.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void im2col_sgemm_pack8to1_int8_sse(const Mat& bottom_im2col, Mat& top_blob, const Mat& kernel, const Option& opt)
{
#if NCNN_AVX512VNNI && __AVX512F__ && !__AVX512VNNI__
if (ncnn::cpu_support_x86_avx512_vnni())
{
extern void im2col_sgemm_pack8to1_int8_sse_avx512vnni(const Mat& bottom_im2col, Mat& top_blob, const Mat& kernel, const Option& opt);
im2col_sgemm_pack8to1_int8_sse_avx512vnni(bottom_im2col, top_blob, kernel, opt);
return;
}
#endif
#if NCNN_AVXVNNI && __AVX2__ && !__AVXVNNI__
if (ncnn::cpu_support_x86_avx_vnni())
{
extern void im2col_sgemm_pack8to1_int8_sse_avxvnni(const Mat& bottom_im2col, Mat& top_blob, const Mat& kernel, const Option& opt);
im2col_sgemm_pack8to1_int8_sse_avxvnni(bottom_im2col, top_blob, kernel, opt);
return;
}
#endif
#if NCNN_XOP && __SSE2__ && !__XOP__
if (ncnn::cpu_support_x86_xop())
{
extern void im2col_sgemm_pack8to1_int8_sse_xop(const Mat& bottom_im2col, Mat& top_blob, const Mat& kernel, const Option& opt);
im2col_sgemm_pack8to1_int8_sse_xop(bottom_im2col, top_blob, kernel, opt);
return;
}
#endif
// Mat bottom_im2col(size, maxk, inch, 8u, 8, opt.workspace_allocator);
const int size = bottom_im2col.w;
const int maxk = bottom_im2col.h;
const int inch = bottom_im2col.c;
const int outch = top_blob.c;
// permute
Mat tmp;
#if __AVX2__
if (size >= 4)
tmp.create(4 * maxk, inch, size / 4 + (size % 4) / 2 + size % 2, 8u, 8, opt.workspace_allocator);
else if (size >= 2)
tmp.create(2 * maxk, inch, size / 2 + size % 2, 8u, 8, opt.workspace_allocator);
else
tmp.create(maxk, inch, size, 8u, 8, opt.workspace_allocator);
#else
if (size >= 2)
tmp.create(2 * maxk, inch, size / 2 + size % 2, 8u, 8, opt.workspace_allocator);
else
tmp.create(maxk, inch, size, 8u, 8, opt.workspace_allocator);
#endif
{
#if __AVX2__
int remain_size_start = 0;
int nn_size = size >> 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 4;
int64_t* tmpptr = tmp.channel(i / 4);
for (int q = 0; q < inch; q++)
{
const int64_t* img0 = (const int64_t*)bottom_im2col.channel(q) + i;
for (int k = 0; k < maxk; k++)
{
__m256i _v = _mm256_loadu_si256((const __m256i*)img0);
_mm256_storeu_si256((__m256i*)tmpptr, _v);
tmpptr += 4;
img0 += size;
}
}
}
remain_size_start += nn_size << 2;
nn_size = (size - remain_size_start) >> 1;
#else
int remain_size_start = 0;
int nn_size = (size - remain_size_start) >> 1;
#endif
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 2;
#if __AVX2__
int64_t* tmpptr = tmp.channel(i / 4 + (i % 4) / 2);
#else
int64_t* tmpptr = tmp.channel(i / 2);
#endif
for (int q = 0; q < inch; q++)
{
const int64_t* img0 = (const int64_t*)bottom_im2col.channel(q) + i;
for (int k = 0; k < maxk; k++)
{
__m128i _v = _mm_loadu_si128((const __m128i*)img0);
_mm_storeu_si128((__m128i*)tmpptr, _v);
tmpptr += 2;
img0 += size;
}
}
}
remain_size_start += nn_size << 1;
#pragma omp parallel for num_threads(opt.num_threads)
for (int i = remain_size_start; i < size; i++)
{
#if __AVX2__
int64_t* tmpptr = tmp.channel(i / 4 + (i % 4) / 2 + i % 2);
#else
int64_t* tmpptr = tmp.channel(i / 2 + i % 2);
#endif
for (int q = 0; q < inch; q++)
{
const int64_t* img0 = (const int64_t*)bottom_im2col.channel(q) + i;
for (int k = 0; k < maxk; k++)
{
tmpptr[0] = img0[0];
tmpptr += 1;
img0 += size;
}
}
}
}
int nn_outch = 0;
int remain_outch_start = 0;
nn_outch = outch >> 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp = 0; pp < nn_outch; pp++)
{
int p = pp * 4;
int* outptr0 = top_blob.channel(p);
int* outptr1 = top_blob.channel(p + 1);
int* outptr2 = top_blob.channel(p + 2);
int* outptr3 = top_blob.channel(p + 3);
int i = 0;
#if __AVX2__
for (; i + 3 < size; i += 4)
{
const signed char* tmpptr = tmp.channel(i / 4);
const signed char* kptr0 = kernel.channel(p / 4);
int nn = inch * maxk; // inch always > 0
__m256i _sum00_11 = _mm256_setzero_si256();
__m256i _sum10_01 = _mm256_setzero_si256();
__m256i _sum02_13 = _mm256_setzero_si256();
__m256i _sum12_03 = _mm256_setzero_si256();
__m256i _sum04_15 = _mm256_setzero_si256();
__m256i _sum14_05 = _mm256_setzero_si256();
__m256i _sum06_17 = _mm256_setzero_si256();
__m256i _sum16_07 = _mm256_setzero_si256();
int j = 0;
for (; j < nn; j++)
{
__m128i _val01 = _mm_loadu_si128((const __m128i*)tmpptr);
__m256i _val01_16 = _mm256_cvtepi8_epi16(_val01);
__m128i _w01 = _mm_loadu_si128((const __m128i*)kptr0);
__m128i _w23 = _mm_loadu_si128((const __m128i*)(kptr0 + 16));
__m256i _w01_16 = _mm256_cvtepi8_epi16(_w01);
__m256i _w23_16 = _mm256_cvtepi8_epi16(_w23);
__m256i _val10_16 = _mm256_permute4x64_epi64(_val01_16, 78);
#if __AVXVNNI__ || __AVX512VNNI__
_sum00_11 = _mm256_dpwssd_epi32(_sum00_11, _val01_16, _w01_16);
_sum10_01 = _mm256_dpwssd_epi32(_sum10_01, _val10_16, _w01_16);
_sum02_13 = _mm256_dpwssd_epi32(_sum02_13, _val01_16, _w23_16);
_sum12_03 = _mm256_dpwssd_epi32(_sum12_03, _val10_16, _w23_16);
#else
__m256i _sl00_11 = _mm256_mullo_epi16(_val01_16, _w01_16);
__m256i _sh00_11 = _mm256_mulhi_epi16(_val01_16, _w01_16);
__m256i _sl10_01 = _mm256_mullo_epi16(_val10_16, _w01_16);
__m256i _sh10_01 = _mm256_mulhi_epi16(_val10_16, _w01_16);
__m256i _sl02_13 = _mm256_mullo_epi16(_val01_16, _w23_16);
__m256i _sh02_13 = _mm256_mulhi_epi16(_val01_16, _w23_16);
__m256i _sl12_03 = _mm256_mullo_epi16(_val10_16, _w23_16);
__m256i _sh12_03 = _mm256_mulhi_epi16(_val10_16, _w23_16);
_sum00_11 = _mm256_add_epi32(_sum00_11, _mm256_unpacklo_epi16(_sl00_11, _sh00_11));
_sum10_01 = _mm256_add_epi32(_sum10_01, _mm256_unpacklo_epi16(_sl10_01, _sh10_01));
_sum02_13 = _mm256_add_epi32(_sum02_13, _mm256_unpacklo_epi16(_sl02_13, _sh02_13));
_sum12_03 = _mm256_add_epi32(_sum12_03, _mm256_unpacklo_epi16(_sl12_03, _sh12_03));
_sum00_11 = _mm256_add_epi32(_sum00_11, _mm256_unpackhi_epi16(_sl00_11, _sh00_11));
_sum10_01 = _mm256_add_epi32(_sum10_01, _mm256_unpackhi_epi16(_sl10_01, _sh10_01));
_sum02_13 = _mm256_add_epi32(_sum02_13, _mm256_unpackhi_epi16(_sl02_13, _sh02_13));
_sum12_03 = _mm256_add_epi32(_sum12_03, _mm256_unpackhi_epi16(_sl12_03, _sh12_03));
#endif
__m128i _val23 = _mm_loadu_si128((const __m128i*)(tmpptr + 16));
__m256i _val23_16 = _mm256_cvtepi8_epi16(_val23);
__m256i _val32_16 = _mm256_permute4x64_epi64(_val23_16, 78);
#if __AVXVNNI__ || __AVX512VNNI__
_sum04_15 = _mm256_dpwssd_epi32(_sum04_15, _val23_16, _w01_16);
_sum14_05 = _mm256_dpwssd_epi32(_sum14_05, _val32_16, _w01_16);
_sum06_17 = _mm256_dpwssd_epi32(_sum06_17, _val23_16, _w23_16);
_sum16_07 = _mm256_dpwssd_epi32(_sum16_07, _val32_16, _w23_16);
#else
__m256i _sl04_15 = _mm256_mullo_epi16(_val23_16, _w01_16);
__m256i _sh04_15 = _mm256_mulhi_epi16(_val23_16, _w01_16);
__m256i _sl14_05 = _mm256_mullo_epi16(_val32_16, _w01_16);
__m256i _sh14_05 = _mm256_mulhi_epi16(_val32_16, _w01_16);
__m256i _sl06_17 = _mm256_mullo_epi16(_val23_16, _w23_16);
__m256i _sh06_17 = _mm256_mulhi_epi16(_val23_16, _w23_16);
__m256i _sl16_07 = _mm256_mullo_epi16(_val32_16, _w23_16);
__m256i _sh16_07 = _mm256_mulhi_epi16(_val32_16, _w23_16);
_sum04_15 = _mm256_add_epi32(_sum04_15, _mm256_unpacklo_epi16(_sl04_15, _sh04_15));
_sum14_05 = _mm256_add_epi32(_sum14_05, _mm256_unpacklo_epi16(_sl14_05, _sh14_05));
_sum06_17 = _mm256_add_epi32(_sum06_17, _mm256_unpacklo_epi16(_sl06_17, _sh06_17));
_sum16_07 = _mm256_add_epi32(_sum16_07, _mm256_unpacklo_epi16(_sl16_07, _sh16_07));
_sum04_15 = _mm256_add_epi32(_sum04_15, _mm256_unpackhi_epi16(_sl04_15, _sh04_15));
_sum14_05 = _mm256_add_epi32(_sum14_05, _mm256_unpackhi_epi16(_sl14_05, _sh14_05));
_sum06_17 = _mm256_add_epi32(_sum06_17, _mm256_unpackhi_epi16(_sl06_17, _sh06_17));
_sum16_07 = _mm256_add_epi32(_sum16_07, _mm256_unpackhi_epi16(_sl16_07, _sh16_07));
#endif
tmpptr += 32;
kptr0 += 32;
}
// transpose 4x8
{
__m256i _tmp0, _tmp1, _tmp2, _tmp3;
_tmp0 = _mm256_unpacklo_epi32(_sum00_11, _sum10_01);
_tmp1 = _mm256_unpacklo_epi32(_sum02_13, _sum12_03);
_tmp2 = _mm256_unpackhi_epi32(_sum00_11, _sum10_01);
_tmp3 = _mm256_unpackhi_epi32(_sum02_13, _sum12_03);
_sum00_11 = _mm256_unpacklo_epi64(_tmp0, _tmp1);
_sum10_01 = _mm256_unpackhi_epi64(_tmp0, _tmp1);
_sum02_13 = _mm256_unpacklo_epi64(_tmp2, _tmp3);
_sum12_03 = _mm256_unpackhi_epi64(_tmp2, _tmp3);
}
{
__m256i _tmp0, _tmp1, _tmp2, _tmp3;
_tmp0 = _mm256_unpacklo_epi32(_sum04_15, _sum14_05);
_tmp1 = _mm256_unpacklo_epi32(_sum06_17, _sum16_07);
_tmp2 = _mm256_unpackhi_epi32(_sum04_15, _sum14_05);
_tmp3 = _mm256_unpackhi_epi32(_sum06_17, _sum16_07);
_sum04_15 = _mm256_unpacklo_epi64(_tmp0, _tmp1);
_sum14_05 = _mm256_unpackhi_epi64(_tmp0, _tmp1);
_sum06_17 = _mm256_unpacklo_epi64(_tmp2, _tmp3);
_sum16_07 = _mm256_unpackhi_epi64(_tmp2, _tmp3);
}
_sum00_11 = _mm256_add_epi32(_sum00_11, _sum10_01);
_sum02_13 = _mm256_add_epi32(_sum02_13, _sum12_03);
_sum00_11 = _mm256_add_epi32(_sum00_11, _sum02_13);
_sum04_15 = _mm256_add_epi32(_sum04_15, _sum14_05);
_sum06_17 = _mm256_add_epi32(_sum06_17, _sum16_07);
_sum04_15 = _mm256_add_epi32(_sum04_15, _sum06_17);
__m256i _perm_mask = _mm256_set_epi32(6, 3, 4, 1, 7, 2, 5, 0);
_sum00_11 = _mm256_permutevar8x32_epi32(_sum00_11, _perm_mask);
_sum04_15 = _mm256_permutevar8x32_epi32(_sum04_15, _perm_mask);
int sum[16];
_mm256_storeu_si256((__m256i*)sum, _sum00_11);
_mm256_storeu_si256((__m256i*)(sum + 8), _sum04_15);
outptr0[0] = sum[0];
outptr1[0] = sum[1];
outptr2[0] = sum[2];
outptr3[0] = sum[3];
outptr0[1] = sum[4];
outptr1[1] = sum[5];
outptr2[1] = sum[6];
outptr3[1] = sum[7];
outptr0[2] = sum[8];
outptr1[2] = sum[9];
outptr2[2] = sum[10];
outptr3[2] = sum[11];
outptr0[3] = sum[12];
outptr1[3] = sum[13];
outptr2[3] = sum[14];
outptr3[3] = sum[15];
outptr0 += 4;
outptr1 += 4;
outptr2 += 4;
outptr3 += 4;
}
#endif
for (; i + 1 < size; i += 2)
{
#if __AVX2__
const signed char* tmpptr = tmp.channel(i / 4 + (i % 4) / 2);
#else
const signed char* tmpptr = tmp.channel(i / 2);
#endif
const signed char* kptr0 = kernel.channel(p / 4);
int nn = inch * maxk; // inch always > 0
#if __AVX2__
__m256i _sum00_11 = _mm256_setzero_si256();
__m256i _sum10_01 = _mm256_setzero_si256();
__m256i _sum02_13 = _mm256_setzero_si256();
__m256i _sum12_03 = _mm256_setzero_si256();
#else
__m128i _sum00 = _mm_setzero_si128();
__m128i _sum01 = _mm_setzero_si128();
__m128i _sum02 = _mm_setzero_si128();
__m128i _sum03 = _mm_setzero_si128();
__m128i _sum10 = _mm_setzero_si128();
__m128i _sum11 = _mm_setzero_si128();
__m128i _sum12 = _mm_setzero_si128();
__m128i _sum13 = _mm_setzero_si128();
#endif
int j = 0;
for (; j < nn; j++)
{
#if __AVX2__
__m128i _val01 = _mm_loadu_si128((const __m128i*)tmpptr);
__m256i _val01_16 = _mm256_cvtepi8_epi16(_val01);
__m128i _w01 = _mm_loadu_si128((const __m128i*)kptr0);
__m128i _w23 = _mm_loadu_si128((const __m128i*)(kptr0 + 16));
__m256i _w01_16 = _mm256_cvtepi8_epi16(_w01);
__m256i _w23_16 = _mm256_cvtepi8_epi16(_w23);
__m256i _val10_16 = _mm256_permute4x64_epi64(_val01_16, 78);
#if __AVXVNNI__ || __AVX512VNNI__
_sum00_11 = _mm256_dpwssd_epi32(_sum00_11, _val01_16, _w01_16);
_sum10_01 = _mm256_dpwssd_epi32(_sum10_01, _val10_16, _w01_16);
_sum02_13 = _mm256_dpwssd_epi32(_sum02_13, _val01_16, _w23_16);
_sum12_03 = _mm256_dpwssd_epi32(_sum12_03, _val10_16, _w23_16);
#else
__m256i _sl00_11 = _mm256_mullo_epi16(_val01_16, _w01_16);
__m256i _sh00_11 = _mm256_mulhi_epi16(_val01_16, _w01_16);
__m256i _sl10_01 = _mm256_mullo_epi16(_val10_16, _w01_16);
__m256i _sh10_01 = _mm256_mulhi_epi16(_val10_16, _w01_16);
__m256i _sl02_13 = _mm256_mullo_epi16(_val01_16, _w23_16);
__m256i _sh02_13 = _mm256_mulhi_epi16(_val01_16, _w23_16);
__m256i _sl12_03 = _mm256_mullo_epi16(_val10_16, _w23_16);
__m256i _sh12_03 = _mm256_mulhi_epi16(_val10_16, _w23_16);
_sum00_11 = _mm256_add_epi32(_sum00_11, _mm256_unpacklo_epi16(_sl00_11, _sh00_11));
_sum10_01 = _mm256_add_epi32(_sum10_01, _mm256_unpacklo_epi16(_sl10_01, _sh10_01));
_sum02_13 = _mm256_add_epi32(_sum02_13, _mm256_unpacklo_epi16(_sl02_13, _sh02_13));
_sum12_03 = _mm256_add_epi32(_sum12_03, _mm256_unpacklo_epi16(_sl12_03, _sh12_03));
_sum00_11 = _mm256_add_epi32(_sum00_11, _mm256_unpackhi_epi16(_sl00_11, _sh00_11));
_sum10_01 = _mm256_add_epi32(_sum10_01, _mm256_unpackhi_epi16(_sl10_01, _sh10_01));
_sum02_13 = _mm256_add_epi32(_sum02_13, _mm256_unpackhi_epi16(_sl02_13, _sh02_13));
_sum12_03 = _mm256_add_epi32(_sum12_03, _mm256_unpackhi_epi16(_sl12_03, _sh12_03));
#endif
#else
__m128i _val01 = _mm_loadu_si128((const __m128i*)tmpptr);
__m128i _extval01 = _mm_cmpgt_epi8(_mm_setzero_si128(), _val01);
__m128i _val0 = _mm_unpacklo_epi8(_val01, _extval01);
__m128i _val1 = _mm_unpackhi_epi8(_val01, _extval01);
__m128i _w01 = _mm_loadu_si128((const __m128i*)kptr0);
__m128i _w23 = _mm_loadu_si128((const __m128i*)(kptr0 + 16));
__m128i _extw01 = _mm_cmpgt_epi8(_mm_setzero_si128(), _w01);
__m128i _extw23 = _mm_cmpgt_epi8(_mm_setzero_si128(), _w23);
__m128i _w0 = _mm_unpacklo_epi8(_w01, _extw01);
__m128i _w1 = _mm_unpackhi_epi8(_w01, _extw01);
__m128i _w2 = _mm_unpacklo_epi8(_w23, _extw23);
__m128i _w3 = _mm_unpackhi_epi8(_w23, _extw23);
#if __XOP__
_sum00 = _mm_maddd_epi16(_val0, _w0, _sum00);
_sum01 = _mm_maddd_epi16(_val0, _w1, _sum01);
_sum02 = _mm_maddd_epi16(_val0, _w2, _sum02);
_sum03 = _mm_maddd_epi16(_val0, _w3, _sum03);
_sum10 = _mm_maddd_epi16(_val1, _w0, _sum10);
_sum11 = _mm_maddd_epi16(_val1, _w1, _sum11);
_sum12 = _mm_maddd_epi16(_val1, _w2, _sum12);
_sum13 = _mm_maddd_epi16(_val1, _w3, _sum13);
#else
__m128i _sl00 = _mm_mullo_epi16(_val0, _w0);
__m128i _sh00 = _mm_mulhi_epi16(_val0, _w0);
__m128i _sl01 = _mm_mullo_epi16(_val0, _w1);
__m128i _sh01 = _mm_mulhi_epi16(_val0, _w1);
__m128i _sl02 = _mm_mullo_epi16(_val0, _w2);
__m128i _sh02 = _mm_mulhi_epi16(_val0, _w2);
__m128i _sl03 = _mm_mullo_epi16(_val0, _w3);
__m128i _sh03 = _mm_mulhi_epi16(_val0, _w3);
__m128i _sl10 = _mm_mullo_epi16(_val1, _w0);
__m128i _sh10 = _mm_mulhi_epi16(_val1, _w0);
__m128i _sl11 = _mm_mullo_epi16(_val1, _w1);
__m128i _sh11 = _mm_mulhi_epi16(_val1, _w1);
__m128i _sl12 = _mm_mullo_epi16(_val1, _w2);
__m128i _sh12 = _mm_mulhi_epi16(_val1, _w2);
__m128i _sl13 = _mm_mullo_epi16(_val1, _w3);
__m128i _sh13 = _mm_mulhi_epi16(_val1, _w3);
_sum00 = _mm_add_epi32(_sum00, _mm_unpacklo_epi16(_sl00, _sh00));
_sum01 = _mm_add_epi32(_sum01, _mm_unpacklo_epi16(_sl01, _sh01));
_sum02 = _mm_add_epi32(_sum02, _mm_unpacklo_epi16(_sl02, _sh02));
_sum03 = _mm_add_epi32(_sum03, _mm_unpacklo_epi16(_sl03, _sh03));
_sum00 = _mm_add_epi32(_sum00, _mm_unpackhi_epi16(_sl00, _sh00));
_sum01 = _mm_add_epi32(_sum01, _mm_unpackhi_epi16(_sl01, _sh01));
_sum02 = _mm_add_epi32(_sum02, _mm_unpackhi_epi16(_sl02, _sh02));
_sum03 = _mm_add_epi32(_sum03, _mm_unpackhi_epi16(_sl03, _sh03));
_sum10 = _mm_add_epi32(_sum10, _mm_unpacklo_epi16(_sl10, _sh10));
_sum11 = _mm_add_epi32(_sum11, _mm_unpacklo_epi16(_sl11, _sh11));
_sum12 = _mm_add_epi32(_sum12, _mm_unpacklo_epi16(_sl12, _sh12));
_sum13 = _mm_add_epi32(_sum13, _mm_unpacklo_epi16(_sl13, _sh13));
_sum10 = _mm_add_epi32(_sum10, _mm_unpackhi_epi16(_sl10, _sh10));
_sum11 = _mm_add_epi32(_sum11, _mm_unpackhi_epi16(_sl11, _sh11));
_sum12 = _mm_add_epi32(_sum12, _mm_unpackhi_epi16(_sl12, _sh12));
_sum13 = _mm_add_epi32(_sum13, _mm_unpackhi_epi16(_sl13, _sh13));
#endif
#endif
tmpptr += 16;
kptr0 += 32;
}
#if __AVX2__
// transpose 4x8
{
__m256i _tmp0, _tmp1, _tmp2, _tmp3;
_tmp0 = _mm256_unpacklo_epi32(_sum00_11, _sum10_01);
_tmp1 = _mm256_unpacklo_epi32(_sum02_13, _sum12_03);
_tmp2 = _mm256_unpackhi_epi32(_sum00_11, _sum10_01);
_tmp3 = _mm256_unpackhi_epi32(_sum02_13, _sum12_03);
_sum00_11 = _mm256_unpacklo_epi64(_tmp0, _tmp1);
_sum10_01 = _mm256_unpackhi_epi64(_tmp0, _tmp1);
_sum02_13 = _mm256_unpacklo_epi64(_tmp2, _tmp3);
_sum12_03 = _mm256_unpackhi_epi64(_tmp2, _tmp3);
}
_sum00_11 = _mm256_add_epi32(_sum00_11, _sum10_01);
_sum02_13 = _mm256_add_epi32(_sum02_13, _sum12_03);
_sum00_11 = _mm256_add_epi32(_sum00_11, _sum02_13);
__m256i _perm_mask = _mm256_set_epi32(6, 3, 4, 1, 7, 2, 5, 0);
_sum00_11 = _mm256_permutevar8x32_epi32(_sum00_11, _perm_mask);
int sum[8];
_mm256_storeu_si256((__m256i*)sum, _sum00_11);
#else
// transpose 4x4
{
__m128i _tmp0, _tmp1, _tmp2, _tmp3;
_tmp0 = _mm_unpacklo_epi32(_sum00, _sum01);
_tmp1 = _mm_unpacklo_epi32(_sum02, _sum03);
_tmp2 = _mm_unpackhi_epi32(_sum00, _sum01);
_tmp3 = _mm_unpackhi_epi32(_sum02, _sum03);
_sum00 = _mm_unpacklo_epi64(_tmp0, _tmp1);
_sum01 = _mm_unpackhi_epi64(_tmp0, _tmp1);
_sum02 = _mm_unpacklo_epi64(_tmp2, _tmp3);
_sum03 = _mm_unpackhi_epi64(_tmp2, _tmp3);
}
{
__m128i _tmp0, _tmp1, _tmp2, _tmp3;
_tmp0 = _mm_unpacklo_epi32(_sum10, _sum11);
_tmp1 = _mm_unpacklo_epi32(_sum12, _sum13);
_tmp2 = _mm_unpackhi_epi32(_sum10, _sum11);
_tmp3 = _mm_unpackhi_epi32(_sum12, _sum13);
_sum10 = _mm_unpacklo_epi64(_tmp0, _tmp1);
_sum11 = _mm_unpackhi_epi64(_tmp0, _tmp1);
_sum12 = _mm_unpacklo_epi64(_tmp2, _tmp3);
_sum13 = _mm_unpackhi_epi64(_tmp2, _tmp3);
}
_sum00 = _mm_add_epi32(_sum00, _sum01);
_sum02 = _mm_add_epi32(_sum02, _sum03);
_sum10 = _mm_add_epi32(_sum10, _sum11);
_sum12 = _mm_add_epi32(_sum12, _sum13);
_sum00 = _mm_add_epi32(_sum00, _sum02);
_sum10 = _mm_add_epi32(_sum10, _sum12);
int sum[8];
_mm_storeu_si128((__m128i*)sum, _sum00);
_mm_storeu_si128((__m128i*)(sum + 4), _sum10);
#endif
outptr0[0] = sum[0];
outptr1[0] = sum[1];
outptr2[0] = sum[2];
outptr3[0] = sum[3];
outptr0[1] = sum[4];
outptr1[1] = sum[5];
outptr2[1] = sum[6];
outptr3[1] = sum[7];
outptr0 += 2;
outptr1 += 2;
outptr2 += 2;
outptr3 += 2;
}
for (; i < size; i++)
{
#if __AVX2__
const signed char* tmpptr = tmp.channel(i / 4 + (i % 4) / 2 + i % 2);
#else
const signed char* tmpptr = tmp.channel(i / 2 + i % 2);
#endif
const signed char* kptr0 = kernel.channel(p / 4);
int nn = inch * maxk; // inch always > 0
#if __AVX2__
__m256i _sum0_1 = _mm256_setzero_si256();
__m256i _sum2_3 = _mm256_setzero_si256();
#else
__m128i _sum0 = _mm_setzero_si128();
__m128i _sum1 = _mm_setzero_si128();
__m128i _sum2 = _mm_setzero_si128();
__m128i _sum3 = _mm_setzero_si128();
#endif
int j = 0;
for (; j < nn; j++)
{
#if __AVX2__
__m128i _val = _mm_loadl_epi64((const __m128i*)tmpptr);
_val = _mm_cvtepi8_epi16(_val);
__m128i _w01 = _mm_loadu_si128((const __m128i*)kptr0);
__m128i _w23 = _mm_loadu_si128((const __m128i*)(kptr0 + 16));
__m256i _w01_16 = _mm256_cvtepi8_epi16(_w01);
__m256i _w23_16 = _mm256_cvtepi8_epi16(_w23);
__m256i _valval = _mm256_inserti128_si256(_mm256_castsi128_si256(_val), _val, 1);
#if __AVXVNNI__ || __AVX512VNNI__
_sum0_1 = _mm256_dpwssd_epi32(_sum0_1, _valval, _w01_16);
_sum2_3 = _mm256_dpwssd_epi32(_sum2_3, _valval, _w23_16);
#else
__m256i _sl0_1 = _mm256_mullo_epi16(_valval, _w01_16);
__m256i _sh0_1 = _mm256_mulhi_epi16(_valval, _w01_16);
__m256i _sl2_3 = _mm256_mullo_epi16(_valval, _w23_16);
__m256i _sh2_3 = _mm256_mulhi_epi16(_valval, _w23_16);
_sum0_1 = _mm256_add_epi32(_sum0_1, _mm256_unpacklo_epi16(_sl0_1, _sh0_1));
_sum2_3 = _mm256_add_epi32(_sum2_3, _mm256_unpacklo_epi16(_sl2_3, _sh2_3));
_sum0_1 = _mm256_add_epi32(_sum0_1, _mm256_unpackhi_epi16(_sl0_1, _sh0_1));
_sum2_3 = _mm256_add_epi32(_sum2_3, _mm256_unpackhi_epi16(_sl2_3, _sh2_3));
#endif
#else
__m128i _val = _mm_loadl_epi64((const __m128i*)tmpptr);
#if __SSE4_1__
_val = _mm_cvtepi8_epi16(_val);
#else
_val = _mm_unpacklo_epi8(_val, _mm_cmpgt_epi8(_mm_setzero_si128(), _val));
#endif
__m128i _w01 = _mm_loadu_si128((const __m128i*)kptr0);
__m128i _w23 = _mm_loadu_si128((const __m128i*)(kptr0 + 16));
__m128i _extw01 = _mm_cmpgt_epi8(_mm_setzero_si128(), _w01);
__m128i _extw23 = _mm_cmpgt_epi8(_mm_setzero_si128(), _w23);
__m128i _w0 = _mm_unpacklo_epi8(_w01, _extw01);
__m128i _w1 = _mm_unpackhi_epi8(_w01, _extw01);
__m128i _w2 = _mm_unpacklo_epi8(_w23, _extw23);
__m128i _w3 = _mm_unpackhi_epi8(_w23, _extw23);
#if __XOP__
_sum0 = _mm_maddd_epi16(_val, _w0, _sum0);
_sum1 = _mm_maddd_epi16(_val, _w1, _sum1);
_sum2 = _mm_maddd_epi16(_val, _w2, _sum2);
_sum3 = _mm_maddd_epi16(_val, _w3, _sum3);
#else
__m128i _sl0 = _mm_mullo_epi16(_val, _w0);
__m128i _sh0 = _mm_mulhi_epi16(_val, _w0);
__m128i _sl1 = _mm_mullo_epi16(_val, _w1);
__m128i _sh1 = _mm_mulhi_epi16(_val, _w1);
__m128i _sl2 = _mm_mullo_epi16(_val, _w2);
__m128i _sh2 = _mm_mulhi_epi16(_val, _w2);
__m128i _sl3 = _mm_mullo_epi16(_val, _w3);
__m128i _sh3 = _mm_mulhi_epi16(_val, _w3);
_sum0 = _mm_add_epi32(_sum0, _mm_unpacklo_epi16(_sl0, _sh0));
_sum1 = _mm_add_epi32(_sum1, _mm_unpacklo_epi16(_sl1, _sh1));
_sum2 = _mm_add_epi32(_sum2, _mm_unpacklo_epi16(_sl2, _sh2));
_sum3 = _mm_add_epi32(_sum3, _mm_unpacklo_epi16(_sl3, _sh3));
_sum0 = _mm_add_epi32(_sum0, _mm_unpackhi_epi16(_sl0, _sh0));
_sum1 = _mm_add_epi32(_sum1, _mm_unpackhi_epi16(_sl1, _sh1));
_sum2 = _mm_add_epi32(_sum2, _mm_unpackhi_epi16(_sl2, _sh2));
_sum3 = _mm_add_epi32(_sum3, _mm_unpackhi_epi16(_sl3, _sh3));
#endif
#endif
tmpptr += 8;
kptr0 += 32;
}
#if __AVX2__
__m128i _sum0 = _mm256_extracti128_si256(_sum0_1, 0);
__m128i _sum1 = _mm256_extracti128_si256(_sum0_1, 1);
__m128i _sum2 = _mm256_extracti128_si256(_sum2_3, 0);
__m128i _sum3 = _mm256_extracti128_si256(_sum2_3, 1);
#endif
// transpose 4x4
{
__m128i _tmp0, _tmp1, _tmp2, _tmp3;
_tmp0 = _mm_unpacklo_epi32(_sum0, _sum1);
_tmp1 = _mm_unpacklo_epi32(_sum2, _sum3);
_tmp2 = _mm_unpackhi_epi32(_sum0, _sum1);
_tmp3 = _mm_unpackhi_epi32(_sum2, _sum3);
_sum0 = _mm_unpacklo_epi64(_tmp0, _tmp1);
_sum1 = _mm_unpackhi_epi64(_tmp0, _tmp1);
_sum2 = _mm_unpacklo_epi64(_tmp2, _tmp3);
_sum3 = _mm_unpackhi_epi64(_tmp2, _tmp3);
}
_sum0 = _mm_add_epi32(_sum0, _sum1);
_sum2 = _mm_add_epi32(_sum2, _sum3);
_sum0 = _mm_add_epi32(_sum0, _sum2);
int sum[4];
_mm_storeu_si128((__m128i*)sum, _sum0);
outptr0[0] = sum[0];
outptr1[0] = sum[1];
outptr2[0] = sum[2];
outptr3[0] = sum[3];
outptr0 += 1;
outptr1 += 1;
outptr2 += 1;
outptr3 += 1;
}
}
remain_outch_start += nn_outch << 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = remain_outch_start; p < outch; p++)
{
int* outptr0 = top_blob.channel(p);
int i = 0;
#if __AVX2__
        // Process 4 output pixels at a time for this single remaining output channel.
        // Each pack-8 pixel contributes 8 int8 values; two pixels share one 256-bit lane pair.
        for (; i + 3 < size; i += 4)
        {
            const signed char* tmpptr = tmp.channel(i / 4);
            const signed char* kptr0 = kernel.channel(p / 4 + p % 4);

            int nn = inch * maxk; // inch always > 0

            // _sum0_2 / _sum1_3 accumulate pixels 0 (low lane) and 1 (high lane);
            // _sum4_6 / _sum5_7 accumulate pixels 2 (low lane) and 3 (high lane).
            __m256i _sum0_2 = _mm256_setzero_si256();
            __m256i _sum1_3 = _mm256_setzero_si256();
            __m256i _sum4_6 = _mm256_setzero_si256();
            __m256i _sum5_7 = _mm256_setzero_si256();

            int j = 0;
            for (; j < nn; j++)
            {
                __m128i _val01 = _mm_loadu_si128((const __m128i*)tmpptr);
                __m128i _val23 = _mm_loadu_si128((const __m128i*)(tmpptr + 16));
                __m256i _val01_16 = _mm256_cvtepi8_epi16(_val01);
                __m256i _val23_16 = _mm256_cvtepi8_epi16(_val23);

                // Broadcast the 8 int16 weights of this channel into both lanes.
                __m128i _w01 = _mm_loadl_epi64((const __m128i*)kptr0);
                __m256i _w01_16 = _mm256_cvtepi8_epi16(_w01);
                _w01_16 = _mm256_permute4x64_epi64(_w01_16, _MM_SHUFFLE(1, 0, 1, 0));

                __m256i _sl00_10 = _mm256_mullo_epi16(_val01_16, _w01_16);
                __m256i _sh00_10 = _mm256_mulhi_epi16(_val01_16, _w01_16);
                __m256i _sl20_30 = _mm256_mullo_epi16(_val23_16, _w01_16);
                __m256i _sh20_30 = _mm256_mulhi_epi16(_val23_16, _w01_16);

                _sum0_2 = _mm256_add_epi32(_sum0_2, _mm256_unpacklo_epi16(_sl00_10, _sh00_10));
                _sum1_3 = _mm256_add_epi32(_sum1_3, _mm256_unpackhi_epi16(_sl00_10, _sh00_10));
                _sum4_6 = _mm256_add_epi32(_sum4_6, _mm256_unpacklo_epi16(_sl20_30, _sh20_30));
                _sum5_7 = _mm256_add_epi32(_sum5_7, _mm256_unpackhi_epi16(_sl20_30, _sh20_30));

                tmpptr += 32;
                kptr0 += 8;
            }

            _sum0_2 = _mm256_add_epi32(_sum0_2, _sum1_3);
            _sum4_6 = _mm256_add_epi32(_sum4_6, _sum5_7);

            __m128i _sum0 = _mm256_extracti128_si256(_sum0_2, 0);
            __m128i _sum2 = _mm256_extracti128_si256(_sum0_2, 1);
            // BUGFIX: pixel 2 lives in the LOW 128-bit lane of _sum4_6.
            // The original extracted lane 1 here, duplicating _sum6 and
            // writing a wrong value to outptr0[2].
            __m128i _sum4 = _mm256_extracti128_si256(_sum4_6, 0);
            __m128i _sum6 = _mm256_extracti128_si256(_sum4_6, 1);

            outptr0[0] = _mm_reduce_add_epi32(_sum0);
            outptr0[1] = _mm_reduce_add_epi32(_sum2);
            outptr0[2] = _mm_reduce_add_epi32(_sum4);
            outptr0[3] = _mm_reduce_add_epi32(_sum6);
            outptr0 += 4;
        }
#endif
for (; i + 1 < size; i += 2)
{
#if __AVX2__
const signed char* tmpptr = tmp.channel(i / 4 + (i % 4) / 2);
#else
const signed char* tmpptr = tmp.channel(i / 2);
#endif
const signed char* kptr0 = kernel.channel(p / 4 + p % 4);
int nn = inch * maxk; // inch always > 0
#if __AVX2__
__m256i _sum0_2 = _mm256_setzero_si256();
__m256i _sum1_3 = _mm256_setzero_si256();
#else
__m128i _sum0 = _mm_setzero_si128();
__m128i _sum1 = _mm_setzero_si128();
__m128i _sum2 = _mm_setzero_si128();
__m128i _sum3 = _mm_setzero_si128();
#endif
int j = 0;
for (; j < nn; j++)
{
#if __AVX2__
__m128i _val01 = _mm_loadu_si128((const __m128i*)tmpptr);
__m256i _val01_16 = _mm256_cvtepi8_epi16(_val01);
__m128i _w01 = _mm_loadl_epi64((const __m128i*)kptr0);
__m256i _w01_16 = _mm256_cvtepi8_epi16(_w01);
_w01_16 = _mm256_permute4x64_epi64(_w01_16, _MM_SHUFFLE(1, 0, 1, 0));
__m256i _sl00_10 = _mm256_mullo_epi16(_val01_16, _w01_16);
__m256i _sh00_10 = _mm256_mulhi_epi16(_val01_16, _w01_16);
_sum0_2 = _mm256_add_epi32(_sum0_2, _mm256_unpacklo_epi16(_sl00_10, _sh00_10));
_sum1_3 = _mm256_add_epi32(_sum1_3, _mm256_unpackhi_epi16(_sl00_10, _sh00_10));
#else
__m128i _val01 = _mm_loadu_si128((const __m128i*)tmpptr);
__m128i _extval01 = _mm_cmpgt_epi8(_mm_setzero_si128(), _val01);
__m128i _val0 = _mm_unpacklo_epi8(_val01, _extval01);
__m128i _val1 = _mm_unpackhi_epi8(_val01, _extval01);
__m128i _w01 = _mm_loadl_epi64((const __m128i*)kptr0);
#if __SSE4_1__
__m128i _w0 = _mm_cvtepi8_epi16(_w01);
#else
__m128i _extw01 = _mm_cmpgt_epi8(_mm_setzero_si128(), _w01);
__m128i _w0 = _mm_unpacklo_epi8(_w01, _extw01);
#endif
__m128i _sl00 = _mm_mullo_epi16(_val0, _w0);
__m128i _sh00 = _mm_mulhi_epi16(_val0, _w0);
__m128i _sl10 = _mm_mullo_epi16(_val1, _w0);
__m128i _sh10 = _mm_mulhi_epi16(_val1, _w0);
_sum0 = _mm_add_epi32(_sum0, _mm_unpacklo_epi16(_sl00, _sh00));
_sum1 = _mm_add_epi32(_sum1, _mm_unpackhi_epi16(_sl00, _sh00));
_sum2 = _mm_add_epi32(_sum2, _mm_unpacklo_epi16(_sl10, _sh10));
_sum3 = _mm_add_epi32(_sum3, _mm_unpackhi_epi16(_sl10, _sh10));
#endif
tmpptr += 16;
kptr0 += 8;
}
#if __AVX2__
_sum0_2 = _mm256_add_epi32(_sum0_2, _sum1_3);
__m128i _sum0 = _mm256_extracti128_si256(_sum0_2, 0);
__m128i _sum2 = _mm256_extracti128_si256(_sum0_2, 1);
#else
_sum0 = _mm_add_epi32(_sum0, _sum1);
_sum2 = _mm_add_epi32(_sum2, _sum3);
#endif
outptr0[0] = _mm_reduce_add_epi32(_sum0);
outptr0[1] = _mm_reduce_add_epi32(_sum2);
outptr0 += 2;
}
for (; i < size; i++)
{
#if __AVX2__
const signed char* tmpptr = tmp.channel(i / 4 + (i % 4) / 2 + i % 2);
#else
const signed char* tmpptr = tmp.channel(i / 2 + i % 2);
#endif
const signed char* kptr0 = kernel.channel(p / 4 + p % 4);
int nn = inch * maxk; // inch always > 0
__m128i _sum0 = _mm_setzero_si128();
__m128i _sum1 = _mm_setzero_si128();
int j = 0;
for (; j < nn; j++)
{
__m128i _val01 = _mm_loadl_epi64((const __m128i*)tmpptr);
#if __SSE4_1__
__m128i _val0 = _mm_cvtepi8_epi16(_val01);
#else
__m128i _extval01 = _mm_cmpgt_epi8(_mm_setzero_si128(), _val01);
__m128i _val0 = _mm_unpacklo_epi8(_val01, _extval01);
#endif
__m128i _w01 = _mm_loadl_epi64((const __m128i*)kptr0);
#if __SSE4_1__
__m128i _w0 = _mm_cvtepi8_epi16(_w01);
#else
__m128i _extw01 = _mm_cmpgt_epi8(_mm_setzero_si128(), _w01);
__m128i _w0 = _mm_unpacklo_epi8(_w01, _extw01);
#endif
__m128i _sl00 = _mm_mullo_epi16(_val0, _w0);
__m128i _sh00 = _mm_mulhi_epi16(_val0, _w0);
_sum0 = _mm_add_epi32(_sum0, _mm_unpacklo_epi16(_sl00, _sh00));
_sum1 = _mm_add_epi32(_sum1, _mm_unpackhi_epi16(_sl00, _sh00));
tmpptr += 8;
kptr0 += 8;
}
_sum0 = _mm_add_epi32(_sum0, _sum1);
outptr0[0] = _mm_reduce_add_epi32(_sum0);
outptr0 += 1;
}
}
}
// Repack the raw convolution kernel into the interleaved layout consumed by
// im2col_sgemm_pack8to1_int8_sse().
//   src layout: maxk-inch-outch
//   dst layout: 8a-4b-maxk-inch/8a-outch/4b
// The interface and the produced byte layout are unchanged.
static void convolution_im2col_sgemm_transform_kernel_pack8to1_int8_sse(const Mat& _kernel, Mat& kernel_tm, int inch, int outch, int kernel_w, int kernel_h)
{
    const int maxk = kernel_w * kernel_h;

    Mat kernel = _kernel.reshape(maxk, inch, outch);

    // Output channels are grouped by 4; each leftover channel gets its own slot.
    if (outch >= 4)
        kernel_tm.create(32 * maxk, inch / 8, outch / 4 + outch % 4, (size_t)1u);
    else
        kernel_tm.create(8 * maxk, inch / 8, outch, (size_t)1u);

    int q = 0;

    // Blocks of 4 output channels: per kernel tap, interleave 4 outputs x 8 inputs.
    for (; q + 3 < outch; q += 4)
    {
        signed char* dst = kernel_tm.channel(q / 4);

        for (int p = 0; p + 7 < inch; p += 8)
        {
            for (int k = 0; k < maxk; k++)
            {
                for (int oc = 0; oc < 4; oc++)
                {
                    for (int ic = 0; ic < 8; ic++)
                    {
                        const signed char* src = kernel.channel(q + oc).row<const signed char>(p + ic);
                        *dst++ = src[k];
                    }
                }
            }
        }
    }

    // Remaining output channels one at a time: 8 input values per tap.
    // TODO unroll 2
    for (; q < outch; q++)
    {
        signed char* dst = kernel_tm.channel(q / 4 + q % 4);

        for (int p = 0; p + 7 < inch; p += 8)
        {
            for (int k = 0; k < maxk; k++)
            {
                for (int ic = 0; ic < 8; ic++)
                {
                    const signed char* src = kernel.channel(q).row<const signed char>(p + ic);
                    *dst++ = src[k];
                }
            }
        }
    }
}
// im2col + sgemm driver for int8 pack8->pack1 convolution.
// Rearranges the input so each kernel tap's strided samples are contiguous,
// then hands the matrix product to im2col_sgemm_pack8to1_int8_sse().
static void convolution_im2col_sgemm_pack8to1_int8_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, const Option& opt)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    const int size = outw * outh;
    const int maxk = kernel_w * kernel_h;

    // im2col: one pack-8 pixel (8 int8 values) moves as a single int64_t.
    Mat bottom_im2col(size, maxk, inch, 8u, 8, opt.workspace_allocator);
    {
        // distance from the end of one output row to the start of the next input row
        const int gap = w * stride_h - outw * stride_w;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p = 0; p < inch; p++)
        {
            const Mat img = bottom_blob.channel(p);
            int64_t* ptr = bottom_im2col.channel(p);

            for (int u = 0; u < kernel_h; u++)
            {
                for (int v = 0; v < kernel_w; v++)
                {
                    const int64_t* sptr = img.row<const int64_t>(dilation_h * u) + dilation_w * v;

                    for (int i = 0; i < outh; i++)
                    {
                        for (int j = 0; j < outw; j++)
                        {
                            *ptr++ = *sptr;
                            sptr += stride_w;
                        }

                        sptr += gap;
                    }
                }
            }
        }
    }

    im2col_sgemm_pack8to1_int8_sse(bottom_im2col, top_blob, kernel, opt);
}
|
GB_unaryop__minv_uint16_int16.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__minv_uint16_int16
// op(A') function: GB_tran__minv_uint16_int16
// C type: uint16_t
// A type: int16_t
// cast: uint16_t cij = (uint16_t) aij
// unaryop: cij = GB_IMINV_UNSIGNED (aij, 16)
#define GB_ATYPE \
int16_t
#define GB_CTYPE \
uint16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IMINV_UNSIGNED (x, 16) ;
// casting
#define GB_CASTING(z, aij) \
uint16_t z = (uint16_t) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_UINT16 || GxB_NO_INT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// GB_unop__minv_uint16_int16: Cx = minv (cast (Ax))
// For each entry, casts int16_t -> uint16_t and applies
// z = GB_IMINV_UNSIGNED (x, 16) via the GB_CAST_OP macro defined above.
// Cx and Ax may be aliased because iteration p reads and writes only index p.
// Returns GrB_NO_VALUE when this operator is disabled (GB_DISABLE),
// GrB_SUCCESS otherwise.
GrB_Info GB_unop__minv_uint16_int16
(
    uint16_t *Cx, // Cx and Ax may be aliased
    int16_t *Ax,
    int64_t anz, // number of entries to process
    int nthreads // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // static schedule: every iteration does the same constant amount of work
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // Cx [p] = GB_IMINV_UNSIGNED ((uint16_t) Ax [p], 16)
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// GB_tran__minv_uint16_int16: C = minv (cast (A'))
// Transposes A, typecasts int16_t -> uint16_t, and applies the unary operator
// in a single pass.  The actual loop lives in the included template
// GB_unaryop_transpose.c, specialized by the GB_* macros defined above.
GrB_Info GB_tran__minv_uint16_int16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts, // per-slice row counts (see GB_unaryop_transpose.c)
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice, // partition of A among the slices
    int naslice // number of slices of A
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // phase 2 of 2: the template fills C using the precomputed row counts
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
myd09ga.c | #include<stdio.h>
#include "gdal.h"
#include "arrays.h"
#include<omp.h>
/* MODLAND QA Bits 500m long int bits[0-1]
* 00 -> class 0: Corrected product produced at ideal quality -- all bands
* 01 -> class 1: Corrected product produced at less than idel quality -- some or all bands
* 10 -> class 2: Corrected product NOT produced due to cloud effect -- all bands
* 11 -> class 3: Corrected product NOT produced due to other reasons -- some or all bands mayb be fill value (Note that a value of [11] overrides a value of [01])
*/
/* Extract the MODLAND QA class from a MOD/MYD09GA 500m QA word.
 * The class is encoded in the two least-significant bits (see the
 * class table above); all other bits are discarded. */
unsigned int myd09GAa(unsigned int pixel)
{
	const unsigned int modland_mask = 0x03u; /* binary 11: keep bits 0-1 */
	return pixel & modland_mask;
}
/* Band-wise Data Quality 500m long Int
* bits[2-5][6-9][10-13][14-17][18-21][22-25][26-29]
* 0000 -> class 0: highest quality
* 0111 -> class 1: noisy detector
* 1000 -> class 2: dead detector; data interpolated in L1B
* 1001 -> class 3: solar zenith >= 86 degrees
* 1010 -> class 4: solar zenith >= 85 and < 86 degrees
* 1011 -> class 5: missing input
* 1100 -> class 6: internal constant used in place of climatological data for at least one atmospheric constant
* 1101 -> class 7: correction out of bounds, pixel constrained to extreme allowable value
* 1110 -> class 8: L1B data faulty
* 1111 -> class 9: not processed due to deep ocean or cloud
* Class 10-15: Combination of bits unused
*/
/* Extract the 4-bit band-quality class for band `bandno` (1-based) from a
 * MOD/MYD09GA 500m QA word.  Band b occupies bits [2+4*(b-1) .. 5+4*(b-1)]
 * (see the class table above). */
unsigned int myd09GAc(unsigned int pixel, int bandno)
{
	const int shift = 4 * (bandno - 1) + 2; /* move band's nibble to bits 0-3 */
	return (pixel >> shift) & 0x0Fu;
}
/* Print the command-line help text to stdout.  Output is byte-identical
 * to the original printf sequence. */
void usage()
{
	static const char *help_lines[] = {
		"-----------------------------------------\n",
		"--Modis Processing chain--OpenMP code----\n",
		"-----------------------------------------\n",
		"./myd09ga inQA inB3\n",
		"\tout\n",
		"-----------------------------------------\n",
		"inQA\t\tModis myd09GA QC_500m_1\n",
		"inB3\t\tModis myd09GA Band3\n",
		"out\tQA corrected B3 output [-]\n",
	};
	size_t i;
	for (i = 0; i < sizeof help_lines / sizeof help_lines[0]; i++)
		fputs(help_lines[i], stdout);
}
/* myd09ga: mask a MYD09GA Band-3 raster by its 500m QA flags.
 * Pixels whose MODLAND QA class or Band-3 quality class is 0 keep their
 * reflectance; all others are set to the fill value -28768. */
int main(int argc, char *argv[])
{
	if (argc < 4) {
		usage();
		return (EXIT_FAILURE);
	}
	char *inB = argv[1];  /* QC_500m_1 (QA) input */
	char *inB3 = argv[2]; /* Band 3 input */
	char *outF = argv[3]; /* output file */
	/* Loading the input files */
	GDALAllRegister();
	GDALDatasetH hD = GDALOpen(inB, GA_ReadOnly);   /* QA */
	GDALDatasetH hD3 = GDALOpen(inB3, GA_ReadOnly); /* B3 */
	if (hD == NULL || hD3 == NULL) {
		printf("One or more input files ");
		printf("could not be loaded\n");
		exit(EXIT_FAILURE);
	}
	/* Output: copy of the B3 dataset, tiled + deflate compressed */
	GDALDriverH hDr3 = GDALGetDatasetDriver(hD3);
	char **options = NULL;
	options = CSLSetNameValue(options, "TILED", "YES");
	options = CSLSetNameValue(options, "COMPRESS", "DEFLATE");
	options = CSLSetNameValue(options, "PREDICTOR", "2");
	GDALDatasetH hDOut = GDALCreateCopy(hDr3, outF, hD3, FALSE, options, NULL, NULL);
	GDALRasterBandH hBOut = GDALGetRasterBand(hDOut, 1);
	/* Loading the file bands */
	GDALRasterBandH hB = GDALGetRasterBand(hD, 1);   /* QA */
	GDALRasterBandH hB3 = GDALGetRasterBand(hD3, 1); /* B3 */
	/* Loading the data in RAM */
	int nX = GDALGetRasterBandXSize(hB3);
	int nY = GDALGetRasterBandYSize(hB3);
	int N = nX * nY;
	unsigned int *l = aui1d(N);
	int *l3 = ai1d(N);
	int *lOut = ai1d(N);
	int rowcol, qa, qa1;
	/* myd09GA QA 500m */
	GDALRasterIO(hB, GF_Read, 0, 0, nX, nY, l, nX, nY, GDT_UInt32, 0, 0);
	/* myd09GA B3 */
	GDALRasterIO(hB3, GF_Read, 0, 0, nX, nY, l3, nX, nY, GDT_Int32, 0, 0);
	#pragma omp parallel for default(none) \
		private(rowcol, qa, qa1) shared(N, l, l3, lOut)
	for (rowcol = 0; rowcol < N; rowcol++) {
		qa = myd09GAa(l[rowcol]);
		/* BUGFIX: the band-3 quality nibble must be decoded from the QA
		 * word l[rowcol]; the original passed the reflectance l3[rowcol],
		 * so the quality test was applied to image data. */
		qa1 = myd09GAc(l[rowcol], 3);
		if (qa == 0 || qa1 == 0) lOut[rowcol] = l3[rowcol];
		else lOut[rowcol] = -28768;
	}
	/* (removed an orphaned "#pragma omp barrier": barriers only bind inside
	 * a parallel region, and the parallel-for above already joins) */
	GDALRasterIO(hBOut, GF_Write, 0, 0, nX, nY, lOut, nX, nY, GDT_Int32, 0, 0);
	free(l);
	free(l3);
	free(lOut);          /* was leaked in the original */
	CSLDestroy(options); /* creation-option list was leaked in the original */
	GDALClose(hD);
	GDALClose(hD3);
	GDALClose(hDOut);
	return (EXIT_SUCCESS);
}
|
RITmain.h | #ifndef RITmain
#define RITmain
#include<vector>
#include<algorithm>
#include<random>
#include<set>
#include<Rcpp.h>
#include<chrono>
#ifdef _OPENMP
#include <omp.h>
#endif
#include<math.h>
#include "RaggedArray.h"
#include "RITaux.h"
using namespace std;
// [[Rcpp::plugins(cpp11)]]
// [[Rcpp::plugins(openmp)]]
// Random Intersection Trees, weighted-sampling variant.
// Grows n_trees random trees; each node is the intersection of a randomly
// drawn observation (weighted by `weights`) with its parent, and surviving
// intersections of size >= min_inter_sz are collected as candidate
// interactions.  Returns the union over all trees.
set<vector<int> > RIT_basic(RaggedArray &x, NumericVector &weights, const int L, const double branch,
    const int depth, const int n_trees, unsigned const int min_inter_sz, const int n_cores, const int n) {
    // Set up parameters
    const int fl_branch=floor(branch);
    const int cl_branch=ceil(branch);
    const double branch_diff=branch-fl_branch;
    // BUGFIX: i1, i2 and i were previously declared here at function scope,
    // making them SHARED across all OpenMP threads in the parallel region
    // below — a data race that could mix observation indices between trees.
    // They are now declared thread-locally inside the loop, matching the
    // (correct) pattern already used in RIT_minhash.

    // Set up vector of seeds for RNG
    vector<unsigned int> seeds(n_cores);
    for (int i=0; i<n_cores; i++) {
        seeds[i] = chrono::high_resolution_clock::now().time_since_epoch().count()*(i+1);
    }
    // Set up output objects
    set<vector<int> > total_candidate_interactions; //union of candidate interactions for all trees
    const int depthFinal = depth - 2;
#ifdef _OPENMP
    omp_set_num_threads(n_cores);
#endif
#pragma omp parallel
    {
        // Set up RNG for each thread
#ifdef _OPENMP
        mt19937_64 mt(seeds[omp_get_thread_num()]); //Use Mersenne Twister as RNG
#else
        mt19937_64 mt(seeds[0]); //Use Mersenne Twister as RNG
#endif
        discrete_distribution<int> r_obs(weights.begin(), weights.end());
        uniform_real_distribution<> r_unif(0,1); //use for random number of branches
#pragma omp for schedule(static) nowait
        for (int tree = 0; tree < n_trees; tree++) {
            set<vector<int> > candidate_interactions; //set of candidate interactions for each tree
            vector<int> root;
            // first intersection computed by walking along arrays as sets will be of
            // similar size
            int i1 = r_obs(mt); // thread-private observation indices (see BUGFIX note)
            int i2 = r_obs(mt);
            set_intersection(x.begin(i1), x.end(i1), x.begin(i2), x.end(i2), back_inserter(root));
            if (root.size() >= min_inter_sz) {
                // interactions must have size at least min_inter_sz
                if ((root.size() > min_inter_sz) && (depth > 2)) {
                    // depth >= 3
                    // Only run this code when the initial intersection produces an
                    // interaction of size greater than min_inter_sz
                    // initialise parents
                    vector<RaggedArray> parents(depthFinal);
                    parents[0].push_back(root.begin(), root.end());
                    for (int depth = 1; depth <= depthFinal; depth++) {
                        for (int node = 0; node < parents[depth-1].nrow(); node++) {
                            int cur_branch;
                            if (r_unif(mt) < branch_diff) {
                                cur_branch=cl_branch;
                            } //if random number in (0,1) is less than decimal part of branch
                            else {
                                cur_branch=fl_branch;
                            } //if random number in (0,1) is greater than decimal part of branch
                            for (int k = 0; k < cur_branch; k++) {
                                int i = r_obs(mt); // thread-private draw
                                vector<int> temp_interaction = binary_intersect(x.begin(i), x.end(i),parents[depth-1].begin(node), parents[depth-1].end(node));
                                if (temp_interaction.size() >= min_inter_sz) {
                                    if ((depth == depthFinal) || (temp_interaction.size() == min_inter_sz)) {
                                        candidate_interactions.insert(temp_interaction);
                                    }
                                    else {
                                        parents[depth].push_back(temp_interaction.begin(), temp_interaction.end());
                                    }
                                }
                            }
                        }
                    }
                }
                else {
                    candidate_interactions.insert(root);
                }
            }
#pragma omp critical(update_total_candidate_interactions)
            {
                total_candidate_interactions.insert(candidate_interactions.begin(), candidate_interactions.end());
            }
        }
    }
    return total_candidate_interactions;
}
// [[Rcpp::plugins(cpp11)]]
// [[Rcpp::plugins(openmp)]]
// Random Intersection Trees, min-hash variant.
// Like RIT_basic, but observations are drawn uniformly from [0, n) and every
// candidate intersection is additionally screened by an estimated class-0
// prevalence (PrevEst over the min-hash table H0t with L hash functions):
// only interactions with prevalence below theta0 are kept or expanded.
// Returns the union of candidate interactions over all trees.
// NOTE(review): theta1 is accepted but not referenced in this body — confirm
// whether it is intentionally unused here.
set<vector<int> > RIT_minhash(RaggedArray &x, const int L, const double branch, const int depth, const int n_trees,
    const double theta0, const double theta1, unsigned const int min_inter_sz, const int n_cores, const int n, int** H0t, const double n0_plus_1_over_n0,
    const double recip_n0_plus_1) {
    // Set up parameters
    const int fl_branch=floor(branch);
    const int cl_branch=ceil(branch);
    const double branch_diff=branch-fl_branch;
    // Set up vector of seeds for RNG
    // (one seed per worker thread; each thread owns an independent generator)
    vector<unsigned int> seeds(n_cores);
    for (int i=0; i<n_cores; i++) {
        seeds[i] = chrono::high_resolution_clock::now().time_since_epoch().count()*(i+1);
    }
    // Set up output objects
    set<vector<int> > total_candidate_interactions; //union of candidate interactions for all trees
    const int depthFinal = depth - 2;
#ifdef _OPENMP
    omp_set_num_threads(n_cores);
#endif
#pragma omp parallel
    {
        // Set up RNG for each thread
#ifdef _OPENMP
        mt19937_64 mt(seeds[omp_get_thread_num()]); //Use Mersenne Twister as RNG
#else
        mt19937_64 mt(seeds[0]); //Use Mersenne Twister as RNG
#endif
        uniform_int_distribution<int> r_obs(0,n-1);
        uniform_real_distribution<> r_unif(0,1); //use for random number of branches
#pragma omp for schedule(static) nowait
        for (int tree = 0; tree < n_trees; tree++) {
            set<vector<int> > candidate_interactions; //set of candidate interactions from each tree
            vector<int> root;
            // first intersection computed by walking along arrays as sets will be of similar size
            int i1, i2;
            i1 = r_obs(mt); i2 = r_obs(mt);
            set_intersection(x.begin(i1), x.end(i1), x.begin(i2), x.end(i2), back_inserter(root));
            if ((root.size() >= min_inter_sz) && (PrevEst(root, H0t, L, n0_plus_1_over_n0, recip_n0_plus_1) < theta0)) {
                // Class 0 prevalence must be low
                // interactions must have size at least min_inter_sz
                if ((root.size() > min_inter_sz) && (depth > 2)) {
                    // depth >= 3
                    // Only run this code when the initial intersection produces an interaction of size greater than min_inter_sz
                    // initialise parents
                    vector<RaggedArray> parents(depthFinal);
                    parents[0].push_back(root.begin(), root.end());
                    // NOTE: the loop variable deliberately shadows the `depth` parameter
                    for (int depth = 1; depth <= depthFinal; depth++) {
                        for (int node = 0; node < parents[depth-1].nrow(); node++) {
                            int cur_branch;
                            if (r_unif(mt) < branch_diff) {
                                cur_branch=cl_branch;
                            } //if random number in (0,1) is less than decimal part of branch
                            else {
                                cur_branch=fl_branch;
                            } //if random number in (0,1) is greater than decimal part of branch
                            for (int k = 0; k < cur_branch; k++) {
                                int i = r_obs(mt);
                                vector<int> temp_interaction = binary_intersect(x.begin(i), x.end(i),parents[depth-1].begin(node), parents[depth-1].end(node));
                                if ((temp_interaction.size() >= min_inter_sz) && (PrevEst(temp_interaction, H0t, L, n0_plus_1_over_n0, recip_n0_plus_1)< theta0)) {
                                    if ((depth == depthFinal) || (temp_interaction.size() == min_inter_sz)) {
                                        candidate_interactions.insert(temp_interaction);
                                    }
                                    else {
                                        parents[depth].push_back(temp_interaction.begin(), temp_interaction.end());
                                    }
                                }
                            }
                        }
                    }
                }
                else {
                    candidate_interactions.insert(root);
                }
            }
            // merge this tree's candidates into the shared result set
#pragma omp critical(update_total_candidate_interactions)
            {
                total_candidate_interactions.insert(candidate_interactions.begin(), candidate_interactions.end());
            }
        }
    }
    return total_candidate_interactions;
}
#endif
|
ransac.h | #ifndef _EAGLEEYE_RANSAC_H_
#define _EAGLEEYE_RANSAC_H_
#include "eagleeye/common/EagleeyeMacro.h"
#include "eagleeye/common/EagleeyeLog.h"
#include "eagleeye/common/EagleeyeTime.h"
#include "eagleeye/basic/Matrix.h"
#include "eagleeye/basic/MatrixMath.h"
#include <iostream>
#include <cmath>
#include <string>
#include <random>
#include <memory>
#include <algorithm>
#include <vector>
#ifdef EAGLEEYE_OPENMP
#include <omp.h>
#endif
namespace eagleeye{
// Each abstract model is made of abstract parameters
// Could be anything from a point (that make a 2D line or 3D plane or image correspondences) to a line
class AbstractParameter{
public:
    // Virtual destructor makes the hierarchy polymorphic so concrete
    // parameter types can be destroyed safely through a base pointer.
    virtual ~AbstractParameter() {}
};
// Abstract model type for generic RANSAC model fitting
// Base class for RANSAC-fittable models.
//   t_NumParams: minimum number of parameters required to define this model
//   t_ModelSize: model size constant exposed to users of the model
// Subclasses implement build() (fit a model from a minimal sample) and
// evaluate() (score candidate models against all data).
// NOTE(review): uses std::array but <array> is not included directly above —
// presumably pulled in transitively; confirm on stricter toolchains.
template <int t_NumParams, int t_ModelSize> /* Minimum number of parameters required to define this model*/
class AbstractModel{
public:
    static const int ModelParamNum = t_NumParams;
    static const int ModelSize = t_ModelSize;
    AbstractModel(){
    }
protected:
    // the minimal parameter sample this model was built from
    std::array<std::shared_ptr<AbstractParameter>, t_NumParams> m_MinModelParams;
public:
    // Fit a model from InputParams and return it as a Matrix<float>.
    virtual Matrix<float> build(const std::vector<std::shared_ptr<AbstractParameter>> &InputParams) = 0;
    // Score each candidate model in MultiH against EvaluateParams; returns
    // per-model scores (inlier fractions, judged against Threshold).
    virtual Matrix<float> evaluate(const std::vector<std::shared_ptr<AbstractParameter>> &EvaluateParams, Matrix<float> MultiH,float Threshold) = 0;
    virtual std::array<std::shared_ptr<AbstractParameter>, t_NumParams> getModelParams(void) { return m_MinModelParams; };
};
// T - AbstractModel
// Generic RANSAC estimator over a model type T (an AbstractModel subclass).
// Usage: initialize(threshold, maxIterations); estimate(data); getBestModel().
template <class T>
class RANSAC{
private:
    const int t_NumParams = T::ModelParamNum;
    std::vector<std::shared_ptr<AbstractParameter>> m_Data; // All the data
    Matrix<float> m_BestModel; // Pointer to the best model, valid only after Estimate() is called
    std::vector<std::shared_ptr<AbstractParameter>> m_BestInliers; // NOTE(review): never filled by estimate() below
    int m_MaxIterations; // Number of iterations before termination
    float m_Threshold; // The threshold for computing model consensus
    float m_BestModelScore; // The score of the best model
    int m_BestModelIdx;
    std::vector<std::mt19937> m_RandEngines; // Mersenne twister high quality RNG that support *OpenMP* multi-threading
    std::shared_ptr<T> m_RandomModel;
    int m_nThreads;
public:
    RANSAC(void){
        // One RNG per usable thread so parallel iterations never share state.
#ifdef EAGLEEYE_OPENMP
        m_nThreads = std::max(1, omp_get_max_threads());
#else
        m_nThreads = 1;
#endif
        EAGLEEYE_LOGD("RANSAC Maximum usable threads: %d", m_nThreads);
        for (int i = 0; i < m_nThreads; ++i){
            std::random_device SeedDevice;
            m_RandEngines.push_back(std::mt19937(SeedDevice()));
        }
        m_RandomModel = std::make_shared<T>();
        reset();
    };
    virtual ~RANSAC(void) {};
    void reset(void){
        // Clear sampled models, etc. and prepare for next call. Reset RANSAC estimator state
        // NOTE(review): m_BestModel/m_BestInliers are not cleared here — the
        // previous best model remains visible after reset; confirm intended.
        m_Data.clear();
        m_BestModelIdx = -1;
        m_BestModelScore = 0.0;
    };
    void initialize(float Threshold, int MaxIterations = 1000){
        m_Threshold = Threshold;
        m_MaxIterations = MaxIterations;
    };
    Matrix<float> getBestModel() { return m_BestModel; };
    const std::vector<std::shared_ptr<AbstractParameter>>& getBestInliers(void) { return m_BestInliers; };
    // Run RANSAC: sample m_MaxIterations minimal sets (in parallel), build a
    // candidate model from each, batch-evaluate them, and keep the highest-
    // scoring one.  Returns false when there are too few data points.
    bool estimate(const std::vector<std::shared_ptr<AbstractParameter>> &Data){
        // NOTE(review): signed/unsigned comparison — Data.size() is size_t
        if (Data.size() <= t_NumParams){
            EAGLEEYE_LOGE("RANSAC - Number of data points is too less. Not doing anything.");
            return false;
        }
        m_Data = Data;
        int DataSize = m_Data.size();
        std::vector<Matrix<float>> ransac_models(m_MaxIterations);
        EAGLEEYE_TIME_START(ini_ransac_models);
#ifdef EAGLEEYE_OPENMP
        omp_set_dynamic(0); // Explicitly disable dynamic teams
        omp_set_num_threads(m_nThreads);
#pragma omp parallel for
#endif
        for(int i=0; i<m_MaxIterations; ++i){
#ifdef EAGLEEYE_OPENMP
            int thread_id = omp_get_thread_num();
#else
            int thread_id = 0;
#endif
            // Select t_NumParams random samples
            std::vector<std::shared_ptr<AbstractParameter>> RandomSamples(t_NumParams);
            std::vector<std::shared_ptr<AbstractParameter>> RemainderSamples = m_Data; // Without the chosen random samples
            std::shuffle(RemainderSamples.begin(), RemainderSamples.end(), m_RandEngines[thread_id]); // To avoid picking the same element more than once
            std::copy(RemainderSamples.begin(), RemainderSamples.begin() + t_NumParams, RandomSamples.begin());
            Matrix<float> M = m_RandomModel->build(RandomSamples);
            ransac_models[i] = M.flatten();
        }
        EAGLEEYE_TIME_END(ini_ransac_models);
        // Score every candidate model in one batched call.
        Matrix<float> ransac_models_mat = concat(ransac_models, 0);
        Matrix<float> ransac_inlier_fractions = m_RandomModel->evaluate(m_Data, ransac_models_mat, m_Threshold);
        for (int i = 0; i < m_MaxIterations; ++i){
            // Check if the sampled model is the best so far
            if (ransac_inlier_fractions.at(0,i) > m_BestModelScore){
                m_BestModelScore = ransac_inlier_fractions.at(0,i);
                // NOTE(review): Range(0,9) assumes a flattened 3x3 model (9
                // values, e.g. a homography) — confirm for other model types.
                m_BestModel = ransac_models_mat(Range(i,i+1),Range(0,9));
            }
        }
        m_BestModel = m_BestModel.clone();
        EAGLEEYE_LOGD("BestInlierFraction: %f",m_BestModelScore);
        reset();
        return true;
    };
};
}
#endif |
hermm_c_dia_n_lo_row_trans.c | #include "alphasparse/kernel.h"
#include "alphasparse/util.h"
#include "alphasparse/opt.h"
#ifdef _OPENMP
#include <omp.h>
#endif
#include <memory.h>
#include <stdlib.h>
/*
 * DIA-format sparse-times-dense multiply for a matrix stored by its lower
 * triangle (per the kernel name: hermitian, lower, transposed variant):
 * computes y := alpha * op(A) * x + beta * y, where x and y are dense blocks
 * with `columns` columns and leading dimensions ldx / ldy.
 */
alphasparse_status_t ONAME(const ALPHA_Complex alpha, const ALPHA_SPMAT_DIA *mat, const ALPHA_Complex *x, const ALPHA_INT columns, const ALPHA_INT ldx, const ALPHA_Complex beta, ALPHA_Complex *y, const ALPHA_INT ldy)
{
    ALPHA_INT num_threads = alpha_get_thread_num();

    /* Pass 1: scale the whole output block by beta, rows in parallel. */
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_threads)
#endif
    for (ALPHA_INT r = 0; r < mat->rows; r++)
        for(ALPHA_INT c = 0; c < columns; c++)
            alpha_mul(y[index2(r,c,ldy)],y[index2(r,c,ldy)],beta);

    /* Pass 2: accumulate alpha*op(A)*x. Threads partition the columns of
     * x/y via cross_block_low/high, so no two threads touch the same y
     * entry and no locking is needed. */
#ifdef _OPENMP
#pragma omp parallel num_threads(num_threads)
#endif
    {
        ALPHA_INT tid = alpha_get_thread_id();
        ALPHA_INT bcl = cross_block_low(tid,num_threads,columns);
        ALPHA_INT bch = cross_block_high(tid,num_threads,columns);
        for(ALPHA_INT di = 0; di < mat->ndiag;++di){
            ALPHA_INT d = mat->distance[di];
            if(d < 0){
                /* Strictly-lower diagonal: each stored entry (ar,ac) is
                 * applied once directly and once mirrored to (ac,ar). */
                ALPHA_INT ars = alpha_max(0,-d);
                ALPHA_INT acs = alpha_max(0,d);
                ALPHA_INT an = alpha_min(mat->rows - ars,mat->cols - acs);
                for(ALPHA_INT i = 0; i < an; ++i){
                    ALPHA_INT ar = ars + i;
                    ALPHA_INT ac = acs + i;
                    ALPHA_Complex val,val_c;
                    alpha_mul(val,mat->values[index2(di,ar,mat->lval)],alpha);
                    /* alpha_mul_2c presumably multiplies using the conjugate
                     * of the stored entry (hermitian mirror) — confirm. */
                    alpha_mul_2c(val_c,mat->values[index2(di,ar,mat->lval)],alpha);
                    for(ALPHA_INT bc = bcl;bc < bch;++bc){
                        alpha_madde(y[index2(ar,bc,ldy)],val_c,x[index2(ac,bc,ldx)]);
                        alpha_madde(y[index2(ac,bc,ldy)],val,x[index2(ar,bc,ldx)]);
                    }
                }
            }
            if(d == 0){
                /* Main diagonal: applied once, no mirror term. */
                for(ALPHA_INT r = 0; r < mat->rows; ++r){
                    /* NOTE(review): declared ALPHA_Number here but
                     * ALPHA_Complex in the d<0 branch — presumably the same
                     * type in complex builds; confirm the typedefs agree. */
                    ALPHA_Number val;
                    alpha_mul(val,mat->values[index2(di,r,mat->lval)],alpha);
                    for(ALPHA_INT bc = bcl;bc < bch;++bc){
                        alpha_madde(y[index2(r,bc,ldy)],val,x[index2(r,bc,ldx)]);
                    }
                }
            }
        }
    }
    return ALPHA_SPARSE_STATUS_SUCCESS;
}
|
threadprivate.c | #include <stdio.h>
#ifdef _OPENMP
#include <omp.h>
#endif
int counter=0;
#pragma omp threadprivate(counter)
/*
 * Demonstrates #pragma omp threadprivate: each thread increments its own
 * private copy of the global 'counter', then reports it.
 *
 * Fix: the original parallel-for carried an 'ordered' clause although the
 * loop body contains no '#pragma omp ordered' region; the clause only
 * constrained scheduling for no benefit, so it is removed.
 */
int main(void)
{
    int i;

    /* Each thread counts the iterations it executes in its own copy. */
#pragma omp parallel for
    for (i = 0; i < 100; i++)
        counter++;

    /* Every thread prints how many iterations it ran; the values sum to 100
     * (a single line "counter=100" when compiled without OpenMP). */
#pragma omp parallel
    printf("counter=%d\n", counter);

    return 0;
}
|
quick_tasks.c | #include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <omp.h>
/* OpenMP Parallel Quicksort Using Tasking
*
* @author: ANDREW VAILLANCOURT
* 2019
*/
/*
 * Hoare-style partition of data[p..r] around the pivot data[p].
 * On return the pivot sits at the returned index l, with every element of
 * data[p..l-1] <= pivot and every element of data[l+1..r] > pivot.
 *
 * Fix: the original wrapped the body in 'while (1)' although the body
 * unconditionally returns on its first pass — the loop was dead code and
 * obscured the (always taken) return path.
 */
int partition(int p, int r, int *data)
{
    int x = data[p]; /* pivot value */
    int k = p;       /* scans left-to-right for elements > pivot */
    int l = r + 1;   /* scans right-to-left for elements <= pivot */
    int t;

    do
        k++;
    while ((data[k] <= x) && (k < r));
    do
        l--;
    while (data[l] > x);
    while (k < l) {
        /* Swap the out-of-place pair, then keep scanning inward.
         * The swapped values act as sentinels, bounding the inner scans. */
        t = data[k];
        data[k] = data[l];
        data[l] = t;
        do
            k++;
        while (data[k] <= x);
        do
            l--;
        while (data[l] > x);
    }
    /* Place the pivot at its final position. */
    t = data[p];
    data[p] = data[l];
    data[l] = t;
    return l;
}
/* Plain recursive quicksort over data[p..r] (no tasking); used below the
 * parallel cutoff. */
void seq_quick_sort(int p, int r, int *data)
{
    if (p >= r)
        return;
    int split = partition(p, r, data);
    seq_quick_sort(p, split - 1, data);
    seq_quick_sort(split + 1, r, data);
}
/* Task-parallel quicksort of data[p..r]: ranges smaller than low_limit are
 * sorted sequentially; larger ranges spawn one OpenMP task per half. */
void quick_sort(int p, int r, int *data, int low_limit)
{
    if (p >= r)
        return;
    if ((r - p) < low_limit) {
        /* Below the cutoff the tasking overhead is not worth it. */
        seq_quick_sort(p, r, data);
        return;
    }
    int q = partition(p, r, data);
#pragma omp task firstprivate(data, low_limit, r, q)
    quick_sort(p, q - 1, data, low_limit);
#pragma omp task firstprivate(data, low_limit, r, q)
    quick_sort(q + 1, r, data, low_limit);
}
/* Entry point for the parallel sort. n is the index of the LAST element
 * (callers pass count-1). The team is created once; 'single nowait' lets one
 * thread seed the recursion while the others pick up the spawned tasks. */
void par_quick_sort (int n, int *data, int low_limit) {
#pragma omp parallel
    {
#pragma omp single nowait
        quick_sort (0, n, data, low_limit);
    }
}
/* Sanity check: print an error for every adjacent pair that is out of
 * order. Silent when data[0..n-1] is sorted ascending. */
void validate_sort(int n, int *data)
{
    for (int idx = 0; idx + 1 < n; idx++) {
        if (data[idx] > data[idx + 1])
            printf("ERROR: Validate failed\n");
    }
}
/*
 * Driver: ./tasks num_elems threshold num_threads
 * Fills an array with rand() values, sorts it with the task-parallel
 * quicksort, prints the elapsed wall time, and validates the result.
 *
 * Fixes: the malloc result was never checked, and the atoi results were
 * used unvalidated (a non-positive num_elems previously reached malloc).
 * Also dropped the unnecessary cast of malloc's return value.
 */
int main(int argc, char *argv[])
{
    int i, n, low_limit, *data;
    double start, end;

    if (argc != 4) {
        printf("./tasks num_elems threshold num_threads\n");
        return 1;
    }
    n = atoi(argv[1]);
    low_limit = atoi(argv[2]);
    int threads = atoi(argv[3]); // Requested number of threads
    if (n <= 0 || threads <= 0) {
        printf("num_elems and num_threads must be positive\n");
        return 1;
    }
    int processors = omp_get_num_procs(); // Available processors
    if (threads > processors) {
        printf("Warning: %d threads requested, will run_omp on %d processors available\n", threads, processors);
    }
    omp_set_num_threads(threads);

    // Generate the array.
    data = malloc(sizeof *data * n);
    if (data == NULL) {
        printf("ERROR: cannot allocate %d elements\n", n);
        return 1;
    }
    for (i = 0; i < n; i++) {
        data[i] = rand();
    }

    start = omp_get_wtime();
    par_quick_sort(n - 1, &data[0], low_limit); /* last index, not count */
    end = omp_get_wtime();
    printf("%.4f\n", end - start);

    validate_sort(n, &data[0]);
    free(data);
    return 0;
}
|
ASTMatchers.h | //===- ASTMatchers.h - Structural query framework ---------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements matchers to be used together with the MatchFinder to
// match AST nodes.
//
// Matchers are created by generator functions, which can be combined in
// a functional in-language DSL to express queries over the C++ AST.
//
// For example, to match a class with a certain name, one would call:
// cxxRecordDecl(hasName("MyClass"))
// which returns a matcher that can be used to find all AST nodes that declare
// a class named 'MyClass'.
//
// For more complicated match expressions we're often interested in accessing
// multiple parts of the matched AST nodes once a match is found. In that case,
// call `.bind("name")` on match expressions that match the nodes you want to
// access.
//
// For example, when we're interested in child classes of a certain class, we
// would write:
// cxxRecordDecl(hasName("MyClass"), has(recordDecl().bind("child")))
// When the match is found via the MatchFinder, a user provided callback will
// be called with a BoundNodes instance that contains a mapping from the
// strings that we provided for the `.bind()` calls to the nodes that were
// matched.
// In the given example, each time our matcher finds a match we get a callback
// where "child" is bound to the RecordDecl node of the matching child
// class declaration.
//
// See ASTMatchersInternal.h for a more in-depth explanation of the
// implementation details of the matcher framework.
//
// See ASTMatchFinder.h for how to use the generated matchers to run over
// an AST.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_ASTMATCHERS_ASTMATCHERS_H
#define LLVM_CLANG_ASTMATCHERS_ASTMATCHERS_H
#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTTypeTraits.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclFriend.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/ParentMapContext.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/LambdaCapture.h"
#include "clang/AST/NestedNameSpecifier.h"
#include "clang/AST/OpenMPClause.h"
#include "clang/AST/OperationKinds.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/StmtObjC.h"
#include "clang/AST/StmtOpenMP.h"
#include "clang/AST/TemplateBase.h"
#include "clang/AST/TemplateName.h"
#include "clang/AST/Type.h"
#include "clang/AST/TypeLoc.h"
#include "clang/ASTMatchers/ASTMatchersInternal.h"
#include "clang/ASTMatchers/ASTMatchersMacros.h"
#include "clang/Basic/AttrKinds.h"
#include "clang/Basic/ExceptionSpecificationType.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Basic/TypeTraits.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Regex.h"
#include <cassert>
#include <cstddef>
#include <iterator>
#include <limits>
#include <string>
#include <utility>
#include <vector>
namespace clang {
namespace ast_matchers {
/// Maps string IDs to AST nodes matched by parts of a matcher.
///
/// The bound nodes are generated by calling \c bind("id") on the node matchers
/// of the nodes we want to access later.
///
/// The instances of BoundNodes are created by \c MatchFinder when the user's
/// callbacks are executed every time a match is found.
class BoundNodes {
public:
  /// Returns the AST node bound to \c ID.
  ///
  /// Returns NULL if there was no node bound to \c ID or if there is a node but
  /// it cannot be converted to the specified type.
  template <typename T>
  const T *getNodeAs(StringRef ID) const {
    return MyBoundNodes.getNodeAs<T>(ID);
  }

  /// Type of mapping from binding identifiers to bound nodes. This type
  /// is an associative container with a key type of \c std::string and a value
  /// type of \c clang::DynTypedNode
  using IDToNodeMap = internal::BoundNodesMap::IDToNodeMap;

  /// Retrieve mapping from binding identifiers to bound nodes.
  const IDToNodeMap &getMap() const {
    return MyBoundNodes.getMap();
  }

private:
  // Only the tree builder may construct instances; users receive them
  // through MatchFinder callbacks.
  friend class internal::BoundNodesTreeBuilder;

  /// Create BoundNodes from a pre-filled map of bindings.
  BoundNodes(internal::BoundNodesMap &MyBoundNodes)
      : MyBoundNodes(MyBoundNodes) {}

  // Owning copy of the bindings (the constructor parameter is copied in).
  internal::BoundNodesMap MyBoundNodes;
};
/// Types of matchers for the top-level classes in the AST class
/// hierarchy.
/// @{
using DeclarationMatcher = internal::Matcher<Decl>;
using StatementMatcher = internal::Matcher<Stmt>;
using TypeMatcher = internal::Matcher<QualType>;
using TypeLocMatcher = internal::Matcher<TypeLoc>;
using NestedNameSpecifierMatcher = internal::Matcher<NestedNameSpecifier>;
using NestedNameSpecifierLocMatcher = internal::Matcher<NestedNameSpecifierLoc>;
using CXXCtorInitializerMatcher = internal::Matcher<CXXCtorInitializer>;
/// @}
/// Matches any node.
///
/// Useful when another matcher requires a child matcher, but there's no
/// additional constraint. This will often be used with an explicit conversion
/// to an \c internal::Matcher<> type such as \c TypeMatcher.
///
/// Example: \c DeclarationMatcher(anything()) matches all declarations, e.g.,
/// \code
/// "int* p" and "void f()" in
/// int* p;
/// void f();
/// \endcode
///
/// Usable as: Any Matcher
inline internal::TrueMatcher anything() { return internal::TrueMatcher(); }
/// Matches the top declaration context.
///
/// Given
/// \code
/// int X;
/// namespace NS {
/// int Y;
/// } // namespace NS
/// \endcode
/// decl(hasDeclContext(translationUnitDecl()))
/// matches "int X", but not "int Y".
extern const internal::VariadicDynCastAllOfMatcher<Decl, TranslationUnitDecl>
translationUnitDecl;
/// Matches typedef declarations.
///
/// Given
/// \code
/// typedef int X;
/// using Y = int;
/// \endcode
/// typedefDecl()
/// matches "typedef int X", but not "using Y = int"
extern const internal::VariadicDynCastAllOfMatcher<Decl, TypedefDecl>
typedefDecl;
/// Matches typedef name declarations.
///
/// Given
/// \code
/// typedef int X;
/// using Y = int;
/// \endcode
/// typedefNameDecl()
/// matches "typedef int X" and "using Y = int"
extern const internal::VariadicDynCastAllOfMatcher<Decl, TypedefNameDecl>
typedefNameDecl;
/// Matches type alias declarations.
///
/// Given
/// \code
/// typedef int X;
/// using Y = int;
/// \endcode
/// typeAliasDecl()
/// matches "using Y = int", but not "typedef int X"
extern const internal::VariadicDynCastAllOfMatcher<Decl, TypeAliasDecl>
typeAliasDecl;
/// Matches type alias template declarations.
///
/// typeAliasTemplateDecl() matches
/// \code
/// template <typename T>
/// using Y = X<T>;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, TypeAliasTemplateDecl>
typeAliasTemplateDecl;
/// Matches AST nodes that were expanded within the main-file.
///
/// Example matches X but not Y
/// (matcher = cxxRecordDecl(isExpansionInMainFile())
/// \code
/// #include <Y.h>
/// class X {};
/// \endcode
/// Y.h:
/// \code
/// class Y {};
/// \endcode
///
/// Usable as: Matcher<Decl>, Matcher<Stmt>, Matcher<TypeLoc>
AST_POLYMORPHIC_MATCHER(isExpansionInMainFile,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, Stmt, TypeLoc)) {
  // True iff the node's expansion location lies in the main file.
  auto &SM = Finder->getASTContext().getSourceManager();
  return SM.isInMainFile(SM.getExpansionLoc(Node.getBeginLoc()));
}
/// Matches AST nodes that were expanded within system-header-files.
///
/// Example matches Y but not X
/// (matcher = cxxRecordDecl(isExpansionInSystemHeader())
/// \code
/// #include <SystemHeader.h>
/// class X {};
/// \endcode
/// SystemHeader.h:
/// \code
/// class Y {};
/// \endcode
///
/// Usable as: Matcher<Decl>, Matcher<Stmt>, Matcher<TypeLoc>
AST_POLYMORPHIC_MATCHER(isExpansionInSystemHeader,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, Stmt, TypeLoc)) {
  // Resolve the expansion location first; an invalid location can never be
  // inside a system header.
  auto &SM = Finder->getASTContext().getSourceManager();
  SourceLocation Loc = SM.getExpansionLoc(Node.getBeginLoc());
  if (Loc.isInvalid())
    return false;
  return SM.isInSystemHeader(Loc);
}
/// Matches AST nodes that were expanded within files whose name is
/// partially matching a given regex.
///
/// Example matches Y but not X
/// (matcher = cxxRecordDecl(isExpansionInFileMatching("AST.*"))
/// \code
/// #include "ASTMatcher.h"
/// class X {};
/// \endcode
/// ASTMatcher.h:
/// \code
/// class Y {};
/// \endcode
///
/// Usable as: Matcher<Decl>, Matcher<Stmt>, Matcher<TypeLoc>
AST_POLYMORPHIC_MATCHER_P(isExpansionInFileMatching,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, Stmt, TypeLoc),
                          std::string, RegExp) {
  auto &SourceManager = Finder->getASTContext().getSourceManager();
  auto ExpansionLoc = SourceManager.getExpansionLoc(Node.getBeginLoc());
  // A node without a valid expansion location cannot be attributed to a file.
  if (ExpansionLoc.isInvalid()) {
    return false;
  }
  // Built-ins and command-line definitions have no backing file entry.
  auto FileEntry =
      SourceManager.getFileEntryForID(SourceManager.getFileID(ExpansionLoc));
  if (!FileEntry) {
    return false;
  }
  auto Filename = FileEntry->getName();
  // NOTE(review): the regex is recompiled on every candidate node; consider
  // caching the compiled llvm::Regex if this matcher shows up in profiles.
  llvm::Regex RE(RegExp);
  return RE.match(Filename);
}
/// Matches statements that are (transitively) expanded from the named macro.
/// Does not match if only part of the statement is expanded from that macro or
/// if different parts of the the statement are expanded from different
/// appearances of the macro.
///
/// FIXME: Change to be a polymorphic matcher that works on any syntactic
/// node. There's nothing `Stmt`-specific about it.
AST_MATCHER_P(Stmt, isExpandedFromMacro, llvm::StringRef, MacroName) {
  // Verifies that the statement' beginning and ending are both expanded from
  // the same instance of the given macro.
  auto& Context = Finder->getASTContext();
  llvm::Optional<SourceLocation> B =
      internal::getExpansionLocOfMacro(MacroName, Node.getBeginLoc(), Context);
  if (!B) return false;
  llvm::Optional<SourceLocation> E =
      internal::getExpansionLocOfMacro(MacroName, Node.getEndLoc(), Context);
  if (!E) return false;
  // Identical expansion locations => one macro instance covers the whole
  // statement, not two separate appearances of the macro.
  return *B == *E;
}
/// Matches declarations.
///
/// Examples matches \c X, \c C, and the friend declaration inside \c C;
/// \code
/// void X();
/// class C {
/// friend X;
/// };
/// \endcode
extern const internal::VariadicAllOfMatcher<Decl> decl;
/// Matches a declaration of a linkage specification.
///
/// Given
/// \code
/// extern "C" {}
/// \endcode
/// linkageSpecDecl()
/// matches "extern "C" {}"
extern const internal::VariadicDynCastAllOfMatcher<Decl, LinkageSpecDecl>
linkageSpecDecl;
/// Matches a declaration of anything that could have a name.
///
/// Example matches \c X, \c S, the anonymous union type, \c i, and \c U;
/// \code
/// typedef int X;
/// struct S {
/// union {
/// int i;
/// } U;
/// };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, NamedDecl> namedDecl;
/// Matches a declaration of label.
///
/// Given
/// \code
/// goto FOO;
/// FOO: bar();
/// \endcode
/// labelDecl()
/// matches 'FOO:'
extern const internal::VariadicDynCastAllOfMatcher<Decl, LabelDecl> labelDecl;
/// Matches a declaration of a namespace.
///
/// Given
/// \code
/// namespace {}
/// namespace test {}
/// \endcode
/// namespaceDecl()
/// matches "namespace {}" and "namespace test {}"
extern const internal::VariadicDynCastAllOfMatcher<Decl, NamespaceDecl>
namespaceDecl;
/// Matches a declaration of a namespace alias.
///
/// Given
/// \code
/// namespace test {}
/// namespace alias = ::test;
/// \endcode
/// namespaceAliasDecl()
/// matches "namespace alias" but not "namespace test"
extern const internal::VariadicDynCastAllOfMatcher<Decl, NamespaceAliasDecl>
namespaceAliasDecl;
/// Matches class, struct, and union declarations.
///
/// Example matches \c X, \c Z, \c U, and \c S
/// \code
/// class X;
/// template<class T> class Z {};
/// struct S {};
/// union U {};
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, RecordDecl> recordDecl;
/// Matches C++ class declarations.
///
/// Example matches \c X, \c Z
/// \code
/// class X;
/// template<class T> class Z {};
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXRecordDecl>
cxxRecordDecl;
/// Matches C++ class template declarations.
///
/// Example matches \c Z
/// \code
/// template<class T> class Z {};
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ClassTemplateDecl>
classTemplateDecl;
/// Matches C++ class template specializations.
///
/// Given
/// \code
/// template<typename T> class A {};
/// template<> class A<double> {};
/// A<int> a;
/// \endcode
/// classTemplateSpecializationDecl()
/// matches the specializations \c A<int> and \c A<double>
extern const internal::VariadicDynCastAllOfMatcher<
Decl, ClassTemplateSpecializationDecl>
classTemplateSpecializationDecl;
/// Matches C++ class template partial specializations.
///
/// Given
/// \code
/// template<class T1, class T2, int I>
/// class A {};
///
/// template<class T, int I>
/// class A<T, T*, I> {};
///
/// template<>
/// class A<int, int, 1> {};
/// \endcode
/// classTemplatePartialSpecializationDecl()
/// matches the specialization \c A<T,T*,I> but not \c A<int,int,1>
extern const internal::VariadicDynCastAllOfMatcher<
Decl, ClassTemplatePartialSpecializationDecl>
classTemplatePartialSpecializationDecl;
/// Matches declarator declarations (field, variable, function
/// and non-type template parameter declarations).
///
/// Given
/// \code
/// class X { int y; };
/// \endcode
/// declaratorDecl()
/// matches \c int y.
extern const internal::VariadicDynCastAllOfMatcher<Decl, DeclaratorDecl>
declaratorDecl;
/// Matches parameter variable declarations.
///
/// Given
/// \code
/// void f(int x);
/// \endcode
/// parmVarDecl()
/// matches \c int x.
extern const internal::VariadicDynCastAllOfMatcher<Decl, ParmVarDecl>
parmVarDecl;
/// Matches C++ access specifier declarations.
///
/// Given
/// \code
/// class C {
/// public:
/// int a;
/// };
/// \endcode
/// accessSpecDecl()
/// matches 'public:'
extern const internal::VariadicDynCastAllOfMatcher<Decl, AccessSpecDecl>
accessSpecDecl;
/// Matches constructor initializers.
///
/// Examples matches \c i(42).
/// \code
/// class C {
/// C() : i(42) {}
/// int i;
/// };
/// \endcode
extern const internal::VariadicAllOfMatcher<CXXCtorInitializer>
cxxCtorInitializer;
/// Matches template arguments.
///
/// Given
/// \code
/// template <typename T> struct C {};
/// C<int> c;
/// \endcode
/// templateArgument()
/// matches 'int' in C<int>.
extern const internal::VariadicAllOfMatcher<TemplateArgument> templateArgument;
/// Matches template name.
///
/// Given
/// \code
/// template <typename T> class X { };
/// X<int> xi;
/// \endcode
/// templateName()
/// matches 'X' in X<int>.
extern const internal::VariadicAllOfMatcher<TemplateName> templateName;
/// Matches non-type template parameter declarations.
///
/// Given
/// \code
/// template <typename T, int N> struct C {};
/// \endcode
/// nonTypeTemplateParmDecl()
/// matches 'N', but not 'T'.
extern const internal::VariadicDynCastAllOfMatcher<Decl,
NonTypeTemplateParmDecl>
nonTypeTemplateParmDecl;
/// Matches template type parameter declarations.
///
/// Given
/// \code
/// template <typename T, int N> struct C {};
/// \endcode
/// templateTypeParmDecl()
/// matches 'T', but not 'N'.
extern const internal::VariadicDynCastAllOfMatcher<Decl, TemplateTypeParmDecl>
templateTypeParmDecl;
/// Matches public C++ declarations.
///
/// Given
/// \code
/// class C {
/// public: int a;
/// protected: int b;
/// private: int c;
/// };
/// \endcode
/// fieldDecl(isPublic())
/// matches 'int a;'
AST_MATCHER(Decl, isPublic) {
  // True iff the declaration's access specifier is 'public'.
  return Node.getAccess() == AS_public;
}
/// Matches protected C++ declarations.
///
/// Given
/// \code
/// class C {
/// public: int a;
/// protected: int b;
/// private: int c;
/// };
/// \endcode
/// fieldDecl(isProtected())
/// matches 'int b;'
AST_MATCHER(Decl, isProtected) {
  // True iff the declaration's access specifier is 'protected'.
  return Node.getAccess() == AS_protected;
}
/// Matches private C++ declarations.
///
/// Given
/// \code
/// class C {
/// public: int a;
/// protected: int b;
/// private: int c;
/// };
/// \endcode
/// fieldDecl(isPrivate())
/// matches 'int c;'
AST_MATCHER(Decl, isPrivate) {
  // True iff the declaration's access specifier is 'private'.
  return Node.getAccess() == AS_private;
}
/// Matches non-static data members that are bit-fields.
///
/// Given
/// \code
/// class C {
/// int a : 2;
/// int b;
/// };
/// \endcode
/// fieldDecl(isBitField())
/// matches 'int a;' but not 'int b;'.
AST_MATCHER(FieldDecl, isBitField) {
  // Delegates to FieldDecl::isBitField().
  return Node.isBitField();
}
/// Matches non-static data members that are bit-fields of the specified
/// bit width.
///
/// Given
/// \code
/// class C {
/// int a : 2;
/// int b : 4;
/// int c : 2;
/// };
/// \endcode
/// fieldDecl(hasBitWidth(2))
/// matches 'int a;' and 'int c;' but not 'int b;'.
AST_MATCHER_P(FieldDecl, hasBitWidth, unsigned, Width) {
  // Non-bit-field members never match; for bit-fields compare the evaluated
  // width of the bit-width expression against Width.
  return Node.isBitField() &&
         Node.getBitWidthValue(Finder->getASTContext()) == Width;
}
/// Matches non-static data members that have an in-class initializer.
///
/// Given
/// \code
/// class C {
/// int a = 2;
/// int b = 3;
/// int c;
/// };
/// \endcode
/// fieldDecl(hasInClassInitializer(integerLiteral(equals(2))))
/// matches 'int a;' but not 'int b;'.
/// fieldDecl(hasInClassInitializer(anything()))
/// matches 'int a;' and 'int b;' but not 'int c;'.
AST_MATCHER_P(FieldDecl, hasInClassInitializer, internal::Matcher<Expr>,
              InnerMatcher) {
  // Fields without an in-class initializer can never match.
  const Expr *Init = Node.getInClassInitializer();
  if (Init == nullptr)
    return false;
  return InnerMatcher.matches(*Init, Finder, Builder);
}
/// Determines whether the function is "main", which is the entry point
/// into an executable program.
AST_MATCHER(FunctionDecl, isMain) {
  // Delegates to FunctionDecl::isMain().
  return Node.isMain();
}
/// Matches the specialized template of a specialization declaration.
///
/// Given
/// \code
/// template<typename T> class A {}; #1
/// template<> class A<int> {}; #2
/// \endcode
/// classTemplateSpecializationDecl(hasSpecializedTemplate(classTemplateDecl()))
/// matches '#2' with classTemplateDecl() matching the class template
/// declaration of 'A' at #1.
AST_MATCHER_P(ClassTemplateSpecializationDecl, hasSpecializedTemplate,
              internal::Matcher<ClassTemplateDecl>, InnerMatcher) {
  // Match against the class template this specialization came from, if any.
  if (const ClassTemplateDecl *Specialized = Node.getSpecializedTemplate())
    return InnerMatcher.matches(*Specialized, Finder, Builder);
  return false;
}
/// Matches a declaration that has been implicitly added
/// by the compiler (eg. implicit default/copy constructors).
AST_MATCHER(Decl, isImplicit) {
  // Delegates to Decl::isImplicit().
  return Node.isImplicit();
}
/// Matches classTemplateSpecializations, templateSpecializationType and
/// functionDecl that have at least one TemplateArgument matching the given
/// InnerMatcher.
///
/// Given
/// \code
/// template<typename T> class A {};
/// template<> class A<double> {};
/// A<int> a;
///
/// template<typename T> f() {};
/// void func() { f<int>(); };
/// \endcode
///
/// \endcode
/// classTemplateSpecializationDecl(hasAnyTemplateArgument(
/// refersToType(asString("int"))))
/// matches the specialization \c A<int>
///
/// functionDecl(hasAnyTemplateArgument(refersToType(asString("int"))))
/// matches the specialization \c f<int>
AST_POLYMORPHIC_MATCHER_P(
    hasAnyTemplateArgument,
    AST_POLYMORPHIC_SUPPORTED_TYPES(ClassTemplateSpecializationDecl,
                                    TemplateSpecializationType,
                                    FunctionDecl),
    internal::Matcher<TemplateArgument>, InnerMatcher) {
  // Succeed if any template argument of the specialization matches; an empty
  // argument list (empty range) never matches.
  ArrayRef<TemplateArgument> List =
      internal::getTemplateSpecializationArgs(Node);
  return matchesFirstInRange(InnerMatcher, List.begin(), List.end(), Finder,
                             Builder);
}
/// Causes all nested matchers to be matched with the specified traversal kind.
///
/// Given
/// \code
/// void foo()
/// {
/// int i = 3.0;
/// }
/// \endcode
/// The matcher
/// \code
/// traverse(TK_IgnoreImplicitCastsAndParentheses,
/// varDecl(hasInitializer(floatLiteral().bind("init")))
/// )
/// \endcode
/// matches the variable declaration with "init" bound to the "3.0".
// Overload for a plain Matcher<T>: wrap it so every nested matcher runs
// under traversal kind TK.
template <typename T>
internal::Matcher<T> traverse(TraversalKind TK,
                              const internal::Matcher<T> &InnerMatcher) {
  return internal::DynTypedMatcher::constructRestrictedWrapper(
             new internal::TraversalMatcher<T>(TK, InnerMatcher),
             InnerMatcher.getID().first)
      .template unconditionalConvertTo<T>();
}

// Overload for a BindableMatcher: same wrapping, but the result stays
// bindable (.bind("id") still works).
template <typename T>
internal::BindableMatcher<T>
traverse(TraversalKind TK, const internal::BindableMatcher<T> &InnerMatcher) {
  return internal::BindableMatcher<T>(
      internal::DynTypedMatcher::constructRestrictedWrapper(
          new internal::TraversalMatcher<T>(TK, InnerMatcher),
          InnerMatcher.getID().first)
          .template unconditionalConvertTo<T>());
}

// Overload for variadic-operator matchers (allOf/anyOf/...): defer the
// wrapping via TraversalWrapper until the final type is known.
template <typename... T>
internal::TraversalWrapper<internal::VariadicOperatorMatcher<T...>>
traverse(TraversalKind TK,
         const internal::VariadicOperatorMatcher<T...> &InnerMatcher) {
  return internal::TraversalWrapper<internal::VariadicOperatorMatcher<T...>>(
      TK, InnerMatcher);
}

// Overload for argument-adapting matcher adaptors (e.g. has(), hasDescendant()).
template <template <typename ToArg, typename FromArg> class ArgumentAdapterT,
          typename T, typename ToTypes>
internal::TraversalWrapper<
    internal::ArgumentAdaptingMatcherFuncAdaptor<ArgumentAdapterT, T, ToTypes>>
traverse(TraversalKind TK, const internal::ArgumentAdaptingMatcherFuncAdaptor<
                               ArgumentAdapterT, T, ToTypes> &InnerMatcher) {
  return internal::TraversalWrapper<
      internal::ArgumentAdaptingMatcherFuncAdaptor<ArgumentAdapterT, T,
                                                   ToTypes>>(TK, InnerMatcher);
}

// Overload for one-parameter polymorphic matchers.
template <template <typename T, typename P1> class MatcherT, typename P1,
          typename ReturnTypesF>
internal::TraversalWrapper<
    internal::PolymorphicMatcherWithParam1<MatcherT, P1, ReturnTypesF>>
traverse(TraversalKind TK, const internal::PolymorphicMatcherWithParam1<
                               MatcherT, P1, ReturnTypesF> &InnerMatcher) {
  return internal::TraversalWrapper<
      internal::PolymorphicMatcherWithParam1<MatcherT, P1, ReturnTypesF>>(
      TK, InnerMatcher);
}

// Overload for two-parameter polymorphic matchers.
template <template <typename T, typename P1, typename P2> class MatcherT,
          typename P1, typename P2, typename ReturnTypesF>
internal::TraversalWrapper<
    internal::PolymorphicMatcherWithParam2<MatcherT, P1, P2, ReturnTypesF>>
traverse(TraversalKind TK, const internal::PolymorphicMatcherWithParam2<
                               MatcherT, P1, P2, ReturnTypesF> &InnerMatcher) {
  return internal::TraversalWrapper<
      internal::PolymorphicMatcherWithParam2<MatcherT, P1, P2, ReturnTypesF>>(
      TK, InnerMatcher);
}
/// Matches expressions that match InnerMatcher after any implicit AST
/// nodes are stripped off.
///
/// Parentheses and explicit casts are not discarded.
/// Given
/// \code
/// class C {};
/// C a = C();
/// C b;
/// C c = b;
/// \endcode
/// The matchers
/// \code
/// varDecl(hasInitializer(ignoringImplicit(cxxConstructExpr())))
/// \endcode
/// would match the declarations for a, b, and c.
/// While
/// \code
/// varDecl(hasInitializer(cxxConstructExpr()))
/// \endcode
/// only match the declarations for b and c.
AST_MATCHER_P(Expr, ignoringImplicit, internal::Matcher<Expr>,
              InnerMatcher) {
  // Run the inner matcher on Expr::IgnoreImplicit() of the node.
  return InnerMatcher.matches(*Node.IgnoreImplicit(), Finder, Builder);
}
/// Matches expressions that match InnerMatcher after any implicit casts
/// are stripped off.
///
/// Parentheses and explicit casts are not discarded.
/// Given
/// \code
/// int arr[5];
/// int a = 0;
/// char b = 0;
/// const int c = a;
/// int *d = arr;
/// long e = (long) 0l;
/// \endcode
/// The matchers
/// \code
/// varDecl(hasInitializer(ignoringImpCasts(integerLiteral())))
/// varDecl(hasInitializer(ignoringImpCasts(declRefExpr())))
/// \endcode
/// would match the declarations for a, b, c, and d, but not e.
/// While
/// \code
/// varDecl(hasInitializer(integerLiteral()))
/// varDecl(hasInitializer(declRefExpr()))
/// \endcode
/// only match the declarations for b, c, and d.
AST_MATCHER_P(Expr, ignoringImpCasts,
              internal::Matcher<Expr>, InnerMatcher) {
  // Run the inner matcher on Expr::IgnoreImpCasts() of the node.
  return InnerMatcher.matches(*Node.IgnoreImpCasts(), Finder, Builder);
}
/// Matches expressions that match InnerMatcher after parentheses and
/// casts are stripped off.
///
/// Implicit and non-C Style casts are also discarded.
/// Given
/// \code
/// int a = 0;
/// char b = (0);
/// void* c = reinterpret_cast<char*>(0);
/// char d = char(0);
/// \endcode
/// The matcher
/// varDecl(hasInitializer(ignoringParenCasts(integerLiteral())))
/// would match the declarations for a, b, c, and d.
/// while
/// varDecl(hasInitializer(integerLiteral()))
/// only match the declaration for a.
AST_MATCHER_P(Expr, ignoringParenCasts, internal::Matcher<Expr>, InnerMatcher) {
  // Run the inner matcher on Expr::IgnoreParenCasts() of the node.
  return InnerMatcher.matches(*Node.IgnoreParenCasts(), Finder, Builder);
}
/// Matches expressions that match InnerMatcher after implicit casts and
/// parentheses are stripped off.
///
/// Explicit casts are not discarded.
/// Given
/// \code
/// int arr[5];
/// int a = 0;
/// char b = (0);
/// const int c = a;
/// int *d = (arr);
/// long e = ((long) 0l);
/// \endcode
/// The matchers
/// varDecl(hasInitializer(ignoringParenImpCasts(integerLiteral())))
/// varDecl(hasInitializer(ignoringParenImpCasts(declRefExpr())))
/// would match the declarations for a, b, c, and d, but not e.
/// while
/// varDecl(hasInitializer(integerLiteral()))
/// varDecl(hasInitializer(declRefExpr()))
/// would only match the declaration for a.
AST_MATCHER_P(Expr, ignoringParenImpCasts,
              internal::Matcher<Expr>, InnerMatcher) {
  // Run the inner matcher on Expr::IgnoreParenImpCasts() of the node.
  return InnerMatcher.matches(*Node.IgnoreParenImpCasts(), Finder, Builder);
}
/// Matches types that match InnerMatcher after any parens are stripped.
///
/// Given
/// \code
/// void (*fp)(void);
/// \endcode
/// The matcher
/// \code
/// varDecl(hasType(pointerType(pointee(ignoringParens(functionType())))))
/// \endcode
/// would match the declaration for fp.
AST_MATCHER_P_OVERLOAD(QualType, ignoringParens, internal::Matcher<QualType>,
                       InnerMatcher, 0) {
  // QualType overload: match against the type with sugar parens removed.
  return InnerMatcher.matches(Node.IgnoreParens(), Finder, Builder);
}
/// Overload \c ignoringParens for \c Expr.
///
/// Given
/// \code
/// const char* str = ("my-string");
/// \endcode
/// The matcher
/// \code
/// implicitCastExpr(hasSourceExpression(ignoringParens(stringLiteral())))
/// \endcode
/// would match the implicit cast resulting from the assignment.
AST_MATCHER_P_OVERLOAD(Expr, ignoringParens, internal::Matcher<Expr>,
                       InnerMatcher, 1) {
  // Expr overload: strip surrounding parentheses, then run the inner matcher.
  return InnerMatcher.matches(*Node.IgnoreParens(), Finder, Builder);
}
/// Matches expressions that are instantiation-dependent even if it is
/// neither type- nor value-dependent.
///
/// In the following example, the expression sizeof(sizeof(T() + T()))
/// is instantiation-dependent (since it involves a template parameter T),
/// but is neither type- nor value-dependent, since the type of the inner
/// sizeof is known (std::size_t) and therefore the size of the outer
/// sizeof is known.
/// \code
/// template<typename T>
/// void f(T x, T y) { sizeof(sizeof(T() + T()); }
/// \endcode
/// expr(isInstantiationDependent()) matches sizeof(sizeof(T() + T())
AST_MATCHER(Expr, isInstantiationDependent) {
  // Delegates to Expr::isInstantiationDependent().
  return Node.isInstantiationDependent();
}
/// Matches expressions that are type-dependent because the template type
/// is not yet instantiated.
///
/// For example, the expressions "x" and "x + y" are type-dependent in
/// the following code, but "y" is not type-dependent:
/// \code
/// template<typename T>
/// void add(T x, int y) {
/// x + y;
/// }
/// \endcode
/// expr(isTypeDependent()) matches x + y
AST_MATCHER(Expr, isTypeDependent) {
  // Delegates to Expr::isTypeDependent().
  return Node.isTypeDependent();
}
/// Matches expression that are value-dependent because they contain a
/// non-type template parameter.
///
/// For example, the array bound of "Chars" in the following example is
/// value-dependent.
/// \code
/// template<int Size> int f() { return Size; }
/// \endcode
/// expr(isValueDependent()) matches return Size
AST_MATCHER(Expr, isValueDependent) {
  // Delegates to Expr::isValueDependent().
  return Node.isValueDependent();
}
/// Matches classTemplateSpecializations, templateSpecializationType and
/// functionDecl where the n'th TemplateArgument matches the given InnerMatcher.
///
/// Given
/// \code
/// template<typename T, typename U> class A {};
/// A<bool, int> b;
/// A<int, bool> c;
///
/// template<typename T> void f() {}
/// void func() { f<int>(); };
/// \endcode
/// classTemplateSpecializationDecl(hasTemplateArgument(
/// 1, refersToType(asString("int"))))
/// matches the specialization \c A<bool, int>
///
/// functionDecl(hasTemplateArgument(0, refersToType(asString("int"))))
/// matches the specialization \c f<int>
AST_POLYMORPHIC_MATCHER_P2(
    hasTemplateArgument,
    AST_POLYMORPHIC_SUPPORTED_TYPES(ClassTemplateSpecializationDecl,
                                    TemplateSpecializationType,
                                    FunctionDecl),
    unsigned, N, internal::Matcher<TemplateArgument>, InnerMatcher) {
  // An out-of-range index N simply fails to match; otherwise the inner
  // matcher is applied to the N'th template argument.
  ArrayRef<TemplateArgument> Args =
      internal::getTemplateSpecializationArgs(Node);
  return N < Args.size() && InnerMatcher.matches(Args[N], Finder, Builder);
}
/// Matches if the number of template arguments equals \p N.
///
/// Given
/// \code
/// template<typename T> struct C {};
/// C<int> c;
/// \endcode
/// classTemplateSpecializationDecl(templateArgumentCountIs(1))
/// matches C<int>.
AST_POLYMORPHIC_MATCHER_P(
    templateArgumentCountIs,
    AST_POLYMORPHIC_SUPPORTED_TYPES(ClassTemplateSpecializationDecl,
                                    TemplateSpecializationType),
    unsigned, N) {
  // Compare the specialization's argument count against the expected N.
  ArrayRef<TemplateArgument> Args =
      internal::getTemplateSpecializationArgs(Node);
  return Args.size() == N;
}
/// Matches a TemplateArgument that refers to a certain type.
///
/// Given
/// \code
/// struct X {};
/// template<typename T> struct A {};
/// A<X> a;
/// \endcode
/// classTemplateSpecializationDecl(hasAnyTemplateArgument(
/// refersToType(class(hasName("X")))))
/// matches the specialization \c A<X>
AST_MATCHER_P(TemplateArgument, refersToType,
              internal::Matcher<QualType>, InnerMatcher) {
  // Only type template arguments can match; other kinds fail immediately.
  return Node.getKind() == TemplateArgument::Type &&
         InnerMatcher.matches(Node.getAsType(), Finder, Builder);
}
/// Matches a TemplateArgument that refers to a certain template.
///
/// Given
/// \code
/// template<template <typename> class S> class X {};
/// template<typename T> class Y {};
/// X<Y> xi;
/// \endcode
/// classTemplateSpecializationDecl(hasAnyTemplateArgument(
/// refersToTemplate(templateName())))
/// matches the specialization \c X<Y>
AST_MATCHER_P(TemplateArgument, refersToTemplate,
              internal::Matcher<TemplateName>, InnerMatcher) {
  // Only template-template arguments can match; other kinds fail immediately.
  return Node.getKind() == TemplateArgument::Template &&
         InnerMatcher.matches(Node.getAsTemplate(), Finder, Builder);
}
/// Matches a canonical TemplateArgument that refers to a certain
/// declaration.
///
/// Given
/// \code
/// struct B { int next; };
/// template<int(B::*next_ptr)> struct A {};
/// A<&B::next> a;
/// \endcode
/// classTemplateSpecializationDecl(hasAnyTemplateArgument(
/// refersToDeclaration(fieldDecl(hasName("next")))))
/// matches the specialization \c A<&B::next> with \c fieldDecl(...) matching
/// \c B::next
AST_MATCHER_P(TemplateArgument, refersToDeclaration,
              internal::Matcher<Decl>, InnerMatcher) {
  // Non-declaration arguments never match.
  if (Node.getKind() != TemplateArgument::Declaration)
    return false;
  return InnerMatcher.matches(*Node.getAsDecl(), Finder, Builder);
}
/// Matches a sugar TemplateArgument that refers to a certain expression.
///
/// Given
/// \code
/// struct B { int next; };
/// template<int(B::*next_ptr)> struct A {};
/// A<&B::next> a;
/// \endcode
/// templateSpecializationType(hasAnyTemplateArgument(
/// isExpr(hasDescendant(declRefExpr(to(fieldDecl(hasName("next"))))))))
/// matches the specialization \c A<&B::next> with \c fieldDecl(...) matching
/// \c B::next
AST_MATCHER_P(TemplateArgument, isExpr, internal::Matcher<Expr>, InnerMatcher) {
  // Only expression template arguments can match.
  if (Node.getKind() != TemplateArgument::Expression)
    return false;
  return InnerMatcher.matches(*Node.getAsExpr(), Finder, Builder);
}
/// Matches a TemplateArgument that is an integral value.
///
/// Given
/// \code
/// template<int T> struct C {};
/// C<42> c;
/// \endcode
/// classTemplateSpecializationDecl(
/// hasAnyTemplateArgument(isIntegral()))
/// matches the implicit instantiation of C in C<42>
/// with isIntegral() matching 42.
AST_MATCHER(TemplateArgument, isIntegral) {
  // True iff this argument is an integral value (e.g. the 42 in C<42>).
  return TemplateArgument::Integral == Node.getKind();
}
/// Matches a TemplateArgument that refers to an integral type.
///
/// Given
/// \code
/// template<int T> struct C {};
/// C<42> c;
/// \endcode
/// classTemplateSpecializationDecl(
/// hasAnyTemplateArgument(refersToIntegralType(asString("int"))))
/// matches the implicit instantiation of C in C<42>.
AST_MATCHER_P(TemplateArgument, refersToIntegralType,
              internal::Matcher<QualType>, InnerMatcher) {
  // Match the type of an integral template argument (e.g. 'int' in C<42>).
  return Node.getKind() == TemplateArgument::Integral &&
         InnerMatcher.matches(Node.getIntegralType(), Finder, Builder);
}
/// Matches a TemplateArgument of integral type with a given value.
///
/// Note that 'Value' is a string as the template argument's value is
/// an arbitrary precision integer. 'Value' must be equal to the canonical
/// representation of that integral value in base 10.
///
/// Given
/// \code
/// template<int T> struct C {};
/// C<42> c;
/// \endcode
/// classTemplateSpecializationDecl(
/// hasAnyTemplateArgument(equalsIntegralValue("42")))
/// matches the implicit instantiation of C in C<42>.
AST_MATCHER_P(TemplateArgument, equalsIntegralValue,
              std::string, Value) {
  if (Node.getKind() != TemplateArgument::Integral)
    return false;
  // Compare against the canonical base-10 rendering of the argument's value.
  return Value == Node.getAsIntegral().toString(10);
}
/// Matches an Objective-C autorelease pool statement.
///
/// Given
/// \code
/// @autoreleasepool {
/// int x = 0;
/// }
/// \endcode
/// autoreleasePoolStmt(stmt()) matches the declaration of "x"
/// inside the autorelease pool.
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
ObjCAutoreleasePoolStmt> autoreleasePoolStmt;
/// Matches any value declaration.
///
/// Example matches A, B, C and F
/// \code
/// enum X { A, B, C };
/// void F();
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ValueDecl> valueDecl;
/// Matches C++ constructor declarations.
///
/// Example matches Foo::Foo() and Foo::Foo(int)
/// \code
/// class Foo {
/// public:
/// Foo();
/// Foo(int);
/// int DoSomething();
/// };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXConstructorDecl>
cxxConstructorDecl;
/// Matches explicit C++ destructor declarations.
///
/// Example matches Foo::~Foo()
/// \code
/// class Foo {
/// public:
/// virtual ~Foo();
/// };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXDestructorDecl>
cxxDestructorDecl;
/// Matches enum declarations.
///
/// Example matches X
/// \code
/// enum X {
/// A, B, C
/// };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, EnumDecl> enumDecl;
/// Matches enum constants.
///
/// Example matches A, B, C
/// \code
/// enum X {
/// A, B, C
/// };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, EnumConstantDecl>
enumConstantDecl;
/// Matches tag declarations.
///
/// Example matches X, Z, U, S, E
/// \code
/// class X;
/// template<class T> class Z {};
/// struct S {};
/// union U {};
/// enum E {
/// A, B, C
/// };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, TagDecl> tagDecl;
/// Matches method declarations.
///
/// Example matches y
/// \code
/// class X { void y(); };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXMethodDecl>
cxxMethodDecl;
/// Matches conversion operator declarations.
///
/// Example matches the operator.
/// \code
/// class X { operator int() const; };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXConversionDecl>
cxxConversionDecl;
/// Matches user-defined and implicitly generated deduction guide.
///
/// Example matches the deduction guide.
/// \code
/// template<typename T>
/// class X { X(int) };
/// X(int) -> X<int>;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXDeductionGuideDecl>
cxxDeductionGuideDecl;
/// Matches variable declarations.
///
/// Note: this does not match declarations of member variables, which are
/// "field" declarations in Clang parlance.
///
/// Example matches a
/// \code
/// int a;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, VarDecl> varDecl;
/// Matches field declarations.
///
/// Given
/// \code
/// class X { int m; };
/// \endcode
/// fieldDecl()
/// matches 'm'.
extern const internal::VariadicDynCastAllOfMatcher<Decl, FieldDecl> fieldDecl;
/// Matches indirect field declarations.
///
/// Given
/// \code
/// struct X { struct { int a; }; };
/// \endcode
/// indirectFieldDecl()
/// matches 'a'.
extern const internal::VariadicDynCastAllOfMatcher<Decl, IndirectFieldDecl>
indirectFieldDecl;
/// Matches function declarations.
///
/// Example matches f
/// \code
/// void f();
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, FunctionDecl>
functionDecl;
/// Matches C++ function template declarations.
///
/// Example matches f
/// \code
/// template<class T> void f(T t) {}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, FunctionTemplateDecl>
functionTemplateDecl;
/// Matches friend declarations.
///
/// Given
/// \code
/// class X { friend void foo(); };
/// \endcode
/// friendDecl()
/// matches 'friend void foo()'.
extern const internal::VariadicDynCastAllOfMatcher<Decl, FriendDecl> friendDecl;
/// Matches statements.
///
/// Given
/// \code
/// { ++a; }
/// \endcode
/// stmt()
/// matches both the compound statement '{ ++a; }' and '++a'.
extern const internal::VariadicAllOfMatcher<Stmt> stmt;
/// Matches declaration statements.
///
/// Given
/// \code
/// int a;
/// \endcode
/// declStmt()
/// matches 'int a'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, DeclStmt> declStmt;
/// Matches member expressions.
///
/// Given
/// \code
/// class Y {
/// void x() { this->x(); x(); Y y; y.x(); a; this->b; Y::b; }
/// int a; static int b;
/// };
/// \endcode
/// memberExpr()
/// matches this->x, x, y.x, a, this->b
extern const internal::VariadicDynCastAllOfMatcher<Stmt, MemberExpr> memberExpr;
/// Matches unresolved member expressions.
///
/// Given
/// \code
/// struct X {
/// template <class T> void f();
/// void g();
/// };
/// template <class T> void h() { X x; x.f<T>(); x.g(); }
/// \endcode
/// unresolvedMemberExpr()
/// matches x.f<T>
extern const internal::VariadicDynCastAllOfMatcher<Stmt, UnresolvedMemberExpr>
unresolvedMemberExpr;
/// Matches member expressions where the actual member referenced could not be
/// resolved because the base expression or the member name was dependent.
///
/// Given
/// \code
/// template <class T> void f() { T t; t.g(); }
/// \endcode
/// cxxDependentScopeMemberExpr()
/// matches t.g
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
CXXDependentScopeMemberExpr>
cxxDependentScopeMemberExpr;
/// Matches call expressions.
///
/// Example matches x.y() and y()
/// \code
/// X x;
/// x.y();
/// y();
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CallExpr> callExpr;
/// Matches call expressions which were resolved using ADL.
///
/// Example matches y(x) but not y(42) or NS::y(x).
/// \code
/// namespace NS {
/// struct X {};
/// void y(X);
/// }
///
/// void y(...);
///
/// void test() {
/// NS::X x;
/// y(x); // Matches
/// NS::y(x); // Doesn't match
/// y(42); // Doesn't match
/// using NS::y;
/// y(x); // Found by both unqualified lookup and ADL, doesn't match
/// }
/// \endcode
AST_MATCHER(CallExpr, usesADL) {
  // Delegates to CallExpr::usesADL().
  return Node.usesADL();
}
/// Matches lambda expressions.
///
/// Example matches [&](){return 5;}
/// \code
/// [&](){return 5;}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, LambdaExpr> lambdaExpr;
/// Matches member call expressions.
///
/// Example matches x.y()
/// \code
/// X x;
/// x.y();
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXMemberCallExpr>
cxxMemberCallExpr;
/// Matches ObjectiveC Message invocation expressions.
///
/// The innermost message send invokes the "alloc" class method on the
/// NSString class, while the outermost message send invokes the
/// "initWithString" instance method on the object returned from
/// NSString's "alloc". This matcher should match both message sends.
/// \code
/// [[NSString alloc] initWithString:@"Hello"]
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCMessageExpr>
objcMessageExpr;
/// Matches Objective-C interface declarations.
///
/// Example matches Foo
/// \code
/// @interface Foo
/// @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCInterfaceDecl>
objcInterfaceDecl;
/// Matches Objective-C implementation declarations.
///
/// Example matches Foo
/// \code
/// @implementation Foo
/// @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCImplementationDecl>
objcImplementationDecl;
/// Matches Objective-C protocol declarations.
///
/// Example matches FooDelegate
/// \code
/// @protocol FooDelegate
/// @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCProtocolDecl>
objcProtocolDecl;
/// Matches Objective-C category declarations.
///
/// Example matches Foo (Additions)
/// \code
/// @interface Foo (Additions)
/// @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCCategoryDecl>
objcCategoryDecl;
/// Matches Objective-C category definitions.
///
/// Example matches Foo (Additions)
/// \code
/// @implementation Foo (Additions)
/// @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCCategoryImplDecl>
objcCategoryImplDecl;
/// Matches Objective-C method declarations.
///
/// Example matches both declaration and definition of -[Foo method]
/// \code
/// @interface Foo
/// - (void)method;
/// @end
///
/// @implementation Foo
/// - (void)method {}
/// @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCMethodDecl>
objcMethodDecl;
/// Matches block declarations.
///
/// Example matches the declaration of the nameless block printing an input
/// integer.
///
/// \code
/// myFunc(^(int p) {
/// printf("%d", p);
/// })
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, BlockDecl>
blockDecl;
/// Matches Objective-C instance variable declarations.
///
/// Example matches _enabled
/// \code
/// @implementation Foo {
/// BOOL _enabled;
/// }
/// @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCIvarDecl>
objcIvarDecl;
/// Matches Objective-C property declarations.
///
/// Example matches enabled
/// \code
/// @interface Foo
/// @property BOOL enabled;
/// @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCPropertyDecl>
objcPropertyDecl;
/// Matches Objective-C \@throw statements.
///
/// Example matches \@throw
/// \code
/// @throw obj;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtThrowStmt>
objcThrowStmt;
/// Matches Objective-C @try statements.
///
/// Example matches @try
/// \code
/// @try {}
/// @catch (...) {}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtTryStmt>
objcTryStmt;
/// Matches Objective-C @catch statements.
///
/// Example matches @catch
/// \code
/// @try {}
/// @catch (...) {}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtCatchStmt>
objcCatchStmt;
/// Matches Objective-C @finally statements.
///
/// Example matches @finally
/// \code
/// @try {}
/// @finally {}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtFinallyStmt>
objcFinallyStmt;
/// Matches expressions that introduce cleanups to be run at the end
/// of the sub-expression's evaluation.
///
/// Example matches std::string()
/// \code
/// const std::string str = std::string();
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ExprWithCleanups>
exprWithCleanups;
/// Matches init list expressions.
///
/// Given
/// \code
/// int a[] = { 1, 2 };
/// struct B { int x, y; };
/// B b = { 5, 6 };
/// \endcode
/// initListExpr()
/// matches "{ 1, 2 }" and "{ 5, 6 }"
extern const internal::VariadicDynCastAllOfMatcher<Stmt, InitListExpr>
initListExpr;
/// Matches the syntactic form of init list expressions
/// (if expression have it).
AST_MATCHER_P(InitListExpr, hasSyntacticForm,
              internal::Matcher<Expr>, InnerMatcher) {
  // Not every init list keeps a syntactic form; absent one, fail to match.
  if (const Expr *Syntactic = Node.getSyntacticForm())
    return InnerMatcher.matches(*Syntactic, Finder, Builder);
  return false;
}
/// Matches C++ initializer list expressions.
///
/// Given
/// \code
/// std::vector<int> a({ 1, 2, 3 });
/// std::vector<int> b = { 4, 5 };
/// int c[] = { 6, 7 };
/// std::pair<int, int> d = { 8, 9 };
/// \endcode
/// cxxStdInitializerListExpr()
/// matches "{ 1, 2, 3 }" and "{ 4, 5 }"
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
CXXStdInitializerListExpr>
cxxStdInitializerListExpr;
/// Matches implicit initializers of init list expressions.
///
/// Given
/// \code
/// point ptarray[10] = { [2].y = 1.0, [2].x = 2.0, [0].x = 1.0 };
/// \endcode
/// implicitValueInitExpr()
/// matches "[0].y" (implicitly)
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ImplicitValueInitExpr>
implicitValueInitExpr;
/// Matches paren list expressions.
/// ParenListExprs don't have a predefined type and are used for late parsing.
/// In the final AST, they can be met in template declarations.
///
/// Given
/// \code
/// template<typename T> class X {
/// void f() {
/// X x(*this);
/// int a = 0, b = 1; int i = (a, b);
/// }
/// };
/// \endcode
/// parenListExpr() matches "*this" but NOT matches (a, b) because (a, b)
/// has a predefined type and is a ParenExpr, not a ParenListExpr.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ParenListExpr>
parenListExpr;
/// Matches substitutions of non-type template parameters.
///
/// Given
/// \code
/// template <int N>
/// struct A { static const int n = N; };
/// struct B : public A<42> {};
/// \endcode
/// substNonTypeTemplateParmExpr()
/// matches "N" in the right-hand side of "static const int n = N;"
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
SubstNonTypeTemplateParmExpr>
substNonTypeTemplateParmExpr;
/// Matches using declarations.
///
/// Given
/// \code
/// namespace X { int x; }
/// using X::x;
/// \endcode
/// usingDecl()
/// matches \code using X::x \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, UsingDecl> usingDecl;
/// Matches using namespace declarations.
///
/// Given
/// \code
/// namespace X { int x; }
/// using namespace X;
/// \endcode
/// usingDirectiveDecl()
/// matches \code using namespace X \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, UsingDirectiveDecl>
usingDirectiveDecl;
/// Matches reference to a name that can be looked up during parsing
/// but could not be resolved to a specific declaration.
///
/// Given
/// \code
/// template<typename T>
/// T foo() { T a; return a; }
/// template<typename T>
/// void bar() {
/// foo<T>();
/// }
/// \endcode
/// unresolvedLookupExpr()
/// matches \code foo<T>() \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, UnresolvedLookupExpr>
unresolvedLookupExpr;
/// Matches unresolved using value declarations.
///
/// Given
/// \code
/// template<typename X>
/// class C : private X {
/// using X::x;
/// };
/// \endcode
/// unresolvedUsingValueDecl()
/// matches \code using X::x \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl,
UnresolvedUsingValueDecl>
unresolvedUsingValueDecl;
/// Matches unresolved using value declarations that involve the
/// typename.
///
/// Given
/// \code
/// template <typename T>
/// struct Base { typedef T Foo; };
///
/// template<typename T>
/// struct S : private Base<T> {
/// using typename Base<T>::Foo;
/// };
/// \endcode
/// unresolvedUsingTypenameDecl()
/// matches \code using Base<T>::Foo \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl,
UnresolvedUsingTypenameDecl>
unresolvedUsingTypenameDecl;
/// Matches a constant expression wrapper.
///
/// Example matches the constant in the case statement:
/// (matcher = constantExpr())
/// \code
/// switch (a) {
/// case 37: break;
/// }
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ConstantExpr>
constantExpr;
/// Matches parentheses used in expressions.
///
/// Example matches (foo() + 1)
/// \code
/// int foo() { return 1; }
/// int a = (foo() + 1);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ParenExpr> parenExpr;
/// Matches constructor call expressions (including implicit ones).
///
/// Example matches string(ptr, n) and ptr within arguments of f
/// (matcher = cxxConstructExpr())
/// \code
/// void f(const string &a, const string &b);
/// char *ptr;
/// int n;
/// f(string(ptr, n), ptr);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXConstructExpr>
cxxConstructExpr;
/// Matches unresolved constructor call expressions.
///
/// Example matches T(t) in return statement of f
/// (matcher = cxxUnresolvedConstructExpr())
/// \code
/// template <typename T>
/// void f(const T& t) { return T(t); }
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
CXXUnresolvedConstructExpr>
cxxUnresolvedConstructExpr;
/// Matches implicit and explicit this expressions.
///
/// Example matches the implicit this expression in "return i".
/// (matcher = cxxThisExpr())
/// \code
/// struct foo {
/// int i;
/// int f() { return i; }
/// };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXThisExpr>
cxxThisExpr;
/// Matches nodes where temporaries are created.
///
/// Example matches FunctionTakesString(GetStringByValue())
/// (matcher = cxxBindTemporaryExpr())
/// \code
/// FunctionTakesString(GetStringByValue());
/// FunctionTakesStringByPointer(GetStringPointer());
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXBindTemporaryExpr>
cxxBindTemporaryExpr;
/// Matches nodes where temporaries are materialized.
///
/// Example: Given
/// \code
/// struct T {void func();};
/// T f();
/// void g(T);
/// \endcode
/// materializeTemporaryExpr() matches 'f()' in these statements
/// \code
/// T u(f());
/// g(f());
/// f().func();
/// \endcode
/// but does not match
/// \code
/// f();
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
MaterializeTemporaryExpr>
materializeTemporaryExpr;
/// Matches new expressions.
///
/// Given
/// \code
/// new X;
/// \endcode
/// cxxNewExpr()
/// matches 'new X'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXNewExpr> cxxNewExpr;
/// Matches delete expressions.
///
/// Given
/// \code
/// delete X;
/// \endcode
/// cxxDeleteExpr()
/// matches 'delete X'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXDeleteExpr>
cxxDeleteExpr;
/// Matches noexcept expressions.
///
/// Given
/// \code
/// bool a() noexcept;
/// bool b() noexcept(true);
/// bool c() noexcept(false);
/// bool d() noexcept(noexcept(a()));
/// bool e = noexcept(b()) || noexcept(c());
/// \endcode
/// cxxNoexceptExpr()
/// matches `noexcept(a())`, `noexcept(b())` and `noexcept(c())`.
/// doesn't match the noexcept specifier in the declarations a, b, c or d.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXNoexceptExpr>
cxxNoexceptExpr;
/// Matches array subscript expressions.
///
/// Given
/// \code
/// int i = a[1];
/// \endcode
/// arraySubscriptExpr()
/// matches "a[1]"
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ArraySubscriptExpr>
arraySubscriptExpr;
/// Matches the value of a default argument at the call site.
///
/// Example matches the CXXDefaultArgExpr placeholder inserted for the
/// default value of the second parameter in the call expression f(42)
/// (matcher = cxxDefaultArgExpr())
/// \code
/// void f(int x, int y = 0);
/// f(42);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXDefaultArgExpr>
cxxDefaultArgExpr;
/// Matches overloaded operator calls.
///
/// Note that if an operator isn't overloaded, it won't match. Instead, use
/// binaryOperator matcher.
/// Currently it does not match operators such as new delete.
/// FIXME: figure out why these do not match?
///
/// Example matches both operator<<((o << b), c) and operator<<(o, b)
/// (matcher = cxxOperatorCallExpr())
/// \code
/// ostream &operator<< (ostream &out, int i) { };
/// ostream &o; int b = 1, c = 1;
/// o << b << c;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXOperatorCallExpr>
cxxOperatorCallExpr;
/// Matches expressions.
///
/// Example matches x()
/// \code
/// void f() { x(); }
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, Expr> expr;
/// Matches expressions that refer to declarations.
///
/// Example matches x in if (x)
/// \code
/// bool x;
/// if (x) {}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, DeclRefExpr>
declRefExpr;
/// Matches a reference to an ObjCIvar.
///
/// Example: matches "a" in "init" method:
/// \code
/// @implementation A {
/// NSString *a;
/// }
/// - (void) init {
/// a = @"hello";
/// }
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCIvarRefExpr>
objcIvarRefExpr;
/// Matches a reference to a block.
///
/// Example: matches "^{}":
/// \code
/// void f() { ^{}(); }
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, BlockExpr> blockExpr;
/// Matches if statements.
///
/// Example matches 'if (x) {}'
/// \code
/// if (x) {}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, IfStmt> ifStmt;
/// Matches for statements.
///
/// Example matches 'for (;;) {}'
/// \code
/// for (;;) {}
/// int i[] = {1, 2, 3}; for (auto a : i);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ForStmt> forStmt;
/// Matches the increment statement of a for loop.
///
/// Example:
/// forStmt(hasIncrement(unaryOperator(hasOperatorName("++"))))
/// matches '++x' in
/// \code
/// for (x; x < N; ++x) { }
/// \endcode
AST_MATCHER_P(ForStmt, hasIncrement, internal::Matcher<Stmt>,
              InnerMatcher) {
  // A for loop may omit its increment; match only when one is present.
  if (const Stmt *Inc = Node.getInc())
    return InnerMatcher.matches(*Inc, Finder, Builder);
  return false;
}
/// Matches the initialization statement of a for loop.
///
/// Example:
/// forStmt(hasLoopInit(declStmt()))
/// matches 'int x = 0' in
/// \code
/// for (int x = 0; x < N; ++x) { }
/// \endcode
AST_MATCHER_P(ForStmt, hasLoopInit, internal::Matcher<Stmt>,
              InnerMatcher) {
  // A for loop may omit its init statement; match only when one is present.
  if (const Stmt *Init = Node.getInit())
    return InnerMatcher.matches(*Init, Finder, Builder);
  return false;
}
/// Matches range-based for statements.
///
/// cxxForRangeStmt() matches 'for (auto a : i)'
/// \code
/// int i[] = {1, 2, 3}; for (auto a : i);
/// for(int j = 0; j < 5; ++j);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXForRangeStmt>
cxxForRangeStmt;
/// Matches the initialization statement of a for loop.
///
/// Example:
/// forStmt(hasLoopVariable(anything()))
/// matches 'int x' in
/// \code
/// for (int x : a) { }
/// \endcode
AST_MATCHER_P(CXXForRangeStmt, hasLoopVariable, internal::Matcher<VarDecl>,
              InnerMatcher) {
  // getLoopVariable() may be null (e.g. in dependent contexts); fail then.
  if (const VarDecl *LoopVar = Node.getLoopVariable())
    return InnerMatcher.matches(*LoopVar, Finder, Builder);
  return false;
}
/// Matches the range initialization statement of a for loop.
///
/// Example:
/// forStmt(hasRangeInit(anything()))
/// matches 'a' in
/// \code
/// for (int x : a) { }
/// \endcode
AST_MATCHER_P(CXXForRangeStmt, hasRangeInit, internal::Matcher<Expr>,
              InnerMatcher) {
  // getRangeInit() may be null; match only when the range expression exists.
  if (const Expr *Range = Node.getRangeInit())
    return InnerMatcher.matches(*Range, Finder, Builder);
  return false;
}
/// Matches while statements.
///
/// Given
/// \code
/// while (true) {}
/// \endcode
/// whileStmt()
/// matches 'while (true) {}'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, WhileStmt> whileStmt;
/// Matches do statements.
///
/// Given
/// \code
/// do {} while (true);
/// \endcode
/// doStmt()
/// matches 'do {} while(true)'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, DoStmt> doStmt;
/// Matches break statements.
///
/// Given
/// \code
/// while (true) { break; }
/// \endcode
/// breakStmt()
/// matches 'break'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, BreakStmt> breakStmt;
/// Matches continue statements.
///
/// Given
/// \code
/// while (true) { continue; }
/// \endcode
/// continueStmt()
/// matches 'continue'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ContinueStmt>
continueStmt;
/// Matches return statements.
///
/// Given
/// \code
/// return 1;
/// \endcode
/// returnStmt()
/// matches 'return 1'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ReturnStmt> returnStmt;
/// Matches goto statements.
///
/// Given
/// \code
/// goto FOO;
/// FOO: bar();
/// \endcode
/// gotoStmt()
/// matches 'goto FOO'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, GotoStmt> gotoStmt;
/// Matches label statements.
///
/// Given
/// \code
/// goto FOO;
/// FOO: bar();
/// \endcode
/// labelStmt()
/// matches 'FOO:'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, LabelStmt> labelStmt;
/// Matches address of label statements (GNU extension).
///
/// Given
/// \code
/// FOO: bar();
/// void *ptr = &&FOO;
/// goto *bar;
/// \endcode
/// addrLabelExpr()
/// matches '&&FOO'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, AddrLabelExpr>
addrLabelExpr;
/// Matches switch statements.
///
/// Given
/// \code
/// switch(a) { case 42: break; default: break; }
/// \endcode
/// switchStmt()
/// matches 'switch(a)'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, SwitchStmt> switchStmt;
/// Matches case and default statements inside switch statements.
///
/// Given
/// \code
/// switch(a) { case 42: break; default: break; }
/// \endcode
/// switchCase()
/// matches 'case 42:' and 'default:'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, SwitchCase> switchCase;
/// Matches case statements inside switch statements.
///
/// Given
/// \code
/// switch(a) { case 42: break; default: break; }
/// \endcode
/// caseStmt()
/// matches 'case 42:'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CaseStmt> caseStmt;
/// Matches default statements inside switch statements.
///
/// Given
/// \code
/// switch(a) { case 42: break; default: break; }
/// \endcode
/// defaultStmt()
/// matches 'default:'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, DefaultStmt>
defaultStmt;
/// Matches compound statements.
///
/// Example matches '{}' and '{{}}' in 'for (;;) {{}}'
/// \code
/// for (;;) {{}}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CompoundStmt>
compoundStmt;
/// Matches catch statements.
///
/// \code
/// try {} catch(int i) {}
/// \endcode
/// cxxCatchStmt()
/// matches 'catch(int i)'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXCatchStmt>
cxxCatchStmt;
/// Matches try statements.
///
/// \code
/// try {} catch(int i) {}
/// \endcode
/// cxxTryStmt()
/// matches 'try {}'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXTryStmt> cxxTryStmt;
/// Matches throw expressions.
///
/// \code
/// try { throw 5; } catch(int i) {}
/// \endcode
/// cxxThrowExpr()
/// matches 'throw 5'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXThrowExpr>
cxxThrowExpr;
/// Matches null statements.
///
/// \code
/// foo();;
/// \endcode
/// nullStmt()
/// matches the second ';'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, NullStmt> nullStmt;
/// Matches asm statements.
///
/// \code
/// int i = 100;
/// __asm("mov al, 2");
/// \endcode
/// asmStmt()
/// matches '__asm("mov al, 2")'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, AsmStmt> asmStmt;
/// Matches bool literals.
///
/// Example matches true
/// \code
/// true
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXBoolLiteralExpr>
cxxBoolLiteral;
/// Matches string literals (also matches wide string literals).
///
/// Example matches "abcd", L"abcd"
/// \code
/// char *s = "abcd";
/// wchar_t *ws = L"abcd";
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, StringLiteral>
stringLiteral;
/// Matches character literals (also matches wchar_t).
///
/// Not matching Hex-encoded chars (e.g. 0x1234, which is a IntegerLiteral),
/// though.
///
/// Example matches 'a', L'a'
/// \code
/// char ch = 'a';
/// wchar_t chw = L'a';
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CharacterLiteral>
characterLiteral;
/// Matches integer literals of all sizes / encodings, e.g.
/// 1, 1L, 0x1 and 1U.
///
/// Does not match character-encoded integers such as L'a'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, IntegerLiteral>
integerLiteral;
/// Matches float literals of all sizes / encodings, e.g.
/// 1.0, 1.0f, 1.0L and 1e10.
///
/// Does not match implicit conversions such as
/// \code
/// float a = 10;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, FloatingLiteral>
floatLiteral;
/// Matches imaginary literals, which are based on integer and floating
/// point literals e.g.: 1i, 1.0i
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ImaginaryLiteral>
imaginaryLiteral;
/// Matches user defined literal operator call.
///
/// Example match: "foo"_suffix
extern const internal::VariadicDynCastAllOfMatcher<Stmt, UserDefinedLiteral>
userDefinedLiteral;
/// Matches compound (i.e. non-scalar) literals
///
/// Example match: {1}, (1, 2)
/// \code
/// int array[4] = {1};
/// vector int myvec = (vector int)(1, 2);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CompoundLiteralExpr>
compoundLiteralExpr;
/// Matches nullptr literal.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXNullPtrLiteralExpr>
cxxNullPtrLiteralExpr;
/// Matches GNU __builtin_choose_expr.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ChooseExpr>
chooseExpr;
/// Matches GNU __null expression.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, GNUNullExpr>
gnuNullExpr;
/// Matches atomic builtins.
/// Example matches __atomic_load_n(ptr, 1)
/// \code
/// void foo() { int *ptr; __atomic_load_n(ptr, 1); }
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, AtomicExpr> atomicExpr;
/// Matches statement expression (GNU extension).
///
/// Example match: ({ int X = 4; X; })
/// \code
/// int C = ({ int X = 4; X; });
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, StmtExpr> stmtExpr;
/// Matches binary operator expressions.
///
/// Example matches a || b
/// \code
/// !(a || b)
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, BinaryOperator>
binaryOperator;
/// Matches unary operator expressions.
///
/// Example matches !a
/// \code
/// !a || b
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, UnaryOperator>
unaryOperator;
/// Matches conditional operator expressions.
///
/// Example matches a ? b : c
/// \code
/// (a ? b : c) + 42
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ConditionalOperator>
conditionalOperator;
/// Matches binary conditional operator expressions (GNU extension).
///
/// Example matches a ?: b
/// \code
/// (a ?: b) + 42;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
BinaryConditionalOperator>
binaryConditionalOperator;
/// Matches opaque value expressions. They are used as helpers
/// to reference another expressions and can be met
/// in BinaryConditionalOperators, for example.
///
/// Example matches 'a'
/// \code
/// (a ?: c) + 42;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, OpaqueValueExpr>
opaqueValueExpr;
/// Matches a C++ static_assert declaration.
///
/// Example:
/// staticAssertDecl()
/// matches
/// static_assert(sizeof(S) == sizeof(int))
/// in
/// \code
/// struct S {
/// int x;
/// };
/// static_assert(sizeof(S) == sizeof(int));
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, StaticAssertDecl>
staticAssertDecl;
/// Matches a reinterpret_cast expression.
///
/// Either the source expression or the destination type can be matched
/// using has(), but hasDestinationType() is more specific and can be
/// more readable.
///
/// Example matches reinterpret_cast<char*>(&p) in
/// \code
/// void* p = reinterpret_cast<char*>(&p);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXReinterpretCastExpr>
cxxReinterpretCastExpr;
/// Matches a C++ static_cast expression.
///
/// \see hasDestinationType
/// \see reinterpretCast
///
/// Example:
/// cxxStaticCastExpr()
/// matches
/// static_cast<long>(8)
/// in
/// \code
/// long eight(static_cast<long>(8));
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXStaticCastExpr>
cxxStaticCastExpr;
/// Matches a dynamic_cast expression.
///
/// Example:
/// cxxDynamicCastExpr()
/// matches
/// dynamic_cast<D*>(&b);
/// in
/// \code
/// struct B { virtual ~B() {} }; struct D : B {};
/// B b;
/// D* p = dynamic_cast<D*>(&b);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXDynamicCastExpr>
cxxDynamicCastExpr;
/// Matches a const_cast expression.
///
/// Example: Matches const_cast<int*>(&r) in
/// \code
/// int n = 42;
/// const int &r(n);
/// int* p = const_cast<int*>(&r);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXConstCastExpr>
cxxConstCastExpr;
/// Matches a C-style cast expression.
///
/// Example: Matches (int) 2.2f in
/// \code
/// int i = (int) 2.2f;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CStyleCastExpr>
cStyleCastExpr;
/// Matches explicit cast expressions.
///
/// Matches any cast expression written in user code, whether it be a
/// C-style cast, a functional-style cast, or a keyword cast.
///
/// Does not match implicit conversions.
///
/// Note: the name "explicitCast" is chosen to match Clang's terminology, as
/// Clang uses the term "cast" to apply to implicit conversions as well as to
/// actual cast expressions.
///
/// \see hasDestinationType.
///
/// Example: matches all five of the casts in
/// \code
/// int((int)(reinterpret_cast<int>(static_cast<int>(const_cast<int>(42)))))
/// \endcode
/// but does not match the implicit conversion in
/// \code
/// long ell = 42;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ExplicitCastExpr>
explicitCastExpr;
/// Matches the implicit cast nodes of Clang's AST.
///
/// This matches many different places, including function call return value
/// eliding, as well as any type conversions.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ImplicitCastExpr>
implicitCastExpr;
/// Matches any cast nodes of Clang's AST.
///
/// Example: castExpr() matches each of the following:
/// \code
/// (int) 3;
/// const_cast<Expr *>(SubExpr);
/// char c = 0;
/// \endcode
/// but does not match
/// \code
/// int i = (0);
/// int k = 0;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CastExpr> castExpr;
/// Matches functional cast expressions
///
/// Example: Matches Foo(bar);
/// \code
/// Foo f = bar;
/// Foo g = (Foo) bar;
/// Foo h = Foo(bar);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXFunctionalCastExpr>
cxxFunctionalCastExpr;
/// Matches functional cast expressions having N != 1 arguments
///
/// Example: Matches Foo(bar, bar)
/// \code
/// Foo h = Foo(bar, bar);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXTemporaryObjectExpr>
cxxTemporaryObjectExpr;
/// Matches predefined identifier expressions [C99 6.4.2.2].
///
/// Example: Matches __func__
/// \code
/// printf("%s", __func__);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, PredefinedExpr>
predefinedExpr;
/// Matches C99 designated initializer expressions [C99 6.7.8].
///
/// Example: Matches { [2].y = 1.0, [0].x = 1.0 }
/// \code
/// point ptarray[10] = { [2].y = 1.0, [0].x = 1.0 };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, DesignatedInitExpr>
designatedInitExpr;
/// Matches designated initializer expressions that contain
/// a specific number of designators.
///
/// Example: Given
/// \code
/// point ptarray[10] = { [2].y = 1.0, [0].x = 1.0 };
/// point ptarray2[10] = { [2].y = 1.0, [2].x = 0.0, [0].x = 1.0 };
/// \endcode
/// designatorCountIs(2)
/// matches '{ [2].y = 1.0, [0].x = 1.0 }',
/// but not '{ [2].y = 1.0, [2].x = 0.0, [0].x = 1.0 }'.
AST_MATCHER_P(DesignatedInitExpr, designatorCountIs, unsigned, N) {
  // A DesignatedInitExpr's size() is its number of designators.
  const unsigned NumDesignators = Node.size();
  return NumDesignators == N;
}
/// Matches \c QualTypes in the clang AST.
extern const internal::VariadicAllOfMatcher<QualType> qualType;
/// Matches \c Types in the clang AST.
extern const internal::VariadicAllOfMatcher<Type> type;
/// Matches \c TypeLocs in the clang AST.
extern const internal::VariadicAllOfMatcher<TypeLoc> typeLoc;
/// Matches if any of the given matchers matches.
///
/// Unlike \c anyOf, \c eachOf will generate a match result for each
/// matching submatcher.
///
/// For example, in:
/// \code
/// class A { int a; int b; };
/// \endcode
/// The matcher:
/// \code
/// cxxRecordDecl(eachOf(has(fieldDecl(hasName("a")).bind("v")),
/// has(fieldDecl(hasName("b")).bind("v"))))
/// \endcode
/// will generate two results binding "v", the first of which binds
/// the field declaration of \c a, the second the field declaration of
/// \c b.
///
/// Usable as: Any Matcher
extern const internal::VariadicOperatorMatcherFunc<
2, std::numeric_limits<unsigned>::max()>
eachOf;
/// Matches if any of the given matchers matches.
///
/// Usable as: Any Matcher
extern const internal::VariadicOperatorMatcherFunc<
2, std::numeric_limits<unsigned>::max()>
anyOf;
/// Matches if all given matchers match.
///
/// Usable as: Any Matcher
extern const internal::VariadicOperatorMatcherFunc<
2, std::numeric_limits<unsigned>::max()>
allOf;
/// Matches any node regardless of the submatcher.
///
/// However, \c optionally will retain any bindings generated by the submatcher.
/// Useful when additional information which may or may not be present about a
/// matching node is desired.
///
/// For example, in:
/// \code
/// class Foo {
/// int bar;
/// }
/// \endcode
/// The matcher:
/// \code
/// cxxRecordDecl(
/// optionally(has(
/// fieldDecl(hasName("bar")).bind("var")
/// ))).bind("record")
/// \endcode
/// will produce a result binding for both "record" and "var".
/// The matcher will produce a "record" binding even if there is no data
/// member named "bar" in that class.
///
/// Usable as: Any Matcher
extern const internal::VariadicOperatorMatcherFunc<1, 1> optionally;
/// Matches sizeof (C99), alignof (C++11) and vec_step (OpenCL)
///
/// Given
/// \code
/// Foo x = bar;
/// int y = sizeof(x) + alignof(x);
/// \endcode
/// unaryExprOrTypeTraitExpr()
/// matches \c sizeof(x) and \c alignof(x)
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
UnaryExprOrTypeTraitExpr>
unaryExprOrTypeTraitExpr;
/// Matches unary expressions that have a specific type of argument.
///
/// Given
/// \code
/// int a, c; float b; int s = sizeof(a) + sizeof(b) + alignof(c);
/// \endcode
/// unaryExprOrTypeTraitExpr(hasArgumentOfType(asString("int")))
/// matches \c sizeof(a) and \c alignof(c)
AST_MATCHER_P(UnaryExprOrTypeTraitExpr, hasArgumentOfType,
              internal::Matcher<QualType>, InnerMatcher) {
  // Apply the inner matcher to the operand's type, i.e. the type of `x`
  // in `sizeof(x)` or the named type in `sizeof(int)`.
  return InnerMatcher.matches(Node.getTypeOfArgument(), Finder, Builder);
}
/// Matches unary expressions of a certain kind.
///
/// Given
/// \code
/// int x;
/// int s = sizeof(x) + alignof(x)
/// \endcode
/// unaryExprOrTypeTraitExpr(ofKind(UETT_SizeOf))
/// matches \c sizeof(x)
///
/// If the matcher is used from clang-query, UnaryExprOrTypeTrait parameter
/// should be passed as a quoted string. e.g., ofKind("UETT_SizeOf").
AST_MATCHER_P(UnaryExprOrTypeTraitExpr, ofKind, UnaryExprOrTypeTrait, Kind) {
  // Direct comparison of the trait kind (sizeof / alignof / vec_step / ...).
  return Kind == Node.getKind();
}
/// Same as unaryExprOrTypeTraitExpr, but only matching
/// alignof.
inline internal::Matcher<Stmt> alignOfExpr(
    const internal::Matcher<UnaryExprOrTypeTraitExpr> &InnerMatcher) {
  // Restrict to the two alignof flavours (C++11 alignof and the
  // vendor-preferred __alignof) before applying the caller's matcher.
  const auto AlignOfKind =
      anyOf(ofKind(UETT_AlignOf), ofKind(UETT_PreferredAlignOf));
  return stmt(unaryExprOrTypeTraitExpr(allOf(AlignOfKind, InnerMatcher)));
}
/// Same as unaryExprOrTypeTraitExpr, but only matching
/// sizeof.
inline internal::Matcher<Stmt> sizeOfExpr(
    const internal::Matcher<UnaryExprOrTypeTraitExpr> &InnerMatcher) {
  // Restrict to sizeof before applying the caller's matcher.
  return stmt(
      unaryExprOrTypeTraitExpr(allOf(ofKind(UETT_SizeOf), InnerMatcher)));
}
/// Matches NamedDecl nodes that have the specified name.
///
/// Supports specifying enclosing namespaces or classes by prefixing the name
/// with '<enclosing>::'.
/// Does not match typedefs of an underlying type with the given name.
///
/// Example matches X (Name == "X")
/// \code
/// class X;
/// \endcode
///
/// Example matches X (Name is one of "::a::b::X", "a::b::X", "b::X", "X")
/// \code
/// namespace a { namespace b { class X; } }
/// \endcode
inline internal::Matcher<NamedDecl> hasName(StringRef Name) {
  // Delegate to HasNameMatcher with a single-element name list; the
  // multi-name variant (hasAnyName) uses the same matcher class.
  std::vector<std::string> Names;
  Names.push_back(std::string(Name));
  return internal::Matcher<NamedDecl>(new internal::HasNameMatcher(Names));
}
/// Matches NamedDecl nodes that have any of the specified names.
///
/// This matcher is only provided as a performance optimization of hasName.
/// \code
/// hasAnyName(a, b, c)
/// \endcode
/// is equivalent to, but faster than
/// \code
/// anyOf(hasName(a), hasName(b), hasName(c))
/// \endcode
extern const internal::VariadicFunction<internal::Matcher<NamedDecl>, StringRef,
internal::hasAnyNameFunc>
hasAnyName;
/// Matches NamedDecl nodes whose fully qualified names contain
/// a substring matched by the given RegExp.
///
/// Supports specifying enclosing namespaces or classes by
/// prefixing the name with '<enclosing>::'. Does not match typedefs
/// of an underlying type with the given name.
///
/// Example matches X (regexp == "::X")
/// \code
/// class X;
/// \endcode
///
/// Example matches X (regexp is one of "::X", "^foo::.*X", among others)
/// \code
/// namespace foo { namespace bar { class X; } }
/// \endcode
AST_MATCHER_P(NamedDecl, matchesName, std::string, RegExp) {
  assert(!RegExp.empty());
  // Prepend "::" so patterns such as "::X" can anchor on the outermost
  // qualifier of the fully qualified name.
  llvm::Regex RE(RegExp);
  return RE.match("::" + Node.getQualifiedNameAsString());
}
/// Matches overloaded operator names.
///
/// Matches overloaded operator names specified in strings without the
/// "operator" prefix: e.g. "<<".
///
/// Given:
/// \code
/// class A { int operator*(); };
/// const A &operator<<(const A &a, const A &b);
/// A a;
/// a << a; // <-- This matches
/// \endcode
///
/// \c cxxOperatorCallExpr(hasOverloadedOperatorName("<<"))) matches the
/// specified line and
/// \c cxxRecordDecl(hasMethod(hasOverloadedOperatorName("*")))
/// matches the declaration of \c A.
///
/// Usable as: Matcher<CXXOperatorCallExpr>, Matcher<FunctionDecl>
inline internal::PolymorphicMatcherWithParam1<
    internal::HasOverloadedOperatorNameMatcher, StringRef,
    AST_POLYMORPHIC_SUPPORTED_TYPES(CXXOperatorCallExpr, FunctionDecl)>
hasOverloadedOperatorName(StringRef Name) {
  // Forward the bare operator spelling (e.g. "<<", without the "operator"
  // prefix) to the polymorphic matcher, which handles both operator call
  // sites (CXXOperatorCallExpr) and operator declarations (FunctionDecl).
  return internal::PolymorphicMatcherWithParam1<
      internal::HasOverloadedOperatorNameMatcher, StringRef,
      AST_POLYMORPHIC_SUPPORTED_TYPES(CXXOperatorCallExpr, FunctionDecl)>(Name);
}
/// Matches C++ classes that are directly or indirectly derived from a class
/// matching \c Base, or Objective-C classes that directly or indirectly
/// subclass a class matching \c Base.
///
/// Note that a class is not considered to be derived from itself.
///
/// Example matches Y, Z, C (Base == hasName("X"))
/// \code
/// class X;
/// class Y : public X {}; // directly derived
/// class Z : public Y {}; // indirectly derived
/// typedef X A;
/// typedef A B;
/// class C : public B {}; // derived from a typedef of X
/// \endcode
///
/// In the following example, Bar matches isDerivedFrom(hasName("X")):
/// \code
/// class Foo;
/// typedef Foo X;
/// class Bar : public Foo {}; // derived from a type that X is a typedef of
/// \endcode
///
/// In the following example, Bar matches isDerivedFrom(hasName("NSObject"))
/// \code
/// @interface NSObject @end
/// @interface Bar : NSObject @end
/// \endcode
///
/// Usable as: Matcher<CXXRecordDecl>, Matcher<ObjCInterfaceDecl>
AST_POLYMORPHIC_MATCHER_P(
    isDerivedFrom,
    AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl),
    internal::Matcher<NamedDecl>, Base) {
  // C++ records and Objective-C interfaces use different derivation queries
  // on the match finder.
  if (const auto *Record = dyn_cast<CXXRecordDecl>(&Node))
    return Finder->classIsDerivedFrom(Record, Base, Builder,
                                      /*Directly=*/false);
  // Not a C++ record, so by the supported-types list it is an ObjC class.
  return Finder->objcClassIsDerivedFrom(cast<ObjCInterfaceDecl>(&Node), Base,
                                        Builder, /*Directly=*/false);
}
/// Overloaded method as shortcut for \c isDerivedFrom(hasName(...)).
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
    isDerivedFrom,
    AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl),
    std::string, BaseName, 1) {
  // An empty name can never match a base class.
  if (BaseName.empty())
    return false;
  // Build the matcher-based overload and dispatch on the node's kind.
  const auto Inner = isDerivedFrom(hasName(BaseName));
  if (const auto *Record = dyn_cast<CXXRecordDecl>(&Node))
    return Matcher<CXXRecordDecl>(Inner).matches(*Record, Finder, Builder);
  return Matcher<ObjCInterfaceDecl>(Inner).matches(
      *cast<ObjCInterfaceDecl>(&Node), Finder, Builder);
}
/// Similar to \c isDerivedFrom(), but also matches classes that directly
/// match \c Base.
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
    isSameOrDerivedFrom,
    AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl),
    internal::Matcher<NamedDecl>, Base, 0) {
  // "Same or derived": the node itself matching Base also counts.
  const auto Inner = anyOf(Base, isDerivedFrom(Base));
  if (const auto *Record = dyn_cast<CXXRecordDecl>(&Node))
    return Matcher<CXXRecordDecl>(Inner).matches(*Record, Finder, Builder);
  return Matcher<ObjCInterfaceDecl>(Inner).matches(
      *cast<ObjCInterfaceDecl>(&Node), Finder, Builder);
}
/// Overloaded method as shortcut for
/// \c isSameOrDerivedFrom(hasName(...)).
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
    isSameOrDerivedFrom,
    AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl),
    std::string, BaseName, 1) {
  // An empty name can never match a class.
  if (BaseName.empty())
    return false;
  // Build the matcher-based overload and dispatch on the node's kind.
  const auto Inner = isSameOrDerivedFrom(hasName(BaseName));
  if (const auto *Record = dyn_cast<CXXRecordDecl>(&Node))
    return Matcher<CXXRecordDecl>(Inner).matches(*Record, Finder, Builder);
  return Matcher<ObjCInterfaceDecl>(Inner).matches(
      *cast<ObjCInterfaceDecl>(&Node), Finder, Builder);
}
/// Matches C++ or Objective-C classes that are directly derived from a class
/// matching \c Base.
///
/// Note that a class is not considered to be derived from itself.
///
/// Example matches Y, C (Base == hasName("X"))
/// \code
/// class X;
/// class Y : public X {}; // directly derived
/// class Z : public Y {}; // indirectly derived
/// typedef X A;
/// typedef A B;
/// class C : public B {}; // derived from a typedef of X
/// \endcode
///
/// In the following example, Bar matches isDerivedFrom(hasName("X")):
/// \code
/// class Foo;
/// typedef Foo X;
/// class Bar : public Foo {}; // derived from a type that X is a typedef of
/// \endcode
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
    isDirectlyDerivedFrom,
    AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl),
    internal::Matcher<NamedDecl>, Base, 0) {
  // C++ records and Objective-C interfaces use different derivation queries;
  // Directly=true restricts to immediate bases / superclasses.
  if (const auto *Record = dyn_cast<CXXRecordDecl>(&Node))
    return Finder->classIsDerivedFrom(Record, Base, Builder,
                                      /*Directly=*/true);
  // Not a C++ record, so by the supported-types list it is an ObjC class.
  return Finder->objcClassIsDerivedFrom(cast<ObjCInterfaceDecl>(&Node), Base,
                                        Builder, /*Directly=*/true);
}
/// Overloaded method as shortcut for \c isDirectlyDerivedFrom(hasName(...)).
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
    isDirectlyDerivedFrom,
    AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl),
    std::string, BaseName, 1) {
  // An empty name can never match a base class.
  if (BaseName.empty())
    return false;
  // Build the matcher-based overload and dispatch on the node's kind.
  const auto Inner = isDirectlyDerivedFrom(hasName(BaseName));
  if (const auto *Record = dyn_cast<CXXRecordDecl>(&Node))
    return Matcher<CXXRecordDecl>(Inner).matches(*Record, Finder, Builder);
  return Matcher<ObjCInterfaceDecl>(Inner).matches(
      *cast<ObjCInterfaceDecl>(&Node), Finder, Builder);
}
/// Matches the first method of a class or struct that satisfies \c
/// InnerMatcher.
///
/// Given:
/// \code
/// class A { void func(); };
/// class B { void member(); };
/// \endcode
///
/// \c cxxRecordDecl(hasMethod(hasName("func"))) matches the declaration of
/// \c A but not \c B.
AST_MATCHER_P(CXXRecordDecl, hasMethod, internal::Matcher<CXXMethodDecl>,
              InnerMatcher) {
  // Succeeds on the first method in the record satisfying InnerMatcher;
  // iteration stops as soon as one matches.
  return matchesFirstInPointerRange(InnerMatcher, Node.method_begin(),
                                    Node.method_end(), Finder, Builder);
}
/// Matches the generated class of lambda expressions.
///
/// Given:
/// \code
/// auto x = []{};
/// \endcode
///
/// \c cxxRecordDecl(isLambda()) matches the implicit class declaration of
/// \c decltype(x)
AST_MATCHER(CXXRecordDecl, isLambda) {
  // True for the implicit closure class generated for a lambda expression.
  return Node.isLambda();
}
/// Matches AST nodes that have child AST nodes that match the
/// provided matcher.
///
/// Example matches X, Y
/// (matcher = cxxRecordDecl(has(cxxRecordDecl(hasName("X")))
/// \code
/// class X {}; // Matches X, because X::X is a class of name X inside X.
/// class Y { class X {}; };
/// class Z { class Y { class X {}; }; }; // Does not match Z.
/// \endcode
///
/// ChildT must be an AST base type.
///
/// Usable as: Any Matcher
/// Note that has is direct matcher, so it also matches things like implicit
/// casts and paren casts. If you are matching with expr then you should
/// probably consider using ignoringParenImpCasts like:
/// has(ignoringParenImpCasts(expr())).
extern const internal::ArgumentAdaptingMatcherFunc<internal::HasMatcher> has;
/// Matches AST nodes that have descendant AST nodes that match the
/// provided matcher.
///
/// Example matches X, Y, Z
/// (matcher = cxxRecordDecl(hasDescendant(cxxRecordDecl(hasName("X")))))
/// \code
/// class X {}; // Matches X, because X::X is a class of name X inside X.
/// class Y { class X {}; };
/// class Z { class Y { class X {}; }; };
/// \endcode
///
/// DescendantT must be an AST base type.
///
/// Usable as: Any Matcher
extern const internal::ArgumentAdaptingMatcherFunc<
internal::HasDescendantMatcher>
hasDescendant;
/// Matches AST nodes that have child AST nodes that match the
/// provided matcher.
///
/// Example matches X, Y, Y::X, Z::Y, Z::Y::X
/// (matcher = cxxRecordDecl(forEach(cxxRecordDecl(hasName("X")))
/// \code
/// class X {};
/// class Y { class X {}; }; // Matches Y, because Y::X is a class of name X
/// // inside Y.
/// class Z { class Y { class X {}; }; }; // Does not match Z.
/// \endcode
///
/// ChildT must be an AST base type.
///
/// As opposed to 'has', 'forEach' will cause a match for each result that
/// matches instead of only on the first one.
///
/// Usable as: Any Matcher
extern const internal::ArgumentAdaptingMatcherFunc<internal::ForEachMatcher>
forEach;
/// Matches AST nodes that have descendant AST nodes that match the
/// provided matcher.
///
/// Example matches X, A, A::X, B, B::C, B::C::X
/// (matcher = cxxRecordDecl(forEachDescendant(cxxRecordDecl(hasName("X")))))
/// \code
/// class X {};
/// class A { class X {}; }; // Matches A, because A::X is a class of name
/// // X inside A.
/// class B { class C { class X {}; }; };
/// \endcode
///
/// DescendantT must be an AST base type.
///
/// As opposed to 'hasDescendant', 'forEachDescendant' will cause a match for
/// each result that matches instead of only on the first one.
///
/// Note: Recursively combined ForEachDescendant can cause many matches:
/// cxxRecordDecl(forEachDescendant(cxxRecordDecl(
/// forEachDescendant(cxxRecordDecl())
/// )))
/// will match 10 times (plus injected class name matches) on:
/// \code
/// class A { class B { class C { class D { class E {}; }; }; }; };
/// \endcode
///
/// Usable as: Any Matcher
extern const internal::ArgumentAdaptingMatcherFunc<
internal::ForEachDescendantMatcher>
forEachDescendant;
/// Matches if the node or any descendant matches.
///
/// Generates results for each match.
///
/// For example, in:
/// \code
/// class A { class B {}; class C {}; };
/// \endcode
/// The matcher:
/// \code
/// cxxRecordDecl(hasName("::A"),
/// findAll(cxxRecordDecl(isDefinition()).bind("m")))
/// \endcode
/// will generate results for \c A, \c B and \c C.
///
/// Usable as: Any Matcher
template <typename T>
internal::Matcher<T> findAll(const internal::Matcher<T> &Matcher) {
  // "Self or any descendant": eachOf generates one result when the node
  // itself matches plus one result per matching descendant.
  auto InDescendants = forEachDescendant(Matcher);
  return eachOf(Matcher, InDescendants);
}
/// Matches AST nodes that have a parent that matches the provided
/// matcher.
///
/// Given
/// \code
/// void f() { for (;;) { int x = 42; if (true) { int x = 43; } } }
/// \endcode
/// \c compoundStmt(hasParent(ifStmt())) matches "{ int x = 43; }".
///
/// Usable as: Any Matcher
extern const internal::ArgumentAdaptingMatcherFunc<
internal::HasParentMatcher,
internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>,
internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>>
hasParent;
/// Matches AST nodes that have an ancestor that matches the provided
/// matcher.
///
/// Given
/// \code
/// void f() { if (true) { int x = 42; } }
/// void g() { for (;;) { int x = 43; } }
/// \endcode
/// \c expr(integerLiteral(hasAncestor(ifStmt()))) matches \c 42, but not 43.
///
/// Usable as: Any Matcher
extern const internal::ArgumentAdaptingMatcherFunc<
internal::HasAncestorMatcher,
internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>,
internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>>
hasAncestor;
/// Matches if the provided matcher does not match.
///
/// Example matches Y (matcher = cxxRecordDecl(unless(hasName("X"))))
/// \code
/// class X {};
/// class Y {};
/// \endcode
///
/// Usable as: Any Matcher
extern const internal::VariadicOperatorMatcherFunc<1, 1> unless;
/// Matches a node if the declaration associated with that node
/// matches the given matcher.
///
/// The associated declaration is:
/// - for type nodes, the declaration of the underlying type
/// - for CallExpr, the declaration of the callee
/// - for MemberExpr, the declaration of the referenced member
/// - for CXXConstructExpr, the declaration of the constructor
/// - for CXXNewExpr, the declaration of the operator new
/// - for ObjCIvarExpr, the declaration of the ivar
///
/// For type nodes, hasDeclaration will generally match the declaration of the
/// sugared type. Given
/// \code
/// class X {};
/// typedef X Y;
/// Y y;
/// \endcode
/// in varDecl(hasType(hasDeclaration(decl()))) the decl will match the
/// typedefDecl. A common use case is to match the underlying, desugared type.
/// This can be achieved by using the hasUnqualifiedDesugaredType matcher:
/// \code
/// varDecl(hasType(hasUnqualifiedDesugaredType(
/// recordType(hasDeclaration(decl())))))
/// \endcode
/// In this matcher, the decl will match the CXXRecordDecl of class X.
///
/// Usable as: Matcher<AddrLabelExpr>, Matcher<CallExpr>,
/// Matcher<CXXConstructExpr>, Matcher<CXXNewExpr>, Matcher<DeclRefExpr>,
/// Matcher<EnumType>, Matcher<InjectedClassNameType>, Matcher<LabelStmt>,
/// Matcher<MemberExpr>, Matcher<QualType>, Matcher<RecordType>,
/// Matcher<TagType>, Matcher<TemplateSpecializationType>,
/// Matcher<TemplateTypeParmType>, Matcher<TypedefType>,
/// Matcher<UnresolvedUsingType>
inline internal::PolymorphicMatcherWithParam1<
    internal::HasDeclarationMatcher, internal::Matcher<Decl>,
    void(internal::HasDeclarationSupportedTypes)>
hasDeclaration(const internal::Matcher<Decl> &InnerMatcher) {
  // Wrap InnerMatcher in the polymorphic HasDeclarationMatcher; the
  // supported node types are enumerated by HasDeclarationSupportedTypes.
  return internal::PolymorphicMatcherWithParam1<
      internal::HasDeclarationMatcher, internal::Matcher<Decl>,
      void(internal::HasDeclarationSupportedTypes)>(InnerMatcher);
}
/// Matches a \c NamedDecl whose underlying declaration matches the given
/// matcher.
///
/// Given
/// \code
/// namespace N { template<class T> void f(T t); }
/// template <class T> void g() { using N::f; f(T()); }
/// \endcode
/// \c unresolvedLookupExpr(hasAnyDeclaration(
/// namedDecl(hasUnderlyingDecl(hasName("::N::f")))))
/// matches the use of \c f in \c g() .
AST_MATCHER_P(NamedDecl, hasUnderlyingDecl, internal::Matcher<NamedDecl>,
              InnerMatcher) {
  // Guard against a null underlying declaration before delegating to the
  // inner matcher.
  if (const NamedDecl *Underlying = Node.getUnderlyingDecl())
    return InnerMatcher.matches(*Underlying, Finder, Builder);
  return false;
}
/// Matches on the implicit object argument of a member call expression, after
/// stripping off any parentheses or implicit casts.
///
/// Given
/// \code
/// class Y { public: void m(); };
/// Y g();
/// class X : public Y {};
/// void z(Y y, X x) { y.m(); (g()).m(); x.m(); }
/// \endcode
/// cxxMemberCallExpr(on(hasType(cxxRecordDecl(hasName("Y")))))
/// matches `y.m()` and `(g()).m()`.
/// cxxMemberCallExpr(on(hasType(cxxRecordDecl(hasName("X")))))
/// matches `x.m()`.
/// cxxMemberCallExpr(on(callExpr()))
/// matches `(g()).m()`.
///
/// FIXME: Overload to allow directly matching types?
AST_MATCHER_P(CXXMemberCallExpr, on, internal::Matcher<Expr>,
              InnerMatcher) {
  // The implicit object argument is the `x` in `x.m()`. The original code
  // called IgnoreParenImpCasts() on the raw pointer and only null-checked
  // the result; check the pointer *before* dereferencing it instead.
  const Expr *Object = Node.getImplicitObjectArgument();
  if (Object == nullptr)
    return false;
  // Strip parentheses and implicit casts before matching.
  return InnerMatcher.matches(*Object->IgnoreParenImpCasts(), Finder, Builder);
}
/// Matches on the receiver of an ObjectiveC Message expression.
///
/// Example
/// matcher = objCMessageExpr(hasReceiverType(asString("UIWebView *")));
/// matches the [webView ...] message invocation.
/// \code
/// NSString *webViewJavaScript = ...
/// UIWebView *webView = ...
/// [webView stringByEvaluatingJavaScriptFromString:webViewJavascript];
/// \endcode
AST_MATCHER_P(ObjCMessageExpr, hasReceiverType, internal::Matcher<QualType>,
              InnerMatcher) {
  // Match against the static type of the message receiver. The local is a
  // QualType; the previous name `TypeDecl` shadowed the clang::TypeDecl
  // class and mislabeled the value.
  const QualType ReceiverType = Node.getReceiverType();
  return InnerMatcher.matches(ReceiverType, Finder, Builder);
}
/// Returns true when the Objective-C method declaration is a class method.
///
/// Example
/// matcher = objcMethodDecl(isClassMethod())
/// matches
/// \code
/// @interface I + (void)foo; @end
/// \endcode
/// but not
/// \code
/// @interface I - (void)bar; @end
/// \endcode
AST_MATCHER(ObjCMethodDecl, isClassMethod) {
  // True for "+" (class) methods, false for "-" (instance) methods.
  return Node.isClassMethod();
}
/// Returns true when the Objective-C method declaration is an instance method.
///
/// Example
/// matcher = objcMethodDecl(isInstanceMethod())
/// matches
/// \code
/// @interface I - (void)bar; @end
/// \endcode
/// but not
/// \code
/// @interface I + (void)foo; @end
/// \endcode
AST_MATCHER(ObjCMethodDecl, isInstanceMethod) {
  // True for "-" (instance) methods, false for "+" (class) methods.
  return Node.isInstanceMethod();
}
/// Returns true when the Objective-C message is sent to a class.
///
/// Example
/// matcher = objcMessageExpr(isClassMessage())
/// matches
/// \code
/// [NSString stringWithFormat:@"format"];
/// \endcode
/// but not
/// \code
/// NSString *x = @"hello";
/// [x containsString:@"h"];
/// \endcode
AST_MATCHER(ObjCMessageExpr, isClassMessage) {
  // True when the receiver is a class (e.g. [NSString ...]), not an object.
  return Node.isClassMessage();
}
/// Returns true when the Objective-C message is sent to an instance.
///
/// Example
/// matcher = objcMessageExpr(isInstanceMessage())
/// matches
/// \code
/// NSString *x = @"hello";
/// [x containsString:@"h"];
/// \endcode
/// but not
/// \code
/// [NSString stringWithFormat:@"format"];
/// \endcode
AST_MATCHER(ObjCMessageExpr, isInstanceMessage) {
  // True when the receiver is an object instance, not a class.
  return Node.isInstanceMessage();
}
/// Matches if the Objective-C message is sent to an instance,
/// and the inner matcher matches on that instance.
///
/// For example the method call in
/// \code
/// NSString *x = @"hello";
/// [x containsString:@"h"];
/// \endcode
/// is matched by
/// objcMessageExpr(hasReceiver(declRefExpr(to(varDecl(hasName("x"))))))
AST_MATCHER_P(ObjCMessageExpr, hasReceiver, internal::Matcher<Expr>,
InnerMatcher) {
const Expr *ReceiverNode = Node.getInstanceReceiver();
return (ReceiverNode != nullptr &&
InnerMatcher.matches(*ReceiverNode->IgnoreParenImpCasts(), Finder,
Builder));
}
/// Matches when BaseName == Selector.getAsString()
///
/// matcher = objCMessageExpr(hasSelector("loadHTMLString:baseURL:"));
/// matches the outer message expr in the code below, but NOT the message
/// invocation for self.bodyView.
/// \code
/// [self.bodyView loadHTMLString:html baseURL:NULL];
/// \endcode
AST_MATCHER_P(ObjCMessageExpr, hasSelector, std::string, BaseName) {
  // Exact match of the full, canonical selector spelling against BaseName.
  return Node.getSelector().getAsString() == BaseName;
}
/// Matches when at least one of the supplied string equals to the
/// Selector.getAsString()
///
/// matcher = objCMessageExpr(hasSelector("methodA:", "methodB:"));
/// matches both of the expressions below:
/// \code
/// [myObj methodA:argA];
/// [myObj methodB:argB];
/// \endcode
extern const internal::VariadicFunction<internal::Matcher<ObjCMessageExpr>,
StringRef,
internal::hasAnySelectorFunc>
hasAnySelector;
/// Matches ObjC selectors whose name contains
/// a substring matched by the given RegExp.
/// matcher = objCMessageExpr(matchesSelector("loadHTMLString\:baseURL?"));
/// matches the outer message expr in the code below, but NOT the message
/// invocation for self.bodyView.
/// \code
/// [self.bodyView loadHTMLString:html baseURL:NULL];
/// \endcode
AST_MATCHER_P(ObjCMessageExpr, matchesSelector, std::string, RegExp) {
  // An empty pattern is a caller error, not "match everything".
  assert(!RegExp.empty());
  std::string SelectorString = Node.getSelector().getAsString();
  // NOTE(review): the regex is recompiled for every candidate node; fine for
  // occasional use, but worth caching if this matcher ends up on a hot path.
  llvm::Regex RE(RegExp);
  return RE.match(SelectorString);
}
/// Matches when the selector is the empty selector
///
/// Matches only when the selector of the objCMessageExpr is NULL. This may
/// represent an error condition in the tree!
AST_MATCHER(ObjCMessageExpr, hasNullSelector) {
return Node.getSelector().isNull();
}
/// Matches when the selector is a Unary Selector
///
/// matcher = objCMessageExpr(hasUnarySelector());
/// matches self.bodyView in the code below, but NOT the outer message
/// invocation of "loadHTMLString:baseURL:".
/// \code
/// [self.bodyView loadHTMLString:html baseURL:NULL];
/// \endcode
AST_MATCHER(ObjCMessageExpr, hasUnarySelector) {
return Node.getSelector().isUnarySelector();
}
/// Matches when the selector is a keyword selector
///
/// objCMessageExpr(hasKeywordSelector()) matches the generated setFrame
/// message expression in
///
/// \code
/// UIWebView *webView = ...;
/// CGRect bodyFrame = webView.frame;
/// bodyFrame.size.height = self.bodyContentHeight;
/// webView.frame = bodyFrame;
/// // ^---- matches here
/// \endcode
AST_MATCHER(ObjCMessageExpr, hasKeywordSelector) {
return Node.getSelector().isKeywordSelector();
}
/// Matches when the selector has the specified number of arguments
///
/// matcher = objCMessageExpr(numSelectorArgs(0));
/// matches self.bodyView in the code below
///
/// matcher = objCMessageExpr(numSelectorArgs(2));
/// matches the invocation of "loadHTMLString:baseURL:" but not that
/// of self.bodyView
/// \code
/// [self.bodyView loadHTMLString:html baseURL:NULL];
/// \endcode
AST_MATCHER_P(ObjCMessageExpr, numSelectorArgs, unsigned, N) {
return Node.getSelector().getNumArgs() == N;
}
/// Matches if the call expression's callee expression matches.
///
/// Given
/// \code
/// class Y { void x() { this->x(); x(); Y y; y.x(); } };
/// void f() { f(); }
/// \endcode
/// callExpr(callee(expr()))
/// matches this->x(), x(), y.x(), f()
/// with callee(...)
/// matching this->x, x, y.x, f respectively
///
/// Note: Callee cannot take the more general internal::Matcher<Expr>
/// because this introduces ambiguous overloads with calls to Callee taking a
/// internal::Matcher<Decl>, as the matcher hierarchy is purely
/// implemented in terms of implicit casts.
AST_MATCHER_P(CallExpr, callee, internal::Matcher<Stmt>,
InnerMatcher) {
const Expr *ExprNode = Node.getCallee();
return (ExprNode != nullptr &&
InnerMatcher.matches(*ExprNode, Finder, Builder));
}
/// Matches if the call expression's callee's declaration matches the
/// given matcher.
///
/// Example matches y.x() (matcher = callExpr(callee(
/// cxxMethodDecl(hasName("x")))))
/// \code
/// class Y { public: void x(); };
/// void z() { Y y; y.x(); }
/// \endcode
AST_MATCHER_P_OVERLOAD(CallExpr, callee, internal::Matcher<Decl>, InnerMatcher,
1) {
return callExpr(hasDeclaration(InnerMatcher)).matches(Node, Finder, Builder);
}
/// Matches if the expression's or declaration's type matches a type
/// matcher.
///
/// Example matches x (matcher = expr(hasType(cxxRecordDecl(hasName("X")))))
/// and z (matcher = varDecl(hasType(cxxRecordDecl(hasName("X")))))
/// and U (matcher = typedefDecl(hasType(asString("int")))
/// and friend class X (matcher = friendDecl(hasType("X"))
/// \code
/// class X {};
/// void y(X &x) { x; X z; }
/// typedef int U;
/// class Y { friend class X; };
/// \endcode
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
hasType,
AST_POLYMORPHIC_SUPPORTED_TYPES(Expr, FriendDecl, TypedefNameDecl,
ValueDecl),
internal::Matcher<QualType>, InnerMatcher, 0) {
QualType QT = internal::getUnderlyingType(Node);
if (!QT.isNull())
return InnerMatcher.matches(QT, Finder, Builder);
return false;
}
/// Overloaded to match the declaration of the expression's or value
/// declaration's type.
///
/// In case of a value declaration (for example a variable declaration),
/// this resolves one layer of indirection. For example, in the value
/// declaration "X x;", cxxRecordDecl(hasName("X")) matches the declaration of
/// X, while varDecl(hasType(cxxRecordDecl(hasName("X")))) matches the
/// declaration of x.
///
/// Example matches x (matcher = expr(hasType(cxxRecordDecl(hasName("X")))))
/// and z (matcher = varDecl(hasType(cxxRecordDecl(hasName("X")))))
/// and friend class X (matcher = friendDecl(hasType("X"))
/// \code
/// class X {};
/// void y(X &x) { x; X z; }
/// class Y { friend class X; };
/// \endcode
///
/// Usable as: Matcher<Expr>, Matcher<ValueDecl>
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
hasType, AST_POLYMORPHIC_SUPPORTED_TYPES(Expr, FriendDecl, ValueDecl),
internal::Matcher<Decl>, InnerMatcher, 1) {
QualType QT = internal::getUnderlyingType(Node);
if (!QT.isNull())
return qualType(hasDeclaration(InnerMatcher)).matches(QT, Finder, Builder);
return false;
}
/// Matches if the type location of the declarator decl's type matches
/// the inner matcher.
///
/// Given
/// \code
/// int x;
/// \endcode
/// declaratorDecl(hasTypeLoc(loc(asString("int"))))
/// matches int x
AST_MATCHER_P(DeclaratorDecl, hasTypeLoc, internal::Matcher<TypeLoc>, Inner) {
if (!Node.getTypeSourceInfo())
// This happens for example for implicit destructors.
return false;
return Inner.matches(Node.getTypeSourceInfo()->getTypeLoc(), Finder, Builder);
}
/// Matches if the matched type is represented by the given string.
///
/// Given
/// \code
/// class Y { public: void x(); };
/// void z() { Y* y; y->x(); }
/// \endcode
/// cxxMemberCallExpr(on(hasType(asString("class Y *"))))
/// matches y->x()
AST_MATCHER_P(QualType, asString, std::string, Name) {
return Name == Node.getAsString();
}
/// Matches if the matched type is a pointer type and the pointee type
/// matches the specified matcher.
///
/// Example matches y->x()
/// (matcher = cxxMemberCallExpr(on(hasType(pointsTo
/// cxxRecordDecl(hasName("Y")))))))
/// \code
/// class Y { public: void x(); };
/// void z() { Y *y; y->x(); }
/// \endcode
AST_MATCHER_P(
QualType, pointsTo, internal::Matcher<QualType>,
InnerMatcher) {
return (!Node.isNull() && Node->isAnyPointerType() &&
InnerMatcher.matches(Node->getPointeeType(), Finder, Builder));
}
/// Overloaded to match the pointee type's declaration.
AST_MATCHER_P_OVERLOAD(QualType, pointsTo, internal::Matcher<Decl>,
InnerMatcher, 1) {
return pointsTo(qualType(hasDeclaration(InnerMatcher)))
.matches(Node, Finder, Builder);
}
/// Matches if the matched type matches the unqualified desugared
/// type of the matched node.
///
/// For example, in:
/// \code
/// class A {};
/// using B = A;
/// \endcode
/// The matcher type(hasUnqualifiedDesugaredType(recordType())) matches
/// both B and A.
AST_MATCHER_P(Type, hasUnqualifiedDesugaredType, internal::Matcher<Type>,
InnerMatcher) {
return InnerMatcher.matches(*Node.getUnqualifiedDesugaredType(), Finder,
Builder);
}
/// Matches if the matched type is a reference type and the referenced
/// type matches the specified matcher.
///
/// Example matches X &x and const X &y
/// (matcher = varDecl(hasType(references(cxxRecordDecl(hasName("X"))))))
/// \code
/// class X {
/// void a(X b) {
/// X &x = b;
/// const X &y = b;
/// }
/// };
/// \endcode
AST_MATCHER_P(QualType, references, internal::Matcher<QualType>,
InnerMatcher) {
return (!Node.isNull() && Node->isReferenceType() &&
InnerMatcher.matches(Node->getPointeeType(), Finder, Builder));
}
/// Matches QualTypes whose canonical type matches InnerMatcher.
///
/// Given:
/// \code
/// typedef int &int_ref;
/// int a;
/// int_ref b = a;
/// \endcode
///
/// \c varDecl(hasType(qualType(referenceType()))) will not match the
/// declaration of b but \c
/// varDecl(hasType(qualType(hasCanonicalType(referenceType())))) does.
AST_MATCHER_P(QualType, hasCanonicalType, internal::Matcher<QualType>,
InnerMatcher) {
if (Node.isNull())
return false;
return InnerMatcher.matches(Node.getCanonicalType(), Finder, Builder);
}
/// Overloaded to match the referenced type's declaration.
AST_MATCHER_P_OVERLOAD(QualType, references, internal::Matcher<Decl>,
InnerMatcher, 1) {
return references(qualType(hasDeclaration(InnerMatcher)))
.matches(Node, Finder, Builder);
}
/// Matches on the implicit object argument of a member call expression. Unlike
/// `on`, matches the argument directly without stripping away anything.
///
/// Given
/// \code
/// class Y { public: void m(); };
/// Y g();
/// class X : public Y { void g(); };
/// void z(Y y, X x) { y.m(); x.m(); x.g(); (g()).m(); }
/// \endcode
/// cxxMemberCallExpr(onImplicitObjectArgument(hasType(
/// cxxRecordDecl(hasName("Y")))))
/// matches `y.m()`, `x.m()` and (g()).m(), but not `x.g()`.
/// cxxMemberCallExpr(on(callExpr()))
/// does not match `(g()).m()`, because the parens are not ignored.
///
/// FIXME: Overload to allow directly matching types?
AST_MATCHER_P(CXXMemberCallExpr, onImplicitObjectArgument,
internal::Matcher<Expr>, InnerMatcher) {
const Expr *ExprNode = Node.getImplicitObjectArgument();
return (ExprNode != nullptr &&
InnerMatcher.matches(*ExprNode, Finder, Builder));
}
/// Matches if the type of the expression's implicit object argument either
/// matches the InnerMatcher, or is a pointer to a type that matches the
/// InnerMatcher.
///
/// Given
/// \code
/// class Y { public: void m(); };
/// class X : public Y { void g(); };
/// void z() { Y y; y.m(); Y *p; p->m(); X x; x.m(); x.g(); }
/// \endcode
/// cxxMemberCallExpr(thisPointerType(hasDeclaration(
/// cxxRecordDecl(hasName("Y")))))
/// matches `y.m()`, `p->m()` and `x.m()`.
/// cxxMemberCallExpr(thisPointerType(hasDeclaration(
/// cxxRecordDecl(hasName("X")))))
/// matches `x.g()`.
AST_MATCHER_P_OVERLOAD(CXXMemberCallExpr, thisPointerType,
internal::Matcher<QualType>, InnerMatcher, 0) {
return onImplicitObjectArgument(
anyOf(hasType(InnerMatcher), hasType(pointsTo(InnerMatcher))))
.matches(Node, Finder, Builder);
}
/// Overloaded to match the type's declaration.
AST_MATCHER_P_OVERLOAD(CXXMemberCallExpr, thisPointerType,
internal::Matcher<Decl>, InnerMatcher, 1) {
return onImplicitObjectArgument(
anyOf(hasType(InnerMatcher), hasType(pointsTo(InnerMatcher))))
.matches(Node, Finder, Builder);
}
/// Matches a DeclRefExpr that refers to a declaration that matches the
/// specified matcher.
///
/// Example matches x in if(x)
/// (matcher = declRefExpr(to(varDecl(hasName("x")))))
/// \code
/// bool x;
/// if (x) {}
/// \endcode
AST_MATCHER_P(DeclRefExpr, to, internal::Matcher<Decl>,
InnerMatcher) {
const Decl *DeclNode = Node.getDecl();
return (DeclNode != nullptr &&
InnerMatcher.matches(*DeclNode, Finder, Builder));
}
/// Matches a \c DeclRefExpr that refers to a declaration through a
/// specific using shadow declaration.
///
/// Given
/// \code
/// namespace a { void f() {} }
/// using a::f;
/// void g() {
/// f(); // Matches this ..
/// a::f(); // .. but not this.
/// }
/// \endcode
/// declRefExpr(throughUsingDecl(anything()))
/// matches \c f()
AST_MATCHER_P(DeclRefExpr, throughUsingDecl,
internal::Matcher<UsingShadowDecl>, InnerMatcher) {
const NamedDecl *FoundDecl = Node.getFoundDecl();
if (const UsingShadowDecl *UsingDecl = dyn_cast<UsingShadowDecl>(FoundDecl))
return InnerMatcher.matches(*UsingDecl, Finder, Builder);
return false;
}
/// Matches an \c OverloadExpr if any of the declarations in the set of
/// overloads matches the given matcher.
///
/// Given
/// \code
/// template <typename T> void foo(T);
/// template <typename T> void bar(T);
/// template <typename T> void baz(T t) {
/// foo(t);
/// bar(t);
/// }
/// \endcode
/// unresolvedLookupExpr(hasAnyDeclaration(
/// functionTemplateDecl(hasName("foo"))))
/// matches \c foo in \c foo(t); but not \c bar in \c bar(t);
AST_MATCHER_P(OverloadExpr, hasAnyDeclaration, internal::Matcher<Decl>,
InnerMatcher) {
return matchesFirstInPointerRange(InnerMatcher, Node.decls_begin(),
Node.decls_end(), Finder, Builder);
}
/// Matches the Decl of a DeclStmt which has a single declaration.
///
/// Given
/// \code
/// int a, b;
/// int c;
/// \endcode
/// declStmt(hasSingleDecl(anything()))
/// matches 'int c;' but not 'int a, b;'.
AST_MATCHER_P(DeclStmt, hasSingleDecl, internal::Matcher<Decl>, InnerMatcher) {
if (Node.isSingleDecl()) {
const Decl *FoundDecl = Node.getSingleDecl();
return InnerMatcher.matches(*FoundDecl, Finder, Builder);
}
return false;
}
/// Matches a variable declaration that has an initializer expression
/// that matches the given matcher.
///
/// Example matches x (matcher = varDecl(hasInitializer(callExpr())))
/// \code
/// bool y() { return true; }
/// bool x = y();
/// \endcode
AST_MATCHER_P(
VarDecl, hasInitializer, internal::Matcher<Expr>,
InnerMatcher) {
const Expr *Initializer = Node.getAnyInitializer();
return (Initializer != nullptr &&
InnerMatcher.matches(*Initializer, Finder, Builder));
}
/// \brief Matches a static variable with local scope.
///
/// Example matches y (matcher = varDecl(isStaticLocal()))
/// \code
/// void f() {
/// int x;
/// static int y;
/// }
/// static int z;
/// \endcode
AST_MATCHER(VarDecl, isStaticLocal) {
return Node.isStaticLocal();
}
/// Matches a variable declaration that has function scope and is a
/// non-static local variable.
///
/// Example matches x (matcher = varDecl(hasLocalStorage())
/// \code
/// void f() {
/// int x;
/// static int y;
/// }
/// int z;
/// \endcode
AST_MATCHER(VarDecl, hasLocalStorage) {
return Node.hasLocalStorage();
}
/// Matches a variable declaration that does not have local storage.
///
/// Example matches y and z (matcher = varDecl(hasGlobalStorage())
/// \code
/// void f() {
/// int x;
/// static int y;
/// }
/// int z;
/// \endcode
AST_MATCHER(VarDecl, hasGlobalStorage) {
return Node.hasGlobalStorage();
}
/// Matches a variable declaration that has automatic storage duration.
///
/// Example matches x, but not y, z, or a.
/// (matcher = varDecl(hasAutomaticStorageDuration())
/// \code
/// void f() {
/// int x;
/// static int y;
/// thread_local int z;
/// }
/// int a;
/// \endcode
AST_MATCHER(VarDecl, hasAutomaticStorageDuration) {
return Node.getStorageDuration() == SD_Automatic;
}
/// Matches a variable declaration that has static storage duration.
/// It includes the variable declared at namespace scope and those declared
/// with "static" and "extern" storage class specifiers.
///
/// \code
/// void f() {
/// int x;
/// static int y;
/// thread_local int z;
/// }
/// int a;
/// static int b;
/// extern int c;
/// varDecl(hasStaticStorageDuration())
/// matches the variable declarations y, a, b and c.
/// \endcode
AST_MATCHER(VarDecl, hasStaticStorageDuration) {
return Node.getStorageDuration() == SD_Static;
}
/// Matches a variable declaration that has thread storage duration.
///
/// Example matches z, but not x, y, or a.
/// (matcher = varDecl(hasThreadStorageDuration())
/// \code
/// void f() {
/// int x;
/// static int y;
/// thread_local int z;
/// }
/// int a;
/// \endcode
AST_MATCHER(VarDecl, hasThreadStorageDuration) {
return Node.getStorageDuration() == SD_Thread;
}
/// Matches a variable declaration that is an exception variable from
/// a C++ catch block, or an Objective-C \@catch statement.
///
/// Example matches x (matcher = varDecl(isExceptionVariable())
/// \code
/// void f(int y) {
/// try {
/// } catch (int x) {
/// }
/// }
/// \endcode
AST_MATCHER(VarDecl, isExceptionVariable) {
return Node.isExceptionVariable();
}
/// Checks that a call expression or a constructor call expression has
/// a specific number of arguments (including absent default arguments).
///
/// Example matches f(0, 0) (matcher = callExpr(argumentCountIs(2)))
/// \code
/// void f(int x, int y);
/// f(0, 0);
/// \endcode
AST_POLYMORPHIC_MATCHER_P(argumentCountIs,
AST_POLYMORPHIC_SUPPORTED_TYPES(CallExpr,
CXXConstructExpr,
ObjCMessageExpr),
unsigned, N) {
return Node.getNumArgs() == N;
}
/// Matches the n'th argument of a call expression or a constructor
/// call expression.
///
/// Example matches y in x(y)
/// (matcher = callExpr(hasArgument(0, declRefExpr())))
/// \code
/// void x(int) { int y; x(y); }
/// \endcode
AST_POLYMORPHIC_MATCHER_P2(hasArgument,
                           AST_POLYMORPHIC_SUPPORTED_TYPES(CallExpr,
                                                           CXXConstructExpr,
                                                           ObjCMessageExpr),
                           unsigned, N, internal::Matcher<Expr>, InnerMatcher) {
  // No such argument position: nothing to match against.
  if (N >= Node.getNumArgs())
    return false;
  // Match the argument with surrounding parentheses and implicit casts
  // stripped, mirroring the other argument matchers.
  const Expr *Arg = Node.getArg(N)->IgnoreParenImpCasts();
  return InnerMatcher.matches(*Arg, Finder, Builder);
}
/// Matches the n'th item of an initializer list expression.
///
/// Example matches y.
/// (matcher = initListExpr(hasInit(0, expr())))
/// \code
/// int x{y};
/// \endcode
AST_MATCHER_P2(InitListExpr, hasInit, unsigned, N,
ast_matchers::internal::Matcher<Expr>, InnerMatcher) {
return N < Node.getNumInits() &&
InnerMatcher.matches(*Node.getInit(N), Finder, Builder);
}
/// Matches declaration statements that contain a specific number of
/// declarations.
///
/// Example: Given
/// \code
/// int a, b;
/// int c;
/// int d = 2, e;
/// \endcode
/// declCountIs(2)
/// matches 'int a, b;' and 'int d = 2, e;', but not 'int c;'.
AST_MATCHER_P(DeclStmt, declCountIs, unsigned, N) {
return std::distance(Node.decl_begin(), Node.decl_end()) == (ptrdiff_t)N;
}
/// Matches the n'th declaration of a declaration statement.
///
/// Note that this does not work for global declarations because the AST
/// breaks up multiple-declaration DeclStmt's into multiple single-declaration
/// DeclStmt's.
/// Example: Given non-global declarations
/// \code
/// int a, b = 0;
/// int c;
/// int d = 2, e;
/// \endcode
/// declStmt(containsDeclaration(
/// 0, varDecl(hasInitializer(anything()))))
/// matches only 'int d = 2, e;', and
/// declStmt(containsDeclaration(1, varDecl()))
/// \code
/// matches 'int a, b = 0' as well as 'int d = 2, e;'
/// but 'int c;' is not matched.
/// \endcode
AST_MATCHER_P2(DeclStmt, containsDeclaration, unsigned, N,
internal::Matcher<Decl>, InnerMatcher) {
const unsigned NumDecls = std::distance(Node.decl_begin(), Node.decl_end());
if (N >= NumDecls)
return false;
DeclStmt::const_decl_iterator Iterator = Node.decl_begin();
std::advance(Iterator, N);
return InnerMatcher.matches(**Iterator, Finder, Builder);
}
/// Matches a C++ catch statement that has a catch-all handler.
///
/// Given
/// \code
/// try {
/// // ...
/// } catch (int) {
/// // ...
/// } catch (...) {
/// // ...
/// }
/// \endcode
/// cxxCatchStmt(isCatchAll()) matches catch(...) but not catch(int).
AST_MATCHER(CXXCatchStmt, isCatchAll) {
return Node.getExceptionDecl() == nullptr;
}
/// Matches a constructor initializer.
///
/// Given
/// \code
/// struct Foo {
/// Foo() : foo_(1) { }
/// int foo_;
/// };
/// \endcode
/// cxxRecordDecl(has(cxxConstructorDecl(
/// hasAnyConstructorInitializer(anything())
/// )))
/// record matches Foo, hasAnyConstructorInitializer matches foo_(1)
AST_MATCHER_P(CXXConstructorDecl, hasAnyConstructorInitializer,
internal::Matcher<CXXCtorInitializer>, InnerMatcher) {
return matchesFirstInPointerRange(InnerMatcher, Node.init_begin(),
Node.init_end(), Finder, Builder);
}
/// Matches the field declaration of a constructor initializer.
///
/// Given
/// \code
/// struct Foo {
/// Foo() : foo_(1) { }
/// int foo_;
/// };
/// \endcode
/// cxxRecordDecl(has(cxxConstructorDecl(hasAnyConstructorInitializer(
/// forField(hasName("foo_"))))))
/// matches Foo
/// with forField matching foo_
AST_MATCHER_P(CXXCtorInitializer, forField,
internal::Matcher<FieldDecl>, InnerMatcher) {
const FieldDecl *NodeAsDecl = Node.getAnyMember();
return (NodeAsDecl != nullptr &&
InnerMatcher.matches(*NodeAsDecl, Finder, Builder));
}
/// Matches the initializer expression of a constructor initializer.
///
/// Given
/// \code
/// struct Foo {
/// Foo() : foo_(1) { }
/// int foo_;
/// };
/// \endcode
/// cxxRecordDecl(has(cxxConstructorDecl(hasAnyConstructorInitializer(
/// withInitializer(integerLiteral(equals(1)))))))
/// matches Foo
/// with withInitializer matching (1)
AST_MATCHER_P(CXXCtorInitializer, withInitializer,
internal::Matcher<Expr>, InnerMatcher) {
const Expr* NodeAsExpr = Node.getInit();
return (NodeAsExpr != nullptr &&
InnerMatcher.matches(*NodeAsExpr, Finder, Builder));
}
/// Matches a constructor initializer if it is explicitly written in
/// code (as opposed to implicitly added by the compiler).
///
/// Given
/// \code
/// struct Foo {
/// Foo() { }
/// Foo(int) : foo_("A") { }
/// string foo_;
/// };
/// \endcode
/// cxxConstructorDecl(hasAnyConstructorInitializer(isWritten()))
/// will match Foo(int), but not Foo()
AST_MATCHER(CXXCtorInitializer, isWritten) {
return Node.isWritten();
}
/// Matches a constructor initializer if it is initializing a base, as
/// opposed to a member.
///
/// Given
/// \code
/// struct B {};
/// struct D : B {
/// int I;
/// D(int i) : I(i) {}
/// };
/// struct E : B {
/// E() : B() {}
/// };
/// \endcode
/// cxxConstructorDecl(hasAnyConstructorInitializer(isBaseInitializer()))
/// will match E(), but not match D(int).
AST_MATCHER(CXXCtorInitializer, isBaseInitializer) {
return Node.isBaseInitializer();
}
/// Matches a constructor initializer if it is initializing a member, as
/// opposed to a base.
///
/// Given
/// \code
/// struct B {};
/// struct D : B {
/// int I;
/// D(int i) : I(i) {}
/// };
/// struct E : B {
/// E() : B() {}
/// };
/// \endcode
/// cxxConstructorDecl(hasAnyConstructorInitializer(isMemberInitializer()))
/// will match D(int), but not match E().
AST_MATCHER(CXXCtorInitializer, isMemberInitializer) {
return Node.isMemberInitializer();
}
/// Matches any argument of a call expression or a constructor call
/// expression, or an ObjC-message-send expression.
///
/// Given
/// \code
/// void x(int, int, int) { int y; x(1, y, 42); }
/// \endcode
/// callExpr(hasAnyArgument(declRefExpr()))
/// matches x(1, y, 42)
/// with hasAnyArgument(...)
/// matching y
///
/// For ObjectiveC, given
/// \code
/// @interface I - (void) f:(int) y; @end
/// void foo(I *i) { [i f:12]; }
/// \endcode
/// objcMessageExpr(hasAnyArgument(integerLiteral(equals(12))))
/// matches [i f:12]
AST_POLYMORPHIC_MATCHER_P(hasAnyArgument,
AST_POLYMORPHIC_SUPPORTED_TYPES(
CallExpr, CXXConstructExpr,
CXXUnresolvedConstructExpr, ObjCMessageExpr),
internal::Matcher<Expr>, InnerMatcher) {
for (const Expr *Arg : Node.arguments()) {
BoundNodesTreeBuilder Result(*Builder);
if (InnerMatcher.matches(*Arg, Finder, &Result)) {
*Builder = std::move(Result);
return true;
}
}
return false;
}
/// Matches any capture of a lambda expression.
///
/// Given
/// \code
/// void foo() {
/// int x;
/// auto f = [x](){};
/// }
/// \endcode
/// lambdaExpr(hasAnyCapture(anything()))
/// matches [x](){};
AST_MATCHER_P_OVERLOAD(LambdaExpr, hasAnyCapture, internal::Matcher<VarDecl>,
                       InnerMatcher, 0) {
  for (const LambdaCapture &Capture : Node.captures()) {
    // Only variable captures carry a VarDecl; skip everything else
    // (such as 'this' captures).
    if (!Capture.capturesVariable())
      continue;
    // Attempt the inner matcher on a scratch builder so a failed attempt
    // leaves previously bound nodes untouched.
    BoundNodesTreeBuilder CaptureBindings(*Builder);
    if (InnerMatcher.matches(*Capture.getCapturedVar(), Finder,
                             &CaptureBindings)) {
      *Builder = std::move(CaptureBindings);
      return true;
    }
  }
  return false;
}
/// Matches any capture of 'this' in a lambda expression.
///
/// Given
/// \code
/// struct foo {
/// void bar() {
/// auto f = [this](){};
/// }
/// };
/// \endcode
/// lambdaExpr(hasAnyCapture(cxxThisExpr()))
/// matches [this](){};
AST_MATCHER_P_OVERLOAD(LambdaExpr, hasAnyCapture,
internal::Matcher<CXXThisExpr>, InnerMatcher, 1) {
return llvm::any_of(Node.captures(), [](const LambdaCapture &LC) {
return LC.capturesThis();
});
}
/// Matches a constructor call expression which uses list initialization.
AST_MATCHER(CXXConstructExpr, isListInitialization) {
return Node.isListInitialization();
}
/// Matches a constructor call expression which requires
/// zero initialization.
///
/// Given
/// \code
/// void foo() {
/// struct point { double x; double y; };
/// point pt[2] = { { 1.0, 2.0 } };
/// }
/// \endcode
/// initListExpr(has(cxxConstructExpr(requiresZeroInitialization()))
/// will match the implicit array filler for pt[1].
AST_MATCHER(CXXConstructExpr, requiresZeroInitialization) {
return Node.requiresZeroInitialization();
}
/// Matches the n'th parameter of a function or an ObjC method
/// declaration or a block.
///
/// Given
/// \code
/// class X { void f(int x) {} };
/// \endcode
/// cxxMethodDecl(hasParameter(0, hasType(varDecl())))
/// matches f(int x) {}
/// with hasParameter(...)
/// matching int x
///
/// For ObjectiveC, given
/// \code
/// @interface I - (void) f:(int) y; @end
/// \endcode
///
/// the matcher objcMethodDecl(hasParameter(0, hasName("y")))
/// matches the declaration of method f with hasParameter
/// matching y.
AST_POLYMORPHIC_MATCHER_P2(hasParameter,
AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
ObjCMethodDecl,
BlockDecl),
unsigned, N, internal::Matcher<ParmVarDecl>,
InnerMatcher) {
return (N < Node.parameters().size()
&& InnerMatcher.matches(*Node.parameters()[N], Finder, Builder));
}
/// Matches all arguments and their respective ParmVarDecl.
///
/// Given
/// \code
/// void f(int i);
/// int y;
/// f(y);
/// \endcode
/// callExpr(
/// forEachArgumentWithParam(
/// declRefExpr(to(varDecl(hasName("y")))),
/// parmVarDecl(hasType(isInteger()))
/// ))
/// matches f(y);
/// with declRefExpr(...)
/// matching int y
/// and parmVarDecl(...)
/// matching int i
AST_POLYMORPHIC_MATCHER_P2(forEachArgumentWithParam,
                           AST_POLYMORPHIC_SUPPORTED_TYPES(CallExpr,
                                                           CXXConstructExpr),
                           internal::Matcher<Expr>, ArgMatcher,
                           internal::Matcher<ParmVarDecl>, ParamMatcher) {
  // Accumulates one bound-nodes entry per matching (argument, parameter)
  // pair; committed into Builder only once, after the loop.
  BoundNodesTreeBuilder Result;
  // The first argument of an overloaded member operator is the implicit object
  // argument of the method which should not be matched against a parameter, so
  // we skip over it here.
  BoundNodesTreeBuilder Matches;
  unsigned ArgIndex = cxxOperatorCallExpr(callee(cxxMethodDecl()))
                          .matches(Node, Finder, &Matches)
                      ? 1
                      : 0;
  // ParamIndex runs in lockstep with ArgIndex, offset by the skipped
  // implicit object argument above, so parameter 0 pairs with the first
  // "real" argument.
  int ParamIndex = 0;
  bool Matched = false;
  for (; ArgIndex < Node.getNumArgs(); ++ArgIndex) {
    // Scratch builder per argument: bindings survive only if both the
    // argument and its parameter match.
    BoundNodesTreeBuilder ArgMatches(*Builder);
    if (ArgMatcher.matches(*(Node.getArg(ArgIndex)->IgnoreParenCasts()),
                           Finder, &ArgMatches)) {
      BoundNodesTreeBuilder ParamMatches(ArgMatches);
      // Resolve the callee (constructor or function) and match the parameter
      // at the corresponding position.
      if (expr(anyOf(cxxConstructExpr(hasDeclaration(cxxConstructorDecl(
                         hasParameter(ParamIndex, ParamMatcher)))),
                     callExpr(callee(functionDecl(
                         hasParameter(ParamIndex, ParamMatcher))))))
              .matches(Node, Finder, &ParamMatches)) {
        Result.addMatch(ParamMatches);
        Matched = true;
      }
    }
    ++ParamIndex;
  }
  // Every matching pair is reported, not just the first.
  *Builder = std::move(Result);
  return Matched;
}
/// Matches any parameter of a function or an ObjC method declaration or a
/// block.
///
/// Does not match the 'this' parameter of a method.
///
/// Given
/// \code
/// class X { void f(int x, int y, int z) {} };
/// \endcode
/// cxxMethodDecl(hasAnyParameter(hasName("y")))
/// matches f(int x, int y, int z) {}
/// with hasAnyParameter(...)
/// matching int y
///
/// For ObjectiveC, given
/// \code
/// @interface I - (void) f:(int) y; @end
/// \endcode
///
/// the matcher objcMethodDecl(hasAnyParameter(hasName("y")))
/// matches the declaration of method f with hasParameter
/// matching y.
///
/// For blocks, given
/// \code
/// b = ^(int y) { printf("%d", y) };
/// \endcode
///
/// the matcher blockDecl(hasAnyParameter(hasName("y")))
/// matches the declaration of the block b with hasParameter
/// matching y.
AST_POLYMORPHIC_MATCHER_P(hasAnyParameter,
AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
ObjCMethodDecl,
BlockDecl),
internal::Matcher<ParmVarDecl>,
InnerMatcher) {
return matchesFirstInPointerRange(InnerMatcher, Node.param_begin(),
Node.param_end(), Finder, Builder);
}
/// Matches \c FunctionDecls and \c FunctionProtoTypes that have a
/// specific parameter count.
///
/// Given
/// \code
/// void f(int i) {}
/// void g(int i, int j) {}
/// void h(int i, int j);
/// void j(int i);
/// void k(int x, int y, int z, ...);
/// \endcode
/// functionDecl(parameterCountIs(2))
/// matches \c g and \c h
/// functionProtoType(parameterCountIs(2))
/// matches \c g and \c h
/// functionProtoType(parameterCountIs(3))
/// matches \c k
AST_POLYMORPHIC_MATCHER_P(parameterCountIs,
AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
FunctionProtoType),
unsigned, N) {
return Node.getNumParams() == N;
}
/// Matches \c FunctionDecls that have a noreturn attribute.
///
/// Given
/// \code
/// void nope();
/// [[noreturn]] void a();
/// __attribute__((noreturn)) void b();
/// struct c { [[noreturn]] c(); };
/// \endcode
/// functionDecl(isNoReturn())
/// matches all of those except
/// \code
/// void nope();
/// \endcode
AST_MATCHER(FunctionDecl, isNoReturn) { return Node.isNoReturn(); }
/// Matches the return type of a function declaration.
///
/// Given:
/// \code
/// class X { int f() { return 1; } };
/// \endcode
/// cxxMethodDecl(returns(asString("int")))
/// matches int f() { return 1; }
AST_MATCHER_P(FunctionDecl, returns,
internal::Matcher<QualType>, InnerMatcher) {
return InnerMatcher.matches(Node.getReturnType(), Finder, Builder);
}
/// Matches extern "C" function or variable declarations.
///
/// Given:
/// \code
/// extern "C" void f() {}
/// extern "C" { void g() {} }
/// void h() {}
/// extern "C" int x = 1;
/// extern "C" int y = 2;
/// int z = 3;
/// \endcode
/// functionDecl(isExternC())
/// matches the declaration of f and g, but not the declaration of h.
/// varDecl(isExternC())
/// matches the declaration of x and y, but not the declaration of z.
AST_POLYMORPHIC_MATCHER(isExternC, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
VarDecl)) {
return Node.isExternC();
}
/// Matches variable/function declarations that have "static" storage
/// class specifier ("static" keyword) written in the source.
///
/// Given:
/// \code
/// static void f() {}
/// static int i = 0;
/// extern int j;
/// int k;
/// \endcode
/// functionDecl(isStaticStorageClass())
/// matches the function declaration f.
/// varDecl(isStaticStorageClass())
/// matches the variable declaration i.
AST_POLYMORPHIC_MATCHER(isStaticStorageClass,
AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
VarDecl)) {
return Node.getStorageClass() == SC_Static;
}
/// Matches deleted function declarations.
///
/// Given:
/// \code
/// void Func();
/// void DeletedFunc() = delete;
/// \endcode
/// functionDecl(isDeleted())
/// matches the declaration of DeletedFunc, but not Func.
AST_MATCHER(FunctionDecl, isDeleted) {
return Node.isDeleted();
}
/// Matches defaulted function declarations.
///
/// Given:
/// \code
/// class A { ~A(); };
/// class B { ~B() = default; };
/// \endcode
/// functionDecl(isDefaulted())
/// matches the declaration of ~B, but not ~A.
AST_MATCHER(FunctionDecl, isDefaulted) {
return Node.isDefaulted();
}
/// Matches functions that have a dynamic exception specification.
///
/// Given:
/// \code
/// void f();
/// void g() noexcept;
/// void h() noexcept(true);
/// void i() noexcept(false);
/// void j() throw();
/// void k() throw(int);
/// void l() throw(...);
/// \endcode
/// functionDecl(hasDynamicExceptionSpec()) and
/// functionProtoType(hasDynamicExceptionSpec())
/// match the declarations of j, k, and l, but not f, g, h, or i.
AST_POLYMORPHIC_MATCHER(hasDynamicExceptionSpec,
AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
FunctionProtoType)) {
if (const FunctionProtoType *FnTy = internal::getFunctionProtoType(Node))
return FnTy->hasDynamicExceptionSpec();
return false;
}
/// Matches functions that have a non-throwing exception specification.
///
/// Given:
/// \code
/// void f();
/// void g() noexcept;
/// void h() throw();
/// void i() throw(int);
/// void j() noexcept(false);
/// \endcode
/// functionDecl(isNoThrow()) and functionProtoType(isNoThrow())
/// match the declarations of g and h, but not f, i or j.
AST_POLYMORPHIC_MATCHER(isNoThrow,
AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
FunctionProtoType)) {
const FunctionProtoType *FnTy = internal::getFunctionProtoType(Node);
// If the function does not have a prototype, then it is assumed to be a
// throwing function (as it would if the function did not have any exception
// specification).
if (!FnTy)
return false;
// Assume the best for any unresolved exception specification.
if (isUnresolvedExceptionSpec(FnTy->getExceptionSpecType()))
return true;
return FnTy->isNothrow();
}
/// Matches constexpr variable and function declarations,
/// and if constexpr statements.
///
/// Given:
/// \code
/// constexpr int foo = 42;
/// constexpr int bar();
/// void baz() { if constexpr(1 > 0) {} }
/// \endcode
/// varDecl(isConstexpr())
/// matches the declaration of foo.
/// functionDecl(isConstexpr())
/// matches the declaration of bar.
/// ifStmt(isConstexpr())
/// matches the if statement in baz.
AST_POLYMORPHIC_MATCHER(isConstexpr,
AST_POLYMORPHIC_SUPPORTED_TYPES(VarDecl,
FunctionDecl,
IfStmt)) {
return Node.isConstexpr();
}
/// Matches selection statements with initializer.
///
/// Given:
/// \code
/// void foo() {
/// if (int i = foobar(); i > 0) {}
/// switch (int i = foobar(); i) {}
/// for (auto& a = get_range(); auto& x : a) {}
/// }
/// void bar() {
/// if (foobar() > 0) {}
/// switch (foobar()) {}
/// for (auto& x : get_range()) {}
/// }
/// \endcode
/// ifStmt(hasInitStatement(anything()))
/// matches the if statement in foo but not in bar.
/// switchStmt(hasInitStatement(anything()))
/// matches the switch statement in foo but not in bar.
/// cxxForRangeStmt(hasInitStatement(anything()))
/// matches the range for statement in foo but not in bar.
AST_POLYMORPHIC_MATCHER_P(hasInitStatement,
AST_POLYMORPHIC_SUPPORTED_TYPES(IfStmt, SwitchStmt,
CXXForRangeStmt),
internal::Matcher<Stmt>, InnerMatcher) {
const Stmt *Init = Node.getInit();
return Init != nullptr && InnerMatcher.matches(*Init, Finder, Builder);
}
/// Matches the condition expression of an if statement, for loop,
/// switch statement or conditional operator.
///
/// Example matches true (matcher = hasCondition(cxxBoolLiteral(equals(true))))
/// \code
/// if (true) {}
/// \endcode
AST_POLYMORPHIC_MATCHER_P(
hasCondition,
AST_POLYMORPHIC_SUPPORTED_TYPES(IfStmt, ForStmt, WhileStmt, DoStmt,
SwitchStmt, AbstractConditionalOperator),
internal::Matcher<Expr>, InnerMatcher) {
const Expr *const Condition = Node.getCond();
return (Condition != nullptr &&
InnerMatcher.matches(*Condition, Finder, Builder));
}
/// Matches the then-statement of an if statement.
///
/// Examples matches the if statement
/// (matcher = ifStmt(hasThen(cxxBoolLiteral(equals(true)))))
/// \code
/// if (false) true; else false;
/// \endcode
AST_MATCHER_P(IfStmt, hasThen, internal::Matcher<Stmt>, InnerMatcher) {
const Stmt *const Then = Node.getThen();
return (Then != nullptr && InnerMatcher.matches(*Then, Finder, Builder));
}
/// Matches the else-statement of an if statement.
///
/// Examples matches the if statement
/// (matcher = ifStmt(hasElse(cxxBoolLiteral(equals(true)))))
/// \code
/// if (false) false; else true;
/// \endcode
AST_MATCHER_P(IfStmt, hasElse, internal::Matcher<Stmt>, InnerMatcher) {
const Stmt *const Else = Node.getElse();
return (Else != nullptr && InnerMatcher.matches(*Else, Finder, Builder));
}
/// Matches if a node equals a previously bound node.
///
/// Matches a node if it equals the node previously bound to \p ID.
///
/// Given
/// \code
/// class X { int a; int b; };
/// \endcode
/// cxxRecordDecl(
/// has(fieldDecl(hasName("a"), hasType(type().bind("t")))),
/// has(fieldDecl(hasName("b"), hasType(type(equalsBoundNode("t"))))))
/// matches the class \c X, as \c a and \c b have the same type.
///
/// Note that when multiple matches are involved via \c forEach* matchers,
/// \c equalsBoundNode acts as a filter.
/// For example:
/// compoundStmt(
/// forEachDescendant(varDecl().bind("d")),
/// forEachDescendant(declRefExpr(to(decl(equalsBoundNode("d"))))))
/// will trigger a match for each combination of variable declaration
/// and reference to that variable declaration within a compound statement.
AST_POLYMORPHIC_MATCHER_P(equalsBoundNode,
AST_POLYMORPHIC_SUPPORTED_TYPES(Stmt, Decl, Type,
QualType),
std::string, ID) {
// FIXME: Figure out whether it makes sense to allow this
// on any other node types.
// For *Loc it probably does not make sense, as those seem
// unique. For NestedNameSpecifier it might make sense, as
// those also have pointer identity, but I'm not sure whether
// they're ever reused.
internal::NotEqualsBoundNodePredicate Predicate;
Predicate.ID = ID;
Predicate.Node = DynTypedNode::create(Node);
return Builder->removeBindings(Predicate);
}
/// Matches the condition variable statement in an if statement.
///
/// Given
/// \code
/// if (A* a = GetAPointer()) {}
/// \endcode
/// hasConditionVariableStatement(...)
/// matches 'A* a = GetAPointer()'.
AST_MATCHER_P(IfStmt, hasConditionVariableStatement,
internal::Matcher<DeclStmt>, InnerMatcher) {
const DeclStmt* const DeclarationStatement =
Node.getConditionVariableDeclStmt();
return DeclarationStatement != nullptr &&
InnerMatcher.matches(*DeclarationStatement, Finder, Builder);
}
/// Matches the index expression of an array subscript expression.
///
/// Given
/// \code
/// int i[5];
/// void f() { i[1] = 42; }
/// \endcode
/// arraySubscriptExpr(hasIndex(integerLiteral()))
/// matches \c i[1] with the \c integerLiteral() matching \c 1
AST_MATCHER_P(ArraySubscriptExpr, hasIndex,
internal::Matcher<Expr>, InnerMatcher) {
if (const Expr* Expression = Node.getIdx())
return InnerMatcher.matches(*Expression, Finder, Builder);
return false;
}
/// Matches the base expression of an array subscript expression.
///
/// Given
/// \code
/// int i[5];
/// void f() { i[1] = 42; }
/// \endcode
/// arraySubscriptExpr(hasBase(implicitCastExpr(
/// hasSourceExpression(declRefExpr()))))
/// matches \c i[1] with the \c declRefExpr() matching \c i
AST_MATCHER_P(ArraySubscriptExpr, hasBase,
internal::Matcher<Expr>, InnerMatcher) {
if (const Expr* Expression = Node.getBase())
return InnerMatcher.matches(*Expression, Finder, Builder);
return false;
}
/// Matches a 'for', 'while', 'do while' statement or a function
/// definition that has a given body.
///
/// Given
/// \code
/// for (;;) {}
/// \endcode
/// hasBody(compoundStmt())
/// matches 'for (;;) {}'
/// with compoundStmt()
/// matching '{}'
AST_POLYMORPHIC_MATCHER_P(hasBody,
AST_POLYMORPHIC_SUPPORTED_TYPES(DoStmt, ForStmt,
WhileStmt,
CXXForRangeStmt,
FunctionDecl),
internal::Matcher<Stmt>, InnerMatcher) {
const Stmt *const Statement = internal::GetBodyMatcher<NodeType>::get(Node);
return (Statement != nullptr &&
InnerMatcher.matches(*Statement, Finder, Builder));
}
/// Matches compound statements where at least one substatement matches
/// a given matcher. Also matches StmtExprs that have CompoundStmt as children.
///
/// Given
/// \code
/// { {}; 1+2; }
/// \endcode
/// hasAnySubstatement(compoundStmt())
/// matches '{ {}; 1+2; }'
/// with compoundStmt()
/// matching '{}'
AST_POLYMORPHIC_MATCHER_P(hasAnySubstatement,
AST_POLYMORPHIC_SUPPORTED_TYPES(CompoundStmt,
StmtExpr),
internal::Matcher<Stmt>, InnerMatcher) {
const CompoundStmt *CS = CompoundStmtMatcher<NodeType>::get(Node);
return CS && matchesFirstInPointerRange(InnerMatcher, CS->body_begin(),
CS->body_end(), Finder, Builder);
}
/// Checks that a compound statement contains a specific number of
/// child statements.
///
/// Example: Given
/// \code
/// { for (;;) {} }
/// \endcode
/// compoundStmt(statementCountIs(0))
/// matches '{}'
/// but does not match the outer compound statement.
AST_MATCHER_P(CompoundStmt, statementCountIs, unsigned, N) {
return Node.size() == N;
}
/// Matches literals that are equal to the given value of type ValueT.
///
/// Given
/// \code
/// f('\0', false, 3.14, 42);
/// \endcode
/// characterLiteral(equals(0))
/// matches '\0'
/// cxxBoolLiteral(equals(false)) and cxxBoolLiteral(equals(0))
/// match false
/// floatLiteral(equals(3.14)) and floatLiteral(equals(314e-2))
/// match 3.14
/// integerLiteral(equals(42))
/// matches 42
///
/// Note that you cannot directly match a negative numeric literal because the
/// minus sign is not part of the literal: It is a unary operator whose operand
/// is the positive numeric literal. Instead, you must use a unaryOperator()
/// matcher to match the minus sign:
///
/// unaryOperator(hasOperatorName("-"),
/// hasUnaryOperand(integerLiteral(equals(13))))
///
/// Usable as: Matcher<CharacterLiteral>, Matcher<CXXBoolLiteralExpr>,
/// Matcher<FloatingLiteral>, Matcher<IntegerLiteral>
template <typename ValueT>
internal::PolymorphicMatcherWithParam1<internal::ValueEqualsMatcher, ValueT>
equals(const ValueT &Value) {
return internal::PolymorphicMatcherWithParam1<
internal::ValueEqualsMatcher,
ValueT>(Value);
}
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(equals,
AST_POLYMORPHIC_SUPPORTED_TYPES(CharacterLiteral,
CXXBoolLiteralExpr,
IntegerLiteral),
bool, Value, 0) {
// Delegate to the literal-value comparison for this node/parameter pair.
return internal::ValueEqualsMatcher<NodeType, ParamT>(Value)
.matchesNode(Node);
}
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(equals,
AST_POLYMORPHIC_SUPPORTED_TYPES(CharacterLiteral,
CXXBoolLiteralExpr,
IntegerLiteral),
unsigned, Value, 1) {
return internal::ValueEqualsMatcher<NodeType, ParamT>(Value)
.matchesNode(Node);
}
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(equals,
AST_POLYMORPHIC_SUPPORTED_TYPES(CharacterLiteral,
CXXBoolLiteralExpr,
FloatingLiteral,
IntegerLiteral),
double, Value, 2) {
return internal::ValueEqualsMatcher<NodeType, ParamT>(Value)
.matchesNode(Node);
}
/// Matches the operator Name of operator expressions (binary or
/// unary).
///
/// Example matches a || b (matcher = binaryOperator(hasOperatorName("||")))
/// \code
/// !(a || b)
/// \endcode
AST_POLYMORPHIC_MATCHER_P(hasOperatorName,
AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator,
UnaryOperator),
std::string, Name) {
return Name == Node.getOpcodeStr(Node.getOpcode());
}
/// Matches operator expressions (binary or unary) that have any of the
/// specified names.
///
/// hasAnyOperatorName("+", "-")
/// Is equivalent to
/// anyOf(hasOperatorName("+"), hasOperatorName("-"))
extern const internal::VariadicFunction<
internal::PolymorphicMatcherWithParam1<
internal::HasAnyOperatorNameMatcher, std::vector<std::string>,
AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator, UnaryOperator)>,
StringRef, internal::hasAnyOperatorNameFunc>
hasAnyOperatorName;
/// Matches all kinds of assignment operators.
///
/// Example 1: matches a += b (matcher = binaryOperator(isAssignmentOperator()))
/// \code
/// if (a == b)
/// a += b;
/// \endcode
///
/// Example 2: matches s1 = s2
/// (matcher = cxxOperatorCallExpr(isAssignmentOperator()))
/// \code
/// struct S { S& operator=(const S&); };
/// void x() { S s1, s2; s1 = s2; }
/// \endcode
AST_POLYMORPHIC_MATCHER(isAssignmentOperator,
AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator,
CXXOperatorCallExpr)) {
return Node.isAssignmentOp();
}
/// Matches the left hand side of binary operator expressions.
///
/// Example matches a (matcher = binaryOperator(hasLHS()))
/// \code
/// a || b
/// \endcode
AST_POLYMORPHIC_MATCHER_P(hasLHS,
AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator,
ArraySubscriptExpr),
internal::Matcher<Expr>, InnerMatcher) {
// Matches only when the LHS exists and satisfies the inner matcher.
const Expr *LeftHandSide = Node.getLHS();
return (LeftHandSide != nullptr &&
InnerMatcher.matches(*LeftHandSide, Finder, Builder));
}
/// Matches the right hand side of binary operator expressions.
///
/// Example matches b (matcher = binaryOperator(hasRHS()))
/// \code
/// a || b
/// \endcode
AST_POLYMORPHIC_MATCHER_P(hasRHS,
AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator,
ArraySubscriptExpr),
internal::Matcher<Expr>, InnerMatcher) {
const Expr *RightHandSide = Node.getRHS();
return (RightHandSide != nullptr &&
InnerMatcher.matches(*RightHandSide, Finder, Builder));
}
/// Matches if either the left hand side or the right hand side of a
/// binary operator matches.
inline internal::Matcher<BinaryOperator> hasEitherOperand(
const internal::Matcher<Expr> &InnerMatcher) {
return anyOf(hasLHS(InnerMatcher), hasRHS(InnerMatcher));
}
/// Matches if the operand of a unary operator matches.
///
/// Example matches true (matcher = hasUnaryOperand(
/// cxxBoolLiteral(equals(true))))
/// \code
/// !true
/// \endcode
AST_MATCHER_P(UnaryOperator, hasUnaryOperand,
internal::Matcher<Expr>, InnerMatcher) {
const Expr * const Operand = Node.getSubExpr();
return (Operand != nullptr &&
InnerMatcher.matches(*Operand, Finder, Builder));
}
/// Matches if the cast's source expression
/// or opaque value's source expression matches the given matcher.
///
/// Example 1: matches "a string"
/// (matcher = castExpr(hasSourceExpression(cxxConstructExpr())))
/// \code
/// class URL { URL(string); };
/// URL url = "a string";
/// \endcode
///
/// Example 2: matches 'b' (matcher =
/// opaqueValueExpr(hasSourceExpression(implicitCastExpr(declRefExpr())))
/// \code
/// int a = b ?: 1;
/// \endcode
AST_POLYMORPHIC_MATCHER_P(hasSourceExpression,
AST_POLYMORPHIC_SUPPORTED_TYPES(CastExpr,
OpaqueValueExpr),
internal::Matcher<Expr>, InnerMatcher) {
const Expr *const SubExpression =
internal::GetSourceExpressionMatcher<NodeType>::get(Node);
return (SubExpression != nullptr &&
InnerMatcher.matches(*SubExpression, Finder, Builder));
}
/// Matches casts that have a given cast kind.
///
/// Example: matches the implicit cast around \c 0
/// (matcher = castExpr(hasCastKind(CK_NullToPointer)))
/// \code
/// int *p = 0;
/// \endcode
///
/// If the matcher is used from clang-query, CastKind parameter
/// should be passed as a quoted string. e.g., ofKind("CK_NullToPointer").
AST_MATCHER_P(CastExpr, hasCastKind, CastKind, Kind) {
return Node.getCastKind() == Kind;
}
/// Matches casts whose destination type matches a given matcher.
///
/// (Note: Clang's AST refers to other conversions as "casts" too, and calls
/// actual casts "explicit" casts.)
AST_MATCHER_P(ExplicitCastExpr, hasDestinationType,
internal::Matcher<QualType>, InnerMatcher) {
const QualType NodeType = Node.getTypeAsWritten();
return InnerMatcher.matches(NodeType, Finder, Builder);
}
/// Matches implicit casts whose destination type matches a given
/// matcher.
///
/// FIXME: Unit test this matcher
AST_MATCHER_P(ImplicitCastExpr, hasImplicitDestinationType,
internal::Matcher<QualType>, InnerMatcher) {
return InnerMatcher.matches(Node.getType(), Finder, Builder);
}
/// Matches TagDecl objects that are spelled with "struct."
///
/// Example matches S, but not C, U or E.
/// \code
/// struct S {};
/// class C {};
/// union U {};
/// enum E {};
/// \endcode
AST_MATCHER(TagDecl, isStruct) {
return Node.isStruct();
}
/// Matches TagDecl objects that are spelled with "union."
///
/// Example matches U, but not C, S or E.
/// \code
/// struct S {};
/// class C {};
/// union U {};
/// enum E {};
/// \endcode
AST_MATCHER(TagDecl, isUnion) {
return Node.isUnion();
}
/// Matches TagDecl objects that are spelled with "class."
///
/// Example matches C, but not S, U or E.
/// \code
/// struct S {};
/// class C {};
/// union U {};
/// enum E {};
/// \endcode
AST_MATCHER(TagDecl, isClass) {
return Node.isClass();
}
/// Matches TagDecl objects that are spelled with "enum."
///
/// Example matches E, but not C, S or U.
/// \code
/// struct S {};
/// class C {};
/// union U {};
/// enum E {};
/// \endcode
AST_MATCHER(TagDecl, isEnum) {
return Node.isEnum();
}
/// Matches the true branch expression of a conditional operator.
///
/// Example 1 (conditional ternary operator): matches a
/// \code
/// condition ? a : b
/// \endcode
///
/// Example 2 (conditional binary operator): matches opaqueValueExpr(condition)
/// \code
/// condition ?: b
/// \endcode
AST_MATCHER_P(AbstractConditionalOperator, hasTrueExpression,
internal::Matcher<Expr>, InnerMatcher) {
const Expr *Expression = Node.getTrueExpr();
return (Expression != nullptr &&
InnerMatcher.matches(*Expression, Finder, Builder));
}
/// Matches the false branch expression of a conditional operator
/// (binary or ternary).
///
/// Example matches b
/// \code
/// condition ? a : b
/// condition ?: b
/// \endcode
AST_MATCHER_P(AbstractConditionalOperator, hasFalseExpression,
internal::Matcher<Expr>, InnerMatcher) {
const Expr *Expression = Node.getFalseExpr();
return (Expression != nullptr &&
InnerMatcher.matches(*Expression, Finder, Builder));
}
/// Matches if a declaration has a body attached.
///
/// Example matches A, va and fa
/// \code
/// class A {};
/// class B; // Doesn't match, as it has no body.
/// int va;
/// extern int vb; // Doesn't match, as it doesn't define the variable.
/// void fa() {}
/// void fb(); // Doesn't match, as it has no body.
/// @interface X
/// - (void)ma; // Doesn't match, interface is declaration.
/// @end
/// @implementation X
/// - (void)ma {}
/// @end
/// \endcode
///
/// Usable as: Matcher<TagDecl>, Matcher<VarDecl>, Matcher<FunctionDecl>,
/// Matcher<ObjCMethodDecl>
AST_POLYMORPHIC_MATCHER(isDefinition,
AST_POLYMORPHIC_SUPPORTED_TYPES(TagDecl, VarDecl,
ObjCMethodDecl,
FunctionDecl)) {
return Node.isThisDeclarationADefinition();
}
/// Matches if a function declaration is variadic.
///
/// Example matches f, but not g or h. The function i will not match, even when
/// compiled in C mode.
/// \code
/// void f(...);
/// void g(int);
/// template <typename... Ts> void h(Ts...);
/// void i();
/// \endcode
AST_MATCHER(FunctionDecl, isVariadic) {
return Node.isVariadic();
}
/// Matches the class declaration that the given method declaration
/// belongs to.
///
/// FIXME: Generalize this for other kinds of declarations.
/// FIXME: What other kind of declarations would we need to generalize
/// this to?
///
/// Example matches A() in the last line
/// (matcher = cxxConstructExpr(hasDeclaration(cxxMethodDecl(
/// ofClass(hasName("A"))))))
/// \code
/// class A {
/// public:
/// A();
/// };
/// A a = A();
/// \endcode
AST_MATCHER_P(CXXMethodDecl, ofClass,
internal::Matcher<CXXRecordDecl>, InnerMatcher) {
const CXXRecordDecl *Parent = Node.getParent();
return (Parent != nullptr &&
InnerMatcher.matches(*Parent, Finder, Builder));
}
/// Matches each method overridden by the given method. This matcher may
/// produce multiple matches.
///
/// Given
/// \code
/// class A { virtual void f(); };
/// class B : public A { void f(); };
/// class C : public B { void f(); };
/// \endcode
/// cxxMethodDecl(ofClass(hasName("C")),
/// forEachOverridden(cxxMethodDecl().bind("b"))).bind("d")
/// matches once, with "b" binding "A::f" and "d" binding "C::f" (Note
/// that B::f is not overridden by C::f).
///
/// The check can produce multiple matches in case of multiple inheritance, e.g.
/// \code
/// class A1 { virtual void f(); };
/// class A2 { virtual void f(); };
/// class C : public A1, public A2 { void f(); };
/// \endcode
/// cxxMethodDecl(ofClass(hasName("C")),
/// forEachOverridden(cxxMethodDecl().bind("b"))).bind("d")
/// matches twice, once with "b" binding "A1::f" and "d" binding "C::f", and
/// once with "b" binding "A2::f" and "d" binding "C::f".
AST_MATCHER_P(CXXMethodDecl, forEachOverridden,
internal::Matcher<CXXMethodDecl>, InnerMatcher) {
// Accumulates the bindings of every overridden method that matches.
BoundNodesTreeBuilder Result;
bool Matched = false;
for (const auto *Overridden : Node.overridden_methods()) {
// Match against a copy of the current bindings so that a failed
// branch does not pollute the caller's builder.
BoundNodesTreeBuilder OverriddenBuilder(*Builder);
const bool OverriddenMatched =
InnerMatcher.matches(*Overridden, Finder, &OverriddenBuilder);
if (OverriddenMatched) {
Matched = true;
Result.addMatch(OverriddenBuilder);
}
}
// Expose only the accumulated successful matches to the caller.
*Builder = std::move(Result);
return Matched;
}
/// Matches if the given method declaration is virtual.
///
/// Given
/// \code
/// class A {
/// public:
/// virtual void x();
/// };
/// \endcode
/// matches A::x
AST_MATCHER(CXXMethodDecl, isVirtual) {
return Node.isVirtual();
}
/// Matches if the given method declaration has an explicit "virtual".
///
/// Given
/// \code
/// class A {
/// public:
/// virtual void x();
/// };
/// class B : public A {
/// public:
/// void x();
/// };
/// \endcode
/// matches A::x but not B::x
AST_MATCHER(CXXMethodDecl, isVirtualAsWritten) {
return Node.isVirtualAsWritten();
}
/// Matches if the given method or class declaration is final.
///
/// Given:
/// \code
/// class A final {};
///
/// struct B {
/// virtual void f();
/// };
///
/// struct C : B {
/// void f() final;
/// };
/// \endcode
/// matches A and C::f, but not B, C, or B::f
AST_POLYMORPHIC_MATCHER(isFinal,
AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl,
CXXMethodDecl)) {
// 'template' keyword is required because the node's type is dependent
// inside this polymorphic matcher.
return Node.template hasAttr<FinalAttr>();
}
/// Matches if the given method declaration is pure.
///
/// Given
/// \code
/// class A {
/// public:
/// virtual void x() = 0;
/// };
/// \endcode
/// matches A::x
AST_MATCHER(CXXMethodDecl, isPure) {
return Node.isPure();
}
/// Matches if the given method declaration is const.
///
/// Given
/// \code
/// struct A {
/// void foo() const;
/// void bar();
/// };
/// \endcode
///
/// cxxMethodDecl(isConst()) matches A::foo() but not A::bar()
AST_MATCHER(CXXMethodDecl, isConst) {
return Node.isConst();
}
/// Matches if the given method declaration declares a copy assignment
/// operator.
///
/// Given
/// \code
/// struct A {
/// A &operator=(const A &);
/// A &operator=(A &&);
/// };
/// \endcode
///
/// cxxMethodDecl(isCopyAssignmentOperator()) matches the first method but not
/// the second one.
AST_MATCHER(CXXMethodDecl, isCopyAssignmentOperator) {
return Node.isCopyAssignmentOperator();
}
/// Matches if the given method declaration declares a move assignment
/// operator.
///
/// Given
/// \code
/// struct A {
/// A &operator=(const A &);
/// A &operator=(A &&);
/// };
/// \endcode
///
/// cxxMethodDecl(isMoveAssignmentOperator()) matches the second method but not
/// the first one.
AST_MATCHER(CXXMethodDecl, isMoveAssignmentOperator) {
return Node.isMoveAssignmentOperator();
}
/// Matches if the given method declaration overrides another method.
///
/// Given
/// \code
/// class A {
/// public:
/// virtual void x();
/// };
/// class B : public A {
/// public:
/// virtual void x();
/// };
/// \endcode
/// matches B::x
AST_MATCHER(CXXMethodDecl, isOverride) {
// Either the method actually overrides something, or it carries an
// explicit 'override' attribute.
return Node.size_overridden_methods() > 0 || Node.hasAttr<OverrideAttr>();
}
/// Matches method declarations that are user-provided.
///
/// Given
/// \code
/// struct S {
/// S(); // #1
/// S(const S &) = default; // #2
/// S(S &&) = delete; // #3
/// };
/// \endcode
/// cxxConstructorDecl(isUserProvided()) will match #1, but not #2 or #3.
AST_MATCHER(CXXMethodDecl, isUserProvided) {
return Node.isUserProvided();
}
/// Matches member expressions that are called with '->' as opposed
/// to '.'.
///
/// Member calls on the implicit this pointer match as called with '->'.
///
/// Given
/// \code
/// class Y {
/// void x() { this->x(); x(); Y y; y.x(); a; this->b; Y::b; }
/// template <class T> void f() { this->f<T>(); f<T>(); }
/// int a;
/// static int b;
/// };
/// template <class T>
/// class Z {
/// void x() { this->m; }
/// };
/// \endcode
/// memberExpr(isArrow())
/// matches this->x, x, y.x, a, this->b
/// cxxDependentScopeMemberExpr(isArrow())
/// matches this->m
/// unresolvedMemberExpr(isArrow())
/// matches this->f<T>, f<T>
AST_POLYMORPHIC_MATCHER(
isArrow, AST_POLYMORPHIC_SUPPORTED_TYPES(MemberExpr, UnresolvedMemberExpr,
CXXDependentScopeMemberExpr)) {
return Node.isArrow();
}
/// Matches QualType nodes that are of integer type.
///
/// Given
/// \code
/// void a(int);
/// void b(long);
/// void c(double);
/// \endcode
/// functionDecl(hasAnyParameter(hasType(isInteger())))
/// matches "a(int)", "b(long)", but not "c(double)".
AST_MATCHER(QualType, isInteger) {
// QualType's operator-> reaches the underlying Type.
return Node->isIntegerType();
}
/// Matches QualType nodes that are of unsigned integer type.
///
/// Given
/// \code
/// void a(int);
/// void b(unsigned long);
/// void c(double);
/// \endcode
/// functionDecl(hasAnyParameter(hasType(isUnsignedInteger())))
/// matches "b(unsigned long)", but not "a(int)" and "c(double)".
AST_MATCHER(QualType, isUnsignedInteger) {
return Node->isUnsignedIntegerType();
}
/// Matches QualType nodes that are of signed integer type.
///
/// Given
/// \code
/// void a(int);
/// void b(unsigned long);
/// void c(double);
/// \endcode
/// functionDecl(hasAnyParameter(hasType(isSignedInteger())))
/// matches "a(int)", but not "b(unsigned long)" and "c(double)".
AST_MATCHER(QualType, isSignedInteger) {
return Node->isSignedIntegerType();
}
/// Matches QualType nodes that are of character type.
///
/// Given
/// \code
/// void a(char);
/// void b(wchar_t);
/// void c(double);
/// \endcode
/// functionDecl(hasAnyParameter(hasType(isAnyCharacter())))
/// matches "a(char)", "b(wchar_t)", but not "c(double)".
AST_MATCHER(QualType, isAnyCharacter) {
return Node->isAnyCharacterType();
}
/// Matches QualType nodes that are of any pointer type; this includes
/// the Objective-C object pointer type, which is different despite being
/// syntactically similar.
///
/// Given
/// \code
/// int *i = nullptr;
///
/// @interface Foo
/// @end
/// Foo *f;
///
/// int j;
/// \endcode
/// varDecl(hasType(isAnyPointer()))
/// matches "int *i" and "Foo *f", but not "int j".
AST_MATCHER(QualType, isAnyPointer) {
return Node->isAnyPointerType();
}
/// Matches QualType nodes that are const-qualified, i.e., that
/// include "top-level" const.
///
/// Given
/// \code
/// void a(int);
/// void b(int const);
/// void c(const int);
/// void d(const int*);
/// void e(int const) {};
/// \endcode
/// functionDecl(hasAnyParameter(hasType(isConstQualified())))
/// matches "void b(int const)", "void c(const int)" and
/// "void e(int const) {}". It does not match d as there
/// is no top-level const on the parameter type "const int *".
AST_MATCHER(QualType, isConstQualified) {
return Node.isConstQualified();
}
/// Matches QualType nodes that are volatile-qualified, i.e., that
/// include "top-level" volatile.
///
/// Given
/// \code
/// void a(int);
/// void b(int volatile);
/// void c(volatile int);
/// void d(volatile int*);
/// void e(int volatile) {};
/// \endcode
/// functionDecl(hasAnyParameter(hasType(isVolatileQualified())))
/// matches "void b(int volatile)", "void c(volatile int)" and
/// "void e(int volatile) {}". It does not match d as there
/// is no top-level volatile on the parameter type "volatile int *".
AST_MATCHER(QualType, isVolatileQualified) {
return Node.isVolatileQualified();
}
/// Matches QualType nodes that have local CV-qualifiers attached to
/// the node, not hidden within a typedef.
///
/// Given
/// \code
/// typedef const int const_int;
/// const_int i;
/// int *const j;
/// int *volatile k;
/// int m;
/// \endcode
/// \c varDecl(hasType(hasLocalQualifiers())) matches only \c j and \c k.
/// \c i is const-qualified but the qualifier is not local.
AST_MATCHER(QualType, hasLocalQualifiers) {
return Node.hasLocalQualifiers();
}
/// Matches a member expression where the member is matched by a
/// given matcher.
///
/// Given
/// \code
/// struct { int first, second; } first, second;
/// int i(second.first);
/// int j(first.second);
/// \endcode
/// memberExpr(member(hasName("first")))
/// matches second.first
/// but not first.second (because the member name there is "second").
AST_MATCHER_P(MemberExpr, member,
internal::Matcher<ValueDecl>, InnerMatcher) {
// No null check: the member declaration is dereferenced directly.
return InnerMatcher.matches(*Node.getMemberDecl(), Finder, Builder);
}
/// Matches a member expression where the object expression is matched by a
/// given matcher. Implicit object expressions are included; that is, it matches
/// use of implicit `this`.
///
/// Given
/// \code
/// struct X {
/// int m;
/// int f(X x) { x.m; return m; }
/// };
/// \endcode
/// memberExpr(hasObjectExpression(hasType(cxxRecordDecl(hasName("X")))))
/// matches `x.m`, but not `m`; however,
/// memberExpr(hasObjectExpression(hasType(pointsTo(
/// cxxRecordDecl(hasName("X"))))))
/// matches `m` (aka. `this->m`), but not `x.m`.
AST_POLYMORPHIC_MATCHER_P(
hasObjectExpression,
AST_POLYMORPHIC_SUPPORTED_TYPES(MemberExpr, UnresolvedMemberExpr,
CXXDependentScopeMemberExpr),
internal::Matcher<Expr>, InnerMatcher) {
// Implicit accesses have no written base expression to match against.
if (const auto *E = dyn_cast<UnresolvedMemberExpr>(&Node))
if (E->isImplicitAccess())
return false;
if (const auto *E = dyn_cast<CXXDependentScopeMemberExpr>(&Node))
if (E->isImplicitAccess())
return false;
return InnerMatcher.matches(*Node.getBase(), Finder, Builder);
}
/// Matches any using shadow declaration.
///
/// Given
/// \code
/// namespace X { void b(); }
/// using X::b;
/// \endcode
/// usingDecl(hasAnyUsingShadowDecl(hasName("b")))
/// matches \code using X::b \endcode
AST_MATCHER_P(UsingDecl, hasAnyUsingShadowDecl,
internal::Matcher<UsingShadowDecl>, InnerMatcher) {
return matchesFirstInPointerRange(InnerMatcher, Node.shadow_begin(),
Node.shadow_end(), Finder, Builder);
}
/// Matches a using shadow declaration where the target declaration is
/// matched by the given matcher.
///
/// Given
/// \code
/// namespace X { int a; void b(); }
/// using X::a;
/// using X::b;
/// \endcode
/// usingDecl(hasAnyUsingShadowDecl(hasTargetDecl(functionDecl())))
/// matches \code using X::b \endcode
/// but not \code using X::a \endcode
AST_MATCHER_P(UsingShadowDecl, hasTargetDecl,
internal::Matcher<NamedDecl>, InnerMatcher) {
return InnerMatcher.matches(*Node.getTargetDecl(), Finder, Builder);
}
/// Matches template instantiations of function, class, or static
/// member variable template instantiations.
///
/// Given
/// \code
/// template <typename T> class X {}; class A {}; X<A> x;
/// \endcode
/// or
/// \code
/// template <typename T> class X {}; class A {}; template class X<A>;
/// \endcode
/// or
/// \code
/// template <typename T> class X {}; class A {}; extern template class X<A>;
/// \endcode
/// cxxRecordDecl(hasName("::X"), isTemplateInstantiation())
/// matches the template instantiation of X<A>.
///
/// But given
/// \code
/// template <typename T> class X {}; class A {};
/// template <> class X<A> {}; X<A> x;
/// \endcode
/// cxxRecordDecl(hasName("::X"), isTemplateInstantiation())
/// does not match, as X<A> is an explicit template specialization.
///
/// Usable as: Matcher<FunctionDecl>, Matcher<VarDecl>, Matcher<CXXRecordDecl>
AST_POLYMORPHIC_MATCHER(isTemplateInstantiation,
AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, VarDecl,
CXXRecordDecl)) {
return (Node.getTemplateSpecializationKind() == TSK_ImplicitInstantiation ||
Node.getTemplateSpecializationKind() ==
TSK_ExplicitInstantiationDefinition ||
Node.getTemplateSpecializationKind() ==
TSK_ExplicitInstantiationDeclaration);
}
/// Matches declarations that are template instantiations or are inside
/// template instantiations.
///
/// Given
/// \code
/// template<typename T> void A(T t) { T i; }
/// A(0);
/// A(0U);
/// \endcode
/// functionDecl(isInstantiated())
/// matches 'A(int) {...};' and 'A(unsigned) {...}'.
AST_MATCHER_FUNCTION(internal::Matcher<Decl>, isInstantiated) {
  // A declaration qualifies if it is itself an instantiated class or
  // function template, or lies anywhere inside such an instantiation.
  auto InstantiationMatcher =
      decl(anyOf(cxxRecordDecl(isTemplateInstantiation()),
                 functionDecl(isTemplateInstantiation())));
  return decl(anyOf(InstantiationMatcher, hasAncestor(InstantiationMatcher)));
}
/// Matches statements inside of a template instantiation.
///
/// Given
/// \code
/// int j;
/// template<typename T> void A(T t) { T i; j += 42;}
/// A(0);
/// A(0U);
/// \endcode
/// declStmt(isInTemplateInstantiation())
/// matches 'int i;' and 'unsigned i'.
/// unless(stmt(isInTemplateInstantiation()))
/// will NOT match j += 42; as it's shared between the template definition and
/// instantiation.
AST_MATCHER_FUNCTION(internal::Matcher<Stmt>, isInTemplateInstantiation) {
  // A statement is "in" an instantiation when one of its ancestor
  // declarations is an instantiated class or function template.
  auto InstantiationMatcher =
      decl(anyOf(cxxRecordDecl(isTemplateInstantiation()),
                 functionDecl(isTemplateInstantiation())));
  return stmt(hasAncestor(InstantiationMatcher));
}
/// Matches explicit template specializations of function, class, or
/// static member variable template instantiations.
///
/// Given
/// \code
/// template<typename T> void A(T t) { }
/// template<> void A(int N) { }
/// \endcode
/// functionDecl(isExplicitTemplateSpecialization())
/// matches the specialization A<int>().
///
/// Usable as: Matcher<FunctionDecl>, Matcher<VarDecl>, Matcher<CXXRecordDecl>
AST_POLYMORPHIC_MATCHER(isExplicitTemplateSpecialization,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, VarDecl,
                                                        CXXRecordDecl)) {
  // Explicit specializations are tagged TSK_ExplicitSpecialization;
  // instantiations carry one of the TSK_*Instantiation* kinds instead.
  return Node.getTemplateSpecializationKind() == TSK_ExplicitSpecialization;
}
/// Matches \c TypeLocs for which the given inner
/// QualType-matcher matches.
AST_MATCHER_FUNCTION_P_OVERLOAD(internal::BindableMatcher<TypeLoc>, loc,
internal::Matcher<QualType>, InnerMatcher, 0) {
// Adapt the QualType matcher so it applies to a TypeLoc's underlying type;
// the result is bindable so callers can .bind("name") the TypeLoc itself.
return internal::BindableMatcher<TypeLoc>(
new internal::TypeLocTypeMatcher(InnerMatcher));
}
/// Matches type \c bool.
///
/// Given
/// \code
/// struct S { bool func(); };
/// \endcode
/// functionDecl(returns(booleanType()))
/// matches "bool func();"
AST_MATCHER(Type, booleanType) {
// Delegates directly to Type::isBooleanType().
return Node.isBooleanType();
}
/// Matches type \c void.
///
/// Given
/// \code
/// struct S { void func(); };
/// \endcode
/// functionDecl(returns(voidType()))
/// matches "void func();"
AST_MATCHER(Type, voidType) {
// Delegates directly to Type::isVoidType().
return Node.isVoidType();
}
template <typename NodeType>
using AstTypeMatcher = internal::VariadicDynCastAllOfMatcher<Type, NodeType>;
/// Matches builtin Types.
///
/// Given
/// \code
/// struct A {};
/// A a;
/// int b;
/// float c;
/// bool d;
/// \endcode
/// builtinType()
/// matches "int b", "float c" and "bool d"
extern const AstTypeMatcher<BuiltinType> builtinType;
/// Matches all kinds of arrays.
///
/// Given
/// \code
/// int a[] = { 2, 3 };
/// int b[4];
/// void f() { int c[a[0]]; }
/// \endcode
/// arrayType()
/// matches "int a[]", "int b[4]" and "int c[a[0]]";
extern const AstTypeMatcher<ArrayType> arrayType;
/// Matches C99 complex types.
///
/// Given
/// \code
/// _Complex float f;
/// \endcode
/// complexType()
/// matches "_Complex float f"
extern const AstTypeMatcher<ComplexType> complexType;
/// Matches any real floating-point type (float, double, long double).
///
/// Given
/// \code
/// int i;
/// float f;
/// \endcode
/// realFloatingPointType()
/// matches "float f" but not "int i"
AST_MATCHER(Type, realFloatingPointType) {
// Delegates to Type::isRealFloatingType() (float, double, long double;
// excludes complex types).
return Node.isRealFloatingType();
}
/// Matches arrays and C99 complex types that have a specific element
/// type.
///
/// Given
/// \code
/// struct A {};
/// A a[7];
/// int b[7];
/// \endcode
/// arrayType(hasElementType(builtinType()))
/// matches "int b[7]"
///
/// Usable as: Matcher<ArrayType>, Matcher<ComplexType>
AST_TYPELOC_TRAVERSE_MATCHER_DECL(hasElementType, getElement,
AST_POLYMORPHIC_SUPPORTED_TYPES(ArrayType,
ComplexType));
/// Matches C arrays with a specified constant size.
///
/// Given
/// \code
/// void() {
/// int a[2];
/// int b[] = { 2, 3 };
/// int c[b[0]];
/// }
/// \endcode
/// constantArrayType()
/// matches "int a[2]"
extern const AstTypeMatcher<ConstantArrayType> constantArrayType;
/// Matches nodes that have the specified size.
///
/// Given
/// \code
/// int a[42];
/// int b[2 * 21];
/// int c[41], d[43];
/// char *s = "abcd";
/// wchar_t *ws = L"abcd";
/// char *w = "a";
/// \endcode
/// constantArrayType(hasSize(42))
/// matches "int a[42]" and "int b[2 * 21]"
/// stringLiteral(hasSize(4))
/// matches "abcd", L"abcd"
AST_POLYMORPHIC_MATCHER_P(hasSize,
AST_POLYMORPHIC_SUPPORTED_TYPES(ConstantArrayType,
StringLiteral),
unsigned, N) {
// Dispatch to the per-node-type HasSizeMatcher specialization: per the
// doc above, "size" means element count for arrays and character length
// for string literals.
return internal::HasSizeMatcher<NodeType>::hasSize(Node, N);
}
/// Matches C++ arrays whose size is a value-dependent expression.
///
/// Given
/// \code
/// template<typename T, int Size>
/// class array {
/// T data[Size];
/// };
/// \endcode
/// dependentSizedArrayType
/// matches "T data[Size]"
extern const AstTypeMatcher<DependentSizedArrayType> dependentSizedArrayType;
/// Matches C arrays with unspecified size.
///
/// Given
/// \code
/// int a[] = { 2, 3 };
/// int b[42];
/// void f(int c[]) { int d[a[0]]; };
/// \endcode
/// incompleteArrayType()
/// matches "int a[]" and "int c[]"
extern const AstTypeMatcher<IncompleteArrayType> incompleteArrayType;
/// Matches C arrays with a specified size that is not an
/// integer-constant-expression.
///
/// Given
/// \code
/// void f() {
/// int a[] = { 2, 3 }
/// int b[42];
/// int c[a[0]];
/// }
/// \endcode
/// variableArrayType()
/// matches "int c[a[0]]"
extern const AstTypeMatcher<VariableArrayType> variableArrayType;
/// Matches \c VariableArrayType nodes that have a specific size
/// expression.
///
/// Given
/// \code
/// void f(int b) {
/// int a[b];
/// }
/// \endcode
/// variableArrayType(hasSizeExpr(ignoringImpCasts(declRefExpr(to(
/// varDecl(hasName("b")))))))
/// matches "int a[b]"
AST_MATCHER_P(VariableArrayType, hasSizeExpr,
              internal::Matcher<Expr>, InnerMatcher) {
  // Run the inner matcher against the VLA's size expression.
  const Expr *SizeExpr = Node.getSizeExpr();
  return InnerMatcher.matches(*SizeExpr, Finder, Builder);
}
/// Matches atomic types.
///
/// Given
/// \code
/// _Atomic(int) i;
/// \endcode
/// atomicType()
/// matches "_Atomic(int) i"
extern const AstTypeMatcher<AtomicType> atomicType;
/// Matches atomic types with a specific value type.
///
/// Given
/// \code
/// _Atomic(int) i;
/// _Atomic(float) f;
/// \endcode
/// atomicType(hasValueType(isInteger()))
/// matches "_Atomic(int) i"
///
/// Usable as: Matcher<AtomicType>
AST_TYPELOC_TRAVERSE_MATCHER_DECL(hasValueType, getValue,
AST_POLYMORPHIC_SUPPORTED_TYPES(AtomicType));
/// Matches types nodes representing C++11 auto types.
///
/// Given:
/// \code
/// auto n = 4;
/// int v[] = { 2, 3 }
/// for (auto i : v) { }
/// \endcode
/// autoType()
/// matches "auto n" and "auto i"
extern const AstTypeMatcher<AutoType> autoType;
/// Matches types nodes representing C++11 decltype(<expr>) types.
///
/// Given:
/// \code
/// short i = 1;
/// int j = 42;
/// decltype(i + j) result = i + j;
/// \endcode
/// decltypeType()
/// matches "decltype(i + j)"
extern const AstTypeMatcher<DecltypeType> decltypeType;
/// Matches \c AutoType nodes where the deduced type is a specific type.
///
/// Note: There is no \c TypeLoc for the deduced type and thus no
/// \c getDeducedLoc() matcher.
///
/// Given
/// \code
/// auto a = 1;
/// auto b = 2.0;
/// \endcode
/// autoType(hasDeducedType(isInteger()))
/// matches "auto a"
///
/// Usable as: Matcher<AutoType>
AST_TYPE_TRAVERSE_MATCHER(hasDeducedType, getDeducedType,
AST_POLYMORPHIC_SUPPORTED_TYPES(AutoType));
/// Matches \c DecltypeType nodes to find out the underlying type.
///
/// Given
/// \code
/// decltype(1) a = 1;
/// decltype(2.0) b = 2.0;
/// \endcode
/// decltypeType(hasUnderlyingType(isInteger()))
/// matches the type of "a"
///
/// Usable as: Matcher<DecltypeType>
AST_TYPE_TRAVERSE_MATCHER(hasUnderlyingType, getUnderlyingType,
AST_POLYMORPHIC_SUPPORTED_TYPES(DecltypeType));
/// Matches \c FunctionType nodes.
///
/// Given
/// \code
/// int (*f)(int);
/// void g();
/// \endcode
/// functionType()
/// matches "int (*f)(int)" and the type of "g".
extern const AstTypeMatcher<FunctionType> functionType;
/// Matches \c FunctionProtoType nodes.
///
/// Given
/// \code
/// int (*f)(int);
/// void g();
/// \endcode
/// functionProtoType()
/// matches "int (*f)(int)" and the type of "g" in C++ mode.
/// In C mode, "g" is not matched because it does not contain a prototype.
extern const AstTypeMatcher<FunctionProtoType> functionProtoType;
/// Matches \c ParenType nodes.
///
/// Given
/// \code
/// int (*ptr_to_array)[4];
/// int *array_of_ptrs[4];
/// \endcode
///
/// \c varDecl(hasType(pointsTo(parenType()))) matches \c ptr_to_array but not
/// \c array_of_ptrs.
extern const AstTypeMatcher<ParenType> parenType;
/// Matches \c ParenType nodes where the inner type is a specific type.
///
/// Given
/// \code
/// int (*ptr_to_array)[4];
/// int (*ptr_to_func)(int);
/// \endcode
///
/// \c varDecl(hasType(pointsTo(parenType(innerType(functionType()))))) matches
/// \c ptr_to_func but not \c ptr_to_array.
///
/// Usable as: Matcher<ParenType>
AST_TYPE_TRAVERSE_MATCHER(innerType, getInnerType,
AST_POLYMORPHIC_SUPPORTED_TYPES(ParenType));
/// Matches block pointer types, i.e. types syntactically represented as
/// "void (^)(int)".
///
/// The \c pointee is always required to be a \c FunctionType.
extern const AstTypeMatcher<BlockPointerType> blockPointerType;
/// Matches member pointer types.
/// Given
/// \code
/// struct A { int i; }
/// A::* ptr = A::i;
/// \endcode
/// memberPointerType()
/// matches "A::* ptr"
extern const AstTypeMatcher<MemberPointerType> memberPointerType;
/// Matches pointer types, but does not match Objective-C object pointer
/// types.
///
/// Given
/// \code
/// int *a;
/// int &b = *a;
/// int c = 5;
///
/// @interface Foo
/// @end
/// Foo *f;
/// \endcode
/// pointerType()
/// matches "int *a", but does not match "Foo *f".
extern const AstTypeMatcher<PointerType> pointerType;
/// Matches an Objective-C object pointer type, which is different from
/// a pointer type, despite being syntactically similar.
///
/// Given
/// \code
/// int *a;
///
/// @interface Foo
/// @end
/// Foo *f;
/// \endcode
/// objcObjectPointerType()
/// matches "Foo *f", but does not match "int *a".
extern const AstTypeMatcher<ObjCObjectPointerType> objcObjectPointerType;
/// Matches both lvalue and rvalue reference types.
///
/// Given
/// \code
/// int *a;
/// int &b = *a;
/// int &&c = 1;
/// auto &d = b;
/// auto &&e = c;
/// auto &&f = 2;
/// int g = 5;
/// \endcode
///
/// \c referenceType() matches the types of \c b, \c c, \c d, \c e, and \c f.
extern const AstTypeMatcher<ReferenceType> referenceType;
/// Matches lvalue reference types.
///
/// Given:
/// \code
/// int *a;
/// int &b = *a;
/// int &&c = 1;
/// auto &d = b;
/// auto &&e = c;
/// auto &&f = 2;
/// int g = 5;
/// \endcode
///
/// \c lValueReferenceType() matches the types of \c b, \c d, and \c e. \c e is
/// matched since the type is deduced as int& by reference collapsing rules.
extern const AstTypeMatcher<LValueReferenceType> lValueReferenceType;
/// Matches rvalue reference types.
///
/// Given:
/// \code
/// int *a;
/// int &b = *a;
/// int &&c = 1;
/// auto &d = b;
/// auto &&e = c;
/// auto &&f = 2;
/// int g = 5;
/// \endcode
///
/// \c rValueReferenceType() matches the types of \c c and \c f. \c e is not
/// matched as it is deduced to int& by reference collapsing rules.
extern const AstTypeMatcher<RValueReferenceType> rValueReferenceType;
/// Narrows PointerType (and similar) matchers to those where the
/// \c pointee matches a given matcher.
///
/// Given
/// \code
/// int *a;
/// int const *b;
/// float const *f;
/// \endcode
/// pointerType(pointee(isConstQualified(), isInteger()))
/// matches "int const *b"
///
/// Usable as: Matcher<BlockPointerType>, Matcher<MemberPointerType>,
/// Matcher<PointerType>, Matcher<ReferenceType>
AST_TYPELOC_TRAVERSE_MATCHER_DECL(
pointee, getPointee,
AST_POLYMORPHIC_SUPPORTED_TYPES(BlockPointerType, MemberPointerType,
PointerType, ReferenceType));
/// Matches typedef types.
///
/// Given
/// \code
/// typedef int X;
/// \endcode
/// typedefType()
/// matches "typedef int X"
extern const AstTypeMatcher<TypedefType> typedefType;
/// Matches enum types.
///
/// Given
/// \code
/// enum C { Green };
/// enum class S { Red };
///
/// C c;
/// S s;
/// \endcode
///
/// \c enumType() matches the type of the variable declarations of both \c c and
/// \c s.
extern const AstTypeMatcher<EnumType> enumType;
/// Matches template specialization types.
///
/// Given
/// \code
/// template <typename T>
/// class C { };
///
/// template class C<int>; // A
/// C<char> var; // B
/// \endcode
///
/// \c templateSpecializationType() matches the type of the explicit
/// instantiation in \c A and the type of the variable declaration in \c B.
extern const AstTypeMatcher<TemplateSpecializationType>
templateSpecializationType;
/// Matches C++17 deduced template specialization types, e.g. deduced class
/// template types.
///
/// Given
/// \code
/// template <typename T>
/// class C { public: C(T); };
///
/// C c(123);
/// \endcode
/// \c deducedTemplateSpecializationType() matches the type in the declaration
/// of the variable \c c.
extern const AstTypeMatcher<DeducedTemplateSpecializationType>
deducedTemplateSpecializationType;
/// Matches types nodes representing unary type transformations.
///
/// Given:
/// \code
/// typedef __underlying_type(T) type;
/// \endcode
/// unaryTransformType()
/// matches "__underlying_type(T)"
extern const AstTypeMatcher<UnaryTransformType> unaryTransformType;
/// Matches record types (e.g. structs, classes).
///
/// Given
/// \code
/// class C {};
/// struct S {};
///
/// C c;
/// S s;
/// \endcode
///
/// \c recordType() matches the type of the variable declarations of both \c c
/// and \c s.
extern const AstTypeMatcher<RecordType> recordType;
/// Matches tag types (record and enum types).
///
/// Given
/// \code
/// enum E {};
/// class C {};
///
/// E e;
/// C c;
/// \endcode
///
/// \c tagType() matches the type of the variable declarations of both \c e
/// and \c c.
extern const AstTypeMatcher<TagType> tagType;
/// Matches types specified with an elaborated type keyword or with a
/// qualified name.
///
/// Given
/// \code
/// namespace N {
/// namespace M {
/// class D {};
/// }
/// }
/// class C {};
///
/// class C c;
/// N::M::D d;
/// \endcode
///
/// \c elaboratedType() matches the type of the variable declarations of both
/// \c c and \c d.
extern const AstTypeMatcher<ElaboratedType> elaboratedType;
/// Matches ElaboratedTypes whose qualifier, a NestedNameSpecifier,
/// matches \c InnerMatcher if the qualifier exists.
///
/// Given
/// \code
/// namespace N {
/// namespace M {
/// class D {};
/// }
/// }
/// N::M::D d;
/// \endcode
///
/// \c elaboratedType(hasQualifier(hasPrefix(specifiesNamespace(hasName("N"))))
/// matches the type of the variable declaration of \c d.
AST_MATCHER_P(ElaboratedType, hasQualifier,
              internal::Matcher<NestedNameSpecifier>, InnerMatcher) {
  // A type written without any qualifier never matches.
  const NestedNameSpecifier *Qualifier = Node.getQualifier();
  return Qualifier != nullptr &&
         InnerMatcher.matches(*Qualifier, Finder, Builder);
}
/// Matches ElaboratedTypes whose named type matches \c InnerMatcher.
///
/// Given
/// \code
/// namespace N {
/// namespace M {
/// class D {};
/// }
/// }
/// N::M::D d;
/// \endcode
///
/// \c elaboratedType(namesType(recordType(
/// hasDeclaration(namedDecl(hasName("D")))))) matches the type of the variable
/// declaration of \c d.
AST_MATCHER_P(ElaboratedType, namesType, internal::Matcher<QualType>,
InnerMatcher) {
// Match against the underlying type the elaborated type names.
return InnerMatcher.matches(Node.getNamedType(), Finder, Builder);
}
/// Matches types that represent the result of substituting a type for a
/// template type parameter.
///
/// Given
/// \code
/// template <typename T>
/// void F(T t) {
/// int i = 1 + t;
/// }
/// \endcode
///
/// \c substTemplateTypeParmType() matches the type of 't' but not '1'
extern const AstTypeMatcher<SubstTemplateTypeParmType>
substTemplateTypeParmType;
/// Matches template type parameter substitutions that have a replacement
/// type that matches the provided matcher.
///
/// Given
/// \code
/// template <typename T>
/// double F(T t);
/// int i;
/// double j = F(i);
/// \endcode
///
/// \c substTemplateTypeParmType(hasReplacementType(type())) matches int
AST_TYPE_TRAVERSE_MATCHER(
hasReplacementType, getReplacementType,
AST_POLYMORPHIC_SUPPORTED_TYPES(SubstTemplateTypeParmType));
/// Matches template type parameter types.
///
/// Example matches T, but not int.
/// (matcher = templateTypeParmType())
/// \code
/// template <typename T> void f(int i);
/// \endcode
extern const AstTypeMatcher<TemplateTypeParmType> templateTypeParmType;
/// Matches injected class name types.
///
/// Example matches S s, but not S<T> s.
/// (matcher = parmVarDecl(hasType(injectedClassNameType())))
/// \code
/// template <typename T> struct S {
/// void f(S s);
/// void g(S<T> s);
/// };
/// \endcode
extern const AstTypeMatcher<InjectedClassNameType> injectedClassNameType;
/// Matches decayed type
/// Example matches i[] in declaration of f.
/// (matcher = valueDecl(hasType(decayedType(hasDecayedType(pointerType())))))
/// Example matches i[1].
/// (matcher = expr(hasType(decayedType(hasDecayedType(pointerType())))))
/// \code
/// void f(int i[]) {
/// i[1] = 0;
/// }
/// \endcode
extern const AstTypeMatcher<DecayedType> decayedType;
/// Matches the decayed type, whose decayed type matches \c InnerMatcher.
AST_MATCHER_P(DecayedType, hasDecayedType, internal::Matcher<QualType>,
InnerType) {
// Match against the pointer type the original (array/function) type
// decays to.
return InnerType.matches(Node.getDecayedType(), Finder, Builder);
}
/// Matches declarations whose declaration context, interpreted as a
/// Decl, matches \c InnerMatcher.
///
/// Given
/// \code
/// namespace N {
/// namespace M {
/// class D {};
/// }
/// }
/// \endcode
///
/// \c cxxRecordDecl(hasDeclContext(namedDecl(hasName("M")))) matches the
/// declaration of \c class \c D.
AST_MATCHER_P(Decl, hasDeclContext, internal::Matcher<Decl>, InnerMatcher) {
const DeclContext *DC = Node.getDeclContext();
if (!DC) return false;
// Reinterpret the DeclContext as the Decl it also is, so Decl matchers apply.
return InnerMatcher.matches(*Decl::castFromDeclContext(DC), Finder, Builder);
}
/// Matches nested name specifiers.
///
/// Given
/// \code
/// namespace ns {
/// struct A { static void f(); };
/// void A::f() {}
/// void g() { A::f(); }
/// }
/// ns::A a;
/// \endcode
/// nestedNameSpecifier()
/// matches "ns::" and both "A::"
extern const internal::VariadicAllOfMatcher<NestedNameSpecifier>
nestedNameSpecifier;
/// Same as \c nestedNameSpecifier but matches \c NestedNameSpecifierLoc.
extern const internal::VariadicAllOfMatcher<NestedNameSpecifierLoc>
nestedNameSpecifierLoc;
/// Matches \c NestedNameSpecifierLocs for which the given inner
/// NestedNameSpecifier-matcher matches.
AST_MATCHER_FUNCTION_P_OVERLOAD(
internal::BindableMatcher<NestedNameSpecifierLoc>, loc,
internal::Matcher<NestedNameSpecifier>, InnerMatcher, 1) {
// Adapt the NestedNameSpecifier matcher so it applies to the specifier
// carried by a NestedNameSpecifierLoc; result is bindable.
return internal::BindableMatcher<NestedNameSpecifierLoc>(
new internal::LocMatcher<NestedNameSpecifierLoc, NestedNameSpecifier>(
InnerMatcher));
}
/// Matches nested name specifiers that specify a type matching the
/// given \c QualType matcher without qualifiers.
///
/// Given
/// \code
/// struct A { struct B { struct C {}; }; };
/// A::B::C c;
/// \endcode
/// nestedNameSpecifier(specifiesType(
/// hasDeclaration(cxxRecordDecl(hasName("A")))
/// ))
/// matches "A::"
AST_MATCHER_P(NestedNameSpecifier, specifiesType,
              internal::Matcher<QualType>, InnerMatcher) {
  // Only type-form specifiers can match; namespace specifiers etc. have
  // no associated type.
  const Type *SpecifiedType = Node.getAsType();
  return SpecifiedType != nullptr &&
         InnerMatcher.matches(QualType(SpecifiedType, 0), Finder, Builder);
}
/// Matches nested name specifier locs that specify a type matching the
/// given \c TypeLoc.
///
/// Given
/// \code
/// struct A { struct B { struct C {}; }; };
/// A::B::C c;
/// \endcode
/// nestedNameSpecifierLoc(specifiesTypeLoc(loc(type(
/// hasDeclaration(cxxRecordDecl(hasName("A")))))))
/// matches "A::"
AST_MATCHER_P(NestedNameSpecifierLoc, specifiesTypeLoc,
internal::Matcher<TypeLoc>, InnerMatcher) {
// Short-circuit chain: reject a null loc, then a specifier that does not
// name a type (e.g. a namespace), before matching the TypeLoc itself.
return Node && Node.getNestedNameSpecifier()->getAsType() &&
InnerMatcher.matches(Node.getTypeLoc(), Finder, Builder);
}
/// Matches on the prefix of a \c NestedNameSpecifier.
///
/// Given
/// \code
/// struct A { struct B { struct C {}; }; };
/// A::B::C c;
/// \endcode
/// nestedNameSpecifier(hasPrefix(specifiesType(asString("struct A")))) and
/// matches "A::"
AST_MATCHER_P_OVERLOAD(NestedNameSpecifier, hasPrefix,
                       internal::Matcher<NestedNameSpecifier>, InnerMatcher,
                       0) {
  // The outermost specifier has no prefix and therefore cannot match.
  const NestedNameSpecifier *Prefix = Node.getPrefix();
  return Prefix != nullptr && InnerMatcher.matches(*Prefix, Finder, Builder);
}
/// Matches on the prefix of a \c NestedNameSpecifierLoc.
///
/// Given
/// \code
/// struct A { struct B { struct C {}; }; };
/// A::B::C c;
/// \endcode
/// nestedNameSpecifierLoc(hasPrefix(loc(specifiesType(asString("struct A")))))
/// matches "A::"
AST_MATCHER_P_OVERLOAD(NestedNameSpecifierLoc, hasPrefix,
                       internal::Matcher<NestedNameSpecifierLoc>, InnerMatcher,
                       1) {
  // An invalid (null) prefix loc means there is nothing to match against.
  NestedNameSpecifierLoc Prefix = Node.getPrefix();
  return Prefix && InnerMatcher.matches(Prefix, Finder, Builder);
}
/// Matches nested name specifiers that specify a namespace matching the
/// given namespace matcher.
///
/// Given
/// \code
/// namespace ns { struct A {}; }
/// ns::A a;
/// \endcode
/// nestedNameSpecifier(specifiesNamespace(hasName("ns")))
/// matches "ns::"
AST_MATCHER_P(NestedNameSpecifier, specifiesNamespace,
              internal::Matcher<NamespaceDecl>, InnerMatcher) {
  // Only namespace-form specifiers can match this matcher.
  const NamespaceDecl *NS = Node.getAsNamespace();
  return NS != nullptr && InnerMatcher.matches(*NS, Finder, Builder);
}
/// Overloads for the \c equalsNode matcher.
/// FIXME: Implement for other node types.
/// @{
/// Matches if a node equals another node.
///
/// \c Decl has pointer identity in the AST.
AST_MATCHER_P_OVERLOAD(Decl, equalsNode, const Decl*, Other, 0) {
// Pointer comparison suffices: Decl has pointer identity (see doc above).
return &Node == Other;
}
/// Matches if a node equals another node.
///
/// \c Stmt has pointer identity in the AST.
AST_MATCHER_P_OVERLOAD(Stmt, equalsNode, const Stmt*, Other, 1) {
// Pointer comparison suffices: Stmt has pointer identity (see doc above).
return &Node == Other;
}
/// Matches if a node equals another node.
///
/// \c Type has pointer identity in the AST.
AST_MATCHER_P_OVERLOAD(Type, equalsNode, const Type*, Other, 2) {
// Pointer comparison suffices: Type has pointer identity (see doc above).
return &Node == Other;
}
/// @}
/// Matches each case or default statement belonging to the given switch
/// statement. This matcher may produce multiple matches.
///
/// Given
/// \code
/// switch (1) { case 1: case 2: default: switch (2) { case 3: case 4: ; } }
/// \endcode
/// switchStmt(forEachSwitchCase(caseStmt().bind("c"))).bind("s")
/// matches four times, with "c" binding each of "case 1:", "case 2:",
/// "case 3:" and "case 4:", and "s" respectively binding "switch (1)",
/// "switch (1)", "switch (2)" and "switch (2)".
AST_MATCHER_P(SwitchStmt, forEachSwitchCase, internal::Matcher<SwitchCase>,
              InnerMatcher) {
  // FIXME: getSwitchCaseList() does not necessarily guarantee a stable
  // iteration order. We should use the more general iterating matchers once
  // they are capable of expressing this matcher (for example, it should ignore
  // case statements belonging to nested switch statements).
  BoundNodesTreeBuilder Matches;
  bool AnyMatched = false;
  for (const SwitchCase *Case = Node.getSwitchCaseList(); Case != nullptr;
       Case = Case->getNextSwitchCase()) {
    // Each case gets its own builder so bindings from non-matching cases
    // do not leak into the final result set.
    BoundNodesTreeBuilder PerCase(*Builder);
    if (InnerMatcher.matches(*Case, Finder, &PerCase)) {
      AnyMatched = true;
      Matches.addMatch(PerCase);
    }
  }
  *Builder = std::move(Matches);
  return AnyMatched;
}
/// Matches each constructor initializer in a constructor definition.
///
/// Given
/// \code
/// class A { A() : i(42), j(42) {} int i; int j; };
/// \endcode
/// cxxConstructorDecl(forEachConstructorInitializer(
/// forField(decl().bind("x"))
/// ))
/// will trigger two matches, binding for 'i' and 'j' respectively.
AST_MATCHER_P(CXXConstructorDecl, forEachConstructorInitializer,
              internal::Matcher<CXXCtorInitializer>, InnerMatcher) {
  // Accumulate a match per initializer; each gets its own builder so that
  // bindings from non-matching initializers are discarded.
  BoundNodesTreeBuilder Matches;
  bool AnyMatched = false;
  for (const auto *Init : Node.inits()) {
    BoundNodesTreeBuilder PerInit(*Builder);
    if (InnerMatcher.matches(*Init, Finder, &PerInit)) {
      AnyMatched = true;
      Matches.addMatch(PerInit);
    }
  }
  *Builder = std::move(Matches);
  return AnyMatched;
}
/// Matches constructor declarations that are copy constructors.
///
/// Given
/// \code
/// struct S {
/// S(); // #1
/// S(const S &); // #2
/// S(S &&); // #3
/// };
/// \endcode
/// cxxConstructorDecl(isCopyConstructor()) will match #2, but not #1 or #3.
AST_MATCHER(CXXConstructorDecl, isCopyConstructor) {
// Delegates to CXXConstructorDecl::isCopyConstructor().
return Node.isCopyConstructor();
}
/// Matches constructor declarations that are move constructors.
///
/// Given
/// \code
/// struct S {
/// S(); // #1
/// S(const S &); // #2
/// S(S &&); // #3
/// };
/// \endcode
/// cxxConstructorDecl(isMoveConstructor()) will match #3, but not #1 or #2.
AST_MATCHER(CXXConstructorDecl, isMoveConstructor) {
// Delegates to CXXConstructorDecl::isMoveConstructor().
return Node.isMoveConstructor();
}
/// Matches constructor declarations that are default constructors.
///
/// Given
/// \code
/// struct S {
/// S(); // #1
/// S(const S &); // #2
/// S(S &&); // #3
/// };
/// \endcode
/// cxxConstructorDecl(isDefaultConstructor()) will match #1, but not #2 or #3.
AST_MATCHER(CXXConstructorDecl, isDefaultConstructor) {
// Delegates to CXXConstructorDecl::isDefaultConstructor().
return Node.isDefaultConstructor();
}
/// Matches constructors that delegate to another constructor.
///
/// Given
/// \code
/// struct S {
/// S(); // #1
/// S(int) {} // #2
/// S(S &&) : S() {} // #3
/// };
/// S::S() : S(0) {} // #4
/// \endcode
/// cxxConstructorDecl(isDelegatingConstructor()) will match #3 and #4, but not
/// #1 or #2.
AST_MATCHER(CXXConstructorDecl, isDelegatingConstructor) {
// Delegates to CXXConstructorDecl::isDelegatingConstructor().
return Node.isDelegatingConstructor();
}
/// Matches constructor, conversion function, and deduction guide declarations
/// that have an explicit specifier if this explicit specifier is resolved to
/// true.
///
/// Given
/// \code
/// template<bool b>
/// struct S {
/// S(int); // #1
/// explicit S(double); // #2
/// operator int(); // #3
/// explicit operator bool(); // #4
/// explicit(false) S(bool) // # 7
/// explicit(true) S(char) // # 8
/// explicit(b) S(S) // # 9
/// };
/// S(int) -> S<true> // #5
/// explicit S(double) -> S<false> // #6
/// \endcode
/// cxxConstructorDecl(isExplicit()) will match #2 and #8, but not #1, #7 or #9.
/// cxxConversionDecl(isExplicit()) will match #4, but not #3.
/// cxxDeductionGuideDecl(isExplicit()) will match #6, but not #5.
AST_POLYMORPHIC_MATCHER(isExplicit, AST_POLYMORPHIC_SUPPORTED_TYPES(
CXXConstructorDecl, CXXConversionDecl,
CXXDeductionGuideDecl)) {
// All three supported declaration kinds expose isExplicit().
return Node.isExplicit();
}
/// Matches the expression in an explicit specifier if present in the given
/// declaration.
///
/// Given
/// \code
/// template<bool b>
/// struct S {
/// S(int); // #1
/// explicit S(double); // #2
/// operator int(); // #3
/// explicit operator bool(); // #4
/// explicit(false) S(bool) // # 7
/// explicit(true) S(char) // # 8
/// explicit(b) S(S) // # 9
/// };
/// S(int) -> S<true> // #5
/// explicit S(double) -> S<false> // #6
/// \endcode
/// cxxConstructorDecl(hasExplicitSpecifier(constantExpr())) will match #7, #8 and #9, but not #1 or #2.
/// cxxConversionDecl(hasExplicitSpecifier(constantExpr())) will not match #3 or #4.
/// cxxDeductionGuideDecl(hasExplicitSpecifier(constantExpr())) will not match #5 or #6.
AST_MATCHER_P(FunctionDecl, hasExplicitSpecifier, internal::Matcher<Expr>,
              InnerMatcher) {
  // Only explicit(expr) forms carry an expression; a plain `explicit` or a
  // non-explicit declaration yields a null expression and never matches.
  const Expr *ExplicitExpr = ExplicitSpecifier::getFromDecl(&Node).getExpr();
  return ExplicitExpr != nullptr &&
         InnerMatcher.matches(*ExplicitExpr, Finder, Builder);
}
/// Matches function and namespace declarations that are marked with
/// the inline keyword.
///
/// Given
/// \code
/// inline void f();
/// void g();
/// namespace n {
/// inline namespace m {}
/// }
/// \endcode
/// functionDecl(isInline()) will match ::f().
/// namespaceDecl(isInline()) will match n::m.
AST_POLYMORPHIC_MATCHER(isInline,
AST_POLYMORPHIC_SUPPORTED_TYPES(NamespaceDecl,
FunctionDecl)) {
// This is required because the spelling of the function used to determine
// whether inline is specified or not differs between the polymorphic types.
if (const auto *FD = dyn_cast<FunctionDecl>(&Node))
return FD->isInlineSpecified();
else if (const auto *NSD = dyn_cast<NamespaceDecl>(&Node))
return NSD->isInline();
// The supported-types list above restricts Node to the two handled cases.
llvm_unreachable("Not a valid polymorphic type");
}
/// Matches anonymous namespace declarations.
///
/// Given
/// \code
/// namespace n {
/// namespace {} // #1
/// }
/// \endcode
/// namespaceDecl(isAnonymous()) will match #1 but not ::n.
AST_MATCHER(NamespaceDecl, isAnonymous) {
// Delegates to NamespaceDecl::isAnonymousNamespace().
return Node.isAnonymousNamespace();
}
/// Matches declarations in the namespace `std`, but not in nested namespaces.
///
/// Given
/// \code
/// class vector {};
/// namespace foo {
/// class vector {};
/// namespace std {
/// class vector {};
/// }
/// }
/// namespace std {
/// inline namespace __1 {
/// class vector {}; // #1
/// namespace experimental {
/// class vector {};
/// }
/// }
/// }
/// \endcode
/// cxxRecordDecl(hasName("vector"), isInStdNamespace()) will match only #1.
// Delegates to Decl::isInStdNamespace().
AST_MATCHER(Decl, isInStdNamespace) { return Node.isInStdNamespace(); }
/// If the given case statement does not use the GNU case range
/// extension, matches the constant given in the statement.
///
/// Given
/// \code
/// switch (1) { case 1: case 1+1: case 3 ... 4: ; }
/// \endcode
/// caseStmt(hasCaseConstant(integerLiteral()))
/// matches "case 1:"
AST_MATCHER_P(CaseStmt, hasCaseConstant, internal::Matcher<Expr>,
              InnerMatcher) {
  // A non-null RHS means the GNU "case lo ... hi:" range extension is in
  // use; such statements are deliberately never matched.
  return Node.getRHS() == nullptr &&
         InnerMatcher.matches(*Node.getLHS(), Finder, Builder);
}
/// Matches declaration that has a given attribute.
///
/// Given
/// \code
/// __attribute__((device)) void f() { ... }
/// \endcode
/// decl(hasAttr(clang::attr::CUDADevice)) matches the function declaration of
/// f. If the matcher is used from clang-query, attr::Kind parameter should be
/// passed as a quoted string. e.g., hasAttr("attr::CUDADevice").
AST_MATCHER_P(Decl, hasAttr, attr::Kind, AttrKind) {
  // Linear scan over the declaration's attributes for a kind match.
  for (const auto *A : Node.attrs())
    if (A->getKind() == AttrKind)
      return true;
  return false;
}
/// Matches the return value expression of a return statement
///
/// Given
/// \code
/// return a + b;
/// \endcode
/// hasReturnValue(binaryOperator())
/// matches 'return a + b'
/// with binaryOperator()
/// matching 'a + b'
AST_MATCHER_P(ReturnStmt, hasReturnValue, internal::Matcher<Expr>,
              InnerMatcher) {
  // A bare `return;` carries no value expression and never matches.
  const Expr *RetValue = Node.getRetValue();
  return RetValue != nullptr &&
         InnerMatcher.matches(*RetValue, Finder, Builder);
}
/// Matches CUDA kernel call expression.
///
/// Example matches,
/// \code
/// kernel<<<i,j>>>();
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CUDAKernelCallExpr>
cudaKernelCallExpr;
/// Matches expressions that resolve to a null pointer constant, such as
/// GNU's __null, C++11's nullptr, or C's NULL macro.
///
/// Given:
/// \code
/// void *v1 = NULL;
/// void *v2 = nullptr;
/// void *v3 = __null; // GNU extension
/// char *cp = (char *)0;
/// int *ip = 0;
/// int i = 0;
/// \endcode
/// expr(nullPointerConstant())
/// matches the initializer for v1, v2, v3, cp, and ip. Does not match the
/// initializer for i.
AST_MATCHER(Expr, nullPointerConstant) {
// NPC_ValueDependentIsNull: value-dependent expressions are treated as
// null pointer constants for matching purposes.
return Node.isNullPointerConstant(Finder->getASTContext(),
Expr::NPC_ValueDependentIsNull);
}
/// Matches declaration of the function the statement belongs to
///
/// Given:
/// \code
/// F& operator=(const F& o) {
/// std::copy_if(o.begin(), o.end(), begin(), [](V v) { return v > 0; });
/// return *this;
/// }
/// \endcode
/// returnStmt(forFunction(hasName("operator=")))
/// matches 'return *this'
/// but does not match 'return v > 0'
AST_MATCHER_P(Stmt, forFunction, internal::Matcher<FunctionDecl>,
              InnerMatcher) {
  // Climb the parent chains of the statement (worklist traversal) until a
  // FunctionDecl is reached. A LambdaExpr on the way acts as the function
  // boundary itself: its generated call operator is matched instead of
  // continuing past it to the enclosing function.
  const auto &Parents = Finder->getASTContext().getParents(Node);
  llvm::SmallVector<DynTypedNode, 8> Stack(Parents.begin(), Parents.end());
  while (!Stack.empty()) {
    // Copy the node before popping: holding a reference to Stack.back()
    // across pop_back() would leave it dangling, because pop_back()
    // destroys the element.
    DynTypedNode CurNode = Stack.back();
    Stack.pop_back();
    if (const auto *FuncDeclNode = CurNode.get<FunctionDecl>()) {
      if (InnerMatcher.matches(*FuncDeclNode, Finder, Builder)) {
        return true;
      }
    } else if (const auto *LambdaExprNode = CurNode.get<LambdaExpr>()) {
      if (InnerMatcher.matches(*LambdaExprNode->getCallOperator(),
                               Finder, Builder)) {
        return true;
      }
    } else {
      // Not a function boundary: keep climbing through this node's parents.
      for (const auto &Parent : Finder->getASTContext().getParents(CurNode))
        Stack.push_back(Parent);
    }
  }
  return false;
}
/// Matches a declaration that has external formal linkage.
///
/// Example matches only z (matcher = varDecl(hasExternalFormalLinkage()))
/// \code
/// void f() {
/// int x;
/// static int y;
/// }
/// int z;
/// \endcode
///
/// Example matches f() because it has external formal linkage despite being
/// unique to the translation unit as though it has internal likage
/// (matcher = functionDecl(hasExternalFormalLinkage()))
///
/// \code
/// namespace {
/// void f() {}
/// }
/// \endcode
AST_MATCHER(NamedDecl, hasExternalFormalLinkage) {
  // Formal linkage is the language-level notion; e.g. a function in an
  // anonymous namespace still reports external *formal* linkage.
  const bool IsExternal = Node.hasExternalFormalLinkage();
  return IsExternal;
}
/// Matches a declaration that has default arguments.
///
/// Example matches y (matcher = parmVarDecl(hasDefaultArgument()))
/// \code
/// void x(int val) {}
/// void y(int val = 0) {}
/// \endcode
///
/// Deprecated. Use hasInitializer() instead to be able to
/// match on the contents of the default argument. For example:
///
/// \code
/// void x(int val = 7) {}
/// void y(int val = 42) {}
/// \endcode
/// parmVarDecl(hasInitializer(integerLiteral(equals(42))))
/// matches the parameter of y
///
/// A matcher such as
/// parmVarDecl(hasInitializer(anything()))
/// is equivalent to parmVarDecl(hasDefaultArgument()).
AST_MATCHER(ParmVarDecl, hasDefaultArgument) {
  // Ask the parameter declaration itself whether a default was written.
  const bool HasDefault = Node.hasDefaultArg();
  return HasDefault;
}
/// Matches array new expressions.
///
/// Given:
/// \code
/// MyClass *p1 = new MyClass[10];
/// \endcode
/// cxxNewExpr(isArray())
/// matches the expression 'new MyClass[10]'.
AST_MATCHER(CXXNewExpr, isArray) {
  // Delegate to the AST node's own array-form query.
  const bool IsArrayForm = Node.isArray();
  return IsArrayForm;
}
/// Matches placement new expression arguments.
///
/// Given:
/// \code
/// MyClass *p1 = new (Storage, 16) MyClass();
/// \endcode
/// cxxNewExpr(hasPlacementArg(1, integerLiteral(equals(16))))
/// matches the expression 'new (Storage, 16) MyClass()'.
AST_MATCHER_P2(CXXNewExpr, hasPlacementArg, unsigned, Index,
               internal::Matcher<Expr>, InnerMatcher) {
  // Out-of-range indices simply fail to match.
  if (Index >= Node.getNumPlacementArgs())
    return false;
  return InnerMatcher.matches(*Node.getPlacementArg(Index), Finder, Builder);
}
/// Matches any placement new expression arguments.
///
/// Given:
/// \code
/// MyClass *p1 = new (Storage) MyClass();
/// \endcode
/// cxxNewExpr(hasAnyPlacementArg(anything()))
/// matches the expression 'new (Storage, 16) MyClass()'.
AST_MATCHER_P(CXXNewExpr, hasAnyPlacementArg, internal::Matcher<Expr>,
              InnerMatcher) {
  // Succeed as soon as the inner matcher accepts one placement argument.
  for (const Expr *Arg : Node.placement_arguments()) {
    if (InnerMatcher.matches(*Arg, Finder, Builder))
      return true;
  }
  return false;
}
/// Matches array new expressions with a given array size.
///
/// Given:
/// \code
/// MyClass *p1 = new MyClass[10];
/// \endcode
/// cxxNewExpr(hasArraySize(integerLiteral(equals(10))))
/// matches the expression 'new MyClass[10]'.
AST_MATCHER_P(CXXNewExpr, hasArraySize, internal::Matcher<Expr>, InnerMatcher) {
  // Only array-form new expressions carrying an explicit size can match.
  if (!Node.isArray())
    return false;
  const Expr *Size = *Node.getArraySize();
  return Size != nullptr && InnerMatcher.matches(*Size, Finder, Builder);
}
/// Matches a class declaration that is defined.
///
/// Example matches x (matcher = cxxRecordDecl(hasDefinition()))
/// \code
/// class x {};
/// class y;
/// \endcode
AST_MATCHER(CXXRecordDecl, hasDefinition) {
  // A forward declaration has no definition and will not match.
  const bool IsDefined = Node.hasDefinition();
  return IsDefined;
}
/// Matches C++11 scoped enum declaration.
///
/// Example matches Y (matcher = enumDecl(isScoped()))
/// \code
/// enum X {};
/// enum class Y {};
/// \endcode
AST_MATCHER(EnumDecl, isScoped) {
  // True only for C++11 `enum class` / `enum struct` declarations.
  const bool Scoped = Node.isScoped();
  return Scoped;
}
/// Matches a function declared with a trailing return type.
///
/// Example matches Y (matcher = functionDecl(hasTrailingReturn()))
/// \code
/// int X() {}
/// auto Y() -> int {}
/// \endcode
AST_MATCHER(FunctionDecl, hasTrailingReturn) {
  // Only prototyped function types can carry a trailing return type.
  const auto *Proto = Node.getType()->getAs<FunctionProtoType>();
  return Proto ? Proto->hasTrailingReturn() : false;
}
/// Matches expressions that match InnerMatcher that are possibly wrapped in an
/// elidable constructor and other corresponding bookkeeping nodes.
///
/// In C++17, elidable copy constructors are no longer being generated in the
/// AST as it is not permitted by the standard. They are, however, part of the
/// AST in C++14 and earlier. So, a matcher must abstract over these differences
/// to work in all language modes. This matcher skips elidable constructor-call
/// AST nodes, `ExprWithCleanups` nodes wrapping elidable constructor-calls and
/// various implicit nodes inside the constructor calls, all of which will not
/// appear in the C++17 AST.
///
/// Given
///
/// \code
/// struct H {};
/// H G();
/// void f() {
/// H D = G();
/// }
/// \endcode
///
/// ``varDecl(hasInitializer(ignoringElidableConstructorCall(callExpr())))``
/// matches ``H D = G()`` in C++11 through C++17 (and beyond).
AST_MATCHER_P(Expr, ignoringElidableConstructorCall,
              ast_matchers::internal::Matcher<Expr>, InnerMatcher) {
  // E tracks the node that we are examining.
  const Expr *E = &Node;

  // If present, remove an outer `ExprWithCleanups` corresponding to the
  // underlying `CXXConstructExpr`. This check won't cover all cases of added
  // `ExprWithCleanups` corresponding to `CXXConstructExpr` nodes (because the
  // EWC is placed on the outermost node of the expression, which this may not
  // be), but, it still improves the coverage of this matcher.
  if (const auto *CleanupsExpr = dyn_cast<ExprWithCleanups>(&Node))
    E = CleanupsExpr->getSubExpr();

  // An elidable copy/move constructor call wraps its source expression in a
  // MaterializeTemporaryExpr argument; peel both off and try the inner
  // matcher on the materialized sub-expression, matching what the C++17 AST
  // would contain directly.
  if (const auto *CtorExpr = dyn_cast<CXXConstructExpr>(E)) {
    if (CtorExpr->isElidable()) {
      if (const auto *MaterializeTemp =
              dyn_cast<MaterializeTemporaryExpr>(CtorExpr->getArg(0))) {
        return InnerMatcher.matches(*MaterializeTemp->getSubExpr(), Finder,
                                    Builder);
      }
    }
  }

  // No elidable-constructor bookkeeping found: match the node as written.
  return InnerMatcher.matches(Node, Finder, Builder);
}
//----------------------------------------------------------------------------//
// OpenMP handling.
//----------------------------------------------------------------------------//
/// Matches any ``#pragma omp`` executable directive.
///
/// Given
///
/// \code
/// #pragma omp parallel
/// #pragma omp parallel default(none)
/// #pragma omp taskyield
/// \endcode
///
/// ``ompExecutableDirective()`` matches ``omp parallel``,
/// ``omp parallel default(none)`` and ``omp taskyield``.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, OMPExecutableDirective>
ompExecutableDirective;
/// Matches standalone OpenMP directives,
/// i.e., directives that can't have a structured block.
///
/// Given
///
/// \code
/// #pragma omp parallel
/// {}
/// #pragma omp taskyield
/// \endcode
///
/// ``ompExecutableDirective(isStandaloneDirective()))`` matches
/// ``omp taskyield``.
AST_MATCHER(OMPExecutableDirective, isStandaloneDirective) {
  // Standalone directives (e.g. `omp taskyield`) cannot own a structured
  // block; the AST node knows which category it belongs to.
  const bool Standalone = Node.isStandaloneDirective();
  return Standalone;
}
/// Matches the Stmt AST node that is marked as being the structured-block
/// of an OpenMP executable directive.
///
/// Given
///
/// \code
/// #pragma omp parallel
/// {}
/// \endcode
///
/// ``stmt(isOMPStructuredBlock()))`` matches ``{}``.
AST_MATCHER(Stmt, isOMPStructuredBlock) {
  // True for a statement flagged as some OpenMP directive's structured block.
  return Node.isOMPStructuredBlock();
}
/// Matches the structured-block of the OpenMP executable directive
///
/// Prerequisite: the executable directive must not be standalone directive.
/// If it is, it will never match.
///
/// Given
///
/// \code
/// #pragma omp parallel
/// ;
/// #pragma omp parallel
/// {}
/// \endcode
///
/// ``ompExecutableDirective(hasStructuredBlock(nullStmt()))`` will match ``;``
AST_MATCHER_P(OMPExecutableDirective, hasStructuredBlock,
              internal::Matcher<Stmt>, InnerMatcher) {
  // Standalone directives have no structured block and can never match.
  if (Node.isStandaloneDirective())
    return false;
  const Stmt *Block = Node.getStructuredBlock();
  return InnerMatcher.matches(*Block, Finder, Builder);
}
/// Matches any clause in an OpenMP directive.
///
/// Given
///
/// \code
/// #pragma omp parallel
/// #pragma omp parallel default(none)
/// \endcode
///
/// ``ompExecutableDirective(hasAnyClause(anything()))`` matches
/// ``omp parallel default(none)``.
AST_MATCHER_P(OMPExecutableDirective, hasAnyClause,
              internal::Matcher<OMPClause>, InnerMatcher) {
  // Let the shared helper try the matcher against each clause in order.
  const ArrayRef<OMPClause *> AllClauses = Node.clauses();
  return matchesFirstInPointerRange(InnerMatcher, AllClauses.begin(),
                                    AllClauses.end(), Finder, Builder);
}
/// Matches OpenMP ``default`` clause.
///
/// Given
///
/// \code
/// #pragma omp parallel default(none)
/// #pragma omp parallel default(shared)
/// #pragma omp parallel
/// \endcode
///
/// ``ompDefaultClause()`` matches ``default(none)`` and ``default(shared)``.
extern const internal::VariadicDynCastAllOfMatcher<OMPClause, OMPDefaultClause>
ompDefaultClause;
/// Matches if the OpenMP ``default`` clause has ``none`` kind specified.
///
/// Given
///
/// \code
/// #pragma omp parallel
/// #pragma omp parallel default(none)
/// #pragma omp parallel default(shared)
/// \endcode
///
/// ``ompDefaultClause(isNoneKind())`` matches only ``default(none)``.
AST_MATCHER(OMPDefaultClause, isNoneKind) {
  // Compare against the enumerator for `default(none)`.
  const auto Kind = Node.getDefaultKind();
  return Kind == llvm::omp::OMP_DEFAULT_none;
}
/// Matches if the OpenMP ``default`` clause has ``shared`` kind specified.
///
/// Given
///
/// \code
/// #pragma omp parallel
/// #pragma omp parallel default(none)
/// #pragma omp parallel default(shared)
/// \endcode
///
/// ``ompDefaultClause(isSharedKind())`` matches only ``default(shared)``.
AST_MATCHER(OMPDefaultClause, isSharedKind) {
  // Compare against the enumerator for `default(shared)`.
  const auto Kind = Node.getDefaultKind();
  return Kind == llvm::omp::OMP_DEFAULT_shared;
}
/// Matches if the OpenMP directive is allowed to contain the specified OpenMP
/// clause kind.
///
/// Given
///
/// \code
/// #pragma omp parallel
/// #pragma omp parallel for
/// #pragma omp for
/// \endcode
///
/// `ompExecutableDirective(isAllowedToContainClause(OMPC_default))`` matches
/// ``omp parallel`` and ``omp parallel for``.
///
/// If the matcher is use from clang-query, ``OpenMPClauseKind`` parameter
/// should be passed as a quoted string. e.g.,
/// ``isAllowedToContainClauseKind("OMPC_default").``
AST_MATCHER_P(OMPExecutableDirective, isAllowedToContainClauseKind,
              OpenMPClauseKind, CKind) {
  // Consult the OpenMP directive/clause compatibility tables for the OpenMP
  // version active in this compilation.
  const auto &LO = Finder->getASTContext().getLangOpts();
  return isAllowedClauseForDirective(Node.getDirectiveKind(), CKind, LO.OpenMP);
}
//----------------------------------------------------------------------------//
// End OpenMP handling.
//----------------------------------------------------------------------------//
} // namespace ast_matchers
} // namespace clang
#endif // LLVM_CLANG_ASTMATCHERS_ASTMATCHERS_H
|
nsfactor.c | /*
* This file is part of the GROMACS molecular simulation package.
*
* Copyright (c) 1991-2000, University of Groningen, The Netherlands.
* Copyright (c) 2001-2004, The GROMACS development team,
* check out http://www.gromacs.org for more information.
* Copyright (c) 2012,2013, by the GROMACS development team, led by
* David van der Spoel, Berk Hess, Erik Lindahl, and including many
* others, as listed in the AUTHORS file in the top-level source
* directory and at http://www.gromacs.org.
*
* GROMACS is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public License
* as published by the Free Software Foundation; either version 2.1
* of the License, or (at your option) any later version.
*
* GROMACS is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with GROMACS; if not, see
* http://www.gnu.org/licenses, or write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*
* If you want to redistribute modifications to GROMACS, please
* consider that scientific software is very special. Version
* control is crucial - bugs must be traceable. We will be happy to
* consider code for inclusion in the official distribution, but
* derived work must not be called official GROMACS. Details are found
* in the README & COPYING files - if they are missing, get the
* official version at http://www.gromacs.org.
*
* To help us fund GROMACS development, we humbly ask that you cite
* the research papers on the package. Check out http://www.gromacs.org.
*/
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif
#include <ctype.h>
#include <string.h>
#include "futil.h"
#include "gmx_random.h"
#include "smalloc.h"
#include "sysstuff.h"
#include "strdb.h"
#include "vec.h"
#include "nsfactor.h"
#include "gmx_omp.h"
/* Abort with a fatal error when the requested histogram bin width is below
 * the smallest physically meaningful distance (the H-H bond, ~0.1 nm). */
void check_binwidth(real binwidth)
{
    const real smallest_bin = 0.1;

    if (binwidth < smallest_bin)
    {
        /* Fixed typos in the user-facing message ("shouldnt ... then"). */
        gmx_fatal(FARGS, "Binwidth shouldn't be smaller than the smallest bond length (H-H bond ~0.1nm) in a box");
    }
}
/* Validate the Monte-Carlo coverage parameter: -1 selects the automatic
 * default, otherwise the value must be a fraction in (0,1]. */
void check_mcover(real mcover)
{
    if (mcover == -1)
    {
        return;
    }
    /* The original test combined comparisons with bitwise '&' and let
     * mcover == 0 slip through, contradicting the documented range (0,1]. */
    if (mcover <= 0 || mcover > 1.0)
    {
        gmx_fatal(FARGS, "mcover should be -1 or (0,1]");
    }
}
/* Normalize a[0..n-1] in place so that the entries sum to 1.
 * An all-zero (or empty) input is left untouched instead of producing
 * NaN/Inf from a division by zero. */
void normalize_probability(int n, double *a)
{
    int    i;
    double norm = 0.0;

    for (i = 0; i < n; i++)
    {
        norm += a[i];
    }
    if (norm == 0.0)
    {
        return; /* nothing to normalize; avoid 0/0 */
    }
    for (i = 0; i < n; i++)
    {
        a[i] /= norm;
    }
}
/* Read neutron scattering lengths from nsfactor.dat: one entry per line of
 * "atomname protons neutrons slength".  Malformed lines are skipped with a
 * warning.  Returns a freshly allocated table shrunk to the number of valid
 * entries; the caller owns the memory (including the strdup'ed names). */
gmx_neutron_atomic_structurefactors_t *gmx_neutronstructurefactors_init(const char *datfn)
{
    FILE   *fp;
    char    line[STRLEN];
    int     nralloc = 10;
    int     n, p;
    int     i, line_no;
    char    atomnm[8];
    double  slength;
    gmx_neutron_atomic_structurefactors_t *gnsf;

    fp      = libopen(datfn);
    line_no = 0; /* physical line number in the file, for diagnostics */

    /* Allocate one table struct (the original allocated nralloc of them,
     * wasting nralloc-1) plus initial room for its parallel arrays. */
    snew(gnsf, 1);
    snew(gnsf->atomnm, nralloc);
    snew(gnsf->p, nralloc);
    snew(gnsf->n, nralloc);
    snew(gnsf->slength, nralloc);
    gnsf->nratoms = 0;

    while (get_a_line(fp, line, STRLEN))
    {
        line_no++;
        /* %7s bounds the read to atomnm[8]; the original unbounded %s could
         * overflow on long names. */
        if (sscanf(line, "%7s %d %d %lf", atomnm, &p, &n, &slength) == 4)
        {
            i = gnsf->nratoms;
            if (i == nralloc)
            {
                /* Grow geometrically; the original grew by one element per
                 * entry, giving quadratic reallocation cost. */
                nralloc *= 2;
                srenew(gnsf->atomnm, nralloc);
                srenew(gnsf->p, nralloc);
                srenew(gnsf->n, nralloc);
                srenew(gnsf->slength, nralloc);
            }
            gnsf->atomnm[i]  = strdup(atomnm);
            gnsf->n[i]       = n;
            gnsf->p[i]       = p;
            gnsf->slength[i] = slength;
            gnsf->nratoms++;
        }
        else
        {
            /* line_no is now the real file line (the original reported the
             * count of valid entries instead). */
            fprintf(stderr, "WARNING: Error in file %s at line %d ignored\n",
                    datfn, line_no);
        }
    }
    /* Shrink the arrays to the exact number of entries read. */
    srenew(gnsf->atomnm, gnsf->nratoms);
    srenew(gnsf->p, gnsf->nratoms);
    srenew(gnsf->n, gnsf->nratoms);
    srenew(gnsf->slength, gnsf->nratoms);

    fclose(fp);

    return (gmx_neutron_atomic_structurefactors_t *) gnsf;
}
/* Build a SANS context: assign each atom in the topology a neutron
 * scattering length looked up by atomic number in the gnsf table.
 * The returned struct and its slength array are owned by the caller;
 * the topology pointer is shared, not copied. */
gmx_sans_t *gmx_sans_init (t_topology *top, gmx_neutron_atomic_structurefactors_t *gnsf)
{
    gmx_sans_t *gsans = NULL;
    int         i, j;

    /* Try to assign scattering length from nsfactor.dat */
    snew(gsans, 1);
    snew(gsans->slength, top->atoms.nr);
    /* copy topology data */
    gsans->top = top;

    for (i = 0; i < top->atoms.nr; i++)
    {
        for (j = 0; j < gnsf->nratoms; j++)
        {
            if (top->atoms.atom[i].atomnumber == gnsf->p[j])
            {
                /* we need special case for H and D */
                if (top->atoms.atom[i].atomnumber == 1)
                {
                    /* NOTE(review): exact float compare of the mass against
                     * 1.008000 distinguishes H from D -- fragile if the force
                     * field uses a slightly different hydrogen mass; confirm. */
                    if (top->atoms.atom[i].m == 1.008000)
                    {
                        /* assumes nsfactor.dat lists H first and D second
                         * (slength[0]/slength[1]) -- TODO confirm */
                        gsans->slength[i] = gnsf->slength[0];
                    }
                    else
                    {
                        gsans->slength[i] = gnsf->slength[1];
                    }
                }
                else
                {
                    gsans->slength[i] = gnsf->slength[j];
                }
            }
        }
    }

    return (gmx_sans_t *) gsans;
}
/* Compute the pair-distance histogram g(r) weighted by the product of the
 * two atoms' neutron scattering lengths, either exactly over all pairs or
 * by Monte-Carlo sampling (bMC), optionally normalized to a probability
 * (bNORM).  With OpenMP each thread accumulates into its own histogram
 * (tgr[tid]) which is summed afterwards, so no atomics are needed.
 * The caller owns the returned histogram. */
gmx_radial_distribution_histogram_t *calc_radial_distribution_histogram (
    gmx_sans_t *gsans,
    rvec *x,
    matrix box,
    atom_id *index,
    int isize,
    double binwidth,
    gmx_bool bMC,
    gmx_bool bNORM,
    real mcover,
    unsigned int seed)
{
    gmx_radial_distribution_histogram_t *pr = NULL;
    rvec dist;
    double rmax;
    int i, j;
#ifdef GMX_OPENMP
    double **tgr;          /* per-thread partial histograms */
    int tid;
    int nthreads;
    gmx_rng_t *trng = NULL; /* per-thread RNGs seeded from the master RNG */
#endif
    gmx_large_int_t mc = 0, max;
    gmx_rng_t rng = NULL;

    /* allocate memory for pr */
    snew(pr, 1);
    /* set some fields */
    pr->binwidth = binwidth;

    /*
     * create max dist rvec
     * dist = box[xx] + box[yy] + box[zz]
     * NOTE(review): this bounds the histogram by the box diagonal; it
     * presumes all sampled distances fit below rmax -- confirm for the
     * intended (non-PBC-wrapped) usage.
     */
    rvec_add(box[XX], box[YY], dist);
    rvec_add(box[ZZ], dist, dist);
    rmax = norm(dist);

    pr->grn = (int)floor(rmax/pr->binwidth)+1;
    rmax = pr->grn*pr->binwidth;

    snew(pr->gr, pr->grn);

    if (bMC)
    {
        /* Special case for setting automaticaly number of mc iterations to 1% of total number of direct iterations */
        if (mcover == -1)
        {
            max = (gmx_large_int_t)floor(0.5*0.01*isize*(isize-1));
        }
        else
        {
            max = (gmx_large_int_t)floor(0.5*mcover*isize*(isize-1));
        }
        rng = gmx_rng_init(seed);
#ifdef GMX_OPENMP
        nthreads = gmx_omp_get_max_threads();
        snew(tgr, nthreads);
        snew(trng, nthreads);
        for (i = 0; i < nthreads; i++)
        {
            snew(tgr[i], pr->grn);
            trng[i] = gmx_rng_init(gmx_rng_uniform_uint32(rng));
        }
#pragma omp parallel shared(tgr,trng,mc) private(tid,i,j)
        {
            tid = gmx_omp_get_thread_num();
            /* now starting parallel threads */
#pragma omp for
            for (mc = 0; mc < max; mc++)
            {
                /* pick a random atom pair; self-pairs are skipped */
                i = (int)floor(gmx_rng_uniform_real(trng[tid])*isize);
                j = (int)floor(gmx_rng_uniform_real(trng[tid])*isize);
                if (i != j)
                {
                    tgr[tid][(int)floor(sqrt(distance2(x[index[i]], x[index[j]]))/binwidth)] += gsans->slength[index[i]]*gsans->slength[index[j]];
                }
            }
        }
        /* collecting data from threads */
        for (i = 0; i < pr->grn; i++)
        {
            for (j = 0; j < nthreads; j++)
            {
                pr->gr[i] += tgr[j][i];
            }
        }
        /* freeing memory for tgr and destroying trng */
        for (i = 0; i < nthreads; i++)
        {
            sfree(tgr[i]);
            gmx_rng_destroy(trng[i]);
        }
        sfree(tgr);
        sfree(trng);
#else
        for (mc = 0; mc < max; mc++)
        {
            i = (int)floor(gmx_rng_uniform_real(rng)*isize);
            j = (int)floor(gmx_rng_uniform_real(rng)*isize);
            if (i != j)
            {
                pr->gr[(int)floor(sqrt(distance2(x[index[i]], x[index[j]]))/binwidth)] += gsans->slength[index[i]]*gsans->slength[index[j]];
            }
        }
#endif
        gmx_rng_destroy(rng);
    }
    else
    {
#ifdef GMX_OPENMP
        nthreads = gmx_omp_get_max_threads();
        /* Allocating memory for tgr arrays */
        snew(tgr, nthreads);
        for (i = 0; i < nthreads; i++)
        {
            snew(tgr[i], pr->grn);
        }
#pragma omp parallel shared(tgr) private(tid,i,j)
        {
            tid = gmx_omp_get_thread_num();
            /* starting parallel threads */
#pragma omp for
            for (i = 0; i < isize; i++)
            {
                /* exact double loop over all unordered pairs j < i */
                for (j = 0; j < i; j++)
                {
                    tgr[tid][(int)floor(sqrt(distance2(x[index[i]], x[index[j]]))/binwidth)] += gsans->slength[index[i]]*gsans->slength[index[j]];
                }
            }
        }
        /* collecating data for pr->gr */
        for (i = 0; i < pr->grn; i++)
        {
            for (j = 0; j < nthreads; j++)
            {
                pr->gr[i] += tgr[j][i];
            }
        }
        /* freeing memory for tgr */
        for (i = 0; i < nthreads; i++)
        {
            sfree(tgr[i]);
        }
        sfree(tgr);
#else
        for (i = 0; i < isize; i++)
        {
            for (j = 0; j < i; j++)
            {
                pr->gr[(int)floor(sqrt(distance2(x[index[i]], x[index[j]]))/binwidth)] += gsans->slength[index[i]]*gsans->slength[index[j]];
            }
        }
#endif
    }
    /* normalize if needed */
    if (bNORM)
    {
        normalize_probability(pr->grn, pr->gr);
    }
    /* bin centers: r[i] is the midpoint of bin i */
    snew(pr->r, pr->grn);
    for (i = 0; i < pr->grn; i++)
    {
        pr->r[i] = (pr->binwidth*i+pr->binwidth*0.5);
    }

    return (gmx_radial_distribution_histogram_t *) pr;
}
/* Debye transform: convert the pair-distance histogram pr into a scattering
 * intensity curve sampled at q = start_q + k*q_step, k = 0..qn-1.
 * At q == 0 the sin(q r)/(q r) kernel is taken at its limit, so s[0] = 1. */
gmx_static_structurefactor_t *convert_histogram_to_intensity_curve (gmx_radial_distribution_histogram_t *pr, double start_q, double end_q, double q_step)
{
    gmx_static_structurefactor_t *sq = NULL;
    int qi, ri, qfirst;

    /* init data */
    snew(sq, 1);
    sq->qn = (int)floor((end_q-start_q)/q_step);
    snew(sq->q, sq->qn);
    snew(sq->s, sq->qn);

    for (qi = 0; qi < sq->qn; qi++)
    {
        sq->q[qi] = start_q+qi*q_step;
    }

    /* The two branches of the original differed only in whether the q == 0
     * point is pinned to 1; fold them into a single summation loop. */
    qfirst = 0;
    if (start_q == 0.0)
    {
        sq->s[0] = 1.0;
        qfirst   = 1;
    }
    for (qi = qfirst; qi < sq->qn; qi++)
    {
        for (ri = 0; ri < pr->grn; ri++)
        {
            sq->s[qi] += (pr->gr[ri]/pr->r[ri])*sin(sq->q[qi]*pr->r[ri]);
        }
        sq->s[qi] /= sq->q[qi];
    }

    return (gmx_static_structurefactor_t *) sq;
}
|
mixed_tentusscher_myo_epi_2004_S3_2.c | // Scenario 3 - Mixed-Model TenTusscher 2004 (Myocardium + Epicardium)
// (AP + max:dvdt + Rc)
#include <stdio.h>
#include "mixed_tentusscher_myo_epi_2004_S3_2.h"
// Report the model's resting potential and the size of its ODE system,
// each only when the corresponding flag is set by the caller.
GET_CELL_MODEL_DATA(init_cell_model_data)
{
    if(get_initial_v)
        cell_model->initial_v = INITIAL_V;

    if(get_neq)
        cell_model->number_of_ode_equations = NEQ;
}
// Fill the state vector sv of cell sv_id with steady-state initial
// conditions, selecting myocardium (mapping[sv_id] == 0) or epicardium
// values from the mask passed in extra_data.
SET_ODE_INITIAL_CONDITIONS_CPU(set_model_initial_conditions_cpu)
{
    // Print the banner only on the first invocation.
    // NOTE(review): plain static state; racy if this initializer is ever
    // called from multiple threads -- confirm single-threaded setup.
    static bool first_call = true;

    if(first_call)
    {
        print_to_stdout_and_file("Using mixed version of TenTusscher 2004 myocardium + epicardium CPU model\n");
        first_call = false;
    }

    // Get the mapping array
    uint32_t *mapping = NULL;
    if(extra_data)
    {
        mapping = (uint32_t*)extra_data;
    }
    else
    {
        // Aborts the program: a mixed model is meaningless without a mask.
        print_to_stderr_and_file_and_exit("You need to specify a mask function when using a mixed model!\n");
    }

    // Initial conditions for TenTusscher myocardium
    if (mapping[sv_id] == 0)
    {
        // Default initial conditions
        /*
        sv[0] = INITIAL_V; // V; millivolt
        sv[1] = 0.f; //M
        sv[2] = 0.75; //H
        sv[3] = 0.75f; //J
        sv[4] = 0.f; //Xr1
        sv[5] = 1.f; //Xr2
        sv[6] = 0.f; //Xs
        sv[7] = 1.f; //S
        sv[8] = 0.f; //R
        sv[9] = 0.f; //D
        sv[10] = 1.f; //F
        sv[11] = 1.f; //FCa
        sv[12] = 1.f; //G
        sv[13] = 0.0002; //Cai
        sv[14] = 0.2f; //CaSR
        sv[15] = 11.6f; //Nai
        sv[16] = 138.3f; //Ki
        */
        // Elnaz's steady-state initial conditions
        real sv_sst[]={-86.3965119057144,0.00133824305081220,0.775463576993407,0.775278393595599,0.000179499343643571,0.483303039835057,0.00297647859235379,0.999998290403642,1.98961879737287e-08,1.93486789479597e-05,0.999599147019885,1.00646342475688,0.999975178010127,5.97703651642618e-05,0.418325344820368,10.7429775420171,138.918155900633};
        for (uint32_t i = 0; i < NEQ; i++)
            sv[i] = sv_sst[i];
    }
    // Initial conditions for TenTusscher epicardium
    else
    {
        // Default initial conditions
        /*
        sv[0] = INITIAL_V; // V; millivolt
        sv[1] = 0.f; //M
        sv[2] = 0.75; //H
        sv[3] = 0.75f; //J
        sv[4] = 0.f; //Xr1
        sv[5] = 1.f; //Xr2
        sv[6] = 0.f; //Xs
        sv[7] = 1.f; //S
        sv[8] = 0.f; //R
        sv[9] = 0.f; //D
        sv[10] = 1.f; //F
        sv[11] = 1.f; //FCa
        sv[12] = 1.f; //G
        sv[13] = 0.0002; //Cai
        sv[14] = 0.2f; //CaSR
        sv[15] = 11.6f; //Nai
        sv[16] = 138.3f; //Ki
        */
        // Elnaz's steady-state initial conditions
        real sv_sst[]={-86.5666381018619,0.00129051605124356,0.779668062709313,0.779476464261432,0.000174942983091856,0.485073219069535,0.00294064377784649,0.999998347537377,1.93402807596828e-08,1.89147579865266e-05,0.999766536540144,1.00702637601413,0.999994286442644,4.67868204274064e-05,0.302079209595859,10.4213056981946,139.366419036882};
        for (uint32_t i = 0; i < NEQ; i++)
            sv[i] = sv_sst[i];
    }
}
// Advance every requested cell by num_steps explicit ODE steps of size dt,
// choosing the myocardium or epicardium right-hand side per cell from the
// mask supplied in extra_data.
SOLVE_MODEL_ODES_CPU(solve_model_odes_cpu)
{
    // Get the mapping array
    uint32_t *mapping = NULL;
    if(extra_data)
    {
        mapping = (uint32_t*)extra_data;
    }
    else
    {
        print_to_stderr_and_file_and_exit("You need to specify a mask function when using a mixed model!\n");
    }

    uint32_t sv_id;
    int i;

    #pragma omp parallel for private(sv_id)
    for (i = 0; i < num_cells_to_solve; i++)
    {
        if(cells_to_solve)
            sv_id = cells_to_solve[i];
        else
            sv_id = (uint32_t )i;

        for (int j = 0; j < num_steps; ++j)
        {
            // BUGFIX: look the mask up by the cell's state-vector id, as
            // set_model_initial_conditions_cpu does, not by the loop index;
            // the two differ whenever cells_to_solve remaps cells.
            if (mapping[sv_id] == 0)
                solve_model_ode_cpu_myo(dt, sv + (sv_id * NEQ), stim_currents[i]);
            else
                solve_model_ode_cpu_epi(dt, sv + (sv_id * NEQ), stim_currents[i]);
        }
    }
}
// One explicit integration step for a myocardium cell: snapshot the state,
// evaluate the RHS (which already returns the updated state values, not
// derivatives), and write the result back into sv.
void solve_model_ode_cpu_myo (real dt, real *sv, real stim_current)
{
    real state_old[NEQ], state_new[NEQ];
    int  k;

    for (k = 0; k < NEQ; k++)
        state_old[k] = sv[k];

    RHS_cpu_myo(state_old, state_new, stim_current, dt);

    for (k = 0; k < NEQ; k++)
        sv[k] = state_new[k];
}
// TenTusscher 2004 myocardium cell model right-hand side.
// NOTE: despite the name, rDY_ receives the UPDATED state values
// (Rush-Larsen updates for the gates, explicit Euler for voltage and
// concentrations), not time derivatives -- solve_model_ode_cpu_myo copies
// rDY_ straight back into sv.
void RHS_cpu_myo(const real *sv, real *rDY_, real stim_current, real dt)
{
    // State variables
    real svolt = sv[0];
    real sm = sv[1];
    real sh = sv[2];
    real sj = sv[3];
    real sxr1 = sv[4];
    real sxr2 = sv[5];
    real sxs = sv[6];
    real ss = sv[7];
    real sr = sv[8];
    real sd = sv[9];
    real sf = sv[10];
    real sfca = sv[11];
    real sg = sv[12];
    real Cai = sv[13];
    real CaSR = sv[14];
    real Nai = sv[15];
    real Ki = sv[16];

    //External concentrations
    real Ko=5.4;
    real Cao=2.0;
    real Nao=140.0;

    //Intracellular volumes
    real Vc=0.016404;
    real Vsr=0.001094;

    //Calcium dynamics
    real Bufc=0.15f;
    real Kbufc=0.001f;
    real Bufsr=10.f;
    real Kbufsr=0.3f;
    real taufca=2.f;
    real taug=2.f;
    real Vmaxup=0.000425f;
    real Kup=0.00025f;

    //Constants
    const real R = 8314.472f;
    const real F = 96485.3415f;
    const real T =310.0f;
    real RTONF =(R*T)/F;

    //Cellular capacitance
    real CAPACITANCE=0.185;

    //Parameters for currents
    //Parameters for IKr
    real Gkr=0.096;
    //Parameters for Iks
    real pKNa=0.03;
    // [!] Myocardium cell
    real Gks=0.062;
    //Parameters for Ik1
    real GK1=5.405;
    //Parameters for Ito
    // [!] Myocardium cell
    real Gto=0.294;
    //Parameters for INa
    real GNa=14.838;
    //Parameters for IbNa
    real GbNa=0.00029;
    //Parameters for INaK
    real KmK=1.0;
    real KmNa=40.0;
    real knak=1.362;
    //Parameters for ICaL
    real GCaL=0.000175;
    //Parameters for IbCa
    real GbCa=0.000592;
    //Parameters for INaCa
    real knaca=1000;
    real KmNai=87.5;
    real KmCa=1.38;
    real ksat=0.1;
    real n=0.35;
    //Parameters for IpCa
    real GpCa=0.825;
    real KpCa=0.0005;
    //Parameters for IpK;
    real GpK=0.0146;

    // Working variables: ionic currents, SR fluxes, reversal potentials,
    // rate coefficients, steady states and time constants.
    real IKr;
    real IKs;
    real IK1;
    real Ito;
    real INa;
    real IbNa;
    real ICaL;
    real IbCa;
    real INaCa;
    real IpCa;
    real IpK;
    real INaK;
    real Irel;
    real Ileak;

    real dNai;
    real dKi;
    real dCai;
    real dCaSR;

    real A;
    // real BufferFactorc;
    // real BufferFactorsr;
    real SERCA;
    real Caisquare;
    real CaSRsquare;
    real CaCurrent;
    real CaSRCurrent;

    real fcaold;
    real gold;
    real Ek;
    real Ena;
    real Eks;
    real Eca;
    real CaCSQN;
    real bjsr;
    real cjsr;
    real CaBuf;
    real bc;
    real cc;
    real Ak1;
    real Bk1;
    real rec_iK1;
    real rec_ipK;
    real rec_iNaK;
    real AM;
    real BM;
    real AH_1;
    real BH_1;
    real AH_2;
    real BH_2;
    real AJ_1;
    real BJ_1;
    real AJ_2;
    real BJ_2;
    real M_INF;
    real H_INF;
    real J_INF;
    real TAU_M;
    real TAU_H;
    real TAU_J;
    real axr1;
    real bxr1;
    real axr2;
    real bxr2;
    real Xr1_INF;
    real Xr2_INF;
    real TAU_Xr1;
    real TAU_Xr2;
    real Axs;
    real Bxs;
    real Xs_INF;
    real TAU_Xs;
    real R_INF;
    real TAU_R;
    real S_INF;
    real TAU_S;
    real Ad;
    real Bd;
    real Cd;
    real TAU_D;
    real D_INF;
    real TAU_F;
    real F_INF;
    real FCa_INF;
    real G_INF;

    real inverseVcF2=1/(2*Vc*F);
    real inverseVcF=1./(Vc*F);
    real Kupsquare=Kup*Kup;
    // real BufcKbufc=Bufc*Kbufc;
    // real Kbufcsquare=Kbufc*Kbufc;
    // real Kbufc2=2*Kbufc;
    // real BufsrKbufsr=Bufsr*Kbufsr;
    // const real Kbufsrsquare=Kbufsr*Kbufsr;
    // const real Kbufsr2=2*Kbufsr;
    const real exptaufca=exp(-dt/taufca);
    const real exptaug=exp(-dt/taug);

    real sItot;

    //Needed to compute currents
    Ek=RTONF*(log((Ko/Ki)));
    Ena=RTONF*(log((Nao/Nai)));
    Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
    Eca=0.5*RTONF*(log((Cao/Cai)));
    Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
    Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
         exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
    rec_iK1=Ak1/(Ak1+Bk1);
    rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
    rec_ipK=1./(1.+exp((25-svolt)/5.98));

    //Compute currents
    INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
    ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
         (exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
    Ito=Gto*sr*ss*(svolt-Ek);
    IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
    IKs=Gks*sxs*sxs*(svolt-Eks);
    IK1=GK1*rec_iK1*(svolt-Ek);
    INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
          (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
          (exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
           exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
    INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
    IpCa=GpCa*Cai/(KpCa+Cai);
    IpK=GpK*rec_ipK*(svolt-Ek);
    IbNa=GbNa*(svolt-Ena);
    IbCa=GbCa*(svolt-Eca);

    //Determine total current
    (sItot) = IKr +
              IKs +
              IK1 +
              Ito +
              INa +
              IbNa +
              ICaL +
              IbCa +
              INaK +
              INaCa +
              IpCa +
              IpK +
              stim_current;

    //update concentrations
    // Buffered Ca is eliminated analytically: the quadratic in the free
    // concentration is solved in closed form below (bjsr/cjsr and bc/cc).
    Caisquare=Cai*Cai;
    CaSRsquare=CaSR*CaSR;
    CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
    A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
    Irel=A*sd*sg;
    Ileak=0.00008f*(CaSR-Cai);
    SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
    CaSRCurrent=SERCA-Irel-Ileak;
    CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
    dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
    bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
    cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
    CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
    CaBuf=Bufc*Cai/(Cai+Kbufc);
    dCai=dt*(CaCurrent-CaSRCurrent);
    bc=Bufc-CaBuf-dCai-Cai+Kbufc;
    cc=Kbufc*(CaBuf+dCai+Cai);
    Cai=(sqrt(bc*bc+4*cc)-bc)/2;
    dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
    Nai+=dt*dNai;
    dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
    Ki+=dt*dKi;

    //compute steady state values and time constants
    AM=1./(1.+exp((-60.-svolt)/5.));
    BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
    TAU_M=AM*BM;
    M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
    if (svolt>=-40.)
    {
        AH_1=0.;
        BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
        TAU_H= 1.0/(AH_1+BH_1);
    }
    else
    {
        AH_2=(0.057*exp(-(svolt+80.)/6.8));
        BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
        TAU_H=1.0/(AH_2+BH_2);
    }
    H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
    if(svolt>=-40.)
    {
        AJ_1=0.;
        BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
        TAU_J= 1.0/(AJ_1+BJ_1);
    }
    else
    {
        AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
               exp(-0.04391*svolt))*(svolt+37.78)/
              (1.+exp(0.311*(svolt+79.23))));
        BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
        TAU_J= 1.0/(AJ_2+BJ_2);
    }
    J_INF=H_INF;

    Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
    axr1=450./(1.+exp((-45.-svolt)/10.));
    bxr1=6./(1.+exp((svolt-(-30.))/11.5));
    TAU_Xr1=axr1*bxr1;
    Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
    axr2=3./(1.+exp((-60.-svolt)/20.));
    bxr2=1.12/(1.+exp((svolt-60.)/20.));
    TAU_Xr2=axr2*bxr2;

    Xs_INF=1./(1.+exp((-5.-svolt)/14.));
    Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
    Bxs=1./(1.+exp((svolt-60.)/20.));
    TAU_Xs=Axs*Bxs;

    // [!] Myocardium cell
    R_INF=1./(1.+exp((20-svolt)/6.));
    S_INF=1./(1.+exp((svolt+20)/5.));
    TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
    TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;

    D_INF=1./(1.+exp((-5-svolt)/7.5));
    Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
    Bd=1.4/(1.+exp((svolt+5)/5));
    Cd=1./(1.+exp((50-svolt)/20));
    TAU_D=Ad*Bd+Cd;
    F_INF=1./(1.+exp((svolt+20)/7));
    //TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10));
    TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML
    FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
             0.1/(1.+exp((Cai-0.0005)/0.0001))+
             0.20/(1.+exp((Cai-0.00075)/0.0008))+
             0.23 )/1.46;
    if(Cai<0.00035)
        G_INF=1./(1.+pow((Cai/0.00035),6));
    else
        G_INF=1./(1.+pow((Cai/0.00035),16));

    //Update gates
    // Rush-Larsen: each gate relaxes exponentially toward its steady state.
    rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
    rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
    rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
    rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
    rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
    rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
    rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
    rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
    rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
    rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);

    // fCa and g gates may only decrease while the cell is depolarized
    // (above -37 mV); otherwise keep the previous value.
    fcaold= sfca;
    sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
    if(sfca>fcaold && (svolt)>-37.0)
        sfca = fcaold;
    gold = sg;
    sg = G_INF-(G_INF-sg)*exptaug;
    if(sg>gold && (svolt)>-37.0)
        sg=gold;

    //update voltage
    rDY_[0] = svolt + dt*(-sItot);
    rDY_[11] = sfca;
    rDY_[12] = sg;
    rDY_[13] = Cai;
    rDY_[14] = CaSR;
    rDY_[15] = Nai;
    rDY_[16] = Ki;
}
// One explicit integration step for an epicardium cell: snapshot the state,
// evaluate the RHS (which already returns the updated state values, not
// derivatives), and write the result back into sv.
void solve_model_ode_cpu_epi (real dt, real *sv, real stim_current)
{
    real state_old[NEQ], state_new[NEQ];
    int  k;

    for (k = 0; k < NEQ; k++)
        state_old[k] = sv[k];

    RHS_cpu_epi(state_old, state_new, stim_current, dt);

    for (k = 0; k < NEQ; k++)
        sv[k] = state_new[k];
}
/* One update step of the epicardial ventricular cell model (ten Tusscher-style,
 * per the "[!] Epicardium cell" parameter choices and the CellML note below).
 * Gating variables are advanced with the Rush-Larsen scheme (relaxation toward
 * their voltage-dependent steady state), while the membrane potential and the
 * ionic concentrations use explicit forward Euler.
 *
 * sv           : current state vector, 17 entries (see unpacking below).
 * rDY_         : OUT - receives the UPDATED state vector, not derivatives
 *                (the caller copies rDY_ straight back into sv).
 * stim_current : externally applied stimulus current.
 * dt           : integration time step. */
void RHS_cpu_epi(const real *sv, real *rDY_, real stim_current, real dt)
{
// State variables
real svolt = sv[0];
real sm = sv[1];
real sh = sv[2];
real sj = sv[3];
real sxr1 = sv[4];
real sxr2 = sv[5];
real sxs = sv[6];
real ss = sv[7];
real sr = sv[8];
real sd = sv[9];
real sf = sv[10];
real sfca = sv[11];
real sg = sv[12];
real Cai = sv[13];
real CaSR = sv[14];
real Nai = sv[15];
real Ki = sv[16];
//External concentrations
real Ko=5.4;
real Cao=2.0;
real Nao=140.0;
//Intracellular volumes
real Vc=0.016404;
real Vsr=0.001094;
//Calcium dynamics
real Bufc=0.15f;
real Kbufc=0.001f;
real Bufsr=10.f;
real Kbufsr=0.3f;
real taufca=2.f;
real taug=2.f;
real Vmaxup=0.000425f;
real Kup=0.00025f;
//Constants
const real R = 8314.472f;
const real F = 96485.3415f;
const real T =310.0f;
real RTONF =(R*T)/F;
//Cellular capacitance
real CAPACITANCE=0.185;
//Parameters for currents
//Parameters for IKr
real Gkr=0.096;
//Parameters for Iks
real pKNa=0.03;
// [!] Epicardium cell
real Gks=0.245;
//Parameters for Ik1
real GK1=5.405;
//Parameters for Ito
// [!] Epicardium cell
real Gto=0.294;
//Parameters for INa
real GNa=14.838;
//Parameters for IbNa
real GbNa=0.00029;
//Parameters for INaK
real KmK=1.0;
real KmNa=40.0;
real knak=1.362;
//Parameters for ICaL
real GCaL=0.000175;
//Parameters for IbCa
real GbCa=0.000592;
//Parameters for INaCa
real knaca=1000;
real KmNai=87.5;
real KmCa=1.38;
real ksat=0.1;
real n=0.35;
//Parameters for IpCa
real GpCa=0.825;
real KpCa=0.0005;
//Parameters for IpK;
real GpK=0.0146;
// NOTE(review): the defaults above are immediately overwritten by this fitted
// parameter set - presumably from a calibration run; confirm provenance.
real parameters []={14.3793865384661,0.000186410749574289,0.000121113650924281,0.000506846995710814,0.225962091956109,0.140516980875224,0.109889354839696,4.57147213592430,0.0152630183984148,1.65236689053833,1099.64269823830,0.000374391607169453,0.513004079808466,0.0169401996097053,0.00333803821185753,6.30360246705429e-05};
GNa=parameters[0];
GbNa=parameters[1];
GCaL=parameters[2];
GbCa=parameters[3];
Gto=parameters[4];
Gkr=parameters[5];
Gks=parameters[6];
GK1=parameters[7];
GpK=parameters[8];
knak=parameters[9];
knaca=parameters[10];
Vmaxup=parameters[11];
GpCa=parameters[12];
real arel=parameters[13];
real crel=parameters[14];
real Vleak=parameters[15];
real IKr;
real IKs;
real IK1;
real Ito;
real INa;
real IbNa;
real ICaL;
real IbCa;
real INaCa;
real IpCa;
real IpK;
real INaK;
real Irel;
real Ileak;
real dNai;
real dKi;
real dCai;
real dCaSR;
real A;
// real BufferFactorc;
// real BufferFactorsr;
real SERCA;
real Caisquare;
real CaSRsquare;
real CaCurrent;
real CaSRCurrent;
real fcaold;
real gold;
real Ek;
real Ena;
real Eks;
real Eca;
real CaCSQN;
real bjsr;
real cjsr;
real CaBuf;
real bc;
real cc;
real Ak1;
real Bk1;
real rec_iK1;
real rec_ipK;
real rec_iNaK;
real AM;
real BM;
real AH_1;
real BH_1;
real AH_2;
real BH_2;
real AJ_1;
real BJ_1;
real AJ_2;
real BJ_2;
real M_INF;
real H_INF;
real J_INF;
real TAU_M;
real TAU_H;
real TAU_J;
real axr1;
real bxr1;
real axr2;
real bxr2;
real Xr1_INF;
real Xr2_INF;
real TAU_Xr1;
real TAU_Xr2;
real Axs;
real Bxs;
real Xs_INF;
real TAU_Xs;
real R_INF;
real TAU_R;
real S_INF;
real TAU_S;
real Ad;
real Bd;
real Cd;
real TAU_D;
real D_INF;
real TAU_F;
real F_INF;
real FCa_INF;
real G_INF;
real inverseVcF2=1/(2*Vc*F);
real inverseVcF=1./(Vc*F);
real Kupsquare=Kup*Kup;
// real BufcKbufc=Bufc*Kbufc;
// real Kbufcsquare=Kbufc*Kbufc;
// real Kbufc2=2*Kbufc;
// real BufsrKbufsr=Bufsr*Kbufsr;
// const real Kbufsrsquare=Kbufsr*Kbufsr;
// const real Kbufsr2=2*Kbufsr;
// Precomputed Rush-Larsen decay factors for the fixed-tau gates fCa and g.
const real exptaufca=exp(-dt/taufca);
const real exptaug=exp(-dt/taug);
real sItot;
//Needed to compute currents
// Nernst/reversal potentials from the current intra/extracellular concentrations.
Ek=RTONF*(log((Ko/Ki)));
Ena=RTONF*(log((Nao/Nai)));
Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
Eca=0.5*RTONF*(log((Cao/Cai)));
Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
rec_iK1=Ak1/(Ak1+Bk1);
rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
rec_ipK=1./(1.+exp((25-svolt)/5.98));
//Compute currents
INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
(exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
Ito=Gto*sr*ss*(svolt-Ek);
IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
IKs=Gks*sxs*sxs*(svolt-Eks);
IK1=GK1*rec_iK1*(svolt-Ek);
INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
(1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
(exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
IpCa=GpCa*Cai/(KpCa+Cai);
IpK=GpK*rec_ipK*(svolt-Ek);
IbNa=GbNa*(svolt-Ena);
IbCa=GbCa*(svolt-Eca);
//Determine total current
(sItot) = IKr +
IKs +
IK1 +
Ito +
INa +
IbNa +
ICaL +
IbCa +
INaK +
INaCa +
IpCa +
IpK +
stim_current;
//update concentrations
Caisquare=Cai*Cai;
CaSRsquare=CaSR*CaSR;
CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel;
Irel=A*sd*sg;
Ileak=Vleak*(CaSR-Cai);
SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
CaSRCurrent=SERCA-Irel-Ileak;
// SR and cytosolic calcium are solved through the quadratic of the
// rapid-buffering approximation (solution of x^2 + b x - c = 0).
CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
CaBuf=Bufc*Cai/(Cai+Kbufc);
dCai=dt*(CaCurrent-CaSRCurrent);
bc=Bufc-CaBuf-dCai-Cai+Kbufc;
cc=Kbufc*(CaBuf+dCai+Cai);
Cai=(sqrt(bc*bc+4*cc)-bc)/2;
dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
Nai+=dt*dNai;
dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
Ki+=dt*dKi;
//compute steady state values and time constants
AM=1./(1.+exp((-60.-svolt)/5.));
BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
TAU_M=AM*BM;
M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
// h and j gates use different rate formulations above/below -40 mV.
if (svolt>=-40.)
{
AH_1=0.;
BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
TAU_H= 1.0/(AH_1+BH_1);
}
else
{
AH_2=(0.057*exp(-(svolt+80.)/6.8));
BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
TAU_H=1.0/(AH_2+BH_2);
}
H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
if(svolt>=-40.)
{
AJ_1=0.;
BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
TAU_J= 1.0/(AJ_1+BJ_1);
}
else
{
AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
exp(-0.04391*svolt))*(svolt+37.78)/
(1.+exp(0.311*(svolt+79.23))));
BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
TAU_J= 1.0/(AJ_2+BJ_2);
}
J_INF=H_INF;
Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
axr1=450./(1.+exp((-45.-svolt)/10.));
bxr1=6./(1.+exp((svolt-(-30.))/11.5));
TAU_Xr1=axr1*bxr1;
Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
axr2=3./(1.+exp((-60.-svolt)/20.));
bxr2=1.12/(1.+exp((svolt-60.)/20.));
TAU_Xr2=axr2*bxr2;
Xs_INF=1./(1.+exp((-5.-svolt)/14.));
Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
Bxs=1./(1.+exp((svolt-60.)/20.));
TAU_Xs=Axs*Bxs;
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
D_INF=1./(1.+exp((-5-svolt)/7.5));
Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
Bd=1.4/(1.+exp((svolt+5)/5));
Cd=1./(1.+exp((50-svolt)/20));
TAU_D=Ad*Bd+Cd;
F_INF=1./(1.+exp((svolt+20)/7));
//TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10));
TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML
FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
0.1/(1.+exp((Cai-0.0005)/0.0001))+
0.20/(1.+exp((Cai-0.00075)/0.0008))+
0.23 )/1.46;
if(Cai<0.00035)
G_INF=1./(1.+pow((Cai/0.00035),6));
else
G_INF=1./(1.+pow((Cai/0.00035),16));
//Update gates
// Rush-Larsen update: new = INF - (INF - old) * exp(-dt/TAU).
rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
// fCa and g may only relax upward (toward 1) when the cell is depolarized
// past -37 mV is forbidden; keep the old value in that case.
fcaold= sfca;
sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
if(sfca>fcaold && (svolt)>-37.0)
sfca = fcaold;
gold = sg;
sg = G_INF-(G_INF-sg)*exptaug;
if(sg>gold && (svolt)>-37.0)
sg=gold;
//update voltage
rDY_[0] = svolt + dt*(-sItot);
rDY_[11] = sfca;
rDY_[12] = sg;
rDY_[13] = Cai;
rDY_[14] = CaSR;
rDY_[15] = Nai;
rDY_[16] = Ki;
}
|
lock_scalability.c | /**
* \file
* \brief Lock scalability benchmark.
*/
/*
* Copyright (c) 2007, 2008, 2009, ETH Zurich.
* All rights reserved.
*
* This file is distributed under the terms in the attached LICENSE file.
* If you do not find this file, copies can be found by writing to:
* ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
*/
#include <stdio.h>
#include <omp.h>
#include <stdlib.h>
// Use spinlocks if defined, mutexes otherwise
#define SPINLOCKS
#ifdef POSIX
#include <pthread.h>
#include <stdint.h>
#ifdef SPINLOCKS
/** \brief spinlock */
typedef volatile uint64_t spinlock_t __attribute__ ((aligned(64)));
/* Busy-wait acquire of a test-and-set spinlock.
 * x86-64 only: atomically sets bit 0 of *lock with `lock bts` and retries
 * (jc 0b) while the carry flag reports the bit was already set.
 * NOTE(review): the spin loop has no `pause` hint and re-executes the locked
 * RMW on every iteration - acceptable for a benchmark, costly otherwise. */
static inline void acquire_spinlock(spinlock_t * volatile lock)
{
__asm__ __volatile__(
"0:\n\t"
"xor %%rax,%%rax\n\t"
"lock bts %%rax,(%0)\n\t"
"jc 0b\n\t"
: : "S" (lock) : "rax"
);
}
/* Release the spinlock with a plain store; the lock word is volatile, and on
 * x86 an ordinary store is sufficient to publish the unlocked state here. */
static inline void release_spinlock(spinlock_t * volatile lock)
{
*lock = 0;
}
#endif
/* Read the x86 time-stamp counter.  RDTSC returns the low half in eax and
 * the high half in edx; both are captured into 64-bit locals and recombined.
 * (Locals renamed from the misleading `eax`/`edx` - they are uint64_t.) */
static inline uint64_t rdtsc(void)
{
uint64_t lo, hi;
__asm volatile ("rdtsc" : "=a" (lo), "=d" (hi));
return (hi << 32) | lo;
}
#endif
/* Lock scalability micro-benchmark: four OpenMP threads repeatedly
 * acquire/release one shared lock (spinlock or mutex, per SPINLOCKS/POSIX)
 * and the total cycle count is printed.
 * Fixes vs. original: uint64_t was printed with %lu (undefined behavior
 * where long is 32 bits), and main lacked an explicit return. */
int main(int argc, char *argv[])
{
    int i = 0;

    bomp_bomp_init(4);
    omp_set_num_threads(4);

#ifndef POSIX
#ifndef SPINLOCKS
    static struct thread_mutex lock = THREAD_MUTEX_INITIALIZER;
#else
    static spinlock_t lock = 0;
#endif
#else
#ifdef SPINLOCKS
    static spinlock_t lock = 0;
#else
    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
#endif
#endif

    uint64_t begin = rdtsc();
#pragma omp parallel
    {
#pragma omp for private(i)
        for (i = 0; i < 1000000; i++)
        {
#ifdef SPINLOCKS
            acquire_spinlock(&lock);
            release_spinlock(&lock);
#else
            thread_mutex_lock(&lock);
            thread_mutex_unlock(&lock);
#endif
        }
    }
    uint64_t end = rdtsc();

    /* Cast so the format specifier matches uint64_t on every platform. */
    printf("took %llu\n", (unsigned long long)(end - begin));

    return 0;
}
|
rose_break2.c | #include "omp.h"
int i;
int j;
int a[100][100];

/* Increment every element of a[][] row by row, aborting a row early once an
 * element reaches 100 (ROSE `break`-in-parallel-region test case).
 * Fix: `j` is written by every thread of the parallel loop but was left
 * shared, a data race; it must be private alongside `i`. */
void foo()
{
#pragma omp parallel for private (i,j)
  for (i = 0; i <= 99; i += 1) {
    for (j = 0; j <= 99; j += 1) {
      a[i][j] = a[i][j] + 1;
      if (a[i][j] == 100)
        break;
    }
  }
}
|
BFS.c | // -----------------------------------------------------------------------------
//
// "00_AccelGraph"
//
// -----------------------------------------------------------------------------
// Copyright (c) 2014-2019 All rights reserved
// -----------------------------------------------------------------------------
// Author : Abdullah Mughrabi
// Email : atmughra@ncsu.edu||atmughrabi@gmail.com
// File : BFS.c
// Create : 2019-09-28 15:20:58
// Revise : 2019-09-28 15:34:05
// Editor : Abdullah Mughrabi
// -----------------------------------------------------------------------------
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <string.h>
#include <omp.h>
#include "timer.h"
#include "myMalloc.h"
#include "boolean.h"
#include "arrayQueue.h"
#include "bitmap.h"
#include "graphConfig.h"
#include "reorder.h"
#include "graphCSR.h"
#include "graphGrid.h"
#include "graphAdjArrayList.h"
#include "graphAdjLinkedList.h"
#include "libcxl.h"
#include "capienv.h"
#include "BFS.h"
// ********************************************************************************************
// *************** Stats DataStructure **************
// ********************************************************************************************
/* Build a BFSStats record for a CSR graph, including the dual-order shadow
 * arrays.  parents are seeded with -out_degree(v) (or -1 for vertices with
 * no outgoing edges) so the direction-optimizing BFS can count unexplored
 * edges straight from the parent array. */
struct BFSStats *newBFSStatsGraphCSR(struct GraphCSR *graph)
{
    uint32_t v;
    struct BFSStats *stats = (struct BFSStats *) my_malloc(sizeof(struct BFSStats));

    stats->distances = (uint32_t *) my_malloc(graph->num_vertices * sizeof(uint32_t));
    stats->distances_DualOrder = (uint32_t *) my_malloc(graph->num_vertices * sizeof(uint32_t));
    stats->parents = (int *) my_malloc(graph->num_vertices * sizeof(int));
    stats->parents_DualOrder = (int *) my_malloc(graph->num_vertices * sizeof(int));
    stats->processed_nodes = 0;
    stats->iteration = 0;
    stats->num_vertices = graph->num_vertices;
    stats->time_total = 0.0f;

    #pragma omp parallel for default(none) private(v) shared(stats,graph)
    for (v = 0; v < graph->num_vertices; v++) {
        stats->distances[v] = 0;
        if (graph->vertices->out_degree[v]) {
            stats->parents[v] = graph->vertices->out_degree[v] * (-1);
            stats->parents_DualOrder[v] = graph->vertices->out_degree[v] * (-1);
        } else {
            stats->parents[v] = -1;
            stats->parents_DualOrder[v] = -1;
        }
    }

    return stats;
}
/* Build a BFSStats record for a Grid graph.  Grid BFS does not use the
 * dual-order arrays, so those fields stay NULL; parents start at -1 and
 * distances at 0 for every vertex. */
struct BFSStats *newBFSStatsGraphGrid(struct GraphGrid *graph)
{
    uint32_t v;
    struct BFSStats *stats = (struct BFSStats *) my_malloc(sizeof(struct BFSStats));

    stats->distances_DualOrder = NULL;
    stats->parents_DualOrder = NULL;
    stats->distances = (uint32_t *) my_malloc(graph->num_vertices * sizeof(uint32_t));
    stats->parents = (int *) my_malloc(graph->num_vertices * sizeof(int));
    stats->processed_nodes = 0;
    stats->iteration = 0;
    stats->num_vertices = graph->num_vertices;
    stats->time_total = 0.0f;

    #pragma omp parallel for default(none) private(v) shared(stats,graph)
    for (v = 0; v < graph->num_vertices; v++) {
        stats->distances[v] = 0;
        stats->parents[v] = -1;
    }

    return stats;
}
/* Build a BFSStats record for an adjacency-array-list graph.  Dual-order
 * arrays are unused (NULL).  parents are seeded with -out_degree(v), or -1
 * when the vertex has no outgoing edges, supporting the hybrid BFS counter. */
struct BFSStats *newBFSStatsGraphAdjArrayList(struct GraphAdjArrayList *graph)
{
    uint32_t v;
    struct BFSStats *stats = (struct BFSStats *) my_malloc(sizeof(struct BFSStats));

    stats->distances_DualOrder = NULL;
    stats->parents_DualOrder = NULL;
    stats->distances = (uint32_t *) my_malloc(graph->num_vertices * sizeof(uint32_t));
    stats->parents = (int *) my_malloc(graph->num_vertices * sizeof(int));
    stats->processed_nodes = 0;
    stats->iteration = 0;
    stats->num_vertices = graph->num_vertices;
    stats->time_total = 0.0f;

    #pragma omp parallel for default(none) private(v) shared(stats,graph)
    for (v = 0; v < graph->num_vertices; v++) {
        stats->distances[v] = 0;
        if (graph->vertices[v].out_degree)
            stats->parents[v] = graph->vertices[v].out_degree * (-1);
        else
            stats->parents[v] = -1;
    }

    return stats;
}
/* Build a BFSStats record for an adjacency-linked-list graph; identical
 * seeding policy to the adjacency-array-list variant (-out_degree or -1). */
struct BFSStats *newBFSStatsGraphAdjLinkedList(struct GraphAdjLinkedList *graph)
{
    uint32_t v;
    struct BFSStats *stats = (struct BFSStats *) my_malloc(sizeof(struct BFSStats));

    stats->distances_DualOrder = NULL;
    stats->parents_DualOrder = NULL;
    stats->distances = (uint32_t *) my_malloc(graph->num_vertices * sizeof(uint32_t));
    stats->parents = (int *) my_malloc(graph->num_vertices * sizeof(int));
    stats->processed_nodes = 0;
    stats->iteration = 0;
    stats->num_vertices = graph->num_vertices;
    stats->time_total = 0.0f;

    #pragma omp parallel for default(none) private(v) shared(stats,graph)
    for (v = 0; v < graph->num_vertices; v++) {
        stats->distances[v] = 0;
        if (graph->vertices[v].out_degree)
            stats->parents[v] = graph->vertices[v].out_degree * (-1);
        else
            stats->parents[v] = -1;
    }

    return stats;
}
/* Release a BFSStats object and all arrays it owns.
 * free(NULL) is a no-op per the C standard, so the per-field null guards
 * of the original were redundant and have been removed; fields that were
 * never allocated (e.g. the dual-order arrays for non-CSR graphs) are NULL
 * and are freed safely. */
void freeBFSStats(struct BFSStats *stats)
{
    if (stats)
    {
        free(stats->distances);
        free(stats->parents);
        free(stats->distances_DualOrder);
        free(stats->parents_DualOrder);
        free(stats);
    }
}
/* Remap the parent array into a new vertex ordering.
 * labels maps old vertex id -> new vertex id.  Each valid parent value is
 * itself a vertex id and is translated through labels as well; negative
 * sentinel values (-1 / -out_degree) are copied unchanged.  On return the
 * two arrays have been swapped through the double pointers, so *parents
 * holds the reordered data and *parents_DualOrder the old buffer. */
void syncDualOrderParentArrays(int **parents, int **parents_DualOrder, uint32_t *labels, uint32_t num_vertices)
{
uint32_t vertex_id;
uint32_t vertex_v;
int *parents_temp;
uint32_t num_threads_max = omp_get_max_threads();
#pragma omp parallel for default(none) private(vertex_id,vertex_v) shared(parents,parents_DualOrder,labels,num_vertices) num_threads(num_threads_max)
for(vertex_id = 0; vertex_id < num_vertices ; vertex_id++)
{
vertex_v = labels[vertex_id];
// vertex_u = inv_labels[vertex_id];
if((*parents)[vertex_id] >= 0)
{
// Valid parent: translate both the slot and the stored parent id.
(*parents_DualOrder)[vertex_v] = labels[(*parents)[vertex_id]];
}
else
{
// Sentinel (unvisited / -out_degree): copy through untranslated.
(*parents_DualOrder)[vertex_v] = (*parents)[vertex_id];
}
}
// Swap buffers so the caller's *parents points at the reordered array.
parents_temp = *parents;
*parents = *parents_DualOrder;
*parents_DualOrder = parents_temp;
}
/* Remap the distances array into a new vertex ordering: for every old id,
 * its distance is written to slot labels[id] of distances_DualOrder.
 * NOTE(review): unlike syncDualOrderParentArrays, the arrays are passed by
 * value here, so the "swap" at the end only exchanges the local pointer
 * copies and has NO effect on the caller - after this call the reordered
 * data lives in the caller's distances_DualOrder buffer, not distances.
 * Confirm whether callers rely on that, or whether the signature should
 * take uint32_t ** like the parents variant. */
void syncDualOrderDistancesArrays(uint32_t *distances, uint32_t *distances_DualOrder, uint32_t *labels, uint32_t num_vertices)
{
uint32_t vertex_id;
uint32_t vertex_v;
// uint32_t vertex_u;
uint32_t *distances_temp;
uint32_t num_threads_max = omp_get_max_threads();
#pragma omp parallel for default(none) private(vertex_id,vertex_v) shared(distances,distances_DualOrder,labels,num_vertices) num_threads(num_threads_max)
for(vertex_id = 0; vertex_id < num_vertices ; vertex_id++)
{
vertex_v = labels[vertex_id];
// vertex_u = inv_labels[vertex_id];
distances_DualOrder[vertex_v] = distances[vertex_id];
}
// Local-only swap; see NOTE(review) above.
distances_temp = distances;
distances = distances_DualOrder;
distances_DualOrder = distances_temp;
}
// ********************************************************************************************
// *************** CSR DataStructure **************
// ********************************************************************************************
/* Select and run a BFS variant on a CSR graph, keyed by arguments->pushpull:
 *   0 - pull (bottom-up)        1 - push (top-down)
 *   2 - direction-optimized     3 - push with bitmap frontier
 *   4 - direction-optimized with bitmap frontier
 * Any other value falls back to the direction-optimized kernel. */
struct BFSStats *breadthFirstSearchGraphCSR(struct Arguments *arguments, struct GraphCSR *graph)
{
    switch (arguments->pushpull)
    {
    case 0:
        return breadthFirstSearchPullGraphCSR(arguments, graph);
    case 1:
        return breadthFirstSearchPushGraphCSR(arguments, graph);
    case 3:
        return breadthFirstSearchPushBitmapGraphCSR(arguments, graph);
    case 4:
        return breadthFirstSearchPushDirectionOptimizedBitmapGraphCSR(arguments, graph);
    case 2: /* fallthrough: 2 and unknown values share the default kernel */
    default:
        return breadthFirstSearchDirectionOptimizedGraphCSR(arguments, graph);
    }
}
// breadth-first-search(graph, arguments->source)
// sharedFrontierQueue ← {arguments->source}
// next ← {}
// parents ← [-1,-1,. . . -1]
// while sharedFrontierQueue 6= {} do
// top-down-step(graph, sharedFrontierQueue, next, parents)
// sharedFrontierQueue ← next
// next ← {}
// end while
// return parents
/* Pull (bottom-up) BFS on a CSR graph, offloaded to a CAPI AFU.
 * The host sets up the WED descriptor and two byte-per-vertex worklists,
 * then iterates bottom-up steps on the accelerator until the frontier is
 * empty.  The source vertex is first remapped through label_array.
 * Returns the stats object (also returned, with a message, when the source
 * is out of range). */
struct BFSStats *breadthFirstSearchPullGraphCSR(struct Arguments *arguments, struct GraphCSR *graph)
{
struct BFSStats *stats = newBFSStatsGraphCSR(graph);
printf(" -----------------------------------------------------\n");
printf("| %-51s | \n", " ---->>> CAPI <<<----");
if(arguments->source > graph->num_vertices)
{
printf(" -----------------------------------------------------\n");
printf("| %-51s | \n", "ERROR!! CHECK SOURCE RANGE");
printf(" -----------------------------------------------------\n");
return stats;
}
// Translate the user-supplied source id into the sorted/reordered id space.
arguments->source = graph->sorted_edges_array->label_array[arguments->source];
printf(" -----------------------------------------------------\n");
printf("| %-51s | \n", "Starting BFS PULL/BU (SOURCE NODE)");
printf(" -----------------------------------------------------\n");
printf("| %-51u | \n", arguments->source);
printf(" -----------------------------------------------------\n");
printf("| %-15s | %-15s | %-15s | \n", "Iteration", "Nodes", "Time (Seconds)");
printf(" -----------------------------------------------------\n");
struct Timer *timer = (struct Timer *) malloc(sizeof(struct Timer));
struct Timer *timer_inner = (struct Timer *) malloc(sizeof(struct Timer));
// Byte-per-vertex frontier membership flags (current and next iteration).
uint8_t *workListCurr = NULL;
uint8_t *workListNext = NULL;
workListCurr = (uint8_t *) my_malloc(graph->num_vertices * sizeof(uint8_t));
workListNext = (uint8_t *) my_malloc(graph->num_vertices * sizeof(uint8_t));
resetWorkList(workListNext, graph->num_vertices);
resetWorkList(workListCurr, graph->num_vertices);
// ********************************************************************************************
// ********************************************************************************************
// *************** Setup CAPI **************
// ********************************************************************************************
struct cxl_afu_h *afu;
struct WEDGraphCSR *wedGraphCSR;
wedGraphCSR = mapGraphCSRToWED((struct GraphCSR *)graph);
// The AFU reads/writes parents and distances directly through the WED.
wedGraphCSR->auxiliary1 = stats->parents;
wedGraphCSR->auxiliary2 = stats->distances;
// ********************************************************************************************
// ********************************************************************************************
// *************** Setup AFU **************
// ********************************************************************************************
setupAFUGraphCSR(&afu, wedGraphCSR);
struct AFUStatus afu_status = {0};
afu_status.afu_config = arguments->afu_config;
afu_status.afu_config_2 = arguments->afu_config_2;
afu_status.cu_config = arguments->cu_config; // non zero CU triggers the AFU to work
afu_status.cu_config = ((arguments->cu_config << 32) | (arguments->ker_numThreads));
afu_status.cu_config_2 = arguments->cu_config_2; // non zero CU triggers the AFU to work
afu_status.cu_config_3 = (uint64_t)workListCurr; // non zero CU triggers the AFU to work
afu_status.cu_config_4 = (uint64_t)workListNext; // non zero CU triggers the AFU to work
afu_status.cu_stop = wedGraphCSR->num_vertices; // stop condition once all vertices processed
// ********************************************************************************************
startAFU(&afu, &afu_status);
// ********************************************************************************************
uint32_t nf = 0; // number of vertices in sharedFrontierQueue
// Seed the frontier with the source vertex (parent of source is itself).
Start(timer_inner);
workListNext[arguments->source] = 1;
nf = 1;
stats->parents[arguments->source] = arguments->source;
swapWorkLists(&workListCurr, &workListNext);
resetWorkList(workListNext, graph->num_vertices);
Stop(timer_inner);
stats->time_total += Seconds(timer_inner);
printf("| BU %-12u | %-15u | %-15f | \n", stats->iteration++, ++stats->processed_nodes, Seconds(timer_inner));
Start(timer);
// Iterate bottom-up steps until no new vertex joins the frontier.
while (nf)
{
Start(timer_inner);
afu_status.cu_config_3 = (uint64_t)workListCurr; // non zero CU triggers the AFU to work
afu_status.cu_config_4 = (uint64_t)workListNext; // non zero CU triggers the AFU to work
nf = bottomUpStepGraphCSRCAPI(graph, workListCurr, workListNext, stats, &afu_status, afu);
swapWorkLists(&workListCurr, &workListNext);
resetWorkList(workListNext, graph->num_vertices);
Stop(timer_inner);
//stats
stats->time_total += Seconds(timer_inner);
stats->processed_nodes += nf;
printf("| BU %-12u | %-15u | %-15f | \n", stats->iteration++, nf, Seconds(timer_inner));
} // end while
Stop(timer);
printf(" -----------------------------------------------------\n");
printf("| %-15s | %-15u | %-15f | \n", "No OverHead", stats->processed_nodes, stats->time_total);
printf(" -----------------------------------------------------\n");
stats->time_total = Seconds(timer);
printf(" -----------------------------------------------------\n");
printf("| %-15s | %-15u | %-15f | \n", "total", stats->processed_nodes, Seconds(timer));
printf(" -----------------------------------------------------\n");
// ********************************************************************************************
// *************** Releasing AFU **************
releaseAFU(&afu);
// ********************************************************************************************
free(workListCurr);
free(workListNext);
free(timer);
free(timer_inner);
free(wedGraphCSR);
return stats;
}
// breadth-first-search(graph, arguments->source)
// sharedFrontierQueue ← {arguments->source}
// next ← {}
// parents ← [-1,-1,. . . -1]
// while sharedFrontierQueue 6= {} do
// top-down-step(graph, sharedFrontierQueue, next, parents)
// sharedFrontierQueue ← next
// next ← {}
// end while
// return parents
/* Push (top-down) BFS on a CSR graph.  A shared sliding-window frontier
 * queue feeds topDownStepGraphCSR; each of the P worker threads flushes its
 * private local queue into the shared one at the end of a step.  The source
 * is remapped through label_array before the search starts.  Returns the
 * per-iteration/total stats (also on the out-of-range-source early exit). */
struct BFSStats *breadthFirstSearchPushGraphCSR(struct Arguments *arguments, struct GraphCSR *graph)
{
struct BFSStats *stats = newBFSStatsGraphCSR(graph);
if(arguments->source > graph->num_vertices)
{
printf(" -----------------------------------------------------\n");
printf("| %-51s | \n", "ERROR!! CHECK SOURCE RANGE");
printf(" -----------------------------------------------------\n");
return stats;
}
// Translate the user-supplied source id into the sorted/reordered id space.
arguments->source = graph->sorted_edges_array->label_array[arguments->source];
printf(" -----------------------------------------------------\n");
printf("| %-51s | \n", "Starting BFS PUSH/TD (SOURCE NODE)");
printf(" -----------------------------------------------------\n");
printf("| %-51u | \n", arguments->source);
printf(" -----------------------------------------------------\n");
printf("| %-15s | %-15s | %-15s | \n", "Iteration", "Nodes", "Time (Seconds)");
printf(" -----------------------------------------------------\n");
struct Timer *timer = (struct Timer *) malloc(sizeof(struct Timer));
struct Timer *timer_inner = (struct Timer *) malloc(sizeof(struct Timer));
struct ArrayQueue *sharedFrontierQueue = newArrayQueue(graph->num_vertices);
uint32_t P = arguments->algo_numThreads;
// One private frontier queue per worker thread.
struct ArrayQueue **localFrontierQueues = (struct ArrayQueue **) my_malloc( P * sizeof(struct ArrayQueue *));
uint32_t i;
for(i = 0 ; i < P ; i++)
{
localFrontierQueues[i] = newArrayQueue(graph->num_vertices);
}
// Seed the frontier with the source (its own parent).
Start(timer_inner);
enArrayQueue(sharedFrontierQueue, arguments->source);
// setBit(sharedFrontierQueue->q_bitmap,arguments->source);
stats->parents[arguments->source] = arguments->source;
Stop(timer_inner);
stats->time_total += Seconds(timer_inner);
// graph->vertices[arguments->source].visited = 1;
printf("| TD %-12u | %-15u | %-15f | \n", stats->iteration++, ++stats->processed_nodes, Seconds(timer_inner));
Start(timer);
while(!isEmptyArrayQueue(sharedFrontierQueue)) // start while
{
Start(timer_inner);
topDownStepGraphCSR(graph, sharedFrontierQueue, localFrontierQueues, stats);
// Advance the queue window: newly enqueued vertices become the next frontier.
slideWindowArrayQueue(sharedFrontierQueue);
Stop(timer_inner);
//stats collection
stats->time_total += Seconds(timer_inner);
stats->processed_nodes += sharedFrontierQueue->tail - sharedFrontierQueue->head;
printf("| TD %-12u | %-15u | %-15f | \n", stats->iteration++, sharedFrontierQueue->tail - sharedFrontierQueue->head, Seconds(timer_inner));
} // end while
Stop(timer);
printf(" -----------------------------------------------------\n");
printf("| %-15s | %-15u | %-15f | \n", "No OverHead", stats->processed_nodes, stats->time_total);
printf(" -----------------------------------------------------\n");
stats->time_total = Seconds(timer);
printf(" -----------------------------------------------------\n");
printf("| %-15s | %-15u | %-15f | \n", "total", stats->processed_nodes, Seconds(timer));
printf(" -----------------------------------------------------\n");
for(i = 0 ; i < P ; i++)
{
freeArrayQueue(localFrontierQueues[i]);
}
free(localFrontierQueues);
freeArrayQueue(sharedFrontierQueue);
free(timer);
free(timer_inner);
return stats;
}
// breadth-first-search(graph, arguments->source)
// sharedFrontierQueue ← {arguments->source}
// next ← {}
// parents ← [-1,-1,. . . -1]
// while sharedFrontierQueue 6= {} do
// top-down-step(graph, sharedFrontierQueue, next, parents)
// sharedFrontierQueue ← next
// next ← {}
// end while
// return parents
/* Direction-optimizing BFS on a CSR graph (Beamer-style hybrid).
 * Runs top-down (push) steps while the frontier is small and switches to
 * bottom-up (pull) steps when the frontier's outgoing-edge count mf exceeds
 * mu/alpha (mu = remaining unexplored edges); it switches back once the
 * frontier shrinks below n/beta and stops growing.  The frontier is converted
 * between an array queue (push) and a bitmap (pull) at each switch. */
struct BFSStats *breadthFirstSearchDirectionOptimizedGraphCSR(struct Arguments *arguments, struct GraphCSR *graph)
{
struct BFSStats *stats = newBFSStatsGraphCSR(graph);
if(arguments->source > graph->num_vertices)
{
printf(" -----------------------------------------------------\n");
printf("| %-51s | \n", "ERROR!! CHECK SOURCE RANGE");
printf(" -----------------------------------------------------\n");
return stats;
}
// Translate the user-supplied source id into the sorted/reordered id space.
arguments->source = graph->sorted_edges_array->label_array[arguments->source];
printf(" -----------------------------------------------------\n");
printf("| %-51s | \n", "Starting BFS PUSH/PULL(SOURCE NODE)");
printf(" -----------------------------------------------------\n");
printf("| %-51u | \n", arguments->source);
printf(" -----------------------------------------------------\n");
printf("| %-15s | %-15s | %-15s | \n", "Iteration", "Nodes", "Time (Seconds)");
printf(" -----------------------------------------------------\n");
struct Timer *timer = (struct Timer *) malloc(sizeof(struct Timer));
struct Timer *timer_inner = (struct Timer *) malloc(sizeof(struct Timer));
struct ArrayQueue *sharedFrontierQueue = newArrayQueue(graph->num_vertices);
struct Bitmap *bitmapCurr = newBitmap(graph->num_vertices);
struct Bitmap *bitmapNext = newBitmap(graph->num_vertices);
uint32_t P = arguments->algo_numThreads;
uint32_t mu = graph->num_edges; // number of edges to check from sharedFrontierQueue
uint32_t mf = graph->vertices->out_degree[arguments->source]; // number of edges from unexplored verticies
uint32_t nf = 0; // number of vertices in sharedFrontierQueue
uint32_t nf_prev = 0; // number of vertices in sharedFrontierQueue
uint32_t n = graph->num_vertices; // number of nodes
// Heuristic switch thresholds (push->pull at mf > mu/alpha, pull->push at nf < n/beta).
uint32_t alpha = 15;
uint32_t beta = 18;
struct ArrayQueue **localFrontierQueues = (struct ArrayQueue **) my_malloc( P * sizeof(struct ArrayQueue *));
uint32_t i;
for(i = 0 ; i < P ; i++)
{
localFrontierQueues[i] = newArrayQueue(graph->num_vertices);
}
// Seed the frontier with the source (its own parent).
Start(timer_inner);
enArrayQueue(sharedFrontierQueue, arguments->source);
// setBit(sharedFrontierQueue->q_bitmap,arguments->source);
stats->parents[arguments->source] = arguments->source;
Stop(timer_inner);
stats->time_total += Seconds(timer_inner);
// graph->vertices[arguments->source].visited = 1;
printf("| TD %-12u | %-15u | %-15f | \n", stats->iteration++, ++stats->processed_nodes, Seconds(timer_inner));
Start(timer);
while(!isEmptyArrayQueue(sharedFrontierQueue)) // start while
{
if(mf > (mu / alpha))
{
// Frontier is edge-heavy: convert to bitmap and run bottom-up steps.
Start(timer_inner);
arrayQueueToBitmap(sharedFrontierQueue, bitmapCurr);
nf = sizeArrayQueue(sharedFrontierQueue);
Stop(timer_inner);
printf("| E %-12s | %-15s | %-15f | \n", " ", " ", Seconds(timer_inner));
do
{
Start(timer_inner);
nf_prev = nf;
nf = bottomUpStepGraphCSR(graph, bitmapCurr, bitmapNext, stats);
swapBitmaps(&bitmapCurr, &bitmapNext);
clearBitmap(bitmapNext);
Stop(timer_inner);
//stats collection
stats->time_total += Seconds(timer_inner);
stats->processed_nodes += nf;
printf("| BU %-12u | %-15u | %-15f | \n", stats->iteration++, nf, Seconds(timer_inner));
}
while(( nf > nf_prev) || // growing;
( nf > (n / beta)));
// Frontier small again: convert back to a queue for push steps.
Start(timer_inner);
bitmapToArrayQueue(bitmapCurr, sharedFrontierQueue, localFrontierQueues);
Stop(timer_inner);
printf("| C %-12s | %-15s | %-15f | \n", " ", " ", Seconds(timer_inner));
mf = 1;
}
else
{
Start(timer_inner);
mu -= mf;
mf = topDownStepGraphCSR(graph, sharedFrontierQueue, localFrontierQueues, stats);
slideWindowArrayQueue(sharedFrontierQueue);
Stop(timer_inner);
//stats collection
stats->time_total += Seconds(timer_inner);
stats->processed_nodes += sharedFrontierQueue->tail - sharedFrontierQueue->head;
printf("| TD %-12u | %-15u | %-15f | \n", stats->iteration++, sharedFrontierQueue->tail - sharedFrontierQueue->head, Seconds(timer_inner));
}
} // end while
Stop(timer);
// stats->time_total = Seconds(timer);
printf(" -----------------------------------------------------\n");
printf("| %-15s | %-15u | %-15f | \n", "No OverHead", stats->processed_nodes, stats->time_total);
printf(" -----------------------------------------------------\n");
stats->time_total = Seconds(timer);
printf(" -----------------------------------------------------\n");
printf("| %-15s | %-15u | %-15f | \n", "total", stats->processed_nodes, Seconds(timer));
printf(" -----------------------------------------------------\n");
for(i = 0 ; i < P ; i++)
{
freeArrayQueue(localFrontierQueues[i]);
}
free(localFrontierQueues);
freeArrayQueue(sharedFrontierQueue);
freeBitmap(bitmapNext);
freeBitmap(bitmapCurr);
free(timer);
free(timer_inner);
return stats;
}
// top-down-step(graph, sharedFrontierQueue, next, parents)
// for v ∈ sharedFrontierQueue do
// for u ∈ neighbors[v] do
// if parents[u] = -1 then
// parents[u] ← v
// next ← next ∪ {u}
// end if
// end for
// end for
/* One parallel top-down (push) BFS step: every thread scans a slice of the
 * shared frontier, claims unvisited neighbors via compare-and-swap on the
 * parent array, and appends them to its private local queue, which is
 * flushed into the shared queue at the end.
 * Returns mf: the summed out-degrees of newly discovered vertices (the
 * parent array stores -out_degree for unvisited vertices, so -(u_parent)
 * recovers the degree) - used by the direction-optimizing heuristic. */
uint32_t topDownStepGraphCSR(struct GraphCSR *graph, struct ArrayQueue *sharedFrontierQueue, struct ArrayQueue **localFrontierQueues, struct BFSStats *stats)
{
uint32_t v;
uint32_t u;
uint32_t i;
uint32_t j;
uint32_t edge_idx;
uint32_t mf = 0;
#pragma omp parallel default (none) private(u,v,j,i,edge_idx) shared(stats,localFrontierQueues,graph,sharedFrontierQueue,mf)
{
uint32_t t_id = omp_get_thread_num();
struct ArrayQueue *localFrontierQueue = localFrontierQueues[t_id];
#pragma omp for reduction(+:mf) schedule(auto)
for(i = sharedFrontierQueue->head ; i < sharedFrontierQueue->tail; i++)
{
v = sharedFrontierQueue->queue[i];
edge_idx = graph->vertices->edges_idx[v];
for(j = edge_idx ; j < (edge_idx + graph->vertices->out_degree[v]) ; j++)
{
u = EXTRACT_VALUE(graph->sorted_edges_array->edges_array_dest[j]);
int u_parent = stats->parents[u];
// Negative parent means u is unvisited; CAS claims it exactly once.
if(u_parent < 0 )
{
if(__sync_bool_compare_and_swap(&stats->parents[u], u_parent, v))
{
enArrayQueue(localFrontierQueue, u);
mf += -(u_parent);
stats->distances[u] = stats->distances[v] + 1;
}
}
}
}
// Publish this thread's discoveries into the shared frontier.
flushArrayQueueToShared(localFrontierQueue, sharedFrontierQueue);
}
return mf;
}
// bottom-up-step(graph, sharedFrontierQueue, next, parents) //pull
// for v ∈ vertices do
// if parents[v] = -1 then
// for u ∈ neighbors[v] do
// if u ∈ sharedFrontierQueue then
// parents[v] ← u
// next ← next ∪ {v}
// break
// end if
// end for
// end if
// end for
/**
 * One bottom-up (pull) BFS step over a CSR graph.
 *
 * Every still-unvisited vertex v scans its (in-)neighbors; the first neighbor
 * found in the current frontier bitmap becomes v's parent and v joins the
 * next frontier. For directed graphs the inverse CSR is scanned so that v
 * pulls along incoming edges.
 *
 * @return nf — number of vertices added to the next frontier.
 */
uint32_t bottomUpStepGraphCSR(struct GraphCSR *graph, struct Bitmap *bitmapCurr, struct Bitmap *bitmapNext, struct BFSStats *stats)
{
    uint32_t v;          // candidate (unvisited) vertex
    uint32_t u;          // neighbor tested for frontier membership
    uint32_t j;          // edge-array index
    uint32_t edge_idx;   // start of v's adjacency list
    uint32_t out_degree;
    struct Vertex *vertices = NULL;
    uint32_t *sorted_edges_array = NULL;
    // uint32_t processed_nodes = bitmapCurr->numSetBits;
    uint32_t nf = 0; // number of vertices in sharedFrontierQueue
    // stats->processed_nodes += processed_nodes;
    // Pull direction: directed graphs must walk incoming edges, so use the
    // inverse CSR arrays when available.
    #if DIRECTED
    vertices = graph->inverse_vertices;
    sorted_edges_array = graph->inverse_sorted_edges_array->edges_array_dest;
    #else
    vertices = graph->vertices;
    sorted_edges_array = graph->sorted_edges_array->edges_array_dest;
    #endif
    #pragma omp parallel for default(none) private(j,u,v,out_degree,edge_idx) shared(stats,bitmapCurr,bitmapNext,graph,vertices,sorted_edges_array) reduction(+:nf) schedule(dynamic, 1024)
    for(v = 0 ; v < graph->num_vertices ; v++)
    {
        out_degree = vertices->out_degree[v];
        if(stats->parents[v] < 0) // optmization
        {
            edge_idx = vertices->edges_idx[v];
            for(j = edge_idx ; j < (edge_idx + out_degree) ; j++)
            {
                u = EXTRACT_VALUE(sorted_edges_array[j]);
                if(getBit(bitmapCurr, u))
                {
                    // Plain (non-atomic) writes are safe: each v is handled by
                    // exactly one loop iteration, hence one thread.
                    stats->parents[v] = u;
                    //we are not considering distance array as it is not implemented in AccelGraph
                    stats->distances[v] = stats->distances[u] + 1;
                    setBitAtomic(bitmapNext, v);
                    nf++;
                    break; // first frontier parent wins; stop scanning
                }
            }
        }
    }
    return nf;
}
/**
 * Bottom-up BFS step offloaded to a CAPI AFU (hardware compute unit).
 *
 * Starts the compute unit, blocks until it signals completion, and reports
 * how many vertices the hardware added to the next frontier. The graph and
 * work-list parameters are part of the step-function signature; the AFU was
 * configured with them elsewhere.
 *
 * @return number of vertices in the new frontier, as reported by the AFU.
 */
uint32_t bottomUpStepGraphCSRCAPI(struct GraphCSR *graph, uint8_t *workListCurr, uint8_t *workListNext, struct BFSStats *stats, struct AFUStatus *afu_status, struct cxl_afu_h *afu)
{
    // Kick off the compute unit, then wait for it to finish this step.
    startCU(&afu, afu_status);
    waitAFU(&afu, afu_status);

    // The AFU deposits the new-frontier vertex count in cu_return_done_2.
    return afu_status->cu_return_done_2;
}
// ********************************************************************************************
// *************** CSR DataStructure/Bitmap Frontiers **************
// ********************************************************************************************
// / breadth-first-search(graph, arguments->source)
// sharedFrontierQueue ← {arguments->source}
// next ← {}
// parents ← [-1,-1,. . . -1]
// while sharedFrontierQueue 6= {} do
// top-down-step(graph, sharedFrontierQueue, next, parents)
// sharedFrontierQueue ← next
// next ← {}
// end while
// return parents
/**
 * Top-down (push) BFS over a CSR graph using bitmap frontiers.
 *
 * Seeds the frontier bitmap with the source vertex (remapped through the
 * sorted-edge label array), then repeatedly expands it with
 * topDownStepUsingBitmapsGraphCSR until no new vertices are found.
 * Per-iteration and total timings are printed as a table.
 *
 * @param arguments run configuration; arguments->source is remapped in place
 *                  to its id in the sorted-graph label space.
 * @param graph     CSR graph to traverse.
 * @return BFSStats with parents/distances populated; caller owns it.
 */
struct BFSStats *breadthFirstSearchPushBitmapGraphCSR(struct Arguments *arguments, struct GraphCSR *graph)
{
    struct BFSStats *stats = newBFSStatsGraphCSR(graph);

    // Valid ids are 0..num_vertices-1, so '>=' is required: the previous '>'
    // accepted source == num_vertices and read label_array[] one past its end.
    if(arguments->source >= graph->num_vertices)
    {
        printf(" -----------------------------------------------------\n");
        printf("| %-51s | \n", "ERROR!! CHECK SOURCE RANGE");
        printf(" -----------------------------------------------------\n");
        return stats;
    }

    // Translate the user-supplied vertex id into the reordered id space.
    arguments->source = graph->sorted_edges_array->label_array[arguments->source];

    printf(" -----------------------------------------------------\n");
    printf("| %-51s | \n", "Starting BFS PUSH/Bitmap (SOURCE NODE)");
    printf(" -----------------------------------------------------\n");
    printf("| %-51u | \n", arguments->source);
    printf(" -----------------------------------------------------\n");
    printf("| %-15s | %-15s | %-15s | \n", "Iteration", "Nodes", "Time (Seconds)");
    printf(" -----------------------------------------------------\n");

    // my_malloc aborts on failure, matching the allocator used elsewhere in
    // this file (raw malloc here was unchecked).
    struct Timer *timer = (struct Timer *) my_malloc(sizeof(struct Timer));
    struct Timer *timer_inner = (struct Timer *) my_malloc(sizeof(struct Timer));
    struct ArrayQueue *sharedFrontierQueue = newArrayQueue(graph->num_vertices);

    // Seed: mark the source in the "next" bitmap, then swap it in as current.
    Start(timer_inner);
    setBit(sharedFrontierQueue->q_bitmap_next, arguments->source);
    sharedFrontierQueue->q_bitmap_next->numSetBits = 1;
    stats->parents[arguments->source] = arguments->source; // root is its own parent
    swapBitmaps(&sharedFrontierQueue->q_bitmap, &sharedFrontierQueue->q_bitmap_next);
    clearBitmap(sharedFrontierQueue->q_bitmap_next);
    Stop(timer_inner);
    stats->time_total += Seconds(timer_inner);
    printf("| TD %-12u | %-15u | %-15f | \n", stats->iteration++, ++stats->processed_nodes, Seconds(timer_inner));

    Start(timer);
    while (sharedFrontierQueue->q_bitmap->numSetBits)
    {
        Start(timer_inner);
        topDownStepUsingBitmapsGraphCSR(graph, sharedFrontierQueue, stats);
        sharedFrontierQueue->q_bitmap_next->numSetBits = getNumOfSetBits(sharedFrontierQueue->q_bitmap_next);
        swapBitmaps(&sharedFrontierQueue->q_bitmap, &sharedFrontierQueue->q_bitmap_next);
        clearBitmap(sharedFrontierQueue->q_bitmap_next);
        Stop(timer_inner);
        stats->time_total += Seconds(timer_inner);
        stats->processed_nodes += sharedFrontierQueue->q_bitmap->numSetBits;
        printf("| TD %-12u | %-15u | %-15f | \n", stats->iteration++, sharedFrontierQueue->q_bitmap->numSetBits, Seconds(timer_inner));
    } // end while
    Stop(timer);

    // "No OverHead" = sum of per-iteration kernel times; "total" = wall clock
    // including inter-iteration bookkeeping (time_total is overwritten with it).
    printf(" -----------------------------------------------------\n");
    printf("| %-15s | %-15u | %-15f | \n", "No OverHead", stats->processed_nodes, stats->time_total);
    printf(" -----------------------------------------------------\n");
    stats->time_total = Seconds(timer);
    printf(" -----------------------------------------------------\n");
    printf("| %-15s | %-15u | %-15f | \n", "total", stats->processed_nodes, Seconds(timer));
    printf(" -----------------------------------------------------\n");

    freeArrayQueue(sharedFrontierQueue);
    free(timer);
    free(timer_inner);
    return stats;
}
// breadth-first-search(graph, arguments->source)
// sharedFrontierQueue ← {arguments->source}
// next ← {}
// parents ← [-1,-1,. . . -1]
// while sharedFrontierQueue 6= {} do
// top-down-step(graph, sharedFrontierQueue, next, parents)
// sharedFrontierQueue ← next
// next ← {}
// end while
// return parents
/**
 * Direction-optimizing BFS (Beamer-style alpha/beta heuristic) over CSR with
 * bitmap frontiers.
 *
 * Runs top-down (push) steps while the frontier's outgoing-edge count mf is
 * small relative to the unexplored edges mu; switches to bottom-up (pull)
 * steps while the frontier keeps growing or stays larger than n/beta.
 *
 * @param arguments run configuration; arguments->source is remapped in place.
 * @param graph     CSR graph to traverse.
 * @return BFSStats with parents/distances populated; caller owns it.
 */
struct BFSStats *breadthFirstSearchPushDirectionOptimizedBitmapGraphCSR(struct Arguments *arguments, struct GraphCSR *graph)
{
    struct BFSStats *stats = newBFSStatsGraphCSR(graph);

    // Valid ids are 0..num_vertices-1, so '>=' is required: the previous '>'
    // accepted source == num_vertices and read label_array[] out of bounds.
    if(arguments->source >= graph->num_vertices)
    {
        printf(" -----------------------------------------------------\n");
        printf("| %-51s | \n", "ERROR!! CHECK SOURCE RANGE");
        printf(" -----------------------------------------------------\n");
        return stats;
    }

    arguments->source = graph->sorted_edges_array->label_array[arguments->source];

    printf(" -----------------------------------------------------\n");
    printf("| %-51s | \n", "Starting BFS PUSH/PULL Bitmap (SOURCE NODE)");
    printf(" -----------------------------------------------------\n");
    printf("| %-51u | \n", arguments->source);
    printf(" -----------------------------------------------------\n");
    printf("| %-15s | %-15s | %-15s | \n", "Iteration", "Nodes", "Time (Seconds)");
    printf(" -----------------------------------------------------\n");

    // my_malloc aborts on failure (raw malloc here was unchecked).
    struct Timer *timer = (struct Timer *) my_malloc(sizeof(struct Timer));
    struct Timer *timer_inner = (struct Timer *) my_malloc(sizeof(struct Timer));
    struct ArrayQueue *sharedFrontierQueue = newArrayQueue(graph->num_vertices);

    // Direction-switch heuristic state (Beamer et al.):
    uint32_t mu = graph->num_edges;                                // edges left to explore
    uint32_t mf = graph->vertices->out_degree[arguments->source];  // edges out of the frontier
    uint32_t nf = 0;       // vertices in the frontier (bottom-up phase)
    uint32_t nf_prev = 0;  // previous frontier size
    uint32_t n = graph->num_vertices;
    uint32_t alpha = 15;   // push -> pull threshold
    uint32_t beta = 18;    // pull -> push threshold

    // Seed the frontier with the source.
    Start(timer_inner);
    setBit(sharedFrontierQueue->q_bitmap_next, arguments->source);
    sharedFrontierQueue->q_bitmap_next->numSetBits = 1;
    stats->parents[arguments->source] = arguments->source;
    swapBitmaps(&sharedFrontierQueue->q_bitmap, &sharedFrontierQueue->q_bitmap_next);
    clearBitmap(sharedFrontierQueue->q_bitmap_next);
    Stop(timer_inner);
    stats->time_total += Seconds(timer_inner);
    printf("| TD %-12u | %-15u | %-15f | \n", stats->iteration++, ++stats->processed_nodes, Seconds(timer_inner));

    Start(timer);
    while (sharedFrontierQueue->q_bitmap->numSetBits)
    {
        if(mf > (mu / alpha))
        {
            // Frontier is edge-heavy: switch to bottom-up (pull) steps.
            nf = sharedFrontierQueue->q_bitmap->numSetBits;
            printf("| E %-12s | %-15s | %-15f | \n", " ", " ", Seconds(timer_inner));
            do
            {
                Start(timer_inner);
                nf_prev = nf;
                nf = bottomUpStepGraphCSR(graph, sharedFrontierQueue->q_bitmap, sharedFrontierQueue->q_bitmap_next, stats);
                sharedFrontierQueue->q_bitmap_next->numSetBits = nf;
                swapBitmaps(&sharedFrontierQueue->q_bitmap, &sharedFrontierQueue->q_bitmap_next);
                clearBitmap(sharedFrontierQueue->q_bitmap_next);
                Stop(timer_inner);
                //stats
                stats->time_total += Seconds(timer_inner);
                stats->processed_nodes += nf;
                printf("| BU %-12u | %-15u | %-15f | \n", stats->iteration++, nf, Seconds(timer_inner));
            }
            while(( nf > nf_prev) ||  // frontier still growing;
                  ( nf > (n / beta))); // or still a large fraction of the graph
            printf("| C %-12s | %-15s | %-15f | \n", " ", " ", Seconds(timer_inner));
            mf = 1; // force at least one push step before re-evaluating
        }
        else
        {
            // Frontier is small: stay in top-down (push) mode.
            mu -= mf;
            Start(timer_inner);
            mf = topDownStepUsingBitmapsGraphCSR(graph, sharedFrontierQueue, stats);
            sharedFrontierQueue->q_bitmap_next->numSetBits = getNumOfSetBits(sharedFrontierQueue->q_bitmap_next);
            swapBitmaps(&sharedFrontierQueue->q_bitmap, &sharedFrontierQueue->q_bitmap_next);
            clearBitmap(sharedFrontierQueue->q_bitmap_next);
            Stop(timer_inner);
            stats->time_total += Seconds(timer_inner);
            stats->processed_nodes += sharedFrontierQueue->q_bitmap->numSetBits;
            printf("| TD %-12u | %-15u | %-15f | \n", stats->iteration++, sharedFrontierQueue->q_bitmap->numSetBits, Seconds(timer_inner));
        }
    } // end while
    Stop(timer);

    // "No OverHead" = sum of kernel times; "total" = wall clock (overwrites time_total).
    printf(" -----------------------------------------------------\n");
    printf("| %-15s | %-15u | %-15f | \n", "No OverHead", stats->processed_nodes, stats->time_total);
    printf(" -----------------------------------------------------\n");
    stats->time_total = Seconds(timer);
    printf(" -----------------------------------------------------\n");
    printf("| %-15s | %-15u | %-15f | \n", "total", stats->processed_nodes, Seconds(timer));
    printf(" -----------------------------------------------------\n");

    freeArrayQueue(sharedFrontierQueue);
    free(timer);
    free(timer_inner);
    return stats;
}
/**
 * One top-down (push) BFS step using bitmap frontiers.
 *
 * Scans every bit of the current frontier bitmap; for each set vertex v,
 * claims unvisited out-neighbors via compare-and-swap and marks them in the
 * next-frontier bitmap.
 *
 * @return mf — edge-count estimate of the new frontier for the caller's
 *         direction-switching heuristic.
 */
uint32_t topDownStepUsingBitmapsGraphCSR(struct GraphCSR *graph, struct ArrayQueue *sharedFrontierQueue, struct BFSStats *stats)
{
    uint32_t v;        // frontier vertex being expanded
    uint32_t u;        // neighbor candidate
    uint32_t i;        // bit index into the frontier bitmap
    uint32_t j;        // CSR edge-array index
    uint32_t edge_idx; // start of v's adjacency list
    uint32_t mf = 0;
    #pragma omp parallel default (none) private(u,v,j,i,edge_idx) shared(stats,graph,sharedFrontierQueue,mf)
    {
        #pragma omp for reduction(+:mf)
        for(i = 0 ; i < (sharedFrontierQueue->q_bitmap->size); i++)
        {
            if(getBit(sharedFrontierQueue->q_bitmap, i))
            {
                // processed_nodes++;
                v = i;
                edge_idx = graph->vertices->edges_idx[v];
                for(j = edge_idx ; j < (edge_idx + graph->vertices->out_degree[v]) ; j++)
                {
                    u = EXTRACT_VALUE(graph->sorted_edges_array->edges_array_dest[j]);
                    int u_parent = stats->parents[u];
                    // Negative parent marks u unvisited; CAS lets one thread claim it.
                    if(u_parent < 0 )
                    {
                        if(__sync_bool_compare_and_swap(&stats->parents[u], u_parent, v))
                        {
                            // NOTE(review): -(u_parent) appears to be u's out-degree
                            // (unvisited parents seem to encode its negation) —
                            // confirm against newBFSStatsGraphCSR.
                            mf += -(u_parent);
                            stats->distances[u] = stats->distances[v] + 1;
                            setBitAtomic(sharedFrontierQueue->q_bitmap_next, u);
                        }
                    }
                }
            }
        }
    }
    return mf;
}
// ********************************************************************************************
// *************** CSR DataStructure DualOrder **************
// ********************************************************************************************
/**
 * Dispatch a DualOrder-CSR BFS based on the requested traversal direction.
 *
 * pushpull: 0 = pull (bottom-up), 1 = push (top-down); any other value
 * (including 2) runs the direction-optimized push/pull hybrid.
 *
 * @return BFSStats from the selected variant; caller owns it.
 */
struct BFSStats *breadthFirstSearchGraphCSRDualOrder(struct Arguments *arguments, struct GraphCSR *graph)
{
    if (arguments->pushpull == 0)
        return breadthFirstSearchPullGraphCSRDualOrder(arguments, graph);

    if (arguments->pushpull == 1)
        return breadthFirstSearchPushGraphCSRDualOrder(arguments, graph);

    // 2 and every unrecognized mode fall through to the hybrid.
    return breadthFirstSearchDirectionOptimizedGraphCSRDualOrder(arguments, graph);
}
// breadth-first-search(graph, arguments->source)
// sharedFrontierQueue ← {arguments->source}
// next ← {}
// parents ← [-1,-1,. . . -1]
// while sharedFrontierQueue 6= {} do
// top-down-step(graph, sharedFrontierQueue, next, parents)
// sharedFrontierQueue ← next
// next ← {}
// end while
// return parents
struct BFSStats *breadthFirstSearchPullGraphCSRDualOrder(struct Arguments *arguments, struct GraphCSR *graph)
{
struct BFSStats *stats = newBFSStatsGraphCSR(graph);
if(arguments->source > graph->num_vertices)
{
printf(" -----------------------------------------------------\n");
printf("| %-51s | \n", "ERROR!! CHECK SOURCE RANGE");
printf(" -----------------------------------------------------\n");
return stats;
}
#if DIRECTED
arguments->source = graph->inverse_sorted_edges_array->label_array[arguments->source];
#else
arguments->source = graph->sorted_edges_array->label_array[arguments->source];
#endif
printf(" -----------------------------------------------------\n");
printf("| %-51s | \n", "Starting BFS DualOrder PULL/BU (SOURCE NODE)");
printf(" -----------------------------------------------------\n");
printf("| %-51u | \n", arguments->source);
printf(" -----------------------------------------------------\n");
printf("| %-15s | %-15s | %-15s | \n", "Iteration", "Nodes", "Time (Seconds)");
printf(" -----------------------------------------------------\n");
struct Timer *timer = (struct Timer *) malloc(sizeof(struct Timer));
struct Timer *timer_inner = (struct Timer *) malloc(sizeof(struct Timer));
struct ArrayQueue *sharedFrontierQueue = newArrayQueue(graph->num_vertices);
uint32_t nf = 0; // number of vertices in sharedFrontierQueue
Start(timer_inner);
setBit(sharedFrontierQueue->q_bitmap_next, arguments->source);
sharedFrontierQueue->q_bitmap_next->numSetBits = 1;
stats->parents[arguments->source] = arguments->source;
swapBitmaps(&sharedFrontierQueue->q_bitmap, &sharedFrontierQueue->q_bitmap_next);
clearBitmap(sharedFrontierQueue->q_bitmap_next);
Stop(timer_inner);
stats->time_total += Seconds(timer_inner);
printf("| BU %-12u | %-15u | %-15f | \n", stats->iteration++, ++stats->processed_nodes, Seconds(timer_inner));
Start(timer);
while (sharedFrontierQueue->q_bitmap->numSetBits)
{
Start(timer_inner);
nf = bottomUpStepGraphCSRDualOrder(graph, sharedFrontierQueue->q_bitmap, sharedFrontierQueue->q_bitmap_next, stats);
sharedFrontierQueue->q_bitmap_next->numSetBits = nf;
swapBitmaps(&sharedFrontierQueue->q_bitmap, &sharedFrontierQueue->q_bitmap_next);
clearBitmap(sharedFrontierQueue->q_bitmap_next);
Stop(timer_inner);
//stats
stats->time_total += Seconds(timer_inner);
stats->processed_nodes += nf;
printf("| BU %-12u | %-15u | %-15f | \n", stats->iteration++, nf, Seconds(timer_inner));
} // end while
Stop(timer);
// stats->time_total = Seconds(timer);
printf(" -----------------------------------------------------\n");
printf("| %-15s | %-15u | %-15f | \n", "No OverHead", stats->processed_nodes, stats->time_total);
printf(" -----------------------------------------------------\n");
stats->time_total = Seconds(timer);
printf(" -----------------------------------------------------\n");
printf("| %-15s | %-15u | %-15f | \n", "total", stats->processed_nodes, Seconds(timer));
printf(" -----------------------------------------------------\n");
freeArrayQueue(sharedFrontierQueue);
free(timer);
free(timer_inner);
return stats;
}
// breadth-first-search(graph, arguments->source)
// sharedFrontierQueue ← {arguments->source}
// next ← {}
// parents ← [-1,-1,. . . -1]
// while sharedFrontierQueue 6= {} do
// top-down-step(graph, sharedFrontierQueue, next, parents)
// sharedFrontierQueue ← next
// next ← {}
// end while
// return parents
struct BFSStats *breadthFirstSearchPushGraphCSRDualOrder(struct Arguments *arguments, struct GraphCSR *graph)
{
struct BFSStats *stats = newBFSStatsGraphCSR(graph);
if(arguments->source > graph->num_vertices)
{
printf(" -----------------------------------------------------\n");
printf("| %-51s | \n", "ERROR!! CHECK SOURCE RANGE");
printf(" -----------------------------------------------------\n");
return stats;
}
arguments->source = graph->sorted_edges_array->label_array[arguments->source];
printf(" -----------------------------------------------------\n");
printf("| %-51s | \n", "Starting BFS DualOrder PUSH/TD (SOURCE NODE)");
printf(" -----------------------------------------------------\n");
printf("| %-51u | \n", arguments->source);
printf(" -----------------------------------------------------\n");
printf("| %-15s | %-15s | %-15s | \n", "Iteration", "Nodes", "Time (Seconds)");
printf(" -----------------------------------------------------\n");
struct Timer *timer = (struct Timer *) malloc(sizeof(struct Timer));
struct Timer *timer_inner = (struct Timer *) malloc(sizeof(struct Timer));
struct ArrayQueue *sharedFrontierQueue = newArrayQueue(graph->num_vertices);
uint32_t P = arguments->algo_numThreads;
struct ArrayQueue **localFrontierQueues = (struct ArrayQueue **) my_malloc( P * sizeof(struct ArrayQueue *));
uint32_t i;
for(i = 0 ; i < P ; i++)
{
localFrontierQueues[i] = newArrayQueue(graph->num_vertices);
}
Start(timer_inner);
enArrayQueue(sharedFrontierQueue, arguments->source);
// setBit(sharedFrontierQueue->q_bitmap,arguments->source);
stats->parents[arguments->source] = arguments->source;
Stop(timer_inner);
stats->time_total += Seconds(timer_inner);
// graph->vertices[arguments->source].visited = 1;
printf("| TD %-12u | %-15u | %-15f | \n", stats->iteration++, ++stats->processed_nodes, Seconds(timer_inner));
Start(timer);
while(!isEmptyArrayQueue(sharedFrontierQueue)) // start while
{
Start(timer_inner);
topDownStepGraphCSRDualOrder(graph, sharedFrontierQueue, localFrontierQueues, stats);
slideWindowArrayQueue(sharedFrontierQueue);
Stop(timer_inner);
//stats collection
stats->time_total += Seconds(timer_inner);
stats->processed_nodes += sharedFrontierQueue->tail - sharedFrontierQueue->head;
printf("| TD %-12u | %-15u | %-15f | \n", stats->iteration++, sharedFrontierQueue->tail - sharedFrontierQueue->head, Seconds(timer_inner));
} // end while
Stop(timer);
// stats->time_total = Seconds(timer);
printf(" -----------------------------------------------------\n");
printf("| %-15s | %-15u | %-15f | \n", "No OverHead", stats->processed_nodes, stats->time_total);
printf(" -----------------------------------------------------\n");
stats->time_total = Seconds(timer);
printf(" -----------------------------------------------------\n");
printf("| %-15s | %-15u | %-15f | \n", "total", stats->processed_nodes, Seconds(timer));
printf(" -----------------------------------------------------\n");
for(i = 0 ; i < P ; i++)
{
freeArrayQueue(localFrontierQueues[i]);
}
free(localFrontierQueues);
freeArrayQueue(sharedFrontierQueue);
free(timer);
free(timer_inner);
return stats;
}
// breadth-first-search(graph, arguments->source)
// sharedFrontierQueue ← {arguments->source}
// next ← {}
// parents ← [-1,-1,. . . -1]
// while sharedFrontierQueue 6= {} do
// top-down-step(graph, sharedFrontierQueue, next, parents)
// sharedFrontierQueue ← next
// next ← {}
// end while
// return parents
/**
 * Direction-optimizing BFS over a DualOrder CSR graph.
 *
 * Push (top-down) steps work on an array queue in the push-order id space;
 * pull (bottom-up) steps work on bitmaps in the pull-order id space. On each
 * direction switch the frontier and the parents array are translated between
 * the two orderings via the label/inverse-label arrays. The alpha/beta
 * heuristic (Beamer et al.) decides when to switch.
 *
 * @param arguments run configuration; arguments->source is remapped in place.
 * @param graph     CSR graph to traverse.
 * @return BFSStats with parents/distances populated; caller owns it.
 */
struct BFSStats *breadthFirstSearchDirectionOptimizedGraphCSRDualOrder(struct Arguments *arguments, struct GraphCSR *graph)
{
    struct BFSStats *stats = newBFSStatsGraphCSR(graph);

    // Valid ids are 0..num_vertices-1, so '>=' is required: the previous '>'
    // accepted source == num_vertices and read label_array[] out of bounds.
    if(arguments->source >= graph->num_vertices)
    {
        printf(" -----------------------------------------------------\n");
        printf("| %-51s | \n", "ERROR!! CHECK SOURCE RANGE");
        printf(" -----------------------------------------------------\n");
        return stats;
    }

    arguments->source = graph->sorted_edges_array->label_array[arguments->source];

    printf(" -----------------------------------------------------\n");
    printf("| %-51s | \n", "Starting BFS DualOrder PUSH/PULL(SOURCE NODE)");
    printf(" -----------------------------------------------------\n");
    printf("| %-51u | \n", arguments->source);
    printf(" -----------------------------------------------------\n");
    printf("| %-15s | %-15s | %-15s | \n", "Iteration", "Nodes", "Time (Seconds)");
    printf(" -----------------------------------------------------\n");

    // my_malloc aborts on failure (raw malloc here was unchecked).
    struct Timer *timer = (struct Timer *) my_malloc(sizeof(struct Timer));
    struct Timer *timer_inner = (struct Timer *) my_malloc(sizeof(struct Timer));
    struct ArrayQueue *sharedFrontierQueue = newArrayQueue(graph->num_vertices);
    struct Bitmap *bitmapCurr = newBitmap(graph->num_vertices);
    struct Bitmap *bitmapNext = newBitmap(graph->num_vertices);

    uint32_t P = arguments->algo_numThreads;
    // Direction-switch heuristic state:
    uint32_t mu = graph->num_edges;                                // edges left to explore
    uint32_t mf = graph->vertices->out_degree[arguments->source];  // edges out of the frontier
    uint32_t nf = 0;       // frontier size (bottom-up phase)
    uint32_t nf_prev = 0;  // previous frontier size
    uint32_t n = graph->num_vertices;
    uint32_t alpha = 15;   // push -> pull threshold
    uint32_t beta = 18;    // pull -> push threshold

    struct ArrayQueue **localFrontierQueues = (struct ArrayQueue **) my_malloc( P * sizeof(struct ArrayQueue *));
    uint32_t i;
    for(i = 0 ; i < P ; i++)
    {
        localFrontierQueues[i] = newArrayQueue(graph->num_vertices);
    }

    // Seed the shared queue with the source.
    Start(timer_inner);
    enArrayQueue(sharedFrontierQueue, arguments->source);
    stats->parents[arguments->source] = arguments->source;
    Stop(timer_inner);
    stats->time_total += Seconds(timer_inner);
    printf("| TD %-12u | %-15u | %-15f | \n", stats->iteration++, ++stats->processed_nodes, Seconds(timer_inner));

    Start(timer);
    while(!isEmptyArrayQueue(sharedFrontierQueue)) // start while
    {
        if(mf > (mu / alpha))
        {
            // Switch to pull: translate frontier + parents into pull-order ids.
            Start(timer_inner);
            arrayQueueToBitmapDualOrder(sharedFrontierQueue, bitmapCurr, graph->sorted_edges_array->inverse_label_array);
            syncDualOrderParentArrays(&(stats->parents), &(stats->parents_DualOrder), graph->sorted_edges_array->inverse_label_array, graph->num_vertices);
            // NOTE(review): the distances array is intentionally not synced
            // between orderings here (see disabled syncDualOrderDistancesArrays
            // call in history) — distances may be stale in pull order.
            nf = sizeArrayQueue(sharedFrontierQueue);
            Stop(timer_inner);
            printf("| E %-12s | %-15s | %-15f | \n", " ", " ", Seconds(timer_inner));
            do
            {
                Start(timer_inner);
                nf_prev = nf;
                nf = bottomUpStepGraphCSRDualOrder(graph, bitmapCurr, bitmapNext, stats);
                swapBitmaps(&bitmapCurr, &bitmapNext);
                clearBitmap(bitmapNext);
                Stop(timer_inner);
                //stats collection
                stats->time_total += Seconds(timer_inner);
                stats->processed_nodes += nf;
                printf("| BU %-12u | %-15u | %-15f | \n", stats->iteration++, nf, Seconds(timer_inner));
            }
            while(( nf > nf_prev) ||  // frontier still growing;
                  ( nf > (n / beta))); // or still a large fraction of the graph
            // Switch back to push: translate parents + frontier to push-order ids.
            Start(timer_inner);
            syncDualOrderParentArrays(&(stats->parents), &(stats->parents_DualOrder), graph->inverse_sorted_edges_array->inverse_label_array, graph->num_vertices);
            bitmapToArrayQueueDualOrder(bitmapCurr, sharedFrontierQueue, localFrontierQueues, graph->inverse_sorted_edges_array->inverse_label_array);
            Stop(timer_inner);
            printf("| C %-12s | %-15s | %-15f | \n", " ", " ", Seconds(timer_inner));
            mf = 1; // force at least one push step before re-evaluating
        }
        else
        {
            Start(timer_inner);
            mu -= mf;
            mf = topDownStepGraphCSRDualOrder(graph, sharedFrontierQueue, localFrontierQueues, stats);
            slideWindowArrayQueue(sharedFrontierQueue);
            Stop(timer_inner);
            //stats collection
            stats->time_total += Seconds(timer_inner);
            stats->processed_nodes += sharedFrontierQueue->tail - sharedFrontierQueue->head;
            printf("| TD %-12u | %-15u | %-15f | \n", stats->iteration++, sharedFrontierQueue->tail - sharedFrontierQueue->head, Seconds(timer_inner));
        }
    } // end while
    Stop(timer);

    // "No OverHead" = sum of kernel times; "total" = wall clock (overwrites time_total).
    printf(" -----------------------------------------------------\n");
    printf("| %-15s | %-15u | %-15f | \n", "No OverHead", stats->processed_nodes, stats->time_total);
    printf(" -----------------------------------------------------\n");
    stats->time_total = Seconds(timer);
    printf(" -----------------------------------------------------\n");
    printf("| %-15s | %-15u | %-15f | \n", "total", stats->processed_nodes, Seconds(timer));
    printf(" -----------------------------------------------------\n");

    for(i = 0 ; i < P ; i++)
    {
        freeArrayQueue(localFrontierQueues[i]);
    }
    free(localFrontierQueues);
    freeArrayQueue(sharedFrontierQueue);
    freeBitmap(bitmapNext);
    freeBitmap(bitmapCurr);
    free(timer);
    free(timer_inner);
    return stats;
}
// top-down-step(graph, sharedFrontierQueue, next, parents)
// for v ∈ sharedFrontierQueue do
// for u ∈ neighbors[v] do
// if parents[u] = -1 then
// parents[u] ← v
// next ← next ∪ {u}
// end if
// end for
// end for
/**
 * One top-down (push) BFS step over a DualOrder CSR graph.
 *
 * Identical in structure to topDownStepGraphCSR: each frontier vertex scans
 * its out-edges, claims unvisited neighbors via compare-and-swap, and buffers
 * discoveries in a per-thread queue flushed once to the shared queue.
 *
 * @return mf — edge-count estimate of the new frontier for the caller's
 *         direction-switching heuristic.
 */
uint32_t topDownStepGraphCSRDualOrder(struct GraphCSR *graph, struct ArrayQueue *sharedFrontierQueue, struct ArrayQueue **localFrontierQueues, struct BFSStats *stats)
{
    uint32_t v;        // frontier vertex being expanded
    uint32_t u;        // neighbor candidate
    uint32_t i;        // index into the shared frontier queue
    uint32_t j;        // CSR edge-array index
    uint32_t edge_idx; // start of v's adjacency list
    uint32_t mf = 0;
    #pragma omp parallel default (none) private(u,v,j,i,edge_idx) shared(stats,localFrontierQueues,graph,sharedFrontierQueue,mf)
    {
        // One pre-allocated local queue per thread to limit contention.
        uint32_t t_id = omp_get_thread_num();
        struct ArrayQueue *localFrontierQueue = localFrontierQueues[t_id];
        #pragma omp for reduction(+:mf) schedule(auto)
        for(i = sharedFrontierQueue->head ; i < sharedFrontierQueue->tail; i++)
        {
            v = sharedFrontierQueue->queue[i];
            edge_idx = graph->vertices->edges_idx[v];
            for(j = edge_idx ; j < (edge_idx + graph->vertices->out_degree[v]) ; j++)
            {
                u = EXTRACT_VALUE(graph->sorted_edges_array->edges_array_dest[j]);
                int u_parent = stats->parents[u];
                // Negative parent marks u unvisited; CAS lets one thread claim it.
                if(u_parent < 0 )
                {
                    if(__sync_bool_compare_and_swap(&stats->parents[u], u_parent, v))
                    {
                        enArrayQueue(localFrontierQueue, u);
                        // NOTE(review): -(u_parent) appears to add u's out-degree to
                        // mf (unvisited parents seem to encode its negation) —
                        // confirm against newBFSStatsGraphCSR.
                        mf += -(u_parent);
                        stats->distances[u] = stats->distances[v] + 1;
                    }
                }
            }
        }
        // Single bulk transfer of this thread's discoveries.
        flushArrayQueueToShared(localFrontierQueue, sharedFrontierQueue);
    }
    return mf;
}
// bottom-up-step(graph, sharedFrontierQueue, next, parents) //pull
// for v ∈ vertices do
// if parents[v] = -1 then
// for u ∈ neighbors[v] do
// if u ∈ sharedFrontierQueue then
// parents[v] ← u
// next ← next ∪ {v}
// break
// end if
// end for
// end if
// end for
/**
 * One bottom-up (pull) BFS step over a DualOrder CSR graph.
 *
 * Every still-unvisited vertex v scans its (in-)neighbors; the first neighbor
 * present in the current frontier bitmap becomes v's parent and v joins the
 * next frontier. Directed graphs pull along incoming edges via the inverse CSR.
 *
 * @return nf — number of vertices added to the next frontier.
 */
uint32_t bottomUpStepGraphCSRDualOrder(struct GraphCSR *graph, struct Bitmap *bitmapCurr, struct Bitmap *bitmapNext, struct BFSStats *stats)
{
    uint32_t v;          // candidate (unvisited) vertex
    uint32_t u;          // neighbor tested for frontier membership
    uint32_t j;          // edge-array index
    uint32_t edge_idx;   // start of v's adjacency list
    uint32_t out_degree;
    struct Vertex *vertices = NULL;
    uint32_t *sorted_edges_array = NULL;
    // uint32_t processed_nodes = bitmapCurr->numSetBits;
    uint32_t nf = 0; // number of vertices in sharedFrontierQueue
    // stats->processed_nodes += processed_nodes;
    // Pull direction: directed graphs must walk incoming edges.
    #if DIRECTED
    vertices = graph->inverse_vertices;
    sorted_edges_array = graph->inverse_sorted_edges_array->edges_array_dest;
    #else
    vertices = graph->vertices;
    sorted_edges_array = graph->sorted_edges_array->edges_array_dest;
    #endif
    #pragma omp parallel for default(none) private(j,u,v,out_degree,edge_idx) shared(stats,bitmapCurr,bitmapNext,graph,vertices,sorted_edges_array) reduction(+:nf) schedule(dynamic, 1024)
    for(v = 0 ; v < graph->num_vertices ; v++)
    {
        out_degree = vertices->out_degree[v];
        if(stats->parents[v] < 0) // optmization
        {
            edge_idx = vertices->edges_idx[v];
            for(j = edge_idx ; j < (edge_idx + out_degree) ; j++)
            {
                u = EXTRACT_VALUE(sorted_edges_array[j]);
                if(getBit(bitmapCurr, u))
                {
                    // Non-atomic writes are safe: each v belongs to one iteration.
                    stats->parents[v] = u;
                    //we are not considering distance array as it is not implemented in AccelGraph
                    stats->distances[v] = stats->distances[u] + 1;
                    setBitAtomic(bitmapNext, v);
                    nf++;
                    break; // first frontier parent wins
                }
            }
        }
    }
    return nf;
}
// ********************************************************************************************
// *************** GRID DataStructure **************
// ********************************************************************************************
/**
 * Dispatch a Grid-layout BFS based on the requested traversal mode.
 *
 * pushpull: 0 = row order, 1 = row order with bitmap frontier,
 * 2 = column order, 3 = column order with bitmap frontier; any other value
 * falls back to the row-order traversal.
 *
 * @return BFSStats from the selected variant; caller owns it.
 */
struct BFSStats *breadthFirstSearchGraphGrid(struct Arguments *arguments, struct GraphGrid *graph)
{
    switch (arguments->pushpull)
    {
    case 1: // row order, bitmap frontier
        return breadthFirstSearchRowGraphGridBitmap(arguments, graph);
    case 2: // column order
        return breadthFirstSearchColumnGraphGrid(arguments, graph);
    case 3: // column order, bitmap frontier
        return breadthFirstSearchColumnGraphGridBitmap(arguments, graph);
    case 0: // row order
    default:
        return breadthFirstSearchRowGraphGrid(arguments, graph);
    }
}
// function STREAMVERTICES(Fv,F)
// Sum = 0
// for each vertex do
// if F(vertex) then
// Sum += Fv(edge)
// end if
// end for
// return Sum
// end function
// function STREAMEDGES(Fe,F)
// Sum = 0
// for each active block do >> block with active edges
// for each edge ∈ block do
// if F(edge.arguments->source) then
// Sum += Fe(edge)
// end if
// end for
// end for
// return Sum
// end function
//we assume that the edges are not sorted in each partition
struct BFSStats *breadthFirstSearchRowGraphGrid(struct Arguments *arguments, struct GraphGrid *graph)
{
struct BFSStats *stats = newBFSStatsGraphGrid(graph);
printf(" -----------------------------------------------------\n");
printf("| %-51s | \n", "Starting BFS-Row (SOURCE NODE)");
printf(" -----------------------------------------------------\n");
printf("| %-51u | \n", arguments->source);
printf(" -----------------------------------------------------\n");
printf("| %-15s | %-15s | %-15s | \n", "Iteration", "Nodes", "Time (Seconds)");
printf(" -----------------------------------------------------\n");
if(arguments->source > graph->num_vertices)
{
printf(" -----------------------------------------------------\n");
printf("| %-51s | \n", "ERROR!! CHECK SOURCE RANGE");
printf(" -----------------------------------------------------\n");
return stats;
}
struct Timer *timer = (struct Timer *) malloc(sizeof(struct Timer));
struct Timer *timer_iteration = (struct Timer *) malloc(sizeof(struct Timer));
struct ArrayQueue *sharedFrontierQueue = newArrayQueue(graph->num_vertices);
uint32_t P = arguments->algo_numThreads;
struct ArrayQueue **localFrontierQueues = (struct ArrayQueue **) my_malloc( P * sizeof(struct ArrayQueue *));
uint32_t i;
#pragma omp parallel for
for(i = 0 ; i < P ; i++)
{
localFrontierQueues[i] = newArrayQueue(graph->num_vertices);
}
graphGridReset(graph);
uint32_t processed_nodes = 0;
Start(timer_iteration);
enArrayQueue(sharedFrontierQueue, arguments->source);
arrayQueueGenerateBitmap(sharedFrontierQueue);
stats->parents[arguments->source] = arguments->source;
// graphGridSetActivePartitions(graph->grid, arguments->source);
graphGridSetActivePartitionsMap(graph->grid, arguments->source);
Stop(timer_iteration);
printf("| %-15u | %-15u | %-15f | \n", stats->iteration++, ++processed_nodes, Seconds(timer_iteration));
stats->time_total += Seconds(timer_iteration);
Start(timer);
while(!isEmptyArrayQueue(sharedFrontierQueue)) // start while
{
Start(timer_iteration);
breadthFirstSearchStreamEdgesRowGraphGrid(graph, sharedFrontierQueue, localFrontierQueues, stats);
Stop(timer_iteration);
processed_nodes = sharedFrontierQueue->tail_next - sharedFrontierQueue->tail;
slideWindowArrayQueue(sharedFrontierQueue);
arrayQueueGenerateBitmap(sharedFrontierQueue);
breadthFirstSearchSetActivePartitions(graph, sharedFrontierQueue);
stats->time_total += Seconds(timer_iteration);
printf("| %-15u | %-15u | %-15f | \n", stats->iteration++, processed_nodes, Seconds(timer_iteration));
} // end while
Stop(timer);
printf(" -----------------------------------------------------\n");
printf("| %-15s | %-15u | %-15f | \n", "No OverHead", sharedFrontierQueue->tail_next, stats->time_total);
printf(" -----------------------------------------------------\n");
stats->time_total = Seconds(timer);
printf(" -----------------------------------------------------\n");
printf("| %-15s | %-15u | %-15f | \n", "**", sharedFrontierQueue->tail_next, Seconds(timer));
printf(" -----------------------------------------------------\n");
freeArrayQueue(sharedFrontierQueue);
for(i = 0 ; i < P ; i++)
{
freeArrayQueue(localFrontierQueues[i]);
}
// #pragma omp parallel for
// for(i=0 ; i < P*P ; i++){
// freeArrayQueue(localFrontierQueuesL2[i]);
// }
// free(localFrontierQueuesL2);
free(localFrontierQueues);
free(timer_iteration);
free(timer);
return stats;
}
// BFS over a grid-partitioned graph, streaming edge partitions in
// column-major order. The frontier lives in a shared ArrayQueue; each
// thread accumulates newly discovered vertices in its own local queue
// (one per thread) and flushes it back into the shared queue. Active
// partitions are recomputed from the frontier after every iteration.
// Returns a heap-allocated BFSStats (parents/distances/timings); the
// caller owns and frees it.
struct BFSStats *breadthFirstSearchColumnGraphGrid(struct Arguments *arguments, struct GraphGrid *graph)
{
    struct BFSStats *stats = newBFSStatsGraphGrid(graph);

    printf(" -----------------------------------------------------\n");
    printf("| %-51s | \n", "Starting BFS-Column (SOURCE NODE)");
    printf(" -----------------------------------------------------\n");
    printf("| %-51u | \n", arguments->source);
    printf(" -----------------------------------------------------\n");
    printf("| %-15s | %-15s | %-15s | \n", "Iteration", "Nodes", "Time (Seconds)");
    printf(" -----------------------------------------------------\n");

    // Valid vertex ids are 0 .. num_vertices-1, so equality is also out of
    // range: the original `>` test allowed an out-of-bounds write to
    // stats->parents[source] below.
    if(arguments->source >= graph->num_vertices)
    {
        printf(" -----------------------------------------------------\n");
        printf("| %-51s | \n", "ERROR!! CHECK SOURCE RANGE");
        printf(" -----------------------------------------------------\n");
        return stats;
    }

    struct Timer *timer = (struct Timer *) malloc(sizeof(struct Timer));
    struct Timer *timer_iteration = (struct Timer *) malloc(sizeof(struct Timer));
    struct ArrayQueue *sharedFrontierQueue = newArrayQueue(graph->num_vertices);

    uint32_t P = arguments->algo_numThreads;
    struct ArrayQueue **localFrontierQueues = (struct ArrayQueue **) my_malloc( P * sizeof(struct ArrayQueue *));
    uint32_t i;

    #pragma omp parallel for
    for(i = 0 ; i < P ; i++)
    {
        localFrontierQueues[i] = newArrayQueue(graph->num_vertices);
    }

    graphGridReset(graph);
    uint32_t processed_nodes = 0;

    // Seed the frontier with the source vertex and mark its partition active.
    Start(timer_iteration);
    enArrayQueue(sharedFrontierQueue, arguments->source);
    arrayQueueGenerateBitmap(sharedFrontierQueue);
    stats->parents[arguments->source] = arguments->source;
    graphGridSetActivePartitionsMap(graph->grid, arguments->source);
    Stop(timer_iteration);

    printf("| %-15u | %-15u | %-15f | \n", stats->iteration++, ++processed_nodes, Seconds(timer_iteration));
    stats->time_total += Seconds(timer_iteration);

    Start(timer);
    while(!isEmptyArrayQueue(sharedFrontierQueue)) // start while
    {
        Start(timer_iteration);
        breadthFirstSearchStreamEdgesColumnGraphGrid(graph, sharedFrontierQueue, localFrontierQueues, stats);
        Stop(timer_iteration);

        // Vertices appended during this sweep form the next frontier window.
        processed_nodes = sharedFrontierQueue->tail_next - sharedFrontierQueue->tail;
        slideWindowArrayQueue(sharedFrontierQueue);
        arrayQueueGenerateBitmap(sharedFrontierQueue);
        breadthFirstSearchSetActivePartitions(graph, sharedFrontierQueue);

        stats->time_total += Seconds(timer_iteration);
        printf("| %-15u | %-15u | %-15f | \n", stats->iteration++, processed_nodes, Seconds(timer_iteration));
    } // end while
    Stop(timer);

    printf(" -----------------------------------------------------\n");
    printf("| %-15s | %-15u | %-15f | \n", "No OverHead", sharedFrontierQueue->tail_next, stats->time_total);
    printf(" -----------------------------------------------------\n");
    stats->time_total = Seconds(timer);
    printf(" -----------------------------------------------------\n");
    printf("| %-15s | %-15u | %-15f | \n", "**", sharedFrontierQueue->tail_next, Seconds(timer));
    printf(" -----------------------------------------------------\n");

    freeArrayQueue(sharedFrontierQueue);
    for(i = 0 ; i < P ; i++)
    {
        freeArrayQueue(localFrontierQueues[i]);
    }
    free(localFrontierQueues);
    free(timer_iteration);
    free(timer);

    return stats;
}
// function STREAMEDGES(Fe, F)
//     Sum = 0
//     for each active block do            >> block with active edges
//         for each edge ∈ block do
//             if F(edge.source) then
//                 Sum += Fe(edge)
//             end if
//         end for
//     end for
//     return Sum
// end function
// We assume that the edges are not sorted within each partition.
// Streams the grid's edge partitions row by row (push direction).
// Rows are visited serially; the partitions inside a row are processed in
// parallel. Each OpenMP thread uses its own local queue (indexed by thread
// id) and flushes newly discovered vertices to sharedFrontierQueue after
// finishing each partition.
void breadthFirstSearchStreamEdgesRowGraphGrid(struct GraphGrid *graph, struct ArrayQueue *sharedFrontierQueue, struct ArrayQueue **localFrontierQueues, struct BFSStats *stats)
{
// struct Timer* timer = (struct Timer*) malloc(sizeof(struct Timer));
uint32_t totalPartitions = 0;
totalPartitions = graph->grid->num_partitions; // PxP
uint32_t i;
// Outer loop over rows runs serially so that each row's discoveries are
// flushed before the next row begins.
for (i = 0; i < totalPartitions; ++i)
{
uint32_t j;
#pragma omp parallel for default(none) shared(i,stats,totalPartitions,localFrontierQueues ,sharedFrontierQueue, graph)
for (j = 0; j < totalPartitions; ++j)
{
uint32_t t_id = omp_get_thread_num();
// uint32_t A = 0;
struct ArrayQueue *localFrontierQueue = localFrontierQueues[t_id];
// Skip partitions that contain no frontier sources (active-partition bitmap).
if(getBit(graph->grid->activePartitionsMap, (i * totalPartitions) + j))
{
// #pragma omp task untied
// {
breadthFirstSearchPartitionGraphGrid(graph, &(graph->grid->partitions[(i * totalPartitions) + j]), sharedFrontierQueue, localFrontierQueue, stats);
flushArrayQueueToShared(localFrontierQueue, sharedFrontierQueue);
// }
}
}
}
// flushArrayQueueToShared(localFrontierQueue,sharedFrontierQueue);
// }
}
// Streams the grid's edge partitions column by column (push direction).
// Columns are distributed across OpenMP threads; within its column each
// thread walks the rows serially, streaming only active partitions, and
// flushes its local queue to sharedFrontierQueue after each partition.
void breadthFirstSearchStreamEdgesColumnGraphGrid(struct GraphGrid *graph, struct ArrayQueue *sharedFrontierQueue, struct ArrayQueue **localFrontierQueues, struct BFSStats *stats)
{
// struct Timer* timer = (struct Timer*) malloc(sizeof(struct Timer));
uint32_t totalPartitions = 0;
totalPartitions = graph->grid->num_partitions; // PxP
#pragma omp parallel default(none) shared(stats,totalPartitions,localFrontierQueues ,sharedFrontierQueue, graph)
// #pragma omp single nowait
{
uint32_t t_id = omp_get_thread_num();
// uint32_t A = 0;
struct ArrayQueue *localFrontierQueue = localFrontierQueues[t_id];
uint32_t j;
// Work-share the columns across the team.
#pragma omp for
for (j = 0; j < totalPartitions; ++j)
{
uint32_t i;
for (i = 0; i < totalPartitions; ++i)
{
// Skip partitions that contain no frontier sources.
if(getBit(graph->grid->activePartitionsMap, (i * totalPartitions) + j))
{
// #pragma omp task untied
// {
breadthFirstSearchPartitionGraphGrid(graph, &(graph->grid->partitions[(i * totalPartitions) + j]), sharedFrontierQueue, localFrontierQueue, stats);
flushArrayQueueToShared(localFrontierQueue, sharedFrontierQueue);
// }
}
}
}
}
// flushArrayQueueToShared(localFrontierQueue,sharedFrontierQueue);
// }
}
// Streams one partition's edge list: for every edge whose source is in the
// current frontier (tested via the shared queue's bitmap) and whose
// destination is still unvisited (parents < 0), adopt the source as parent,
// set the distance, and enqueue the destination in the caller's local queue.
// NOTE(review): parents/distances are written without the commented-out CAS,
// so threads streaming different partitions can race on the same dest;
// presumably this is tolerated as a benign race (any winner is a valid
// parent) — verify against the framework's correctness checks.
void breadthFirstSearchPartitionGraphGrid(struct GraphGrid *graph, struct Partition *partition, struct ArrayQueue *sharedFrontierQueue, struct ArrayQueue *localFrontierQueue, struct BFSStats *stats)
{
uint32_t i;
uint32_t src;
uint32_t dest;
// #pragma omp parallel default(none) private(i,src,dest) shared(localFrontierQueuesL2,graph,partition,sharedFrontierQueue,localFrontierQueue)
// {
// uint32_t t_id = omp_get_thread_num();
// struct ArrayQueue* localFrontierQueueL2 = localFrontierQueuesL2[t_id];
// #pragma omp for schedule(dynamic, 1024)
for (i = 0; i < partition->num_edges; ++i)
{
src = partition->edgeList->edges_array_src[i];
dest = partition->edgeList->edges_array_dest[i];
// Negative parent means "not yet visited".
int v_dest = stats->parents[dest];
if(isEnArrayQueued(sharedFrontierQueue, src) && (v_dest < 0))
{
// if(__sync_bool_compare_and_swap(&stats->parents[dest], v_dest, src))
// {
stats->parents[dest] = src;
stats->distances[dest] = stats->distances[src] + 1;
enArrayQueue(localFrontierQueue, dest);
// }
}
}
// flushArrayQueueToShared(localFrontierQueueL2,localFrontierQueue);
// // slideWindowArrayQueue(localFrontierQueue);
// localFrontierQueue->tail = localFrontierQueue->tail_next; // to apply to condition to the next flush
// }
}
// Recompute the active-partition bitmap from the vertices currently in the
// shared frontier queue, so the next sweep only streams partitions that can
// contain frontier edges.
void breadthFirstSearchSetActivePartitions(struct GraphGrid *graph, struct ArrayQueue *sharedFrontierQueue)
{
    uint32_t idx;
    uint32_t vertex;

    graphGridResetActivePartitionsMap(graph->grid);

    // Mark, in parallel, every partition touched by a frontier vertex.
    #pragma omp parallel for default(none) shared(graph,sharedFrontierQueue) private(idx,vertex) schedule(dynamic,1024)
    for(idx = sharedFrontierQueue->head; idx < sharedFrontierQueue->tail; idx++)
    {
        vertex = sharedFrontierQueue->queue[idx];
        graphGridSetActivePartitionsMap(graph->grid, vertex);
    }
}
// ********************************************************************************************
// *************** GRID DataStructure/Bitmap Frontiers **************
// ********************************************************************************************
// function STREAMVERTICES(Fv,F)
// Sum = 0
// for each vertex do
// if F(vertex) then
// Sum += Fv(edge)
// end if
// end for
// return Sum
// end function
// function STREAMEDGES(Fe, F)
//     Sum = 0
//     for each active block do            >> block with active edges
//         for each edge ∈ block do
//             if F(edge.source) then
//                 Sum += Fe(edge)
//             end if
//         end for
//     end for
//     return Sum
// end function
// We assume that the edges are not sorted within each partition.
// BFS over a grid-partitioned graph using bitmap frontiers, streaming
// partitions row-major. Two bitmaps (current/next) are swapped every
// iteration, and the active-partition map is recomputed from the current
// frontier. Returns a heap-allocated BFSStats owned by the caller.
struct BFSStats *breadthFirstSearchRowGraphGridBitmap(struct Arguments *arguments, struct GraphGrid *graph)
{
    struct BFSStats *stats = newBFSStatsGraphGrid(graph);

    printf(" -----------------------------------------------------\n");
    printf("| %-51s | \n", "Starting BFS-Row Bitmap (SOURCE NODE)");
    printf(" -----------------------------------------------------\n");
    printf("| %-51u | \n", arguments->source);
    printf(" -----------------------------------------------------\n");
    printf("| %-15s | %-15s | %-15s | \n", "Iteration", "Nodes", "Time (Seconds)");
    printf(" -----------------------------------------------------\n");

    // Valid ids are 0 .. num_vertices-1: `>=` (not `>`) prevents an
    // out-of-bounds setBit/parents write for source == num_vertices.
    if(arguments->source >= graph->num_vertices)
    {
        printf(" -----------------------------------------------------\n");
        printf("| %-51s | \n", "ERROR!! CHECK SOURCE RANGE");
        printf(" -----------------------------------------------------\n");
        return stats;
    }

    struct Timer *timer = (struct Timer *) malloc(sizeof(struct Timer));
    struct Timer *timer_iteration = (struct Timer *) malloc(sizeof(struct Timer));
    struct Bitmap *FrontierBitmapCurr = newBitmap(graph->num_vertices);
    struct Bitmap *FrontierBitmapNext = newBitmap(graph->num_vertices);

    graphGridReset(graph);
    uint32_t processed_nodes = 0;
    uint32_t total_processed_nodes = 0;

    // Seed the next-frontier bitmap with the source, promote it to the
    // current frontier, and mark the partitions it touches active.
    Start(timer_iteration);
    setBit(FrontierBitmapNext, arguments->source);
    stats->parents[arguments->source] = arguments->source;
    processed_nodes = getNumOfSetBits(FrontierBitmapNext);
    swapBitmaps(&FrontierBitmapCurr, &FrontierBitmapNext);
    clearBitmap(FrontierBitmapNext);
    breadthFirstSearchSetActivePartitionsBitmap(graph, FrontierBitmapCurr);
    Stop(timer_iteration);

    total_processed_nodes += processed_nodes;
    printf("| %-15u | %-15u | %-15f | \n", stats->iteration++, processed_nodes, Seconds(timer_iteration));
    stats->time_total += Seconds(timer_iteration);

    Start(timer);
    while(processed_nodes) // start while
    {
        Start(timer_iteration);
        breadthFirstSearchStreamEdgesRowGraphGridBitmap(graph, FrontierBitmapCurr, FrontierBitmapNext, stats);
        Stop(timer_iteration);

        // The freshly set bits form the next frontier; swap and clear.
        processed_nodes = getNumOfSetBits(FrontierBitmapNext);
        swapBitmaps(&FrontierBitmapCurr, &FrontierBitmapNext);
        clearBitmap(FrontierBitmapNext);
        breadthFirstSearchSetActivePartitionsBitmap(graph, FrontierBitmapCurr);

        total_processed_nodes += processed_nodes;
        stats->time_total += Seconds(timer_iteration);
        printf("| %-15u | %-15u | %-15f | \n", stats->iteration++, processed_nodes, Seconds(timer_iteration));
    } // end while
    Stop(timer);

    printf(" -----------------------------------------------------\n");
    printf("| %-15s | %-15u | %-15f | \n", "No OverHead", total_processed_nodes, stats->time_total);
    printf(" -----------------------------------------------------\n");
    stats->time_total = Seconds(timer);
    printf(" -----------------------------------------------------\n");
    printf("| %-15s | %-15u | %-15f | \n", "**", total_processed_nodes, Seconds(timer));
    printf(" -----------------------------------------------------\n");

    freeBitmap(FrontierBitmapCurr);
    freeBitmap(FrontierBitmapNext);
    free(timer_iteration);
    free(timer);

    return stats;
}
// BFS over a grid-partitioned graph using bitmap frontiers, streaming
// partitions column-major. Identical structure to the row variant except
// for the edge-streaming order. Returns a heap-allocated BFSStats owned
// by the caller.
struct BFSStats *breadthFirstSearchColumnGraphGridBitmap(struct Arguments *arguments, struct GraphGrid *graph)
{
    struct BFSStats *stats = newBFSStatsGraphGrid(graph);

    printf(" -----------------------------------------------------\n");
    printf("| %-51s | \n", "Starting BFS-Column Bitmap (SOURCE NODE)");
    printf(" -----------------------------------------------------\n");
    printf("| %-51u | \n", arguments->source);
    printf(" -----------------------------------------------------\n");
    printf("| %-15s | %-15s | %-15s | \n", "Iteration", "Nodes", "Time (Seconds)");
    printf(" -----------------------------------------------------\n");

    // Valid ids are 0 .. num_vertices-1: `>=` (not `>`) prevents an
    // out-of-bounds setBit/parents write for source == num_vertices.
    if(arguments->source >= graph->num_vertices)
    {
        printf(" -----------------------------------------------------\n");
        printf("| %-51s | \n", "ERROR!! CHECK SOURCE RANGE");
        printf(" -----------------------------------------------------\n");
        return stats;
    }

    struct Timer *timer = (struct Timer *) malloc(sizeof(struct Timer));
    struct Timer *timer_iteration = (struct Timer *) malloc(sizeof(struct Timer));
    struct Bitmap *FrontierBitmapCurr = newBitmap(graph->num_vertices);
    struct Bitmap *FrontierBitmapNext = newBitmap(graph->num_vertices);

    graphGridReset(graph);
    uint32_t processed_nodes = 0;
    uint32_t total_processed_nodes = 0;

    // Seed the next-frontier bitmap with the source, promote it to the
    // current frontier, and mark the partitions it touches active.
    Start(timer_iteration);
    setBit(FrontierBitmapNext, arguments->source);
    stats->parents[arguments->source] = arguments->source;
    processed_nodes = getNumOfSetBits(FrontierBitmapNext);
    swapBitmaps(&FrontierBitmapCurr, &FrontierBitmapNext);
    clearBitmap(FrontierBitmapNext);
    breadthFirstSearchSetActivePartitionsBitmap(graph, FrontierBitmapCurr);
    Stop(timer_iteration);

    total_processed_nodes += processed_nodes;
    printf("| %-15u | %-15u | %-15f | \n", stats->iteration++, processed_nodes, Seconds(timer_iteration));
    stats->time_total += Seconds(timer_iteration);

    Start(timer);
    while(processed_nodes) // start while
    {
        Start(timer_iteration);
        breadthFirstSearchStreamEdgesColumnGraphGridBitmap(graph, FrontierBitmapCurr, FrontierBitmapNext, stats);
        Stop(timer_iteration);

        // The freshly set bits form the next frontier; swap and clear.
        processed_nodes = getNumOfSetBits(FrontierBitmapNext);
        swapBitmaps(&FrontierBitmapCurr, &FrontierBitmapNext);
        clearBitmap(FrontierBitmapNext);
        breadthFirstSearchSetActivePartitionsBitmap(graph, FrontierBitmapCurr);

        total_processed_nodes += processed_nodes;
        stats->time_total += Seconds(timer_iteration);
        printf("| %-15u | %-15u | %-15f | \n", stats->iteration++, processed_nodes, Seconds(timer_iteration));
    } // end while
    Stop(timer);

    printf(" -----------------------------------------------------\n");
    printf("| %-15s | %-15u | %-15f | \n", "No OverHead", total_processed_nodes, stats->time_total);
    printf(" -----------------------------------------------------\n");
    stats->time_total = Seconds(timer);
    printf(" -----------------------------------------------------\n");
    printf("| %-15s | %-15u | %-15f | \n", "**", total_processed_nodes, Seconds(timer));
    printf(" -----------------------------------------------------\n");

    freeBitmap(FrontierBitmapCurr);
    freeBitmap(FrontierBitmapNext);
    free(timer_iteration);
    free(timer);

    return stats;
}
// function STREAMEDGES(Fe, F)
//     Sum = 0
//     for each active block do            >> block with active edges
//         for each edge ∈ block do
//             if F(edge.source) then
//                 Sum += Fe(edge)
//             end if
//         end for
//     end for
//     return Sum
// end function
// We assume that the edges are not sorted within each partition.
// Streams the grid's partitions row by row using bitmap frontiers.
// Rows are processed serially; the partitions within a row are streamed
// in parallel. Only active, non-empty partitions are visited.
void breadthFirstSearchStreamEdgesRowGraphGridBitmap(struct GraphGrid *graph, struct Bitmap *FrontierBitmapCurr, struct Bitmap *FrontierBitmapNext, struct BFSStats *stats)
{
    uint32_t totalPartitions = graph->grid->num_partitions; // grid is PxP
    uint32_t row;

    for (row = 0; row < totalPartitions; ++row)
    {
        uint32_t col;
        #pragma omp parallel for default(none) shared(row,stats,totalPartitions,FrontierBitmapCurr,FrontierBitmapNext,graph)
        for (col = 0; col < totalPartitions; ++col)
        {
            uint32_t p = (row * totalPartitions) + col;
            if(getBit(graph->grid->activePartitionsMap, p) && graph->grid->partitions[p].num_edges)
            {
                breadthFirstSearchPartitionGraphGridBitmap(graph, &(graph->grid->partitions[p]), FrontierBitmapCurr, FrontierBitmapNext, stats);
            }
        }
    }
}
// Streams the grid's partitions column by column using bitmap frontiers.
// Columns are distributed across threads; each thread walks its column
// top-to-bottom, streaming only active, non-empty partitions.
void breadthFirstSearchStreamEdgesColumnGraphGridBitmap(struct GraphGrid *graph, struct Bitmap *FrontierBitmapCurr, struct Bitmap *FrontierBitmapNext, struct BFSStats *stats)
{
    uint32_t totalPartitions = graph->grid->num_partitions; // grid is PxP

    #pragma omp parallel default(none) shared(stats,totalPartitions,FrontierBitmapCurr,FrontierBitmapNext,graph)
    {
        uint32_t col;
        #pragma omp for
        for (col = 0; col < totalPartitions; ++col)
        {
            uint32_t row;
            for (row = 0; row < totalPartitions; ++row)
            {
                uint32_t p = (row * totalPartitions) + col;
                if(getBit(graph->grid->activePartitionsMap, p) && graph->grid->partitions[p].num_edges)
                {
                    breadthFirstSearchPartitionGraphGridBitmap(graph, &(graph->grid->partitions[p]), FrontierBitmapCurr, FrontierBitmapNext, stats);
                }
            }
        }
    }
}
// Streams one partition's edge list using bitmap frontiers: for every edge
// whose destination is still unvisited (parents < 0) and whose source is in
// the current frontier bitmap, adopt the source as parent, set the distance,
// and atomically set the destination in the next-frontier bitmap.
// NOTE(review): parents/distances are plain writes (the CAS is commented
// out), so threads streaming different partitions can race on the same
// dest; presumably any winning parent is acceptable — verify.
void breadthFirstSearchPartitionGraphGridBitmap(struct GraphGrid *graph, struct Partition *partition, struct Bitmap *FrontierBitmapCurr, struct Bitmap *FrontierBitmapNext, struct BFSStats *stats)
{
uint32_t i;
uint32_t src;
uint32_t dest;
for (i = 0; i < partition->num_edges; ++i)
{
src = partition->edgeList->edges_array_src[i];
dest = partition->edgeList->edges_array_dest[i];
// Negative parent means "not yet visited".
int v_dest = stats->parents[dest];
if((v_dest < 0))
{
if(getBit(FrontierBitmapCurr, src))
{
// if(__sync_bool_compare_and_swap(&stats->parents[dest], v_dest, src))
// {
stats->parents[dest] = src;
stats->distances[dest] = stats->distances[src] + 1;
setBitAtomic(FrontierBitmapNext, dest);
// }
}
}
}
}
// Recompute the active-partition bitmap from a frontier bitmap: every
// vertex whose bit is set marks its owning partitions active for the next
// streaming sweep.
void breadthFirstSearchSetActivePartitionsBitmap(struct GraphGrid *graph, struct Bitmap *FrontierBitmap)
{
    uint32_t vertex;

    graphGridResetActivePartitionsMap(graph->grid);

    #pragma omp parallel for default(none) shared(graph,FrontierBitmap) private(vertex) schedule(dynamic,1024)
    for(vertex = 0; vertex < FrontierBitmap->size; vertex++)
    {
        if(getBit(FrontierBitmap, vertex))
            graphGridSetActivePartitionsMap(graph->grid, vertex);
    }
}
// ********************************************************************************************
// *************** ArrayList DataStructure **************
// ********************************************************************************************
// Dispatch BFS on an adjacency-array-list graph according to
// arguments->pushpull: 0 = pull (bottom-up), 1 = push (top-down),
// anything else = direction-optimized. Returns the stats object produced
// by the selected variant (caller frees).
struct BFSStats *breadthFirstSearchGraphAdjArrayList(struct Arguments *arguments, struct GraphAdjArrayList *graph)
{
    struct BFSStats *stats = NULL;

    switch (arguments->pushpull)
    {
    case 0: // pull / bottom-up
        stats = breadthFirstSearchPullGraphAdjArrayList(arguments, graph);
        break;
    case 1: // push / top-down
        stats = breadthFirstSearchPushGraphAdjArrayList(arguments, graph);
        break;
    case 2: // direction-optimized (also the default)
    default:
        stats = breadthFirstSearchDirectionOptimizedGraphAdjArrayList(arguments, graph);
        break;
    }

    return stats;
}
// breadth-first-search(graph, source)
//     sharedFrontierQueue ← {source}
//     next ← {}
//     parents ← [-1, -1, ..., -1]
//     while sharedFrontierQueue ≠ {} do
//         top-down-step(graph, sharedFrontierQueue, next, parents)
//         sharedFrontierQueue ← next
//         next ← {}
//     end while
//     return parents
// Pull (bottom-up) BFS over an adjacency-array-list graph. The frontier is
// kept in the queue's current/next bitmaps; each iteration scans unvisited
// vertices and adopts any in-frontier neighbor as parent via
// bottomUpStepGraphAdjArrayList. Returns heap-allocated BFSStats owned by
// the caller.
struct BFSStats *breadthFirstSearchPullGraphAdjArrayList(struct Arguments *arguments, struct GraphAdjArrayList *graph)
{
    struct BFSStats *stats = newBFSStatsGraphAdjArrayList(graph);

    printf(" -----------------------------------------------------\n");
    printf("| %-51s | \n", "Starting BFS PULL/BU (SOURCE NODE)");
    printf(" -----------------------------------------------------\n");
    printf("| %-51u | \n", arguments->source);
    printf(" -----------------------------------------------------\n");
    printf("| %-15s | %-15s | %-15s | \n", "Iteration", "Nodes", "Time (Seconds)");
    printf(" -----------------------------------------------------\n");

    // Valid ids are 0 .. num_vertices-1: `>=` (not `>`) prevents an
    // out-of-bounds setBit/parents write for source == num_vertices.
    if(arguments->source >= graph->num_vertices)
    {
        printf(" -----------------------------------------------------\n");
        printf("| %-51s | \n", "ERROR!! CHECK SOURCE RANGE");
        printf(" -----------------------------------------------------\n");
        return stats;
    }

    struct Timer *timer = (struct Timer *) malloc(sizeof(struct Timer));
    struct Timer *timer_inner = (struct Timer *) malloc(sizeof(struct Timer));
    struct ArrayQueue *sharedFrontierQueue = newArrayQueue(graph->num_vertices);
    uint32_t nf = 0; // number of vertices discovered in the last step

    // Seed: put the source in the next bitmap, then promote it to current.
    Start(timer_inner);
    setBit(sharedFrontierQueue->q_bitmap_next, arguments->source);
    sharedFrontierQueue->q_bitmap_next->numSetBits = 1;
    stats->parents[arguments->source] = arguments->source;
    swapBitmaps(&sharedFrontierQueue->q_bitmap, &sharedFrontierQueue->q_bitmap_next);
    clearBitmap(sharedFrontierQueue->q_bitmap_next);
    Stop(timer_inner);

    stats->time_total += Seconds(timer_inner);
    printf("| BU %-12u | %-15u | %-15f | \n", stats->iteration++, ++stats->processed_nodes, Seconds(timer_inner));

    Start(timer);
    while (sharedFrontierQueue->q_bitmap->numSetBits)
    {
        Start(timer_inner);
        nf = bottomUpStepGraphAdjArrayList(graph, sharedFrontierQueue->q_bitmap, sharedFrontierQueue->q_bitmap_next, stats);
        sharedFrontierQueue->q_bitmap_next->numSetBits = nf;
        swapBitmaps(&sharedFrontierQueue->q_bitmap, &sharedFrontierQueue->q_bitmap_next);
        clearBitmap(sharedFrontierQueue->q_bitmap_next);
        Stop(timer_inner);

        //stats
        stats->time_total += Seconds(timer_inner);
        stats->processed_nodes += nf;
        printf("| BU %-12u | %-15u | %-15f | \n", stats->iteration++, nf, Seconds(timer_inner));
    } // end while
    Stop(timer);

    printf(" -----------------------------------------------------\n");
    printf("| %-15s | %-15u | %-15f | \n", "No OverHead", stats->processed_nodes, stats->time_total);
    printf(" -----------------------------------------------------\n");
    stats->time_total = Seconds(timer);
    printf(" -----------------------------------------------------\n");
    printf("| %-15s | %-15u | %-15f | \n", "total", stats->processed_nodes, Seconds(timer));
    printf(" -----------------------------------------------------\n");

    freeArrayQueue(sharedFrontierQueue);
    free(timer);
    free(timer_inner);

    return stats;
}
// breadth-first-search(graph, source)
//     sharedFrontierQueue ← {source}
//     next ← {}
//     parents ← [-1, -1, ..., -1]
//     while sharedFrontierQueue ≠ {} do
//         top-down-step(graph, sharedFrontierQueue, next, parents)
//         sharedFrontierQueue ← next
//         next ← {}
//     end while
//     return parents
// Push (top-down) BFS over an adjacency-array-list graph. The frontier is a
// shared ArrayQueue; per-thread local queues collect discoveries which
// topDownStepGraphAdjArrayList flushes back. Returns heap-allocated
// BFSStats owned by the caller.
struct BFSStats *breadthFirstSearchPushGraphAdjArrayList(struct Arguments *arguments, struct GraphAdjArrayList *graph)
{
    struct BFSStats *stats = newBFSStatsGraphAdjArrayList(graph);

    printf(" -----------------------------------------------------\n");
    printf("| %-51s | \n", "Starting BFS PUSH/TD (SOURCE NODE)");
    printf(" -----------------------------------------------------\n");
    printf("| %-51u | \n", arguments->source);
    printf(" -----------------------------------------------------\n");
    printf("| %-15s | %-15s | %-15s | \n", "Iteration", "Nodes", "Time (Seconds)");
    printf(" -----------------------------------------------------\n");

    // Valid ids are 0 .. num_vertices-1: `>=` (not `>`) prevents an
    // out-of-bounds write to stats->parents[source] below.
    if(arguments->source >= graph->num_vertices)
    {
        printf(" -----------------------------------------------------\n");
        printf("| %-51s | \n", "ERROR!! CHECK SOURCE RANGE");
        printf(" -----------------------------------------------------\n");
        return stats;
    }

    struct Timer *timer = (struct Timer *) malloc(sizeof(struct Timer));
    struct Timer *timer_inner = (struct Timer *) malloc(sizeof(struct Timer));
    struct ArrayQueue *sharedFrontierQueue = newArrayQueue(graph->num_vertices);

    uint32_t P = arguments->algo_numThreads;
    struct ArrayQueue **localFrontierQueues = (struct ArrayQueue **) my_malloc( P * sizeof(struct ArrayQueue *));
    uint32_t i;

    for(i = 0 ; i < P ; i++)
    {
        localFrontierQueues[i] = newArrayQueue(graph->num_vertices);
    }

    // Seed the frontier with the source vertex.
    Start(timer_inner);
    enArrayQueue(sharedFrontierQueue, arguments->source);
    stats->parents[arguments->source] = arguments->source;
    Stop(timer_inner);
    stats->time_total += Seconds(timer_inner);
    printf("| TD %-12u | %-15u | %-15f | \n", stats->iteration++, ++stats->processed_nodes, Seconds(timer_inner));

    Start(timer);
    while(!isEmptyArrayQueue(sharedFrontierQueue)) // start while
    {
        Start(timer_inner);
        topDownStepGraphAdjArrayList(graph, sharedFrontierQueue, localFrontierQueues, stats);
        slideWindowArrayQueue(sharedFrontierQueue);
        Stop(timer_inner);

        //stats collection
        stats->time_total += Seconds(timer_inner);
        stats->processed_nodes += sharedFrontierQueue->tail - sharedFrontierQueue->head;
        printf("| TD %-12u | %-15u | %-15f | \n", stats->iteration++, sharedFrontierQueue->tail - sharedFrontierQueue->head, Seconds(timer_inner));
    } // end while
    Stop(timer);

    printf(" -----------------------------------------------------\n");
    printf("| %-15s | %-15u | %-15f | \n", "No OverHead", stats->processed_nodes, stats->time_total);
    printf(" -----------------------------------------------------\n");
    stats->time_total = Seconds(timer);
    printf(" -----------------------------------------------------\n");
    printf("| %-15s | %-15u | %-15f | \n", "total", stats->processed_nodes, Seconds(timer));
    printf(" -----------------------------------------------------\n");

    for(i = 0 ; i < P ; i++)
    {
        freeArrayQueue(localFrontierQueues[i]);
    }
    free(localFrontierQueues);
    freeArrayQueue(sharedFrontierQueue);
    free(timer);
    free(timer_inner);

    return stats;
}
// breadth-first-search(graph, source)
//     sharedFrontierQueue ← {source}
//     next ← {}
//     parents ← [-1, -1, ..., -1]
//     while sharedFrontierQueue ≠ {} do
//         top-down-step(graph, sharedFrontierQueue, next, parents)
//         sharedFrontierQueue ← next
//         next ← {}
//     end while
//     return parents
// Direction-optimized BFS (Beamer-style) over an adjacency-array-list
// graph: switches between top-down (queue) and bottom-up (bitmap) steps
// using the alpha/beta heuristics on mf (edges from the frontier), mu
// (unexplored edges remaining) and nf (frontier size). Returns
// heap-allocated BFSStats owned by the caller.
struct BFSStats *breadthFirstSearchDirectionOptimizedGraphAdjArrayList(struct Arguments *arguments, struct GraphAdjArrayList *graph)
{
    struct BFSStats *stats = newBFSStatsGraphAdjArrayList(graph);

    printf(" -----------------------------------------------------\n");
    printf("| %-51s | \n", "Starting BFS (SOURCE NODE)");
    printf(" -----------------------------------------------------\n");
    printf("| %-51u | \n", arguments->source);
    printf(" -----------------------------------------------------\n");
    printf("| %-15s | %-15s | %-15s | \n", "Iteration", "Nodes", "Time (Seconds)");
    printf(" -----------------------------------------------------\n");

    // Valid ids are 0 .. num_vertices-1: `>=` (not `>`) prevents an
    // out-of-bounds read of vertices[source].out_degree below.
    if(arguments->source >= graph->num_vertices)
    {
        printf(" -----------------------------------------------------\n");
        printf("| %-51s | \n", "ERROR!! CHECK SOURCE RANGE");
        printf(" -----------------------------------------------------\n");
        return stats;
    }

    struct Timer *timer = (struct Timer *) malloc(sizeof(struct Timer));
    struct Timer *timer_inner = (struct Timer *) malloc(sizeof(struct Timer));
    struct ArrayQueue *sharedFrontierQueue = newArrayQueue(graph->num_vertices);
    struct Bitmap *bitmapCurr = newBitmap(graph->num_vertices);
    struct Bitmap *bitmapNext = newBitmap(graph->num_vertices);

    uint32_t P = arguments->algo_numThreads;
    uint32_t mu = graph->num_edges; // number of edges to check from sharedFrontierQueue
    uint32_t mf = graph->vertices[arguments->source].out_degree; // number of edges from unexplored verticies
    uint32_t nf = 0; // number of vertices in sharedFrontierQueue
    uint32_t nf_prev = 0; // frontier size of the previous bottom-up step
    uint32_t n = graph->num_vertices; // number of nodes
    uint32_t alpha = 15; // top-down -> bottom-up switch threshold
    uint32_t beta = 18;  // bottom-up -> top-down switch threshold

    struct ArrayQueue **localFrontierQueues = (struct ArrayQueue **) my_malloc( P * sizeof(struct ArrayQueue *));
    uint32_t i;
    for(i = 0 ; i < P ; i++)
    {
        localFrontierQueues[i] = newArrayQueue(graph->num_vertices);
    }

    // Seed the frontier with the source vertex.
    Start(timer_inner);
    enArrayQueue(sharedFrontierQueue, arguments->source);
    stats->parents[arguments->source] = arguments->source;
    Stop(timer_inner);
    stats->time_total += Seconds(timer_inner);
    printf("| TD %-12u | %-15u | %-15f | \n", stats->iteration++, ++stats->processed_nodes, Seconds(timer_inner));

    Start(timer);
    while(!isEmptyArrayQueue(sharedFrontierQueue)) // start while
    {
        if(mf > (mu / alpha))
        {
            // Frontier is edge-heavy: convert to bitmap and go bottom-up.
            Start(timer_inner);
            arrayQueueToBitmap(sharedFrontierQueue, bitmapCurr);
            nf = sizeArrayQueue(sharedFrontierQueue);
            Stop(timer_inner);
            printf("| E %-12s | %-15s | %-15f | \n", " ", " ", Seconds(timer_inner));

            do
            {
                Start(timer_inner);
                nf_prev = nf;
                nf = bottomUpStepGraphAdjArrayList(graph, bitmapCurr, bitmapNext, stats);
                swapBitmaps(&bitmapCurr, &bitmapNext);
                clearBitmap(bitmapNext);
                Stop(timer_inner);

                //stats collection
                stats->time_total += Seconds(timer_inner);
                stats->processed_nodes += nf;
                printf("| BU %-12u | %-15u | %-15f | \n", stats->iteration++, nf, Seconds(timer_inner));
            }
            while(( nf > nf_prev) ||  // frontier still growing;
                  ( nf > (n / beta))); // or still large relative to n

            // Convert the bitmap frontier back into the shared queue.
            Start(timer_inner);
            bitmapToArrayQueue(bitmapCurr, sharedFrontierQueue, localFrontierQueues);
            Stop(timer_inner);
            printf("| C %-12s | %-15s | %-15f | \n", " ", " ", Seconds(timer_inner));

            mf = 1;
        }
        else
        {
            // Frontier is small: stay top-down.
            Start(timer_inner);
            mu -= mf;
            mf = topDownStepGraphAdjArrayList(graph, sharedFrontierQueue, localFrontierQueues, stats);
            slideWindowArrayQueue(sharedFrontierQueue);
            Stop(timer_inner);

            //stats collection
            stats->time_total += Seconds(timer_inner);
            stats->processed_nodes += sharedFrontierQueue->tail - sharedFrontierQueue->head;
            printf("| TD %-12u | %-15u | %-15f | \n", stats->iteration++, sharedFrontierQueue->tail - sharedFrontierQueue->head, Seconds(timer_inner));
        }
    } // end while
    Stop(timer);

    printf(" -----------------------------------------------------\n");
    printf("| %-15s | %-15u | %-15f | \n", "No OverHead", stats->processed_nodes, stats->time_total);
    printf(" -----------------------------------------------------\n");
    stats->time_total = Seconds(timer);
    printf(" -----------------------------------------------------\n");
    printf("| %-15s | %-15u | %-15f | \n", "total", stats->processed_nodes, Seconds(timer));
    printf(" -----------------------------------------------------\n");

    for(i = 0 ; i < P ; i++)
    {
        freeArrayQueue(localFrontierQueues[i]);
    }
    free(localFrontierQueues);
    freeArrayQueue(sharedFrontierQueue);
    freeBitmap(bitmapNext);
    freeBitmap(bitmapCurr);
    free(timer);
    free(timer_inner);

    return stats;
}
// top-down-step(graph, sharedFrontierQueue, next, parents)
// for v ∈ sharedFrontierQueue do
// for u ∈ neighbors[v] do
// if parents[u] = -1 then
// parents[u] ← v
// next ← next ∪ {u}
// end if
// end for
// end for
// One top-down (push) BFS step: every thread scans a share of the current
// frontier window [head, tail), claims unvisited out-neighbors with a CAS,
// and enqueues them in its local queue, which is flushed to the shared
// queue at the end. Returns mf, accumulated from -(u_parent) of each
// claimed vertex.
// NOTE(review): mf being the sum of -(u_parent) only yields "edges from
// newly discovered vertices" if parents[] is initialized to a negative
// encoding of each vertex's degree — confirm against the BFSStats
// initialization, which is outside this file section.
uint32_t topDownStepGraphAdjArrayList(struct GraphAdjArrayList *graph, struct ArrayQueue *sharedFrontierQueue, struct ArrayQueue **localFrontierQueues, struct BFSStats *stats)
{
uint32_t v;
uint32_t u;
uint32_t i;
uint32_t j;
uint32_t mf = 0;
uint32_t out_degree;
struct EdgeList *outNodes;
#pragma omp parallel default (none) private(out_degree,outNodes,u,v,j,i) shared(stats,localFrontierQueues,graph,sharedFrontierQueue,mf)
{
uint32_t t_id = omp_get_thread_num();
struct ArrayQueue *localFrontierQueue = localFrontierQueues[t_id];
#pragma omp for reduction(+:mf) schedule(auto)
for(i = sharedFrontierQueue->head ; i < sharedFrontierQueue->tail; i++)
{
v = sharedFrontierQueue->queue[i];
// v = deArrayQueue(sharedFrontierQueue);
outNodes = graph->vertices[v].outNodes;
out_degree = graph->vertices[v].out_degree;
for(j = 0 ; j < out_degree ; j++)
{
u = outNodes->edges_array_dest[j];
// Negative parent means "not yet visited".
int u_parent = stats->parents[u];
if(u_parent < 0 )
{
// CAS guarantees exactly one thread claims u.
if(__sync_bool_compare_and_swap(&stats->parents[u], u_parent, v))
{
enArrayQueue(localFrontierQueue, u);
stats->distances[u] = stats->distances[v] + 1;
mf += -(u_parent);
}
}
}
}
flushArrayQueueToShared(localFrontierQueue, sharedFrontierQueue);
}
return mf;
}
// bottom-up-step(graph, sharedFrontierQueue, next, parents)
// for v ∈ vertices do
// if parents[v] = -1 then
// for u ∈ neighbors[v] do
// if u ∈ sharedFrontierQueue then
// parents[v] ← u
// next ← next ∪ {v}
// break
// end if
// end for
// end if
// end for
// Bottom-up (pull) BFS step over the adjacency-array-list representation.
// Every *unvisited* vertex v (parents[v] < 0) scans its neighbours —
// in-neighbours when the graph is directed, out-neighbours otherwise —
// and adopts the first one present in the current frontier bitmap, then
// marks itself in the next-frontier bitmap and stops scanning.
// parents[v]/distances[v] are written without atomics because each v is
// owned by exactly one loop iteration; setBitAtomic is required because
// bitmap words are shared between vertices.
// Returns nf, the number of vertices added to the next frontier.
uint32_t bottomUpStepGraphAdjArrayList(struct GraphAdjArrayList *graph, struct Bitmap *bitmapCurr, struct Bitmap *bitmapNext, struct BFSStats *stats)
{
    uint32_t v;
    uint32_t u;
    uint32_t j;
    // uint32_t processed_nodes = bitmapCurr->numSetBits;
    uint32_t nf = 0; // number of vertices in sharedFrontierQueue
    // stats->processed_nodes += processed_nodes;
    uint32_t degree;
    struct EdgeList *Nodes;
    #pragma omp parallel for default(none) private(Nodes,j,u,v,degree) shared(stats,bitmapCurr,bitmapNext,graph) reduction(+:nf) schedule(dynamic, 1024)
    for(v = 0 ; v < graph->num_vertices ; v++)
    {
        if(stats->parents[v] < 0) // optimization: only unvisited vertices search for a parent
        {
#if DIRECTED // will look at the other neighbours if directed by using inverse edge list
            Nodes = graph->vertices[v].inNodes;
            degree = graph->vertices[v].in_degree;
#else
            Nodes = graph->vertices[v].outNodes;
            degree = graph->vertices[v].out_degree;
#endif
            for(j = 0 ; j < (degree) ; j++)
            {
                u = Nodes->edges_array_dest[j];
                if(getBit(bitmapCurr, u))
                {
                    stats->parents[v] = u;
                    setBitAtomic(bitmapNext, v);
                    stats->distances[v] = stats->distances[u] + 1;
                    nf++;
                    break; // first frontier neighbour wins; no need to keep scanning
                }
            }
        }
    }
    return nf;
}
// ********************************************************************************************
// *************** LinkedList DataStructure **************
// ********************************************************************************************
// Dispatch BFS on the adjacency-linked-list graph according to the
// push/pull strategy selected in arguments->pushpull:
//   0 -> pull (bottom-up only), 1 -> push (top-down only),
//   2 or any other value -> direction-optimized (hybrid).
// Returns the BFSStats produced by the chosen variant (caller frees).
struct BFSStats *breadthFirstSearchGraphAdjLinkedList(struct Arguments *arguments, struct GraphAdjLinkedList *graph)
{
    struct BFSStats *stats = NULL;
    if (arguments->pushpull == 0)
        stats = breadthFirstSearchPullGraphAdjLinkedList(arguments, graph);
    else if (arguments->pushpull == 1)
        stats = breadthFirstSearchPushGraphAdjLinkedList(arguments, graph);
    else
        stats = breadthFirstSearchDirectionOptimizedGraphAdjLinkedList(arguments, graph);
    return stats;
}
// breadth-first-search(graph, arguments->source)
// sharedFrontierQueue ← {arguments->source}
// next ← {}
// parents ← [-1,-1,. . . -1]
// while sharedFrontierQueue ≠ {} do
// top-down-step(graph, sharedFrontierQueue, next, parents)
// sharedFrontierQueue ← next
// next ← {}
// end while
// return parents
// Pull-only (bottom-up) BFS on the adjacency-linked-list graph.  The
// frontier lives in the queue's bitmaps: the source bit is set, then each
// iteration runs one bottom-up step and swaps current/next bitmaps until
// the frontier is empty.  Per-iteration and total timing/statistics are
// printed as a table.  Returns a freshly allocated BFSStats (caller frees);
// parents[] and distances[] are filled for every reached vertex.
struct BFSStats *breadthFirstSearchPullGraphAdjLinkedList(struct Arguments *arguments, struct GraphAdjLinkedList *graph)
{
    struct BFSStats *stats = newBFSStatsGraphAdjLinkedList(graph);
    printf(" -----------------------------------------------------\n");
    printf("| %-51s | \n", "Starting BFS PULL/BU (SOURCE NODE)");
    printf(" -----------------------------------------------------\n");
    printf("| %-51u | \n", arguments->source);
    printf(" -----------------------------------------------------\n");
    printf("| %-15s | %-15s | %-15s | \n", "Iteration", "Nodes", "Time (Seconds)");
    printf(" -----------------------------------------------------\n");
    // Valid vertex ids are 0 .. num_vertices-1: the previous '>' comparison
    // let source == num_vertices through, overrunning the bitmap and the
    // parents/distances arrays below.  '>=' rejects it.
    if(arguments->source >= graph->num_vertices)
    {
        printf(" -----------------------------------------------------\n");
        printf("| %-51s | \n", "ERROR!! CHECK SOURCE RANGE");
        printf(" -----------------------------------------------------\n");
        return stats;
    }
    struct Timer *timer = (struct Timer *) malloc(sizeof(struct Timer));
    struct Timer *timer_inner = (struct Timer *) malloc(sizeof(struct Timer));
    struct ArrayQueue *sharedFrontierQueue = newArrayQueue(graph->num_vertices);
    uint32_t nf = 0; // number of vertices in the current frontier
    // Seed the frontier with the source vertex (its own parent by convention).
    Start(timer_inner);
    setBit(sharedFrontierQueue->q_bitmap_next, arguments->source);
    sharedFrontierQueue->q_bitmap_next->numSetBits = 1;
    stats->parents[arguments->source] = arguments->source;
    swapBitmaps(&sharedFrontierQueue->q_bitmap, &sharedFrontierQueue->q_bitmap_next);
    clearBitmap(sharedFrontierQueue->q_bitmap_next);
    Stop(timer_inner);
    stats->time_total += Seconds(timer_inner);
    printf("| BU %-12u | %-15u | %-15f | \n", stats->iteration++, ++stats->processed_nodes, Seconds(timer_inner));
    Start(timer);
    while (sharedFrontierQueue->q_bitmap->numSetBits)
    {
        Start(timer_inner);
        nf = bottomUpStepGraphAdjLinkedList(graph, sharedFrontierQueue->q_bitmap, sharedFrontierQueue->q_bitmap_next, stats);
        sharedFrontierQueue->q_bitmap_next->numSetBits = nf;
        swapBitmaps(&sharedFrontierQueue->q_bitmap, &sharedFrontierQueue->q_bitmap_next);
        clearBitmap(sharedFrontierQueue->q_bitmap_next);
        Stop(timer_inner);
        // stats collection
        stats->time_total += Seconds(timer_inner);
        stats->processed_nodes += nf;
        printf("| BU %-12u | %-15u | %-15f | \n", stats->iteration++, nf, Seconds(timer_inner));
    } // end while
    Stop(timer);
    printf(" -----------------------------------------------------\n");
    printf("| %-15s | %-15u | %-15f | \n", "No OverHead", stats->processed_nodes, stats->time_total);
    printf(" -----------------------------------------------------\n");
    stats->time_total = Seconds(timer); // replace algorithm-only time with wall-clock total
    printf(" -----------------------------------------------------\n");
    printf("| %-15s | %-15u | %-15f | \n", "total", stats->processed_nodes, Seconds(timer));
    printf(" -----------------------------------------------------\n");
    freeArrayQueue(sharedFrontierQueue);
    free(timer);
    free(timer_inner);
    return stats;
}
// breadth-first-search(graph, arguments->source)
// sharedFrontierQueue ← {arguments->source}
// next ← {}
// parents ← [-1,-1,. . . -1]
// while sharedFrontierQueue ≠ {} do
// top-down-step(graph, sharedFrontierQueue, next, parents)
// sharedFrontierQueue ← next
// next ← {}
// end while
// return parents
// Push-only (top-down) BFS on the adjacency-linked-list graph.  The frontier
// is a shared array queue; each iteration runs one top-down step (threads
// stage discoveries in per-thread local queues) and slides the queue window
// to the newly appended vertices.  Per-iteration and total timing/statistics
// are printed as a table.  Returns a freshly allocated BFSStats (caller
// frees); parents[] and distances[] are filled for every reached vertex.
struct BFSStats *breadthFirstSearchPushGraphAdjLinkedList(struct Arguments *arguments, struct GraphAdjLinkedList *graph)
{
    struct BFSStats *stats = newBFSStatsGraphAdjLinkedList(graph);
    printf(" -----------------------------------------------------\n");
    printf("| %-51s | \n", "Starting BFS PUSH/TD (SOURCE NODE)");
    printf(" -----------------------------------------------------\n");
    printf("| %-51u | \n", arguments->source);
    printf(" -----------------------------------------------------\n");
    printf("| %-15s | %-15s | %-15s | \n", "Iteration", "Nodes", "Time (Seconds)");
    printf(" -----------------------------------------------------\n");
    // Valid vertex ids are 0 .. num_vertices-1: the previous '>' comparison
    // let source == num_vertices through and overran parents[]/the queue.
    if(arguments->source >= graph->num_vertices)
    {
        printf(" -----------------------------------------------------\n");
        printf("| %-51s | \n", "ERROR!! CHECK SOURCE RANGE");
        printf(" -----------------------------------------------------\n");
        return stats;
    }
    struct Timer *timer = (struct Timer *) malloc(sizeof(struct Timer));
    struct Timer *timer_inner = (struct Timer *) malloc(sizeof(struct Timer));
    struct ArrayQueue *sharedFrontierQueue = newArrayQueue(graph->num_vertices);
    uint32_t P = arguments->algo_numThreads;
    // One local staging queue per thread, drained into the shared queue at
    // the end of each top-down step.
    struct ArrayQueue **localFrontierQueues = (struct ArrayQueue **) my_malloc( P * sizeof(struct ArrayQueue *));
    uint32_t i;
    for(i = 0 ; i < P ; i++)
    {
        localFrontierQueues[i] = newArrayQueue(graph->num_vertices);
    }
    // Seed the frontier with the source vertex (its own parent by convention).
    Start(timer_inner);
    enArrayQueue(sharedFrontierQueue, arguments->source);
    // setBit(sharedFrontierQueue->q_bitmap,arguments->source);
    stats->parents[arguments->source] = arguments->source;
    Stop(timer_inner);
    stats->time_total += Seconds(timer_inner);
    // graph->vertices[arguments->source].visited = 1;
    printf("| TD %-12u | %-15u | %-15f | \n", stats->iteration++, ++stats->processed_nodes, Seconds(timer_inner));
    Start(timer);
    while(!isEmptyArrayQueue(sharedFrontierQueue)) // start while
    {
        Start(timer_inner);
        topDownStepGraphAdjLinkedList(graph, sharedFrontierQueue, localFrontierQueues, stats);
        slideWindowArrayQueue(sharedFrontierQueue); // drop processed window; keep newly discovered tail
        Stop(timer_inner);
        // stats collection
        stats->time_total += Seconds(timer_inner);
        stats->processed_nodes += sharedFrontierQueue->tail - sharedFrontierQueue->head;
        printf("| TD %-12u | %-15u | %-15f | \n", stats->iteration++, sharedFrontierQueue->tail - sharedFrontierQueue->head, Seconds(timer_inner));
    } // end while
    Stop(timer);
    printf(" -----------------------------------------------------\n");
    printf("| %-15s | %-15u | %-15f | \n", "No OverHead", stats->processed_nodes, stats->time_total);
    printf(" -----------------------------------------------------\n");
    stats->time_total = Seconds(timer); // replace algorithm-only time with wall-clock total
    printf(" -----------------------------------------------------\n");
    printf("| %-15s | %-15u | %-15f | \n", "total", stats->processed_nodes, Seconds(timer));
    printf(" -----------------------------------------------------\n");
    for(i = 0 ; i < P ; i++)
    {
        freeArrayQueue(localFrontierQueues[i]);
    }
    free(localFrontierQueues);
    freeArrayQueue(sharedFrontierQueue);
    free(timer);
    free(timer_inner);
    return stats;
}
// breadth-first-search(graph, arguments->source)
// sharedFrontierQueue ← {arguments->source}
// next ← {}
// parents ← [-1,-1,. . . -1]
// while sharedFrontierQueue ≠ {} do
// top-down-step(graph, sharedFrontierQueue, next, parents)
// sharedFrontierQueue ← next
// next ← {}
// end while
// return parents
// Direction-optimizing BFS (hybrid push/pull) on the adjacency-linked-list
// graph.  Heuristic state: mu = edges left to explore, mf = out-edges of the
// current frontier, nf = frontier size, n = vertex count.  While
// mf <= mu/alpha the cheap top-down step runs; once the frontier becomes
// edge-heavy (mf > mu/alpha) the frontier is converted to a bitmap and
// bottom-up steps run until the frontier stops growing and drops below
// n/beta, after which it converts back to a queue.  Returns a freshly
// allocated BFSStats (caller frees).
struct BFSStats *breadthFirstSearchDirectionOptimizedGraphAdjLinkedList(struct Arguments *arguments, struct GraphAdjLinkedList *graph)
{
    struct BFSStats *stats = newBFSStatsGraphAdjLinkedList(graph);
    printf(" -----------------------------------------------------\n");
    printf("| %-51s | \n", "Starting BFS PULL/PUSH (SOURCE NODE)");
    printf(" -----------------------------------------------------\n");
    printf("| %-51u | \n", arguments->source);
    printf(" -----------------------------------------------------\n");
    printf("| %-15s | %-15s | %-15s | \n", "Iteration", "Nodes", "Time (Seconds)");
    printf(" -----------------------------------------------------\n");
    // Valid vertex ids are 0 .. num_vertices-1: the previous '>' comparison
    // let source == num_vertices through; graph->vertices[source] below would
    // then read one past the end of the vertex array.
    if(arguments->source >= graph->num_vertices)
    {
        printf(" -----------------------------------------------------\n");
        printf("| %-51s | \n", "ERROR!! CHECK SOURCE RANGE");
        printf(" -----------------------------------------------------\n");
        return stats;
    }
    struct Timer *timer = (struct Timer *) malloc(sizeof(struct Timer));
    struct Timer *timer_inner = (struct Timer *) malloc(sizeof(struct Timer));
    struct ArrayQueue *sharedFrontierQueue = newArrayQueue(graph->num_vertices);
    struct Bitmap *bitmapCurr = newBitmap(graph->num_vertices);
    struct Bitmap *bitmapNext = newBitmap(graph->num_vertices);
    uint32_t P = arguments->algo_numThreads;
    uint32_t mu = graph->num_edges; // number of edges to check from sharedFrontierQueue
    uint32_t mf = graph->vertices[arguments->source].out_degree; // number of edges from unexplored vertices
    uint32_t nf = 0; // number of vertices in sharedFrontierQueue
    uint32_t nf_prev = 0; // frontier size of the previous bottom-up iteration
    uint32_t n = graph->num_vertices; // number of nodes
    uint32_t alpha = 15; // push->pull switch threshold (Beamer's alpha)
    uint32_t beta = 18;  // pull->push switch threshold (Beamer's beta)
    struct ArrayQueue **localFrontierQueues = (struct ArrayQueue **) my_malloc( P * sizeof(struct ArrayQueue *));
    uint32_t i;
    for(i = 0 ; i < P ; i++)
    {
        localFrontierQueues[i] = newArrayQueue(graph->num_vertices);
    }
    // Seed the frontier with the source vertex (its own parent by convention).
    Start(timer_inner);
    enArrayQueue(sharedFrontierQueue, arguments->source);
    // setBit(sharedFrontierQueue->q_bitmap,arguments->source);
    stats->parents[arguments->source] = arguments->source;
    Stop(timer_inner);
    stats->time_total += Seconds(timer_inner);
    // graph->vertices[arguments->source].visited = 1;
    printf("| TD %-12u | %-15u | %-15f | \n", stats->iteration++, ++stats->processed_nodes, Seconds(timer_inner));
    Start(timer);
    while(!isEmptyArrayQueue(sharedFrontierQueue)) // start while
    {
        if(mf > (mu / alpha)) // frontier is edge-heavy: switch to bottom-up
        {
            Start(timer_inner);
            arrayQueueToBitmap(sharedFrontierQueue, bitmapCurr);
            nf = sizeArrayQueue(sharedFrontierQueue);
            Stop(timer_inner);
            printf("| E %-12s | %-15s | %-15f | \n", " ", " ", Seconds(timer_inner));
            do
            {
                Start(timer_inner);
                nf_prev = nf;
                nf = bottomUpStepGraphAdjLinkedList(graph, bitmapCurr, bitmapNext, stats);
                swapBitmaps(&bitmapCurr, &bitmapNext);
                clearBitmap(bitmapNext);
                Stop(timer_inner);
                // stats collection
                stats->time_total += Seconds(timer_inner);
                stats->processed_nodes += nf;
                printf("| BU %-12u | %-15u | %-15f | \n", stats->iteration++, nf, Seconds(timer_inner));
            }
            while(( nf > nf_prev) || // growing;
                  ( nf > (n / beta))); // or still large relative to the graph
            Start(timer_inner);
            bitmapToArrayQueue(bitmapCurr, sharedFrontierQueue, localFrontierQueues);
            Stop(timer_inner);
            printf("| C %-12s | %-15s | %-15f | \n", " ", " ", Seconds(timer_inner));
            mf = 1; // force at least one top-down step before re-evaluating the switch
        }
        else
        {
            Start(timer_inner);
            mu -= mf; // edges just explored no longer count as unexplored
            mf = topDownStepGraphAdjLinkedList(graph, sharedFrontierQueue, localFrontierQueues, stats);
            slideWindowArrayQueue(sharedFrontierQueue);
            Stop(timer_inner);
            // stats collection
            stats->time_total += Seconds(timer_inner);
            stats->processed_nodes += sharedFrontierQueue->tail - sharedFrontierQueue->head;
            printf("| TD %-12u | %-15u | %-15f | \n", stats->iteration++, sharedFrontierQueue->tail - sharedFrontierQueue->head, Seconds(timer_inner));
        }
    } // end while
    Stop(timer);
    printf(" -----------------------------------------------------\n");
    printf("| %-15s | %-15u | %-15f | \n", "No OverHead", stats->processed_nodes, stats->time_total);
    printf(" -----------------------------------------------------\n");
    stats->time_total = Seconds(timer); // replace algorithm-only time with wall-clock total
    printf(" -----------------------------------------------------\n");
    printf("| %-15s | %-15u | %-15f | \n", "total", stats->processed_nodes, Seconds(timer));
    printf(" -----------------------------------------------------\n");
    for(i = 0 ; i < P ; i++)
    {
        freeArrayQueue(localFrontierQueues[i]);
    }
    free(localFrontierQueues);
    freeArrayQueue(sharedFrontierQueue);
    freeBitmap(bitmapNext);
    freeBitmap(bitmapCurr);
    free(timer);
    free(timer_inner);
    return stats;
}
// top-down-step(graph, sharedFrontierQueue, next, parents)
// for v ∈ sharedFrontierQueue do
// for u ∈ neighbors[v] do
// if parents[u] = -1 then
// parents[u] ← v
// next ← next ∪ {u}
// end if
// end for
// end for
// Top-down (push) BFS step over the adjacency-linked-list representation.
// Identical strategy to the array-list variant, but out-neighbours are
// reached by walking the per-vertex linked list (outNodes->next) instead of
// indexing an edge array.  Each thread claims a slice of the shared frontier
// and races to adopt unvisited neighbours via compare-and-swap on
// parents[u]; winners stage the neighbour in a per-thread local queue that
// is flushed into the shared queue at the end.
// Returns mf: the sum of -(old parents[u]) over newly claimed vertices.
// NOTE(review): the negative seeds in parents[] appear to encode degree
// information for the direction-optimizing heuristic — confirm encoding.
uint32_t topDownStepGraphAdjLinkedList(struct GraphAdjLinkedList *graph, struct ArrayQueue *sharedFrontierQueue, struct ArrayQueue **localFrontierQueues, struct BFSStats *stats)
{
    uint32_t v;
    uint32_t u;
    uint32_t i;
    uint32_t j;
    uint32_t mf = 0;          // accumulated -(old parent value) of vertices discovered this step
    uint32_t out_degree;
    struct AdjLinkedListNode *outNodes;
    #pragma omp parallel default (none) private(out_degree,outNodes,u,v,j,i) shared(stats,localFrontierQueues,graph,sharedFrontierQueue,mf)
    {
        uint32_t t_id = omp_get_thread_num();
        struct ArrayQueue *localFrontierQueue = localFrontierQueues[t_id]; // per-thread staging queue
        #pragma omp for reduction(+:mf) schedule(auto)
        for(i = sharedFrontierQueue->head ; i < sharedFrontierQueue->tail; i++)
        {
            v = sharedFrontierQueue->queue[i];
            // v = deArrayQueue(sharedFrontierQueue);
            outNodes = graph->vertices[v].outNodes;
            out_degree = graph->vertices[v].out_degree;
            for(j = 0 ; j < out_degree ; j++)
            {
                u = outNodes->dest;
                outNodes = outNodes->next; // traverse pointer to next out-edge
                int u_parent = stats->parents[u]; // snapshot; the CAS below detects concurrent claims
                if(u_parent < 0 )                 // negative parent == not visited yet
                {
                    if(__sync_bool_compare_and_swap(&stats->parents[u], u_parent, v))
                    {
                        enArrayQueue(localFrontierQueue, u);
                        stats->distances[u] = stats->distances[v] + 1;
                        mf += -(u_parent);
                    }
                }
            }
        }
        flushArrayQueueToShared(localFrontierQueue, sharedFrontierQueue); // publish this thread's discoveries
    }
    return mf;
}
// bottom-up-step(graph, sharedFrontierQueue, next, parents)
// for v ∈ vertices do
// if parents[v] = -1 then
// for u ∈ neighbors[v] do
// if u ∈ sharedFrontierQueue then
// parents[v] ← u
// next ← next ∪ {v}
// break
// end if
// end for
// end if
// end for
// Bottom-up (pull) BFS step over the adjacency-linked-list representation.
// Every *unvisited* vertex v walks its neighbour list (in-neighbours when
// the graph is directed, out-neighbours otherwise) and adopts the first
// neighbour found in the current frontier bitmap, then marks itself in the
// next-frontier bitmap and stops.  parents[v]/distances[v] need no atomics
// (each v is owned by one iteration); setBitAtomic is required because
// bitmap words are shared between vertices.
// Returns nf, the number of vertices added to the next frontier.
uint32_t bottomUpStepGraphAdjLinkedList(struct GraphAdjLinkedList *graph, struct Bitmap *bitmapCurr, struct Bitmap *bitmapNext, struct BFSStats *stats)
{
    uint32_t v;
    uint32_t u;
    uint32_t j;
    // uint32_t processed_nodes = bitmapCurr->numSetBits;
    uint32_t nf = 0; // number of vertices in sharedFrontierQueue
    // stats->processed_nodes += processed_nodes;
    uint32_t degree;
    struct AdjLinkedListNode *Nodes;
    #pragma omp parallel for default(none) private(Nodes,j,u,v,degree) shared(stats,bitmapCurr,bitmapNext,graph) reduction(+:nf) schedule(dynamic, 1024)
    for(v = 0 ; v < graph->num_vertices ; v++)
    {
        if(stats->parents[v] < 0) // optimization: only unvisited vertices search for a parent
        {
#if DIRECTED // will look at the other neighbours if directed by using inverse edge list
            Nodes = graph->vertices[v].inNodes;
            degree = graph->vertices[v].in_degree;
#else
            Nodes = graph->vertices[v].outNodes;
            degree = graph->vertices[v].out_degree;
#endif
            for(j = 0 ; j < (degree) ; j++)
            {
                u = Nodes->dest;
                Nodes = Nodes->next; // traverse pointer to next edge
                if(getBit(bitmapCurr, u))
                {
                    stats->parents[v] = u;
                    setBitAtomic(bitmapNext, v);
                    stats->distances[v] = stats->distances[u] + 1;
                    nf++;
                    break; // first frontier neighbour wins
                }
            }
        }
    }
    return nf;
}
|
Mnist_Conv_Test.h | #pragma once
#include "omp.h"
#include "LoadCSV.h"
#include "LayerQL.h"
#include "PooLayerQL.h"
#include "Conv_LayerQL.h"
#include "Sigmoid_LayerQL.h"
#include "Dim_ReduceQL.h"
#include "Fullconnect_LayerQL.h"
#include "Inter_LayerQL.h"
#include <math.h>
#include "Relu_LayerQL.h"
#include "SoftMax_LayerQL.h"
#include "Data_AugmentationQL.h"
namespace tinyDNN
{
class Mnist_Conv_Test
{
public:
// Test driver: constructing the object immediately runs the one experiment
// left uncommented; the other experiments are kept commented out so they can
// be toggled quickly during development.
Mnist_Conv_Test()
{
    //this->mnist_conv_01();
    //this->mnist_conv_02();
    //this->mnist_conv_03();
    //this->mnist_conv_04();
    //this->cifar_10_conv_01();
    this->cifar_10_conv_01_01(); // currently enabled experiment
    //this->cifar_10_conv_02();
    //this->mnist_conv_05();
    //this->accu = 0.0;
    //this->cifar_10_conv_03();
}
~Mnist_Conv_Test() {}
// Accuracy accumulator. NOTE(review): not written by any method visible in
// this chunk (only referenced in a commented-out line) — confirm usage.
double accu;
// Sanity check: build the full network, print each layer's output once for
// inspection, then repeatedly train on a single MNIST sample (index 4) to
// verify the forward/backward plumbing of every layer type is correct.
void mnist_conv_01()
{
    LoadCSV::loadCSVTrain();
    LoadCSV::loadCSV_Train_Vector();
    // Input layer fed with one 28x28 training image.
    std::shared_ptr<Inter_LayerQL<double>> in_01 = std::make_shared<Inter_LayerQL<double>>(28, 28);
    in_01->forward_Matrix_Vector.push_back(LoadCSV::conv_Input_Vector[4]);
    std::shared_ptr<LayerQL<double>> pool_01 = std::make_shared<PooLayerQL<double>>(Pool_Layer, 14, 14); // pooling layer (28x28 -> 14x14)
    std::shared_ptr<Inter_LayerQL<double>> o_01 = in_01 + pool_01; // operator+ wires layer into the network
    //********************************************************************** pooling layer
    //1111111111111111111111111111111111111111111111111111111111111111111111
    pool_01->calForward();
    std::cout << o_01->forward_Matrix_Vector[0]->getMatrixQL() << std::endl;
    std::cout << (o_01->forward_Matrix_Vector[0]->getMatrixQL() * 9).cast<int>() << std::endl;
    //**********************************************************************
    std::shared_ptr<LayerQL<double>> conv_01 = std::make_shared<Conv_LayerQL<double>>(Conv_Layer, 8, 14, 14, 5, 1, 2); // convolution layer (8 maps, 5x5 kernel, stride 1, pad 2)
    std::shared_ptr<Inter_LayerQL<double>> o_02 = o_01 + conv_01;
    //********************************************************************** convolution layer
    //2222222222222222222222222222222222222222222222222222222222222222222222
    conv_01->calForward();
    std::cout << o_02->forward_Matrix_Vector[0]->getMatrixQL() << std::endl;
    std::cout << "*******" << std::endl;
    std::cout << o_02->forward_Matrix_Vector[7]->getMatrixQL() << std::endl;
    //**********************************************************************
    std::shared_ptr<LayerQL<double>> sigmoid_01 = std::make_shared<Sigmoid_LayerQL<double>>(Sigmoid_Layer); // sigmoid activation layer
    std::shared_ptr<Inter_LayerQL<double>> o_03 = o_02 + sigmoid_01;
    //********************************************************************** sigmoid layer
    //3333333333333333333333333333333333333333333333333333333333333333333333
    sigmoid_01->calForward(1); // argument 1 selects the multi-map (vector) forward path
    std::cout << o_03->forward_Matrix_Vector[0]->getMatrixQL() << std::endl;
    std::cout << "*******" << std::endl;
    std::cout << o_03->forward_Matrix_Vector[7]->getMatrixQL() << std::endl;
    //**********************************************************************
    std::shared_ptr<LayerQL<double>> pool_02 = std::make_shared<PooLayerQL<double>>(Pool_Layer, 7, 7); // pooling layer (14x14 -> 7x7)
    std::shared_ptr<Inter_LayerQL<double>> o_04 = o_03 + pool_02;
    //********************************************************************** pooling layer
    //4444444444444444444444444444444444444444444444444444444444444444444444
    pool_02->calForward();
    std::cout << o_04->forward_Matrix_Vector[0]->getMatrixQL() << std::endl;
    std::cout << "*******" << std::endl;
    std::cout << o_04->forward_Matrix_Vector[7]->getMatrixQL() << std::endl;
    //**********************************************************************
    std::shared_ptr<LayerQL<double>> dim_reduce_01 = std::make_shared<Dim_ReduceQL<double>>(Dim_Reduce_Layer, 8, 7, 7); // flatten layer (8 x 7 x 7 -> vector)
    std::shared_ptr<Inter_LayerQL<double>> o_05 = o_04 + dim_reduce_01;
    //********************************************************************** flatten layer
    //55555555555555555555555555555555555555555555555555555555555555555555555
    dim_reduce_01->calForward();
    std::cout << o_05->forward_Matrix->getMatrixQL() << std::endl;
    //**********************************************************************
    std::shared_ptr<LayerQL<double>> fullconnect_01 = std::make_shared<Fullconnect_LayerQL<double>>(Fullconnect_Layer, 392, 10); // fully connected layer (392 -> 10)
    std::shared_ptr<Inter_LayerQL<double>> o_06 = o_05 + fullconnect_01;
    //********************************************************************** fully connected layer
    //66666666666666666666666666666666666666666666666666666666666666666666666
    fullconnect_01->calForward();
    std::cout << o_06->forward_Matrix->getMatrixQL() << std::endl;
    //**********************************************************************
    std::shared_ptr<LayerQL<double>> sigmoid_02 = std::make_shared<Sigmoid_LayerQL<double>>(Sigmoid_Layer); // sigmoid activation layer
    std::shared_ptr<Inter_LayerQL<double>> o_07 = o_06 + sigmoid_02;
    //********************************************************************** sigmoid layer
    //77777777777777777777777777777777777777777777777777777777777777777777777
    sigmoid_02->calForward();
    std::cout << o_07->forward_Matrix->getMatrixQL() << std::endl;
    //**********************************************************************
    std::shared_ptr<LayerQL<double>> lossLayer_01 = std::make_shared<MSE_Loss_LayerQL<double>>(MSE_Loss_Layer); // MSE loss layer
    std::shared_ptr<Inter_LayerQL<double>> o_08 = o_07 + lossLayer_01;
    //********************************************************************** loss layer
    //88888888888888888888888888888888888888888888888888888888888888888888888
    lossLayer_01->calForward();
    // Target: one-hot label row for sample 4.
    o_08->backward_Matrix->setMatrixQL() = LoadCSV::output_Layer->backward_Matrix->getMatrixQL().row(4);
    // Overfit the single sample: forward pass (1..8), backward pass (8..1),
    // then weight updates on the trainable layers (FC and conv).  The call
    // order must match the wiring above exactly.
    for (int i = 0; i < 100; i++)
    {
        //1
        pool_01->calForward();
        //2
        conv_01->calForward();
        //3
        sigmoid_01->calForward(1);
        //4
        pool_02->calForward();
        //5
        dim_reduce_01->calForward();
        //6
        fullconnect_01->calForward();
        //7
        sigmoid_02->calForward();
        //8
        lossLayer_01->calForward();
        //8
        lossLayer_01->calBackward();
        //7
        sigmoid_02->calBackward();
        //6
        fullconnect_01->calBackward();
        //5
        dim_reduce_01->calBackward();
        //4
        pool_02->calBackward();
        //3
        sigmoid_01->calBackward(1);
        //2
        conv_01->calBackward();
        //1
        pool_01->calBackward();
        //6
        fullconnect_01->upMatrix(); // apply FC weight update
        //2
        conv_01->upMatrix();        // apply conv kernel update
    }
    std::cout << o_07->forward_Matrix->getMatrixQL() << std::endl; // final prediction after overfitting
}
// Train on the MNIST TRAIN set and validate on MNIST TRAIN samples:
// same network as mnist_conv_01, trained for one epoch over 55000 samples,
// then predictions printed for samples 1000..1099.
void mnist_conv_02()
{
    LoadCSV::loadCSVTrain();
    LoadCSV::loadCSV_Train_Vector();
    // Input layer fed with one 28x28 training image (sample 4 used for the
    // initial layer-by-layer inspection below).
    std::shared_ptr<Inter_LayerQL<double>> in_01 = std::make_shared<Inter_LayerQL<double>>(28, 28);
    in_01->forward_Matrix_Vector.push_back(LoadCSV::conv_Input_Vector[4]);
    std::shared_ptr<LayerQL<double>> pool_01 = std::make_shared<PooLayerQL<double>>(Pool_Layer, 14, 14); // pooling layer (28x28 -> 14x14)
    std::shared_ptr<Inter_LayerQL<double>> o_01 = in_01 + pool_01;
    //********************************************************************** pooling layer
    //1111111111111111111111111111111111111111111111111111111111111111111111
    pool_01->calForward();
    std::cout << o_01->forward_Matrix_Vector[0]->getMatrixQL() << std::endl;
    std::cout << (o_01->forward_Matrix_Vector[0]->getMatrixQL() * 9).cast<int>() << std::endl;
    //**********************************************************************
    std::shared_ptr<LayerQL<double>> conv_01 = std::make_shared<Conv_LayerQL<double>>(Conv_Layer, 8, 14, 14, 5, 1, 2); // convolution layer (8 maps, 5x5 kernel, stride 1, pad 2)
    std::shared_ptr<Inter_LayerQL<double>> o_02 = o_01 + conv_01;
    //********************************************************************** convolution layer
    //2222222222222222222222222222222222222222222222222222222222222222222222
    conv_01->calForward();
    std::cout << o_02->forward_Matrix_Vector[0]->getMatrixQL() << std::endl;
    std::cout << "*******" << std::endl;
    std::cout << o_02->forward_Matrix_Vector[7]->getMatrixQL() << std::endl;
    //**********************************************************************
    std::shared_ptr<LayerQL<double>> sigmoid_01 = std::make_shared<Sigmoid_LayerQL<double>>(Sigmoid_Layer); // sigmoid activation layer
    std::shared_ptr<Inter_LayerQL<double>> o_03 = o_02 + sigmoid_01;
    //********************************************************************** sigmoid layer
    //3333333333333333333333333333333333333333333333333333333333333333333333
    sigmoid_01->calForward(1); // argument 1 selects the multi-map (vector) forward path
    std::cout << o_03->forward_Matrix_Vector[0]->getMatrixQL() << std::endl;
    std::cout << "*******" << std::endl;
    std::cout << o_03->forward_Matrix_Vector[7]->getMatrixQL() << std::endl;
    //**********************************************************************
    std::shared_ptr<LayerQL<double>> pool_02 = std::make_shared<PooLayerQL<double>>(Pool_Layer, 7, 7); // pooling layer (14x14 -> 7x7)
    std::shared_ptr<Inter_LayerQL<double>> o_04 = o_03 + pool_02;
    //********************************************************************** pooling layer
    //4444444444444444444444444444444444444444444444444444444444444444444444
    pool_02->calForward();
    std::cout << o_04->forward_Matrix_Vector[0]->getMatrixQL() << std::endl;
    std::cout << "*******" << std::endl;
    std::cout << o_04->forward_Matrix_Vector[7]->getMatrixQL() << std::endl;
    //**********************************************************************
    std::shared_ptr<LayerQL<double>> dim_reduce_01 = std::make_shared<Dim_ReduceQL<double>>(Dim_Reduce_Layer, 8, 7, 7); // flatten layer (8 x 7 x 7 -> vector)
    std::shared_ptr<Inter_LayerQL<double>> o_05 = o_04 + dim_reduce_01;
    //********************************************************************** flatten layer
    //55555555555555555555555555555555555555555555555555555555555555555555555
    dim_reduce_01->calForward();
    std::cout << o_05->forward_Matrix->getMatrixQL() << std::endl;
    //**********************************************************************
    std::shared_ptr<LayerQL<double>> fullconnect_01 = std::make_shared<Fullconnect_LayerQL<double>>(Fullconnect_Layer, 392, 10); // fully connected layer (392 -> 10)
    std::shared_ptr<Inter_LayerQL<double>> o_06 = o_05 + fullconnect_01;
    //********************************************************************** fully connected layer
    //66666666666666666666666666666666666666666666666666666666666666666666666
    fullconnect_01->calForward();
    std::cout << o_06->forward_Matrix->getMatrixQL() << std::endl;
    //**********************************************************************
    std::shared_ptr<LayerQL<double>> sigmoid_02 = std::make_shared<Sigmoid_LayerQL<double>>(Sigmoid_Layer); // sigmoid activation layer
    std::shared_ptr<Inter_LayerQL<double>> o_07 = o_06 + sigmoid_02;
    //********************************************************************** sigmoid layer
    //77777777777777777777777777777777777777777777777777777777777777777777777
    sigmoid_02->calForward();
    std::cout << o_07->forward_Matrix->getMatrixQL() << std::endl;
    //**********************************************************************
    std::shared_ptr<LayerQL<double>> lossLayer_01 = std::make_shared<MSE_Loss_LayerQL<double>>(MSE_Loss_Layer); // MSE loss layer
    std::shared_ptr<Inter_LayerQL<double>> o_08 = o_07 + lossLayer_01;
    //********************************************************************** loss layer
    //88888888888888888888888888888888888888888888888888888888888888888888888
    lossLayer_01->calForward();
    o_08->backward_Matrix->setMatrixQL() = LoadCSV::output_Layer->backward_Matrix->getMatrixQL().row(4);
    // Training: one epoch (outer loop runs once) over 55000 training samples.
    for (int i = 0; i < 1; i++)
    {
        for (int j = 0; j < 55000; j++)
        {
            // Load next sample: replace the input image and the one-hot target.
            in_01->forward_Matrix_Vector.clear();
            in_01->forward_Matrix_Vector.push_back(LoadCSV::conv_Input_Vector[j]);
            o_08->backward_Matrix->setMatrixQL() = LoadCSV::output_Layer->backward_Matrix->getMatrixQL().row(j);
            //1
            pool_01->calForward();
            //2
            conv_01->calForward();
            //3
            sigmoid_01->calForward(1);
            //4
            pool_02->calForward();
            //5
            dim_reduce_01->calForward();
            //6
            fullconnect_01->calForward();
            //7
            sigmoid_02->calForward();
            //8
            lossLayer_01->calForward();
            //8
            lossLayer_01->calBackward();
            //7
            sigmoid_02->calBackward();
            //6
            fullconnect_01->calBackward();
            //5
            dim_reduce_01->calBackward();
            //4
            pool_02->calBackward();
            //3
            sigmoid_01->calBackward(1);
            //2
            conv_01->calBackward();
            //1
            pool_01->calBackward();
            //6
            fullconnect_01->upMatrix(); // apply FC weight update
            //2
            conv_01->upMatrix();        // apply conv kernel update
        }
    }
    // Validation: run samples 1000..1099 and print each prediction.
    // NOTE(review): these come from the TRAIN set and the loop still calls
    // the backward/update passes, so it keeps training while "validating".
    for (int k = 1000; k < 1100; k++)
    {
        // Load next sample.
        in_01->forward_Matrix_Vector.clear();
        in_01->forward_Matrix_Vector.push_back(LoadCSV::conv_Input_Vector[k]);
        o_08->backward_Matrix->setMatrixQL() = LoadCSV::output_Layer->backward_Matrix->getMatrixQL().row(k);
        //1
        pool_01->calForward();
        //2
        conv_01->calForward();
        //3
        sigmoid_01->calForward(1);
        //4
        pool_02->calForward();
        //5
        dim_reduce_01->calForward();
        //6
        fullconnect_01->calForward();
        //7
        sigmoid_02->calForward();
        //8
        lossLayer_01->calForward();
        //8
        lossLayer_01->calBackward();
        //7
        sigmoid_02->calBackward();
        //6
        fullconnect_01->calBackward();
        //5
        dim_reduce_01->calBackward();
        //4
        pool_02->calBackward();
        //3
        sigmoid_01->calBackward(1);
        //2
        conv_01->calBackward();
        //1
        pool_01->calBackward();
        //6
        fullconnect_01->upMatrix();
        //2
        conv_01->upMatrix();
        std::cout << k + 1 << " : *****************************************" << std::endl;
        std::cout << (o_07->forward_Matrix->getMatrixQL() * 9).cast<int>() << std::endl;
    }
}
// Train on the MNIST TRAIN set and validate on MNIST TRAIN samples
void mnist_conv_03()
{
LoadCSV::loadCSVTrain();
LoadCSV::loadCSV_Train_Vector();
std::shared_ptr<Inter_LayerQL<double>> in_01 = std::make_shared<Inter_LayerQL<double>>(28, 28);
in_01->forward_Matrix_Vector.push_back(LoadCSV::conv_Input_Vector[4]);
std::shared_ptr<LayerQL<double>> pool_01 = std::make_shared<PooLayerQL<double>>(Pool_Layer, 14, 14); //�ػ���
std::shared_ptr<Inter_LayerQL<double>> o_01 = in_01 + pool_01;
//**********************************************************************�ػ���
//1111111111111111111111111111111111111111111111111111111111111111111111
pool_01->calForward();
std::cout << o_01->forward_Matrix_Vector[0]->getMatrixQL() << std::endl;
std::cout << (o_01->forward_Matrix_Vector[0]->getMatrixQL() * 9).cast<int>() << std::endl;
//**********************************************************************
std::shared_ptr<LayerQL<double>> conv_01 = std::make_shared<Conv_LayerQL<double>>(Conv_Layer, 8, 14, 14, 5, 1, 2); //������
std::shared_ptr<Inter_LayerQL<double>> o_02 = o_01 + conv_01;
//**********************************************************************������
//2222222222222222222222222222222222222222222222222222222222222222222222
conv_01->calForward();
std::cout << o_02->forward_Matrix_Vector[0]->getMatrixQL() << std::endl;
std::cout << "*******" << std::endl;
std::cout << o_02->forward_Matrix_Vector[7]->getMatrixQL() << std::endl;
//**********************************************************************
std::shared_ptr<LayerQL<double>> sigmoid_01 = std::make_shared<Sigmoid_LayerQL<double>>(Sigmoid_Layer); //Sigmoid��
std::shared_ptr<Inter_LayerQL<double>> o_03 = o_02 + sigmoid_01;
//**********************************************************************Sigmoid��
//3333333333333333333333333333333333333333333333333333333333333333333333
sigmoid_01->calForward(1);
std::cout << o_03->forward_Matrix_Vector[0]->getMatrixQL() << std::endl;
std::cout << "*******" << std::endl;
std::cout << o_03->forward_Matrix_Vector[7]->getMatrixQL() << std::endl;
//**********************************************************************
std::shared_ptr<LayerQL<double>> pool_02 = std::make_shared<PooLayerQL<double>>(Pool_Layer, 7, 7); //�ػ���
std::shared_ptr<Inter_LayerQL<double>> o_04 = o_03 + pool_02;
//**********************************************************************�ػ���
//4444444444444444444444444444444444444444444444444444444444444444444444
pool_02->calForward();
std::cout << o_04->forward_Matrix_Vector[0]->getMatrixQL() << std::endl;
std::cout << "*******" << std::endl;
std::cout << o_04->forward_Matrix_Vector[7]->getMatrixQL() << std::endl;
//**********************************************************************
std::shared_ptr<LayerQL<double>> dim_reduce_01 = std::make_shared<Dim_ReduceQL<double>>(Dim_Reduce_Layer, 8, 7, 7); //����
std::shared_ptr<Inter_LayerQL<double>> o_05 = o_04 + dim_reduce_01;
//**********************************************************************����
//55555555555555555555555555555555555555555555555555555555555555555555555
dim_reduce_01->calForward();
std::cout << o_05->forward_Matrix->getMatrixQL() << std::endl;
//**********************************************************************
std::shared_ptr<LayerQL<double>> fullconnect_01 = std::make_shared<Fullconnect_LayerQL<double>>(Fullconnect_Layer, 392, 10); //ȫ���Ӳ�
std::shared_ptr<Inter_LayerQL<double>> o_06 = o_05 + fullconnect_01;
//**********************************************************************ȫ���Ӳ�
//66666666666666666666666666666666666666666666666666666666666666666666666
fullconnect_01->calForward();
std::cout << o_06->forward_Matrix->getMatrixQL() << std::endl;
//**********************************************************************
std::shared_ptr<LayerQL<double>> sigmoid_02 = std::make_shared<Sigmoid_LayerQL<double>>(Sigmoid_Layer); //Sigmoid��
std::shared_ptr<Inter_LayerQL<double>> o_07 = o_06 + sigmoid_02;
//**********************************************************************Sigmoid��
//77777777777777777777777777777777777777777777777777777777777777777777777
sigmoid_02->calForward();
std::cout << o_07->forward_Matrix->getMatrixQL() << std::endl;
//**********************************************************************
std::shared_ptr<LayerQL<double>> lossLayer_01 = std::make_shared<MSE_Loss_LayerQL<double>>(MSE_Loss_Layer); //Loss��
std::shared_ptr<Inter_LayerQL<double>> o_08 = o_07 + lossLayer_01;
//**********************************************************************Loss��
//88888888888888888888888888888888888888888888888888888888888888888888888
lossLayer_01->calForward();
o_08->backward_Matrix->setMatrixQL() = LoadCSV::output_Layer->backward_Matrix->getMatrixQL().row(4);
// ������س�ʼʱ��
DWORD load_time = GetTickCount();
for (int i = 0; i < 10; i++)
{
for (int j = 0; j < 55000; j++)
{
//���
in_01->forward_Matrix_Vector.clear();
in_01->forward_Matrix_Vector.push_back(LoadCSV::conv_Input_Vector[j]);
o_08->backward_Matrix->setMatrixQL() = LoadCSV::output_Layer->backward_Matrix->getMatrixQL().row(j);
//1
pool_01->calForward();
//2
conv_01->calForward();
//3
sigmoid_01->calForward(1);
//4
pool_02->calForward();
//5
dim_reduce_01->calForward();
//6
fullconnect_01->calForward();
//7
sigmoid_02->calForward();
//8
lossLayer_01->calForward();
//8
lossLayer_01->calBackward();
//7
sigmoid_02->calBackward();
//6
fullconnect_01->calBackward();
//5
dim_reduce_01->calBackward();
//4
pool_02->calBackward();
//3
sigmoid_01->calBackward(1);
//2
conv_01->calBackward();
//1
pool_01->calBackward();
//6
fullconnect_01->upMatrix();
//2
conv_01->upMatrix();
}
}
//*******************************************����
LoadCSV::loadCSVTest();
LoadCSV::loadCSV_Test_Vector();
double numTotal = 0;
for (int k = 0; k < 10000; k++)
{
//���
in_01->forward_Matrix_Vector.clear();
in_01->forward_Matrix_Vector.push_back(LoadCSV::conv_Input_Vector_T[k]);
o_08->backward_Matrix->setMatrixQL() = LoadCSV::output_Layer_T->backward_Matrix->getMatrixQL().row(k);
//1
pool_01->calForward(); //forward
//2
conv_01->calForward();
//3
sigmoid_01->calForward(1);
//4
pool_02->calForward();
//5
dim_reduce_01->calForward();
//6
fullconnect_01->calForward();
//7
sigmoid_02->calForward();
//8
lossLayer_01->calForward();
////8
//lossLayer_01->calBackward(); //back
////7
//sigmoid_02->calBackward();
////6
//fullconnect_01->calBackward();
////5
//dim_reduce_01->calBackward();
////4
//pool_02->calBackward();
////3
//sigmoid_01->calBackward(1);
////2
//conv_01->calBackward();
////1
//pool_01->calBackward();
////6
//fullconnect_01->upMatrix(); //up
////2
//conv_01->upMatrix();
//����õ������ֵλ��
int maxRow, maxColumn;
lossLayer_01->left_Layer->forward_Matrix->getMatrixQL().maxCoeff(&maxRow, &maxColumn);
//Lable�����ֵλ��
int maxRow_T, maxColumn_T;
lossLayer_01->right_Layer->backward_Matrix->getMatrixQL().maxCoeff(&maxRow_T, &maxColumn_T);
//�ж��Ƿ���ȣ�����ȣ���+1
if (maxColumn == maxColumn_T)
{
numTotal++;
}
}
//��ȷ��
std::cout << numTotal / 10000.00 << std::endl;
//ѵ���Ͳ�������ʱ��
DWORD star_time = GetTickCount();
//��������ʱ��
std::cout << "����������ʱ��Ϊ��" << (star_time - load_time) << "ms." << std::endl;
}
//Compute with two stacked convolution layers to verify their backpropagation correctness, validated on MNIST TRAIN data
void mnist_conv_04()
{
// Verifies backpropagation through a network with TWO stacked convolution
// layers on MNIST. Pipeline: input(28x28) -> pool(14x14) -> conv(8 maps, 5x5)
// -> sigmoid -> pool(7x7) -> conv(8 maps, 3x3) -> dim-reduce ->
// fully-connected(8*7*7 -> 10) -> sigmoid -> MSE loss.
// Trains one pass over 55000 training samples, then reports accuracy on the
// 10000-sample test set.
LoadCSV::loadCSVTrain();
LoadCSV::loadCSV_Train_Vector();
std::shared_ptr<Inter_LayerQL<double>> in_01 = std::make_shared<Inter_LayerQL<double>>(28, 28);
in_01->forward_Matrix_Vector.push_back(LoadCSV::conv_Input_Vector[4]);
std::shared_ptr<LayerQL<double>> pool_01 = std::make_shared<PooLayerQL<double>>(Pool_Layer, 14, 14); //pooling layer
std::shared_ptr<Inter_LayerQL<double>> o_01 = in_01 + pool_01;
// Smoke-test each stage on training sample 4 while the graph is being built,
// printing intermediate outputs for visual inspection.
// stage 1: pooling
pool_01->calForward();
std::cout << o_01->forward_Matrix_Vector[0]->getMatrixQL() << std::endl;
std::cout << (o_01->forward_Matrix_Vector[0]->getMatrixQL() * 9).cast<int>() << std::endl;
std::shared_ptr<LayerQL<double>> conv_01 = std::make_shared<Conv_LayerQL<double>>(Conv_Layer, 8, 14, 14, 5, 1, 2); //convolution layer 1
std::shared_ptr<Inter_LayerQL<double>> o_02 = o_01 + conv_01;
// stage 2: first convolution
conv_01->calForward();
std::cout << o_02->forward_Matrix_Vector[0]->getMatrixQL() << std::endl;
std::cout << "*******" << std::endl;
std::cout << o_02->forward_Matrix_Vector[7]->getMatrixQL() << std::endl;
std::shared_ptr<LayerQL<double>> sigmoid_01 = std::make_shared<Sigmoid_LayerQL<double>>(Sigmoid_Conv_Layer); //Sigmoid layer (conv variant)
std::shared_ptr<Inter_LayerQL<double>> o_03 = o_02 + sigmoid_01;
// stage 3: sigmoid over all feature maps
sigmoid_01->calForward(1);
std::cout << o_03->forward_Matrix_Vector[0]->getMatrixQL() << std::endl;
std::cout << "*******" << std::endl;
std::cout << o_03->forward_Matrix_Vector[7]->getMatrixQL() << std::endl;
std::shared_ptr<LayerQL<double>> pool_02 = std::make_shared<PooLayerQL<double>>(Pool_Layer, 7, 7); //pooling layer
std::shared_ptr<Inter_LayerQL<double>> o_04 = o_03 + pool_02;
// stage 4: pooling down to 7x7
pool_02->calForward();
std::cout << o_04->forward_Matrix_Vector[0]->getMatrixQL() << std::endl;
std::cout << "*******" << std::endl;
std::cout << o_04->forward_Matrix_Vector[7]->getMatrixQL() << std::endl;
std::shared_ptr<LayerQL<double>> conv_02 = std::make_shared<Conv_LayerQL<double>>(Conv_Layer, 8, 7, 7, 3, 8, 1); //convolution layer 2 (the layer under test)
std::shared_ptr<Inter_LayerQL<double>> o_04_02 = o_04 + conv_02;
conv_02->calForward();
std::shared_ptr<LayerQL<double>> dim_reduce_01 = std::make_shared<Dim_ReduceQL<double>>(Dim_Reduce_Layer, 8, 7, 7); //dim-reduce (flatten) layer
std::shared_ptr<Inter_LayerQL<double>> o_05 = o_04_02 + dim_reduce_01;
// stage 5: flatten feature maps to a vector
dim_reduce_01->calForward();
std::cout << o_05->forward_Matrix->getMatrixQL() << std::endl;
std::shared_ptr<LayerQL<double>> fullconnect_01 = std::make_shared<Fullconnect_LayerQL<double>>(Fullconnect_Layer, 8 * 7 * 7, 10); //fully-connected layer
std::shared_ptr<Inter_LayerQL<double>> o_06 = o_05 + fullconnect_01;
// stage 6: fully-connected projection to 10 classes
fullconnect_01->calForward();
std::cout << o_06->forward_Matrix->getMatrixQL() << std::endl;
std::shared_ptr<LayerQL<double>> sigmoid_02 = std::make_shared<Sigmoid_LayerQL<double>>(Sigmoid_Layer); //Sigmoid layer
std::shared_ptr<Inter_LayerQL<double>> o_07 = o_06 + sigmoid_02;
// stage 7: output sigmoid
sigmoid_02->calForward();
std::cout << o_07->forward_Matrix->getMatrixQL() << std::endl;
std::shared_ptr<LayerQL<double>> lossLayer_01 = std::make_shared<MSE_Loss_LayerQL<double>>(MSE_Loss_Layer); //loss layer
std::shared_ptr<Inter_LayerQL<double>> o_08 = o_07 + lossLayer_01;
// stage 8: MSE loss; feed label row 4 for the smoke test
lossLayer_01->calForward();
o_08->backward_Matrix->setMatrixQL() = LoadCSV::output_Layer->backward_Matrix->getMatrixQL().row(4);
// Learning rates for the trainable layers.
conv_01->upConv = 0.5;
conv_02->upConv = 0.5;
fullconnect_01->upFull = 0.15;
// Record start time for the train+test timing report printed at the end.
DWORD load_time = GetTickCount();
for (int i = 0; i < 1; i++)
{
for (int j = 0; j < 55000; j++)
{
// Load training sample j and its one-hot label.
in_01->forward_Matrix_Vector.clear();
in_01->forward_Matrix_Vector.push_back(LoadCSV::conv_Input_Vector[j]);
o_08->backward_Matrix->setMatrixQL() = LoadCSV::output_Layer->backward_Matrix->getMatrixQL().row(j);
// Forward pass, stage 1..8 (explicit calls; layer order is significant).
pool_01->calForward();
conv_01->calForward();
sigmoid_01->calForward(1);
pool_02->calForward();
conv_02->calForward();
dim_reduce_01->calForward();
fullconnect_01->calForward();
sigmoid_02->calForward();
lossLayer_01->calForward();
// Backward pass, stage 8..1 (reverse order).
lossLayer_01->calBackward();
sigmoid_02->calBackward();
fullconnect_01->calBackward();
dim_reduce_01->calBackward();
conv_02->calBackward();
pool_02->calBackward();
sigmoid_01->calBackward(1);
conv_01->calBackward();
pool_01->calBackward();
// Weight updates.
// NOTE(review): conv_02->upMatrix() is never called, so the second
// convolution layer's weights are never updated here — confirm this is
// intentional (the test may only be checking gradient flow through it).
fullconnect_01->upMatrix();
conv_01->upMatrix();
}
}
// Evaluation over the 10000-sample test set (forward passes only).
LoadCSV::loadCSVTest();
LoadCSV::loadCSV_Test_Vector();
double numTotal = 0;
for (int k = 0; k < 10000; k++)
{
// Load test sample k and its one-hot label.
in_01->forward_Matrix_Vector.clear();
in_01->forward_Matrix_Vector.push_back(LoadCSV::conv_Input_Vector_T[k]);
o_08->backward_Matrix->setMatrixQL() = LoadCSV::output_Layer_T->backward_Matrix->getMatrixQL().row(k);
// Forward pass only, stage 1..8.
pool_01->calForward();
conv_01->calForward();
sigmoid_01->calForward(1);
pool_02->calForward();
conv_02->calForward();
dim_reduce_01->calForward();
fullconnect_01->calForward();
sigmoid_02->calForward();
lossLayer_01->calForward();
// Predicted class = column of the max network output.
int maxRow, maxColumn;
lossLayer_01->left_Layer->forward_Matrix->getMatrixQL().maxCoeff(&maxRow, &maxColumn);
// True class = column of the max label entry.
int maxRow_T, maxColumn_T;
lossLayer_01->right_Layer->backward_Matrix->getMatrixQL().maxCoeff(&maxRow_T, &maxColumn_T);
// Count a correct classification when the columns agree.
if (maxColumn == maxColumn_T)
{
numTotal++;
}
}
// Accuracy over the test set.
std::cout << numTotal / 10000.00 << std::endl;
// End time of training and testing.
DWORD star_time = GetTickCount();
// Elapsed-time report (string below is mojibake from the original encoding;
// left byte-identical on purpose).
std::cout << "����������ʱ��Ϊ��" << (star_time - load_time) << "ms." << std::endl;
}
//Network built from conv/pooling/Sigmoid/dim-reduce layers plus a fully-connected layer; verifies correctness of the whole network
void mnist_conv_05()
{
// End-to-end MNIST training using the layer registry: the graph is wired once
// with operator+, then forward/backward passes iterate the global
// NetQL<double>::layerQLVector instead of calling each layer explicitly.
// Pipeline: input(28x28) -> pool -> conv -> sigmoid -> pool -> conv ->
// sigmoid -> dim-reduce -> fully-connected(8*7*7 -> 10) -> sigmoid -> MSE loss.
LoadCSV::loadCSVTrain();
LoadCSV::loadCSV_Train_Vector();
LoadCSV::loadCSVTest();
LoadCSV::loadCSV_Test_Vector();
std::shared_ptr<Inter_LayerQL<double>> in_01 = std::make_shared<Inter_LayerQL<double>>(28, 28);
std::shared_ptr<LayerQL<double>> pool_01 = std::make_shared<PooLayerQL<double>>(Pool_Layer, 14, 14); //pooling layer
std::shared_ptr<Inter_LayerQL<double>> o_01 = in_01 + pool_01;
std::shared_ptr<LayerQL<double>> conv_01 = std::make_shared<Conv_LayerQL<double>>(Conv_Layer, 8, 14, 14, 5, 1, 2); //convolution layer 1
std::shared_ptr<Inter_LayerQL<double>> o_02 = o_01 + conv_01;
std::shared_ptr<LayerQL<double>> sigmoid_01 = std::make_shared<Sigmoid_LayerQL<double>>(Sigmoid_Conv_Layer); //Sigmoid layer (conv variant)
std::shared_ptr<Inter_LayerQL<double>> o_03 = o_02 + sigmoid_01;
std::shared_ptr<LayerQL<double>> pool_02 = std::make_shared<PooLayerQL<double>>(Pool_Layer, 7, 7); //pooling layer
std::shared_ptr<Inter_LayerQL<double>> o_04 = o_03 + pool_02;
std::shared_ptr<LayerQL<double>> conv_02 = std::make_shared<Conv_LayerQL<double>>(Conv_Layer, 8, 7, 7, 3, 8, 1); //convolution layer 2
std::shared_ptr<Inter_LayerQL<double>> o_05 = o_04 + conv_02;
std::shared_ptr<LayerQL<double>> sigmoid_02 = std::make_shared<Sigmoid_LayerQL<double>>(Sigmoid_Conv_Layer); //Sigmoid layer (conv variant)
std::shared_ptr<Inter_LayerQL<double>> o_06 = o_05 + sigmoid_02;
std::shared_ptr<LayerQL<double>> dim_reduce_01 = std::make_shared<Dim_ReduceQL<double>>(Dim_Reduce_Layer, 8, 7, 7); //dim-reduce (flatten) layer
std::shared_ptr<Inter_LayerQL<double>> o_07 = o_06 + dim_reduce_01;
std::shared_ptr<LayerQL<double>> fullconnect_01 = std::make_shared<Fullconnect_LayerQL<double>>(Fullconnect_Layer, 8 * 7 * 7, 10); //fully-connected layer
std::shared_ptr<Inter_LayerQL<double>> o_08 = o_07 + fullconnect_01;
std::shared_ptr<LayerQL<double>> sigmoid_03 = std::make_shared<Sigmoid_LayerQL<double>>(Sigmoid_Layer); //Sigmoid layer
std::shared_ptr<Inter_LayerQL<double>> o_09 = o_08 + sigmoid_03;
std::shared_ptr<LayerQL<double>> lossLayer_01 = std::make_shared<MSE_Loss_LayerQL<double>>(MSE_Loss_Layer); //loss layer
std::shared_ptr<Inter_LayerQL<double>> o_10 = o_09 + lossLayer_01;
// Record start time for the train+test timing report printed at the end.
DWORD load_time = GetTickCount();
for (int i = 0; i < 1; i++)
{
// Learning-rate schedule: decays by a factor of 10 every 2 epochs
// (only one epoch runs with the current loop bound).
conv_01->upConv = 0.5 / pow(10, i / 2);
conv_02->upConv = 0.5 / pow(10, i / 2);
fullconnect_01->upFull = 0.12 / pow(10, i / 2);
for (int j = 0; j < 55000; j++)
{
// Load training sample j and its one-hot label.
in_01->forward_Matrix_Vector.clear();
in_01->forward_Matrix_Vector.push_back(LoadCSV::conv_Input_Vector[j]);
o_10->backward_Matrix->setMatrixQL() = LoadCSV::output_Layer->backward_Matrix->getMatrixQL().row(j);
// Forward pass: iterate every registered layer front to back.
for (auto k = NetQL<double>::layerQLVector.begin(); k != NetQL<double>::layerQLVector.end(); k++)
{
(*k)->calForward();
}
// Backward pass + weight update: iterate back to front.
for (auto k = NetQL<double>::layerQLVector.rbegin(); k != NetQL<double>::layerQLVector.rend(); k++)
{
(*k)->calBackward();
(*k)->upMatrix();
}
}
}
// Evaluation over the 10000-sample test set (forward passes only).
double numTotal = 0;
for (int k = 0; k < 10000; k++)
{
// Load test sample k and its one-hot label.
in_01->forward_Matrix_Vector.clear();
in_01->forward_Matrix_Vector.push_back(LoadCSV::conv_Input_Vector_T[k]);
o_10->backward_Matrix->setMatrixQL() = LoadCSV::output_Layer_T->backward_Matrix->getMatrixQL().row(k);
// Forward pass through the whole network.
// NOTE(review): the inner iterator shadows the outer loop variable `k`;
// harmless here, but worth renaming.
for (auto k = NetQL<double>::layerQLVector.begin(); k != NetQL<double>::layerQLVector.end(); k++)
{
(*k)->calForward();
}
// Predicted class = column of the max network output.
int maxRow, maxColumn;
lossLayer_01->left_Layer->forward_Matrix->getMatrixQL().maxCoeff(&maxRow, &maxColumn);
// True class = column of the max label entry.
int maxRow_T, maxColumn_T;
lossLayer_01->right_Layer->backward_Matrix->getMatrixQL().maxCoeff(&maxRow_T, &maxColumn_T);
// Count a correct classification when the columns agree.
if (maxColumn == maxColumn_T)
{
numTotal++;
}
}
// Accuracy over the test set.
std::cout << numTotal / 10000.00 << std::endl;
// End time of training and testing.
DWORD star_time = GetTickCount();
// Elapsed-time report (string below is mojibake from the original encoding;
// left byte-identical on purpose).
std::cout << "����������ʱ��Ϊ��" << (star_time - load_time) << "ms." << std::endl;
}
//Train and test on Cifar-10
void cifar_10_conv_01()
{
// Trains a small CNN on Cifar-10 for 8 epochs with a stepped learning-rate
// schedule, evaluating on the test set after every epoch.
// Pipeline: input(32x32, 3 channels) -> pool(16x16) -> conv(16 maps, 3x3) ->
// sigmoid -> pool(8x8) -> dim-reduce -> fully-connected(16*8*8 -> 10) ->
// sigmoid -> MSE loss.
LoadCifar_10::loadCifar_10_Train();
std::shared_ptr<Inter_LayerQL<double>> in_01 = std::make_shared<Inter_LayerQL<double>>(32, 32);
in_01->forward_Matrix_Vector = LoadCifar_10::cifar_Input_Vector[4];
std::shared_ptr<LayerQL<double>> pool_01 = std::make_shared<PooLayerQL<double>>(Pool_Layer, 16, 16); //pooling layer
std::shared_ptr<Inter_LayerQL<double>> o_01 = in_01 + pool_01;
// Smoke-test each stage on training sample 4 while the graph is being built,
// printing intermediate outputs for visual inspection.
// stage 1: pooling
pool_01->calForward();
std::cout << o_01->forward_Matrix_Vector[0]->getMatrixQL() << std::endl;
std::cout << (o_01->forward_Matrix_Vector[0]->getMatrixQL() * 9).cast<int>() << std::endl;
std::shared_ptr<LayerQL<double>> conv_01 = std::make_shared<Conv_LayerQL<double>>(Conv_Layer, 16, 16, 16, 3, 3, 1); //convolution layer
std::shared_ptr<Inter_LayerQL<double>> o_02 = o_01 + conv_01;
// stage 2: convolution
conv_01->calForward();
std::cout << o_02->forward_Matrix_Vector[0]->getMatrixQL() << std::endl;
std::cout << "*******" << std::endl;
std::cout << o_02->forward_Matrix_Vector[7]->getMatrixQL() << std::endl;
std::shared_ptr<LayerQL<double>> sigmoid_01 = std::make_shared<Sigmoid_LayerQL<double>>(Sigmoid_Conv_Layer); //Sigmoid layer (conv variant)
std::shared_ptr<Inter_LayerQL<double>> o_03 = o_02 + sigmoid_01;
// stage 3: sigmoid over all feature maps
sigmoid_01->calForward(1);
std::cout << o_03->forward_Matrix_Vector[0]->getMatrixQL() << std::endl;
std::cout << "*******" << std::endl;
std::cout << o_03->forward_Matrix_Vector[7]->getMatrixQL() << std::endl;
std::shared_ptr<LayerQL<double>> pool_02 = std::make_shared<PooLayerQL<double>>(Pool_Layer, 8, 8); //pooling layer
std::shared_ptr<Inter_LayerQL<double>> o_04 = o_03 + pool_02;
// stage 4: pooling down to 8x8
pool_02->calForward();
std::cout << o_04->forward_Matrix_Vector[0]->getMatrixQL() << std::endl;
std::cout << "*******" << std::endl;
std::cout << o_04->forward_Matrix_Vector[7]->getMatrixQL() << std::endl;
std::shared_ptr<LayerQL<double>> dim_reduce_01 = std::make_shared<Dim_ReduceQL<double>>(Dim_Reduce_Layer, 16, 8, 8); //dim-reduce (flatten) layer
std::shared_ptr<Inter_LayerQL<double>> o_05 = o_04 + dim_reduce_01;
// stage 5: flatten feature maps to a vector
dim_reduce_01->calForward();
std::cout << o_05->forward_Matrix->getMatrixQL() << std::endl;
std::shared_ptr<LayerQL<double>> fullconnect_01 = std::make_shared<Fullconnect_LayerQL<double>>(Fullconnect_Layer, 16 * 8 * 8, 10); //fully-connected layer
std::shared_ptr<Inter_LayerQL<double>> o_06 = o_05 + fullconnect_01;
// stage 6: fully-connected projection to 10 classes
fullconnect_01->calForward();
std::cout << o_06->forward_Matrix->getMatrixQL() << std::endl;
std::shared_ptr<LayerQL<double>> sigmoid_02 = std::make_shared<Sigmoid_LayerQL<double>>(Sigmoid_Layer); //Sigmoid layer
std::shared_ptr<Inter_LayerQL<double>> o_07 = o_06 + sigmoid_02;
// stage 7: output sigmoid
sigmoid_02->calForward();
std::cout << o_07->forward_Matrix->getMatrixQL() << std::endl;
std::shared_ptr<LayerQL<double>> lossLayer_01 = std::make_shared<MSE_Loss_LayerQL<double>>(MSE_Loss_Layer); //loss layer
std::shared_ptr<Inter_LayerQL<double>> o_08 = o_07 + lossLayer_01;
// stage 8: MSE loss; feed label row 4 for the smoke test
lossLayer_01->calForward();
o_08->backward_Matrix->setMatrixQL() = LoadCifar_10::cifar_Out_Lable->getMatrixQL().row(4);
;
for (int i = 0; i < 8; i++)
{
// Per-epoch start time for the timing report printed after the epoch's test.
DWORD load_time = GetTickCount();
// Stepped learning-rate schedule: rates drop roughly 5x every 2 epochs.
if (i < 2)
{
conv_01->upConv = 0.5;
fullconnect_01->upFull = 0.1;
}
else if (i < 4)
{
conv_01->upConv = 0.05;
fullconnect_01->upFull = 0.01;
}
else if (i < 6)
{
conv_01->upConv = 0.01;
fullconnect_01->upFull = 0.002;
}
else if (i < 8)
{
conv_01->upConv = 0.002;
fullconnect_01->upFull = 0.0004;
}
for (int j = 0; j < 50000; j++)
{
// Progress trace: prints every sample index (very chatty).
std::cout << j << std::endl;
in_01->forward_Matrix_Vector = LoadCifar_10::cifar_Input_Vector[j];
o_08->backward_Matrix->setMatrixQL() = LoadCifar_10::cifar_Out_Lable->getMatrixQL().row(j);
// Forward pass: iterate every registered layer front to back.
for (auto k = NetQL<double>::layerQLVector.begin(); k != NetQL<double>::layerQLVector.end(); k++)
{
(*k)->calForward();
}
// Backward pass + weight update: iterate back to front.
for (auto k = NetQL<double>::layerQLVector.rbegin(); k != NetQL<double>::layerQLVector.rend(); k++)
{
(*k)->calBackward();
(*k)->upMatrix();
}
}
// Evaluate current weights on the Cifar-10 test set after every epoch.
this->cifar_10_Test(in_01, o_08, lossLayer_01);
// End time of this epoch's training and testing.
DWORD star_time = GetTickCount();
// Elapsed-time report (string below is mojibake from the original encoding;
// left byte-identical on purpose).
std::cout << "����������ʱ��Ϊ��" << (star_time - load_time) << "ms." << std::endl;
}
}
//Train and test on Cifar-10 (variant with data augmentation, leaky-ReLU activations and SoftMax loss)
void cifar_10_conv_01_01()
{
	// Trains a deeper CNN on Cifar-10 for 14 epochs with a stepped
	// learning-rate schedule, evaluating on the test set after every epoch.
	// Pipeline: input(32x32, 3 channels) -> data augmentation -> pool(16x16)
	// -> conv(16 maps, 3x3) -> leaky ReLU -> pool(8x8) -> conv(16 maps, 3x3)
	// -> leaky ReLU -> pool(4x4) -> dim-reduce ->
	// fully-connected(16*4*4 -> 10) -> leaky ReLU -> SoftMax loss.
	//
	// Fix vs. the previous revision: the second schedule branch was a bare
	// `if (i < 4)` instead of `else if (i < 4)`, so for epochs 0-1 the
	// 0.02/0.015 rates were immediately overwritten by 0.015/0.009 and the
	// first branch was dead code. Now the schedule behaves as written.
	LoadCifar_10::loadCifar_10_Train();
	std::shared_ptr<Inter_LayerQL<double>> in_01 = std::make_shared<Inter_LayerQL<double>>(32, 32);
	in_01->forward_Matrix_Vector = LoadCifar_10::cifar_Input_Vector[4];
	// stage 0: data-augmentation layer
	std::shared_ptr<LayerQL<double>> data_Aumentation_01 = std::make_shared<Data_AugmentationQL<double>>(Data_Augmentation_Layer, 0, 0);
	std::shared_ptr<Inter_LayerQL<double>> o_00 = in_01 + data_Aumentation_01;
	data_Aumentation_01->calForward();
	std::shared_ptr<LayerQL<double>> pool_01 = std::make_shared<PooLayerQL<double>>(Pool_Layer, 16, 16); //pooling layer
	std::shared_ptr<Inter_LayerQL<double>> o_01 = o_00 + pool_01;
	// Smoke-test each stage on training sample 4 while the graph is being
	// built, printing a few intermediate outputs for visual inspection.
	// stage 1: pooling
	pool_01->calForward();
	std::cout << o_01->forward_Matrix_Vector[0]->getMatrixQL() << std::endl;
	std::cout << (o_01->forward_Matrix_Vector[0]->getMatrixQL() * 9).cast<int>() << std::endl;
	std::shared_ptr<LayerQL<double>> conv_01 = std::make_shared<Conv_LayerQL<double>>(Conv_Layer, 16, 16, 16, 3, 3, 1); //convolution layer 1
	std::shared_ptr<Inter_LayerQL<double>> o_02 = o_01 + conv_01;
	// stage 2: first convolution
	conv_01->calForward();
	std::cout << o_02->forward_Matrix_Vector[0]->getMatrixQL() << std::endl;
	std::cout << "*******" << std::endl;
	std::cout << o_02->forward_Matrix_Vector[7]->getMatrixQL() << std::endl;
	std::shared_ptr<LayerQL<double>> sigmoid_01 = std::make_shared<Relu_LayerQL<double>>(Relu_Conv_Layer); //ReLU layer (conv variant)
	std::shared_ptr<Inter_LayerQL<double>> o_03 = o_02 + sigmoid_01;
	sigmoid_01->pRelu_k = 0.1; // leaky-ReLU negative slope
	// stage 3: leaky ReLU over all feature maps
	sigmoid_01->calForward(1);
	std::cout << o_03->forward_Matrix_Vector[0]->getMatrixQL() << std::endl;
	std::cout << "*******" << std::endl;
	std::cout << o_03->forward_Matrix_Vector[7]->getMatrixQL() << std::endl;
	std::shared_ptr<LayerQL<double>> pool_02 = std::make_shared<PooLayerQL<double>>(Pool_Layer, 8, 8); //pooling layer
	std::shared_ptr<Inter_LayerQL<double>> o_04 = o_03 + pool_02;
	// stage 4: pooling down to 8x8
	pool_02->calForward();
	std::cout << o_04->forward_Matrix_Vector[0]->getMatrixQL() << std::endl;
	std::cout << "*******" << std::endl;
	std::cout << o_04->forward_Matrix_Vector[7]->getMatrixQL() << std::endl;
	std::shared_ptr<LayerQL<double>> conv_02 = std::make_shared<Conv_LayerQL<double>>(Conv_Layer, 16, 8, 8, 3, 16, 1); //convolution layer 2
	std::shared_ptr<Inter_LayerQL<double>> o_04_02 = o_04 + conv_02;
	conv_02->calForward();
	std::shared_ptr<LayerQL<double>> sigmoid_01_02 = std::make_shared<Relu_LayerQL<double>>(Relu_Conv_Layer); //ReLU layer (conv variant)
	std::shared_ptr<Inter_LayerQL<double>> o_04_03 = o_04_02 + sigmoid_01_02;
	sigmoid_01_02->pRelu_k = 0.1; // leaky-ReLU negative slope
	sigmoid_01_02->calForward(1);
	std::shared_ptr<LayerQL<double>> pool_02_02 = std::make_shared<PooLayerQL<double>>(Pool_Layer, 4, 4); //pooling layer
	std::shared_ptr<Inter_LayerQL<double>> o_04_04 = o_04_03 + pool_02_02;
	pool_02_02->calForward();
	std::shared_ptr<LayerQL<double>> dim_reduce_01 = std::make_shared<Dim_ReduceQL<double>>(Dim_Reduce_Layer, 16, 4, 4); //dim-reduce (flatten) layer
	std::shared_ptr<Inter_LayerQL<double>> o_05 = o_04_04 + dim_reduce_01;
	// stage 5: flatten feature maps to a vector
	dim_reduce_01->calForward();
	std::cout << o_05->forward_Matrix->getMatrixQL() << std::endl;
	std::shared_ptr<LayerQL<double>> fullconnect_01 = std::make_shared<Fullconnect_LayerQL<double>>(Fullconnect_Layer, 16 * 4 * 4, 10); //fully-connected layer
	std::shared_ptr<Inter_LayerQL<double>> o_06 = o_05 + fullconnect_01;
	// stage 6: fully-connected projection to 10 classes
	fullconnect_01->calForward();
	std::cout << o_06->forward_Matrix->getMatrixQL() << std::endl;
	std::shared_ptr<LayerQL<double>> sigmoid_02 = std::make_shared<Relu_LayerQL<double>>(Relu_Layer); //ReLU layer
	std::shared_ptr<Inter_LayerQL<double>> o_07 = o_06 + sigmoid_02;
	sigmoid_02->pRelu_k = 0.1; // leaky-ReLU negative slope
	// stage 7: output leaky ReLU
	sigmoid_02->calForward();
	std::cout << o_07->forward_Matrix->getMatrixQL() << std::endl;
	std::shared_ptr<LayerQL<double>> lossLayer_01 = std::make_shared<SoftMax_LayerQL<double>>(SoftMax_Layer); //loss layer
	std::shared_ptr<Inter_LayerQL<double>> o_08 = o_07 + lossLayer_01;
	// stage 8: SoftMax loss; feed label row 4 for the smoke test
	lossLayer_01->calForward();
	o_08->backward_Matrix->setMatrixQL() = LoadCifar_10::cifar_Out_Lable->getMatrixQL().row(4);
	for (int i = 0; i < 14; i++)
	{
		// Per-epoch start time for the timing report after the epoch's test.
		DWORD load_time = GetTickCount();
		// Leaky-ReLU slope used during training.
		sigmoid_01->pRelu_k = 0.12;
		sigmoid_01_02->pRelu_k = 0.12;
		sigmoid_02->pRelu_k = 0.12;
		// Stepped learning-rate schedule, decaying every 2 epochs.
		// (Previously the second branch lacked `else`, killing the i < 2 rates.)
		if (i < 2)
		{
			conv_01->upConv = 0.02;
			conv_02->upConv = 0.02;
			fullconnect_01->upFull = 0.015;
		}
		else if (i < 4)
		{
			conv_01->upConv = 0.015;
			conv_02->upConv = 0.015;
			fullconnect_01->upFull = 0.009;
		}
		else if (i < 6)
		{
			conv_01->upConv = 0.008;
			conv_02->upConv = 0.008;
			fullconnect_01->upFull = 0.006;
		}
		else if (i < 8)
		{
			conv_01->upConv = 0.004;
			conv_02->upConv = 0.004;
			fullconnect_01->upFull = 0.003;
		}
		else if (i < 10)
		{
			conv_01->upConv = 0.002;
			conv_02->upConv = 0.002;
			fullconnect_01->upFull = 0.001;
		}
		else if (i < 12)
		{
			conv_01->upConv = 0.001;
			conv_02->upConv = 0.001;
			fullconnect_01->upFull = 0.0005;
		}
		else if (i < 14)
		{
			conv_01->upConv = 0.0005;
			conv_02->upConv = 0.0005;
			fullconnect_01->upFull = 0.0002;
		}
		else if (i < 18)
		{
			// NOTE(review): unreachable with the current epoch bound of 14;
			// kept for when the bound is raised.
			conv_01->upConv = 0.00005;
			conv_02->upConv = 0.00005;
			fullconnect_01->upFull = 0.00005;
		}
		for (int j = 0; j < 50000; j++)
		{
			// Progress trace every 10000 samples.
			if (j % 10000 == 0)
				std::cout << i << "::" << j << std::endl;
			in_01->forward_Matrix_Vector = LoadCifar_10::cifar_Input_Vector[j];
			o_08->backward_Matrix->setMatrixQL() = LoadCifar_10::cifar_Out_Lable->getMatrixQL().row(j);
			// Forward pass: iterate every registered layer front to back.
			for (auto k = NetQL<double>::layerQLVector.begin(); k != NetQL<double>::layerQLVector.end(); k++)
			{
				(*k)->calForward();
			}
			// Backward pass + weight update: iterate back to front.
			for (auto k = NetQL<double>::layerQLVector.rbegin(); k != NetQL<double>::layerQLVector.rend(); k++)
			{
				(*k)->calBackward();
				(*k)->upMatrix();
			}
		}
		// Evaluate current weights on the Cifar-10 test set after every epoch.
		this->cifar_10_Test(in_01, o_08, lossLayer_01);
		// End time of this epoch's training and testing.
		DWORD star_time = GetTickCount();
		// Elapsed-time report (string below is mojibake from the original
		// encoding; left byte-identical on purpose).
		std::cout << "����������ʱ��Ϊ��" << (star_time - load_time) << "ms." << std::endl;
	}
}
void cifar_10_Test(std::shared_ptr<Inter_LayerQL<double>> inLayer, std::shared_ptr<Inter_LayerQL<double>> endLayer, std::shared_ptr<LayerQL<double>> lossLayer)
{
	// Runs a forward-only pass over the 10000-sample Cifar-10 test set,
	// stores the classification accuracy in this->accu and prints it.
	// inLayer:   input layer that receives each test image
	// endLayer:  terminal layer whose backward matrix receives the label row
	// lossLayer: loss layer whose left side holds the prediction and whose
	//            right side holds the label
	// Fix vs. the previous revision: the inner forward-pass iterator was also
	// named `k`, shadowing the outer sample index; renamed and converted to a
	// range-for. Behavior is unchanged.
	double numTotal = 0;
	for (int k = 0; k < 10000; k++)
	{
		inLayer->forward_Matrix_Vector = LoadCifar_10::cifar_Input_Vector_T[k];
		endLayer->backward_Matrix->setMatrixQL() = LoadCifar_10::cifar_Out_Lable_T->getMatrixQL().row(k);
		// Forward pass through every registered layer, front to back.
		for (auto &layer : NetQL<double>::layerQLVector)
		{
			layer->calForward();
		}
		// Predicted class = column of the max network output.
		int maxRow, maxColumn;
		lossLayer->left_Layer->forward_Matrix->getMatrixQL().maxCoeff(&maxRow, &maxColumn);
		// True class = column of the max label entry.
		int maxRow_T, maxColumn_T;
		lossLayer->right_Layer->backward_Matrix->getMatrixQL().maxCoeff(&maxRow_T, &maxColumn_T);
		// Count a correct classification when the columns agree.
		if (maxColumn == maxColumn_T)
		{
			numTotal++;
		}
	}
	// Accuracy over the test set.
	this->accu = numTotal / 10000.00;
	std::cout << this->accu << std::endl;
}
//Deeper variant: three convolution stages followed by a single fully-connected layer; accuracy test
// Builds and trains a CIFAR-10 network with three convolution stages
// (conv -> sigmoid -> pool; the pool is omitted after the third conv),
// a flatten (dim-reduce) step, one fully-connected layer, a sigmoid and
// an MSE loss.  Trains for 20 epochs over the 50,000 training images
// with a step-decayed learning rate, running the test set after each epoch.
// Layer graph is assembled via the overloaded operator+ on layer pointers.
void cifar_10_conv_02()
{
LoadCifar_10::loadCifar_10_Train();
std::shared_ptr<Inter_LayerQL<double>> in_01 = std::make_shared<Inter_LayerQL<double>>(32, 32);
std::shared_ptr<LayerQL<double>> conv_01 = std::make_shared<Conv_LayerQL<double>>(Conv_Layer, 16, 32, 32, 5, 3, 2); // convolution layer 1
std::shared_ptr<Inter_LayerQL<double>> o_02 = in_01 + conv_01;
std::shared_ptr<LayerQL<double>> sigmoid_01 = std::make_shared<Sigmoid_LayerQL<double>>(Sigmoid_Conv_Layer); // sigmoid layer 1
std::shared_ptr<Inter_LayerQL<double>> o_03 = o_02 + sigmoid_01;
std::shared_ptr<LayerQL<double>> pool_01 = std::make_shared<PooLayerQL<double>>(Pool_Layer, 16, 16); // pooling layer 1
std::shared_ptr<Inter_LayerQL<double>> o_04 = o_03 + pool_01;
std::shared_ptr<LayerQL<double>> conv_02 = std::make_shared<Conv_LayerQL<double>>(Conv_Layer, 16, 16, 16, 3, 16, 1); // convolution layer 2
std::shared_ptr<Inter_LayerQL<double>> o_05 = o_04 + conv_02;
std::shared_ptr<LayerQL<double>> sigmoid_02 = std::make_shared<Sigmoid_LayerQL<double>>(Sigmoid_Conv_Layer); // sigmoid layer 2
std::shared_ptr<Inter_LayerQL<double>> o_06 = o_05 + sigmoid_02;
std::shared_ptr<LayerQL<double>> pool_02 = std::make_shared<PooLayerQL<double>>(Pool_Layer, 8, 8); // pooling layer 2
std::shared_ptr<Inter_LayerQL<double>> o_07 = o_06 + pool_02;
std::shared_ptr<LayerQL<double>> conv_03 = std::make_shared<Conv_LayerQL<double>>(Conv_Layer, 8, 8, 8, 3, 16, 1); // convolution layer 3
std::shared_ptr<Inter_LayerQL<double>> o_08 = o_07 + conv_03;
std::shared_ptr<LayerQL<double>> sigmoid_03 = std::make_shared<Sigmoid_LayerQL<double>>(Sigmoid_Conv_Layer); // sigmoid layer 3
std::shared_ptr<Inter_LayerQL<double>> o_09 = o_08 + sigmoid_03;
std::shared_ptr<LayerQL<double>> dim_reduce_01 = std::make_shared<Dim_ReduceQL<double>>(Dim_Reduce_Layer, 8, 8, 8); // flatten / dimension-reduce layer
std::shared_ptr<Inter_LayerQL<double>> o_010 = o_09 + dim_reduce_01;
std::shared_ptr<LayerQL<double>> fullconnect_01 = std::make_shared<Fullconnect_LayerQL<double>>(Fullconnect_Layer, 8 * 8 * 8, 10); // fully-connected layer
std::shared_ptr<Inter_LayerQL<double>> o_011 = o_010 + fullconnect_01;
std::shared_ptr<LayerQL<double>> sigmoid_04 = std::make_shared<Sigmoid_LayerQL<double>>(Sigmoid_Layer); // sigmoid layer 4
std::shared_ptr<Inter_LayerQL<double>> o_012 = o_011 + sigmoid_04;
std::shared_ptr<LayerQL<double>> lossLayer_01 = std::make_shared<MSE_Loss_LayerQL<double>>(MSE_Loss_Layer); // MSE loss layer
std::shared_ptr<Inter_LayerQL<double>> o_013 = o_012 + lossLayer_01;
////********************************************************************** pooling layer
////1111111111111111111111111111111111111111111111111111111111111111111111
//std::shared_ptr<LayerQL<double>> conv_01 = std::make_shared<Conv_LayerQL<double>>(Conv_Layer, 16, 16, 16, 3, 3, 1); // convolution layer
//std::shared_ptr<Inter_LayerQL<double>> o_02 = o_01 + conv_01;
////********************************************************************** convolution layer
////2222222222222222222222222222222222222222222222222222222222222222222222
//std::shared_ptr<LayerQL<double>> sigmoid_01 = std::make_shared<Sigmoid_LayerQL<double>>(Sigmoid_Conv_Layer); // sigmoid layer
//std::shared_ptr<Inter_LayerQL<double>> o_03 = o_02 + sigmoid_01;
////********************************************************************** sigmoid layer
////3333333333333333333333333333333333333333333333333333333333333333333333
//std::shared_ptr<LayerQL<double>> pool_02 = std::make_shared<PooLayerQL<double>>(Pool_Layer, 8, 8); // pooling layer
//std::shared_ptr<Inter_LayerQL<double>> o_04 = o_03 + pool_02;
////********************************************************************** pooling layer
////4444444444444444444444444444444444444444444444444444444444444444444444
//std::shared_ptr<LayerQL<double>> dim_reduce_01 = std::make_shared<Dim_ReduceQL<double>>(Dim_Reduce_Layer, 16, 8, 8); // dimension-reduce layer
//std::shared_ptr<Inter_LayerQL<double>> o_05 = o_04 + dim_reduce_01;
////********************************************************************** dimension-reduce layer
////55555555555555555555555555555555555555555555555555555555555555555555555
//std::shared_ptr<LayerQL<double>> fullconnect_01 = std::make_shared<Fullconnect_LayerQL<double>>(Fullconnect_Layer, 16 * 8 * 8, 10);// fully-connected layer
//std::shared_ptr<Inter_LayerQL<double>> o_06 = o_05 + fullconnect_01;
////********************************************************************** fully-connected layer
////66666666666666666666666666666666666666666666666666666666666666666666666
//std::shared_ptr<LayerQL<double>> sigmoid_02 = std::make_shared<Sigmoid_LayerQL<double>>(Sigmoid_Layer); // sigmoid layer
//std::shared_ptr<Inter_LayerQL<double>> o_07 = o_06 + sigmoid_02;
////********************************************************************** sigmoid layer
////77777777777777777777777777777777777777777777777777777777777777777777777
//std::shared_ptr<LayerQL<double>> lossLayer_01 = std::make_shared<MSE_Loss_LayerQL<double>>(MSE_Loss_Layer);// loss layer
//std::shared_ptr<Inter_LayerQL<double>> o_08 = o_07 + lossLayer_01;
////********************************************************************** loss layer
////88888888888888888888888888888888888888888888888888888888888888888888888
for (int i = 0; i < 20; i++)
{
// Record the start-of-epoch timestamp.
DWORD load_time = GetTickCount();
//if (i < 10)
//{
// conv_01->upConv = 0.5;
// conv_02->upConv = 0.5;
// conv_03->upConv = 0.5;
// fullconnect_01->upFull = 0.1;
//}
//else if (i < 4)
//{
// conv_01->upConv = 0.05;
// conv_02->upConv = 0.05;
// conv_03->upConv = 0.05;
// fullconnect_01->upFull = 0.01;
//}
//else if (i < 6)
//{
// conv_01->upConv = 0.01;
// conv_02->upConv = 0.01;
// conv_03->upConv = 0.01;
// fullconnect_01->upFull = 0.002;
//}
//else if (i < 8)
//{
// conv_01->upConv = 0.002;
// conv_02->upConv = 0.002;
// conv_03->upConv = 0.002;
// fullconnect_01->upFull = 0.0004;
//}
// Step decay: divide the learning rates by 10 every 5 epochs.
conv_01->upConv = 0.5 / pow(10, i / 5);
conv_02->upConv = 0.5 / pow(10, i / 5);
conv_03->upConv = 0.5 / pow(10, i / 5);
fullconnect_01->upFull = 0.1 / pow(10, i / 5);
// One pass over all 50,000 training images (pure SGD, batch size 1).
for (int j = 0; j < 50000; j++)
{
in_01->forward_Matrix_Vector = LoadCifar_10::cifar_Input_Vector[j];
o_013->backward_Matrix->setMatrixQL() = LoadCifar_10::cifar_Out_Lable->getMatrixQL().row(j);
// Forward pass through all layers, head to tail.
for (auto k = NetQL<double>::layerQLVector.begin(); k != NetQL<double>::layerQLVector.end(); k++)
{
(*k)->calForward();
}
// Backward pass + weight update, tail to head.
for (auto k = NetQL<double>::layerQLVector.rbegin(); k != NetQL<double>::layerQLVector.rend(); k++)
{
(*k)->calBackward();
(*k)->upMatrix();
}
}
this->cifar_10_Test(in_01, o_013, lossLayer_01);
// End-of-epoch timestamp (training + testing).
DWORD star_time = GetTickCount();
// Print the elapsed time for this epoch.
std::cout << "����������ʱ��Ϊ��" << (star_time - load_time) << "ms." << std::endl;
}
}
// Two convolution stages + three fully-connected layers; cf. "TensorFlow in Practice", p. 88.
// Builds and trains a CIFAR-10 network with two convolution stages
// (conv -> sigmoid -> pool) and three fully-connected layers with
// sigmoid activations, finishing with an MSE loss.  Trains for 20
// epochs with a step-decayed learning rate, testing after each epoch.
void cifar_10_conv_03()
{
LoadCifar_10::loadCifar_10_Train();
std::shared_ptr<Inter_LayerQL<double>> in_01 = std::make_shared<Inter_LayerQL<double>>(32, 32);
std::shared_ptr<LayerQL<double>> conv_01 = std::make_shared<Conv_LayerQL<double>>(Conv_Layer, 64, 32, 32, 5, 3, 2); // convolution layer 1
std::shared_ptr<Inter_LayerQL<double>> o_02 = in_01 + conv_01;
std::shared_ptr<LayerQL<double>> sigmoid_01 = std::make_shared<Sigmoid_LayerQL<double>>(Sigmoid_Conv_Layer); // sigmoid layer 1
std::shared_ptr<Inter_LayerQL<double>> o_03 = o_02 + sigmoid_01;
std::shared_ptr<LayerQL<double>> pool_01 = std::make_shared<PooLayerQL<double>>(Pool_Layer, 16, 16); // pooling layer 1
std::shared_ptr<Inter_LayerQL<double>> o_04 = o_03 + pool_01;
std::shared_ptr<LayerQL<double>> conv_02 = std::make_shared<Conv_LayerQL<double>>(Conv_Layer, 64, 16, 16, 5, 64, 2); // convolution layer 2
std::shared_ptr<Inter_LayerQL<double>> o_05 = o_04 + conv_02;
std::shared_ptr<LayerQL<double>> sigmoid_02 = std::make_shared<Sigmoid_LayerQL<double>>(Sigmoid_Conv_Layer); // sigmoid layer 2
std::shared_ptr<Inter_LayerQL<double>> o_06 = o_05 + sigmoid_02;
std::shared_ptr<LayerQL<double>> pool_02 = std::make_shared<PooLayerQL<double>>(Pool_Layer, 8, 8); // pooling layer 2
std::shared_ptr<Inter_LayerQL<double>> o_07 = o_06 + pool_02;
//std::shared_ptr<LayerQL<double>> conv_03 = std::make_shared<Conv_LayerQL<double>>(Conv_Layer, 8, 8, 8, 3, 16, 1); // convolution layer 3 (disabled)
//std::shared_ptr<Inter_LayerQL<double>> o_08 = o_07 + conv_03;
//std::shared_ptr<LayerQL<double>> sigmoid_03 = std::make_shared<Sigmoid_LayerQL<double>>(Sigmoid_Conv_Layer); // sigmoid layer 3 (disabled)
//std::shared_ptr<Inter_LayerQL<double>> o_09 = o_08 + sigmoid_03;
std::shared_ptr<LayerQL<double>> dim_reduce_01 = std::make_shared<Dim_ReduceQL<double>>(Dim_Reduce_Layer, 64, 8, 8); // flatten / dimension-reduce layer
std::shared_ptr<Inter_LayerQL<double>> o_010 = o_07 + dim_reduce_01;
std::shared_ptr<LayerQL<double>> fullconnect_01 = std::make_shared<Fullconnect_LayerQL<double>>(Fullconnect_Layer, 64 * 8 * 8, 384); // fully-connected layer 1
std::shared_ptr<Inter_LayerQL<double>> o_011 = o_010 + fullconnect_01;
std::shared_ptr<LayerQL<double>> sigmoid_03_01 = std::make_shared<Sigmoid_LayerQL<double>>(Sigmoid_Layer); // sigmoid layer
std::shared_ptr<Inter_LayerQL<double>> o_011_01 = o_011 + sigmoid_03_01;
std::shared_ptr<LayerQL<double>> fullconnect_02 = std::make_shared<Fullconnect_LayerQL<double>>(Fullconnect_Layer, 384, 192); // fully-connected layer 2
std::shared_ptr<Inter_LayerQL<double>> o_011_02 = o_011_01 + fullconnect_02;
std::shared_ptr<LayerQL<double>> sigmoid_03_02 = std::make_shared<Sigmoid_LayerQL<double>>(Sigmoid_Layer); // sigmoid layer
std::shared_ptr<Inter_LayerQL<double>> o_012_01 = o_011_02 + sigmoid_03_02;
std::shared_ptr<LayerQL<double>> fullconnect_03 = std::make_shared<Fullconnect_LayerQL<double>>(Fullconnect_Layer, 192, 10); // fully-connected layer 3
std::shared_ptr<Inter_LayerQL<double>> o_011_03 = o_012_01 + fullconnect_03;
std::shared_ptr<LayerQL<double>> sigmoid_04 = std::make_shared<Sigmoid_LayerQL<double>>(Sigmoid_Layer); // sigmoid layer 4
std::shared_ptr<Inter_LayerQL<double>> o_012 = o_011_03 + sigmoid_04;
std::shared_ptr<LayerQL<double>> lossLayer_01 = std::make_shared<MSE_Loss_LayerQL<double>>(MSE_Loss_Layer); // MSE loss layer
std::shared_ptr<Inter_LayerQL<double>> o_013 = o_012 + lossLayer_01;
for (int i = 0; i < 20; i++)
{
// Record the start-of-epoch timestamp.
DWORD load_time = GetTickCount();
//if (i < 10)
//{
// conv_01->upConv = 0.5;
// conv_02->upConv = 0.5;
// conv_03->upConv = 0.5;
// fullconnect_01->upFull = 0.1;
//}
//else if (i < 4)
//{
// conv_01->upConv = 0.05;
// conv_02->upConv = 0.05;
// conv_03->upConv = 0.05;
// fullconnect_01->upFull = 0.01;
//}
//else if (i < 6)
//{
// conv_01->upConv = 0.01;
// conv_02->upConv = 0.01;
// conv_03->upConv = 0.01;
// fullconnect_01->upFull = 0.002;
//}
//else if (i < 8)
//{
// conv_01->upConv = 0.002;
// conv_02->upConv = 0.002;
// conv_03->upConv = 0.002;
// fullconnect_01->upFull = 0.0004;
//}
// Step decay: divide the learning rates by 10 every 5 epochs.
conv_01->upConv = 0.5 / pow(10, i / 5);
conv_02->upConv = 0.5 / pow(10, i / 5);
//conv_03->upConv = 0.5 / pow(10, i / 5);
fullconnect_01->upFull = 0.1 / pow(10, i / 5);
fullconnect_02->upFull = 0.1 / pow(10, i / 5);
fullconnect_03->upFull = 0.1 / pow(10, i / 5);
// One pass over all 50,000 training images (pure SGD, batch size 1).
for (int j = 0; j < 50000; j++)
{
// NOTE(review): prints progress for every single sample — very verbose.
std::cout << i << "::" << j << "::" << this->accu << std::endl;
in_01->forward_Matrix_Vector = LoadCifar_10::cifar_Input_Vector[j];
o_013->backward_Matrix->setMatrixQL() = LoadCifar_10::cifar_Out_Lable->getMatrixQL().row(j);
// Forward pass through all layers, head to tail.
for (auto k = NetQL<double>::layerQLVector.begin(); k != NetQL<double>::layerQLVector.end(); k++)
{
(*k)->calForward();
}
// Backward pass + weight update, tail to head.
for (auto k = NetQL<double>::layerQLVector.rbegin(); k != NetQL<double>::layerQLVector.rend(); k++)
{
(*k)->calBackward();
(*k)->upMatrix();
}
}
this->cifar_10_Test(in_01, o_013, lossLayer_01);
// End-of-epoch timestamp (training + testing).
DWORD star_time = GetTickCount();
// Print the elapsed time for this epoch.
std::cout << "����������ʱ��Ϊ��" << (star_time - load_time) << "ms." << std::endl;
}
}
};
} |
chi2double.c | // fast chi-squared distance function in x86 compiler intrinsics
// (C) 2007-2008 Christoph Lampert <christoph.lampert@gmail.com>
#include <stdio.h>
#include <values.h> // for FLT_MIN
/* We calculate chi2=(a-b)**2/(a+b+FLT_MIN) to avoid division-by-zero:
If a+b != 0, then (a+b+FLT_MIN)==(a+b) and nothing changed.
If a+b == 0, then the numerator is 0 as well, and we don't divide by 0.
*/
/* Using compiler intrinsics (for SSE >=2) can have a huge speedup effect:
8x for float and 3.5x for double on Intel Core2.
You have to compile with the right CPU setting, e.g. gcc -march=k8 or -march=nocona */
#ifdef __SSE2__
#include <emmintrin.h> // for float
#endif
/* OpenMP allows to achieve almost linear speedup on multiCore CPUs: use gcc-4.2 -fopenmp */
#ifdef _OPENMP
#include <omp.h>
#endif
/* Reference scalar implementation of the chi-squared distance:
   sum_i (x[i]-y[i])^2 / (x[i]+y[i]+DBL_MIN).
   Adding DBL_MIN to the denominator avoids 0/0 when x[i]+y[i]==0
   (the numerator is zero in that case as well). */
static inline double chi2_baseline_double(const int n, const double* const x, const double* const y) {
	double acc = 0.;
	for (int k = 0; k < n; k++) {
		const double diff = x[k] - y[k];
		const double inv = 1./(x[k] + y[k] + DBL_MIN);
		acc += diff * diff * inv;
	}
	return acc;
}
/* use compiler intrinsics for 2x parallel processing */
/* SSE2 implementation: processes two doubles per iteration with packed
   arithmetic, keeping two running partial sums, then handles at most one
   leftover element with the scalar fallback.  DBL_MIN in the denominator
   avoids 0/0 (see the file header comment).
   Cleanup: removed the unused 'zero' constant, and removed _mm_empty() —
   EMMS is only required after MMX register usage, and this function uses
   none (SSE2/XMM state does not need it). */
static inline double chi2_intrinsic_double(int n, const double* x, const double* y) {
	double result=0;
	const __m128d eps = _mm_set1_pd(DBL_MIN);
	__m128d chi2 = _mm_setzero_pd();   /* two packed partial sums */

	for ( ; n>1; n-=2) {
		const __m128d a = _mm_loadu_pd(x);
		const __m128d b = _mm_loadu_pd(y);
		x+=2;
		y+=2;
		const __m128d a_plus_b = _mm_add_pd(a,b);
		const __m128d a_plus_b_plus_eps = _mm_add_pd(a_plus_b,eps);
		const __m128d a_minus_b = _mm_sub_pd(a,b);
		const __m128d a_minus_b_sq = _mm_mul_pd(a_minus_b, a_minus_b);
		const __m128d quotient = _mm_div_pd(a_minus_b_sq, a_plus_b_plus_eps);
		chi2 = _mm_add_pd(chi2, quotient);
	}
	/* horizontal add of the two partial sums */
	const __m128d shuffle = _mm_shuffle_pd(chi2, chi2, _MM_SHUFFLE2(0,1));
	const __m128d sum = _mm_add_pd(chi2, shuffle);
	/* with SSE3, we could use hadd_pd, but the difference is negligible */
	_mm_store_sd(&result,sum);
	if (n)
		result += chi2_baseline_double(n, x, y); /* remaining odd entry */
	return result;
}
/* calculate the chi2-distance between two vectors/histograms */
/* Computes the chi2-distance between two vectors/histograms, dispatching
   to the SSE2 implementation when available, otherwise to the portable
   scalar baseline. */
double chi2_double(const int dim, const double* const x, const double* const y) {
#ifdef __SSE2__
	return chi2_intrinsic_double(dim, x, y);
#else
	return chi2_baseline_double(dim, x, y);
#endif
}
/* calculate the chi2-measure between two sets of vectors/histograms */
/* Computes the symmetric chi2 kernel matrix K (nx-by-nx) over one set of
   nx dim-dimensional histograms stored row-wise in x, and returns the mean
   entry of K.  Diagonal entries are set to 0.  Parallelized with OpenMP. */
double chi2sym_distance_double(const int dim, const int nx, const double* const x,
                               double* const K) {
	double (*chi2_double)(const int, const double*, const double*) = chi2_baseline_double;
#ifdef __SSE2__
	chi2_double = chi2_intrinsic_double;
#endif

	double sumK=0.;
#pragma omp parallel
	{
		int i,j;
		/* dynamic schedule: row i costs O(i) work, so static chunks would
		   be badly load-balanced */
#pragma omp for reduction (+:sumK) schedule (dynamic, 2)
		for (i=0;i<nx;i++) {
			K[i*nx+i]=0.;
			for (j=0;j<i;j++) {
				const double chi2 = chi2_double(dim, &x[i*dim], &x[j*dim]);
				K[i*nx+j] = chi2;
				K[j*nx+i] = chi2;
				sumK += 2*chi2; /* each off-diagonal pair counted twice */
			}
		}
	}
	/* BUGFIX: the divisor was previously (float)(nx*nx), which both truncates
	   the mean to float precision and can overflow the int product nx*nx;
	   compute it as a double product instead. */
	return sumK/((double)nx*(double)nx);
}
/* calculate the chi2-measure between two sets of vectors/histograms */
/* Computes the full chi2 kernel matrix K (nx-by-ny) between two sets of
   histograms x and y (row-wise, dim entries each) and returns the mean
   entry of K.  Parallelized with OpenMP. */
double chi2_distance_double(const int dim, const int nx, const double* const x,
                            const int ny, const double* const y, double* const K) {
	double (*chi2_double)(const int, const double*, const double*) = chi2_baseline_double;
#ifdef __SSE2__
	chi2_double = chi2_intrinsic_double;
#endif

	double sumK=0.;
#pragma omp parallel
	{
		int i,j;
#pragma omp for reduction (+:sumK)
		for (i=0;i<nx;i++)
			for (j=0;j<ny;j++) {
				const double chi2 = chi2_double(dim, &x[i*dim], &y[j*dim]);
				K[i*ny+j] = chi2;
				sumK += chi2;
			}
	}
	/* BUGFIX: the divisor was previously (float)(nx*ny), which both truncates
	   the mean to float precision and can overflow the int product nx*ny;
	   compute it as a double product instead. */
	return sumK/((double)nx*(double)ny);
}
#ifdef __MAIN__
#include <stdlib.h>
#include <malloc.h>
#include <time.h>
/* Benchmark driver (only built with -D__MAIN__): fills two sets of
   histograms with deterministic values and times the computation of the
   full chi2 kernel matrix between them. */
int main()
{
	const int dim=3000;
	const int n1=1000;
	const int n2=2000;
	int i; /* removed unused loop variable 'j' */

	/* 16-byte aligned allocations for the SSE path; memalign is
	   nonstandard (glibc), and freeing memalign'd memory with free()
	   is valid on glibc — TODO confirm for other platforms. */
	double *data1 = (double*)memalign(16,dim*n1*sizeof(double));
	double *data2 = (double*)memalign(16,dim*n2*sizeof(double));
	double *K = (double*)malloc(n1*n2*sizeof(double));
	if ((!data1) || (!data2) || (!K)) {
		free(data1);
		free(data2);
		free(K);
		return 1;
	}

	const clock_t before_init=clock();
	for (i=0;i<n1*dim;i++)
		data1[i]=1./(double)(i+1.);
	for (i=0;i<n2*dim;i++)
		data2[i]=1./(double)(i+1.);
	const clock_t after_init=clock();
	printf("init time: %8.4f\n",(after_init-before_init)*1./CLOCKS_PER_SEC);

	/* NOTE(review): clock() measures CPU time, not wall time, so OpenMP
	   speedups will not show up in these figures. */
	const clock_t before_chi2=clock();
	const double mean_K = chi2_distance_double(dim, n1, data1, n2, data2, K);
	const clock_t after_chi2=clock();
	printf("chi2 time: %8.4f\n",(after_chi2-before_chi2)*1./CLOCKS_PER_SEC);
	printf("result: %e\n",mean_K);

	free(data1);
	free(data2);
	free(K);
	return 0;
}
#endif
|
csr.c | /*!
* \file
*
* \brief Various routines with dealing with CSR matrices
*
* \author George Karypis
* \version\verbatim $Id: csr.c 21044 2017-05-24 22:50:32Z karypis $ \endverbatim
*/
#include <GKlib.h>
#define OMPMINOPS 50000
/*************************************************************************/
/*! Allocate memory for a CSR matrix and initializes it
\returns the allocated matrix. The various fields are set to NULL.
*/
/**************************************************************************/
gk_csr_t *gk_csr_Create()
{
  /* Allocate the matrix shell and zero-initialize it on success;
     returns NULL when gk_malloc fails. */
  gk_csr_t *mat = (gk_csr_t *)gk_malloc(sizeof(gk_csr_t), "gk_csr_Create: mat");
  if (mat != NULL)
    gk_csr_Init(mat);
  return mat;
}
/*************************************************************************/
/*! Initializes the matrix
\param mat is the matrix to be initialized.
*/
/*************************************************************************/
void gk_csr_Init(gk_csr_t *mat)
{
  /* Zero the whole struct: every pointer becomes NULL, every count 0. */
  memset(mat, 0, sizeof(gk_csr_t));
  /* Re-set the dimensions explicitly for clarity (already zero). */
  mat->nrows = 0;
  mat->ncols = 0;
}
/*************************************************************************/
/*! Frees all the memory allocated for matrix.
\param mat is the matrix to be freed.
*/
/*************************************************************************/
void gk_csr_Free(gk_csr_t **mat)
{
  /* Free both the contents and the struct itself; gk_free NULLs *mat.
     A NULL matrix is silently ignored. */
  if (*mat != NULL) {
    gk_csr_FreeContents(*mat);
    gk_free((void **)mat, LTERM);
  }
}
/*************************************************************************/
/*! Frees only the memory allocated for the matrix's different fields and
sets them to NULL.
\param mat is the matrix whose contents will be freed.
*/
/*************************************************************************/
/* Releases every array owned by the matrix in a single LTERM-terminated
   varargs gk_free() call; gk_free sets each pointer to NULL, leaving the
   struct in a consistent, reusable state. */
void gk_csr_FreeContents(gk_csr_t *mat)
{
/* NOTE(review): the first argument is cast to (void *) while the rest are
   passed as plain (type **); presumably gk_free takes (void **) — confirm
   against GKlib's memory.c. */
gk_free((void *)&mat->rowptr, &mat->rowind, &mat->rowval,
&mat->rowids, &mat->rlabels, &mat->rmap,
&mat->colptr, &mat->colind, &mat->colval,
&mat->colids, &mat->clabels, &mat->cmap,
&mat->rnorms, &mat->cnorms, &mat->rsums, &mat->csums,
&mat->rsizes, &mat->csizes, &mat->rvols, &mat->cvols,
&mat->rwgts, &mat->cwgts,
LTERM);
}
/*************************************************************************/
/*! Returns a copy of a matrix.
\param mat is the matrix to be duplicated.
\returns the newly created copy of the matrix.
*/
/**************************************************************************/
/* Deep-copies a CSR matrix: each field that is non-NULL in the source is
   allocated and copied into the new matrix; NULL fields stay NULL.
   NOTE(review): rmap and cmap are NOT duplicated here although
   gk_csr_FreeContents frees them — confirm this is intended. */
gk_csr_t *gk_csr_Dup(gk_csr_t *mat)
{
gk_csr_t *nmat;
nmat = gk_csr_Create();
nmat->nrows = mat->nrows;
nmat->ncols = mat->ncols;
/* copy the row structure */
if (mat->rowptr)
nmat->rowptr = gk_zcopy(mat->nrows+1, mat->rowptr,
gk_zmalloc(mat->nrows+1, "gk_csr_Dup: rowptr"));
if (mat->rowids)
nmat->rowids = gk_icopy(mat->nrows, mat->rowids,
gk_imalloc(mat->nrows, "gk_csr_Dup: rowids"));
if (mat->rlabels)
nmat->rlabels = gk_icopy(mat->nrows, mat->rlabels,
gk_imalloc(mat->nrows, "gk_csr_Dup: rlabels"));
if (mat->rnorms)
nmat->rnorms = gk_fcopy(mat->nrows, mat->rnorms,
gk_fmalloc(mat->nrows, "gk_csr_Dup: rnorms"));
if (mat->rsums)
nmat->rsums = gk_fcopy(mat->nrows, mat->rsums,
gk_fmalloc(mat->nrows, "gk_csr_Dup: rsums"));
if (mat->rsizes)
nmat->rsizes = gk_fcopy(mat->nrows, mat->rsizes,
gk_fmalloc(mat->nrows, "gk_csr_Dup: rsizes"));
if (mat->rvols)
nmat->rvols = gk_fcopy(mat->nrows, mat->rvols,
gk_fmalloc(mat->nrows, "gk_csr_Dup: rvols"));
if (mat->rwgts)
nmat->rwgts = gk_fcopy(mat->nrows, mat->rwgts,
gk_fmalloc(mat->nrows, "gk_csr_Dup: rwgts"));
/* the non-zero arrays are sized by rowptr[nrows], the total nnz count */
if (mat->rowind)
nmat->rowind = gk_icopy(mat->rowptr[mat->nrows], mat->rowind,
gk_imalloc(mat->rowptr[mat->nrows], "gk_csr_Dup: rowind"));
if (mat->rowval)
nmat->rowval = gk_fcopy(mat->rowptr[mat->nrows], mat->rowval,
gk_fmalloc(mat->rowptr[mat->nrows], "gk_csr_Dup: rowval"));
/* copy the col structure */
if (mat->colptr)
nmat->colptr = gk_zcopy(mat->ncols+1, mat->colptr,
gk_zmalloc(mat->ncols+1, "gk_csr_Dup: colptr"));
if (mat->colids)
nmat->colids = gk_icopy(mat->ncols, mat->colids,
gk_imalloc(mat->ncols, "gk_csr_Dup: colids"));
if (mat->clabels)
nmat->clabels = gk_icopy(mat->ncols, mat->clabels,
gk_imalloc(mat->ncols, "gk_csr_Dup: clabels"));
if (mat->cnorms)
nmat->cnorms = gk_fcopy(mat->ncols, mat->cnorms,
gk_fmalloc(mat->ncols, "gk_csr_Dup: cnorms"));
if (mat->csums)
nmat->csums = gk_fcopy(mat->ncols, mat->csums,
gk_fmalloc(mat->ncols, "gk_csr_Dup: csums"));
if (mat->csizes)
nmat->csizes = gk_fcopy(mat->ncols, mat->csizes,
gk_fmalloc(mat->ncols, "gk_csr_Dup: csizes"));
if (mat->cvols)
nmat->cvols = gk_fcopy(mat->ncols, mat->cvols,
gk_fmalloc(mat->ncols, "gk_csr_Dup: cvols"));
if (mat->cwgts)
nmat->cwgts = gk_fcopy(mat->ncols, mat->cwgts,
gk_fmalloc(mat->ncols, "gk_csr_Dup: cwgts"));
if (mat->colind)
nmat->colind = gk_icopy(mat->colptr[mat->ncols], mat->colind,
gk_imalloc(mat->colptr[mat->ncols], "gk_csr_Dup: colind"));
if (mat->colval)
nmat->colval = gk_fcopy(mat->colptr[mat->ncols], mat->colval,
gk_fmalloc(mat->colptr[mat->ncols], "gk_csr_Dup: colval"));
return nmat;
}
/*************************************************************************/
/*! Returns a submatrix containing a set of consecutive rows.
\param mat is the original matrix.
\param rstart is the starting row.
\param nrows is the number of rows from rstart to extract.
\returns the row structure of the newly created submatrix.
*/
/**************************************************************************/
/* Extracts rows [rstart, rstart+nrows) into a new matrix (row structure
   only).  Returns NULL when the requested range exceeds mat->nrows.
   NOTE(review): only rowids, rnorms and rsums are carried over; rlabels,
   rsizes, rvols and rwgts are not copied — confirm this is intended. */
gk_csr_t *gk_csr_ExtractSubmatrix(gk_csr_t *mat, int rstart, int nrows)
{
ssize_t i;
gk_csr_t *nmat;
/* bounds check: the requested window must fit inside the matrix */
if (rstart+nrows > mat->nrows)
return NULL;
nmat = gk_csr_Create();
nmat->nrows = nrows;
nmat->ncols = mat->ncols;
/* copy the row structure */
if (mat->rowptr)
nmat->rowptr = gk_zcopy(nrows+1, mat->rowptr+rstart,
gk_zmalloc(nrows+1, "gk_csr_ExtractSubmatrix: rowptr"));
/* rebase the copied rowptr so that the first row starts at offset 0 */
for (i=nrows; i>=0; i--)
nmat->rowptr[i] -= nmat->rowptr[0];
ASSERT(nmat->rowptr[0] == 0);
if (mat->rowids)
nmat->rowids = gk_icopy(nrows, mat->rowids+rstart,
gk_imalloc(nrows, "gk_csr_ExtractSubmatrix: rowids"));
if (mat->rnorms)
nmat->rnorms = gk_fcopy(nrows, mat->rnorms+rstart,
gk_fmalloc(nrows, "gk_csr_ExtractSubmatrix: rnorms"));
if (mat->rsums)
nmat->rsums = gk_fcopy(nrows, mat->rsums+rstart,
gk_fmalloc(nrows, "gk_csr_ExtractSubmatrix: rsums"));
ASSERT(nmat->rowptr[nrows] == mat->rowptr[rstart+nrows]-mat->rowptr[rstart]);
/* copy the non-zero index/value slices belonging to the window */
if (mat->rowind)
nmat->rowind = gk_icopy(mat->rowptr[rstart+nrows]-mat->rowptr[rstart],
mat->rowind+mat->rowptr[rstart],
gk_imalloc(mat->rowptr[rstart+nrows]-mat->rowptr[rstart],
"gk_csr_ExtractSubmatrix: rowind"));
if (mat->rowval)
nmat->rowval = gk_fcopy(mat->rowptr[rstart+nrows]-mat->rowptr[rstart],
mat->rowval+mat->rowptr[rstart],
gk_fmalloc(mat->rowptr[rstart+nrows]-mat->rowptr[rstart],
"gk_csr_ExtractSubmatrix: rowval"));
return nmat;
}
/*************************************************************************/
/*! Returns a submatrix containing a certain set of rows.
\param mat is the original matrix.
\param nrows is the number of rows to extract.
\param rind is the set of row numbers to extract.
\returns the row structure of the newly created submatrix.
*/
/**************************************************************************/
/* Extracts the nrows rows listed in rind (in that order) into a new matrix;
   only the row structure (rowptr/rowind/rowval) is built. */
gk_csr_t *gk_csr_ExtractRows(gk_csr_t *mat, int nrows, int *rind)
{
ssize_t i, ii, j, nnz;
gk_csr_t *nmat;

nmat = gk_csr_Create();

nmat->nrows = nrows;
nmat->ncols = mat->ncols;

/* first pass: count the non-zeros of the selected rows */
for (nnz=0, i=0; i<nrows; i++)
nnz += mat->rowptr[rind[i]+1]-mat->rowptr[rind[i]];

/* BUGFIX: the allocation-failure messages previously named
   "gk_csr_ExtractPartition" (copy-paste); corrected to this function. */
nmat->rowptr = gk_zmalloc(nmat->nrows+1, "gk_csr_ExtractRows: rowptr");
nmat->rowind = gk_imalloc(nnz, "gk_csr_ExtractRows: rowind");
nmat->rowval = gk_fmalloc(nnz, "gk_csr_ExtractRows: rowval");

/* second pass: copy the index/value data of each selected row */
nmat->rowptr[0] = 0;
for (nnz=0, j=0, ii=0; ii<nrows; ii++) {
i = rind[ii];
gk_icopy(mat->rowptr[i+1]-mat->rowptr[i], mat->rowind+mat->rowptr[i], nmat->rowind+nnz);
gk_fcopy(mat->rowptr[i+1]-mat->rowptr[i], mat->rowval+mat->rowptr[i], nmat->rowval+nnz);
nnz += mat->rowptr[i+1]-mat->rowptr[i];
nmat->rowptr[++j] = nnz;
}
ASSERT(j == nmat->nrows);

return nmat;
}
/*************************************************************************/
/*! Returns a submatrix corresponding to a specified partitioning of rows.
\param mat is the original matrix.
\param part is the partitioning vector of the rows.
\param pid is the partition ID that will be extracted.
\returns the row structure of the newly created submatrix.
*/
/**************************************************************************/
/* Extracts into a new matrix every row i of mat for which part[i] == pid,
   preserving their relative order; only the row structure is built. */
gk_csr_t *gk_csr_ExtractPartition(gk_csr_t *mat, int *part, int pid)
{
ssize_t i, j, nnz;
gk_csr_t *nmat;
nmat = gk_csr_Create();
nmat->nrows = 0;
nmat->ncols = mat->ncols;
/* first pass: count the selected rows and their non-zeros */
for (nnz=0, i=0; i<mat->nrows; i++) {
if (part[i] == pid) {
nmat->nrows++;
nnz += mat->rowptr[i+1]-mat->rowptr[i];
}
}
nmat->rowptr = gk_zmalloc(nmat->nrows+1, "gk_csr_ExtractPartition: rowptr");
nmat->rowind = gk_imalloc(nnz, "gk_csr_ExtractPartition: rowind");
nmat->rowval = gk_fmalloc(nnz, "gk_csr_ExtractPartition: rowval");
/* second pass: copy the index/value data of each selected row */
nmat->rowptr[0] = 0;
for (nnz=0, j=0, i=0; i<mat->nrows; i++) {
if (part[i] == pid) {
gk_icopy(mat->rowptr[i+1]-mat->rowptr[i], mat->rowind+mat->rowptr[i], nmat->rowind+nnz);
gk_fcopy(mat->rowptr[i+1]-mat->rowptr[i], mat->rowval+mat->rowptr[i], nmat->rowval+nnz);
nnz += mat->rowptr[i+1]-mat->rowptr[i];
nmat->rowptr[++j] = nnz;
}
}
ASSERT(j == nmat->nrows);
return nmat;
}
/*************************************************************************/
/*! Splits the matrix into multiple sub-matrices based on the provided
color array.
\param mat is the original matrix.
\param color is an array of size equal to the number of non-zeros
in the matrix (row-wise structure). The matrix is split into
as many parts as the number of colors. For meaningful results,
the colors should be numbered consecutively starting from 0.
\returns an array of matrices for each supplied color number.
*/
/**************************************************************************/
/* Splits mat into ncolors sub-matrices, assigning each non-zero j to
   sub-matrix color[j].  Every sub-matrix keeps the full nrows x ncols
   shape; it just holds the subset of non-zeros with its color.
   Classic two-pass CSR construction: count per-row entries, prefix-sum
   (MAKECSR), fill while bumping rowptr, then shift back (SHIFTCSR). */
gk_csr_t **gk_csr_Split(gk_csr_t *mat, int *color)
{
ssize_t i, j;
int nrows, ncolors;
ssize_t *rowptr;
int *rowind;
float *rowval;
gk_csr_t **smats;
nrows = mat->nrows;
rowptr = mat->rowptr;
rowind = mat->rowind;
rowval = mat->rowval;
/* colors are assumed to be numbered consecutively from 0 */
ncolors = gk_imax(rowptr[nrows], color, 1)+1;
smats = (gk_csr_t **)gk_malloc(sizeof(gk_csr_t *)*ncolors, "gk_csr_Split: smats");
for (i=0; i<ncolors; i++) {
smats[i] = gk_csr_Create();
smats[i]->nrows = mat->nrows;
smats[i]->ncols = mat->ncols;
smats[i]->rowptr = gk_zsmalloc(nrows+1, 0, "gk_csr_Split: smats[i]->rowptr");
}
/* pass 1: count, per color, the number of non-zeros in each row */
for (i=0; i<nrows; i++) {
for (j=rowptr[i]; j<rowptr[i+1]; j++)
smats[color[j]]->rowptr[i]++;
}
/* turn the counts into CSR start offsets (exclusive prefix sum) */
for (i=0; i<ncolors; i++)
MAKECSR(j, nrows, smats[i]->rowptr);
for (i=0; i<ncolors; i++) {
smats[i]->rowind = gk_imalloc(smats[i]->rowptr[nrows], "gk_csr_Split: smats[i]->rowind");
smats[i]->rowval = gk_fmalloc(smats[i]->rowptr[nrows], "gk_csr_Split: smats[i]->rowval");
}
/* pass 2: scatter each non-zero into its color's matrix, advancing
   that matrix's rowptr[i] as the insertion cursor */
for (i=0; i<nrows; i++) {
for (j=rowptr[i]; j<rowptr[i+1]; j++) {
smats[color[j]]->rowind[smats[color[j]]->rowptr[i]] = rowind[j];
smats[color[j]]->rowval[smats[color[j]]->rowptr[i]] = rowval[j];
smats[color[j]]->rowptr[i]++;
}
}
/* undo the cursor advance: shift rowptr back by one row */
for (i=0; i<ncolors; i++)
SHIFTCSR(j, nrows, smats[i]->rowptr);
return smats;
}
/**************************************************************************/
/*! Determines the format of the CSR matrix based on the extension.
\param filename is the name of the file.
\param the user-supplied format.
\returns the type. The extension of the file directly maps to the
name of the format.
*/
/**************************************************************************/
/* Resolves GK_CSR_FMT_AUTO by mapping the file's extension to a format
   code; any explicitly supplied format is returned unchanged.  Unknown
   extensions default to GK_CSR_FMT_CSR. */
int gk_csr_DetermineFormat(char *filename, int format)
{
  static const struct { const char *ext; int fmt; } table[] = {
    { "csr",    GK_CSR_FMT_CSR    },
    { "ijv",    GK_CSR_FMT_IJV    },
    { "cluto",  GK_CSR_FMT_CLUTO  },
    { "metis",  GK_CSR_FMT_METIS  },
    { "binrow", GK_CSR_FMT_BINROW },
    { "bincol", GK_CSR_FMT_BINCOL },
    { "bijv",   GK_CSR_FMT_BIJV   },
  };
  size_t i;
  char *extension;

  if (format != GK_CSR_FMT_AUTO)
    return format;

  format = GK_CSR_FMT_CSR;  /* default when no extension matches */

  extension = gk_getextname(filename);
  for (i=0; i<sizeof(table)/sizeof(table[0]); i++) {
    if (strcmp(extension, table[i].ext) == 0) {
      format = table[i].fmt;
      break;
    }
  }
  gk_free((void **)&extension, LTERM);

  return format;
}
/**************************************************************************/
/*! Reads a CSR matrix from the supplied file and stores it the matrix's
forward structure.
\param filename is the file that stores the data.
\param format is either GK_CSR_FMT_METIS, GK_CSR_FMT_CLUTO,
GK_CSR_FMT_CSR, GK_CSR_FMT_BINROW, GK_CSR_FMT_BINCOL
specifying the type of the input format.
The GK_CSR_FMT_CSR does not contain a header
line, whereas the GK_CSR_FMT_BINROW is a binary format written
by gk_csr_Write() using the same format specifier.
\param readvals is either 1 or 0, indicating if the CSR file contains
values or it does not. It only applies when GK_CSR_FMT_CSR is
used.
\param numbering is either 1 or 0, indicating if the numbering of the
indices start from 1 or 0, respectively. If they start from 1,
they are automatically decreamented during input so that they
will start from 0. It only applies when GK_CSR_FMT_CSR is
used.
\returns the matrix that was read.
*/
/**************************************************************************/
gk_csr_t *gk_csr_Read(char *filename, int format, int readvals, int numbering)
{
ssize_t i, k, l;
size_t nfields, nrows, ncols, nnz, fmt, ncon;
size_t lnlen;
ssize_t *rowptr;
int *rowind, *iinds, *jinds, ival;
float *rowval=NULL, *vals, fval;
int readsizes, readwgts;
char *line=NULL, *head, *tail, fmtstr[256];
FILE *fpin;
gk_csr_t *mat=NULL;
/* resolve GK_CSR_FMT_AUTO based on the filename extension */
format = gk_csr_DetermineFormat(filename, format);
if (!gk_fexists(filename))
gk_errexit(SIGERR, "File %s does not exist!\n", filename);
/* The binary (BINROW/BINCOL/BIJV) and IJV formats are fully handled
   inside their case and return directly; the CLUTO/METIS/CSR text
   formats only parse their header here and share the common
   row-reading loop that follows the switch. */
switch (format) {
case GK_CSR_FMT_BINROW:
/* binary dump of nrows, ncols, rowptr[], rowind[], and (optionally)
   rowval[]; NOTE(review): assumes the file was written on a platform
   with the same sizeof(ssize_t) and endianness -- confirm */
mat = gk_csr_Create();
fpin = gk_fopen(filename, "rb", "gk_csr_Read: fpin");
if (fread(&(mat->nrows), sizeof(int32_t), 1, fpin) != 1)
gk_errexit(SIGERR, "Failed to read the nrows from file %s!\n", filename);
if (fread(&(mat->ncols), sizeof(int32_t), 1, fpin) != 1)
gk_errexit(SIGERR, "Failed to read the ncols from file %s!\n", filename);
mat->rowptr = gk_zmalloc(mat->nrows+1, "gk_csr_Read: rowptr");
if (fread(mat->rowptr, sizeof(ssize_t), mat->nrows+1, fpin) != mat->nrows+1)
gk_errexit(SIGERR, "Failed to read the rowptr from file %s!\n", filename);
mat->rowind = gk_imalloc(mat->rowptr[mat->nrows], "gk_csr_Read: rowind");
if (fread(mat->rowind, sizeof(int32_t), mat->rowptr[mat->nrows], fpin) != mat->rowptr[mat->nrows])
gk_errexit(SIGERR, "Failed to read the rowind from file %s!\n", filename);
if (readvals == 1) {
mat->rowval = gk_fmalloc(mat->rowptr[mat->nrows], "gk_csr_Read: rowval");
if (fread(mat->rowval, sizeof(float), mat->rowptr[mat->nrows], fpin) != mat->rowptr[mat->nrows])
gk_errexit(SIGERR, "Failed to read the rowval from file %s!\n", filename);
}
gk_fclose(fpin);
return mat;
break;
case GK_CSR_FMT_BINCOL:
/* same layout as BINROW but for the column-based view */
mat = gk_csr_Create();
fpin = gk_fopen(filename, "rb", "gk_csr_Read: fpin");
if (fread(&(mat->nrows), sizeof(int32_t), 1, fpin) != 1)
gk_errexit(SIGERR, "Failed to read the nrows from file %s!\n", filename);
if (fread(&(mat->ncols), sizeof(int32_t), 1, fpin) != 1)
gk_errexit(SIGERR, "Failed to read the ncols from file %s!\n", filename);
mat->colptr = gk_zmalloc(mat->ncols+1, "gk_csr_Read: colptr");
if (fread(mat->colptr, sizeof(ssize_t), mat->ncols+1, fpin) != mat->ncols+1)
gk_errexit(SIGERR, "Failed to read the colptr from file %s!\n", filename);
mat->colind = gk_imalloc(mat->colptr[mat->ncols], "gk_csr_Read: colind");
if (fread(mat->colind, sizeof(int32_t), mat->colptr[mat->ncols], fpin) != mat->colptr[mat->ncols])
gk_errexit(SIGERR, "Failed to read the colind from file %s!\n", filename);
if (readvals) {
mat->colval = gk_fmalloc(mat->colptr[mat->ncols], "gk_csr_Read: colval");
if (fread(mat->colval, sizeof(float), mat->colptr[mat->ncols], fpin) != mat->colptr[mat->ncols])
gk_errexit(SIGERR, "Failed to read the colval from file %s!\n", filename);
}
gk_fclose(fpin);
return mat;
break;
case GK_CSR_FMT_IJV:
/* text triplets, one "i j [val]" per line; gk_getfilestats() reports
   the line count (nrows here) and total token count (nnz here), which
   are used both to validate the file and to size the arrays */
gk_getfilestats(filename, &nrows, &nnz, NULL, NULL);
if (readvals == 1 && 3*nrows != nnz)
gk_errexit(SIGERR, "Error: The number of numbers (%zd %d) in the input file is not a multiple of 3.\n", nnz, readvals);
if (readvals == 0 && 2*nrows != nnz)
gk_errexit(SIGERR, "Error: The number of numbers (%zd %d) in the input file is not a multiple of 2.\n", nnz, readvals);
nnz = nrows; /* one nonzero per line */
numbering = (numbering ? - 1 : 0); /* offset that makes indices 0-based */
/* read the data into three arrays */
iinds = gk_i32malloc(nnz, "iinds");
jinds = gk_i32malloc(nnz, "jinds");
vals = (readvals ? gk_fmalloc(nnz, "vals") : NULL);
fpin = gk_fopen(filename, "r", "gk_csr_Read: fpin");
/* nrows/ncols are re-derived as max(index)+1 while reading */
for (nrows=0, ncols=0, i=0; i<nnz; i++) {
if (readvals) {
if (fscanf(fpin, "%d %d %f", &iinds[i], &jinds[i], &vals[i]) != 3)
gk_errexit(SIGERR, "Error: Failed to read (i, j, val) for nnz: %zd.\n", i);
}
else {
if (fscanf(fpin, "%d %d", &iinds[i], &jinds[i]) != 2)
gk_errexit(SIGERR, "Error: Failed to read (i, j) value for nnz: %zd.\n", i);
}
iinds[i] += numbering;
jinds[i] += numbering;
if (nrows < iinds[i])
nrows = iinds[i];
if (ncols < jinds[i])
ncols = jinds[i];
}
nrows++;
ncols++;
gk_fclose(fpin);
/* convert (i, j, v) into a CSR matrix */
mat = gk_csr_Create();
mat->nrows = nrows;
mat->ncols = ncols;
rowptr = mat->rowptr = gk_zsmalloc(nrows+1, 0, "rowptr");
rowind = mat->rowind = gk_i32malloc(nnz, "rowind");
if (readvals)
rowval = mat->rowval = gk_fmalloc(nnz, "rowval");
/* count-then-place: bucket counts, prefix-sum (MAKECSR), scatter,
   then undo the pointer advance (SHIFTCSR) */
for (i=0; i<nnz; i++)
rowptr[iinds[i]]++;
MAKECSR(i, nrows, rowptr);
for (i=0; i<nnz; i++) {
rowind[rowptr[iinds[i]]] = jinds[i];
if (readvals)
rowval[rowptr[iinds[i]]] = vals[i];
rowptr[iinds[i]]++;
}
SHIFTCSR(i, nrows, rowptr);
gk_free((void **)&iinds, &jinds, &vals, LTERM);
return mat;
break;
case GK_CSR_FMT_BIJV:
/* binary triplets behind a nrows/ncols/nnz/readvals header; note that
   readvals is taken from the file, overriding the caller's argument */
mat = gk_csr_Create();
fpin = gk_fopen(filename, "rb", "gk_csr_Read: fpin");
if (fread(&(mat->nrows), sizeof(int32_t), 1, fpin) != 1)
gk_errexit(SIGERR, "Failed to read the nrows from file %s!\n", filename);
if (fread(&(mat->ncols), sizeof(int32_t), 1, fpin) != 1)
gk_errexit(SIGERR, "Failed to read the ncols from file %s!\n", filename);
if (fread(&nnz, sizeof(size_t), 1, fpin) != 1)
gk_errexit(SIGERR, "Failed to read the nnz from file %s!\n", filename);
if (fread(&readvals, sizeof(int32_t), 1, fpin) != 1)
gk_errexit(SIGERR, "Failed to read the readvals from file %s!\n", filename);
/* read the data into three arrays */
iinds = gk_i32malloc(nnz, "iinds");
jinds = gk_i32malloc(nnz, "jinds");
vals = (readvals ? gk_fmalloc(nnz, "vals") : NULL);
for (i=0; i<nnz; i++) {
if (fread(&(iinds[i]), sizeof(int32_t), 1, fpin) != 1)
gk_errexit(SIGERR, "Failed to read iinds[i] from file %s!\n", filename);
if (fread(&(jinds[i]), sizeof(int32_t), 1, fpin) != 1)
gk_errexit(SIGERR, "Failed to read jinds[i] from file %s!\n", filename);
if (readvals) {
if (fread(&(vals[i]), sizeof(float), 1, fpin) != 1)
gk_errexit(SIGERR, "Failed to read vals[i] from file %s!\n", filename);
}
//printf("%d %d\n", iinds[i], jinds[i]);
}
gk_fclose(fpin);
/* convert (i, j, v) into a CSR matrix */
rowptr = mat->rowptr = gk_zsmalloc(mat->nrows+1, 0, "rowptr");
rowind = mat->rowind = gk_i32malloc(nnz, "rowind");
if (readvals)
rowval = mat->rowval = gk_fmalloc(nnz, "rowval");
/* same count/prefix-sum/scatter scheme as the IJV case above */
for (i=0; i<nnz; i++)
rowptr[iinds[i]]++;
MAKECSR(i, mat->nrows, rowptr);
for (i=0; i<nnz; i++) {
rowind[rowptr[iinds[i]]] = jinds[i];
if (readvals)
rowval[rowptr[iinds[i]]] = vals[i];
rowptr[iinds[i]]++;
}
SHIFTCSR(i, mat->nrows, rowptr);
gk_free((void **)&iinds, &jinds, &vals, LTERM);
return mat;
break;
/* the following are handled by a common input code, that comes after the switch */
case GK_CSR_FMT_CLUTO:
/* header line: "nrows ncols nnz"; the body always carries values
   and uses 1-based column numbering */
fpin = gk_fopen(filename, "r", "gk_csr_Read: fpin");
do {
if (gk_getline(&line, &lnlen, fpin) <= 0)
gk_errexit(SIGERR, "Premature end of input file: file:%s\n", filename);
} while (line[0] == '%');
if (sscanf(line, "%zu %zu %zu", &nrows, &ncols, &nnz) != 3)
gk_errexit(SIGERR, "Header line must contain 3 integers.\n");
readsizes = 0;
readwgts = 0;
readvals = 1;
numbering = 1;
break;
case GK_CSR_FMT_METIS:
/* header line: "nvtxs nedges [fmt [ncon]]"; the three digits of fmt
   toggle vertex sizes, vertex weights, and edge values respectively */
fpin = gk_fopen(filename, "r", "gk_csr_Read: fpin");
do {
if (gk_getline(&line, &lnlen, fpin) <= 0)
gk_errexit(SIGERR, "Premature end of input file: file:%s\n", filename);
} while (line[0] == '%');
fmt = ncon = 0;
nfields = sscanf(line, "%zu %zu %zu %zu", &nrows, &nnz, &fmt, &ncon);
if (nfields < 2)
gk_errexit(SIGERR, "Header line must contain at least 2 integers (#vtxs and #edges).\n");
ncols = nrows;
nnz *= 2; /* each undirected edge is stored in both directions */
if (fmt > 111)
gk_errexit(SIGERR, "Cannot read this type of file format [fmt=%zu]!\n", fmt);
sprintf(fmtstr, "%03zu", fmt%1000);
readsizes = (fmtstr[0] == '1');
readwgts = (fmtstr[1] == '1');
readvals = (fmtstr[2] == '1');
numbering = 1;
ncon = (ncon == 0 ? 1 : ncon);
break;
case GK_CSR_FMT_CSR:
/* headerless format; nnz is derived from the file's token count */
readsizes = 0;
readwgts = 0;
gk_getfilestats(filename, &nrows, &nnz, NULL, NULL);
if (readvals == 1 && nnz%2 == 1)
gk_errexit(SIGERR, "Error: The number of numbers (%zd %d) in the input file is not even.\n", nnz, readvals);
if (readvals == 1)
nnz = nnz/2; /* tokens come in (index, value) pairs */
fpin = gk_fopen(filename, "r", "gk_csr_Read: fpin");
break;
default:
gk_errexit(SIGERR, "Unknown csr format.\n");
return NULL;
}
/* common reading code for the CLUTO/METIS/CSR text formats */
mat = gk_csr_Create();
mat->nrows = nrows;
rowptr = mat->rowptr = gk_zmalloc(nrows+1, "gk_csr_Read: rowptr");
rowind = mat->rowind = gk_imalloc(nnz, "gk_csr_Read: rowind");
/* NOTE(review): readvals==2 appears to suppress value storage entirely
   (rowval stays NULL); any other value gets a default-1.0 array -- confirm */
if (readvals != 2)
rowval = mat->rowval = gk_fsmalloc(nnz, 1.0, "gk_csr_Read: rowval");
if (readsizes)
mat->rsizes = gk_fsmalloc(nrows, 0.0, "gk_csr_Read: rsizes");
if (readwgts)
mat->rwgts = gk_fsmalloc(nrows*ncon, 0.0, "gk_csr_Read: rwgts");
/*----------------------------------------------------------------------
* Read the sparse matrix file
*---------------------------------------------------------------------*/
numbering = (numbering ? -1 : 0); /* offset that makes indices 0-based */
for (ncols=0, rowptr[0]=0, k=0, i=0; i<nrows; i++) {
do {
if (gk_getline(&line, &lnlen, fpin) == -1)
gk_errexit(SIGERR, "Premature end of input file: file while reading row %d\n", i);
} while (line[0] == '%');
head = line;
tail = NULL;
/* Read vertex sizes */
if (readsizes) {
#ifdef __MSC__
mat->rsizes[i] = (float)strtod(head, &tail);
#else
mat->rsizes[i] = strtof(head, &tail);
#endif
if (tail == head)
gk_errexit(SIGERR, "The line for vertex %zd does not have size information\n", i+1);
if (mat->rsizes[i] < 0)
errexit("The size for vertex %zd must be >= 0\n", i+1);
head = tail;
}
/* Read vertex weights */
if (readwgts) {
for (l=0; l<ncon; l++) {
#ifdef __MSC__
mat->rwgts[i*ncon+l] = (float)strtod(head, &tail);
#else
mat->rwgts[i*ncon+l] = strtof(head, &tail);
#endif
if (tail == head)
errexit("The line for vertex %zd does not have enough weights "
"for the %d constraints.\n", i+1, ncon);
if (mat->rwgts[i*ncon+l] < 0)
errexit("The weight vertex %zd and constraint %zd must be >= 0\n", i+1, l);
head = tail;
}
}
/* Read the rest of the row */
while (1) {
ival = (int)strtol(head, &tail, 0);
if (tail == head)
break; /* no more tokens on this line */
head = tail;
if ((rowind[k] = ival + numbering) < 0)
gk_errexit(SIGERR, "Error: Invalid column number %d at row %zd.\n", ival, i);
ncols = gk_max(rowind[k], ncols); /* track largest column id seen */
if (readvals == 1) {
#ifdef __MSC__
fval = (float)strtod(head, &tail);
#else
fval = strtof(head, &tail);
#endif
if (tail == head)
gk_errexit(SIGERR, "Value could not be found for column! Row:%zd, NNZ:%zd\n", i, k);
head = tail;
rowval[k] = fval;
}
k++;
}
rowptr[i+1] = k;
}
if (format == GK_CSR_FMT_METIS) {
ASSERT(ncols+1 == mat->nrows);
mat->ncols = mat->nrows; /* metis graphs are square by definition */
}
else {
mat->ncols = ncols+1;
}
if (k != nnz)
gk_errexit(SIGERR, "gk_csr_Read: Something wrong with the number of nonzeros in "
"the input file. NNZ=%zd, ActualNNZ=%zd.\n", nnz, k);
gk_fclose(fpin);
gk_free((void **)&line, LTERM);
return mat;
}
/**************************************************************************/
/*! Writes the row-based structure of a matrix into a file.
\param mat is the matrix to be written,
\param filename is the name of the output file.
\param format is one of: GK_CSR_FMT_CLUTO, GK_CSR_FMT_CSR,
GK_CSR_FMT_BINROW, GK_CSR_FMT_BINCOL, GK_CSR_FMT_BIJV.
\param writevals is either 1 or 0 indicating if the values will be
written or not. This is only applicable when GK_CSR_FMT_CSR
is used.
\param numbering is either 1 or 0 indicating if the internal 0-based
numbering will be shifted by one or not during output. This
is only applicable when GK_CSR_FMT_CSR is used.
*/
/**************************************************************************/
void gk_csr_Write(gk_csr_t *mat, char *filename, int format, int writevals, int numbering)
{
ssize_t i, j;
int32_t edge[2];
FILE *fpout;
/* resolve GK_CSR_FMT_AUTO based on the filename extension */
format = gk_csr_DetermineFormat(filename, format);
switch (format) {
case GK_CSR_FMT_METIS:
/* metis graph: 1-based adjacency lists; requires a square matrix whose
   nonzeros pair up (each edge stored in both directions) */
if (mat->nrows != mat->ncols || mat->rowptr[mat->nrows]%2 == 1)
gk_errexit(SIGERR, "METIS output format requires a square symmetric matrix.\n");
if (filename)
fpout = gk_fopen(filename, "w", "gk_csr_Write: fpout");
else
fpout = stdout; /* NULL filename means write to stdout */
fprintf(fpout, "%d %zd\n", mat->nrows, mat->rowptr[mat->nrows]/2);
for (i=0; i<mat->nrows; i++) {
for (j=mat->rowptr[i]; j<mat->rowptr[i+1]; j++)
fprintf(fpout, " %d", mat->rowind[j]+1);
fprintf(fpout, "\n");
}
if (filename)
gk_fclose(fpout);
break;
case GK_CSR_FMT_BINROW:
/* binary dump readable by gk_csr_Read(GK_CSR_FMT_BINROW);
   NOTE(review): fwrite() results are unchecked, so short writes
   (e.g. disk full) go unnoticed */
if (filename == NULL)
gk_errexit(SIGERR, "The filename parameter cannot be NULL.\n");
fpout = gk_fopen(filename, "wb", "gk_csr_Write: fpout");
fwrite(&(mat->nrows), sizeof(int32_t), 1, fpout);
fwrite(&(mat->ncols), sizeof(int32_t), 1, fpout);
fwrite(mat->rowptr, sizeof(ssize_t), mat->nrows+1, fpout);
fwrite(mat->rowind, sizeof(int32_t), mat->rowptr[mat->nrows], fpout);
if (writevals)
fwrite(mat->rowval, sizeof(float), mat->rowptr[mat->nrows], fpout);
gk_fclose(fpout);
return;
break;
case GK_CSR_FMT_BINCOL:
/* same as BINROW but dumps the column-based view */
if (filename == NULL)
gk_errexit(SIGERR, "The filename parameter cannot be NULL.\n");
fpout = gk_fopen(filename, "wb", "gk_csr_Write: fpout");
fwrite(&(mat->nrows), sizeof(int32_t), 1, fpout);
fwrite(&(mat->ncols), sizeof(int32_t), 1, fpout);
fwrite(mat->colptr, sizeof(ssize_t), mat->ncols+1, fpout);
fwrite(mat->colind, sizeof(int32_t), mat->colptr[mat->ncols], fpout);
if (writevals)
fwrite(mat->colval, sizeof(float), mat->colptr[mat->ncols], fpout);
gk_fclose(fpout);
return;
break;
case GK_CSR_FMT_IJV:
/* text triplets: one "i j [val]" line per nonzero */
if (filename == NULL)
gk_errexit(SIGERR, "The filename parameter cannot be NULL.\n");
fpout = gk_fopen(filename, "w", "gk_csr_Write: fpout");
numbering = (numbering ? 1 : 0); /* offset applied to emitted indices */
for (i=0; i<mat->nrows; i++) {
for (j=mat->rowptr[i]; j<mat->rowptr[i+1]; j++) {
if (writevals)
fprintf(fpout, "%zd %d %.8f\n", i+numbering, mat->rowind[j]+numbering, mat->rowval[j]);
else
fprintf(fpout, "%zd %d\n", i+numbering, mat->rowind[j]+numbering);
}
}
gk_fclose(fpout);
return;
break;
case GK_CSR_FMT_BIJV:
/* binary triplets behind a nrows/ncols/nnz/writevals header,
   readable by gk_csr_Read(GK_CSR_FMT_BIJV) */
if (filename == NULL)
gk_errexit(SIGERR, "The filename parameter cannot be NULL.\n");
fpout = gk_fopen(filename, "wb", "gk_csr_Write: fpout");
fwrite(&(mat->nrows), sizeof(int32_t), 1, fpout);
fwrite(&(mat->ncols), sizeof(int32_t), 1, fpout);
fwrite(&(mat->rowptr[mat->nrows]), sizeof(size_t), 1, fpout);
fwrite(&writevals, sizeof(int32_t), 1, fpout);
for (i=0; i<mat->nrows; i++) {
edge[0] = i;
for (j=mat->rowptr[i]; j<mat->rowptr[i+1]; j++) {
edge[1] = mat->rowind[j];
fwrite(edge, sizeof(int32_t), 2, fpout);
if (writevals)
fwrite(&(mat->rowval[j]), sizeof(float), 1, fpout);
}
}
gk_fclose(fpout);
return;
break;
default:
/* CSR (headerless) and CLUTO (with header) text formats */
if (filename)
fpout = gk_fopen(filename, "w", "gk_csr_Write: fpout");
else
fpout = stdout; /* NULL filename means write to stdout */
if (format == GK_CSR_FMT_CLUTO) {
fprintf(fpout, "%d %d %zd\n", mat->nrows, mat->ncols, mat->rowptr[mat->nrows]);
writevals = 1; /* cluto files always carry values and are 1-based */
numbering = 1;
}
for (i=0; i<mat->nrows; i++) {
for (j=mat->rowptr[i]; j<mat->rowptr[i+1]; j++) {
fprintf(fpout, " %d", mat->rowind[j]+(numbering ? 1 : 0));
if (writevals)
fprintf(fpout, " %f", mat->rowval[j]);
}
fprintf(fpout, "\n");
}
if (filename)
gk_fclose(fpout);
}
}
/*************************************************************************/
/*! Prunes certain rows/columns of the matrix. The pruning takes place
by analyzing the row structure of the matrix and removing rows/columns,
but it does not affect the numbering of the remaining rows/columns.
\param mat the matrix to be prunned,
\param what indicates if the rows (GK_CSR_ROW) or the columns (GK_CSR_COL)
of the matrix will be prunned,
\param minf is the minimum number of rows (columns) that a column (row) must
be present in order to be kept,
\param maxf is the maximum number of rows (columns) that a column (row) must
be present at in order to be kept.
\returns the prunned matrix consisting only of its row-based structure.
The input matrix is not modified.
*/
/**************************************************************************/
gk_csr_t *gk_csr_Prune(gk_csr_t *mat, int what, int minf, int maxf)
{
ssize_t i, j, nnz;
int nrows, ncols;
ssize_t *rowptr, *nrowptr;
int *rowind, *nrowind, *collen;
float *rowval, *nrowval;
gk_csr_t *nmat;
nmat = gk_csr_Create();
nrows = nmat->nrows = mat->nrows;
ncols = nmat->ncols = mat->ncols;
rowptr = mat->rowptr;
rowind = mat->rowind;
rowval = mat->rowval;
/* the pruned matrix can only shrink, so the input's nnz is a safe
   allocation size; NOTE(review): rowval is copied below, so this code
   assumes mat->rowval != NULL -- confirm for value-less matrices */
nrowptr = nmat->rowptr = gk_zmalloc(nrows+1, "gk_csr_Prune: nrowptr");
nrowind = nmat->rowind = gk_imalloc(rowptr[nrows], "gk_csr_Prune: nrowind");
nrowval = nmat->rowval = gk_fmalloc(rowptr[nrows], "gk_csr_Prune: nrowval");
switch (what) {
case GK_CSR_COL:
/* pass 1: count how many rows each column appears in */
collen = gk_ismalloc(ncols, 0, "gk_csr_Prune: collen");
for (i=0; i<nrows; i++) {
for (j=rowptr[i]; j<rowptr[i+1]; j++) {
ASSERT(rowind[j] < ncols);
collen[rowind[j]]++;
}
}
/* turn the counts into keep (1) / drop (0) flags */
for (i=0; i<ncols; i++)
collen[i] = (collen[i] >= minf && collen[i] <= maxf ? 1 : 0);
/* pass 2: copy only the entries whose column is kept */
nrowptr[0] = 0;
for (nnz=0, i=0; i<nrows; i++) {
for (j=rowptr[i]; j<rowptr[i+1]; j++) {
if (collen[rowind[j]]) {
nrowind[nnz] = rowind[j];
nrowval[nnz] = rowval[j];
nnz++;
}
}
nrowptr[i+1] = nnz;
}
gk_free((void **)&collen, LTERM);
break;
case GK_CSR_ROW:
/* a row is kept only if its length is within [minf, maxf]; dropped
   rows become empty but keep their row number */
nrowptr[0] = 0;
for (nnz=0, i=0; i<nrows; i++) {
if (rowptr[i+1]-rowptr[i] >= minf && rowptr[i+1]-rowptr[i] <= maxf) {
for (j=rowptr[i]; j<rowptr[i+1]; j++, nnz++) {
nrowind[nnz] = rowind[j];
nrowval[nnz] = rowval[j];
}
}
nrowptr[i+1] = nnz;
}
break;
default:
gk_csr_Free(&nmat);
gk_errexit(SIGERR, "Unknown prunning type of %d\n", what);
return NULL;
}
return nmat;
}
/*************************************************************************/
/*! Eliminates certain entries from the rows/columns of the matrix. The
filtering takes place by keeping only the highest weight entries whose
sum accounts for a certain fraction of the overall weight of the
row/column.
\param mat the matrix to be prunned,
\param what indicates if the rows (GK_CSR_ROW) or the columns (GK_CSR_COL)
of the matrix will be prunned,
\param norm indicates the norm that will be used to aggregate the weights
and possible values are 1 or 2,
\param fraction is the fraction of the overall norm that will be retained
by the kept entries.
\returns the filtered matrix consisting only of its row-based structure.
The input matrix is not modified.
*/
/**************************************************************************/
gk_csr_t *gk_csr_LowFilter(gk_csr_t *mat, int what, int norm, float fraction)
{
ssize_t i, j, nnz;
int nrows, ncols, ncand, maxlen=0;
ssize_t *rowptr, *colptr, *nrowptr;
int *rowind, *colind, *nrowind;
float *rowval, *colval, *nrowval, rsum, tsum;
gk_csr_t *nmat;
gk_fkv_t *cand;
nmat = gk_csr_Create();
nrows = nmat->nrows = mat->nrows;
ncols = nmat->ncols = mat->ncols;
rowptr = mat->rowptr;
rowind = mat->rowind;
rowval = mat->rowval;
colptr = mat->colptr;
colind = mat->colind;
colval = mat->colval;
/* the result is row-based regardless of the filtering direction and can
   only shrink, so the input's nnz is a safe allocation size */
nrowptr = nmat->rowptr = gk_zmalloc(nrows+1, "gk_csr_LowFilter: nrowptr");
nrowind = nmat->rowind = gk_imalloc(rowptr[nrows], "gk_csr_LowFilter: nrowind");
nrowval = nmat->rowval = gk_fmalloc(rowptr[nrows], "gk_csr_LowFilter: nrowval");
switch (what) {
case GK_CSR_COL:
if (mat->colptr == NULL)
gk_errexit(SIGERR, "Cannot filter columns when column-based structure has not been created.\n");
/* nrowptr starts as a copy of rowptr and is advanced as kept entries
   are scattered into their rows; SHIFTCSR undoes the advance later */
gk_zcopy(nrows+1, rowptr, nrowptr);
for (i=0; i<ncols; i++)
maxlen = gk_max(maxlen, colptr[i+1]-colptr[i]);
/* NOTE(review): distinct columns share rows, so the concurrent
   nrowptr[...]++/nrowind[...] updates in the loop below look like a
   data race under OpenMP -- verify */
#pragma omp parallel private(i, j, ncand, rsum, tsum, cand)
{
cand = gk_fkvmalloc(maxlen, "gk_csr_LowFilter: cand");
#pragma omp for schedule(static)
for (i=0; i<ncols; i++) {
/* collect the column's entries and their total (1- or 2-norm) mass */
for (tsum=0.0, ncand=0, j=colptr[i]; j<colptr[i+1]; j++, ncand++) {
cand[ncand].val = colind[j];
cand[ncand].key = colval[j];
tsum += (norm == 1 ? colval[j] : colval[j]*colval[j]);
}
gk_fkvsortd(ncand, cand);
/* keep the heaviest prefix covering fraction*tsum of the mass */
for (rsum=0.0, j=0; j<ncand && rsum<=fraction*tsum; j++) {
rsum += (norm == 1 ? cand[j].key : cand[j].key*cand[j].key);
nrowind[nrowptr[cand[j].val]] = i;
nrowval[nrowptr[cand[j].val]] = cand[j].key;
nrowptr[cand[j].val]++;
}
}
gk_free((void **)&cand, LTERM);
}
/* compact the nrowind/nrowval */
for (nnz=0, i=0; i<nrows; i++) {
for (j=rowptr[i]; j<nrowptr[i]; j++, nnz++) {
nrowind[nnz] = nrowind[j];
nrowval[nnz] = nrowval[j];
}
nrowptr[i] = nnz;
}
SHIFTCSR(i, nrows, nrowptr);
break;
case GK_CSR_ROW:
if (mat->rowptr == NULL)
gk_errexit(SIGERR, "Cannot filter rows when row-based structure has not been created.\n");
for (i=0; i<nrows; i++)
maxlen = gk_max(maxlen, rowptr[i+1]-rowptr[i]);
#pragma omp parallel private(i, j, ncand, rsum, tsum, cand)
{
cand = gk_fkvmalloc(maxlen, "gk_csr_LowFilter: cand");
#pragma omp for schedule(static)
for (i=0; i<nrows; i++) {
/* collect the row's entries and their total (1- or 2-norm) mass */
for (tsum=0.0, ncand=0, j=rowptr[i]; j<rowptr[i+1]; j++, ncand++) {
cand[ncand].val = rowind[j];
cand[ncand].key = rowval[j];
tsum += (norm == 1 ? rowval[j] : rowval[j]*rowval[j]);
}
gk_fkvsortd(ncand, cand);
/* keep the heaviest prefix, written in place at the row's old offsets */
for (rsum=0.0, j=0; j<ncand && rsum<=fraction*tsum; j++) {
rsum += (norm == 1 ? cand[j].key : cand[j].key*cand[j].key);
nrowind[rowptr[i]+j] = cand[j].val;
nrowval[rowptr[i]+j] = cand[j].key;
}
nrowptr[i+1] = rowptr[i]+j;
}
gk_free((void **)&cand, LTERM);
}
/* compact nrowind/nrowval */
nrowptr[0] = nnz = 0;
for (i=0; i<nrows; i++) {
for (j=rowptr[i]; j<nrowptr[i+1]; j++, nnz++) {
nrowind[nnz] = nrowind[j];
nrowval[nnz] = nrowval[j];
}
nrowptr[i+1] = nnz;
}
break;
default:
gk_csr_Free(&nmat);
gk_errexit(SIGERR, "Unknown prunning type of %d\n", what);
return NULL;
}
return nmat;
}
/*************************************************************************/
/*! Eliminates certain entries from the rows/columns of the matrix. The
filtering takes place by keeping only the highest weight top-K entries
along each row/column and those entries whose weight is greater than
a specified value.
\param mat the matrix to be prunned,
\param what indicates if the rows (GK_CSR_ROW) or the columns (GK_CSR_COL)
of the matrix will be prunned,
\param topk is the number of the highest weight entries to keep.
\param keepval is the weight of a term above which will be kept. This
is used to select additional terms past the first topk.
\returns the filtered matrix consisting only of its row-based structure.
The input matrix is not modified.
*/
/**************************************************************************/
gk_csr_t *gk_csr_TopKPlusFilter(gk_csr_t *mat, int what, int topk, float keepval)
{
ssize_t i, j, k, nnz;
int nrows, ncols, ncand;
ssize_t *rowptr, *colptr, *nrowptr;
int *rowind, *colind, *nrowind;
float *rowval, *colval, *nrowval;
gk_csr_t *nmat;
gk_fkv_t *cand;
nmat = gk_csr_Create();
nrows = nmat->nrows = mat->nrows;
ncols = nmat->ncols = mat->ncols;
rowptr = mat->rowptr;
rowind = mat->rowind;
rowval = mat->rowval;
colptr = mat->colptr;
colind = mat->colind;
colval = mat->colval;
/* the result is row-based and can only shrink, so the input's nnz is a
   safe allocation size */
nrowptr = nmat->rowptr = gk_zmalloc(nrows+1, "gk_csr_LowFilter: nrowptr");
nrowind = nmat->rowind = gk_imalloc(rowptr[nrows], "gk_csr_LowFilter: nrowind");
nrowval = nmat->rowval = gk_fmalloc(rowptr[nrows], "gk_csr_LowFilter: nrowval");
switch (what) {
case GK_CSR_COL:
if (mat->colptr == NULL)
gk_errexit(SIGERR, "Cannot filter columns when column-based structure has not been created.\n");
cand = gk_fkvmalloc(nrows, "gk_csr_LowFilter: cand");
/* nrowptr starts as a copy of rowptr and is advanced as kept entries
   are scattered into their rows; SHIFTCSR undoes the advance later */
gk_zcopy(nrows+1, rowptr, nrowptr);
for (i=0; i<ncols; i++) {
/* sort the column's entries by decreasing value */
for (ncand=0, j=colptr[i]; j<colptr[i+1]; j++, ncand++) {
cand[ncand].val = colind[j];
cand[ncand].key = colval[j];
}
gk_fkvsortd(ncand, cand);
/* always keep the topk heaviest entries ... */
k = gk_min(topk, ncand);
for (j=0; j<k; j++) {
nrowind[nrowptr[cand[j].val]] = i;
nrowval[nrowptr[cand[j].val]] = cand[j].key;
nrowptr[cand[j].val]++;
}
/* ... plus any further entries whose value is >= keepval */
for (; j<ncand; j++) {
if (cand[j].key < keepval)
break;
nrowind[nrowptr[cand[j].val]] = i;
nrowval[nrowptr[cand[j].val]] = cand[j].key;
nrowptr[cand[j].val]++;
}
}
/* compact the nrowind/nrowval */
for (nnz=0, i=0; i<nrows; i++) {
for (j=rowptr[i]; j<nrowptr[i]; j++, nnz++) {
nrowind[nnz] = nrowind[j];
nrowval[nnz] = nrowval[j];
}
nrowptr[i] = nnz;
}
SHIFTCSR(i, nrows, nrowptr);
gk_free((void **)&cand, LTERM);
break;
case GK_CSR_ROW:
if (mat->rowptr == NULL)
gk_errexit(SIGERR, "Cannot filter rows when row-based structure has not been created.\n");
cand = gk_fkvmalloc(ncols, "gk_csr_LowFilter: cand");
nrowptr[0] = 0;
for (nnz=0, i=0; i<nrows; i++) {
/* sort the row's entries by decreasing value */
for (ncand=0, j=rowptr[i]; j<rowptr[i+1]; j++, ncand++) {
cand[ncand].val = rowind[j];
cand[ncand].key = rowval[j];
}
gk_fkvsortd(ncand, cand);
/* always keep the topk heaviest entries ... */
k = gk_min(topk, ncand);
for (j=0; j<k; j++, nnz++) {
nrowind[nnz] = cand[j].val;
nrowval[nnz] = cand[j].key;
}
/* ... plus any further entries whose value is >= keepval */
for (; j<ncand; j++, nnz++) {
if (cand[j].key < keepval)
break;
nrowind[nnz] = cand[j].val;
nrowval[nnz] = cand[j].key;
}
nrowptr[i+1] = nnz;
}
gk_free((void **)&cand, LTERM);
break;
default:
gk_csr_Free(&nmat);
gk_errexit(SIGERR, "Unknown prunning type of %d\n", what);
return NULL;
}
return nmat;
}
/*************************************************************************/
/*! Eliminates certain entries from the rows/columns of the matrix. The
filtering takes place by keeping only the terms whose contribution to
the total length of the document is greater than a user-supplied multiple
over the average.
This routine assumes that the vectors are normalized to be unit length.
\param mat the matrix to be prunned,
\param what indicates if the rows (GK_CSR_ROW) or the columns (GK_CSR_COL)
of the matrix will be prunned,
\param zscore is the multiplicative factor over the average contribution
to the length of the document.
\returns the filtered matrix consisting only of its row-based structure.
The input matrix is not modified.
*/
/**************************************************************************/
gk_csr_t *gk_csr_ZScoreFilter(gk_csr_t *mat, int what, float zscore)
{
/* Builds a new row-based matrix that keeps only the entries of each row
   whose value exceeds zscore times the row's average contribution
   (1/row-length, for unit-length rows). The input matrix is untouched.
   Column-wise filtering is not implemented. */
gk_csr_t *nmat = gk_csr_Create();
int nrows = mat->nrows;
ssize_t *rowptr = mat->rowptr;
int *rowind = mat->rowind;
float *rowval = mat->rowval;

nmat->nrows = mat->nrows;
nmat->ncols = mat->ncols;

/* size the result at the input's nnz; filtering can only shrink it */
ssize_t *nrowptr = nmat->rowptr = gk_zmalloc(nrows+1, "gk_csr_ZScoreFilter: nrowptr");
int *nrowind = nmat->rowind = gk_imalloc(rowptr[nrows], "gk_csr_ZScoreFilter: nrowind");
float *nrowval = nmat->rowval = gk_fmalloc(rowptr[nrows], "gk_csr_ZScoreFilter: nrowval");

if (what == GK_CSR_ROW) {
  if (mat->rowptr == NULL)
    gk_errexit(SIGERR, "Cannot filter rows when row-based structure has not been created.\n");

  ssize_t kept = 0;
  nrowptr[0] = 0;
  for (ssize_t row = 0; row < nrows; row++) {
    /* threshold for this row: zscore over the row's length */
    float cutoff = zscore/(rowptr[row+1]-rowptr[row]);
    for (ssize_t e = rowptr[row]; e < rowptr[row+1]; e++) {
      if (rowval[e] > cutoff) {
        nrowind[kept] = rowind[e];
        nrowval[kept] = rowval[e];
        kept++;
      }
    }
    nrowptr[row+1] = kept;
  }
}
else if (what == GK_CSR_COL) {
  gk_errexit(SIGERR, "This has not been implemented yet.\n");
}
else {
  gk_csr_Free(&nmat);
  gk_errexit(SIGERR, "Unknown prunning type of %d\n", what);
  return NULL;
}

return nmat;
}
/*************************************************************************/
/*! Compacts the column-space of the matrix by removing empty columns.
As a result of the compaction, the column numbers are renumbered.
The compaction operation is done in place and only affects the row-based
representation of the matrix.
The new columns are ordered in decreasing frequency.
\param mat the matrix whose empty columns will be removed.
*/
/**************************************************************************/
void gk_csr_CompactColumns(gk_csr_t *mat)
{
/* Removes empty columns and renumbers the surviving ones in decreasing
   order of frequency. Works in place on the row-based representation
   only; mat->ncols is updated to the new column count. */
int ncols = mat->ncols;
ssize_t *rowptr = mat->rowptr;
int *rowind = mat->rowind;
ssize_t nnz = rowptr[mat->nrows];
ssize_t e;
int c;

int *colmap = gk_imalloc(ncols, "gk_csr_CompactColumns: colmap");
gk_ikv_t *clens = gk_ikvmalloc(ncols, "gk_csr_CompactColumns: clens");

/* count the occurrences of every column */
for (c = 0; c < ncols; c++) {
  clens[c].val = c;
  clens[c].key = 0;
}
for (e = 0; e < nnz; e++)
  clens[rowind[e]].key++;

/* sort by decreasing frequency; all non-empty columns come first */
gk_ikvsortd(ncols, clens);

/* hand out new ids to the non-empty columns, most frequent first */
int nncols = 0;
for (c = 0; c < ncols && clens[c].key > 0; c++)
  colmap[clens[c].val] = nncols++;

/* rewrite the stored column indices under the new numbering */
for (e = 0; e < nnz; e++)
  rowind[e] = colmap[rowind[e]];

mat->ncols = nncols;

gk_free((void **)&colmap, &clens, LTERM);
}
/*************************************************************************/
/*! Sorts the indices in increasing order
\param mat the matrix itself,
\param what is either GK_CSR_ROW or GK_CSR_COL indicating which set of
indices to sort.
*/
/**************************************************************************/
void gk_csr_SortIndices(gk_csr_t *mat, int what)
{
int n, nn=0;
ssize_t *ptr;
int *ind;
float *val;
/* select the row- or column-based view to sort */
switch (what) {
case GK_CSR_ROW:
if (!mat->rowptr)
gk_errexit(SIGERR, "Row-based view of the matrix does not exists.\n");
n = mat->nrows;
ptr = mat->rowptr;
ind = mat->rowind;
val = mat->rowval;
break;
case GK_CSR_COL:
if (!mat->colptr)
gk_errexit(SIGERR, "Column-based view of the matrix does not exists.\n");
n = mat->ncols;
ptr = mat->colptr;
ind = mat->colind;
val = mat->colval;
break;
default:
gk_errexit(SIGERR, "Invalid index type of %d.\n", what);
return;
}
#pragma omp parallel if (n > 100)
{
ssize_t i, j, k;
gk_ikv_t *cand;
float *tval;
/* nn = longest row/column; the single construct's implicit barrier
   makes it visible to all threads before the per-thread buffers are
   sized from it */
#pragma omp single
for (i=0; i<n; i++)
nn = gk_max(nn, ptr[i+1]-ptr[i]);
cand = gk_ikvmalloc(nn, "gk_csr_SortIndices: cand");
tval = gk_fmalloc(nn, "gk_csr_SortIndices: tval");
/* NOTE(review): tval[...] = val[j] below assumes the selected view has
   a values array (val != NULL) -- confirm for value-less matrices */
#pragma omp for schedule(static)
for (i=0; i<n; i++) {
/* stage the entries and detect whether any are out of order */
for (k=0, j=ptr[i]; j<ptr[i+1]; j++) {
if (j > ptr[i] && ind[j] < ind[j-1])
k = 1; /* an inversion */
cand[j-ptr[i]].val = j-ptr[i];
cand[j-ptr[i]].key = ind[j];
tval[j-ptr[i]] = val[j];
}
/* only sort and permute rows that actually had an inversion */
if (k) {
gk_ikvsorti(ptr[i+1]-ptr[i], cand);
for (j=ptr[i]; j<ptr[i+1]; j++) {
ind[j] = cand[j-ptr[i]].key;
val[j] = tval[cand[j-ptr[i]].val];
}
}
}
gk_free((void **)&cand, &tval, LTERM);
}
}
/*************************************************************************/
/*! Creates a row/column index from the column/row data.
\param mat the matrix itself,
\param what is either GK_CSR_ROW or GK_CSR_COL indicating which index
will be created.
*/
/**************************************************************************/
void gk_csr_CreateIndex(gk_csr_t *mat, int what)
{
/* 'f' stands for forward, 'r' stands for reverse */
ssize_t i, j, k, nf, nr;
ssize_t *fptr, *rptr;
int *find, *rind;
float *fval, *rval;
switch (what) {
case GK_CSR_COL:
/* build the column view from the row view */
nf = mat->nrows;
fptr = mat->rowptr;
find = mat->rowind;
fval = mat->rowval;
/* drop any stale reverse index before rebuilding it */
if (mat->colptr) gk_free((void **)&mat->colptr, LTERM);
if (mat->colind) gk_free((void **)&mat->colind, LTERM);
if (mat->colval) gk_free((void **)&mat->colval, LTERM);
nr = mat->ncols;
rptr = mat->colptr = gk_zsmalloc(nr+1, 0, "gk_csr_CreateIndex: rptr");
rind = mat->colind = gk_imalloc(fptr[nf], "gk_csr_CreateIndex: rind");
rval = mat->colval = (fval ? gk_fmalloc(fptr[nf], "gk_csr_CreateIndex: rval") : NULL);
break;
case GK_CSR_ROW:
/* build the row view from the column view */
nf = mat->ncols;
fptr = mat->colptr;
find = mat->colind;
fval = mat->colval;
if (mat->rowptr) gk_free((void **)&mat->rowptr, LTERM);
if (mat->rowind) gk_free((void **)&mat->rowind, LTERM);
if (mat->rowval) gk_free((void **)&mat->rowval, LTERM);
nr = mat->nrows;
rptr = mat->rowptr = gk_zsmalloc(nr+1, 0, "gk_csr_CreateIndex: rptr");
rind = mat->rowind = gk_imalloc(fptr[nf], "gk_csr_CreateIndex: rind");
rval = mat->rowval = (fval ? gk_fmalloc(fptr[nf], "gk_csr_CreateIndex: rval") : NULL);
break;
default:
gk_errexit(SIGERR, "Invalid index type of %d.\n", what);
return;
}
/* count the entries of each reverse bucket, then prefix-sum (MAKECSR) */
for (i=0; i<nf; i++) {
for (j=fptr[i]; j<fptr[i+1]; j++)
rptr[find[j]]++;
}
MAKECSR(i, nr, rptr);
/* scatter the entries; SHIFTCSR restores rptr after each scatter pass.
   NOTE(review): the rptr[nr] > 6*nr branch fills indices and values in
   two separate passes rather than one combined pass -- presumably a
   cache/locality heuristic for denser matrices; confirm */
if (rptr[nr] > 6*nr) {
for (i=0; i<nf; i++) {
for (j=fptr[i]; j<fptr[i+1]; j++)
rind[rptr[find[j]]++] = i;
}
SHIFTCSR(i, nr, rptr);
if (fval) {
for (i=0; i<nf; i++) {
for (j=fptr[i]; j<fptr[i+1]; j++)
rval[rptr[find[j]]++] = fval[j];
}
SHIFTCSR(i, nr, rptr);
}
}
else {
if (fval) {
for (i=0; i<nf; i++) {
for (j=fptr[i]; j<fptr[i+1]; j++) {
k = find[j];
rind[rptr[k]] = i;
rval[rptr[k]++] = fval[j];
}
}
}
else {
for (i=0; i<nf; i++) {
for (j=fptr[i]; j<fptr[i+1]; j++)
rind[rptr[find[j]]++] = i;
}
}
SHIFTCSR(i, nr, rptr);
}
}
/*************************************************************************/
/*! Normalizes the rows/columns of the matrix to be unit
    length.
    \param mat the matrix itself,
    \param what indicates what will be normalized and is obtained by
           specifying GK_CSR_ROW, GK_CSR_COL, GK_CSR_ROW|GK_CSR_COL.
           A view is silently skipped when its values array is missing.
    \param norm indicates what norm is to normalize to, 1: 1-norm, 2: 2-norm.
           Any other value leaves the matrix untouched (previously an
           unsupported norm silently multiplied every stored value by
           0.0, destroying the matrix).
*/
/**************************************************************************/
void gk_csr_Normalize(gk_csr_t *mat, int what, int norm)
{
ssize_t i, j;
int n;
ssize_t *ptr;
float *val, sum;

/* BUGFIX: for norm values other than 1 or 2 the scale factor `sum`
   stayed 0.0 and the scaling loop below zeroed out every value.
   Reject unsupported norms up front instead of corrupting the data. */
if (norm != 1 && norm != 2)
return;

if (what&GK_CSR_ROW && mat->rowval) {
n = mat->nrows;
ptr = mat->rowptr;
val = mat->rowval;

#pragma omp parallel for if (ptr[n] > OMPMINOPS) private(j,sum) schedule(static)
for (i=0; i<n; i++) {
sum = 0.0;
if (norm == 1) {
for (j=ptr[i]; j<ptr[i+1]; j++)
sum += val[j]; /* assume val[j] > 0 */
if (sum > 0)
sum = 1.0/sum;
}
else if (norm == 2) {
for (j=ptr[i]; j<ptr[i+1]; j++)
sum += val[j]*val[j];
if (sum > 0)
sum = 1.0/sqrt(sum);
}
/* when the accumulated norm is not positive, sum keeps its raw value
   (0 for empty rows), matching the original scaling behavior */
for (j=ptr[i]; j<ptr[i+1]; j++)
val[j] *= sum;
}
}

if (what&GK_CSR_COL && mat->colval) {
n = mat->ncols;
ptr = mat->colptr;
val = mat->colval;

#pragma omp parallel for if (ptr[n] > OMPMINOPS) private(j,sum) schedule(static)
for (i=0; i<n; i++) {
sum = 0.0;
if (norm == 1) {
for (j=ptr[i]; j<ptr[i+1]; j++)
sum += val[j]; /* assume val[j] > 0 */
if (sum > 0)
sum = 1.0/sum;
}
else if (norm == 2) {
for (j=ptr[i]; j<ptr[i+1]; j++)
sum += val[j]*val[j];
if (sum > 0)
sum = 1.0/sqrt(sum);
}
/* same raw-sum fallback as the row case above */
for (j=ptr[i]; j<ptr[i+1]; j++)
val[j] *= sum;
}
}
}
/*************************************************************************/
/*! Applies different row scaling methods.
\param mat the matrix itself,
\param type indicates the type of row scaling. Possible values are:
GK_CSR_MAXTF, GK_CSR_SQRT, GK_CSR_LOG, GK_CSR_IDF, GK_CSR_MAXTF2.
*/
/**************************************************************************/
void gk_csr_Scale(gk_csr_t *mat, int type)
{
  ssize_t i, j;
  int nrows, ncols, nnzcols, bgfreq;
  ssize_t *rowptr;
  int *rowind, *collen;
  float *rowval, *cscale, maxtf;
  double logscale = 1.0/log(2.0);   /* 1/ln(2): converts natural log to log_2 */

  nrows  = mat->nrows;
  rowptr = mat->rowptr;
  rowind = mat->rowind;
  rowval = mat->rowval;

  switch (type) {
    case GK_CSR_MAXTF: /* TF' = .5 + .5*TF/MAX(TF) */
      #pragma omp parallel for if (rowptr[nrows] > OMPMINOPS) private(j, maxtf) schedule(static)
      for (i=0; i<nrows; i++) {
        /* find the largest absolute value in row i.
           NOTE(review): rowval[rowptr[i]] is read unconditionally, so this
           assumes no empty rows (an empty last row would read one element
           past the nnz region) -- confirm with callers. */
        maxtf = fabs(rowval[rowptr[i]]);
        for (j=rowptr[i]; j<rowptr[i+1]; j++)
          maxtf = (maxtf < fabs(rowval[j]) ? fabs(rowval[j]) : maxtf);

        for (j=rowptr[i]; j<rowptr[i+1]; j++)
          rowval[j] = .5 + .5*rowval[j]/maxtf;
      }
      break;

    case GK_CSR_MAXTF2: /* TF' = .1 + .9*TF/MAX(TF) */
      #pragma omp parallel for if (rowptr[nrows] > OMPMINOPS) private(j, maxtf) schedule(static)
      for (i=0; i<nrows; i++) {
        /* same per-row max-|TF| scaling as above, with a smaller floor */
        maxtf = fabs(rowval[rowptr[i]]);
        for (j=rowptr[i]; j<rowptr[i+1]; j++)
          maxtf = (maxtf < fabs(rowval[j]) ? fabs(rowval[j]) : maxtf);

        for (j=rowptr[i]; j<rowptr[i+1]; j++)
          rowval[j] = .1 + .9*rowval[j]/maxtf;
      }
      break;

    case GK_CSR_SQRT: /* TF' = .1+SQRT(TF) */
      #pragma omp parallel for if (rowptr[nrows] > OMPMINOPS) private(j) schedule(static)
      for (i=0; i<nrows; i++) {
        for (j=rowptr[i]; j<rowptr[i+1]; j++) {
          if (rowval[j] != 0.0)
            /* sign() re-applies the original sign after the sqrt of |TF| */
            rowval[j] = .1+sign(rowval[j], sqrt(fabs(rowval[j])));
        }
      }
      break;

    case GK_CSR_POW25: /* TF' = .1+POW(TF,.25) */
      #pragma omp parallel for if (rowptr[nrows] > OMPMINOPS) private(j) schedule(static)
      for (i=0; i<nrows; i++) {
        for (j=rowptr[i]; j<rowptr[i+1]; j++) {
          if (rowval[j] != 0.0)
            /* sqrt(sqrt(x)) == x^.25, avoiding a powf() call */
            rowval[j] = .1+sign(rowval[j], sqrt(sqrt(fabs(rowval[j]))));
        }
      }
      break;

    case GK_CSR_POW65: /* TF' = .1+POW(TF,.65) */
      #pragma omp parallel for if (rowptr[nrows] > OMPMINOPS) private(j) schedule(static)
      for (i=0; i<nrows; i++) {
        for (j=rowptr[i]; j<rowptr[i+1]; j++) {
          if (rowval[j] != 0.0)
            rowval[j] = .1+sign(rowval[j], powf(fabs(rowval[j]), .65));
        }
      }
      break;

    case GK_CSR_POW75: /* TF' = .1+POW(TF,.75) */
      #pragma omp parallel for if (rowptr[nrows] > OMPMINOPS) private(j) schedule(static)
      for (i=0; i<nrows; i++) {
        for (j=rowptr[i]; j<rowptr[i+1]; j++) {
          if (rowval[j] != 0.0)
            rowval[j] = .1+sign(rowval[j], powf(fabs(rowval[j]), .75));
        }
      }
      break;

    case GK_CSR_POW85: /* TF' = .1+POW(TF,.85) */
      #pragma omp parallel for if (rowptr[nrows] > OMPMINOPS) private(j) schedule(static)
      for (i=0; i<nrows; i++) {
        for (j=rowptr[i]; j<rowptr[i+1]; j++) {
          if (rowval[j] != 0.0)
            rowval[j] = .1+sign(rowval[j], powf(fabs(rowval[j]), .85));
        }
      }
      break;

    case GK_CSR_LOG: /* TF' = 1+log_2(TF) */
      /* iterates directly over all nonzeros, not row by row */
      #pragma omp parallel for if (rowptr[nrows] > OMPMINOPS) schedule(static,32)
      for (i=0; i<rowptr[nrows]; i++) {
        if (rowval[i] != 0.0)
          /* sign-preserving log: log of |TF| scaled to base 2 via logscale */
          rowval[i] = 1+(rowval[i]>0.0 ? log(rowval[i]) : -log(-rowval[i]))*logscale;
      }
#ifdef XXX
      /* disabled row-by-row variant of the GK_CSR_LOG scaling above */
      #pragma omp parallel for private(j) schedule(static)
      for (i=0; i<nrows; i++) {
        for (j=rowptr[i]; j<rowptr[i+1]; j++) {
          if (rowval[j] != 0.0)
            rowval[j] = 1+(rowval[j]>0.0 ? log(rowval[j]) : -log(-rowval[j]))*logscale;
          //rowval[j] = 1+sign(rowval[j], log(fabs(rowval[j]))*logscale);
        }
      }
#endif
      break;

    case GK_CSR_IDF: /* TF' = TF*IDF */
      ncols  = mat->ncols;
      cscale = gk_fmalloc(ncols, "gk_csr_Scale: cscale");
      collen = gk_ismalloc(ncols, 0, "gk_csr_Scale: collen");

      /* collen[c] = document frequency of column c (rows containing it) */
      for (i=0; i<nrows; i++) {
        for (j=rowptr[i]; j<rowptr[i+1]; j++)
          collen[rowind[j]]++;
      }

      /* classic IDF: log(nrows/df); unused columns get a 0 scale */
      #pragma omp parallel for if (ncols > OMPMINOPS) schedule(static)
      for (i=0; i<ncols; i++)
        cscale[i] = (collen[i] > 0 ? log(1.0*nrows/collen[i]) : 0.0);

      #pragma omp parallel for if (rowptr[nrows] > OMPMINOPS) private(j) schedule(static)
      for (i=0; i<nrows; i++) {
        for (j=rowptr[i]; j<rowptr[i+1]; j++)
          rowval[j] *= cscale[rowind[j]];
      }

      gk_free((void **)&cscale, &collen, LTERM);
      break;

    case GK_CSR_IDF2: /* TF' = TF*IDF */
      ncols  = mat->ncols;
      cscale = gk_fmalloc(ncols, "gk_csr_Scale: cscale");
      collen = gk_ismalloc(ncols, 0, "gk_csr_Scale: collen");

      /* collen[c] = document frequency of column c */
      for (i=0; i<nrows; i++) {
        for (j=rowptr[i]; j<rowptr[i+1]; j++)
          collen[rowind[j]]++;
      }

      /* count the columns that actually appear in the matrix */
      nnzcols = 0;
      #pragma omp parallel for if (ncols > OMPMINOPS) schedule(static) reduction(+:nnzcols)
      for (i=0; i<ncols; i++)
        nnzcols += (collen[i] > 0 ? 1 : 0);

      /* background frequency: half the average nnz per non-empty column,
         floored at 10.  NOTE(review): divides by nnzcols -- assumes the
         matrix has at least one nonzero; confirm with callers. */
      bgfreq = gk_max(10, (ssize_t)(.5*rowptr[nrows]/nnzcols));
      printf("nnz: %zd, nnzcols: %d, bgfreq: %d\n", rowptr[nrows], nnzcols, bgfreq);

      /* smoothed IDF using the background frequency */
      #pragma omp parallel for if (ncols > OMPMINOPS) schedule(static)
      for (i=0; i<ncols; i++)
        cscale[i] = (collen[i] > 0 ? log(1.0*(nrows+2*bgfreq)/(bgfreq+collen[i])) : 0.0);

      #pragma omp parallel for if (rowptr[nrows] > OMPMINOPS) private(j) schedule(static)
      for (i=0; i<nrows; i++) {
        for (j=rowptr[i]; j<rowptr[i+1]; j++)
          rowval[j] *= cscale[rowind[j]];
      }

      gk_free((void **)&cscale, &collen, LTERM);
      break;

    default:
      gk_errexit(SIGERR, "Unknown scaling type of %d\n", type);
  }
}
/*************************************************************************/
/*! Computes the sums of the rows/columns
\param mat the matrix itself,
\param what is either GK_CSR_ROW or GK_CSR_COL indicating which
sums to compute.
*/
/**************************************************************************/
/* Computes and caches the per-row or per-column sums of the matrix in
   mat->rsums / mat->csums, replacing any previously cached array.  If the
   corresponding value array is NULL, the matrix is treated as binary and
   the sum of a row/column is simply its number of nonzeros. */
void gk_csr_ComputeSums(gk_csr_t *mat, int what)
{
  ssize_t i;
  int n;
  ssize_t *ptr;
  float *val, *sums;

  if (what == GK_CSR_ROW) {
    n   = mat->nrows;
    ptr = mat->rowptr;
    val = mat->rowval;

    /* drop any stale cached sums before allocating fresh storage */
    if (mat->rsums)
      gk_free((void **)&mat->rsums, LTERM);
    sums = mat->rsums = gk_fsmalloc(n, 0, "gk_csr_ComputeSums: sums");
  }
  else if (what == GK_CSR_COL) {
    n   = mat->ncols;
    ptr = mat->colptr;
    val = mat->colval;

    if (mat->csums)
      gk_free((void **)&mat->csums, LTERM);
    sums = mat->csums = gk_fsmalloc(n, 0, "gk_csr_ComputeSums: sums");
  }
  else {
    gk_errexit(SIGERR, "Invalid sum type of %d.\n", what);
    return;
  }

  if (val) {
    /* weighted matrix: sum the stored values of each row/column */
    #pragma omp parallel for if (ptr[n] > OMPMINOPS) schedule(static)
    for (i=0; i<n; i++)
      sums[i] = gk_fsum(ptr[i+1]-ptr[i], val+ptr[i], 1);
  }
  else {
    /* binary matrix: the sum is just the nonzero count */
    #pragma omp parallel for if (ptr[n] > OMPMINOPS) schedule(static)
    for (i=0; i<n; i++)
      sums[i] = ptr[i+1]-ptr[i];
  }
}
/*************************************************************************/
/*! Computes the norms of the rows/columns
\param mat the matrix itself,
\param what is either GK_CSR_ROW or GK_CSR_COL indicating which
squared norms to compute.
\note If the rowval/colval arrays are NULL, the matrix is assumed
to be binary and the norms are computed accordingly.
*/
/**************************************************************************/
void gk_csr_ComputeNorms(gk_csr_t *mat, int what)
{
  ssize_t i;
  int n;
  ssize_t *ptr;
  float *val, *norms;

  switch (what) {
    case GK_CSR_ROW:
      n   = mat->nrows;
      ptr = mat->rowptr;
      val = mat->rowval;

      /* replace any previously cached row norms */
      if (mat->rnorms) gk_free((void **)&mat->rnorms, LTERM);

      /* FIX: allocation message said "gk_csr_ComputeSums" (copy-paste) */
      norms = mat->rnorms = gk_fsmalloc(n, 0, "gk_csr_ComputeNorms: norms");
      break;
    case GK_CSR_COL:
      n   = mat->ncols;
      ptr = mat->colptr;
      val = mat->colval;

      if (mat->cnorms) gk_free((void **)&mat->cnorms, LTERM);

      norms = mat->cnorms = gk_fsmalloc(n, 0, "gk_csr_ComputeNorms: norms");
      break;
    default:
      gk_errexit(SIGERR, "Invalid norm type of %d.\n", what);
      return;
  }

  if (val) {
    /* 2-norm of each row/column's values */
    #pragma omp parallel for if (ptr[n] > OMPMINOPS) schedule(static)
    for (i=0; i<n; i++)
      norms[i] = sqrt(gk_fdot(ptr[i+1]-ptr[i], val+ptr[i], 1, val+ptr[i], 1));
  }
  else {
    /* binary matrix: the 2-norm is sqrt(nnz of the row/column) */
    #pragma omp parallel for if (ptr[n] > OMPMINOPS) schedule(static)
    for (i=0; i<n; i++)
      norms[i] = sqrt(ptr[i+1]-ptr[i]);
  }
}
/*************************************************************************/
/*! Computes the squared of the norms of the rows/columns
\param mat the matrix itself,
\param what is either GK_CSR_ROW or GK_CSR_COL indicating which
squared norms to compute.
\note If the rowval/colval arrays are NULL, the matrix is assumed
to be binary and the norms are computed accordingly.
*/
/**************************************************************************/
void gk_csr_ComputeSquaredNorms(gk_csr_t *mat, int what)
{
  ssize_t i;
  int n;
  ssize_t *ptr;
  float *val, *norms;

  switch (what) {
    case GK_CSR_ROW:
      n   = mat->nrows;
      ptr = mat->rowptr;
      val = mat->rowval;

      /* replace any previously cached row norms (shares rnorms storage
         with gk_csr_ComputeNorms) */
      if (mat->rnorms) gk_free((void **)&mat->rnorms, LTERM);

      /* FIX: allocation message said "gk_csr_ComputeSums" (copy-paste) */
      norms = mat->rnorms = gk_fsmalloc(n, 0, "gk_csr_ComputeSquaredNorms: norms");
      break;
    case GK_CSR_COL:
      n   = mat->ncols;
      ptr = mat->colptr;
      val = mat->colval;

      if (mat->cnorms) gk_free((void **)&mat->cnorms, LTERM);

      norms = mat->cnorms = gk_fsmalloc(n, 0, "gk_csr_ComputeSquaredNorms: norms");
      break;
    default:
      gk_errexit(SIGERR, "Invalid norm type of %d.\n", what);
      return;
  }

  if (val) {
    /* squared 2-norm: dot product of the row/column with itself */
    #pragma omp parallel for if (ptr[n] > OMPMINOPS) schedule(static)
    for (i=0; i<n; i++)
      norms[i] = gk_fdot(ptr[i+1]-ptr[i], val+ptr[i], 1, val+ptr[i], 1);
  }
  else {
    /* binary matrix: the squared 2-norm is the nonzero count */
    #pragma omp parallel for if (ptr[n] > OMPMINOPS) schedule(static)
    for (i=0; i<n; i++)
      norms[i] = ptr[i+1]-ptr[i];
  }
}
/*************************************************************************/
/*! Returns a new matrix whose rows/columns are shuffled.
\param mat the matrix to be shuffled,
\param what indicates if the rows (GK_CSR_ROW), columns (GK_CSR_COL),
or both (GK_CSR_ROWCOL) will be shuffled,
\param symmetric indicates if the same shuffling will be applied to
both rows and columns. This is valid with nrows==ncols and
GK_CSR_ROWCOL was specified.
\returns the shuffled matrix.
The input matrix is not modified.
*/
/**************************************************************************/
gk_csr_t *gk_csr_Shuffle(gk_csr_t *mat, int what, int symmetric)
{
  ssize_t i, j;
  int nrows, ncols;
  ssize_t *rowptr, *nrowptr;
  int *rowind, *nrowind;
  int *rperm, *cperm;
  float *rowval, *nrowval;
  gk_csr_t *nmat;

  /* a symmetric row+column shuffle only makes sense for square matrices */
  if (what == GK_CSR_ROWCOL && symmetric && mat->nrows != mat->ncols)
    gk_errexit(SIGERR, "The matrix is not square for a symmetric rowcol shuffling.\n");

  nrows  = mat->nrows;
  ncols  = mat->ncols;
  rowptr = mat->rowptr;
  rowind = mat->rowind;
  rowval = mat->rowval;

  /* rperm[i]/cperm[i] hold the NEW position of old row/column i
     (they index into the shuffled matrix below) */
  rperm = gk_imalloc(nrows, "gk_csr_Shuffle: rperm");
  cperm = gk_imalloc(ncols, "gk_csr_Shuffle: cperm");

  switch (what) {
    case GK_CSR_ROW:
      /* shuffle the rows; columns keep their identity */
      gk_RandomPermute(nrows, rperm, 1);
      for (i=0; i<20; i++)   /* extra passes to further mix the permutation */
        gk_RandomPermute(nrows, rperm, 0);
      for (i=0; i<ncols; i++)
        cperm[i] = i;
      break;

    case GK_CSR_COL:
      /* shuffle the columns; rows keep their identity */
      gk_RandomPermute(ncols, cperm, 1);
      for (i=0; i<20; i++)
        gk_RandomPermute(ncols, cperm, 0);
      for (i=0; i<nrows; i++)
        rperm[i] = i;
      break;

    case GK_CSR_ROWCOL:
      gk_RandomPermute(nrows, rperm, 1);
      for (i=0; i<20; i++)
        gk_RandomPermute(nrows, rperm, 0);
      if (symmetric)
        /* apply the same permutation to the columns */
        gk_icopy(nrows, rperm, cperm);
      else {
        gk_RandomPermute(ncols, cperm, 1);
        for (i=0; i<20; i++)
          gk_RandomPermute(ncols, cperm, 0);
      }
      break;

    default:
      gk_free((void **)&rperm, &cperm, LTERM);
      gk_errexit(SIGERR, "Unknown shuffling type of %d\n", what);
      return NULL;
  }

  nmat = gk_csr_Create();

  nmat->nrows = nrows;
  nmat->ncols = ncols;

  nrowptr = nmat->rowptr = gk_zmalloc(nrows+1, "gk_csr_Shuffle: nrowptr");
  nrowind = nmat->rowind = gk_imalloc(rowptr[nrows], "gk_csr_Shuffle: nrowind");
  nrowval = nmat->rowval = (rowval ? gk_fmalloc(rowptr[nrows], "gk_csr_Shuffle: nrowval") : NULL) ;

  /* record each shuffled row's length, then turn counts into offsets.
     Since rperm is a permutation, every entry of nrowptr[0..nrows-1] is
     written before MAKECSR reads it. */
  for (i=0; i<nrows; i++)
    nrowptr[rperm[i]] = rowptr[i+1]-rowptr[i];
  MAKECSR(i, nrows, nrowptr);

  /* scatter old row i into shuffled row rperm[i], remapping columns;
     nrowptr is advanced as entries are placed */
  for (i=0; i<nrows; i++) {
    for (j=rowptr[i]; j<rowptr[i+1]; j++) {
      nrowind[nrowptr[rperm[i]]] = cperm[rowind[j]];
      if (nrowval)
        nrowval[nrowptr[rperm[i]]] = rowval[j];
      nrowptr[rperm[i]]++;
    }
  }
  /* undo the pointer advancement done by the scatter above */
  SHIFTCSR(i, nrows, nrowptr);

  gk_free((void **)&rperm, &cperm, LTERM);

  return nmat;
}
/*************************************************************************/
/*! Returns the transpose of the matrix.
\param mat the matrix to be transposed,
\returns the transposed matrix.
The input matrix is not modified.
*/
/**************************************************************************/
gk_csr_t *gk_csr_Transpose(gk_csr_t *mat)
{
  ssize_t *colptr;
  int32_t *colind;
  float *colval;
  gk_csr_t *nmat;

  /* (FIX: removed unused locals nrows/ncols)
     Stash any pre-existing column view so that gk_csr_CreateIndex can
     build a fresh one without clobbering it. */
  colptr = mat->colptr;
  colind = mat->colind;
  colval = mat->colval;

  mat->colptr = NULL;
  mat->colind = NULL;
  mat->colval = NULL;

  /* the column index of mat is exactly the row structure of mat^T */
  gk_csr_CreateIndex(mat, GK_CSR_COL);

  nmat = gk_csr_Create();

  nmat->nrows = mat->ncols;
  nmat->ncols = mat->nrows;

  /* transfer ownership of the freshly built arrays to the transpose */
  nmat->rowptr = mat->colptr;
  nmat->rowind = mat->colind;
  nmat->rowval = mat->colval;

  /* restore the input matrix's original column view */
  mat->colptr = colptr;
  mat->colind = colind;
  mat->colval = colval;

  return nmat;
}
/*************************************************************************/
/*! Computes the similarity between two rows/columns
\param mat the matrix itself. The routine assumes that the indices
are sorted in increasing order.
\param i1 is the first row/column,
\param i2 is the second row/column,
\param what is either GK_CSR_ROW or GK_CSR_COL indicating the type of
objects between the similarity will be computed,
\param simtype is the type of similarity and is one of GK_CSR_COS,
GK_CSR_JAC, GK_CSR_MIN, GK_CSR_AMIN
\returns the similarity between the two rows/columns.
*/
/**************************************************************************/
float gk_csr_ComputeSimilarity(gk_csr_t *mat, int i1, int i2, int what,
          int simtype)
{
  int nind1, nind2;
  int *ind1, *ind2;
  float *val1, *val2, stat1, stat2, sim;

  /* select the row- or column-based view of the two objects */
  switch (what) {
    case GK_CSR_ROW:
      if (!mat->rowptr)
        gk_errexit(SIGERR, "Row-based view of the matrix does not exists.\n");
      nind1 = mat->rowptr[i1+1]-mat->rowptr[i1];
      nind2 = mat->rowptr[i2+1]-mat->rowptr[i2];
      ind1  = mat->rowind + mat->rowptr[i1];
      ind2  = mat->rowind + mat->rowptr[i2];
      val1  = mat->rowval + mat->rowptr[i1];
      val2  = mat->rowval + mat->rowptr[i2];
      break;

    case GK_CSR_COL:
      if (!mat->colptr)
        gk_errexit(SIGERR, "Column-based view of the matrix does not exists.\n");
      nind1 = mat->colptr[i1+1]-mat->colptr[i1];
      nind2 = mat->colptr[i2+1]-mat->colptr[i2];
      ind1  = mat->colind + mat->colptr[i1];
      ind2  = mat->colind + mat->colptr[i2];
      val1  = mat->colval + mat->colptr[i1];
      val2  = mat->colval + mat->colptr[i2];
      break;

    default:
      gk_errexit(SIGERR, "Invalid index type of %d.\n", what);
      return 0.0;
  }

  /* i1/i2 are reused below as merge cursors into the two sorted index lists.
     BUGFIX: the merge loops now run until BOTH lists are exhausted
     ('||' instead of the original '&&').  With '&&', the i1==nind1 and
     i2==nind2 branches were unreachable dead code and the unmatched tail
     of the longer list never contributed to stat1/stat2, skewing the
     normalization terms of every similarity measure. */
  switch (simtype) {
    case GK_CSR_COS:
    case GK_CSR_JAC:
      sim = stat1 = stat2 = 0.0;
      i1 = i2 = 0;
      while (i1<nind1 || i2<nind2) {
        if (i1 == nind1) {          /* list 1 exhausted; drain list 2 */
          stat2 += val2[i2]*val2[i2];
          i2++;
        }
        else if (i2 == nind2) {     /* list 2 exhausted; drain list 1 */
          stat1 += val1[i1]*val1[i1];
          i1++;
        }
        else if (ind1[i1] < ind2[i2]) {
          stat1 += val1[i1]*val1[i1];
          i1++;
        }
        else if (ind1[i1] > ind2[i2]) {
          stat2 += val2[i2]*val2[i2];
          i2++;
        }
        else {                      /* common index: add to the dot-product */
          sim   += val1[i1]*val2[i2];
          stat1 += val1[i1]*val1[i1];
          stat2 += val2[i2]*val2[i2];
          i1++;
          i2++;
        }
      }
      if (simtype == GK_CSR_COS)
        /* cosine: dot / (||v1|| * ||v2||) */
        sim = (stat1*stat2 > 0.0 ? sim/sqrt(stat1*stat2) : 0.0);
      else
        /* extended Jaccard: dot / (||v1||^2 + ||v2||^2 - dot) */
        sim = (stat1+stat2-sim > 0.0 ? sim/(stat1+stat2-sim) : 0.0);
      break;

    case GK_CSR_MIN:
      sim = stat1 = stat2 = 0.0;
      i1 = i2 = 0;
      while (i1<nind1 || i2<nind2) {
        if (i1 == nind1) {
          stat2 += val2[i2];
          i2++;
        }
        else if (i2 == nind2) {
          stat1 += val1[i1];
          i1++;
        }
        else if (ind1[i1] < ind2[i2]) {
          stat1 += val1[i1];
          i1++;
        }
        else if (ind1[i1] > ind2[i2]) {
          stat2 += val2[i2];
          i2++;
        }
        else {
          sim   += gk_min(val1[i1],val2[i2]);
          stat1 += val1[i1];
          stat2 += val2[i2];
          i1++;
          i2++;
        }
      }
      /* min-based similarity: sum(min) / (sum1 + sum2 - sum(min)) */
      sim = (stat1+stat2-sim > 0.0 ? sim/(stat1+stat2-sim) : 0.0);
      break;

    case GK_CSR_AMIN:
      sim = stat1 = stat2 = 0.0;
      i1 = i2 = 0;
      while (i1<nind1 || i2<nind2) {
        if (i1 == nind1) {
          stat2 += val2[i2];
          i2++;
        }
        else if (i2 == nind2) {
          stat1 += val1[i1];
          i1++;
        }
        else if (ind1[i1] < ind2[i2]) {
          stat1 += val1[i1];
          i1++;
        }
        else if (ind1[i1] > ind2[i2]) {
          stat2 += val2[i2];
          i2++;
        }
        else {
          sim   += gk_min(val1[i1],val2[i2]);
          stat1 += val1[i1];
          stat2 += val2[i2];
          i1++;
          i2++;
        }
      }
      /* asymmetric min similarity: normalized by the first object only */
      sim = (stat1 > 0.0 ? sim/stat1 : 0.0);
      break;

    default:
      gk_errexit(SIGERR, "Unknown similarity measure %d\n", simtype);
      return -1;
  }

  return sim;
}
/*************************************************************************/
/*! Computes the similarity between two rows/columns
\param mat_a the first matrix. The routine assumes that the indices
are sorted in increasing order.
\param mat_b the second matrix. The routine assumes that the indices
are sorted in increasing order.
\param i1 is the row/column from the first matrix (mat_a),
\param i2 is the row/column from the second matrix (mat_b),
\param what is either GK_CSR_ROW or GK_CSR_COL indicating the type of
objects between the similarity will be computed,
\param simtype is the type of similarity and is one of GK_CSR_COS,
GK_CSR_JAC, GK_CSR_MIN, GK_CSR_AMIN
\returns the similarity between the two rows/columns.
*/
/**************************************************************************/
float gk_csr_ComputePairSimilarity(gk_csr_t *mat_a, gk_csr_t *mat_b,
          int i1, int i2, int what, int simtype)
{
  int nind1, nind2;
  int *ind1, *ind2;
  float *val1, *val2, stat1, stat2, sim;

  /* select the row- or column-based view of the two objects */
  switch (what) {
    case GK_CSR_ROW:
      if (!mat_a->rowptr || !mat_b->rowptr)
        gk_errexit(SIGERR, "Row-based view of the matrix does not exists.\n");
      nind1 = mat_a->rowptr[i1+1]-mat_a->rowptr[i1];
      nind2 = mat_b->rowptr[i2+1]-mat_b->rowptr[i2];
      ind1  = mat_a->rowind + mat_a->rowptr[i1];
      ind2  = mat_b->rowind + mat_b->rowptr[i2];
      val1  = mat_a->rowval + mat_a->rowptr[i1];
      val2  = mat_b->rowval + mat_b->rowptr[i2];
      break;

    case GK_CSR_COL:
      if (!mat_a->colptr || !mat_b->colptr)
        gk_errexit(SIGERR, "Column-based view of the matrix does not exists.\n");
      nind1 = mat_a->colptr[i1+1]-mat_a->colptr[i1];
      nind2 = mat_b->colptr[i2+1]-mat_b->colptr[i2];
      ind1  = mat_a->colind + mat_a->colptr[i1];
      ind2  = mat_b->colind + mat_b->colptr[i2];
      val1  = mat_a->colval + mat_a->colptr[i1];
      val2  = mat_b->colval + mat_b->colptr[i2];
      break;

    default:
      gk_errexit(SIGERR, "Invalid index type of %d.\n", what);
      return 0.0;
  }

  /* i1/i2 are reused below as merge cursors into the two sorted index lists.
     BUGFIX: the merge loops now run until BOTH lists are exhausted
     ('||' instead of the original '&&'); with '&&' the i1==nind1 and
     i2==nind2 branches were dead code and unmatched tails never entered
     stat1/stat2 (same fix as in gk_csr_ComputeSimilarity). */
  switch (simtype) {
    case GK_CSR_COS:
    case GK_CSR_JAC:
      sim = stat1 = stat2 = 0.0;
      i1 = i2 = 0;
      while (i1<nind1 || i2<nind2) {
        if (i1 == nind1) {          /* list 1 exhausted; drain list 2 */
          stat2 += val2[i2]*val2[i2];
          i2++;
        }
        else if (i2 == nind2) {     /* list 2 exhausted; drain list 1 */
          stat1 += val1[i1]*val1[i1];
          i1++;
        }
        else if (ind1[i1] < ind2[i2]) {
          stat1 += val1[i1]*val1[i1];
          i1++;
        }
        else if (ind1[i1] > ind2[i2]) {
          stat2 += val2[i2]*val2[i2];
          i2++;
        }
        else {                      /* common index: add to the dot-product */
          sim   += val1[i1]*val2[i2];
          stat1 += val1[i1]*val1[i1];
          stat2 += val2[i2]*val2[i2];
          i1++;
          i2++;
        }
      }
      if (simtype == GK_CSR_COS)
        /* cosine: dot / (||v1|| * ||v2||) */
        sim = (stat1*stat2 > 0.0 ? sim/sqrt(stat1*stat2) : 0.0);
      else
        /* extended Jaccard: dot / (||v1||^2 + ||v2||^2 - dot) */
        sim = (stat1+stat2-sim > 0.0 ? sim/(stat1+stat2-sim) : 0.0);
      break;

    case GK_CSR_MIN:
      sim = stat1 = stat2 = 0.0;
      i1 = i2 = 0;
      while (i1<nind1 || i2<nind2) {
        if (i1 == nind1) {
          stat2 += val2[i2];
          i2++;
        }
        else if (i2 == nind2) {
          stat1 += val1[i1];
          i1++;
        }
        else if (ind1[i1] < ind2[i2]) {
          stat1 += val1[i1];
          i1++;
        }
        else if (ind1[i1] > ind2[i2]) {
          stat2 += val2[i2];
          i2++;
        }
        else {
          sim   += gk_min(val1[i1],val2[i2]);
          stat1 += val1[i1];
          stat2 += val2[i2];
          i1++;
          i2++;
        }
      }
      /* min-based similarity: sum(min) / (sum1 + sum2 - sum(min)) */
      sim = (stat1+stat2-sim > 0.0 ? sim/(stat1+stat2-sim) : 0.0);
      break;

    case GK_CSR_AMIN:
      sim = stat1 = stat2 = 0.0;
      i1 = i2 = 0;
      while (i1<nind1 || i2<nind2) {
        if (i1 == nind1) {
          stat2 += val2[i2];
          i2++;
        }
        else if (i2 == nind2) {
          stat1 += val1[i1];
          i1++;
        }
        else if (ind1[i1] < ind2[i2]) {
          stat1 += val1[i1];
          i1++;
        }
        else if (ind1[i1] > ind2[i2]) {
          stat2 += val2[i2];
          i2++;
        }
        else {
          sim   += gk_min(val1[i1],val2[i2]);
          stat1 += val1[i1];
          stat2 += val2[i2];
          i1++;
          i2++;
        }
      }
      /* asymmetric min similarity: normalized by the first object only */
      sim = (stat1 > 0.0 ? sim/stat1 : 0.0);
      break;

    default:
      gk_errexit(SIGERR, "Unknown similarity measure %d\n", simtype);
      return -1;
  }

  return sim;
}
/*************************************************************************/
/*! Finds the n most similar rows (neighbors) to the query.
\param mat the matrix itself
\param nqterms is the number of columns in the query
\param qind is the list of query columns
\param qval is the list of corresponding query weights
\param simtype is the type of similarity and is one of GK_CSR_DOTP,
GK_CSR_COS, GK_CSR_JAC, GK_CSR_MIN, GK_CSR_AMIN. In case of
GK_CSR_COS, the rows and the query are assumed to be of unit
length.
\param nsim is the maximum number of requested most similar rows.
If -1 is provided, then everything is returned unsorted.
\param minsim is the minimum similarity of the requested most
similar rows
\param hits is the result set. This array should be at least
of length nsim.
\param i_marker is an array of size equal to the number of rows
whose values are initialized to -1. If NULL is provided
then this array is allocated and freed internally.
\param i_cand is an array of size equal to the number of rows.
If NULL is provided then this array is allocated and freed
internally.
\returns The number of identified most similar rows, which can be
smaller than the requested number of nnbrs in those cases
in which there are no sufficiently many neighbors.
*/
/**************************************************************************/
int gk_csr_GetSimilarRows(gk_csr_t *mat, int nqterms, int *qind,
          float *qval, int simtype, int nsim, float minsim, gk_fkv_t *hits,
          int *i_marker, gk_fkv_t *i_cand)
{
  ssize_t i, ii, j, k;
  int nrows, ncols, ncand;
  ssize_t *colptr;
  int *colind, *marker;
  float *colval, *rnorms, mynorm, *rsums, mysum;
  gk_fkv_t *cand;

  /* an empty query matches nothing */
  if (nqterms == 0)
    return 0;

  nrows = mat->nrows;
  ncols = mat->ncols;

  /* the inverted (column) index must already exist */
  GKASSERT((colptr = mat->colptr) != NULL);
  GKASSERT((colind = mat->colind) != NULL);
  GKASSERT((colval = mat->colval) != NULL);

  /* marker[r] = position of row r in cand[] while r is a candidate, -1
     otherwise; use caller-supplied workspace when provided */
  marker = (i_marker ? i_marker : gk_ismalloc(nrows, -1, "gk_csr_SimilarRows: marker"));
  cand   = (i_cand   ? i_cand   : gk_fkvmalloc(nrows, "gk_csr_SimilarRows: cand"));

  switch (simtype) {
    case GK_CSR_DOTP:
    case GK_CSR_COS:
      /* accumulate dot products of the query with every row sharing a
         column.  NOTE: no norm division here -- for GK_CSR_COS the rows
         and the query are assumed to be of unit length (see header doc). */
      for (ncand=0, ii=0; ii<nqterms; ii++) {
        i = qind[ii];
        if (i < ncols) {    /* silently skip out-of-range query columns */
          for (j=colptr[i]; j<colptr[i+1]; j++) {
            k = colind[j];
            if (marker[k] == -1) {   /* first time row k is touched */
              cand[ncand].val = k;
              cand[ncand].key = 0;
              marker[k]       = ncand++;
            }
            cand[marker[k]].key += colval[j]*qval[ii];
          }
        }
      }
      break;

    case GK_CSR_JAC:
      /* same dot-product accumulation as above... */
      for (ncand=0, ii=0; ii<nqterms; ii++) {
        i = qind[ii];
        if (i < ncols) {
          for (j=colptr[i]; j<colptr[i+1]; j++) {
            k = colind[j];
            if (marker[k] == -1) {
              cand[ncand].val = k;
              cand[ncand].key = 0;
              marker[k]       = ncand++;
            }
            cand[marker[k]].key += colval[j]*qval[ii];
          }
        }
      }

      /* ...then apply the extended-Jaccard denominator.
         NOTE(review): this assumes mat->rnorms holds *squared* row norms
         (i.e. gk_csr_ComputeSquaredNorms was used), since mynorm is the
         squared query norm -- confirm with callers. */
      GKASSERT((rnorms = mat->rnorms) != NULL);
      mynorm = gk_fdot(nqterms, qval, 1, qval, 1);

      for (i=0; i<ncand; i++)
        cand[i].key = cand[i].key/(rnorms[cand[i].val]+mynorm-cand[i].key);
      break;

    case GK_CSR_MIN:
      /* accumulate sum(min(row, query)) over shared columns */
      for (ncand=0, ii=0; ii<nqterms; ii++) {
        i = qind[ii];
        if (i < ncols) {
          for (j=colptr[i]; j<colptr[i+1]; j++) {
            k = colind[j];
            if (marker[k] == -1) {
              cand[ncand].val = k;
              cand[ncand].key = 0;
              marker[k]       = ncand++;
            }
            cand[marker[k]].key += gk_min(colval[j], qval[ii]);
          }
        }
      }

      /* normalize by sum(row) + sum(query) - sum(min); requires cached
         row sums (gk_csr_ComputeSums) */
      GKASSERT((rsums = mat->rsums) != NULL);
      mysum = gk_fsum(nqterms, qval, 1);

      for (i=0; i<ncand; i++)
        cand[i].key = cand[i].key/(rsums[cand[i].val]+mysum-cand[i].key);
      break;

    /* Asymmetric MIN similarity */
    case GK_CSR_AMIN:
      for (ncand=0, ii=0; ii<nqterms; ii++) {
        i = qind[ii];
        if (i < ncols) {
          for (j=colptr[i]; j<colptr[i+1]; j++) {
            k = colind[j];
            if (marker[k] == -1) {
              cand[ncand].val = k;
              cand[ncand].key = 0;
              marker[k]       = ncand++;
            }
            cand[marker[k]].key += gk_min(colval[j], qval[ii]);
          }
        }
      }

      /* normalize by the query mass only */
      mysum = gk_fsum(nqterms, qval, 1);

      for (i=0; i<ncand; i++)
        cand[i].key = cand[i].key/mysum;
      break;

    default:
      gk_errexit(SIGERR, "Unknown similarity measure %d\n", simtype);
      return -1;
  }

  /* go and prune the hits that are below minsim, resetting marker[] so
     the workspace can be reused by the caller */
  for (j=0, i=0; i<ncand; i++) {
    marker[cand[i].val] = -1;
    if (cand[i].key >= minsim)
      cand[j++] = cand[i];
  }
  ncand = j;

  if (nsim == -1 || nsim >= ncand) {
    nsim = ncand;   /* return everything, unsorted */
  }
  else {
    nsim = gk_min(nsim, ncand);   /* redundant given the branch above; kept as-is */
    /* select the top-nsim candidates, then sort them in decreasing order */
    gk_dfkvkselect(ncand, nsim, cand);
    gk_fkvsortd(nsim, cand);
  }

  gk_fkvcopy(nsim, cand, hits);

  /* free only the workspace we allocated ourselves */
  if (i_marker == NULL)
    gk_free((void **)&marker, LTERM);
  if (i_cand == NULL)
    gk_free((void **)&cand, LTERM);

  return nsim;
}
/*************************************************************************/
/*! Returns a symmetric version of a square matrix. The symmetric version
is constructed by applying an A op A^T operation, where op is one of
GK_CSR_SYM_SUM, GK_CSR_SYM_MIN, GK_CSR_SYM_MAX, GK_CSR_SYM_AVG.
\param mat the matrix to be symmetrized,
\param op indicates the operation to be performed. The possible values are
GK_CSR_SYM_SUM, GK_CSR_SYM_MIN, GK_CSR_SYM_MAX, and GK_CSR_SYM_AVG.
\returns the symmetrized matrix consisting only of its row-based structure.
The input matrix is not modified.
*/
/**************************************************************************/
gk_csr_t *gk_csr_MakeSymmetric(gk_csr_t *mat, int op)
{
  ssize_t i, j, k, nnz;
  int nrows, nadj, hasvals;
  ssize_t *rowptr, *colptr, *nrowptr;
  int *rowind, *colind, *nrowind, *marker, *ids;
  float *rowval=NULL, *colval=NULL, *nrowval=NULL, *wgts=NULL;
  gk_csr_t *nmat;

  if (mat->nrows != mat->ncols) {
    fprintf(stderr, "gk_csr_MakeSymmetric: The matrix needs to be square.\n");
    return NULL;
  }

  hasvals = (mat->rowval != NULL);

  nrows  = mat->nrows;
  rowptr = mat->rowptr;
  rowind = mat->rowind;
  if (hasvals)
    rowval = mat->rowval;

  /* create the column view for efficient processing */
  colptr = gk_zsmalloc(nrows+1, 0, "colptr");
  colind = gk_i32malloc(rowptr[nrows], "colind");
  if (hasvals)
    colval = gk_fmalloc(rowptr[nrows], "colval");

  /* count entries per column, convert counts to offsets... */
  for (i=0; i<nrows; i++) {
    for (j=rowptr[i]; j<rowptr[i+1]; j++)
      colptr[rowind[j]]++;
  }
  MAKECSR(i, nrows, colptr);

  /* ...scatter into the column structure (advancing colptr)... */
  for (i=0; i<nrows; i++) {
    for (j=rowptr[i]; j<rowptr[i+1]; j++) {
      colind[colptr[rowind[j]]] = i;
      if (hasvals)
        colval[colptr[rowind[j]]] = rowval[j];
      colptr[rowind[j]]++;
    }
  }
  /* ...and undo the pointer advancement */
  SHIFTCSR(i, nrows, colptr);

  nmat = gk_csr_Create();

  nmat->nrows = mat->nrows;
  nmat->ncols = mat->ncols;

  /* 2*nnz is an upper bound on the size of the symmetrized structure */
  nrowptr = nmat->rowptr = gk_zmalloc(nrows+1, "gk_csr_MakeSymmetric: nrowptr");
  nrowind = nmat->rowind = gk_imalloc(2*rowptr[nrows], "gk_csr_MakeSymmetric: nrowind");
  if (hasvals)
    nrowval = nmat->rowval = gk_fmalloc(2*rowptr[nrows], "gk_csr_MakeSymmetric: nrowval");

  /* marker[v] = position of v in ids[]/wgts[] while v is an unmatched
     out-neighbor of the current row; -1 otherwise */
  marker = gk_ismalloc(nrows, -1, "marker");
  ids    = gk_imalloc(nrows, "ids");
  if (hasvals)
    wgts = gk_fmalloc(nrows, "wgts");

  nrowptr[0] = nnz = 0;
  for (i=0; i<nrows; i++) {
    nadj = 0;
    /* out-edges: tentatively add every A(i,*) entry */
    for (j=rowptr[i]; j<rowptr[i+1]; j++) {
      ids[nadj] = rowind[j];
      if (hasvals)
        wgts[nadj] = (op == GK_CSR_SYM_AVG ? 0.5*rowval[j] : rowval[j]);
      marker[rowind[j]] = nadj++;
    }

    /* in-edges: merge every A(*,i) entry with the out-edges */
    for (j=colptr[i]; j<colptr[i+1]; j++) {
      if (marker[colind[j]] == -1) {
        /* in-edge only: kept unless op is MIN (intersection semantics) */
        if (op != GK_CSR_SYM_MIN) {
          ids[nadj] = colind[j];
          if (hasvals)
            wgts[nadj] = (op == GK_CSR_SYM_AVG ? 0.5*colval[j] : colval[j]);
          nadj++;
        }
      }
      else {
        /* edge present in both directions: combine the two weights */
        if (hasvals) {
          switch (op) {
            case GK_CSR_SYM_MAX:
              wgts[marker[colind[j]]] = gk_max(colval[j], wgts[marker[colind[j]]]);
              break;
            case GK_CSR_SYM_MIN:
              wgts[marker[colind[j]]] = gk_min(colval[j], wgts[marker[colind[j]]]);
              break;
            case GK_CSR_SYM_SUM:
              wgts[marker[colind[j]]] += colval[j];
              break;
            case GK_CSR_SYM_AVG:
              /* out-weight was pre-halved above, so this averages the
                 halved value with the raw in-weight */
              wgts[marker[colind[j]]] = 0.5*(wgts[marker[colind[j]]] + colval[j]);
              break;
            default:
              errexit("Unsupported op for MakeSymmetric!\n");
          }
        }
        marker[colind[j]] = -1;   /* matched; clear for the next row */
      }
    }

    /* go over out edges again to resolve any edges that were not found in the in
     * edges */
    for (j=rowptr[i]; j<rowptr[i+1]; j++) {
      if (marker[rowind[j]] != -1) {
        /* out-edge only: for MIN (intersection) drop it via the -1 sentinel */
        if (op == GK_CSR_SYM_MIN)
          ids[marker[rowind[j]]] = -1;
        marker[rowind[j]] = -1;
      }
    }

    /* put the non '-1' entries in ids[] into i's row */
    for (j=0; j<nadj; j++) {
      if (ids[j] != -1) {
        nrowind[nnz] = ids[j];
        if (hasvals)
          nrowval[nnz] = wgts[j];
        nnz++;
      }
    }
    nrowptr[i+1] = nnz;
  }

  gk_free((void **)&colptr, &colind, &colval, &marker, &ids, &wgts, LTERM);

  return nmat;
}
/*************************************************************************/
/*! This function finds the connected components in a graph stored in
CSR format.
\param mat is the graph structure in CSR format
\param cptr is the ptr structure of the CSR representation of the
components. The length of this vector must be mat->nrows+1.
\param cind is the indices structure of the CSR representation of
the components. The length of this vector must be mat->nrows.
\param cids is an array that stores the component # of each vertex
of the graph. The length of this vector must be mat->nrows.
\returns the number of components that it found.
\note The cptr, cind, and cids parameters can be NULL, in which case
only the number of connected components is returned.
*/
/*************************************************************************/
int gk_csr_FindConnectedComponents(gk_csr_t *mat, int32_t *cptr, int32_t *cind,
          int32_t *cids)
{
  /* FIX: removed the unused locals ii, jj, and mustfree_where */
  ssize_t i, j, k, nvtxs, first, last, ntodo, ncmps;
  ssize_t *xadj;
  int32_t *adjncy, *pos, *todo;
  int32_t mustfree_ccsr=0;

  if (mat->nrows != mat->ncols) {
    fprintf(stderr, "gk_csr_FindComponents: The matrix needs to be square.\n");
    return -1;
  }

  nvtxs  = mat->nrows;
  xadj   = mat->rowptr;
  adjncy = mat->rowind;

  /* Deal with NULL supplied cptr/cind vectors.
     NOTE(review): cind is assumed to be NULL whenever cptr is NULL; a
     caller passing cptr==NULL with a non-NULL cind gets it ignored --
     confirm against callers. */
  if (cptr == NULL) {
    cptr = gk_i32malloc(nvtxs+1, "gk_csr_FindComponents: cptr");
    cind = gk_i32malloc(nvtxs, "gk_csr_FindComponents: cind");
    mustfree_ccsr = 1;
  }

  /* The list of vertices that have not been touched yet.
     The valid entries are from [0..ntodo). */
  todo = gk_i32incset(nvtxs, 0, gk_i32malloc(nvtxs, "gk_csr_FindComponents: todo"));

  /* For a vertex that has not been visited, pos[i] is the position in the
     todo list that this vertex is stored.
     If a vertex has been visited, pos[i] = -1. */
  pos = gk_i32incset(nvtxs, 0, gk_i32malloc(nvtxs, "gk_csr_FindComponents: pos"));

  /* Find the connected components via BFS over the todo list */
  ncmps = -1;
  ntodo = nvtxs;     /* All vertices have not been visited */
  first = last = 0;  /* Point to the first and last vertices that have been touched
                        but not explored.
                        These vertices are stored in cind[first]...cind[last-1]. */

  while (first < last || ntodo > 0) {
    if (first == last) { /* Find another starting vertex */
      cptr[++ncmps] = first;  /* Mark the end of the current CC */

      /* put the first vertex in the todo list as the start of the new CC */
      ASSERT(pos[todo[0]] != -1);
      cind[last++] = todo[0];

      pos[todo[0]] = -1;
      todo[0] = todo[--ntodo];
      pos[todo[0]] = 0;
    }

    i = cind[first++];  /* Get the first visited but unexplored vertex */

    for (j=xadj[i]; j<xadj[i+1]; j++) {
      k = adjncy[j];
      if (pos[k] != -1) {   /* neighbor k not yet visited */
        cind[last++] = k;

        /* Remove k from the todo list and put the last item in the todo
           list at the position that k was so that the todo list will be
           consecutive. The pos[] array is updated accordingly to keep track
           the location of the vertices in the todo[] list. */
        todo[pos[k]] = todo[--ntodo];
        pos[todo[pos[k]]] = pos[k];
        pos[k] = -1;
      }
    }
  }
  cptr[++ncmps] = first;   /* close the last component */

  /* see if we need to return cids */
  if (cids != NULL) {
    for (i=0; i<ncmps; i++) {
      for (j=cptr[i]; j<cptr[i+1]; j++)
        cids[cind[j]] = i;
    }
  }

  if (mustfree_ccsr)
    gk_free((void **)&cptr, &cind, LTERM);

  gk_free((void **)&pos, &todo, LTERM);

  return (int) ncmps;
}
/*************************************************************************/
/*! Returns a matrix that has been reordered according to the provided
row/column permutation. The matrix is required to be square and the same
permutation is applied to both rows and columns.
\param[IN] mat is the matrix to be re-ordered.
\param[IN] perm is the new ordering of the rows & columns
\param[IN] iperm is the original ordering of the re-ordered matrix's rows & columns
\returns the newly created reordered matrix.
\note Either perm or iperm can be NULL but not both.
*/
/**************************************************************************/
/* Builds and returns the reordered matrix nmat such that
   nmat(perm[i], perm[j]) == mat(i, j); row v of nmat is row iperm[v] of mat
   with its column indices renumbered through perm.  Returns NULL if mat is
   not square or both perm and iperm are NULL.  The caller owns the result. */
gk_csr_t *gk_csr_ReorderSymmetric(gk_csr_t *mat, int32_t *perm, int32_t *iperm)
{
  ssize_t j, jj;
  ssize_t *rowptr, *nrowptr;
  int i, k, u, v, nrows;
  int freeperm=0, freeiperm=0;  /* set when perm/iperm are derived locally below */
  int32_t *rowind, *nrowind;
  float *rowval, *nrowval;
  gk_csr_t *nmat;

  /* the same permutation is applied to both rows and columns */
  if (mat->nrows != mat->ncols) {
    fprintf(stderr, "gk_csr_ReorderSymmetric: The matrix needs to be square.\n");
    return NULL;
  }

  /* at least one of the two permutation vectors is required */
  if (perm == NULL && iperm == NULL)
    return NULL;

  nrows  = mat->nrows;
  rowptr = mat->rowptr;
  rowind = mat->rowind;
  rowval = mat->rowval;
  /* NOTE(review): rowval is dereferenced unconditionally below — assumes
     mat->rowval is non-NULL; confirm for pattern-only matrices. */

  nmat = gk_csr_Create();

  nmat->nrows = nrows;
  nmat->ncols = nrows;

  /* a reordering preserves the nonzero count, so rowptr[nrows] sizes both
     the new rowind and rowval arrays */
  nrowptr = nmat->rowptr = gk_zmalloc(nrows+1, "gk_csr_ReorderSymmetric: rowptr");
  nrowind = nmat->rowind = gk_i32malloc(rowptr[nrows], "gk_csr_ReorderSymmetric: rowind");
  nrowval = nmat->rowval = gk_fmalloc(rowptr[nrows], "gk_csr_ReorderSymmetric: rowval");

  /* allocate memory for the different structures present in the matrix */
  if (mat->rlabels)
    nmat->rlabels = gk_i32malloc(nrows, "gk_csr_ReorderSymmetric: rlabels");
  if (mat->rmap)
    nmat->rmap = gk_i32malloc(nrows, "gk_csr_ReorderSymmetric: rmap");
  if (mat->rnorms)
    nmat->rnorms = gk_fmalloc(nrows, "gk_csr_ReorderSymmetric: rnorms");
  if (mat->rsums)
    nmat->rsums = gk_fmalloc(nrows, "gk_csr_ReorderSymmetric: rsums");
  if (mat->rsizes)
    nmat->rsizes = gk_fmalloc(nrows, "gk_csr_ReorderSymmetric: rsizes");
  if (mat->rvols)
    nmat->rvols = gk_fmalloc(nrows, "gk_csr_ReorderSymmetric: rvols");
  if (mat->rwgts)
    nmat->rwgts = gk_fmalloc(nrows, "gk_csr_ReorderSymmetric: rwgts");
  if (mat->clabels)
    nmat->clabels = gk_i32malloc(nrows, "gk_csr_ReorderSymmetric: clabels");
  if (mat->cmap)
    nmat->cmap = gk_i32malloc(nrows, "gk_csr_ReorderSymmetric: cmap");
  if (mat->cnorms)
    nmat->cnorms = gk_fmalloc(nrows, "gk_csr_ReorderSymmetric: cnorms");
  if (mat->csums)
    nmat->csums = gk_fmalloc(nrows, "gk_csr_ReorderSymmetric: csums");
  if (mat->csizes)
    nmat->csizes = gk_fmalloc(nrows, "gk_csr_ReorderSymmetric: csizes");
  if (mat->cvols)
    nmat->cvols = gk_fmalloc(nrows, "gk_csr_ReorderSymmetric: cvols");
  if (mat->cwgts)
    nmat->cwgts = gk_fmalloc(nrows, "gk_csr_ReorderSymmetric: cwgts");

  /* create perm/iperm if not provided (each is the inverse of the other) */
  if (perm == NULL) {
    freeperm = 1;
    perm = gk_i32malloc(nrows, "gk_csr_ReorderSymmetric: perm");
    for (i=0; i<nrows; i++)
      perm[iperm[i]] = i;
  }
  if (iperm == NULL) {
    freeiperm = 1;
    iperm = gk_i32malloc(nrows, "gk_csr_ReorderSymmetric: iperm");
    for (i=0; i<nrows; i++)
      iperm[perm[i]] = i;
  }

  /* fill-in the information of the re-ordered matrix */
  nrowptr[0] = jj = 0;
  for (v=0; v<nrows; v++) {
    u = iperm[v];  /* u is the original row that becomes row v */
    /* NOTE(review): the renumbered column indices keep the original row's
       ordering and are not re-sorted afterwards — confirm callers do not
       require sorted adjacency lists. */
    for (j=rowptr[u]; j<rowptr[u+1]; j++, jj++) {
      nrowind[jj] = perm[rowind[j]];
      nrowval[jj] = rowval[j];
    }
    if (mat->rlabels)
      nmat->rlabels[v] = mat->rlabels[u];
    if (mat->rmap)
      nmat->rmap[v] = mat->rmap[u];
    if (mat->rnorms)
      nmat->rnorms[v] = mat->rnorms[u];
    if (mat->rsums)
      nmat->rsums[v] = mat->rsums[u];
    if (mat->rsizes)
      nmat->rsizes[v] = mat->rsizes[u];
    if (mat->rvols)
      nmat->rvols[v] = mat->rvols[u];
    if (mat->rwgts)
      nmat->rwgts[v] = mat->rwgts[u];
    if (mat->clabels)
      nmat->clabels[v] = mat->clabels[u];
    if (mat->cmap)
      nmat->cmap[v] = mat->cmap[u];
    if (mat->cnorms)
      nmat->cnorms[v] = mat->cnorms[u];
    if (mat->csums)
      nmat->csums[v] = mat->csums[u];
    if (mat->csizes)
      nmat->csizes[v] = mat->csizes[u];
    if (mat->cvols)
      nmat->cvols[v] = mat->cvols[u];
    if (mat->cwgts)
      nmat->cwgts[v] = mat->cwgts[u];
    nrowptr[v+1] = jj;
  }

  /* free memory (only the locally derived permutation vectors) */
  if (freeperm)
    gk_free((void **)&perm, LTERM);
  if (freeiperm)
    gk_free((void **)&iperm, LTERM);

  return nmat;
}
/*************************************************************************/
/*! This function computes a permutation of the rows/columns of a symmetric
matrix based on a breadth-first-traversal. It can be used for re-ordering
the matrix to reduce its bandwidth for better cache locality.
\param[IN] mat is the matrix whose ordering to be computed.
\param[IN] maxdegree is the maximum number of nonzeros of the rows that
will participate in the BFS ordering. Rows with more nonzeros
will be put at the front of the ordering in decreasing degree
order.
\param[IN] v is the starting row of the BFS. A value of -1 indicates that
a randomly selected row will be used.
\param[OUT] perm[i] stores the ID of row i in the re-ordered matrix.
\param[OUT] iperm[i] stores the ID of the row that corresponds to
the ith vertex in the re-ordered matrix.
\note The perm or iperm (but not both) can be NULL, at which point,
the corresponding arrays are not returned. Though the program
works fine when both are NULL, doing that is not smart.
The returned arrays should be freed with gk_free().
*/
/*************************************************************************/
/* Computes a BFS-based symmetric row/column permutation of mat (bandwidth
   reduction).  Rows with >= maxdegree nonzeros are placed first, in
   decreasing-degree order; the BFS starts at row v, or at a random row when
   v == -1.  On return, *r_perm[i] is the new position of row i and
   *r_iperm[p] is the original row at position p; either output pointer may
   be NULL.  Returned arrays must be freed with gk_free(). */
void gk_csr_ComputeBFSOrderingSymmetric(gk_csr_t *mat, int maxdegree, int v,
          int32_t **r_perm, int32_t **r_iperm)
{
  int i, k, nrows, first, last;
  ssize_t j, *rowptr;
  int32_t *rowind, *cot, *pos;

  if (mat->nrows != mat->ncols) {
    fprintf(stderr, "gk_csr_ComputeBFSOrderingSymmetric: The matrix needs to be square.\n");
    return;
  }
  if (maxdegree < mat->nrows && v != -1) {
    fprintf(stderr, "gk_csr_ComputeBFSOrderingSymmetric: Since maxdegree node renumbering is requested the starting row should be -1.\n");
    return;
  }
  if (mat->nrows <= 0)
    return;

  nrows  = mat->nrows;
  rowptr = mat->rowptr;
  rowind = mat->rowind;

  /* This array will function like pos + touched of the CC method:
     pos[i] == -1 marks a visited row, otherwise pos[i] is i's slot in cot */
  pos = gk_i32incset(nrows, 0, gk_i32malloc(nrows, "gk_csr_ComputeBFSOrderingSymmetric: pos"));

  /* This array ([C]losed[O]pen[T]odo => cot) serves three purposes.
     Positions from [0...first) is the current iperm[] vector of the explored rows;
     Positions from [first...last) is the OPEN list (i.e., visited rows);
     Positions from [last...nrows) is the todo list. */
  cot = gk_i32incset(nrows, 0, gk_i32malloc(nrows, "gk_csr_ComputeBFSOrderingSymmetric: cot"));

  first = last = 0;

  /* deal with maxdegree handling: pre-place the high-degree rows */
  if (maxdegree < nrows) {
    last = nrows;
    for (i=nrows-1; i>=0; i--) {
      if (rowptr[i+1]-rowptr[i] < maxdegree) {
        cot[--last] = i;   /* low-degree rows go to the todo list */
        pos[i] = last;
      }
      else {
        cot[first++] = i;  /* high-degree rows are ordered up front */
        pos[i] = -1;
      }
    }
    GKASSERT(first == last);

    if (last > 0) { /* reorder them in degree decreasing order */
      gk_ikv_t *cand = gk_ikvmalloc(first, "gk_csr_ComputeBFSOrderingSymmetric: cand");
      for (i=0; i<first; i++) {
        k = cot[i];
        cand[i].key = (int)(rowptr[k+1]-rowptr[k]);
        cand[i].val = k;
      }
      gk_ikvsortd(first, cand);
      for (i=0; i<first; i++)
        cot[i] = cand[i].val;
      gk_free((void **)&cand, LTERM);
    }

    /* BUG FIX: if every row was a high-degree row the todo list is empty;
       the original evaluated RandomInRange(0) and read cot[nrows]. */
    v = (last < nrows ? cot[last + RandomInRange(nrows-last)] : -1);
  }
  else if (v == -1) {
    /* BUG FIX: the documented v == -1 "random starting row" case used to
       fall through to pos[v] below, indexing pos[-1] (undefined behavior). */
    v = RandomInRange(nrows);
  }

  if (v != -1) {
    /* swap v with the front of the todo list */
    cot[pos[v]] = cot[last];
    pos[cot[last]] = pos[v];
    cot[last] = v;
    pos[v] = last;
  }

  /* start processing the nodes */
  while (first < nrows) {
    if (first == last) { /* find another starting row */
      k = cot[last];
      GKASSERT(pos[k] != -1);
      pos[k] = -1; /* mark node as being visited */
      last++;
    }

    i = cot[first++]; /* the ++ advances the explored rows */
    for (j=rowptr[i]; j<rowptr[i+1]; j++) {
      k = rowind[j];
      /* if a node has already been visited, its perm[] will be -1 */
      if (pos[k] != -1) {
        /* pos[k] is the location within iperm of where k resides (it is in the 'todo' part);
           It is placed in that location cot[last] (end of OPEN list) that we
           are about to overwrite and update pos[cot[last]] to reflect that. */
        cot[pos[k]] = cot[last];    /* put the head of the todo list to
                                       where k was in the todo list */
        pos[cot[last]] = pos[k];    /* update perm to reflect the move */
        cot[last++] = k;            /* put node at the end of the OPEN list */
        pos[k] = -1;                /* mark node as being visited */
      }
    }
  }

  /* time to decide what to return */
  if (r_perm != NULL) {
    /* use the 'pos' array to build the perm array */
    for (i=0; i<nrows; i++)
      pos[cot[i]] = i;
    *r_perm = pos;
    pos = NULL;
  }
  if (r_iperm != NULL) {
    *r_iperm = cot;
    cot = NULL;
  }

  /* cleanup memory (arrays handed to the caller were NULLed above) */
  gk_free((void **)&pos, &cot, LTERM);
}
/*************************************************************************/
/*! This function computes a permutation of the rows of a symmetric matrix
based on a best-first-traversal. It can be used for re-ordering the matrix
to reduce its bandwidth for better cache locality.
\param[IN] mat is the matrix structure.
\param[IN] v is the starting row of the best-first traversal.
\param[IN] type indicates the criteria to use to measure the 'bestness'
of a row.
\param[OUT] perm[i] stores the ID of row i in the re-ordered matrix.
\param[OUT] iperm[i] stores the ID of the row that corresponds to
the ith row in the re-ordered matrix.
\note The perm or iperm (but not both) can be NULL, at which point,
the corresponding arrays are not returned. Though the program
works fine when both are NULL, doing that is not smart.
The returned arrays should be freed with gk_free().
*/
/*************************************************************************/
/* Computes a best-first-traversal symmetric row/column permutation of mat
   (bandwidth reduction).  v is the starting row; type selects the priority
   rule (1: DFS, 2: max in-closed degree, 3: sum of closed orders, 4: sum of
   order differences, 5: BFS with in-degree priority, 6: hybrid of 1+2).
   On return, *r_perm[i] is the new position of row i and *r_iperm[p] is the
   original row at position p; either output pointer may be NULL.
   Returned arrays must be freed with gk_free(). */
void gk_csr_ComputeBestFOrderingSymmetric(gk_csr_t *mat, int v, int type,
          int32_t **r_perm, int32_t **r_iperm)
{
  ssize_t j, *rowptr;
  int i, u, nrows, nopen, ntodo;
  int32_t *rowind, *perm, *degrees, *wdegrees, *sod, *level, *ot, *pos;
  gk_i32pq_t *queue;

  if (mat->nrows != mat->ncols) {
    fprintf(stderr, "gk_csr_ComputeBestFOrderingSymmetric: The matrix needs to be square.\n");
    return;
  }
  if (mat->nrows <= 0)
    return;

  nrows  = mat->nrows;
  rowptr = mat->rowptr;
  rowind = mat->rowind;

  /* the degree of the vertices in the closed list */
  degrees = gk_i32smalloc(nrows, 0, "gk_csr_ComputeBestFOrderingSymmetric: degrees");

  /* the weighted degree of the vertices in the closed list for type==3 */
  wdegrees = gk_i32smalloc(nrows, 0, "gk_csr_ComputeBestFOrderingSymmetric: wdegrees");

  /* the sum of differences for type==4 */
  sod = gk_i32smalloc(nrows, 0, "gk_csr_ComputeBestFOrderingSymmetric: sod");

  /* the encountering level of a vertex type==5 */
  level = gk_i32smalloc(nrows, 0, "gk_csr_ComputeBestFOrderingSymmetric: level");

  /* The open+todo list of vertices.
     The vertices from [0..nopen] are the open vertices.
     The vertices from [nopen..ntodo) are the todo vertices. */
  ot = gk_i32incset(nrows, 0, gk_i32malloc(nrows, "gk_csr_ComputeBestFOrderingSymmetric: ot"));

  /* For a vertex that has not been explored, pos[i] is the position in the ot list. */
  pos = gk_i32incset(nrows, 0, gk_i32malloc(nrows, "gk_csr_ComputeBestFOrderingSymmetric: pos"));

  /* if perm[i] >= 0, then perm[i] is the order of vertex i; otherwise perm[i] == -1. */
  perm = gk_i32smalloc(nrows, -1, "gk_csr_ComputeBestFOrderingSymmetric: perm");

  /* create the queue and put the starting vertex in it */
  queue = gk_i32pqCreate(nrows);
  gk_i32pqInsert(queue, v, 1);

  /* put v at the front of the open list; ot/pos were initialized as the
     identity, so the two assignments below swap entries 0 and v in both */
  pos[0] = ot[0] = v;
  pos[v] = ot[v] = 0;
  nopen = 1;
  ntodo = nrows;

  /* start processing the nodes */
  for (i=0; i<nrows; i++) {
    if (nopen == 0) { /* deal with non-connected graphs */
      gk_i32pqInsert(queue, ot[0], 1);
      nopen++;
    }

    if ((v = gk_i32pqGetTop(queue)) == -1)
      gk_errexit(SIGERR, "The priority queue got empty ahead of time [i=%d].\n", i);

    if (perm[v] != -1)
      gk_errexit(SIGERR, "The perm[%d] has already been set.\n", v);
    perm[v] = i;

    if (ot[pos[v]] != v)
      gk_errexit(SIGERR, "Something went wrong [ot[pos[%d]]!=%d.\n", v, v);
    if (pos[v] >= nopen)
      gk_errexit(SIGERR, "The position of v is not in open list. pos[%d]=%d is >=%d.\n", v, pos[v], nopen);

    /* remove v from the open list and re-arrange the todo part of the list */
    ot[pos[v]] = ot[nopen-1];
    pos[ot[nopen-1]] = pos[v];
    if (ntodo > nopen) {
      ot[nopen-1] = ot[ntodo-1];
      pos[ot[ntodo-1]] = nopen-1;
    }
    nopen--;
    ntodo--;

    for (j=rowptr[v]; j<rowptr[v+1]; j++) {
      u = rowind[j];
      if (perm[u] == -1) {
        /* update ot list, if u is not in the open list by putting it at the end
           of the open list. */
        if (degrees[u] == 0) {
          ot[pos[u]] = ot[nopen];
          pos[ot[nopen]] = pos[u];
          ot[nopen] = u;
          pos[u] = nopen;
          nopen++;
          level[u] = level[v]+1;
          gk_i32pqInsert(queue, u, 0);
        }

        /* update the in-closed degree */
        degrees[u]++;

        /* update the queues based on the type */
        switch (type) {
          case 1: /* DFS */
            gk_i32pqUpdate(queue, u, 1000*(i+1)+degrees[u]);
            break;
          case 2: /* Max in closed degree */
            gk_i32pqUpdate(queue, u, degrees[u]);
            break;
          case 3: /* Sum of orders in closed list */
            wdegrees[u] += i;
            gk_i32pqUpdate(queue, u, wdegrees[u]);
            break;
          case 4: /* Sum of order-differences */
            /* this is handled at the end of the loop */
            ;
            break;
          case 5: /* BFS with in degree priority */
            gk_i32pqUpdate(queue, u, -(1000*level[u] - degrees[u]));
            break;
          case 6: /* Hybrid of 1+2 */
            gk_i32pqUpdate(queue, u, (i+1)*degrees[u]);
            break;
          default:
            ;
        }
      }
    }

    if (type == 4) { /* update all the vertices in the open list */
      for (j=0; j<nopen; j++) {
        u = ot[j];
        if (perm[u] != -1)
          gk_errexit(SIGERR, "For i=%d, the open list contains a closed row: ot[%zd]=%d, perm[%d]=%d.\n", i, j, u, u, perm[u]);
        sod[u] += degrees[u];
        if (i<1000 || i%25==0)   /* throttle the O(nopen) updates for large i */
          gk_i32pqUpdate(queue, u, sod[u]);
      }
    }

    /*
    for (j=0; j<ntodo; j++) {
      if (pos[ot[j]] != j)
        gk_errexit(SIGERR, "pos[ot[%zd]] != %zd.\n", j, j);
    }
    */
  }

  /* time to decide what to return */
  if (r_iperm != NULL) {
    /* use the 'degrees' array to build the iperm array */
    for (i=0; i<nrows; i++)
      degrees[perm[i]] = i;
    *r_iperm = degrees;
    degrees = NULL;
  }
  if (r_perm != NULL) {
    *r_perm = perm;
    perm = NULL;
  }

  /* cleanup memory */
  gk_i32pqDestroy(queue);
  /* BUG FIX: "&degrees" had been corrupted into the mojibake "°rees"
     (an "&deg;" HTML-entity mangling), which does not compile. */
  gk_free((void **)&perm, &degrees, &wdegrees, &sod, &ot, &pos, &level, LTERM);
}
|
GB_unaryop__identity_int64_uint16.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__identity_int64_uint16
// op(A') function: GB_tran__identity_int64_uint16
// C type: int64_t
// A type: uint16_t
// cast: int64_t cij = (int64_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
uint16_t
#define GB_CTYPE \
int64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, aij) \
int64_t z = (int64_t) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_INT64 || GxB_NO_UINT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply the IDENTITY operator entrywise, widening
// each uint16_t entry of A to an int64_t entry of C.  Cx and Ax may be
// aliased, since each output position depends only on the same input
// position.  Returns GrB_NO_VALUE when this kernel is compiled out.
GrB_Info GB_unop__identity_int64_uint16
(
    int64_t *Cx,                // Cx and Ax may be aliased
    uint16_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        // same effect as GB_CAST_OP (k, k): cij = (int64_t) aij
        Cx [k] = (int64_t) Ax [k] ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose the uint16_t matrix A into the int64_t
// matrix C, applying the IDENTITY operator (a widening cast) to each entry.
// The actual work is done by the GB_unaryop_transpose.c template, driven by
// the GB_* macros defined at the top of this file.
GrB_Info GB_tran__identity_int64_uint16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // select the second (numerical) phase of the transpose template
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
GB_unop__identity_fc32_uint64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_fc32_uint64)
// op(A') function: GB (_unop_tran__identity_fc32_uint64)
// C type: GxB_FC32_t
// A type: uint64_t
// cast: GxB_FC32_t cij = GxB_CMPLXF ((float) (aij), 0)
// unaryop: cij = aij
#define GB_ATYPE \
uint64_t
#define GB_CTYPE \
GxB_FC32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_FC32 || GxB_NO_UINT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply the IDENTITY operator entrywise, casting each
// uint64_t entry of A to a single-precision complex value with a zero
// imaginary part.  Cx and Ax may be aliased.  When Ab is non-NULL, A is
// bitmap and only the positions flagged in Ab are converted (A->b was
// already memcpy'd into C->b by the caller).
GrB_Info GB (_unop_apply__identity_fc32_uint64)
(
    GxB_FC32_t *Cx,             // Cx and Ax may be aliased
    const uint64_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t k ;
    if (Ab == NULL)
    {
        // every one of the anz positions holds an entry
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (k = 0 ; k < anz ; k++)
        {
            Cx [k] = GxB_CMPLXF ((float) (Ax [k]), 0) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (k = 0 ; k < anz ; k++)
        {
            if (Ab [k])
            {
                Cx [k] = GxB_CMPLXF ((float) (Ax [k]), 0) ;
            }
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose the uint64_t matrix A into the GxB_FC32_t
// matrix C, applying the IDENTITY operator (cast to complex with zero
// imaginary part) to each entry.  The traversal is done by the
// GB_unop_transpose.c template, driven by the GB_* macros above.
GrB_Info GB (_unop_tran__identity_fc32_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
axpy4.base.c | #include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <omp.h>
/*
 * Returns the current wall-clock time in seconds, with microsecond
 * resolution, for computing elapsed-time differences.
 * Returns 0.0 if gettimeofday() fails.
 */
double getClock()
{
  struct timeval tp;

  /* BUG FIX/portability: POSIX specifies the timezone argument of
     gettimeofday() as obsolete and requires it to be NULL; the original
     passed a pointer to an uninitialized struct timezone and also ignored
     the return value, reading tp uninitialized on failure. */
  if (gettimeofday(&tp, NULL) != 0)
    return 0.0;

  return (tp.tv_sec + tp.tv_usec*1.0e-6);
}
/*
 * Orio-generated axpy4 benchmark driver:
 *   y += a1*x1 + a2*x2 + a3*x3 + a4*x4 over n elements, repeated `reps`
 *   times, timing the OpenMP-parallel loop.
 * N and REPS are compile-time macros supplied by the build harness; when
 * TEST is defined the result vector is printed instead of the timing.
 */
int main(int argc, char *argv[])
{
  double *y;
  double *x1;
  double *x2;
  double *x3;
  double *x4;
  double a1;
  double a2;
  double a3;
  double a4;
  int n = N;

  {
    int i1;
    y  = (double*) malloc((n) * sizeof(double));
    x1 = (double*) malloc((n) * sizeof(double));
    x2 = (double*) malloc((n) * sizeof(double));
    x3 = (double*) malloc((n) * sizeof(double));
    x4 = (double*) malloc((n) * sizeof(double));
    /* BUG FIX: the original dereferenced the arrays without checking that
       any of the five allocations succeeded. */
    if (y == NULL || x1 == NULL || x2 == NULL || x3 == NULL || x4 == NULL) {
      fprintf(stderr, "axpy4: out of memory (n=%d)\n", n);
      return EXIT_FAILURE;
    }
    for (i1=0; i1<n; i1++) {
      x1[i1] = (i1+1) % 4 + 1;
      x2[i1] = (i1+5) % 10 + 1;
      x3[i1] = (i1+3) % 6 + 1;
      x4[i1] = (i1+9) % 9 + 1;
      y[i1] = 0;
    }
    a1 = (double) 6;
    a2 = (double) 7;
    a3 = (double) 4;
    a4 = (double) 1;
  }

  double orio_t_start, orio_t_end, orio_t_total=0;
  int orio_i;
  int reps = REPS;
#ifdef TEST
  reps = 1;
#endif

  orio_t_start = getClock();
  for (orio_i=0; orio_i<reps; orio_i++)
  {
    int i;
#pragma omp parallel for
    for (i=0; i<=n-1; i++)
      y[i]=y[i]+a1*x1[i]+a2*x2[i]+a3*x3[i]+a4*x4[i];
  }
  orio_t_end = getClock();

  /* BUG FIX: average over the repetitions actually executed (reps), not the
     compile-time REPS; the two differ when TEST forces reps = 1. */
  orio_t_total = (orio_t_end - orio_t_start) / reps;
  /* 8 flops per element (4 multiplies + 4 adds) */
  double mflops = (8.0*N)/(orio_t_total*1000000);

#ifdef TEST
  {
    int i;
    for (i=0; i<=n-1; i++) {
      if (i%10 == 0)
        printf("\n");
      printf("%f ",y[i]);
    }
  }
#else
  printf("%f\t%f\n", orio_t_total, mflops);
#endif

  /* keep the dependence on y so the compute loop cannot be optimized away,
     then release the arrays (BUG FIX: the original leaked all five) */
  int ret = (int) y[0];
  free(y); free(x1); free(x2); free(x3); free(x4);
  return ret;
}
|
GB_binop__first_uint32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__first_uint32
// A.*B function (eWiseMult): GB_AemultB__first_uint32
// A*D function (colscale): GB_AxD__first_uint32
// D*A function (rowscale): GB_DxB__first_uint32
// C+=B function (dense accum): GB_Cdense_accumB__first_uint32
// C+=b function (dense accum): GB_Cdense_accumb__first_uint32
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__first_uint32
// C=scalar+B GB_bind1st__first_uint32
// C=scalar+B' GB_bind1st_tran__first_uint32
// C=A+scalar (none)
// C=A'+scalar (none)
// C type: uint32_t
// A type: uint32_t
// B,b type: uint32_t
// BinaryOp: cij = aij
#define GB_ATYPE \
uint32_t
#define GB_BTYPE \
uint32_t
#define GB_CTYPE \
uint32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint32_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
;
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y) \
z = x ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_FIRST || GxB_NO_UINT32 || GxB_NO_FIRST_UINT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense.  The binary operator here is
// FIRST (GB_BINOP above is z = x), so each entry of C receives the
// corresponding entry of A; the traversal lives in the included template.
GrB_Info GB_Cdense_ewise3_noaccum__first_uint32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix into a dense matrix.  For the FIRST
// operator this path is not generated — the template include below is
// compiled out with #if 0 — so the function is a no-op reporting success.
GrB_Info GB_Cdense_accumB__first_uint32
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    {
        #include "GB_dense_subassign_23_template.c"
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar into a dense matrix.  For the FIRST operator
// this path is not generated — the template include below is compiled out
// with #if 0 — so the function is a no-op reporting success.
GrB_Info GB_Cdense_accumb__first_uint32
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    {
        // get the scalar b for C += b, of type uint32_t
        uint32_t bwork = (*((uint32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D.  The operator
// applied per entry is FIRST (GB_BINOP above is z = x), so each entry of C
// is the corresponding entry of A; the colscale template does the traversal.
GrB_Info GB_AxD__first_uint32
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // Cx is the numeric array of C, filled in by the template
    uint32_t *GB_RESTRICT Cx = (uint32_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D, applying the FIRST
// operator (GB_BINOP above is z = x) to each (d, b) pair; the rowscale
// template does the traversal.
GrB_Info GB_DxB__first_uint32
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // Cx is the numeric array of C, filled in by the template
    uint32_t *GB_RESTRICT Cx = (uint32_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B over the set union of the patterns of A
// and B, using the FIRST operator.  This is the numerical phase only
// (GB_PHASE_2_OF_2 is defined at file scope above); GB_add_template.c does
// the work across the given task list.
GrB_Info GB_AaddB__first_uint32
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_add_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B over the set intersection of the
// patterns of A and B, using the FIRST operator.  This is the numerical
// phase only (GB_PHASE_2_OF_2 is defined at file scope above);
// GB_emult_template.c does the work across the given task list.
GrB_Info GB_AemultB__first_uint32
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = op (x, Bx): apply FIRST with the scalar x bound as the first
// argument.  Since FIRST ignores its second argument, every output position
// simply receives x; the entries of B are never read.  Cx and Bx may be
// aliased.
GrB_Info GB_bind1st__first_uint32
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *Cx = (uint32_t *) Cx_output ;
    uint32_t x = (*((uint32_t *) x_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        Cx [k] = x ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
#if 0
GrB_Info (none)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint32_t *Cx = (uint32_t *) Cx_output ;
uint32_t *Ax = (uint32_t *) Ax_input ;
uint32_t y = (*((uint32_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
uint32_t aij = Ax [p] ;
Cx [p] = aij ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typcasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
; ; \
Cx [pC] = x ; \
}
// C = op (x, A'): transpose A and apply FIRST with the scalar x bound as
// the first argument.  FIRST ignores its second argument, so every entry of
// C receives x (see the GB_CAST_OP redefinition just above this function).
GrB_Info GB_bind1st_tran__first_uint32
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
    uint32_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t x = (*((const uint32_t *) x_input)) ;
    // numerical phase of the transpose template
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any generated code that follows this function
    #undef GB_ATYPE
    #define GB_ATYPE \
    uint32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// cij = op (aij, y), no typcasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = Ax [pA] ; \
Cx [pC] = aij ; \
}
GrB_Info (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t y = (*((const uint32_t *) y_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
#endif
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.