source stringlengths 3 92 | c stringlengths 26 2.25M |
|---|---|
parallel_snake.c | #include "main.h"
#include <stdio.h>
#include <stdlib.h>
typedef struct cel Cell;
typedef struct coord Coord;
/*
 * Allocates and initialises one body-list node at position (i, j).
 * Both links start as NULL; the node is wired into a list by addCell().
 * Aborts the program when the allocation fails, so callers never
 * receive (and dereference) a NULL node.
 */
Cell* alocaCell(int i, int j) {
    Cell* cell = malloc(sizeof *cell);   /* no cast needed in C; sizeof *cell tracks the type */
    if (cell == NULL) {
        fprintf(stderr, "alocaCell: out of memory\n");
        exit(EXIT_FAILURE);
    }
    cell->prev = NULL;
    cell->next = NULL;
    cell->poz.line = i;
    cell->poz.col = j;
    return cell;
}
/* Appends a node to the end of the snake's doubly linked body list.
 * An empty list (both ends NULL) makes the node head and tail at once. */
void addCell(struct snake* snake, Cell* el) {
    if (snake->cap == NULL && snake->coada == NULL) {
        /* empty list: the node becomes both head and tail */
        snake->cap = snake->coada = el;
        return;
    }
    el->prev = snake->coada;
    el->next = NULL;
    snake->coada->next = el;
    snake->coada = el;
}
/*
 * Moves the tail node of the list to the front, making it the new head.
 * Used when the snake advances: the tail cell is recycled as the head cell
 * instead of freeing one node and allocating another.
 * No-op for a single-node list (head == tail).
 */
void attachHead(struct snake* snake) {
if(snake->cap == snake->coada)
return;
snake->coada->next = snake->cap;   /* link old tail in front of old head */
snake->cap->prev = snake->coada;
snake->coada = snake->coada->prev; /* second-to-last node becomes the tail */
snake->coada->next = NULL;         /* detach the moved node from the tail end */
snake->cap = snake->cap->prev;     /* the moved (old tail) node becomes the head */
snake->cap->prev = NULL;
}
/*
 * Reconstructs the snake's body as a doubly linked list, starting from the
 * head coordinates and walking world cells that carry this snake's encoding.
 * The world wraps around at every edge (toroidal grid).
 *
 * prevDir records on which side of the current cell the previously visited
 * segment lies; that direction is skipped so the walk never backtracks.
 * The walk stops when no remaining neighbour carries the encoding, i.e. the
 * tail has been reached.
 *
 * This replaces four near-identical hand-unrolled branches with one
 * table-driven loop. The per-branch probe order of the original is kept
 * exactly (it only matters if a snake is adjacent to its own body).
 */
void completeSnake(struct snake* snake, int **world, int num_lines, int num_cols) {
    /* direction codes: 0 = up, 1 = down, 2 = left, 3 = right */
    static const int orderN[3] = {2, 3, 1}; /* prevDir 'N': left, right, down */
    static const int orderS[3] = {0, 2, 3}; /* prevDir 'S': up, left, right  */
    static const int orderE[3] = {0, 1, 2}; /* prevDir 'E': up, down, left   */
    static const int orderV[3] = {0, 1, 3}; /* prevDir 'V': up, down, right  */
    /* prevDir value recorded after stepping in each direction: the new
       segment sees the previous one on its opposite side. */
    static const char backDir[4] = {'S', 'N', 'E', 'V'};

    int crti = snake->head.line;
    int crtj = snake->head.col;
    char prevDir = snake->direction;

    snake->cap = alocaCell(snake->head.line, snake->head.col);
    snake->coada = snake->cap;

    for (;;) {
        const int *order;
        int k, found = 0;

        switch (prevDir) {
        case 'N': order = orderN; break;
        case 'S': order = orderS; break;
        case 'E': order = orderE; break;
        case 'V': order = orderV; break;
        default:  return; /* unknown direction: nothing to walk
                             (the original spun forever here) */
        }

        for (k = 0; k < 3; k++) {
            int ni = crti;
            int nj = crtj;
            /* step one cell with wraparound */
            switch (order[k]) {
            case 0: ni = (ni == 0) ? num_lines - 1 : ni - 1; break; /* up */
            case 1: ni = (ni == num_lines - 1) ? 0 : ni + 1; break; /* down */
            case 2: nj = (nj == 0) ? num_cols - 1 : nj - 1; break;  /* left */
            case 3: nj = (nj == num_cols - 1) ? 0 : nj + 1; break;  /* right */
            }
            if (world[ni][nj] == snake->encoding) {
                prevDir = backDir[order[k]];
                addCell(snake, alocaCell(ni, nj));
                crti = ni;
                crtj = nj;
                found = 1;
                break;
            }
        }

        if (!found)
            break; /* no candidate neighbour left: the tail was reached */
    }
}
/* Returns nonzero when the snake's head sits on a non-empty world cell. */
int checkCollision(struct snake* snake, int** world) {
    int occupant = world[snake->head.line][snake->head.col];
    return occupant != 0;
}
/*
 * Advances the snake one step: remembers the previous tail and head so a
 * collision can be rolled back, recycles the tail node as the new head node,
 * then moves the head one cell in the current direction with toroidal wrap.
 */
void computeMoves(struct snake* snake, int** world, int num_lines, int num_cols) {
    snake->lastTail = snake->coada->poz;                      /* keep the previous tail */
    world[snake->coada->poz.line][snake->coada->poz.col] = 0; /* free the tail cell: the snake moves */
    snake->oldHead = snake->head;                             /* keep the previous head */
    attachHead(snake);                                        /* tail node is reused as the head node */

    /* compute the new head position; the world wraps around at every edge */
    switch (snake->direction) {
    case 'N':
        snake->head.line = (snake->head.line == 0) ? num_lines - 1 : snake->head.line - 1;
        break;
    case 'S':
        snake->head.line = (snake->head.line == num_lines - 1) ? 0 : snake->head.line + 1;
        break;
    case 'V':
        snake->head.col = (snake->head.col == 0) ? num_cols - 1 : snake->head.col - 1;
        break;
    case 'E':
        snake->head.col = (snake->head.col == num_cols - 1) ? 0 : snake->head.col + 1;
        break;
    }

    /* update the list's head node with the new coordinates */
    snake->cap->poz.line = snake->head.line;
    snake->cap->poz.col = snake->head.col;
}
/*
 * Runs the snake simulation for step_count steps.
 * Body reconstruction and per-step movement are parallelised per snake;
 * collision detection and the head writes stay on one thread, so the
 * world matrix is only written concurrently for distinct tail cells.
 * On the first collision the previous world state is restored and the
 * simulation stops.
 * NOTE(review): file_name is accepted but never used in this function.
 * NOTE(review): computeMoves() clears tail cells from several threads at
 * once; this is only race-free if snakes never share cells — confirm.
 */
void run_simulation(int num_lines, int num_cols, int **world, int num_snakes,
struct snake *snakes, int step_count, char *file_name) {
    int ok = 0;
    int i, j;
    /* body reconstruction is independent per snake, so it runs in parallel
       (the loop variable is implicitly private under OpenMP) */
    #pragma omp parallel for
    for(i = 0; i < num_snakes; i++)
        completeSnake(&snakes[i], world, num_lines, num_cols);
    for(i = 0; i < step_count; i++) {
        /* new positions are computed in parallel, same independence argument */
        #pragma omp parallel for
        for(j = 0; j < num_snakes; j++) {
            computeMoves(&snakes[j], world, num_lines, num_cols);
        }
        /* collision test and head placement run on the master thread only */
        for(j = 0; j < num_snakes; j++) {
            if(!checkCollision(&snakes[j], world)) {
                world[snakes[j].head.line][snakes[j].head.col] = snakes[j].encoding;
            } else {
                ok = 1;
                break;
            }
        }
        /* a collision happened: roll the map back to the previous step */
        if(ok) {
            /* snakes before index j (the colliding one) already wrote their
               new heads; erase those cells */
            #pragma omp parallel for
            for(i = 0; i < j; i++) {
                world[snakes[i].head.line][snakes[i].head.col] = 0;
            }
            /* every snake gets its previous head back and its old tail cell
               re-marked on the map */
            #pragma omp parallel for
            for(i = 0; i < num_snakes; i++) {
                snakes[i].head = snakes[i].oldHead;
                world[snakes[i].lastTail.line][snakes[i].lastTail.col] = snakes[i].encoding;
            }
            /* after a collision the simulation ends */
            break;
        }
    }
}
main.c | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <omp.h>
#include <string.h>
#include <math.h>
// COMPILACAO: gcc -fopenmp main.c -lm
#define QTD_GIS 5
#define NELEMS(x) (sizeof(x) / sizeof((x)[0]))
struct CELL
{
int isUrban; // Estado da celula
int isAvailable; // pode mudar de estado?
double probTransicao; // probabilidade de transição
double gis[QTD_GIS]; // Valores GIS desta celula
double pesos[QTD_GIS]; // pesos para cada valor gis
int qtdGis; // talvez precisemos para fazer o calculo
int pos_i;
int pos_j;
} typedef Cell;
struct AUTOMATA{
Cell* cells;
int width;
int height;
double ligma; // usada para o calculo de transicao
double alpha; // usada para o calculo de transicao
int neighborhoodSize;
}typedef Automata;
Automata* read_from_file(FILE *f);
Automata* create_automata(int, int);
void copy_automata(Automata*, Automata*);
Automata* allocate(int, int);
int* list_of_neighbors(Automata*, int, int, int, int*);
void print_automata(Automata*);
// void simulate_automata(Automata*, size_t);
void simulate_automata(Automata*, Automata*, double, int, int*);
// double calculate_prob(Cell cell);
double calculate_prob(Automata*, Cell);
int* divide_automato(Automata*, int, int*, double, double);
void free_automata(Automata*);
void printVetor(char*,int*,int);
void printVetorDouble(char*, double*, int);
int linha_inicial(int, int*);
int linha_final(int, int*);
double numerador(double, double);
double denominador(Cell);
double evaluation_score(double*, double*, int);
double develop_intensity(Cell*, int*, int);
double total_constrait();
/*
 * Entry point.
 * Usage: program <iterations> <num_threads> <input_file>
 * Loads the automaton from the binary input file, partitions its rows by
 * estimated cost across the requested threads, and runs the urban-growth
 * simulation double-buffered: each iteration reads the snapshot in
 * automataAux and writes the next state into automata, then the snapshot
 * is refreshed once by a single thread.
 */
int main(int argc, char *argv[]){
    if (argc < 4) {
        fprintf(stderr, "Usage: %s <iterations> <num_threads> <input_file>\n", argv[0]);
        return 1;
    }
    srand(time(NULL));
    int iteracoes = atoi(argv[1]);
    const int qtdThreads = atoi(argv[2]);
    double trashold = 0.30;                 /* urbanisation probability threshold */
    if (iteracoes < 0 || qtdThreads <= 0) {
        fprintf(stderr, "iterations must be >= 0 and thread count must be > 0\n");
        return 1;
    }
    FILE* ptr = fopen(argv[3],"rb");
    if(ptr == NULL){
        printf("Cant read file");
        return 1;
    }
    Automata* automata = read_from_file(ptr);   /* also closes the file */
    Automata* automataAux = allocate(automata->width, automata->height);
    copy_automata(automataAux, automata);       /* aux = current state */
    int* indices = malloc(qtdThreads * sizeof *indices);
    if (indices == NULL) {
        fprintf(stderr, "out of memory\n");
        return 1;
    }
    /* per-thread end-row indices, weighted by per-row cost estimates */
    indices = divide_automato(automata, qtdThreads, indices, 0.000127, 0.003674);
    #pragma omp parallel num_threads(qtdThreads)
    {
        int thId = omp_get_thread_num();
        for (int i = 0; i < iteracoes; i++)   /* int loop: atoi can return a negative */
        {
            simulate_automata(automata, automataAux, trashold, thId, indices);
            #pragma omp barrier
            #pragma omp single
            copy_automata(automataAux, automata);   /* refresh the read snapshot */
            #pragma omp barrier
        }
    }
    free(indices);          /* was leaked in the original */
    free_automata(automata);
    free_automata(automataAux);
    return 0;
}
/*
 * Loads an automaton from an open binary stream and closes the stream.
 * File layout: width, height (int); ligma, alpha (double);
 * neighborhoodSize (int); then width*height Cell records.
 * The original read sizeof(int) bytes into size_t locals, which only
 * worked on little-endian platforms where the upper bytes stayed zero;
 * the locals are now plain ints. Every fread is checked: a truncated
 * file aborts instead of simulating garbage.
 */
Automata* read_from_file(FILE *f){
    int width = 0;
    int height = 0;
    if (fread(&width, sizeof(int), 1, f) != 1 ||
        fread(&height, sizeof(int), 1, f) != 1) {
        fprintf(stderr, "read_from_file: truncated header\n");
        fclose(f);
        exit(EXIT_FAILURE);
    }
    Automata* automata = allocate(width, height);
    automata->width = width;
    automata->height = height;
    size_t ncells = (size_t)width * (size_t)height;
    if (fread(&automata->ligma, sizeof(double), 1, f) != 1 ||
        fread(&automata->alpha, sizeof(double), 1, f) != 1 ||
        fread(&automata->neighborhoodSize, sizeof(int), 1, f) != 1 ||
        fread(automata->cells, sizeof(Cell), ncells, f) != ncells) {
        fprintf(stderr, "read_from_file: truncated body\n");
        fclose(f);
        exit(EXIT_FAILURE);
    }
    fclose(f);
    return automata;
}
/**
 * Partitions the automaton's rows across qtd threads by estimated cost.
 * Each row's cost is count_unavailable*time_uv + count_available*time_av;
 * thread k is assigned rows [indices[k-1], indices[k]) with indices[-1]
 * taken as 0 (see linha_inicial/linha_final).
 * Returns the caller-supplied indices array, filled in.
 *
 * Example: qtd = 2 yields one split point k: thread 1 => [0, k),
 * thread 2 => [k, ordem); qtd = n yields cut points k_1..k_n with
 * k_n == ordem.
 **/
int* divide_automato(Automata* automato, int qtd, int* indices, double time_uv, double time_av) {
    const int ordem = automato->width; /* the automaton is assumed to be square */
    double* linhas = malloc(ordem * sizeof *linhas); /* estimated cost per row */
    if (linhas == NULL) {
        fprintf(stderr, "divide_automato: out of memory\n");
        exit(EXIT_FAILURE);
    }
    double sum = 0.0;
    for (int i = 0; i < ordem; i++)
    {
        int count_uv = 0, count_av = 0; /* unavailable / available cell counts */
        for (int j = 0; j < ordem; j++)
        {
            if (automato->cells[i*ordem + j].isAvailable) {
                count_av++;
            } else {
                count_uv++;
            }
        }
        linhas[i] = count_uv*time_uv + count_av*time_av;
        sum += linhas[i]; /* total accumulated in the same pass */
    }
    double workForLine = sum / qtd; /* target cost per thread */
    int i = 0;
    for (int ind = 0; ind < qtd; ind++)
    {
        double sumLine = 0.0;
        /* greedily take rows until this chunk exceeds its target */
        while (i < ordem && sumLine <= workForLine)
        {
            sumLine += linhas[i];
            i++;
        }
        indices[ind] = i; /* thread ind ends (exclusively) at row i */
    }
    /* guard against floating-point rounding: the last thread must always
       reach the final row, otherwise some rows would never be simulated */
    indices[qtd - 1] = ordem;
    free(linhas);
    return indices;
}
/*
 * Builds a width x height matrix of randomly initialised cells (row-major,
 * indexed as i*width + j). Used by create_automata for synthetic inputs.
 * NOTE(review): loops bound i by width and j by height while indexing
 * i*width + j — consistent only for square matrices; confirm callers.
 */
Cell* create_random_matrix(int width, int height){
Cell* matriz = malloc(width*height*sizeof(Cell));
for (size_t i = 0; i < width; i++)
{
for (size_t j = 0; j < height; j++)
{
matriz[i*width + j].isUrban = ((double) rand() / (double) RAND_MAX) < 0.05 ? 1 : 0; // 5% chance the cell starts out urbanised
matriz[i*width + j].qtdGis = QTD_GIS;
matriz[i*width + j].isAvailable = ((double) rand() / (double) RAND_MAX) >= 0.20 ? 1 : 0; // 20% chance the cell is immutable
matriz[i*width + j].probTransicao = (double) rand() / (double) RAND_MAX; // in [0, 1]
matriz[i*width + j].pos_i = i;
matriz[i*width + j].pos_j = j;
for (size_t k = 0; k < QTD_GIS; k++)
{
matriz[i*width + j].gis[k] = rand() % 5 + 1; // in [1, 5] (the original comment said [0, 5])
matriz[i*width + j].pesos[k] = ((double) rand() / (double) RAND_MAX) + 1; // in [1.0, 2.0]
}
}
}
return matriz;
}
Automata* allocate(int width, int height) {
Automata* automata = malloc(sizeof(Automata));
automata->cells = malloc((size_t)width*height*sizeof(Cell));
return automata;
}
/* Releases an automaton and its cell matrix. Safe on NULL: the original
 * dereferenced automato->cells unconditionally. */
void free_automata(Automata* automato) {
    if (automato == NULL)
        return;
    free(automato->cells);
    free(automato);
}
Automata* create_automata(int width, int height){
Automata* automata = malloc(sizeof(Automata));
automata->width = width;
automata->height = height;
automata->ligma = 10.0;
automata->alpha = 1.0;
automata->neighborhoodSize = 1;
automata->cells = create_random_matrix(width, height);
return automata;
}
/*
 * Deep-copies automaton B into automaton A (A = B). A->cells must already
 * hold at least B->width * B->height cells (see allocate()).
 * The per-element loop became a single memcpy; the explicit size_t cast
 * also avoids the int overflow the original risked in width * height.
 */
void copy_automata(Automata* autoA, Automata* autoB) {
    autoA->width = autoB->width;
    autoA->height = autoB->height;
    autoA->ligma = autoB->ligma;
    autoA->alpha = autoB->alpha;
    autoA->neighborhoodSize = autoB->neighborhoodSize;
    memcpy(autoA->cells, autoB->cells,
           (size_t)autoB->width * autoB->height * sizeof *autoB->cells);
}
/*
 * Collects the flat indices (into automata->cells) of every cell in the
 * square window of radius neighborhood_size around (pos_i, pos_j),
 * excluding the centre cell and anything outside the grid (no wraparound).
 * The neighbour count is returned through n_neighbors; the returned array
 * is heap-allocated and owned by the caller (freed in calculate_prob).
 * NOTE(review): i is bounded by width and j by height while the flat index
 * is i*width + j — this is only consistent for square automata; confirm.
 */
int* list_of_neighbors(Automata* automata, int pos_i, int pos_j, int neighborhood_size, int* n_neighbors){
int m_size = (neighborhood_size * 2) +1; /* window edge length; worst-case neighbour count is m_size^2 */
int* temp_array = malloc(sizeof(int) * m_size * m_size);
*n_neighbors = 0;
for (int i = pos_i - neighborhood_size; i <= pos_i + neighborhood_size; i++){
for(int j = pos_j - neighborhood_size; j<= pos_j + neighborhood_size; j++){
if(i >= 0 && i < automata->width){ //inside left and right boundries
if(j >= 0 && j < automata->height){ //inside up and down boundries
if(!(i == pos_i && j == pos_j)){ //don't pick yourself
temp_array[*n_neighbors] = i*automata->width + j;
*n_neighbors = *n_neighbors + 1;
}
}
}
}
}
/* shrink to the exact count before handing the array to the caller */
int* final_array = malloc(*n_neighbors * sizeof(int));
for(int i = 0; i < *n_neighbors; i++){
final_array[i] = temp_array[i];
}
free(temp_array);
return final_array;
}
/* Placeholder transformation kept for the (commented-out) serial simulator:
 * takes a cell by value, zeroes its first GIS layer and returns the copy.
 * Not called by the current parallel path. */
Cell calculate_cell(Cell cell){
cell.gis[0] = 0;
return cell;
}
/*
 * Transition probability of a cell:
 *   (numerador / denominador) * mean neighbour intensity * total constraint,
 * evaluated left to right exactly as written. The neighbour index array is
 * owned here and freed before returning.
 */
double calculate_prob(Automata* automato, Cell cell) {
int size = 0;
int* vizinhos = list_of_neighbors(automato, cell.pos_i, cell.pos_j, automato->neighborhoodSize, &size);
double prob = numerador(automato->ligma, automato->alpha) / denominador(cell) * develop_intensity(automato->cells, vizinhos, size) * total_constrait();
free(vizinhos);
return prob;
}
// void simulate_automata(Automata* automata, size_t n_iterations){
// for(size_t curr_iteration = 0; curr_iteration < n_iterations; curr_iteration++){
// for(int i = 0; i < automata->width; i++){
// for(int j = 0; j< automata->height; j++){
// automata->cells[i*automata->width + j] = calculate_cell(automata->cells[i*automata->width + j]);
// }
// }
// }
// }
/*
 * One simulation pass for the rows assigned to thread thId
 * (rows [linha_inicial(thId), linha_final(thId)) per divide_automato).
 * IMPORTANT: the parameter names are swapped relative to the caller —
 * main() calls simulate_automata(automata, automataAux, ...), so here
 * `automataAux` is the automaton being WRITTEN (the live state) and
 * `automata` is the READ-ONLY snapshot of the previous iteration.
 * Only cells flagged isAvailable are recalculated; a cell turns urban
 * when its transition probability reaches the threshold.
 */
void simulate_automata(Automata* automataAux, Automata* automata, double trashold, int thId, int* indices){
int height = automata->height;
for(int i = linha_inicial(thId, indices); i < linha_final(thId, indices); i++){
for(int j = 0; j < height; j++){
if (automata->cells[i*automataAux->width + j].isAvailable) {
automataAux->cells[i*automataAux->width + j].probTransicao = calculate_prob(automata, automata->cells[i*automataAux->width + j]);
automataAux->cells[i*automataAux->width + j].isUrban = automataAux->cells[i*automataAux->width + j].probTransicao >= trashold
? 1 : 0;
}
}
}
}
/* Dumps every cell of the automaton to stdout (debug helper). */
void print_automata(Automata* automata){
    size_t i, j, k;
    for (i = 0; i < automata->width; i++) {
        for (j = 0; j < automata->height; j++) {
            Cell c = automata->cells[i*automata->width + j];
            printf("cell [%zu][%zu] -> isUrban: %d, mudavel: %d, prob: %lf, gis: ",
                   i,
                   j,
                   c.isUrban,
                   c.isAvailable,
                   c.probTransicao);
            for (k = 0; k < QTD_GIS; k++)
                printf("%lf ", c.gis[k]);
            printf("]\n");
        }
    }
}
/* Prints an int vector on one line, prefixed by its label (debug helper). */
void printVetor(char* str,int* vetor, int size) {
    printf("%s = ", str);
    for (size_t idx = 0; idx < size; idx++)
        printf("%d ", vetor[idx]);
    printf("\n");
}
/* Prints a double vector on one line, six decimals each (debug helper). */
void printVetorDouble(char* str, double* vetor, int size) {
    printf("%s = ", str);
    for (size_t idx = 0; idx < size; idx++)
        printf("%.6lf ", vetor[idx]);
    printf("\n");
}
/* First row (inclusive) assigned to thread thId: the previous thread's
 * end index, or row 0 for the very first thread. */
int linha_inicial(int thId, int* indices) {
    return (thId <= 0) ? 0 : indices[thId - 1];
}
/* End row (exclusive) assigned to thread thId, as computed by divide_automato. */
int linha_final(int thId, int* indices) {
return indices[thId];
}
double numerador(double ligma, double alpha) {
return 1.0 + pow(-log(ligma), alpha);
}
/* Logistic-style denominator of the transition formula: 1 + e^(-score),
 * where score is the cell's weighted GIS evaluation. */
double denominador(Cell cell) {
    double score = evaluation_score(cell.gis, cell.pesos, cell.qtdGis);
    double result = 1.0 + exp(-score);
    return result;
}
/*
 * Weighted GIS score of a cell: sum over i of gis[i] * pesos[i].
 * The loop index is now int: the original compared `size_t i < size`
 * with an int size, so a negative size became a huge unsigned bound
 * and read far out of bounds. A non-positive size now yields 0.0.
 */
double evaluation_score(double* gis, double* pesos, int size) {
    double sum = 0.0;
    for (int i = 0; i < size; i++) {
        sum += gis[i]*pesos[i];
    }
    return sum;
}
/*
 * Mean transition probability over the given neighbour indices.
 * A cell with no neighbours now yields 0.0 intensity instead of the
 * original's 0/0 == NaN (which then poisoned calculate_prob); the guard
 * also covers a negative size, which the original's size_t comparison
 * turned into a huge unsigned bound.
 */
double develop_intensity(Cell* matriz, int* vizinhos, int size) {
    if (size <= 0) {
        return 0.0; /* no neighbours: define the mean as zero, not NaN */
    }
    double sum = 0.0;
    for (int i = 0; i < size; i++) {
        sum += matriz[vizinhos[i]].probTransicao;
    }
    return sum / size;
}
/* Global constraint factor of the transition formula. Currently a neutral
 * placeholder (1.0) until its real semantics are defined. */
double total_constrait() {
return 1.0; // semantics still undecided (original note: "ainda nao sabemos o que e")
}
|
Sema.h | //===--- Sema.h - Semantic Analysis & AST Building --------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the Sema class, which performs semantic analysis and
// builds ASTs.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_SEMA_SEMA_H
#define LLVM_CLANG_SEMA_SEMA_H
#include "clang/AST/ASTConcept.h"
#include "clang/AST/ASTFwd.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Availability.h"
#include "clang/AST/ComparisonCategories.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/DeclarationName.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprConcepts.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ExprOpenMP.h"
#include "clang/AST/ExternalASTSource.h"
#include "clang/AST/LocInfoType.h"
#include "clang/AST/MangleNumberingContext.h"
#include "clang/AST/NSAPI.h"
#include "clang/AST/PrettyPrinter.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/TypeLoc.h"
#include "clang/AST/TypeOrdering.h"
#include "clang/Basic/BitmaskEnum.h"
#include "clang/Basic/ExpressionTraits.h"
#include "clang/Basic/Module.h"
#include "clang/Basic/OpenCLOptions.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/PragmaKinds.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Basic/TemplateKinds.h"
#include "clang/Basic/TypeTraits.h"
#include "clang/Sema/AnalysisBasedWarnings.h"
#include "clang/Sema/CleanupInfo.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/ExternalSemaSource.h"
#include "clang/Sema/IdentifierResolver.h"
#include "clang/Sema/ObjCMethodList.h"
#include "clang/Sema/Ownership.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/SemaConcept.h"
#include "clang/Sema/TypoCorrection.h"
#include "clang/Sema/Weak.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/TinyPtrVector.h"
#include "llvm/Frontend/OpenMP/OMPConstants.h"
#include <deque>
#include <memory>
#include <string>
#include <tuple>
#include <vector>
namespace llvm {
class APSInt;
template <typename ValueT> struct DenseMapInfo;
template <typename ValueT, typename ValueInfoT> class DenseSet;
class SmallBitVector;
struct InlineAsmIdentifierInfo;
}
namespace clang {
class ADLResult;
class ASTConsumer;
class ASTContext;
class ASTMutationListener;
class ASTReader;
class ASTWriter;
class ArrayType;
class ParsedAttr;
class BindingDecl;
class BlockDecl;
class CapturedDecl;
class CXXBasePath;
class CXXBasePaths;
class CXXBindTemporaryExpr;
typedef SmallVector<CXXBaseSpecifier*, 4> CXXCastPath;
class CXXConstructorDecl;
class CXXConversionDecl;
class CXXDeleteExpr;
class CXXDestructorDecl;
class CXXFieldCollector;
class CXXMemberCallExpr;
class CXXMethodDecl;
class CXXScopeSpec;
class CXXTemporary;
class CXXTryStmt;
class CallExpr;
class ClassTemplateDecl;
class ClassTemplatePartialSpecializationDecl;
class ClassTemplateSpecializationDecl;
class VarTemplatePartialSpecializationDecl;
class CodeCompleteConsumer;
class CodeCompletionAllocator;
class CodeCompletionTUInfo;
class CodeCompletionResult;
class CoroutineBodyStmt;
class Decl;
class DeclAccessPair;
class DeclContext;
class DeclRefExpr;
class DeclaratorDecl;
class DeducedTemplateArgument;
class DependentDiagnostic;
class DesignatedInitExpr;
class Designation;
class EnableIfAttr;
class EnumConstantDecl;
class Expr;
class ExtVectorType;
class FormatAttr;
class FriendDecl;
class FunctionDecl;
class FunctionProtoType;
class FunctionTemplateDecl;
class ImplicitConversionSequence;
typedef MutableArrayRef<ImplicitConversionSequence> ConversionSequenceList;
class InitListExpr;
class InitializationKind;
class InitializationSequence;
class InitializedEntity;
class IntegerLiteral;
class LabelStmt;
class LambdaExpr;
class LangOptions;
class LocalInstantiationScope;
class LookupResult;
class MacroInfo;
typedef ArrayRef<std::pair<IdentifierInfo *, SourceLocation>> ModuleIdPath;
class ModuleLoader;
class MultiLevelTemplateArgumentList;
class NamedDecl;
class ObjCCategoryDecl;
class ObjCCategoryImplDecl;
class ObjCCompatibleAliasDecl;
class ObjCContainerDecl;
class ObjCImplDecl;
class ObjCImplementationDecl;
class ObjCInterfaceDecl;
class ObjCIvarDecl;
template <class T> class ObjCList;
class ObjCMessageExpr;
class ObjCMethodDecl;
class ObjCPropertyDecl;
class ObjCProtocolDecl;
class OMPThreadPrivateDecl;
class OMPRequiresDecl;
class OMPDeclareReductionDecl;
class OMPDeclareSimdDecl;
class OMPClause;
struct OMPVarListLocTy;
struct OverloadCandidate;
enum class OverloadCandidateParamOrder : char;
enum OverloadCandidateRewriteKind : unsigned;
class OverloadCandidateSet;
class OverloadExpr;
class ParenListExpr;
class ParmVarDecl;
class Preprocessor;
class PseudoDestructorTypeStorage;
class PseudoObjectExpr;
class QualType;
class StandardConversionSequence;
class Stmt;
class StringLiteral;
class SwitchStmt;
class TemplateArgument;
class TemplateArgumentList;
class TemplateArgumentLoc;
class TemplateDecl;
class TemplateInstantiationCallback;
class TemplateParameterList;
class TemplatePartialOrderingContext;
class TemplateTemplateParmDecl;
class Token;
class TypeAliasDecl;
class TypedefDecl;
class TypedefNameDecl;
class TypeLoc;
class TypoCorrectionConsumer;
class UnqualifiedId;
class UnresolvedLookupExpr;
class UnresolvedMemberExpr;
class UnresolvedSetImpl;
class UnresolvedSetIterator;
class UsingDecl;
class UsingShadowDecl;
class ValueDecl;
class VarDecl;
class VarTemplateSpecializationDecl;
class VisibilityAttr;
class VisibleDeclConsumer;
class IndirectFieldDecl;
struct DeductionFailureInfo;
class TemplateSpecCandidateSet;
namespace sema {
class AccessedEntity;
class BlockScopeInfo;
class Capture;
class CapturedRegionScopeInfo;
class CapturingScopeInfo;
class CompoundScopeInfo;
class DelayedDiagnostic;
class DelayedDiagnosticPool;
class FunctionScopeInfo;
class LambdaScopeInfo;
class PossiblyUnreachableDiag;
class SemaPPCallbacks;
class TemplateDeductionInfo;
}
namespace threadSafety {
class BeforeSet;
void threadSafetyCleanup(BeforeSet* Cache);
}
// FIXME: No way to easily map from TemplateTypeParmTypes to
// TemplateTypeParmDecls, so we have this horrible PointerUnion.
typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType*, NamedDecl*>,
SourceLocation> UnexpandedParameterPack;
/// Describes whether we've seen any nullability information for the given
/// file.
struct FileNullability {
  /// The first pointer declarator (of any pointer kind) in the file that does
  /// not have a corresponding nullability annotation.
  SourceLocation PointerLoc;
  /// The end location for the first pointer declarator in the file. Used for
  /// placing fix-its.
  SourceLocation PointerEndLoc;
  /// Which kind of pointer declarator we saw.
  /// NOTE(review): the encoding of this value is established at the use
  /// sites, which are not visible in this header — confirm before relying
  /// on specific values.
  uint8_t PointerKind;
  /// Whether we saw any type nullability annotations in the given file.
  bool SawTypeNullability = false;
};
/// A mapping from file IDs to a record of whether we've seen nullability
/// information in that file.
///
/// Lookups go through operator[], which keeps the most recently accessed
/// entry in a one-element cache; a default-constructed FileNullability is
/// created on first access to a file ID (DenseMap operator[] semantics).
class FileNullabilityMap {
  /// A mapping from file IDs to the nullability information for each file ID.
  llvm::DenseMap<FileID, FileNullability> Map;
  /// A single-element cache based on the file ID.
  struct {
    FileID File;
    FileNullability Nullability;
  } Cache;
public:
  FileNullability &operator[](FileID file) {
    // Check the single-element cache.
    if (file == Cache.File)
      return Cache.Nullability;
    // It's not in the single-element cache; flush the cache if we have one.
    if (!Cache.File.isInvalid()) {
      Map[Cache.File] = Cache.Nullability;
    }
    // Pull this entry into the cache.
    Cache.File = file;
    Cache.Nullability = Map[file];
    return Cache.Nullability;
  }
};
/// Keeps track of expected type during expression parsing. The type is tied to
/// a particular token, all functions that update or consume the type take a
/// start location of the token they are looking at as a parameter. This allows
/// to avoid updating the type on hot paths in the parser.
class PreferredTypeBuilder {
public:
  PreferredTypeBuilder() = default;
  explicit PreferredTypeBuilder(QualType Type) : Type(Type) {}
  void enterCondition(Sema &S, SourceLocation Tok);
  void enterReturn(Sema &S, SourceLocation Tok);
  void enterVariableInit(SourceLocation Tok, Decl *D);
  /// Computing a type for the function argument may require running
  /// overloading, so we postpone its computation until it is actually needed.
  ///
  /// Clients should be very careful when using this function, as it stores a
  /// function_ref, clients should make sure all calls to get() with the same
  /// location happen while function_ref is alive.
  void enterFunctionArgument(SourceLocation Tok,
                             llvm::function_ref<QualType()> ComputeType);
  void enterParenExpr(SourceLocation Tok, SourceLocation LParLoc);
  void enterUnary(Sema &S, SourceLocation Tok, tok::TokenKind OpKind,
                  SourceLocation OpLoc);
  void enterBinary(Sema &S, SourceLocation Tok, Expr *LHS, tok::TokenKind Op);
  void enterMemAccess(Sema &S, SourceLocation Tok, Expr *Base);
  void enterSubscript(Sema &S, SourceLocation Tok, Expr *LHS);
  /// Handles all type casts, including C-style cast, C++ casts, etc.
  void enterTypeCast(SourceLocation Tok, QualType CastType);
  /// Returns the expected type for the token at Tok, or a null QualType when
  /// Tok is not the token an expectation was recorded for. A stored concrete
  /// Type takes precedence over the deferred ComputeType callback.
  QualType get(SourceLocation Tok) const {
    if (Tok != ExpectedLoc)
      return QualType();
    if (!Type.isNull())
      return Type;
    if (ComputeType)
      return ComputeType();
    return QualType();
  }
private:
  /// Start position of a token for which we store expected type.
  SourceLocation ExpectedLoc;
  /// Expected type for a token starting at ExpectedLoc.
  QualType Type;
  /// A function to compute expected type at ExpectedLoc. It is only considered
  /// if Type is null.
  llvm::function_ref<QualType()> ComputeType;
};
/// Sema - This implements semantic analysis and AST building for C.
class Sema final {
Sema(const Sema &) = delete;
void operator=(const Sema &) = delete;
/// A key method to reduce duplicate debug info from Sema.
virtual void anchor();
///Source of additional semantic information.
ExternalSemaSource *ExternalSource;
///Whether Sema has generated a multiplexer and has to delete it.
bool isMultiplexExternalSource;
static bool mightHaveNonExternalLinkage(const DeclaratorDecl *FD);
bool isVisibleSlow(const NamedDecl *D);
/// Determine whether two declarations should be linked together, given that
/// the old declaration might not be visible and the new declaration might
/// not have external linkage.
bool shouldLinkPossiblyHiddenDecl(const NamedDecl *Old,
const NamedDecl *New) {
if (isVisible(Old))
return true;
// See comment in below overload for why it's safe to compute the linkage
// of the new declaration here.
if (New->isExternallyDeclarable()) {
assert(Old->isExternallyDeclarable() &&
"should not have found a non-externally-declarable previous decl");
return true;
}
return false;
}
bool shouldLinkPossiblyHiddenDecl(LookupResult &Old, const NamedDecl *New);
void setupImplicitSpecialMemberType(CXXMethodDecl *SpecialMem,
QualType ResultTy,
ArrayRef<QualType> Args);
public:
/// The maximum alignment, same as in llvm::Value. We duplicate them here
/// because that allows us not to duplicate the constants in clang code,
/// which we must to since we can't directly use the llvm constants.
/// The value is verified against llvm here: lib/CodeGen/CGDecl.cpp
///
/// This is the greatest alignment value supported by load, store, and alloca
/// instructions, and global values.
static const unsigned MaxAlignmentExponent = 29;
static const unsigned MaximumAlignment = 1u << MaxAlignmentExponent;
typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy;
typedef OpaquePtr<TemplateName> TemplateTy;
typedef OpaquePtr<QualType> TypeTy;
OpenCLOptions OpenCLFeatures;
FPOptions CurFPFeatures;
const LangOptions &LangOpts;
Preprocessor &PP;
ASTContext &Context;
ASTConsumer &Consumer;
DiagnosticsEngine &Diags;
SourceManager &SourceMgr;
/// Flag indicating whether or not to collect detailed statistics.
bool CollectStats;
/// Code-completion consumer.
CodeCompleteConsumer *CodeCompleter;
/// CurContext - This is the current declaration context of parsing.
DeclContext *CurContext;
/// Generally null except when we temporarily switch decl contexts,
/// like in \see ActOnObjCTemporaryExitContainerContext.
DeclContext *OriginalLexicalContext;
/// VAListTagName - The declaration name corresponding to __va_list_tag.
/// This is used as part of a hack to omit that class from ADL results.
DeclarationName VAListTagName;
bool MSStructPragmaOn; // True when \#pragma ms_struct on
/// Controls member pointer representation format under the MS ABI.
LangOptions::PragmaMSPointersToMembersKind
MSPointerToMemberRepresentationMethod;
/// Stack of active SEH __finally scopes. Can be empty.
SmallVector<Scope*, 2> CurrentSEHFinally;
/// Source location for newly created implicit MSInheritanceAttrs
SourceLocation ImplicitMSInheritanceAttrLoc;
/// Holds TypoExprs that are created from `createDelayedTypo`. This is used by
/// `TransformTypos` in order to keep track of any TypoExprs that are created
/// recursively during typo correction and wipe them away if the correction
/// fails.
llvm::SmallVector<TypoExpr *, 2> TypoExprs;
/// pragma clang section kind
enum PragmaClangSectionKind {
PCSK_Invalid = 0,
PCSK_BSS = 1,
PCSK_Data = 2,
PCSK_Rodata = 3,
PCSK_Text = 4,
PCSK_Relro = 5
};
enum PragmaClangSectionAction {
PCSA_Set = 0,
PCSA_Clear = 1
};
/// Per-kind state for `#pragma clang section` (one instance each for
/// bss/data/rodata/text/relro below).
struct PragmaClangSection {
std::string SectionName; // Section name currently in effect.
bool Valid = false; // True while a pragma-supplied name is active.
SourceLocation PragmaLocation; // Location of the controlling pragma.
/// Apply a set/clear action parsed from the pragma (defined out of line).
void Act(SourceLocation PragmaLocation,
PragmaClangSectionAction Action,
StringLiteral* Name);
};
PragmaClangSection PragmaClangBSSSection;
PragmaClangSection PragmaClangDataSection;
PragmaClangSection PragmaClangRodataSection;
PragmaClangSection PragmaClangRelroSection;
PragmaClangSection PragmaClangTextSection;
/// Bit-flag actions a Microsoft-style pragma stack directive can request.
/// Push and pop may be combined with set (PSK_Push_Set / PSK_Pop_Set).
enum PragmaMsStackAction {
PSK_Reset = 0x0, // #pragma ()
PSK_Set = 0x1, // #pragma (value)
PSK_Push = 0x2, // #pragma (push[, id])
PSK_Pop = 0x4, // #pragma (pop[, id])
PSK_Show = 0x8, // #pragma (show) -- only for "pack"!
PSK_Push_Set = PSK_Push | PSK_Set, // #pragma (push[, id], value)
PSK_Pop_Set = PSK_Pop | PSK_Set, // #pragma (pop[, id], value)
};
/// A stack modeling a Microsoft-style `#pragma <name>(push/pop/set/...)`
/// directive. Tracks the current value, the default, and a stack of saved
/// (optionally labeled) slots.
template<typename ValueType>
struct PragmaStack {
/// One saved stack entry: the label given at push time (may be empty),
/// the value and pragma location that were current when pushed, and the
/// location of the push itself.
struct Slot {
llvm::StringRef StackSlotLabel;
ValueType Value;
SourceLocation PragmaLocation;
SourceLocation PragmaPushLocation;
Slot(llvm::StringRef StackSlotLabel, ValueType Value,
SourceLocation PragmaLocation, SourceLocation PragmaPushLocation)
: StackSlotLabel(StackSlotLabel), Value(Value),
PragmaLocation(PragmaLocation),
PragmaPushLocation(PragmaPushLocation) {}
};
/// Perform the requested \p Action. Reset restores the default value;
/// push saves the current value; pop restores either the labeled slot
/// (discarding everything above it) or the top slot; set (possibly
/// combined with push/pop) installs \p Value as current.
void Act(SourceLocation PragmaLocation, PragmaMsStackAction Action,
llvm::StringRef StackSlotLabel, ValueType Value) {
if (Action == PSK_Reset) {
CurrentValue = DefaultValue;
CurrentPragmaLocation = PragmaLocation;
return;
}
if (Action & PSK_Push)
Stack.emplace_back(StackSlotLabel, CurrentValue, CurrentPragmaLocation,
PragmaLocation);
else if (Action & PSK_Pop) {
if (!StackSlotLabel.empty()) {
// If we've got a label, try to find it and jump there.
auto I = llvm::find_if(llvm::reverse(Stack), [&](const Slot &x) {
return x.StackSlotLabel == StackSlotLabel;
});
// If we found the label, pop from there; a pop with an unknown
// label is silently ignored.
if (I != Stack.rend()) {
CurrentValue = I->Value;
CurrentPragmaLocation = I->PragmaLocation;
Stack.erase(std::prev(I.base()), Stack.end());
}
} else if (!Stack.empty()) {
// We do not have a label, just pop the last entry.
CurrentValue = Stack.back().Value;
CurrentPragmaLocation = Stack.back().PragmaLocation;
Stack.pop_back();
}
}
if (Action & PSK_Set) {
CurrentValue = Value;
CurrentPragmaLocation = PragmaLocation;
}
}
// MSVC seems to add artificial slots to #pragma stacks on entering a C++
// method body to restore the stacks on exit, so it works like this:
//
// struct S {
// #pragma <name>(push, InternalPragmaSlot, <current_pragma_value>)
// void Method {}
// #pragma <name>(pop, InternalPragmaSlot)
// };
//
// It works even with #pragma vtordisp, although MSVC doesn't support
// #pragma vtordisp(push [, id], n)
// syntax.
//
// Push / pop a named sentinel slot.
void SentinelAction(PragmaMsStackAction Action, StringRef Label) {
assert((Action == PSK_Push || Action == PSK_Pop) &&
"Can only push / pop #pragma stack sentinels!");
Act(CurrentPragmaLocation, Action, Label, CurrentValue);
}
// Constructors.
explicit PragmaStack(const ValueType &Default)
: DefaultValue(Default), CurrentValue(Default) {}
/// True if the current value differs from the default (requires a
/// meaningful operator!= on ValueType).
bool hasValue() const { return CurrentValue != DefaultValue; }
SmallVector<Slot, 2> Stack;
ValueType DefaultValue; // Value used for PSK_Reset action.
ValueType CurrentValue;
SourceLocation CurrentPragmaLocation;
};
// FIXME: We should serialize / deserialize these if they occur in a PCH (but
// we shouldn't do so if they're in a module).
/// Whether to insert vtordisps prior to virtual bases in the Microsoft
/// C++ ABI. Possible values are 0, 1, and 2, which mean:
///
/// 0: Suppress all vtordisps
/// 1: Insert vtordisps in the presence of vbase overrides and non-trivial
/// structors
/// 2: Always insert vtordisps to support RTTI on partially constructed
/// objects
PragmaStack<MSVtorDispMode> VtorDispStack;
// #pragma pack.
// Sentinel to represent when the stack is set to mac68k alignment.
static const unsigned kMac68kAlignmentSentinel = ~0U;
PragmaStack<unsigned> PackStack;
// The current #pragma pack values and locations at each #include.
struct PackIncludeState {
unsigned CurrentValue; // Pack value in effect when the #include was entered.
SourceLocation CurrentPragmaLocation; // Location of the pragma that set it.
bool HasNonDefaultValue, ShouldWarnOnInclude; // Drives include-time warnings.
};
SmallVector<PackIncludeState, 8> PackIncludeStack;
// Segment #pragmas.
PragmaStack<StringLiteral *> DataSegStack;
PragmaStack<StringLiteral *> BSSSegStack;
PragmaStack<StringLiteral *> ConstSegStack;
PragmaStack<StringLiteral *> CodeSegStack;
// This stack tracks the current state of Sema.CurFPFeatures.
PragmaStack<FPOptionsOverride> FpPragmaStack;
/// Returns the floating-point option overrides currently imposed by
/// pragmas, or a default-constructed (empty) override set when the
/// pragma stack holds no non-default value.
FPOptionsOverride CurFPFeatureOverrides() {
if (FpPragmaStack.hasValue())
return FpPragmaStack.CurrentValue;
return FPOptionsOverride();
}
// RAII object to push / pop sentinel slots for all MS #pragma stacks.
// Actions should be performed only if we enter / exit a C++ method body.
class PragmaStackSentinelRAII {
public:
/// When \p ShouldAct is true, pushes a sentinel slot labeled \p SlotLabel
/// on all MS #pragma stacks; the destructor pops it again.
PragmaStackSentinelRAII(Sema &S, StringRef SlotLabel, bool ShouldAct);
~PragmaStackSentinelRAII();
private:
Sema &S;
StringRef SlotLabel; // Label used for both the push and the matching pop.
bool ShouldAct; // Whether the ctor acted and the dtor must undo it.
};
/// A mapping that describes the nullability we've seen in each header file.
FileNullabilityMap NullabilityMap;
/// Last section used with #pragma init_seg.
StringLiteral *CurInitSeg;
SourceLocation CurInitSegLoc;
/// VisContext - Manages the stack for \#pragma GCC visibility.
void *VisContext; // Really a "PragmaVisStack*"
/// This is an attribute introduced by \#pragma clang attribute.
struct PragmaAttributeEntry {
/// Location of the pragma that introduced the attribute.
SourceLocation Loc;
/// The parsed attribute to apply.
ParsedAttr *Attribute;
/// The subject match rules the pragma specified.
SmallVector<attr::SubjectMatchRule, 4> MatchRules;
// NOTE(review): presumably set once the attribute has matched some
// declaration, enabling "unused pragma" diagnostics — confirm at use sites.
bool IsUsed;
};
/// A push'd group of PragmaAttributeEntries.
struct PragmaAttributeGroup {
/// The location of the push attribute.
SourceLocation Loc;
/// The namespace of this push group.
const IdentifierInfo *Namespace;
/// The attributes pushed in this group.
SmallVector<PragmaAttributeEntry, 2> Entries;
};
SmallVector<PragmaAttributeGroup, 2> PragmaAttributeStack;
/// The declaration that is currently receiving an attribute from the
/// #pragma attribute stack.
const Decl *PragmaAttributeCurrentTargetDecl;
/// This represents the last location of a "#pragma clang optimize off"
/// directive if such a directive has not been closed by an "on" yet. If
/// optimizations are currently "on", this is set to an invalid location.
SourceLocation OptimizeOffPragmaLocation;
/// Flag indicating if Sema is building a recovery call expression.
///
/// This flag is used to avoid building recovery call expressions
/// if Sema is already doing so, which would cause infinite recursions.
bool IsBuildingRecoveryCallExpr;
/// Used to control the generation of ExprWithCleanups.
CleanupInfo Cleanup;
/// ExprCleanupObjects - This is the stack of objects requiring
/// cleanup that are created by the current full expression.
SmallVector<ExprWithCleanups::CleanupObject, 8> ExprCleanupObjects;
/// Store a set of either DeclRefExprs or MemberExprs that contain a reference
/// to a variable (constant) that may or may not be odr-used in this Expr, and
/// we won't know until all lvalue-to-rvalue and discarded value conversions
/// have been applied to all subexpressions of the enclosing full expression.
/// This is cleared at the end of each full expression.
using MaybeODRUseExprSet = llvm::SetVector<Expr *, SmallVector<Expr *, 4>,
llvm::SmallPtrSet<Expr *, 4>>;
MaybeODRUseExprSet MaybeODRUseExprs;
std::unique_ptr<sema::FunctionScopeInfo> CachedFunctionScope;
/// Stack containing information about each of the nested
/// function, block, and method scopes that are currently active.
SmallVector<sema::FunctionScopeInfo *, 4> FunctionScopes;
/// The index of the first FunctionScope that corresponds to the current
/// context.
unsigned FunctionScopesStart = 0;
/// Returns only the function scopes that belong to the current context,
/// i.e. everything at or after FunctionScopesStart.
ArrayRef<sema::FunctionScopeInfo*> getFunctionScopes() const {
return llvm::makeArrayRef(FunctionScopes)
.drop_front(FunctionScopesStart);
}
/// Stack containing information needed when in C++2a an 'auto' is encountered
/// in a function declaration parameter type specifier in order to invent a
/// corresponding template parameter in the enclosing abbreviated function
/// template. This information is also present in LambdaScopeInfo, stored in
/// the FunctionScopes stack.
SmallVector<InventedTemplateParameterInfo, 4> InventedParameterInfos;
/// The index of the first InventedParameterInfo that refers to the current
/// context.
unsigned InventedParameterInfosStart = 0;
/// Returns only the invented-parameter records that belong to the current
/// context, i.e. everything at or after InventedParameterInfosStart.
ArrayRef<InventedTemplateParameterInfo> getInventedParameterInfos() const {
return llvm::makeArrayRef(InventedParameterInfos)
.drop_front(InventedParameterInfosStart);
}
typedef LazyVector<TypedefNameDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadExtVectorDecls, 2, 2>
ExtVectorDeclsType;
/// ExtVectorDecls - This is a list all the extended vector types. This allows
/// us to associate a raw vector type with one of the ext_vector type names.
/// This is only necessary for issuing pretty diagnostics.
ExtVectorDeclsType ExtVectorDecls;
/// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes.
std::unique_ptr<CXXFieldCollector> FieldCollector;
typedef llvm::SmallSetVector<NamedDecl *, 16> NamedDeclSetType;
/// Set containing all declared private fields that are not used.
NamedDeclSetType UnusedPrivateFields;
/// Set containing all typedefs that are likely unused.
llvm::SmallSetVector<const TypedefNameDecl *, 4>
UnusedLocalTypedefNameCandidates;
/// Delete-expressions to be analyzed at the end of translation unit
///
/// This list contains class members, and locations of delete-expressions
/// that could not be proven as to whether they mismatch with new-expression
/// used in initializer of the field.
typedef std::pair<SourceLocation, bool> DeleteExprLoc;
typedef llvm::SmallVector<DeleteExprLoc, 4> DeleteLocs;
llvm::MapVector<FieldDecl *, DeleteLocs> DeleteExprs;
typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy;
/// PureVirtualClassDiagSet - a set of class declarations which we have
/// emitted a list of pure virtual functions. Used to prevent emitting the
/// same list more than once.
std::unique_ptr<RecordDeclSetTy> PureVirtualClassDiagSet;
/// ParsingInitForAutoVars - a set of declarations with auto types for which
/// we are currently parsing the initializer.
llvm::SmallPtrSet<const Decl*, 4> ParsingInitForAutoVars;
/// Look for a locally scoped extern "C" declaration by the given name.
NamedDecl *findLocallyScopedExternCDecl(DeclarationName Name);
typedef LazyVector<VarDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadTentativeDefinitions, 2, 2>
TentativeDefinitionsType;
/// All the tentative definitions encountered in the TU.
TentativeDefinitionsType TentativeDefinitions;
/// All the external declarations encountered and used in the TU.
SmallVector<VarDecl *, 4> ExternalDeclarations;
typedef LazyVector<const DeclaratorDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadUnusedFileScopedDecls, 2, 2>
UnusedFileScopedDeclsType;
/// The set of file scoped decls seen so far that have not been used
/// and must warn if not used. Only contains the first declaration.
UnusedFileScopedDeclsType UnusedFileScopedDecls;
typedef LazyVector<CXXConstructorDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadDelegatingConstructors, 2, 2>
DelegatingCtorDeclsType;
/// All the delegating constructors seen so far in the file, used for
/// cycle detection at the end of the TU.
DelegatingCtorDeclsType DelegatingCtorDecls;
/// All the overriding functions seen during a class definition
/// that had their exception spec checks delayed, plus the overridden
/// function.
SmallVector<std::pair<const CXXMethodDecl*, const CXXMethodDecl*>, 2>
DelayedOverridingExceptionSpecChecks;
/// All the function redeclarations seen during a class definition that had
/// their exception spec checks delayed, plus the prior declaration they
/// should be checked against. Except during error recovery, the new decl
/// should always be a friend declaration, as that's the only valid way to
/// redeclare a special member before its class is complete.
SmallVector<std::pair<FunctionDecl*, FunctionDecl*>, 2>
DelayedEquivalentExceptionSpecChecks;
typedef llvm::MapVector<const FunctionDecl *,
std::unique_ptr<LateParsedTemplate>>
LateParsedTemplateMapT;
LateParsedTemplateMapT LateParsedTemplateMap;
/// Callback to the parser to parse templated functions when needed.
typedef void LateTemplateParserCB(void *P, LateParsedTemplate &LPT);
typedef void LateTemplateParserCleanupCB(void *P);
LateTemplateParserCB *LateTemplateParser;
LateTemplateParserCleanupCB *LateTemplateParserCleanup;
void *OpaqueParser;
/// Install the callbacks Sema uses to have the parser parse late-parsed
/// templates on demand.
/// \param LTP callback that parses a single LateParsedTemplate.
/// \param LTPCleanup cleanup callback (may be null).
/// \param P opaque parser pointer handed back to both callbacks.
void SetLateTemplateParser(LateTemplateParserCB *LTP,
LateTemplateParserCleanupCB *LTPCleanup,
void *P) {
LateTemplateParser = LTP;
LateTemplateParserCleanup = LTPCleanup;
OpaqueParser = P;
}
class DelayedDiagnostics;
/// Opaque token recording the delayed-diagnostic pool that was active
/// before a push; only DelayedDiagnostics itself can read it back.
class DelayedDiagnosticsState {
sema::DelayedDiagnosticPool *SavedPool;
friend class Sema::DelayedDiagnostics;
};
typedef DelayedDiagnosticsState ParsingDeclState;
typedef DelayedDiagnosticsState ProcessingContextState;
/// A class which encapsulates the logic for delaying diagnostics
/// during parsing and other processing.
class DelayedDiagnostics {
/// The current pool of diagnostics into which delayed
/// diagnostics should go. Null when diagnostics are not being delayed.
sema::DelayedDiagnosticPool *CurPool;
public:
DelayedDiagnostics() : CurPool(nullptr) {}
/// Adds a delayed diagnostic.
void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h
/// Determines whether diagnostics should be delayed.
bool shouldDelayDiagnostics() { return CurPool != nullptr; }
/// Returns the current delayed-diagnostics pool.
sema::DelayedDiagnosticPool *getCurrentPool() const {
return CurPool;
}
/// Enter a new scope. Access and deprecation diagnostics will be
/// collected in this pool.
DelayedDiagnosticsState push(sema::DelayedDiagnosticPool &pool) {
DelayedDiagnosticsState state;
state.SavedPool = CurPool;
CurPool = &pool;
return state;
}
/// Leave a delayed-diagnostic state that was previously pushed.
/// Do not emit any of the diagnostics. This is performed as part
/// of the bookkeeping of popping a pool "properly".
void popWithoutEmitting(DelayedDiagnosticsState state) {
CurPool = state.SavedPool;
}
/// Enter a new scope where access and deprecation diagnostics are
/// not delayed.
DelayedDiagnosticsState pushUndelayed() {
DelayedDiagnosticsState state;
state.SavedPool = CurPool;
CurPool = nullptr;
return state;
}
/// Undo a previous pushUndelayed().
void popUndelayed(DelayedDiagnosticsState state) {
assert(CurPool == nullptr);
CurPool = state.SavedPool;
}
} DelayedDiagnostics;
/// A RAII object to temporarily push a declaration context.
class ContextRAII {
private:
Sema &S;
DeclContext *SavedContext; // Context to restore; null once pop() has run.
ProcessingContextState SavedContextState;
QualType SavedCXXThisTypeOverride;
unsigned SavedFunctionScopesStart;
unsigned SavedInventedParameterInfosStart;
public:
/// Saves the current context (and related per-context state), then makes
/// \p ContextToPush current. When \p NewThisContext is true, the
/// CXXThisTypeOverride is also cleared for the new context.
ContextRAII(Sema &S, DeclContext *ContextToPush, bool NewThisContext = true)
: S(S), SavedContext(S.CurContext),
SavedContextState(S.DelayedDiagnostics.pushUndelayed()),
SavedCXXThisTypeOverride(S.CXXThisTypeOverride),
SavedFunctionScopesStart(S.FunctionScopesStart),
SavedInventedParameterInfosStart(S.InventedParameterInfosStart)
{
assert(ContextToPush && "pushing null context");
S.CurContext = ContextToPush;
if (NewThisContext)
S.CXXThisTypeOverride = QualType();
// Any saved FunctionScopes do not refer to this context.
S.FunctionScopesStart = S.FunctionScopes.size();
S.InventedParameterInfosStart = S.InventedParameterInfos.size();
}
/// Restores all saved state. Idempotent: SavedContext is nulled so a
/// second call (e.g. from the destructor after an explicit pop) is a no-op.
void pop() {
if (!SavedContext) return;
S.CurContext = SavedContext;
S.DelayedDiagnostics.popUndelayed(SavedContextState);
S.CXXThisTypeOverride = SavedCXXThisTypeOverride;
S.FunctionScopesStart = SavedFunctionScopesStart;
S.InventedParameterInfosStart = SavedInventedParameterInfosStart;
SavedContext = nullptr;
}
~ContextRAII() {
pop();
}
};
/// Whether the AST is currently being rebuilt to correct immediate
/// invocations. Immediate invocation candidates and references to consteval
/// functions aren't tracked when this is set.
bool RebuildingImmediateInvocation = false;
/// Used to change context to isConstantEvaluated without pushing a heavy
/// ExpressionEvaluationContextRecord object.
bool isConstantEvaluatedOverride;
/// True when the innermost expression evaluation context is
/// constant-evaluated, or when the lightweight override flag is set.
bool isConstantEvaluated() {
if (isConstantEvaluatedOverride)
return true;
return ExprEvalContexts.back().isConstantEvaluated();
}
/// RAII object to handle the state changes required to synthesize
/// a function body.
class SynthesizedFunctionScope {
Sema &S;
Sema::ContextRAII SavedContext;
bool PushedCodeSynthesisContext = false; // Set by addContextNote().
public:
/// Pushes \p DC as the current context, opens a function scope and a
/// PotentiallyEvaluated expression context, and marks a FunctionDecl as
/// about to receive a body. \p DC must be a FunctionDecl or ObjCMethodDecl.
SynthesizedFunctionScope(Sema &S, DeclContext *DC)
: S(S), SavedContext(S, DC) {
S.PushFunctionScope();
S.PushExpressionEvaluationContext(
Sema::ExpressionEvaluationContext::PotentiallyEvaluated);
if (auto *FD = dyn_cast<FunctionDecl>(DC))
FD->setWillHaveBody(true);
else
assert(isa<ObjCMethodDecl>(DC));
}
/// Records a DefiningSynthesizedFunction code-synthesis context so
/// diagnostics can point at \p UseLoc. May be called at most once.
void addContextNote(SourceLocation UseLoc) {
assert(!PushedCodeSynthesisContext);
Sema::CodeSynthesisContext Ctx;
Ctx.Kind = Sema::CodeSynthesisContext::DefiningSynthesizedFunction;
Ctx.PointOfInstantiation = UseLoc;
Ctx.Entity = cast<Decl>(S.CurContext);
S.pushCodeSynthesisContext(Ctx);
PushedCodeSynthesisContext = true;
}
/// Unwinds everything the constructor (and addContextNote) pushed.
~SynthesizedFunctionScope() {
if (PushedCodeSynthesisContext)
S.popCodeSynthesisContext();
if (auto *FD = dyn_cast<FunctionDecl>(S.CurContext))
FD->setWillHaveBody(false);
S.PopExpressionEvaluationContext();
S.PopFunctionScopeInfo();
}
};
/// WeakUndeclaredIdentifiers - Identifiers contained in
/// \#pragma weak before declared. rare. may alias another
/// identifier, declared or undeclared
llvm::MapVector<IdentifierInfo *, WeakInfo> WeakUndeclaredIdentifiers;
/// ExtnameUndeclaredIdentifiers - Identifiers contained in
/// \#pragma redefine_extname before declared. Used in Solaris system headers
/// to define functions that occur in multiple standards to call the version
/// in the currently selected standard.
llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*> ExtnameUndeclaredIdentifiers;
/// Load weak undeclared identifiers from the external source.
void LoadExternalWeakUndeclaredIdentifiers();
/// WeakTopLevelDecl - Translation-unit scoped declarations generated by
/// \#pragma weak during processing of other Decls.
/// I couldn't figure out a clean way to generate these in-line, so
/// we store them here and handle separately -- which is a hack.
/// It would be best to refactor this.
SmallVector<Decl*,2> WeakTopLevelDecl;
IdentifierResolver IdResolver;
/// Translation Unit Scope - useful to Objective-C actions that need
/// to lookup file scope declarations in the "ordinary" C decl namespace.
/// For example, user-defined classes, built-in "id" type, etc.
Scope *TUScope;
/// The C++ "std" namespace, where the standard library resides.
LazyDeclPtr StdNamespace;
/// The C++ "std::bad_alloc" class, which is defined by the C++
/// standard library.
LazyDeclPtr StdBadAlloc;
/// The C++ "std::align_val_t" enum class, which is defined by the C++
/// standard library.
LazyDeclPtr StdAlignValT;
/// The C++ "std::experimental" namespace, where the experimental parts
/// of the standard library resides.
NamespaceDecl *StdExperimentalNamespaceCache;
/// The C++ "std::initializer_list" template, which is defined in
/// \<initializer_list>.
ClassTemplateDecl *StdInitializerList;
/// The C++ "std::coroutine_traits" template, which is defined in
/// \<coroutine_traits>
ClassTemplateDecl *StdCoroutineTraitsCache;
/// The C++ "type_info" declaration, which is defined in \<typeinfo>.
RecordDecl *CXXTypeInfoDecl;
/// The MSVC "_GUID" struct, which is defined in MSVC header files.
RecordDecl *MSVCGuidDecl;
/// Caches identifiers/selectors for NSFoundation APIs.
std::unique_ptr<NSAPI> NSAPIObj;
/// The declaration of the Objective-C NSNumber class.
ObjCInterfaceDecl *NSNumberDecl;
/// The declaration of the Objective-C NSValue class.
ObjCInterfaceDecl *NSValueDecl;
/// Pointer to NSNumber type (NSNumber *).
QualType NSNumberPointer;
/// Pointer to NSValue type (NSValue *).
QualType NSValuePointer;
/// The Objective-C NSNumber methods used to create NSNumber literals.
ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods];
/// The declaration of the Objective-C NSString class.
ObjCInterfaceDecl *NSStringDecl;
/// Pointer to NSString type (NSString *).
QualType NSStringPointer;
/// The declaration of the stringWithUTF8String: method.
ObjCMethodDecl *StringWithUTF8StringMethod;
/// The declaration of the valueWithBytes:objCType: method.
ObjCMethodDecl *ValueWithBytesObjCTypeMethod;
/// The declaration of the Objective-C NSArray class.
ObjCInterfaceDecl *NSArrayDecl;
/// The declaration of the arrayWithObjects:count: method.
ObjCMethodDecl *ArrayWithObjectsMethod;
/// The declaration of the Objective-C NSDictionary class.
ObjCInterfaceDecl *NSDictionaryDecl;
/// The declaration of the dictionaryWithObjects:forKeys:count: method.
ObjCMethodDecl *DictionaryWithObjectsMethod;
/// id<NSCopying> type.
QualType QIDNSCopying;
/// will hold 'respondsToSelector:'
Selector RespondsToSelectorSel;
/// A flag to remember whether the implicit forms of operator new and delete
/// have been declared.
bool GlobalNewDeleteDeclared;
/// A flag to indicate that we're in a context that permits abstract
/// references to fields. (This sentence originally trailed off mid-thought;
/// the flag appears to be a targeted workaround — see its uses.)
bool AllowAbstractFieldReference;
/// Describes how the expressions currently being parsed are
/// evaluated at run-time, if at all.
/// \see ExpressionEvaluationContextRecord and PushExpressionEvaluationContext.
enum class ExpressionEvaluationContext {
/// The current expression and its subexpressions occur within an
/// unevaluated operand (C++11 [expr]p7), such as the subexpression of
/// \c sizeof, where the type of the expression may be significant but
/// no code will be generated to evaluate the value of the expression at
/// run time.
Unevaluated,
/// The current expression occurs within a braced-init-list within
/// an unevaluated operand. This is mostly like a regular unevaluated
/// context, except that we still instantiate constexpr functions that are
/// referenced here so that we can perform narrowing checks correctly.
UnevaluatedList,
/// The current expression occurs within a discarded statement.
/// This behaves largely similarly to an unevaluated operand in preventing
/// definitions from being required, but not in other ways.
DiscardedStatement,
/// The current expression occurs within an unevaluated
/// operand that unconditionally permits abstract references to
/// fields, such as a SIZE operator in MS-style inline assembly.
UnevaluatedAbstract,
/// The current context is "potentially evaluated" in C++11 terms,
/// but the expression is evaluated at compile-time (like the values of
/// cases in a switch statement).
ConstantEvaluated,
/// The current expression is potentially evaluated at run time,
/// which means that code may be generated to evaluate the value of the
/// expression at run time.
PotentiallyEvaluated,
/// The current expression is potentially evaluated, but any
/// declarations referenced inside that expression are only used if
/// in fact the current expression is used.
///
/// This value is used when parsing default function arguments, for which
/// we would like to provide diagnostics (e.g., passing non-POD arguments
/// through varargs) but do not want to mark declarations as "referenced"
/// until the default argument is used.
PotentiallyEvaluatedIfUsed
};
using ImmediateInvocationCandidate = llvm::PointerIntPair<ConstantExpr *, 1>;
/// Data structure used to record current or nested
/// expression evaluation contexts.
struct ExpressionEvaluationContextRecord {
/// The expression evaluation context.
ExpressionEvaluationContext Context;
/// Whether the enclosing context needed a cleanup.
CleanupInfo ParentCleanup;
/// Whether we are in a decltype expression.
/// Default-initialized to false: the constructor below takes no
/// corresponding parameter, and previously left this indeterminate,
/// making any read before an explicit assignment undefined behavior.
bool IsDecltype = false;
/// The number of active cleanup objects when we entered
/// this expression evaluation context.
unsigned NumCleanupObjects;
/// The number of typos encountered during this expression evaluation
/// context (i.e. the number of TypoExprs created).
unsigned NumTypos;
/// Expressions whose odr-use status was not yet known when this
/// context was entered (saved from the enclosing context).
MaybeODRUseExprSet SavedMaybeODRUseExprs;
/// The lambdas that are present within this context, if it
/// is indeed an unevaluated context.
SmallVector<LambdaExpr *, 2> Lambdas;
/// The declaration that provides context for lambda expressions
/// and block literals if the normal declaration context does not
/// suffice, e.g., in a default function argument.
Decl *ManglingContextDecl;
/// If we are processing a decltype type, a set of call expressions
/// for which we have deferred checking the completeness of the return type.
SmallVector<CallExpr *, 8> DelayedDecltypeCalls;
/// If we are processing a decltype type, a set of temporary binding
/// expressions for which we have deferred checking the destructor.
SmallVector<CXXBindTemporaryExpr *, 8> DelayedDecltypeBinds;
/// Expressions recorded for the pending-noderef warning
/// (see WarnOnPendingNoDerefs).
llvm::SmallPtrSet<const Expr *, 8> PossibleDerefs;
/// Expressions appearing as the LHS of a volatile assignment in this
/// context. We produce a warning for these when popping the context if
/// they are not discarded-value expressions nor unevaluated operands.
SmallVector<Expr*, 2> VolatileAssignmentLHSs;
/// Set of candidates for starting an immediate invocation.
llvm::SmallVector<ImmediateInvocationCandidate, 4> ImmediateInvocationCandidates;
/// Set of DeclRefExprs referencing a consteval function when used in a
/// context not already known to be immediately invoked.
llvm::SmallPtrSet<DeclRefExpr *, 4> ReferenceToConsteval;
/// \brief Describes whether we are in an expression context which we have
/// to handle differently.
enum ExpressionKind {
EK_Decltype, EK_TemplateArgument, EK_Other
} ExprContext;
ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context,
unsigned NumCleanupObjects,
CleanupInfo ParentCleanup,
Decl *ManglingContextDecl,
ExpressionKind ExprContext)
: Context(Context), ParentCleanup(ParentCleanup),
NumCleanupObjects(NumCleanupObjects), NumTypos(0),
ManglingContextDecl(ManglingContextDecl), ExprContext(ExprContext) {}
/// True for any of the unevaluated-operand contexts (e.g. sizeof).
bool isUnevaluated() const {
return Context == ExpressionEvaluationContext::Unevaluated ||
Context == ExpressionEvaluationContext::UnevaluatedAbstract ||
Context == ExpressionEvaluationContext::UnevaluatedList;
}
/// True when expressions in this context are evaluated at compile time.
bool isConstantEvaluated() const {
return Context == ExpressionEvaluationContext::ConstantEvaluated;
}
};
/// A stack of expression evaluation contexts.
SmallVector<ExpressionEvaluationContextRecord, 8> ExprEvalContexts;
/// Emit a warning for all pending noderef expressions that we recorded.
void WarnOnPendingNoDerefs(ExpressionEvaluationContextRecord &Rec);
/// Compute the mangling number context for a lambda expression or
/// block literal. Also return the extra mangling decl if any.
///
/// \param DC - The DeclContext containing the lambda expression or
/// block literal.
std::tuple<MangleNumberingContext *, Decl *>
getCurrentMangleNumberContext(const DeclContext *DC);
/// SpecialMemberOverloadResult - The overloading result for a special member
/// function.
///
/// This is basically a wrapper around PointerIntPair. The lowest bits of the
/// integer are used to determine whether overload resolution succeeded.
class SpecialMemberOverloadResult {
public:
/// Outcome of overload resolution for a special member function.
enum Kind {
NoMemberOrDeleted,
Ambiguous,
Success
};
private:
/// The resolved method, with the Kind packed into the pointer's low bits.
llvm::PointerIntPair<CXXMethodDecl*, 2> Pair;
public:
SpecialMemberOverloadResult() : Pair() {}
/// Wraps \p MD; a deleted method is recorded as NoMemberOrDeleted.
SpecialMemberOverloadResult(CXXMethodDecl *MD)
: Pair(MD, MD->isDeleted() ? NoMemberOrDeleted : Success) {}
CXXMethodDecl *getMethod() const { return Pair.getPointer(); }
void setMethod(CXXMethodDecl *MD) { Pair.setPointer(MD); }
Kind getKind() const { return static_cast<Kind>(Pair.getInt()); }
void setKind(Kind K) { Pair.setInt(K); }
};
/// A SpecialMemberOverloadResult that can live in a FoldingSet
/// (see SpecialMemberCache), keyed by a precomputed FoldingSetNodeID.
class SpecialMemberOverloadResultEntry
: public llvm::FastFoldingSetNode,
public SpecialMemberOverloadResult {
public:
SpecialMemberOverloadResultEntry(const llvm::FoldingSetNodeID &ID)
: FastFoldingSetNode(ID)
{}
};
/// A cache of special member function overload resolution results
/// for C++ records.
llvm::FoldingSet<SpecialMemberOverloadResultEntry> SpecialMemberCache;
/// A cache of the flags available in enumerations with the flag_bits
/// attribute.
mutable llvm::DenseMap<const EnumDecl*, llvm::APInt> FlagBitsCache;
/// The kind of translation unit we are processing.
///
/// When we're processing a complete translation unit, Sema will perform
/// end-of-translation-unit semantic tasks (such as creating
/// initializers for tentative definitions in C) once parsing has
/// completed. Modules and precompiled headers perform different kinds of
/// checks.
TranslationUnitKind TUKind;
llvm::BumpPtrAllocator BumpAlloc;
/// The number of SFINAE diagnostics that have been trapped.
unsigned NumSFINAEErrors;
typedef llvm::DenseMap<ParmVarDecl *, llvm::TinyPtrVector<ParmVarDecl *>>
UnparsedDefaultArgInstantiationsMap;
/// A mapping from parameters with unparsed default arguments to the
/// set of instantiations of each parameter.
///
/// This mapping is a temporary data structure used when parsing
/// nested class templates or nested classes of class templates,
/// where we might end up instantiating an inner class before the
/// default arguments of its methods have been parsed.
UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations;
// Contains the locations of the beginning of unparsed default
// argument locations.
llvm::DenseMap<ParmVarDecl *, SourceLocation> UnparsedDefaultArgLocs;
/// UndefinedInternals - all the used, undefined objects which require a
/// definition in this translation unit.
llvm::MapVector<NamedDecl *, SourceLocation> UndefinedButUsed;
/// Determine if VD, which must be a variable or function, is an external
/// symbol that nonetheless can't be referenced from outside this translation
/// unit because its type has no linkage and it's not extern "C".
bool isExternalWithNoLinkageType(ValueDecl *VD);
/// Obtain a sorted list of functions that are undefined but ODR-used.
void getUndefinedButUsed(
SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined);
/// Retrieves list of suspicious delete-expressions that will be checked at
/// the end of translation unit.
const llvm::MapVector<FieldDecl *, DeleteLocs> &
getMismatchingDeleteExpressions() const;
typedef std::pair<ObjCMethodList, ObjCMethodList> GlobalMethods;
typedef llvm::DenseMap<Selector, GlobalMethods> GlobalMethodPool;
/// Method Pool - allows efficient lookup when typechecking messages to "id".
/// We need to maintain a list, since selectors can have differing signatures
/// across classes. In Cocoa, this happens to be extremely uncommon (only 1%
/// of selectors are "overloaded").
/// At the head of the list it is recorded whether there were 0, 1, or >= 2
/// methods inside categories with a particular selector.
GlobalMethodPool MethodPool;
/// Method selectors used in a \@selector expression. Used for implementation
/// of -Wselector.
llvm::MapVector<Selector, SourceLocation> ReferencedSelectors;
/// List of SourceLocations where 'self' is implicitly retained inside a
/// block.
llvm::SmallVector<std::pair<SourceLocation, const BlockDecl *>, 1>
ImplicitlyRetainedSelfLocs;
/// Kinds of C++ special members.
enum CXXSpecialMember {
CXXDefaultConstructor,
CXXCopyConstructor,
CXXMoveConstructor,
CXXCopyAssignment,
CXXMoveAssignment,
CXXDestructor,
CXXInvalid // Sentinel: not a special member.
};
typedef llvm::PointerIntPair<CXXRecordDecl *, 3, CXXSpecialMember>
SpecialMemberDecl;
/// The C++ special members which we are currently in the process of
/// declaring. If this process recursively triggers the declaration of the
/// same special member, we should act as if it is not yet declared.
llvm::SmallPtrSet<SpecialMemberDecl, 4> SpecialMembersBeingDeclared;
/// Kinds of defaulted comparison operator functions (C++20 `= default`
/// comparisons).
enum class DefaultedComparisonKind : unsigned char {
/// This is not a defaultable comparison operator.
None,
/// This is an operator== that should be implemented as a series of
/// subobject comparisons.
Equal,
/// This is an operator<=> that should be implemented as a series of
/// subobject comparisons.
ThreeWay,
/// This is an operator!= that should be implemented as a rewrite in terms
/// of a == comparison.
NotEqual,
/// This is an <, <=, >, or >= that should be implemented as a rewrite in
/// terms of a <=> comparison.
Relational,
};
/// The function definitions which were renamed as part of typo-correction
/// to match their respective declarations. We want to keep track of them
/// to ensure that we don't emit a "redefinition" error if we encounter a
/// correctly named definition after the renamed definition.
llvm::SmallPtrSet<const NamedDecl *, 4> TypoCorrectedFunctionDefinitions;
/// Stack of types that correspond to the parameter entities that are
/// currently being copy-initialized. Can be empty.
llvm::SmallVector<QualType, 4> CurrentParameterCopyTypes;
void ReadMethodPool(Selector Sel);
void updateOutOfDateSelector(Selector Sel);
/// Private Helper predicate to check for 'self'.
bool isSelfExpr(Expr *RExpr);
bool isSelfExpr(Expr *RExpr, const ObjCMethodDecl *Method);
/// Cause the active diagnostic on the DiagosticsEngine to be
/// emitted. This is closely coupled to the SemaDiagnosticBuilder class and
/// should not be used elsewhere.
void EmitCurrentDiagnostic(unsigned DiagID);
/// Records and restores the CurFPFeatures state on entry/exit of compound
/// statements.
class FPFeaturesStateRAII {
public:
FPFeaturesStateRAII(Sema &S) : S(S), OldFPFeaturesState(S.CurFPFeatures) {
OldOverrides = S.FpPragmaStack.CurrentValue;
}
~FPFeaturesStateRAII() {
S.CurFPFeatures = OldFPFeaturesState;
S.FpPragmaStack.CurrentValue = OldOverrides;
}
FPOptionsOverride getOverrides() { return OldOverrides; }
private:
Sema& S;
FPOptions OldFPFeaturesState;
FPOptionsOverride OldOverrides;
};
void addImplicitTypedef(StringRef Name, QualType T);
/// Guard so the stack-exhaustion warning is only emitted once.
/// NOTE(review): presumably set by warnStackExhausted -- confirm.
bool WarnedStackExhausted = false;
public:
Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
TranslationUnitKind TUKind = TU_Complete,
CodeCompleteConsumer *CompletionConsumer = nullptr);
~Sema();
/// Perform initialization that occurs after the parser has been
/// initialized but before it parses anything.
void Initialize();
// Simple accessors for the core objects Sema collaborates with.
const LangOptions &getLangOpts() const { return LangOpts; }
OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; }
FPOptions &getCurFPFeatures() { return CurFPFeatures; }
DiagnosticsEngine &getDiagnostics() const { return Diags; }
SourceManager &getSourceManager() const { return SourceMgr; }
Preprocessor &getPreprocessor() const { return PP; }
ASTContext &getASTContext() const { return Context; }
ASTConsumer &getASTConsumer() const { return Consumer; }
ASTMutationListener *getASTMutationListener() const;
ExternalSemaSource* getExternalSource() const { return ExternalSource; }
///Registers an external source. If an external source already exists,
/// creates a multiplex external source and appends to it.
///
///\param[in] E - A non-null external sema source.
///
void addExternalSource(ExternalSemaSource *E);
void PrintStats() const;
/// Warn that the stack is nearly exhausted.
void warnStackExhausted(SourceLocation Loc);
/// Run some code with "sufficient" stack space. (Currently, at least 256K is
/// guaranteed). Produces a warning if we're low on stack space and allocates
/// more in that case. Use this in code that may recurse deeply (for example,
/// in template instantiation) to avoid stack overflow.
void runWithSufficientStackSpace(SourceLocation Loc,
llvm::function_ref<void()> Fn);
/// Helper class that creates diagnostics with optional
/// template instantiation stacks.
///
/// This class provides a wrapper around the basic DiagnosticBuilder
/// class that emits diagnostics. SemaDiagnosticBuilder is
/// responsible for emitting the diagnostic (as DiagnosticBuilder
/// does) and, if the diagnostic comes from inside a template
/// instantiation, printing the template instantiation stack as
/// well.
class SemaDiagnosticBuilder : public DiagnosticBuilder {
Sema &SemaRef;   // Sema that will emit the diagnostic in our destructor.
unsigned DiagID; // Diagnostic being built; forwarded to EmitCurrentDiagnostic.
public:
SemaDiagnosticBuilder(DiagnosticBuilder &DB, Sema &SemaRef, unsigned DiagID)
: DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) { }
// This is a cunning lie. DiagnosticBuilder actually performs move
// construction in its copy constructor (but due to varied uses, it's not
// possible to conveniently express this as actual move construction). So
// the default copy ctor here is fine, because the base class disables the
// source anyway, so the user-defined ~SemaDiagnosticBuilder is a safe no-op
// in that case anyway.
SemaDiagnosticBuilder(const SemaDiagnosticBuilder&) = default;
~SemaDiagnosticBuilder() {
// If we aren't active, there is nothing to do.
if (!isActive()) return;
// Otherwise, we need to emit the diagnostic. First flush the underlying
// DiagnosticBuilder data, and clear the diagnostic builder itself so it
// won't emit the diagnostic in its own destructor.
//
// This seems wasteful, in that as written the DiagnosticBuilder dtor will
// do its own needless checks to see if the diagnostic needs to be
// emitted. However, because we take care to ensure that the builder
// objects never escape, a sufficiently smart compiler will be able to
// eliminate that code.
FlushCounts();
Clear();
// Dispatch to Sema to emit the diagnostic.
SemaRef.EmitCurrentDiagnostic(DiagID);
}
/// Teach operator<< to produce an object of the correct type.
template<typename T>
friend const SemaDiagnosticBuilder &operator<<(
const SemaDiagnosticBuilder &Diag, const T &Value) {
// Stream through the base class, but return the derived type so further
// chained << calls keep producing SemaDiagnosticBuilder.
const DiagnosticBuilder &BaseDiag = Diag;
BaseDiag << Value;
return Diag;
}
};
/// Emit a diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID) {
// A named local is required: SemaDiagnosticBuilder's constructor takes the
// DiagnosticBuilder by non-const reference, so a temporary cannot bind.
DiagnosticBuilder DB = Diags.Report(Loc, DiagID);
return SemaDiagnosticBuilder(DB, *this, DiagID);
}
/// Emit a partial diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic& PD);
/// Build a partial diagnostic.
PartialDiagnostic PDiag(unsigned DiagID = 0); // in SemaInternal.h
bool findMacroSpelling(SourceLocation &loc, StringRef name);
/// Get a string to suggest for zero-initialization of a type.
std::string
getFixItZeroInitializerForType(QualType T, SourceLocation Loc) const;
std::string getFixItZeroLiteralForType(QualType T, SourceLocation Loc) const;
/// Calls \c Lexer::getLocForEndOfToken()
SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset = 0);
/// Retrieve the module loader associated with the preprocessor.
ModuleLoader &getModuleLoader() const;
/// Invent a new identifier for parameters of abbreviated templates.
IdentifierInfo *
InventAbbreviatedTemplateParameterTypeName(IdentifierInfo *ParamName,
unsigned Index);
void emitAndClearUnusedLocalTypedefWarnings();
private:
/// Function or variable declarations to be checked for whether the deferred
/// diagnostics should be emitted.
SmallVector<Decl *, 4> DeclsToCheckForDeferredDiags;
public:
// Emit all deferred diagnostics.
void emitDeferredDiags();
/// The portion of a translation unit currently being parsed, for C++20
/// modules bookkeeping.
enum TUFragmentKind {
/// The global module fragment, between 'module;' and a module-declaration.
Global,
/// A normal translation unit fragment. For a non-module unit, this is the
/// entire translation unit. Otherwise, it runs from the module-declaration
/// to the private-module-fragment (if any) or the end of the TU (if not).
Normal,
/// The private module fragment, between 'module :private;' and the end of
/// the translation unit.
Private
};
void ActOnStartOfTranslationUnit();
void ActOnEndOfTranslationUnit();
void ActOnEndOfTranslationUnitFragment(TUFragmentKind Kind);
void CheckDelegatingCtorCycles();
Scope *getScopeForContext(DeclContext *Ctx);
// Push the various kinds of function-like scopes Sema tracks.
void PushFunctionScope();
void PushBlockScope(Scope *BlockScope, BlockDecl *Block);
sema::LambdaScopeInfo *PushLambdaScope();
/// This is used to inform Sema what the current TemplateParameterDepth
/// is during Parsing. Currently it is used to pass on the depth
/// when parsing generic lambda 'auto' parameters.
void RecordParsingTemplateParameterDepth(unsigned Depth);
void PushCapturedRegionScope(Scope *RegionScope, CapturedDecl *CD,
RecordDecl *RD, CapturedRegionKind K,
unsigned OpenMPCaptureLevel = 0);
/// Custom deleter to allow FunctionScopeInfos to be kept alive for a short
/// time after they've been popped.
class PoppedFunctionScopeDeleter {
Sema *Self;
public:
explicit PoppedFunctionScopeDeleter(Sema *Self) : Self(Self) {}
void operator()(sema::FunctionScopeInfo *Scope) const;
};
using PoppedFunctionScopePtr =
std::unique_ptr<sema::FunctionScopeInfo, PoppedFunctionScopeDeleter>;
PoppedFunctionScopePtr
PopFunctionScopeInfo(const sema::AnalysisBasedWarnings::Policy *WP = nullptr,
const Decl *D = nullptr,
QualType BlockType = QualType());
/// Return the innermost function scope, or null when no function scope is
/// active (e.g. at file scope).
sema::FunctionScopeInfo *getCurFunction() const {
  if (FunctionScopes.empty())
    return nullptr;
  return FunctionScopes.back();
}
sema::FunctionScopeInfo *getEnclosingFunction() const;
// Flag properties on the current function scope.
void setFunctionHasBranchIntoScope();
void setFunctionHasBranchProtectedScope();
void setFunctionHasIndirectGoto();
void PushCompoundScope(bool IsStmtExpr);
void PopCompoundScope();
sema::CompoundScopeInfo &getCurCompoundScope() const;
bool hasAnyUnrecoverableErrorsInThisFunction() const;
/// Retrieve the current block, if any.
sema::BlockScopeInfo *getCurBlock();
/// Get the innermost lambda enclosing the current location, if any. This
/// looks through intervening non-lambda scopes such as local functions and
/// blocks.
sema::LambdaScopeInfo *getEnclosingLambda() const;
/// Retrieve the current lambda scope info, if any.
/// \param IgnoreNonLambdaCapturingScope true if should find the top-most
/// lambda scope info ignoring all inner capturing scopes that are not
/// lambda scopes.
sema::LambdaScopeInfo *
getCurLambda(bool IgnoreNonLambdaCapturingScope = false);
/// Retrieve the current generic lambda info, if any.
sema::LambdaScopeInfo *getCurGenericLambda();
/// Retrieve the current captured region, if any.
sema::CapturedRegionScopeInfo *getCurCapturedRegion();
/// WeakTopLevelDeclDecls - access to \#pragma weak-generated Decls
SmallVectorImpl<Decl *> &WeakTopLevelDecls() { return WeakTopLevelDecl; }
/// Called before parsing a function declarator belonging to a function
/// declaration.
void ActOnStartFunctionDeclarationDeclarator(Declarator &D,
unsigned TemplateParameterDepth);
/// Called after parsing a function declarator belonging to a function
/// declaration.
void ActOnFinishFunctionDeclarationDeclarator(Declarator &D);
void ActOnComment(SourceRange Comment);
//===--------------------------------------------------------------------===//
// Type Analysis / Processing: SemaType.cpp.
//
QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs,
const DeclSpec *DS = nullptr);
QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVRA,
const DeclSpec *DS = nullptr);
QualType BuildPointerType(QualType T,
SourceLocation Loc, DeclarationName Entity);
QualType BuildReferenceType(QualType T, bool LValueRef,
SourceLocation Loc, DeclarationName Entity);
QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
Expr *ArraySize, unsigned Quals,
SourceRange Brackets, DeclarationName Entity);
QualType BuildVectorType(QualType T, Expr *VecSize, SourceLocation AttrLoc);
QualType BuildExtVectorType(QualType T, Expr *ArraySize,
SourceLocation AttrLoc);
QualType BuildMatrixType(QualType T, Expr *NumRows, Expr *NumColumns,
SourceLocation AttrLoc);
QualType BuildAddressSpaceAttr(QualType &T, LangAS ASIdx, Expr *AddrSpace,
SourceLocation AttrLoc);
/// Same as above, but constructs the AddressSpace index if not provided.
QualType BuildAddressSpaceAttr(QualType &T, Expr *AddrSpace,
SourceLocation AttrLoc);
bool CheckQualifiedFunctionForTypeId(QualType T, SourceLocation Loc);
bool CheckFunctionReturnType(QualType T, SourceLocation Loc);
/// Build a function type.
///
/// This routine checks the function type according to C++ rules and
/// under the assumption that the result type and parameter types have
/// just been instantiated from a template. It therefore duplicates
/// some of the behavior of GetTypeForDeclarator, but in a much
/// simpler form that is only suitable for this narrow use case.
///
/// \param T The return type of the function.
///
/// \param ParamTypes The parameter types of the function. This array
/// will be modified to account for adjustments to the types of the
/// function parameters.
///
/// \param Loc The location of the entity whose type involves this
/// function type or, if there is no such entity, the location of the
/// type that will have function type.
///
/// \param Entity The name of the entity that involves the function
/// type, if known.
///
/// \param EPI Extra information about the function type. Usually this will
/// be taken from an existing function with the same prototype.
///
/// \returns A suitable function type, if there are no errors. The
/// unqualified type will always be a FunctionProtoType.
/// Otherwise, returns a NULL type.
QualType BuildFunctionType(QualType T,
MutableArrayRef<QualType> ParamTypes,
SourceLocation Loc, DeclarationName Entity,
const FunctionProtoType::ExtProtoInfo &EPI);
QualType BuildMemberPointerType(QualType T, QualType Class,
SourceLocation Loc,
DeclarationName Entity);
QualType BuildBlockPointerType(QualType T,
SourceLocation Loc, DeclarationName Entity);
QualType BuildParenType(QualType T);
QualType BuildAtomicType(QualType T, SourceLocation Loc);
QualType BuildReadPipeType(QualType T,
SourceLocation Loc);
QualType BuildWritePipeType(QualType T,
SourceLocation Loc);
QualType BuildExtIntType(bool IsUnsigned, Expr *BitWidth, SourceLocation Loc);
TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S);
TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy);
/// Package the given type and TSI into a ParsedType.
ParsedType CreateParsedType(QualType T, TypeSourceInfo *TInfo);
DeclarationNameInfo GetNameForDeclarator(Declarator &D);
DeclarationNameInfo GetNameFromUnqualifiedId(const UnqualifiedId &Name);
static QualType GetTypeFromParser(ParsedType Ty,
TypeSourceInfo **TInfo = nullptr);
CanThrowResult canThrow(const Stmt *E);
/// Determine whether the callee of a particular function call can throw.
/// E, D and Loc are all optional.
static CanThrowResult canCalleeThrow(Sema &S, const Expr *E, const Decl *D,
SourceLocation Loc = SourceLocation());
const FunctionProtoType *ResolveExceptionSpec(SourceLocation Loc,
const FunctionProtoType *FPT);
void UpdateExceptionSpec(FunctionDecl *FD,
const FunctionProtoType::ExceptionSpecInfo &ESI);
bool CheckSpecifiedExceptionType(QualType &T, SourceRange Range);
bool CheckDistantExceptionSpec(QualType T);
bool CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New);
bool CheckEquivalentExceptionSpec(
const FunctionProtoType *Old, SourceLocation OldLoc,
const FunctionProtoType *New, SourceLocation NewLoc);
bool CheckEquivalentExceptionSpec(
const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID,
const FunctionProtoType *Old, SourceLocation OldLoc,
const FunctionProtoType *New, SourceLocation NewLoc);
bool handlerCanCatch(QualType HandlerType, QualType ExceptionType);
bool CheckExceptionSpecSubset(const PartialDiagnostic &DiagID,
const PartialDiagnostic &NestedDiagID,
const PartialDiagnostic &NoteID,
const PartialDiagnostic &NoThrowDiagID,
const FunctionProtoType *Superset,
SourceLocation SuperLoc,
const FunctionProtoType *Subset,
SourceLocation SubLoc);
bool CheckParamExceptionSpec(const PartialDiagnostic &NestedDiagID,
const PartialDiagnostic &NoteID,
const FunctionProtoType *Target,
SourceLocation TargetLoc,
const FunctionProtoType *Source,
SourceLocation SourceLoc);
TypeResult ActOnTypeName(Scope *S, Declarator &D);
/// The parser has parsed the context-sensitive type 'instancetype'
/// in an Objective-C message declaration. Return the appropriate type.
ParsedType ActOnObjCInstanceType(SourceLocation Loc);
/// Abstract class used to diagnose incomplete types.
///
/// Subclasses implement \c diagnose to emit a diagnostic about type \p T at
/// \p Loc; callers pass an instance to the RequireCompleteType family below.
struct TypeDiagnoser {
  // Use `= default` rather than user-provided empty bodies, matching the
  // file's existing idiom (see SemaDiagnosticBuilder's defaulted copy ctor).
  TypeDiagnoser() = default;

  virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) = 0;
  virtual ~TypeDiagnoser() = default;
};
// getPrintable: normalize each supported argument type into a form that can
// be streamed into a diagnostic. Overload resolution picks the right one for
// each template argument of BoundTypeDiagnoser below.
static int getPrintable(int I) { return I; }
static unsigned getPrintable(unsigned I) { return I; }
static bool getPrintable(bool B) { return B; }
static const char * getPrintable(const char *S) { return S; }
static StringRef getPrintable(StringRef S) { return S; }
static const std::string &getPrintable(const std::string &S) { return S; }
static const IdentifierInfo *getPrintable(const IdentifierInfo *II) {
return II;
}
static DeclarationName getPrintable(DeclarationName N) { return N; }
static QualType getPrintable(QualType T) { return T; }
static SourceRange getPrintable(SourceRange R) { return R; }
static SourceRange getPrintable(SourceLocation L) { return L; }
static SourceRange getPrintable(const Expr *E) { return E->getSourceRange(); }
static SourceRange getPrintable(TypeLoc TL) { return TL.getSourceRange();}
/// A TypeDiagnoser that captures a diagnostic ID plus a tuple of extra
/// arguments, and streams them into the diagnostic before the type.
template <typename... Ts> class BoundTypeDiagnoser : public TypeDiagnoser {
protected:
unsigned DiagID;
std::tuple<const Ts &...> Args;
template <std::size_t... Is>
void emit(const SemaDiagnosticBuilder &DB,
std::index_sequence<Is...>) const {
// Apply all tuple elements to the builder in order. The braced array
// initializer guarantees left-to-right evaluation of the pack expansion.
bool Dummy[] = {false, (DB << getPrintable(std::get<Is>(Args)))...};
(void)Dummy;
}
public:
BoundTypeDiagnoser(unsigned DiagID, const Ts &...Args)
: TypeDiagnoser(), DiagID(DiagID), Args(Args...) {
assert(DiagID != 0 && "no diagnostic for type diagnoser");
}
void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
const SemaDiagnosticBuilder &DB = S.Diag(Loc, DiagID);
emit(DB, std::index_sequence_for<Ts...>());
DB << T;
}
};
/// Do a check to make sure \p Name looks like a legal argument for the
/// swift_name attribute applied to decl \p D. Raise a diagnostic if the name
/// is invalid for the given declaration.
///
/// \p AL is used to provide caret diagnostics in case of a malformed name.
///
/// \returns true if the name is a valid swift name for \p D, false otherwise.
bool DiagnoseSwiftName(Decl *D, StringRef Name, SourceLocation Loc,
const ParsedAttr &AL);
/// A derivative of BoundTypeDiagnoser for which the diagnostic's type
/// parameter is preceded by a 0/1 enum that is 1 if the type is sizeless.
/// For example, a diagnostic with no other parameters would generally have
/// the form "...%select{incomplete|sizeless}0 type %1...".
template <typename... Ts>
class SizelessTypeDiagnoser : public BoundTypeDiagnoser<Ts...> {
public:
SizelessTypeDiagnoser(unsigned DiagID, const Ts &... Args)
: BoundTypeDiagnoser<Ts...>(DiagID, Args...) {}
void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
const SemaDiagnosticBuilder &DB = S.Diag(Loc, this->DiagID);
this->emit(DB, std::index_sequence_for<Ts...>());
// Unlike the base class, prepend the sizeless-ness selector before T.
DB << T->isSizelessType() << T;
}
};
/// How to treat sizeless built-in types when checking completeness.
enum class CompleteTypeKind {
/// Apply the normal rules for complete types. In particular,
/// treat all sizeless types as incomplete.
Normal,
/// Relax the normal rules for complete types so that they include
/// sizeless built-in types.
AcceptSizeless,
// FIXME: Eventually we should flip the default to Normal and opt in
// to AcceptSizeless rather than opt out of it.
Default = AcceptSizeless
};
private:
/// Methods for marking which expressions involve dereferencing a pointer
/// marked with the 'noderef' attribute. Expressions are checked bottom up as
/// they are parsed, meaning that a noderef pointer may not be accessed. For
/// example, in `&*p` where `p` is a noderef pointer, we will first parse the
/// `*p`, but need to check that `address of` is called on it. This requires
/// keeping a container of all pending expressions and checking if the address
/// of them are eventually taken.
void CheckSubscriptAccessOfNoDeref(const ArraySubscriptExpr *E);
void CheckAddressOfNoDeref(const Expr *E);
void CheckMemberAccessOfNoDeref(const MemberExpr *E);
bool RequireCompleteTypeImpl(SourceLocation Loc, QualType T,
CompleteTypeKind Kind, TypeDiagnoser *Diagnoser);
/// State for one module currently being parsed.
struct ModuleScope {
SourceLocation BeginLoc;
clang::Module *Module = nullptr;
bool ModuleInterface = false;
bool ImplicitGlobalModuleFragment = false;
VisibleModuleSet OuterVisibleModules;
};
/// The modules we're currently parsing.
llvm::SmallVector<ModuleScope, 16> ModuleScopes;
/// Namespace definitions that we will export when they finish.
llvm::SmallPtrSet<const NamespaceDecl*, 8> DeferredExportedNamespaces;
/// Get the module whose scope we are currently within, or null when we are
/// not inside any module.
Module *getCurrentModule() const {
  if (ModuleScopes.empty())
    return nullptr;
  return ModuleScopes.back().Module;
}
VisibleModuleSet VisibleModules;
public:
/// Get the module owning an entity.
Module *getOwningModule(const Decl *Entity) {
return Entity->getOwningModule();
}
/// Make a merged definition of an existing hidden definition \p ND
/// visible at the specified location.
void makeMergedDefinitionVisible(NamedDecl *ND);
bool isModuleVisible(const Module *M, bool ModulePrivate = false);
// When loading a non-modular PCH files, this is used to restore module
// visibility.
void makeModuleVisible(Module *Mod, SourceLocation ImportLoc) {
VisibleModules.setVisible(Mod, ImportLoc);
}
/// Determine whether a declaration is visible to name lookup.
bool isVisible(const NamedDecl *D) {
// Fast path first; only fall back to the slow path when needed.
return D->isUnconditionallyVisible() || isVisibleSlow(D);
}
/// Determine whether any declaration of an entity is visible.
bool
hasVisibleDeclaration(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules = nullptr) {
return isVisible(D) || hasVisibleDeclarationSlow(D, Modules);
}
bool hasVisibleDeclarationSlow(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules);
bool hasVisibleMergedDefinition(NamedDecl *Def);
bool hasMergedDefinitionInCurrentModule(NamedDecl *Def);
/// Determine if \p D and \p Suggested have a structurally compatible
/// layout as described in C11 6.2.7/1.
bool hasStructuralCompatLayout(Decl *D, Decl *Suggested);
/// Determine if \p D has a visible definition. If not, suggest a declaration
/// that should be made visible to expose the definition.
bool hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested,
bool OnlyNeedComplete = false);
bool hasVisibleDefinition(const NamedDecl *D) {
NamedDecl *Hidden;
return hasVisibleDefinition(const_cast<NamedDecl*>(D), &Hidden);
}
/// Determine if the template parameter \p D has a visible default argument.
bool
hasVisibleDefaultArgument(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if there is a visible declaration of \p D that is an explicit
/// specialization declaration for a specialization of a template. (For a
/// member specialization, use hasVisibleMemberSpecialization.)
bool hasVisibleExplicitSpecialization(
const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if there is a visible declaration of \p D that is a member
/// specialization declaration (as opposed to an instantiated declaration).
bool hasVisibleMemberSpecialization(
const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if \p A and \p B are equivalent internal linkage declarations
/// from different modules, and thus an ambiguity error can be downgraded to
/// an extension warning.
bool isEquivalentInternalLinkageDeclaration(const NamedDecl *A,
const NamedDecl *B);
void diagnoseEquivalentInternalLinkageDeclarations(
SourceLocation Loc, const NamedDecl *D,
ArrayRef<const NamedDecl *> Equiv);
bool isUsualDeallocationFunction(const CXXMethodDecl *FD);
/// Returns true when \p T is complete; passes no diagnoser, so this never
/// emits a diagnostic.
bool isCompleteType(SourceLocation Loc, QualType T,
CompleteTypeKind Kind = CompleteTypeKind::Default) {
return !RequireCompleteTypeImpl(Loc, T, Kind, nullptr);
}
bool RequireCompleteType(SourceLocation Loc, QualType T,
CompleteTypeKind Kind, TypeDiagnoser &Diagnoser);
bool RequireCompleteType(SourceLocation Loc, QualType T,
CompleteTypeKind Kind, unsigned DiagID);
// Convenience overloads that forward with CompleteTypeKind::Default.
bool RequireCompleteType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser) {
return RequireCompleteType(Loc, T, CompleteTypeKind::Default, Diagnoser);
}
bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID) {
return RequireCompleteType(Loc, T, CompleteTypeKind::Default, DiagID);
}
template <typename... Ts>
bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteType(Loc, T, Diagnoser);
}
template <typename... Ts>
bool RequireCompleteSizedType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &... Args) {
SizelessTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteType(Loc, T, CompleteTypeKind::Normal, Diagnoser);
}
void completeExprArrayBound(Expr *E);
bool RequireCompleteExprType(Expr *E, CompleteTypeKind Kind,
TypeDiagnoser &Diagnoser);
bool RequireCompleteExprType(Expr *E, unsigned DiagID);
template <typename... Ts>
bool RequireCompleteExprType(Expr *E, unsigned DiagID, const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteExprType(E, CompleteTypeKind::Default, Diagnoser);
}
template <typename... Ts>
bool RequireCompleteSizedExprType(Expr *E, unsigned DiagID,
const Ts &... Args) {
SizelessTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteExprType(E, CompleteTypeKind::Normal, Diagnoser);
}
bool RequireLiteralType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID);
template <typename... Ts>
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireLiteralType(Loc, T, Diagnoser);
}
QualType getElaboratedType(ElaboratedTypeKeyword Keyword,
const CXXScopeSpec &SS, QualType T,
TagDecl *OwnedTagDecl = nullptr);
QualType BuildTypeofExprType(Expr *E, SourceLocation Loc);
/// If AsUnevaluated is false, E is treated as though it were an evaluated
/// context, such as when building a type for decltype(auto).
QualType BuildDecltypeType(Expr *E, SourceLocation Loc,
bool AsUnevaluated = true);
QualType BuildUnaryTransformType(QualType BaseType,
UnaryTransformType::UTTKind UKind,
SourceLocation Loc);
//===--------------------------------------------------------------------===//
// Symbol table / Decl tracking callbacks: SemaDecl.cpp.
//
/// Result of deciding whether a (duplicate) definition's body can be skipped.
/// NOTE(review): field semantics inferred from names -- confirm with callers.
struct SkipBodyInfo {
  // Prefer in-class member initializers over a constructor that repeats the
  // defaults; this matches the file's existing idiom (see ModuleScope above).
  SkipBodyInfo() = default;

  /// Whether the body should be skipped.
  bool ShouldSkip = false;
  /// Whether the new definition must first be compared against \c Previous.
  bool CheckSameAsPrevious = false;
  /// The prior declaration involved, if any.
  NamedDecl *Previous = nullptr;
  /// The new declaration involved, if any.
  NamedDecl *New = nullptr;
};
DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = nullptr);
void DiagnoseUseOfUnimplementedSelectors();
bool isSimpleTypeSpecifier(tok::TokenKind Kind) const;
ParsedType getTypeName(const IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec *SS = nullptr,
bool isClassName = false, bool HasTrailingDot = false,
ParsedType ObjectType = nullptr,
bool IsCtorOrDtorName = false,
bool WantNontrivialTypeSourceInfo = false,
bool IsClassTemplateDeductionContext = true,
IdentifierInfo **CorrectedII = nullptr);
TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S);
bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S);
void DiagnoseUnknownTypeName(IdentifierInfo *&II,
SourceLocation IILoc,
Scope *S,
CXXScopeSpec *SS,
ParsedType &SuggestedType,
bool IsTemplateName = false);
/// Attempt to behave like MSVC in situations where lookup of an unqualified
/// type name has failed in a dependent context. In these situations, we
/// automatically form a DependentTypeName that will retry lookup in a related
/// scope during instantiation.
ParsedType ActOnMSVCUnknownTypeName(const IdentifierInfo &II,
SourceLocation NameLoc,
bool IsTemplateTypeArg);
/// Describes the result of the name lookup and resolution performed
/// by \c ClassifyName().
enum NameClassificationKind {
/// This name is not a type or template in this context, but might be
/// something else.
NC_Unknown,
/// Classification failed; an error has been produced.
NC_Error,
/// The name has been typo-corrected to a keyword.
NC_Keyword,
/// The name was classified as a type.
NC_Type,
/// The name was classified as a specific non-type, non-template
/// declaration. ActOnNameClassifiedAsNonType should be called to
/// convert the declaration to an expression.
NC_NonType,
/// The name was classified as an ADL-only function name.
/// ActOnNameClassifiedAsUndeclaredNonType should be called to convert the
/// result to an expression.
NC_UndeclaredNonType,
/// The name denotes a member of a dependent type that could not be
/// resolved. ActOnNameClassifiedAsDependentNonType should be called to
/// convert the result to an expression.
NC_DependentNonType,
/// The name was classified as an overload set, and an expression
/// representing that overload set has been formed.
/// ActOnNameClassifiedAsOverloadSet should be called to form a suitable
/// expression referencing the overload set.
NC_OverloadSet,
/// The name was classified as a template whose specializations are types.
NC_TypeTemplate,
/// The name was classified as a variable template name.
NC_VarTemplate,
/// The name was classified as a function template name.
NC_FunctionTemplate,
/// The name was classified as an ADL-only function template name.
NC_UndeclaredTemplate,
/// The name was classified as a concept name.
NC_Concept,
};
/// Tagged-union result of ClassifyName(): \c Kind selects which union member
/// (if any) is active; the typed accessors assert on the kind.
class NameClassification {
NameClassificationKind Kind;
union {
ExprResult Expr;
NamedDecl *NonTypeDecl;
TemplateName Template;
ParsedType Type;
};
// Private: constructs a classification with no union member initialized;
// only valid for kinds that carry no payload.
explicit NameClassification(NameClassificationKind Kind) : Kind(Kind) {}
public:
NameClassification(ParsedType Type) : Kind(NC_Type), Type(Type) {}
// The keyword pointer itself is not stored; the kind alone suffices.
NameClassification(const IdentifierInfo *Keyword) : Kind(NC_Keyword) {}
static NameClassification Error() {
return NameClassification(NC_Error);
}
static NameClassification Unknown() {
return NameClassification(NC_Unknown);
}
static NameClassification OverloadSet(ExprResult E) {
NameClassification Result(NC_OverloadSet);
Result.Expr = E;
return Result;
}
static NameClassification NonType(NamedDecl *D) {
NameClassification Result(NC_NonType);
Result.NonTypeDecl = D;
return Result;
}
static NameClassification UndeclaredNonType() {
return NameClassification(NC_UndeclaredNonType);
}
static NameClassification DependentNonType() {
return NameClassification(NC_DependentNonType);
}
static NameClassification TypeTemplate(TemplateName Name) {
NameClassification Result(NC_TypeTemplate);
Result.Template = Name;
return Result;
}
static NameClassification VarTemplate(TemplateName Name) {
NameClassification Result(NC_VarTemplate);
Result.Template = Name;
return Result;
}
static NameClassification FunctionTemplate(TemplateName Name) {
NameClassification Result(NC_FunctionTemplate);
Result.Template = Name;
return Result;
}
static NameClassification Concept(TemplateName Name) {
NameClassification Result(NC_Concept);
Result.Template = Name;
return Result;
}
static NameClassification UndeclaredTemplate(TemplateName Name) {
NameClassification Result(NC_UndeclaredTemplate);
Result.Template = Name;
return Result;
}
NameClassificationKind getKind() const { return Kind; }
ExprResult getExpression() const {
assert(Kind == NC_OverloadSet);
return Expr;
}
ParsedType getType() const {
assert(Kind == NC_Type);
return Type;
}
NamedDecl *getNonTypeDecl() const {
assert(Kind == NC_NonType);
return NonTypeDecl;
}
TemplateName getTemplateName() const {
assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate ||
Kind == NC_VarTemplate || Kind == NC_Concept ||
Kind == NC_UndeclaredTemplate);
return Template;
}
/// Map the template-flavored classification kinds to TemplateNameKind.
TemplateNameKind getTemplateNameKind() const {
switch (Kind) {
case NC_TypeTemplate:
return TNK_Type_template;
case NC_FunctionTemplate:
return TNK_Function_template;
case NC_VarTemplate:
return TNK_Var_template;
case NC_Concept:
return TNK_Concept_template;
case NC_UndeclaredTemplate:
return TNK_Undeclared_template;
default:
llvm_unreachable("unsupported name classification.");
}
}
};
/// Perform name lookup on the given name, classifying it based on
/// the results of name lookup and the following token.
///
/// This routine is used by the parser to resolve identifiers and help direct
/// parsing. When the identifier cannot be found, this routine will attempt
/// to correct the typo and classify based on the resulting name.
///
/// \param S The scope in which we're performing name lookup.
///
/// \param SS The nested-name-specifier that precedes the name.
///
/// \param Name The identifier. If typo correction finds an alternative name,
/// this pointer parameter will be updated accordingly.
///
/// \param NameLoc The location of the identifier.
///
/// \param NextToken The token following the identifier. Used to help
/// disambiguate the name.
///
/// \param CCC The correction callback, if typo correction is desired.
NameClassification ClassifyName(Scope *S, CXXScopeSpec &SS,
IdentifierInfo *&Name, SourceLocation NameLoc,
const Token &NextToken,
CorrectionCandidateCallback *CCC = nullptr);
/// Act on the result of classifying a name as an undeclared (ADL-only)
/// non-type declaration.
ExprResult ActOnNameClassifiedAsUndeclaredNonType(IdentifierInfo *Name,
SourceLocation NameLoc);
/// Act on the result of classifying a name as an undeclared member of a
/// dependent base class.
ExprResult ActOnNameClassifiedAsDependentNonType(const CXXScopeSpec &SS,
IdentifierInfo *Name,
SourceLocation NameLoc,
bool IsAddressOfOperand);
/// Act on the result of classifying a name as a specific non-type
/// declaration.
ExprResult ActOnNameClassifiedAsNonType(Scope *S, const CXXScopeSpec &SS,
NamedDecl *Found,
SourceLocation NameLoc,
const Token &NextToken);
/// Act on the result of classifying a name as an overload set.
ExprResult ActOnNameClassifiedAsOverloadSet(Scope *S, Expr *OverloadSet);
/// Describes the detailed kind of a template name. Used in diagnostics.
/// NOTE(review): these values presumably feed a %select in a diagnostic —
/// confirm before reordering enumerators.
enum class TemplateNameKindForDiagnostics {
ClassTemplate,          ///< A class template.
FunctionTemplate,       ///< A function template.
VarTemplate,            ///< A variable template.
AliasTemplate,          ///< An alias template.
TemplateTemplateParam,  ///< A template template parameter.
Concept,                ///< A concept.
DependentTemplate       ///< A dependent template name.
};
TemplateNameKindForDiagnostics
getTemplateNameKindForDiagnostics(TemplateName Name);
/// Determine whether it's plausible that E was intended to be a
/// template-name. Sets \p Dependent to indicate whether the expression is
/// type-dependent (dependent scope / member references).
bool mightBeIntendedToBeTemplateName(ExprResult E, bool &Dependent) {
  // Template-names only exist in C++, and we need a valid expression.
  if (!getLangOpts().CPlusPlus || E.isInvalid())
    return false;
  Expr *Inner = E.get();
  // Non-dependent references: plausible unless explicit template arguments
  // were already written.
  Dependent = false;
  if (auto *DeclRef = dyn_cast<DeclRefExpr>(Inner))
    return !DeclRef->hasExplicitTemplateArgs();
  if (auto *Member = dyn_cast<MemberExpr>(Inner))
    return !Member->hasExplicitTemplateArgs();
  // Dependent references: same criterion.
  Dependent = true;
  if (auto *DepDeclRef = dyn_cast<DependentScopeDeclRefExpr>(Inner))
    return !DepDeclRef->hasExplicitTemplateArgs();
  if (auto *DepMember = dyn_cast<CXXDependentScopeMemberExpr>(Inner))
    return !DepMember->hasExplicitTemplateArgs();
  // Any additional cases recognized here should also be handled by
  // diagnoseExprIntendedAsTemplateName.
  return false;
}
void diagnoseExprIntendedAsTemplateName(Scope *S, ExprResult TemplateName,
SourceLocation Less,
SourceLocation Greater);
Decl *ActOnDeclarator(Scope *S, Declarator &D);
NamedDecl *HandleDeclarator(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParameterLists);
void RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S);
bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info);
bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC,
DeclarationName Name, SourceLocation Loc,
bool IsTemplateId);
void
diagnoseIgnoredQualifiers(unsigned DiagID, unsigned Quals,
SourceLocation FallbackLoc,
SourceLocation ConstQualLoc = SourceLocation(),
SourceLocation VolatileQualLoc = SourceLocation(),
SourceLocation RestrictQualLoc = SourceLocation(),
SourceLocation AtomicQualLoc = SourceLocation(),
SourceLocation UnalignedQualLoc = SourceLocation());
static bool adjustContextForLocalExternDecl(DeclContext *&DC);
void DiagnoseFunctionSpecifiers(const DeclSpec &DS);
NamedDecl *getShadowedDeclaration(const TypedefNameDecl *D,
const LookupResult &R);
NamedDecl *getShadowedDeclaration(const VarDecl *D, const LookupResult &R);
void CheckShadow(NamedDecl *D, NamedDecl *ShadowedDecl,
const LookupResult &R);
void CheckShadow(Scope *S, VarDecl *D);
/// Warn if 'E', which is an expression that is about to be modified, refers
/// to a shadowing declaration.
void CheckShadowingDeclModification(Expr *E, SourceLocation Loc);
void DiagnoseShadowingLambdaDecls(const sema::LambdaScopeInfo *LSI);
private:
/// Map of current shadowing declarations to shadowed declarations. Warn if
/// it looks like the user is trying to modify the shadowing declaration.
llvm::DenseMap<const NamedDecl *, const NamedDecl *> ShadowingDecls;
public:
void CheckCastAlign(Expr *Op, QualType T, SourceRange TRange);
void handleTagNumbering(const TagDecl *Tag, Scope *TagScope);
void setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec,
TypedefNameDecl *NewTD);
void CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *D);
NamedDecl* ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC,
TypeSourceInfo *TInfo,
LookupResult &Previous);
NamedDecl* ActOnTypedefNameDecl(Scope* S, DeclContext* DC, TypedefNameDecl *D,
LookupResult &Previous, bool &Redeclaration);
NamedDecl *ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC,
TypeSourceInfo *TInfo,
LookupResult &Previous,
MultiTemplateParamsArg TemplateParamLists,
bool &AddToScope,
ArrayRef<BindingDecl *> Bindings = None);
NamedDecl *
ActOnDecompositionDeclarator(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParamLists);
// Returns true if the variable declaration is a redeclaration
bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous);
void CheckVariableDeclarationType(VarDecl *NewVD);
bool DeduceVariableDeclarationType(VarDecl *VDecl, bool DirectInit,
Expr *Init);
void CheckCompleteVariableDeclaration(VarDecl *VD);
void CheckCompleteDecompositionDeclaration(DecompositionDecl *DD);
void MaybeSuggestAddingStaticToDecl(const FunctionDecl *D);
NamedDecl* ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC,
TypeSourceInfo *TInfo,
LookupResult &Previous,
MultiTemplateParamsArg TemplateParamLists,
bool &AddToScope);
bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD);
/// How CheckConstexprFunctionDefinition should react to the problems it
/// finds.
enum class CheckConstexprKind {
/// Diagnose issues that are non-constant or that are extensions.
Diagnose,
/// Identify whether this function satisfies the formal rules for constexpr
/// functions in the current language mode (with no extensions).
CheckValid
};
bool CheckConstexprFunctionDefinition(const FunctionDecl *FD,
CheckConstexprKind Kind);
void DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD);
void FindHiddenVirtualMethods(CXXMethodDecl *MD,
SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
void NoteHiddenVirtualMethods(CXXMethodDecl *MD,
SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
// Returns true if the function declaration is a redeclaration
bool CheckFunctionDeclaration(Scope *S,
FunctionDecl *NewFD, LookupResult &Previous,
bool IsMemberSpecialization);
bool shouldLinkDependentDeclWithPrevious(Decl *D, Decl *OldDecl);
bool canFullyTypeCheckRedeclaration(ValueDecl *NewD, ValueDecl *OldD,
QualType NewT, QualType OldT);
void CheckMain(FunctionDecl *FD, const DeclSpec &D);
void CheckMSVCRTEntryPoint(FunctionDecl *FD);
Attr *getImplicitCodeSegOrSectionAttrForFunction(const FunctionDecl *FD,
bool IsDefinition);
void CheckFunctionOrTemplateParamDeclarator(Scope *S, Declarator &D);
Decl *ActOnParamDeclarator(Scope *S, Declarator &D);
ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC,
SourceLocation Loc,
QualType T);
ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc,
SourceLocation NameLoc, IdentifierInfo *Name,
QualType T, TypeSourceInfo *TSInfo,
StorageClass SC);
void ActOnParamDefaultArgument(Decl *param,
SourceLocation EqualLoc,
Expr *defarg);
void ActOnParamUnparsedDefaultArgument(Decl *param, SourceLocation EqualLoc,
SourceLocation ArgLoc);
void ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc);
ExprResult ConvertParamDefaultArgument(const ParmVarDecl *Param,
Expr *DefaultArg,
SourceLocation EqualLoc);
void SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg,
SourceLocation EqualLoc);
// Contexts where using non-trivial C union types can be disallowed. This is
// passed to err_non_trivial_c_union_in_invalid_context.
// NOTE(review): the enumerator order presumably matches a %select in that
// diagnostic — confirm before reordering.
enum NonTrivialCUnionContext {
// Function parameter.
NTCUC_FunctionParam,
// Function return.
NTCUC_FunctionReturn,
// Default-initialized object.
NTCUC_DefaultInitializedObject,
// Variable with automatic storage duration.
NTCUC_AutoVar,
// Initializer expression that might copy from another object.
NTCUC_CopyInit,
// Assignment.
NTCUC_Assignment,
// Compound literal.
NTCUC_CompoundLiteral,
// Block capture.
NTCUC_BlockCapture,
// lvalue-to-rvalue conversion of volatile type.
NTCUC_LValueToRValueVolatile,
};
/// Emit diagnostics if the initializer or any of its explicit or
/// implicitly-generated subexpressions require copying or
/// default-initializing a type that is or contains a C union type that is
/// non-trivial to copy or default-initialize.
void checkNonTrivialCUnionInInitializer(const Expr *Init, SourceLocation Loc);
// These flags are passed to checkNonTrivialCUnion. They are bit flags and
// may be OR'd together into its `NonTrivialKind` parameter.
enum NonTrivialCUnionKind {
// Check initialization.
NTCUK_Init = 0x1,
// Check destruction.
NTCUK_Destruct = 0x2,
// Check copying.
NTCUK_Copy = 0x4,
};
/// Emit diagnostics if a non-trivial C union type or a struct that contains
/// a non-trivial C union is used in an invalid context.
void checkNonTrivialCUnion(QualType QT, SourceLocation Loc,
NonTrivialCUnionContext UseContext,
unsigned NonTrivialKind);
void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit);
void ActOnUninitializedDecl(Decl *dcl);
void ActOnInitializerError(Decl *Dcl);
void ActOnPureSpecifier(Decl *D, SourceLocation PureSpecLoc);
void ActOnCXXForRangeDecl(Decl *D);
StmtResult ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc,
IdentifierInfo *Ident,
ParsedAttributes &Attrs,
SourceLocation AttrEnd);
void SetDeclDeleted(Decl *dcl, SourceLocation DelLoc);
void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc);
void CheckStaticLocalForDllExport(VarDecl *VD);
void FinalizeDeclaration(Decl *D);
DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS,
ArrayRef<Decl *> Group);
DeclGroupPtrTy BuildDeclaratorGroup(MutableArrayRef<Decl *> Group);
/// Should be called on all declarations that might have attached
/// documentation comments.
void ActOnDocumentableDecl(Decl *D);
void ActOnDocumentableDecls(ArrayRef<Decl *> Group);
void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D,
SourceLocation LocAfterDecls);
void CheckForFunctionRedefinition(
FunctionDecl *FD, const FunctionDecl *EffectiveDefinition = nullptr,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParamLists,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D,
SkipBodyInfo *SkipBody = nullptr);
void ActOnStartTrailingRequiresClause(Scope *S, Declarator &D);
ExprResult ActOnFinishTrailingRequiresClause(ExprResult ConstraintExpr);
void ActOnStartOfObjCMethodDef(Scope *S, Decl *D);
/// Whether \p D is a (non-null) Objective-C method declaration.
bool isObjCMethodDecl(Decl *D) {
  if (!D)
    return false;
  return isa<ObjCMethodDecl>(D);
}
/// Determine whether we can delay parsing the body of a function or
/// function template until it is used, assuming we don't care about emitting
/// code for that function.
///
/// This will be \c false if we may need the body of the function in the
/// middle of parsing an expression (where it's impractical to switch to
/// parsing a different function), for instance, if it's constexpr in C++11
/// or has an 'auto' return type in C++14. These cases are essentially bugs.
bool canDelayFunctionBody(const Declarator &D);
/// Determine whether we can skip parsing the body of a function
/// definition, assuming we don't care about analyzing its body or emitting
/// code for that function.
///
/// This will be \c false only if we may need the body of the function in
/// order to parse the rest of the program (for instance, if it is
/// \c constexpr in C++11 or has an 'auto' return type in C++14).
bool canSkipFunctionBody(Decl *D);
void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation);
Decl *ActOnSkippedFunctionBody(Decl *Decl);
void ActOnFinishInlineFunctionDef(FunctionDecl *D);
/// ActOnFinishDelayedAttribute - Invoked when we have finished parsing an
/// attribute for which parsing is delayed.
void ActOnFinishDelayedAttribute(Scope *S, Decl *D, ParsedAttributes &Attrs);
/// Diagnose any unused parameters in the given sequence of
/// ParmVarDecl pointers.
void DiagnoseUnusedParameters(ArrayRef<ParmVarDecl *> Parameters);
/// Diagnose whether the size of parameters or return value of a
/// function or obj-c method definition is pass-by-value and larger than a
/// specified threshold.
void
DiagnoseSizeOfParametersAndReturnValue(ArrayRef<ParmVarDecl *> Parameters,
QualType ReturnTy, NamedDecl *D);
void DiagnoseInvalidJumps(Stmt *Body);
Decl *ActOnFileScopeAsmDecl(Expr *expr,
SourceLocation AsmLoc,
SourceLocation RParenLoc);
/// Handle a C++11 empty-declaration and attribute-declaration.
Decl *ActOnEmptyDeclaration(Scope *S, const ParsedAttributesView &AttrList,
SourceLocation SemiLoc);
/// The kind of module-declaration that was parsed (see ActOnModuleDecl).
enum class ModuleDeclKind {
Interface, ///< 'export module X;'
Implementation, ///< 'module X;'
};
/// The parser has processed a module-declaration that begins the definition
/// of a module interface or implementation.
DeclGroupPtrTy ActOnModuleDecl(SourceLocation StartLoc,
SourceLocation ModuleLoc, ModuleDeclKind MDK,
ModuleIdPath Path, bool IsFirstDecl);
/// The parser has processed a global-module-fragment declaration that begins
/// the definition of the global module fragment of the current module unit.
/// \param ModuleLoc The location of the 'module' keyword.
DeclGroupPtrTy ActOnGlobalModuleFragmentDecl(SourceLocation ModuleLoc);
/// The parser has processed a private-module-fragment declaration that begins
/// the definition of the private module fragment of the current module unit.
/// \param ModuleLoc The location of the 'module' keyword.
/// \param PrivateLoc The location of the 'private' keyword.
DeclGroupPtrTy ActOnPrivateModuleFragmentDecl(SourceLocation ModuleLoc,
SourceLocation PrivateLoc);
/// The parser has processed a module import declaration.
///
/// \param StartLoc The location of the first token in the declaration. This
/// could be the location of an '@', 'export', or 'import'.
/// \param ExportLoc The location of the 'export' keyword, if any.
/// \param ImportLoc The location of the 'import' keyword.
/// \param Path The module access path.
DeclResult ActOnModuleImport(SourceLocation StartLoc,
SourceLocation ExportLoc,
SourceLocation ImportLoc, ModuleIdPath Path);
DeclResult ActOnModuleImport(SourceLocation StartLoc,
SourceLocation ExportLoc,
SourceLocation ImportLoc, Module *M,
ModuleIdPath Path = {});
/// The parser has processed a module import translated from a
/// #include or similar preprocessing directive.
void ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod);
void BuildModuleInclude(SourceLocation DirectiveLoc, Module *Mod);
/// The parser has entered a submodule.
void ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod);
/// The parser has left a submodule.
void ActOnModuleEnd(SourceLocation DirectiveLoc, Module *Mod);
/// Create an implicit import of the given module at the given
/// source location, for error recovery, if possible.
///
/// This routine is typically used when an entity found by name lookup
/// is actually hidden within a module that we know about but the user
/// has forgotten to import.
void createImplicitModuleImportForErrorRecovery(SourceLocation Loc,
Module *Mod);
/// Kinds of missing import. Note, the values of these enumerators correspond
/// to %select values in diagnostics, so the order must be kept stable.
enum class MissingImportKind {
Declaration,            ///< A declaration of the entity.
Definition,             ///< The entity's definition.
DefaultArgument,        ///< A default argument.
ExplicitSpecialization, ///< An explicit specialization.
PartialSpecialization   ///< A partial specialization.
};
/// Diagnose that the specified declaration needs to be visible but
/// isn't, and suggest a module import that would resolve the problem.
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
MissingImportKind MIK, bool Recover = true);
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
SourceLocation DeclLoc, ArrayRef<Module *> Modules,
MissingImportKind MIK, bool Recover);
Decl *ActOnStartExportDecl(Scope *S, SourceLocation ExportLoc,
SourceLocation LBraceLoc);
Decl *ActOnFinishExportDecl(Scope *S, Decl *ExportDecl,
SourceLocation RBraceLoc);
/// We've found a use of a templated declaration that would trigger an
/// implicit instantiation. Check that any relevant explicit specializations
/// and partial specializations are visible, and diagnose if not.
void checkSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec);
/// We've found a use of a template specialization that would select a
/// partial specialization. Check that the partial specialization is visible,
/// and diagnose if not.
void checkPartialSpecializationVisibility(SourceLocation Loc,
NamedDecl *Spec);
/// Retrieve a suitable printing policy for diagnostics.
PrintingPolicy getPrintingPolicy() const {
  // Forward to the static overload with this Sema's ASTContext and
  // Preprocessor.
  PrintingPolicy Policy = getPrintingPolicy(Context, PP);
  return Policy;
}
/// Retrieve a suitable printing policy for diagnostics.
static PrintingPolicy getPrintingPolicy(const ASTContext &Ctx,
const Preprocessor &PP);
/// Scope actions.
void ActOnPopScope(SourceLocation Loc, Scope *S);
void ActOnTranslationUnitScope(Scope *S);
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
RecordDecl *&AnonRecord);
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
MultiTemplateParamsArg TemplateParams,
bool IsExplicitInstantiation,
RecordDecl *&AnonRecord);
Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS,
AccessSpecifier AS,
RecordDecl *Record,
const PrintingPolicy &Policy);
Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS,
RecordDecl *Record);
/// Common ways to introduce type names without a tag for use in diagnostics.
/// Keep in sync with err_tag_reference_non_tag.
/// Returned by getNonTagTypeDeclKind below.
enum NonTagKind {
NTK_NonStruct,
NTK_NonClass,
NTK_NonUnion,
NTK_NonEnum,
NTK_Typedef,
NTK_TypeAlias,
NTK_Template,
NTK_TypeAliasTemplate,
NTK_TemplateTemplateArgument,
};
/// Given a non-tag type declaration, returns an enum useful for indicating
/// what kind of non-tag type this is.
NonTagKind getNonTagTypeDeclKind(const Decl *D, TagTypeKind TTK);
bool isAcceptableTagRedeclaration(const TagDecl *Previous,
TagTypeKind NewTag, bool isDefinition,
SourceLocation NewTagLoc,
const IdentifierInfo *Name);
/// How a tag (struct/union/class/enum) name is being used; see the inline
/// examples. Passed to ActOnTag and ActOnDependentTag.
enum TagUseKind {
TUK_Reference, // Reference to a tag: 'struct foo *X;'
TUK_Declaration, // Fwd decl of a tag: 'struct foo;'
TUK_Definition, // Definition of a tag: 'struct foo { int X; } Y;'
TUK_Friend // Friend declaration: 'friend struct foo;'
};
Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc, const ParsedAttributesView &Attr,
AccessSpecifier AS, SourceLocation ModulePrivateLoc,
MultiTemplateParamsArg TemplateParameterLists, bool &OwnedDecl,
bool &IsDependent, SourceLocation ScopedEnumKWLoc,
bool ScopedEnumUsesClassTag, TypeResult UnderlyingType,
bool IsTypeSpecifier, bool IsTemplateParamOrArg,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc,
unsigned TagSpec, SourceLocation TagLoc,
CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc,
const ParsedAttributesView &Attr,
MultiTemplateParamsArg TempParamLists);
TypeResult ActOnDependentTag(Scope *S,
unsigned TagSpec,
TagUseKind TUK,
const CXXScopeSpec &SS,
IdentifierInfo *Name,
SourceLocation TagLoc,
SourceLocation NameLoc);
void ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart,
IdentifierInfo *ClassName,
SmallVectorImpl<Decl *> &Decls);
Decl *ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth);
FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
InClassInitStyle InitStyle,
AccessSpecifier AS);
MSPropertyDecl *HandleMSProperty(Scope *S, RecordDecl *TagD,
SourceLocation DeclStart, Declarator &D,
Expr *BitfieldWidth,
InClassInitStyle InitStyle,
AccessSpecifier AS,
const ParsedAttr &MSPropertyAttr);
FieldDecl *CheckFieldDecl(DeclarationName Name, QualType T,
TypeSourceInfo *TInfo,
RecordDecl *Record, SourceLocation Loc,
bool Mutable, Expr *BitfieldWidth,
InClassInitStyle InitStyle,
SourceLocation TSSL,
AccessSpecifier AS, NamedDecl *PrevDecl,
Declarator *D = nullptr);
bool CheckNontrivialField(FieldDecl *FD);
void DiagnoseNontrivial(const CXXRecordDecl *Record, CXXSpecialMember CSM);
/// Whether the "trivial_abi" attribute should be taken into account when
/// determining whether a special member is trivial
/// (see SpecialMemberIsTrivial).
enum TrivialABIHandling {
/// The triviality of a method unaffected by "trivial_abi".
TAH_IgnoreTrivialABI,
/// The triviality of a method affected by "trivial_abi".
TAH_ConsiderTrivialABI
};
bool SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM,
TrivialABIHandling TAH = TAH_IgnoreTrivialABI,
bool Diagnose = false);
/// For a defaulted function, the kind of defaulted function that it is.
///
/// Wraps either a CXXSpecialMember or a DefaultedComparisonKind; at most one
/// of the two is meaningful, the other holds its sentinel (CXXInvalid /
/// DefaultedComparisonKind::None).
class DefaultedFunctionKind {
/// The special member kind, or CXXInvalid when this describes a defaulted
/// comparison (or nothing). Stored in an 8-bit bitfield.
CXXSpecialMember SpecialMember : 8;
/// The comparison kind, or DefaultedComparisonKind::None when this
/// describes a special member (or nothing). Stored in an 8-bit bitfield.
DefaultedComparisonKind Comparison : 8;
public:
/// Construct the "not a defaulted function" value.
DefaultedFunctionKind()
: SpecialMember(CXXInvalid), Comparison(DefaultedComparisonKind::None) {
}
/// Construct from a special member kind.
DefaultedFunctionKind(CXXSpecialMember CSM)
: SpecialMember(CSM), Comparison(DefaultedComparisonKind::None) {}
/// Construct from a defaulted comparison kind.
DefaultedFunctionKind(DefaultedComparisonKind Comp)
: SpecialMember(CXXInvalid), Comparison(Comp) {}
bool isSpecialMember() const { return SpecialMember != CXXInvalid; }
bool isComparison() const {
return Comparison != DefaultedComparisonKind::None;
}
/// True when this describes either kind of defaulted function.
explicit operator bool() const {
return isSpecialMember() || isComparison();
}
CXXSpecialMember asSpecialMember() const { return SpecialMember; }
DefaultedComparisonKind asComparison() const { return Comparison; }
/// Get the index of this function kind for use in diagnostics.
/// Special members map to their own value (Comparison contributes 0);
/// comparisons are offset past CXXInvalid, which the first static_assert
/// pins as the largest special-member value.
unsigned getDiagnosticIndex() const {
static_assert(CXXInvalid > CXXDestructor,
"invalid should have highest index");
static_assert((unsigned)DefaultedComparisonKind::None == 0,
"none should be equal to zero");
return SpecialMember + (unsigned)Comparison;
}
};
DefaultedFunctionKind getDefaultedFunctionKind(const FunctionDecl *FD);
/// Determine the defaulted special-member kind recorded for \p MD, or
/// CXXInvalid if getDefaultedFunctionKind does not classify it as a
/// special member.
CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD) {
  DefaultedFunctionKind DFK = getDefaultedFunctionKind(MD);
  return DFK.asSpecialMember();
}
/// Determine the defaulted comparison kind recorded for \p FD, or
/// DefaultedComparisonKind::None if getDefaultedFunctionKind does not
/// classify it as a comparison.
DefaultedComparisonKind getDefaultedComparisonKind(const FunctionDecl *FD) {
  DefaultedFunctionKind DFK = getDefaultedFunctionKind(FD);
  return DFK.asComparison();
}
void ActOnLastBitfield(SourceLocation DeclStart,
SmallVectorImpl<Decl *> &AllIvarDecls);
Decl *ActOnIvar(Scope *S, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
tok::ObjCKeywordKind visibility);
// This is used for both record definitions and ObjC interface declarations.
void ActOnFields(Scope *S, SourceLocation RecLoc, Decl *TagDecl,
ArrayRef<Decl *> Fields, SourceLocation LBrac,
SourceLocation RBrac, const ParsedAttributesView &AttrList);
/// ActOnTagStartDefinition - Invoked when we have entered the
/// scope of a tag's definition (e.g., for an enumeration, class,
/// struct, or union).
void ActOnTagStartDefinition(Scope *S, Decl *TagDecl);
/// Perform ODR-like check for C/ObjC when merging tag types from modules.
/// Differently from C++, actually parse the body and reject / error out
/// in case of a structural mismatch.
bool ActOnDuplicateDefinition(DeclSpec &DS, Decl *Prev,
SkipBodyInfo &SkipBody);
typedef void *SkippedDefinitionContext;
/// Invoked when we enter a tag definition that we're skipping.
SkippedDefinitionContext ActOnTagStartSkippedDefinition(Scope *S, Decl *TD);
Decl *ActOnObjCContainerStartDefinition(Decl *IDecl);
/// ActOnStartCXXMemberDeclarations - Invoked when we have parsed a
/// C++ record definition's base-specifiers clause and are starting its
/// member declarations.
void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl,
SourceLocation FinalLoc,
bool IsFinalSpelledSealed,
SourceLocation LBraceLoc);
/// ActOnTagFinishDefinition - Invoked once we have finished parsing
/// the definition of a tag (enumeration, class, struct, or union).
void ActOnTagFinishDefinition(Scope *S, Decl *TagDecl,
SourceRange BraceRange);
void ActOnTagFinishSkippedDefinition(SkippedDefinitionContext Context);
void ActOnObjCContainerFinishDefinition();
/// Invoked when we must temporarily exit the objective-c container
/// scope for parsing/looking-up C constructs.
///
/// Must be followed by a call to \see ActOnObjCReenterContainerContext
void ActOnObjCTemporaryExitContainerContext(DeclContext *DC);
void ActOnObjCReenterContainerContext(DeclContext *DC);
/// ActOnTagDefinitionError - Invoked when there was an unrecoverable
/// error parsing the definition of a tag.
void ActOnTagDefinitionError(Scope *S, Decl *TagDecl);
EnumConstantDecl *CheckEnumConstant(EnumDecl *Enum,
EnumConstantDecl *LastEnumConst,
SourceLocation IdLoc,
IdentifierInfo *Id,
Expr *val);
bool CheckEnumUnderlyingType(TypeSourceInfo *TI);
bool CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped,
QualType EnumUnderlyingTy, bool IsFixed,
const EnumDecl *Prev);
/// Determine whether the body of an anonymous enumeration should be skipped.
/// \param II The name of the first enumerator.
SkipBodyInfo shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II,
SourceLocation IILoc);
Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant,
SourceLocation IdLoc, IdentifierInfo *Id,
const ParsedAttributesView &Attrs,
SourceLocation EqualLoc, Expr *Val);
void ActOnEnumBody(SourceLocation EnumLoc, SourceRange BraceRange,
Decl *EnumDecl, ArrayRef<Decl *> Elements, Scope *S,
const ParsedAttributesView &Attr);
/// Set the current declaration context until it gets popped.
void PushDeclContext(Scope *S, DeclContext *DC);
void PopDeclContext();
/// EnterDeclaratorContext - Used when we must lookup names in the context
/// of a declarator's nested name specifier.
void EnterDeclaratorContext(Scope *S, DeclContext *DC);
void ExitDeclaratorContext(Scope *S);
/// Enter a template parameter scope, after it's been associated with a particular
/// DeclContext. Causes lookup within the scope to chain through enclosing contexts
/// in the correct order.
void EnterTemplatedContext(Scope *S, DeclContext *DC);
/// Push the parameters of D, which must be a function, into scope.
void ActOnReenterFunctionContext(Scope* S, Decl* D);
void ActOnExitFunctionContext();
DeclContext *getFunctionLevelDeclContext();
/// getCurFunctionDecl - If inside of a function body, this returns a pointer
/// to the function decl for the function being parsed. If we're currently
/// in a 'block', this returns the containing context.
FunctionDecl *getCurFunctionDecl();
/// getCurMethodDecl - If inside of a method body, this returns a pointer to
/// the method decl for the method being parsed. If we're currently
/// in a 'block', this returns the containing context.
ObjCMethodDecl *getCurMethodDecl();
/// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method
/// or C function we're in, otherwise return null. If we're currently
/// in a 'block', this returns the containing context.
NamedDecl *getCurFunctionOrMethodDecl();
/// Add this decl to the scope shadowed decl chains.
void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true);
/// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true
/// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns
/// true if 'D' belongs to the given declaration context.
///
/// \param AllowInlineNamespace If \c true, allow the declaration to be in the
/// enclosing namespace set of the context, rather than contained
/// directly within it.
bool isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S = nullptr,
bool AllowInlineNamespace = false);
/// Finds the scope corresponding to the given decl context, if it
/// happens to be an enclosing scope. Otherwise return NULL.
static Scope *getScopeForDeclContext(Scope *S, DeclContext *DC);
/// Subroutines of ActOnDeclarator().
TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T,
TypeSourceInfo *TInfo);
bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New);
/// Describes the kind of merge to perform for availability
/// attributes (including "deprecated", "unavailable", and "availability").
/// Passed to mergeAvailabilityAttr and mergeDeclAttributes.
enum AvailabilityMergeKind {
/// Don't merge availability attributes at all.
AMK_None,
/// Merge availability attributes for a redeclaration, which requires
/// an exact match.
AMK_Redeclaration,
/// Merge availability attributes for an override, which requires
/// an exact match or a weakening of constraints.
AMK_Override,
/// Merge availability attributes for an implementation of
/// a protocol requirement.
AMK_ProtocolImplementation,
};
/// Describes the kind of priority given to an availability attribute.
///
/// The sum of priorities determines the final priority of the attribute.
/// The final priority determines how the attribute will be merged.
/// An attribute with a lower priority will always remove higher priority
/// attributes for the specified platform when it is being applied. An
/// attribute with a higher priority will not be applied if the declaration
/// already has an availability attribute with a lower priority for the
/// specified platform. The final priority values are not expected to match
/// the values in this enumeration, but instead should be treated as a plain
/// integer value. This enumeration just names the priority weights that are
/// used to calculate that final value.
enum AvailabilityPriority : int {
/// The availability attribute was specified explicitly next to the
/// declaration.
AP_Explicit = 0,
/// The availability attribute was applied using '#pragma clang attribute'.
AP_PragmaClangAttribute = 1,
/// The availability attribute for a specific platform was inferred from
/// an availability attribute for another platform.
AP_InferredFromOtherPlatform = 2
};
/// Attribute merging methods. Each returns the merged attribute to attach to
/// the declaration, or nullptr when no new attribute was added (for example
/// because the existing attribute already subsumes it or a conflict was
/// diagnosed instead).
AvailabilityAttr *
mergeAvailabilityAttr(NamedDecl *D, const AttributeCommonInfo &CI,
IdentifierInfo *Platform, bool Implicit,
VersionTuple Introduced, VersionTuple Deprecated,
VersionTuple Obsoleted, bool IsUnavailable,
StringRef Message, bool IsStrict, StringRef Replacement,
AvailabilityMergeKind AMK, int Priority);
TypeVisibilityAttr *
mergeTypeVisibilityAttr(Decl *D, const AttributeCommonInfo &CI,
TypeVisibilityAttr::VisibilityType Vis);
VisibilityAttr *mergeVisibilityAttr(Decl *D, const AttributeCommonInfo &CI,
VisibilityAttr::VisibilityType Vis);
UuidAttr *mergeUuidAttr(Decl *D, const AttributeCommonInfo &CI,
StringRef UuidAsWritten, MSGuidDecl *GuidDecl);
DLLImportAttr *mergeDLLImportAttr(Decl *D, const AttributeCommonInfo &CI);
DLLExportAttr *mergeDLLExportAttr(Decl *D, const AttributeCommonInfo &CI);
MSInheritanceAttr *mergeMSInheritanceAttr(Decl *D,
const AttributeCommonInfo &CI,
bool BestCase,
MSInheritanceModel Model);
FormatAttr *mergeFormatAttr(Decl *D, const AttributeCommonInfo &CI,
IdentifierInfo *Format, int FormatIdx,
int FirstArg);
SectionAttr *mergeSectionAttr(Decl *D, const AttributeCommonInfo &CI,
StringRef Name);
CodeSegAttr *mergeCodeSegAttr(Decl *D, const AttributeCommonInfo &CI,
StringRef Name);
AlwaysInlineAttr *mergeAlwaysInlineAttr(Decl *D,
const AttributeCommonInfo &CI,
const IdentifierInfo *Ident);
MinSizeAttr *mergeMinSizeAttr(Decl *D, const AttributeCommonInfo &CI);
NoSpeculativeLoadHardeningAttr *
mergeNoSpeculativeLoadHardeningAttr(Decl *D,
const NoSpeculativeLoadHardeningAttr &AL);
SpeculativeLoadHardeningAttr *
mergeSpeculativeLoadHardeningAttr(Decl *D,
const SpeculativeLoadHardeningAttr &AL);
SwiftNameAttr *mergeSwiftNameAttr(Decl *D, const SwiftNameAttr &SNA,
StringRef Name);
OptimizeNoneAttr *mergeOptimizeNoneAttr(Decl *D,
const AttributeCommonInfo &CI);
InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const ParsedAttr &AL);
InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D,
const InternalLinkageAttr &AL);
CommonAttr *mergeCommonAttr(Decl *D, const ParsedAttr &AL);
CommonAttr *mergeCommonAttr(Decl *D, const CommonAttr &AL);
WebAssemblyImportNameAttr *mergeImportNameAttr(
Decl *D, const WebAssemblyImportNameAttr &AL);
WebAssemblyImportModuleAttr *mergeImportModuleAttr(
Decl *D, const WebAssemblyImportModuleAttr &AL);
// Declaration-merging entry points: combine a new (re)declaration with a
// previous one found by lookup. The "Merge*" routines return true / diagnose
// on conflict; the lowercase "merge*" variants merge without a return value.
void mergeDeclAttributes(NamedDecl *New, Decl *Old,
AvailabilityMergeKind AMK = AMK_Redeclaration);
void MergeTypedefNameDecl(Scope *S, TypedefNameDecl *New,
LookupResult &OldDecls);
bool MergeFunctionDecl(FunctionDecl *New, NamedDecl *&Old, Scope *S,
bool MergeTypeWithOld);
bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old,
Scope *S, bool MergeTypeWithOld);
void mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old);
void MergeVarDecl(VarDecl *New, LookupResult &Previous);
void MergeVarDeclTypes(VarDecl *New, VarDecl *Old, bool MergeTypeWithOld);
void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old);
bool checkVarDeclRedefinition(VarDecl *OldDefn, VarDecl *NewDefn);
void notePreviousDefinition(const NamedDecl *Old, SourceLocation New);
bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S);
// AssignmentAction - This is used by all the assignment diagnostic functions
// to represent what is actually causing the operation
enum AssignmentAction {
AA_Assigning, ///< A value is being assigned to an object.
AA_Passing, ///< A value is being passed as a function argument.
AA_Returning, ///< A value is being returned from a function.
AA_Converting, ///< A value is being converted (no more specific context).
AA_Initializing, ///< A value is being used to initialize an object.
AA_Sending, ///< A value is being sent (Objective-C message argument).
AA_Casting, ///< A value is being cast to another type.
AA_Passing_CFAudited ///< A value is passed to a CF-audited parameter.
};
/// C++ Overloading.
enum OverloadKind {
/// This is a legitimate overload: the existing declarations are
/// functions or function templates with different signatures.
Ovl_Overload,
/// This is not an overload because the signature exactly matches
/// an existing declaration.
Ovl_Match,
/// This is not an overload because the lookup results contain a
/// non-function.
Ovl_NonFunction
};
// Determine whether New is an overload of, or conflicts with, the existing
// declarations in OldDecls; OldDecl receives the matched declaration.
OverloadKind CheckOverload(Scope *S,
FunctionDecl *New,
const LookupResult &OldDecls,
NamedDecl *&OldDecl,
bool IsForUsingDecl);
bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl,
bool ConsiderCudaAttrs = true,
bool ConsiderRequiresClauses = true);
enum class AllowedExplicit {
/// Allow no explicit functions to be used.
None,
/// Allow explicit conversion functions but not explicit constructors.
Conversions,
/// Allow both explicit conversion functions and explicit constructors.
All
};
// Attempt the implicit conversion From -> ToType and describe the resulting
// conversion sequence (which may be a failed/ambiguous sequence).
ImplicitConversionSequence
TryImplicitConversion(Expr *From, QualType ToType,
bool SuppressUserConversions,
AllowedExplicit AllowExplicit,
bool InOverloadResolution,
bool CStyle,
bool AllowObjCWritebackConversion);
// Individual standard-conversion category checks used when forming implicit
// conversion sequences.
bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType);
bool IsFloatingPointPromotion(QualType FromType, QualType ToType);
bool IsComplexPromotion(QualType FromType, QualType ToType);
bool IsPointerConversion(Expr *From, QualType FromType, QualType ToType,
bool InOverloadResolution,
QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCPointerConversion(QualType FromType, QualType ToType,
QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCWritebackConversion(QualType FromType, QualType ToType,
QualType &ConvertedType);
bool IsBlockPointerConversion(QualType FromType, QualType ToType,
QualType& ConvertedType);
bool FunctionParamTypesAreEqual(const FunctionProtoType *OldType,
const FunctionProtoType *NewType,
unsigned *ArgPos = nullptr);
void HandleFunctionTypeMismatch(PartialDiagnostic &PDiag,
QualType FromType, QualType ToType);
void maybeExtendBlockObject(ExprResult &E);
CastKind PrepareCastToObjCObjectPointer(ExprResult &E);
bool CheckPointerConversion(Expr *From, QualType ToType,
CastKind &Kind,
CXXCastPath& BasePath,
bool IgnoreBaseAccess,
bool Diagnose = true);
bool IsMemberPointerConversion(Expr *From, QualType FromType, QualType ToType,
bool InOverloadResolution,
QualType &ConvertedType);
bool CheckMemberPointerConversion(Expr *From, QualType ToType,
CastKind &Kind,
CXXCastPath &BasePath,
bool IgnoreBaseAccess);
bool IsQualificationConversion(QualType FromType, QualType ToType,
bool CStyle, bool &ObjCLifetimeConversion);
bool IsFunctionConversion(QualType FromType, QualType ToType,
QualType &ResultTy);
bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType);
bool isSameOrCompatibleFunctionType(CanQualType Param, CanQualType Arg);
ExprResult PerformMoveOrCopyInitialization(const InitializedEntity &Entity,
const VarDecl *NRVOCandidate,
QualType ResultType,
Expr *Value,
bool AllowNRVO = true);
bool CanPerformAggregateInitializationForOverloadResolution(
const InitializedEntity &Entity, InitListExpr *From);
bool IsStringInit(Expr *Init, const ArrayType *AT);
bool CanPerformCopyInitialization(const InitializedEntity &Entity,
ExprResult Init);
ExprResult PerformCopyInitialization(const InitializedEntity &Entity,
SourceLocation EqualLoc,
ExprResult Init,
bool TopLevelOfInitList = false,
bool AllowExplicit = false);
ExprResult PerformObjectArgumentInitialization(Expr *From,
NestedNameSpecifier *Qualifier,
NamedDecl *FoundDecl,
CXXMethodDecl *Method);
/// Check that the lifetime of the initializer (and its subobjects) is
/// sufficient for initializing the entity, and perform lifetime extension
/// (when permitted) if not.
void checkInitializerLifetime(const InitializedEntity &Entity, Expr *Init);
ExprResult PerformContextuallyConvertToBool(Expr *From);
ExprResult PerformContextuallyConvertToObjCPointer(Expr *From);
/// Contexts in which a converted constant expression is required.
enum CCEKind {
CCEK_CaseValue, ///< Expression in a case label.
CCEK_Enumerator, ///< Enumerator value with fixed underlying type.
CCEK_TemplateArg, ///< Value of a non-type template parameter.
CCEK_ArrayBound, ///< Array bound in array declarator or new-expression.
CCEK_ConstexprIf, ///< Condition in a constexpr if statement.
CCEK_ExplicitBool ///< Condition in an explicit(bool) specifier.
};
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
llvm::APSInt &Value, CCEKind CCE);
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
APValue &Value, CCEKind CCE);
/// Abstract base class used to perform a contextual implicit
/// conversion from an expression to any type passing a filter.
class ContextualImplicitConverter {
public:
// When true, suppress all diagnostics from the conversion.
bool Suppress;
// When true, suppress only the "conversion applied" style diagnostics.
bool SuppressConversion;
ContextualImplicitConverter(bool Suppress = false,
bool SuppressConversion = false)
: Suppress(Suppress), SuppressConversion(SuppressConversion) {}
/// Determine whether the specified type is a valid destination type
/// for this conversion.
virtual bool match(QualType T) = 0;
/// Emits a diagnostic complaining that the expression does not have
/// integral or enumeration type.
virtual SemaDiagnosticBuilder
diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) = 0;
/// Emits a diagnostic when the expression has incomplete class type.
virtual SemaDiagnosticBuilder
diagnoseIncomplete(Sema &S, SourceLocation Loc, QualType T) = 0;
/// Emits a diagnostic when the only matching conversion function
/// is explicit.
virtual SemaDiagnosticBuilder diagnoseExplicitConv(
Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;
/// Emits a note for the explicit conversion function.
virtual SemaDiagnosticBuilder
noteExplicitConv(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;
/// Emits a diagnostic when there are multiple possible conversion
/// functions.
virtual SemaDiagnosticBuilder
diagnoseAmbiguous(Sema &S, SourceLocation Loc, QualType T) = 0;
/// Emits a note for one of the candidate conversions.
virtual SemaDiagnosticBuilder
noteAmbiguous(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;
/// Emits a diagnostic when we picked a conversion function
/// (for cases when we are not allowed to pick a conversion function).
virtual SemaDiagnosticBuilder diagnoseConversion(
Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;
virtual ~ContextualImplicitConverter() {}
};
/// Converter for contexts that require an integral or enumeration type,
/// implemented in terms of ContextualImplicitConverter.
class ICEConvertDiagnoser : public ContextualImplicitConverter {
bool AllowScopedEnumerations;
public:
ICEConvertDiagnoser(bool AllowScopedEnumerations,
bool Suppress, bool SuppressConversion)
: ContextualImplicitConverter(Suppress, SuppressConversion),
AllowScopedEnumerations(AllowScopedEnumerations) {}
/// Match an integral or (possibly scoped) enumeration type.
bool match(QualType T) override;
SemaDiagnosticBuilder
diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) override {
return diagnoseNotInt(S, Loc, T);
}
/// Emits a diagnostic complaining that the expression does not have
/// integral or enumeration type.
virtual SemaDiagnosticBuilder
diagnoseNotInt(Sema &S, SourceLocation Loc, QualType T) = 0;
};
/// Perform a contextual implicit conversion.
ExprResult PerformContextualImplicitConversion(
SourceLocation Loc, Expr *FromE, ContextualImplicitConverter &Converter);
/// Kind of Objective-C container being subscripted.
enum ObjCSubscriptKind {
OS_Array,
OS_Dictionary,
OS_Error
};
ObjCSubscriptKind CheckSubscriptingKind(Expr *FromE);
// Note that LK_String is intentionally after the other literals, as
// this is used for diagnostics logic.
enum ObjCLiteralKind {
LK_Array,
LK_Dictionary,
LK_Numeric,
LK_Boxed,
LK_String,
LK_Block,
LK_None
};
ObjCLiteralKind CheckLiteralKind(Expr *FromE);
ExprResult PerformObjectMemberConversion(Expr *From,
NestedNameSpecifier *Qualifier,
NamedDecl *FoundDecl,
NamedDecl *Member);
// Members have to be NamespaceDecl* or TranslationUnitDecl*.
// TODO: make this a typesafe union.
typedef llvm::SmallSetVector<DeclContext *, 16> AssociatedNamespaceSet;
typedef llvm::SmallSetVector<CXXRecordDecl *, 16> AssociatedClassSet;
using ADLCallKind = CallExpr::ADLCallKind;
// Overload-candidate-set population: each Add* routine appends zero or more
// candidates for the given declaration(s) to CandidateSet.
void AddOverloadCandidate(FunctionDecl *Function, DeclAccessPair FoundDecl,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
bool AllowExplicit = true,
bool AllowExplicitConversion = false,
ADLCallKind IsADLCandidate = ADLCallKind::NotADL,
ConversionSequenceList EarlyConversions = None,
OverloadCandidateParamOrder PO = {});
void AddFunctionCandidates(const UnresolvedSetImpl &Functions,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
bool FirstArgumentIsBase = false);
void AddMethodCandidate(DeclAccessPair FoundDecl,
QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversion = false,
OverloadCandidateParamOrder PO = {});
void AddMethodCandidate(CXXMethodDecl *Method,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
ConversionSequenceList EarlyConversions = None,
OverloadCandidateParamOrder PO = {});
void AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
TemplateArgumentListInfo *ExplicitTemplateArgs,
QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
OverloadCandidateParamOrder PO = {});
void AddTemplateOverloadCandidate(
FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl,
TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet, bool SuppressUserConversions = false,
bool PartialOverloading = false, bool AllowExplicit = true,
ADLCallKind IsADLCandidate = ADLCallKind::NotADL,
OverloadCandidateParamOrder PO = {});
bool CheckNonDependentConversions(
FunctionTemplateDecl *FunctionTemplate, ArrayRef<QualType> ParamTypes,
ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet,
ConversionSequenceList &Conversions, bool SuppressUserConversions,
CXXRecordDecl *ActingContext = nullptr, QualType ObjectType = QualType(),
Expr::Classification ObjectClassification = {},
OverloadCandidateParamOrder PO = {});
void AddConversionCandidate(
CXXConversionDecl *Conversion, DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, Expr *From, QualType ToType,
OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit,
bool AllowExplicit, bool AllowResultConversion = true);
void AddTemplateConversionCandidate(
FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, Expr *From, QualType ToType,
OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit,
bool AllowExplicit, bool AllowResultConversion = true);
void AddSurrogateCandidate(CXXConversionDecl *Conversion,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
const FunctionProtoType *Proto,
Expr *Object, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet);
void AddNonMemberOperatorCandidates(
const UnresolvedSetImpl &Functions, ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr);
void AddMemberOperatorCandidates(OverloadedOperatorKind Op,
SourceLocation OpLoc, ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
OverloadCandidateParamOrder PO = {});
void AddBuiltinCandidate(QualType *ParamTys, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool IsAssignmentOperator = false,
unsigned NumContextualBoolArguments = 0);
void AddBuiltinOperatorCandidates(OverloadedOperatorKind Op,
SourceLocation OpLoc, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet);
void AddArgumentDependentLookupCandidates(DeclarationName Name,
SourceLocation Loc,
ArrayRef<Expr *> Args,
TemplateArgumentListInfo *ExplicitTemplateArgs,
OverloadCandidateSet& CandidateSet,
bool PartialOverloading = false);
// Emit as a 'note' the specific overload candidate
void NoteOverloadCandidate(
NamedDecl *Found, FunctionDecl *Fn,
OverloadCandidateRewriteKind RewriteKind = OverloadCandidateRewriteKind(),
QualType DestType = QualType(), bool TakingAddress = false);
// Emit as a series of 'note's all template and non-templates identified by
// the expression Expr
void NoteAllOverloadCandidates(Expr *E, QualType DestType = QualType(),
bool TakingAddress = false);
/// Check the enable_if expressions on the given function. Returns the first
/// failing attribute, or NULL if they were all successful.
EnableIfAttr *CheckEnableIf(FunctionDecl *Function, SourceLocation CallLoc,
ArrayRef<Expr *> Args,
bool MissingImplicitThis = false);
/// Find the failed Boolean condition within a given Boolean
/// constant expression, and describe it with a string.
std::pair<Expr *, std::string> findFailedBooleanCondition(Expr *Cond);
/// Emit diagnostics for the diagnose_if attributes on Function, ignoring any
/// non-ArgDependent DiagnoseIfAttrs.
///
/// Argument-dependent diagnose_if attributes should be checked each time a
/// function is used as a direct callee of a function call.
///
/// Returns true if any errors were emitted.
bool diagnoseArgDependentDiagnoseIfAttrs(const FunctionDecl *Function,
const Expr *ThisArg,
ArrayRef<const Expr *> Args,
SourceLocation Loc);
/// Emit diagnostics for the diagnose_if attributes on Function, ignoring any
/// ArgDependent DiagnoseIfAttrs.
///
/// Argument-independent diagnose_if attributes should be checked on every use
/// of a function.
///
/// Returns true if any errors were emitted.
bool diagnoseArgIndependentDiagnoseIfAttrs(const NamedDecl *ND,
SourceLocation Loc);
/// Returns whether the given function's address can be taken or not,
/// optionally emitting a diagnostic if the address can't be taken.
///
/// Returns false if taking the address of the function is illegal.
bool checkAddressOfFunctionIsAvailable(const FunctionDecl *Function,
bool Complain = false,
SourceLocation Loc = SourceLocation());
// [PossiblyAFunctionType] --> [Return]
// NonFunctionType --> NonFunctionType
// R (A) --> R(A)
// R (*)(A) --> R (A)
// R (&)(A) --> R (A)
// R (S::*)(A) --> R (A)
QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType);
FunctionDecl *
ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr,
QualType TargetType,
bool Complain,
DeclAccessPair &Found,
bool *pHadMultipleCandidates = nullptr);
FunctionDecl *
resolveAddressOfSingleOverloadCandidate(Expr *E, DeclAccessPair &FoundResult);
bool resolveAndFixAddressOfSingleOverloadCandidate(
ExprResult &SrcExpr, bool DoFunctionPointerConversion = false);
FunctionDecl *
ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl,
bool Complain = false,
DeclAccessPair *Found = nullptr);
bool ResolveAndFixSingleFunctionTemplateSpecialization(
ExprResult &SrcExpr,
bool DoFunctionPointerConverion = false,
bool Complain = false,
SourceRange OpRangeForComplaining = SourceRange(),
QualType DestTypeForComplaining = QualType(),
unsigned DiagIDForComplaining = 0);
Expr *FixOverloadedFunctionReference(Expr *E,
DeclAccessPair FoundDecl,
FunctionDecl *Fn);
ExprResult FixOverloadedFunctionReference(ExprResult,
DeclAccessPair FoundDecl,
FunctionDecl *Fn);
void AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
bool PartialOverloading = false);
// An enum used to represent the different possible results of building a
// range-based for loop.
enum ForRangeStatus {
FRS_Success,
FRS_NoViableFunction,
FRS_DiagnosticIssued
};
ForRangeStatus BuildForRangeBeginEndCall(SourceLocation Loc,
SourceLocation RangeLoc,
const DeclarationNameInfo &NameInfo,
LookupResult &MemberLookup,
OverloadCandidateSet *CandidateSet,
Expr *Range, ExprResult *CallExpr);
// Entry points that build the final call/operator expressions once overload
// resolution has produced a candidate set.
ExprResult BuildOverloadedCallExpr(Scope *S, Expr *Fn,
UnresolvedLookupExpr *ULE,
SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc,
Expr *ExecConfig,
bool AllowTypoCorrection=true,
bool CalleesAddressIsTaken=false);
bool buildOverloadedCallSet(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE,
MultiExprArg Args, SourceLocation RParenLoc,
OverloadCandidateSet *CandidateSet,
ExprResult *Result);
ExprResult CreateUnresolvedLookupExpr(CXXRecordDecl *NamingClass,
NestedNameSpecifierLoc NNSLoc,
DeclarationNameInfo DNI,
const UnresolvedSetImpl &Fns,
bool PerformADL = true);
ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc,
UnaryOperatorKind Opc,
const UnresolvedSetImpl &Fns,
Expr *input, bool RequiresADL = true);
void LookupOverloadedBinOp(OverloadCandidateSet &CandidateSet,
OverloadedOperatorKind Op,
const UnresolvedSetImpl &Fns,
ArrayRef<Expr *> Args, bool RequiresADL = true);
ExprResult CreateOverloadedBinOp(SourceLocation OpLoc,
BinaryOperatorKind Opc,
const UnresolvedSetImpl &Fns,
Expr *LHS, Expr *RHS,
bool RequiresADL = true,
bool AllowRewrittenCandidates = true,
FunctionDecl *DefaultedFn = nullptr);
ExprResult BuildSynthesizedThreeWayComparison(SourceLocation OpLoc,
const UnresolvedSetImpl &Fns,
Expr *LHS, Expr *RHS,
FunctionDecl *DefaultedFn);
ExprResult CreateOverloadedArraySubscriptExpr(SourceLocation LLoc,
SourceLocation RLoc,
Expr *Base,Expr *Idx);
ExprResult
BuildCallToMemberFunction(Scope *S, Expr *MemExpr,
SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc);
ExprResult
BuildCallToObjectOfClassType(Scope *S, Expr *Object, SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc);
ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
bool *NoArrowOperatorFound = nullptr);
/// CheckCallReturnType - Checks that a call expression's return type is
/// complete. Returns true on failure. The location passed in is the location
/// that best represents the call.
bool CheckCallReturnType(QualType ReturnType, SourceLocation Loc,
CallExpr *CE, FunctionDecl *FD);
/// Helpers for dealing with blocks and functions.
bool CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters,
bool CheckParameterNames);
void CheckCXXDefaultArguments(FunctionDecl *FD);
void CheckExtraCXXDefaultArguments(Declarator &D);
Scope *getNonFieldDeclScope(Scope *S);
/// \name Name lookup
///
/// These routines provide name lookup that is used during semantic
/// analysis to resolve the various kinds of names (identifiers,
/// overloaded operator names, constructor names, etc.) into zero or
/// more declarations within a particular scope. The major entry
/// points are LookupName, which performs unqualified name lookup,
/// and LookupQualifiedName, which performs qualified name lookup.
///
/// All name lookup is performed based on some specific criteria,
/// which specify what names will be visible to name lookup and how
/// far name lookup should work. These criteria are important both
/// for capturing language semantics (certain lookups will ignore
/// certain names, for example) and for performance, since name
/// lookup is often a bottleneck in the compilation of C++. Name
/// lookup criteria is specified via the LookupCriteria enumeration.
///
/// The results of name lookup can vary based on the kind of name
/// lookup performed, the current language, and the translation
/// unit. In C, for example, name lookup will either return nothing
/// (no entity found) or a single declaration. In C++, name lookup
/// can additionally refer to a set of overloaded functions or
/// result in an ambiguity. All of the possible results of name
/// lookup are captured by the LookupResult class, which provides
/// the ability to distinguish among them.
//@{
/// Describes the kind of name lookup to perform.
enum LookupNameKind {
/// Ordinary name lookup, which finds ordinary names (functions,
/// variables, typedefs, etc.) in C and most kinds of names
/// (functions, variables, members, types, etc.) in C++.
LookupOrdinaryName = 0,
/// Tag name lookup, which finds the names of enums, classes,
/// structs, and unions.
LookupTagName,
/// Label name lookup.
LookupLabel,
/// Member name lookup, which finds the names of
/// class/struct/union members.
LookupMemberName,
/// Look up of an operator name (e.g., operator+) for use with
/// operator overloading. This lookup is similar to ordinary name
/// lookup, but will ignore any declarations that are class members.
LookupOperatorName,
/// Look up a name following ~ in a destructor name. This is an ordinary
/// lookup, but prefers tags to typedefs.
LookupDestructorName,
/// Look up of a name that precedes the '::' scope resolution
/// operator in C++. This lookup completely ignores operator, object,
/// function, and enumerator names (C++ [basic.lookup.qual]p1).
LookupNestedNameSpecifierName,
/// Look up a namespace name within a C++ using directive or
/// namespace alias definition, ignoring non-namespace names (C++
/// [basic.lookup.udir]p1).
LookupNamespaceName,
/// Look up all declarations in a scope with the given name,
/// including resolved using declarations. This is appropriate
/// for checking redeclarations for a using declaration.
LookupUsingDeclName,
/// Look up an ordinary name that is going to be redeclared as a
/// name with linkage. This lookup ignores any declarations that
/// are outside of the current scope unless they have linkage. See
/// C99 6.2.2p4-5 and C++ [basic.link]p6.
LookupRedeclarationWithLinkage,
/// Look up a friend of a local class. This lookup does not look
/// outside the innermost non-class scope. See C++11 [class.friend]p11.
LookupLocalFriendName,
/// Look up the name of an Objective-C protocol.
LookupObjCProtocolName,
/// Look up implicit 'self' parameter of an objective-c method.
LookupObjCImplicitSelfParam,
/// Look up the name of an OpenMP user-defined reduction operation.
LookupOMPReductionName,
/// Look up the name of an OpenMP user-defined mapper.
LookupOMPMapperName,
/// Look up any declaration with any name.
LookupAnyName
};
/// Specifies whether (or how) name lookup is being performed for a
/// redeclaration (vs. a reference).
enum RedeclarationKind {
/// The lookup is a reference to this name that is not for the
/// purpose of redeclaring the name.
NotForRedeclaration = 0,
/// The lookup results will be used for redeclaration of a name,
/// if an entity by that name already exists and is visible.
ForVisibleRedeclaration,
/// The lookup results will be used for redeclaration of a name
/// with external linkage; non-visible lookup results with external linkage
/// may also be found.
ForExternalRedeclaration
};
/// Pick the redeclaration-lookup kind appropriate for the current context.
RedeclarationKind forRedeclarationInCurContext() {
// A declaration with an owning module for linkage can never link against
// anything that is not visible. There is no need to check linkage here:
// if the context has internal linkage, redeclaration lookup will not find
// entities from other TUs, and linkage cannot yet be computed safely in
// general.
Decl *ContextDecl = cast<Decl>(CurContext);
if (ContextDecl->getOwningModuleForLinkage(/*IgnoreLinkage*/ true))
return ForVisibleRedeclaration;
return ForExternalRedeclaration;
}
/// The possible outcomes of name lookup for a literal operator.
enum LiteralOperatorLookupResult {
/// The lookup resulted in an error.
LOLR_Error,
/// The lookup found no match but no diagnostic was issued.
LOLR_ErrorNoDiagnostic,
/// The lookup found a single 'cooked' literal operator, which
/// expects a normal literal to be built and passed to it.
LOLR_Cooked,
/// The lookup found a single 'raw' literal operator, which expects
/// a string literal containing the spelling of the literal token.
LOLR_Raw,
/// The lookup found an overload set of literal operator templates,
/// which expect the characters of the spelling of the literal token to be
/// passed as a non-type template argument pack.
LOLR_Template,
/// The lookup found an overload set of literal operator templates,
/// which expect the character type and characters of the spelling of the
/// string literal token to be passed as template arguments.
LOLR_StringTemplate
};
/// Look up the special member function (constructor, destructor, assignment
/// operator, ...) of class \p D selected by \p SM and the given qualifiers.
SpecialMemberOverloadResult LookupSpecialMember(CXXRecordDecl *D,
CXXSpecialMember SM,
bool ConstArg,
bool VolatileArg,
bool RValueThis,
bool ConstThis,
bool VolatileThis);
// Callback invoked to emit the diagnostics for a chosen typo correction.
typedef std::function<void(const TypoCorrection &)> TypoDiagnosticGenerator;
// Callback invoked to rebuild an expression from an applied typo correction.
typedef std::function<ExprResult(Sema &, TypoExpr *, TypoCorrection)>
TypoRecoveryCallback;
private:
bool CppLookupName(LookupResult &R, Scope *S);
/// Per-TypoExpr bookkeeping: the consumer producing candidate corrections
/// plus the diagnostic and recovery callbacks to run when it is resolved.
struct TypoExprState {
std::unique_ptr<TypoCorrectionConsumer> Consumer;
TypoDiagnosticGenerator DiagHandler;
TypoRecoveryCallback RecoveryHandler;
TypoExprState();
TypoExprState(TypoExprState &&other) noexcept;
TypoExprState &operator=(TypoExprState &&other) noexcept;
};
/// The set of unhandled TypoExprs and their associated state.
llvm::MapVector<TypoExpr *, TypoExprState> DelayedTypos;
/// Creates a new TypoExpr AST node.
TypoExpr *createDelayedTypo(std::unique_ptr<TypoCorrectionConsumer> TCC,
TypoDiagnosticGenerator TDG,
TypoRecoveryCallback TRC, SourceLocation TypoLoc);
// The set of known/encountered (unique, canonicalized) NamespaceDecls.
//
// The boolean value will be true to indicate that the namespace was loaded
// from an AST/PCH file, or false otherwise.
llvm::MapVector<NamespaceDecl*, bool> KnownNamespaces;
/// Whether we have already loaded known namespaces from an external
/// source.
bool LoadedExternalKnownNamespaces;
/// Helper for CorrectTypo and CorrectTypoDelayed used to create and
/// populate a new TypoCorrectionConsumer. Returns nullptr if typo correction
/// should be skipped entirely.
std::unique_ptr<TypoCorrectionConsumer>
makeTypoCorrectionConsumer(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind, Scope *S,
CXXScopeSpec *SS,
CorrectionCandidateCallback &CCC,
DeclContext *MemberContext, bool EnteringContext,
const ObjCObjectPointerType *OPT,
bool ErrorRecovery);
public:
const TypoExprState &getTypoExprState(TypoExpr *TE) const;
/// Clears the state of the given TypoExpr.
void clearDelayedTypo(TypoExpr *TE);
/// Look up a name, looking for a single declaration. Return
/// null if the results were absent, ambiguous, or overloaded.
///
/// It is preferable to use the elaborated form and explicitly handle
/// ambiguity and overloaded.
NamedDecl *LookupSingleName(Scope *S, DeclarationName Name,
SourceLocation Loc,
LookupNameKind NameKind,
RedeclarationKind Redecl
= NotForRedeclaration);
bool LookupBuiltin(LookupResult &R);
void LookupNecessaryTypesForBuiltin(Scope *S, unsigned ID);
// Primary name-lookup entry points; results are returned through R.
bool LookupName(LookupResult &R, Scope *S,
bool AllowBuiltinCreation = false);
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
bool InUnqualifiedLookup = false);
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
CXXScopeSpec &SS);
bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS,
bool AllowBuiltinCreation = false,
bool EnteringContext = false);
ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc,
RedeclarationKind Redecl
= NotForRedeclaration);
bool LookupInSuper(LookupResult &R, CXXRecordDecl *Class);
void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S,
UnresolvedSetImpl &Functions);
LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation IdentLoc,
SourceLocation GnuLabelLoc = SourceLocation());
// Lookup of C++ special member functions for a class.
DeclContextLookupResult LookupConstructors(CXXRecordDecl *Class);
CXXConstructorDecl *LookupDefaultConstructor(CXXRecordDecl *Class);
CXXConstructorDecl *LookupCopyingConstructor(CXXRecordDecl *Class,
unsigned Quals);
CXXMethodDecl *LookupCopyingAssignment(CXXRecordDecl *Class, unsigned Quals,
bool RValueThis, unsigned ThisQuals);
CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class,
unsigned Quals);
CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class, unsigned Quals,
bool RValueThis, unsigned ThisQuals);
CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class);
bool checkLiteralOperatorId(const CXXScopeSpec &SS, const UnqualifiedId &Id);
LiteralOperatorLookupResult LookupLiteralOperator(Scope *S, LookupResult &R,
ArrayRef<QualType> ArgTys,
bool AllowRaw,
bool AllowTemplate,
bool AllowStringTemplate,
bool DiagnoseMissing);
bool isKnownName(StringRef name);
/// Status of the function emission on the CUDA/HIP/OpenMP host/device attrs.
enum class FunctionEmissionStatus {
Emitted,
CUDADiscarded, // Discarded due to CUDA/HIP hostness
OMPDiscarded, // Discarded due to OpenMP hostness
TemplateDiscarded, // Discarded due to uninstantiated templates
Unknown,
};
FunctionEmissionStatus getEmissionStatus(FunctionDecl *Decl,
bool Final = false);
// Whether the callee should be ignored in CUDA/HIP/OpenMP host/device check.
bool shouldIgnoreInHostDeviceCheck(FunctionDecl *Callee);
void ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc,
ArrayRef<Expr *> Args, ADLResult &Functions);
void LookupVisibleDecls(Scope *S, LookupNameKind Kind,
VisibleDeclConsumer &Consumer,
bool IncludeGlobalScope = true,
bool LoadExternal = true);
void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind,
VisibleDeclConsumer &Consumer,
bool IncludeGlobalScope = true,
bool IncludeDependentBases = false,
bool LoadExternal = true);
enum CorrectTypoKind {
CTK_NonError, // CorrectTypo used in a non error recovery situation.
CTK_ErrorRecovery // CorrectTypo used in normal error recovery.
};
TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind,
Scope *S, CXXScopeSpec *SS,
CorrectionCandidateCallback &CCC,
CorrectTypoKind Mode,
DeclContext *MemberContext = nullptr,
bool EnteringContext = false,
const ObjCObjectPointerType *OPT = nullptr,
bool RecordFailure = true);
TypoExpr *CorrectTypoDelayed(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind, Scope *S,
CXXScopeSpec *SS,
CorrectionCandidateCallback &CCC,
TypoDiagnosticGenerator TDG,
TypoRecoveryCallback TRC, CorrectTypoKind Mode,
DeclContext *MemberContext = nullptr,
bool EnteringContext = false,
const ObjCObjectPointerType *OPT = nullptr);
/// Process any TypoExprs in the given Expr and its children,
/// generating diagnostics as appropriate and returning a new Expr if there
/// were typos that were all successfully corrected and ExprError if one or
/// more typos could not be corrected.
///
/// \param E The Expr to check for TypoExprs.
///
/// \param InitDecl A VarDecl to avoid because the Expr being corrected is its
/// initializer.
///
/// \param RecoverUncorrectedTypos If true, when typo correction fails, it
/// will rebuild the given Expr with all TypoExprs degraded to RecoveryExprs.
///
/// \param Filter A function applied to a newly rebuilt Expr to determine if
/// it is an acceptable/usable result from a single combination of typo
/// corrections. As long as the filter returns ExprError, different
/// combinations of corrections will be tried until all are exhausted.
ExprResult CorrectDelayedTyposInExpr(
Expr *E, VarDecl *InitDecl = nullptr,
bool RecoverUncorrectedTypos = false,
llvm::function_ref<ExprResult(Expr *)> Filter =
[](Expr *E) -> ExprResult { return E; });
/// Convenience overload: processes any TypoExprs in \p ER. An invalid
/// ExprResult is passed through untouched; otherwise this delegates to the
/// Expr* overload with the same arguments.
ExprResult CorrectDelayedTyposInExpr(
    ExprResult ER, VarDecl *InitDecl = nullptr,
    bool RecoverUncorrectedTypos = false,
    llvm::function_ref<ExprResult(Expr *)> Filter =
        [](Expr *E) -> ExprResult { return E; }) {
  if (ER.isInvalid())
    return ER;
  return CorrectDelayedTyposInExpr(ER.get(), InitDecl, RecoverUncorrectedTypos,
                                   Filter);
}
void diagnoseTypo(const TypoCorrection &Correction,
const PartialDiagnostic &TypoDiag,
bool ErrorRecovery = true);
void diagnoseTypo(const TypoCorrection &Correction,
const PartialDiagnostic &TypoDiag,
const PartialDiagnostic &PrevNote,
bool ErrorRecovery = true);
void MarkTypoCorrectedFunctionDefinition(const NamedDecl *F);
void FindAssociatedClassesAndNamespaces(SourceLocation InstantiationLoc,
ArrayRef<Expr *> Args,
AssociatedNamespaceSet &AssociatedNamespaces,
AssociatedClassSet &AssociatedClasses);
void FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S,
bool ConsiderLinkage, bool AllowInlineNamespace);
bool CheckRedeclarationModuleOwnership(NamedDecl *New, NamedDecl *Old);
void DiagnoseAmbiguousLookup(LookupResult &Result);
//@}
/// Attempts to produce a RecoveryExpr after some AST node cannot be created.
ExprResult CreateRecoveryExpr(SourceLocation Begin, SourceLocation End,
ArrayRef<Expr *> SubExprs,
QualType T = QualType());
ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id,
SourceLocation IdLoc,
bool TypoCorrection = false);
FunctionDecl *CreateBuiltin(IdentifierInfo *II, QualType Type, unsigned ID,
SourceLocation Loc);
NamedDecl *LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID,
Scope *S, bool ForRedeclaration,
SourceLocation Loc);
NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II,
Scope *S);
void AddKnownFunctionAttributesForReplaceableGlobalAllocationFunction(
FunctionDecl *FD);
void AddKnownFunctionAttributes(FunctionDecl *FD);
// More parsing and symbol table subroutines.
void ProcessPragmaWeak(Scope *S, Decl *D);
// Decl attributes - this routine is the top level dispatcher.
void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD);
// Helper for delayed processing of attributes.
void ProcessDeclAttributeDelayed(Decl *D,
const ParsedAttributesView &AttrList);
void ProcessDeclAttributeList(Scope *S, Decl *D, const ParsedAttributesView &AL,
bool IncludeCXX11Attributes = true);
bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl,
const ParsedAttributesView &AttrList);
void checkUnusedDeclAttributes(Declarator &D);
/// Determine if type T is a valid subject for a nonnull and similar
/// attributes. By default, we look through references (the behavior used by
/// nonnull), but if the second parameter is true, then we treat a reference
/// type as valid.
bool isValidPointerAttrType(QualType T, bool RefOkay = false);
bool CheckRegparmAttr(const ParsedAttr &attr, unsigned &value);
bool CheckCallingConvAttr(const ParsedAttr &attr, CallingConv &CC,
const FunctionDecl *FD = nullptr);
bool CheckAttrTarget(const ParsedAttr &CurrAttr);
bool CheckAttrNoArgs(const ParsedAttr &CurrAttr);
bool checkStringLiteralArgumentAttr(const ParsedAttr &Attr, unsigned ArgNum,
StringRef &Str,
SourceLocation *ArgLocation = nullptr);
bool checkSectionName(SourceLocation LiteralLoc, StringRef Str);
bool checkTargetAttr(SourceLocation LiteralLoc, StringRef Str);
bool checkMSInheritanceAttrOnDefinition(
CXXRecordDecl *RD, SourceRange Range, bool BestCase,
MSInheritanceModel SemanticSpelling);
void CheckAlignasUnderalignment(Decl *D);
/// Adjust the calling convention of a method to be the ABI default if it
/// wasn't specified explicitly. This handles method types formed from
/// function type typedefs and typename template arguments.
void adjustMemberFunctionCC(QualType &T, bool IsStatic, bool IsCtorOrDtor,
SourceLocation Loc);
// Check if there is an explicit attribute, but only look through parens.
// The intent is to look for an attribute on the current declarator, but not
// one that came from a typedef.
bool hasExplicitCallingConv(QualType T);
/// Get the outermost AttributedType node that sets a calling convention.
/// Valid types should not have multiple attributes with different CCs.
const AttributedType *getCallingConvAttributedType(QualType T) const;
/// Stmt attributes - this routine is the top level dispatcher.
StmtResult ProcessStmtAttributes(Stmt *Stmt,
const ParsedAttributesView &Attrs,
SourceRange Range);
void WarnConflictingTypedMethods(ObjCMethodDecl *Method,
ObjCMethodDecl *MethodDecl,
bool IsProtocolMethodDecl);
void CheckConflictingOverridingMethod(ObjCMethodDecl *Method,
ObjCMethodDecl *Overridden,
bool IsProtocolMethodDecl);
/// WarnExactTypedMethods - This routine issues a warning if method
/// implementation declaration matches exactly that of its declaration.
void WarnExactTypedMethods(ObjCMethodDecl *Method,
ObjCMethodDecl *MethodDecl,
bool IsProtocolMethodDecl);
typedef llvm::SmallPtrSet<Selector, 8> SelectorSet;
/// CheckImplementationIvars - This routine checks if the instance variables
/// listed in the implementation match those listed in the interface.
void CheckImplementationIvars(ObjCImplementationDecl *ImpDecl,
ObjCIvarDecl **Fields, unsigned nIvars,
SourceLocation Loc);
/// ImplMethodsVsClassMethods - This is main routine to warn if any method
/// remains unimplemented in the class or category \@implementation.
void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl,
ObjCContainerDecl* IDecl,
bool IncompleteImpl = false);
/// DiagnoseUnimplementedProperties - This routine warns on those properties
/// which must be implemented by this implementation.
void DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl,
ObjCContainerDecl *CDecl,
bool SynthesizeProperties);
/// Diagnose any null-resettable synthesized setters.
void diagnoseNullResettableSynthesizedSetters(const ObjCImplDecl *impDecl);
/// DefaultSynthesizeProperties - This routine default synthesizes all
/// properties which must be synthesized in the class's \@implementation.
void DefaultSynthesizeProperties(Scope *S, ObjCImplDecl *IMPDecl,
ObjCInterfaceDecl *IDecl,
SourceLocation AtEnd);
void DefaultSynthesizeProperties(Scope *S, Decl *D, SourceLocation AtEnd);
/// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is
/// an ivar synthesized for 'Method' and 'Method' is a property accessor
/// declared in class 'IFace'.
bool IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace,
ObjCMethodDecl *Method, ObjCIvarDecl *IV);
/// DiagnoseUnusedBackingIvarInAccessor - Issue an 'unused' warning if ivar which
/// backs the property is not used in the property's accessor.
void DiagnoseUnusedBackingIvarInAccessor(Scope *S,
const ObjCImplementationDecl *ImplD);
/// GetIvarBackingPropertyAccessor - If method is a property setter/getter and
/// its property has a backing ivar, returns this ivar; otherwise, returns NULL.
/// It also returns ivar's property on success.
ObjCIvarDecl *GetIvarBackingPropertyAccessor(const ObjCMethodDecl *Method,
const ObjCPropertyDecl *&PDecl) const;
/// Called by ActOnProperty to handle \@property declarations in
/// class extensions.
ObjCPropertyDecl *HandlePropertyInClassExtension(Scope *S,
SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD,
Selector GetterSel,
SourceLocation GetterNameLoc,
Selector SetterSel,
SourceLocation SetterNameLoc,
const bool isReadWrite,
unsigned &Attributes,
const unsigned AttributesAsWritten,
QualType T,
TypeSourceInfo *TSI,
tok::ObjCKeywordKind MethodImplKind);
/// Called by ActOnProperty and HandlePropertyInClassExtension to
/// handle creating the ObjcPropertyDecl for a category or \@interface.
ObjCPropertyDecl *CreatePropertyDecl(Scope *S,
ObjCContainerDecl *CDecl,
SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD,
Selector GetterSel,
SourceLocation GetterNameLoc,
Selector SetterSel,
SourceLocation SetterNameLoc,
const bool isReadWrite,
const unsigned Attributes,
const unsigned AttributesAsWritten,
QualType T,
TypeSourceInfo *TSI,
tok::ObjCKeywordKind MethodImplKind,
DeclContext *lexicalDC = nullptr);
/// AtomicPropertySetterGetterRules - This routine enforces the rule (via
/// warning) when atomic property has one but not the other user-declared
/// setter or getter.
void AtomicPropertySetterGetterRules(ObjCImplDecl* IMPDecl,
ObjCInterfaceDecl* IDecl);
void DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D);
void DiagnoseMissingDesignatedInitOverrides(
const ObjCImplementationDecl *ImplD,
const ObjCInterfaceDecl *IFD);
void DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID);
enum MethodMatchStrategy {
MMS_loose,
MMS_strict
};
/// MatchTwoMethodDeclarations - Checks if two methods' type match and returns
/// true, or false, accordingly.
bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method,
const ObjCMethodDecl *PrevMethod,
MethodMatchStrategy strategy = MMS_strict);
/// MatchAllMethodDeclarations - Check methods declared in an interface
/// or protocol against those declared in their implementations.
void MatchAllMethodDeclarations(const SelectorSet &InsMap,
const SelectorSet &ClsMap,
SelectorSet &InsMapSeen,
SelectorSet &ClsMapSeen,
ObjCImplDecl* IMPDecl,
ObjCContainerDecl* IDecl,
bool &IncompleteImpl,
bool ImmediateClass,
bool WarnCategoryMethodImpl=false);
/// CheckCategoryVsClassMethodMatches - Checks that methods implemented in
/// category matches with those implemented in its primary class and
/// warns each time an exact match is found.
void CheckCategoryVsClassMethodMatches(ObjCCategoryImplDecl *CatIMP);
/// Add the given method to the list of globally-known methods.
void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method);
/// Returns default addr space for method qualifiers.
LangAS getDefaultCXXMethodAddrSpace() const;
private:
/// AddMethodToGlobalPool - Add an instance or factory method to the global
/// pool. See description of AddInstanceMethodToGlobalPool.
void AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl, bool instance);
/// LookupMethodInGlobalPool - Returns the instance or factory method and
/// optionally warns if there are multiple signatures.
ObjCMethodDecl *LookupMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass,
bool instance);
public:
/// - Returns instance or factory methods in global method pool for
/// given selector. It checks the desired kind first, if none is found, and
/// parameter checkTheOther is set, it then checks the other kind. If no such
/// method or only one method is found, function returns false; otherwise, it
/// returns true.
bool
CollectMultipleMethodsInGlobalPool(Selector Sel,
SmallVectorImpl<ObjCMethodDecl*>& Methods,
bool InstanceFirst, bool CheckTheOther,
const ObjCObjectType *TypeBound = nullptr);
bool
AreMultipleMethodsInGlobalPool(Selector Sel, ObjCMethodDecl *BestMethod,
SourceRange R, bool receiverIdOrClass,
SmallVectorImpl<ObjCMethodDecl*>& Methods);
void
DiagnoseMultipleMethodInGlobalPool(SmallVectorImpl<ObjCMethodDecl*> &Methods,
Selector Sel, SourceRange R,
bool receiverIdOrClass);
private:
/// - Returns a selector which best matches given argument list or
/// nullptr if none could be found
ObjCMethodDecl *SelectBestMethod(Selector Sel, MultiExprArg Args,
bool IsInstance,
SmallVectorImpl<ObjCMethodDecl*>& Methods);
/// Record the typo correction failure and return an empty correction.
TypoCorrection FailedCorrection(IdentifierInfo *Typo, SourceLocation TypoLoc,
                                bool RecordFailure = true) {
  if (!RecordFailure)
    return TypoCorrection();
  // Remember this (identifier, location) pair so we do not retry it.
  TypoCorrectionFailures[Typo].insert(TypoLoc);
  return TypoCorrection();
}
public:
/// AddInstanceMethodToGlobalPool - All instance methods in a translation
/// unit are added to a global pool. This allows us to efficiently associate
/// a selector with a method declaration for purposes of typechecking
/// messages sent to "id" (where the class of the object is unknown).
void AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method, bool impl = false) {
  AddMethodToGlobalPool(Method, impl, /*instance=*/true);
}
/// AddFactoryMethodToGlobalPool - Same as AddInstanceMethodToGlobalPool,
/// but for factory (class) methods.
void AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method, bool impl = false) {
  AddMethodToGlobalPool(Method, impl, /*instance=*/false);
}
/// AddAnyMethodToGlobalPool - Add any method, instance or factory to global
/// pool.
void AddAnyMethodToGlobalPool(Decl *D);
/// LookupInstanceMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *
LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R,
                                 bool receiverIdOrClass = false) {
  return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
                                  /*instance=*/true);
}
/// LookupFactoryMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *
LookupFactoryMethodInGlobalPool(Selector Sel, SourceRange R,
                                bool receiverIdOrClass = false) {
  return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
                                  /*instance=*/false);
}
const ObjCMethodDecl *SelectorsForTypoCorrection(Selector Sel,
QualType ObjectType=QualType());
/// LookupImplementedMethodInGlobalPool - Returns the method which has an
/// implementation.
ObjCMethodDecl *LookupImplementedMethodInGlobalPool(Selector Sel);
/// CollectIvarsToConstructOrDestruct - Collect those ivars which require
/// initialization.
void CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI,
SmallVectorImpl<ObjCIvarDecl*> &Ivars);
//===--------------------------------------------------------------------===//
// Statement Parsing Callbacks: SemaStmt.cpp.
public:
/// Wraps an expression that has already been processed as a full-expression,
/// for handing to the statement-building callbacks below.
class FullExprArg {
public:
  FullExprArg() : E(nullptr) {}
  FullExprArg(Sema &actions) : E(nullptr) {}

  /// Hand back the wrapped expression as an ExprResult.
  ExprResult release() { return E; }

  /// Direct access to the wrapped expression (may be null).
  Expr *get() const { return E; }

  Expr *operator->() { return E; }

private:
  // FIXME: No need to make the entire Sema class a friend when it's just
  // Sema::MakeFullExpr that needs access to the constructor below.
  friend class Sema;

  explicit FullExprArg(Expr *expr) : E(expr) {}

  Expr *E;
};
/// Build a FullExprArg from \p Arg, using the expression's own location
/// (if any) as the point of the full-expression.
FullExprArg MakeFullExpr(Expr *Arg) {
  SourceLocation Loc = Arg ? Arg->getExprLoc() : SourceLocation();
  return MakeFullExpr(Arg, Loc);
}
/// Build a FullExprArg from \p Arg, finishing the full-expression at \p CC
/// with its value retained (not discarded).
FullExprArg MakeFullExpr(Expr *Arg, SourceLocation CC) {
  ExprResult FE = ActOnFinishFullExpr(Arg, CC, /*DiscardedValue=*/false);
  return FullExprArg(FE.get());
}
/// Build a FullExprArg from \p Arg where the expression's value is discarded
/// (e.g. an expression statement).
FullExprArg MakeFullDiscardedValueExpr(Expr *Arg) {
  SourceLocation Loc = Arg ? Arg->getExprLoc() : SourceLocation();
  return FullExprArg(
      ActOnFinishFullExpr(Arg, Loc, /*DiscardedValue=*/true).get());
}
StmtResult ActOnExprStmt(ExprResult Arg, bool DiscardedValue = true);
StmtResult ActOnExprStmtError();
StmtResult ActOnNullStmt(SourceLocation SemiLoc,
bool HasLeadingEmptyMacro = false);
void ActOnStartOfCompoundStmt(bool IsStmtExpr);
void ActOnFinishOfCompoundStmt();
StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R,
ArrayRef<Stmt *> Elts, bool isStmtExpr);
/// A RAII object to enter scope of a compound statement.
/// The constructor opens the compound-statement scope; the destructor
/// closes it.
class CompoundScopeRAII {
public:
  CompoundScopeRAII(Sema &S, bool IsStmtExpr = false) : S(S) {
    S.ActOnStartOfCompoundStmt(IsStmtExpr);
  }

  ~CompoundScopeRAII() { S.ActOnFinishOfCompoundStmt(); }

private:
  Sema &S;
};
/// An RAII helper that pops a function scope on exit, unless disable() has
/// been called first.
struct FunctionScopeRAII {
  Sema &S;
  bool Active; // When false, the destructor does nothing.

  FunctionScopeRAII(Sema &S) : S(S), Active(true) {}

  ~FunctionScopeRAII() {
    if (Active)
      S.PopFunctionScopeInfo();
  }

  /// Cancel the pop; the scope stays pushed after destruction.
  void disable() { Active = false; }
};
StmtResult ActOnDeclStmt(DeclGroupPtrTy Decl,
SourceLocation StartLoc,
SourceLocation EndLoc);
void ActOnForEachDeclStmt(DeclGroupPtrTy Decl);
StmtResult ActOnForEachLValueExpr(Expr *E);
ExprResult ActOnCaseExpr(SourceLocation CaseLoc, ExprResult Val);
StmtResult ActOnCaseStmt(SourceLocation CaseLoc, ExprResult LHS,
SourceLocation DotDotDotLoc, ExprResult RHS,
SourceLocation ColonLoc);
void ActOnCaseStmtBody(Stmt *CaseStmt, Stmt *SubStmt);
StmtResult ActOnDefaultStmt(SourceLocation DefaultLoc,
SourceLocation ColonLoc,
Stmt *SubStmt, Scope *CurScope);
StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl,
SourceLocation ColonLoc, Stmt *SubStmt);
StmtResult ActOnAttributedStmt(SourceLocation AttrLoc,
ArrayRef<const Attr*> Attrs,
Stmt *SubStmt);
class ConditionResult;
StmtResult ActOnIfStmt(SourceLocation IfLoc, bool IsConstexpr,
SourceLocation LParenLoc, Stmt *InitStmt,
ConditionResult Cond, SourceLocation RParenLoc,
Stmt *ThenVal, SourceLocation ElseLoc, Stmt *ElseVal);
StmtResult BuildIfStmt(SourceLocation IfLoc, bool IsConstexpr,
SourceLocation LParenLoc, Stmt *InitStmt,
ConditionResult Cond, SourceLocation RParenLoc,
Stmt *ThenVal, SourceLocation ElseLoc, Stmt *ElseVal);
StmtResult ActOnStartOfSwitchStmt(SourceLocation SwitchLoc,
SourceLocation LParenLoc, Stmt *InitStmt,
ConditionResult Cond,
SourceLocation RParenLoc);
StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc,
Stmt *Switch, Stmt *Body);
StmtResult ActOnWhileStmt(SourceLocation WhileLoc, SourceLocation LParenLoc,
ConditionResult Cond, SourceLocation RParenLoc,
Stmt *Body);
StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body,
SourceLocation WhileLoc, SourceLocation CondLParen,
Expr *Cond, SourceLocation CondRParen);
StmtResult ActOnForStmt(SourceLocation ForLoc,
SourceLocation LParenLoc,
Stmt *First,
ConditionResult Second,
FullExprArg Third,
SourceLocation RParenLoc,
Stmt *Body);
ExprResult CheckObjCForCollectionOperand(SourceLocation forLoc,
Expr *collection);
StmtResult ActOnObjCForCollectionStmt(SourceLocation ForColLoc,
Stmt *First, Expr *collection,
SourceLocation RParenLoc);
StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body);
enum BuildForRangeKind {
/// Initial building of a for-range statement.
BFRK_Build,
/// Instantiation or recovery rebuild of a for-range statement. Don't
/// attempt any typo-correction.
BFRK_Rebuild,
/// Determining whether a for-range statement could be built. Avoid any
/// unnecessary or irreversible actions.
BFRK_Check
};
StmtResult ActOnCXXForRangeStmt(Scope *S, SourceLocation ForLoc,
SourceLocation CoawaitLoc,
Stmt *InitStmt,
Stmt *LoopVar,
SourceLocation ColonLoc, Expr *Collection,
SourceLocation RParenLoc,
BuildForRangeKind Kind);
StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc,
SourceLocation CoawaitLoc,
Stmt *InitStmt,
SourceLocation ColonLoc,
Stmt *RangeDecl, Stmt *Begin, Stmt *End,
Expr *Cond, Expr *Inc,
Stmt *LoopVarDecl,
SourceLocation RParenLoc,
BuildForRangeKind Kind);
StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body);
StmtResult ActOnGotoStmt(SourceLocation GotoLoc,
SourceLocation LabelLoc,
LabelDecl *TheDecl);
StmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc,
SourceLocation StarLoc,
Expr *DestExp);
StmtResult ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope);
StmtResult ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope);
void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
CapturedRegionKind Kind, unsigned NumParams);
typedef std::pair<StringRef, QualType> CapturedParamNameType;
void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
CapturedRegionKind Kind,
ArrayRef<CapturedParamNameType> Params,
unsigned OpenMPCaptureLevel = 0);
StmtResult ActOnCapturedRegionEnd(Stmt *S);
void ActOnCapturedRegionError();
RecordDecl *CreateCapturedStmtRecordDecl(CapturedDecl *&CD,
SourceLocation Loc,
unsigned NumParams);
enum CopyElisionSemanticsKind {
CES_Strict = 0,
CES_AllowParameters = 1,
CES_AllowDifferentTypes = 2,
CES_AllowExceptionVariables = 4,
CES_FormerDefault = (CES_AllowParameters),
CES_Default = (CES_AllowParameters | CES_AllowDifferentTypes),
CES_AsIfByStdMove = (CES_AllowParameters | CES_AllowDifferentTypes |
CES_AllowExceptionVariables),
};
VarDecl *getCopyElisionCandidate(QualType ReturnType, Expr *E,
CopyElisionSemanticsKind CESK);
bool isCopyElisionCandidate(QualType ReturnType, const VarDecl *VD,
CopyElisionSemanticsKind CESK);
StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp,
Scope *CurScope);
StmtResult BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp);
StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp);
StmtResult ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
bool IsVolatile, unsigned NumOutputs,
unsigned NumInputs, IdentifierInfo **Names,
MultiExprArg Constraints, MultiExprArg Exprs,
Expr *AsmString, MultiExprArg Clobbers,
unsigned NumLabels,
SourceLocation RParenLoc);
void FillInlineAsmIdentifierInfo(Expr *Res,
llvm::InlineAsmIdentifierInfo &Info);
ExprResult LookupInlineAsmIdentifier(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &Id,
bool IsUnevaluatedContext);
bool LookupInlineAsmField(StringRef Base, StringRef Member,
unsigned &Offset, SourceLocation AsmLoc);
ExprResult LookupInlineAsmVarDeclField(Expr *RefExpr, StringRef Member,
SourceLocation AsmLoc);
StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc,
ArrayRef<Token> AsmToks,
StringRef AsmString,
unsigned NumOutputs, unsigned NumInputs,
ArrayRef<StringRef> Constraints,
ArrayRef<StringRef> Clobbers,
ArrayRef<Expr*> Exprs,
SourceLocation EndLoc);
LabelDecl *GetOrCreateMSAsmLabel(StringRef ExternalLabelName,
SourceLocation Location,
bool AlwaysCreate);
VarDecl *BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType ExceptionType,
SourceLocation StartLoc,
SourceLocation IdLoc, IdentifierInfo *Id,
bool Invalid = false);
Decl *ActOnObjCExceptionDecl(Scope *S, Declarator &D);
StmtResult ActOnObjCAtCatchStmt(SourceLocation AtLoc, SourceLocation RParen,
Decl *Parm, Stmt *Body);
StmtResult ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body);
StmtResult ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try,
MultiStmtArg Catch, Stmt *Finally);
StmtResult BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw);
StmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw,
Scope *CurScope);
ExprResult ActOnObjCAtSynchronizedOperand(SourceLocation atLoc,
Expr *operand);
StmtResult ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc,
Expr *SynchExpr,
Stmt *SynchBody);
StmtResult ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body);
VarDecl *BuildExceptionDeclaration(Scope *S, TypeSourceInfo *TInfo,
SourceLocation StartLoc,
SourceLocation IdLoc,
IdentifierInfo *Id);
Decl *ActOnExceptionDeclarator(Scope *S, Declarator &D);
StmtResult ActOnCXXCatchBlock(SourceLocation CatchLoc,
Decl *ExDecl, Stmt *HandlerBlock);
StmtResult ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock,
ArrayRef<Stmt *> Handlers);
StmtResult ActOnSEHTryBlock(bool IsCXXTry, // try (true) or __try (false) ?
SourceLocation TryLoc, Stmt *TryBlock,
Stmt *Handler);
StmtResult ActOnSEHExceptBlock(SourceLocation Loc,
Expr *FilterExpr,
Stmt *Block);
void ActOnStartSEHFinallyBlock();
void ActOnAbortSEHFinallyBlock();
StmtResult ActOnFinishSEHFinallyBlock(SourceLocation Loc, Stmt *Block);
StmtResult ActOnSEHLeaveStmt(SourceLocation Loc, Scope *CurScope);
void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock);
bool ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const;
/// If it's a file scoped decl that must warn if not used, keep track
/// of it.
void MarkUnusedFileScopedDecl(const DeclaratorDecl *D);
/// DiagnoseUnusedExprResult - If the statement passed in is an expression
/// whose result is unused, warn.
void DiagnoseUnusedExprResult(const Stmt *S);
void DiagnoseUnusedNestedTypedefs(const RecordDecl *D);
void DiagnoseUnusedDecl(const NamedDecl *ND);
/// Emit \p DiagID if statement located on \p StmtLoc has a suspicious null
/// statement as a \p Body, and it is located on the same line.
///
/// This helps prevent bugs due to typos, such as:
/// if (condition);
/// do_stuff();
void DiagnoseEmptyStmtBody(SourceLocation StmtLoc,
const Stmt *Body,
unsigned DiagID);
/// Warn if a for/while loop statement \p S, which is followed by
/// \p PossibleBody, has a suspicious null statement as a body.
void DiagnoseEmptyLoopBody(const Stmt *S,
const Stmt *PossibleBody);
/// Warn if a value is moved to itself.
void DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr,
SourceLocation OpLoc);
/// Warn if we're implicitly casting from a _Nullable pointer type to a
/// _Nonnull one.
void diagnoseNullableToNonnullConversion(QualType DstType, QualType SrcType,
SourceLocation Loc);
/// Warn when implicitly casting 0 to nullptr.
void diagnoseZeroToNullptrConversion(CastKind Kind, const Expr *E);
/// Begin delayed-diagnostic bookkeeping for a declaration being parsed,
/// directing delayed diagnostics into \p pool. Balance with
/// PopParsingDeclaration.
ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) {
return DelayedDiagnostics.push(pool);
}
void PopParsingDeclaration(ParsingDeclState state, Decl *decl);
typedef ProcessingContextState ParsingClassState;
/// Note that we are entering a class definition during parsing; suspends
/// diagnostic delaying. Balance with PopParsingClass.
ParsingClassState PushParsingClass() {
  ++ParsingClassDepth;
  return DelayedDiagnostics.pushUndelayed();
}
/// Leave the class definition entered by the matching PushParsingClass,
/// restoring the saved delayed-diagnostic state.
void PopParsingClass(ParsingClassState state) {
  --ParsingClassDepth;
  DelayedDiagnostics.popUndelayed(state);
}
void redelayDiagnostics(sema::DelayedDiagnosticPool &pool);
void DiagnoseAvailabilityOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
const ObjCInterfaceDecl *UnknownObjCClass,
bool ObjCPropertyAccess,
bool AvoidPartialAvailabilityChecks = false,
ObjCInterfaceDecl *ClassReceiver = nullptr);
bool makeUnavailableInSystemHeader(SourceLocation loc,
UnavailableAttr::ImplicitReason reason);
/// Issue any -Wunguarded-availability warnings in \c FD
void DiagnoseUnguardedAvailabilityViolations(Decl *FD);
void handleDelayedAvailabilityCheck(sema::DelayedDiagnostic &DD, Decl *Ctx);
//===--------------------------------------------------------------------===//
// Expression Parsing Callbacks: SemaExpr.cpp.
bool CanUseDecl(NamedDecl *D, bool TreatUnavailableAsInvalid);
bool DiagnoseUseOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
const ObjCInterfaceDecl *UnknownObjCClass = nullptr,
bool ObjCPropertyAccess = false,
bool AvoidPartialAvailabilityChecks = false,
ObjCInterfaceDecl *ClassReciever = nullptr);
void NoteDeletedFunction(FunctionDecl *FD);
void NoteDeletedInheritingConstructor(CXXConstructorDecl *CD);
bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD,
ObjCMethodDecl *Getter,
SourceLocation Loc);
void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc,
ArrayRef<Expr *> Args);
void PushExpressionEvaluationContext(
ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr,
ExpressionEvaluationContextRecord::ExpressionKind Type =
ExpressionEvaluationContextRecord::EK_Other);
enum ReuseLambdaContextDecl_t { ReuseLambdaContextDecl };
void PushExpressionEvaluationContext(
ExpressionEvaluationContext NewContext, ReuseLambdaContextDecl_t,
ExpressionEvaluationContextRecord::ExpressionKind Type =
ExpressionEvaluationContextRecord::EK_Other);
void PopExpressionEvaluationContext();
void DiscardCleanupsInEvaluationContext();
ExprResult TransformToPotentiallyEvaluated(Expr *E);
ExprResult HandleExprEvaluationContextForTypeof(Expr *E);
ExprResult CheckUnevaluatedOperand(Expr *E);
void CheckUnusedVolatileAssignment(Expr *E);
ExprResult ActOnConstantExpression(ExprResult Res);
// Functions for marking a declaration referenced. These functions also
// contain the relevant logic for marking if a reference to a function or
// variable is an odr-use (in the C++11 sense). There are separate variants
// for expressions referring to a decl; these exist because odr-use marking
// needs to be delayed for some constant variables when we build one of the
// named expressions.
//
// MightBeOdrUse indicates whether the use could possibly be an odr-use, and
// should usually be true. This only needs to be set to false if the lack of
// odr-use cannot be determined from the current context (for instance,
// because the name denotes a virtual function and was written without an
// explicit nested-name-specifier).
void MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool MightBeOdrUse);
void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func,
bool MightBeOdrUse = true);
void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var);
void MarkDeclRefReferenced(DeclRefExpr *E, const Expr *Base = nullptr);
void MarkMemberReferenced(MemberExpr *E);
void MarkFunctionParmPackReferenced(FunctionParmPackExpr *E);
void MarkCaptureUsedInEnclosingContext(VarDecl *Capture, SourceLocation Loc,
unsigned CapturingScopeIndex);
ExprResult CheckLValueToRValueConversionOperand(Expr *E);
void CleanupVarDeclMarking();
enum TryCaptureKind {
TryCapture_Implicit, TryCapture_ExplicitByVal, TryCapture_ExplicitByRef
};
/// Try to capture the given variable.
///
/// \param Var The variable to capture.
///
/// \param Loc The location at which the capture occurs.
///
/// \param Kind The kind of capture, which may be implicit (for either a
/// block or a lambda), or explicit by-value or by-reference (for a lambda).
///
/// \param EllipsisLoc The location of the ellipsis, if one is provided in
/// an explicit lambda capture.
///
/// \param BuildAndDiagnose Whether we are actually supposed to add the
/// captures or diagnose errors. If false, this routine merely check whether
/// the capture can occur without performing the capture itself or complaining
/// if the variable cannot be captured.
///
/// \param CaptureType Will be set to the type of the field used to capture
/// this variable in the innermost block or lambda. Only valid when the
/// variable can be captured.
///
/// \param DeclRefType Will be set to the type of a reference to the capture
/// from within the current scope. Only valid when the variable can be
/// captured.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// variables that may or may not be used in certain specializations of
/// a nested generic lambda.
///
/// \returns true if an error occurred (i.e., the variable cannot be
/// captured) and false if the capture succeeded.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind,
SourceLocation EllipsisLoc, bool BuildAndDiagnose,
QualType &CaptureType,
QualType &DeclRefType,
const unsigned *const FunctionScopeIndexToStopAt);
/// Try to capture the given variable.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc,
TryCaptureKind Kind = TryCapture_Implicit,
SourceLocation EllipsisLoc = SourceLocation());
/// Checks if the variable must be captured.
bool NeedToCaptureVariable(VarDecl *Var, SourceLocation Loc);
/// Given a variable, determine the type that a reference to that
/// variable will have in the given scope.
QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc);
/// Mark all of the declarations referenced within a particular AST node as
/// referenced. Used when template instantiation instantiates a non-dependent
/// type -- entities referenced by the type are now referenced.
void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T);
void MarkDeclarationsReferencedInExpr(Expr *E,
bool SkipLocalVariables = false);
/// Try to recover by turning the given expression into a
/// call. Returns true if recovery was attempted or an error was
/// emitted; this may also leave the ExprResult invalid.
bool tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD,
bool ForceComplain = false,
bool (*IsPlausibleResult)(QualType) = nullptr);
/// Figure out if an expression could be turned into a call.
bool tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy,
UnresolvedSetImpl &NonTemplateOverloads);
/// Try to convert an expression \p E to type \p Ty. Returns the result of the
/// conversion.
ExprResult tryConvertExprToType(Expr *E, QualType Ty);
/// Conditionally issue a diagnostic based on the current
/// evaluation context.
///
/// \param Statement If Statement is non-null, delay reporting the
/// diagnostic until the function body is parsed, and then do a basic
/// reachability analysis to determine if the statement is reachable.
/// If it is unreachable, the diagnostic will not be emitted.
bool DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement,
const PartialDiagnostic &PD);
/// Similar, but diagnostic is only produced if all the specified statements
/// are reachable.
bool DiagRuntimeBehavior(SourceLocation Loc, ArrayRef<const Stmt*> Stmts,
const PartialDiagnostic &PD);
// Primary Expressions.
SourceRange getExprRange(Expr *E) const;
ExprResult ActOnIdExpression(
Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
UnqualifiedId &Id, bool HasTrailingLParen, bool IsAddressOfOperand,
CorrectionCandidateCallback *CCC = nullptr,
bool IsInlineAsmIdentifier = false, Token *KeywordReplacement = nullptr);
void DecomposeUnqualifiedId(const UnqualifiedId &Id,
TemplateArgumentListInfo &Buffer,
DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *&TemplateArgs);
bool
DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R,
CorrectionCandidateCallback &CCC,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
ArrayRef<Expr *> Args = None, TypoExpr **Out = nullptr);
DeclResult LookupIvarInObjCMethod(LookupResult &Lookup, Scope *S,
IdentifierInfo *II);
ExprResult BuildIvarRefExpr(Scope *S, SourceLocation Loc, ObjCIvarDecl *IV);
ExprResult LookupInObjCMethod(LookupResult &LookUp, Scope *S,
IdentifierInfo *II,
bool AllowBuiltinCreation=false);
ExprResult ActOnDependentIdExpression(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
bool isAddressOfOperand,
const TemplateArgumentListInfo *TemplateArgs);
/// If \p D cannot be odr-used in the current expression evaluation context,
/// return a reason explaining why. Otherwise, return NOUR_None.
NonOdrUseReason getNonOdrUseReasonInCurrentContext(ValueDecl *D);
DeclRefExpr *BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
SourceLocation Loc,
const CXXScopeSpec *SS = nullptr);
DeclRefExpr *
BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
const DeclarationNameInfo &NameInfo,
const CXXScopeSpec *SS = nullptr,
NamedDecl *FoundD = nullptr,
SourceLocation TemplateKWLoc = SourceLocation(),
const TemplateArgumentListInfo *TemplateArgs = nullptr);
DeclRefExpr *
BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
const DeclarationNameInfo &NameInfo,
NestedNameSpecifierLoc NNS,
NamedDecl *FoundD = nullptr,
SourceLocation TemplateKWLoc = SourceLocation(),
const TemplateArgumentListInfo *TemplateArgs = nullptr);
ExprResult
BuildAnonymousStructUnionMemberReference(
const CXXScopeSpec &SS,
SourceLocation nameLoc,
IndirectFieldDecl *indirectField,
DeclAccessPair FoundDecl = DeclAccessPair::make(nullptr, AS_none),
Expr *baseObjectExpr = nullptr,
SourceLocation opLoc = SourceLocation());
ExprResult BuildPossibleImplicitMemberExpr(
const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs, const Scope *S,
UnresolvedLookupExpr *AsULE = nullptr);
ExprResult BuildImplicitMemberExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
bool IsDefiniteInstance,
const Scope *S);
bool UseArgumentDependentLookup(const CXXScopeSpec &SS,
const LookupResult &R,
bool HasTrailingLParen);
ExprResult
BuildQualifiedDeclarationNameExpr(CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
bool IsAddressOfOperand, const Scope *S,
TypeSourceInfo **RecoveryTSI = nullptr);
ExprResult BuildDependentDeclRefExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS,
LookupResult &R,
bool NeedsADL,
bool AcceptInvalidDecl = false);
ExprResult BuildDeclarationNameExpr(
const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, NamedDecl *D,
NamedDecl *FoundD = nullptr,
const TemplateArgumentListInfo *TemplateArgs = nullptr,
bool AcceptInvalidDecl = false);
ExprResult BuildLiteralOperatorCall(LookupResult &R,
DeclarationNameInfo &SuffixInfo,
ArrayRef<Expr *> Args,
SourceLocation LitEndLoc,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr);
ExprResult BuildPredefinedExpr(SourceLocation Loc,
PredefinedExpr::IdentKind IK);
ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind);
ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val);
bool CheckLoopHintExpr(Expr *E, SourceLocation Loc);
ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = nullptr);
ExprResult ActOnCharacterConstant(const Token &Tok,
Scope *UDLScope = nullptr);
ExprResult ActOnParenExpr(SourceLocation L, SourceLocation R, Expr *E);
ExprResult ActOnParenListExpr(SourceLocation L,
SourceLocation R,
MultiExprArg Val);
/// ActOnStringLiteral - The specified tokens were lexed as pasted string
/// fragments (e.g. "foo" "bar" L"baz").
ExprResult ActOnStringLiteral(ArrayRef<Token> StringToks,
Scope *UDLScope = nullptr);
ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc,
SourceLocation DefaultLoc,
SourceLocation RParenLoc,
Expr *ControllingExpr,
ArrayRef<ParsedType> ArgTypes,
ArrayRef<Expr *> ArgExprs);
ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc,
SourceLocation DefaultLoc,
SourceLocation RParenLoc,
Expr *ControllingExpr,
ArrayRef<TypeSourceInfo *> Types,
ArrayRef<Expr *> Exprs);
// Binary/Unary Operators. 'Tok' is the token for the operator.
ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc,
Expr *InputExpr);
ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc,
UnaryOperatorKind Opc, Expr *Input);
ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc,
tok::TokenKind Op, Expr *Input);
bool isQualifiedMemberAccess(Expr *E);
QualType CheckAddressOfOperand(ExprResult &Operand, SourceLocation OpLoc);
ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo,
SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind,
SourceRange R);
ExprResult CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind);
ExprResult
ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind,
bool IsType, void *TyOrEx,
SourceRange ArgRange);
ExprResult CheckPlaceholderExpr(Expr *E);
bool CheckVecStepExpr(Expr *E);
bool CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind);
bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation OpLoc,
SourceRange ExprRange,
UnaryExprOrTypeTrait ExprKind);
ExprResult ActOnSizeofParameterPackExpr(Scope *S,
SourceLocation OpLoc,
IdentifierInfo &Name,
SourceLocation NameLoc,
SourceLocation RParenLoc);
ExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc,
tok::TokenKind Kind, Expr *Input);
ExprResult ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc,
Expr *Idx, SourceLocation RLoc);
ExprResult CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc,
Expr *Idx, SourceLocation RLoc);
ExprResult CreateBuiltinMatrixSubscriptExpr(Expr *Base, Expr *RowIdx,
Expr *ColumnIdx,
SourceLocation RBLoc);
ExprResult ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc,
Expr *LowerBound,
SourceLocation ColonLocFirst,
SourceLocation ColonLocSecond,
Expr *Length, Expr *Stride,
SourceLocation RBLoc);
ExprResult ActOnOMPArrayShapingExpr(Expr *Base, SourceLocation LParenLoc,
SourceLocation RParenLoc,
ArrayRef<Expr *> Dims,
ArrayRef<SourceRange> Brackets);
/// Data structure for iterator expression.
struct OMPIteratorData {
// Name of the iterator variable being declared (null if absent).
IdentifierInfo *DeclIdent = nullptr;
// Location of that identifier.
SourceLocation DeclIdentLoc;
// Parsed type of the iterator variable.
ParsedType Type;
// The range expressions for the iterator (presumably begin:end[:step],
// matching the two colon locations below — confirm against the OpenMP
// iterator grammar).
OMPIteratorExpr::IteratorRange Range;
// Location of the '=' in the iterator definition.
SourceLocation AssignLoc;
// Location of the first ':' in the range.
SourceLocation ColonLoc;
// Location of the second ':' in the range, if present.
SourceLocation SecColonLoc;
};
ExprResult ActOnOMPIteratorExpr(Scope *S, SourceLocation IteratorKwLoc,
SourceLocation LLoc, SourceLocation RLoc,
ArrayRef<OMPIteratorData> Data);
// This struct is for use by ActOnMemberAccess to allow
// BuildMemberReferenceExpr to be able to reinvoke ActOnMemberAccess after
// changing the access operator from a '.' to a '->' (to see if that is the
// change needed to fix an error about an unknown member, e.g. when the class
// defines a custom operator->).
struct ActOnMemberAccessExtraArgs {
// Scope in which the original member access appeared.
Scope *S;
// The member name as originally written.
UnqualifiedId &Id;
// Objective-C implementation decl from the original access (see the
// ObjCImpDecl parameter of ActOnMemberAccessExpr).
Decl *ObjCImpDecl;
};
ExprResult BuildMemberReferenceExpr(
Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow,
CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S,
ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);
ExprResult
BuildMemberReferenceExpr(Expr *Base, QualType BaseType, SourceLocation OpLoc,
bool IsArrow, const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope, LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S,
bool SuppressQualifierCheck = false,
ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);
ExprResult BuildFieldReferenceExpr(Expr *BaseExpr, bool IsArrow,
SourceLocation OpLoc,
const CXXScopeSpec &SS, FieldDecl *Field,
DeclAccessPair FoundDecl,
const DeclarationNameInfo &MemberNameInfo);
ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow);
bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType BaseType,
const CXXScopeSpec &SS,
const LookupResult &R);
ExprResult ActOnDependentMemberExpr(Expr *Base, QualType BaseType,
bool IsArrow, SourceLocation OpLoc,
const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult ActOnMemberAccessExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &Member,
Decl *ObjCImpDecl);
MemberExpr *
BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc,
const CXXScopeSpec *SS, SourceLocation TemplateKWLoc,
ValueDecl *Member, DeclAccessPair FoundDecl,
bool HadMultipleCandidates,
const DeclarationNameInfo &MemberNameInfo, QualType Ty,
ExprValueKind VK, ExprObjectKind OK,
const TemplateArgumentListInfo *TemplateArgs = nullptr);
MemberExpr *
BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc,
NestedNameSpecifierLoc NNS, SourceLocation TemplateKWLoc,
ValueDecl *Member, DeclAccessPair FoundDecl,
bool HadMultipleCandidates,
const DeclarationNameInfo &MemberNameInfo, QualType Ty,
ExprValueKind VK, ExprObjectKind OK,
const TemplateArgumentListInfo *TemplateArgs = nullptr);
void ActOnDefaultCtorInitializers(Decl *CDtorDecl);
bool ConvertArgumentsForCall(CallExpr *Call, Expr *Fn,
FunctionDecl *FDecl,
const FunctionProtoType *Proto,
ArrayRef<Expr *> Args,
SourceLocation RParenLoc,
bool ExecConfig = false);
void CheckStaticArrayArgument(SourceLocation CallLoc,
ParmVarDecl *Param,
const Expr *ArgExpr);
/// ActOnCallExpr - Handle a call to Fn with the specified array of arguments.
/// This provides the location of the left/right parens and a list of comma
/// locations.
ExprResult ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
MultiExprArg ArgExprs, SourceLocation RParenLoc,
Expr *ExecConfig = nullptr);
ExprResult BuildCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
MultiExprArg ArgExprs, SourceLocation RParenLoc,
Expr *ExecConfig = nullptr,
bool IsExecConfig = false);
// How the operands handed to BuildAtomicExpr are ordered: as the builtin
// call's API specifies them, or as they are stored in the AST node.
enum class AtomicArgumentOrder { API, AST };
ExprResult
BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange,
SourceLocation RParenLoc, MultiExprArg Args,
AtomicExpr::AtomicOp Op,
AtomicArgumentOrder ArgOrder = AtomicArgumentOrder::API);
ExprResult
BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl, SourceLocation LParenLoc,
ArrayRef<Expr *> Arg, SourceLocation RParenLoc,
Expr *Config = nullptr, bool IsExecConfig = false,
ADLCallKind UsesADL = ADLCallKind::NotADL);
ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc,
MultiExprArg ExecConfig,
SourceLocation GGGLoc);
ExprResult ActOnCastExpr(Scope *S, SourceLocation LParenLoc,
Declarator &D, ParsedType &Ty,
SourceLocation RParenLoc, Expr *CastExpr);
ExprResult BuildCStyleCastExpr(SourceLocation LParenLoc,
TypeSourceInfo *Ty,
SourceLocation RParenLoc,
Expr *Op);
CastKind PrepareScalarCast(ExprResult &src, QualType destType);
/// Build an altivec or OpenCL literal.
ExprResult BuildVectorLiteral(SourceLocation LParenLoc,
SourceLocation RParenLoc, Expr *E,
TypeSourceInfo *TInfo);
ExprResult MaybeConvertParenListExprToParenExpr(Scope *S, Expr *ME);
ExprResult ActOnCompoundLiteral(SourceLocation LParenLoc,
ParsedType Ty,
SourceLocation RParenLoc,
Expr *InitExpr);
ExprResult BuildCompoundLiteralExpr(SourceLocation LParenLoc,
TypeSourceInfo *TInfo,
SourceLocation RParenLoc,
Expr *LiteralExpr);
ExprResult ActOnInitList(SourceLocation LBraceLoc,
MultiExprArg InitArgList,
SourceLocation RBraceLoc);
ExprResult BuildInitList(SourceLocation LBraceLoc,
MultiExprArg InitArgList,
SourceLocation RBraceLoc);
ExprResult ActOnDesignatedInitializer(Designation &Desig,
SourceLocation EqualOrColonLoc,
bool GNUSyntax,
ExprResult Init);
private:
static BinaryOperatorKind ConvertTokenKindToBinaryOpcode(tok::TokenKind Kind);
public:
ExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc,
tok::TokenKind Kind, Expr *LHSExpr, Expr *RHSExpr);
ExprResult BuildBinOp(Scope *S, SourceLocation OpLoc,
BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr);
ExprResult CreateBuiltinBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc,
Expr *LHSExpr, Expr *RHSExpr);
void LookupBinOp(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opc,
UnresolvedSetImpl &Functions);
void DiagnoseCommaOperator(const Expr *LHS, SourceLocation Loc);
/// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null
/// in the case of the GNU conditional expr extension.
ExprResult ActOnConditionalOp(SourceLocation QuestionLoc,
SourceLocation ColonLoc,
Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr);
/// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo".
ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc,
LabelDecl *TheDecl);
void ActOnStartStmtExpr();
ExprResult ActOnStmtExpr(Scope *S, SourceLocation LPLoc, Stmt *SubStmt,
SourceLocation RPLoc);
ExprResult BuildStmtExpr(SourceLocation LPLoc, Stmt *SubStmt,
SourceLocation RPLoc, unsigned TemplateDepth);
// Handle the final expression in a statement expression.
ExprResult ActOnStmtExprResult(ExprResult E);
void ActOnStmtExprError();
// __builtin_offsetof(type, identifier(.identifier|[expr])*)
struct OffsetOfComponent {
// Source range covered by this component of the offsetof designator.
SourceLocation LocStart, LocEnd;
bool isBrackets; // true if [expr], false if .ident
// Either the member name or the index expression, selected by isBrackets.
union {
IdentifierInfo *IdentInfo;
Expr *E;
} U;
};
/// __builtin_offsetof(type, a.b[123][456].c)
ExprResult BuildBuiltinOffsetOf(SourceLocation BuiltinLoc,
TypeSourceInfo *TInfo,
ArrayRef<OffsetOfComponent> Components,
SourceLocation RParenLoc);
ExprResult ActOnBuiltinOffsetOf(Scope *S,
SourceLocation BuiltinLoc,
SourceLocation TypeLoc,
ParsedType ParsedArgTy,
ArrayRef<OffsetOfComponent> Components,
SourceLocation RParenLoc);
// __builtin_choose_expr(constExpr, expr1, expr2)
ExprResult ActOnChooseExpr(SourceLocation BuiltinLoc,
Expr *CondExpr, Expr *LHSExpr,
Expr *RHSExpr, SourceLocation RPLoc);
// __builtin_va_arg(expr, type)
ExprResult ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty,
SourceLocation RPLoc);
ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E,
TypeSourceInfo *TInfo, SourceLocation RPLoc);
// __builtin_LINE(), __builtin_FUNCTION(), __builtin_FILE(),
// __builtin_COLUMN()
ExprResult ActOnSourceLocExpr(SourceLocExpr::IdentKind Kind,
SourceLocation BuiltinLoc,
SourceLocation RPLoc);
// Build a potentially resolved SourceLocExpr.
ExprResult BuildSourceLocExpr(SourceLocExpr::IdentKind Kind,
SourceLocation BuiltinLoc, SourceLocation RPLoc,
DeclContext *ParentContext);
// __null
ExprResult ActOnGNUNullExpr(SourceLocation TokenLoc);
bool CheckCaseExpression(Expr *E);
/// Describes the result of an "if-exists" condition check.
/// Returned by the Microsoft __if_exists/__if_not_exists checks
/// (CheckMicrosoftIfExistsSymbol below).
enum IfExistsResult {
/// The symbol exists.
IER_Exists,
/// The symbol does not exist.
IER_DoesNotExist,
/// The name is a dependent name, so the results will differ
/// from one instantiation to the next.
IER_Dependent,
/// An error occurred.
IER_Error
};
IfExistsResult
CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS,
const DeclarationNameInfo &TargetNameInfo);
IfExistsResult
CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc,
bool IsIfExists, CXXScopeSpec &SS,
UnqualifiedId &Name);
StmtResult BuildMSDependentExistsStmt(SourceLocation KeywordLoc,
bool IsIfExists,
NestedNameSpecifierLoc QualifierLoc,
DeclarationNameInfo NameInfo,
Stmt *Nested);
StmtResult ActOnMSDependentExistsStmt(SourceLocation KeywordLoc,
bool IsIfExists,
CXXScopeSpec &SS, UnqualifiedId &Name,
Stmt *Nested);
//===------------------------- "Block" Extension ------------------------===//
/// ActOnBlockStart - This callback is invoked when a block literal is
/// started.
void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope);
/// ActOnBlockArguments - This callback allows processing of block arguments.
/// If there are no arguments, this is still invoked.
void ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo,
Scope *CurScope);
/// ActOnBlockError - If there is an error parsing a block, this callback
/// is invoked to pop the information about the block from the action impl.
void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope);
/// ActOnBlockStmtExpr - This is called when the body of a block statement
/// literal was successfully completed. ^(int x){...}
ExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body,
Scope *CurScope);
//===---------------------------- Clang Extensions ----------------------===//
/// __builtin_convertvector(...)
ExprResult ActOnConvertVectorExpr(Expr *E, ParsedType ParsedDestTy,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
//===---------------------------- OpenCL Features -----------------------===//
/// __builtin_astype(...)
ExprResult ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
//===---------------------------- C++ Features --------------------------===//
// Act on C++ namespaces
Decl *ActOnStartNamespaceDef(Scope *S, SourceLocation InlineLoc,
SourceLocation NamespaceLoc,
SourceLocation IdentLoc, IdentifierInfo *Ident,
SourceLocation LBrace,
const ParsedAttributesView &AttrList,
UsingDirectiveDecl *&UsingDecl);
void ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace);
NamespaceDecl *getStdNamespace() const;
NamespaceDecl *getOrCreateStdNamespace();
NamespaceDecl *lookupStdExperimentalNamespace();
CXXRecordDecl *getStdBadAlloc() const;
EnumDecl *getStdAlignValT() const;
private:
// A cache representing if we've fully checked the various comparison category
// types stored in ASTContext. The bit-index corresponds to the integer value
// of a ComparisonCategoryType enumerator.
llvm::SmallBitVector FullyCheckedComparisonCategories;
ValueDecl *tryLookupCtorInitMemberDecl(CXXRecordDecl *ClassDecl,
CXXScopeSpec &SS,
ParsedType TemplateTypeTy,
IdentifierInfo *MemberOrBase);
public:
/// The context in which a comparison category type was required
/// (passed to CheckComparisonCategoryType below).
enum class ComparisonCategoryUsage {
/// The '<=>' operator was used in an expression and a builtin operator
/// was selected.
OperatorInExpression,
/// A defaulted 'operator<=>' needed the comparison category. This
/// typically only applies to 'std::strong_ordering', due to the implicit
/// fallback return value.
DefaultedOperator,
};
/// Lookup the specified comparison category types in the standard
/// library, and check the VarDecls possibly returned by the operator<=>
/// builtins for that type.
///
/// \return The type of the comparison category type corresponding to the
/// specified Kind, or a null type if an error occurs
QualType CheckComparisonCategoryType(ComparisonCategoryType Kind,
SourceLocation Loc,
ComparisonCategoryUsage Usage);
/// Tests whether Ty is an instance of std::initializer_list and, if
/// it is and Element is not NULL, assigns the element type to Element.
bool isStdInitializerList(QualType Ty, QualType *Element);
/// Looks for the std::initializer_list template and instantiates it
/// with Element, or emits an error if it's not found.
///
/// \returns The instantiated template, or null on error.
QualType BuildStdInitializerList(QualType Element, SourceLocation Loc);
/// Determine whether Ctor is an initializer-list constructor, as
/// defined in [dcl.init.list]p2.
bool isInitListConstructor(const FunctionDecl *Ctor);
Decl *ActOnUsingDirective(Scope *CurScope, SourceLocation UsingLoc,
SourceLocation NamespcLoc, CXXScopeSpec &SS,
SourceLocation IdentLoc,
IdentifierInfo *NamespcName,
const ParsedAttributesView &AttrList);
void PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir);
Decl *ActOnNamespaceAliasDef(Scope *CurScope,
SourceLocation NamespaceLoc,
SourceLocation AliasLoc,
IdentifierInfo *Alias,
CXXScopeSpec &SS,
SourceLocation IdentLoc,
IdentifierInfo *Ident);
void HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow);
bool CheckUsingShadowDecl(UsingDecl *UD, NamedDecl *Target,
const LookupResult &PreviousDecls,
UsingShadowDecl *&PrevShadow);
UsingShadowDecl *BuildUsingShadowDecl(Scope *S, UsingDecl *UD,
NamedDecl *Target,
UsingShadowDecl *PrevDecl);
bool CheckUsingDeclRedeclaration(SourceLocation UsingLoc,
bool HasTypenameKeyword,
const CXXScopeSpec &SS,
SourceLocation NameLoc,
const LookupResult &Previous);
bool CheckUsingDeclQualifier(SourceLocation UsingLoc,
bool HasTypename,
const CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
SourceLocation NameLoc);
NamedDecl *BuildUsingDeclaration(
Scope *S, AccessSpecifier AS, SourceLocation UsingLoc,
bool HasTypenameKeyword, SourceLocation TypenameLoc, CXXScopeSpec &SS,
DeclarationNameInfo NameInfo, SourceLocation EllipsisLoc,
const ParsedAttributesView &AttrList, bool IsInstantiation);
NamedDecl *BuildUsingPackDecl(NamedDecl *InstantiatedFrom,
ArrayRef<NamedDecl *> Expansions);
bool CheckInheritingConstructorUsingDecl(UsingDecl *UD);
/// Given a derived-class using shadow declaration for a constructor and the
/// corresponding base class constructor, find or create the implicit
/// synthesized derived class constructor to use for this initialization.
CXXConstructorDecl *
findInheritingConstructor(SourceLocation Loc, CXXConstructorDecl *BaseCtor,
ConstructorUsingShadowDecl *DerivedShadow);
Decl *ActOnUsingDeclaration(Scope *CurScope, AccessSpecifier AS,
SourceLocation UsingLoc,
SourceLocation TypenameLoc, CXXScopeSpec &SS,
UnqualifiedId &Name, SourceLocation EllipsisLoc,
const ParsedAttributesView &AttrList);
Decl *ActOnAliasDeclaration(Scope *CurScope, AccessSpecifier AS,
MultiTemplateParamsArg TemplateParams,
SourceLocation UsingLoc, UnqualifiedId &Name,
const ParsedAttributesView &AttrList,
TypeResult Type, Decl *DeclFromDeclSpec);
/// BuildCXXConstructExpr - Creates a complete call to a constructor,
/// including handling of its default argument expressions.
///
/// \param ConstructKind - a CXXConstructExpr::ConstructionKind
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
NamedDecl *FoundDecl,
CXXConstructorDecl *Constructor, MultiExprArg Exprs,
bool HadMultipleCandidates, bool IsListInitialization,
bool IsStdInitListInitialization,
bool RequiresZeroInit, unsigned ConstructKind,
SourceRange ParenRange);
/// Build a CXXConstructExpr whose constructor has already been resolved if
/// it denotes an inherited constructor.
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
CXXConstructorDecl *Constructor, bool Elidable,
MultiExprArg Exprs,
bool HadMultipleCandidates, bool IsListInitialization,
bool IsStdInitListInitialization,
bool RequiresZeroInit, unsigned ConstructKind,
SourceRange ParenRange);
// FIXME: Can we remove this and have the above BuildCXXConstructExpr check if
// the constructor can be elidable?
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
NamedDecl *FoundDecl,
CXXConstructorDecl *Constructor, bool Elidable,
MultiExprArg Exprs, bool HadMultipleCandidates,
bool IsListInitialization,
bool IsStdInitListInitialization, bool RequiresZeroInit,
unsigned ConstructKind, SourceRange ParenRange);
ExprResult BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field);
/// Instantiate or parse a C++ default argument expression as necessary.
/// Return true on error.
bool CheckCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD,
ParmVarDecl *Param);
/// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating
/// the default expr if needed.
ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc,
FunctionDecl *FD,
ParmVarDecl *Param);
/// FinalizeVarWithDestructor - Prepare for calling destructor on the
/// constructed variable.
void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType);
/// Helper class that collects exception specifications for
/// implicitly-declared special member functions.
class ImplicitExceptionSpecification {
// Pointer to allow copying
Sema *Self;
// We order exception specifications thus:
// noexcept is the most restrictive, but is only used in C++11.
// throw() comes next.
// Then a throw(collected exceptions)
// Finally no specification, which is expressed as noexcept(false).
// throw(...) is used instead if any called function uses it.
ExceptionSpecificationType ComputedEST;
// Exceptions seen so far, canonicalized for de-duplication.
llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen;
// The ordered list of collected exception types (backs data()/size()).
SmallVector<QualType, 4> Exceptions;
// Drop the collected exception set, e.g. when the computed specification
// no longer carries an exception list.
void ClearExceptions() {
ExceptionsSeen.clear();
Exceptions.clear();
}
public:
// Start from the most restrictive specification available in the current
// dialect: noexcept in C++11 and later, the empty dynamic spec otherwise
// (per the ordering described above).
explicit ImplicitExceptionSpecification(Sema &Self)
: Self(&Self), ComputedEST(EST_BasicNoexcept) {
if (!Self.getLangOpts().CPlusPlus11)
ComputedEST = EST_DynamicNone;
}
/// Get the computed exception specification type.
ExceptionSpecificationType getExceptionSpecType() const {
assert(!isComputedNoexcept(ComputedEST) &&
"noexcept(expr) should not be a possible result");
return ComputedEST;
}
/// The number of exceptions in the exception specification.
unsigned size() const { return Exceptions.size(); }
/// The set of exceptions in the exception specification.
const QualType *data() const { return Exceptions.data(); }
/// Integrate another called method into the collected data.
void CalledDecl(SourceLocation CallLoc, const CXXMethodDecl *Method);
/// Integrate an invoked expression into the collected data.
void CalledExpr(Expr *E) { CalledStmt(E); }
/// Integrate an invoked statement into the collected data.
void CalledStmt(Stmt *S);
/// Overwrite an EPI's exception specification with this
/// computed exception specification.
FunctionProtoType::ExceptionSpecInfo getExceptionSpec() const {
FunctionProtoType::ExceptionSpecInfo ESI;
ESI.Type = getExceptionSpecType();
if (ESI.Type == EST_Dynamic) {
// A dynamic spec carries the collected exception list.
ESI.Exceptions = Exceptions;
} else if (ESI.Type == EST_None) {
/// C++11 [except.spec]p14:
/// The exception-specification is noexcept(false) if the set of
/// potential exceptions of the special member function contains "any"
ESI.Type = EST_NoexceptFalse;
ESI.NoexceptExpr = Self->ActOnCXXBoolLiteral(SourceLocation(),
tok::kw_false).get();
}
return ESI;
}
};
/// Determine what sort of exception specification a defaulted
/// default constructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedDefaultCtorExceptionSpec(SourceLocation Loc,
CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted
/// copy constructor of a class will have, and whether the
/// parameter will be const.
ImplicitExceptionSpecification
ComputeDefaultedCopyCtorExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted
/// copy assignment operator of a class will have, and whether the
/// parameter will be const.
ImplicitExceptionSpecification
ComputeDefaultedCopyAssignmentExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted move
/// constructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedMoveCtorExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted move
/// assignment operator of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedMoveAssignmentExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted
/// destructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedDtorExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification an inheriting
/// constructor of a class will have.
ImplicitExceptionSpecification
ComputeInheritingCtorExceptionSpec(SourceLocation Loc,
CXXConstructorDecl *CD);
/// Evaluate the implicit exception specification for a defaulted
/// special member function.
void EvaluateImplicitExceptionSpec(SourceLocation Loc, FunctionDecl *FD);
/// Check the given noexcept-specifier, convert its expression, and compute
/// the appropriate ExceptionSpecificationType.
ExprResult ActOnNoexceptSpec(SourceLocation NoexceptLoc, Expr *NoexceptExpr,
ExceptionSpecificationType &EST);
/// Check the given exception-specification and update the
/// exception specification information with the results.
void checkExceptionSpecification(bool IsTopLevel,
ExceptionSpecificationType EST,
ArrayRef<ParsedType> DynamicExceptions,
ArrayRef<SourceRange> DynamicExceptionRanges,
Expr *NoexceptExpr,
SmallVectorImpl<QualType> &Exceptions,
FunctionProtoType::ExceptionSpecInfo &ESI);
/// Determine if we're in a case where we need to (incorrectly) eagerly
/// parse an exception specification to work around a libstdc++ bug.
bool isLibstdcxxEagerExceptionSpecHack(const Declarator &D);
/// Add an exception-specification to the given member function
/// (or member function template). The exception-specification was parsed
/// after the method itself was declared.
void actOnDelayedExceptionSpecification(Decl *Method,
ExceptionSpecificationType EST,
SourceRange SpecificationRange,
ArrayRef<ParsedType> DynamicExceptions,
ArrayRef<SourceRange> DynamicExceptionRanges,
Expr *NoexceptExpr);
class InheritedConstructorInfo;
/// Determine if a special member function should have a deleted
/// definition when it is defaulted.
bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM,
InheritedConstructorInfo *ICI = nullptr,
bool Diagnose = false);
/// Produce notes explaining why a defaulted function was defined as deleted.
void DiagnoseDeletedDefaultedFunction(FunctionDecl *FD);
/// Declare the implicit default constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// default constructor will be added.
///
/// \returns The implicitly-declared default constructor.
CXXConstructorDecl *DeclareImplicitDefaultConstructor(
CXXRecordDecl *ClassDecl);
/// DefineImplicitDefaultConstructor - Checks for feasibility of
/// defining this constructor as the default constructor.
void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit destructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// destructor will be added.
///
/// \returns The implicitly-declared destructor.
CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitDestructor - Checks for feasibility of
/// defining this destructor as the default destructor.
void DefineImplicitDestructor(SourceLocation CurrentLocation,
CXXDestructorDecl *Destructor);
/// Build an exception spec for destructors that don't have one.
///
/// C++11 says that user-defined destructors with no exception spec get one
/// that looks as if the destructor was implicitly declared.
void AdjustDestructorExceptionSpec(CXXDestructorDecl *Destructor);
/// Define the specified inheriting constructor.
void DefineInheritingConstructor(SourceLocation UseLoc,
CXXConstructorDecl *Constructor);
/// Declare the implicit copy constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy constructor will be added.
///
/// \returns The implicitly-declared copy constructor.
CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitCopyConstructor - Checks for feasibility of
/// defining this constructor as the copy constructor.
void DefineImplicitCopyConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit move constructor for the given class.
///
/// \param ClassDecl The Class declaration into which the implicit
/// move constructor will be added.
///
/// \returns The implicitly-declared move constructor, or NULL if it wasn't
/// declared.
CXXConstructorDecl *DeclareImplicitMoveConstructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitMoveConstructor - Checks for feasibility of
/// defining this constructor as the move constructor.
void DefineImplicitMoveConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit copy assignment operator for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy assignment operator will be added.
///
/// \returns The implicitly-declared copy assignment operator.
CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl);
/// Defines an implicitly-declared copy assignment operator.
void DefineImplicitCopyAssignment(SourceLocation CurrentLocation,
CXXMethodDecl *MethodDecl);
/// Declare the implicit move assignment operator for the given class.
///
/// \param ClassDecl The Class declaration into which the implicit
/// move assignment operator will be added.
///
/// \returns The implicitly-declared move assignment operator, or NULL if it
/// wasn't declared.
CXXMethodDecl *DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl);
/// Defines an implicitly-declared move assignment operator.
void DefineImplicitMoveAssignment(SourceLocation CurrentLocation,
CXXMethodDecl *MethodDecl);
/// Force the declaration of any implicitly-declared members of this
/// class.
void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class);
/// Check a completed declaration of an implicit special member.
void CheckImplicitSpecialMemberDeclaration(Scope *S, FunctionDecl *FD);
/// Determine whether the given function is an implicitly-deleted
/// special member function.
bool isImplicitlyDeleted(FunctionDecl *FD);
/// Check whether 'this' shows up in the type of a static member
/// function after the (naturally empty) cv-qualifier-seq would be.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionType(CXXMethodDecl *Method);
/// Check whether 'this' shows up in the exception specification of a static
/// member function.
bool checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method);
/// Check whether 'this' shows up in the attributes of the given
/// static member function.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method);
/// MaybeBindToTemporary - If the passed in expression has a record type with
/// a non-trivial destructor, this will return CXXBindTemporaryExpr. Otherwise
/// it simply returns the passed in expression.
ExprResult MaybeBindToTemporary(Expr *E);
/// Wrap the expression in a ConstantExpr if it is a potential immediate
/// invocation.
ExprResult CheckForImmediateInvocation(ExprResult E, FunctionDecl *Decl);
/// Perform the argument conversions required to form a call to the given
/// constructor, placing the converted expressions in \p ConvertedArgs.
bool CompleteConstructorCall(CXXConstructorDecl *Constructor,
MultiExprArg ArgsPtr,
SourceLocation Loc,
SmallVectorImpl<Expr*> &ConvertedArgs,
bool AllowExplicit = false,
bool IsListInitialization = false);
/// Handle a constructor name naming an inherited (base-class) constructor,
/// as introduced by a using-declaration.
ParsedType getInheritingConstructorName(CXXScopeSpec &SS,
SourceLocation NameLoc,
IdentifierInfo &Name);
/// Resolve the identifier \p II as a constructor name within the scope
/// described by \p SS.
ParsedType getConstructorName(IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec &SS,
bool EnteringContext);
/// Resolve a destructor name of the form '~II' (e.g. in 'p->~T()').
/// \p ObjectType, if present, is the type of the object expression.
ParsedType getDestructorName(SourceLocation TildeLoc,
IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec &SS,
ParsedType ObjectType,
bool EnteringContext);
/// Resolve a destructor named via a decltype-specifier,
/// e.g. '~decltype(expr)'.
ParsedType getDestructorTypeForDecltype(const DeclSpec &DS,
ParsedType ObjectType);
// Checks that reinterpret casts don't have undefined behavior.
void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType,
bool IsDereference, SourceRange Range);
/// ActOnCXXNamedCast - Parse
/// {dynamic,static,reinterpret,const,addrspace}_cast's.
ExprResult ActOnCXXNamedCast(SourceLocation OpLoc,
tok::TokenKind Kind,
SourceLocation LAngleBracketLoc,
Declarator &D,
SourceLocation RAngleBracketLoc,
SourceLocation LParenLoc,
Expr *E,
SourceLocation RParenLoc);
ExprResult BuildCXXNamedCast(SourceLocation OpLoc,
tok::TokenKind Kind,
TypeSourceInfo *Ty,
Expr *E,
SourceRange AngleBrackets,
SourceRange Parens);
ExprResult ActOnBuiltinBitCastExpr(SourceLocation KWLoc, Declarator &Dcl,
ExprResult Operand,
SourceLocation RParenLoc);
ExprResult BuildBuiltinBitCastExpr(SourceLocation KWLoc, TypeSourceInfo *TSI,
Expr *Operand, SourceLocation RParenLoc);
ExprResult BuildCXXTypeId(QualType TypeInfoType,
SourceLocation TypeidLoc,
TypeSourceInfo *Operand,
SourceLocation RParenLoc);
ExprResult BuildCXXTypeId(QualType TypeInfoType,
SourceLocation TypeidLoc,
Expr *Operand,
SourceLocation RParenLoc);
/// ActOnCXXTypeid - Parse typeid( something ).
ExprResult ActOnCXXTypeid(SourceLocation OpLoc,
SourceLocation LParenLoc, bool isType,
void *TyOrExpr,
SourceLocation RParenLoc);
ExprResult BuildCXXUuidof(QualType TypeInfoType,
SourceLocation TypeidLoc,
TypeSourceInfo *Operand,
SourceLocation RParenLoc);
ExprResult BuildCXXUuidof(QualType TypeInfoType,
SourceLocation TypeidLoc,
Expr *Operand,
SourceLocation RParenLoc);
/// ActOnCXXUuidof - Parse __uuidof( something ).
ExprResult ActOnCXXUuidof(SourceLocation OpLoc,
SourceLocation LParenLoc, bool isType,
void *TyOrExpr,
SourceLocation RParenLoc);
/// Handle a C++1z fold-expression: ( expr op ... op expr ).
ExprResult ActOnCXXFoldExpr(Scope *S, SourceLocation LParenLoc, Expr *LHS,
tok::TokenKind Operator,
SourceLocation EllipsisLoc, Expr *RHS,
SourceLocation RParenLoc);
ExprResult BuildCXXFoldExpr(UnresolvedLookupExpr *Callee,
SourceLocation LParenLoc, Expr *LHS,
BinaryOperatorKind Operator,
SourceLocation EllipsisLoc, Expr *RHS,
SourceLocation RParenLoc,
Optional<unsigned> NumExpansions);
ExprResult BuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc,
BinaryOperatorKind Operator);
/// ActOnCXXThis - Parse 'this' pointer.
ExprResult ActOnCXXThis(SourceLocation loc);
/// Build a CXXThisExpr and mark it referenced in the current context.
Expr *BuildCXXThisExpr(SourceLocation Loc, QualType Type, bool IsImplicit);
void MarkThisReferenced(CXXThisExpr *This);
/// Try to retrieve the type of the 'this' pointer.
///
/// \returns The type of 'this', if possible. Otherwise, returns a NULL type.
QualType getCurrentThisType();
/// When non-NULL, the C++ 'this' expression is allowed despite the
/// current context not being a non-static member function. In such cases,
/// this provides the type used for 'this'.
QualType CXXThisTypeOverride;
/// RAII object used to temporarily allow the C++ 'this' expression
/// to be used, with the given qualifiers on the current class type.
class CXXThisScopeRAII {
Sema &S;
QualType OldCXXThisTypeOverride;
bool Enabled;
public:
/// Introduce a new scope where 'this' may be allowed (when enabled),
/// using the given declaration (which is either a class template or a
/// class), along with the qualifiers placed on '*this'.
/// The previous CXXThisTypeOverride is restored on destruction.
CXXThisScopeRAII(Sema &S, Decl *ContextDecl, Qualifiers CXXThisTypeQuals,
bool Enabled = true);
~CXXThisScopeRAII();
};
/// Make sure the value of 'this' is actually available in the current
/// context, if it is a potentially evaluated context.
///
/// \param Loc The location at which the capture of 'this' occurs.
///
/// \param Explicit Whether 'this' is explicitly captured in a lambda
/// capture list.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// 'this' that may or may not be used in certain specializations of
/// a nested generic lambda (depending on whether the name resolves to
/// a non-static member function or a static function).
/// \return returns 'true' if failed, 'false' if success.
bool CheckCXXThisCapture(SourceLocation Loc, bool Explicit = false,
bool BuildAndDiagnose = true,
const unsigned *const FunctionScopeIndexToStopAt = nullptr,
bool ByCopy = false);
/// Determine whether the given type is the type of *this that is used
/// outside of the body of a member function for a type that is currently
/// being defined.
bool isThisOutsideMemberFunctionBody(QualType BaseType);
/// ActOnCXXBoolLiteral - Parse {true,false} literals.
ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);
/// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals.
ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);
ExprResult
ActOnObjCAvailabilityCheckExpr(llvm::ArrayRef<AvailabilitySpec> AvailSpecs,
SourceLocation AtLoc, SourceLocation RParen);
/// ActOnCXXNullPtrLiteral - Parse 'nullptr'.
ExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc);
/// ActOnCXXThrow - Parse throw expressions.
ExprResult ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *expr);
ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex,
bool IsThrownVarInScope);
bool CheckCXXThrowOperand(SourceLocation ThrowLoc, QualType ThrowTy, Expr *E);
/// ActOnCXXTypeConstructExpr - Parse construction of a specified type.
/// Can be interpreted either as function-style casting ("int(x)")
/// or class type construction ("ClassType(x,y,z)")
/// or creation of a value-initialized type ("int()").
ExprResult ActOnCXXTypeConstructExpr(ParsedType TypeRep,
SourceLocation LParenOrBraceLoc,
MultiExprArg Exprs,
SourceLocation RParenOrBraceLoc,
bool ListInitialization);
ExprResult BuildCXXTypeConstructExpr(TypeSourceInfo *Type,
SourceLocation LParenLoc,
MultiExprArg Exprs,
SourceLocation RParenLoc,
bool ListInitialization);
/// ActOnCXXNew - Parsed a C++ 'new' expression.
ExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal,
SourceLocation PlacementLParen,
MultiExprArg PlacementArgs,
SourceLocation PlacementRParen,
SourceRange TypeIdParens, Declarator &D,
Expr *Initializer);
ExprResult BuildCXXNew(SourceRange Range, bool UseGlobal,
SourceLocation PlacementLParen,
MultiExprArg PlacementArgs,
SourceLocation PlacementRParen,
SourceRange TypeIdParens,
QualType AllocType,
TypeSourceInfo *AllocTypeInfo,
Optional<Expr *> ArraySize,
SourceRange DirectInitRange,
Expr *Initializer);
/// Determine whether \p FD is an aligned allocation or deallocation
/// function that is unavailable.
bool isUnavailableAlignedAllocationFunction(const FunctionDecl &FD) const;
/// Produce diagnostics if \p FD is an aligned allocation or deallocation
/// function that is unavailable.
void diagnoseUnavailableAlignedAllocation(const FunctionDecl &FD,
SourceLocation Loc);
bool CheckAllocatedType(QualType AllocType, SourceLocation Loc,
SourceRange R);
/// The scope in which to find allocation functions.
enum AllocationFunctionScope {
/// Only look for allocation functions in the global scope.
AFS_Global,
/// Only look for allocation functions in the scope of the
/// allocated class.
AFS_Class,
/// Look for allocation functions in both the global scope
/// and in the scope of the allocated class.
AFS_Both
};
/// Finds the overloads of operator new and delete that are appropriate
/// for the allocation.
bool FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range,
AllocationFunctionScope NewScope,
AllocationFunctionScope DeleteScope,
QualType AllocType, bool IsArray,
bool &PassAlignment, MultiExprArg PlaceArgs,
FunctionDecl *&OperatorNew,
FunctionDecl *&OperatorDelete,
bool Diagnose = true);
void DeclareGlobalNewDelete();
void DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return,
ArrayRef<QualType> Params);
bool FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD,
DeclarationName Name, FunctionDecl* &Operator,
bool Diagnose = true);
FunctionDecl *FindUsualDeallocationFunction(SourceLocation StartLoc,
bool CanProvideSize,
bool Overaligned,
DeclarationName Name);
FunctionDecl *FindDeallocationFunctionForDestructor(SourceLocation StartLoc,
CXXRecordDecl *RD);
/// ActOnCXXDelete - Parsed a C++ 'delete' expression
ExprResult ActOnCXXDelete(SourceLocation StartLoc,
bool UseGlobal, bool ArrayForm,
Expr *Operand);
void CheckVirtualDtorCall(CXXDestructorDecl *dtor, SourceLocation Loc,
bool IsDelete, bool CallCanBeVirtual,
bool WarnOnNonAbstractTypes,
SourceLocation DtorLoc);
ExprResult ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation LParen,
Expr *Operand, SourceLocation RParen);
ExprResult BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand,
SourceLocation RParen);
/// Parsed one of the type trait support pseudo-functions.
ExprResult ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
ArrayRef<ParsedType> Args,
SourceLocation RParenLoc);
ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
ArrayRef<TypeSourceInfo *> Args,
SourceLocation RParenLoc);
/// ActOnArrayTypeTrait - Parsed one of the binary type trait support
/// pseudo-functions.
ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT,
SourceLocation KWLoc,
ParsedType LhsTy,
Expr *DimExpr,
SourceLocation RParen);
ExprResult BuildArrayTypeTrait(ArrayTypeTrait ATT,
SourceLocation KWLoc,
TypeSourceInfo *TSInfo,
Expr *DimExpr,
SourceLocation RParen);
/// ActOnExpressionTrait - Parsed one of the unary type trait support
/// pseudo-functions.
ExprResult ActOnExpressionTrait(ExpressionTrait OET,
SourceLocation KWLoc,
Expr *Queried,
SourceLocation RParen);
ExprResult BuildExpressionTrait(ExpressionTrait OET,
SourceLocation KWLoc,
Expr *Queried,
SourceLocation RParen);
ExprResult ActOnStartCXXMemberReference(Scope *S,
Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
ParsedType &ObjectType,
bool &MayBePseudoDestructor);
ExprResult BuildPseudoDestructorExpr(Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
const CXXScopeSpec &SS,
TypeSourceInfo *ScopeType,
SourceLocation CCLoc,
SourceLocation TildeLoc,
PseudoDestructorTypeStorage DestroyedType);
ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
UnqualifiedId &FirstTypeName,
SourceLocation CCLoc,
SourceLocation TildeLoc,
UnqualifiedId &SecondTypeName);
ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
SourceLocation TildeLoc,
const DeclSpec& DS);
/// MaybeCreateExprWithCleanups - If the current full-expression
/// requires any cleanups, surround it with a ExprWithCleanups node.
/// Otherwise, just returns the passed-in expression.
Expr *MaybeCreateExprWithCleanups(Expr *SubExpr);
Stmt *MaybeCreateStmtWithCleanups(Stmt *SubStmt);
ExprResult MaybeCreateExprWithCleanups(ExprResult SubExpr);
MaterializeTemporaryExpr *
CreateMaterializeTemporaryExpr(QualType T, Expr *Temporary,
bool BoundToLvalueReference);
/// Convenience overload: finish a full-expression using the expression's
/// own location, or an invalid location when \p Expr is null.
ExprResult ActOnFinishFullExpr(Expr *Expr, bool DiscardedValue) {
  SourceLocation CC = Expr ? Expr->getExprLoc() : SourceLocation();
  return ActOnFinishFullExpr(Expr, CC, DiscardedValue);
}
ExprResult ActOnFinishFullExpr(Expr *Expr, SourceLocation CC,
bool DiscardedValue, bool IsConstexpr = false);
StmtResult ActOnFinishFullStmt(Stmt *Stmt);
// Marks SS invalid if it represents an incomplete type.
bool RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC);
DeclContext *computeDeclContext(QualType T);
DeclContext *computeDeclContext(const CXXScopeSpec &SS,
bool EnteringContext = false);
bool isDependentScopeSpecifier(const CXXScopeSpec &SS);
CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS);
/// The parser has parsed a global nested-name-specifier '::'.
///
/// \param CCLoc The location of the '::'.
///
/// \param SS The nested-name-specifier, which will be updated in-place
/// to reflect the parsed nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXGlobalScopeSpecifier(SourceLocation CCLoc, CXXScopeSpec &SS);
/// The parser has parsed a '__super' nested-name-specifier.
///
/// \param SuperLoc The location of the '__super' keyword.
///
/// \param ColonColonLoc The location of the '::'.
///
/// \param SS The nested-name-specifier, which will be updated in-place
/// to reflect the parsed nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnSuperScopeSpecifier(SourceLocation SuperLoc,
SourceLocation ColonColonLoc, CXXScopeSpec &SS);
bool isAcceptableNestedNameSpecifier(const NamedDecl *SD,
bool *CanCorrect = nullptr);
NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS);
/// Keeps information about an identifier in a nested-name-spec.
///
struct NestedNameSpecInfo {
  /// The type of the object, if we're parsing nested-name-specifier in
  /// a member access expression.
  ParsedType ObjectType;
  /// The identifier preceding the '::'.
  IdentifierInfo *Identifier;
  /// The location of the identifier.
  SourceLocation IdentifierLoc;
  /// The location of the '::'.
  SourceLocation CCLoc;
  /// Creates info object for the most typical case.
  NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc,
                     SourceLocation ColonColonLoc,
                     ParsedType ObjectType = ParsedType())
      : ObjectType(ObjectType), Identifier(II), IdentifierLoc(IdLoc),
        CCLoc(ColonColonLoc) {}
  /// Convenience form taking an already-resolved QualType for the object;
  /// wraps it as a ParsedType and delegates to the primary constructor.
  NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc,
                     SourceLocation ColonColonLoc, QualType ObjectType)
      : NestedNameSpecInfo(II, IdLoc, ColonColonLoc,
                           ParsedType::make(ObjectType)) {}
};
bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS,
NestedNameSpecInfo &IdInfo);
bool BuildCXXNestedNameSpecifier(Scope *S,
NestedNameSpecInfo &IdInfo,
bool EnteringContext,
CXXScopeSpec &SS,
NamedDecl *ScopeLookupResult,
bool ErrorRecoveryLookup,
bool *IsCorrectedToColon = nullptr,
bool OnlyNamespace = false);
/// The parser has parsed a nested-name-specifier 'identifier::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param IdInfo Parser information about an identifier in the
/// nested-name-spec.
///
/// \param EnteringContext Whether we're entering the context nominated by
/// this nested-name-specifier.
///
/// \param SS The nested-name-specifier, which is both an input
/// parameter (the nested-name-specifier before this type) and an
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
///
/// \param ErrorRecoveryLookup If true, then this method is called to improve
/// error recovery. In this case do not emit error message.
///
/// \param IsCorrectedToColon If not null, suggestions to replace '::' -> ':'
/// are allowed. The bool value pointed by this parameter is set to 'true'
/// if the identifier is treated as if it was followed by ':', not '::'.
///
/// \param OnlyNamespace If true, only considers namespaces in lookup.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXNestedNameSpecifier(Scope *S,
NestedNameSpecInfo &IdInfo,
bool EnteringContext,
CXXScopeSpec &SS,
bool ErrorRecoveryLookup = false,
bool *IsCorrectedToColon = nullptr,
bool OnlyNamespace = false);
ExprResult ActOnDecltypeExpression(Expr *E);
bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS,
const DeclSpec &DS,
SourceLocation ColonColonLoc);
bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS,
NestedNameSpecInfo &IdInfo,
bool EnteringContext);
/// The parser has parsed a nested-name-specifier
/// 'template[opt] template-name < template-args >::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param SS The nested-name-specifier, which is both an input
/// parameter (the nested-name-specifier before this type) and an
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
///
/// \param TemplateKWLoc the location of the 'template' keyword, if any.
/// \param TemplateName the template name.
/// \param TemplateNameLoc The location of the template name.
/// \param LAngleLoc The location of the opening angle bracket ('<').
/// \param TemplateArgs The template arguments.
/// \param RAngleLoc The location of the closing angle bracket ('>').
/// \param CCLoc The location of the '::'.
///
/// \param EnteringContext Whether we're entering the context of the
/// nested-name-specifier.
///
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXNestedNameSpecifier(Scope *S,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
TemplateTy TemplateName,
SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc,
SourceLocation CCLoc,
bool EnteringContext);
/// Given a C++ nested-name-specifier, produce an annotation value
/// that the parser can use later to reconstruct the given
/// nested-name-specifier.
///
/// \param SS A nested-name-specifier.
///
/// \returns A pointer containing all of the information in the
/// nested-name-specifier \p SS.
void *SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS);
/// Given an annotation pointer for a nested-name-specifier, restore
/// the nested-name-specifier structure.
///
/// \param Annotation The annotation pointer, produced by
/// \c SaveNestedNameSpecifierAnnotation().
///
/// \param AnnotationRange The source range corresponding to the annotation.
///
/// \param SS The nested-name-specifier that will be updated with the contents
/// of the annotation pointer.
void RestoreNestedNameSpecifierAnnotation(void *Annotation,
SourceRange AnnotationRange,
CXXScopeSpec &SS);
bool ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS);
/// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global
/// scope or nested-name-specifier) is parsed, part of a declarator-id.
/// After this method is called, according to [C++ 3.4.3p3], names should be
/// looked up in the declarator-id's scope, until the declarator is parsed and
/// ActOnCXXExitDeclaratorScope is called.
/// The 'SS' should be a non-empty valid CXXScopeSpec.
bool ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS);
/// ActOnCXXExitDeclaratorScope - Called when a declarator that previously
/// invoked ActOnCXXEnterDeclaratorScope(), is finished. 'SS' is the same
/// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well.
/// Used to indicate that names should revert to being looked up in the
/// defining scope.
void ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS);
/// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse an
/// initializer for the declaration 'Dcl'.
/// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is a
/// static data member of class X, names should be looked up in the scope of
/// class X.
void ActOnCXXEnterDeclInitializer(Scope *S, Decl *Dcl);
/// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an
/// initializer for the declaration 'Dcl'.
void ActOnCXXExitDeclInitializer(Scope *S, Decl *Dcl);
/// Create a new lambda closure type.
CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange,
TypeSourceInfo *Info,
bool KnownDependent,
LambdaCaptureDefault CaptureDefault);
/// Start the definition of a lambda expression.
CXXMethodDecl *startLambdaDefinition(CXXRecordDecl *Class,
SourceRange IntroducerRange,
TypeSourceInfo *MethodType,
SourceLocation EndLoc,
ArrayRef<ParmVarDecl *> Params,
ConstexprSpecKind ConstexprKind,
Expr *TrailingRequiresClause);
/// Number lambda for linkage purposes if necessary.
void handleLambdaNumbering(
CXXRecordDecl *Class, CXXMethodDecl *Method,
Optional<std::tuple<unsigned, bool, Decl *>> Mangling = None);
/// Endow the lambda scope info with the relevant properties.
void buildLambdaScope(sema::LambdaScopeInfo *LSI,
CXXMethodDecl *CallOperator,
SourceRange IntroducerRange,
LambdaCaptureDefault CaptureDefault,
SourceLocation CaptureDefaultLoc,
bool ExplicitParams,
bool ExplicitResultType,
bool Mutable);
/// Perform initialization analysis of the init-capture and perform
/// any implicit conversions such as an lvalue-to-rvalue conversion if
/// not being used to initialize a reference.
ParsedType actOnLambdaInitCaptureInitialization(
    SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc,
    IdentifierInfo *Id, LambdaCaptureInitKind InitKind, Expr *&Init) {
  // Copy-init ("[x = e]") uses copy initialization; every other init kind
  // is treated as direct initialization.
  bool DirectInit = InitKind != LambdaCaptureInitKind::CopyInit;
  QualType CaptureType = buildLambdaInitCaptureInitialization(
      Loc, ByRef, EllipsisLoc, /*NumExpansions=*/None, Id, DirectInit, Init);
  return ParsedType::make(CaptureType);
}
QualType buildLambdaInitCaptureInitialization(
SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions, IdentifierInfo *Id, bool DirectInit,
Expr *&Init);
/// Create a dummy variable within the declcontext of the lambda's
/// call operator, for name lookup purposes for a lambda init capture.
///
/// CodeGen handles emission of lambda captures, ignoring these dummy
/// variables appropriately.
VarDecl *createLambdaInitCaptureVarDecl(SourceLocation Loc,
QualType InitCaptureType,
SourceLocation EllipsisLoc,
IdentifierInfo *Id,
unsigned InitStyle, Expr *Init);
/// Add an init-capture to a lambda scope.
void addInitCapture(sema::LambdaScopeInfo *LSI, VarDecl *Var);
/// Note that we have finished the explicit captures for the
/// given lambda.
void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI);
/// \brief This is called after parsing the explicit template parameter list
/// on a lambda (if it exists) in C++2a.
void ActOnLambdaExplicitTemplateParameterList(SourceLocation LAngleLoc,
ArrayRef<NamedDecl *> TParams,
SourceLocation RAngleLoc);
/// Introduce the lambda parameters into scope.
void addLambdaParameters(
ArrayRef<LambdaIntroducer::LambdaCapture> Captures,
CXXMethodDecl *CallOperator, Scope *CurScope);
/// Deduce a block or lambda's return type based on the return
/// statements present in the body.
void deduceClosureReturnType(sema::CapturingScopeInfo &CSI);
/// ActOnStartOfLambdaDefinition - This is called just before we start
/// parsing the body of a lambda; it analyzes the explicit captures and
/// arguments, and sets up various data-structures for the body of the
/// lambda.
void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
Declarator &ParamInfo, Scope *CurScope);
/// ActOnLambdaError - If there is an error parsing a lambda, this callback
/// is invoked to pop the information about the lambda.
void ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope,
bool IsInstantiation = false);
/// ActOnLambdaExpr - This is called when the body of a lambda expression
/// was successfully completed.
ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body,
Scope *CurScope);
/// Does copying/destroying the captured variable have side effects?
bool CaptureHasSideEffects(const sema::Capture &From);
/// Diagnose if an explicit lambda capture is unused. Returns true if a
/// diagnostic is emitted.
bool DiagnoseUnusedLambdaCapture(SourceRange CaptureRange,
const sema::Capture &From);
/// Build a FieldDecl suitable to hold the given capture.
FieldDecl *BuildCaptureField(RecordDecl *RD, const sema::Capture &Capture);
/// Initialize the given capture with a suitable expression.
ExprResult BuildCaptureInit(const sema::Capture &Capture,
SourceLocation ImplicitCaptureLoc,
bool IsOpenMPMapping = false);
/// Complete a lambda-expression having processed and attached the
/// lambda body.
ExprResult BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc,
sema::LambdaScopeInfo *LSI);
/// Get the return type to use for a lambda's conversion function(s) to
/// function pointer type, given the type of the call operator.
QualType
getLambdaConversionFunctionResultType(const FunctionProtoType *CallOpType);
/// Define the "body" of the conversion from a lambda object to a
/// function pointer.
///
/// This routine doesn't actually define a sensible body; rather, it fills
/// in the initialization expression needed to copy the lambda object into
/// the block, and IR generation actually generates the real body of the
/// block pointer conversion.
void DefineImplicitLambdaToFunctionPointerConversion(
SourceLocation CurrentLoc, CXXConversionDecl *Conv);
/// Define the "body" of the conversion from a lambda object to a
/// block pointer.
///
/// This routine doesn't actually define a sensible body; rather, it fills
/// in the initialization expression needed to copy the lambda object into
/// the block, and IR generation actually generates the real body of the
/// block pointer conversion.
void DefineImplicitLambdaToBlockPointerConversion(SourceLocation CurrentLoc,
CXXConversionDecl *Conv);
ExprResult BuildBlockForLambdaConversion(SourceLocation CurrentLocation,
SourceLocation ConvLocation,
CXXConversionDecl *Conv,
Expr *Src);
/// Check whether the given expression is a valid constraint expression.
/// A diagnostic is emitted if it is not, false is returned, and
/// PossibleNonPrimary will be set to true if the failure might be due to a
/// non-primary expression being used as an atomic constraint.
bool CheckConstraintExpression(const Expr *CE, Token NextToken = Token(),
bool *PossibleNonPrimary = nullptr,
bool IsTrailingRequiresClause = false);
private:
/// Caches pairs of template-like decls whose associated constraints were
/// checked for subsumption and whether or not the first's constraints did in
/// fact subsume the second's.
llvm::DenseMap<std::pair<NamedDecl *, NamedDecl *>, bool> SubsumptionCache;
/// Caches the normalized associated constraints of declarations (concepts or
/// constrained declarations). If an error occurred while normalizing the
/// associated constraints of the template or concept, nullptr will be cached
/// here.
llvm::DenseMap<NamedDecl *, NormalizedConstraint *>
NormalizationCache;
llvm::ContextualFoldingSet<ConstraintSatisfaction, const ASTContext &>
SatisfactionCache;
public:
const NormalizedConstraint *
getNormalizedAssociatedConstraints(
NamedDecl *ConstrainedDecl, ArrayRef<const Expr *> AssociatedConstraints);
/// \brief Check whether the given declaration's associated constraints are
/// at least as constrained as another declaration's, according to the
/// partial ordering of constraints.
///
/// \param Result If no error occurred, receives true if D1 is at least as
/// constrained as D2, and false otherwise.
///
/// \returns true if an error occurred, false otherwise.
bool IsAtLeastAsConstrained(NamedDecl *D1, ArrayRef<const Expr *> AC1,
NamedDecl *D2, ArrayRef<const Expr *> AC2,
bool &Result);
/// If D1 was not at least as constrained as D2, but would've been if a pair
/// of atomic constraints involved had been declared in a concept and not
/// repeated in two separate places in code.
/// \returns true if such a diagnostic was emitted, false otherwise.
bool MaybeEmitAmbiguousAtomicConstraintsDiagnostic(NamedDecl *D1,
ArrayRef<const Expr *> AC1, NamedDecl *D2, ArrayRef<const Expr *> AC2);
/// \brief Check whether the given list of constraint expressions are
/// satisfied (as if in a 'conjunction') given template arguments.
/// \param Template the template-like entity that triggered the constraints
/// check (either a concept or a constrained entity).
/// \param ConstraintExprs a list of constraint expressions, treated as if
/// they were 'AND'ed together.
/// \param TemplateArgs the list of template arguments to substitute into the
/// constraint expression.
/// \param TemplateIDRange The source range of the template id that
/// caused the constraints check.
/// \param Satisfaction if true is returned, will contain details of the
/// satisfaction, with enough information to diagnose an unsatisfied
/// expression.
/// \returns true if an error occurred and satisfaction could not be checked,
/// false otherwise.
bool CheckConstraintSatisfaction(
const NamedDecl *Template, ArrayRef<const Expr *> ConstraintExprs,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange TemplateIDRange, ConstraintSatisfaction &Satisfaction);
/// \brief Check whether the given non-dependent constraint expression is
/// satisfied. Returns false and updates Satisfaction with the satisfaction
/// verdict if successful, emits a diagnostic and returns true if an error
/// occurred and satisfaction could not be determined.
///
/// \returns true if an error occurred, false otherwise.
bool CheckConstraintSatisfaction(const Expr *ConstraintExpr,
ConstraintSatisfaction &Satisfaction);
/// Check whether the given function decl's trailing requires clause is
/// satisfied, if any. Returns false and updates Satisfaction with the
/// satisfaction verdict if successful, emits a diagnostic and returns true if
/// an error occurred and satisfaction could not be determined.
///
/// \returns true if an error occurred, false otherwise.
bool CheckFunctionConstraints(const FunctionDecl *FD,
ConstraintSatisfaction &Satisfaction,
SourceLocation UsageLoc = SourceLocation());
/// \brief Ensure that the given template arguments satisfy the constraints
/// associated with the given template, emitting a diagnostic if they do not.
///
/// \param Template The template to which the template arguments are being
/// provided.
///
/// \param TemplateArgs The converted, canonicalized template arguments.
///
/// \param TemplateIDRange The source range of the template id that
/// caused the constraints check.
///
/// \returns true if the constraints are not satisfied or could not be checked
/// for satisfaction, false if the constraints are satisfied.
bool EnsureTemplateArgumentListConstraints(TemplateDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange TemplateIDRange);
/// \brief Emit diagnostics explaining why a constraint expression was deemed
/// unsatisfied.
/// \param First whether this is the first time an unsatisfied constraint is
/// diagnosed for this error.
void
DiagnoseUnsatisfiedConstraint(const ConstraintSatisfaction &Satisfaction,
bool First = true);
/// \brief Emit diagnostics explaining why a constraint expression was deemed
/// unsatisfied.
void
DiagnoseUnsatisfiedConstraint(const ASTConstraintSatisfaction &Satisfaction,
bool First = true);
/// \brief Emit diagnostics explaining why a constraint expression was deemed
/// unsatisfied because it was ill-formed.
void DiagnoseUnsatisfiedIllFormedConstraint(SourceLocation DiagnosticLocation,
StringRef Diagnostic);
void DiagnoseRedeclarationConstraintMismatch(SourceLocation Old,
SourceLocation New);
// ParseObjCStringLiteral - Parse Objective-C string literals.
ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs,
ArrayRef<Expr *> Strings);
ExprResult BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S);
/// BuildObjCNumericLiteral - builds an ObjCBoxedExpr AST node for the
/// numeric literal expression. Type of the expression will be "NSNumber *"
/// or "id" if NSNumber is unavailable.
ExprResult BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number);
ExprResult ActOnObjCBoolLiteral(SourceLocation AtLoc, SourceLocation ValueLoc,
bool Value);
ExprResult BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements);
/// BuildObjCBoxedExpr - builds an ObjCBoxedExpr AST node for the
/// '@' prefixed parenthesized expression. The type of the expression will
/// either be "NSNumber *", "NSString *" or "NSValue *" depending on the type
/// of ValueType, which is allowed to be a built-in numeric type, "char *",
/// "const char *" or C structure with attribute 'objc_boxable'.
ExprResult BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr);
ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr,
Expr *IndexExpr,
ObjCMethodDecl *getterMethod,
ObjCMethodDecl *setterMethod);
ExprResult BuildObjCDictionaryLiteral(SourceRange SR,
MutableArrayRef<ObjCDictionaryElement> Elements);
ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc,
TypeSourceInfo *EncodedTypeInfo,
SourceLocation RParenLoc);
ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl,
CXXConversionDecl *Method,
bool HadMultipleCandidates);
ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc,
SourceLocation EncodeLoc,
SourceLocation LParenLoc,
ParsedType Ty,
SourceLocation RParenLoc);
/// ParseObjCSelectorExpression - Build selector expression for \@selector
ExprResult ParseObjCSelectorExpression(Selector Sel,
SourceLocation AtLoc,
SourceLocation SelLoc,
SourceLocation LParenLoc,
SourceLocation RParenLoc,
bool WarnMultipleSelectors);
/// ParseObjCProtocolExpression - Build protocol expression for \@protocol
ExprResult ParseObjCProtocolExpression(IdentifierInfo * ProtocolName,
SourceLocation AtLoc,
SourceLocation ProtoLoc,
SourceLocation LParenLoc,
SourceLocation ProtoIdLoc,
SourceLocation RParenLoc);
//===--------------------------------------------------------------------===//
// C++ Declarations
//
Decl *ActOnStartLinkageSpecification(Scope *S,
SourceLocation ExternLoc,
Expr *LangStr,
SourceLocation LBraceLoc);
Decl *ActOnFinishLinkageSpecification(Scope *S,
Decl *LinkageSpec,
SourceLocation RBraceLoc);
//===--------------------------------------------------------------------===//
// C++ Classes
//
CXXRecordDecl *getCurrentClass(Scope *S, const CXXScopeSpec *SS);
bool isCurrentClassName(const IdentifierInfo &II, Scope *S,
const CXXScopeSpec *SS = nullptr);
bool isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS);
bool ActOnAccessSpecifier(AccessSpecifier Access, SourceLocation ASLoc,
SourceLocation ColonLoc,
const ParsedAttributesView &Attrs);
NamedDecl *ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS,
Declarator &D,
MultiTemplateParamsArg TemplateParameterLists,
Expr *BitfieldWidth, const VirtSpecifiers &VS,
InClassInitStyle InitStyle);
void ActOnStartCXXInClassMemberInitializer();
void ActOnFinishCXXInClassMemberInitializer(Decl *VarDecl,
SourceLocation EqualLoc,
Expr *Init);
MemInitResult ActOnMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
SourceLocation LParenLoc,
ArrayRef<Expr *> Args,
SourceLocation RParenLoc,
SourceLocation EllipsisLoc);
MemInitResult ActOnMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
Expr *InitList,
SourceLocation EllipsisLoc);
MemInitResult BuildMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
Expr *Init,
SourceLocation EllipsisLoc);
MemInitResult BuildMemberInitializer(ValueDecl *Member,
Expr *Init,
SourceLocation IdLoc);
MemInitResult BuildBaseInitializer(QualType BaseType,
TypeSourceInfo *BaseTInfo,
Expr *Init,
CXXRecordDecl *ClassDecl,
SourceLocation EllipsisLoc);
MemInitResult BuildDelegatingInitializer(TypeSourceInfo *TInfo,
Expr *Init,
CXXRecordDecl *ClassDecl);
bool SetDelegatingInitializer(CXXConstructorDecl *Constructor,
CXXCtorInitializer *Initializer);
bool SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors,
ArrayRef<CXXCtorInitializer *> Initializers = None);
void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation);
/// MarkBaseAndMemberDestructorsReferenced - Given a record decl,
/// mark all the non-trivial destructors of its members and bases as
/// referenced.
void MarkBaseAndMemberDestructorsReferenced(SourceLocation Loc,
CXXRecordDecl *Record);
/// Mark destructors of virtual bases of this class referenced. In the Itanium
/// C++ ABI, this is done when emitting a destructor for any non-abstract
/// class. In the Microsoft C++ ABI, this is done any time a class's
/// destructor is referenced.
void MarkVirtualBaseDestructorsReferenced(
SourceLocation Location, CXXRecordDecl *ClassDecl,
llvm::SmallPtrSetImpl<const RecordType *> *DirectVirtualBases = nullptr);
/// Do semantic checks to allow the complete destructor variant to be emitted
/// when the destructor is defined in another translation unit. In the Itanium
/// C++ ABI, destructor variants are emitted together. In the MS C++ ABI, they
/// can be emitted in separate TUs. To emit the complete variant, run a subset
/// of the checks performed when emitting a regular destructor.
void CheckCompleteDestructorVariant(SourceLocation CurrentLocation,
CXXDestructorDecl *Dtor);
/// The list of classes whose vtables have been used within
/// this translation unit, and the source locations at which the
/// first use occurred.
typedef std::pair<CXXRecordDecl*, SourceLocation> VTableUse;
/// The list of vtables that are required but have not yet been
/// materialized.
SmallVector<VTableUse, 16> VTableUses;
/// The set of classes whose vtables have been used within
/// this translation unit, and a bit that will be true if the vtable is
/// required to be emitted (otherwise, it should be emitted only if needed
/// by code generation).
llvm::DenseMap<CXXRecordDecl *, bool> VTablesUsed;
/// Load any externally-stored vtable uses.
void LoadExternalVTableUses();
/// Note that the vtable for the given class was used at the
/// given location.
void MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class,
bool DefinitionRequired = false);
/// Mark the exception specifications of all virtual member functions
/// in the given class as needed.
void MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc,
const CXXRecordDecl *RD);
/// MarkVirtualMembersReferenced - Will mark all members of the given
/// CXXRecordDecl referenced.
void MarkVirtualMembersReferenced(SourceLocation Loc, const CXXRecordDecl *RD,
bool ConstexprOnly = false);
/// Define all of the vtables that have been used in this
/// translation unit and reference any virtual members used by those
/// vtables.
///
/// \returns true if any work was done, false otherwise.
bool DefineUsedVTables();
void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl);
void ActOnMemInitializers(Decl *ConstructorDecl,
SourceLocation ColonLoc,
ArrayRef<CXXCtorInitializer*> MemInits,
bool AnyErrors);
/// Check class-level dllimport/dllexport attribute. The caller must
/// ensure that referenceDLLExportedClassMethods is called some point later
/// when all outer classes of Class are complete.
void checkClassLevelDLLAttribute(CXXRecordDecl *Class);
void checkClassLevelCodeSegAttribute(CXXRecordDecl *Class);
void referenceDLLExportedClassMethods();
void propagateDLLAttrToBaseClassTemplate(
CXXRecordDecl *Class, Attr *ClassAttr,
ClassTemplateSpecializationDecl *BaseTemplateSpec,
SourceLocation BaseLoc);
/// Add gsl::Pointer attribute to std::container::iterator
/// \param ND The declaration that introduces the name
/// std::container::iterator. \param UnderlyingRecord The record named by ND.
void inferGslPointerAttribute(NamedDecl *ND, CXXRecordDecl *UnderlyingRecord);
/// Add [[gsl::Owner]] and [[gsl::Pointer]] attributes for std:: types.
void inferGslOwnerPointerAttribute(CXXRecordDecl *Record);
/// Add [[gsl::Pointer]] attributes for std:: types.
void inferGslPointerAttribute(TypedefNameDecl *TD);
void CheckCompletedCXXClass(Scope *S, CXXRecordDecl *Record);
/// Check that the C++ class annotated with "trivial_abi" satisfies all the
/// conditions that are needed for the attribute to have an effect.
void checkIllFormedTrivialABIStruct(CXXRecordDecl &RD);
void ActOnFinishCXXMemberSpecification(Scope *S, SourceLocation RLoc,
Decl *TagDecl, SourceLocation LBrac,
SourceLocation RBrac,
const ParsedAttributesView &AttrList);
void ActOnFinishCXXMemberDecls();
void ActOnFinishCXXNonNestedClass();
void ActOnReenterCXXMethodParameter(Scope *S, ParmVarDecl *Param);
unsigned ActOnReenterTemplateScope(Decl *Template,
llvm::function_ref<Scope *()> EnterScope);
void ActOnStartDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnDelayedCXXMethodParameter(Scope *S, Decl *Param);
void ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnFinishDelayedMemberInitializers(Decl *Record);
void MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD,
CachedTokens &Toks);
void UnmarkAsLateParsedTemplate(FunctionDecl *FD);
bool IsInsideALocalClassWithinATemplateFunction();
Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc,
Expr *AssertExpr,
Expr *AssertMessageExpr,
SourceLocation RParenLoc);
Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc,
Expr *AssertExpr,
StringLiteral *AssertMessageExpr,
SourceLocation RParenLoc,
bool Failed);
FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart,
SourceLocation FriendLoc,
TypeSourceInfo *TSInfo);
Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS,
MultiTemplateParamsArg TemplateParams);
NamedDecl *ActOnFriendFunctionDecl(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParams);
QualType CheckConstructorDeclarator(Declarator &D, QualType R,
StorageClass& SC);
void CheckConstructor(CXXConstructorDecl *Constructor);
QualType CheckDestructorDeclarator(Declarator &D, QualType R,
StorageClass& SC);
bool CheckDestructor(CXXDestructorDecl *Destructor);
void CheckConversionDeclarator(Declarator &D, QualType &R,
StorageClass& SC);
Decl *ActOnConversionDeclarator(CXXConversionDecl *Conversion);
void CheckDeductionGuideDeclarator(Declarator &D, QualType &R,
StorageClass &SC);
void CheckDeductionGuideTemplate(FunctionTemplateDecl *TD);
void CheckExplicitlyDefaultedFunction(Scope *S, FunctionDecl *MD);
bool CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD,
CXXSpecialMember CSM);
void CheckDelayedMemberExceptionSpecs();
bool CheckExplicitlyDefaultedComparison(Scope *S, FunctionDecl *MD,
DefaultedComparisonKind DCK);
void DeclareImplicitEqualityComparison(CXXRecordDecl *RD,
FunctionDecl *Spaceship);
void DefineDefaultedComparison(SourceLocation Loc, FunctionDecl *FD,
DefaultedComparisonKind DCK);
//===--------------------------------------------------------------------===//
// C++ Derived Classes
//
/// ActOnBaseSpecifier - Parsed a base specifier
CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class,
SourceRange SpecifierRange,
bool Virtual, AccessSpecifier Access,
TypeSourceInfo *TInfo,
SourceLocation EllipsisLoc);
BaseResult ActOnBaseSpecifier(Decl *classdecl,
SourceRange SpecifierRange,
ParsedAttributes &Attrs,
bool Virtual, AccessSpecifier Access,
ParsedType basetype,
SourceLocation BaseLoc,
SourceLocation EllipsisLoc);
bool AttachBaseSpecifiers(CXXRecordDecl *Class,
MutableArrayRef<CXXBaseSpecifier *> Bases);
void ActOnBaseSpecifiers(Decl *ClassDecl,
MutableArrayRef<CXXBaseSpecifier *> Bases);
bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base);
bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base,
CXXBasePaths &Paths);
// FIXME: I don't like this name.
void BuildBasePathArray(const CXXBasePaths &Paths, CXXCastPath &BasePath);
bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
SourceLocation Loc, SourceRange Range,
CXXCastPath *BasePath = nullptr,
bool IgnoreAccess = false);
bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
unsigned InaccessibleBaseID,
unsigned AmbiguousBaseConvID,
SourceLocation Loc, SourceRange Range,
DeclarationName Name,
CXXCastPath *BasePath,
bool IgnoreAccess = false);
std::string getAmbiguousPathsDisplayString(CXXBasePaths &Paths);
bool CheckOverridingFunctionAttributes(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
/// CheckOverridingFunctionReturnType - Checks whether the return types are
/// covariant, according to C++ [class.virtual]p5.
bool CheckOverridingFunctionReturnType(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
/// CheckOverridingFunctionExceptionSpec - Checks whether the exception
/// spec is a subset of base spec.
bool CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange);
/// CheckOverrideControl - Check C++11 override control semantics.
void CheckOverrideControl(NamedDecl *D);
/// DiagnoseAbsenceOfOverrideControl - Diagnose if 'override' keyword was
/// not used in the declaration of an overriding method.
void DiagnoseAbsenceOfOverrideControl(NamedDecl *D, bool Inconsistent);
/// CheckForFunctionMarkedFinal - Checks whether a virtual member function
/// overrides a virtual member function marked 'final', according to
/// C++11 [class.virtual]p4.
bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
//===--------------------------------------------------------------------===//
// C++ Access Control
//
/// The result of a C++ access-control check.
enum AccessResult {
AR_accessible,   ///< The entity is accessible.
AR_inaccessible, ///< The entity is not accessible.
AR_dependent,    ///< Accessibility cannot be resolved yet (dependent context) -- NOTE(review): inferred from name, confirm.
AR_delayed       ///< The access check has been deferred -- NOTE(review): inferred from name, confirm.
};
bool SetMemberAccessSpecifier(NamedDecl *MemberDecl,
NamedDecl *PrevMemberDecl,
AccessSpecifier LexicalAS);
AccessResult CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E,
DeclAccessPair FoundDecl);
AccessResult CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E,
DeclAccessPair FoundDecl);
AccessResult CheckAllocationAccess(SourceLocation OperatorLoc,
SourceRange PlacementRange,
CXXRecordDecl *NamingClass,
DeclAccessPair FoundDecl,
bool Diagnose = true);
AccessResult CheckConstructorAccess(SourceLocation Loc,
CXXConstructorDecl *D,
DeclAccessPair FoundDecl,
const InitializedEntity &Entity,
bool IsCopyBindingRefToTemp = false);
AccessResult CheckConstructorAccess(SourceLocation Loc,
CXXConstructorDecl *D,
DeclAccessPair FoundDecl,
const InitializedEntity &Entity,
const PartialDiagnostic &PDiag);
AccessResult CheckDestructorAccess(SourceLocation Loc,
CXXDestructorDecl *Dtor,
const PartialDiagnostic &PDiag,
QualType objectType = QualType());
AccessResult CheckFriendAccess(NamedDecl *D);
AccessResult CheckMemberAccess(SourceLocation UseLoc,
CXXRecordDecl *NamingClass,
DeclAccessPair Found);
AccessResult
CheckStructuredBindingMemberAccess(SourceLocation UseLoc,
CXXRecordDecl *DecomposedClass,
DeclAccessPair Field);
AccessResult CheckMemberOperatorAccess(SourceLocation Loc,
Expr *ObjectExpr,
Expr *ArgExpr,
DeclAccessPair FoundDecl);
AccessResult CheckAddressOfMemberAccess(Expr *OvlExpr,
DeclAccessPair FoundDecl);
AccessResult CheckBaseClassAccess(SourceLocation AccessLoc,
QualType Base, QualType Derived,
const CXXBasePath &Path,
unsigned DiagID,
bool ForceCheck = false,
bool ForceUnprivileged = false);
void CheckLookupAccess(const LookupResult &R);
bool IsSimplyAccessible(NamedDecl *Decl, CXXRecordDecl *NamingClass,
QualType BaseType);
bool isMemberAccessibleForDeletion(CXXRecordDecl *NamingClass,
DeclAccessPair Found, QualType ObjectType,
SourceLocation Loc,
const PartialDiagnostic &Diag);
/// Convenience overload of isMemberAccessibleForDeletion that performs the
/// check with no particular source location and a default-constructed
/// partial diagnostic, delegating to the full overload declared above.
bool isMemberAccessibleForDeletion(CXXRecordDecl *NamingClass,
DeclAccessPair Found,
QualType ObjectType) {
return isMemberAccessibleForDeletion(NamingClass, Found, ObjectType,
SourceLocation(), PDiag());
}
void HandleDependentAccessCheck(const DependentDiagnostic &DD,
const MultiLevelTemplateArgumentList &TemplateArgs);
void PerformDependentDiagnostics(const DeclContext *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs);
void HandleDelayedAccessCheck(sema::DelayedDiagnostic &DD, Decl *Ctx);
/// When true, access checking violations are treated as SFINAE
/// failures rather than hard errors.
bool AccessCheckingSFINAE;
/// Identifies which kind of entity was declared with an abstract class type,
/// for use when diagnosing such declarations (see isAbstractType /
/// RequireNonAbstractType below). AbstractNone (-1) indicates that no
/// specific entity kind applies.
enum AbstractDiagSelID {
AbstractNone = -1,
AbstractReturnType,
AbstractParamType,
AbstractVariableType,
AbstractFieldType,
AbstractIvarType,
AbstractSynthesizedIvarType,
AbstractArrayType
};
bool isAbstractType(SourceLocation Loc, QualType T);
bool RequireNonAbstractType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
/// Require that \p T not be an abstract class type, reporting diagnostic
/// \p DiagID (with \p Args bound into a BoundTypeDiagnoser) if it is.
/// \returns the result of the TypeDiagnoser-based overload declared above.
template <typename... Ts>
bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireNonAbstractType(Loc, T, Diagnoser);
}
void DiagnoseAbstractType(const CXXRecordDecl *RD);
//===--------------------------------------------------------------------===//
// C++ Overloaded Operators [C++ 13.5]
//
bool CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl);
bool CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl);
//===--------------------------------------------------------------------===//
// C++ Templates [C++ 14]
//
void FilterAcceptableTemplateNames(LookupResult &R,
bool AllowFunctionTemplates = true,
bool AllowDependent = true);
bool hasAnyAcceptableTemplateNames(LookupResult &R,
bool AllowFunctionTemplates = true,
bool AllowDependent = true,
bool AllowNonTemplateFunctions = false);
/// Try to interpret the lookup result D as a template-name.
///
/// \param D A declaration found by name lookup.
/// \param AllowFunctionTemplates Whether function templates should be
/// considered valid results.
/// \param AllowDependent Whether unresolved using declarations (that might
/// name templates) should be considered valid results.
NamedDecl *getAsTemplateNameDecl(NamedDecl *D,
bool AllowFunctionTemplates = true,
bool AllowDependent = true);
/// Tag type used to construct a RequiredTemplateKind that unconditionally
/// requires a template name (see RequiredTemplateKind below).
enum TemplateNameIsRequiredTag { TemplateNameIsRequired };
/// Whether and why a template name is required in this lookup.
class RequiredTemplateKind {
  /// None when a template name is unconditionally required; otherwise the
  /// (possibly invalid) location of the 'template' keyword.
  llvm::Optional<SourceLocation> TemplateKW;

public:
  /// Template name is required if TemplateKWLoc is valid.
  RequiredTemplateKind(SourceLocation TemplateKWLoc = SourceLocation())
      : TemplateKW(TemplateKWLoc) {}

  /// Template name is unconditionally required.
  RequiredTemplateKind(TemplateNameIsRequiredTag) : TemplateKW() {}

  /// The recorded 'template' keyword location, or an invalid location when
  /// none was recorded.
  SourceLocation getTemplateKeywordLoc() const {
    return TemplateKW.getValueOr(SourceLocation());
  }

  /// True when a valid 'template' keyword location is present.
  bool hasTemplateKeyword() const {
    return getTemplateKeywordLoc().isValid();
  }

  /// True when a template name is required, either because a valid keyword
  /// location was given or because it is unconditionally required.
  bool isRequired() const { return TemplateKW != SourceLocation(); }

  explicit operator bool() const { return isRequired(); }
};
/// Describes whether (and why) a name is assumed to name a template.
enum class AssumedTemplateKind {
/// This is not assumed to be a template name.
None,
/// This is assumed to be a template name because lookup found nothing.
FoundNothing,
/// This is assumed to be a template name because lookup found one or more
/// functions (but no function templates).
FoundFunctions,
};
bool LookupTemplateName(
LookupResult &R, Scope *S, CXXScopeSpec &SS, QualType ObjectType,
bool EnteringContext, bool &MemberOfUnknownSpecialization,
RequiredTemplateKind RequiredTemplate = SourceLocation(),
AssumedTemplateKind *ATK = nullptr, bool AllowTypoCorrection = true);
TemplateNameKind isTemplateName(Scope *S,
CXXScopeSpec &SS,
bool hasTemplateKeyword,
const UnqualifiedId &Name,
ParsedType ObjectType,
bool EnteringContext,
TemplateTy &Template,
bool &MemberOfUnknownSpecialization,
bool Disambiguation = false);
/// Try to resolve an undeclared template name as a type template.
///
/// Sets II to the identifier corresponding to the template name, and updates
/// Name to a corresponding (typo-corrected) type template name and TNK to
/// the corresponding kind, if possible.
void ActOnUndeclaredTypeTemplateName(Scope *S, TemplateTy &Name,
TemplateNameKind &TNK,
SourceLocation NameLoc,
IdentifierInfo *&II);
bool resolveAssumedTemplateNameAsType(Scope *S, TemplateName &Name,
SourceLocation NameLoc,
bool Diagnose = true);
/// Determine whether a particular identifier might be the name in a C++1z
/// deduction-guide declaration.
bool isDeductionGuideName(Scope *S, const IdentifierInfo &Name,
SourceLocation NameLoc,
ParsedTemplateTy *Template = nullptr);
bool DiagnoseUnknownTemplateName(const IdentifierInfo &II,
SourceLocation IILoc,
Scope *S,
const CXXScopeSpec *SS,
TemplateTy &SuggestedTemplate,
TemplateNameKind &SuggestedKind);
bool DiagnoseUninstantiableTemplate(SourceLocation PointOfInstantiation,
NamedDecl *Instantiation,
bool InstantiatedFromMember,
const NamedDecl *Pattern,
const NamedDecl *PatternDef,
TemplateSpecializationKind TSK,
bool Complain = true);
void DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl);
TemplateDecl *AdjustDeclIfTemplate(Decl *&Decl);
NamedDecl *ActOnTypeParameter(Scope *S, bool Typename,
SourceLocation EllipsisLoc,
SourceLocation KeyLoc,
IdentifierInfo *ParamName,
SourceLocation ParamNameLoc,
unsigned Depth, unsigned Position,
SourceLocation EqualLoc,
ParsedType DefaultArg, bool HasTypeConstraint);
bool ActOnTypeConstraint(const CXXScopeSpec &SS,
TemplateIdAnnotation *TypeConstraint,
TemplateTypeParmDecl *ConstrainedParameter,
SourceLocation EllipsisLoc);
bool AttachTypeConstraint(NestedNameSpecifierLoc NS,
DeclarationNameInfo NameInfo,
ConceptDecl *NamedConcept,
const TemplateArgumentListInfo *TemplateArgs,
TemplateTypeParmDecl *ConstrainedParameter,
SourceLocation EllipsisLoc);
bool AttachTypeConstraint(AutoTypeLoc TL,
NonTypeTemplateParmDecl *ConstrainedParameter,
SourceLocation EllipsisLoc);
bool RequireStructuralType(QualType T, SourceLocation Loc);
QualType CheckNonTypeTemplateParameterType(TypeSourceInfo *&TSI,
SourceLocation Loc);
QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc);
NamedDecl *ActOnNonTypeTemplateParameter(Scope *S, Declarator &D,
unsigned Depth,
unsigned Position,
SourceLocation EqualLoc,
Expr *DefaultArg);
NamedDecl *ActOnTemplateTemplateParameter(Scope *S,
SourceLocation TmpLoc,
TemplateParameterList *Params,
SourceLocation EllipsisLoc,
IdentifierInfo *ParamName,
SourceLocation ParamNameLoc,
unsigned Depth,
unsigned Position,
SourceLocation EqualLoc,
ParsedTemplateArgument DefaultArg);
TemplateParameterList *
ActOnTemplateParameterList(unsigned Depth,
SourceLocation ExportLoc,
SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
ArrayRef<NamedDecl *> Params,
SourceLocation RAngleLoc,
Expr *RequiresClause);
/// The context in which we are checking a template parameter list.
enum TemplateParamListContext {
TPC_ClassTemplate,                    ///< Parameter list of a class template.
TPC_VarTemplate,                      ///< Parameter list of a variable template.
TPC_FunctionTemplate,                 ///< Parameter list of a function template.
TPC_ClassTemplateMember,              ///< Templated member of a class template.
TPC_FriendClassTemplate,              ///< Friend class template declaration.
TPC_FriendFunctionTemplate,           ///< Friend function template without a definition.
TPC_FriendFunctionTemplateDefinition, ///< Friend function template with a definition.
TPC_TypeAliasTemplate                 ///< Parameter list of an alias template.
};
bool CheckTemplateParameterList(TemplateParameterList *NewParams,
TemplateParameterList *OldParams,
TemplateParamListContext TPC,
SkipBodyInfo *SkipBody = nullptr);
TemplateParameterList *MatchTemplateParametersToScopeSpecifier(
SourceLocation DeclStartLoc, SourceLocation DeclLoc,
const CXXScopeSpec &SS, TemplateIdAnnotation *TemplateId,
ArrayRef<TemplateParameterList *> ParamLists,
bool IsFriend, bool &IsMemberSpecialization, bool &Invalid,
bool SuppressDiagnostic = false);
DeclResult CheckClassTemplate(
Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc,
const ParsedAttributesView &Attr, TemplateParameterList *TemplateParams,
AccessSpecifier AS, SourceLocation ModulePrivateLoc,
SourceLocation FriendLoc, unsigned NumOuterTemplateParamLists,
TemplateParameterList **OuterTemplateParamLists,
SkipBodyInfo *SkipBody = nullptr);
TemplateArgumentLoc getTrivialTemplateArgumentLoc(const TemplateArgument &Arg,
QualType NTTPType,
SourceLocation Loc);
/// Get a template argument mapping the given template parameter to itself,
/// e.g. for X in \c template<int X>, this would return an expression template
/// argument referencing X.
TemplateArgumentLoc getIdentityTemplateArgumentLoc(NamedDecl *Param,
SourceLocation Location);
void translateTemplateArguments(const ASTTemplateArgsPtr &In,
TemplateArgumentListInfo &Out);
ParsedTemplateArgument ActOnTemplateTypeArgument(TypeResult ParsedType);
void NoteAllFoundTemplates(TemplateName Name);
QualType CheckTemplateIdType(TemplateName Template,
SourceLocation TemplateLoc,
TemplateArgumentListInfo &TemplateArgs);
TypeResult
ActOnTemplateIdType(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
TemplateTy Template, IdentifierInfo *TemplateII,
SourceLocation TemplateIILoc, SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc,
bool IsCtorOrDtorName = false, bool IsClassName = false);
/// Parsed an elaborated-type-specifier that refers to a template-id,
/// such as \c class T::template apply<U>.
TypeResult ActOnTagTemplateIdType(TagUseKind TUK,
TypeSpecifierType TagSpec,
SourceLocation TagLoc,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
TemplateTy TemplateD,
SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgsIn,
SourceLocation RAngleLoc);
DeclResult ActOnVarTemplateSpecialization(
Scope *S, Declarator &D, TypeSourceInfo *DI,
SourceLocation TemplateKWLoc, TemplateParameterList *TemplateParams,
StorageClass SC, bool IsPartialSpecialization);
/// Get the specialization of the given variable template corresponding to
/// the specified argument list, or a null-but-valid result if the arguments
/// are dependent.
DeclResult CheckVarTemplateId(VarTemplateDecl *Template,
SourceLocation TemplateLoc,
SourceLocation TemplateNameLoc,
const TemplateArgumentListInfo &TemplateArgs);
/// Form a reference to the specialization of the given variable template
/// corresponding to the specified argument list, or a null-but-valid result
/// if the arguments are dependent.
ExprResult CheckVarTemplateId(const CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
VarTemplateDecl *Template,
SourceLocation TemplateLoc,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult
CheckConceptTemplateId(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &ConceptNameInfo,
NamedDecl *FoundDecl, ConceptDecl *NamedConcept,
const TemplateArgumentListInfo *TemplateArgs);
void diagnoseMissingTemplateArguments(TemplateName Name, SourceLocation Loc);
ExprResult BuildTemplateIdExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
bool RequiresADL,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
TemplateNameKind ActOnTemplateName(
Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext,
TemplateTy &Template, bool AllowInjectedClassName = false);
DeclResult ActOnClassTemplateSpecialization(
Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
SourceLocation ModulePrivateLoc, CXXScopeSpec &SS,
TemplateIdAnnotation &TemplateId, const ParsedAttributesView &Attr,
MultiTemplateParamsArg TemplateParameterLists,
SkipBodyInfo *SkipBody = nullptr);
bool CheckTemplatePartialSpecializationArgs(SourceLocation Loc,
TemplateDecl *PrimaryTemplate,
unsigned NumExplicitArgs,
ArrayRef<TemplateArgument> Args);
void CheckTemplatePartialSpecialization(
ClassTemplatePartialSpecializationDecl *Partial);
void CheckTemplatePartialSpecialization(
VarTemplatePartialSpecializationDecl *Partial);
Decl *ActOnTemplateDeclarator(Scope *S,
MultiTemplateParamsArg TemplateParameterLists,
Declarator &D);
bool
CheckSpecializationInstantiationRedecl(SourceLocation NewLoc,
TemplateSpecializationKind NewTSK,
NamedDecl *PrevDecl,
TemplateSpecializationKind PrevTSK,
SourceLocation PrevPtOfInstantiation,
bool &SuppressNew);
bool CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD,
const TemplateArgumentListInfo &ExplicitTemplateArgs,
LookupResult &Previous);
bool CheckFunctionTemplateSpecialization(
FunctionDecl *FD, TemplateArgumentListInfo *ExplicitTemplateArgs,
LookupResult &Previous, bool QualifiedFriend = false);
bool CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous);
void CompleteMemberSpecialization(NamedDecl *Member, LookupResult &Previous);
DeclResult ActOnExplicitInstantiation(
Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc,
unsigned TagSpec, SourceLocation KWLoc, const CXXScopeSpec &SS,
TemplateTy Template, SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc, const ParsedAttributesView &Attr);
DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc,
SourceLocation TemplateLoc,
unsigned TagSpec, SourceLocation KWLoc,
CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc,
const ParsedAttributesView &Attr);
DeclResult ActOnExplicitInstantiation(Scope *S,
SourceLocation ExternLoc,
SourceLocation TemplateLoc,
Declarator &D);
TemplateArgumentLoc
SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template,
SourceLocation TemplateLoc,
SourceLocation RAngleLoc,
Decl *Param,
SmallVectorImpl<TemplateArgument>
&Converted,
bool &HasDefaultArg);
/// Specifies the context in which a particular template
/// argument is being checked.
enum CheckTemplateArgumentKind {
/// The template argument was specified in the code or was
/// instantiated with some deduced template arguments.
CTAK_Specified,
/// The template argument was deduced via template argument
/// deduction.
CTAK_Deduced,
/// The template argument was deduced from an array bound
/// via template argument deduction.
CTAK_DeducedFromArrayBound
};
bool CheckTemplateArgument(NamedDecl *Param,
TemplateArgumentLoc &Arg,
NamedDecl *Template,
SourceLocation TemplateLoc,
SourceLocation RAngleLoc,
unsigned ArgumentPackIndex,
SmallVectorImpl<TemplateArgument> &Converted,
CheckTemplateArgumentKind CTAK = CTAK_Specified);
/// Check that the given template arguments can be provided to
/// the given template, converting the arguments along the way.
///
/// \param Template The template to which the template arguments are being
/// provided.
///
/// \param TemplateLoc The location of the template name in the source.
///
/// \param TemplateArgs The list of template arguments. If the template is
/// a template template parameter, this function may extend the set of
/// template arguments to also include substituted, defaulted template
/// arguments.
///
/// \param PartialTemplateArgs True if the list of template arguments is
/// intentionally partial, e.g., because we're checking just the initial
/// set of template arguments.
///
/// \param Converted Will receive the converted, canonicalized template
/// arguments.
///
/// \param UpdateArgsWithConversions If \c true, update \p TemplateArgs to
/// contain the converted forms of the template arguments as written.
/// Otherwise, \p TemplateArgs will not be modified.
///
/// \param ConstraintsNotSatisfied If provided, and an error occurred, will
/// receive true if the cause for the error is the associated constraints of
/// the template not being satisfied by the template arguments.
///
/// \returns true if an error occurred, false otherwise.
bool CheckTemplateArgumentList(TemplateDecl *Template,
SourceLocation TemplateLoc,
TemplateArgumentListInfo &TemplateArgs,
bool PartialTemplateArgs,
SmallVectorImpl<TemplateArgument> &Converted,
bool UpdateArgsWithConversions = true,
bool *ConstraintsNotSatisfied = nullptr);
bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param,
TemplateArgumentLoc &Arg,
SmallVectorImpl<TemplateArgument> &Converted);
bool CheckTemplateArgument(TemplateTypeParmDecl *Param,
TypeSourceInfo *Arg);
ExprResult CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
QualType InstantiatedParamType, Expr *Arg,
TemplateArgument &Converted,
CheckTemplateArgumentKind CTAK = CTAK_Specified);
bool CheckTemplateTemplateArgument(TemplateTemplateParmDecl *Param,
TemplateParameterList *Params,
TemplateArgumentLoc &Arg);
ExprResult
BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg,
QualType ParamType,
SourceLocation Loc);
ExprResult
BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg,
SourceLocation Loc);
/// Enumeration describing how template parameter lists are compared
/// for equality.
enum TemplateParameterListEqualKind {
/// We are matching the template parameter lists of two templates
/// that might be redeclarations.
///
/// \code
/// template<typename T> struct X;
/// template<typename T> struct X;
/// \endcode
TPL_TemplateMatch,
/// We are matching the template parameter lists of two template
/// template parameters as part of matching the template parameter lists
/// of two templates that might be redeclarations.
///
/// \code
/// template<template<int I> class TT> struct X;
/// template<template<int Value> class Other> struct X;
/// \endcode
TPL_TemplateTemplateParmMatch,
/// We are matching the template parameter lists of a template
/// template argument against the template parameter lists of a template
/// template parameter.
///
/// \code
/// template<template<int Value> class Metafun> struct X;
/// template<int Value> struct integer_c;
/// X<integer_c> xic;
/// \endcode
TPL_TemplateTemplateArgumentMatch
};
bool TemplateParameterListsAreEqual(TemplateParameterList *New,
TemplateParameterList *Old,
bool Complain,
TemplateParameterListEqualKind Kind,
SourceLocation TemplateArgLoc
= SourceLocation());
bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams);
/// Called when the parser has parsed a C++ typename
/// specifier, e.g., "typename T::type".
///
/// \param S The scope in which this typename type occurs.
/// \param TypenameLoc the location of the 'typename' keyword
/// \param SS the nested-name-specifier following the typename (e.g., 'T::').
/// \param II the identifier we're retrieving (e.g., 'type' in the example).
/// \param IdLoc the location of the identifier.
TypeResult
ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
const CXXScopeSpec &SS, const IdentifierInfo &II,
SourceLocation IdLoc);
/// Called when the parser has parsed a C++ typename
/// specifier that ends in a template-id, e.g.,
/// "typename MetaFun::template apply<T1, T2>".
///
/// \param S The scope in which this typename type occurs.
/// \param TypenameLoc the location of the 'typename' keyword
/// \param SS the nested-name-specifier following the typename (e.g., 'T::').
/// \param TemplateLoc the location of the 'template' keyword, if any.
/// \param TemplateName The template name.
/// \param TemplateII The identifier used to name the template.
/// \param TemplateIILoc The location of the template name.
/// \param LAngleLoc The location of the opening angle bracket ('<').
/// \param TemplateArgs The template arguments.
/// \param RAngleLoc The location of the closing angle bracket ('>').
TypeResult
ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
const CXXScopeSpec &SS,
SourceLocation TemplateLoc,
TemplateTy TemplateName,
IdentifierInfo *TemplateII,
SourceLocation TemplateIILoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc);
QualType CheckTypenameType(ElaboratedTypeKeyword Keyword,
SourceLocation KeywordLoc,
NestedNameSpecifierLoc QualifierLoc,
const IdentifierInfo &II,
SourceLocation IILoc,
TypeSourceInfo **TSI,
bool DeducedTSTContext);
QualType CheckTypenameType(ElaboratedTypeKeyword Keyword,
SourceLocation KeywordLoc,
NestedNameSpecifierLoc QualifierLoc,
const IdentifierInfo &II,
SourceLocation IILoc,
bool DeducedTSTContext = true);
TypeSourceInfo *RebuildTypeInCurrentInstantiation(TypeSourceInfo *T,
SourceLocation Loc,
DeclarationName Name);
bool RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS);
ExprResult RebuildExprInCurrentInstantiation(Expr *E);
bool RebuildTemplateParamsInCurrentInstantiation(
TemplateParameterList *Params);
std::string
getTemplateArgumentBindingsText(const TemplateParameterList *Params,
const TemplateArgumentList &Args);
std::string
getTemplateArgumentBindingsText(const TemplateParameterList *Params,
const TemplateArgument *Args,
unsigned NumArgs);
//===--------------------------------------------------------------------===//
// C++ Concepts
//===--------------------------------------------------------------------===//
Decl *ActOnConceptDefinition(
Scope *S, MultiTemplateParamsArg TemplateParameterLists,
IdentifierInfo *Name, SourceLocation NameLoc, Expr *ConstraintExpr);
RequiresExprBodyDecl *
ActOnStartRequiresExpr(SourceLocation RequiresKWLoc,
ArrayRef<ParmVarDecl *> LocalParameters,
Scope *BodyScope);
void ActOnFinishRequiresExpr();
concepts::Requirement *ActOnSimpleRequirement(Expr *E);
concepts::Requirement *ActOnTypeRequirement(
SourceLocation TypenameKWLoc, CXXScopeSpec &SS, SourceLocation NameLoc,
IdentifierInfo *TypeName, TemplateIdAnnotation *TemplateId);
concepts::Requirement *ActOnCompoundRequirement(Expr *E,
SourceLocation NoexceptLoc);
concepts::Requirement *
ActOnCompoundRequirement(
Expr *E, SourceLocation NoexceptLoc, CXXScopeSpec &SS,
TemplateIdAnnotation *TypeConstraint, unsigned Depth);
concepts::Requirement *ActOnNestedRequirement(Expr *Constraint);
concepts::ExprRequirement *
BuildExprRequirement(
Expr *E, bool IsSatisfied, SourceLocation NoexceptLoc,
concepts::ExprRequirement::ReturnTypeRequirement ReturnTypeRequirement);
concepts::ExprRequirement *
BuildExprRequirement(
concepts::Requirement::SubstitutionDiagnostic *ExprSubstDiag,
bool IsSatisfied, SourceLocation NoexceptLoc,
concepts::ExprRequirement::ReturnTypeRequirement ReturnTypeRequirement);
concepts::TypeRequirement *BuildTypeRequirement(TypeSourceInfo *Type);
concepts::TypeRequirement *
BuildTypeRequirement(
concepts::Requirement::SubstitutionDiagnostic *SubstDiag);
concepts::NestedRequirement *BuildNestedRequirement(Expr *E);
concepts::NestedRequirement *
BuildNestedRequirement(
concepts::Requirement::SubstitutionDiagnostic *SubstDiag);
ExprResult ActOnRequiresExpr(SourceLocation RequiresKWLoc,
RequiresExprBodyDecl *Body,
ArrayRef<ParmVarDecl *> LocalParameters,
ArrayRef<concepts::Requirement *> Requirements,
SourceLocation ClosingBraceLoc);
//===--------------------------------------------------------------------===//
// C++ Variadic Templates (C++0x [temp.variadic])
//===--------------------------------------------------------------------===//
/// Determine whether an unexpanded parameter pack might be permitted in this
/// location. Useful for error recovery.
bool isUnexpandedParameterPackPermitted();
/// The context in which an unexpanded parameter pack is
/// being diagnosed.
///
/// Note that the values of this enumeration line up with the first
/// argument to the \c err_unexpanded_parameter_pack diagnostic.
enum UnexpandedParameterPackContext {
/// An arbitrary expression.
UPPC_Expression = 0,
/// The base type of a class type.
UPPC_BaseType,
/// The type of an arbitrary declaration.
UPPC_DeclarationType,
/// The type of a data member.
UPPC_DataMemberType,
/// The size of a bit-field.
UPPC_BitFieldWidth,
/// The expression in a static assertion.
UPPC_StaticAssertExpression,
/// The fixed underlying type of an enumeration.
UPPC_FixedUnderlyingType,
/// The enumerator value.
UPPC_EnumeratorValue,
/// A using declaration.
UPPC_UsingDeclaration,
/// A friend declaration.
UPPC_FriendDeclaration,
/// A declaration qualifier.
UPPC_DeclarationQualifier,
/// An initializer.
UPPC_Initializer,
/// A default argument.
UPPC_DefaultArgument,
/// The type of a non-type template parameter.
UPPC_NonTypeTemplateParameterType,
/// The type of an exception.
UPPC_ExceptionType,
/// Partial specialization.
UPPC_PartialSpecialization,
/// Microsoft __if_exists.
UPPC_IfExists,
/// Microsoft __if_not_exists.
UPPC_IfNotExists,
/// Lambda expression.
UPPC_Lambda,
/// Block expression.
UPPC_Block,
/// A type constraint.
UPPC_TypeConstraint,
/// A requirement in a requires-expression.
UPPC_Requirement,
};
/// Diagnose unexpanded parameter packs.
///
/// \param Loc The location at which we should emit the diagnostic.
///
/// \param UPPC The context in which we are diagnosing unexpanded
/// parameter packs.
///
/// \param Unexpanded the set of unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPacks(SourceLocation Loc,
UnexpandedParameterPackContext UPPC,
ArrayRef<UnexpandedParameterPack> Unexpanded);
/// If the given type contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The source location where a diagnostic should be emitted.
///
/// \param T The type that is being checked for unexpanded parameter
/// packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T,
UnexpandedParameterPackContext UPPC);
/// If the given expression contains an unexpanded parameter
/// pack, diagnose the error.
///
/// \param E The expression that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(Expr *E,
UnexpandedParameterPackContext UPPC = UPPC_Expression);
/// If the given requires-expression contains an unexpanded reference to one
/// of its own parameter packs, diagnose the error.
///
/// \param RE The requires-expression that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPackInRequiresExpr(RequiresExpr *RE);
/// If the given nested-name-specifier contains an unexpanded
/// parameter pack, diagnose the error.
///
/// \param SS The nested-name-specifier that is being checked for
/// unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS,
UnexpandedParameterPackContext UPPC);
/// If the given name contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param NameInfo The name (with source location information) that
/// is being checked for unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo,
UnexpandedParameterPackContext UPPC);
/// If the given template name contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The location of the template name.
///
/// \param Template The template name that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc,
TemplateName Template,
UnexpandedParameterPackContext UPPC);
/// If the given template argument contains an unexpanded parameter
/// pack, diagnose the error.
///
/// \param Arg The template argument that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg,
UnexpandedParameterPackContext UPPC);
/// Collect the set of unexpanded parameter packs within the given
/// template argument.
///
/// \param Arg The template argument that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TemplateArgument Arg,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// template argument.
///
/// \param Arg The template argument that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TemplateArgumentLoc Arg,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// type.
///
/// \param T The type that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(QualType T,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// type.
///
/// \param TL The type that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TypeLoc TL,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// nested-name-specifier.
///
/// \param NNS The nested-name-specifier that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(NestedNameSpecifierLoc NNS,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// name.
///
/// \param NameInfo The name that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Invoked when parsing a template argument followed by an
/// ellipsis, which creates a pack expansion.
///
/// \param Arg The template argument preceding the ellipsis, which
/// may already be invalid.
///
/// \param EllipsisLoc The location of the ellipsis.
ParsedTemplateArgument ActOnPackExpansion(const ParsedTemplateArgument &Arg,
SourceLocation EllipsisLoc);
/// Invoked when parsing a type followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Type The type preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
TypeResult ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc);
/// Construct a pack expansion type from the pattern of the pack
/// expansion.
TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern,
SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// Construct a pack expansion type from the pattern of the pack
/// expansion.
QualType CheckPackExpansion(QualType Pattern,
SourceRange PatternRange,
SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// Invoked when parsing an expression followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Pattern The expression preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
ExprResult ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc);
/// Invoked when parsing an expression followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Pattern The expression preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// Determine whether we could expand a pack expansion with the
/// given set of parameter packs into separate arguments by repeatedly
/// transforming the pattern.
///
/// \param EllipsisLoc The location of the ellipsis that identifies the
/// pack expansion.
///
/// \param PatternRange The source range that covers the entire pattern of
/// the pack expansion.
///
/// \param Unexpanded The set of unexpanded parameter packs within the
/// pattern.
///
/// \param ShouldExpand Will be set to \c true if the transformer should
/// expand the corresponding pack expansions into separate arguments. When
/// set, \c NumExpansions must also be set.
///
/// \param RetainExpansion Whether the caller should add an unexpanded
/// pack expansion after all of the expanded arguments. This is used
/// when extending explicitly-specified template argument packs per
/// C++0x [temp.arg.explicit]p9.
///
/// \param NumExpansions The number of separate arguments that will be in
/// the expanded form of the corresponding pack expansion. This is both an
/// input and an output parameter, which can be set by the caller if the
/// number of expansions is known a priori (e.g., due to a prior substitution)
/// and will be set by the callee when the number of expansions is known.
/// The callee must set this value when \c ShouldExpand is \c true; it may
/// set this value in other cases.
///
/// \returns true if an error occurred (e.g., because the parameter packs
/// are to be instantiated with arguments of different lengths), false
/// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions)
/// must be set.
bool CheckParameterPacksForExpansion(SourceLocation EllipsisLoc,
SourceRange PatternRange,
ArrayRef<UnexpandedParameterPack> Unexpanded,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool &ShouldExpand,
bool &RetainExpansion,
Optional<unsigned> &NumExpansions);
/// Determine the number of arguments in the given pack expansion
/// type.
///
/// This routine assumes that the number of arguments in the expansion is
/// consistent across all of the unexpanded parameter packs in its pattern.
///
/// Returns an empty Optional if the type can't be expanded.
Optional<unsigned> getNumArgumentsInExpansion(QualType T,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Determine whether the given declarator contains any unexpanded
/// parameter packs.
///
/// This routine is used by the parser to disambiguate function declarators
/// with an ellipsis prior to the ')', e.g.,
///
/// \code
/// void f(T...);
/// \endcode
///
/// To determine whether we have an (unnamed) function parameter pack or
/// a variadic function.
///
/// \returns true if the declarator contains any unexpanded parameter packs,
/// false otherwise.
bool containsUnexpandedParameterPacks(Declarator &D);
/// Returns the pattern of the pack expansion for a template argument.
///
/// \param OrigLoc The template argument to expand.
///
/// \param Ellipsis Will be set to the location of the ellipsis.
///
/// \param NumExpansions Will be set to the number of expansions that will
/// be generated from this pack expansion, if known a priori.
TemplateArgumentLoc getTemplateArgumentPackExpansionPattern(
TemplateArgumentLoc OrigLoc,
SourceLocation &Ellipsis,
Optional<unsigned> &NumExpansions) const;
/// Given a template argument that contains an unexpanded parameter pack, but
/// which has already been substituted, attempt to determine the number of
/// elements that will be produced once this argument is fully-expanded.
///
/// This is intended for use when transforming 'sizeof...(Arg)' in order to
/// avoid actually expanding the pack where possible.
Optional<unsigned> getFullyPackExpandedSize(TemplateArgument Arg);
//===--------------------------------------------------------------------===//
// C++ Template Argument Deduction (C++ [temp.deduct])
//===--------------------------------------------------------------------===//
/// Adjust the type \p ArgFunctionType to match the calling convention,
/// noreturn, and optionally the exception specification of \p FunctionType.
/// Deduction often wants to ignore these properties when matching function
/// types.
QualType adjustCCAndNoReturn(QualType ArgFunctionType, QualType FunctionType,
bool AdjustExceptionSpec = false);
/// Describes the result of template argument deduction.
///
/// The TemplateDeductionResult enumeration describes the result of
/// template argument deduction, as returned from
/// DeduceTemplateArguments(). The separate TemplateDeductionInfo
/// structure provides additional information about the results of
/// template argument deduction, e.g., the deduced template argument
/// list (if successful) or the specific template parameters or
/// deduced arguments that were involved in the failure.
enum TemplateDeductionResult {
/// Template argument deduction was successful.
TDK_Success = 0,
/// The declaration was invalid; do nothing.
TDK_Invalid,
/// Template argument deduction exceeded the maximum template
/// instantiation depth (which has already been diagnosed).
TDK_InstantiationDepth,
/// Template argument deduction did not deduce a value
/// for every template parameter.
TDK_Incomplete,
/// Template argument deduction did not deduce a value for every
/// expansion of an expanded template parameter pack.
TDK_IncompletePack,
/// Template argument deduction produced inconsistent
/// deduced values for the given template parameter.
TDK_Inconsistent,
/// Template argument deduction failed due to inconsistent
/// cv-qualifiers on a template parameter type that would
/// otherwise be deduced, e.g., we tried to deduce T in "const T"
/// but were given a non-const "X".
TDK_Underqualified,
/// Substitution of the deduced template argument values
/// resulted in an error.
TDK_SubstitutionFailure,
/// After substituting deduced template arguments, a dependent
/// parameter type did not match the corresponding argument.
TDK_DeducedMismatch,
/// After substituting deduced template arguments, an element of
/// a dependent parameter type did not match the corresponding element
/// of the corresponding argument (when deducing from an initializer list).
TDK_DeducedMismatchNested,
/// A non-dependent component of the parameter did not match the
/// corresponding component of the argument.
TDK_NonDeducedMismatch,
/// When performing template argument deduction for a function
/// template, there were too many call arguments.
TDK_TooManyArguments,
/// When performing template argument deduction for a function
/// template, there were too few call arguments.
TDK_TooFewArguments,
/// The explicitly-specified template arguments were not valid
/// template arguments for the given template.
TDK_InvalidExplicitArguments,
/// Checking non-dependent argument conversions failed.
TDK_NonDependentConversionFailure,
/// The deduced arguments did not satisfy the constraints associated
/// with the template.
TDK_ConstraintsNotSatisfied,
/// Deduction failed; that's all we know.
TDK_MiscellaneousDeductionFailure,
/// CUDA Target attributes do not match.
TDK_CUDATargetMismatch
};
TemplateDeductionResult
DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial,
const TemplateArgumentList &TemplateArgs,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult
DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial,
const TemplateArgumentList &TemplateArgs,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult SubstituteExplicitTemplateArguments(
FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo &ExplicitTemplateArgs,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
SmallVectorImpl<QualType> &ParamTypes, QualType *FunctionType,
sema::TemplateDeductionInfo &Info);
/// A function argument from which we performed template argument
/// deduction for a call.
struct OriginalCallArg {
OriginalCallArg(QualType OriginalParamType, bool DecomposedParam,
unsigned ArgIdx, QualType OriginalArgType)
: OriginalParamType(OriginalParamType),
DecomposedParam(DecomposedParam), ArgIdx(ArgIdx),
OriginalArgType(OriginalArgType) {}
// The parameter type from which deduction was performed.
QualType OriginalParamType;
// Whether the parameter was decomposed for deduction purposes.
bool DecomposedParam;
// Index of the corresponding call argument.
unsigned ArgIdx;
// The type of the call argument as written.
QualType OriginalArgType;
};
TemplateDeductionResult FinishTemplateArgumentDeduction(
FunctionTemplateDecl *FunctionTemplate,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
unsigned NumExplicitlySpecified, FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs = nullptr,
bool PartialOverloading = false,
llvm::function_ref<bool()> CheckNonDependent = []{ return false; });
TemplateDeductionResult DeduceTemplateArguments(
FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args,
FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info,
bool PartialOverloading,
llvm::function_ref<bool(ArrayRef<QualType>)> CheckNonDependent);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs,
QualType ArgFunctionType,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
bool IsAddressOfFunction = false);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
QualType ToType,
CXXConversionDecl *&Specialization,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
bool IsAddressOfFunction = false);
/// Substitute Replacement for \p auto in \p TypeWithAuto
QualType SubstAutoType(QualType TypeWithAuto, QualType Replacement);
/// Substitute Replacement for auto in TypeWithAuto
TypeSourceInfo* SubstAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto,
QualType Replacement);
/// Completely replace the \c auto in \p TypeWithAuto by
/// \p Replacement. This does not retain any \c auto type sugar.
QualType ReplaceAutoType(QualType TypeWithAuto, QualType Replacement);
TypeSourceInfo *ReplaceAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto,
QualType Replacement);
/// Result type of DeduceAutoType.
enum DeduceAutoResult {
/// Deduction succeeded.
DAR_Succeeded,
/// Deduction failed.
DAR_Failed,
/// Deduction failed and the failure has already been diagnosed.
DAR_FailedAlreadyDiagnosed
};
DeduceAutoResult
DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer, QualType &Result,
Optional<unsigned> DependentDeductionDepth = None,
bool IgnoreConstraints = false);
DeduceAutoResult
DeduceAutoType(TypeLoc AutoTypeLoc, Expr *&Initializer, QualType &Result,
Optional<unsigned> DependentDeductionDepth = None,
bool IgnoreConstraints = false);
void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init);
bool DeduceReturnType(FunctionDecl *FD, SourceLocation Loc,
bool Diagnose = true);
/// Declare implicit deduction guides for a class template if we've
/// not already done so.
void DeclareImplicitDeductionGuides(TemplateDecl *Template,
SourceLocation Loc);
QualType DeduceTemplateSpecializationFromInitializer(
TypeSourceInfo *TInfo, const InitializedEntity &Entity,
const InitializationKind &Kind, MultiExprArg Init);
QualType deduceVarTypeFromInitializer(VarDecl *VDecl, DeclarationName Name,
QualType Type, TypeSourceInfo *TSI,
SourceRange Range, bool DirectInit,
Expr *Init);
TypeLoc getReturnTypeLoc(FunctionDecl *FD) const;
bool DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD,
SourceLocation ReturnLoc,
Expr *&RetExpr, AutoType *AT);
FunctionTemplateDecl *getMoreSpecializedTemplate(
FunctionTemplateDecl *FT1, FunctionTemplateDecl *FT2, SourceLocation Loc,
TemplatePartialOrderingContext TPOC, unsigned NumCallArguments1,
unsigned NumCallArguments2, bool Reversed = false);
UnresolvedSetIterator
getMostSpecialized(UnresolvedSetIterator SBegin, UnresolvedSetIterator SEnd,
TemplateSpecCandidateSet &FailedCandidates,
SourceLocation Loc,
const PartialDiagnostic &NoneDiag,
const PartialDiagnostic &AmbigDiag,
const PartialDiagnostic &CandidateDiag,
bool Complain = true, QualType TargetType = QualType());
ClassTemplatePartialSpecializationDecl *
getMoreSpecializedPartialSpecialization(
ClassTemplatePartialSpecializationDecl *PS1,
ClassTemplatePartialSpecializationDecl *PS2,
SourceLocation Loc);
bool isMoreSpecializedThanPrimary(ClassTemplatePartialSpecializationDecl *T,
sema::TemplateDeductionInfo &Info);
VarTemplatePartialSpecializationDecl *getMoreSpecializedPartialSpecialization(
VarTemplatePartialSpecializationDecl *PS1,
VarTemplatePartialSpecializationDecl *PS2, SourceLocation Loc);
bool isMoreSpecializedThanPrimary(VarTemplatePartialSpecializationDecl *T,
sema::TemplateDeductionInfo &Info);
bool isTemplateTemplateParameterAtLeastAsSpecializedAs(
TemplateParameterList *PParam, TemplateDecl *AArg, SourceLocation Loc);
void MarkUsedTemplateParameters(const Expr *E, bool OnlyDeduced,
unsigned Depth, llvm::SmallBitVector &Used);
void MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs,
bool OnlyDeduced,
unsigned Depth,
llvm::SmallBitVector &Used);
/// Mark the template parameters deduced by \p FunctionTemplate, forwarding
/// to the static overload with this semantic analyzer's ASTContext.
void MarkDeducedTemplateParameters(const FunctionTemplateDecl *FunctionTemplate,
llvm::SmallBitVector &Deduced) {
MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced);
}
static void MarkDeducedTemplateParameters(ASTContext &Ctx,
const FunctionTemplateDecl *FunctionTemplate,
llvm::SmallBitVector &Deduced);
//===--------------------------------------------------------------------===//
// C++ Template Instantiation
//
MultiLevelTemplateArgumentList
getTemplateInstantiationArgs(NamedDecl *D,
const TemplateArgumentList *Innermost = nullptr,
bool RelativeToPrimary = false,
const FunctionDecl *Pattern = nullptr);
/// A context in which code is being synthesized (where a source location
/// alone is not sufficient to identify the context). This covers template
/// instantiation and various forms of implicitly-generated functions.
struct CodeSynthesisContext {
  /// The kind of template instantiation we are performing.
  enum SynthesisKind {
    /// We are instantiating a template declaration. The entity is
    /// the declaration we're instantiating (e.g., a CXXRecordDecl).
    TemplateInstantiation,

    /// We are instantiating a default argument for a template
    /// parameter. The Entity is the template parameter whose argument is
    /// being instantiated, the Template is the template, and the
    /// TemplateArgs/NumTemplateArguments provide the template arguments as
    /// specified.
    DefaultTemplateArgumentInstantiation,

    /// We are instantiating a default argument for a function.
    /// The Entity is the ParmVarDecl, and TemplateArgs/NumTemplateArgs
    /// provides the template arguments as specified.
    DefaultFunctionArgumentInstantiation,

    /// We are substituting explicit template arguments provided for
    /// a function template. The entity is a FunctionTemplateDecl.
    ExplicitTemplateArgumentSubstitution,

    /// We are substituting template argument determined as part of
    /// template argument deduction for either a class template
    /// partial specialization or a function template. The
    /// Entity is either a {Class|Var}TemplatePartialSpecializationDecl or
    /// a TemplateDecl.
    DeducedTemplateArgumentSubstitution,

    /// We are substituting prior template arguments into a new
    /// template parameter. The template parameter itself is either a
    /// NonTypeTemplateParmDecl or a TemplateTemplateParmDecl.
    PriorTemplateArgumentSubstitution,

    /// We are checking the validity of a default template argument that
    /// has been used when naming a template-id.
    DefaultTemplateArgumentChecking,

    /// We are computing the exception specification for a defaulted special
    /// member function.
    ExceptionSpecEvaluation,

    /// We are instantiating the exception specification for a function
    /// template which was deferred until it was needed.
    ExceptionSpecInstantiation,

    /// We are instantiating a requirement of a requires expression.
    RequirementInstantiation,

    /// We are checking the satisfaction of a nested requirement of a requires
    /// expression.
    NestedRequirementConstraintsCheck,

    /// We are declaring an implicit special member function.
    DeclaringSpecialMember,

    /// We are declaring an implicit 'operator==' for a defaulted
    /// 'operator<=>'.
    DeclaringImplicitEqualityComparison,

    /// We are defining a synthesized function (such as a defaulted special
    /// member).
    DefiningSynthesizedFunction,

    // We are checking the constraints associated with a constrained entity or
    // the constraint expression of a concept. This includes the checks that
    // atomic constraints have the type 'bool' and that they can be constant
    // evaluated.
    ConstraintsCheck,

    // We are substituting template arguments into a constraint expression.
    ConstraintSubstitution,

    // We are normalizing a constraint expression.
    ConstraintNormalization,

    // We are substituting into the parameter mapping of an atomic constraint
    // during normalization.
    ParameterMappingSubstitution,

    /// We are rewriting a comparison operator in terms of an operator<=>.
    RewritingOperatorAsSpaceship,

    /// We are initializing a structured binding.
    InitializingStructuredBinding,

    /// We are marking a class as __dllexport.
    MarkingClassDllexported,

    /// Added for Template instantiation observation.
    /// Memoization means we are _not_ instantiating a template because
    /// it is already instantiated (but we entered a context where we
    /// would have had to if it was not already instantiated).
    Memoization
  } Kind;

  /// Was the enclosing context a non-instantiation SFINAE context?
  bool SavedInNonInstantiationSFINAEContext;

  /// The point of instantiation or synthesis within the source code.
  SourceLocation PointOfInstantiation;

  /// The entity that is being synthesized.
  Decl *Entity;

  /// The template (or partial specialization) in which we are
  /// performing the instantiation, for substitutions of prior template
  /// arguments.
  NamedDecl *Template;

  /// The list of template arguments we are substituting, if they
  /// are not part of the entity.
  const TemplateArgument *TemplateArgs;

  // FIXME: Wrap this union around more members, or perhaps store the
  // kind-specific members in the RAII object owning the context.
  union {
    /// The number of template arguments in TemplateArgs.
    unsigned NumTemplateArgs;

    /// The special member being declared or defined.
    CXXSpecialMember SpecialMember;
  };

  /// Retrieve the template arguments being substituted. Only valid for
  /// kinds that store TemplateArgs/NumTemplateArgs, i.e. not
  /// DeclaringSpecialMember (which stores SpecialMember in the same union).
  ArrayRef<TemplateArgument> template_arguments() const {
    assert(Kind != DeclaringSpecialMember);
    return {TemplateArgs, NumTemplateArgs};
  }

  /// The template deduction info object associated with the
  /// substitution or checking of explicit or deduced template arguments.
  sema::TemplateDeductionInfo *DeductionInfo;

  /// The source range that covers the construct that causes
  /// the instantiation, e.g., the template-id that causes a class
  /// template instantiation.
  SourceRange InstantiationRange;

  CodeSynthesisContext()
      : Kind(TemplateInstantiation),
        SavedInNonInstantiationSFINAEContext(false), Entity(nullptr),
        Template(nullptr), TemplateArgs(nullptr), NumTemplateArgs(0),
        DeductionInfo(nullptr) {}

  /// Determines whether this template is an actual instantiation
  /// that should be counted toward the maximum instantiation depth.
  bool isInstantiationRecord() const;
};
/// List of active code synthesis contexts.
///
/// This vector is treated as a stack. As synthesis of one entity requires
/// synthesis of another, additional contexts are pushed onto the stack.
SmallVector<CodeSynthesisContext, 16> CodeSynthesisContexts;
/// Specializations whose definitions are currently being instantiated.
llvm::DenseSet<std::pair<Decl *, unsigned>> InstantiatingSpecializations;
/// Non-dependent types used in templates that have already been instantiated
/// by some template instantiation.
llvm::DenseSet<QualType> InstantiatedNonDependentTypes;
/// Extra modules inspected when performing a lookup during a template
/// instantiation. Computed lazily.
SmallVector<Module*, 16> CodeSynthesisContextLookupModules;
/// Cache of additional modules that should be used for name lookup
/// within the current template instantiation. Computed lazily; use
/// getLookupModules() to get a complete set.
llvm::DenseSet<Module*> LookupModulesCache;
/// Get the set of additional modules that should be checked during
/// name lookup. A module and its imports become visible when instantiating a
/// template defined within it.
llvm::DenseSet<Module*> &getLookupModules();
/// Map from the most recent declaration of a namespace to the most
/// recent visible declaration of that namespace.
llvm::DenseMap<NamedDecl*, NamedDecl*> VisibleNamespaceCache;
/// Whether we are in a SFINAE context that is not associated with
/// template instantiation.
///
/// This is used when setting up a SFINAE trap (see \c SFINAETrap) outside
/// of a template instantiation or template argument deduction.
bool InNonInstantiationSFINAEContext;
/// The number of \p CodeSynthesisContexts that are not template
/// instantiations and, therefore, should not be counted as part of the
/// instantiation depth.
///
/// When the instantiation depth reaches the user-configurable limit
/// \p LangOptions::InstantiationDepth we will abort instantiation.
// FIXME: Should we have a similar limit for other forms of synthesis?
unsigned NonInstantiationEntries;
/// The depth of the context stack at the point when the most recent
/// error or warning was produced.
///
/// This value is used to suppress printing of redundant context stacks
/// when there are multiple errors or warnings in the same instantiation.
// FIXME: Does this belong in Sema? It's tough to implement it anywhere else.
unsigned LastEmittedCodeSynthesisContextDepth = 0;
/// The template instantiation callbacks to trace or track
/// instantiations (objects can be chained).
///
/// These callbacks are used to print, trace or track template
/// instantiations as they are being constructed.
std::vector<std::unique_ptr<TemplateInstantiationCallback>>
TemplateInstCallbacks;
/// The current index into pack expansion arguments that will be
/// used for substitution of parameter packs.
///
/// The pack expansion index will be -1 to indicate that parameter packs
/// should be instantiated as themselves. Otherwise, the index specifies
/// which argument within the parameter pack will be used for substitution.
int ArgumentPackSubstitutionIndex;
/// RAII object used to change the argument pack substitution index
/// within a \c Sema object.
///
/// See \c ArgumentPackSubstitutionIndex for more information.
class ArgumentPackSubstitutionIndexRAII {
  Sema &Self;
  /// The substitution index in effect before this RAII object was
  /// constructed; restored on destruction.
  int OldSubstitutionIndex;

public:
  ArgumentPackSubstitutionIndexRAII(Sema &Self, int NewSubstitutionIndex)
      : Self(Self), OldSubstitutionIndex(Self.ArgumentPackSubstitutionIndex) {
    Self.ArgumentPackSubstitutionIndex = NewSubstitutionIndex;
  }

  ~ArgumentPackSubstitutionIndexRAII() {
    Self.ArgumentPackSubstitutionIndex = OldSubstitutionIndex;
  }
};

// NOTE(review): the original declaration befriended a nonexistent class
// named 'ArgumentPackSubstitutionRAII'; the helper defined above is
// actually 'ArgumentPackSubstitutionIndexRAII', so name that instead.
friend class ArgumentPackSubstitutionIndexRAII;
/// For each declaration that involved template argument deduction, the
/// set of diagnostics that were suppressed during that template argument
/// deduction.
///
/// FIXME: Serialize this structure to the AST file.
typedef llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> >
SuppressedDiagnosticsMap;
SuppressedDiagnosticsMap SuppressedDiagnostics;
/// A stack object to be created when performing template
/// instantiation.
///
/// Construction of an object of type \c InstantiatingTemplate
/// pushes the current instantiation onto the stack of active
/// instantiations. If the size of this stack exceeds the maximum
/// number of recursive template instantiations, construction
/// produces an error and evaluates true.
///
/// Destruction of this object will pop the named instantiation off
/// the stack.
struct InstantiatingTemplate {
  /// Note that we are instantiating a class template,
  /// function template, variable template, alias template,
  /// or a member thereof.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        Decl *Entity,
                        SourceRange InstantiationRange = SourceRange());

  /// Tag type selecting the exception-specification constructor below.
  struct ExceptionSpecification {};
  /// Note that we are instantiating an exception specification
  /// of a function template.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        FunctionDecl *Entity, ExceptionSpecification,
                        SourceRange InstantiationRange = SourceRange());

  /// Note that we are instantiating a default argument in a
  /// template-id.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        TemplateParameter Param, TemplateDecl *Template,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        SourceRange InstantiationRange = SourceRange());

  /// Note that we are substituting either explicitly-specified or
  /// deduced template arguments during function template argument deduction.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        FunctionTemplateDecl *FunctionTemplate,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        CodeSynthesisContext::SynthesisKind Kind,
                        sema::TemplateDeductionInfo &DeductionInfo,
                        SourceRange InstantiationRange = SourceRange());

  /// Note that we are instantiating as part of template
  /// argument deduction for a class template declaration.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        TemplateDecl *Template,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        sema::TemplateDeductionInfo &DeductionInfo,
                        SourceRange InstantiationRange = SourceRange());

  /// Note that we are instantiating as part of template
  /// argument deduction for a class template partial
  /// specialization.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        ClassTemplatePartialSpecializationDecl *PartialSpec,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        sema::TemplateDeductionInfo &DeductionInfo,
                        SourceRange InstantiationRange = SourceRange());

  /// Note that we are instantiating as part of template
  /// argument deduction for a variable template partial
  /// specialization.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        VarTemplatePartialSpecializationDecl *PartialSpec,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        sema::TemplateDeductionInfo &DeductionInfo,
                        SourceRange InstantiationRange = SourceRange());

  /// Note that we are instantiating a default argument for a function
  /// parameter.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        ParmVarDecl *Param,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        SourceRange InstantiationRange = SourceRange());

  /// Note that we are substituting prior template arguments into a
  /// non-type parameter.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        NamedDecl *Template,
                        NonTypeTemplateParmDecl *Param,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        SourceRange InstantiationRange);

  /// Note that we are substituting prior template arguments into a
  /// template template parameter.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        NamedDecl *Template,
                        TemplateTemplateParmDecl *Param,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        SourceRange InstantiationRange);

  /// Note that we are checking the default template argument
  /// against the template parameter for a given template-id.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        TemplateDecl *Template,
                        NamedDecl *Param,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        SourceRange InstantiationRange);

  /// Tag type selecting the constraints-check constructors below.
  struct ConstraintsCheck {};
  /// \brief Note that we are checking the constraints associated with some
  /// constrained entity (a concept declaration or a template with associated
  /// constraints).
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        ConstraintsCheck, NamedDecl *Template,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        SourceRange InstantiationRange);

  /// Tag type selecting the constraint-substitution constructor below.
  struct ConstraintSubstitution {};
  /// \brief Note that we are checking a constraint expression associated
  /// with a template declaration or as part of the satisfaction check of a
  /// concept.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        ConstraintSubstitution, NamedDecl *Template,
                        sema::TemplateDeductionInfo &DeductionInfo,
                        SourceRange InstantiationRange);

  /// Tag type selecting the constraint-normalization constructor below.
  struct ConstraintNormalization {};
  /// \brief Note that we are normalizing a constraint expression.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        ConstraintNormalization, NamedDecl *Template,
                        SourceRange InstantiationRange);

  /// Tag type selecting the parameter-mapping constructor below.
  struct ParameterMappingSubstitution {};
  /// \brief Note that we are substituting into the parameter mapping of an
  /// atomic constraint during constraint normalization.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        ParameterMappingSubstitution, NamedDecl *Template,
                        SourceRange InstantiationRange);

  /// \brief Note that we are substituting template arguments into a part of
  /// a requirement of a requires expression.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        concepts::Requirement *Req,
                        sema::TemplateDeductionInfo &DeductionInfo,
                        SourceRange InstantiationRange = SourceRange());

  /// \brief Note that we are checking the satisfaction of the constraint
  /// expression inside of a nested requirement.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        concepts::NestedRequirement *Req, ConstraintsCheck,
                        SourceRange InstantiationRange = SourceRange());

  /// Note that we have finished instantiating this template.
  void Clear();

  ~InstantiatingTemplate() { Clear(); }

  /// Determines whether we have exceeded the maximum
  /// recursive template instantiations.
  bool isInvalid() const { return Invalid; }

  /// Determine whether we are already instantiating this
  /// specialization in some surrounding active instantiation.
  bool isAlreadyInstantiating() const { return AlreadyInstantiating; }

private:
  Sema &SemaRef;
  bool Invalid;
  bool AlreadyInstantiating;
  bool CheckInstantiationDepth(SourceLocation PointOfInstantiation,
                               SourceRange InstantiationRange);

  /// Private constructor that takes the synthesis kind explicitly; the
  /// public constructors select the kind for their specific use case.
  InstantiatingTemplate(
      Sema &SemaRef, CodeSynthesisContext::SynthesisKind Kind,
      SourceLocation PointOfInstantiation, SourceRange InstantiationRange,
      Decl *Entity, NamedDecl *Template = nullptr,
      ArrayRef<TemplateArgument> TemplateArgs = None,
      sema::TemplateDeductionInfo *DeductionInfo = nullptr);

  // Non-copyable: destruction pops the instantiation stack exactly once.
  InstantiatingTemplate(const InstantiatingTemplate&) = delete;

  InstantiatingTemplate&
  operator=(const InstantiatingTemplate&) = delete;
};
void pushCodeSynthesisContext(CodeSynthesisContext Ctx);
void popCodeSynthesisContext();
/// Determine whether we are currently performing template instantiation.
///
/// True exactly when the synthesis-context stack holds more entries than
/// the count of non-instantiation entries.
bool inTemplateInstantiation() const {
  return NonInstantiationEntries < CodeSynthesisContexts.size();
}
/// Print the active code-synthesis context stack, unless it is at the
/// same depth as when it was last printed (to avoid repeating identical
/// context notes), and note any active pragma-attribute instantiation.
void PrintContextStack() {
  if (!CodeSynthesisContexts.empty() &&
      CodeSynthesisContexts.size() != LastEmittedCodeSynthesisContextDepth) {
    PrintInstantiationStack();
    // Record the depth we printed at so an unchanged stack is not reprinted.
    LastEmittedCodeSynthesisContextDepth = CodeSynthesisContexts.size();
  }
  if (PragmaAttributeCurrentTargetDecl)
    PrintPragmaAttributeInstantiationPoint();
}
void PrintInstantiationStack();
void PrintPragmaAttributeInstantiationPoint();
/// Determines whether we are currently in a context where
/// template argument substitution failures are not considered
/// errors.
///
/// \returns An empty \c Optional if we're not in a SFINAE context.
/// Otherwise, contains a pointer that, if non-NULL, contains the nearest
/// template-deduction context object, which can be used to capture
/// diagnostics that will be suppressed.
Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const;
/// Determines whether we are currently in a context that
/// is not evaluated as per C++ [expr] p5.
bool isUnevaluatedContext() const {
  assert(!ExprEvalContexts.empty() &&
         "Must be in an expression evaluation context");
  const auto &InnermostCtx = ExprEvalContexts.back();
  return InnermostCtx.isUnevaluated();
}
/// RAII class used to determine whether SFINAE has
/// trapped any errors that occur during template argument
/// deduction.
class SFINAETrap {
  Sema &SemaRef;
  // State captured at construction and restored at destruction.
  unsigned PrevSFINAEErrors;
  bool PrevInNonInstantiationSFINAEContext;
  bool PrevAccessCheckingSFINAE;
  bool PrevLastDiagnosticIgnored;

public:
  explicit SFINAETrap(Sema &SemaRef, bool AccessCheckingSFINAE = false)
      : SemaRef(SemaRef), PrevSFINAEErrors(SemaRef.NumSFINAEErrors),
        PrevInNonInstantiationSFINAEContext(
            SemaRef.InNonInstantiationSFINAEContext),
        PrevAccessCheckingSFINAE(SemaRef.AccessCheckingSFINAE),
        PrevLastDiagnosticIgnored(
            SemaRef.getDiagnostics().isLastDiagnosticIgnored())
  {
    // If we were not already inside a SFINAE context, mark this as a
    // non-instantiation SFINAE context for the lifetime of the trap.
    if (!SemaRef.isSFINAEContext())
      SemaRef.InNonInstantiationSFINAEContext = true;
    SemaRef.AccessCheckingSFINAE = AccessCheckingSFINAE;
  }

  ~SFINAETrap() {
    // Restore all captured state, including the last-diagnostic-ignored
    // flag on the diagnostics engine.
    SemaRef.NumSFINAEErrors = PrevSFINAEErrors;
    SemaRef.InNonInstantiationSFINAEContext
      = PrevInNonInstantiationSFINAEContext;
    SemaRef.AccessCheckingSFINAE = PrevAccessCheckingSFINAE;
    SemaRef.getDiagnostics().setLastDiagnosticIgnored(
        PrevLastDiagnosticIgnored);
  }

  /// Determine whether any SFINAE errors have been trapped.
  bool hasErrorOccurred() const {
    return SemaRef.NumSFINAEErrors > PrevSFINAEErrors;
  }
};
/// RAII class used to indicate that we are performing provisional
/// semantic analysis to determine the validity of a construct, so
/// typo-correction and diagnostics in the immediate context (not within
/// implicitly-instantiated templates) should be suppressed.
class TentativeAnalysisScope {
  Sema &SemaRef;
  // FIXME: Using a SFINAETrap for this is a hack.
  SFINAETrap Trap;
  // Previous typo-correction setting, restored on destruction.
  bool PrevDisableTypoCorrection;

public:
  explicit TentativeAnalysisScope(Sema &SemaRef)
      : SemaRef(SemaRef), Trap(SemaRef, true),
        PrevDisableTypoCorrection(SemaRef.DisableTypoCorrection) {
    SemaRef.DisableTypoCorrection = true;
  }

  ~TentativeAnalysisScope() {
    SemaRef.DisableTypoCorrection = PrevDisableTypoCorrection;
  }
};
/// The current instantiation scope used to store local
/// variables.
LocalInstantiationScope *CurrentInstantiationScope;
/// Tracks whether we are in a context where typo correction is
/// disabled.
bool DisableTypoCorrection;
/// The number of typos corrected by CorrectTypo.
unsigned TyposCorrected;
typedef llvm::SmallSet<SourceLocation, 2> SrcLocSet;
typedef llvm::DenseMap<IdentifierInfo *, SrcLocSet> IdentifierSourceLocations;
/// A cache containing identifiers for which typo correction failed and
/// their locations, so that repeated attempts to correct an identifier in a
/// given location are ignored if typo correction already failed for it.
IdentifierSourceLocations TypoCorrectionFailures;
/// Worker object for performing CFG-based warnings.
sema::AnalysisBasedWarnings AnalysisWarnings;
threadSafety::BeforeSet *ThreadSafetyDeclCache;
/// An entity for which implicit template instantiation is required.
///
/// The source location associated with the declaration is the first place in
/// the source code where the declaration was "used". It is not necessarily
/// the point of instantiation (which will be either before or after the
/// namespace-scope declaration that triggered this implicit instantiation),
/// However, it is the location that diagnostics should generally refer to,
/// because users will need to know what code triggered the instantiation.
typedef std::pair<ValueDecl *, SourceLocation> PendingImplicitInstantiation;
/// The queue of implicit template instantiations that are required
/// but have not yet been performed.
std::deque<PendingImplicitInstantiation> PendingInstantiations;
/// Queue of implicit template instantiations that cannot be performed
/// eagerly.
SmallVector<PendingImplicitInstantiation, 1> LateParsedInstantiations;
/// RAII scope that, when enabled, takes over the global queues of pending
/// implicit instantiations and vtable uses so they can be performed
/// eagerly via perform(); the previously pending work is restored on
/// destruction.
class GlobalEagerInstantiationScope {
public:
  GlobalEagerInstantiationScope(Sema &S, bool Enabled)
      : S(S), Enabled(Enabled) {
    if (!Enabled) return;

    // Take ownership of the global queues; they are put back in the
    // destructor.
    SavedPendingInstantiations.swap(S.PendingInstantiations);
    SavedVTableUses.swap(S.VTableUses);
  }

  /// Perform the instantiations and vtable definitions queued while this
  /// scope was active.
  void perform() {
    if (Enabled) {
      S.DefineUsedVTables();
      S.PerformPendingInstantiations();
    }
  }

  ~GlobalEagerInstantiationScope() {
    if (!Enabled) return;

    // Restore the set of pending vtables.
    assert(S.VTableUses.empty() &&
           "VTableUses should be empty before it is discarded.");
    S.VTableUses.swap(SavedVTableUses);

    // Restore the set of pending implicit instantiations.
    if (S.TUKind != TU_Prefix || !S.LangOpts.PCHInstantiateTemplates) {
      assert(S.PendingInstantiations.empty() &&
             "PendingInstantiations should be empty before it is discarded.");
      S.PendingInstantiations.swap(SavedPendingInstantiations);
    } else {
      // Template instantiations in the PCH may be delayed until the TU.
      S.PendingInstantiations.swap(SavedPendingInstantiations);
      S.PendingInstantiations.insert(S.PendingInstantiations.end(),
                                     SavedPendingInstantiations.begin(),
                                     SavedPendingInstantiations.end());
    }
  }

private:
  Sema &S;
  SmallVector<VTableUse, 16> SavedVTableUses;
  std::deque<PendingImplicitInstantiation> SavedPendingInstantiations;
  bool Enabled;
};
/// The queue of implicit template instantiations that are required
/// and must be performed within the current local scope.
///
/// This queue is only used for member functions of local classes in
/// templates, which must be instantiated in the same scope as their
/// enclosing function, so that they can reference function-local
/// types, static variables, enumerators, etc.
std::deque<PendingImplicitInstantiation> PendingLocalImplicitInstantiations;
/// RAII scope that takes over the queue of pending local implicit
/// instantiations so they can be performed via perform() within the
/// current local scope; the previous queue is restored on destruction.
class LocalEagerInstantiationScope {
public:
  LocalEagerInstantiationScope(Sema &S) : S(S) {
    SavedPendingLocalImplicitInstantiations.swap(
        S.PendingLocalImplicitInstantiations);
  }

  /// Perform the local instantiations queued while this scope was active.
  void perform() { S.PerformPendingInstantiations(/*LocalOnly=*/true); }

  ~LocalEagerInstantiationScope() {
    assert(S.PendingLocalImplicitInstantiations.empty() &&
           "there shouldn't be any pending local implicit instantiations");
    SavedPendingLocalImplicitInstantiations.swap(
        S.PendingLocalImplicitInstantiations);
  }

private:
  Sema &S;
  std::deque<PendingImplicitInstantiation>
      SavedPendingLocalImplicitInstantiations;
};
/// A helper class for building up ExtParameterInfos.
class ExtParameterInfoBuilder {
  SmallVector<FunctionProtoType::ExtParameterInfo, 16> Infos;
  bool HasInteresting = false;

public:
  /// Set the ExtParameterInfo for the parameter at the given index,
  /// which must be strictly beyond any index set so far.
  void set(unsigned index, FunctionProtoType::ExtParameterInfo info) {
    assert(Infos.size() <= index);
    Infos.resize(index);
    Infos.push_back(info);

    // Remember whether any entry differs from the default-constructed
    // value; if none does, callers need not materialize the array.
    HasInteresting |= (info != FunctionProtoType::ExtParameterInfo());
  }

  /// Return a pointer (suitable for setting in an ExtProtoInfo) to the
  /// ExtParameterInfo array we've built up, or null if every entry is
  /// the default value.
  const FunctionProtoType::ExtParameterInfo *
  getPointerOrNull(unsigned numParams) {
    if (HasInteresting) {
      Infos.resize(numParams);
      return Infos.data();
    }
    return nullptr;
  }
};
void PerformPendingInstantiations(bool LocalOnly = false);
TypeSourceInfo *SubstType(TypeSourceInfo *T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity,
bool AllowDeducedTST = false);
QualType SubstType(QualType T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity);
TypeSourceInfo *SubstType(TypeLoc TL,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity);
TypeSourceInfo *SubstFunctionDeclType(TypeSourceInfo *T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc,
DeclarationName Entity,
CXXRecordDecl *ThisContext,
Qualifiers ThisTypeQuals);
void SubstExceptionSpec(FunctionDecl *New, const FunctionProtoType *Proto,
const MultiLevelTemplateArgumentList &Args);
bool SubstExceptionSpec(SourceLocation Loc,
FunctionProtoType::ExceptionSpecInfo &ESI,
SmallVectorImpl<QualType> &ExceptionStorage,
const MultiLevelTemplateArgumentList &Args);
ParmVarDecl *SubstParmVarDecl(ParmVarDecl *D,
const MultiLevelTemplateArgumentList &TemplateArgs,
int indexAdjustment,
Optional<unsigned> NumExpansions,
bool ExpectParameterPack);
bool SubstParmTypes(SourceLocation Loc, ArrayRef<ParmVarDecl *> Params,
const FunctionProtoType::ExtParameterInfo *ExtParamInfos,
const MultiLevelTemplateArgumentList &TemplateArgs,
SmallVectorImpl<QualType> &ParamTypes,
SmallVectorImpl<ParmVarDecl *> *OutParams,
ExtParameterInfoBuilder &ParamInfos);
ExprResult SubstExpr(Expr *E,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Substitute the given template arguments into a list of
/// expressions, expanding pack expansions if required.
///
/// \param Exprs The list of expressions to substitute into.
///
/// \param IsCall Whether this is some form of call, in which case
/// default arguments will be dropped.
///
/// \param TemplateArgs The set of template arguments to substitute.
///
/// \param Outputs Will receive all of the substituted arguments.
///
/// \returns true if an error occurred, false otherwise.
bool SubstExprs(ArrayRef<Expr *> Exprs, bool IsCall,
const MultiLevelTemplateArgumentList &TemplateArgs,
SmallVectorImpl<Expr *> &Outputs);
StmtResult SubstStmt(Stmt *S,
const MultiLevelTemplateArgumentList &TemplateArgs);
TemplateParameterList *
SubstTemplateParams(TemplateParameterList *Params, DeclContext *Owner,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool
SubstTemplateArguments(ArrayRef<TemplateArgumentLoc> Args,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateArgumentListInfo &Outputs);
Decl *SubstDecl(Decl *D, DeclContext *Owner,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Substitute the name and return type of a defaulted 'operator<=>' to form
/// an implicit 'operator=='.
FunctionDecl *SubstSpaceshipAsEqualEqual(CXXRecordDecl *RD,
FunctionDecl *Spaceship);
ExprResult SubstInitializer(Expr *E,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool CXXDirectInit);
bool
SubstBaseSpecifiers(CXXRecordDecl *Instantiation,
CXXRecordDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool
InstantiateClass(SourceLocation PointOfInstantiation,
CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK,
bool Complain = true);
bool InstantiateEnum(SourceLocation PointOfInstantiation,
EnumDecl *Instantiation, EnumDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK);
bool InstantiateInClassInitializer(
SourceLocation PointOfInstantiation, FieldDecl *Instantiation,
FieldDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs);
/// An attribute from a template pattern whose instantiation onto the
/// corresponding instantiated declaration has been deferred, together
/// with the local scope needed to instantiate it later.
struct LateInstantiatedAttribute {
  const Attr *TmplAttr;           // Attribute on the template pattern.
  LocalInstantiationScope *Scope; // Scope to use when instantiating it.
  Decl *NewDecl;                  // Instantiated declaration to attach to.

  LateInstantiatedAttribute(const Attr *A, LocalInstantiationScope *S,
                            Decl *D)
      : TmplAttr(A), Scope(S), NewDecl(D)
  { }
};
typedef SmallVector<LateInstantiatedAttribute, 16> LateInstantiatedAttrVec;
void InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs,
const Decl *Pattern, Decl *Inst,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *OuterMostScope = nullptr);
void
InstantiateAttrsForDecl(const MultiLevelTemplateArgumentList &TemplateArgs,
const Decl *Pattern, Decl *Inst,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *OuterMostScope = nullptr);
bool usesPartialOrExplicitSpecialization(
SourceLocation Loc, ClassTemplateSpecializationDecl *ClassTemplateSpec);
bool
InstantiateClassTemplateSpecialization(SourceLocation PointOfInstantiation,
ClassTemplateSpecializationDecl *ClassTemplateSpec,
TemplateSpecializationKind TSK,
bool Complain = true);
void InstantiateClassMembers(SourceLocation PointOfInstantiation,
CXXRecordDecl *Instantiation,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK);
void InstantiateClassTemplateSpecializationMembers(
SourceLocation PointOfInstantiation,
ClassTemplateSpecializationDecl *ClassTemplateSpec,
TemplateSpecializationKind TSK);
NestedNameSpecifierLoc
SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS,
const MultiLevelTemplateArgumentList &TemplateArgs);
DeclarationNameInfo
SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo,
const MultiLevelTemplateArgumentList &TemplateArgs);
TemplateName
SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name,
SourceLocation Loc,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool Subst(const TemplateArgumentLoc *Args, unsigned NumArgs,
TemplateArgumentListInfo &Result,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool InstantiateDefaultArgument(SourceLocation CallLoc, FunctionDecl *FD,
ParmVarDecl *Param);
void InstantiateExceptionSpec(SourceLocation PointOfInstantiation,
FunctionDecl *Function);
bool CheckInstantiatedFunctionTemplateConstraints(
SourceLocation PointOfInstantiation, FunctionDecl *Decl,
ArrayRef<TemplateArgument> TemplateArgs,
ConstraintSatisfaction &Satisfaction);
FunctionDecl *InstantiateFunctionDeclaration(FunctionTemplateDecl *FTD,
const TemplateArgumentList *Args,
SourceLocation Loc);
void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
FunctionDecl *Function,
bool Recursive = false,
bool DefinitionRequired = false,
bool AtEndOfTU = false);
VarTemplateSpecializationDecl *BuildVarTemplateInstantiation(
VarTemplateDecl *VarTemplate, VarDecl *FromVar,
const TemplateArgumentList &TemplateArgList,
const TemplateArgumentListInfo &TemplateArgsInfo,
SmallVectorImpl<TemplateArgument> &Converted,
SourceLocation PointOfInstantiation, void *InsertPos,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *StartingScope = nullptr);
VarTemplateSpecializationDecl *CompleteVarTemplateSpecializationDecl(
VarTemplateSpecializationDecl *VarSpec, VarDecl *PatternDecl,
const MultiLevelTemplateArgumentList &TemplateArgs);
void
BuildVariableInstantiation(VarDecl *NewVar, VarDecl *OldVar,
const MultiLevelTemplateArgumentList &TemplateArgs,
LateInstantiatedAttrVec *LateAttrs,
DeclContext *Owner,
LocalInstantiationScope *StartingScope,
bool InstantiatingVarTemplate = false,
VarTemplateSpecializationDecl *PrevVTSD = nullptr);
void InstantiateVariableInitializer(
VarDecl *Var, VarDecl *OldVar,
const MultiLevelTemplateArgumentList &TemplateArgs);
void InstantiateVariableDefinition(SourceLocation PointOfInstantiation,
VarDecl *Var, bool Recursive = false,
bool DefinitionRequired = false,
bool AtEndOfTU = false);
void InstantiateMemInitializers(CXXConstructorDecl *New,
const CXXConstructorDecl *Tmpl,
const MultiLevelTemplateArgumentList &TemplateArgs);
NamedDecl *FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool FindingInstantiatedContext = false);
DeclContext *FindInstantiatedContext(SourceLocation Loc, DeclContext *DC,
const MultiLevelTemplateArgumentList &TemplateArgs);
// Objective-C declarations.
/// Identifies the kind of Objective-C container construct currently being
/// acted on (interface, protocol, category, class extension, or one of the
/// implementation forms); OCK_None means no container is active.
enum ObjCContainerKind {
OCK_None = -1,
OCK_Interface = 0,
OCK_Protocol,
OCK_Category,
OCK_ClassExtension,
OCK_Implementation,
OCK_CategoryImplementation
};
ObjCContainerKind getObjCContainerKind() const;
DeclResult actOnObjCTypeParam(Scope *S,
ObjCTypeParamVariance variance,
SourceLocation varianceLoc,
unsigned index,
IdentifierInfo *paramName,
SourceLocation paramLoc,
SourceLocation colonLoc,
ParsedType typeBound);
ObjCTypeParamList *actOnObjCTypeParamList(Scope *S, SourceLocation lAngleLoc,
ArrayRef<Decl *> typeParams,
SourceLocation rAngleLoc);
void popObjCTypeParamList(Scope *S, ObjCTypeParamList *typeParamList);
Decl *ActOnStartClassInterface(
Scope *S, SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName,
SourceLocation ClassLoc, ObjCTypeParamList *typeParamList,
IdentifierInfo *SuperName, SourceLocation SuperLoc,
ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange,
Decl *const *ProtoRefs, unsigned NumProtoRefs,
const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
const ParsedAttributesView &AttrList);
void ActOnSuperClassOfClassInterface(Scope *S,
SourceLocation AtInterfaceLoc,
ObjCInterfaceDecl *IDecl,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *SuperName,
SourceLocation SuperLoc,
ArrayRef<ParsedType> SuperTypeArgs,
SourceRange SuperTypeArgsRange);
void ActOnTypedefedProtocols(SmallVectorImpl<Decl *> &ProtocolRefs,
SmallVectorImpl<SourceLocation> &ProtocolLocs,
IdentifierInfo *SuperName,
SourceLocation SuperLoc);
Decl *ActOnCompatibilityAlias(
SourceLocation AtCompatibilityAliasLoc,
IdentifierInfo *AliasName, SourceLocation AliasLocation,
IdentifierInfo *ClassName, SourceLocation ClassLocation);
bool CheckForwardProtocolDeclarationForCircularDependency(
IdentifierInfo *PName,
SourceLocation &PLoc, SourceLocation PrevLoc,
const ObjCList<ObjCProtocolDecl> &PList);
Decl *ActOnStartProtocolInterface(
SourceLocation AtProtoInterfaceLoc, IdentifierInfo *ProtocolName,
SourceLocation ProtocolLoc, Decl *const *ProtoRefNames,
unsigned NumProtoRefs, const SourceLocation *ProtoLocs,
SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList);
Decl *ActOnStartCategoryInterface(
SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName,
SourceLocation ClassLoc, ObjCTypeParamList *typeParamList,
IdentifierInfo *CategoryName, SourceLocation CategoryLoc,
Decl *const *ProtoRefs, unsigned NumProtoRefs,
const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
const ParsedAttributesView &AttrList);
Decl *ActOnStartClassImplementation(SourceLocation AtClassImplLoc,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *SuperClassname,
SourceLocation SuperClassLoc,
const ParsedAttributesView &AttrList);
Decl *ActOnStartCategoryImplementation(SourceLocation AtCatImplLoc,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *CatName,
SourceLocation CatLoc,
const ParsedAttributesView &AttrList);
DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl,
ArrayRef<Decl *> Decls);
DeclGroupPtrTy ActOnForwardClassDeclaration(SourceLocation Loc,
IdentifierInfo **IdentList,
SourceLocation *IdentLocs,
ArrayRef<ObjCTypeParamList *> TypeParamLists,
unsigned NumElts);
DeclGroupPtrTy
ActOnForwardProtocolDeclaration(SourceLocation AtProtoclLoc,
ArrayRef<IdentifierLocPair> IdentList,
const ParsedAttributesView &attrList);
void FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer,
ArrayRef<IdentifierLocPair> ProtocolId,
SmallVectorImpl<Decl *> &Protocols);
void DiagnoseTypeArgsAndProtocols(IdentifierInfo *ProtocolId,
SourceLocation ProtocolLoc,
IdentifierInfo *TypeArgId,
SourceLocation TypeArgLoc,
bool SelectProtocolFirst = false);
/// Given a list of identifiers (and their locations), resolve the
/// names to either Objective-C protocol qualifiers or type
/// arguments, as appropriate.
void actOnObjCTypeArgsOrProtocolQualifiers(
Scope *S,
ParsedType baseType,
SourceLocation lAngleLoc,
ArrayRef<IdentifierInfo *> identifiers,
ArrayRef<SourceLocation> identifierLocs,
SourceLocation rAngleLoc,
SourceLocation &typeArgsLAngleLoc,
SmallVectorImpl<ParsedType> &typeArgs,
SourceLocation &typeArgsRAngleLoc,
SourceLocation &protocolLAngleLoc,
SmallVectorImpl<Decl *> &protocols,
SourceLocation &protocolRAngleLoc,
bool warnOnIncompleteProtocols);
/// Build an Objective-C protocol-qualified 'id' type where no
/// base type was specified.
TypeResult actOnObjCProtocolQualifierType(
SourceLocation lAngleLoc,
ArrayRef<Decl *> protocols,
ArrayRef<SourceLocation> protocolLocs,
SourceLocation rAngleLoc);
/// Build a specialized and/or protocol-qualified Objective-C type.
TypeResult actOnObjCTypeArgsAndProtocolQualifiers(
Scope *S,
SourceLocation Loc,
ParsedType BaseType,
SourceLocation TypeArgsLAngleLoc,
ArrayRef<ParsedType> TypeArgs,
SourceLocation TypeArgsRAngleLoc,
SourceLocation ProtocolLAngleLoc,
ArrayRef<Decl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc);
/// Build an Objective-C type parameter type.
QualType BuildObjCTypeParamType(const ObjCTypeParamDecl *Decl,
SourceLocation ProtocolLAngleLoc,
ArrayRef<ObjCProtocolDecl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc,
bool FailOnError = false);
/// Build an Objective-C object pointer type.
QualType BuildObjCObjectType(QualType BaseType,
SourceLocation Loc,
SourceLocation TypeArgsLAngleLoc,
ArrayRef<TypeSourceInfo *> TypeArgs,
SourceLocation TypeArgsRAngleLoc,
SourceLocation ProtocolLAngleLoc,
ArrayRef<ObjCProtocolDecl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc,
bool FailOnError = false);
/// Ensure attributes are consistent with type.
/// \param [in, out] Attributes The attributes to check; they will
/// be modified to be consistent with \p PropertyTy.
void CheckObjCPropertyAttributes(Decl *PropertyPtrTy,
SourceLocation Loc,
unsigned &Attributes,
bool propertyInPrimaryClass);
/// Process the specified property declaration and create decls for the
/// setters and getters as needed.
/// \param property The property declaration being processed
void ProcessPropertyDecl(ObjCPropertyDecl *property);
void DiagnosePropertyMismatch(ObjCPropertyDecl *Property,
ObjCPropertyDecl *SuperProperty,
const IdentifierInfo *Name,
bool OverridingProtocolProperty);
void DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT,
ObjCInterfaceDecl *ID);
Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd,
ArrayRef<Decl *> allMethods = None,
ArrayRef<DeclGroupPtrTy> allTUVars = None);
Decl *ActOnProperty(Scope *S, SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD, ObjCDeclSpec &ODS,
Selector GetterSel, Selector SetterSel,
tok::ObjCKeywordKind MethodImplKind,
DeclContext *lexicalDC = nullptr);
Decl *ActOnPropertyImplDecl(Scope *S,
SourceLocation AtLoc,
SourceLocation PropertyLoc,
bool ImplKind,
IdentifierInfo *PropertyId,
IdentifierInfo *PropertyIvar,
SourceLocation PropertyIvarLoc,
ObjCPropertyQueryKind QueryKind);
/// Kinds of Objective-C methods with special semantics, keyed by method
/// family (alloc/new/copy and the retaining vs. non-retaining init forms).
enum ObjCSpecialMethodKind {
OSMK_None,             // not a special method family
OSMK_Alloc,            // +alloc family
OSMK_New,              // +new family
OSMK_Copy,             // -copy family
OSMK_RetainingInit,    // -init family that retains its receiver
OSMK_NonRetainingInit  // -init family that does not retain its receiver
};
/// Per-argument information for an Objective-C method declaration, consumed
/// by ActOnMethodDeclaration (one entry per selector slot).
struct ObjCArgInfo {
IdentifierInfo *Name;    // name of this selector-slot argument
SourceLocation NameLoc;  // location of that name
// The Type is null if no type was specified, and the DeclSpec is invalid
// in this case.
ParsedType Type;
ObjCDeclSpec DeclSpec;
/// ArgAttrs - Attribute list for this argument.
ParsedAttributesView ArgAttrs;
};
Decl *ActOnMethodDeclaration(
Scope *S,
SourceLocation BeginLoc, // location of the + or -.
SourceLocation EndLoc, // location of the ; or {.
tok::TokenKind MethodType, ObjCDeclSpec &ReturnQT, ParsedType ReturnType,
ArrayRef<SourceLocation> SelectorLocs, Selector Sel,
// optional arguments. The number of types/arguments is obtained
// from the Sel.getNumArgs().
ObjCArgInfo *ArgInfo, DeclaratorChunk::ParamInfo *CParamInfo,
unsigned CNumArgs, // c-style args
const ParsedAttributesView &AttrList, tok::ObjCKeywordKind MethodImplKind,
bool isVariadic, bool MethodDefinition);
ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel,
const ObjCObjectPointerType *OPT,
bool IsInstance);
ObjCMethodDecl *LookupMethodInObjectType(Selector Sel, QualType Ty,
bool IsInstance);
bool CheckARCMethodDecl(ObjCMethodDecl *method);
bool inferObjCARCLifetime(ValueDecl *decl);
void deduceOpenCLAddressSpace(ValueDecl *decl);
ExprResult
HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT,
Expr *BaseExpr,
SourceLocation OpLoc,
DeclarationName MemberName,
SourceLocation MemberLoc,
SourceLocation SuperLoc, QualType SuperType,
bool Super);
ExprResult
ActOnClassPropertyRefExpr(IdentifierInfo &receiverName,
IdentifierInfo &propertyName,
SourceLocation receiverNameLoc,
SourceLocation propertyNameLoc);
ObjCMethodDecl *tryCaptureObjCSelf(SourceLocation Loc);
/// Describes the kind of message expression indicated by a message
/// send that starts with an identifier.
enum ObjCMessageKind {
/// The message is sent to 'super'.
ObjCSuperMessage,
/// The message is an instance message.
ObjCInstanceMessage,
/// The message is a class message, and the identifier is a type
/// name.
ObjCClassMessage
};
ObjCMessageKind getObjCMessageKind(Scope *S,
IdentifierInfo *Name,
SourceLocation NameLoc,
bool IsSuper,
bool HasTrailingDot,
ParsedType &ReceiverType);
ExprResult ActOnSuperMessage(Scope *S, SourceLocation SuperLoc,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo,
QualType ReceiverType,
SourceLocation SuperLoc,
Selector Sel,
ObjCMethodDecl *Method,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args,
bool isImplicit = false);
ExprResult BuildClassMessageImplicit(QualType ReceiverType,
bool isSuperReceiver,
SourceLocation Loc,
Selector Sel,
ObjCMethodDecl *Method,
MultiExprArg Args);
ExprResult ActOnClassMessage(Scope *S,
ParsedType Receiver,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildInstanceMessage(Expr *Receiver,
QualType ReceiverType,
SourceLocation SuperLoc,
Selector Sel,
ObjCMethodDecl *Method,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args,
bool isImplicit = false);
ExprResult BuildInstanceMessageImplicit(Expr *Receiver,
QualType ReceiverType,
SourceLocation Loc,
Selector Sel,
ObjCMethodDecl *Method,
MultiExprArg Args);
ExprResult ActOnInstanceMessage(Scope *S,
Expr *Receiver,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildObjCBridgedCast(SourceLocation LParenLoc,
ObjCBridgeCastKind Kind,
SourceLocation BridgeKeywordLoc,
TypeSourceInfo *TSInfo,
Expr *SubExpr);
ExprResult ActOnObjCBridgedCast(Scope *S,
SourceLocation LParenLoc,
ObjCBridgeCastKind Kind,
SourceLocation BridgeKeywordLoc,
ParsedType Type,
SourceLocation RParenLoc,
Expr *SubExpr);
void CheckTollFreeBridgeCast(QualType castType, Expr *castExpr);
void CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr);
bool CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr,
CastKind &Kind);
bool checkObjCBridgeRelatedComponents(SourceLocation Loc,
QualType DestType, QualType SrcType,
ObjCInterfaceDecl *&RelatedClass,
ObjCMethodDecl *&ClassMethod,
ObjCMethodDecl *&InstanceMethod,
TypedefNameDecl *&TDNDecl,
bool CfToNs, bool Diagnose = true);
bool CheckObjCBridgeRelatedConversions(SourceLocation Loc,
QualType DestType, QualType SrcType,
Expr *&SrcExpr, bool Diagnose = true);
bool CheckConversionToObjCLiteral(QualType DstType, Expr *&SrcExpr,
bool Diagnose = true);
bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall);
/// Check whether the given new method is a valid override of the
/// given overridden method, and set any properties that should be inherited.
void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod,
const ObjCMethodDecl *Overridden);
/// Describes the compatibility of a result type with its method.
enum ResultTypeCompatibilityKind {
RTC_Compatible,    // result type is compatible with the method
RTC_Incompatible,  // result type is known to be incompatible
RTC_Unknown        // compatibility could not be determined
};
void CheckObjCMethodDirectOverrides(ObjCMethodDecl *method,
ObjCMethodDecl *overridden);
void CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod,
ObjCInterfaceDecl *CurrentClass,
ResultTypeCompatibilityKind RTC);
enum PragmaOptionsAlignKind {
POAK_Native, // #pragma options align=native
POAK_Natural, // #pragma options align=natural
POAK_Packed, // #pragma options align=packed
POAK_Power, // #pragma options align=power
POAK_Mac68k, // #pragma options align=mac68k
POAK_Reset // #pragma options align=reset
};
/// ActOnPragmaClangSection - Called on well formed \#pragma clang section
void ActOnPragmaClangSection(SourceLocation PragmaLoc,
PragmaClangSectionAction Action,
PragmaClangSectionKind SecKind, StringRef SecName);
/// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align.
void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind,
SourceLocation PragmaLoc);
/// ActOnPragmaPack - Called on well formed \#pragma pack(...).
void ActOnPragmaPack(SourceLocation PragmaLoc, PragmaMsStackAction Action,
StringRef SlotLabel, Expr *Alignment);
/// Which misuse of \#pragma pack is being diagnosed; passed to
/// DiagnoseNonDefaultPragmaPack.
enum class PragmaPackDiagnoseKind {
NonDefaultStateAtInclude,  // pack state was not the default at an #include
ChangedStateAtExit         // pack state changed by the time a file was exited
};
void DiagnoseNonDefaultPragmaPack(PragmaPackDiagnoseKind Kind,
SourceLocation IncludeLoc);
void DiagnoseUnterminatedPragmaPack();
/// ActOnPragmaMSStruct - Called on well formed \#pragma ms_struct [on|off].
void ActOnPragmaMSStruct(PragmaMSStructKind Kind);
/// ActOnPragmaMSComment - Called on well formed
/// \#pragma comment(kind, "arg").
void ActOnPragmaMSComment(SourceLocation CommentLoc, PragmaMSCommentKind Kind,
StringRef Arg);
/// ActOnPragmaMSPointersToMembers - called on well formed \#pragma
/// pointers_to_members(representation method[, general purpose
/// representation]).
void ActOnPragmaMSPointersToMembers(
LangOptions::PragmaMSPointersToMembersKind Kind,
SourceLocation PragmaLoc);
/// Called on well formed \#pragma vtordisp().
void ActOnPragmaMSVtorDisp(PragmaMsStackAction Action,
SourceLocation PragmaLoc,
MSVtorDispMode Value);
/// Which \#pragma *_seg stack a section pragma refers to
/// (bss_seg/data_seg/const_seg/code_seg — see ActOnPragmaMSSeg below).
enum PragmaSectionKind {
PSK_DataSeg,   // #pragma data_seg
PSK_BSSSeg,    // #pragma bss_seg
PSK_ConstSeg,  // #pragma const_seg
PSK_CodeSeg,   // #pragma code_seg
};
bool UnifySection(StringRef SectionName,
int SectionFlags,
DeclaratorDecl *TheDecl);
bool UnifySection(StringRef SectionName,
int SectionFlags,
SourceLocation PragmaSectionLocation);
/// Called on well formed \#pragma bss_seg/data_seg/const_seg/code_seg.
void ActOnPragmaMSSeg(SourceLocation PragmaLocation,
PragmaMsStackAction Action,
llvm::StringRef StackSlotLabel,
StringLiteral *SegmentName,
llvm::StringRef PragmaName);
/// Called on well formed \#pragma section().
void ActOnPragmaMSSection(SourceLocation PragmaLocation,
int SectionFlags, StringLiteral *SegmentName);
/// Called on well-formed \#pragma init_seg().
void ActOnPragmaMSInitSeg(SourceLocation PragmaLocation,
StringLiteral *SegmentName);
/// Called on #pragma clang __debug dump II
void ActOnPragmaDump(Scope *S, SourceLocation Loc, IdentifierInfo *II);
/// ActOnPragmaDetectMismatch - Call on well-formed \#pragma detect_mismatch
void ActOnPragmaDetectMismatch(SourceLocation Loc, StringRef Name,
StringRef Value);
/// Are precise floating point semantics currently enabled?
bool isPreciseFPEnabled() {
// Precise semantics hold exactly when none of the fast-math-style
// relaxations in the current FP feature state is active.
return !(CurFPFeatures.getAllowFPReassociate() ||
CurFPFeatures.getNoSignedZero() ||
CurFPFeatures.getAllowReciprocal() ||
CurFPFeatures.getAllowApproxFunc());
}
/// ActOnPragmaFloatControl - Call on well-formed \#pragma float_control
void ActOnPragmaFloatControl(SourceLocation Loc, PragmaMsStackAction Action,
PragmaFloatControlKind Value);
/// ActOnPragmaUnused - Called on well-formed '\#pragma unused'.
void ActOnPragmaUnused(const Token &Identifier,
Scope *curScope,
SourceLocation PragmaLoc);
/// ActOnPragmaVisibility - Called on well formed \#pragma GCC visibility... .
void ActOnPragmaVisibility(const IdentifierInfo* VisType,
SourceLocation PragmaLoc);
NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II,
SourceLocation Loc);
void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W);
/// ActOnPragmaWeakID - Called on well formed \#pragma weak ident.
void ActOnPragmaWeakID(IdentifierInfo* WeakName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc);
/// ActOnPragmaRedefineExtname - Called on well formed
/// \#pragma redefine_extname oldname newname.
void ActOnPragmaRedefineExtname(IdentifierInfo* WeakName,
IdentifierInfo* AliasName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc,
SourceLocation AliasNameLoc);
/// ActOnPragmaWeakAlias - Called on well formed \#pragma weak ident = ident.
void ActOnPragmaWeakAlias(IdentifierInfo* WeakName,
IdentifierInfo* AliasName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc,
SourceLocation AliasNameLoc);
/// ActOnPragmaFPContract - Called on well formed
/// \#pragma {STDC,OPENCL} FP_CONTRACT and
/// \#pragma clang fp contract
void ActOnPragmaFPContract(SourceLocation Loc, LangOptions::FPModeKind FPC);
/// Called on well formed
/// \#pragma clang fp reassociate
void ActOnPragmaFPReassociate(SourceLocation Loc, bool IsEnabled);
/// ActOnPragmaFenvAccess - Called on well formed
/// \#pragma STDC FENV_ACCESS
void ActOnPragmaFEnvAccess(SourceLocation Loc, bool IsEnabled);
/// Called to set constant rounding mode for floating point operations.
void setRoundingMode(SourceLocation Loc, llvm::RoundingMode);
/// Called to set exception behavior for floating point operations.
void setExceptionMode(SourceLocation Loc, LangOptions::FPExceptionModeKind);
/// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to
/// the record decl, to handle '\#pragma pack' and '\#pragma options align'.
void AddAlignmentAttributesForRecord(RecordDecl *RD);
/// AddMsStructLayoutForRecord - Adds ms_struct layout attribute to record.
void AddMsStructLayoutForRecord(RecordDecl *RD);
/// FreePackedContext - Deallocate and null out PackContext.
void FreePackedContext();
/// PushNamespaceVisibilityAttr - Note that we've entered a
/// namespace with a visibility attribute.
void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr,
SourceLocation Loc);
/// AddPushedVisibilityAttribute - If '\#pragma GCC visibility' was used,
/// add an appropriate visibility attribute.
void AddPushedVisibilityAttribute(Decl *RD);
/// PopPragmaVisibility - Pop the top element of the visibility stack; used
/// for '\#pragma GCC visibility' and visibility attributes on namespaces.
void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc);
/// FreeVisContext - Deallocate and null out VisContext.
void FreeVisContext();
/// AddCFAuditedAttribute - Check whether we're currently within
/// '\#pragma clang arc_cf_code_audited' and, if so, consider adding
/// the appropriate attribute.
void AddCFAuditedAttribute(Decl *D);
void ActOnPragmaAttributeAttribute(ParsedAttr &Attribute,
SourceLocation PragmaLoc,
attr::ParsedSubjectMatchRuleSet Rules);
void ActOnPragmaAttributeEmptyPush(SourceLocation PragmaLoc,
const IdentifierInfo *Namespace);
/// Called on well-formed '\#pragma clang attribute pop'.
void ActOnPragmaAttributePop(SourceLocation PragmaLoc,
const IdentifierInfo *Namespace);
/// Adds the attributes that have been specified using the
/// '\#pragma clang attribute push' directives to the given declaration.
void AddPragmaAttributes(Scope *S, Decl *D);
void DiagnoseUnterminatedPragmaAttribute();
/// Called on well formed \#pragma clang optimize.
void ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc);
/// Get the location for the currently active "\#pragma clang optimize
/// off". If this location is invalid, then the state of the pragma is "on".
SourceLocation getOptimizeOffPragmaLocation() const {
return OptimizeOffPragmaLocation;
}
/// Only called on function definitions; if there is a pragma in scope
/// with the effect of a range-based optnone, consider marking the function
/// with attribute optnone.
void AddRangeBasedOptnone(FunctionDecl *FD);
/// Adds the 'optnone' attribute to the function declaration if there
/// are no conflicts; Loc represents the location causing the 'optnone'
/// attribute to be added (usually because of a pragma).
void AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD, SourceLocation Loc);
/// AddAlignedAttr - Adds an aligned attribute to a particular declaration.
void AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E,
bool IsPackExpansion);
void AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, TypeSourceInfo *T,
bool IsPackExpansion);
/// AddAssumeAlignedAttr - Adds an assume_aligned attribute to a particular
/// declaration.
void AddAssumeAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E,
Expr *OE);
/// AddAllocAlignAttr - Adds an alloc_align attribute to a particular
/// declaration.
void AddAllocAlignAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *ParamExpr);
/// AddAlignValueAttr - Adds an align_value attribute to a particular
/// declaration.
void AddAlignValueAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E);
/// AddLaunchBoundsAttr - Adds a launch_bounds attribute to a particular
/// declaration.
void AddLaunchBoundsAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *MaxThreads, Expr *MinBlocks);
/// AddModeAttr - Adds a mode attribute to a particular declaration.
void AddModeAttr(Decl *D, const AttributeCommonInfo &CI, IdentifierInfo *Name,
bool InInstantiation = false);
void AddParameterABIAttr(Decl *D, const AttributeCommonInfo &CI,
ParameterABI ABI);
enum class RetainOwnershipKind {NS, CF, OS};
void AddXConsumedAttr(Decl *D, const AttributeCommonInfo &CI,
RetainOwnershipKind K, bool IsTemplateInstantiation);
/// addAMDGPUFlatWorkGroupSizeAttr - Adds an amdgpu_flat_work_group_size
/// attribute to a particular declaration.
void addAMDGPUFlatWorkGroupSizeAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *Min, Expr *Max);
/// addAMDGPUWavePersEUAttr - Adds an amdgpu_waves_per_eu attribute to a
/// particular declaration.
void addAMDGPUWavesPerEUAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *Min, Expr *Max);
bool checkNSReturnsRetainedReturnType(SourceLocation loc, QualType type);
//===--------------------------------------------------------------------===//
// C++ Coroutines TS
//
bool ActOnCoroutineBodyStart(Scope *S, SourceLocation KwLoc,
StringRef Keyword);
ExprResult ActOnCoawaitExpr(Scope *S, SourceLocation KwLoc, Expr *E);
ExprResult ActOnCoyieldExpr(Scope *S, SourceLocation KwLoc, Expr *E);
StmtResult ActOnCoreturnStmt(Scope *S, SourceLocation KwLoc, Expr *E);
ExprResult BuildResolvedCoawaitExpr(SourceLocation KwLoc, Expr *E,
bool IsImplicit = false);
ExprResult BuildUnresolvedCoawaitExpr(SourceLocation KwLoc, Expr *E,
UnresolvedLookupExpr* Lookup);
ExprResult BuildCoyieldExpr(SourceLocation KwLoc, Expr *E);
StmtResult BuildCoreturnStmt(SourceLocation KwLoc, Expr *E,
bool IsImplicit = false);
StmtResult BuildCoroutineBodyStmt(CoroutineBodyStmt::CtorArgs);
bool buildCoroutineParameterMoves(SourceLocation Loc);
VarDecl *buildCoroutinePromise(SourceLocation Loc);
void CheckCompletedCoroutineBody(FunctionDecl *FD, Stmt *&Body);
ClassTemplateDecl *lookupCoroutineTraits(SourceLocation KwLoc,
SourceLocation FuncLoc);
/// Check that the expression co_await promise.final_suspend() shall not be
/// potentially-throwing.
bool checkFinalSuspendNoThrow(const Stmt *FinalSuspend);
//===--------------------------------------------------------------------===//
// OpenCL extensions.
//
private:
std::string CurrOpenCLExtension;
/// Extensions required by an OpenCL type.
llvm::DenseMap<const Type*, std::set<std::string>> OpenCLTypeExtMap;
/// Extensions required by an OpenCL declaration.
llvm::DenseMap<const Decl*, std::set<std::string>> OpenCLDeclExtMap;
public:
/// Returns the OpenCL extension most recently recorded via
/// setCurrentOpenCLExtension (empty if none has been set).
llvm::StringRef getCurrentOpenCLExtension() const {
return CurrOpenCLExtension;
}
/// Check if a function declaration \p FD associates with any
/// extensions present in OpenCLDeclExtMap and if so return the
/// extension(s) name(s).
std::string getOpenCLExtensionsFromDeclExtMap(FunctionDecl *FD);
/// Check if a function type \p FT associates with any
/// extensions present in OpenCLTypeExtMap and if so return the
/// extension(s) name(s).
std::string getOpenCLExtensionsFromTypeExtMap(FunctionType *FT);
/// Find an extension in an appropriate extension map and return its name
template<typename T, typename MapT>
std::string getOpenCLExtensionsFromExtMap(T* FT, MapT &Map);
/// Record \p Ext as the current OpenCL extension; the name is copied into
/// owned storage so the caller's buffer need not outlive this object.
void setCurrentOpenCLExtension(llvm::StringRef Ext) {
CurrOpenCLExtension = Ext.str();
}
/// Set OpenCL extensions for a type which can only be used when these
/// OpenCL extensions are enabled. If \p Exts is empty, do nothing.
/// \param Exts A space separated list of OpenCL extensions.
void setOpenCLExtensionForType(QualType T, llvm::StringRef Exts);
/// Set OpenCL extensions for a declaration which can only be
/// used when these OpenCL extensions are enabled. If \p Exts is empty, do
/// nothing.
/// \param Exts A space separated list of OpenCL extensions.
void setOpenCLExtensionForDecl(Decl *FD, llvm::StringRef Exts);
/// Set current OpenCL extensions for a type which can only be used
/// when these OpenCL extensions are enabled. If current OpenCL extension is
/// empty, do nothing.
void setCurrentOpenCLExtensionForType(QualType T);
/// Set current OpenCL extensions for a declaration which
/// can only be used when these OpenCL extensions are enabled. If current
/// OpenCL extension is empty, do nothing.
void setCurrentOpenCLExtensionForDecl(Decl *FD);
bool isOpenCLDisabledDecl(Decl *FD);
/// Check if type \p T corresponding to declaration specifier \p DS
/// is disabled due to required OpenCL extensions being disabled. If so,
/// emit diagnostics.
/// \return true if type is disabled.
bool checkOpenCLDisabledTypeDeclSpec(const DeclSpec &DS, QualType T);
/// Check if declaration \p D used by expression \p E
/// is disabled due to required OpenCL extensions being disabled. If so,
/// emit diagnostics.
/// \return true if type is disabled.
bool checkOpenCLDisabledDecl(const NamedDecl &D, const Expr &E);
//===--------------------------------------------------------------------===//
// OpenMP directives and clauses.
//
private:
void *VarDataSharingAttributesStack;
/// Number of nested '#pragma omp declare target' directives.
SmallVector<SourceLocation, 4> DeclareTargetNesting;
/// Initialization of data-sharing attributes stack.
void InitDataSharingAttributesStack();
void DestroyDataSharingAttributesStack();
ExprResult
VerifyPositiveIntegerConstantInClause(Expr *Op, OpenMPClauseKind CKind,
bool StrictlyPositive = true);
/// Returns OpenMP nesting level for current directive.
unsigned getOpenMPNestingLevel() const;
/// Adjusts the function scopes index for the target-based regions.
void adjustOpenMPTargetScopeIndex(unsigned &FunctionScopesIndex,
unsigned Level) const;
/// Returns the number of scopes associated with the construct on the given
/// OpenMP level.
int getNumberOfConstructScopes(unsigned Level) const;
/// Push new OpenMP function region for non-capturing function.
void pushOpenMPFunctionRegion();
/// Pop OpenMP function region for non-capturing function.
void popOpenMPFunctionRegion(const sema::FunctionScopeInfo *OldFSI);
/// Checks if a type or a declaration is disabled due to the owning extension
/// being disabled, and emits diagnostic messages if it is disabled.
/// \param D type or declaration to be checked.
/// \param DiagLoc source location for the diagnostic message.
/// \param DiagInfo information to be emitted for the diagnostic message.
/// \param SrcRange source range of the declaration.
/// \param Map maps type or declaration to the extensions.
/// \param Selector selects diagnostic message: 0 for type and 1 for
/// declaration.
/// \return true if the type or declaration is disabled.
template <typename T, typename DiagLocT, typename DiagInfoT, typename MapT>
bool checkOpenCLDisabledTypeOrDecl(T D, DiagLocT DiagLoc, DiagInfoT DiagInfo,
MapT &Map, unsigned Selector = 0,
SourceRange SrcRange = SourceRange());
/// Helper to keep information about the current `omp begin/end declare
/// variant` nesting.
struct OMPDeclareVariantScope {
/// The associated OpenMP context selector.
OMPTraitInfo *TI;
/// The associated OpenMP context selector mangling.
std::string NameSuffix;
OMPDeclareVariantScope(OMPTraitInfo &TI);
};
/// Return the OMPTraitInfo for the surrounding scope, if any.
OMPTraitInfo *getOMPTraitInfoForSurroundingScope() {
// No enclosing `omp begin/end declare variant` scope -> no trait info.
if (OMPDeclareVariantScopes.empty())
return nullptr;
return OMPDeclareVariantScopes.back().TI;
}
/// The current `omp begin/end declare variant` scopes.
SmallVector<OMPDeclareVariantScope, 4> OMPDeclareVariantScopes;
/// The declarator \p D defines a function in the scope \p S which is nested
/// in an `omp begin/end declare variant` scope. In this method we create a
/// declaration for \p D and rename \p D according to the OpenMP context
/// selector of the surrounding scope. Return all base functions in \p Bases.
void ActOnStartOfFunctionDefinitionInOpenMPDeclareVariantScope(
Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParameterLists,
SmallVectorImpl<FunctionDecl *> &Bases);
/// Register \p D as specialization of all base functions in \p Bases in the
/// current `omp begin/end declare variant` scope.
void ActOnFinishedFunctionDefinitionInOpenMPDeclareVariantScope(
Decl *D, SmallVectorImpl<FunctionDecl *> &Bases);
public:
/// True while at least one `omp begin/end declare variant` scope is open.
bool isInOpenMPDeclareVariantScope() {
return !OMPDeclareVariantScopes.empty();
}
/// Given the potential call expression \p Call, determine if there is a
/// specialization via the OpenMP declare variant mechanism available. If
/// there is, return the specialized call expression, otherwise return the
/// original \p Call.
ExprResult ActOnOpenMPCall(ExprResult Call, Scope *Scope,
SourceLocation LParenLoc, MultiExprArg ArgExprs,
SourceLocation RParenLoc, Expr *ExecConfig);
/// Handle a `omp begin declare variant`.
void ActOnOpenMPBeginDeclareVariant(SourceLocation Loc, OMPTraitInfo &TI);
/// Handle a `omp end declare variant`.
void ActOnOpenMPEndDeclareVariant();
/// Checks if the variant/multiversion functions are compatible.
bool areMultiversionVariantFunctionsCompatible(
const FunctionDecl *OldFD, const FunctionDecl *NewFD,
const PartialDiagnostic &NoProtoDiagID,
const PartialDiagnosticAt &NoteCausedDiagIDAt,
const PartialDiagnosticAt &NoSupportDiagIDAt,
const PartialDiagnosticAt &DiffDiagIDAt, bool TemplatesSupported,
bool ConstexprSupported, bool CLinkageMayDiffer);
/// Function tries to capture lambda's captured variables in the OpenMP region
/// before the original lambda is captured.
void tryCaptureOpenMPLambdas(ValueDecl *V);
/// Return true if the provided declaration \a VD should be captured by
/// reference.
/// \param Level Relative level of nested OpenMP construct for that the check
/// is performed.
/// \param OpenMPCaptureLevel Capture level within an OpenMP construct.
bool isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level,
unsigned OpenMPCaptureLevel) const;
/// Check if the specified variable is used in one of the private
/// clauses (private, firstprivate, lastprivate, reduction etc.) in OpenMP
/// constructs.
VarDecl *isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo = false,
unsigned StopAt = 0);
ExprResult getOpenMPCapturedExpr(VarDecl *Capture, ExprValueKind VK,
ExprObjectKind OK, SourceLocation Loc);
/// If the current region is a loop-based region, mark the start of the loop
/// construct.
void startOpenMPLoop();
/// If the current region is a range loop-based region, mark the start of the
/// loop construct.
void startOpenMPCXXRangeFor();
/// Check if the specified variable is used in 'private' clause.
/// \param Level Relative level of nested OpenMP construct for that the check
/// is performed.
OpenMPClauseKind isOpenMPPrivateDecl(ValueDecl *D, unsigned Level,
unsigned CapLevel) const;
/// Sets OpenMP capture kind (OMPC_private, OMPC_firstprivate, OMPC_map etc.)
/// for \p FD based on DSA for the provided corresponding captured declaration
/// \p D.
void setOpenMPCaptureKind(FieldDecl *FD, const ValueDecl *D, unsigned Level);
/// Check if the specified variable is captured by 'target' directive.
/// \param Level Relative level of nested OpenMP construct for that the check
/// is performed.
bool isOpenMPTargetCapturedDecl(const ValueDecl *D, unsigned Level,
unsigned CaptureLevel) const;
/// Check if the specified global variable must be captured by outer capture
/// regions.
/// \param Level Relative level of nested OpenMP construct for that
/// the check is performed.
bool isOpenMPGlobalCapturedDecl(ValueDecl *D, unsigned Level,
unsigned CaptureLevel) const;
ExprResult PerformOpenMPImplicitIntegerConversion(SourceLocation OpLoc,
Expr *Op);
/// Called on start of new data sharing attribute block.
void StartOpenMPDSABlock(OpenMPDirectiveKind K,
const DeclarationNameInfo &DirName, Scope *CurScope,
SourceLocation Loc);
/// Start analysis of clauses.
void StartOpenMPClause(OpenMPClauseKind K);
/// End analysis of clauses.
void EndOpenMPClause();
/// Called on end of data sharing attribute block.
void EndOpenMPDSABlock(Stmt *CurDirective);
/// Check if the current region is an OpenMP loop region and if it is,
/// mark loop control variable, used in \p Init for loop initialization, as
/// private by default.
/// \param Init First part of the for loop.
void ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init);
// OpenMP directives and clauses.
/// Called on correct id-expression from the '#pragma omp
/// threadprivate'.
ExprResult ActOnOpenMPIdExpression(Scope *CurScope, CXXScopeSpec &ScopeSpec,
const DeclarationNameInfo &Id,
OpenMPDirectiveKind Kind);
/// Called on well-formed '#pragma omp threadprivate'.
DeclGroupPtrTy ActOnOpenMPThreadprivateDirective(
SourceLocation Loc,
ArrayRef<Expr *> VarList);
/// Builds a new OpenMPThreadPrivateDecl and checks its correctness.
OMPThreadPrivateDecl *CheckOMPThreadPrivateDecl(SourceLocation Loc,
ArrayRef<Expr *> VarList);
/// Called on well-formed '#pragma omp allocate'.
DeclGroupPtrTy ActOnOpenMPAllocateDirective(SourceLocation Loc,
ArrayRef<Expr *> VarList,
ArrayRef<OMPClause *> Clauses,
DeclContext *Owner = nullptr);
/// Called on well-formed '#pragma omp requires'.
DeclGroupPtrTy ActOnOpenMPRequiresDirective(SourceLocation Loc,
ArrayRef<OMPClause *> ClauseList);
/// Check restrictions on Requires directive
OMPRequiresDecl *CheckOMPRequiresDecl(SourceLocation Loc,
ArrayRef<OMPClause *> Clauses);
/// Check if the specified type is allowed to be used in 'omp declare
/// reduction' construct.
QualType ActOnOpenMPDeclareReductionType(SourceLocation TyLoc,
TypeResult ParsedType);
/// Called on start of '#pragma omp declare reduction'.
DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveStart(
Scope *S, DeclContext *DC, DeclarationName Name,
ArrayRef<std::pair<QualType, SourceLocation>> ReductionTypes,
AccessSpecifier AS, Decl *PrevDeclInScope = nullptr);
/// Initialize declare reduction construct initializer.
void ActOnOpenMPDeclareReductionCombinerStart(Scope *S, Decl *D);
/// Finish current declare reduction construct initializer.
void ActOnOpenMPDeclareReductionCombinerEnd(Decl *D, Expr *Combiner);
/// Initialize declare reduction construct initializer.
/// \return omp_priv variable.
VarDecl *ActOnOpenMPDeclareReductionInitializerStart(Scope *S, Decl *D);
/// Finish current declare reduction construct initializer.
void ActOnOpenMPDeclareReductionInitializerEnd(Decl *D, Expr *Initializer,
VarDecl *OmpPrivParm);
/// Called at the end of '#pragma omp declare reduction'.
DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveEnd(
Scope *S, DeclGroupPtrTy DeclReductions, bool IsValid);
/// Check variable declaration in 'omp declare mapper' construct.
TypeResult ActOnOpenMPDeclareMapperVarDecl(Scope *S, Declarator &D);
/// Check if the specified type is allowed to be used in 'omp declare
/// mapper' construct.
QualType ActOnOpenMPDeclareMapperType(SourceLocation TyLoc,
TypeResult ParsedType);
/// Called on start of '#pragma omp declare mapper'.
DeclGroupPtrTy ActOnOpenMPDeclareMapperDirective(
Scope *S, DeclContext *DC, DeclarationName Name, QualType MapperType,
SourceLocation StartLoc, DeclarationName VN, AccessSpecifier AS,
Expr *MapperVarRef, ArrayRef<OMPClause *> Clauses,
Decl *PrevDeclInScope = nullptr);
/// Build the mapper variable of '#pragma omp declare mapper'.
ExprResult ActOnOpenMPDeclareMapperDirectiveVarDecl(Scope *S,
QualType MapperType,
SourceLocation StartLoc,
DeclarationName VN);
bool isOpenMPDeclareMapperVarDeclAllowed(const VarDecl *VD) const;
const ValueDecl *getOpenMPDeclareMapperVarName() const;
/// Called on the start of target region i.e. '#pragma omp declare target'.
bool ActOnStartOpenMPDeclareTargetDirective(SourceLocation Loc);
  /// Called at the end of target region i.e. '#pragma omp end declare target'.
void ActOnFinishOpenMPDeclareTargetDirective();
/// Searches for the provided declaration name for OpenMP declare target
/// directive.
NamedDecl *
lookupOpenMPDeclareTargetName(Scope *CurScope, CXXScopeSpec &ScopeSpec,
const DeclarationNameInfo &Id,
NamedDeclSetType &SameDirectiveDecls);
/// Called on correct id-expression from the '#pragma omp declare target'.
void ActOnOpenMPDeclareTargetName(NamedDecl *ND, SourceLocation Loc,
OMPDeclareTargetDeclAttr::MapTypeTy MT,
OMPDeclareTargetDeclAttr::DevTypeTy DT);
/// Check declaration inside target region.
void
checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D,
SourceLocation IdLoc = SourceLocation());
/// Finishes analysis of the deferred functions calls that may be declared as
/// host/nohost during device/host compilation.
void finalizeOpenMPDelayedAnalysis(const FunctionDecl *Caller,
const FunctionDecl *Callee,
SourceLocation Loc);
/// Return true inside OpenMP declare target region.
bool isInOpenMPDeclareTargetContext() const {
  // A non-empty nesting stack means at least one enclosing
  // '#pragma omp declare target' region is active.
  return DeclareTargetNesting.empty() ? false : true;
}
/// Return true inside OpenMP target region.
bool isInOpenMPTargetExecutionDirective() const;
/// Return the number of captured regions created for an OpenMP directive.
static int getOpenMPCaptureLevels(OpenMPDirectiveKind Kind);
/// Initialization of captured region for OpenMP region.
void ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope);
/// End of OpenMP region.
///
/// \param S Statement associated with the current OpenMP region.
/// \param Clauses List of clauses for the current OpenMP region.
///
/// \returns Statement for finished OpenMP region.
StmtResult ActOnOpenMPRegionEnd(StmtResult S, ArrayRef<OMPClause *> Clauses);
StmtResult ActOnOpenMPExecutableDirective(
OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName,
OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
using VarsWithInheritedDSAType =
llvm::SmallDenseMap<const ValueDecl *, const Expr *, 4>;
/// Called on well-formed '\#pragma omp simd' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp for' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPForDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp for simd' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPForSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp sections' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp section' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPSectionDirective(Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp single' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp master' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPMasterDirective(Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp critical' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPCriticalDirective(const DeclarationNameInfo &DirName,
ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel for' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel for simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel master' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelMasterDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel sections' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp task' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskyield'.
StmtResult ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp barrier'.
StmtResult ActOnOpenMPBarrierDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskwait'.
StmtResult ActOnOpenMPTaskwaitDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskgroup'.
StmtResult ActOnOpenMPTaskgroupDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp flush'.
StmtResult ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp depobj'.
StmtResult ActOnOpenMPDepobjDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp scan'.
StmtResult ActOnOpenMPScanDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp ordered' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp atomic' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target data' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTargetDataDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target enter data' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetEnterDataDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp target exit data' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetExitDataDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp target parallel' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target parallel for' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp cancellation point'.
StmtResult
ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc,
SourceLocation EndLoc,
OpenMPDirectiveKind CancelRegion);
/// Called on well-formed '\#pragma omp cancel'.
StmtResult ActOnOpenMPCancelDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
OpenMPDirectiveKind CancelRegion);
/// Called on well-formed '\#pragma omp taskloop' after parsing of the
/// associated statement.
StmtResult
ActOnOpenMPTaskLoopDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp taskloop simd' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTaskLoopSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp master taskloop' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPMasterTaskLoopDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp master taskloop simd' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPMasterTaskLoopSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel master taskloop' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelMasterTaskLoopDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel master taskloop simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelMasterTaskLoopSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPDistributeDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target update'.
StmtResult ActOnOpenMPTargetUpdateDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp distribute parallel for' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute parallel for simd'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target parallel for simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target simd' after parsing of
/// the associated statement.
StmtResult
ActOnOpenMPTargetSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTeamsDistributeDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute simd' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute parallel for simd'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute parallel for'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTargetTeamsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target teams distribute' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute parallel for'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute parallel for
/// simd' after parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Checks correctness of linear modifiers.
bool CheckOpenMPLinearModifier(OpenMPLinearClauseKind LinKind,
SourceLocation LinLoc);
/// Checks that the specified declaration matches requirements for the linear
/// decls.
bool CheckOpenMPLinearDecl(const ValueDecl *D, SourceLocation ELoc,
OpenMPLinearClauseKind LinKind, QualType Type,
bool IsDeclareSimd = false);
/// Called on well-formed '\#pragma omp declare simd' after parsing of
/// the associated method/function.
DeclGroupPtrTy ActOnOpenMPDeclareSimdDirective(
DeclGroupPtrTy DG, OMPDeclareSimdDeclAttr::BranchStateTy BS,
Expr *Simdlen, ArrayRef<Expr *> Uniforms, ArrayRef<Expr *> Aligneds,
ArrayRef<Expr *> Alignments, ArrayRef<Expr *> Linears,
ArrayRef<unsigned> LinModifiers, ArrayRef<Expr *> Steps, SourceRange SR);
/// Checks '\#pragma omp declare variant' variant function and original
/// functions after parsing of the associated method/function.
/// \param DG Function declaration to which declare variant directive is
/// applied to.
/// \param VariantRef Expression that references the variant function, which
/// must be used instead of the original one, specified in \p DG.
/// \param TI The trait info object representing the match clause.
/// \returns None, if the function/variant function are not compatible with
/// the pragma, pair of original function/variant ref expression otherwise.
Optional<std::pair<FunctionDecl *, Expr *>>
checkOpenMPDeclareVariantFunction(DeclGroupPtrTy DG, Expr *VariantRef,
OMPTraitInfo &TI, SourceRange SR);
/// Called on well-formed '\#pragma omp declare variant' after parsing of
/// the associated method/function.
/// \param FD Function declaration to which declare variant directive is
/// applied to.
  /// \param VariantRef Expression that references the variant function, which
  /// must be used instead of the original one, specified in \p FD.
/// \param TI The context traits associated with the function variant.
void ActOnOpenMPDeclareVariantDirective(FunctionDecl *FD, Expr *VariantRef,
OMPTraitInfo &TI, SourceRange SR);
OMPClause *ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind,
Expr *Expr,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'allocator' clause.
OMPClause *ActOnOpenMPAllocatorClause(Expr *Allocator,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'if' clause.
OMPClause *ActOnOpenMPIfClause(OpenMPDirectiveKind NameModifier,
Expr *Condition, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation NameModifierLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc);
/// Called on well-formed 'final' clause.
OMPClause *ActOnOpenMPFinalClause(Expr *Condition, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'num_threads' clause.
OMPClause *ActOnOpenMPNumThreadsClause(Expr *NumThreads,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'safelen' clause.
OMPClause *ActOnOpenMPSafelenClause(Expr *Length,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'simdlen' clause.
OMPClause *ActOnOpenMPSimdlenClause(Expr *Length, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'collapse' clause.
OMPClause *ActOnOpenMPCollapseClause(Expr *NumForLoops,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'ordered' clause.
OMPClause *
ActOnOpenMPOrderedClause(SourceLocation StartLoc, SourceLocation EndLoc,
SourceLocation LParenLoc = SourceLocation(),
Expr *NumForLoops = nullptr);
/// Called on well-formed 'grainsize' clause.
OMPClause *ActOnOpenMPGrainsizeClause(Expr *Size, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'num_tasks' clause.
OMPClause *ActOnOpenMPNumTasksClause(Expr *NumTasks, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'hint' clause.
OMPClause *ActOnOpenMPHintClause(Expr *Hint, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'detach' clause.
OMPClause *ActOnOpenMPDetachClause(Expr *Evt, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
OMPClause *ActOnOpenMPSimpleClause(OpenMPClauseKind Kind,
unsigned Argument,
SourceLocation ArgumentLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'default' clause.
OMPClause *ActOnOpenMPDefaultClause(llvm::omp::DefaultKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'proc_bind' clause.
OMPClause *ActOnOpenMPProcBindClause(llvm::omp::ProcBindKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'order' clause.
OMPClause *ActOnOpenMPOrderClause(OpenMPOrderClauseKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'update' clause.
OMPClause *ActOnOpenMPUpdateClause(OpenMPDependClauseKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
OMPClause *ActOnOpenMPSingleExprWithArgClause(
OpenMPClauseKind Kind, ArrayRef<unsigned> Arguments, Expr *Expr,
SourceLocation StartLoc, SourceLocation LParenLoc,
ArrayRef<SourceLocation> ArgumentsLoc, SourceLocation DelimLoc,
SourceLocation EndLoc);
/// Called on well-formed 'schedule' clause.
OMPClause *ActOnOpenMPScheduleClause(
OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2,
OpenMPScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation M1Loc, SourceLocation M2Loc,
SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc);
OMPClause *ActOnOpenMPClause(OpenMPClauseKind Kind, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'nowait' clause.
OMPClause *ActOnOpenMPNowaitClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'untied' clause.
OMPClause *ActOnOpenMPUntiedClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'mergeable' clause.
OMPClause *ActOnOpenMPMergeableClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'read' clause.
OMPClause *ActOnOpenMPReadClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'write' clause.
OMPClause *ActOnOpenMPWriteClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'update' clause.
OMPClause *ActOnOpenMPUpdateClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'capture' clause.
OMPClause *ActOnOpenMPCaptureClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'seq_cst' clause.
OMPClause *ActOnOpenMPSeqCstClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'acq_rel' clause.
OMPClause *ActOnOpenMPAcqRelClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'acquire' clause.
OMPClause *ActOnOpenMPAcquireClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'release' clause.
OMPClause *ActOnOpenMPReleaseClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'relaxed' clause.
OMPClause *ActOnOpenMPRelaxedClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'destroy' clause.
OMPClause *ActOnOpenMPDestroyClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'threads' clause.
OMPClause *ActOnOpenMPThreadsClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'simd' clause.
OMPClause *ActOnOpenMPSIMDClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'nogroup' clause.
OMPClause *ActOnOpenMPNogroupClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'unified_address' clause.
OMPClause *ActOnOpenMPUnifiedAddressClause(SourceLocation StartLoc,
SourceLocation EndLoc);
  /// Called on well-formed 'unified_shared_memory' clause.
OMPClause *ActOnOpenMPUnifiedSharedMemoryClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'reverse_offload' clause.
OMPClause *ActOnOpenMPReverseOffloadClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'dynamic_allocators' clause.
OMPClause *ActOnOpenMPDynamicAllocatorsClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'atomic_default_mem_order' clause.
OMPClause *ActOnOpenMPAtomicDefaultMemOrderClause(
OpenMPAtomicDefaultMemOrderClauseKind Kind, SourceLocation KindLoc,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc);
OMPClause *ActOnOpenMPVarListClause(
OpenMPClauseKind Kind, ArrayRef<Expr *> Vars, Expr *DepModOrTailExpr,
const OMPVarListLocTy &Locs, SourceLocation ColonLoc,
CXXScopeSpec &ReductionOrMapperIdScopeSpec,
DeclarationNameInfo &ReductionOrMapperId, int ExtraModifier,
ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
ArrayRef<SourceLocation> MapTypeModifiersLoc, bool IsMapTypeImplicit,
SourceLocation ExtraModifierLoc,
ArrayRef<OpenMPMotionModifierKind> MotionModifiers,
ArrayRef<SourceLocation> MotionModifiersLoc);
/// Called on well-formed 'inclusive' clause.
OMPClause *ActOnOpenMPInclusiveClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'exclusive' clause.
OMPClause *ActOnOpenMPExclusiveClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'allocate' clause.
OMPClause *
ActOnOpenMPAllocateClause(Expr *Allocator, ArrayRef<Expr *> VarList,
SourceLocation StartLoc, SourceLocation ColonLoc,
SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'private' clause.
OMPClause *ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'firstprivate' clause.
OMPClause *ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'lastprivate' clause.
OMPClause *ActOnOpenMPLastprivateClause(
ArrayRef<Expr *> VarList, OpenMPLastprivateModifier LPKind,
SourceLocation LPKindLoc, SourceLocation ColonLoc,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'shared' clause.
OMPClause *ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'reduction' clause.
OMPClause *ActOnOpenMPReductionClause(
ArrayRef<Expr *> VarList, OpenMPReductionClauseModifier Modifier,
SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation ModifierLoc, SourceLocation ColonLoc,
SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'task_reduction' clause.
OMPClause *ActOnOpenMPTaskReductionClause(
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'in_reduction' clause.
OMPClause *ActOnOpenMPInReductionClause(
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'linear' clause.
OMPClause *
ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step,
SourceLocation StartLoc, SourceLocation LParenLoc,
OpenMPLinearClauseKind LinKind, SourceLocation LinLoc,
SourceLocation ColonLoc, SourceLocation EndLoc);
/// Called on well-formed 'aligned' clause.
OMPClause *ActOnOpenMPAlignedClause(ArrayRef<Expr *> VarList,
Expr *Alignment,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc);
/// Called on well-formed 'copyin' clause.
OMPClause *ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'copyprivate' clause.
OMPClause *ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'flush' pseudo clause.
OMPClause *ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'depobj' pseudo clause.
OMPClause *ActOnOpenMPDepobjClause(Expr *Depobj, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'depend' clause.
OMPClause *
ActOnOpenMPDependClause(Expr *DepModifier, OpenMPDependClauseKind DepKind,
SourceLocation DepLoc, SourceLocation ColonLoc,
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'device' clause.
OMPClause *ActOnOpenMPDeviceClause(OpenMPDeviceClauseModifier Modifier,
Expr *Device, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation ModifierLoc,
SourceLocation EndLoc);
/// Called on well-formed 'map' clause.
OMPClause *
ActOnOpenMPMapClause(ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
ArrayRef<SourceLocation> MapTypeModifiersLoc,
CXXScopeSpec &MapperIdScopeSpec,
DeclarationNameInfo &MapperId,
OpenMPMapClauseKind MapType, bool IsMapTypeImplicit,
SourceLocation MapLoc, SourceLocation ColonLoc,
ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs,
ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'num_teams' clause.
OMPClause *ActOnOpenMPNumTeamsClause(Expr *NumTeams, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'thread_limit' clause.
OMPClause *ActOnOpenMPThreadLimitClause(Expr *ThreadLimit,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'priority' clause.
OMPClause *ActOnOpenMPPriorityClause(Expr *Priority, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'dist_schedule' clause.
OMPClause *ActOnOpenMPDistScheduleClause(
OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KindLoc,
SourceLocation CommaLoc, SourceLocation EndLoc);
/// Called on well-formed 'defaultmap' clause.
OMPClause *ActOnOpenMPDefaultmapClause(
OpenMPDefaultmapClauseModifier M, OpenMPDefaultmapClauseKind Kind,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation MLoc,
SourceLocation KindLoc, SourceLocation EndLoc);
/// Called on well-formed 'to' clause.
OMPClause *
ActOnOpenMPToClause(ArrayRef<OpenMPMotionModifierKind> MotionModifiers,
ArrayRef<SourceLocation> MotionModifiersLoc,
CXXScopeSpec &MapperIdScopeSpec,
DeclarationNameInfo &MapperId, SourceLocation ColonLoc,
ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs,
ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'from' clause.
OMPClause *
ActOnOpenMPFromClause(ArrayRef<OpenMPMotionModifierKind> MotionModifiers,
ArrayRef<SourceLocation> MotionModifiersLoc,
CXXScopeSpec &MapperIdScopeSpec,
DeclarationNameInfo &MapperId, SourceLocation ColonLoc,
ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs,
ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'use_device_ptr' clause.
OMPClause *ActOnOpenMPUseDevicePtrClause(ArrayRef<Expr *> VarList,
const OMPVarListLocTy &Locs);
/// Called on well-formed 'use_device_addr' clause.
OMPClause *ActOnOpenMPUseDeviceAddrClause(ArrayRef<Expr *> VarList,
const OMPVarListLocTy &Locs);
/// Called on well-formed 'is_device_ptr' clause.
OMPClause *ActOnOpenMPIsDevicePtrClause(ArrayRef<Expr *> VarList,
const OMPVarListLocTy &Locs);
/// Called on well-formed 'nontemporal' clause.
OMPClause *ActOnOpenMPNontemporalClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Data for list of allocators.
/// Data for one entry of a 'uses_allocators' clause: the allocator, its
/// optional traits expression, and the locations of the wrapping parens.
struct UsesAllocatorsData {
  /// Allocator expression.
  Expr *Allocator = nullptr;
  /// Allocator traits expression; defaults to null (no traits given).
  Expr *AllocatorTraits = nullptr;
  /// Locations of '(' and ')' symbols.
  SourceLocation LParenLoc, RParenLoc;
};
/// Called on well-formed 'uses_allocators' clause.
OMPClause *ActOnOpenMPUsesAllocatorClause(SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc,
ArrayRef<UsesAllocatorsData> Data);
/// Called on well-formed 'affinity' clause.
OMPClause *ActOnOpenMPAffinityClause(SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc, Expr *Modifier,
ArrayRef<Expr *> Locators);
/// The kind of conversion being performed.
/// The kind of conversion being performed.
enum CheckedConversionKind {
  /// An implicit conversion.
  CCK_ImplicitConversion,
  /// A C-style cast.
  CCK_CStyleCast,
  /// A functional-style cast.
  CCK_FunctionalCast,
  /// A cast other than a C-style cast.
  CCK_OtherCast,
  /// A conversion for an operand of a builtin overloaded operator.
  CCK_ForBuiltinOverloadedOp
};

/// Returns true when \p CCK denotes one of the explicit cast kinds
/// (C-style, functional, or "other"), and false for implicit conversions
/// and builtin-operator conversions.
static bool isCast(CheckedConversionKind CCK) {
  switch (CCK) {
  case CCK_CStyleCast:
  case CCK_FunctionalCast:
  case CCK_OtherCast:
    return true;
  default:
    return false;
  }
}
/// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit
/// cast. If there is already an implicit cast, merge into the existing one.
/// If isLvalue, the result of the cast is an lvalue.
ExprResult ImpCastExprToType(Expr *E, QualType Type, CastKind CK,
ExprValueKind VK = VK_RValue,
const CXXCastPath *BasePath = nullptr,
CheckedConversionKind CCK
= CCK_ImplicitConversion);
/// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding
/// to the conversion from scalar type ScalarTy to the Boolean type.
static CastKind ScalarTypeToBooleanCastKind(QualType ScalarTy);
/// IgnoredValueConversions - Given that an expression's result is
/// syntactically ignored, perform any conversions that are
/// required.
ExprResult IgnoredValueConversions(Expr *E);
// UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts
// functions and arrays to their respective pointers (C99 6.3.2.1).
ExprResult UsualUnaryConversions(Expr *E);
/// CallExprUnaryConversions - a special case of an unary conversion
/// performed on a function designator of a call expression.
ExprResult CallExprUnaryConversions(Expr *E);
// DefaultFunctionArrayConversion - converts functions and arrays
// to their respective pointers (C99 6.3.2.1).
ExprResult DefaultFunctionArrayConversion(Expr *E, bool Diagnose = true);
// DefaultFunctionArrayLvalueConversion - converts functions and
// arrays to their respective pointers and performs the
// lvalue-to-rvalue conversion.
ExprResult DefaultFunctionArrayLvalueConversion(Expr *E,
bool Diagnose = true);
// DefaultLvalueConversion - performs lvalue-to-rvalue conversion on
// the operand. This function is a no-op if the operand has a function type
// or an array type.
ExprResult DefaultLvalueConversion(Expr *E);
// DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that
// do not have a prototype. Integer promotions are performed on each
// argument, and arguments that have type float are promoted to double.
ExprResult DefaultArgumentPromotion(Expr *E);
/// If \p E is a prvalue denoting an unmaterialized temporary, materialize
/// it as an xvalue. In C++98, the result will still be a prvalue, because
/// we don't have xvalues there.
ExprResult TemporaryMaterializationConversion(Expr *E);
// Used for emitting the right warning by DefaultVariadicArgumentPromotion
// Used for emitting the right warning by DefaultVariadicArgumentPromotion
enum VariadicCallType {
  /// Variadic call to a function.
  VariadicFunction,
  /// Variadic call to a block.
  VariadicBlock,
  /// Variadic call to a method.
  VariadicMethod,
  /// Variadic call to a constructor.
  VariadicConstructor,
  /// The callee is not variadic.
  VariadicDoesNotApply
};
VariadicCallType getVariadicCallType(FunctionDecl *FDecl,
const FunctionProtoType *Proto,
Expr *Fn);
// Used for determining in which context a type is allowed to be passed to a
// vararg function.
// Used for determining in which context a type is allowed to be passed to a
// vararg function.
enum VarArgKind {
  /// Valid as a variadic argument.
  VAK_Valid,
  /// Valid only in C++11 and later (per the enumerator name).
  VAK_ValidInCXX11,
  /// Passing it has undefined behavior.
  VAK_Undefined,
  /// Undefined behavior, but accepted for MSVC compatibility
  /// (NOTE(review): inferred from the name — confirm with callers).
  VAK_MSVCUndefined,
  /// Not a valid variadic argument.
  VAK_Invalid
};
// Determines which VarArgKind fits an expression.
VarArgKind isValidVarArgType(const QualType &Ty);
/// Check to see if the given expression is a valid argument to a variadic
/// function, issuing a diagnostic if not.
void checkVariadicArgument(const Expr *E, VariadicCallType CT);
/// Check to see if a given expression could have '.c_str()' called on it.
bool hasCStrMethod(const Expr *E);
/// GatherArgumentsForCall - Collector argument expressions for various
/// form of call prototypes.
bool GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl,
const FunctionProtoType *Proto,
unsigned FirstParam, ArrayRef<Expr *> Args,
SmallVectorImpl<Expr *> &AllArgs,
VariadicCallType CallType = VariadicDoesNotApply,
bool AllowExplicit = false,
bool IsListInitialization = false);
// DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but
// will create a runtime trap if the resulting type is not a POD type.
ExprResult DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT,
FunctionDecl *FDecl);
/// Context in which we're performing a usual arithmetic conversion.
enum ArithConvKind {
/// An arithmetic operation.
ACK_Arithmetic,
/// A bitwise operation.
ACK_BitwiseOp,
/// A comparison.
ACK_Comparison,
/// A conditional (?:) operator.
ACK_Conditional,
/// A compound assignment expression.
ACK_CompAssign,
};
// UsualArithmeticConversions - performs the UsualUnaryConversions on it's
// operands and then handles various conversions that are common to binary
// operators (C99 6.3.1.8). If both operands aren't arithmetic, this
// routine returns the first non-arithmetic type found. The client is
// responsible for emitting appropriate error diagnostics.
QualType UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc, ArithConvKind ACK);
/// AssignConvertType - All of the 'assignment' semantic checks return this
/// enum to indicate whether the assignment was allowed. These checks are
/// done for simple assignments, as well as initialization, return from
/// function, argument passing, etc. The query is phrased in terms of a
/// source and destination type.
enum AssignConvertType {
/// Compatible - the types are compatible according to the standard.
Compatible,
/// PointerToInt - The assignment converts a pointer to an int, which we
/// accept as an extension.
PointerToInt,
/// IntToPointer - The assignment converts an int to a pointer, which we
/// accept as an extension.
IntToPointer,
/// FunctionVoidPointer - The assignment is between a function pointer and
/// void*, which the standard doesn't allow, but we accept as an extension.
FunctionVoidPointer,
/// IncompatiblePointer - The assignment is between two pointers types that
/// are not compatible, but we accept them as an extension.
IncompatiblePointer,
/// IncompatibleFunctionPointer - The assignment is between two function
/// pointers types that are not compatible, but we accept them as an
/// extension.
IncompatibleFunctionPointer,
/// IncompatiblePointerSign - The assignment is between two pointers types
/// which point to integers which have a different sign, but are otherwise
/// identical. This is a subset of the above, but broken out because it's by
/// far the most common case of incompatible pointers.
IncompatiblePointerSign,
/// CompatiblePointerDiscardsQualifiers - The assignment discards
/// c/v/r qualifiers, which we accept as an extension.
CompatiblePointerDiscardsQualifiers,
/// IncompatiblePointerDiscardsQualifiers - The assignment
/// discards qualifiers that we don't permit to be discarded,
/// like address spaces.
IncompatiblePointerDiscardsQualifiers,
/// IncompatibleNestedPointerAddressSpaceMismatch - The assignment
/// changes address spaces in nested pointer types which is not allowed.
/// For instance, converting __private int ** to __generic int ** is
/// illegal even though __private could be converted to __generic.
IncompatibleNestedPointerAddressSpaceMismatch,
/// IncompatibleNestedPointerQualifiers - The assignment is between two
/// nested pointer types, and the qualifiers other than the first two
/// levels differ e.g. char ** -> const char **, but we accept them as an
/// extension.
IncompatibleNestedPointerQualifiers,
/// IncompatibleVectors - The assignment is between two vector types that
/// have the same size, which we accept as an extension.
IncompatibleVectors,
/// IntToBlockPointer - The assignment converts an int to a block
/// pointer. We disallow this.
IntToBlockPointer,
/// IncompatibleBlockPointer - The assignment is between two block
/// pointers types that are not compatible.
IncompatibleBlockPointer,
/// IncompatibleObjCQualifiedId - The assignment is between a qualified
/// id type and something else (that is incompatible with it). For example,
/// "id <XXX>" = "Foo *", where "Foo *" doesn't implement the XXX protocol.
IncompatibleObjCQualifiedId,
/// IncompatibleObjCWeakRef - Assigning a weak-unavailable object to an
/// object with __weak qualifier.
IncompatibleObjCWeakRef,
/// Incompatible - We reject this conversion outright, it is invalid to
/// represent it in the AST.
Incompatible
};
/// DiagnoseAssignmentResult - Emit a diagnostic, if required, for the
/// assignment conversion type specified by ConvTy. This returns true if the
/// conversion was invalid or false if the conversion was accepted.
bool DiagnoseAssignmentResult(AssignConvertType ConvTy,
SourceLocation Loc,
QualType DstType, QualType SrcType,
Expr *SrcExpr, AssignmentAction Action,
bool *Complained = nullptr);
/// IsValueInFlagEnum - Determine if a value is allowed as part of a flag
/// enum. If AllowMask is true, then we also allow the complement of a valid
/// value, to be used as a mask.
bool IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val,
bool AllowMask) const;
/// DiagnoseAssignmentEnum - Warn if assignment to enum is a constant
/// integer not in the range of enum values.
void DiagnoseAssignmentEnum(QualType DstType, QualType SrcType,
Expr *SrcExpr);
/// CheckAssignmentConstraints - Perform type checking for assignment,
/// argument passing, variable initialization, and function return values.
/// C99 6.5.16.
AssignConvertType CheckAssignmentConstraints(SourceLocation Loc,
QualType LHSType,
QualType RHSType);
/// Check assignment constraints and optionally prepare for a conversion of
/// the RHS to the LHS type. The conversion is prepared for if ConvertRHS
/// is true.
AssignConvertType CheckAssignmentConstraints(QualType LHSType,
ExprResult &RHS,
CastKind &Kind,
bool ConvertRHS = true);
/// Check assignment constraints for an assignment of RHS to LHSType.
///
/// \param LHSType The destination type for the assignment.
/// \param RHS The source expression for the assignment.
/// \param Diagnose If \c true, diagnostics may be produced when checking
/// for assignability. If a diagnostic is produced, \p RHS will be
/// set to ExprError(). Note that this function may still return
/// without producing a diagnostic, even for an invalid assignment.
/// \param DiagnoseCFAudited If \c true, the target is a function parameter
/// in an audited Core Foundation API and does not need to be checked
/// for ARC retain issues.
/// \param ConvertRHS If \c true, \p RHS will be updated to model the
/// conversions necessary to perform the assignment. If \c false,
/// \p Diagnose must also be \c false.
AssignConvertType CheckSingleAssignmentConstraints(
QualType LHSType, ExprResult &RHS, bool Diagnose = true,
bool DiagnoseCFAudited = false, bool ConvertRHS = true);
// If the lhs type is a transparent union, check whether we
// can initialize the transparent union with the given expression.
AssignConvertType CheckTransparentUnionArgumentConstraints(QualType ArgType,
ExprResult &RHS);
/// Determine whether \p From is a string literal being converted to the
/// non-const pointer type \p ToType.
bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType);
/// Check exception-specification compatibility when converting \p From to
/// \p ToType.
bool CheckExceptionSpecCompatibility(Expr *From, QualType ToType);
/// Perform an implicit conversion of \p From to \p ToType, diagnosing it
/// as the given assignment action; \p AllowExplicit additionally permits
/// explicit conversions.
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
                                     AssignmentAction Action,
                                     bool AllowExplicit = false);
/// Perform the conversion described by the already-computed implicit
/// conversion sequence \p ICS.
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
                                     const ImplicitConversionSequence& ICS,
                                     AssignmentAction Action,
                                     CheckedConversionKind CCK
                                        = CCK_ImplicitConversion);
/// Perform the conversion described by the standard conversion sequence
/// \p SCS.
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
                                     const StandardConversionSequence& SCS,
                                     AssignmentAction Action,
                                     CheckedConversionKind CCK);
/// Convert \p E to type \p Ty, adjusting only qualification, producing a
/// value of kind \p VK.
ExprResult PerformQualificationConversion(
    Expr *E, QualType Ty, ExprValueKind VK = VK_RValue,
    CheckedConversionKind CCK = CCK_ImplicitConversion);
/// the following "Check" methods will return a valid/converted QualType
/// or a null QualType (indicating an error diagnostic was issued).
/// type checking binary operators (subroutines of CreateBuiltinBinOp).
QualType InvalidOperands(SourceLocation Loc, ExprResult &LHS,
ExprResult &RHS);
QualType InvalidLogicalVectorOperands(SourceLocation Loc, ExprResult &LHS,
ExprResult &RHS);
QualType CheckPointerToMemberOperands( // C++ 5.5
ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK,
SourceLocation OpLoc, bool isIndirect);
QualType CheckMultiplyDivideOperands( // C99 6.5.5
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign,
bool IsDivide);
QualType CheckRemainderOperands( // C99 6.5.5
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
bool IsCompAssign = false);
QualType CheckAdditionOperands( // C99 6.5.6
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc, QualType* CompLHSTy = nullptr);
QualType CheckSubtractionOperands( // C99 6.5.6
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
QualType* CompLHSTy = nullptr);
QualType CheckShiftOperands( // C99 6.5.7
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc, bool IsCompAssign = false);
void CheckPtrComparisonWithNullChar(ExprResult &E, ExprResult &NullE);
QualType CheckCompareOperands( // C99 6.5.8/9
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckBitwiseOperands( // C99 6.5.[10...12]
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckLogicalOperands( // C99 6.5.[13,14]
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
// CheckAssignmentOperands is used for both simple and compound assignment.
// For simple assignment, pass both expressions and a null converted type.
// For compound assignment, pass both expressions and the converted type.
QualType CheckAssignmentOperands( // C99 6.5.16.[1,2]
Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc, QualType CompoundType);
/// Type-check an increment/decrement of a pseudo-object expression.
ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc,
                                   UnaryOperatorKind Opcode, Expr *Op);
/// Type-check a (possibly compound) assignment involving a pseudo-object
/// expression.
ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc,
                                       BinaryOperatorKind Opcode,
                                       Expr *LHS, Expr *RHS);
/// Type-check a pseudo-object expression used as an rvalue.
ExprResult checkPseudoObjectRValue(Expr *E);
/// Recreate the syntactic form of the given pseudo-object expression.
Expr *recreateSyntacticForm(PseudoObjectExpr *E);
QualType CheckConditionalOperands( // C99 6.5.15
ExprResult &Cond, ExprResult &LHS, ExprResult &RHS,
ExprValueKind &VK, ExprObjectKind &OK, SourceLocation QuestionLoc);
QualType CXXCheckConditionalOperands( // C++ 5.16
ExprResult &cond, ExprResult &lhs, ExprResult &rhs,
ExprValueKind &VK, ExprObjectKind &OK, SourceLocation questionLoc);
QualType CheckGNUVectorConditionalTypes(ExprResult &Cond, ExprResult &LHS,
ExprResult &RHS,
SourceLocation QuestionLoc);
QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2,
bool ConvertArgs = true);
/// ExprResult-based convenience overload: unwraps both operands, forwards
/// to the Expr*& overload of FindCompositePointerType, and writes the
/// (possibly converted) expressions back into \p E1 and \p E2.
QualType FindCompositePointerType(SourceLocation Loc,
                                  ExprResult &E1, ExprResult &E2,
                                  bool ConvertArgs = true) {
  Expr *LHS = E1.get();
  Expr *RHS = E2.get();
  QualType Composite = FindCompositePointerType(Loc, LHS, RHS, ConvertArgs);
  E1 = LHS;
  E2 = RHS;
  return Composite;
}
/// Find a composite Objective-C pointer type for the operands of a
/// conditional expression at \p QuestionLoc.
QualType FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS,
                                      SourceLocation QuestionLoc);
/// Diagnose a conditional expression in which one operand is a null
/// pointer constant.
bool DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr,
                                SourceLocation QuestionLoc);
/// Diagnose comparing a pointer \p E that is always non-null against a
/// null pointer constant of kind \p NullType.
void DiagnoseAlwaysNonNullPointer(Expr *E,
                                  Expr::NullPointerConstantKind NullType,
                                  bool IsEqual, SourceRange Range);
/// type checking for vector binary operators.
QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc, bool IsCompAssign,
bool AllowBothBool, bool AllowBoolConversion);
QualType GetSignedVectorType(QualType V);
QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc);
/// Type checking for matrix binary operators.
QualType CheckMatrixElementwiseOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc,
bool IsCompAssign);
QualType CheckMatrixMultiplyOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc, bool IsCompAssign);
bool areLaxCompatibleVectorTypes(QualType srcType, QualType destType);
bool isLaxVectorConversion(QualType srcType, QualType destType);
/// type checking declaration initializers (C99 6.7.8)
bool CheckForConstantInitializer(Expr *e, QualType t);
// type checking C++ declaration initializers (C++ [dcl.init]).
/// ReferenceCompareResult - Expresses the result of comparing two
/// types (cv1 T1 and cv2 T2) to determine their compatibility for the
/// purposes of initialization by reference (C++ [dcl.init.ref]p4).
enum ReferenceCompareResult {
/// Ref_Incompatible - The two types are incompatible, so direct
/// reference binding is not possible.
Ref_Incompatible = 0,
/// Ref_Related - The two types are reference-related, which means
/// that their unqualified forms (T1 and T2) are either the same
/// or T1 is a base class of T2.
Ref_Related,
/// Ref_Compatible - The two types are reference-compatible.
Ref_Compatible
};
// Fake up a scoped enumeration that still contextually converts to bool.
struct ReferenceConversionsScope {
/// The conversions that would be performed on an lvalue of type T2 when
/// binding a reference of type T1 to it, as determined when evaluating
/// whether T1 is reference-compatible with T2.
enum ReferenceConversions {
Qualification = 0x1,
NestedQualification = 0x2,
Function = 0x4,
DerivedToBase = 0x8,
ObjC = 0x10,
ObjCLifetime = 0x20,
LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/ObjCLifetime)
};
};
using ReferenceConversions = ReferenceConversionsScope::ReferenceConversions;
ReferenceCompareResult
CompareReferenceRelationship(SourceLocation Loc, QualType T1, QualType T2,
ReferenceConversions *Conv = nullptr);
/// Type-check a cast to \p CastType of \p CastExpr, an expression of
/// unknown type, computing the cast kind, value kind, and base path.
ExprResult checkUnknownAnyCast(SourceRange TypeRange, QualType CastType,
                               Expr *CastExpr, CastKind &CastKind,
                               ExprValueKind &VK, CXXCastPath &Path);
/// Force an expression with unknown-type to an expression of the
/// given type.
ExprResult forceUnknownAnyToType(Expr *E, QualType ToType);
/// Type-check an expression that's being passed to an
/// __unknown_anytype parameter.
ExprResult checkUnknownAnyArg(SourceLocation callLoc,
Expr *result, QualType ¶mType);
// CheckVectorCast - check type constraints for vectors.
// Since vectors are an extension, there are no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size.
// returns true if the cast is invalid
bool CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty,
CastKind &Kind);
/// Prepare `SplattedExpr` for a vector splat operation, adding
/// implicit casts if necessary.
ExprResult prepareVectorSplat(QualType VectorTy, Expr *SplattedExpr);
// CheckExtVectorCast - check type constraints for extended vectors.
// Since vectors are an extension, there are no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size,
// or vectors and the element type of that vector.
// returns the cast expr
ExprResult CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr,
CastKind &Kind);
ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo, QualType Type,
SourceLocation LParenLoc,
Expr *CastExpr,
SourceLocation RParenLoc);
enum ARCConversionResult { ACR_okay, ACR_unbridged, ACR_error };
/// Checks for invalid conversions and casts between
/// retainable pointers and other pointer kinds for ARC and Weak.
ARCConversionResult CheckObjCConversion(SourceRange castRange,
QualType castType, Expr *&op,
CheckedConversionKind CCK,
bool Diagnose = true,
bool DiagnoseCFAudited = false,
BinaryOperatorKind Opc = BO_PtrMemD
);
Expr *stripARCUnbridgedCast(Expr *e);
void diagnoseARCUnbridgedCast(Expr *e);
bool CheckObjCARCUnavailableWeakConversion(QualType castType,
QualType ExprType);
/// checkRetainCycles - Check whether an Objective-C message send
/// might create an obvious retain cycle.
void checkRetainCycles(ObjCMessageExpr *msg);
void checkRetainCycles(Expr *receiver, Expr *argument);
void checkRetainCycles(VarDecl *Var, Expr *Init);
/// checkUnsafeAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained type.
bool checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS);
/// checkUnsafeExprAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained expression.
void checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS);
/// CheckMessageArgumentTypes - Check types in an Obj-C message send.
/// \param Method - May be null.
/// \param [out] ReturnType - The return type of the send.
/// \return true iff there were any incompatible types.
bool CheckMessageArgumentTypes(const Expr *Receiver, QualType ReceiverType,
MultiExprArg Args, Selector Sel,
ArrayRef<SourceLocation> SelectorLocs,
ObjCMethodDecl *Method, bool isClassMessage,
bool isSuperMessage, SourceLocation lbrac,
SourceLocation rbrac, SourceRange RecRange,
QualType &ReturnType, ExprValueKind &VK);
/// Determine the result of a message send expression based on
/// the type of the receiver, the method expected to receive the message,
/// and the form of the message send.
QualType getMessageSendResultType(const Expr *Receiver, QualType ReceiverType,
ObjCMethodDecl *Method, bool isClassMessage,
bool isSuperMessage);
/// If the given expression involves a message send to a method
/// with a related result type, emit a note describing what happened.
void EmitRelatedResultTypeNote(const Expr *E);
/// Given that we had incompatible pointer types in a return
/// statement, check whether we're in a method with a related result
/// type, and if so, emit a note describing what happened.
void EmitRelatedResultTypeNoteForReturn(QualType destType);
/// The result of analyzing a condition: the condition variable (if any),
/// the converted condition expression, an invalid flag, and — when the
/// condition is constexpr and evaluable — its known boolean value.
class ConditionResult {
  Decl *ConditionVar;    // Condition variable declaration, or null.
  FullExprArg Condition; // The condition expression.
  bool Invalid;          // True for an error result.
  bool HasKnownValue;    // True when KnownValue was computed.
  bool KnownValue;       // Meaningful only when HasKnownValue is true.
  friend class Sema;
  /// Build a valid result; evaluates the condition to a constant boolean
  /// when \p IsConstexpr and the expression is non-null and not
  /// value-dependent.
  ConditionResult(Sema &S, Decl *ConditionVar, FullExprArg Condition,
                  bool IsConstexpr)
      : ConditionVar(ConditionVar), Condition(Condition), Invalid(false),
        HasKnownValue(IsConstexpr && Condition.get() &&
                      !Condition.get()->isValueDependent()),
        KnownValue(HasKnownValue &&
                   !!Condition.get()->EvaluateKnownConstInt(S.Context)) {}
  /// Build an empty result, optionally marked invalid.
  explicit ConditionResult(bool Invalid)
      : ConditionVar(nullptr), Condition(nullptr), Invalid(Invalid),
        HasKnownValue(false), KnownValue(false) {}
public:
  /// Default-construct a valid but empty result.
  ConditionResult() : ConditionResult(false) {}
  bool isInvalid() const { return Invalid; }
  /// Returns the condition variable (may be null) and the condition
  /// expression.
  std::pair<VarDecl *, Expr *> get() const {
    return std::make_pair(cast_or_null<VarDecl>(ConditionVar),
                          Condition.get());
  }
  /// Returns the statically-known value of the condition, or None when it
  /// was not evaluated.
  llvm::Optional<bool> getKnownValue() const {
    if (!HasKnownValue)
      return None;
    return KnownValue;
  }
};
static ConditionResult ConditionError() { return ConditionResult(true); }
enum class ConditionKind {
Boolean, ///< A boolean condition, from 'if', 'while', 'for', or 'do'.
ConstexprIf, ///< A constant boolean condition from 'if constexpr'.
Switch ///< An integral condition for a 'switch' statement.
};
/// Analyze \p SubExpr as a condition of kind \p CK, applying the
/// appropriate conversions.
ConditionResult ActOnCondition(Scope *S, SourceLocation Loc,
                               Expr *SubExpr, ConditionKind CK);
/// Analyze a condition introduced by a variable declaration
/// (e.g. 'if (int x = f())').
ConditionResult ActOnConditionVariable(Decl *ConditionVar,
                                       SourceLocation StmtLoc,
                                       ConditionKind CK);
/// Act on a declaration appearing in a C++ condition.
DeclResult ActOnCXXConditionDeclaration(Scope *S, Declarator &D);
/// Check the use of \p ConditionVar as a condition of kind \p CK,
/// producing the condition expression.
ExprResult CheckConditionVariable(VarDecl *ConditionVar,
                                  SourceLocation StmtLoc,
                                  ConditionKind CK);
/// Type-check the controlling expression of a 'switch' statement.
ExprResult CheckSwitchCondition(SourceLocation SwitchLoc, Expr *Cond);
/// CheckBooleanCondition - Diagnose problems involving the use of
/// the given expression as a boolean condition (e.g. in an if
/// statement). Also performs the standard function and array
/// decays, possibly changing the input variable.
///
/// \param Loc - A location associated with the condition, e.g. the
/// 'if' keyword.
/// \return true iff there were any errors
ExprResult CheckBooleanCondition(SourceLocation Loc, Expr *E,
bool IsConstexpr = false);
/// ActOnExplicitBoolSpecifier - Build an ExplicitSpecifier from an expression
/// found in an explicit(bool) specifier.
ExplicitSpecifier ActOnExplicitBoolSpecifier(Expr *E);
/// tryResolveExplicitSpecifier - Attempt to resolve the explict specifier.
/// Returns true if the explicit specifier is now resolved.
bool tryResolveExplicitSpecifier(ExplicitSpecifier &ExplicitSpec);
/// DiagnoseAssignmentAsCondition - Given that an expression is
/// being used as a boolean condition, warn if it's an assignment.
void DiagnoseAssignmentAsCondition(Expr *E);
/// Redundant parentheses over an equality comparison can indicate
/// that the user intended an assignment used as condition.
void DiagnoseEqualityWithExtraParens(ParenExpr *ParenE);
/// CheckCXXBooleanCondition - Returns true if conversion to bool is invalid.
ExprResult CheckCXXBooleanCondition(Expr *CondExpr, bool IsConstexpr = false);
/// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have
/// the specified width and sign. If an overflow occurs, detect it and emit
/// the specified diagnostic.
void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal,
unsigned NewWidth, bool NewSign,
SourceLocation Loc, unsigned DiagID);
/// Checks that the Objective-C declaration is declared in the global scope.
/// Emits an error and marks the declaration as invalid if it's not declared
/// in the global scope.
bool CheckObjCDeclScope(Decl *D);
/// Abstract base class used for diagnosing integer constant
/// expression violations.
/// Abstract base class used for diagnosing integer constant
/// expression violations.
class VerifyICEDiagnoser {
public:
  // NOTE(review): presumably suppresses this diagnoser's output when true —
  // confirm against VerifyIntegerConstantExpression's implementation.
  bool Suppress;
  VerifyICEDiagnoser(bool Suppress = false) : Suppress(Suppress) { }
  /// Diagnose an expression whose type is not permitted in an ICE.
  virtual SemaDiagnosticBuilder
  diagnoseNotICEType(Sema &S, SourceLocation Loc, QualType T);
  /// Diagnose an expression that is not an integer constant expression
  /// (pure virtual: every diagnoser must provide this).
  virtual SemaDiagnosticBuilder diagnoseNotICE(Sema &S,
                                               SourceLocation Loc) = 0;
  /// Diagnose an expression that is not a formal ICE but could be folded
  /// to a constant.
  virtual SemaDiagnosticBuilder diagnoseFold(Sema &S, SourceLocation Loc);
  virtual ~VerifyICEDiagnoser() {}
};
/// VerifyIntegerConstantExpression - Verifies that an expression is an ICE,
/// and reports the appropriate diagnostics. Returns false on success.
/// Can optionally return the value of the expression.
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
VerifyICEDiagnoser &Diagnoser,
bool AllowFold = true);
// Overload taking a diagnostic ID instead of a custom diagnoser.
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
unsigned DiagID,
bool AllowFold = true);
// Overload using the default diagnostics.
ExprResult VerifyIntegerConstantExpression(Expr *E,
llvm::APSInt *Result = nullptr);
/// VerifyBitField - verifies that a bit field expression is an ICE and has
/// the correct width, and that the field type is valid.
/// Returns false on success.
/// Can optionally return whether the bit-field is of width 0
ExprResult VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName,
QualType FieldTy, bool IsMsStruct,
Expr *BitWidth, bool *ZeroWidth = nullptr);
private:
// Count of active "force __host__ __device__" pragmas; see
// Push/PopForceCUDAHostDevice below.
unsigned ForceCUDAHostDeviceDepth = 0;
public:
/// Increments our count of the number of times we've seen a pragma forcing
/// functions to be __host__ __device__. So long as this count is greater
/// than zero, all functions encountered will be __host__ __device__.
void PushForceCUDAHostDevice();
/// Decrements our count of the number of times we've seen a pragma forcing
/// functions to be __host__ __device__. Returns false if the count is 0
/// before decrementing, so you can emit an error.
bool PopForceCUDAHostDevice();
/// Diagnostics that are emitted only if we discover that the given function
/// must be codegen'ed. Because handling these correctly adds overhead to
/// compilation, this is currently only enabled for CUDA compilations.
llvm::DenseMap<CanonicalDeclPtr<FunctionDecl>,
std::vector<PartialDiagnosticAt>>
DeviceDeferredDiags;
/// A pair of a canonical FunctionDecl and a SourceLocation. When used as the
/// key in a hashtable, both the FD and location are hashed.
struct FunctionDeclAndLoc {
CanonicalDeclPtr<FunctionDecl> FD;
SourceLocation Loc;
};
/// FunctionDecls and SourceLocations for which CheckCUDACall has emitted a
/// (maybe deferred) "bad call" diagnostic. We use this to avoid emitting the
/// same deferred diag twice.
llvm::DenseSet<FunctionDeclAndLoc> LocsWithCUDACallDiags;
/// An inverse call graph, mapping known-emitted functions to one of their
/// known-emitted callers (plus the location of the call).
///
/// Functions that we can tell a priori must be emitted aren't added to this
/// map.
llvm::DenseMap</* Callee = */ CanonicalDeclPtr<FunctionDecl>,
/* Caller = */ FunctionDeclAndLoc>
DeviceKnownEmittedFns;
/// Diagnostic builder for CUDA/OpenMP devices errors which may or may not be
/// deferred.
///
/// In CUDA, there exist constructs (e.g. variable-length arrays, try/catch)
/// which are not allowed to appear inside __device__ functions and are
/// allowed to appear in __host__ __device__ functions only if the host+device
/// function is never codegen'ed.
///
/// To handle this, we use the notion of "deferred diagnostics", where we
/// attach a diagnostic to a FunctionDecl that's emitted iff it's codegen'ed.
///
/// This class lets you emit either a regular diagnostic, a deferred
/// diagnostic, or no diagnostic at all, according to an argument you pass to
/// its constructor, thus simplifying the process of creating these "maybe
/// deferred" diagnostics.
class DeviceDiagBuilder {
public:
enum Kind {
/// Emit no diagnostics.
K_Nop,
/// Emit the diagnostic immediately (i.e., behave like Sema::Diag()).
K_Immediate,
/// Emit the diagnostic immediately, and, if it's a warning or error, also
/// emit a call stack showing how this function can be reached by an a
/// priori known-emitted function.
K_ImmediateWithCallStack,
/// Create a deferred diagnostic, which is emitted only if the function
/// it's attached to is codegen'ed. Also emit a call stack as with
/// K_ImmediateWithCallStack.
K_Deferred
};
DeviceDiagBuilder(Kind K, SourceLocation Loc, unsigned DiagID,
FunctionDecl *Fn, Sema &S);
DeviceDiagBuilder(DeviceDiagBuilder &&D);
DeviceDiagBuilder(const DeviceDiagBuilder &) = default;
~DeviceDiagBuilder();
/// Convertible to bool: True if we immediately emitted an error, false if
/// we didn't emit an error or we created a deferred error.
///
/// Example usage:
///
/// if (DeviceDiagBuilder(...) << foo << bar)
/// return ExprError();
///
/// But see CUDADiagIfDeviceCode() and CUDADiagIfHostCode() -- you probably
/// want to use these instead of creating a DeviceDiagBuilder yourself.
operator bool() const { return ImmediateDiag.hasValue(); }
// Streams Value either into the immediate diagnostic or into the deferred
// PartialDiagnostic stored in S.DeviceDeferredDiags, whichever is active.
template <typename T>
friend const DeviceDiagBuilder &operator<<(const DeviceDiagBuilder &Diag,
const T &Value) {
if (Diag.ImmediateDiag.hasValue())
*Diag.ImmediateDiag << Value;
else if (Diag.PartialDiagId.hasValue())
Diag.S.DeviceDeferredDiags[Diag.Fn][*Diag.PartialDiagId].second
<< Value;
return Diag;
}
private:
Sema &S;             // Sema instance used to emit / defer diagnostics.
SourceLocation Loc;  // Location the diagnostic points at.
unsigned DiagID;     // Diagnostic ID to emit.
FunctionDecl *Fn;    // Function the (possibly deferred) diag is attached to.
bool ShowCallStack;  // Whether to also emit the known-emitted call stack.
// Invariant: At most one of these Optionals has a value.
// FIXME: Switch these to a Variant once that exists.
llvm::Optional<SemaDiagnosticBuilder> ImmediateDiag;
llvm::Optional<unsigned> PartialDiagId;
};
/// Creates a DeviceDiagBuilder that emits the diagnostic if the current context
/// is "used as device code".
///
/// - If CurContext is a __host__ function, does not emit any diagnostics.
/// - If CurContext is a __device__ or __global__ function, emits the
/// diagnostics immediately.
/// - If CurContext is a __host__ __device__ function and we are compiling for
/// the device, creates a diagnostic which is emitted if and when we realize
/// that the function will be codegen'ed.
///
/// Example usage:
///
/// // Variable-length arrays are not allowed in CUDA device code.
/// if (CUDADiagIfDeviceCode(Loc, diag::err_cuda_vla) << CurrentCUDATarget())
/// return ExprError();
/// // Otherwise, continue parsing as normal.
DeviceDiagBuilder CUDADiagIfDeviceCode(SourceLocation Loc, unsigned DiagID);
/// Creates a DeviceDiagBuilder that emits the diagnostic if the current context
/// is "used as host code".
///
/// Same as CUDADiagIfDeviceCode, with "host" and "device" switched.
DeviceDiagBuilder CUDADiagIfHostCode(SourceLocation Loc, unsigned DiagID);
/// Creates a DeviceDiagBuilder that emits the diagnostic if the current
/// context is "used as device code".
///
/// - If CurContext is a `declare target` function or it is known that the
/// function is emitted for the device, emits the diagnostics immediately.
/// - If CurContext is a non-`declare target` function and we are compiling
/// for the device, creates a diagnostic which is emitted if and when we
/// realize that the function will be codegen'ed.
///
/// Example usage:
///
/// // Variable-length arrays are not allowed in NVPTX device code.
/// if (diagIfOpenMPDeviceCode(Loc, diag::err_vla_unsupported))
/// return ExprError();
/// // Otherwise, continue parsing as normal.
DeviceDiagBuilder diagIfOpenMPDeviceCode(SourceLocation Loc, unsigned DiagID);
/// Creates a DeviceDiagBuilder that emits the diagnostic if the current
/// context is "used as host code".
///
/// - If CurContext is a `declare target` function or it is known that the
/// function is emitted for the host, emits the diagnostics immediately.
/// - If CurContext is a non-host function, just ignore it.
///
/// Example usage:
///
/// // Variable-length arrays are not allowed in NVPTX device code.
/// if (diagIfOpenMPHostCode(Loc, diag::err_vla_unsupported))
/// return ExprError();
/// // Otherwise, continue parsing as normal.
DeviceDiagBuilder diagIfOpenMPHostCode(SourceLocation Loc, unsigned DiagID);
// Dispatches to the CUDA or OpenMP variant above based on the current
// compilation; general-purpose "diagnose for the offloading target" hook.
DeviceDiagBuilder targetDiag(SourceLocation Loc, unsigned DiagID);
/// Check if the expression is allowed to be used in expressions for the
/// offloading devices.
void checkDeviceDecl(const ValueDecl *D, SourceLocation Loc);
// Classification of a function w.r.t. CUDA host/device attributes.
enum CUDAFunctionTarget {
CFT_Device,        // __device__ function.
CFT_Global,        // __global__ (kernel) function.
CFT_Host,          // __host__ (or unattributed) function.
CFT_HostDevice,    // __host__ __device__ function.
CFT_InvalidTarget  // Inconsistent/invalid attribute combination.
};
/// Determines whether the given function is a CUDA device/host/kernel/etc.
/// function.
///
/// Use this rather than examining the function's attributes yourself -- you
/// will get it wrong. Returns CFT_Host if D is null.
CUDAFunctionTarget IdentifyCUDATarget(const FunctionDecl *D,
bool IgnoreImplicitHDAttr = false);
// Overload classifying a not-yet-created declaration from its parsed
// attribute list.
CUDAFunctionTarget IdentifyCUDATarget(const ParsedAttributesView &Attrs);
/// Gets the CUDA target for the current context.
CUDAFunctionTarget CurrentCUDATarget() {
return IdentifyCUDATarget(dyn_cast<FunctionDecl>(CurContext));
}
static bool isCUDAImplicitHostDeviceFunction(const FunctionDecl *D);
// CUDA function call preference. Must be ordered numerically from
// worst to best.
enum CUDAFunctionPreference {
CFP_Never, // Invalid caller/callee combination.
CFP_WrongSide, // Calls from host-device to host or device
// function that do not match current compilation
// mode.
CFP_HostDevice, // Any calls to host/device functions.
CFP_SameSide, // Calls from host-device to host or device
// function matching current compilation mode.
CFP_Native, // host-to-host or device-to-device calls.
};
/// Identifies relative preference of a given Caller/Callee
/// combination, based on their host/device attributes.
/// \param Caller function which needs address of \p Callee.
/// nullptr in case of global context.
/// \param Callee target function
///
/// \returns preference value for particular Caller/Callee combination.
CUDAFunctionPreference IdentifyCUDAPreference(const FunctionDecl *Caller,
const FunctionDecl *Callee);
/// Determines whether Caller may invoke Callee, based on their CUDA
/// host/device attributes. Returns false if the call is not allowed.
///
/// Note: Will return true for CFP_WrongSide calls. These may appear in
/// semantically correct CUDA programs, but only if they're never codegen'ed.
bool IsAllowedCUDACall(const FunctionDecl *Caller,
const FunctionDecl *Callee) {
return IdentifyCUDAPreference(Caller, Callee) != CFP_Never;
}
/// May add implicit CUDAHostAttr and CUDADeviceAttr attributes to FD,
/// depending on FD and the current compilation settings.
void maybeAddCUDAHostDeviceAttrs(FunctionDecl *FD,
const LookupResult &Previous);
/// May add implicit CUDAConstantAttr attribute to VD, depending on VD
/// and current compilation settings.
void MaybeAddCUDAConstantAttr(VarDecl *VD);
public:
/// Check whether we're allowed to call Callee from the current context.
///
/// - If the call is never allowed in a semantically-correct program
/// (CFP_Never), emits an error and returns false.
///
/// - If the call is allowed in semantically-correct programs, but only if
/// it's never codegen'ed (CFP_WrongSide), creates a deferred diagnostic to
/// be emitted if and when the caller is codegen'ed, and returns true.
///
/// Will only create deferred diagnostics for a given SourceLocation once,
/// so you can safely call this multiple times without generating duplicate
/// deferred errors.
///
/// - Otherwise, returns true without emitting any diagnostics.
bool CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee);
// Perform CUDA-specific semantic checks on a capture of the given lambda
// operator() method.
void CUDACheckLambdaCapture(CXXMethodDecl *D, const sema::Capture &Capture);
/// Set __device__ or __host__ __device__ attributes on the given lambda
/// operator() method.
///
/// CUDA lambdas by default is host device function unless it has explicit
/// host or device attribute.
void CUDASetLambdaAttrs(CXXMethodDecl *Method);
/// Finds a function in \p Matches with highest calling priority
/// from \p Caller context and erases all functions with lower
/// calling priority.
void EraseUnwantedCUDAMatches(
const FunctionDecl *Caller,
SmallVectorImpl<std::pair<DeclAccessPair, FunctionDecl *>> &Matches);
/// Given a implicit special member, infer its CUDA target from the
/// calls it needs to make to underlying base/field special members.
/// \param ClassDecl the class for which the member is being created.
/// \param CSM the kind of special member.
/// \param MemberDecl the special member itself.
/// \param ConstRHS true if this is a copy operation with a const object on
/// its RHS.
/// \param Diagnose true if this call should emit diagnostics.
/// \return true if there was an error inferring.
/// The result of this call is implicit CUDA target attribute(s) attached to
/// the member declaration.
bool inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl,
CXXSpecialMember CSM,
CXXMethodDecl *MemberDecl,
bool ConstRHS,
bool Diagnose);
/// \return true if \p CD can be considered empty according to CUDA
/// (E.2.3.1 in CUDA 7.5 Programming guide).
bool isEmptyCudaConstructor(SourceLocation Loc, CXXConstructorDecl *CD);
bool isEmptyCudaDestructor(SourceLocation Loc, CXXDestructorDecl *CD);
// \brief Checks that initializers of \p Var satisfy CUDA restrictions. In
// case of error emits appropriate diagnostic and invalidates \p Var.
//
// \details CUDA allows only empty constructors as initializers for global
// variables (see E.2.3.1, CUDA 7.5). The same restriction also applies to all
// __shared__ variables whether they are local or not (they all are implicitly
// static in CUDA). One exception is that CUDA allows constant initializers
// for __constant__ and __device__ variables.
void checkAllowedCUDAInitializer(VarDecl *VD);
/// Check whether NewFD is a valid overload for CUDA. Emits
/// diagnostics and invalidates NewFD if not.
void checkCUDATargetOverload(FunctionDecl *NewFD,
const LookupResult &Previous);
/// Copies target attributes from the template TD to the function FD.
void inheritCUDATargetAttrs(FunctionDecl *FD, const FunctionTemplateDecl &TD);
/// Returns the name of the launch configuration function. This is the name
/// of the function that will be called to configure kernel call, with the
/// parameters specified via <<<>>>.
std::string getCudaConfigureFuncName() const;
/// \name Code completion
//@{
/// Describes the context in which code completion occurs.
enum ParserCompletionContext {
/// Code completion occurs at top-level or namespace context.
PCC_Namespace,
/// Code completion occurs within a class, struct, or union.
PCC_Class,
/// Code completion occurs within an Objective-C interface, protocol,
/// or category.
PCC_ObjCInterface,
/// Code completion occurs within an Objective-C implementation or
/// category implementation
PCC_ObjCImplementation,
/// Code completion occurs within the list of instance variables
/// in an Objective-C interface, protocol, category, or implementation.
PCC_ObjCInstanceVariableList,
/// Code completion occurs following one or more template
/// headers.
PCC_Template,
/// Code completion occurs following one or more template
/// headers within a class.
PCC_MemberTemplate,
/// Code completion occurs within an expression.
PCC_Expression,
/// Code completion occurs within a statement, which may
/// also be an expression or a declaration.
PCC_Statement,
/// Code completion occurs at the beginning of the
/// initialization statement (or expression) in a for loop.
PCC_ForInit,
/// Code completion occurs within the condition of an if,
/// while, switch, or for statement.
PCC_Condition,
/// Code completion occurs within the body of a function on a
/// recovery path, where we do not have a specific handle on our position
/// in the grammar.
PCC_RecoveryInFunction,
/// Code completion occurs where only a type is permitted.
PCC_Type,
/// Code completion occurs in a parenthesized expression, which
/// might also be a type cast.
PCC_ParenthesizedExpression,
/// Code completion occurs within a sequence of declaration
/// specifiers within a function, method, or block.
PCC_LocalDeclarationSpecifiers
};
// Code-completion entry points, invoked by the parser at the corresponding
// grammar positions.
void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path);
void CodeCompleteOrdinaryName(Scope *S,
ParserCompletionContext CompletionContext);
void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS,
bool AllowNonIdentifiers,
bool AllowNestedNameSpecifiers);
struct CodeCompleteExpressionData;
void CodeCompleteExpression(Scope *S,
const CodeCompleteExpressionData &Data);
void CodeCompleteExpression(Scope *S, QualType PreferredType,
bool IsParenthesized = false);
void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base, Expr *OtherOpBase,
SourceLocation OpLoc, bool IsArrow,
bool IsBaseExprStatement,
QualType PreferredType);
void CodeCompletePostfixExpression(Scope *S, ExprResult LHS,
QualType PreferredType);
void CodeCompleteTag(Scope *S, unsigned TagSpec);
void CodeCompleteTypeQualifiers(DeclSpec &DS);
void CodeCompleteFunctionQualifiers(DeclSpec &DS, Declarator &D,
const VirtSpecifiers *VS = nullptr);
void CodeCompleteBracketDeclarator(Scope *S);
void CodeCompleteCase(Scope *S);
/// Reports signatures for a call to CodeCompleteConsumer and returns the
/// preferred type for the current argument. Returned type can be null.
QualType ProduceCallSignatureHelp(Scope *S, Expr *Fn, ArrayRef<Expr *> Args,
SourceLocation OpenParLoc);
QualType ProduceConstructorSignatureHelp(Scope *S, QualType Type,
SourceLocation Loc,
ArrayRef<Expr *> Args,
SourceLocation OpenParLoc);
QualType ProduceCtorInitMemberSignatureHelp(Scope *S, Decl *ConstructorDecl,
CXXScopeSpec SS,
ParsedType TemplateTypeTy,
ArrayRef<Expr *> ArgExprs,
IdentifierInfo *II,
SourceLocation OpenParLoc);
void CodeCompleteInitializer(Scope *S, Decl *D);
/// Trigger code completion for a record of \p BaseType. \p InitExprs are
/// expressions in the initializer list seen so far and \p D is the current
/// Designation being parsed.
void CodeCompleteDesignator(const QualType BaseType,
llvm::ArrayRef<Expr *> InitExprs,
const Designation &D);
void CodeCompleteAfterIf(Scope *S, bool IsBracedThen);
void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS, bool EnteringContext,
bool IsUsingDeclaration, QualType BaseType,
QualType PreferredType);
void CodeCompleteUsing(Scope *S);
void CodeCompleteUsingDirective(Scope *S);
void CodeCompleteNamespaceDecl(Scope *S);
void CodeCompleteNamespaceAliasDecl(Scope *S);
void CodeCompleteOperatorName(Scope *S);
void CodeCompleteConstructorInitializer(
Decl *Constructor,
ArrayRef<CXXCtorInitializer *> Initializers);
void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro,
bool AfterAmpersand);
void CodeCompleteAfterFunctionEquals(Declarator &D);
// Objective-C code completion.
void CodeCompleteObjCAtDirective(Scope *S);
void CodeCompleteObjCAtVisibility(Scope *S);
void CodeCompleteObjCAtStatement(Scope *S);
void CodeCompleteObjCAtExpression(Scope *S);
void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS);
void CodeCompleteObjCPropertyGetter(Scope *S);
void CodeCompleteObjCPropertySetter(Scope *S);
void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS,
bool IsParameter);
void CodeCompleteObjCMessageReceiver(Scope *S);
void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression);
void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression,
bool IsSuper = false);
void CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression,
ObjCInterfaceDecl *Super = nullptr);
void CodeCompleteObjCForCollection(Scope *S,
DeclGroupPtrTy IterationVar);
void CodeCompleteObjCSelector(Scope *S,
ArrayRef<IdentifierInfo *> SelIdents);
void CodeCompleteObjCProtocolReferences(
ArrayRef<IdentifierLocPair> Protocols);
void CodeCompleteObjCProtocolDecl(Scope *S);
void CodeCompleteObjCInterfaceDecl(Scope *S);
void CodeCompleteObjCSuperclass(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCImplementationDecl(Scope *S);
void CodeCompleteObjCInterfaceCategory(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCImplementationCategory(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCPropertyDefinition(Scope *S);
void CodeCompleteObjCPropertySynthesizeIvar(Scope *S,
IdentifierInfo *PropertyName);
void CodeCompleteObjCMethodDecl(Scope *S, Optional<bool> IsInstanceMethod,
ParsedType ReturnType);
void CodeCompleteObjCMethodDeclSelector(Scope *S,
bool IsInstanceMethod,
bool AtParameterName,
ParsedType ReturnType,
ArrayRef<IdentifierInfo *> SelIdents);
void CodeCompleteObjCClassPropertyRefExpr(Scope *S, IdentifierInfo &ClassName,
SourceLocation ClassNameLoc,
bool IsBaseExprStatement);
// Preprocessor code completion.
void CodeCompletePreprocessorDirective(bool InConditional);
void CodeCompleteInPreprocessorConditionalExclusion(Scope *S);
void CodeCompletePreprocessorMacroName(bool IsDefinition);
void CodeCompletePreprocessorExpression();
void CodeCompletePreprocessorMacroArgument(Scope *S,
IdentifierInfo *Macro,
MacroInfo *MacroInfo,
unsigned Argument);
void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled);
void CodeCompleteNaturalLanguage();
void CodeCompleteAvailabilityPlatformName();
void GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator,
CodeCompletionTUInfo &CCTUInfo,
SmallVectorImpl<CodeCompletionResult> &Results);
//@}
//===--------------------------------------------------------------------===//
// Extra semantic analysis beyond the C type system
public:
SourceLocation getLocationOfStringLiteralByte(const StringLiteral *SL,
unsigned ByteNo) const;
private:
void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr,
const ArraySubscriptExpr *ASE=nullptr,
bool AllowOnePastEnd=true, bool IndexNegated=false);
void CheckArrayAccess(const Expr *E);
// Used to grab the relevant information from a FormatAttr and a
// FunctionDeclaration.
struct FormatStringInfo {
unsigned FormatIdx;    // Index of the format-string argument.
unsigned FirstDataArg; // Index of the first data argument.
bool HasVAListArg;     // True if the callee takes a va_list.
};
static bool getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember,
FormatStringInfo *FSI);
// Per-call-kind semantic checks performed after a call is otherwise
// well-formed.
bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall,
const FunctionProtoType *Proto);
bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc,
ArrayRef<const Expr *> Args);
bool CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall,
const FunctionProtoType *Proto);
bool CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto);
void CheckConstructorCall(FunctionDecl *FDecl,
ArrayRef<const Expr *> Args,
const FunctionProtoType *Proto,
SourceLocation Loc);
void checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto,
const Expr *ThisArg, ArrayRef<const Expr *> Args,
bool IsMemberFunction, SourceLocation Loc, SourceRange Range,
VariadicCallType CallType);
bool CheckObjCString(Expr *Arg);
ExprResult CheckOSLogFormatStringArg(Expr *Arg);
ExprResult CheckBuiltinFunctionCall(FunctionDecl *FDecl,
unsigned BuiltinID, CallExpr *TheCall);
bool CheckTSBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
void checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD, CallExpr *TheCall);
// Target-specific builtin call checkers.
bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall,
unsigned MaxWidth);
bool CheckNeonBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckMVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckSVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckCDEBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckARMCoprocessorImmediate(const TargetInfo &TI, const Expr *CoprocArg,
bool WantCDE);
bool CheckARMBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckAArch64BuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckBPFBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckHexagonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall);
bool CheckMipsBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckMipsBuiltinCpu(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckMipsBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall);
bool CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinGatherScatterScale(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinTileArguments(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinTileArgumentsRange(CallExpr *TheCall,
ArrayRef<int> ArgNums);
bool CheckX86BuiltinTileDuplicate(CallExpr *TheCall, ArrayRef<int> ArgNums);
bool CheckX86BuiltinTileRangeAndDuplicate(CallExpr *TheCall,
ArrayRef<int> ArgNums);
bool CheckX86BuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckPPCBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckAMDGCNBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
// Checks for individual target-independent builtins.
bool SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall);
bool SemaBuiltinVAStartARMMicrosoft(CallExpr *Call);
bool SemaBuiltinUnorderedCompare(CallExpr *TheCall);
bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs);
bool SemaBuiltinComplex(CallExpr *TheCall);
bool SemaBuiltinVSX(CallExpr *TheCall);
bool SemaBuiltinOSLogFormat(CallExpr *TheCall);
public:
// Used by C++ template instantiation.
ExprResult SemaBuiltinShuffleVector(CallExpr *TheCall);
ExprResult SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
private:
bool SemaBuiltinPrefetch(CallExpr *TheCall);
bool SemaBuiltinAllocaWithAlign(CallExpr *TheCall);
bool SemaBuiltinAssume(CallExpr *TheCall);
bool SemaBuiltinAssumeAligned(CallExpr *TheCall);
bool SemaBuiltinLongjmp(CallExpr *TheCall);
bool SemaBuiltinSetjmp(CallExpr *TheCall);
ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult);
ExprResult SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult);
ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult,
AtomicExpr::AtomicOp Op);
ExprResult SemaBuiltinOperatorNewDeleteOverloaded(ExprResult TheCallResult,
bool IsDelete);
// Helpers verifying that a builtin argument is an integer constant
// expression, optionally constrained to a range / multiple / power of two.
bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum,
llvm::APSInt &Result);
bool SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, int Low,
int High, bool RangeIsError = true);
bool SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum,
unsigned Multiple);
bool SemaBuiltinConstantArgPower2(CallExpr *TheCall, int ArgNum);
bool SemaBuiltinConstantArgShiftedByte(CallExpr *TheCall, int ArgNum,
unsigned ArgBits);
bool SemaBuiltinConstantArgShiftedByteOrXXFF(CallExpr *TheCall, int ArgNum,
unsigned ArgBits);
bool SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall,
int ArgNum, unsigned ExpectedFieldNum,
bool AllowName);
bool SemaBuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall);
// Matrix builtin handling.
ExprResult SemaBuiltinMatrixTranspose(CallExpr *TheCall,
ExprResult CallResult);
ExprResult SemaBuiltinMatrixColumnMajorLoad(CallExpr *TheCall,
ExprResult CallResult);
ExprResult SemaBuiltinMatrixColumnMajorStore(CallExpr *TheCall,
ExprResult CallResult);
public:
// Kinds of format strings recognized by format-string checking.
enum FormatStringType {
FST_Scanf,
FST_Printf,
FST_NSString,
FST_Strftime,
FST_Strfmon,
FST_Kprintf,
FST_FreeBSDKPrintf,
FST_OSTrace,
FST_OSLog,
FST_Unknown
};
static FormatStringType GetFormatStringType(const FormatAttr *Format);
bool FormatStringHasSArg(const StringLiteral *FExpr);
static bool GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx);
private:
// Check a call's arguments against a format attribute / explicit format
// indices; CheckedVarArgs records which variadic args were validated.
bool CheckFormatArguments(const FormatAttr *Format,
ArrayRef<const Expr *> Args,
bool IsCXXMember,
VariadicCallType CallType,
SourceLocation Loc, SourceRange Range,
llvm::SmallBitVector &CheckedVarArgs);
bool CheckFormatArguments(ArrayRef<const Expr *> Args,
bool HasVAListArg, unsigned format_idx,
unsigned firstDataArg, FormatStringType Type,
VariadicCallType CallType,
SourceLocation Loc, SourceRange range,
llvm::SmallBitVector &CheckedVarArgs);
void CheckAbsoluteValueFunction(const CallExpr *Call,
const FunctionDecl *FDecl);
void CheckMaxUnsignedZero(const CallExpr *Call, const FunctionDecl *FDecl);
// Checks for misuses of memcpy/memset-family, strlcpy/strlcat, and strncat.
void CheckMemaccessArguments(const CallExpr *Call,
unsigned BId,
IdentifierInfo *FnName);
void CheckStrlcpycatArguments(const CallExpr *Call,
IdentifierInfo *FnName);
void CheckStrncatArguments(const CallExpr *Call,
IdentifierInfo *FnName);
void CheckReturnValExpr(Expr *RetValExp, QualType lhsType,
SourceLocation ReturnLoc,
bool isObjCMethod = false,
const AttrVec *Attrs = nullptr,
const FunctionDecl *FD = nullptr);
public:
void CheckFloatComparison(SourceLocation Loc, Expr *LHS, Expr *RHS);
private:
void CheckImplicitConversions(Expr *E, SourceLocation CC = SourceLocation());
void CheckBoolLikeConversion(Expr *E, SourceLocation CC);
void CheckForIntOverflow(Expr *E);
void CheckUnsequencedOperations(const Expr *E);
/// Perform semantic checks on a completed expression. This will either
/// be a full-expression or a default argument expression.
void CheckCompletedExpr(Expr *E, SourceLocation CheckLoc = SourceLocation(),
bool IsConstexpr = false);
void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field,
Expr *Init);
/// Check if there is a field shadowing.
void CheckShadowInheritedFields(const SourceLocation &Loc,
DeclarationName FieldName,
const CXXRecordDecl *RD,
bool DeclIsField = true);
/// Check if the given expression contains 'break' or 'continue'
/// statement that produces control flow different from GCC.
void CheckBreakContinueBinding(Expr *E);
/// Check whether receiver is mutable ObjC container which
/// attempts to add itself into the container
void CheckObjCCircularContainer(ObjCMessageExpr *Message);
void AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE);
void AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc,
bool DeleteWasArrayForm);
public:
/// Register a magic integral constant to be used as a type tag.
void RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind,
uint64_t MagicValue, QualType Type,
bool LayoutCompatible, bool MustBeNull);
// Type information associated with a registered type-tag magic value.
struct TypeTagData {
TypeTagData() {}
TypeTagData(QualType Type, bool LayoutCompatible, bool MustBeNull) :
Type(Type), LayoutCompatible(LayoutCompatible),
MustBeNull(MustBeNull)
{}
QualType Type;
/// If true, \c Type should be compared with other expression's types for
/// layout-compatibility.
unsigned LayoutCompatible : 1;
// If true, the tagged argument must be a null pointer constant.
unsigned MustBeNull : 1;
};
/// A pair of ArgumentKind identifier and magic value. This uniquely
/// identifies the magic value.
typedef std::pair<const IdentifierInfo *, uint64_t> TypeTagMagicValue;
private:
/// A map from magic value to type information.
std::unique_ptr<llvm::DenseMap<TypeTagMagicValue, TypeTagData>>
TypeTagForDatatypeMagicValues;
/// Perform checks on a call of a function with argument_with_type_tag
/// or pointer_with_type_tag attributes.
void CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr,
const ArrayRef<const Expr *> ExprArgs,
SourceLocation CallSiteLoc);
/// Check if we are taking the address of a packed field
/// as this may be a problem if the pointer value is dereferenced.
void CheckAddressOfPackedMember(Expr *rhs);
/// The parser's current scope.
///
/// The parser maintains this state here.
Scope *CurScope;
// Lazily-initialized identifier caches (see getSuperIdentifier /
// getFloat128Identifier below).
mutable IdentifierInfo *Ident_super;
mutable IdentifierInfo *Ident___float128;
/// Nullability type specifiers.
IdentifierInfo *Ident__Nonnull = nullptr;
IdentifierInfo *Ident__Nullable = nullptr;
IdentifierInfo *Ident__Null_unspecified = nullptr;
IdentifierInfo *Ident_NSError = nullptr;
/// The handler for the FileChanged preprocessor events.
///
/// Used for diagnostics that implement custom semantic analysis for #include
/// directives, like -Wpragma-pack.
sema::SemaPPCallbacks *SemaPPCallbackHandler;
protected:
friend class Parser;
friend class InitializationSequence;
friend class ASTReader;
friend class ASTDeclReader;
friend class ASTWriter;
public:
/// Retrieve the keyword associated
IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability);
/// The struct behind the CFErrorRef pointer.
RecordDecl *CFError = nullptr;
bool isCFError(RecordDecl *D);
/// Retrieve the identifier "NSError".
IdentifierInfo *getNSErrorIdent();
/// Retrieve the parser's current scope.
///
/// This routine must only be used when it is certain that semantic analysis
/// and the parser are in precisely the same context, which is not the case
/// when, e.g., we are performing any kind of template instantiation.
/// Therefore, the only safe places to use this scope are in the parser
/// itself and in routines directly invoked from the parser and *never* from
/// template substitution or instantiation.
Scope *getCurScope() const { return CurScope; }
/// Bump the Microsoft mangling number of the current scope. Simply forwards
/// to CurScope, so the caveats documented on getCurScope() apply here too.
void incrementMSManglingNumber() const {
return CurScope->incrementMSManglingNumber();
}
IdentifierInfo *getSuperIdentifier() const;
IdentifierInfo *getFloat128Identifier() const;
Decl *getObjCDeclContext() const;
/// Return the current lexical DeclContext, preferring OriginalLexicalContext
/// when one has been recorded.
DeclContext *getCurLexicalContext() const {
return OriginalLexicalContext ? OriginalLexicalContext : CurContext;
}
/// Return the lexical context for Objective-C purposes: an Objective-C
/// category is mapped to the interface of the class it extends.
const DeclContext *getCurObjCLexicalContext() const {
const DeclContext *DC = getCurLexicalContext();
// A category implicitly has the attribute of the interface.
if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(DC))
DC = CatD->getClassInterface();
return DC;
}
/// Determine the number of levels of enclosing template parameters. This is
/// only usable while parsing. Note that this does not include dependent
/// contexts in which no template parameters have yet been declared, such as
/// in a terse function template or generic lambda before the first 'auto' is
/// encountered.
unsigned getTemplateDepth(Scope *S) const;
/// Check whether the number of arguments being passed to a function exceeds
/// the number of parameters it expects.
///
/// During code completion with partial overloading we may be positioned just
/// after a comma, so one additional, not-yet-written argument is assumed.
static bool TooManyArguments(size_t NumParams, size_t NumArgs,
                             bool PartialOverloading = false) {
  if (PartialOverloading && NumArgs > 0)
    return NumArgs + 1 > NumParams; // Count the argument being typed.
  return NumArgs > NumParams;
}
// Emitting members of dllexported classes is delayed until the class
// (including field initializers) is fully parsed.
SmallVector<CXXRecordDecl*, 4> DelayedDllExportClasses;
SmallVector<CXXMethodDecl*, 4> DelayedDllExportMemberFunctions;
private:
int ParsingClassDepth = 0;
/// RAII helper that stashes the pending delayed exception-specification
/// checks on construction and restores them on destruction, so a nested
/// parsing session starts with a clean slate.
class SavePendingParsedClassStateRAII {
public:
SavePendingParsedClassStateRAII(Sema &S) : S(S) { swapSavedState(); }
~SavePendingParsedClassStateRAII() {
// The nested session must have drained its own delayed checks before the
// saved state is swapped back in.
assert(S.DelayedOverridingExceptionSpecChecks.empty() &&
"there shouldn't be any pending delayed exception spec checks");
assert(S.DelayedEquivalentExceptionSpecChecks.empty() &&
"there shouldn't be any pending delayed exception spec checks");
swapSavedState();
}
private:
Sema &S;
decltype(DelayedOverridingExceptionSpecChecks)
SavedOverridingExceptionSpecChecks;
decltype(DelayedEquivalentExceptionSpecChecks)
SavedEquivalentExceptionSpecChecks;
// Exchange the saved lists with Sema's live ones; used by both the
// constructor (save) and the destructor (restore).
void swapSavedState() {
SavedOverridingExceptionSpecChecks.swap(
S.DelayedOverridingExceptionSpecChecks);
SavedEquivalentExceptionSpecChecks.swap(
S.DelayedEquivalentExceptionSpecChecks);
}
};
/// Helper class that collects misaligned member designations and
/// their location info for delayed diagnostics.
struct MisalignedMember {
Expr *E;             // expression designating the (possibly misaligned) member
RecordDecl *RD;      // record the member belongs to
ValueDecl *MD;       // the member itself
CharUnits Alignment; // NOTE(review): presumably the access's alignment -- confirm at fill sites
MisalignedMember() : E(), RD(), MD(), Alignment() {}
MisalignedMember(Expr *E, RecordDecl *RD, ValueDecl *MD,
CharUnits Alignment)
: E(E), RD(RD), MD(MD), Alignment(Alignment) {}
explicit MisalignedMember(Expr *E)
: MisalignedMember(E, nullptr, nullptr, CharUnits()) {}
// Equality is keyed on the expression alone.
bool operator==(const MisalignedMember &m) { return this->E == m.E; }
};
/// Small set of gathered accesses to potentially misaligned members
/// due to the packed attribute.
SmallVector<MisalignedMember, 4> MisalignedMembers;
/// Adds an expression to the set of gathered misaligned members.
void AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD,
CharUnits Alignment);
public:
/// Diagnoses the current set of gathered accesses. This typically
/// happens at full expression level. The set is cleared after emitting the
/// diagnostics.
void DiagnoseMisalignedMembers();
/// This function checks if the expression is in the sef of potentially
/// misaligned members and it is converted to some pointer type T with lower
/// or equal alignment requirements. If so it removes it. This is used when
/// we do not want to diagnose such misaligned access (e.g. in conversions to
/// void*).
void DiscardMisalignedMemberAddress(const Type *T, Expr *E);
/// This function calls Action when it determines that E designates a
/// misaligned member due to the packed attribute. This is used to emit
/// local diagnostics like in reference binding.
void RefersToMemberWithReducedAlignment(
Expr *E,
llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)>
Action);
/// Describes the reason a calling convention specification was ignored, used
/// for diagnostics.
enum class CallingConventionIgnoredReason {
ForThisTarget = 0,
VariadicFunction,
ConstructorDestructor,
BuiltinFunction
};
/// Creates a DeviceDiagBuilder that emits the diagnostic if the current
/// context is "used as device code".
///
/// - If CurLexicalContext is a kernel function or it is known that the
/// function will be emitted for the device, emits the diagnostics
/// immediately.
/// - If CurLexicalContext is a function and we are compiling
/// for the device, but we don't know that this function will be codegen'ed
/// for devive yet, creates a diagnostic which is emitted if and when we
/// realize that the function will be codegen'ed.
///
/// Example usage:
///
/// Diagnose __float128 type usage only from SYCL device code if the current
/// target doesn't support it
/// if (!S.Context.getTargetInfo().hasFloat128Type() &&
/// S.getLangOpts().SYCLIsDevice)
/// SYCLDiagIfDeviceCode(Loc, diag::err_type_unsupported) << "__float128";
DeviceDiagBuilder SYCLDiagIfDeviceCode(SourceLocation Loc, unsigned DiagID);
/// Check whether we're allowed to call Callee from the current context.
///
/// - If the call is never allowed in a semantically-correct program
/// emits an error and returns false.
///
/// - If the call is allowed in semantically-correct programs, but only if
/// it's never codegen'ed, creates a deferred diagnostic to be emitted if
/// and when the caller is codegen'ed, and returns true.
///
/// - Otherwise, returns true without emitting any diagnostics.
///
/// Adds Callee to DeviceCallGraph if we don't know if its caller will be
/// codegen'ed yet.
bool checkSYCLDeviceFunction(SourceLocation Loc, FunctionDecl *Callee);
};
/// RAII object that enters a new expression evaluation context.
class EnterExpressionEvaluationContext {
Sema &Actions;
// True when a context was actually pushed and must be popped on destruction.
bool Entered = true;
public:
/// Push NewContext with the given lambda context declaration, unless
/// ShouldEnter is false.
EnterExpressionEvaluationContext(
Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
Decl *LambdaContextDecl = nullptr,
Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
Sema::ExpressionEvaluationContextRecord::EK_Other,
bool ShouldEnter = true)
: Actions(Actions), Entered(ShouldEnter) {
if (Entered)
Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl,
ExprContext);
}
/// Push NewContext, reusing the enclosing lambda context declaration.
EnterExpressionEvaluationContext(
Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
Sema::ReuseLambdaContextDecl_t,
Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
Sema::ExpressionEvaluationContextRecord::EK_Other)
: Actions(Actions) {
Actions.PushExpressionEvaluationContext(
NewContext, Sema::ReuseLambdaContextDecl, ExprContext);
}
enum InitListTag { InitList };
/// Enter a context for a braced-init-list; only pushes an UnevaluatedList
/// context when the narrowing rules below make that necessary.
EnterExpressionEvaluationContext(Sema &Actions, InitListTag,
bool ShouldEnter = true)
: Actions(Actions), Entered(false) {
// In C++11 onwards, narrowing checks are performed on the contents of
// braced-init-lists, even when they occur within unevaluated operands.
// Therefore we still need to instantiate constexpr functions used in such
// a context.
if (ShouldEnter && Actions.isUnevaluatedContext() &&
Actions.getLangOpts().CPlusPlus11) {
Actions.PushExpressionEvaluationContext(
Sema::ExpressionEvaluationContext::UnevaluatedList);
Entered = true;
}
}
~EnterExpressionEvaluationContext() {
if (Entered)
Actions.PopExpressionEvaluationContext();
}
};
DeductionFailureInfo
MakeDeductionFailureInfo(ASTContext &Context, Sema::TemplateDeductionResult TDK,
sema::TemplateDeductionInfo &Info);
/// Contains a late templated function.
/// Will be parsed at the end of the translation unit, used by Sema & Parser.
struct LateParsedTemplate {
CachedTokens Toks; // saved tokens of the function, to be parsed later
/// The template function declaration to be late parsed.
Decl *D;
};
} // end namespace clang
namespace llvm {
// Hash a FunctionDeclAndLoc by looking at both its FunctionDecl and its
// SourceLocation.
template <> struct DenseMapInfo<clang::Sema::FunctionDeclAndLoc> {
using FunctionDeclAndLoc = clang::Sema::FunctionDeclAndLoc;
using FDBaseInfo = DenseMapInfo<clang::CanonicalDeclPtr<clang::FunctionDecl>>;
// Empty/tombstone keys delegate to the FunctionDecl's DenseMapInfo and pair
// it with an invalid (default-constructed) SourceLocation.
static FunctionDeclAndLoc getEmptyKey() {
return {FDBaseInfo::getEmptyKey(), clang::SourceLocation()};
}
static FunctionDeclAndLoc getTombstoneKey() {
return {FDBaseInfo::getTombstoneKey(), clang::SourceLocation()};
}
static unsigned getHashValue(const FunctionDeclAndLoc &FDL) {
return hash_combine(FDBaseInfo::getHashValue(FDL.FD),
FDL.Loc.getRawEncoding());
}
// Two keys are equal only when both the declaration and location match.
static bool isEqual(const FunctionDeclAndLoc &LHS,
const FunctionDeclAndLoc &RHS) {
return LHS.FD == RHS.FD && LHS.Loc == RHS.Loc;
}
};
} // namespace llvm
#endif
|
omp-parallel-single.c | #include <omp.h>
#include <stdio.h>
#define LEN 20
int main(void)
{
/* num[k] records the id of the thread that executed iteration k. */
int num[LEN] = {0}, k=0;
/* A team of threads is created, but the `single` construct makes exactly
 * one (unspecified) thread of the team execute the entire loop; the other
 * threads wait at the implicit barrier at the end of the construct. */
#pragma omp parallel
#pragma omp single
for (k=0; k<LEN; k++)
{
num[k] = omp_get_thread_num();
}
return 0;
}
|
mg.c | //-------------------------------------------------------------------------//
// //
// This benchmark is an OpenMP C version of the NPB MG code. This OpenMP //
// C version is developed by the Center for Manycore Programming at Seoul //
// National University and derived from the OpenMP Fortran versions in //
// "NPB3.3-OMP" developed by NAS. //
// //
// Permission to use, copy, distribute and modify this software for any //
// purpose with or without fee is hereby granted. This software is //
// provided "as is" without express or implied warranty. //
// //
// Information on NPB 3.3, including the technical report, the original //
// specifications, source code, results and information on how to submit //
// new results, is available at: //
// //
// http://www.nas.nasa.gov/Software/NPB/ //
// //
// Send comments or suggestions for this OpenMP C version to //
// cmp@aces.snu.ac.kr //
// //
// Center for Manycore Programming //
// School of Computer Science and Engineering //
// Seoul National University //
// Seoul 151-744, Korea //
// //
// E-mail: cmp@aces.snu.ac.kr //
// //
//-------------------------------------------------------------------------//
//-------------------------------------------------------------------------//
// Authors: Sangmin Seo, Jungwon Kim, Jun Lee, Jeongho Nah, Gangwon Jo, //
// and Jaejin Lee //
//-------------------------------------------------------------------------//
//---------------------------------------------------------------------
// program mg
//---------------------------------------------------------------------
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "globals.h"
#include "randdp.h"
#include "timers.h"
#include "print_results.h"
#include "../my_include/my_include.h"
static void setup(int *n1, int *n2, int *n3);
static void mg3P(double u[], double v[], double r[],
double a[4], double c[4], int n1, int n2, int n3);
static void psinv(void *or, void *ou, int n1, int n2, int n3,
double c[4], int k);
static void resid(void *ou, void *ov, void *or, int n1, int n2, int n3,
double a[4], int k);
static void rprj3(void *or, int m1k, int m2k, int m3k,
void *os, int m1j, int m2j, int m3j, int k);
static void interp(void *oz, int mm1, int mm2, int mm3,
void *ou, int n1, int n2, int n3, int k);
static void norm2u3(void *or, int n1, int n2, int n3,
double *rnm2, double *rnmu,
int nx, int ny, int nz);
static void rep_nrm(void *u, int n1, int n2, int n3, char *title, int kk);
static void comm3(void *ou, int n1, int n2, int n3, int kk);
static void zran3(void *oz, int n1, int n2, int n3, int nx1, int ny1, int k);
static void showall(void *oz, int n1, int n2, int n3);
static double power(double a, int n);
static void bubble(double ten[][2], int j1[][2], int j2[][2], int j3[][2],
int m, int ind);
static void zero3(void *oz, int n1, int n2, int n3);
//-------------------------------------------------------------------------c
// These arrays are in common because they are quite large
// and probably shouldn't be allocated on the stack. They
// are always passed as subroutine args.
//-------------------------------------------------------------------------c
/* commcon /noautom/ */
static double u[NR];
static double v[NR];
static double r[NR];
/* common /grid/ */
static int is1, is2, is3, ie1, ie2, ie3;
/* common /rans_save/ starts */
double starts[NM];
//---------------------------------------------------------------------
// Driver for the MG benchmark: read the problem configuration, run one
// warm-up V-cycle, time `nit` multigrid iterations and verify the final
// L2 norm of the residual against the class reference value.
//---------------------------------------------------------------------
int main()
{
  //-------------------------------------------------------------------------c
  // k is the current level. It is passed down through subroutine args
  // and is NOT global. it is the current iteration
  //-------------------------------------------------------------------------c
  int k, it;
  double t, tinit, mflops;
  double a[4], c[4];            // resid / smoother stencil coefficients
  double rnm2, rnmu, old2, oldu, epsilon;
  int n1, n2, n3, nit;
  double nn, verify_value, err;
  logical verified;
  int i;
  char *t_names[T_last];
  double tmax;

  for (i = T_init; i < T_last; i++) {
    timer_clear(i);
  }
  timer_start(T_init);

  //---------------------------------------------------------------------
  // Read in and broadcast input data
  //---------------------------------------------------------------------
  FILE *fp;
  // Per-section timing is only reported when a "timer.flag" file exists.
  if ((fp = fopen("timer.flag", "r")) != NULL) {
    timeron = true;
    t_names[T_init] = "init";
    t_names[T_bench] = "benchmk";
    t_names[T_mg3P] = "mg3P";
    t_names[T_psinv] = "psinv";
    t_names[T_resid] = "resid";
    t_names[T_rprj3] = "rprj3";
    t_names[T_interp] = "interp";
    t_names[T_norm2] = "norm2";
    t_names[T_comm3] = "comm3";
    // t_names[T_resid2] is intentionally unset: the report loop below
    // special-cases that slot and prints "mg-resid" instead.
    fclose(fp);
  } else {
    timeron = false;
  }

  printf("\n\n NAS Parallel Benchmarks (NPB3.3-OMP-C) - MG Benchmark\n\n");

  if ((fp = fopen("mg.input", "r")) != NULL) {
    int result;
    printf(" Reading from input file mg.input\n");
    // BUGFIX: this line previously read "fscanf(fp, \"%d\\n\", <)" -- a
    // mangled "&lt" (number of grid levels) that did not compile.
    result = fscanf(fp, "%d\n", &lt);
    while (fgetc(fp) != '\n');
    result = fscanf(fp, "%d%d%d", &nx[lt], &ny[lt], &nz[lt]);
    while (fgetc(fp) != '\n');
    result = fscanf(fp, "%d", &nit);
    while (fgetc(fp) != '\n');
    for (i = 0; i <= 7; i++) {
      result = fscanf(fp, "%d", &debug_vec[i]);
    }
    fclose(fp);
  } else {
    printf(" No input file. Using compiled defaults \n");
    lt = LT_DEFAULT;
    nit = NIT_DEFAULT;
    nx[lt] = NX_DEFAULT;
    ny[lt] = NY_DEFAULT;
    nz[lt] = NZ_DEFAULT;
    for (i = 0; i <= 7; i++) {
      debug_vec[i] = DEBUG_DEFAULT;
    }
  }

  // Map the (cubic) problem size and iteration count onto a benchmark class;
  // anything unrecognized (including non-cubic grids) is 'U' (unknown).
  if ( (nx[lt] != ny[lt]) || (nx[lt] != nz[lt]) ) {
    Class = 'U';
  } else if ( nx[lt] == 32 && nit == 4 ) {
    Class = 'S';
  } else if ( nx[lt] == 128 && nit == 4 ) {
    Class = 'W';
  } else if ( nx[lt] == 256 && nit == 4 ) {
    Class = 'A';
  } else if ( nx[lt] == 256 && nit == 20 ) {
    Class = 'B';
  } else if ( nx[lt] == 512 && nit == 20 ) {
    Class = 'C';
  } else if ( nx[lt] == 1024 && nit == 50 ) {
    Class = 'D';
  } else if ( nx[lt] == 2048 && nit == 50 ) {
    Class = 'E';
  } else {
    Class = 'U';
  }

  //---------------------------------------------------------------------
  // Use these for debug info:
  //---------------------------------------------------------------------
  // debug_vec(0) = 1 !=> report all norms
  // debug_vec(1) = 1 !=> some setup information
  // debug_vec(1) = 2 !=> more setup information
  // debug_vec(2) = k => at level k or below, show result of resid
  // debug_vec(3) = k => at level k or below, show result of psinv
  // debug_vec(4) = k => at level k or below, show result of rprj
  // debug_vec(5) = k => at level k or below, show result of interp
  // debug_vec(6) = 1 => (unused)
  // debug_vec(7) = 1 => (unused)
  //---------------------------------------------------------------------
  a[0] = -8.0/3.0;
  a[1] = 0.0;
  a[2] = 1.0/6.0;
  a[3] = 1.0/12.0;

  if (Class == 'A' || Class == 'S' || Class =='W') {
    //---------------------------------------------------------------------
    // Coefficients for the S(a) smoother
    //---------------------------------------------------------------------
    c[0] = -3.0/8.0;
    c[1] = +1.0/32.0;
    c[2] = -1.0/64.0;
    c[3] = 0.0;
  } else {
    //---------------------------------------------------------------------
    // Coefficients for the S(b) smoother
    //---------------------------------------------------------------------
    c[0] = -3.0/17.0;
    c[1] = +1.0/33.0;
    c[2] = -1.0/61.0;
    c[3] = 0.0;
  }

  lb = 1;
  k = lt;

  setup(&n1, &n2, &n3);
  zero3(u, n1, n2, n3);
  zran3(v, n1, n2, n3, nx[lt], ny[lt], k);
  norm2u3(v, n1, n2, n3, &rnm2, &rnmu, nx[lt], ny[lt], nz[lt]);

  // printf("\n");
  // printf(" norms of random v are\n");
  // printf("%4d%19.2f%19.2e\n", 0, rnm2, rnmu);
  // printf(" about to evaluate resid, k=%d\n", k);

  printf(" Size: %4dx%4dx%4d (class %c)\n", nx[lt], ny[lt], nz[lt], Class);
  printf(" Iterations: %5d\n", nit);
  printf(" Number of available threads: %5d\n", omp_get_max_threads());
  printf("\n");

  resid(u, v, r, n1, n2, n3, a, k);
  norm2u3(r, n1, n2, n3, &rnm2, &rnmu, nx[lt], ny[lt], nz[lt]);
  old2 = rnm2;
  oldu = rnmu;

  //---------------------------------------------------------------------
  // One iteration for startup
  //---------------------------------------------------------------------
  mg3P(u, v, r, a, c, n1, n2, n3);
  resid(u, v, r, n1, n2, n3, a, k);
  // Reset the problem state after the warm-up so the timed run starts
  // from the same initial conditions.
  setup(&n1, &n2, &n3);
  zero3(u, n1, n2, n3);
  zran3(v, n1, n2, n3, nx[lt], ny[lt], k);

  timer_stop(T_init);
  tinit = timer_read(T_init);
  printf(" Initialization time: %15.3f seconds\n\n", tinit);

  for (i = T_bench; i < T_last; i++) {
    timer_clear(i);
  }
  timer_start(T_bench);

  if (timeron) timer_start(T_resid2);
  resid(u, v, r, n1, n2, n3, a, k);
  if (timeron) timer_stop(T_resid2);
  norm2u3(r, n1, n2, n3, &rnm2, &rnmu, nx[lt], ny[lt], nz[lt]);
  old2 = rnm2;
  oldu = rnmu;

  start_crash();
  // Timed iterations: one V-cycle plus a residual evaluation per step.
  for (it = 1; it <= nit; it++) {
    if ((it == 1) || (it == nit) || ((it % 5) == 0)) {
      printf(" iter %3d\n", it);
    }
    if (timeron) timer_start(T_mg3P);
    mg3P(u, v, r, a, c, n1, n2, n3);
    if (timeron) timer_stop(T_mg3P);
    if (timeron) timer_start(T_resid2);
    resid(u, v, r, n1, n2, n3, a, k);
    if (timeron) timer_stop(T_resid2);
  }
  end_crash();

  norm2u3(r, n1, n2, n3, &rnm2, &rnmu, nx[lt], ny[lt], nz[lt]);
  timer_stop(T_bench);
  t = timer_read(T_bench);

  verified = false;
  verify_value = 0.0;

  printf("\n Benchmark completed\n");

  // Compare the final residual norm against the per-class reference value.
  epsilon = 1.0e-8;
  if (Class != 'U') {
    if (Class == 'S') {
      verify_value = 0.5307707005734e-04;
    } else if (Class == 'W') {
      verify_value = 0.6467329375339e-05;
    } else if (Class == 'A') {
      verify_value = 0.2433365309069e-05;
    } else if (Class == 'B') {
      verify_value = 0.1800564401355e-05;
    } else if (Class == 'C') {
      verify_value = 0.5706732285740e-06;
    } else if (Class == 'D') {
      verify_value = 0.1583275060440e-09;
    } else if (Class == 'E') {
      verify_value = 0.5630442584711e-10;
    }
    err = fabs( rnm2 - verify_value ) / verify_value;
    if (err <= epsilon) {
      verified = true;
      printf(" VERIFICATION SUCCESSFUL\n");
      printf(" L2 Norm is %20.13E\n", rnm2);
      printf(" Error is %20.13E\n", err);
    } else {
      verified = false;
      printf(" VERIFICATION FAILED\n");
      printf(" L2 Norm is %20.13E\n", rnm2);
      printf(" The correct L2 Norm is %20.13E\n", verify_value);
    }
  } else {
    verified = false;
    printf(" Problem size unknown\n");
    printf(" NO VERIFICATION PERFORMED\n");
    printf(" L2 Norm is %20.13E\n", rnm2);
  }

  nn = 1.0 * nx[lt] * ny[lt] * nz[lt];
  if (t != 0.0) {
    mflops = 58.0 * nit * nn * 1.0e-6 / t;
  } else {
    mflops = 0.0;
  }

  print_results("MG", Class, nx[lt], ny[lt], nz[lt],
                nit, t,
                mflops, " floating point",
                verified, NPBVERSION, COMPILETIME,
                CS1, CS2, CS3, CS4, CS5, CS6, CS7);

  //---------------------------------------------------------------------
  // More timers
  //---------------------------------------------------------------------
  if (timeron) {
    tmax = timer_read(T_bench);
    if (tmax == 0.0) tmax = 1.0;
    printf(" SECTION Time (secs)\n");
    for (i = T_bench; i < T_last; i++) {
      t = timer_read(i);
      if (i == T_resid2) {
        // The second resid timer covers the resid calls made directly from
        // main; the difference against T_resid isolates mg3P-internal resid.
        t = timer_read(T_resid) - t;
        printf(" --> %8s:%9.3f (%6.2f%%)\n", "mg-resid", t, t*100./tmax);
      } else {
        printf(" %-8s:%9.3f (%6.2f%%)\n", t_names[i], t, t*100./tmax);
      }
    }
  }

  return 0;
}
// Derive the grid sizes for every multigrid level from the finest level (lt)
// and fill the global index-bound / offset tables used by the solver.
// Outputs *n1, *n2, *n3 are the padded extents of the finest-level arrays.
static void setup(int *n1, int *n2, int *n3)
{
  int lev, j;
  int axis, mi[MAXLEVEL+1][3];
  int ng[MAXLEVEL+1][3];

  // Finest-grid dimensions come from the global input sizes.
  ng[lt][0] = nx[lt];
  ng[lt][1] = ny[lt];
  ng[lt][2] = nz[lt];

  // Each coarser level halves every dimension.
  for (lev = lt-1; lev >= 1; lev--)
    for (axis = 0; axis < 3; axis++)
      ng[lev][axis] = ng[lev+1][axis] / 2;

  // Publish the per-level sizes to the global nx/ny/nz tables.
  for (lev = lt; lev >= 1; lev--) {
    nx[lev] = ng[lev][0];
    ny[lev] = ng[lev][1];
    nz[lev] = ng[lev][2];
  }

  // Array extents: grid points plus a two-cell border.
  for (lev = lt; lev >= 1; lev--) {
    for (axis = 0; axis < 3; axis++)
      mi[lev][axis] = 2 + ng[lev][axis];
    m1[lev] = mi[lev][0];
    m2[lev] = mi[lev][1];
    m3[lev] = mi[lev][2];
  }

  // Index bounds and padded extents for the finest level.
  lev = lt;
  is1 = 2 + ng[lev][0] - ng[lt][0];
  ie1 = 1 + ng[lev][0];
  *n1 = 3 + ie1 - is1;
  is2 = 2 + ng[lev][1] - ng[lt][1];
  ie2 = 1 + ng[lev][1];
  *n2 = 3 + ie2 - is2;
  is3 = 2 + ng[lev][2] - ng[lt][2];
  ie3 = 1 + ng[lev][2];
  *n3 = 3 + ie3 - is3;

  // Offset of each level's data inside the flat u/v/r arrays: level lt
  // starts at 0, coarser levels are stacked after the next finer one.
  ir[lt] = 0;
  for (j = lt-1; j >= 1; j--)
    ir[j] = ir[j+1] + ONE*m1[j+1]*m2[j+1]*m3[j+1];

  if (debug_vec[1] >= 1) {
    printf(" in setup, \n");
    printf(" k lt nx ny nz n1 n2 n3 is1 is2 is3 ie1 ie2 ie3\n");
    printf("%4d%4d%4d%4d%4d%4d%4d%4d%4d%4d%4d%4d%4d%4d\n",
           lev,lt,ng[lev][0],ng[lev][1],ng[lev][2],*n1,*n2,*n3,
           is1,is2,is3,ie1,ie2,ie3);
  }
}
//---------------------------------------------------------------------
// multigrid V-cycle routine
//---------------------------------------------------------------------
static void mg3P(double u[], double v[], double r[],
double a[4], double c[4], int n1, int n2, int n3)
{
// k walks the levels (lt = finest, lb = coarsest); j is the next coarser one.
int j, k;
//---------------------------------------------------------------------
// down cycle.
// restrict the residual from the find grid to the coarse
//---------------------------------------------------------------------
for (k = lt; k >= lb+1; k--) {
j = k - 1;
rprj3(&r[ir[k]], m1[k], m2[k], m3[k],
&r[ir[j]], m1[j], m2[j], m3[j], k);
}
k = lb;
//---------------------------------------------------------------------
// compute an approximate solution on the coarsest grid
//---------------------------------------------------------------------
zero3(&u[ir[k]], m1[k], m2[k], m3[k]);
psinv(&r[ir[k]], &u[ir[k]], m1[k], m2[k], m3[k], c, k);
// Up cycle: prolongate, recompute the residual, then smooth at each level.
for (k = lb+1; k <= lt-1; k++) {
j = k - 1;
//---------------------------------------------------------------------
// prolongate from level k-1 to k
//---------------------------------------------------------------------
zero3(&u[ir[k]], m1[k], m2[k], m3[k]);
interp(&u[ir[j]], m1[j], m2[j], m3[j], &u[ir[k]], m1[k], m2[k], m3[k], k);
//---------------------------------------------------------------------
// compute residual for level k
//---------------------------------------------------------------------
// NOTE(review): r serves as both the right-hand side and the output here,
// i.e. the level-k residual is updated in place (as in the NPB reference).
resid(&u[ir[k]], &r[ir[k]], &r[ir[k]], m1[k], m2[k], m3[k], a, k);
//---------------------------------------------------------------------
// apply smoother
//---------------------------------------------------------------------
psinv(&r[ir[k]], &u[ir[k]], m1[k], m2[k], m3[k], c, k);
}
// Final prolongation to the finest grid, residual against v, one smoothing.
j = lt - 1;
k = lt;
interp(&u[ir[j]], m1[j], m2[j], m3[j], u, n1, n2, n3, k);
resid(u, v, r, n1, n2, n3, a, k);
psinv(r, u, n1, n2, n3, c, k);
}
//---------------------------------------------------------------------
// psinv applies an approximate inverse as smoother: u = u + Cr
//
// This implementation costs 15A + 4M per result, where
// A and M denote the costs of Addition and Multiplication.
// Presuming coefficient c(3) is zero (the NPB assumes this,
// but it is thus not a general case), 2A + 1M may be eliminated,
// resulting in 13A + 3M.
// Note that this vectorizes, and is also fine for cache
// based machines.
//---------------------------------------------------------------------
static void psinv(void *or, void *ou, int n1, int n2, int n3,
double c[4], int k)
{
// View the flat buffers as n3 x n2 x n1 arrays (C99 VLA pointer cast).
double (*r)[n2][n1] = (double (*)[n2][n1])or;
double (*u)[n2][n1] = (double (*)[n2][n1])ou;
int i3, i2, i1;
// Per-thread scratch rows: r1 caches sums of the 4 face neighbours in the
// i2/i3 directions, r2 the 4 diagonal neighbours in the i2-i3 plane.
double r1[M], r2[M];
if (timeron) timer_start(T_psinv);
#pragma omp parallel for default(shared) private(i1,i2,i3,r1,r2)
for (i3 = 1; i3 < n3-1; i3++) {
for (i2 = 1; i2 < n2-1; i2++) {
for (i1 = 0; i1 < n1; i1++) {
r1[i1] = r[i3][i2-1][i1] + r[i3][i2+1][i1]
+ r[i3-1][i2][i1] + r[i3+1][i2][i1];
r2[i1] = r[i3-1][i2-1][i1] + r[i3-1][i2+1][i1]
+ r[i3+1][i2-1][i1] + r[i3+1][i2+1][i1];
}
for (i1 = 1; i1 < n1-1; i1++) {
u[i3][i2][i1] = u[i3][i2][i1]
+ c[0] * r[i3][i2][i1]
+ c[1] * ( r[i3][i2][i1-1] + r[i3][i2][i1+1]
+ r1[i1] )
+ c[2] * ( r2[i1] + r1[i1-1] + r1[i1+1] );
//--------------------------------------------------------------------
// Assume c[3] = 0 (Enable line below if c[3] not= 0)
//--------------------------------------------------------------------
// + c[3] * ( r2[i1-1] + r2[i1+1] )
//--------------------------------------------------------------------
}
}
}
if (timeron) timer_stop(T_psinv);
//---------------------------------------------------------------------
// exchange boundary points
//---------------------------------------------------------------------
comm3(u, n1, n2, n3, k);
if (debug_vec[0] >= 1) {
rep_nrm(u, n1, n2, n3, " psinv", k);
}
if (debug_vec[3] >= k) {
showall(u, n1, n2, n3);
}
}
//---------------------------------------------------------------------
// resid computes the residual: r = v - Au
//
// This implementation costs 15A + 4M per result, where
// A and M denote the costs of Addition (or Subtraction) and
// Multiplication, respectively.
// Presuming coefficient a(1) is zero (the NPB assumes this,
// but it is thus not a general case), 3A + 1M may be eliminated,
// resulting in 12A + 3M.
// Note that this vectorizes, and is also fine for cache
// based machines.
//---------------------------------------------------------------------
static void resid(void *ou, void *ov, void *or, int n1, int n2, int n3,
double a[4], int k)
{
// View the flat buffers as n3 x n2 x n1 arrays (C99 VLA pointer cast).
double (*u)[n2][n1] = (double (*)[n2][n1])ou;
double (*v)[n2][n1] = (double (*)[n2][n1])ov;
double (*r)[n2][n1] = (double (*)[n2][n1])or;
int i3, i2, i1;
// Per-thread scratch rows: u1 caches sums of the 4 face neighbours in the
// i2/i3 directions, u2 the 4 diagonal neighbours in the i2-i3 plane.
double u1[M], u2[M];
if (timeron) timer_start(T_resid);
#pragma omp parallel for default(shared) private(i1,i2,i3,u1,u2)
for (i3 = 1; i3 < n3-1; i3++) {
for (i2 = 1; i2 < n2-1; i2++) {
for (i1 = 0; i1 < n1; i1++) {
u1[i1] = u[i3][i2-1][i1] + u[i3][i2+1][i1]
+ u[i3-1][i2][i1] + u[i3+1][i2][i1];
u2[i1] = u[i3-1][i2-1][i1] + u[i3-1][i2+1][i1]
+ u[i3+1][i2-1][i1] + u[i3+1][i2+1][i1];
}
for (i1 = 1; i1 < n1-1; i1++) {
r[i3][i2][i1] = v[i3][i2][i1]
- a[0] * u[i3][i2][i1]
//-------------------------------------------------------------------
// Assume a[1] = 0 (Enable 2 lines below if a[1] not= 0)
//-------------------------------------------------------------------
// - a[1] * ( u[i3][i2][i1-1] + u[i3][i2][i1+1]
// + u1[i1] )
//-------------------------------------------------------------------
- a[2] * ( u2[i1] + u1[i1-1] + u1[i1+1] )
- a[3] * ( u2[i1-1] + u2[i1+1] );
}
}
}
if (timeron) timer_stop(T_resid);
//---------------------------------------------------------------------
// exchange boundary data
//---------------------------------------------------------------------
comm3(r, n1, n2, n3, k);
if (debug_vec[0] >= 1) {
rep_nrm(r, n1, n2, n3, " resid", k);
}
if (debug_vec[2] >= k) {
showall(r, n1, n2, n3);
}
}
//---------------------------------------------------------------------
// rprj3 projects onto the next coarser grid,
// using a trilinear Finite Element projection: s = r' = P r
//
// This implementation costs 20A + 4M per result, where
// A and M denote the costs of Addition and Multiplication.
// Note that this vectorizes, and is also fine for cache
// based machines.
//---------------------------------------------------------------------
static void rprj3(void *or, int m1k, int m2k, int m3k,
void *os, int m1j, int m2j, int m3j, int k)
{
// Fine grid r (m3k x m2k x m1k) is restricted onto coarse grid s.
double (*r)[m2k][m1k] = (double (*)[m2k][m1k])or;
double (*s)[m2j][m1j] = (double (*)[m2j][m1j])os;
int j3, j2, j1, i3, i2, i1, d1, d2, d3, j;
double x1[M], y1[M], x2, y2;
if (timeron) timer_start(T_rprj3);
// d1/d2/d3 select the fine-grid index offset: 2 when the fine dimension has
// shrunk to its minimum extent of 3 points, 1 otherwise.
if (m1k == 3) {
d1 = 2;
} else {
d1 = 1;
}
if (m2k == 3) {
d2 = 2;
} else {
d2 = 1;
}
if (m3k == 3) {
d3 = 2;
} else {
d3 = 1;
}
#pragma omp parallel for default(shared) \
private(j1,j2,j3,i1,i2,i3,x1,y1,x2,y2)
for (j3 = 1; j3 < m3j-1; j3++) {
i3 = 2*j3-d3;
for (j2 = 1; j2 < m2j-1; j2++) {
i2 = 2*j2-d2;
// First pass: cache edge (x1) and corner (y1) neighbour sums per column.
for (j1 = 1; j1 < m1j; j1++) {
i1 = 2*j1-d1;
x1[i1] = r[i3+1][i2 ][i1] + r[i3+1][i2+2][i1]
+ r[i3 ][i2+1][i1] + r[i3+2][i2+1][i1];
y1[i1] = r[i3 ][i2 ][i1] + r[i3+2][i2 ][i1]
+ r[i3 ][i2+2][i1] + r[i3+2][i2+2][i1];
}
// Second pass: weighted trilinear combination (1/2, 1/4, 1/8, 1/16).
for (j1 = 1; j1 < m1j-1; j1++) {
i1 = 2*j1-d1;
y2 = r[i3 ][i2 ][i1+1] + r[i3+2][i2 ][i1+1]
+ r[i3 ][i2+2][i1+1] + r[i3+2][i2+2][i1+1];
x2 = r[i3+1][i2 ][i1+1] + r[i3+1][i2+2][i1+1]
+ r[i3 ][i2+1][i1+1] + r[i3+2][i2+1][i1+1];
s[j3][j2][j1] =
0.5 * r[i3+1][i2+1][i1+1]
+ 0.25 * (r[i3+1][i2+1][i1] + r[i3+1][i2+1][i1+2] + x2)
+ 0.125 * (x1[i1] + x1[i1+2] + y2)
+ 0.0625 * (y1[i1] + y1[i1+2]);
}
}
}
if (timeron) timer_stop(T_rprj3);
j = k-1;
// Refresh the coarse grid's boundary/ghost data.
comm3(s, m1j, m2j, m3j, j);
if (debug_vec[0] >= 1) {
rep_nrm(s, m1j, m2j, m3j, " rprj3", k-1);
}
if (debug_vec[4] >= k) {
showall(s, m1j, m2j, m3j);
}
}
//---------------------------------------------------------------------
// interp adds the trilinear interpolation of the correction
// from the coarser grid to the current approximation: u = u + Qu'
//
// Observe that this implementation costs 16A + 4M, where
// A and M denote the costs of Addition and Multiplication.
// Note that this vectorizes, and is also fine for cache
// based machines. Vector machines may get slightly better
// performance however, with 8 separate "do i1" loops, rather than 4.
//---------------------------------------------------------------------
static void interp(void *oz, int mm1, int mm2, int mm3,
void *ou, int n1, int n2, int n3, int k)
{
// Coarse grid z (mm3 x mm2 x mm1) is interpolated and added onto u.
double (*z)[mm2][mm1] = (double (*)[mm2][mm1])oz;
double (*u)[n2][n1] = (double (*)[n2][n1])ou;
int i3, i2, i1, d1, d2, d3, t1, t2, t3;
// note that m = 1037 in globals.h but for this only need to be
// 535 to handle up to 1024^3
// integer m
// parameter( m=535 )
double z1[M], z2[M], z3[M];
if (timeron) timer_start(T_interp);
// Standard case: no dimension is at its minimum extent of 3 points.
if (n1 != 3 && n2 != 3 && n3 != 3) {
#pragma omp parallel for default(shared) private(i1,i2,i3,z1,z2,z3)
for (i3 = 0; i3 < mm3-1; i3++) {
for (i2 = 0; i2 < mm2-1; i2++) {
// Cache the pairwise/edge neighbour sums reused by the four
// interpolation passes below.
for (i1 = 0; i1 < mm1; i1++) {
z1[i1] = z[i3][i2+1][i1] + z[i3][i2][i1];
z2[i1] = z[i3+1][i2][i1] + z[i3][i2][i1];
z3[i1] = z[i3+1][i2+1][i1] + z[i3+1][i2][i1] + z1[i1];
}
for (i1 = 0; i1 < mm1-1; i1++) {
u[2*i3][2*i2][2*i1] = u[2*i3][2*i2][2*i1]
+ z[i3][i2][i1];
u[2*i3][2*i2][2*i1+1] = u[2*i3][2*i2][2*i1+1]
+ 0.5 * (z[i3][i2][i1+1] + z[i3][i2][i1]);
}
for (i1 = 0; i1 < mm1-1; i1++) {
u[2*i3][2*i2+1][2*i1] = u[2*i3][2*i2+1][2*i1]
+ 0.5 * z1[i1];
u[2*i3][2*i2+1][2*i1+1] = u[2*i3][2*i2+1][2*i1+1]
+ 0.25 * (z1[i1] + z1[i1+1]);
}
for (i1 = 0; i1 < mm1-1; i1++) {
u[2*i3+1][2*i2][2*i1] = u[2*i3+1][2*i2][2*i1]
+ 0.5 * z2[i1];
u[2*i3+1][2*i2][2*i1+1] = u[2*i3+1][2*i2][2*i1+1]
+ 0.25 * (z2[i1] + z2[i1+1]);
}
for (i1 = 0; i1 < mm1-1; i1++) {
u[2*i3+1][2*i2+1][2*i1] = u[2*i3+1][2*i2+1][2*i1]
+ 0.25 * z3[i1];
u[2*i3+1][2*i2+1][2*i1+1] = u[2*i3+1][2*i2+1][2*i1+1]
+ 0.125 * (z3[i1] + z3[i1+1]);
}
}
}
} else {
// Degenerate case: at least one dimension has only 3 points.
// d/t offsets pick the correct fine-grid index per dimension.
if (n1 == 3) {
d1 = 2;
t1 = 1;
} else {
d1 = 1;
t1 = 0;
}
if (n2 == 3) {
d2 = 2;
t2 = 1;
} else {
d2 = 1;
t2 = 0;
}
if (n3 == 3) {
d3 = 2;
t3 = 1;
} else {
d3 = 1;
t3 = 0;
}
#pragma omp parallel default(shared) private(i1,i2,i3)
{
#pragma omp for
for (i3 = d3; i3 <= mm3-1; i3++) {
for (i2 = d2; i2 <= mm2-1; i2++) {
for (i1 = d1; i1 <= mm1-1; i1++) {
u[2*i3-d3-1][2*i2-d2-1][2*i1-d1-1] =
u[2*i3-d3-1][2*i2-d2-1][2*i1-d1-1]
+ z[i3-1][i2-1][i1-1];
}
for (i1 = 1; i1 <= mm1-1; i1++) {
u[2*i3-d3-1][2*i2-d2-1][2*i1-t1-1] =
u[2*i3-d3-1][2*i2-d2-1][2*i1-t1-1]
+ 0.5 * (z[i3-1][i2-1][i1] + z[i3-1][i2-1][i1-1]);
}
}
for (i2 = 1; i2 <= mm2-1; i2++) {
for (i1 = d1; i1 <= mm1-1; i1++) {
u[2*i3-d3-1][2*i2-t2-1][2*i1-d1-1] =
u[2*i3-d3-1][2*i2-t2-1][2*i1-d1-1]
+ 0.5 * (z[i3-1][i2][i1-1] + z[i3-1][i2-1][i1-1]);
}
for (i1 = 1; i1 <= mm1-1; i1++) {
u[2*i3-d3-1][2*i2-t2-1][2*i1-t1-1] =
u[2*i3-d3-1][2*i2-t2-1][2*i1-t1-1]
+ 0.25 * (z[i3-1][i2][i1] + z[i3-1][i2-1][i1]
+ z[i3-1][i2][i1-1] + z[i3-1][i2-1][i1-1]);
}
}
}
// The two worksharing loops write disjoint planes of u; nowait lets
// threads proceed without an intermediate barrier (as in the original).
#pragma omp for nowait
for (i3 = 1; i3 <= mm3-1; i3++) {
for (i2 = d2; i2 <= mm2-1; i2++) {
for (i1 = d1; i1 <= mm1-1; i1++) {
u[2*i3-t3-1][2*i2-d2-1][2*i1-d1-1] =
u[2*i3-t3-1][2*i2-d2-1][2*i1-d1-1]
+ 0.5 * (z[i3][i2-1][i1-1] + z[i3-1][i2-1][i1-1]);
}
for (i1 = 1; i1 <= mm1-1; i1++) {
u[2*i3-t3-1][2*i2-d2-1][2*i1-t1-1] =
u[2*i3-t3-1][2*i2-d2-1][2*i1-t1-1]
+ 0.25 * (z[i3 ][i2-1][i1] + z[i3 ][i2-1][i1-1]
+ z[i3-1][i2-1][i1] + z[i3-1][i2-1][i1-1]);
}
}
for (i2 = 1; i2 <= mm2-1; i2++) {
for (i1 = d1; i1 <= mm1-1; i1++) {
u[2*i3-t3-1][2*i2-t2-1][2*i1-d1-1] =
u[2*i3-t3-1][2*i2-t2-1][2*i1-d1-1]
+ 0.25 * (z[i3 ][i2][i1-1] + z[i3 ][i2-1][i1-1]
+ z[i3-1][i2][i1-1] + z[i3-1][i2-1][i1-1]);
}
for (i1 = 1; i1 <= mm1-1; i1++) {
u[2*i3-t3-1][2*i2-t2-1][2*i1-t1-1] =
u[2*i3-t3-1][2*i2-t2-1][2*i1-t1-1]
+ 0.125 * (z[i3 ][i2][i1 ] + z[i3 ][i2-1][i1 ]
+ z[i3 ][i2][i1-1] + z[i3 ][i2-1][i1-1]
+ z[i3-1][i2][i1 ] + z[i3-1][i2-1][i1 ]
+ z[i3-1][i2][i1-1] + z[i3-1][i2-1][i1-1]);
}
}
}
} // end parallel
}
if (timeron) timer_stop(T_interp);
if (debug_vec[0] >= 1) {
rep_nrm(z, mm1, mm2, mm3, "z: inter", k-1);
rep_nrm(u, n1, n2, n3, "u: inter", k);
}
if (debug_vec[5] >= k) {
showall(z, mm1, mm2, mm3);
showall(u, n1, n2, n3);
}
}
//---------------------------------------------------------------------
// norm2u3 evaluates approximations to the L2 norm and the
// uniform (or L-infinity or Chebyshev) norm, under the
// assumption that the boundaries are periodic or zero. Add the
// boundaries in with half weight (quarter weight on the edges
// and eighth weight at the corners) for inhomogeneous boundaries.
//---------------------------------------------------------------------
// Computes *rnm2 = sqrt( sum(r^2) / (nx*ny*nz) ) over the interior points
// of the n1 x n2 x n3 array `or`, and *rnmu = max |r| over the same points.
static void norm2u3(void *or, int n1, int n2, int n3,
double *rnm2, double *rnmu,
int nx, int ny, int nz)
{
double (*r)[n2][n1] = (double (*)[n2][n1])or;
double s, a;
int i3, i2, i1;
double dn, max_rnmu;
if (timeron) timer_start(T_norm2);
// dn = total number of global grid points, used to normalize the L2 norm
dn = 1.0*nx*ny*nz;
s = 0.0;
max_rnmu = 0.0;
// The squared sum `s` is combined across threads with an OpenMP reduction;
// the maximum is tracked per-thread in my_rnmu and merged at the end.
// NOTE(review): pow(x, 2.0) could simply be x*x — left as-is for bit-compat.
#pragma omp parallel default(shared) private(i1,i2,i3,a) reduction(+:s)
{
double my_rnmu = 0.0;
#pragma omp for nowait
for (i3 = 1; i3 < n3-1; i3++) {
for (i2 = 1; i2 < n2-1; i2++) {
for (i1 = 1; i1 < n1-1; i1++) {
s = s + pow(r[i3][i2][i1], 2.0);
a = fabs(r[i3][i2][i1]);
my_rnmu = (a > my_rnmu) ? a : my_rnmu;
}
}
}
// Double-checked max merge: the unsynchronized outer test only lets a
// thread skip the critical section when it cannot win; the test is
// repeated inside the critical section, so the final value is correct.
if (my_rnmu > max_rnmu) {
#pragma omp critical
max_rnmu = (my_rnmu > max_rnmu) ? my_rnmu : max_rnmu;
}
} // end parallel
*rnmu = max_rnmu;
*rnm2 = sqrt(s / dn);
if (timeron) timer_stop(T_norm2);
}
//---------------------------------------------------------------------
// report on norm
//---------------------------------------------------------------------
// Compute the L2 and uniform norms of grid `u` at multigrid level kk and
// print them on one report line (format unchanged from the reference code).
static void rep_nrm(void *u, int n1, int n2, int n3, char *title, int kk)
{
  double l2_norm;
  double inf_norm;

  norm2u3(u, n1, n2, n3, &l2_norm, &inf_norm, nx[kk], ny[kk], nz[kk]);
  printf(" Level%2d in %8s: norms =%21.14E%21.14E\n", kk, title, l2_norm, inf_norm);
}
//---------------------------------------------------------------------
// comm3 organizes the communication on all borders
//---------------------------------------------------------------------
//---------------------------------------------------------------------
// comm3: apply periodic boundary conditions on all six faces of u by
// copying the opposite interior plane into each ghost plane.  In this
// shared-memory version no real communication happens; kk is unused.
//---------------------------------------------------------------------
static void comm3(void *ou, int n1, int n2, int n3, int kk)
{
  double (*u)[n2][n1] = (double (*)[n2][n1])ou;
  int ix, iy, iz;

  if (timeron) timer_start(T_comm3);
  #pragma omp parallel default(shared) private(ix,iy,iz)
  {
    // Wrap the x- and y-ghost planes of every interior z-slab.
    #pragma omp for
    for (iz = 1; iz < n3-1; iz++) {
      for (iy = 1; iy < n2-1; iy++) {
        u[iz][iy][0]    = u[iz][iy][n1-2];
        u[iz][iy][n1-1] = u[iz][iy][1];
      }
      for (ix = 0; ix < n1; ix++) {
        u[iz][0][ix]    = u[iz][n2-2][ix];
        u[iz][n2-1][ix] = u[iz][1][ix];
      }
    }
    // Wrap the two z-ghost slabs (full planes, edges included).
    #pragma omp for nowait
    for (iy = 0; iy < n2; iy++) {
      for (ix = 0; ix < n1; ix++) {
        u[0][iy][ix]    = u[n3-2][iy][ix];
        u[n3-1][iy][ix] = u[1][iy][ix];
      }
    }
  }
  if (timeron) timer_stop(T_comm3);
}
//---------------------------------------------------------------------
// zran3 loads +1 at ten randomly chosen points,
// loads -1 at a different ten random points,
// and zero elsewhere.
//---------------------------------------------------------------------
static void zran3(void *oz, int n1, int n2, int n3, int nx1, int ny1, int k)
{
double (*z)[n2][n1] = (double (*)[n2][n1])oz;
// i0/i1 double as cursors into the candidate lists during the global
// selection phase; mm0/mm1 bound how many charges get placed at the end.
int i0, mm0, mm1;
int i1, i2, i3, d1, e1, e2, e3;
double xx, x0, x1, a1, a2, ai;
const int mm = 10;
// a, x: multiplier and seed of the NPB linear congruential generator
const double a = pow(5.0, 13.0);
const double x = 314159265.0;
// ten[.][1]: the mm largest values seen (ascending); ten[.][0]: the mm
// smallest (descending).  j1/j2/j3 carry the matching grid coordinates.
double ten[mm][2], best0, best1;
int i, j1[mm][2], j2[mm][2], j3[mm][2];
// jg[0] = owning thread id of each global winner, jg[1..3] = i1,i2,i3
int jg[4][mm][2];
double rdummy;
int myid, num_threads;
// seed advance per i2-row (a1) and per i3-plane (a2)
a1 = power(a, nx1);
a2 = power(a, nx1*ny1);
zero3(z, n1, n2, n3);
// offset of this subgrid's first point within the global random stream
i = is1-2+nx1*(is2-2+ny1*(is3-2));
ai = power(a, i);
d1 = ie1 - is1 + 1;
e1 = ie1 - is1 + 2;
e2 = ie2 - is2 + 2;
e3 = ie3 - is3 + 2;
x0 = x;
rdummy = randlc(&x0, ai);
//---------------------------------------------------------------------
// save the starting seeds for the following loop
//---------------------------------------------------------------------
for (i3 = 1; i3 < e3; i3++) {
starts[i3] = x0;
rdummy = randlc(&x0, a2);
}
//---------------------------------------------------------------------
// fill array
//---------------------------------------------------------------------
// Each i3-plane starts from its own pre-computed seed, so the planes can
// be generated in parallel without changing the sequential stream.
#pragma omp parallel for default(shared) private(i2,i3,x1,xx,rdummy) \
shared(e2,e3,d1,a1)
for (i3 = 1; i3 < e3; i3++) {
x1 = starts[i3];
for (i2 = 1; i2 < e2; i2++) {
xx = x1;
vranlc(d1, &xx, a, &(z[i3][i2][1]));
rdummy = randlc(&x1, a1);
}
}
//---------------------------------------------------------------------
// comm3(z,n1,n2,n3);
// showall(z,n1,n2,n3);
//---------------------------------------------------------------------
//---------------------------------------------------------------------
// each thread looks for twenty candidates
//---------------------------------------------------------------------
#pragma omp parallel default(shared) private(i,i0,i1,i2,i3,j1,j2,j3,ten, \
myid,num_threads) shared(best0,best1,n1,n2,n3)
{
// initialize the per-thread candidate lists with sentinel values
for (i = 0; i < mm; i++) {
ten[i][1] = 0.0;
j1[i][1] = 0;
j2[i][1] = 0;
j3[i][1] = 0;
ten[i][0] = 1.0;
j1[i][0] = 0;
j2[i][0] = 0;
j3[i][0] = 0;
}
#pragma omp for
for (i3 = 1; i3 < n3-1; i3++) {
double (*zi3)[n1] = z[i3];
for (i2 = 1; i2 < n2-1; i2++) {
for (i1 = 1; i1 < n1-1; i1++) {
// slot 0 holds the current threshold; bubble() re-sorts after insert
if (zi3[i2][i1] > ten[0][1]) {
ten[0][1] = zi3[i2][i1];
j1[0][1] = i1;
j2[0][1] = i2;
j3[0][1] = i3;
bubble(ten, j1, j2, j3, mm, 1);
}
if (zi3[i2][i1] < ten[0][0]) {
ten[0][0] = zi3[i2][i1];
j1[0][0] = i1;
j2[0][0] = i2;
j3[0][0] = i3;
bubble(ten, j1, j2, j3, mm, 0);
}
}
}
}
//---------------------------------------------------------------------
// Now which of these are globally best?
//---------------------------------------------------------------------
i1 = mm - 1;
i0 = mm - 1;
myid = 0;
myid = omp_get_thread_num();
num_threads = omp_get_num_threads();
for (i = mm - 1; i >= 0; i--) {
// ... ORDERED access is required here for sequential consistency
// ... in case that two values are identical.
// ... Since an "ORDERED" section is only defined in OpenMP 2,
// ... we use a dummy loop to emulate ordered access in OpenMP 1.x.
#pragma omp master
{
best1 = 0.0;
best0 = 1.0;
}
#pragma omp for ordered schedule(static)
for (i2 = 1; i2 <= num_threads; i2++) {
#pragma omp ordered
{
if (ten[i1][1] > best1) {
best1 = ten[i1][1];
jg[0][i][1] = myid;
}
if (ten[i0][0] < best0) {
best0 = ten[i0][0];
jg[0][i][0] = myid;
}
}
}
// the winning thread records its candidate's coordinates and moves its
// private cursor down to the next-best local candidate
if (myid == jg[0][i][1]) {
jg[1][i][1] = j1[i1][1];
jg[2][i][1] = j2[i1][1];
jg[3][i][1] = j3[i1][1];
i1 = i1-1;
}
if (myid == jg[0][i][0]) {
jg[1][i][0] = j1[i0][0];
jg[2][i][0] = j2[i0][0];
jg[3][i][0] = j3[i0][0];
i0 = i0-1;
}
}
} // end parallel
// mm1 = i1+1;
// mm0 = i0+1;
mm1 = 0;
mm0 = 0;
/*
int cnt = 0;
printf(" \n");
printf(" negative charges at\n");
for (i = 0; i < mm; i++) {
printf(" (%3d,%3d,%3d)", jg[1][i][0], jg[2][i][0], jg[3][i][0]);
if (++cnt % 5 == 0) printf("\n");
}
cnt = 0;
printf(" positive charges at\n");
for (i = 0; i < mm; i++) {
printf(" (%3d,%3d,%3d)", jg[1][i][1], jg[2][i][1], jg[3][i][1]);
if (++cnt % 5 == 0) printf("\n");
}
cnt = 0;
printf(" small random numbers were\n");
for (i = mm-1; i >= 0; i--) {
printf(" %15.8E", ten[i][0]);
if (++cnt % 5 == 0) printf("\n");
}
cnt = 0;
printf(" and they were found on processor number\n");
for (i = mm-1; i >= 0; i--) {
printf(" %4d", jg[0][i][0]);
if (++cnt % 10 == 0) printf("\n");
}
cnt = 0;
printf(" large random numbers were\n");
for (i = mm-1; i >= 0; i--) {
printf(" %15.8E", ten[i][1]);
if (++cnt % 5 == 0) printf("\n");
}
cnt = 0;
printf(" and they were found on processor number\n");
for (i = mm-1; i >= 0; i--) {
printf(" %4d", jg[0][i][1]);
if (++cnt % 10 == 0) printf("\n");
}
*/
// wipe the random field, then place the charges: -1 at the mm smallest
// points, +1 at the mm largest
#pragma omp parallel for default(shared) private(i1,i2,i3)
for (i3 = 0; i3 < n3; i3++) {
for (i2 = 0; i2 < n2; i2++) {
for (i1 = 0; i1 < n1; i1++) {
z[i3][i2][i1] = 0.0;
}
}
}
for (i = mm-1; i >= mm0; i--) {
z[jg[3][i][0]][jg[2][i][0]][jg[1][i][0]] = -1.0;
}
for (i = mm-1; i >= mm1; i--) {
z[jg[3][i][1]][jg[2][i][1]][jg[1][i][1]] = +1.0;
}
// apply periodic boundaries to the finished charge field
comm3(z, n1, n2, n3, k);
//---------------------------------------------------------------------
// showall(z,n1,n2,n3);
//---------------------------------------------------------------------
}
// Debug dump: print a clipped view of z (at most 18 x 14 x 18 entries),
// one i2-row of values per output line, slabs separated by a dashed line.
static void showall(void *oz, int n1, int n2, int n3)
{
  double (*z)[n2][n1] = (double (*)[n2][n1])oz;
  int lim1 = min(n1, 18);
  int lim2 = min(n2, 14);
  int lim3 = min(n3, 18);
  int a, b, c;

  printf(" \n");
  for (c = 0; c < lim3; c++) {
    for (a = 0; a < lim1; a++) {
      for (b = 0; b < lim2; b++) {
        printf("%6.3f", z[c][b][a]);
      }
      printf("\n");
    }
    printf(" - - - - - - - \n");
  }
  printf(" \n");
}
//---------------------------------------------------------------------
// power raises an integer, disguised as a double
// precision real, to an integer power
//---------------------------------------------------------------------
//---------------------------------------------------------------------
// power raises an integer, disguised as a double
// precision real, to an integer power.
// Implemented as binary (square-and-multiply) exponentiation on top of
// randlc(), the NPB linear congruential helper defined elsewhere, which
// multiplies its first argument in place.
// Fixes vs. reference: the dead-store variable `rdummy` (its value was
// never read) is removed, and the local no longer shadows the function
// name `power`.
//---------------------------------------------------------------------
static double power(double a, int n)
{
  double result = 1.0;  // running product; this is the returned value
  double aj = a;        // successively squared base
  int nj = n;           // remaining exponent bits

  while (nj != 0) {
    // fold the current square into the result when the low bit is set
    if ((nj % 2) == 1) (void)randlc(&result, aj);
    (void)randlc(&aj, aj);  // square the base for the next bit
    nj = nj / 2;
  }
  return result;
}
//---------------------------------------------------------------------
// bubble does a bubble sort in direction dir
//---------------------------------------------------------------------
//---------------------------------------------------------------------
// bubble performs a single insertion pass on column `ind` of the key
// array ten: the element just written into row 0 is moved upward until
// the column is ordered again — ascending when ind == 1 (largest-values
// list), descending otherwise (smallest-values list).  The companion
// index arrays j1/j2/j3 are kept in step with the keys.
// Fix vs. reference: the two branch bodies were ~20 duplicated lines
// differing only in the comparison operator; they are merged with a
// direction-aware test (behavior unchanged).
//---------------------------------------------------------------------
static void bubble(double ten[][2], int j1[][2], int j2[][2], int j3[][2],
                   int m, int ind)
{
  double temp;
  int i, j_temp;

  for (i = 0; i < m-1; i++) {
    // out-of-order test depends on the requested sort direction
    int swap_needed = (ind == 1) ? (ten[i][ind] > ten[i+1][ind])
                                 : (ten[i][ind] < ten[i+1][ind]);
    if (!swap_needed) {
      // the rest of the column is already ordered (single-pass invariant)
      return;
    }
    temp = ten[i+1][ind];
    ten[i+1][ind] = ten[i][ind];
    ten[i][ind] = temp;

    j_temp = j1[i+1][ind];
    j1[i+1][ind] = j1[i][ind];
    j1[i][ind] = j_temp;

    j_temp = j2[i+1][ind];
    j2[i+1][ind] = j2[i][ind];
    j2[i][ind] = j_temp;

    j_temp = j3[i+1][ind];
    j3[i+1][ind] = j3[i][ind];
    j3[i][ind] = j_temp;
  }
}
// Set every element of the n1 x n2 x n3 array `oz` to 0.0.
// n1 is the fastest-varying (innermost) dimension; the outermost (k)
// loop is distributed across OpenMP threads.
static void zero3(void *oz, int n1, int n2, int n3)
{
  double (*z)[n2][n1] = (double (*)[n2][n1])oz;
  int i, j, k;

  #pragma omp parallel for default(shared) private(i,j,k)
  for (k = 0; k < n3; k++) {
    for (j = 0; j < n2; j++) {
      for (i = 0; i < n1; i++) {
        z[k][j][i] = 0.0;
      }
    }
  }
}
|
data.h | /*!
* Copyright (c) 2015 by Contributors
* \file data.h
* \brief The input data structure of xgboost.
* \author Tianqi Chen
*/
#ifndef XGBOOST_DATA_H_
#define XGBOOST_DATA_H_
#include <dmlc/base.h>
#include <dmlc/data.h>
#include <dmlc/serializer.h>
#include <rabit/rabit.h>
#include <xgboost/base.h>
#include <xgboost/span.h>
#include <xgboost/host_device_vector.h>
#include <memory>
#include <numeric>
#include <algorithm>
#include <string>
#include <utility>
#include <vector>
namespace xgboost {
// forward declare dmatrix.
class DMatrix;
/*! \brief data type accepted by xgboost interface */
enum class DataType : uint8_t {
kFloat32 = 1,
kDouble = 2,
kUInt32 = 3,
kUInt64 = 4
};
/*!
* \brief Meta information about dataset, always sits in memory.
*/
class MetaInfo {
public:
/*! \brief number of data fields in MetaInfo */
static constexpr uint64_t kNumField = 9;
/*! \brief number of rows in the data */
uint64_t num_row_{0}; // NOLINT
/*! \brief number of columns in the data */
uint64_t num_col_{0}; // NOLINT
/*! \brief number of nonzero entries in the data */
uint64_t num_nonzero_{0}; // NOLINT
/*! \brief label of each instance */
HostDeviceVector<bst_float> labels_; // NOLINT
/*!
* \brief the index of begin and end of a group
* needed when the learning task is ranking.
*/
std::vector<bst_group_t> group_ptr_; // NOLINT
/*! \brief weights of each instance, optional */
HostDeviceVector<bst_float> weights_; // NOLINT
/*!
* \brief initialized margins,
* if specified, xgboost will start from this init margin
* can be used to specify initial prediction to boost from.
*/
HostDeviceVector<bst_float> base_margin_; // NOLINT
/*!
* \brief lower bound of the label, to be used for survival analysis (censored regression)
*/
HostDeviceVector<bst_float> labels_lower_bound_; // NOLINT
/*!
* \brief upper bound of the label, to be used for survival analysis (censored regression)
*/
HostDeviceVector<bst_float> labels_upper_bound_; // NOLINT
/*! \brief default constructor */
MetaInfo() = default;
MetaInfo(MetaInfo&& that) = default;
MetaInfo& operator=(MetaInfo&& that) = default;
/*!
* \brief Copy-assignment: deep-copies each HostDeviceVector via
* Resize+Copy.  The mutable label_order_cache_ is not copied; it is
* rebuilt lazily by LabelAbsSort().
*/
MetaInfo& operator=(MetaInfo const& that) {
this->num_row_ = that.num_row_;
this->num_col_ = that.num_col_;
this->num_nonzero_ = that.num_nonzero_;
this->labels_.Resize(that.labels_.Size());
this->labels_.Copy(that.labels_);
this->group_ptr_ = that.group_ptr_;
this->weights_.Resize(that.weights_.Size());
this->weights_.Copy(that.weights_);
this->base_margin_.Resize(that.base_margin_.Size());
this->base_margin_.Copy(that.base_margin_);
this->labels_lower_bound_.Resize(that.labels_lower_bound_.Size());
this->labels_lower_bound_.Copy(that.labels_lower_bound_);
this->labels_upper_bound_.Resize(that.labels_upper_bound_.Size());
this->labels_upper_bound_.Copy(that.labels_upper_bound_);
return *this;
}
/*!
* \brief Validate all metainfo.
*/
void Validate(int32_t device) const;
// NOTE(review): presumably returns metadata restricted to the rows listed
// in ridxs — confirm against the implementation in data.cc.
MetaInfo Slice(common::Span<int32_t const> ridxs) const;
/*!
* \brief Get weight of each instances.
* \param i Instance index.
* \return The weight.
*/
// Returns 1.0f when no weights were provided (unweighted dataset).
inline bst_float GetWeight(size_t i) const {
return weights_.Size() != 0 ? weights_.HostVector()[i] : 1.0f;
}
/*! \brief get sorted indexes (argsort) of labels by absolute value (used by cox loss) */
// NOTE(review): cache validity is keyed on size alone — if labels change
// but keep the same length, a stale ordering could be returned; confirm
// that callers invalidate via Clear() in that case.
inline const std::vector<size_t>& LabelAbsSort() const {
if (label_order_cache_.size() == labels_.Size()) {
return label_order_cache_;
}
label_order_cache_.resize(labels_.Size());
std::iota(label_order_cache_.begin(), label_order_cache_.end(), 0);
const auto& l = labels_.HostVector();
XGBOOST_PARALLEL_SORT(label_order_cache_.begin(), label_order_cache_.end(),
[&l](size_t i1, size_t i2) {return std::abs(l[i1]) < std::abs(l[i2]);});
return label_order_cache_;
}
/*! \brief clear all the information */
void Clear();
/*!
* \brief Load the Meta info from binary stream.
* \param fi The input stream
*/
void LoadBinary(dmlc::Stream* fi);
/*!
* \brief Save the Meta info to binary stream
* \param fo The output stream.
*/
void SaveBinary(dmlc::Stream* fo) const;
/*!
* \brief Set information in the meta info.
* \param key The key of the information.
* \param dptr The data pointer of the source array.
* \param dtype The type of the source data.
* \param num Number of elements in the source array.
*/
void SetInfo(const char* key, const void* dptr, DataType dtype, size_t num);
/*!
* \brief Set information in the meta info with array interface.
* \param key The key of the information.
* \param interface_str String representation of json format array interface.
*
* [ column_0, column_1, ... column_n ]
*
* Right now only 1 column is permitted.
*/
void SetInfo(const char* key, std::string const& interface_str);
/*
* \brief Extend with other MetaInfo.
*
* \param that The other MetaInfo object.
*
* \param accumulate_rows Whether rows need to be accumulated in this function. If
* client code knows number of rows in advance, set this parameter to false.
*/
void Extend(MetaInfo const& that, bool accumulate_rows);
private:
/*! \brief argsort of labels */
// mutable: filled lazily from the const accessor LabelAbsSort().
mutable std::vector<size_t> label_order_cache_;
};
/*! \brief Element from a sparse vector */
struct Entry {
/*! \brief feature index */
bst_feature_t index;
/*! \brief feature value */
bst_float fvalue;
/*! \brief default constructor */
Entry() = default;
/*!
* \brief constructor with index and value
* \param index The feature or row index.
* \param fvalue The feature value.
*/
XGBOOST_DEVICE Entry(bst_feature_t index, bst_float fvalue) : index(index), fvalue(fvalue) {}
/*! \brief comparator ordering entries by feature value, ascending */
inline static bool CmpValue(const Entry& a, const Entry& b) {
return a.fvalue < b.fvalue;
}
/*! \brief entries are equal only when both index and value match */
inline bool operator==(const Entry& other) const {
return (this->index == other.index && this->fvalue == other.fvalue);
}
};
/*!
* \brief Parameters for constructing batches.
*/
/*!
 * \brief Parameters for constructing batches.
 *
 * All fields carry default member initializers so a default-constructed
 * BatchParam is fully defined.  (Previously gpu_id and gpu_page_size were
 * left indeterminate by the defaulted constructor, and reading them in
 * operator!= was undefined behavior.)
 */
struct BatchParam {
  /*! \brief The GPU device to use; -1 means no device / CPU. */
  int gpu_id{-1};
  /*! \brief Maximum number of bins per feature for histograms. */
  int max_bin{0};
  /*! \brief Page size for external memory mode. */
  size_t gpu_page_size{0};
  BatchParam() = default;
  BatchParam(int32_t device, int32_t max_bin, size_t gpu_page_size = 0)
      : gpu_id{device}, max_bin{max_bin}, gpu_page_size{gpu_page_size} {}
  /*! \brief Parameter sets differ when any field differs. */
  inline bool operator!=(const BatchParam& other) const {
    return gpu_id != other.gpu_id || max_bin != other.max_bin ||
           gpu_page_size != other.gpu_page_size;
  }
};
/*!
* \brief In-memory storage unit of sparse batch, stored in CSR format.
*/
class SparsePage {
public:
// Offset for each row.
// offset[i]..offset[i+1] delimits row i inside `data`, so offset holds
// Size()+1 entries and always starts with 0 (see Clear()).
HostDeviceVector<bst_row_t> offset;
/*! \brief the data of the segments */
HostDeviceVector<Entry> data;
// Row id of the first row of this page (stays 0 unless SetBaseRowId is used).
size_t base_rowid{};
/*! \brief an instance of sparse vector in the batch */
using Inst = common::Span<Entry const>;
/*! \brief get i-th row from the batch */
inline Inst operator[](size_t i) const {
const auto& data_vec = data.HostVector();
const auto& offset_vec = offset.HostVector();
size_t size;
// in distributed mode, some partitions may not get any instance for a feature. Therefore
// we should set the size as zero
if (rabit::IsDistributed() && i + 1 >= offset_vec.size()) {
size = 0;
} else {
size = offset_vec[i + 1] - offset_vec[i];
}
return {data_vec.data() + offset_vec[i],
static_cast<Inst::index_type>(size)};
}
/*! \brief constructor */
SparsePage() {
this->Clear();
}
/*! \return Number of instances in the page. */
inline size_t Size() const {
return offset.Size() == 0 ? 0 : offset.Size() - 1;
}
/*! \return estimation of memory cost of this page */
// NOTE(review): counts offset entries at sizeof(size_t) although they are
// stored as bst_row_t — verify the two types agree on all platforms.
inline size_t MemCostBytes() const {
return offset.Size() * sizeof(size_t) + data.Size() * sizeof(Entry);
}
/*! \brief clear the page */
// Resets to an empty page: no rows, offset == {0}, base_rowid == 0.
inline void Clear() {
base_rowid = 0;
auto& offset_vec = offset.HostVector();
offset_vec.clear();
offset_vec.push_back(0);
data.HostVector().clear();
}
/*! \brief Set the base row id for this page. */
inline void SetBaseRowId(size_t row_id) {
base_rowid = row_id;
}
// NOTE(review): presumably returns a transposed (column-major) copy with
// `num_columns` segments — confirm against the implementation.
SparsePage GetTranspose(int num_columns) const;
// Sort the entries of every non-empty segment in place by feature value;
// segments are processed in parallel with dynamic scheduling.
void SortRows() {
auto ncol = static_cast<bst_omp_uint>(this->Size());
#pragma omp parallel for default(none) shared(ncol) schedule(dynamic, 1)
for (bst_omp_uint i = 0; i < ncol; ++i) {
if (this->offset.HostVector()[i] < this->offset.HostVector()[i + 1]) {
std::sort(
this->data.HostVector().begin() + this->offset.HostVector()[i],
this->data.HostVector().begin() + this->offset.HostVector()[i + 1],
Entry::CmpValue);
}
}
}
/*!
* \brief Push row block into the page.
* \param batch the row batch.
*/
void Push(const dmlc::RowBlock<uint32_t>& batch);
/**
* \brief Pushes external data batch onto this page
*
* \tparam AdapterBatchT
* \param batch
* \param missing
* \param nthread
*
* \return The maximum number of columns encountered in this input batch. Useful when pushing many adapter batches to work out the total number of columns.
*/
template <typename AdapterBatchT>
uint64_t Push(const AdapterBatchT& batch, float missing, int nthread);
/*!
* \brief Push a sparse page
* \param batch the row page
*/
void Push(const SparsePage &batch);
/*!
* \brief Push a SparsePage stored in CSC format
* \param batch The row batch to be pushed
*/
void PushCSC(const SparsePage& batch);
};
/*! \brief Type tag: a SparsePage whose segments are interpreted as columns (CSC). Adds no members. */
class CSCPage: public SparsePage {
public:
CSCPage() : SparsePage() {}
/*! \brief Wrap an existing page, taking over its buffers by move. */
explicit CSCPage(SparsePage page) : SparsePage(std::move(page)) {}
};
/*! \brief Type tag distinguishing a sorted CSC page; adds no members. */
class SortedCSCPage : public SparsePage {
public:
SortedCSCPage() : SparsePage() {}
/*! \brief Wrap an existing page, taking over its buffers by move. */
explicit SortedCSCPage(SparsePage page) : SparsePage(std::move(page)) {}
};
class EllpackPageImpl;
/*!
* \brief A page stored in ELLPACK format.
*
* This class uses the PImpl idiom (https://en.cppreference.com/w/cpp/language/pimpl) to avoid
* including CUDA-specific implementation details in the header.
*/
class EllpackPage {
public:
/*!
* \brief Default constructor.
*
* This is used in the external memory case. An empty ELLPACK page is constructed with its content
* set later by the reader.
*/
EllpackPage();
/*!
* \brief Constructor from an existing DMatrix.
*
* This is used in the in-memory case. The ELLPACK page is constructed from an existing DMatrix
* in CSR format.
*/
explicit EllpackPage(DMatrix* dmat, const BatchParam& param);
/*! \brief Destructor. */
~EllpackPage();
/*! \brief Move constructor. */
EllpackPage(EllpackPage&& that);
/*! \return Number of instances in the page. */
size_t Size() const;
/*! \brief Set the base row id for this page. */
void SetBaseRowId(size_t row_id);
// Accessors for the hidden implementation object (PImpl; see class comment).
const EllpackPageImpl* Impl() const { return impl_.get(); }
EllpackPageImpl* Impl() { return impl_.get(); }
private:
std::unique_ptr<EllpackPageImpl> impl_;
};
/*! \brief Interface for batch-iterator implementations; BatchIterator holds one behind a shared_ptr. */
template<typename T>
class BatchIteratorImpl {
public:
virtual ~BatchIteratorImpl() = default;
virtual T& operator*() = 0;             // access the current batch
virtual const T& operator*() const = 0;
virtual void operator++() = 0;          // advance to the next batch
virtual bool AtEnd() const = 0;         // true once iteration is exhausted
};
/*! \brief Forward iterator over batches; thin wrapper that delegates to a BatchIteratorImpl. */
template<typename T>
class BatchIterator {
public:
using iterator_category = std::forward_iterator_tag; // NOLINT
/*! \brief Takes ownership of the implementation pointer (may be nullptr for the end sentinel). */
explicit BatchIterator(BatchIteratorImpl<T>* impl) { impl_.reset(impl); }
void operator++() {
CHECK(impl_ != nullptr);
++(*impl_);
}
T& operator*() {
CHECK(impl_ != nullptr);
return *(*impl_);
}
const T& operator*() const {
CHECK(impl_ != nullptr);
return *(*impl_);
}
// NOTE(review): rhs is ignored — inequality only tests whether *this*
// iterator is exhausted, so it is valid solely for `it != set.end()`
// style loops, not for comparing two arbitrary iterators.
bool operator!=(const BatchIterator& rhs) const {
CHECK(impl_ != nullptr);
return !impl_->AtEnd();
}
bool AtEnd() const {
CHECK(impl_ != nullptr);
return impl_->AtEnd();
}
private:
std::shared_ptr<BatchIteratorImpl<T>> impl_;
};
/*! \brief Range object over batches, enabling range-based for loops. */
template<typename T>
class BatchSet {
public:
explicit BatchSet(BatchIterator<T> begin_iter) : begin_iter_(std::move(begin_iter)) {}
BatchIterator<T> begin() { return begin_iter_; } // NOLINT
// The end() sentinel wraps nullptr; BatchIterator::operator!= ignores it
// and checks the live iterator's AtEnd() instead.
BatchIterator<T> end() { return BatchIterator<T>(nullptr); } // NOLINT
private:
BatchIterator<T> begin_iter_;
};
/*!
* \brief Internal data structured used by XGBoost during training.
*/
class DMatrix {
public:
/*! \brief default constructor */
DMatrix() = default;
/*! \brief meta information of the dataset */
virtual MetaInfo& Info() = 0;
// Convenience forwarders: route SetInfo calls to the underlying MetaInfo.
virtual void SetInfo(const char *key, const void *dptr, DataType dtype,
size_t num) {
this->Info().SetInfo(key, dptr, dtype, num);
}
virtual void SetInfo(const char* key, std::string const& interface_str) {
this->Info().SetInfo(key, interface_str);
}
/*! \brief meta information of the dataset */
virtual const MetaInfo& Info() const = 0;
/**
* \brief Gets batches. Use range based for loop over BatchSet to access individual batches.
*/
template<typename T>
BatchSet<T> GetBatches(const BatchParam& param = {});
template <typename T>
bool PageExists() const;
// the following are column meta data, should be able to answer them fast.
/*! \return Whether the data is stored in a single column block. */
virtual bool SingleColBlock() const = 0;
/*! \brief virtual destructor */
virtual ~DMatrix() = default;
/*! \brief Whether the matrix is dense. */
bool IsDense() const {
return Info().num_nonzero_ == Info().num_row_ * Info().num_col_;
}
/*!
* \brief Load DMatrix from URI.
* \param uri The URI of input.
* \param silent Whether print information during loading.
* \param load_row_split Flag to read in part of rows, divided among the workers in distributed mode.
* \param file_format The format type of the file, used for dmlc::Parser::Create.
* By default "auto" will be able to load in both local binary file.
* \param page_size Page size for external memory.
* \return The created DMatrix.
*/
static DMatrix* Load(const std::string& uri,
bool silent,
bool load_row_split,
const std::string& file_format = "auto",
size_t page_size = kPageSize);
/**
* \brief Creates a new DMatrix from an external data adapter.
*
* \tparam AdapterT Type of the adapter.
* \param [in,out] adapter View onto an external data.
* \param missing Values to count as missing.
* \param nthread Number of threads for construction.
* \param cache_prefix (Optional) The cache prefix for external memory.
* \param page_size (Optional) Size of the page.
*
* \return a Created DMatrix.
*/
template <typename AdapterT>
static DMatrix* Create(AdapterT* adapter, float missing, int nthread,
const std::string& cache_prefix = "",
size_t page_size = kPageSize);
/**
* \brief Create a new Quantile based DMatrix used for histogram based algorithm.
*
* \tparam DataIterHandle External iterator type, defined in C API.
* \tparam DMatrixHandle DMatrix handle, defined in C API.
* \tparam DataIterResetCallback Callback for reset, prototype defined in C API.
* \tparam XGDMatrixCallbackNext Callback for next, prototype defined in C API.
*
* \param iter External data iterator
* \param proxy A handle to ProxyDMatrix
* \param reset Callback for reset
* \param next Callback for next
* \param missing Value that should be treated as missing.
* \param nthread number of threads used for initialization.
* \param max_bin Maximum number of bins.
*
* \return A created quantile based DMatrix.
*/
template <typename DataIterHandle, typename DMatrixHandle,
typename DataIterResetCallback, typename XGDMatrixCallbackNext>
static DMatrix *Create(DataIterHandle iter, DMatrixHandle proxy,
DataIterResetCallback *reset,
XGDMatrixCallbackNext *next, float missing,
int nthread,
int max_bin);
// NOTE(review): presumably returns a new DMatrix restricted to the rows in
// ridxs (cf. MetaInfo::Slice) — confirm against the implementation.
virtual DMatrix *Slice(common::Span<int32_t const> ridxs) = 0;
/*! \brief page size 32 MB */
static const size_t kPageSize = 32UL << 20UL;
protected:
// Per-format batch getters backing the public GetBatches<T>()
// specializations below.
virtual BatchSet<SparsePage> GetRowBatches() = 0;
virtual BatchSet<CSCPage> GetColumnBatches() = 0;
virtual BatchSet<SortedCSCPage> GetSortedColumnBatches() = 0;
virtual BatchSet<EllpackPage> GetEllpackBatches(const BatchParam& param) = 0;
virtual bool EllpackExists() const = 0;
virtual bool SparsePageExists() const = 0;
};
// Template-specialized dispatch: map each page type requested through the
// public GetBatches<T>() / PageExists<T>() API onto the matching protected
// virtual getter.
template<>
inline BatchSet<SparsePage> DMatrix::GetBatches(const BatchParam&) {
return GetRowBatches();
}
template<>
inline bool DMatrix::PageExists<EllpackPage>() const {
return this->EllpackExists();
}
template<>
inline bool DMatrix::PageExists<SparsePage>() const {
return this->SparsePageExists();
}
template<>
inline BatchSet<CSCPage> DMatrix::GetBatches(const BatchParam&) {
return GetColumnBatches();
}
template<>
inline BatchSet<SortedCSCPage> DMatrix::GetBatches(const BatchParam&) {
return GetSortedColumnBatches();
}
// Only the ELLPACK path consumes the BatchParam; the others ignore it.
template<>
inline BatchSet<EllpackPage> DMatrix::GetBatches(const BatchParam& param) {
return GetEllpackBatches(param);
}
} // namespace xgboost
namespace dmlc {
DMLC_DECLARE_TRAITS(is_pod, xgboost::Entry, true);
namespace serializer {
// Binary (de)serialization of xgboost::Entry: index is written first, then
// fvalue, matching the struct's field declaration order.
template <>
struct Handler<xgboost::Entry> {
inline static void Write(Stream* strm, const xgboost::Entry& data) {
strm->Write(data.index);
strm->Write(data.fvalue);
}
// Returns false as soon as either field fails to read.
inline static bool Read(Stream* strm, xgboost::Entry* data) {
return strm->Read(&data->index) && strm->Read(&data->fvalue);
}
};
} // namespace serializer
} // namespace dmlc
#endif // XGBOOST_DATA_H_
|
DynamicMatrix.h | /*
* DynamicMatrix.h
*
* Created on: 13.03.2014
* Author: Michael Wegner (michael.wegner@student.kit.edu)
*/
#ifndef DYNAMIC_MATRIX_H_
#define DYNAMIC_MATRIX_H_
#include "../graph/Graph.h"
#include "Vector.h"
#include "SparseAccumulator.h"
#include "AlgebraicGlobals.h"
namespace NetworKit {
/**
* @ingroup algebraic
* The DynamicMatrix class represents a matrix that is optimized for sparse matrices and internally uses a graph data structure.
* DynamicMatrix should be used when changes to the structure of the matrix are frequent.
*/
class DynamicMatrix {
protected:
Graph graph;
count nRows;
count nCols;
double zero;
public:
/** Default constructor */
DynamicMatrix();
/**
* Constructs the Matrix with size @a dimension x @a dimension.
* @param dimension Defines how many rows and columns this matrix has.
* @param zero The zero element (default is 0.0).
*/
DynamicMatrix(const count dimension, const double zero = 0.0);
/**
* Constructs the Matrix with size @a nRows x @a nCols.
* @param nRows Number of rows.
* @param nCols Number of columns.
* @param zero The zero element (default is 0.0).
*/
DynamicMatrix(const count nRows, const count nCols, const double zero = 0.0);
/**
* Constructs the @a dimension x @a dimension Matrix from the elements at position @a positions with values @values.
* @param dimension Defines how many rows and columns this matrix has.
* @param triplets The nonzero elements.
* @param zero The zero element (default is 0.0).
*/
DynamicMatrix(const count dimension, const std::vector<Triplet>& triplets, const double zero = 0.0);
/**
* Constructs the @a nRows x @a nCols Matrix from the elements at position @a positions with values @values.
* @param nRows Defines how many rows this matrix has.
* @param nCols Defines how many columns this matrix has.
* @param triplets The nonzero elements.
* @param zero The zero element (default is 0.0).
*/
DynamicMatrix(const count nRows, const count nCols, const std::vector<Triplet>& triplets, const double zero = 0.0);
/** Default copy constructor */
DynamicMatrix(const DynamicMatrix &other) = default;
/** Default move constructor */
DynamicMatrix(DynamicMatrix &&other) = default;
/** Default destructor */
virtual ~DynamicMatrix() = default;
/** Default move assignment operator */
DynamicMatrix& operator=(DynamicMatrix &&other) = default;
/** Default copy assignment operator */
DynamicMatrix& operator=(const DynamicMatrix &other) = default;
/**
* Compares this matrix to @a other and returns true if the shape and zero element are the same as well as
* all entries, otherwise returns false.
* @param other
*/
bool operator==(const DynamicMatrix& other) const {
bool graphsEqual = graph.numberOfNodes() == other.graph.numberOfNodes() && graph.numberOfEdges() == other.graph.numberOfEdges();
if (graphsEqual) {
graph.forEdges([&](node u, node v, edgeweight w) {
if (w != other.graph.weight(u, v)) {
graphsEqual = false;
return;
}
});
}
return graphsEqual && nRows == other.nRows && nCols == other.nCols && zero == other.zero;
}
/**
* Compares this matrix to @a other and returns false if the shape and zero element are the same as well as
* all entries, otherwise returns true.
* @param other
*/
bool operator!=(const DynamicMatrix& other) const {
return !((*this) == other);
}
/**
* @return Number of rows.
*/
inline count numberOfRows() const {
return nRows;
}
/**
* @return Number of columns.
*/
inline count numberOfColumns() const {
return nCols;
}
/**
* Returns the zero element of the matrix.
*/
inline double getZero() const {
return zero;
}
/**
* @param i The row index.
* @return Number of non-zeros in row @a i.
*/
count nnzInRow(const index i) const;
/**
* @return Number of non-zeros in this matrix.
*/
count nnz() const;
/**
* @return Value at matrix position (i,j).
*/
double operator()(const index i, const index j) const;
/**
* Set the matrix at position (@a i, @a j) to @a value.
*/
void setValue(const index i, const index j, const double value);
/**
* @return Row @a i of this matrix as vector.
*/
Vector row(const index i) const;
/**
* @return Column @a j of this matrix as vector.
*/
Vector column(const index j) const;
/**
* @return The main diagonal of this matrix.
*/
Vector diagonal() const;
/**
* Adds this matrix to @a other and returns the result.
* @return The sum of this matrix and @a other.
*/
DynamicMatrix operator+(const DynamicMatrix &other) const;
/**
* Adds @a other to this matrix.
* @return Reference to this matrix.
*/
DynamicMatrix& operator+=(const DynamicMatrix &other);
/**
* Subtracts @a other from this matrix and returns the result.
* @return The difference of this matrix and @a other.
*
*/
DynamicMatrix operator-(const DynamicMatrix &other) const;
/**
* Subtracts @a other from this matrix.
* @return Reference to this matrix.
*/
DynamicMatrix& operator-=(const DynamicMatrix &other);
/**
* Multiplies this matrix with a scalar specified in @a scalar and returns the result.
* @return The result of multiplying this matrix with @a scalar.
*/
DynamicMatrix operator*(const double scalar) const;
/**
* Multiplies this matrix with a scalar specified in @a scalar.
* @return Reference to this matrix.
*/
DynamicMatrix& operator*=(const double scalar);
/**
* Multiplies this matrix with @a vector and returns the result.
* @return The result of multiplying this matrix with @a vector.
*/
Vector operator*(const Vector &vector) const;
/**
* Multiplies this matrix with @a other and returns the result in a new matrix.
* @return The result of multiplying this matrix with @a other.
*/
DynamicMatrix operator*(const DynamicMatrix &other) const;
/**
* Divides this matrix by a divisor specified in @a divisor and returns the result in a new matrix.
* @return The result of dividing this matrix by @a divisor.
*/
DynamicMatrix operator/(const double divisor) const;
/**
* Divides this matrix by a divisor specified in @a divisor.
* @return Reference to this matrix.
*/
DynamicMatrix& operator/=(const double divisor);
/**
* Computes A^T * B.
* @param A
* @param B
*/
static DynamicMatrix mTmMultiply(const DynamicMatrix &A, const DynamicMatrix &B);
/**
* Computes A * B^T.
* @param A
* @param B
*/
static DynamicMatrix mmTMultiply(const DynamicMatrix &A, const DynamicMatrix &B);
/**
* Computes matrix^T * vector
* @param matrix
* @param vector
*/
static Vector mTvMultiply(const DynamicMatrix &matrix, const Vector &vector);
/**
* Transposes this matrix and returns it.
*/
DynamicMatrix transpose() const;
/**
* Extracts a matrix with rows and columns specified by @a rowIndices and @a columnIndices from this matrix.
* The order of rows and columns is equal to the order in @a rowIndices and @a columnIndices. It is also
* possible to specify a row or column more than once to get duplicates.
* @param rowIndices
* @param columnIndices
*/
DynamicMatrix extract(const std::vector<index>& rowIndices, const std::vector<index>& columnIndices) const;
/**
* Assign the contents of the matrix @a source to this matrix at rows and columns specified by @a rowIndices and
* @a columnIndices. That is, entry (i,j) of @a source is assigned to entry (rowIndices[i], columnIndices[j]) of
* this matrix. Note that the dimensions of @rowIndices and @a columnIndices must coincide with the number of rows
* and columns of @a source.
* @param rowIndices
* @param columnIndices
* @param source
*/
void assign(const std::vector<index>& rowIndices, const std::vector<index>& columnIndices, const DynamicMatrix& source);
/**
* Applies the unary function @a unaryElementFunction to each value in the matrix. Note that it must hold that the
* function applied to the zero element of this matrix returns the zero element.
* @param unaryElementFunction
*/
template<typename F>
void apply(const F unaryElementFunction);
/**
* Returns the (weighted) adjacency matrix of the (weighted) Graph @a graph.
* @param graph
*/
static DynamicMatrix adjacencyMatrix(const Graph& graph, double zero = 0.0);
/**
* Creates a diagonal matrix with dimension equal to the dimension of the Vector @a diagonalElements. The values on
* the diagonal are the ones stored in @a diagonalElements (i.e. D(i,i) = diagonalElements[i]).
* @param diagonalElements
*/
static DynamicMatrix diagonalMatrix(const Vector& diagonalElements, double zero = 0.0);
/**
* Returns the (weighted) incidence matrix of the (weighted) Graph @a graph.
* @param graph
*/
static DynamicMatrix incidenceMatrix(const Graph& graph, double zero = 0.0);
/**
* Returns the (weighted) Laplacian matrix of the (weighteD) Graph @a graph.
* @param graph
*/
static DynamicMatrix laplacianMatrix(const Graph& graph,double zero = 0.0);
/**
* Returns the (weighted) normalized Laplacian matrix of the (weighted) Graph @a graph
* @param graph
*/
static DynamicMatrix normalizedLaplacianMatrix(const Graph& graph, double zero = 0.0);
/**
* Iterate over all non-zero elements of row @a row in the matrix and call handle(index row, index column, double value)
*/
template<typename L> void forNonZeroElementsInRow(index row, L handle) const;
/**
* Iterate over all elements in row @a i in the matrix and call handle(index column, double value)
*/
template<typename L> void forElementsInRow(index i, L handle) const;
/**
* Iterate over all non-zero elements of the matrix in row order and call handle(index row, index column, double value).
*/
template<typename L> void forNonZeroElementsInRowOrder(L handle) const;
/**
* Iterate in parallel over all rows and call handle(index row, index column, double value) on non-zero elements of the matrix.
*/
template<typename L> void parallelForNonZeroElementsInRowOrder(L handle) const;
};
} /* namespace NetworKit */
// Rewrite every stored (non-zero) entry in place with the function's result.
// The function must map the zero element to itself (see the class contract).
template<typename F>
void NetworKit::DynamicMatrix::apply(const F unaryElementFunction) {
    forNonZeroElementsInRowOrder([&](index row, index col, double entry) {
        setValue(row, col, unaryElementFunction(entry));
    });
}
// The non-zeros of a row are exactly the edges incident to node `row` in the
// backing graph; delegate iteration to the graph.
template<typename L>
inline void NetworKit::DynamicMatrix::forNonZeroElementsInRow(index row, L handle) const {
    graph.forEdgesOf(row, [&](index column, edgeweight entry) {
        handle(column, entry);
    });
}
// Iterate ALL entries of row i (including zeros) by first materializing the
// row as a dense Vector, then reporting each value with its column index.
template<typename L>
inline void NetworKit::DynamicMatrix::forElementsInRow(index i, L handle) const {
    Vector denseRow = row(i);
    index column = 0;
    denseRow.forElements([&](double entry) {
        handle(column, entry);
        ++column;
    });
}
template<typename L>
inline void NetworKit::DynamicMatrix::forNonZeroElementsInRowOrder(L handle) const {
for (index i = 0; i < nRows; ++i) {
graph.forEdgesOf(i, [&](index j, edgeweight weight){
handle(i, j, weight);
});
}
}
// Rows are split across OpenMP threads, so `handle` must be safe to call
// concurrently for different rows (all entries of one row stay on one thread).
template<typename L>
inline void NetworKit::DynamicMatrix::parallelForNonZeroElementsInRowOrder(L handle) const {
    #pragma omp parallel for
    for (omp_index i = 0; i < static_cast<omp_index>(nRows); ++i) {
        graph.forEdgesOf(i, [&](index j, edgeweight weight){
            handle(i, j, weight);
        });
    }
}
#endif /* DYNAMIC_MATRIX_H_ */
|
omp_set_wait_policy_overhead.c | // 2-process
// set wait policy, test2(0):passive / test2(1):active
// uncomment omp_quiesce() and omp_begin2() to check the time for quiesce policy
#include <omp.h>
#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/syscall.h>
#include <sys/timeb.h>
#include <sys/types.h>
#include <time.h>
#include <unistd.h>
void *omp_parallel_foo(void *ptr);
/** Important: make sure you use the num_threads clause on the parallel directive and set it to the
 * number of hardware cores, not the number of CPUs Linux reports or the default from OpenMP.
 *
 * Run `cat /proc/cpuinfo` and check the processor id, core id, and CPU model number so you can look them up on the internet.
 * Lennon is a Xeon CPU E5-2683 v3 @ 2.00GHz; it has two CPUs for a total of 28 cores, but supports up to 56 threads.
 * Paul is a Xeon CPU E5-2695 v2 @ 2.40GHz; it has two CPUs for a total of 24 cores, and supports up to 48 threads.
 * Fornax is an Intel® Xeon® E5-2699 v3 @ 2.3GHz; it has two CPUs for a total of 36 cores, and supports up to 72 threads.
 *
 * Use -O0 (no optimization) when compiling.
 */
int total_cores = 2;
void busy_waiting4(int time);
/* Wall-clock time in seconds with millisecond resolution, via ftime(). */
double read_timer() {
    struct timeb now;
    ftime(&now);
    double seconds = (double) now.time;
    seconds += (double) now.millitm / 1000.0;
    return seconds;
}
/*
 * Fork into two processes and run three rounds of nested OpenMP parallel
 * regions under a given wait policy, timing the child's total elapsed time.
 * type == 1 selects the "active" wait policy, anything else "passive".
 * NOTE(review): omp_set_wait_policy(), omp_quiesce() and omp_begin2() are
 * non-standard runtime extensions -- confirm which OpenMP runtime provides them.
 * NOTE(review): fork() happens before any OpenMP call; parent and child each
 * run the loop, alternating branches via (proc_id+i)%2 (the child sees
 * proc_id == 0, the parent sees the child's pid), so which process takes which
 * branch in a given round depends on the child's pid parity.
 */
void test2(int type){
    pid_t proc_id;
    int i = 0;
    double tt;                   /* start timestamp / elapsed seconds (child only) */
    proc_id = fork();
    if(type==1){
        omp_set_wait_policy(1); // active
    }else{
        omp_set_wait_policy(0); // passive
    }
    if(proc_id==0){
        tt = read_timer();       /* only the child times the run */
    }
    omp_set_nested(1);           /* allow nested parallel regions */
    for (i=0; i<3; i++){
        if ((proc_id+i)%2==0){
            printf("%d pid %d\n", proc_id, getpid());
            busy_waiting4(1);    /* burn one CPU-second before the region */
            #pragma omp parallel num_threads(2)
            {
                int tid = omp_get_thread_num();
                // printf("level 2 id %d\n", tid);
                busy_waiting4(1);
                int pid = getpid();
                int thid = syscall(SYS_gettid);   /* kernel thread id */
                printf("level 2 ompid:%d pid:%d tid:%d\n", tid, pid, thid);
            }
            // omp_quiesce();
            // omp_begin2();
        }else{
            printf("%d pid %d\n", proc_id, getpid());
            int tid = omp_get_thread_num();
            // printf("level 1 id %d\n", tid);
            #pragma omp parallel num_threads(2)
            {
                int tid = omp_get_thread_num();
                busy_waiting4(1);
                int pid = getpid();
                int thid = syscall(SYS_gettid);
                printf("level 2 ompid:%d pid:%d tid:%d\n", tid, pid, thid);
            }
            // omp_quiesce();
            // omp_begin2();
            busy_waiting4(1);    /* this branch burns its extra second after the region */
        }
    }
    if( proc_id == 0){
        tt = read_timer()-tt;
        if(type==0){
            printf("passive: %f\n", tt);
        }else{
            printf("active: %f\n", tt);
        }
    }
    // while(1);
}
/* Entry point: run only the active-wait measurement (passive kept for reference). */
int main(int argc, char * argv[])
{
    // test2(0);
    test2(1);
    return 0;
}
/*
 * Busy-wait for (at least) `second` seconds of this process's CPU time.
 *
 * Fix: the original compared ticks1/CLOCKS_PER_SEC against ticks2/CLOCKS_PER_SEC
 * with integer division, so the loop exited as soon as any whole-second boundary
 * of the clock() counter was crossed -- which can happen almost immediately if
 * the call lands just before a boundary.  Comparing the elapsed tick difference
 * against second * CLOCKS_PER_SEC waits the full requested span.
 *
 * Note: clock() measures CPU time, not wall time, so the thread must spin.
 */
void busy_waiting4(int second) {
    clock_t start = clock();
    clock_t now = start;
    while ((now - start) < (clock_t) second * CLOCKS_PER_SEC)
        now = clock();
    // printf("It took %ld ticks to wait one second.\n", (long)(now - start));
    // printf("This value should be the same as CLOCKS_PER_SEC which is %d.\n", CLOCKS_PER_SEC);
}
|
cg_aux.c | //MIT License
//
//Copyright (c) 2018 Sicong Zhuang
//
//Permission is hereby granted, free of charge, to any person obtaining a copy
//of this software and associated documentation files (the "Software"), to deal
//in the Software without restriction, including without limitation the rights
//to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
//copies of the Software, and to permit persons to whom the Software is
//furnished to do so, subject to the following conditions:
//
//The above copyright notice and this permission notice shall be included in all
//copies or substantial portions of the Software.
//
//THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
//IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
//FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
//AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
//LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
//OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
//SOFTWARE.
#include "cg_aux.h"
/* scanf conversion specifiers for double- and single-precision matrix input. */
const char *scan_dconspec = "%lf";
const char *scan_sconspec = "%f";
/*
 * Read a Harwell-Boeing file and return its sparse structure.
 * Outputs: *m/*n = matrix dimensions, *elemc = number of non-zeros,
 * *vptr/*vpos/*vval = the HB pointer/index/value arrays (ownership passes to
 * the caller; allocated by hb_file_read).
 * Right-hand-side and guess/exact vectors are read but discarded.
 * NOTE(review): on fopen failure the outputs are left untouched -- callers
 * must not rely on them after a failed call.
 * NOTE(review): title/key/mxtype/format strings returned by hb_file_read are
 * never freed here -- confirm ownership; possible small leak per call.
 */
void hb_read_double(char *input_file, int *m, int *n, int *elemc, int **vptr, int **vpos, double **vval)
{
    /* Scratch outputs for hb_file_read; only the matrix structure is kept. */
    double *exact = NULL;
    double *guess = NULL;
    int i;
    int indcrd;
    char *indfmt = NULL;
    FILE *input;
    int j;
    char *key = NULL;
    int khi;
    int klo;
    char *mxtype = NULL;
    int neltvl;
    int nrhs;
    int nrhsix;
    int ptrcrd;
    char *ptrfmt = NULL;
    int rhscrd;
    char *rhsfmt = NULL;
    int *rhsind = NULL;
    int *rhsptr = NULL;
    char *rhstyp = NULL;
    double *rhsval = NULL;
    double *rhsvec = NULL;
    char *title = NULL;
    int totcrd;
    int valcrd;
    char *valfmt = NULL;
    int nrow;
    int ncol;
    int nnzero;
    int *colptr = NULL;
    int *rowind = NULL;
    double *values = NULL;
    input = fopen ( input_file, "rt" );
    if ( !input )
    {
        printf ( "  Error opening the file.\n" );
        return;
    }
    /* hb_file_read allocates all output arrays/strings (project function). */
    hb_file_read ( input, &title, &key, &totcrd, &ptrcrd, &indcrd,
        &valcrd, &rhscrd, &mxtype, &nrow, &ncol, &nnzero, &neltvl,
        &ptrfmt, &indfmt, &valfmt, &rhsfmt, &rhstyp, &nrhs, &nrhsix,
        &colptr, &rowind, &values, &rhsval, &rhsptr, &rhsind, &rhsvec,
        &guess, &exact );
    fclose ( input );
    /* Discard right-hand sides and solution vectors; only structure is wanted. */
    if ( exact )
    {
        free ( exact );
    }
    if ( guess )
    {
        free ( guess );
    }
    if ( rhsind )
    {
        free ( rhsind );
    }
    if ( rhsptr )
    {
        free ( rhsptr );
    }
    if ( rhsval )
    {
        free ( rhsval );
    }
    if ( rhsvec )
    {
        free ( rhsvec );
    }
    /* Hand the matrix structure (and array ownership) to the caller. */
    *m = nrow;
    *n = ncol;
    *elemc = nnzero;
    *vptr = colptr;
    *vpos = rowind;
    *vval = values;
    return;
}
/* Zero out every field of an HB matrix descriptor (no memory is released). */
void hb_reset(hbmat_t *A)
{
    A->m = 0;
    A->n = 0;
    A->elemc = 0;
    A->vptr = 0;
    A->vpos = 0;
    A->vval = 0;
    A->vdiag = NULL;
    A->b = 0;
    A->trans = 0;
    A->orig = 0;
    A->hyper = 0;
    A->orig_row = 0;
    A->orig_col = 0;
    A->e_tree = 0;
    A->type = 0;
    A->FACT = 0;
}
/* Convert 1-based (Fortran-style) HB indexing to 0-based, in place. */
void one2zero(hbmat_t* in_matrix)
{
    int nrows = in_matrix->m;
    int nnz = in_matrix->elemc;
    int *row_starts = in_matrix->vptr;
    int *col_idx = in_matrix->vpos;
    int i;
    /* m+1 row pointers, then one column index per stored element. */
    for ( i = 0; i <= nrows; i++ )
        row_starts[i] -= 1;
    for ( i = 0; i < nnz; i++ )
        col_idx[i] -= 1;
}
/*
 * Node of a per-row singly linked list used while mirroring symmetric entries.
 *
 * Fix: the self-referential members were declared `struct sparse_node *`, a tag
 * that exists nowhere in this file (the struct's tag is _sparse_nodes).  That
 * made `next`/`current` pointers to an unrelated incomplete type, so every
 * assignment from a `_sn_t *` (as done throughout hb_sym_expand) was an
 * incompatible-pointer-type constraint violation.
 */
typedef struct _sparse_nodes {
    int row;                        /* row index of this entry */
    int col;                        /* column index; -1 marks an empty head node */
    double val;                     /* stored value */
    struct _sparse_nodes *next;     /* next entry of this row's list */
    struct _sparse_nodes *current;  /* tail shortcut: last node of the list */
} _sn_t;
/* Expand a symmetric half (upper-triangular CSR) matrix B into its full
 * counterpart A.  A's arrays are allocated here; ownership goes to the caller.
 * NOTE(review): A->elemc = 2*B->elemc - m assumes every diagonal entry is
 * stored exactly once in B -- confirm this invariant at the call sites.
 * Strategy: while streaming B's rows (the upper part), each entry (i,col) is
 * also inserted into a per-row linked list at row `col`; when row `col` is
 * later emitted, its list supplies the mirrored lower-triangular entries,
 * which naturally precede the upper entries in column order. */
void hb_sym_expand(hbmat_t *A, hbmat_t *B)
{
    hb_init_basic(A, B);
    int m = A->m;
    A->elemc = B->elemc * 2 - m;     /* mirrored entries, diagonal counted once */
    int nnz = A->elemc;
    A->vptr = malloc((m+1) * sizeof(int));
    A->vpos = malloc(nnz * sizeof(int));
    A->vval = malloc(nnz * sizeof(double));
    int *vptra = A->vptr; int *vposa = A->vpos; double *vvala = A->vval;
    int *vptrb = B->vptr; int *vposb = B->vpos; double *vvalb = B->vval;
    /* One list head per row; col == -1 marks a still-empty head. */
    _sn_t *ll_mat = malloc(m * sizeof(_sn_t));
    int i;
    for ( i = 0; i < m; i++ ) {
        ll_mat[i].row = m;
        ll_mat[i].col = -1;
        ll_mat[i].val = 0.0;
        ll_mat[i].next = NULL;
        ll_mat[i].current = &ll_mat[i];
    }
    int vptr_c = 0;      /* running row-pointer value for A */
    int elemc_c = 0;     /* next free slot in vposa/vvala */
    for ( i = 0; i < m; i++ ) {
        vptra[i] = vptr_c;
        int bptr = vptrb[i];
        int eptr = vptrb[i+1];
        /* Fill the lower csr info: mirrored entries collected for this row. */
        _sn_t *c = &ll_mat[i];
        while ( c != NULL && c->col != -1 ) {
            vposa[elemc_c] = c->col;
            vvala[elemc_c] = c->val;
            elemc_c++;
            vptr_c++;
            c = c->next;
        }
        /* Copy the upper csr info straight from B's row i. */
        int j;
        for ( j = bptr; j < eptr; j++ ) {
            int col = vposb[j];
            double val = vvalb[j];
            vposa[elemc_c] = col;
            vvala[elemc_c] = val;
            elemc_c++;
            /* Linked-list insert of the mirrored entry (col, i) at row `col`:
             * the embedded head node is used first, then heap nodes appended. */
            _sn_t *head = &ll_mat[col];
            _sn_t *current = ll_mat[col].current;
            if ( current->col != -1 ) {
                current->next = malloc(sizeof(_sn_t));
                current = current->next;
                head->current = current;
            }
            current->row = col;
            current->col = i;
            current->val = val;
            current->next = NULL;
            current->current = current;
        }
        vptr_c += eptr - bptr;   /* account for the upper entries just copied */
    }
    vptra[m] = vptr_c;           /* closing row pointer */
    /* Free only heap-allocated list nodes; heads live inside ll_mat. */
    for ( i = 0; i < m; i++ ) {
        _sn_t *c= ll_mat[i].next;
        while ( c != NULL ) {
            _sn_t *n = c->next;
            free(c);
            c = n;
        }
    }
    free(ll_mat);
}
/* Copy dimension and element count from B into a freshly reset A.
 * The result is marked square: A->n is set to B->m, matching the symmetric
 * use case in hb_sym_expand. */
void hb_init_basic(hbmat_t *A, hbmat_t *B)
{
    hb_reset(A);
    A->m = B->m;
    A->n = B->m;
    A->elemc = B->elemc;
}
/* Release an HB matrix and its owned arrays.
 * Fields that were never allocated must be NULL; free(NULL) is a no-op. */
void hb_free(hbmat_t *A)
{
    free(A->e_tree);
    free(A->vdiag);
    free(A->vval);
    free(A->vpos);
    free(A->vptr);
    free(A);
}
/*
 * Extract block (I,J) of size up to b x b from HB matrix A into Bp.
 * If Bp is NULL a fresh hbmat_t is allocated; if the block contains no
 * entries, NULL is returned and the temporaries are released.
 * Handles both 0- and 1-based index bases via `offs`.
 * NOTE(review): assumes CSR layout -- `csr` is hard-wired to 1 (hb_CSR(A) is
 * commented out).
 *
 * Fix: the original allocated Bp (when the caller passed NULL) BEFORE checking
 * whether the block was empty, leaking one hbmat_t per empty block.  The
 * allocation now happens only after at least one entry has been collected.
 */
void* __hb2hbh_block(int I, int J, hbmat_t *A, int b, hbmat_t *Bp)
{
    int alloc = Bp == NULL;          /* caller did not supply storage */
    if ( b < 0 ) {
        fprintf(stderr, "err: b must be positive\n");
        return NULL;
    }
    int m = A->m; int n = A->n;
    int* vptr = A->vptr;
    int* vpos = A->vpos;
    double* vval = A->vval;
    int offs = vptr[0] == 0 ? 0 : 1; /* detect 0- vs 1-based indexing */
    int csr = 1;//hb_CSR(A);
    int brow = I*b;                  /* first row of the block */
    int bcol = J*b;                  /* first column of the block */
    int rleft = m - brow;
    int cleft = n - bcol;
    int rows = b > rleft ? rleft : b;  /* clip at the matrix edge */
    int cols = b > cleft ? cleft : b;
    int erow = brow + rows;
    int ecol = bcol + cols;
    /* CSR: walk rows [brow,erow) keeping columns in [bcol,ecol); CSC swaps roles. */
    int dimb = csr ? brow : bcol;
    int dime = csr ? erow : ecol;
    int rngb = csr ? bcol : brow;
    int rnge = csr ? ecol : erow;
    vector_t* ab_vptr = vector_create();
    vector_t* ab_vpos = vector_create();
    vector_t* ab_vval = vector_create();
    vel_t vel;
    int L;
    for ( L = dimb; L < dime; ++L ) {
        vel.i = ab_vpos->elemc + offs;   /* row pointer = entries emitted so far */
        vector_insert(ab_vptr, vel);
        int k;
        for ( k = vptr[L]; k < vptr[L+1]; ++k ) {
            int lk = k - offs;
            int c = vpos[lk] - offs;
            if ( c >= rngb && c < rnge ) {   /* entry falls inside the block */
                vel.i = c - rngb + offs;     /* rebase column to the block */
                vector_insert(ab_vpos, vel);
                vel.d = vval[lk];
                vector_insert(ab_vval, vel);
            }
        }
    }
    vel.i = ab_vpos->elemc + offs;       /* closing row pointer */
    vector_insert(ab_vptr, vel);
    if ( ab_vpos->elemc == 0 ) {
        /* Empty block: discard temporaries and report "no block". */
        vector_free(ab_vptr);
        vector_free(ab_vpos);
        vector_free(ab_vval);
        return NULL;
    }
    if ( alloc ) {
        Bp = malloc(sizeof(hbmat_t));
        hb_reset(Bp);
    }
    Bp->m = rows;
    Bp->n = cols;
    Bp->elemc = ab_vpos->elemc;
    Bp->vdiag = NULL;
    Bp->vptr = vector2int(ab_vptr);
    Bp->vpos = vector2int(ab_vpos);
    Bp->vval = vector2double(ab_vval);
    return Bp;
}
/*
 * Partition CSR matrix A into a hypermatrix of up-to b x b blocks.
 * The returned hbmat_t describes an M x N block structure whose vval array
 * holds pointers to the per-block hbmat_t's (only non-empty blocks are kept).
 * Only CSR input is supported; CSC prints a warning and yields an empty
 * hypermatrix structure.
 *
 * Fixes: removed `hbmat_array`, which was malloc'd, never used and never
 * freed (a leak of num*sizeof(hbmat_t*) per call), plus the dead locals
 * elemc/vpos/vval/acc.
 */
hbmat_t* hb2hbh(hbmat_t *A, int b, int is_csr)
{
    int m = A->m;
    int n = A->n;
    int *vptr = A->vptr;
    int M = (m+b-1) / b;             /* block rows, rounded up */
    int N = (n+b-1) / b;             /* block columns, rounded up */
    int num = M * N;
    int offs = vptr[0] == 0 ? 0 : 1; /* detect 0- vs 1-based indexing */
    hbmat_t* hyper = malloc(sizeof(hbmat_t));
    hb_reset(hyper);
    hyper->m = M; hyper->n = N; hyper->vdiag = NULL;
    hyper->orig = A;
    /* vval of the hypermatrix stores pointers to the per-block matrices. */
    hyper->vval = malloc(num * sizeof(hbmat_t*));
    vector_t* ab_vptr = vector_create();
    vector_t* ab_vpos = vector_create();
    vel_t pos_val;
    int acc0 = 0;                    /* number of non-empty blocks emitted */
    int I, J;
    if ( is_csr ) {
        for ( I = 0; I < M; ++I ) {
            pos_val.i = ab_vpos->elemc + offs;
            vector_insert(ab_vptr, pos_val);
            for ( J = 0; J < N; ++J ) {
                hbmat_t *B = __hb2hbh_block(I, J, A, b, NULL);
                if ( B != NULL ) {
                    pos_val.i = J + offs;
                    vector_insert(ab_vpos, pos_val);
                    ((hbmat_t**)hyper->vval)[acc0] = B;
                    ++acc0;
                }
            }
        }
    } else {
        printf("warn: hb2hbh for csc not yet implemented\n");
    }
    pos_val.i = ab_vpos->elemc + offs;   /* closing block-row pointer */
    vector_insert(ab_vptr, pos_val);
    hyper->elemc = ab_vpos->elemc;
    hyper->vptr = vector2int(ab_vptr);
    hyper->vpos = vector2int(ab_vpos);
    // hb_setdiag(hyper);
    return hyper;
}
/* Construct an array of block-diagonal submatrices.
 * diagb must hold at least ceil(m/bsze) hbmat_t slots; each slot receives a
 * freshly allocated dim x dim CSR block covering rows/cols [i*bsze, i*bsze+dim). */
void hb_sym_diag_block(hbmat_t *src_mat, int bsze, hbmat_t *diagb)
{
    /* Assuming CSR */
    int m = src_mat->m;
    /* Number of subblocks */
    int bs = (m+bsze-1)/bsze;
    int *svptr = src_mat->vptr; int *svpos = src_mat->vpos;
    double *svval = src_mat->vval;
    int i;
    /* Loop for generating all the diagonal blocks */
    for ( i = 0; i < bs; i++ ) {
        hbmat_t *d = &diagb[i];
        int elemc = 0;
        int brow = i*bsze; int erow = brow+bsze;
        erow = erow > m ? m : erow;    /* last block may be smaller */
        int dim = erow - brow;
        d->m = d->n = dim;
        // Allocate individual HB structures
        // Note that vpos and vval size are over-estimated: esze counts every
        // entry in the row band, not just those inside the diagonal block.
        int *vptr = malloc((dim+1) * sizeof(int));
        int esze = (svptr[erow] - svptr[brow]);
        int *vpos = malloc(esze * sizeof(int));
        double *vval = malloc(esze * sizeof(double));
        int idx;
        int row;
        /* Traverse through rows of the band, keeping in-block columns. */
        for ( row = brow, idx = 0; row < erow; row++ ,idx++) {
            vptr[idx] = elemc;
            int pos = svptr[row]; int epos = svptr[row+1];
            while ( pos < epos ) {
                int col = svpos[pos];
                /* Filter variants kept for reference:
                 * col >= row  && col < erow -> upper triangle only
                 * col >= brow && col < row  -> lower triangle only */
                if ( col >= brow && col < erow ) { //Complete
                    vpos[elemc] = col - brow;     /* rebase column to the block */
                    vval[elemc] = svval[pos];
                    elemc++;
                }
                pos++;
            }
        }
        vptr[idx] = elemc;     /* closing row pointer (idx == dim here) */
        d->elemc = elemc;
        d->vptr = vptr;
        //FIXME using realloc to reduce memory consumption
        d->vpos = vpos;
        d->vval = vval;
        //TODO Remove verifications
        // hb_sanity_check("A_hb", d, 0);
        // assert(idx == dim);
        // assert(d->vptr != NULL && d->vpos != NULL && d->vval != NULL);
    }
}
/* Block diagonal (non-split): build ONE m x m CSR matrix d that keeps only the
 * entries of src_mat lying inside the bsze-sized diagonal blocks; everything
 * off the block diagonal is dropped.  d's arrays are allocated here, sized for
 * the full src_mat->elemc (over-allocation; could be shrunk with realloc). */
void hb_sym_diag(hbmat_t *src_mat, int bsze, hbmat_t *d)
{
    /* Assuming CSR */
    int m = src_mat->m;
    int bs = (m+bsze-1)/bsze;    /* number of diagonal blocks */
    int *svptr = src_mat->vptr;
    int *svpos = src_mat->vpos;
    double *svval = src_mat->vval;
    d->m = d->n = m;
    int elemc = d->elemc = 0;
    d->vptr = malloc((m+1) * sizeof(int));
    d->vpos = malloc(src_mat->elemc * sizeof(int));
    d->vval = malloc(src_mat->elemc * sizeof(double));
    int *dvptr = d->vptr;
    int *dvpos = d->vpos;
    double *dvval = d->vval;
    int idx = 0;                 /* global output row counter across blocks */
    int i;
    for ( i = 0; i < bs; i++ ) {
        int brow = i*bsze;
        int erow = brow+bsze;
        erow = erow > m ? m : erow;    /* last block may be smaller */
        int dim = erow - brow;
        int row;
        /* Traverse the rows of this block's band. */
        for ( row = brow; row < erow; row++) {
            dvptr[idx] = elemc;
            idx += 1;
            int pos = svptr[row];
            int epos = svptr[row+1];
            while ( pos < epos ) {
                int col = svpos[pos];
                /* Filter variants kept for reference:
                 * col >= row  && col < erow -> upper triangle only
                 * col >= brow && col < row  -> lower triangle only */
                if ( col >= brow && col < erow ) { //Complete
                    /* Columns keep their GLOBAL index here (unlike
                     * hb_sym_diag_block, which rebases to the block). */
                    dvpos[elemc] = col;
                    dvval[elemc] = svval[pos];
                    elemc++;
                }
                pos++;
            }
        }
        /* Closing pointer; overwritten at the start of the next block's first
         * row, and finally left at dvptr[m] after the last block. */
        dvptr[idx] = elemc;
        d->elemc = elemc;
        d->vptr = dvptr;
        d->vpos = dvpos;
        d->vval = dvval;
    }
}
/*
 * Read scalar values from an Octave-style text matrix stream into A,
 * skipping '#' comment/header lines.
 * NOTE(review): only m values are read although the signature suggests an
 * m-by-n matrix; n is unused -- confirm whether m is passed as the total
 * element count at the call sites.
 * FP_SCANSPEC is a project-defined scanf conversion for the element type
 * (presumably "%lf" here -- see scan_dconspec above).
 * Always returns 0, even on a short read or scan failure.
 */
int read_mm2dense(FILE *f, int m, int n, double *A)
{
    char buf[1024];
    double el;
    int i = 0;
    while ( fgets(buf, sizeof(buf), f) != NULL && i < m) {
        if ( buf[0] != '#' ) {
            sscanf(buf, FP_SCANSPEC, &el);
            *A++ = el;
            ++i;
        }
    }
    return 0;
}
// column-major
/* Write an m-by-n column-major matrix (leading dimension lda) to `f` as an
 * Octave text object called `name`: a 4-line header followed by one value per
 * line, emitted row by row.  Also prints a diagnostic to stdout. */
void print_dense2mm(FILE *f, const char *name, int m, int n, const double *A, int lda)
{
    printf("warning: writing obj %s\n", name);
    fprintf(f, "# name: %s\n", name);
    fprintf(f, "# type: matrix\n");
    fprintf(f, "# rows: %i\n", m);
    fprintf(f, "# columns: %i\n", n);
    int r;
    for ( r = 0; r < m; ++r ) {
        const double *rowBase = A + r;   /* element (r, c) lives at rowBase[c*lda] */
        int c;
        for ( c = 0; c < n; ++c ) {
            fprintf(f, "%.16e \n", rowBase[c * lda]);
        }
    }
}
/*
 * Open `fname` for writing, dump the matrix via print_dense2mm, and close it.
 *
 * Fix: the original only printed a diagnostic when fopen failed and then went
 * on to call print_dense2mm(NULL, ...) and fclose(NULL) -- both undefined
 * behavior.  It now returns early on failure.
 */
void fprint_dense2mm(const char *fname, const char *name, int m, int n, const double *A, int lda)
{
    FILE *f = fopen(fname, "w");
    if ( f == NULL ) {
        fprintf(stderr, "err: cannot open %s for writing\n", fname);
        return;
    }
    print_dense2mm(f, name, m, n, A, lda);
    fclose(f);
}
/*
 * BLAS/LAPACK task wrappers
 * */
/* Copy a bm-by-bn panel column by column from x+initx to y+inity; both panels
 * use leading dimension m.  p and n are unused but kept so all task wrappers
 * share a uniform signature. */
void __t_copy(int p, int bm, int bn, int m, int n, double *x, double *y, int initx, int inity)
{
    int inc = 1;
    double *src = x + initx;
    double *dst = y + inity;
    int col;
    for ( col = 0; col < bn; ++col ) {
        BLAS_cp(bm, src, inc, dst, inc);
        src += m;
        dst += m;
    }
}
/* Per-column dot products of a bm-by-bn panel pair, accumulated into the
 * shared `result` vector: result[j] += x_j . y_j.  Columns are m apart
 * (leading dimension).  The accumulation is guarded by an OpenMP critical
 * section so concurrent tasks can target the same result vector. */
void __t_dot(int p, int bm, int bn, int m, int n, double *x, double *y, int initx, int inity, double *result)
{
    double *X = &x[initx];
    double *Y = &y[inity];
    int i_one = 1;
    double local_result[bn];     /* VLA: one partial dot per column */
    double fp_one = 1.0;
    int j;
    for ( j=0; j<bn; ++j ) {
        local_result[j] = BLAS_dot(bm, X, i_one, Y, i_one);
        X += m;
        Y += m;
    }
    /* Serialize the shared accumulation; the dots above run unguarded. */
    #pragma omp critical
    {
        BLAS_axpy(bn, fp_one, local_result, i_one, result, i_one);
    }
}
/*
 * Per-column dot products of a bm-by-bn panel pair, written (not accumulated)
 * into result[initr+j] = x_j . y_j.  Columns are m apart (leading dimension).
 * No locking is needed since each task writes a disjoint result slice.
 *
 * Fix: removed an unused VLA (`local_result[bn]`) and an unused constant that
 * were created on every call.
 */
void __t_dot_array(int p, int bm, int bn, int m, int n, double *x, double *y, int initx, int inity, double *result, int initr)
{
    double *X = &x[initx];
    double *Y = &y[inity];
    int i_one = 1;
    int j;
    for ( j=0; j<bn; ++j ) {
        result[initr+j] = BLAS_dot(bm, X, i_one, Y, i_one);
        X += m;
        Y += m;
    }
}
/*
 * Fused double dot-product task: writes the per-column dots of (x,y) into
 * result[initr..initr+bn) and of (a,b) into result2[initr2..initr2+bn).
 * All panels are bm-by-bn with leading dimension m; no locking is required
 * since each task owns its result slices.
 *
 * Fix: removed a dead outer `int j;` that was immediately shadowed by the
 * second loop's own declaration, plus a stale commented-out constant.
 */
void _cg_dot2_array(int p, int bm, int bn, int m, int n, double *x, double *y, int initx, int inity, double *result, int initr, double *a, double *b, int inita, int initb, double *result2, int initr2)
{
    double *X = &x[initx];
    double *Y = &y[inity];
    double *A = &a[inita];
    double *B = &b[initb];
    int i_one = 1;
    for ( int j=0; j<bn; ++j ) {
        result[initr+j] = BLAS_dot(bm, X, i_one, Y, i_one);
        X += m;
        Y += m;
    }
    for ( int j=0; j<bn; ++j ) {
        result2[initr2+j] = BLAS_dot(bm, A, i_one, B, i_one);
        A += m;
        B += m;
    }
}
/*
 * Fused double dot-product task with shared accumulation: adds the per-column
 * dots of (x,y) into `result` and of (a,b) into `result2`.  Panels are
 * bm-by-bn with leading dimension m; the two axpy accumulations are guarded by
 * one OpenMP critical section so concurrent tasks may share the result vectors.
 *
 * Fix: removed a dead `int j;` that was shadowed by the loop declarations.
 */
void _cg_dot2(int p, int bm, int bn, int m, int n, double *x, double *y, int initx, int inity, double *result, double *a, double *b, int inita, int initb, double *result2)
{
    double *X = &x[initx];
    double *Y = &y[inity];
    double *A = &a[inita];
    double *B = &b[initb];
    double fp_one = 1.0;
    int i_one = 1;
    double local_result[bn];     /* partial dots of (x,y), one per column */
    for ( int j=0; j<bn; ++j ) {
        local_result[j] = BLAS_dot(bm, X, i_one, Y, i_one);
        X += m;
        Y += m;
    }
    double local_result2[bn];    /* partial dots of (a,b) */
    for ( int j=0; j<bn; ++j ) {
        local_result2[j] = BLAS_dot(bm, A, i_one, B, i_one);
        A += m;
        B += m;
    }
    /* Serialize only the shared accumulation. */
    #pragma omp critical
    {
        BLAS_axpy(bn, fp_one, local_result, i_one, result, i_one);
        BLAS_axpy(bn, fp_one, local_result2, i_one, result2, i_one);
    }
}
/* Combined copy+axpy update of two panel pairs, column by column:
 *   Z2_j = Y2_j + (Anum[j]/Aden[j])        * X2_j   ("update of x")
 *   Z1_j = Y1_j + alpha*(Anum[j]/Aden[j])  * X1_j   ("update of r")
 * All panels are bm-by-bn with leading dimension m. */
void __t_cpaxpy_comb(int bm, int bn, int m, int n, double alpha, double *Anum, double *Aden, double *X1, double *X2, double *Y1, double *Y2, double *Z1, double *Z2)
{
    int i_one = 1;
    int j;
    for ( j=0; j<bn; ++j) {
        /* update of x */
        double factor = Anum[j] / Aden[j];
        BLAS_cp(bm, Y2, i_one, Z2, i_one);
        BLAS_axpy(bm, factor, X2, i_one, Z2, i_one);
        X2 += m;
        Y2 += m;
        Z2 += m;
        /* update of r: same per-column factor, scaled by alpha */
        factor = alpha * factor;
        BLAS_cp(bm, Y1, i_one, Z1, i_one);
        BLAS_axpy(bm, factor, X1, i_one, Z1, i_one);
        X1 += m;
        Y1 += m;
        Z1 += m;
    }
}
/* Z_j = Y_j + (SAnum[j]/SAden[j]) * X_j for each of the bn columns; panels
 * are bm-by-bn with leading dimension m.  p is unused (uniform task signature). */
void __t_extm_axpy(int bm, int bn, int m, int n, double *SAnum, double *SAden, double *X, double *Y, double *Z, int p)
{
    int inc = 1;
    int col;
    for ( col = 0; col < bn; ++col ) {
        double scale = SAnum[col] / SAden[col];
        double *xcol = X + col * m;
        double *ycol = Y + col * m;
        double *zcol = Z + col * m;
        BLAS_cp(bm, ycol, inc, zcol, inc);
        BLAS_axpy(bm, scale, xcol, inc, zcol, inc);
    }
}
/* Non-optimal implementation of the mkl_csrmv when it is not present.
 *
 * Computes C = alpha*op(A)*B + beta*C for an m-by-n CSR matrix A (0-based
 * avptr/avpos/avval); op(A) = A for trans "N"/"n" and A^T for "T"/"t".
 *
 * Fixes relative to the original:
 *  - `strcmp(trans,"N") || strcmp(trans,"n")` is true for EVERY string (no
 *    string compares equal to both "N" and "n"), so the non-transposed branch
 *    always ran and the transpose branch was unreachable.  The comparisons now
 *    test for equality (== 0).
 *  - The transpose case iterated over n rows instead of m and applied beta
 *    only to outputs touched while processing row 0; C (length n) is now
 *    scaled by beta up front and all m rows are scattered.
 *  - beta == 0 is treated as an assignment (stale/uninitialized C values are
 *    never read), matching standard BLAS semantics.
 */
void manual_csrmv(char *trans, int m, int n, double alpha, double *avval, int *avpos, int *avptr, double *Bptr, double beta, double *Cptr)
{
    if ( strcmp(trans, "N") == 0 || strcmp(trans, "n") == 0 ) {
        /* C(m) = alpha*A*B + beta*C: one dot product per row. */
        for ( int i = 0; i < m; i++ ) {
            double c = (beta == 0.0) ? 0.0 : beta * Cptr[i];
            for ( int v = avptr[i]; v < avptr[i+1]; v++ ) {
                c += alpha * avval[v] * Bptr[avpos[v]];
            }
            Cptr[i] = c;
        }
    } else if ( strcmp(trans, "T") == 0 || strcmp(trans, "t") == 0 ) {
        /* C(n) = alpha*A'*B + beta*C: scale the output first, then scatter
         * each row's entries into the columns they belong to. */
        for ( int i = 0; i < n; i++ ) {
            Cptr[i] = (beta == 0.0) ? 0.0 : beta * Cptr[i];
        }
        for ( int i = 0; i < m; i++ ) {
            for ( int v = avptr[i]; v < avptr[i+1]; v++ ) {
                Cptr[avpos[v]] += alpha * avval[v] * Bptr[i];
            }
        }
    }
}
|
FlopCounterFunctor.h | /**
* @file FlopCounterFunctor.h
*
* @date 22 Jan 2018
* @author tchipevn
*/
#pragma once
#include "autopas/pairwiseFunctors/Functor.h"
#include "autopas/utils/ArrayMath.h"
namespace autopas {
/**
 * This class helps in determining the number of floating point operations
 * performed. It is a functor that only counts floating point operations
 * instead of executing them.
 * @todo this class is currently limited to the following case:
 * - constant cutoff radius
 * - constant number of floating point operations per kernel call (distance < cutoff)
 * @tparam Particle
 * @tparam ParticleCell
 */
template <class Particle>
class FlopCounterFunctor : public Functor<Particle, FlopCounterFunctor<Particle>> {
public:
// Pure instrumentation: never a candidate during tuning, but usable with and
// without Newton's third law optimization (counting works either way).
bool isRelevantForTuning() override { return false; }
bool allowsNewton3() override { return true; }
bool allowsNonNewton3() override { return true; }
/**
 * Constructor of FlopCounterFunctor.
 * @param cutoffRadius the cutoff radius
 */
explicit FlopCounterFunctor<Particle>(double cutoffRadius)
    : autopas::Functor<Particle, FlopCounterFunctor<Particle>>(cutoffRadius),
      // Cache the squared cutoff so hot loops compare dr2 without a sqrt.
      _cutoffSquare(cutoffRadius * cutoffRadius),
      // Atomic tallies, updated by all threads; start at zero.
      _distanceCalculations(0ul),
      _kernelCalls(0ul) {}
// Count one distance calculation for the pair, and one kernel call if the
// pair lies within the cutoff.  The newton3 flag does not affect counting.
void AoSFunctor(Particle &i, Particle &j, bool /*newton3*/) override {
    if (i.isDummy() or j.isDummy()) {
        return;  // dummy particles never interact
    }
    auto dr = utils::ArrayMath::sub(i.getR(), j.getR());
    double dr2 = utils::ArrayMath::dot(dr, dr);
    // Relaxed ordering suffices: the counters are independent tallies that
    // impose no ordering on other memory operations.
    _distanceCalculations.fetch_add(1, std::memory_order_relaxed);
    if (dr2 <= _cutoffSquare) {
        _kernelCalls.fetch_add(1, std::memory_order_relaxed);
    }
}
/**
 * See Functor::SoAFunctorSingle()
 * Counts all unique pairs (j > i) within a single SoA buffer.
 * NOTE(review): unlike AoSFunctor, dummy particles are not filtered here --
 * confirm whether the SoA buffers are guaranteed dummy-free at this point.
 * @param soa
 */
void SoAFunctorSingle(SoAView<typename Particle::SoAArraysType> soa, bool /*newton3*/) override {
    if (soa.getNumParticles() == 0) return;
    double *const __restrict x1ptr = soa.template begin<Particle::AttributeNames::posX>();
    double *const __restrict y1ptr = soa.template begin<Particle::AttributeNames::posY>();
    double *const __restrict z1ptr = soa.template begin<Particle::AttributeNames::posZ>();
    for (size_t i = 0; i < soa.getNumParticles(); ++i) {
        // Per-i accumulators keep the simd reduction thread-local; the atomic
        // counters are touched only once per outer iteration.
        size_t distanceCalculationsAcc = 0;
        size_t kernelCallsAcc = 0;
        // icpc vectorizes this.
        // g++ only with -ffast-math or -funsafe-math-optimizations
        #pragma omp simd reduction(+ : kernelCallsAcc, distanceCalculationsAcc)
        for (size_t j = i + 1; j < soa.getNumParticles(); ++j) {
            ++distanceCalculationsAcc;
            const double drx = x1ptr[i] - x1ptr[j];
            const double dry = y1ptr[i] - y1ptr[j];
            const double drz = z1ptr[i] - z1ptr[j];
            const double drx2 = drx * drx;
            const double dry2 = dry * dry;
            const double drz2 = drz * drz;
            const double dr2 = drx2 + dry2 + drz2;
            if (dr2 <= _cutoffSquare) {
                ++kernelCallsAcc;
            }
        }
        _distanceCalculations.fetch_add(distanceCalculationsAcc, std::memory_order_relaxed);
        _kernelCalls.fetch_add(kernelCallsAcc, std::memory_order_relaxed);
    }
}
/**
 * See Functor::SoAFunctorPair()
 * Counts every cross pair between the two SoA buffers (full i x j product).
 * @param soa1
 * @param soa2
 */
void SoAFunctorPair(SoAView<typename Particle::SoAArraysType> soa1, SoAView<typename Particle::SoAArraysType> soa2,
                    bool /*newton3*/) override {
    double *const __restrict x1ptr = soa1.template begin<Particle::AttributeNames::posX>();
    double *const __restrict y1ptr = soa1.template begin<Particle::AttributeNames::posY>();
    double *const __restrict z1ptr = soa1.template begin<Particle::AttributeNames::posZ>();
    double *const __restrict x2ptr = soa2.template begin<Particle::AttributeNames::posX>();
    double *const __restrict y2ptr = soa2.template begin<Particle::AttributeNames::posY>();
    double *const __restrict z2ptr = soa2.template begin<Particle::AttributeNames::posZ>();
    for (size_t i = 0; i < soa1.getNumParticles(); ++i) {
        // Per-i accumulators keep the simd reduction local; atomics are hit
        // only once per outer iteration.
        size_t distanceCalculationsAcc = 0;
        size_t kernelCallsAcc = 0;
        // icpc vectorizes this.
        // g++ only with -ffast-math or -funsafe-math-optimizations
        #pragma omp simd reduction(+ : kernelCallsAcc, distanceCalculationsAcc)
        for (size_t j = 0; j < soa2.getNumParticles(); ++j) {
            ++distanceCalculationsAcc;
            const double drx = x1ptr[i] - x2ptr[j];
            const double dry = y1ptr[i] - y2ptr[j];
            const double drz = z1ptr[i] - z2ptr[j];
            const double drx2 = drx * drx;
            const double dry2 = dry * dry;
            const double drz2 = drz * drz;
            const double dr2 = drx2 + dry2 + drz2;
            if (dr2 <= _cutoffSquare) {
                ++kernelCallsAcc;
            }
        }
        _distanceCalculations.fetch_add(distanceCalculationsAcc, std::memory_order_relaxed);
        _kernelCalls.fetch_add(kernelCallsAcc, std::memory_order_relaxed);
    }
}
/**
 * See Functor::SoAFunctorVerlet()
 * Counts interactions of particle `indexFirst` with every particle in its
 * Verlet neighbor list.  The bulk of the list is processed in vecsize-wide
 * strips via gather + simd reduction; the remainder falls back to a scalar
 * loop that also skips self-interaction.
 * @param soa
 * @param indexFirst
 * @param neighborList
 */
void SoAFunctorVerlet(SoAView<typename Particle::SoAArraysType> soa, const size_t indexFirst,
                      const std::vector<size_t, autopas::AlignedAllocator<size_t>> &neighborList,
                      bool /*newton3*/) override {
    auto numParts = soa.getNumParticles();
    if (numParts == 0) return;
    double *const __restrict xptr = soa.template begin<Particle::AttributeNames::posX>();
    double *const __restrict yptr = soa.template begin<Particle::AttributeNames::posY>();
    double *const __restrict zptr = soa.template begin<Particle::AttributeNames::posZ>();
    const size_t listSizeI = neighborList.size();
    const size_t *const __restrict currentList = neighborList.data();
    // this is a magic number, that should correspond to at least
    // vectorization width*N have testet multiple sizes:
    // 4: small speedup compared to AoS
    // 8: small speedup compared to AoS
    // 12: small but best speedup compared to Aos
    // 16: smaller speedup
    // in theory this is a variable, we could auto-tune over...
    #ifdef __AVX512F__
    // use a multiple of 8 for avx
    const size_t vecsize = 16;
    #else
    // for everything else 12 is faster
    const size_t vecsize = 12;
    #endif
    size_t joff = 0;  // index of the first neighbor NOT yet handled by the vector loop
    // if the size of the verlet list is larger than the given size vecsize,
    // we will use a vectorized version.
    if (listSizeI >= vecsize) {
        alignas(64) std::array<double, vecsize> xtmp{}, ytmp{}, ztmp{}, xArr{}, yArr{}, zArr{};
        // broadcast of the position of particle i
        for (size_t tmpj = 0; tmpj < vecsize; tmpj++) {
            xtmp[tmpj] = xptr[indexFirst];
            ytmp[tmpj] = yptr[indexFirst];
            ztmp[tmpj] = zptr[indexFirst];
        }
        // loop over the verlet list from 0 to x*vecsize
        for (; joff < listSizeI - vecsize + 1; joff += vecsize) {
            size_t distanceCalculationsAcc = 0;
            size_t kernelCallsAcc = 0;
            // in each iteration we calculate the interactions of particle i with
            // vecsize particles in the neighborlist of particle i starting at
            // particle joff
            // gather position of particle j
            #pragma omp simd safelen(vecsize)
            for (size_t tmpj = 0; tmpj < vecsize; tmpj++) {
                xArr[tmpj] = xptr[currentList[joff + tmpj]];
                yArr[tmpj] = yptr[currentList[joff + tmpj]];
                zArr[tmpj] = zptr[currentList[joff + tmpj]];
            }
            // do omp simd with reduction of the interaction
            #pragma omp simd reduction(+ : kernelCallsAcc, distanceCalculationsAcc) safelen(vecsize)
            for (size_t j = 0; j < vecsize; j++) {
                ++distanceCalculationsAcc;
                const double drx = xtmp[j] - xArr[j];
                const double dry = ytmp[j] - yArr[j];
                const double drz = ztmp[j] - zArr[j];
                const double drx2 = drx * drx;
                const double dry2 = dry * dry;
                const double drz2 = drz * drz;
                const double dr2 = drx2 + dry2 + drz2;
                // branch-free count keeps the simd reduction uniform
                const unsigned long mask = (dr2 <= _cutoffSquare) ? 1 : 0;
                kernelCallsAcc += mask;
            }
            _distanceCalculations.fetch_add(distanceCalculationsAcc, std::memory_order_relaxed);
            _kernelCalls.fetch_add(kernelCallsAcc, std::memory_order_relaxed);
        }
    }
    size_t distanceCalculationsAcc = 0;
    size_t kernelCallsAcc = 0;
    // this loop goes over the remainder and uses no optimizations
    for (size_t jNeighIndex = joff; jNeighIndex < listSizeI; ++jNeighIndex) {
        size_t j = neighborList[jNeighIndex];
        if (indexFirst == j) continue;  // skip self-interaction
        ++distanceCalculationsAcc;
        const double drx = xptr[indexFirst] - xptr[j];
        const double dry = yptr[indexFirst] - yptr[j];
        const double drz = zptr[indexFirst] - zptr[j];
        const double drx2 = drx * drx;
        const double dry2 = dry * dry;
        const double drz2 = drz * drz;
        const double dr2 = drx2 + dry2 + drz2;
        if (dr2 <= _cutoffSquare) {
            ++kernelCallsAcc;
        }
    }
    _distanceCalculations.fetch_add(distanceCalculationsAcc, std::memory_order_relaxed);
    _kernelCalls.fetch_add(kernelCallsAcc, std::memory_order_relaxed);
}
/**
* @copydoc Functor::getNeededAttr()
*/
constexpr static std::array<typename Particle::AttributeNames, 3> getNeededAttr() {
return std::array<typename Particle::AttributeNames, 3>{
Particle::AttributeNames::posX, Particle::AttributeNames::posY, Particle::AttributeNames::posZ};
}
/**
* @copydoc Functor::getNeededAttr(std::false_type)
*/
constexpr static std::array<typename Particle::AttributeNames, 3> getNeededAttr(std::false_type) {
  // Non-newton3 overload: the same three position attributes are needed.
  return getNeededAttr();
}
/**
* @copydoc Functor::getComputedAttr()
*/
constexpr static std::array<typename Particle::AttributeNames, 0> getComputedAttr() {
  // This functor only counts operations; it writes nothing back to particles.
  return {};
}
/**
* get the hit rate of the pair-wise interaction, i.e. the ratio of the number
* of kernel calls compared to the number of distance calculations
* @return the hit rate
*/
double getHitRate() { return static_cast<double>(_kernelCalls) / static_cast<double>(_distanceCalculations); }
/**
* get the total number of flops
* @param numFlopsPerKernelCall
* @return
*/
[[nodiscard]] size_t getFlops(size_t numFlopsPerKernelCall) const {
  // Total work = flops spent on distance checks + flops spent in kernel calls.
  const auto distancePart = numFlopsPerDistanceCalculation * _distanceCalculations;
  const auto kernelPart = numFlopsPerKernelCall * _kernelCalls;
  return distancePart + kernelPart;
}
/**
* get the number of calculated distance operations
* @return
*/
[[nodiscard]] size_t getDistanceCalculations() const {
  // Snapshot of the atomic counter.
  return _distanceCalculations.load();
}
/**
* get the number of kernel calls, i.e. the number of pairs of particles with
* a distance not larger than the cutoff
* @return
*/
[[nodiscard]] size_t getKernelCalls() const {
  // Snapshot of the atomic counter.
  return _kernelCalls.load();
}
/**
 * number of flops for one distance calculation.
 * 3 sub + 3 square + 2 add
 */
static constexpr double numFlopsPerDistanceCalculation = 8.0;

 private:
  // Squared cutoff radius; a pair with dr2 <= _cutoffSquare counts as a kernel call.
  double _cutoffSquare;
  // Global counters, incremented with relaxed atomics from concurrent callers.
  std::atomic<size_t> _distanceCalculations, _kernelCalls;
};
} // namespace autopas
|
GB_binop__isge_fp64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__isge_fp64
// A.*B function (eWiseMult): GB_AemultB__isge_fp64
// A*D function (colscale): GB_AxD__isge_fp64
// D*A function (rowscale): GB_DxB__isge_fp64
// C+=B function (dense accum): GB_Cdense_accumB__isge_fp64
// C+=b function (dense accum): GB_Cdense_accumb__isge_fp64
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__isge_fp64
// C=scalar+B GB_bind1st__isge_fp64
// C=scalar+B' GB_bind1st_tran__isge_fp64
// C=A+scalar GB_bind2nd__isge_fp64
// C=A'+scalar GB_bind2nd_tran__isge_fp64
// C type: double
// A type: double
// B,b type: double
// BinaryOp: cij = (aij >= bij)
#define GB_ATYPE \
double
#define GB_BTYPE \
double
#define GB_CTYPE \
double
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
double bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
double t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y) \
z = (x >= y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISGE || GxB_NO_FP64 || GxB_NO_ISGE_FP64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense; no accumulator.
GrB_Info GB_Cdense_ewise3_noaccum__isge_fp64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the loop body lives in the shared template, specialized via the GB_*
    // macros defined at the top of this auto-generated file
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse B into a dense C; the slice arrays describe the
// per-task partition of B across ntasks tasks.
GrB_Info GB_Cdense_accumB__isge_fp64
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar (passed as untyped *p_bwork) into a dense C.
GrB_Info GB_Cdense_accumb__isge_fp64
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type double
        double bwork = (*((double *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable — the block above already returned; kept as
    // emitted by the code generator.
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale column j of A by the diagonal entry D(j,j).
GrB_Info GB_AxD__isge_fp64
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // output values array, typed for this specialization
    double *GB_RESTRICT Cx = (double *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale row i of B by the diagonal entry D(i,i).
GrB_Info GB_DxB__isge_fp64
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // output values array, typed for this specialization
    double *GB_RESTRICT Cx = (double *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B (or masked variant) over the UNION of the patterns of A
// and B; GB_BINOP (here: >=) is applied where both entries are present.
GrB_Info GB_AaddB__isge_fp64
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_add_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B (or masked variant) over the INTERSECTION of the
// patterns of A and B.
GrB_Info GB_AemultB__isge_fp64
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = (x >= Bx [p]) for all p, with the scalar bound as the 1st operand.
GrB_Info GB_bind1st__isge_fp64
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double *Cx = (double *) Cx_output ;
    double *Bx = (double *) Bx_input ;
    const double x = (*((double *) x_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        Cx [p] = (x >= Bx [p]) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = (Ax [p] >= y) for all p, with the scalar bound as the 2nd operand.
GrB_Info GB_bind2nd__isge_fp64
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double *Cx = (double *) Cx_output ;
    double *Ax = (double *) Ax_input ;
    const double y = (*((double *) y_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        Cx [p] = (Ax [p] >= y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    double aij = Ax [pA] ; \
    Cx [pC] = (x >= aij) ; \
}

// C = op (x, A'): transpose A and apply the operator with the scalar as the
// 1st operand; the traversal lives in GB_unop_transpose.c.
GrB_Info GB_bind1st_tran__isge_fp64
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
        double
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double x = (*((const double *) x_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any following code (generator boilerplate)
    #undef GB_ATYPE
    #define GB_ATYPE \
        double
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    double aij = Ax [pA] ; \
    Cx [pC] = (aij >= y) ; \
}

// C = op (A', y): transpose A and apply the operator with the scalar as the
// 2nd operand; the traversal lives in GB_unop_transpose.c.
GrB_Info GB_bind2nd_tran__isge_fp64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double y = (*((const double *) y_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
GB_unaryop__lnot_uint16_fp64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__lnot_uint16_fp64
// op(A') function: GB_tran__lnot_uint16_fp64
// C type: uint16_t
// A type: double
// cast: uint16_t cij ; GB_CAST_UNSIGNED(cij,aij,16)
// unaryop: cij = !(aij != 0)
#define GB_ATYPE \
double
#define GB_CTYPE \
uint16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !(x != 0) ;
// casting
#define GB_CASTING(z, aij) \
uint16_t z ; GB_CAST_UNSIGNED(z,aij,16) ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_UINT16 || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = lnot (cast-to-uint16 (Ax [p])) for all p, elementwise.
GrB_Info GB_unop__lnot_uint16_fp64
(
    uint16_t *Cx,               // Cx and Ax may be aliased
    double *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // expands to: cast Ax[p] to uint16_t, then Cx[p] = !(z != 0)
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply the unary operator; the
// traversal lives in the shared template GB_unaryop_transpose.c.
GrB_Info GB_tran__lnot_uint16_fp64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
vmbpush2.c | /* C Library for Skeleton 2-1/2D Electromagnetic OpenMP/Vector PIC */
/* Code */
/* written by Viktor K. Decyk, UCLA */
#include <stdlib.h>
#include <stdio.h>
#include <complex.h>
#include <math.h>
#include "vmbpush2.h"
/*--------------------------------------------------------------------*/
double ranorm() {
/* this program calculates a random number y from a gaussian distribution
   with zero mean and unit variance, according to the method of
   mueller and box:
      y(k) = (-2*ln(x(k)))**1/2*sin(2*pi*x(k+1))
      y(k+1) = (-2*ln(x(k)))**1/2*cos(2*pi*x(k+1)),
   where x is a random number uniformly distributed on (0,1).
   written for the ibm by viktor k. decyk, ucla
   local data */
/* state of two congruential generators, kept in 16-bit halves so the
   products stay exact when carried in doubles */
   static int r1 = 885098780, r2 = 1824280461;
   static int r4 = 1396483093, r5 = 55318673;
/* iflg/r0 cache the cosine partner of the previously returned sample */
   static int iflg = 0;
   static double h1l = 65531.0, h1u = 32767.0, h2l = 65525.0;
   static double r0 = 0.0;
   int isc, i1;
/* local `ranorm` mirrors the Fortran result-variable style; shadowing the
   function name is legal in C */
   double ranorm, r3, asc, bsc, temp;
/* return the cached second member of the Box-Muller pair, if any */
   if (iflg==1) {
      ranorm = r0;
      r0 = 0.0;
      iflg = 0;
      return ranorm;
   }
   isc = 65536;
   asc = (double) isc;
   bsc = asc*asc;
/* advance the first generator (r1,r2) using double-precision arithmetic */
   i1 = r1 - (r1/isc)*isc;
   r3 = h1l*(double) r1 + asc*h1u*(double) i1;
   i1 = r3/bsc;
   r3 -= ((double) i1)*bsc;
   bsc = 0.5*bsc;
   i1 = r2/isc;
   isc = r2 - i1*isc;
   r0 = h1l*(double) r2 + asc*h1u*(double) isc;
   asc = 1.0/bsc;
   isc = r0*asc;
   r2 = r0 - ((double) isc)*bsc;
   r3 += (double) isc + 2.0*h1u*(double) i1;
   isc = r3*asc;
   r1 = r3 - ((double) isc)*bsc;
/* radius term of the Box-Muller pair */
   temp = sqrt(-2.0*log((((double) r1) + ((double) r2)*asc)*asc));
   isc = 65536;
   asc = (double) isc;
   bsc = asc*asc;
/* advance the second generator (r4,r5) */
   i1 = r4 - (r4/isc)*isc;
   r3 = h2l*(double) r4 + asc*h1u*(double) i1;
   i1 = r3/bsc;
   r3 -= ((double) i1)*bsc;
   bsc = 0.5*bsc;
   i1 = r5/isc;
   isc = r5 - i1*isc;
   r0 = h2l*(double) r5 + asc*h1u*(double) isc;
   asc = 1.0/bsc;
   isc = r0*asc;
   r5 = r0 - ((double) isc)*bsc;
   r3 += (double) isc + 2.0*h1u*(double) i1;
   isc = r3*asc;
   r4 = r3 - ((double) isc)*bsc;
/* angle term: 2*pi times a uniform deviate */
   r0 = 6.28318530717959*((((double) r4) + ((double) r5)*asc)*asc);
   ranorm = temp*sin(r0);
/* stash the cosine partner for the next call */
   r0 = temp*cos(r0);
   iflg = 1;
   return ranorm;
}
/*--------------------------------------------------------------------*/
void cdistr2h(float part[], float vtx, float vty, float vtz, float vdx,
              float vdy, float vdz, int npx, int npy, int idimp, int nop,
              int nx, int ny, int ipbc) {
/* for 2-1/2d code: fill part with initial particle co-ordinates (uniform
   density) and velocities (maxwellian with drift).
   part[n][0:1] = x,y position; part[n][2:4] = vx,vy,vz velocity
   vtx/vty/vtz = thermal velocities, vdx/vdy/vdz = drift velocities
   npx/npy = particles laid out per direction; idimp = phase-space size (5)
   nop = number of particles; nx/ny = system lengths
   ipbc = boundary condition (0 none, 1 periodic, 2 reflecting, 3 mixed)
   ranorm() supplies gaussian deviates; its call order is preserved.
   local data */
   int ix, iy, base, ntot;
   float xlow, ylow, dxs, dys, yval, avx, avy, avz;
   double tx, ty, tz;
   ntot = npx*npy;
/* lower edges and grid spacings depend on the boundary condition */
   xlow = 0.0;
   ylow = 0.0;
   dxs = (float) nx/(float) npx;
   dys = (float) ny/(float) npy;
   if (ipbc==2) {
      xlow = 1.0;
      ylow = 1.0;
      dxs = (float) (nx-2)/(float) npx;
      dys = (float) (ny-2)/(float) npy;
   }
   else if (ipbc==3) {
      xlow = 1.0;
      ylow = 0.0;
      dxs = (float) (nx-2)/(float) npx;
      dys = (float) ny/(float) npy;
   }
/* place particles on a uniform grid, cell-centered */
   for (iy = 0; iy < npy; iy++) {
      base = idimp*npx*iy;
      yval = ylow + dys*(((float) iy) + 0.5);
      for (ix = 0; ix < npx; ix++) {
         part[idimp*ix+base] = xlow + dxs*(((float) ix) + 0.5);
         part[1+idimp*ix+base] = yval;
      }
   }
/* draw maxwellian velocities */
   for (ix = 0; ix < ntot; ix++) {
      part[2+idimp*ix] = vtx*ranorm();
      part[3+idimp*ix] = vty*ranorm();
      part[4+idimp*ix] = vtz*ranorm();
   }
/* subtract the sampled mean so the drift comes out exactly as requested */
   tx = 0.0;
   ty = 0.0;
   tz = 0.0;
   for (ix = 0; ix < ntot; ix++) {
      tx += part[2+idimp*ix];
      ty += part[3+idimp*ix];
      tz += part[4+idimp*ix];
   }
   avx = tx;
   avy = ty;
   avz = tz;
   yval = 1.0/(float) ntot;
   avx = yval*avx - vdx;
   avy = yval*avy - vdy;
   avz = yval*avz - vdz;
   for (ix = 0; ix < ntot; ix++) {
      part[2+idimp*ix] -= avx;
      part[3+idimp*ix] -= avy;
      part[4+idimp*ix] -= avz;
   }
   return;
}
/*--------------------------------------------------------------------*/
void cdblkp2l(float part[], int kpic[], int *nppmx, int idimp, int nop,
              int mx, int my, int mx1, int mxy1, int *irc) {
/* count the particles falling into each mx-by-my tile and report the
   largest per-tile count, so the caller can size the segmented array ppart.
   part[n][0:1] = x,y position of particle n
   kpic[k] receives the particle count of tile k; *nppmx the maximum count
   idimp = phase-space size; nop = number of particles
   mx1 = (x length - 1)/mx + 1; mxy1 = mx1*my1
   *irc is set to the tile-overflow amount (>0), or -1 if the counted total
   disagrees with nop; it is left untouched on success.
   local data */
   int j, k, tx, ty, tile, total, count, maxcnt, overflow;
   overflow = 0;
/* reset the per-tile counters */
   for (k = 0; k < mxy1; k++) {
      kpic[k] = 0;
   }
/* tally each particle into the tile containing it */
   for (j = 0; j < nop; j++) {
      tx = part[idimp*j];
      ty = part[1+idimp*j];
      tile = tx/mx + mx1*(ty/my);
      if (tile < mxy1) {
         kpic[tile] += 1;
      }
      else {
         count = tile - mxy1 + 1;
         if (count > overflow)
            overflow = count;
      }
   }
/* reduce: maximum and total of the tile counts */
   total = 0;
   maxcnt = 0;
   for (k = 0; k < mxy1; k++) {
      count = kpic[k];
      if (count > maxcnt)
         maxcnt = count;
      total += count;
   }
   *nppmx = maxcnt;
/* flag either a tile overflow or a lost-particle inconsistency */
   if (overflow > 0) {
      *irc = overflow;
   }
   else if (total != nop) {
      *irc = -1;
   }
   return;
}
/*--------------------------------------------------------------------*/
void cppmovin2lt(float part[], float ppart[], int kpic[], int nppmx,
                 int idimp, int nop, int mx, int my, int mx1, int mxy1,
                 int *irc) {
/* scatter the linear particle array part into the tile-segmented array
   ppart, tiling space in mx-by-my cells (linear interpolation layout).
   component i of particle n of tile k is stored at
   ppart[n + nppmx*(i + idimp*k)]; kpic[k] receives the count of tile k.
   nppmx = capacity per tile; idimp = phase-space size; nop = particles
   mx1 = (x length - 1)/mx + 1; mxy1 = mx1*my1
   *irc is set to the overflow amount when a tile exceeds nppmx.
   local data */
   int i, j, tx, ty, tile, slot, excess, over;
   over = 0;
/* reset the per-tile counters */
   for (j = 0; j < mxy1; j++) {
      kpic[j] = 0;
   }
/* route every particle to the next free slot of its tile */
   for (j = 0; j < nop; j++) {
      tx = part[idimp*j];
      ty = part[1+idimp*j];
      tile = tx/mx + mx1*(ty/my);
      slot = kpic[tile];
      if (slot < nppmx) {
         for (i = 0; i < idimp; i++) {
            ppart[slot+nppmx*(i+idimp*tile)] = part[i+idimp*j];
         }
      }
      else {
         excess = slot - nppmx + 1;
         if (excess > over)
            over = excess;
      }
      kpic[tile] = slot + 1;
   }
   if (over > 0)
      *irc = over;
   return;
}
/*--------------------------------------------------------------------*/
void cppmovin2ltp(float part[], float ppart[], int kpic[], int kp[],
                  int nppmx, int idimp, int nop, int mx, int my,
                  int mx1, int mxy1, int *irc) {
/* this subroutine sorts particles by x,y grid in tiles of mx, my
   and copies to segmented array ppart
   designed for NUMA architectures, where memory is associated with the
   processor which first writes a memory location.
   linear interpolation
   input: all except ppart, kpic, output: ppart, kpic
   part/ppart = input/output particle arrays
   part[n][0] = position x of particle n in partition
   part[n][1] = position y of particle n in partition
   ppart[k][0][n] = position x of particle n in tile k
   ppart[k][1][n] = position y of particle n in tile k
   kpic = output number of particles per tile
   kp = original location of reordered particle
   nppmx = maximum number of particles in tile
   idimp = size of phase space = 4
   nop = number of particles
   mx/my = number of grids in sorting cell in x and y
   mx1 = (system length in x direction - 1)/mx + 1
   mxy1 = mx1*my1, where my1 = (system length in y direction - 1)/my + 1
   irc = maximum overflow, returned only if error occurs, when irc > 0
   local data */
   int i, j, k, n, m, ip, npp, ierr;
   ierr = 0;
/* clear counter array */
   for (k = 0; k < mxy1; k++) {
      kpic[k] = 0;
   }
/* phase 1 (serial): find addresses of particles at each tile to reorder
   particles; kp[slot + nppmx*tile] remembers each particle's origin index */
   for (j = 0; j < nop; j++) {
      n = part[idimp*j];
      m = part[1+idimp*j];
      n = n/mx;
      m = m/my;
      m = n + mx1*m;
      ip = kpic[m];
      if (ip < nppmx) {
         kp[ip+nppmx*m] = j;
      }
      else {
         ierr = ierr > ip-nppmx+1 ? ierr : ip-nppmx+1;
      }
      kpic[m] = ip + 1;
   }
/* check for overflow */
   if (ierr > 0) {
      *irc = ierr;
      return;
   }
/* phase 2 (parallel): copy reordered particles; each thread first-writes the
   tiles it owns, which places the pages on that thread's NUMA node */
#pragma omp parallel for private(i,j,k,m,npp)
   for (k = 0; k < mxy1; k++) {
      npp = kpic[k];
      for (j = 0; j < npp; j++) {
         m = kp[j+nppmx*k];
         for (i = 0; i < idimp; i++) {
            ppart[j+nppmx*(i+idimp*k)] = part[i+idimp*m];
         }
      }
   }
   return;
}
/*--------------------------------------------------------------------*/
void cppcheck2lt(float ppart[], int kpic[], int idimp, int nppmx, int nx,
                 int ny, int mx, int my, int mx1, int my1,
                 int *irc) {
/* this subroutine performs a sanity check to make sure particles sorted
   by x,y grid in tiles of mx, my, are all within bounds.
   tiles are assumed to be arranged in 2D linear memory, and transposed
   input: all except irc
   output: irc
   ppart[k][0][n] = position x of particle n in tile k
   ppart[k][1][n] = position y of particle n in tile k
   kpic[k] = number of reordered output particles in tile k
   idimp = size of phase space = 4
   nppmx = maximum number of particles in tile
   nx/ny = system length in x/y direction
   mx/my = number of grids in sorting cell in x/y
   mx1 = (system length in x direction - 1)/mx + 1
   my1 = (system length in y direction - 1)/my + 1
   irc = particle error, returned only if error occurs, when irc > 0
   local data */
   int mxy1, noff, moff, npp, j, k, ist, nn, mm;
   float edgelx, edgely, edgerx, edgery, dx, dy;
   mxy1 = mx1*my1;
/* loop over tiles */
#pragma omp parallel for \
private(j,k,noff,moff,npp,nn,mm,ist,edgelx,edgely,edgerx,edgery,dx,dy)
   for (k = 0; k < mxy1; k++) {
/* compute this tile's spatial bounds, clipped at the system edge */
      noff = k/mx1;
      moff = my*noff;
      noff = mx*(k - mx1*noff);
      npp = kpic[k];
      nn = nx - noff;
      nn = mx < nn ? mx : nn;
      mm = ny - moff;
      mm = my < mm ? my : mm;
      edgelx = noff;
      edgerx = noff + nn;
      edgely = moff;
      edgery = moff + mm;
/* loop over particles in tile */
      for (j = 0; j < npp; j++) {
         dx = ppart[j+nppmx*(idimp*k)];
         dy = ppart[j+nppmx*(1+idimp*k)];
/* find particles going out of bounds; ist encodes the violated edge(s) */
         ist = 0;
         if (dx < edgelx)
            ist = 1;
         if (dx >= edgerx)
            ist = 2;
         if (dy < edgely)
            ist += 3;
         if (dy >= edgery)
            ist += 6;
/* NOTE(review): *irc is written from inside the parallel loop without
   synchronization; when several tiles fail, which tile index wins is
   unspecified — presumably any nonzero value suffices; confirm */
         if (ist > 0)
            *irc = k + 1;
      }
   }
   return;
}
/*--------------------------------------------------------------------*/
void cgbppush23lt(float ppart[], float fxy[], float bxy[], int kpic[],
float qbm, float dt, float dtc, float *ek, int idimp,
int nppmx, int nx, int ny, int mx, int my, int nxv,
int nyv, int mx1, int mxy1, int ipbc) {
/* for 2-1/2d code, this subroutine updates particle co-ordinates and
velocities using leap-frog scheme in time and first-order linear
interpolation in space, with magnetic field. Using the Boris Mover.
OpenMP version using guard cells
data deposited in tiles
particles stored segmented array
119 flops/particle, 1 divide, 29 loads, 5 stores
input: all, output: ppart, ek
velocity equations used are:
vx(t+dt/2) = rot(1)*(vx(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt) +
rot(2)*(vy(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt) +
rot(3)*(vz(t-dt/2) + .5*(q/m)*fz(x(t),y(t))*dt) +
.5*(q/m)*fx(x(t),y(t))*dt)
vy(t+dt/2) = rot(4)*(vx(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt) +
rot(5)*(vy(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt) +
rot(6)*(vz(t-dt/2) + .5*(q/m)*fz(x(t),y(t))*dt) +
.5*(q/m)*fy(x(t),y(t))*dt)
vz(t+dt/2) = rot(7)*(vx(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt) +
rot(8)*(vy(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt) +
rot(9)*(vz(t-dt/2) + .5*(q/m)*fz(x(t),y(t))*dt) +
.5*(q/m)*fz(x(t),y(t))*dt)
where q/m is charge/mass, and the rotation matrix is given by:
rot[0] = (1 - (om*dt/2)**2 + 2*(omx*dt/2)**2)/(1 + (om*dt/2)**2)
rot[1] = 2*(omz*dt/2 + (omx*dt/2)*(omy*dt/2))/(1 + (om*dt/2)**2)
rot[2] = 2*(-omy*dt/2 + (omx*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
rot[3] = 2*(-omz*dt/2 + (omx*dt/2)*(omy*dt/2))/(1 + (om*dt/2)**2)
rot[4] = (1 - (om*dt/2)**2 + 2*(omy*dt/2)**2)/(1 + (om*dt/2)**2)
rot[5] = 2*(omx*dt/2 + (omy*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
rot[6] = 2*(omy*dt/2 + (omx*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
rot[7] = 2*(-omx*dt/2 + (omy*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
rot[8] = (1 - (om*dt/2)**2 + 2*(omz*dt/2)**2)/(1 + (om*dt/2)**2)
and om**2 = omx**2 + omy**2 + omz**2
the rotation matrix is determined by:
omx = (q/m)*bx(x(t),y(t)), omy = (q/m)*by(x(t),y(t)), and
omz = (q/m)*bz(x(t),y(t)).
position equations used are:
x(t+dt)=x(t) + vx(t+dt/2)*dt
y(t+dt)=y(t) + vy(t+dt/2)*dt
fx(x(t),y(t)), fy(x(t),y(t)), and fz(x(t),y(t))
bx(x(t),y(t)), by(x(t),y(t)), and bz(x(t),y(t))
are approximated by interpolation from the nearest grid points:
fx(x,y) = (1-dy)*((1-dx)*fx(n,m)+dx*fx(n+1,m)) + dy*((1-dx)*fx(n,m+1)
+ dx*fx(n+1,m+1))
where n,m = leftmost grid points and dx = x-n, dy = y-m
similarly for fy(x,y), fz(x,y), bx(x,y), by(x,y), bz(x,y)
ppart[m][0][n] = position x of particle n in tile m
ppart[m][1][n] = position y of particle n in tile m
ppart[m][2][n] = x velocity of particle n in tile m
ppart[m][3][n] = y velocity of particle n in tile m
ppart[m][4][n] = z velocity of particle n in tile m
fxy[k][j][0] = x component of force/charge at grid (j,k)
fxy[k][j][1] = y component of force/charge at grid (j,k)
fxy[k][j][2] = z component of force/charge at grid (j,k)
that is, convolution of electric field over particle shape
bxy[k][j][0] = x component of magnetic field at grid (j,k)
bxy[k][j][1] = y component of magnetic field at grid (j,k)
bxy[k][j][2] = z component of magnetic field at grid (j,k)
that is, the convolution of magnetic field over particle shape
kpic = number of particles per tile
qbm = particle charge/mass ratio
dt = time interval between successive calculations
dtc = time interval between successive co-ordinate calculations
kinetic energy/mass at time t is also calculated, using
ek = .5*sum((vx(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt)**2 +
(vy(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt)**2 +
(vz(t-dt/2) + .5*(q/m)*fz(x(t),y(t))*dt)**2)
idimp = size of phase space = 5
nppmx = maximum number of particles in tile
nx/ny = system length in x/y direction
mx/my = number of grids in sorting cell in x/y
nxv = second dimension of field arrays, must be >= nx+1
nyv = third dimension of field arrays, must be >= ny+1
mx1 = (system length in x direction - 1)/mx + 1
mxy1 = mx1*my1, where my1 = (system length in y direction - 1)/my + 1
ipbc = particle boundary condition = (0,1,2,3) =
(none,2d periodic,2d reflecting,mixed reflecting/periodic)
local data */
#define MXV 33
#define MYV 33
#define N 4
int noff, moff, npoff, npp, mxv;
int i, j, k, nn, mm, nm;
float qtmh, edgelx, edgely, edgerx, edgery, dxp, dyp, amx, amy;
float dx, dy, dz, ox, oy, oz, acx, acy, acz, omxt, omyt, omzt, omt;
float anorm, rot1, rot2, rot3, rot4, rot5, rot6, rot7, rot8, rot9;
float x, y, vx, vy, vz;
float sfxy[N*MXV*MYV], sbxy[N*MXV*MYV];
/* float sfxy[N*(mx+1)*(my+1)], sbxy[N*(mx+1)*(my+1)]; */
double sum1, sum2;
mxv = mx + 1;
qtmh = 0.5f*qbm*dt;
sum2 = 0.0;
/* set boundary values */
edgelx = 0.0f;
edgely = 0.0f;
edgerx = (float) nx;
edgery = (float) ny;
if (ipbc==2) {
edgelx = 1.0f;
edgely = 1.0f;
edgerx = (float) (nx-1);
edgery = (float) (ny-1);
}
else if (ipbc==3) {
edgelx = 1.0f;
edgerx = (float) (nx-1);
}
/* error if local array is too small */
/* if ((mx >= MXV) || (my >= MYV)) */
/* return; */
/* loop over tiles */
#pragma omp parallel for \
private(i,j,k,noff,moff,npp,npoff,nn,mm,nm,x,y,vx,vy,vz,dxp,dyp,amx, \
amy,dx,dy,dz,ox,oy,oz,acx,acy,acz,omxt,omyt,omzt,omt,anorm,rot1,rot2, \
rot3,rot4,rot5,rot6,rot7,rot8,rot9,sum1,sfxy,sbxy) \
reduction(+:sum2)
for (k = 0; k < mxy1; k++) {
noff = k/mx1;
moff = my*noff;
noff = mx*(k - mx1*noff);
npp = kpic[k];
npoff = idimp*nppmx*k;
/* load local fields from global array */
nn = (mx < nx-noff ? mx : nx-noff) + 1;
mm = (my < ny-moff ? my : ny-moff) + 1;
for (j = 0; j < mm; j++) {
for (i = 0; i < nn; i++) {
sfxy[N*(i+mxv*j)] = fxy[N*(i+noff+nxv*(j+moff))];
sfxy[1+N*(i+mxv*j)] = fxy[1+N*(i+noff+nxv*(j+moff))];
sfxy[2+N*(i+mxv*j)] = fxy[2+N*(i+noff+nxv*(j+moff))];
}
}
for (j = 0; j < mm; j++) {
for (i = 0; i < nn; i++) {
sbxy[N*(i+mxv*j)] = bxy[N*(i+noff+nxv*(j+moff))];
sbxy[1+N*(i+mxv*j)] = bxy[1+N*(i+noff+nxv*(j+moff))];
sbxy[2+N*(i+mxv*j)] = bxy[2+N*(i+noff+nxv*(j+moff))];
}
}
sum1 = 0.0;
/* loop over particles in tile */
for (j = 0; j < npp; j++) {
/* find interpolation weights */
x = ppart[j+npoff];
y = ppart[j+nppmx+npoff];
nn = x;
mm = y;
dxp = x - (float) nn;
dyp = y - (float) mm;
nm = N*(nn - noff + mxv*(mm - moff));
amx = 1.0f - dxp;
amy = 1.0f - dyp;
/* find electric field */
nn = nm;
dx = amx*sfxy[nn];
dy = amx*sfxy[nn+1];
dz = amx*sfxy[nn+2];
mm = nn + N;
dx = amy*(dxp*sfxy[mm] + dx);
dy = amy*(dxp*sfxy[mm+1] + dy);
dz = amy*(dxp*sfxy[mm+2] + dz);
nn += N*mxv;
acx = amx*sfxy[nn];
acy = amx*sfxy[nn+1];
acz = amx*sfxy[nn+2];
mm = nn + N;
dx += dyp*(dxp*sfxy[mm] + acx);
dy += dyp*(dxp*sfxy[mm+1] + acy);
dz += dyp*(dxp*sfxy[mm+2] + acz);
/* find magnetic field */
nn = nm;
ox = amx*sbxy[nn];
oy = amx*sbxy[nn+1];
oz = amx*sbxy[nn+2];
mm = nn + N;
ox = amy*(dxp*sbxy[mm] + ox);
oy = amy*(dxp*sbxy[mm+1] + oy);
oz = amy*(dxp*sbxy[mm+2] + oz);
nn += N*mxv;
acx = amx*sbxy[nn];
acy = amx*sbxy[nn+1];
acz = amx*sbxy[nn+2];
mm = nn + N;
ox += dyp*(dxp*sbxy[mm] + acx);
oy += dyp*(dxp*sbxy[mm+1] + acy);
oz += dyp*(dxp*sbxy[mm+2] + acz);
/* calculate half impulse */
dx *= qtmh;
dy *= qtmh;
dz *= qtmh;
/* half acceleration */
acx = ppart[j+2*nppmx+npoff] + dx;
acy = ppart[j+3*nppmx+npoff] + dy;
acz = ppart[j+4*nppmx+npoff] + dz;
/* time-centered kinetic energy */
sum1 += (acx*acx + acy*acy + acz*acz);
/* calculate cyclotron frequency */
omxt = qtmh*ox;
omyt = qtmh*oy;
omzt = qtmh*oz;
/* calculate rotation matrix */
omt = omxt*omxt + omyt*omyt + omzt*omzt;
anorm = 2.0f/(1.0f + omt);
omt = 0.5f*(1.0f - omt);
rot4 = omxt*omyt;
rot7 = omxt*omzt;
rot8 = omyt*omzt;
rot1 = omt + omxt*omxt;
rot5 = omt + omyt*omyt;
rot9 = omt + omzt*omzt;
rot2 = omzt + rot4;
rot4 -= omzt;
rot3 = -omyt + rot7;
rot7 += omyt;
rot6 = omxt + rot8;
rot8 -= omxt;
/* new velocity */
vx = (rot1*acx + rot2*acy + rot3*acz)*anorm + dx;
vy = (rot4*acx + rot5*acy + rot6*acz)*anorm + dy;
vz = (rot7*acx + rot8*acy + rot9*acz)*anorm + dz;
/* new position */
dx = x + vx*dtc;
dy = y + vy*dtc;
/* reflecting boundary conditions */
if (ipbc==2) {
if ((dx < edgelx) || (dx >= edgerx)) {
dx = x;
vx = -vx;
}
if ((dy < edgely) || (dy >= edgery)) {
dy = y;
vy = -vy;
}
}
/* mixed reflecting/periodic boundary conditions */
else if (ipbc==3) {
if ((dx < edgelx) || (dx >= edgerx)) {
dx = x;
vx = -vx;
}
}
/* set new position */
ppart[j+npoff] = dx;
ppart[j+nppmx+npoff] = dy;
/* set new velocity */
ppart[j+2*nppmx+npoff] = vx;
ppart[j+3*nppmx+npoff] = vy;
ppart[j+4*nppmx+npoff] = vz;
}
sum2 += sum1;
}
/* normalize kinetic energy */
*ek += 0.5*sum2;
return;
#undef N
#undef MXV
#undef MYV
}
/*--------------------------------------------------------------------*/
void cgbppushf23lt(float ppart[], float fxy[], float bxy[], int kpic[],
                   int ncl[], int ihole[], float qbm, float dt,
                   float dtc, float *ek, int idimp, int nppmx, int nx,
                   int ny, int mx, int my, int nxv, int nyv, int mx1,
                   int mxy1, int ntmax, int *irc) {
/* for 2-1/2d code, this subroutine updates particle co-ordinates and
   velocities using leap-frog scheme in time and first-order linear
   interpolation in space, with magnetic field. Using the Boris Mover.
   with periodic boundary conditions.
   also determines list of particles which are leaving this tile
   OpenMP version using guard cells
   data deposited in tiles
   particles stored segmented array
   119 flops/particle, 1 divide, 29 loads, 5 stores
   input: all except ncl, ihole, irc, output: ppart, ncl, ihole, irc, ek
   velocity equations used are:
   vx(t+dt/2) = rot(1)*(vx(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt) +
      rot(2)*(vy(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt) +
      rot(3)*(vz(t-dt/2) + .5*(q/m)*fz(x(t),y(t))*dt) +
      .5*(q/m)*fx(x(t),y(t))*dt)
   vy(t+dt/2) = rot(4)*(vx(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt) +
      rot(5)*(vy(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt) +
      rot(6)*(vz(t-dt/2) + .5*(q/m)*fz(x(t),y(t))*dt) +
      .5*(q/m)*fy(x(t),y(t))*dt)
   vz(t+dt/2) = rot(7)*(vx(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt) +
      rot(8)*(vy(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt) +
      rot(9)*(vz(t-dt/2) + .5*(q/m)*fz(x(t),y(t))*dt) +
      .5*(q/m)*fz(x(t),y(t))*dt)
   where q/m is charge/mass, and the rotation matrix is given by:
   rot[0] = (1 - (om*dt/2)**2 + 2*(omx*dt/2)**2)/(1 + (om*dt/2)**2)
   rot[1] = 2*(omz*dt/2 + (omx*dt/2)*(omy*dt/2))/(1 + (om*dt/2)**2)
   rot[2] = 2*(-omy*dt/2 + (omx*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
   rot[3] = 2*(-omz*dt/2 + (omx*dt/2)*(omy*dt/2))/(1 + (om*dt/2)**2)
   rot[4] = (1 - (om*dt/2)**2 + 2*(omy*dt/2)**2)/(1 + (om*dt/2)**2)
   rot[5] = 2*(omx*dt/2 + (omy*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
   rot[6] = 2*(omy*dt/2 + (omx*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
   rot[7] = 2*(-omx*dt/2 + (omy*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
   rot[8] = (1 - (om*dt/2)**2 + 2*(omz*dt/2)**2)/(1 + (om*dt/2)**2)
   and om**2 = omx**2 + omy**2 + omz**2
   the rotation matrix is determined by:
   omx = (q/m)*bx(x(t),y(t)), omy = (q/m)*by(x(t),y(t)), and
   omz = (q/m)*bz(x(t),y(t)).
   position equations used are:
   x(t+dt)=x(t) + vx(t+dt/2)*dt
   y(t+dt)=y(t) + vy(t+dt/2)*dt
   fx(x(t),y(t)), fy(x(t),y(t)), and fz(x(t),y(t))
   bx(x(t),y(t)), by(x(t),y(t)), and bz(x(t),y(t))
   are approximated by interpolation from the nearest grid points:
   fx(x,y) = (1-dy)*((1-dx)*fx(n,m)+dx*fx(n+1,m)) + dy*((1-dx)*fx(n,m+1)
      + dx*fx(n+1,m+1))
   where n,m = leftmost grid points and dx = x-n, dy = y-m
   similarly for fy(x,y), fz(x,y), bx(x,y), by(x,y), bz(x,y)
   ppart[m][0][n] = position x of particle n in tile m
   ppart[m][1][n] = position y of particle n in tile m
   ppart[m][2][n] = x velocity of particle n in tile m
   ppart[m][3][n] = y velocity of particle n in tile m
   ppart[m][4][n] = z velocity of particle n in tile m
   fxy[k][j][0] = x component of force/charge at grid (j,k)
   fxy[k][j][1] = y component of force/charge at grid (j,k)
   fxy[k][j][2] = z component of force/charge at grid (j,k)
   that is, convolution of electric field over particle shape
   bxy[k][j][0] = x component of magnetic field at grid (j,k)
   bxy[k][j][1] = y component of magnetic field at grid (j,k)
   bxy[k][j][2] = z component of magnetic field at grid (j,k)
   that is, the convolution of magnetic field over particle shape
   kpic[k] = number of particles in tile k
   ncl[k][i] = number of particles going to destination i, tile k
   ihole[k][:][0] = location of hole in array left by departing particle
   ihole[k][:][1] = destination of particle leaving hole
   ihole[k][0][0] = ih, number of holes left (error, if negative)
   qbm = particle charge/mass ratio
   dt = time interval between successive calculations
   dtc = time interval between successive co-ordinate calculations
   kinetic energy/mass at time t is also calculated, using
   ek = .5*sum((vx(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt)**2 +
        (vy(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt)**2 +
        (vz(t-dt/2) + .5*(q/m)*fz(x(t),y(t))*dt)**2)
   idimp = size of phase space = 5
   nppmx = maximum number of particles in tile
   nx/ny = system length in x/y direction
   mx/my = number of grids in sorting cell in x/y
   nxv = second dimension of field arrays, must be >= nx+1
   nyv = third dimension of field arrays, must be >= ny+1
   mx1 = (system length in x direction - 1)/mx + 1
   mxy1 = mx1*my1, where my1 = (system length in y direction - 1)/my + 1
   ntmax = size of hole array for particles leaving tiles
   irc = maximum overflow, returned only if error occurs, when irc > 0
   optimized version
   local data */
#define MXV 33
#define MYV 33
#define N 4
   int noff, moff, npoff, npp, mxv;
   int i, j, k, ih, nh, nn, mm, nm;
   float qtmh, dxp, dyp, amx, amy, dx, dy, dz, ox, oy, oz;
   float acx, acy, acz, omxt, omyt, omzt, omt, anorm;
   float rot1, rot2, rot3, rot4, rot5, rot6, rot7, rot8, rot9;
   float anx, any, edgelx, edgely, edgerx, edgery;
   float x, y, vx, vy, vz;
/* sfxy/sbxy = per-thread local copies of the tile's field data */
   float sfxy[N*MXV*MYV], sbxy[N*MXV*MYV];
/* float sfxy[N*(mx+1)*(my+1)], sbxy[N*(mx+1)*(my+1)]; */
   double sum1, sum2;
   mxv = mx + 1;
/* qtmh = (q/m)*dt/2, used for the two half accelerations of the Boris push */
   qtmh = 0.5f*qbm*dt;
   anx = (float) nx;
   any = (float) ny;
   sum2 = 0.0;
/* error if local array is too small */
/* if ((mx >= MXV) || (my >= MYV)) */
/*    return;                      */
/* loop over tiles */
#pragma omp parallel for \
private(i,j,k,noff,moff,npp,npoff,nn,mm,nm,ih,nh,x,y,vx,vy,vz,dxp,dyp, \
amx,amy,dx,dy,dz,ox,oy,oz,acx,acy,acz,omxt,omyt,omzt,omt,anorm,rot1, \
rot2,rot3,rot4,rot5,rot6,rot7,rot8,rot9,edgelx,edgely,edgerx,edgery, \
sum1,sfxy,sbxy) \
reduction(+:sum2)
   for (k = 0; k < mxy1; k++) {
/* noff/moff = global grid offsets of lower-left corner of tile k */
      noff = k/mx1;
      moff = my*noff;
      noff = mx*(k - mx1*noff);
      npp = kpic[k];
/* npoff = start of tile k's segment in the particle array */
      npoff = idimp*nppmx*k;
/* nn/mm = interior tile size, clipped at the global grid boundary */
      nn = nx - noff;
      nn = mx < nn ? mx : nn;
      mm = ny - moff;
      mm = my < mm ? my : mm;
/* tile edges in particle coordinates, used to detect departing particles */
      edgelx = noff;
      edgerx = noff + nn;
      edgely = moff;
      edgery = moff + mm;
/* ih = number of holes found so far, nh = set if ihole overflows */
      ih = 0;
      nh = 0;
/* add one guard cell in each direction for linear interpolation */
      nn += 1;
      mm += 1;
/* load local fields from global array */
      for (j = 0; j < mm; j++) {
         for (i = 0; i < nn; i++) {
            sfxy[N*(i+mxv*j)] = fxy[N*(i+noff+nxv*(j+moff))];
            sfxy[1+N*(i+mxv*j)] = fxy[1+N*(i+noff+nxv*(j+moff))];
            sfxy[2+N*(i+mxv*j)] = fxy[2+N*(i+noff+nxv*(j+moff))];
         }
      }
      for (j = 0; j < mm; j++) {
         for (i = 0; i < nn; i++) {
            sbxy[N*(i+mxv*j)] = bxy[N*(i+noff+nxv*(j+moff))];
            sbxy[1+N*(i+mxv*j)] = bxy[1+N*(i+noff+nxv*(j+moff))];
            sbxy[2+N*(i+mxv*j)] = bxy[2+N*(i+noff+nxv*(j+moff))];
         }
      }
/* clear counters */
      for (j = 0; j < 8; j++) {
         ncl[j+8*k] = 0;
      }
      sum1 = 0.0;
/* loop over particles in tile */
      for (j = 0; j < npp; j++) {
/* find interpolation weights */
         x = ppart[j+npoff];
         y = ppart[j+nppmx+npoff];
         nn = x;
         mm = y;
         dxp = x - (float) nn;
         dyp = y - (float) mm;
/* nm = offset of nearest lower-left grid point in local field copies */
         nm = N*(nn - noff + mxv*(mm - moff));
         amx = 1.0f - dxp;
         amy = 1.0f - dyp;
/* find electric field */
         nn = nm;
         dx = amx*sfxy[nn];
         dy = amx*sfxy[nn+1];
         dz = amx*sfxy[nn+2];
         mm = nn + N;
         dx = amy*(dxp*sfxy[mm] + dx);
         dy = amy*(dxp*sfxy[mm+1] + dy);
         dz = amy*(dxp*sfxy[mm+2] + dz);
         nn += N*mxv;
         acx = amx*sfxy[nn];
         acy = amx*sfxy[nn+1];
         acz = amx*sfxy[nn+2];
         mm = nn + N;
         dx += dyp*(dxp*sfxy[mm] + acx);
         dy += dyp*(dxp*sfxy[mm+1] + acy);
         dz += dyp*(dxp*sfxy[mm+2] + acz);
/* find magnetic field */
         nn = nm;
         ox = amx*sbxy[nn];
         oy = amx*sbxy[nn+1];
         oz = amx*sbxy[nn+2];
         mm = nn + N;
         ox = amy*(dxp*sbxy[mm] + ox);
         oy = amy*(dxp*sbxy[mm+1] + oy);
         oz = amy*(dxp*sbxy[mm+2] + oz);
         nn += N*mxv;
         acx = amx*sbxy[nn];
         acy = amx*sbxy[nn+1];
         acz = amx*sbxy[nn+2];
         mm = nn + N;
         ox += dyp*(dxp*sbxy[mm] + acx);
         oy += dyp*(dxp*sbxy[mm+1] + acy);
         oz += dyp*(dxp*sbxy[mm+2] + acz);
/* calculate half impulse */
         dx *= qtmh;
         dy *= qtmh;
         dz *= qtmh;
/* half acceleration */
         acx = ppart[j+2*nppmx+npoff] + dx;
         acy = ppart[j+3*nppmx+npoff] + dy;
         acz = ppart[j+4*nppmx+npoff] + dz;
/* time-centered kinetic energy */
         sum1 += (acx*acx + acy*acy + acz*acz);
/* calculate cyclotron frequency */
         omxt = qtmh*ox;
         omyt = qtmh*oy;
         omzt = qtmh*oz;
/* calculate rotation matrix */
         omt = omxt*omxt + omyt*omyt + omzt*omzt;
         anorm = 2.0f/(1.0f + omt);
         omt = 0.5f*(1.0f - omt);
         rot4 = omxt*omyt;
         rot7 = omxt*omzt;
         rot8 = omyt*omzt;
         rot1 = omt + omxt*omxt;
         rot5 = omt + omyt*omyt;
         rot9 = omt + omzt*omzt;
         rot2 = omzt + rot4;
         rot4 -= omzt;
         rot3 = -omyt + rot7;
         rot7 += omyt;
         rot6 = omxt + rot8;
         rot8 -= omxt;
/* new velocity */
         vx = (rot1*acx + rot2*acy + rot3*acz)*anorm + dx;
         vy = (rot4*acx + rot5*acy + rot6*acz)*anorm + dy;
         vz = (rot7*acx + rot8*acy + rot9*acz)*anorm + dz;
/* new position */
         dx = x + vx*dtc;
         dy = y + vy*dtc;
/* find particles going out of bounds */
         mm = 0;
/* count how many particles are going in each direction in ncl   */
/* save their address and destination in ihole                   */
/* use periodic boundary conditions and check for roundoff error */
/* mm = direction particle is going                              */
         if (dx >= edgerx) {
            if (dx >= anx)
               dx -= anx;
            mm = 2;
         }
         else if (dx < edgelx) {
            if (dx < 0.0f) {
               dx += anx;
/* after wrap, a value of exactly anx would be out of range: clamp to 0 */
               if (dx < anx)
                  mm = 1;
               else
                  dx = 0.0;
            }
            else {
               mm = 1;
            }
         }
         if (dy >= edgery) {
            if (dy >= any)
               dy -= any;
            mm += 6;
         }
         else if (dy < edgely) {
            if (dy < 0.0) {
               dy += any;
               if (dy < any)
                  mm += 3;
               else
                  dy = 0.0;
            }
            else {
               mm += 3;
            }
         }
/* set new position */
         ppart[j+npoff] = dx;
         ppart[j+nppmx+npoff] = dy;
/* set new velocity */
         ppart[j+2*nppmx+npoff] = vx;
         ppart[j+3*nppmx+npoff] = vy;
         ppart[j+4*nppmx+npoff] = vz;
/* increment counters */
         if (mm > 0) {
            ncl[mm+8*k-1] += 1;
            ih += 1;
            if (ih <= ntmax) {
/* particle index stored 1-based; slot 0 of ihole holds the hole count */
               ihole[2*(ih+(ntmax+1)*k)] = j + 1;
               ihole[1+2*(ih+(ntmax+1)*k)] = mm;
            }
            else {
               nh = 1;
            }
         }
      }
      sum2 += sum1;
/* set error and end of file flag */
/* ihole overflow */
      if (nh > 0) {
/* report required size via irc; negative ih signals the error to caller */
         *irc = ih;
         ih = -ih;
      }
      ihole[2*(ntmax+1)*k] = ih;
   }
/* normalize kinetic energy */
   *ek += 0.5*sum2;
   return;
#undef N
#undef MXV
#undef MYV
}
/*--------------------------------------------------------------------*/
void cgrbppush23lt(float ppart[], float fxy[], float bxy[], int kpic[],
                   float qbm, float dt, float dtc, float ci, float *ek,
                   int idimp, int nppmx, int nx, int ny, int mx, int my,
                   int nxv, int nyv, int mx1, int mxy1, int ipbc) {
/* for 2-1/2d code, this subroutine updates particle co-ordinates and
   velocities using leap-frog scheme in time and first-order linear
   interpolation in space, for relativistic particles with magnetic field
   Using the Boris Mover.
   OpenMP version using guard cells
   data deposited in tiles
   particles stored segmented array
   131 flops/particle, 4 divides, 2 sqrts, 25 loads, 5 stores
   input: all, output: ppart, ek
   momentum equations used are:
   px(t+dt/2) = rot(1)*(px(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt) +
      rot(2)*(py(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt) +
      rot(3)*(pz(t-dt/2) + .5*(q/m)*fz(x(t),y(t))*dt) +
      .5*(q/m)*fx(x(t),y(t))*dt)
   py(t+dt/2) = rot(4)*(px(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt) +
      rot(5)*(py(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt) +
      rot(6)*(pz(t-dt/2) + .5*(q/m)*fz(x(t),y(t))*dt) +
      .5*(q/m)*fy(x(t),y(t))*dt)
   pz(t+dt/2) = rot(7)*(px(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt) +
      rot(8)*(py(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt) +
      rot(9)*(pz(t-dt/2) + .5*(q/m)*fz(x(t),y(t))*dt) +
      .5*(q/m)*fz(x(t),y(t))*dt)
   where q/m is charge/mass, and the rotation matrix is given by:
   rot[0] = (1 - (om*dt/2)**2 + 2*(omx*dt/2)**2)/(1 + (om*dt/2)**2)
   rot[1] = 2*(omz*dt/2 + (omx*dt/2)*(omy*dt/2))/(1 + (om*dt/2)**2)
   rot[2] = 2*(-omy*dt/2 + (omx*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
   rot[3] = 2*(-omz*dt/2 + (omx*dt/2)*(omy*dt/2))/(1 + (om*dt/2)**2)
   rot[4] = (1 - (om*dt/2)**2 + 2*(omy*dt/2)**2)/(1 + (om*dt/2)**2)
   rot[5] = 2*(omx*dt/2 + (omy*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
   rot[6] = 2*(omy*dt/2 + (omx*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
   rot[7] = 2*(-omx*dt/2 + (omy*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
   rot[8] = (1 - (om*dt/2)**2 + 2*(omz*dt/2)**2)/(1 + (om*dt/2)**2)
   and om**2 = omx**2 + omy**2 + omz**2
   the rotation matrix is determined by:
   omx = (q/m)*bx(x(t),y(t))*gami, omy = (q/m)*by(x(t),y(t))*gami, and
   omz = (q/m)*bz(x(t),y(t))*gami,
   where gami = 1./sqrt(1.+(px(t)*px(t)+py(t)*py(t)+pz(t)*pz(t))*ci*ci)
   position equations used are:
   x(t+dt) = x(t) + px(t+dt/2)*dtg
   y(t+dt) = y(t) + py(t+dt/2)*dtg
   where dtg = dtc/sqrt(1.+(px(t+dt/2)*px(t+dt/2)+py(t+dt/2)*py(t+dt/2)+
   pz(t+dt/2)*pz(t+dt/2))*ci*ci)
   fx(x(t),y(t)), fy(x(t),y(t)), and fz(x(t),y(t))
   bx(x(t),y(t)), by(x(t),y(t)), and bz(x(t),y(t))
   are approximated by interpolation from the nearest grid points:
   fx(x,y) = (1-dy)*((1-dx)*fx(n,m)+dx*fx(n+1,m)) + dy*((1-dx)*fx(n,m+1)
      + dx*fx(n+1,m+1))
   where n,m = leftmost grid points and dx = x-n, dy = y-m
   similarly for fy(x,y), fz(x,y), bx(x,y), by(x,y), bz(x,y)
   ppart[m][0][n] = position x of particle n in tile m
   ppart[m][1][n] = position y of particle n in tile m
   ppart[m][2][n] = x momentum of particle n in tile m
   ppart[m][3][n] = y momentum of particle n in tile m
   ppart[m][4][n] = z momentum of particle n in tile m
   fxy[k][j][0] = x component of force/charge at grid (j,k)
   fxy[k][j][1] = y component of force/charge at grid (j,k)
   fxy[k][j][2] = z component of force/charge at grid (j,k)
   that is, convolution of electric field over particle shape
   bxy[k][j][0] = x component of magnetic field at grid (j,k)
   bxy[k][j][1] = y component of magnetic field at grid (j,k)
   bxy[k][j][2] = z component of magnetic field at grid (j,k)
   that is, the convolution of magnetic field over particle shape
   kpic = number of particles per tile
   qbm = particle charge/mass ratio
   dt = time interval between successive calculations
   dtc = time interval between successive co-ordinate calculations
   ci = reciprocal of velocity of light
   kinetic energy/mass at time t is also calculated, using
   ek = gami*sum((px(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt)**2 +
        (py(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt)**2 +
        (pz(t-dt/2) + .5*(q/m)*fz(x(t),y(t))*dt)**2)/(1. + gami)
   idimp = size of phase space = 5
   nppmx = maximum number of particles in tile
   nx/ny = system length in x/y direction
   mx/my = number of grids in sorting cell in x/y
   nxv = second dimension of field arrays, must be >= nx+1
   nyv = third dimension of field arrays, must be >= ny+1
   mx1 = (system length in x direction - 1)/mx + 1
   mxy1 = mx1*my1, where my1 = (system length in y direction - 1)/my + 1
   ipbc = particle boundary condition = (0,1,2,3) =
   (none,2d periodic,2d reflecting,mixed reflecting/periodic)
   local data */
#define MXV 33
#define MYV 33
#define N 4
   int noff, moff, npoff, npp, mxv;
   int i, j, k, nn, mm, nm;
   float qtmh, ci2, edgelx, edgely, edgerx, edgery, dxp, dyp, amx, amy;
   float dx, dy, dz, ox, oy, oz, acx, acy, acz, p2, gami, qtmg, dtg;
   float omxt, omyt, omzt, omt, anorm;
   float rot1, rot2, rot3, rot4, rot5, rot6, rot7, rot8, rot9;
   float x, y, vx, vy, vz;
/* sfxy/sbxy = per-thread local copies of the tile's field data */
   float sfxy[N*MXV*MYV], sbxy[N*MXV*MYV];
/* float sfxy[N*(mx+1)*(my+1)], sbxy[N*(mx+1)*(my+1)]; */
   double sum1, sum2;
   mxv = mx + 1;
/* qtmh = (q/m)*dt/2, used for the two half accelerations of the Boris push */
   qtmh = 0.5f*qbm*dt;
   ci2 = ci*ci;
   sum2 = 0.0;
/* set boundary values */
   edgelx = 0.0f;
   edgely = 0.0f;
   edgerx = (float) nx;
   edgery = (float) ny;
   if (ipbc==2) {
      edgelx = 1.0f;
      edgely = 1.0f;
      edgerx = (float) (nx-1);
      edgery = (float) (ny-1);
   }
   else if (ipbc==3) {
      edgelx = 1.0f;
      edgerx = (float) (nx-1);
   }
/* error if local array is too small */
/* if ((mx >= MXV) || (my >= MYV)) */
/*    return;                      */
/* loop over tiles */
#pragma omp parallel for \
private(i,j,k,noff,moff,npp,npoff,nn,mm,nm,x,y,vx,vy,vz,dxp,dyp,amx, \
amy,dx,dy,dz,ox,oy,oz,acx,acy,acz,omxt,omyt,omzt,omt,anorm,rot1,rot2, \
rot3,rot4,rot5,rot6,rot7,rot8,rot9,p2,gami,qtmg,dtg,sum1,sfxy,sbxy) \
reduction(+:sum2)
   for (k = 0; k < mxy1; k++) {
/* noff/moff = global grid offsets of lower-left corner of tile k */
      noff = k/mx1;
      moff = my*noff;
      noff = mx*(k - mx1*noff);
      npp = kpic[k];
/* npoff = start of tile k's segment in the particle array */
      npoff = idimp*nppmx*k;
/* load local fields from global array */
/* nn/mm = tile size clipped at the grid edge, plus one guard cell */
      nn = (mx < nx-noff ? mx : nx-noff) + 1;
      mm = (my < ny-moff ? my : ny-moff) + 1;
      for (j = 0; j < mm; j++) {
         for (i = 0; i < nn; i++) {
            sfxy[N*(i+mxv*j)] = fxy[N*(i+noff+nxv*(j+moff))];
            sfxy[1+N*(i+mxv*j)] = fxy[1+N*(i+noff+nxv*(j+moff))];
            sfxy[2+N*(i+mxv*j)] = fxy[2+N*(i+noff+nxv*(j+moff))];
         }
      }
      for (j = 0; j < mm; j++) {
         for (i = 0; i < nn; i++) {
            sbxy[N*(i+mxv*j)] = bxy[N*(i+noff+nxv*(j+moff))];
            sbxy[1+N*(i+mxv*j)] = bxy[1+N*(i+noff+nxv*(j+moff))];
            sbxy[2+N*(i+mxv*j)] = bxy[2+N*(i+noff+nxv*(j+moff))];
         }
      }
      sum1 = 0.0;
/* loop over particles in tile */
      for (j = 0; j < npp; j++) {
/* find interpolation weights */
         x = ppart[j+npoff];
         y = ppart[j+nppmx+npoff];
         nn = x;
         mm = y;
         dxp = x - (float) nn;
         dyp = y - (float) mm;
/* nm = offset of nearest lower-left grid point in local field copies */
         nm = N*(nn - noff + mxv*(mm - moff));
         amx = 1.0f - dxp;
         amy = 1.0f - dyp;
/* find electric field */
         nn = nm;
         dx = amx*sfxy[nn];
         dy = amx*sfxy[nn+1];
         dz = amx*sfxy[nn+2];
         mm = nn + N;
         dx = amy*(dxp*sfxy[mm] + dx);
         dy = amy*(dxp*sfxy[mm+1] + dy);
         dz = amy*(dxp*sfxy[mm+2] + dz);
         nn += N*mxv;
         acx = amx*sfxy[nn];
         acy = amx*sfxy[nn+1];
         acz = amx*sfxy[nn+2];
         mm = nn + N;
         dx += dyp*(dxp*sfxy[mm] + acx);
         dy += dyp*(dxp*sfxy[mm+1] + acy);
         dz += dyp*(dxp*sfxy[mm+2] + acz);
/* find magnetic field */
         nn = nm;
         ox = amx*sbxy[nn];
         oy = amx*sbxy[nn+1];
         oz = amx*sbxy[nn+2];
         mm = nn + N;
         ox = amy*(dxp*sbxy[mm] + ox);
         oy = amy*(dxp*sbxy[mm+1] + oy);
         oz = amy*(dxp*sbxy[mm+2] + oz);
         nn += N*mxv;
         acx = amx*sbxy[nn];
         acy = amx*sbxy[nn+1];
         acz = amx*sbxy[nn+2];
         mm = nn + N;
         ox += dyp*(dxp*sbxy[mm] + acx);
         oy += dyp*(dxp*sbxy[mm+1] + acy);
         oz += dyp*(dxp*sbxy[mm+2] + acz);
/* calculate half impulse */
         dx *= qtmh;
         dy *= qtmh;
         dz *= qtmh;
/* half acceleration */
         acx = ppart[j+2*nppmx+npoff] + dx;
         acy = ppart[j+3*nppmx+npoff] + dy;
         acz = ppart[j+4*nppmx+npoff] + dz;
/* find inverse gamma */
         p2 = acx*acx + acy*acy + acz*acz;
         gami = 1.0f/sqrtf(1.0f + p2*ci2);
/* renormalize magnetic field */
         qtmg = qtmh*gami;
/* time-centered kinetic energy */
         sum1 += gami*p2/(1.0f + gami);
/* calculate cyclotron frequency */
         omxt = qtmg*ox;
         omyt = qtmg*oy;
         omzt = qtmg*oz;
/* calculate rotation matrix */
         omt = omxt*omxt + omyt*omyt + omzt*omzt;
         anorm = 2.0f/(1.0f + omt);
         omt = 0.5f*(1.0f - omt);
         rot4 = omxt*omyt;
         rot7 = omxt*omzt;
         rot8 = omyt*omzt;
         rot1 = omt + omxt*omxt;
         rot5 = omt + omyt*omyt;
         rot9 = omt + omzt*omzt;
         rot2 = omzt + rot4;
         rot4 -= omzt;
         rot3 = -omyt + rot7;
         rot7 += omyt;
         rot6 = omxt + rot8;
         rot8 -= omxt;
/* new velocity */
         vx = (rot1*acx + rot2*acy + rot3*acz)*anorm + dx;
         vy = (rot4*acx + rot5*acy + rot6*acz)*anorm + dy;
         vz = (rot7*acx + rot8*acy + rot9*acz)*anorm + dz;
/* update inverse gamma */
         p2 = vx*vx + vy*vy + vz*vz;
/* dtg = dtc/gamma converts momentum to a position increment */
         dtg = dtc/sqrtf(1.0f + p2*ci2);
/* new position */
         dx = x + vx*dtg;
         dy = y + vy*dtg;
/* reflecting boundary conditions */
         if (ipbc==2) {
            if ((dx < edgelx) || (dx >= edgerx)) {
               dx = x;
               vx = -vx;
            }
            if ((dy < edgely) || (dy >= edgery)) {
               dy = y;
               vy = -vy;
            }
         }
/* mixed reflecting/periodic boundary conditions */
         else if (ipbc==3) {
            if ((dx < edgelx) || (dx >= edgerx)) {
               dx = x;
               vx = -vx;
            }
         }
/* set new position */
         ppart[j+npoff] = dx;
         ppart[j+nppmx+npoff] = dy;
/* set new momentum */
         ppart[j+2*nppmx+npoff] = vx;
         ppart[j+3*nppmx+npoff] = vy;
         ppart[j+4*nppmx+npoff] = vz;
      }
      sum2 += sum1;
   }
/* normalize kinetic energy */
   *ek += sum2;
   return;
#undef N
#undef MXV
#undef MYV
}
/*--------------------------------------------------------------------*/
void cgrbppushf23lt(float ppart[], float fxy[], float bxy[], int kpic[],
                    int ncl[], int ihole[], float qbm, float dt,
                    float dtc, float ci, float *ek, int idimp,
                    int nppmx, int nx, int ny, int mx, int my, int nxv,
                    int nyv, int mx1, int mxy1, int ntmax, int *irc) {
/* for 2-1/2d code, this subroutine updates particle co-ordinates and
   velocities using leap-frog scheme in time and first-order linear
   interpolation in space, for relativistic particles with magnetic field
   with periodic boundary conditions.
   Using the Boris Mover.
   also determines list of particles which are leaving this tile
   OpenMP version using guard cells
   data deposited in tiles
   particles stored segmented array
   131 flops/particle, 4 divides, 2 sqrts, 25 loads, 5 stores
   input: all except ncl, ihole, irc, output: ppart, ncl, ihole, irc, ek
   momentum equations used are:
   px(t+dt/2) = rot(1)*(px(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt) +
      rot(2)*(py(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt) +
      rot(3)*(pz(t-dt/2) + .5*(q/m)*fz(x(t),y(t))*dt) +
      .5*(q/m)*fx(x(t),y(t))*dt)
   py(t+dt/2) = rot(4)*(px(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt) +
      rot(5)*(py(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt) +
      rot(6)*(pz(t-dt/2) + .5*(q/m)*fz(x(t),y(t))*dt) +
      .5*(q/m)*fy(x(t),y(t))*dt)
   pz(t+dt/2) = rot(7)*(px(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt) +
      rot(8)*(py(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt) +
      rot(9)*(pz(t-dt/2) + .5*(q/m)*fz(x(t),y(t))*dt) +
      .5*(q/m)*fz(x(t),y(t))*dt)
   where q/m is charge/mass, and the rotation matrix is given by:
   rot[0] = (1 - (om*dt/2)**2 + 2*(omx*dt/2)**2)/(1 + (om*dt/2)**2)
   rot[1] = 2*(omz*dt/2 + (omx*dt/2)*(omy*dt/2))/(1 + (om*dt/2)**2)
   rot[2] = 2*(-omy*dt/2 + (omx*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
   rot[3] = 2*(-omz*dt/2 + (omx*dt/2)*(omy*dt/2))/(1 + (om*dt/2)**2)
   rot[4] = (1 - (om*dt/2)**2 + 2*(omy*dt/2)**2)/(1 + (om*dt/2)**2)
   rot[5] = 2*(omx*dt/2 + (omy*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
   rot[6] = 2*(omy*dt/2 + (omx*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
   rot[7] = 2*(-omx*dt/2 + (omy*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
   rot[8] = (1 - (om*dt/2)**2 + 2*(omz*dt/2)**2)/(1 + (om*dt/2)**2)
   and om**2 = omx**2 + omy**2 + omz**2
   the rotation matrix is determined by:
   omx = (q/m)*bx(x(t),y(t))*gami, omy = (q/m)*by(x(t),y(t))*gami, and
   omz = (q/m)*bz(x(t),y(t))*gami,
   where gami = 1./sqrt(1.+(px(t)*px(t)+py(t)*py(t)+pz(t)*pz(t))*ci*ci)
   position equations used are:
   x(t+dt) = x(t) + px(t+dt/2)*dtg
   y(t+dt) = y(t) + py(t+dt/2)*dtg
   where dtg = dtc/sqrt(1.+(px(t+dt/2)*px(t+dt/2)+py(t+dt/2)*py(t+dt/2)+
   pz(t+dt/2)*pz(t+dt/2))*ci*ci)
   fx(x(t),y(t)), fy(x(t),y(t)), and fz(x(t),y(t))
   bx(x(t),y(t)), by(x(t),y(t)), and bz(x(t),y(t))
   are approximated by interpolation from the nearest grid points:
   fx(x,y) = (1-dy)*((1-dx)*fx(n,m)+dx*fx(n+1,m)) + dy*((1-dx)*fx(n,m+1)
      + dx*fx(n+1,m+1))
   where n,m = leftmost grid points and dx = x-n, dy = y-m
   similarly for fy(x,y), fz(x,y), bx(x,y), by(x,y), bz(x,y)
   ppart[m][0][n] = position x of particle n in tile m
   ppart[m][1][n] = position y of particle n in tile m
   ppart[m][2][n] = x momentum of particle n in tile m
   ppart[m][3][n] = y momentum of particle n in tile m
   ppart[m][4][n] = z momentum of particle n in tile m
   fxy[k][j][0] = x component of force/charge at grid (j,k)
   fxy[k][j][1] = y component of force/charge at grid (j,k)
   fxy[k][j][2] = z component of force/charge at grid (j,k)
   that is, convolution of electric field over particle shape
   bxy[k][j][0] = x component of magnetic field at grid (j,k)
   bxy[k][j][1] = y component of magnetic field at grid (j,k)
   bxy[k][j][2] = z component of magnetic field at grid (j,k)
   that is, the convolution of magnetic field over particle shape
   kpic[k] = number of particles in tile k
   ncl[k][i] = number of particles going to destination i, tile k
   ihole[k][:][0] = location of hole in array left by departing particle
   ihole[k][:][1] = destination of particle leaving hole
   ihole[k][0][0] = ih, number of holes left (error, if negative)
   qbm = particle charge/mass ratio
   dt = time interval between successive calculations
   dtc = time interval between successive co-ordinate calculations
   ci = reciprocal of velocity of light
   kinetic energy/mass at time t is also calculated, using
   ek = gami*sum((px(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt)**2 +
        (py(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt)**2 +
        (pz(t-dt/2) + .5*(q/m)*fz(x(t),y(t))*dt)**2)/(1. + gami)
   idimp = size of phase space = 5
   nppmx = maximum number of particles in tile
   nx/ny = system length in x/y direction
   mx/my = number of grids in sorting cell in x/y
   nxv = second dimension of field arrays, must be >= nx+1
   nyv = third dimension of field arrays, must be >= ny+1
   mx1 = (system length in x direction - 1)/mx + 1
   mxy1 = mx1*my1, where my1 = (system length in y direction - 1)/my + 1
   ntmax = size of hole array for particles leaving tiles
   irc = maximum overflow, returned only if error occurs, when irc > 0
   optimized version
   local data */
#define MXV 33
#define MYV 33
#define N 4
   int noff, moff, npoff, npp, mxv;
   int i, j, k, ih, nh, nn, mm, nm;
   float qtmh, ci2, dxp, dyp, amx, amy, dx, dy, dz, ox, oy, oz;
   float acx, acy, acz, p2, gami, qtmg, dtg, omxt, omyt, omzt, omt;
   float anorm, rot1, rot2, rot3, rot4, rot5, rot6, rot7, rot8, rot9;
   float anx, any, edgelx, edgely, edgerx, edgery;
   float x, y, vx, vy, vz;
/* sfxy/sbxy = per-thread local copies of the tile's field data */
   float sfxy[N*MXV*MYV], sbxy[N*MXV*MYV];
/* float sfxy[N*(mx+1)*(my+1)], sbxy[N*(mx+1)*(my+1)]; */
   double sum1, sum2;
   mxv = mx + 1;
/* qtmh = (q/m)*dt/2, used for the two half accelerations of the Boris push */
   qtmh = 0.5f*qbm*dt;
   ci2 = ci*ci;
   anx = (float) nx;
   any = (float) ny;
   sum2 = 0.0;
/* error if local array is too small */
/* if ((mx >= MXV) || (my >= MYV)) */
/*    return;                      */
/* loop over tiles */
#pragma omp parallel for \
private(i,j,k,noff,moff,npp,npoff,nn,mm,nm,ih,nh,x,y,vx,vy,vz,dxp,dyp, \
amx,amy,dx,dy,dz,ox,oy,oz,acx,acy,acz,omxt,omyt,omzt,omt,anorm,rot1, \
rot2,rot3,rot4,rot5,rot6,rot7,rot8,rot9,edgelx,edgely,edgerx,edgery,p2, \
gami,qtmg,dtg,sum1,sfxy,sbxy) \
reduction(+:sum2)
   for (k = 0; k < mxy1; k++) {
/* noff/moff = global grid offsets of lower-left corner of tile k */
      noff = k/mx1;
      moff = my*noff;
      noff = mx*(k - mx1*noff);
      npp = kpic[k];
/* npoff = start of tile k's segment in the particle array */
      npoff = idimp*nppmx*k;
/* nn/mm = interior tile size, clipped at the global grid boundary */
      nn = nx - noff;
      nn = mx < nn ? mx : nn;
      mm = ny - moff;
      mm = my < mm ? my : mm;
/* tile edges in particle coordinates, used to detect departing particles */
      edgelx = noff;
      edgerx = noff + nn;
      edgely = moff;
      edgery = moff + mm;
/* ih = number of holes found so far, nh = set if ihole overflows */
      ih = 0;
      nh = 0;
/* add one guard cell in each direction for linear interpolation */
      nn += 1;
      mm += 1;
/* load local fields from global array */
      for (j = 0; j < mm; j++) {
         for (i = 0; i < nn; i++) {
            sfxy[N*(i+mxv*j)] = fxy[N*(i+noff+nxv*(j+moff))];
            sfxy[1+N*(i+mxv*j)] = fxy[1+N*(i+noff+nxv*(j+moff))];
            sfxy[2+N*(i+mxv*j)] = fxy[2+N*(i+noff+nxv*(j+moff))];
         }
      }
      for (j = 0; j < mm; j++) {
         for (i = 0; i < nn; i++) {
            sbxy[N*(i+mxv*j)] = bxy[N*(i+noff+nxv*(j+moff))];
            sbxy[1+N*(i+mxv*j)] = bxy[1+N*(i+noff+nxv*(j+moff))];
            sbxy[2+N*(i+mxv*j)] = bxy[2+N*(i+noff+nxv*(j+moff))];
         }
      }
/* clear counters */
      for (j = 0; j < 8; j++) {
         ncl[j+8*k] = 0;
      }
      sum1 = 0.0;
/* loop over particles in tile */
      for (j = 0; j < npp; j++) {
/* find interpolation weights */
         x = ppart[j+npoff];
         y = ppart[j+nppmx+npoff];
         nn = x;
         mm = y;
         dxp = x - (float) nn;
         dyp = y - (float) mm;
/* nm = offset of nearest lower-left grid point in local field copies */
         nm = N*(nn - noff + mxv*(mm - moff));
         amx = 1.0f - dxp;
         amy = 1.0f - dyp;
/* find electric field */
         nn = nm;
         dx = amx*sfxy[nn];
         dy = amx*sfxy[nn+1];
         dz = amx*sfxy[nn+2];
         mm = nn + N;
         dx = amy*(dxp*sfxy[mm] + dx);
         dy = amy*(dxp*sfxy[mm+1] + dy);
         dz = amy*(dxp*sfxy[mm+2] + dz);
         nn += N*mxv;
         acx = amx*sfxy[nn];
         acy = amx*sfxy[nn+1];
         acz = amx*sfxy[nn+2];
         mm = nn + N;
         dx += dyp*(dxp*sfxy[mm] + acx);
         dy += dyp*(dxp*sfxy[mm+1] + acy);
         dz += dyp*(dxp*sfxy[mm+2] + acz);
/* find magnetic field */
         nn = nm;
         ox = amx*sbxy[nn];
         oy = amx*sbxy[nn+1];
         oz = amx*sbxy[nn+2];
         mm = nn + N;
         ox = amy*(dxp*sbxy[mm] + ox);
         oy = amy*(dxp*sbxy[mm+1] + oy);
         oz = amy*(dxp*sbxy[mm+2] + oz);
         nn += N*mxv;
         acx = amx*sbxy[nn];
         acy = amx*sbxy[nn+1];
         acz = amx*sbxy[nn+2];
         mm = nn + N;
         ox += dyp*(dxp*sbxy[mm] + acx);
         oy += dyp*(dxp*sbxy[mm+1] + acy);
         oz += dyp*(dxp*sbxy[mm+2] + acz);
/* calculate half impulse */
         dx *= qtmh;
         dy *= qtmh;
         dz *= qtmh;
/* half acceleration */
         acx = ppart[j+2*nppmx+npoff] + dx;
         acy = ppart[j+3*nppmx+npoff] + dy;
         acz = ppart[j+4*nppmx+npoff] + dz;
/* find inverse gamma */
         p2 = acx*acx + acy*acy + acz*acz;
         gami = 1.0f/sqrtf(1.0f + p2*ci2);
/* renormalize magnetic field */
         qtmg = qtmh*gami;
/* time-centered kinetic energy */
         sum1 += gami*p2/(1.0f + gami);
/* calculate cyclotron frequency */
         omxt = qtmg*ox;
         omyt = qtmg*oy;
         omzt = qtmg*oz;
/* calculate rotation matrix */
         omt = omxt*omxt + omyt*omyt + omzt*omzt;
         anorm = 2.0f/(1.0f + omt);
         omt = 0.5f*(1.0f - omt);
         rot4 = omxt*omyt;
         rot7 = omxt*omzt;
         rot8 = omyt*omzt;
         rot1 = omt + omxt*omxt;
         rot5 = omt + omyt*omyt;
         rot9 = omt + omzt*omzt;
         rot2 = omzt + rot4;
         rot4 -= omzt;
         rot3 = -omyt + rot7;
         rot7 += omyt;
         rot6 = omxt + rot8;
         rot8 -= omxt;
/* new momentum */
         vx = (rot1*acx + rot2*acy + rot3*acz)*anorm + dx;
         vy = (rot4*acx + rot5*acy + rot6*acz)*anorm + dy;
         vz = (rot7*acx + rot8*acy + rot9*acz)*anorm + dz;
/* update inverse gamma */
         p2 = vx*vx + vy*vy + vz*vz;
/* dtg = dtc/gamma converts momentum to a position increment */
         dtg = dtc/sqrtf(1.0f + p2*ci2);
/* new position */
         dx = x + vx*dtg;
         dy = y + vy*dtg;
/* find particles going out of bounds */
         mm = 0;
/* count how many particles are going in each direction in ncl   */
/* save their address and destination in ihole                   */
/* use periodic boundary conditions and check for roundoff error */
/* mm = direction particle is going                              */
         if (dx >= edgerx) {
            if (dx >= anx)
               dx -= anx;
            mm = 2;
         }
         else if (dx < edgelx) {
            if (dx < 0.0f) {
               dx += anx;
/* after wrap, a value of exactly anx would be out of range: clamp to 0 */
               if (dx < anx)
                  mm = 1;
               else
                  dx = 0.0;
            }
            else {
               mm = 1;
            }
         }
         if (dy >= edgery) {
            if (dy >= any)
               dy -= any;
            mm += 6;
         }
         else if (dy < edgely) {
            if (dy < 0.0) {
               dy += any;
               if (dy < any)
                  mm += 3;
               else
                  dy = 0.0;
            }
            else {
               mm += 3;
            }
         }
/* set new position */
         ppart[j+npoff] = dx;
         ppart[j+nppmx+npoff] = dy;
/* set new momentum */
         ppart[j+2*nppmx+npoff] = vx;
         ppart[j+3*nppmx+npoff] = vy;
         ppart[j+4*nppmx+npoff] = vz;
/* increment counters */
         if (mm > 0) {
            ncl[mm+8*k-1] += 1;
            ih += 1;
            if (ih <= ntmax) {
/* particle index stored 1-based; slot 0 of ihole holds the hole count */
               ihole[2*(ih+(ntmax+1)*k)] = j + 1;
               ihole[1+2*(ih+(ntmax+1)*k)] = mm;
            }
            else {
               nh = 1;
            }
         }
      }
      sum2 += sum1;
/* set error and end of file flag */
/* ihole overflow */
      if (nh > 0) {
/* report required size via irc; negative ih signals the error to caller */
         *irc = ih;
         ih = -ih;
      }
      ihole[2*(ntmax+1)*k] = ih;
   }
/* normalize kinetic energy */
   *ek += sum2;
   return;
#undef N
#undef MXV
#undef MYV
}
/*--------------------------------------------------------------------*/
void cvgbppush23lt(float ppart[], float fxy[], float bxy[], int kpic[],
float qbm, float dt, float dtc, float *ek, int idimp,
int nppmx, int nx, int ny, int mx, int my, int nxv,
int nyv, int mx1, int mxy1, int ipbc) {
/* for 2-1/2d code, this subroutine updates particle co-ordinates and
velocities using leap-frog scheme in time and first-order linear
interpolation in space, with magnetic field. Using the Boris Mover.
vectorizable/OpenMP version using guard cells
data deposited in tiles
particles stored segmented array
119 flops/particle, 1 divide, 29 loads, 5 stores
input: all, output: ppart, ek
velocity equations used are:
vx(t+dt/2) = rot(1)*(vx(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt) +
rot(2)*(vy(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt) +
rot(3)*(vz(t-dt/2) + .5*(q/m)*fz(x(t),y(t))*dt) +
.5*(q/m)*fx(x(t),y(t))*dt)
vy(t+dt/2) = rot(4)*(vx(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt) +
rot(5)*(vy(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt) +
rot(6)*(vz(t-dt/2) + .5*(q/m)*fz(x(t),y(t))*dt) +
.5*(q/m)*fy(x(t),y(t))*dt)
vz(t+dt/2) = rot(7)*(vx(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt) +
rot(8)*(vy(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt) +
rot(9)*(vz(t-dt/2) + .5*(q/m)*fz(x(t),y(t))*dt) +
.5*(q/m)*fz(x(t),y(t))*dt)
where q/m is charge/mass, and the rotation matrix is given by:
rot[0] = (1 - (om*dt/2)**2 + 2*(omx*dt/2)**2)/(1 + (om*dt/2)**2)
rot[1] = 2*(omz*dt/2 + (omx*dt/2)*(omy*dt/2))/(1 + (om*dt/2)**2)
rot[2] = 2*(-omy*dt/2 + (omx*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
rot[3] = 2*(-omz*dt/2 + (omx*dt/2)*(omy*dt/2))/(1 + (om*dt/2)**2)
rot[4] = (1 - (om*dt/2)**2 + 2*(omy*dt/2)**2)/(1 + (om*dt/2)**2)
rot[5] = 2*(omx*dt/2 + (omy*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
rot[6] = 2*(omy*dt/2 + (omx*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
rot[7] = 2*(-omx*dt/2 + (omy*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
rot[8] = (1 - (om*dt/2)**2 + 2*(omz*dt/2)**2)/(1 + (om*dt/2)**2)
and om**2 = omx**2 + omy**2 + omz**2
the rotation matrix is determined by:
omx = (q/m)*bx(x(t),y(t)), omy = (q/m)*by(x(t),y(t)), and
omz = (q/m)*bz(x(t),y(t)).
position equations used are:
x(t+dt)=x(t) + vx(t+dt/2)*dt
y(t+dt)=y(t) + vy(t+dt/2)*dt
fx(x(t),y(t)), fy(x(t),y(t)), and fz(x(t),y(t))
bx(x(t),y(t)), by(x(t),y(t)), and bz(x(t),y(t))
are approximated by interpolation from the nearest grid points:
fx(x,y) = (1-dy)*((1-dx)*fx(n,m)+dx*fx(n+1,m)) + dy*((1-dx)*fx(n,m+1)
+ dx*fx(n+1,m+1))
where n,m = leftmost grid points and dx = x-n, dy = y-m
similarly for fy(x,y), fz(x,y), bx(x,y), by(x,y), bz(x,y)
ppart[m][0][n] = position x of particle n in tile m
ppart[m][1][n] = position y of particle n in tile m
ppart[m][2][n] = x velocity of particle n in tile m
ppart[m][3][n] = y velocity of particle n in tile m
ppart[m][4][n] = z velocity of particle n in tile m
fxy[k][j][0] = x component of force/charge at grid (j,k)
fxy[k][j][1] = y component of force/charge at grid (j,k)
fxy[k][j][2] = z component of force/charge at grid (j,k)
that is, convolution of electric field over particle shape
bxy[k][j][0] = x component of magnetic field at grid (j,k)
bxy[k][j][1] = y component of magnetic field at grid (j,k)
bxy[k][j][2] = z component of magnetic field at grid (j,k)
that is, the convolution of magnetic field over particle shape
kpic = number of particles per tile
qbm = particle charge/mass ratio
dt = time interval between successive calculations
dtc = time interval between successive co-ordinate calculations
kinetic energy/mass at time t is also calculated, using
ek = .5*sum((vx(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt)**2 +
(vy(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt)**2 +
(vz(t-dt/2) + .5*(q/m)*fz(x(t),y(t))*dt)**2)
idimp = size of phase space = 5
nppmx = maximum number of particles in tile
nx/ny = system length in x/y direction
mx/my = number of grids in sorting cell in x/y
nxv = second dimension of field arrays, must be >= nx+1
nyv = third dimension of field arrays, must be >= ny+1
mx1 = (system length in x direction - 1)/mx + 1
mxy1 = mx1*my1, where my1 = (system length in y direction - 1)/my + 1
ipbc = particle boundary condition = (0,1,2,3) =
(none,2d periodic,2d reflecting,mixed reflecting/periodic)
local data */
/* MXV/MYV = bound on local tile size (mx+1/my+1 must not exceed them), */
/* NPBLK = particles per vector block, LVECT = interpolation points per */
/* particle, N = padded number of field components per grid point */
#define MXV 33
#define MYV 33
#define NPBLK 32
#define LVECT 4
#define N 4
int noff, moff, npoff, npp, ipp, joff, nps;
int i, j, k, m, nn, mm, nm, lxv;
float qtmh, edgelx, edgely, edgerx, edgery, dxp, dyp, amx, amy;
float dx, dy, dz, ox, oy, oz, acx, acy, acz, omxt, omyt, omzt, omt;
float anorm, rot1, rot2, rot3, rot4, rot5, rot6, rot7, rot8, rot9;
float x, y, vx, vy, vz;
float sfxy[N*MXV*MYV], sbxy[N*MXV*MYV];
/* float sfxy[N*(mx+1)*(my+1)], sbxy[N*(mx+1)*(my+1)]; */
/* scratch arrays */
int n[NPBLK];
float s1[NPBLK*LVECT], s2[NPBLK*LVECT], t[NPBLK*2];
double sum1, sum2;
lxv = mx + 1;
qtmh = 0.5f*qbm*dt;
sum2 = 0.0;
/* set boundary values */
edgelx = 0.0f;
edgely = 0.0f;
edgerx = (float) nx;
edgery = (float) ny;
if (ipbc==2) {
edgelx = 1.0f;
edgely = 1.0f;
edgerx = (float) (nx-1);
edgery = (float) (ny-1);
}
else if (ipbc==3) {
edgelx = 1.0f;
edgerx = (float) (nx-1);
}
/* error if local array is too small */
/* if ((mx >= MXV) || (my >= MYV)) */
/* return; */
/* loop over tiles */
#pragma omp parallel for \
private(i,j,k,m,noff,moff,npp,npoff,ipp,joff,nps,nn,mm,nm,x,y,vx,vy,vz, \
dxp,dyp,amx,amy,dx,dy,dz,ox,oy,oz,acx,acy,acz,omxt,omyt,omzt,omt,anorm, \
rot1,rot2,rot3,rot4,rot5,rot6,rot7,rot8,rot9,sum1,sfxy,sbxy,n,s1,s2,t) \
reduction(+:sum2)
for (k = 0; k < mxy1; k++) {
/* noff, moff = global grid offsets of tile k */
/* (noff temporarily holds the tile's row index before reuse) */
noff = k/mx1;
moff = my*noff;
noff = mx*(k - mx1*noff);
npp = kpic[k];
npoff = idimp*nppmx*k;
/* load local fields from global array */
/* nn, mm = tile extent in x, y clipped at the system edge, plus one */
/* guard point for the interpolation stencil */
nn = (mx < nx-noff ? mx : nx-noff) + 1;
mm = (my < ny-moff ? my : ny-moff) + 1;
for (j = 0; j < mm; j++) {
for (i = 0; i < nn; i++) {
sfxy[N*(i+lxv*j)] = fxy[N*(i+noff+nxv*(j+moff))];
sfxy[1+N*(i+lxv*j)] = fxy[1+N*(i+noff+nxv*(j+moff))];
sfxy[2+N*(i+lxv*j)] = fxy[2+N*(i+noff+nxv*(j+moff))];
}
}
for (j = 0; j < mm; j++) {
for (i = 0; i < nn; i++) {
sbxy[N*(i+lxv*j)] = bxy[N*(i+noff+nxv*(j+moff))];
sbxy[1+N*(i+lxv*j)] = bxy[1+N*(i+noff+nxv*(j+moff))];
sbxy[2+N*(i+lxv*j)] = bxy[2+N*(i+noff+nxv*(j+moff))];
}
}
sum1 = 0.0;
/* ipp = number of full particle blocks of size NPBLK in this tile */
ipp = npp/NPBLK;
/* outer loop over number of full blocks */
for (m = 0; m < ipp; m++) {
joff = NPBLK*m;
/* inner loop over particles in block */
for (j = 0; j < NPBLK; j++) {
/* find interpolation weights */
x = ppart[j+joff+npoff];
y = ppart[j+joff+nppmx+npoff];
nn = x;
mm = y;
dxp = x - (float) nn;
dyp = y - (float) mm;
n[j] = N*(nn - noff + lxv*(mm - moff));
amx = 1.0f - dxp;
amy = 1.0f - dyp;
s1[j] = amx*amy;
s1[j+NPBLK] = dxp*amy;
s1[j+2*NPBLK] = amx*dyp;
s1[j+3*NPBLK] = dxp*dyp;
t[j] = x;
t[j+NPBLK] = y;
}
/* find acceleration */
for (j = 0; j < NPBLK; j++) {
nn = n[j];
/* mm shifts the stencil to the next grid row: for i >= 2, */
/* N*i+mm = nn + N*lxv + N*(i-2) */
mm = nn + N*(lxv - 2);
dx = 0.0f;
dy = 0.0f;
dz = 0.0f;
ox = 0.0f;
oy = 0.0f;
oz = 0.0f;
/* ivdep asserts no loop-carried dependencies so the gather loop */
/* can be vectorized */
#pragma ivdep
for (i = 0; i < LVECT; i++) {
if (i > 1)
nn = mm;
dx += sfxy[N*i+nn]*s1[j+NPBLK*i];
dy += sfxy[1+N*i+nn]*s1[j+NPBLK*i];
dz += sfxy[2+N*i+nn]*s1[j+NPBLK*i];
ox += sbxy[N*i+nn]*s1[j+NPBLK*i];
oy += sbxy[1+N*i+nn]*s1[j+NPBLK*i];
oz += sbxy[2+N*i+nn]*s1[j+NPBLK*i];
}
s1[j] = dx;
s1[j+NPBLK] = dy;
s1[j+2*NPBLK] = dz;
s2[j] = ox;
s2[j+NPBLK] = oy;
s2[j+2*NPBLK] = oz;
}
/* new velocity */
for (j = 0; j < NPBLK; j++) {
x = t[j];
y = t[j+NPBLK];
/* calculate half impulse */
dx = qtmh*s1[j];
dy = qtmh*s1[j+NPBLK];
dz = qtmh*s1[j+2*NPBLK];
/* half acceleration */
acx = ppart[j+joff+2*nppmx+npoff] + dx;
acy = ppart[j+joff+3*nppmx+npoff] + dy;
acz = ppart[j+joff+4*nppmx+npoff] + dz;
/* time-centered kinetic energy */
sum1 += (acx*acx + acy*acy + acz*acz);
/* calculate cyclotron frequency */
omxt = qtmh*s2[j];
omyt = qtmh*s2[j+NPBLK];
omzt = qtmh*s2[j+2*NPBLK];
/* calculate rotation matrix */
/* NOTE: the in-place updates below (rot4 -= omzt, etc.) depend on */
/* this exact statement order */
omt = omxt*omxt + omyt*omyt + omzt*omzt;
anorm = 2.0f/(1.0f + omt);
omt = 0.5f*(1.0f - omt);
rot4 = omxt*omyt;
rot7 = omxt*omzt;
rot8 = omyt*omzt;
rot1 = omt + omxt*omxt;
rot5 = omt + omyt*omyt;
rot9 = omt + omzt*omzt;
rot2 = omzt + rot4;
rot4 -= omzt;
rot3 = -omyt + rot7;
rot7 += omyt;
rot6 = omxt + rot8;
rot8 -= omxt;
/* new velocity */
vx = (rot1*acx + rot2*acy + rot3*acz)*anorm + dx;
vy = (rot4*acx + rot5*acy + rot6*acz)*anorm + dy;
vz = (rot7*acx + rot8*acy + rot9*acz)*anorm + dz;
/* new position */
s1[j] = x + vx*dtc;
s1[j+NPBLK] = y + vy*dtc;
s2[j] = vx;
s2[j+NPBLK] = vy;
s2[j+2*NPBLK] = vz;
}
/* check boundary conditions */
/* novector: branchy boundary logic is handled as a scalar loop */
#pragma novector
for (j = 0; j < NPBLK; j++) {
dx = s1[j];
dy = s1[j+NPBLK];
vx = s2[j];
vy = s2[j+NPBLK];
vz = s2[j+2*NPBLK];
/* reflecting boundary conditions */
if (ipbc==2) {
if ((dx < edgelx) || (dx >= edgerx)) {
dx = t[j];
vx = -vx;
}
if ((dy < edgely) || (dy >= edgery)) {
dy = t[j+NPBLK];
vy = -vy;
}
}
/* mixed reflecting/periodic boundary conditions */
else if (ipbc==3) {
if ((dx < edgelx) || (dx >= edgerx)) {
dx = t[j];
vx = -vx;
}
}
/* set new position */
ppart[j+joff+npoff] = dx;
ppart[j+joff+nppmx+npoff] = dy;
/* set new velocity */
ppart[j+joff+2*nppmx+npoff] = vx;
ppart[j+joff+3*nppmx+npoff] = vy;
ppart[j+joff+4*nppmx+npoff] = vz;
}
}
nps = NPBLK*ipp;
/* loop over remaining particles */
for (j = nps; j < npp; j++) {
/* find interpolation weights */
x = ppart[j+npoff];
y = ppart[j+nppmx+npoff];
nn = x;
mm = y;
dxp = x - (float) nn;
dyp = y - (float) mm;
nm = N*(nn - noff + lxv*(mm - moff));
amx = 1.0f - dxp;
amy = 1.0f - dyp;
/* find electric field */
nn = nm;
dx = amx*sfxy[nn];
dy = amx*sfxy[nn+1];
dz = amx*sfxy[nn+2];
mm = nn + N;
dx = amy*(dxp*sfxy[mm] + dx);
dy = amy*(dxp*sfxy[mm+1] + dy);
dz = amy*(dxp*sfxy[mm+2] + dz);
nn += N*lxv;
acx = amx*sfxy[nn];
acy = amx*sfxy[nn+1];
acz = amx*sfxy[nn+2];
mm = nn + N;
dx += dyp*(dxp*sfxy[mm] + acx);
dy += dyp*(dxp*sfxy[mm+1] + acy);
dz += dyp*(dxp*sfxy[mm+2] + acz);
/* find magnetic field */
nn = nm;
ox = amx*sbxy[nn];
oy = amx*sbxy[nn+1];
oz = amx*sbxy[nn+2];
mm = nn + N;
ox = amy*(dxp*sbxy[mm] + ox);
oy = amy*(dxp*sbxy[mm+1] + oy);
oz = amy*(dxp*sbxy[mm+2] + oz);
nn += N*lxv;
acx = amx*sbxy[nn];
acy = amx*sbxy[nn+1];
acz = amx*sbxy[nn+2];
mm = nn + N;
ox += dyp*(dxp*sbxy[mm] + acx);
oy += dyp*(dxp*sbxy[mm+1] + acy);
oz += dyp*(dxp*sbxy[mm+2] + acz);
/* calculate half impulse */
dx *= qtmh;
dy *= qtmh;
dz *= qtmh;
/* half acceleration */
acx = ppart[j+2*nppmx+npoff] + dx;
acy = ppart[j+3*nppmx+npoff] + dy;
acz = ppart[j+4*nppmx+npoff] + dz;
/* time-centered kinetic energy */
sum1 += (acx*acx + acy*acy + acz*acz);
/* calculate cyclotron frequency */
omxt = qtmh*ox;
omyt = qtmh*oy;
omzt = qtmh*oz;
/* calculate rotation matrix */
omt = omxt*omxt + omyt*omyt + omzt*omzt;
anorm = 2.0f/(1.0f + omt);
omt = 0.5f*(1.0f - omt);
rot4 = omxt*omyt;
rot7 = omxt*omzt;
rot8 = omyt*omzt;
rot1 = omt + omxt*omxt;
rot5 = omt + omyt*omyt;
rot9 = omt + omzt*omzt;
rot2 = omzt + rot4;
rot4 -= omzt;
rot3 = -omyt + rot7;
rot7 += omyt;
rot6 = omxt + rot8;
rot8 -= omxt;
/* new velocity */
vx = (rot1*acx + rot2*acy + rot3*acz)*anorm + dx;
vy = (rot4*acx + rot5*acy + rot6*acz)*anorm + dy;
vz = (rot7*acx + rot8*acy + rot9*acz)*anorm + dz;
/* new position */
dx = x + vx*dtc;
dy = y + vy*dtc;
/* reflecting boundary conditions */
if (ipbc==2) {
if ((dx < edgelx) || (dx >= edgerx)) {
dx = x;
vx = -vx;
}
if ((dy < edgely) || (dy >= edgery)) {
dy = y;
vy = -vy;
}
}
/* mixed reflecting/periodic boundary conditions */
else if (ipbc==3) {
if ((dx < edgelx) || (dx >= edgerx)) {
dx = x;
vx = -vx;
}
}
/* set new position */
ppart[j+npoff] = dx;
ppart[j+nppmx+npoff] = dy;
/* set new velocity */
ppart[j+2*nppmx+npoff] = vx;
ppart[j+3*nppmx+npoff] = vy;
ppart[j+4*nppmx+npoff] = vz;
}
sum2 += sum1;
}
/* normalize kinetic energy */
*ek += 0.5*sum2;
return;
#undef N
#undef LVECT
#undef NPBLK
#undef MXV
#undef MYV
}
/*--------------------------------------------------------------------*/
void cvgbppushf23lt(float ppart[], float fxy[], float bxy[], int kpic[],
int ncl[], int ihole[], float qbm, float dt,
float dtc, float *ek, int idimp, int nppmx, int nx,
int ny, int mx, int my, int nxv, int nyv, int mx1,
int mxy1, int ntmax, int *irc) {
/* for 2-1/2d code, this subroutine updates particle co-ordinates and
velocities using leap-frog scheme in time and first-order linear
interpolation in space, with magnetic field. Using the Boris Mover.
with periodic boundary conditions.
also determines list of particles which are leaving this tile
vectorizable/OpenMP version using guard cells
data deposited in tiles
particles stored segmented array
119 flops/particle, 1 divide, 29 loads, 5 stores
input: all except ncl, ihole, irc, output: ppart, ncl, ihole, irc, ek
velocity equations used are:
vx(t+dt/2) = rot(1)*(vx(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt) +
rot(2)*(vy(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt) +
rot(3)*(vz(t-dt/2) + .5*(q/m)*fz(x(t),y(t))*dt) +
.5*(q/m)*fx(x(t),y(t))*dt)
vy(t+dt/2) = rot(4)*(vx(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt) +
rot(5)*(vy(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt) +
rot(6)*(vz(t-dt/2) + .5*(q/m)*fz(x(t),y(t))*dt) +
.5*(q/m)*fy(x(t),y(t))*dt)
vz(t+dt/2) = rot(7)*(vx(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt) +
rot(8)*(vy(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt) +
rot(9)*(vz(t-dt/2) + .5*(q/m)*fz(x(t),y(t))*dt) +
.5*(q/m)*fz(x(t),y(t))*dt)
where q/m is charge/mass, and the rotation matrix is given by:
rot[0] = (1 - (om*dt/2)**2 + 2*(omx*dt/2)**2)/(1 + (om*dt/2)**2)
rot[1] = 2*(omz*dt/2 + (omx*dt/2)*(omy*dt/2))/(1 + (om*dt/2)**2)
rot[2] = 2*(-omy*dt/2 + (omx*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
rot[3] = 2*(-omz*dt/2 + (omx*dt/2)*(omy*dt/2))/(1 + (om*dt/2)**2)
rot[4] = (1 - (om*dt/2)**2 + 2*(omy*dt/2)**2)/(1 + (om*dt/2)**2)
rot[5] = 2*(omx*dt/2 + (omy*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
rot[6] = 2*(omy*dt/2 + (omx*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
rot[7] = 2*(-omx*dt/2 + (omy*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
rot[8] = (1 - (om*dt/2)**2 + 2*(omz*dt/2)**2)/(1 + (om*dt/2)**2)
and om**2 = omx**2 + omy**2 + omz**2
the rotation matrix is determined by:
omx = (q/m)*bx(x(t),y(t)), omy = (q/m)*by(x(t),y(t)), and
omz = (q/m)*bz(x(t),y(t)).
position equations used are:
x(t+dt)=x(t) + vx(t+dt/2)*dt
y(t+dt)=y(t) + vy(t+dt/2)*dt
fx(x(t),y(t)), fy(x(t),y(t)), and fz(x(t),y(t))
bx(x(t),y(t)), by(x(t),y(t)), and bz(x(t),y(t))
are approximated by interpolation from the nearest grid points:
fx(x,y) = (1-dy)*((1-dx)*fx(n,m)+dx*fx(n+1,m)) + dy*((1-dx)*fx(n,m+1)
+ dx*fx(n+1,m+1))
where n,m = leftmost grid points and dx = x-n, dy = y-m
similarly for fy(x,y), fz(x,y), bx(x,y), by(x,y), bz(x,y)
ppart[m][0][n] = position x of particle n in tile m
ppart[m][1][n] = position y of particle n in tile m
ppart[m][2][n] = x velocity of particle n in tile m
ppart[m][3][n] = y velocity of particle n in tile m
ppart[m][4][n] = z velocity of particle n in tile m
fxy[k][j][0] = x component of force/charge at grid (j,k)
fxy[k][j][1] = y component of force/charge at grid (j,k)
fxy[k][j][2] = z component of force/charge at grid (j,k)
that is, convolution of electric field over particle shape
bxy[k][j][0] = x component of magnetic field at grid (j,k)
bxy[k][j][1] = y component of magnetic field at grid (j,k)
bxy[k][j][2] = z component of magnetic field at grid (j,k)
that is, the convolution of magnetic field over particle shape
kpic[k] = number of particles in tile k
ncl[k][i] = number of particles going to destination i, tile k
ihole[k][:][0] = location of hole in array left by departing particle
ihole[k][:][1] = destination of particle leaving hole
ihole[k][0][0] = ih, number of holes left (error, if negative)
qbm = particle charge/mass ratio
dt = time interval between successive calculations
dtc = time interval between successive co-ordinate calculations
kinetic energy/mass at time t is also calculated, using
ek = .5*sum((vx(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt)**2 +
(vy(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt)**2 +
(vz(t-dt/2) + .5*(q/m)*fz(x(t),y(t))*dt)**2)
idimp = size of phase space = 5
nppmx = maximum number of particles in tile
nx/ny = system length in x/y direction
mx/my = number of grids in sorting cell in x/y
nxv = second dimension of field arrays, must be >= nx+1
nyv = third dimension of field arrays, must be >= ny+1
mx1 = (system length in x direction - 1)/mx + 1
mxy1 = mx1*my1, where my1 = (system length in y direction - 1)/my + 1
ntmax = size of hole array for particles leaving tiles
irc = maximum overflow, returned only if error occurs, when irc > 0
optimized version
local data */
/* MXV/MYV = bound on local tile size (mx+1/my+1 must not exceed them), */
/* NPBLK = particles per vector block, LVECT = interpolation points per */
/* particle, N = padded number of field components per grid point */
#define MXV 33
#define MYV 33
#define NPBLK 32
#define LVECT 4
#define N 4
int noff, moff, npoff, npp, ipp, joff, nps;
int i, j, k, m, ih, nh, nn, mm, nm, lxv;
float qtmh, dxp, dyp, amx, amy, dx, dy, dz, ox, oy, oz;
float acx, acy, acz, omxt, omyt, omzt, omt, anorm;
float rot1, rot2, rot3, rot4, rot5, rot6, rot7, rot8, rot9;
float anx, any, edgelx, edgely, edgerx, edgery;
float x, y, vx, vy, vz;
float sfxy[N*MXV*MYV], sbxy[N*MXV*MYV];
/* float sfxy[N*(mx+1)*(my+1)], sbxy[N*(mx+1)*(my+1)]; */
/* scratch arrays */
int n[NPBLK];
float s1[NPBLK*LVECT], s2[NPBLK*LVECT], t[NPBLK*2];
double sum1, sum2;
lxv = mx + 1;
qtmh = 0.5f*qbm*dt;
anx = (float) nx;
any = (float) ny;
sum2 = 0.0;
/* error if local array is too small */
/* if ((mx >= MXV) || (my >= MYV)) */
/* return; */
/* loop over tiles */
#pragma omp parallel for \
private(i,j,k,m,noff,moff,npp,npoff,ipp,joff,nps,nn,mm,nm,ih,nh,x,y, \
vx,vy,vz,dxp,dyp,amx,amy,dx,dy,dz,ox,oy,oz,acx,acy,acz,omxt,omyt,omzt, \
omt,anorm,rot1,rot2,rot3,rot4,rot5,rot6,rot7,rot8,rot9,edgelx,edgely, \
edgerx,edgery,sum1,sfxy,sbxy,n,s1,s2,t) \
reduction(+:sum2)
for (k = 0; k < mxy1; k++) {
/* noff, moff = global grid offsets of tile k */
/* (noff temporarily holds the tile's row index before reuse) */
noff = k/mx1;
moff = my*noff;
noff = mx*(k - mx1*noff);
npp = kpic[k];
npoff = idimp*nppmx*k;
nn = nx - noff;
nn = mx < nn ? mx : nn;
mm = ny - moff;
mm = my < mm ? my : mm;
/* tile boundaries in global grid coordinates, used to detect */
/* particles leaving this tile */
edgelx = noff;
edgerx = noff + nn;
edgely = moff;
edgery = moff + mm;
/* ih = count of holes left by departing particles in this tile, */
/* nh = set to 1 if the ihole array overflows */
ih = 0;
nh = 0;
nn += 1;
mm += 1;
/* load local fields from global array */
for (j = 0; j < mm; j++) {
for (i = 0; i < nn; i++) {
sfxy[N*(i+lxv*j)] = fxy[N*(i+noff+nxv*(j+moff))];
sfxy[1+N*(i+lxv*j)] = fxy[1+N*(i+noff+nxv*(j+moff))];
sfxy[2+N*(i+lxv*j)] = fxy[2+N*(i+noff+nxv*(j+moff))];
}
}
for (j = 0; j < mm; j++) {
for (i = 0; i < nn; i++) {
sbxy[N*(i+lxv*j)] = bxy[N*(i+noff+nxv*(j+moff))];
sbxy[1+N*(i+lxv*j)] = bxy[1+N*(i+noff+nxv*(j+moff))];
sbxy[2+N*(i+lxv*j)] = bxy[2+N*(i+noff+nxv*(j+moff))];
}
}
/* clear counters */
for (j = 0; j < 8; j++) {
ncl[j+8*k] = 0;
}
sum1 = 0.0;
/* ipp = number of full particle blocks of size NPBLK in this tile */
ipp = npp/NPBLK;
/* outer loop over number of full blocks */
for (m = 0; m < ipp; m++) {
joff = NPBLK*m;
/* inner loop over particles in block */
for (j = 0; j < NPBLK; j++) {
/* find interpolation weights */
x = ppart[j+joff+npoff];
y = ppart[j+joff+nppmx+npoff];
nn = x;
mm = y;
dxp = x - (float) nn;
dyp = y - (float) mm;
n[j] = N*(nn - noff + lxv*(mm - moff));
amx = 1.0f - dxp;
amy = 1.0f - dyp;
s1[j] = amx*amy;
s1[j+NPBLK] = dxp*amy;
s1[j+2*NPBLK] = amx*dyp;
s1[j+3*NPBLK] = dxp*dyp;
t[j] = x;
t[j+NPBLK] = y;
}
/* find acceleration */
for (j = 0; j < NPBLK; j++) {
nn = n[j];
/* mm shifts the stencil to the next grid row: for i >= 2, */
/* N*i+mm = nn + N*lxv + N*(i-2) */
mm = nn + N*(lxv - 2);
dx = 0.0f;
dy = 0.0f;
dz = 0.0f;
ox = 0.0f;
oy = 0.0f;
oz = 0.0f;
/* ivdep asserts no loop-carried dependencies so the gather loop */
/* can be vectorized */
#pragma ivdep
for (i = 0; i < LVECT; i++) {
if (i > 1)
nn = mm;
dx += sfxy[N*i+nn]*s1[j+NPBLK*i];
dy += sfxy[1+N*i+nn]*s1[j+NPBLK*i];
dz += sfxy[2+N*i+nn]*s1[j+NPBLK*i];
ox += sbxy[N*i+nn]*s1[j+NPBLK*i];
oy += sbxy[1+N*i+nn]*s1[j+NPBLK*i];
oz += sbxy[2+N*i+nn]*s1[j+NPBLK*i];
}
s1[j] = dx;
s1[j+NPBLK] = dy;
s1[j+2*NPBLK] = dz;
s2[j] = ox;
s2[j+NPBLK] = oy;
s2[j+2*NPBLK] = oz;
}
/* new velocity */
for (j = 0; j < NPBLK; j++) {
x = t[j];
y = t[j+NPBLK];
/* calculate half impulse */
dx = qtmh*s1[j];
dy = qtmh*s1[j+NPBLK];
dz = qtmh*s1[j+2*NPBLK];
/* half acceleration */
acx = ppart[j+joff+2*nppmx+npoff] + dx;
acy = ppart[j+joff+3*nppmx+npoff] + dy;
acz = ppart[j+joff+4*nppmx+npoff] + dz;
/* time-centered kinetic energy */
sum1 += (acx*acx + acy*acy + acz*acz);
/* calculate cyclotron frequency */
omxt = qtmh*s2[j];
omyt = qtmh*s2[j+NPBLK];
omzt = qtmh*s2[j+2*NPBLK];
/* calculate rotation matrix */
/* NOTE: the in-place updates below (rot4 -= omzt, etc.) depend on */
/* this exact statement order */
omt = omxt*omxt + omyt*omyt + omzt*omzt;
anorm = 2.0f/(1.0f + omt);
omt = 0.5f*(1.0f - omt);
rot4 = omxt*omyt;
rot7 = omxt*omzt;
rot8 = omyt*omzt;
rot1 = omt + omxt*omxt;
rot5 = omt + omyt*omyt;
rot9 = omt + omzt*omzt;
rot2 = omzt + rot4;
rot4 -= omzt;
rot3 = -omyt + rot7;
rot7 += omyt;
rot6 = omxt + rot8;
rot8 -= omxt;
/* new velocity */
vx = (rot1*acx + rot2*acy + rot3*acz)*anorm + dx;
vy = (rot4*acx + rot5*acy + rot6*acz)*anorm + dy;
vz = (rot7*acx + rot8*acy + rot9*acz)*anorm + dz;
/* new position */
s1[j] = x + vx*dtc;
s1[j+NPBLK] = y + vy*dtc;
s2[j] = vx;
s2[j+NPBLK] = vy;
s2[j+2*NPBLK] = vz;
}
/* check boundary conditions */
/* novector: branchy boundary logic is handled as a scalar loop */
#pragma novector
for (j = 0; j < NPBLK; j++) {
dx = s1[j];
dy = s1[j+NPBLK];
/* find particles going out of bounds */
mm = 0;
/* count how many particles are going in each direction in ncl */
/* save their address and destination in ihole */
/* use periodic boundary conditions and check for roundoff error */
/* mm = direction particle is going */
/* encoding: x gives 1 = -x, 2 = +x; y adds 3 = -y, 6 = +y */
if (dx >= edgerx) {
if (dx >= anx)
dx -= anx;
mm = 2;
}
else if (dx < edgelx) {
if (dx < 0.0f) {
dx += anx;
if (dx < anx)
mm = 1;
else
dx = 0.0;
}
else {
mm = 1;
}
}
if (dy >= edgery) {
if (dy >= any)
dy -= any;
mm += 6;
}
else if (dy < edgely) {
if (dy < 0.0) {
dy += any;
if (dy < any)
mm += 3;
else
dy = 0.0;
}
else {
mm += 3;
}
}
/* set new position */
ppart[j+joff+npoff] = dx;
ppart[j+joff+nppmx+npoff] = dy;
/* set new velocity */
ppart[j+joff+2*nppmx+npoff] = s2[j];
ppart[j+joff+3*nppmx+npoff] = s2[j+NPBLK];
ppart[j+joff+4*nppmx+npoff] = s2[j+2*NPBLK];
/* increment counters */
if (mm > 0) {
ncl[mm+8*k-1] += 1;
ih += 1;
if (ih <= ntmax) {
ihole[2*(ih+(ntmax+1)*k)] = j + joff + 1;
ihole[1+2*(ih+(ntmax+1)*k)] = mm;
}
else {
nh = 1;
}
}
}
}
nps = NPBLK*ipp;
/* loop over remaining particles */
for (j = nps; j < npp; j++) {
/* find interpolation weights */
x = ppart[j+npoff];
y = ppart[j+nppmx+npoff];
nn = x;
mm = y;
dxp = x - (float) nn;
dyp = y - (float) mm;
nm = N*(nn - noff + lxv*(mm - moff));
amx = 1.0f - dxp;
amy = 1.0f - dyp;
/* find electric field */
nn = nm;
dx = amx*sfxy[nn];
dy = amx*sfxy[nn+1];
dz = amx*sfxy[nn+2];
mm = nn + N;
dx = amy*(dxp*sfxy[mm] + dx);
dy = amy*(dxp*sfxy[mm+1] + dy);
dz = amy*(dxp*sfxy[mm+2] + dz);
nn += N*lxv;
acx = amx*sfxy[nn];
acy = amx*sfxy[nn+1];
acz = amx*sfxy[nn+2];
mm = nn + N;
dx += dyp*(dxp*sfxy[mm] + acx);
dy += dyp*(dxp*sfxy[mm+1] + acy);
dz += dyp*(dxp*sfxy[mm+2] + acz);
/* find magnetic field */
nn = nm;
ox = amx*sbxy[nn];
oy = amx*sbxy[nn+1];
oz = amx*sbxy[nn+2];
mm = nn + N;
ox = amy*(dxp*sbxy[mm] + ox);
oy = amy*(dxp*sbxy[mm+1] + oy);
oz = amy*(dxp*sbxy[mm+2] + oz);
nn += N*lxv;
acx = amx*sbxy[nn];
acy = amx*sbxy[nn+1];
acz = amx*sbxy[nn+2];
mm = nn + N;
ox += dyp*(dxp*sbxy[mm] + acx);
oy += dyp*(dxp*sbxy[mm+1] + acy);
oz += dyp*(dxp*sbxy[mm+2] + acz);
/* calculate half impulse */
dx *= qtmh;
dy *= qtmh;
dz *= qtmh;
/* half acceleration */
acx = ppart[j+2*nppmx+npoff] + dx;
acy = ppart[j+3*nppmx+npoff] + dy;
acz = ppart[j+4*nppmx+npoff] + dz;
/* time-centered kinetic energy */
sum1 += (acx*acx + acy*acy + acz*acz);
/* calculate cyclotron frequency */
omxt = qtmh*ox;
omyt = qtmh*oy;
omzt = qtmh*oz;
/* calculate rotation matrix */
omt = omxt*omxt + omyt*omyt + omzt*omzt;
anorm = 2.0f/(1.0f + omt);
omt = 0.5f*(1.0f - omt);
rot4 = omxt*omyt;
rot7 = omxt*omzt;
rot8 = omyt*omzt;
rot1 = omt + omxt*omxt;
rot5 = omt + omyt*omyt;
rot9 = omt + omzt*omzt;
rot2 = omzt + rot4;
rot4 -= omzt;
rot3 = -omyt + rot7;
rot7 += omyt;
rot6 = omxt + rot8;
rot8 -= omxt;
/* new velocity */
vx = (rot1*acx + rot2*acy + rot3*acz)*anorm + dx;
vy = (rot4*acx + rot5*acy + rot6*acz)*anorm + dy;
vz = (rot7*acx + rot8*acy + rot9*acz)*anorm + dz;
/* new position */
dx = x + vx*dtc;
dy = y + vy*dtc;
/* find particles going out of bounds */
mm = 0;
/* count how many particles are going in each direction in ncl */
/* save their address and destination in ihole */
/* use periodic boundary conditions and check for roundoff error */
/* mm = direction particle is going */
/* encoding: x gives 1 = -x, 2 = +x; y adds 3 = -y, 6 = +y */
if (dx >= edgerx) {
if (dx >= anx)
dx -= anx;
mm = 2;
}
else if (dx < edgelx) {
if (dx < 0.0f) {
dx += anx;
if (dx < anx)
mm = 1;
else
dx = 0.0;
}
else {
mm = 1;
}
}
if (dy >= edgery) {
if (dy >= any)
dy -= any;
mm += 6;
}
else if (dy < edgely) {
if (dy < 0.0) {
dy += any;
if (dy < any)
mm += 3;
else
dy = 0.0;
}
else {
mm += 3;
}
}
/* set new position */
ppart[j+npoff] = dx;
ppart[j+nppmx+npoff] = dy;
/* set new velocity */
ppart[j+2*nppmx+npoff] = vx;
ppart[j+3*nppmx+npoff] = vy;
ppart[j+4*nppmx+npoff] = vz;
/* increment counters */
if (mm > 0) {
ncl[mm+8*k-1] += 1;
ih += 1;
if (ih <= ntmax) {
ihole[2*(ih+(ntmax+1)*k)] = j + 1;
ihole[1+2*(ih+(ntmax+1)*k)] = mm;
}
else {
nh = 1;
}
}
}
sum2 += sum1;
/* set error and end of file flag */
/* ihole overflow */
if (nh > 0) {
*irc = ih;
ih = -ih;
}
ihole[2*(ntmax+1)*k] = ih;
}
/* normalize kinetic energy */
*ek += 0.5*sum2;
return;
#undef N
#undef LVECT
#undef NPBLK
#undef MXV
#undef MYV
}
/*--------------------------------------------------------------------*/
void cvgrbppush23lt(float ppart[], float fxy[], float bxy[], int kpic[],
float qbm, float dt, float dtc, float ci, float *ek,
int idimp, int nppmx, int nx, int ny, int mx,
int my, int nxv, int nyv, int mx1, int mxy1,
int ipbc) {
/* for 2-1/2d code, this subroutine updates particle co-ordinates and
velocities using leap-frog scheme in time and first-order linear
interpolation in space, for relativistic particles with magnetic field
Using the Boris Mover.
vectorizable/OpenMP version using guard cells
data deposited in tiles
particles stored segmented array
131 flops/particle, 4 divides, 2 sqrts, 25 loads, 5 stores
input: all, output: ppart, ek
momentum equations used are:
px(t+dt/2) = rot(1)*(px(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt) +
rot(2)*(py(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt) +
rot(3)*(pz(t-dt/2) + .5*(q/m)*fz(x(t),y(t))*dt) +
.5*(q/m)*fx(x(t),y(t))*dt)
py(t+dt/2) = rot(4)*(px(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt) +
rot(5)*(py(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt) +
rot(6)*(pz(t-dt/2) + .5*(q/m)*fz(x(t),y(t))*dt) +
.5*(q/m)*fy(x(t),y(t))*dt)
pz(t+dt/2) = rot(7)*(px(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt) +
rot(8)*(py(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt) +
rot(9)*(pz(t-dt/2) + .5*(q/m)*fz(x(t),y(t))*dt) +
.5*(q/m)*fz(x(t),y(t))*dt)
where q/m is charge/mass, and the rotation matrix is given by:
rot[0] = (1 - (om*dt/2)**2 + 2*(omx*dt/2)**2)/(1 + (om*dt/2)**2)
rot[1] = 2*(omz*dt/2 + (omx*dt/2)*(omy*dt/2))/(1 + (om*dt/2)**2)
rot[2] = 2*(-omy*dt/2 + (omx*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
rot[3] = 2*(-omz*dt/2 + (omx*dt/2)*(omy*dt/2))/(1 + (om*dt/2)**2)
rot[4] = (1 - (om*dt/2)**2 + 2*(omy*dt/2)**2)/(1 + (om*dt/2)**2)
rot[5] = 2*(omx*dt/2 + (omy*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
rot[6] = 2*(omy*dt/2 + (omx*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
rot[7] = 2*(-omx*dt/2 + (omy*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
rot[8] = (1 - (om*dt/2)**2 + 2*(omz*dt/2)**2)/(1 + (om*dt/2)**2)
and om**2 = omx**2 + omy**2 + omz**2
the rotation matrix is determined by:
omx = (q/m)*bx(x(t),y(t))*gami, omy = (q/m)*by(x(t),y(t))*gami, and
omz = (q/m)*bz(x(t),y(t))*gami,
where gami = 1./sqrt(1.+(px(t)*px(t)+py(t)*py(t)+pz(t)*pz(t))*ci*ci)
position equations used are:
x(t+dt) = x(t) + px(t+dt/2)*dtg
y(t+dt) = y(t) + py(t+dt/2)*dtg
where dtg = dtc/sqrt(1.+(px(t+dt/2)*px(t+dt/2)+py(t+dt/2)*py(t+dt/2)+
pz(t+dt/2)*pz(t+dt/2))*ci*ci)
fx(x(t),y(t)), fy(x(t),y(t)), and fz(x(t),y(t))
bx(x(t),y(t)), by(x(t),y(t)), and bz(x(t),y(t))
are approximated by interpolation from the nearest grid points:
fx(x,y) = (1-dy)*((1-dx)*fx(n,m)+dx*fx(n+1,m)) + dy*((1-dx)*fx(n,m+1)
+ dx*fx(n+1,m+1))
where n,m = leftmost grid points and dx = x-n, dy = y-m
similarly for fy(x,y), fz(x,y), bx(x,y), by(x,y), bz(x,y)
ppart[m][0][n] = position x of particle n in tile m
ppart[m][1][n] = position y of particle n in tile m
ppart[m][2][n] = x momentum of particle n in tile m
ppart[m][3][n] = y momentum of particle n in tile m
ppart[m][4][n] = z momentum of particle n in tile m
fxy[k][j][0] = x component of force/charge at grid (j,k)
fxy[k][j][1] = y component of force/charge at grid (j,k)
fxy[k][j][2] = z component of force/charge at grid (j,k)
that is, convolution of electric field over particle shape
bxy[k][j][0] = x component of magnetic field at grid (j,k)
bxy[k][j][1] = y component of magnetic field at grid (j,k)
bxy[k][j][2] = z component of magnetic field at grid (j,k)
that is, the convolution of magnetic field over particle shape
kpic = number of particles per tile
qbm = particle charge/mass ratio
dt = time interval between successive calculations
dtc = time interval between successive co-ordinate calculations
ci = reciprocal of velocity of light
kinetic energy/mass at time t is also calculated, using
ek = gami*sum((px(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt)**2 +
(py(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt)**2 +
(pz(t-dt/2) + .5*(q/m)*fz(x(t),y(t))*dt)**2)/(1. + gami)
idimp = size of phase space = 5
nppmx = maximum number of particles in tile
nx/ny = system length in x/y direction
mx/my = number of grids in sorting cell in x/y
nxv = second dimension of field arrays, must be >= nx+1
nyv = third dimension of field arrays, must be >= ny+1
mx1 = (system length in x direction - 1)/mx + 1
mxy1 = mx1*my1, where my1 = (system length in y direction - 1)/my + 1
ipbc = particle boundary condition = (0,1,2,3) =
(none,2d periodic,2d reflecting,mixed reflecting/periodic)
local data */
#define MXV 33
#define MYV 33
#define NPBLK 32
#define LVECT 4
#define N 4
int noff, moff, npoff, npp, ipp, joff, nps;
int i, j, k, m, nn, mm, nm, lxv;
float qtmh, ci2, edgelx, edgely, edgerx, edgery, dxp, dyp, amx, amy;
float dx, dy, dz, ox, oy, oz, acx, acy, acz, p2, gami, qtmg, dtg;
float omxt, omyt, omzt, omt, anorm;
float rot1, rot2, rot3, rot4, rot5, rot6, rot7, rot8, rot9;
float x, y, vx, vy, vz;
float sfxy[N*MXV*MYV], sbxy[N*MXV*MYV];
/* float sfxy[N*(mx+1)*(my+1)], sbxy[N*(mx+1)*(my+1)]; */
/* scratch arrays */
int n[NPBLK];
float s1[NPBLK*LVECT], s2[NPBLK*LVECT], t[NPBLK*2];
double sum1, sum2;
lxv = mx + 1;
qtmh = 0.5f*qbm*dt;
ci2 = ci*ci;
sum2 = 0.0;
/* set boundary values */
edgelx = 0.0f;
edgely = 0.0f;
edgerx = (float) nx;
edgery = (float) ny;
if (ipbc==2) {
edgelx = 1.0f;
edgely = 1.0f;
edgerx = (float) (nx-1);
edgery = (float) (ny-1);
}
else if (ipbc==3) {
edgelx = 1.0f;
edgerx = (float) (nx-1);
}
/* error if local array is too small */
/* if ((mx >= MXV) || (my >= MYV)) */
/* return; */
/* loop over tiles */
#pragma omp parallel for \
private(i,j,k,m,noff,moff,npp,npoff,ipp,joff,nps,nn,mm,nm,x,y,vx,vy,vz, \
dxp,dyp,amx,amy,dx,dy,dz,ox,oy,oz,acx,acy,acz,omxt,omyt,omzt,omt,anorm, \
rot1,rot2,rot3,rot4,rot5,rot6,rot7,rot8,rot9,p2,gami,qtmg,dtg,sum1, \
sfxy,sbxy,n,s1,s2,t) \
reduction(+:sum2)
for (k = 0; k < mxy1; k++) {
noff = k/mx1;
moff = my*noff;
noff = mx*(k - mx1*noff);
npp = kpic[k];
npoff = idimp*nppmx*k;
/* load local fields from global array */
nn = (mx < nx-noff ? mx : nx-noff) + 1;
mm = (my < ny-moff ? my : ny-moff) + 1;
for (j = 0; j < mm; j++) {
for (i = 0; i < nn; i++) {
sfxy[N*(i+lxv*j)] = fxy[N*(i+noff+nxv*(j+moff))];
sfxy[1+N*(i+lxv*j)] = fxy[1+N*(i+noff+nxv*(j+moff))];
sfxy[2+N*(i+lxv*j)] = fxy[2+N*(i+noff+nxv*(j+moff))];
}
}
for (j = 0; j < mm; j++) {
for (i = 0; i < nn; i++) {
sbxy[N*(i+lxv*j)] = bxy[N*(i+noff+nxv*(j+moff))];
sbxy[1+N*(i+lxv*j)] = bxy[1+N*(i+noff+nxv*(j+moff))];
sbxy[2+N*(i+lxv*j)] = bxy[2+N*(i+noff+nxv*(j+moff))];
}
}
sum1 = 0.0;
ipp = npp/NPBLK;
/* outer loop over number of full blocks */
for (m = 0; m < ipp; m++) {
joff = NPBLK*m;
/* inner loop over particles in block */
for (j = 0; j < NPBLK; j++) {
/* find interpolation weights */
x = ppart[j+joff+npoff];
y = ppart[j+joff+nppmx+npoff];
nn = x;
mm = y;
dxp = x - (float) nn;
dyp = y - (float) mm;
n[j] = N*(nn - noff + lxv*(mm - moff));
amx = 1.0f - dxp;
amy = 1.0f - dyp;
s1[j] = amx*amy;
s1[j+NPBLK] = dxp*amy;
s1[j+2*NPBLK] = amx*dyp;
s1[j+3*NPBLK] = dxp*dyp;
t[j] = x;
t[j+NPBLK] = y;
}
/* find acceleration */
for (j = 0; j < NPBLK; j++) {
nn = n[j];
mm = nn + N*(lxv - 2);
dx = 0.0f;
dy = 0.0f;
dz = 0.0f;
ox = 0.0f;
oy = 0.0f;
oz = 0.0f;
#pragma ivdep
for (i = 0; i < LVECT; i++) {
if (i > 1)
nn = mm;
dx += sfxy[N*i+nn]*s1[j+NPBLK*i];
dy += sfxy[1+N*i+nn]*s1[j+NPBLK*i];
dz += sfxy[2+N*i+nn]*s1[j+NPBLK*i];
ox += sbxy[N*i+nn]*s1[j+NPBLK*i];
oy += sbxy[1+N*i+nn]*s1[j+NPBLK*i];
oz += sbxy[2+N*i+nn]*s1[j+NPBLK*i];
}
s1[j] = dx;
s1[j+NPBLK] = dy;
s1[j+2*NPBLK] = dz;
s2[j] = ox;
s2[j+NPBLK] = oy;
s2[j+2*NPBLK] = oz;
}
/* new momentum */
for (j = 0; j < NPBLK; j++) {
x = t[j];
y = t[j+NPBLK];
/* calculate half impulse */
dx = qtmh*s1[j];
dy = qtmh*s1[j+NPBLK];
dz = qtmh*s1[j+2*NPBLK];
/* half acceleration */
acx = ppart[j+joff+2*nppmx+npoff] + dx;
acy = ppart[j+joff+3*nppmx+npoff] + dy;
acz = ppart[j+joff+4*nppmx+npoff] + dz;
/* find inverse gamma */
p2 = acx*acx + acy*acy + acz*acz;
gami = 1.0f/sqrtf(1.0f + p2*ci2);
/* renormalize magnetic field */
qtmg = qtmh*gami;
/* time-centered kinetic energy */
sum1 += gami*p2/(1.0f + gami);
/* calculate cyclotron frequency */
omxt = qtmg*s2[j];
omyt = qtmg*s2[j+NPBLK];
omzt = qtmg*s2[j+2*NPBLK];
/* calculate rotation matrix */
omt = omxt*omxt + omyt*omyt + omzt*omzt;
anorm = 2.0f/(1.0f + omt);
omt = 0.5f*(1.0f - omt);
rot4 = omxt*omyt;
rot7 = omxt*omzt;
rot8 = omyt*omzt;
rot1 = omt + omxt*omxt;
rot5 = omt + omyt*omyt;
rot9 = omt + omzt*omzt;
rot2 = omzt + rot4;
rot4 -= omzt;
rot3 = -omyt + rot7;
rot7 += omyt;
rot6 = omxt + rot8;
rot8 -= omxt;
/* new velocity */
vx = (rot1*acx + rot2*acy + rot3*acz)*anorm + dx;
vy = (rot4*acx + rot5*acy + rot6*acz)*anorm + dy;
vz = (rot7*acx + rot8*acy + rot9*acz)*anorm + dz;
/* update inverse gamma */
p2 = vx*vx + vy*vy + vz*vz;
dtg = dtc/sqrtf(1.0f + p2*ci2);
/* new position */
s1[j] = x + vx*dtg;
s1[j+NPBLK] = y + vy*dtg;
s2[j] = vx;
s2[j+NPBLK] = vy;
s2[j+2*NPBLK] = vz;
}
/* check boundary conditions */
#pragma novector
for (j = 0; j < NPBLK; j++) {
dx = s1[j];
dy = s1[j+NPBLK];
vx = s2[j];
vy = s2[j+NPBLK];
vz = s2[j+2*NPBLK];
/* reflecting boundary conditions */
if (ipbc==2) {
if ((dx < edgelx) || (dx >= edgerx)) {
dx = t[j];
vx = -vx;
}
if ((dy < edgely) || (dy >= edgery)) {
dy = t[j+NPBLK];
vy = -vy;
}
}
/* mixed reflecting/periodic boundary conditions */
else if (ipbc==3) {
if ((dx < edgelx) || (dx >= edgerx)) {
dx = t[j];
vx = -vx;
}
}
/* set new position */
ppart[j+joff+npoff] = dx;
ppart[j+joff+nppmx+npoff] = dy;
/* set new momentum */
ppart[j+joff+2*nppmx+npoff] = vx;
ppart[j+joff+3*nppmx+npoff] = vy;
ppart[j+joff+4*nppmx+npoff] = vz;
}
}
nps = NPBLK*ipp;
/* loop over remaining particles */
for (j = nps; j < npp; j++) {
/* find interpolation weights */
x = ppart[j+npoff];
y = ppart[j+nppmx+npoff];
nn = x;
mm = y;
dxp = x - (float) nn;
dyp = y - (float) mm;
nm = N*(nn - noff + lxv*(mm - moff));
amx = 1.0f - dxp;
amy = 1.0f - dyp;
/* find electric field */
nn = nm;
dx = amx*sfxy[nn];
dy = amx*sfxy[nn+1];
dz = amx*sfxy[nn+2];
mm = nn + N;
dx = amy*(dxp*sfxy[mm] + dx);
dy = amy*(dxp*sfxy[mm+1] + dy);
dz = amy*(dxp*sfxy[mm+2] + dz);
nn += N*lxv;
acx = amx*sfxy[nn];
acy = amx*sfxy[nn+1];
acz = amx*sfxy[nn+2];
mm = nn + N;
dx += dyp*(dxp*sfxy[mm] + acx);
dy += dyp*(dxp*sfxy[mm+1] + acy);
dz += dyp*(dxp*sfxy[mm+2] + acz);
/* find magnetic field */
nn = nm;
ox = amx*sbxy[nn];
oy = amx*sbxy[nn+1];
oz = amx*sbxy[nn+2];
mm = nn + N;
ox = amy*(dxp*sbxy[mm] + ox);
oy = amy*(dxp*sbxy[mm+1] + oy);
oz = amy*(dxp*sbxy[mm+2] + oz);
nn += N*lxv;
acx = amx*sbxy[nn];
acy = amx*sbxy[nn+1];
acz = amx*sbxy[nn+2];
mm = nn + N;
ox += dyp*(dxp*sbxy[mm] + acx);
oy += dyp*(dxp*sbxy[mm+1] + acy);
oz += dyp*(dxp*sbxy[mm+2] + acz);
/* calculate half impulse */
dx *= qtmh;
dy *= qtmh;
dz *= qtmh;
/* half acceleration */
acx = ppart[j+2*nppmx+npoff] + dx;
acy = ppart[j+3*nppmx+npoff] + dy;
acz = ppart[j+4*nppmx+npoff] + dz;
/* find inverse gamma */
p2 = acx*acx + acy*acy + acz*acz;
gami = 1.0f/sqrtf(1.0f + p2*ci2);
/* renormalize magnetic field */
qtmg = qtmh*gami;
/* time-centered kinetic energy */
sum1 += gami*p2/(1.0f + gami);
/* calculate cyclotron frequency */
omxt = qtmg*ox;
omyt = qtmg*oy;
omzt = qtmg*oz;
/* calculate rotation matrix */
omt = omxt*omxt + omyt*omyt + omzt*omzt;
anorm = 2.0f/(1.0f + omt);
omt = 0.5f*(1.0f - omt);
rot4 = omxt*omyt;
rot7 = omxt*omzt;
rot8 = omyt*omzt;
rot1 = omt + omxt*omxt;
rot5 = omt + omyt*omyt;
rot9 = omt + omzt*omzt;
rot2 = omzt + rot4;
rot4 -= omzt;
rot3 = -omyt + rot7;
rot7 += omyt;
rot6 = omxt + rot8;
rot8 -= omxt;
/* new velocity */
vx = (rot1*acx + rot2*acy + rot3*acz)*anorm + dx;
vy = (rot4*acx + rot5*acy + rot6*acz)*anorm + dy;
vz = (rot7*acx + rot8*acy + rot9*acz)*anorm + dz;
/* update inverse gamma */
p2 = vx*vx + vy*vy + vz*vz;
dtg = dtc/sqrtf(1.0f + p2*ci2);
/* new position */
dx = x + vx*dtg;
dy = y + vy*dtg;
/* reflecting boundary conditions */
if (ipbc==2) {
if ((dx < edgelx) || (dx >= edgerx)) {
dx = x;
vx = -vx;
}
if ((dy < edgely) || (dy >= edgery)) {
dy = y;
vy = -vy;
}
}
/* mixed reflecting/periodic boundary conditions */
else if (ipbc==3) {
if ((dx < edgelx) || (dx >= edgerx)) {
dx = x;
vx = -vx;
}
}
/* set new position */
ppart[j+npoff] = dx;
ppart[j+nppmx+npoff] = dy;
/* set new momentum */
ppart[j+2*nppmx+npoff] = vx;
ppart[j+3*nppmx+npoff] = vy;
ppart[j+4*nppmx+npoff] = vz;
}
sum2 += sum1;
}
/* normalize kinetic energy */
*ek += sum2;
return;
#undef N
#undef LVECT
#undef NPBLK
#undef MXV
#undef MYV
}
/*--------------------------------------------------------------------*/
void cvgrbppushf23lt(float ppart[], float fxy[], float bxy[],
                     int kpic[], int ncl[], int ihole[], float qbm,
                     float dt, float dtc, float ci, float *ek,
                     int idimp, int nppmx, int nx, int ny, int mx,
                     int my, int nxv, int nyv, int mx1, int mxy1,
                     int ntmax, int *irc) {
/* for 2-1/2d code, this subroutine updates particle co-ordinates and
   velocities using leap-frog scheme in time and first-order linear
   interpolation in space, for relativistic particles with magnetic field
   with periodic boundary conditions.
   Using the Boris Mover.
   also determines list of particles which are leaving this tile
   vectorizable/OpenMP version using guard cells
   data deposited in tiles
   particles stored segmented array
   131 flops/particle, 4 divides, 2 sqrts, 25 loads, 5 stores
   input: all except ncl, ihole, irc, output: ppart, ncl, ihole, irc, ek
   momentum equations used are:
   px(t+dt/2) = rot(1)*(px(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt) +
   rot(2)*(py(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt) +
   rot(3)*(pz(t-dt/2) + .5*(q/m)*fz(x(t),y(t))*dt) +
   .5*(q/m)*fx(x(t),y(t))*dt)
   py(t+dt/2) = rot(4)*(px(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt) +
   rot(5)*(py(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt) +
   rot(6)*(pz(t-dt/2) + .5*(q/m)*fz(x(t),y(t))*dt) +
   .5*(q/m)*fy(x(t),y(t))*dt)
   pz(t+dt/2) = rot(7)*(px(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt) +
   rot(8)*(py(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt) +
   rot(9)*(pz(t-dt/2) + .5*(q/m)*fz(x(t),y(t))*dt) +
   .5*(q/m)*fz(x(t),y(t))*dt)
   where q/m is charge/mass, and the rotation matrix is given by:
   rot[0] = (1 - (om*dt/2)**2 + 2*(omx*dt/2)**2)/(1 + (om*dt/2)**2)
   rot[1] = 2*(omz*dt/2 + (omx*dt/2)*(omy*dt/2))/(1 + (om*dt/2)**2)
   rot[2] = 2*(-omy*dt/2 + (omx*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
   rot[3] = 2*(-omz*dt/2 + (omx*dt/2)*(omy*dt/2))/(1 + (om*dt/2)**2)
   rot[4] = (1 - (om*dt/2)**2 + 2*(omy*dt/2)**2)/(1 + (om*dt/2)**2)
   rot[5] = 2*(omx*dt/2 + (omy*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
   rot[6] = 2*(omy*dt/2 + (omx*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
   rot[7] = 2*(-omx*dt/2 + (omy*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
   rot[8] = (1 - (om*dt/2)**2 + 2*(omz*dt/2)**2)/(1 + (om*dt/2)**2)
   and om**2 = omx**2 + omy**2 + omz**2
   the rotation matrix is determined by:
   omx = (q/m)*bx(x(t),y(t))*gami, omy = (q/m)*by(x(t),y(t))*gami, and
   omz = (q/m)*bz(x(t),y(t))*gami,
   where gami = 1./sqrt(1.+(px(t)*px(t)+py(t)*py(t)+pz(t)*pz(t))*ci*ci)
   position equations used are:
   x(t+dt) = x(t) + px(t+dt/2)*dtg
   y(t+dt) = y(t) + py(t+dt/2)*dtg
   where dtg = dtc/sqrt(1.+(px(t+dt/2)*px(t+dt/2)+py(t+dt/2)*py(t+dt/2)+
   pz(t+dt/2)*pz(t+dt/2))*ci*ci)
   fx(x(t),y(t)), fy(x(t),y(t)), and fz(x(t),y(t))
   bx(x(t),y(t)), by(x(t),y(t)), and bz(x(t),y(t))
   are approximated by interpolation from the nearest grid points:
   fx(x,y) = (1-dy)*((1-dx)*fx(n,m)+dx*fx(n+1,m)) + dy*((1-dx)*fx(n,m+1)
   + dx*fx(n+1,m+1))
   where n,m = leftmost grid points and dx = x-n, dy = y-m
   similarly for fy(x,y), fz(x,y), bx(x,y), by(x,y), bz(x,y)
   ppart[m][0][n] = position x of particle n in tile m
   ppart[m][1][n] = position y of particle n in tile m
   ppart[m][2][n] = x momentum of particle n in tile m
   ppart[m][3][n] = y momentum of particle n in tile m
   ppart[m][4][n] = z momentum of particle n in tile m
   fxy[k][j][0] = x component of force/charge at grid (j,k)
   fxy[k][j][1] = y component of force/charge at grid (j,k)
   fxy[k][j][2] = z component of force/charge at grid (j,k)
   that is, convolution of electric field over particle shape
   bxy[k][j][0] = x component of magnetic field at grid (j,k)
   bxy[k][j][1] = y component of magnetic field at grid (j,k)
   bxy[k][j][2] = z component of magnetic field at grid (j,k)
   that is, the convolution of magnetic field over particle shape
   kpic[k] = number of particles in tile k
   ncl[k][i] = number of particles going to destination i, tile k
   ihole[k][:][0] = location of hole in array left by departing particle
   ihole[k][:][1] = destination of particle leaving hole
   ihole[k][0][0] = ih, number of holes left (error, if negative)
   qbm = particle charge/mass ratio
   dt = time interval between successive calculations
   dtc = time interval between successive co-ordinate calculations
   ci = reciprocal of velocity of light
   kinetic energy/mass at time t is also calculated, using
   ek = gami*sum((px(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt)**2 +
   (py(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt)**2 +
   (pz(t-dt/2) + .5*(q/m)*fz(x(t),y(t))*dt)**2)/(1. + gami)
   idimp = size of phase space = 5
   nppmx = maximum number of particles in tile
   nx/ny = system length in x/y direction
   mx/my = number of grids in sorting cell in x/y
   nxv = second dimension of field arrays, must be >= nx+1
   nyv = third dimension of field arrays, must be >= ny+1
   mx1 = (system length in x direction - 1)/mx + 1
   mxy1 = mx1*my1, where my1 = (system length in y direction - 1)/my + 1
   ntmax = size of hole array for particles leaving tiles
   irc = maximum overflow, returned only if error occurs, when irc > 0
   optimized version
local data */
/* MXV/MYV = maximum tile size + guard cell, sizing the local field copies */
/* NPBLK = particle block size for vectorization, LVECT = 4 interpolation */
/* points per particle, N = stride (components) per grid point            */
#define MXV 33
#define MYV 33
#define NPBLK 32
#define LVECT 4
#define N 4
   int noff, moff, npoff, npp, ipp, joff, nps;
   int i, j, k, m, ih, nh, nn, mm, nm, lxv;
   float qtmh, ci2, dxp, dyp, amx, amy, dx, dy, dz, ox, oy, oz;
   float acx, acy, acz, p2, gami, qtmg, dtg, omxt, omyt, omzt, omt;
   float anorm, rot1, rot2, rot3, rot4, rot5, rot6, rot7, rot8, rot9;
   float anx, any, edgelx, edgely, edgerx, edgery;
   float x, y, vx, vy, vz;
/* per-tile copies of the electric and magnetic fields, including guards */
   float sfxy[N*MXV*MYV], sbxy[N*MXV*MYV];
/* float sfxy[N*(mx+1)*(my+1)], sbxy[N*(mx+1)*(my+1)]; */
/* scratch arrays */
   int n[NPBLK];
   float s1[NPBLK*LVECT], s2[NPBLK*LVECT], t[NPBLK*2];
   double sum1, sum2;
/* lxv = x stride of the local field copies */
   lxv = mx + 1;
   qtmh = 0.5f*qbm*dt;
   ci2 = ci*ci;
   anx = (float) nx;
   any = (float) ny;
   sum2 = 0.0;
/* error if local array is too small */
/* if ((mx >= MXV) || (my >= MYV)) */
/*    return;                      */
/* loop over tiles */
#pragma omp parallel for \
private(i,j,k,m,noff,moff,npp,npoff,ipp,joff,nps,nn,mm,nm,ih,nh,x,y,vx, \
vy,vz,dxp,dyp,amx,amy,dx,dy,dz,ox,oy,oz,acx,acy,acz,omxt,omyt,omzt,omt, \
anorm,rot1,rot2,rot3,rot4,rot5,rot6,rot7,rot8,rot9,edgelx,edgely, \
edgerx,edgery,p2,gami,qtmg,dtg,sum1,sfxy,sbxy,n,s1,s2,t) \
reduction(+:sum2)
   for (k = 0; k < mxy1; k++) {
/* noff/moff = global grid offset of this tile */
      noff = k/mx1;
      moff = my*noff;
      noff = mx*(k - mx1*noff);
      npp = kpic[k];
      npoff = idimp*nppmx*k;
/* nn/mm = actual tile extent, clipped at the system edge */
      nn = nx - noff;
      nn = mx < nn ? mx : nn;
      mm = ny - moff;
      mm = my < mm ? my : mm;
/* tile edges in particle coordinates, used to detect leaving particles */
      edgelx = noff;
      edgerx = noff + nn;
      edgely = moff;
      edgery = moff + mm;
/* ih = number of holes found in this tile, nh = ihole overflow flag */
      ih = 0;
      nh = 0;
/* include one guard cell in each direction for interpolation */
      nn += 1;
      mm += 1;
/* load local fields from global array */
      for (j = 0; j < mm; j++) {
         for (i = 0; i < nn; i++) {
            sfxy[N*(i+lxv*j)] = fxy[N*(i+noff+nxv*(j+moff))];
            sfxy[1+N*(i+lxv*j)] = fxy[1+N*(i+noff+nxv*(j+moff))];
            sfxy[2+N*(i+lxv*j)] = fxy[2+N*(i+noff+nxv*(j+moff))];
         }
      }
      for (j = 0; j < mm; j++) {
         for (i = 0; i < nn; i++) {
            sbxy[N*(i+lxv*j)] = bxy[N*(i+noff+nxv*(j+moff))];
            sbxy[1+N*(i+lxv*j)] = bxy[1+N*(i+noff+nxv*(j+moff))];
            sbxy[2+N*(i+lxv*j)] = bxy[2+N*(i+noff+nxv*(j+moff))];
         }
      }
/* clear counters */
      for (j = 0; j < 8; j++) {
         ncl[j+8*k] = 0;
      }
      sum1 = 0.0;
/* ipp = number of full NPBLK-sized particle blocks in this tile */
      ipp = npp/NPBLK;
/* outer loop over number of full blocks */
      for (m = 0; m < ipp; m++) {
         joff = NPBLK*m;
/* inner loop over particles in block */
         for (j = 0; j < NPBLK; j++) {
/* find interpolation weights */
            x = ppart[j+joff+npoff];
            y = ppart[j+joff+nppmx+npoff];
            nn = x;
            mm = y;
            dxp = x - (float) nn;
            dyp = y - (float) mm;
/* n[j] = local field index of lower-left grid point for particle j */
            n[j] = N*(nn - noff + lxv*(mm - moff));
            amx = 1.0f - dxp;
            amy = 1.0f - dyp;
            s1[j] = amx*amy;
            s1[j+NPBLK] = dxp*amy;
            s1[j+2*NPBLK] = amx*dyp;
            s1[j+3*NPBLK] = dxp*dyp;
            t[j] = x;
            t[j+NPBLK] = y;
         }
/* find acceleration */
         for (j = 0; j < NPBLK; j++) {
            nn = n[j];
/* mm = index of the row above, offset so that i = 2,3 address it below */
            mm = nn + N*(lxv - 2);
            dx = 0.0f;
            dy = 0.0f;
            dz = 0.0f;
            ox = 0.0f;
            oy = 0.0f;
            oz = 0.0f;
#pragma ivdep
/* gather from the 4 surrounding grid points:         */
/* i = 0,1 use the lower row, i = 2,3 the upper row   */
            for (i = 0; i < LVECT; i++) {
               if (i > 1)
                  nn = mm;
               dx += sfxy[N*i+nn]*s1[j+NPBLK*i];
               dy += sfxy[1+N*i+nn]*s1[j+NPBLK*i];
               dz += sfxy[2+N*i+nn]*s1[j+NPBLK*i];
               ox += sbxy[N*i+nn]*s1[j+NPBLK*i];
               oy += sbxy[1+N*i+nn]*s1[j+NPBLK*i];
               oz += sbxy[2+N*i+nn]*s1[j+NPBLK*i];
            }
            s1[j] = dx;
            s1[j+NPBLK] = dy;
            s1[j+2*NPBLK] = dz;
            s2[j] = ox;
            s2[j+NPBLK] = oy;
            s2[j+2*NPBLK] = oz;
         }
/* new momentum */
         for (j = 0; j < NPBLK; j++) {
            x = t[j];
            y = t[j+NPBLK];
/* calculate half impulse */
            dx = qtmh*s1[j];
            dy = qtmh*s1[j+NPBLK];
            dz = qtmh*s1[j+2*NPBLK];
/* half acceleration */
            acx = ppart[j+joff+2*nppmx+npoff] + dx;
            acy = ppart[j+joff+3*nppmx+npoff] + dy;
            acz = ppart[j+joff+4*nppmx+npoff] + dz;
/* find inverse gamma */
            p2 = acx*acx + acy*acy + acz*acz;
            gami = 1.0f/sqrtf(1.0f + p2*ci2);
/* renormalize magnetic field */
            qtmg = qtmh*gami;
/* time-centered kinetic energy */
            sum1 += gami*p2/(1.0f + gami);
/* calculate cyclotron frequency */
            omxt = qtmg*s2[j];
            omyt = qtmg*s2[j+NPBLK];
            omzt = qtmg*s2[j+2*NPBLK];
/* calculate rotation matrix */
            omt = omxt*omxt + omyt*omyt + omzt*omzt;
            anorm = 2.0f/(1.0f + omt);
            omt = 0.5f*(1.0f - omt);
            rot4 = omxt*omyt;
            rot7 = omxt*omzt;
            rot8 = omyt*omzt;
            rot1 = omt + omxt*omxt;
            rot5 = omt + omyt*omyt;
            rot9 = omt + omzt*omzt;
            rot2 = omzt + rot4;
            rot4 -= omzt;
            rot3 = -omyt + rot7;
            rot7 += omyt;
            rot6 = omxt + rot8;
            rot8 -= omxt;
/* new momentum */
            vx = (rot1*acx + rot2*acy + rot3*acz)*anorm + dx;
            vy = (rot4*acx + rot5*acy + rot6*acz)*anorm + dy;
            vz = (rot7*acx + rot8*acy + rot9*acz)*anorm + dz;
/* update inverse gamma */
            p2 = vx*vx + vy*vy + vz*vz;
            dtg = dtc/sqrtf(1.0f + p2*ci2);
/* new position (staged in s1/s2 for the boundary pass below) */
            s1[j] = x + vx*dtg;
            s1[j+NPBLK] = y + vy*dtg;
            s2[j] = vx;
            s2[j+NPBLK] = vy;
            s2[j+2*NPBLK] = vz;
         }
/* check boundary conditions */
#pragma novector
         for (j = 0; j < NPBLK; j++) {
            dx = s1[j];
            dy = s1[j+NPBLK];
/* find particles going out of bounds */
            mm = 0;
/* count how many particles are going in each direction in ncl */
/* save their address and destination in ihole */
/* use periodic boundary conditions and check for roundoff error */
/* mm = direction particle is going: x part is 1 (left) or 2 (right), */
/* y part adds 3 (below) or 6 (above), giving destinations 1..8 */
            if (dx >= edgerx) {
               if (dx >= anx)
                  dx -= anx;
               mm = 2;
            }
            else if (dx < edgelx) {
               if (dx < 0.0f) {
                  dx += anx;
                  if (dx < anx)
                     mm = 1;
                  else
                     dx = 0.0;
               }
               else {
                  mm = 1;
               }
            }
            if (dy >= edgery) {
               if (dy >= any)
                  dy -= any;
               mm += 6;
            }
            else if (dy < edgely) {
               if (dy < 0.0) {
                  dy += any;
                  if (dy < any)
                     mm += 3;
                  else
                     dy = 0.0;
               }
               else {
                  mm += 3;
               }
            }
/* set new position */
            ppart[j+joff+npoff] = dx;
            ppart[j+joff+nppmx+npoff] = dy;
/* set new momentum */
            ppart[j+joff+2*nppmx+npoff] = s2[j];
            ppart[j+joff+3*nppmx+npoff] = s2[j+NPBLK];
            ppart[j+joff+4*nppmx+npoff] = s2[j+2*NPBLK];
/* increment counters */
            if (mm > 0) {
               ncl[mm+8*k-1] += 1;
               ih += 1;
               if (ih <= ntmax) {
                  ihole[2*(ih+(ntmax+1)*k)] = j + joff + 1;
                  ihole[1+2*(ih+(ntmax+1)*k)] = mm;
               }
               else {
                  nh = 1;
               }
            }
         }
      }
      nps = NPBLK*ipp;
/* loop over remaining particles */
      for (j = nps; j < npp; j++) {
/* find interpolation weights */
         x = ppart[j+npoff];
         y = ppart[j+nppmx+npoff];
         nn = x;
         mm = y;
         dxp = x - (float) nn;
         dyp = y - (float) mm;
         nm = N*(nn - noff + lxv*(mm - moff));
         amx = 1.0f - dxp;
         amy = 1.0f - dyp;
/* find electric field */
         nn = nm;
         dx = amx*sfxy[nn];
         dy = amx*sfxy[nn+1];
         dz = amx*sfxy[nn+2];
         mm = nn + N;
         dx = amy*(dxp*sfxy[mm] + dx);
         dy = amy*(dxp*sfxy[mm+1] + dy);
         dz = amy*(dxp*sfxy[mm+2] + dz);
         nn += N*lxv;
         acx = amx*sfxy[nn];
         acy = amx*sfxy[nn+1];
         acz = amx*sfxy[nn+2];
         mm = nn + N;
         dx += dyp*(dxp*sfxy[mm] + acx);
         dy += dyp*(dxp*sfxy[mm+1] + acy);
         dz += dyp*(dxp*sfxy[mm+2] + acz);
/* find magnetic field */
         nn = nm;
         ox = amx*sbxy[nn];
         oy = amx*sbxy[nn+1];
         oz = amx*sbxy[nn+2];
         mm = nn + N;
         ox = amy*(dxp*sbxy[mm] + ox);
         oy = amy*(dxp*sbxy[mm+1] + oy);
         oz = amy*(dxp*sbxy[mm+2] + oz);
         nn += N*lxv;
         acx = amx*sbxy[nn];
         acy = amx*sbxy[nn+1];
         acz = amx*sbxy[nn+2];
         mm = nn + N;
         ox += dyp*(dxp*sbxy[mm] + acx);
         oy += dyp*(dxp*sbxy[mm+1] + acy);
         oz += dyp*(dxp*sbxy[mm+2] + acz);
/* calculate half impulse */
         dx *= qtmh;
         dy *= qtmh;
         dz *= qtmh;
/* half acceleration */
         acx = ppart[j+2*nppmx+npoff] + dx;
         acy = ppart[j+3*nppmx+npoff] + dy;
         acz = ppart[j+4*nppmx+npoff] + dz;
/* find inverse gamma */
         p2 = acx*acx + acy*acy + acz*acz;
         gami = 1.0f/sqrtf(1.0f + p2*ci2);
/* renormalize magnetic field */
         qtmg = qtmh*gami;
/* time-centered kinetic energy */
         sum1 += gami*p2/(1.0f + gami);
/* calculate cyclotron frequency */
         omxt = qtmg*ox;
         omyt = qtmg*oy;
         omzt = qtmg*oz;
/* calculate rotation matrix */
         omt = omxt*omxt + omyt*omyt + omzt*omzt;
         anorm = 2.0f/(1.0f + omt);
         omt = 0.5f*(1.0f - omt);
         rot4 = omxt*omyt;
         rot7 = omxt*omzt;
         rot8 = omyt*omzt;
         rot1 = omt + omxt*omxt;
         rot5 = omt + omyt*omyt;
         rot9 = omt + omzt*omzt;
         rot2 = omzt + rot4;
         rot4 -= omzt;
         rot3 = -omyt + rot7;
         rot7 += omyt;
         rot6 = omxt + rot8;
         rot8 -= omxt;
/* new momentum */
         vx = (rot1*acx + rot2*acy + rot3*acz)*anorm + dx;
         vy = (rot4*acx + rot5*acy + rot6*acz)*anorm + dy;
         vz = (rot7*acx + rot8*acy + rot9*acz)*anorm + dz;
/* update inverse gamma */
         p2 = vx*vx + vy*vy + vz*vz;
         dtg = dtc/sqrtf(1.0f + p2*ci2);
/* new position */
         dx = x + vx*dtg;
         dy = y + vy*dtg;
/* find particles going out of bounds */
         mm = 0;
/* count how many particles are going in each direction in ncl */
/* save their address and destination in ihole */
/* use periodic boundary conditions and check for roundoff error */
/* mm = direction particle is going (same 1..8 encoding as above) */
         if (dx >= edgerx) {
            if (dx >= anx)
               dx -= anx;
            mm = 2;
         }
         else if (dx < edgelx) {
            if (dx < 0.0f) {
               dx += anx;
               if (dx < anx)
                  mm = 1;
               else
                  dx = 0.0;
            }
            else {
               mm = 1;
            }
         }
         if (dy >= edgery) {
            if (dy >= any)
               dy -= any;
            mm += 6;
         }
         else if (dy < edgely) {
            if (dy < 0.0) {
               dy += any;
               if (dy < any)
                  mm += 3;
               else
                  dy = 0.0;
            }
            else {
               mm += 3;
            }
         }
/* set new position */
         ppart[j+npoff] = dx;
         ppart[j+nppmx+npoff] = dy;
/* set new momentum */
         ppart[j+2*nppmx+npoff] = vx;
         ppart[j+3*nppmx+npoff] = vy;
         ppart[j+4*nppmx+npoff] = vz;
/* increment counters */
         if (mm > 0) {
            ncl[mm+8*k-1] += 1;
            ih += 1;
            if (ih <= ntmax) {
               ihole[2*(ih+(ntmax+1)*k)] = j + 1;
               ihole[1+2*(ih+(ntmax+1)*k)] = mm;
            }
            else {
               nh = 1;
            }
         }
      }
      sum2 += sum1;
/* set error and end of file flag */
/* ihole overflow */
      if (nh > 0) {
         *irc = ih;
         ih = -ih;
      }
/* first ihole entry for this tile holds the hole count (< 0 on overflow) */
      ihole[2*(ntmax+1)*k] = ih;
   }
/* normalize kinetic energy */
   *ek += sum2;
   return;
#undef N
#undef LVECT
#undef NPBLK
#undef MXV
#undef MYV
}
/*--------------------------------------------------------------------*/
void cgppost2lt(float ppart[], float q[], int kpic[], float qm,
                int nppmx, int idimp, int mx, int my, int nxv, int nyv,
                int mx1, int mxy1) {
/* deposit particle charge onto the grid for a 2d code, using
   first-order linear (bilinear) interpolation with periodic boundaries.
   OpenMP version using guard cells; particles are stored in a segmented
   (tiled) array and charge is accumulated per tile before being merged.
   17 flops/particle, 6 loads, 4 stores
   input: all, output: q
   charge density is approximated by values at the nearest grid points:
   q(n,m)     += qm*(1.-dx)*(1.-dy)
   q(n+1,m)   += qm*dx*(1.-dy)
   q(n,m+1)   += qm*(1.-dx)*dy
   q(n+1,m+1) += qm*dx*dy
   where n,m = leftmost grid points and dx = x-n, dy = y-m
   ppart[m][0][n] = position x of particle n in tile m
   ppart[m][1][n] = position y of particle n in tile m
   q[k][j] = charge density at grid point j,k
   kpic = number of particles per tile
   qm = charge on particle, in units of e
   nppmx = maximum number of particles in tile
   idimp = size of phase space = 4
   mx/my = number of grids in sorting cell in x/y
   nxv = first dimension of charge array, must be >= nx+1
   nyv = second dimension of charge array, must be >= ny+1
   mx1 = (system length in x direction - 1)/mx + 1
   mxy1 = mx1*my1, where my1 = (system length in y direction - 1)/my + 1
local data */
#define MXV 33
#define MYV 33
   int offx, offy, pstart, count, stride;
   int i, j, k, ix, iy;
   float px, py, wx, wy, cmx, cmy;
/* sq = per-tile charge accumulator, including one guard cell */
   float sq[MXV*MYV];
/* float sq[(mx+1)*(my+1)]; */
   stride = mx + 1;
/* error if local array is too small */
/* if ((mx >= MXV) || (my >= MYV)) */
/*    return;                      */
/* loop over tiles */
#pragma omp parallel for \
private(i,j,k,offx,offy,count,pstart,ix,iy,px,py,wx,wy,cmx,cmy,sq)
   for (k = 0; k < mxy1; k++) {
/* offx/offy = global grid offset of this tile */
      offy = k/mx1;
      offx = mx*(k - mx1*offy);
      offy = my*offy;
      count = kpic[k];
      pstart = idimp*nppmx*k;
/* zero out the local accumulator */
      for (j = 0; j < stride*(my+1); j++) {
         sq[j] = 0.0f;
      }
/* accumulate charge from each particle in this tile */
      for (j = 0; j < count; j++) {
/* bilinear interpolation weights from the particle position */
         px = ppart[j+pstart];
         py = ppart[j+nppmx+pstart];
         ix = px;
         iy = py;
         wx = qm*(px - (float) ix);
         wy = py - (float) iy;
         ix = ix - offx + stride*(iy - offy);
         cmx = qm - wx;
         cmy = 1.0f - wy;
/* scatter to the four surrounding grid points in the tile buffer */
         sq[ix] += cmx*cmy;
         sq[ix+1] += wx*cmy;
         ix += stride;
         sq[ix] += cmx*wy;
         sq[ix+1] += wx*wy;
      }
/* add interior of the tile buffer into the global array (private rows) */
      ix = nxv - offx;
      if (mx < ix)
         ix = mx;
      iy = nyv - offy;
      if (my < iy)
         iy = my;
      for (j = 1; j < iy; j++) {
         for (i = 1; i < ix; i++) {
            q[i+offx+nxv*(j+offy)] += sq[i+stride*j];
         }
      }
/* merge edge rows/columns, shared with neighboring tiles, atomically */
      iy = nyv - offy;
      if (my+1 < iy)
         iy = my+1;
      for (i = 1; i < ix; i++) {
#pragma omp atomic
         q[i+offx+nxv*offy] += sq[i];
         if (iy > my) {
#pragma omp atomic
            q[i+offx+nxv*(iy+offy-1)] += sq[i+stride*(iy-1)];
         }
      }
      ix = nxv - offx;
      if (mx+1 < ix)
         ix = mx+1;
      for (j = 0; j < iy; j++) {
#pragma omp atomic
         q[offx+nxv*(j+offy)] += sq[stride*j];
         if (ix > mx) {
#pragma omp atomic
            q[ix+offx-1+nxv*(j+offy)] += sq[ix-1+stride*j];
         }
      }
   }
   return;
#undef MXV
#undef MYV
}
/*--------------------------------------------------------------------*/
void cvgppost2lt(float ppart[], float q[], int kpic[], float qm,
                 int nppmx, int idimp, int mx, int my, int nxv, int nyv,
                 int mx1, int mxy1) {
/* for 2d code, this subroutine calculates particle charge density
   using first-order linear interpolation, periodic boundaries
   vectorizable/OpenMP version using guard cells
   data deposited in tiles
   particles stored segmented array
   17 flops/particle, 6 loads, 4 stores
   input: all, output: q
   charge density is approximated by values at the nearest grid points
   q(n,m)=qm*(1.-dx)*(1.-dy)
   q(n+1,m)=qm*dx*(1.-dy)
   q(n,m+1)=qm*(1.-dx)*dy
   q(n+1,m+1)=qm*dx*dy
   where n,m = leftmost grid points and dx = x-n, dy = y-m
   ppart[m][0][n] = position x of particle n in tile m
   ppart[m][1][n] = position y of particle n in tile m
   q[k][j] = charge density at grid point j,k
   kpic = number of particles per tile
   qm = charge on particle, in units of e
   nppmx = maximum number of particles in tile
   idimp = size of phase space = 4
   mx/my = number of grids in sorting cell in x/y
   nxv = first dimension of charge array, must be >= nx+1
   nyv = second dimension of charge array, must be >= ny+1
   mx1 = (system length in x direction - 1)/mx + 1
   mxy1 = mx1*my1, where my1 = (system length in y direction - 1)/my + 1
local data */
/* MXV/MYV = maximum tile size + guard cell; NPBLK = particle block size */
/* for vectorization; LVECT = 4 deposit points per particle             */
#define MXV 33
#define MYV 33
#define NPBLK 32
#define LVECT 4
   int noff, moff, npoff, npp, ipp, joff, nps;
   int i, j, k, m, nn, mm, lxv;
   float x, y, dxp, dyp, amx, amy;
/* sq = per-tile charge accumulator, including one guard cell */
   float sq[MXV*MYV];
/* float sq[(mx+1)*(my+1)]; */
/* scratch arrays: n = deposit base index, s = 4 weights per particle */
   int n[NPBLK];
   float s[NPBLK*LVECT];
/* lxv = x stride of the local accumulator */
   lxv = mx + 1;
/* error if local array is too small */
/* if ((mx >= MXV) || (my >= MYV)) */
/*    return;                      */
/* loop over tiles */
#pragma omp parallel for \
private(i,j,k,m,noff,moff,npp,npoff,ipp,joff,nps,nn,mm,x,y,dxp,dyp, \
amx,amy,sq,n,s)
   for (k = 0; k < mxy1; k++) {
/* noff/moff = global grid offset of this tile */
      noff = k/mx1;
      moff = my*noff;
      noff = mx*(k - mx1*noff);
      npp = kpic[k];
      npoff = idimp*nppmx*k;
/* zero out local accumulator */
      for (j = 0; j < lxv*(my+1); j++) {
         sq[j] = 0.0f;
      }
/* loop over particles in tile */
      ipp = npp/NPBLK;
/* outer loop over number of full blocks */
      for (m = 0; m < ipp; m++) {
         joff = NPBLK*m;
/* inner loop over particles in block */
         for (j = 0; j < NPBLK; j++) {
/* find interpolation weights */
            x = ppart[j+joff+npoff];
            y = ppart[j+joff+nppmx+npoff];
            nn = x;
            mm = y;
            dxp = qm*(x - (float) nn);
            dyp = y - (float) mm;
/* n[j] = local index of lower-left grid point for particle j */
            n[j] = nn - noff + lxv*(mm - moff);
            amx = qm - dxp;
            amy = 1.0f - dyp;
            s[j] = amx*amy;
            s[j+NPBLK] = dxp*amy;
            s[j+2*NPBLK] = amx*dyp;
            s[j+3*NPBLK] = dxp*dyp;
         }
/* deposit charge within tile to local accumulator */
         for (j = 0; j < NPBLK; j++) {
            nn = n[j];
/* mm = index of the row above, offset so that i = 2,3 address it below */
            mm = nn + lxv - 2;
#pragma ivdep
/* i = 0,1 deposit on the lower row, i = 2,3 on the upper row */
            for (i = 0; i < LVECT; i++) {
               if (i > 1)
                  nn = mm;
               sq[i+nn] += s[j+NPBLK*i];
            }
         }
      }
      nps = NPBLK*ipp;
/* loop over remaining particles */
      for (j = nps; j < npp; j++) {
/* find interpolation weights */
         x = ppart[j+npoff];
         y = ppart[j+nppmx+npoff];
         nn = x;
         mm = y;
         dxp = qm*(x - (float) nn);
         dyp = y - (float) mm;
         nn = nn - noff + lxv*(mm - moff);
         amx = qm - dxp;
         amy = 1.0f - dyp;
/* deposit charge within tile to local accumulator */
         x = sq[nn] + amx*amy;
         y = sq[nn+1] + dxp*amy;
         sq[nn] = x;
         sq[nn+1] = y;
         nn += lxv;
         x = sq[nn] + amx*dyp;
         y = sq[nn+1] + dxp*dyp;
         sq[nn] = x;
         sq[nn+1] = y;
      }
/* deposit charge to interior points in global array */
      nn = nxv - noff;
      mm = nyv - moff;
      nn = mx < nn ? mx : nn;
      mm = my < mm ? my : mm;
      for (j = 1; j < mm; j++) {
         for (i = 1; i < nn; i++) {
            q[i+noff+nxv*(j+moff)] += sq[i+lxv*j];
         }
      }
/* deposit charge to edge points in global array */
/* edges are shared with neighboring tiles, so updates must be atomic */
      mm = nyv - moff;
      mm = my+1 < mm ? my+1 : mm;
      for (i = 1; i < nn; i++) {
#pragma omp atomic
         q[i+noff+nxv*moff] += sq[i];
         if (mm > my) {
#pragma omp atomic
            q[i+noff+nxv*(mm+moff-1)] += sq[i+lxv*(mm-1)];
         }
      }
      nn = nxv - noff;
      nn = mx+1 < nn ? mx+1 : nn;
      for (j = 0; j < mm; j++) {
#pragma omp atomic
         q[noff+nxv*(j+moff)] += sq[lxv*j];
         if (nn > mx) {
#pragma omp atomic
            q[nn+noff-1+nxv*(j+moff)] += sq[nn-1+lxv*j];
         }
      }
   }
   return;
#undef LVECT
#undef NPBLK
#undef MXV
#undef MYV
}
/*--------------------------------------------------------------------*/
void cgjppost2lt(float ppart[], float cu[], int kpic[], float qm,
                 float dt, int nppmx, int idimp, int nx, int ny, int mx,
                 int my, int nxv, int nyv, int mx1, int mxy1,
                 int ipbc) {
/* deposit particle current density for a 2-1/2d code, using first-order
   linear (bilinear) interpolation; particle positions are also advanced
   a half time-step.  OpenMP version using guard cells; particles are
   stored in a segmented (tiled) array and current is accumulated per
   tile before being merged into the global array.
   41 flops/particle, 17 loads, 14 stores
   input: all, output: ppart, cu
   current density is approximated by values at the nearest grid points:
   cu(i,n,m)     += qci*(1.-dx)*(1.-dy)
   cu(i,n+1,m)   += qci*dx*(1.-dy)
   cu(i,n,m+1)   += qci*(1.-dx)*dy
   cu(i,n+1,m+1) += qci*dx*dy
   where n,m = leftmost grid points, dx = x-n, dy = y-m,
   and qci = qm*vi, where i = x,y,z
   ppart[m][0][n] = position x of particle n in tile m
   ppart[m][1][n] = position y of particle n in tile m
   ppart[m][2][n] = x velocity of particle n in tile m
   ppart[m][3][n] = y velocity of particle n in tile m
   ppart[m][4][n] = z velocity of particle n in tile m
   cu[k][j][i] = ith component of current density at grid point j,k
   kpic = number of particles per tile
   qm = charge on particle, in units of e
   dt = time interval between successive calculations
   nppmx = maximum number of particles in tile
   idimp = size of phase space = 5
   nx/ny = system length in x/y direction
   mx/my = number of grids in sorting cell in x/y
   nxv = second dimension of current array, must be >= nx+1
   nyv = third dimension of current array, must be >= ny+1
   mx1 = (system length in x direction - 1)/mx + 1
   mxy1 = mx1*my1, where my1 = (system length in y direction - 1)/my + 1
   ipbc = particle boundary condition = (0,1,2,3) =
   (none,2d periodic,2d reflecting,mixed reflecting/periodic)
local data */
#define MXV 33
#define MYV 33
#define N 4
   int offx, offy, pstart, count, stride;
   int i, j, k, ia, ib;
   float xlo, ylo, xhi, yhi, wx, wy, cx, cy;
   float px, py, qx, qy, ux, uy, uz;
/* scu = per-tile current accumulator, including one guard cell */
   float scu[N*MXV*MYV];
/* float scu[N*(mx+1)*(my+1)]; */
   stride = mx + 1;
/* set boundary values: default is no reflecting walls */
   xlo = 0.0f;
   ylo = 0.0f;
   xhi = (float) nx;
   yhi = (float) ny;
   if (ipbc==2) {
/* fully reflecting walls, one cell in from each boundary */
      xlo = 1.0f;
      ylo = 1.0f;
      xhi = (float) (nx-1);
      yhi = (float) (ny-1);
   }
   else if (ipbc==3) {
/* reflecting in x, periodic in y */
      xlo = 1.0f;
      xhi = (float) (nx-1);
   }
/* error if local array is too small */
/* if ((mx >= MXV) || (my >= MYV)) */
/*    return;                      */
/* loop over tiles */
#pragma omp parallel for \
private(i,j,k,offx,offy,count,pstart,ia,ib,px,py,wx,wy,cx,cy,qx,qy,ux, \
uy,uz,scu)
   for (k = 0; k < mxy1; k++) {
/* offx/offy = global grid offset of this tile */
      offy = k/mx1;
      offx = mx*(k - mx1*offy);
      offy = my*offy;
      count = kpic[k];
      pstart = idimp*nppmx*k;
/* zero out local accumulator */
      for (j = 0; j < N*stride*(my+1); j++) {
         scu[j] = 0.0f;
      }
/* loop over particles in tile */
      for (j = 0; j < count; j++) {
/* bilinear interpolation weights from the particle position */
         px = ppart[j+pstart];
         py = ppart[j+nppmx+pstart];
         ia = px;
         ib = py;
         wx = qm*(px - (float) ia);
         wy = py - (float) ib;
         ia = N*(ia - offx + stride*(ib - offy));
         cx = qm - wx;
         cy = 1.0f - wy;
/* deposit current at the four surrounding grid points */
         qx = cx*cy;
         qy = wx*cy;
         ux = ppart[j+2*nppmx+pstart];
         uy = ppart[j+3*nppmx+pstart];
         uz = ppart[j+4*nppmx+pstart];
         scu[ia] += ux*qx;
         scu[ia+1] += uy*qx;
         scu[ia+2] += uz*qx;
         qx = cx*wy;
         ib = ia + N;
         scu[ib] += ux*qy;
         scu[ib+1] += uy*qy;
         scu[ib+2] += uz*qy;
         qy = wx*wy;
         ia += N*stride;
         scu[ia] += ux*qx;
         scu[ia+1] += uy*qx;
         scu[ia+2] += uz*qx;
         ib = ia + N;
         scu[ib] += ux*qy;
         scu[ib+1] += uy*qy;
         scu[ib+2] += uz*qy;
/* advance position half a time-step */
         qx = px + ux*dt;
         qy = py + uy*dt;
/* reflecting boundary conditions */
         if (ipbc==2) {
            if ((qx < xlo) || (qx >= xhi)) {
               qx = px;
               ppart[j+2*nppmx+pstart] = -ux;
            }
            if ((qy < ylo) || (qy >= yhi)) {
               qy = py;
               ppart[j+3*nppmx+pstart] = -uy;
            }
         }
/* mixed reflecting/periodic boundary conditions */
         else if (ipbc==3) {
            if ((qx < xlo) || (qx >= xhi)) {
               qx = px;
               ppart[j+2*nppmx+pstart] = -ux;
            }
         }
/* store the advanced position */
         ppart[j+pstart] = qx;
         ppart[j+nppmx+pstart] = qy;
      }
/* add interior of the tile buffer to the global current array */
      ia = nxv - offx;
      if (mx < ia)
         ia = mx;
      ib = nyv - offy;
      if (my < ib)
         ib = my;
      for (j = 1; j < ib; j++) {
         for (i = 1; i < ia; i++) {
            cu[N*(i+offx+nxv*(j+offy))] += scu[N*(i+stride*j)];
            cu[1+N*(i+offx+nxv*(j+offy))] += scu[1+N*(i+stride*j)];
            cu[2+N*(i+offx+nxv*(j+offy))] += scu[2+N*(i+stride*j)];
         }
      }
/* merge edge rows/columns, shared with neighboring tiles, atomically */
      ib = nyv - offy;
      if (my+1 < ib)
         ib = my+1;
      for (i = 1; i < ia; i++) {
#pragma omp atomic
         cu[N*(i+offx+nxv*offy)] += scu[N*i];
#pragma omp atomic
         cu[1+N*(i+offx+nxv*offy)] += scu[1+N*i];
#pragma omp atomic
         cu[2+N*(i+offx+nxv*offy)] += scu[2+N*i];
         if (ib > my) {
#pragma omp atomic
            cu[N*(i+offx+nxv*(ib+offy-1))] += scu[N*(i+stride*(ib-1))];
#pragma omp atomic
            cu[1+N*(i+offx+nxv*(ib+offy-1))] += scu[1+N*(i+stride*(ib-1))];
#pragma omp atomic
            cu[2+N*(i+offx+nxv*(ib+offy-1))] += scu[2+N*(i+stride*(ib-1))];
         }
      }
      ia = nxv - offx;
      if (mx+1 < ia)
         ia = mx+1;
      for (j = 0; j < ib; j++) {
#pragma omp atomic
         cu[N*(offx+nxv*(j+offy))] += scu[N*stride*j];
#pragma omp atomic
         cu[1+N*(offx+nxv*(j+offy))] += scu[1+N*stride*j];
#pragma omp atomic
         cu[2+N*(offx+nxv*(j+offy))] += scu[2+N*stride*j];
         if (ia > mx) {
#pragma omp atomic
            cu[N*(ia+offx-1+nxv*(j+offy))] += scu[N*((ia-1)+stride*j)];
#pragma omp atomic
            cu[1+N*(ia+offx-1+nxv*(j+offy))] += scu[1+N*((ia-1)+stride*j)];
#pragma omp atomic
            cu[2+N*(ia+offx-1+nxv*(j+offy))] += scu[2+N*((ia-1)+stride*j)];
         }
      }
   }
   return;
#undef N
#undef MXV
#undef MYV
}
/*--------------------------------------------------------------------*/
void cgjppostf2lt(float ppart[], float cu[], int kpic[], int ncl[],
                  int ihole[], float qm, float dt, int nppmx, int idimp,
                  int nx, int ny, int mx, int my, int nxv, int nyv,
                  int mx1, int mxy1, int ntmax, int *irc) {
/* for 2-1/2d code, this subroutine calculates particle current density
   using first-order linear interpolation
   in addition, particle positions are advanced a half time-step
   with periodic boundary conditions.
   also determines list of particles which are leaving this tile
   OpenMP version using guard cells
   data deposited in tiles
   particles stored segmented array
   41 flops/particle, 17 loads, 14 stores
   input: all except ncl, ihole, irc,
   output: ppart, cu, ncl, ihole, irc
   current density is approximated by values at the nearest grid points
   cu(i,n,m)=qci*(1.-dx)*(1.-dy)
   cu(i,n+1,m)=qci*dx*(1.-dy)
   cu(i,n,m+1)=qci*(1.-dx)*dy
   cu(i,n+1,m+1)=qci*dx*dy
   where n,m = leftmost grid points and dx = x-n, dy = y-m
   and qci = qm*vi, where i = x,y,z
   ppart[m][0][n] = position x of particle n in tile m
   ppart[m][1][n] = position y of particle n in tile m
   ppart[m][2][n] = x velocity of particle n in tile m
   ppart[m][3][n] = y velocity of particle n in tile m
   ppart[m][4][n] = z velocity of particle n in tile m
   cu[k][j][i] = ith component of current density at grid point j,k
   kpic[k] = number of particles in tile k
   ncl[k][i] = number of particles going to destination i, tile k
   ihole[k][:][0] = location of hole in array left by departing particle
   ihole[k][:][1] = destination of particle leaving hole
   ihole[k][0][0] = ih, number of holes left (error, if negative)
   qm = charge on particle, in units of e
   dt = time interval between successive calculations
   nppmx = maximum number of particles in tile
   idimp = size of phase space = 5
   nx/ny = system length in x/y direction
   mx/my = number of grids in sorting cell in x/y
   nxv = second dimension of current array, must be >= nx+1
   nyv = third dimension of current array, must be >= ny+1
   mx1 = (system length in x direction - 1)/mx + 1
   mxy1 = mx1*my1, where my1 = (system length in y direction - 1)/my + 1
   ntmax = size of hole array for particles leaving tiles
   irc = maximum overflow, returned only if error occurs, when irc > 0
   optimized version
local data */
#define MXV             33
#define MYV             33
#define N               4
   int noff, moff, npoff, npp;
   int i, j, k, ih, nh, nn, mm, mxv;
   float dxp, dyp, amx, amy;
   float x, y, dx, dy, vx, vy, vz;
   float anx, any, edgelx, edgely, edgerx, edgery;
   /* scu = per-thread local current accumulator for one tile,
      N components per grid point; sized by compile-time maxima */
   float scu[N*MXV*MYV];
/* float scu[N*(mx+1)*(my+1)]; */
   mxv = mx + 1;
   anx = (float) nx;
   any = (float) ny;
/* error if local array is too small */
/* if ((mx >= MXV) || (my >= MYV)) */
/*    return;                      */
/* loop over tiles */
#pragma omp parallel for \
private(i,j,k,noff,moff,npp,npoff,nn,mm,ih,nh,x,y,dxp,dyp,amx,amy,dx, \
dy,vx,vy,vz,edgelx,edgely,edgerx,edgery,scu)
   for (k = 0; k < mxy1; k++) {
/* noff/moff become the global x/y grid offset of tile k */
      noff = k/mx1;
      moff = my*noff;
      noff = mx*(k - mx1*noff);
      npp = kpic[k];
      npoff = idimp*nppmx*k;
/* tile extents, clipped at the global grid boundary */
      nn = nx - noff;
      nn = mx < nn ? mx : nn;
      mm = ny - moff;
      mm = my < mm ? my : mm;
/* tile edges used to detect particles leaving this tile */
      edgelx = noff;
      edgerx = noff + nn;
      edgely = moff;
      edgery = moff + mm;
      ih = 0;
      nh = 0;
/* (nn and mm are recomputed before their next use) */
      nn += 1;
      mm += 1;
/* zero out local accumulator */
      for (j = 0; j < N*mxv*(my+1); j++) {
         scu[j] = 0.0f;
      }
/* clear counters: 8 possible exit directions per tile */
      for (j = 0; j < 8; j++) {
         ncl[j+8*k] = 0;
      }
/* loop over particles in tile */
      for (j = 0; j < npp; j++) {
/* find interpolation weights */
         x = ppart[j+npoff];
         y = ppart[j+nppmx+npoff];
         nn = x;
         mm = y;
/* charge qm is folded into the x weights (dxp and amx) */
         dxp = qm*(x - (float) nn);
         dyp = y - (float) mm;
         nn = N*(nn - noff + mxv*(mm - moff));
         amx = qm - dxp;
         amy = 1.0f - dyp;
/* deposit current */
         dx = amx*amy;
         dy = dxp*amy;
         vx = ppart[j+2*nppmx+npoff];
         vy = ppart[j+3*nppmx+npoff];
         vz = ppart[j+4*nppmx+npoff];
         scu[nn] += vx*dx;
         scu[nn+1] += vy*dx;
         scu[nn+2] += vz*dx;
         dx = amx*dyp;
         mm = nn + N;
         scu[mm] += vx*dy;
         scu[mm+1] += vy*dy;
         scu[mm+2] += vz*dy;
         dy = dxp*dyp;
         nn += N*mxv;
         scu[nn] += vx*dx;
         scu[nn+1] += vy*dx;
         scu[nn+2] += vz*dx;
         mm = nn + N;
         scu[mm] += vx*dy;
         scu[mm+1] += vy*dy;
         scu[mm+2] += vz*dy;
/* advance position half a time-step */
         dx = x + vx*dt;
         dy = y + vy*dt;
/* find particles going out of bounds */
         mm = 0;
/* count how many particles are going in each direction in ncl   */
/* save their address and destination in ihole                   */
/* use periodic boundary conditions and check for roundoff error */
/* mm = direction particle is going                              */
         if (dx >= edgerx) {
            if (dx >= anx)
               dx -= anx;
            mm = 2;
         }
         else if (dx < edgelx) {
            if (dx < 0.0f) {
               dx += anx;
               if (dx < anx)
                  mm = 1;
               else
                  dx = 0.0;
            }
            else {
               mm = 1;
            }
         }
         if (dy >= edgery) {
            if (dy >= any)
               dy -= any;
            mm += 6;
         }
         else if (dy < edgely) {
            if (dy < 0.0) {
               dy += any;
               if (dy < any)
                  mm += 3;
               else
                  dy = 0.0;
            }
            else {
               mm += 3;
            }
         }
/* set new position */
         ppart[j+npoff] = dx;
         ppart[j+nppmx+npoff] = dy;
/* increment counters */
         if (mm > 0) {
            ncl[mm+8*k-1] += 1;
            ih += 1;
/* holes are stored 1-based; slot 0 of ihole holds the hole count */
            if (ih <= ntmax) {
               ihole[2*(ih+(ntmax+1)*k)] = j + 1;
               ihole[1+2*(ih+(ntmax+1)*k)] = mm;
            }
            else {
/* nh flags ihole overflow for this tile */
               nh = 1;
            }
         }
      }
/* deposit current to interior points in global array */
/* interior points belong to exactly one tile, so no atomics needed */
      nn = nxv - noff;
      mm = nyv - moff;
      nn = mx < nn ? mx : nn;
      mm = my < mm ? my : mm;
      for (j = 1; j < mm; j++) {
         for (i = 1; i < nn; i++) {
            cu[N*(i+noff+nxv*(j+moff))] += scu[N*(i+mxv*j)];
            cu[1+N*(i+noff+nxv*(j+moff))] += scu[1+N*(i+mxv*j)];
            cu[2+N*(i+noff+nxv*(j+moff))] += scu[2+N*(i+mxv*j)];
         }
      }
/* deposit current to edge points in global array */
/* edge rows/columns are shared with neighboring tiles: use atomics */
      mm = nyv - moff;
      mm = my+1 < mm ? my+1 : mm;
      for (i = 1; i < nn; i++) {
#pragma omp atomic
         cu[N*(i+noff+nxv*moff)] += scu[N*i];
#pragma omp atomic
         cu[1+N*(i+noff+nxv*moff)] += scu[1+N*i];
#pragma omp atomic
         cu[2+N*(i+noff+nxv*moff)] += scu[2+N*i];
         if (mm > my) {
#pragma omp atomic
            cu[N*(i+noff+nxv*(mm+moff-1))] += scu[N*(i+mxv*(mm-1))];
#pragma omp atomic
            cu[1+N*(i+noff+nxv*(mm+moff-1))] += scu[1+N*(i+mxv*(mm-1))];
#pragma omp atomic
            cu[2+N*(i+noff+nxv*(mm+moff-1))] += scu[2+N*(i+mxv*(mm-1))];
         }
      }
      nn = nxv - noff;
      nn = mx+1 < nn ? mx+1 : nn;
      for (j = 0; j < mm; j++) {
#pragma omp atomic
         cu[N*(noff+nxv*(j+moff))] += scu[N*mxv*j];
#pragma omp atomic
         cu[1+N*(noff+nxv*(j+moff))] += scu[1+N*mxv*j];
#pragma omp atomic
         cu[2+N*(noff+nxv*(j+moff))] += scu[2+N*mxv*j];
         if (nn > mx) {
#pragma omp atomic
            cu[N*(nn+noff-1+nxv*(j+moff))] += scu[N*((nn-1)+mxv*j)];
#pragma omp atomic
            cu[1+N*(nn+noff-1+nxv*(j+moff))] += scu[1+N*((nn-1)+mxv*j)];
#pragma omp atomic
            cu[2+N*(nn+noff-1+nxv*(j+moff))] += scu[2+N*((nn-1)+mxv*j)];
         }
      }
/* set error and end of file flag */
/* ihole overflow */
      if (nh > 0) {
         *irc = ih;
/* negative hole count signals the overflow error to the caller */
         ih = -ih;
      }
      ihole[2*(ntmax+1)*k] = ih;
   }
   return;
#undef N
#undef MXV
#undef MYV
}
/*--------------------------------------------------------------------*/
void cgrjppost2lt(float ppart[], float cu[], int kpic[], float qm,
                  float dt, float ci, int nppmx, int idimp, int nx,
                  int ny, int mx, int my, int nxv, int nyv, int mx1,
                  int mxy1, int ipbc) {
/* for 2-1/2d code, this subroutine calculates particle current density
   using first-order linear interpolation
   in addition, particle positions are advanced a half time-step
   OpenMP version using guard cells
   data deposited in tiles
   particles stored segmented array
   47 flops/particle, 1 divide, 1 sqrt, 17 loads, 14 stores
   input: all, output: ppart, cu
   current density is approximated by values at the nearest grid points
   cu(i,n,m)=qci*(1.-dx)*(1.-dy)
   cu(i,n+1,m)=qci*dx*(1.-dy)
   cu(i,n,m+1)=qci*(1.-dx)*dy
   cu(i,n+1,m+1)=qci*dx*dy
   where n,m = leftmost grid points and dx = x-n, dy = y-m
   and qci = qm*pi*gami, where i = x,y,z
   where gami = 1./sqrt(1.+sum(pi**2)*ci*ci)
   ppart[m][0][n] = position x of particle n in tile m
   ppart[m][1][n] = position y of particle n in tile m
   ppart[m][2][n] = x momentum of particle n in tile m
   ppart[m][3][n] = y momentum of particle n in tile m
   ppart[m][4][n] = z momentum of particle n in tile m
   cu[k][j][i] = ith component of current density at grid point j,k
   kpic = number of particles per tile
   qm = charge on particle, in units of e
   dt = time interval between successive calculations
   ci = reciprocal of velocity of light
   nppmx = maximum number of particles in tile
   idimp = size of phase space = 5
   nx/ny = system length in x/y direction
   mx/my = number of grids in sorting cell in x/y
   nxv = second dimension of current array, must be >= nx+1
   nyv = third dimension of current array, must be >= ny+1
   mx1 = (system length in x direction - 1)/mx + 1
   mxy1 = mx1*my1, where my1 = (system length in y direction - 1)/my + 1
   ipbc = particle boundary condition = (0,1,2,3) =
   (none,2d periodic,2d reflecting,mixed reflecting/periodic)
local data */
#define MXV             33
#define MYV             33
#define N               4
   int noff, moff, npoff, npp, mxv;
   int i, j, k, nn, mm;
   float ci2, edgelx, edgely, edgerx, edgery, dxp, dyp, amx, amy;
   float x, y, dx, dy, vx, vy, vz, ux, uy, uz, p2, gami;
   /* scu = per-thread local current accumulator for one tile */
   float scu[N*MXV*MYV];
/* float scu[N*(mx+1)*(my+1)]; */
   mxv = mx + 1;
   ci2 = ci*ci;
/* set boundary values */
   edgelx = 0.0f;
   edgely = 0.0f;
   edgerx = (float) nx;
   edgery = (float) ny;
   if (ipbc==2) {
      edgelx = 1.0f;
      edgely = 1.0f;
      edgerx = (float) (nx-1);
      edgery = (float) (ny-1);
   }
   else if (ipbc==3) {
      edgelx = 1.0f;
      edgerx = (float) (nx-1);
   }
/* error if local array is too small */
/* if ((mx >= MXV) || (my >= MYV)) */
/*    return;                      */
/* loop over tiles */
#pragma omp parallel for \
private(i,j,k,noff,moff,npp,npoff,nn,mm,x,y,dxp,dyp,amx,amy,dx,dy,vx, \
vy,vz,ux,uy,uz,p2,gami,scu)
   for (k = 0; k < mxy1; k++) {
/* noff/moff become the global x/y grid offset of tile k */
      noff = k/mx1;
      moff = my*noff;
      noff = mx*(k - mx1*noff);
      npp = kpic[k];
      npoff = idimp*nppmx*k;
/* zero out local accumulator */
      for (j = 0; j < N*mxv*(my+1); j++) {
         scu[j] = 0.0f;
      }
/* loop over particles in tile */
      for (j = 0; j < npp; j++) {
/* find interpolation weights */
         x = ppart[j+npoff];
         y = ppart[j+nppmx+npoff];
         nn = x;
         mm = y;
/* charge qm is folded into the x weights (dxp and amx) */
         dxp = qm*(x - (float) nn);
         dyp = y - (float) mm;
/* find inverse gamma */
         ux = ppart[j+2*nppmx+npoff];
         uy = ppart[j+3*nppmx+npoff];
         uz = ppart[j+4*nppmx+npoff];
         p2 = ux*ux + uy*uy + uz*uz;
         gami = 1.0f/sqrtf(1.0f + p2*ci2);
/* calculate weights */
         nn = N*(nn - noff + mxv*(mm - moff));
         amx = qm - dxp;
         amy = 1.0f - dyp;
/* deposit current */
         dx = amx*amy;
         dy = dxp*amy;
/* velocities from momenta: v = u*gami */
         vx = ux*gami;
         vy = uy*gami;
         vz = uz*gami;
         scu[nn] += vx*dx;
         scu[nn+1] += vy*dx;
         scu[nn+2] += vz*dx;
         dx = amx*dyp;
         mm = nn + N;
         scu[mm] += vx*dy;
         scu[mm+1] += vy*dy;
         scu[mm+2] += vz*dy;
         dy = dxp*dyp;
         nn += N*mxv;
         scu[nn] += vx*dx;
         scu[nn+1] += vy*dx;
         scu[nn+2] += vz*dx;
         mm = nn + N;
         scu[mm] += vx*dy;
         scu[mm+1] += vy*dy;
         scu[mm+2] += vz*dy;
/* advance position half a time-step */
         dx = x + vx*dt;
         dy = y + vy*dt;
/* reflecting boundary conditions */
         if (ipbc==2) {
            if ((dx < edgelx) || (dx >= edgerx)) {
               dx = x;
               ppart[j+2*nppmx+npoff] = -ux;
            }
            if ((dy < edgely) || (dy >= edgery)) {
               dy = y;
               ppart[j+3*nppmx+npoff] = -uy;
            }
         }
/* mixed reflecting/periodic boundary conditions */
         else if (ipbc==3) {
            if ((dx < edgelx) || (dx >= edgerx)) {
               dx = x;
               ppart[j+2*nppmx+npoff] = -ux;
            }
         }
/* set new position */
         ppart[j+npoff] = dx;
         ppart[j+nppmx+npoff] = dy;
      }
/* deposit current to interior points in global array */
/* interior points belong to exactly one tile, so no atomics needed */
      nn = nxv - noff;
      mm = nyv - moff;
      nn = mx < nn ? mx : nn;
      mm = my < mm ? my : mm;
      for (j = 1; j < mm; j++) {
         for (i = 1; i < nn; i++) {
            cu[N*(i+noff+nxv*(j+moff))] += scu[N*(i+mxv*j)];
            cu[1+N*(i+noff+nxv*(j+moff))] += scu[1+N*(i+mxv*j)];
            cu[2+N*(i+noff+nxv*(j+moff))] += scu[2+N*(i+mxv*j)];
         }
      }
/* deposit current to edge points in global array */
/* edge rows/columns are shared with neighboring tiles: use atomics */
      mm = nyv - moff;
      mm = my+1 < mm ? my+1 : mm;
      for (i = 1; i < nn; i++) {
#pragma omp atomic
         cu[N*(i+noff+nxv*moff)] += scu[N*i];
#pragma omp atomic
         cu[1+N*(i+noff+nxv*moff)] += scu[1+N*i];
#pragma omp atomic
         cu[2+N*(i+noff+nxv*moff)] += scu[2+N*i];
         if (mm > my) {
#pragma omp atomic
            cu[N*(i+noff+nxv*(mm+moff-1))] += scu[N*(i+mxv*(mm-1))];
#pragma omp atomic
            cu[1+N*(i+noff+nxv*(mm+moff-1))] += scu[1+N*(i+mxv*(mm-1))];
#pragma omp atomic
            cu[2+N*(i+noff+nxv*(mm+moff-1))] += scu[2+N*(i+mxv*(mm-1))];
         }
      }
      nn = nxv - noff;
      nn = mx+1 < nn ? mx+1 : nn;
      for (j = 0; j < mm; j++) {
#pragma omp atomic
         cu[N*(noff+nxv*(j+moff))] += scu[N*mxv*j];
#pragma omp atomic
         cu[1+N*(noff+nxv*(j+moff))] += scu[1+N*mxv*j];
#pragma omp atomic
         cu[2+N*(noff+nxv*(j+moff))] += scu[2+N*mxv*j];
         if (nn > mx) {
#pragma omp atomic
            cu[N*(nn+noff-1+nxv*(j+moff))] += scu[N*((nn-1)+mxv*j)];
#pragma omp atomic
            cu[1+N*(nn+noff-1+nxv*(j+moff))] += scu[1+N*((nn-1)+mxv*j)];
#pragma omp atomic
            cu[2+N*(nn+noff-1+nxv*(j+moff))] += scu[2+N*((nn-1)+mxv*j)];
         }
      }
   }
   return;
#undef N
#undef MXV
#undef MYV
}
/*--------------------------------------------------------------------*/
void cgrjppostf2lt(float ppart[], float cu[], int kpic[], int ncl[],
                   int ihole[], float qm, float dt, float ci, int nppmx,
                   int idimp, int nx, int ny, int mx, int my, int nxv,
                   int nyv, int mx1, int mxy1, int ntmax, int *irc) {
/* for 2-1/2d code, this subroutine calculates particle current density
   using first-order linear interpolation for relativistic particles
   in addition, particle positions are advanced a half time-step
   with periodic boundary conditions.
   also determines list of particles which are leaving this tile
   OpenMP version using guard cells
   data deposited in tiles
   particles stored segmented array
   47 flops/particle, 1 divide, 1 sqrt, 17 loads, 14 stores
   input: all except ncl, ihole, irc,
   output: ppart, cu, ncl, ihole, irc
   current density is approximated by values at the nearest grid points
   cu(i,n,m)=qci*(1.-dx)*(1.-dy)
   cu(i,n+1,m)=qci*dx*(1.-dy)
   cu(i,n,m+1)=qci*(1.-dx)*dy
   cu(i,n+1,m+1)=qci*dx*dy
   where n,m = leftmost grid points and dx = x-n, dy = y-m
   and qci = qm*pi*gami, where i = x,y,z
   where gami = 1./sqrt(1.+sum(pi**2)*ci*ci)
   ppart[m][0][n] = position x of particle n in tile m
   ppart[m][1][n] = position y of particle n in tile m
   ppart[m][2][n] = x momentum of particle n in tile m
   ppart[m][3][n] = y momentum of particle n in tile m
   ppart[m][4][n] = z momentum of particle n in tile m
   cu[k][j][i] = ith component of current density at grid point j,k
   kpic[k] = number of particles in tile k
   ncl[k][i] = number of particles going to destination i, tile k
   ihole[k][:][0] = location of hole in array left by departing particle
   ihole[k][:][1] = destination of particle leaving hole
   ihole[k][0][0] = ih, number of holes left (error, if negative)
   qm = charge on particle, in units of e
   dt = time interval between successive calculations
   ci = reciprocal of velocity of light
   nppmx = maximum number of particles in tile
   idimp = size of phase space = 5
   nx/ny = system length in x/y direction
   mx/my = number of grids in sorting cell in x/y
   nxv = second dimension of current array, must be >= nx+1
   nyv = third dimension of current array, must be >= ny+1
   mx1 = (system length in x direction - 1)/mx + 1
   mxy1 = mx1*my1, where my1 = (system length in y direction - 1)/my + 1
   ntmax = size of hole array for particles leaving tiles
   irc = maximum overflow, returned only if error occurs, when irc > 0
   optimized version
local data */
#define MXV             33
#define MYV             33
#define N               4
   int noff, moff, npoff, npp;
   int i, j, k, ih, nh, nn, mm, mxv;
   float ci2, dxp, dyp, amx, amy;
   float x, y, dx, dy, vx, vy, vz, ux, uy, uz, p2, gami;
   float anx, any, edgelx, edgely, edgerx, edgery;
   /* scu = per-thread local current accumulator for one tile */
   float scu[N*MXV*MYV];
/* float scu[N*(mx+1)*(my+1)]; */
   mxv = mx + 1;
   ci2 = ci*ci;
   anx = (float) nx;
   any = (float) ny;
/* error if local array is too small */
/* if ((mx >= MXV) || (my >= MYV)) */
/*    return;                      */
/* loop over tiles */
#pragma omp parallel for \
private(i,j,k,noff,moff,npp,npoff,nn,mm,ih,nh,x,y,dxp,dyp,amx,amy,dx, \
dy,vx,vy,vz,ux,uy,uz,edgelx,edgely,edgerx,edgery,p2,gami,scu)
   for (k = 0; k < mxy1; k++) {
/* noff/moff become the global x/y grid offset of tile k */
      noff = k/mx1;
      moff = my*noff;
      noff = mx*(k - mx1*noff);
      npp = kpic[k];
      npoff = idimp*nppmx*k;
/* tile extents, clipped at the global grid boundary */
      nn = nx - noff;
      nn = mx < nn ? mx : nn;
      mm = ny - moff;
      mm = my < mm ? my : mm;
/* tile edges used to detect particles leaving this tile */
      edgelx = noff;
      edgerx = noff + nn;
      edgely = moff;
      edgery = moff + mm;
      ih = 0;
      nh = 0;
/* (nn and mm are recomputed before their next use) */
      nn += 1;
      mm += 1;
/* zero out local accumulator */
      for (j = 0; j < N*mxv*(my+1); j++) {
         scu[j] = 0.0f;
      }
/* clear counters: 8 possible exit directions per tile */
      for (j = 0; j < 8; j++) {
         ncl[j+8*k] = 0;
      }
/* loop over particles in tile */
      for (j = 0; j < npp; j++) {
/* find interpolation weights */
         x = ppart[j+npoff];
         y = ppart[j+nppmx+npoff];
         nn = x;
         mm = y;
/* charge qm is folded into the x weights (dxp and amx) */
         dxp = qm*(x - (float) nn);
         dyp = y - (float) mm;
/* find inverse gamma */
         ux = ppart[j+2*nppmx+npoff];
         uy = ppart[j+3*nppmx+npoff];
         uz = ppart[j+4*nppmx+npoff];
         p2 = ux*ux + uy*uy + uz*uz;
         gami = 1.0f/sqrtf(1.0f + p2*ci2);
/* calculate weights */
         nn = N*(nn - noff + mxv*(mm - moff));
         amx = qm - dxp;
         amy = 1.0f - dyp;
/* deposit current */
         dx = amx*amy;
         dy = dxp*amy;
/* velocities from momenta: v = u*gami */
         vx = ux*gami;
         vy = uy*gami;
         vz = uz*gami;
         scu[nn] += vx*dx;
         scu[nn+1] += vy*dx;
         scu[nn+2] += vz*dx;
         dx = amx*dyp;
         mm = nn + N;
         scu[mm] += vx*dy;
         scu[mm+1] += vy*dy;
         scu[mm+2] += vz*dy;
         dy = dxp*dyp;
         nn += N*mxv;
         scu[nn] += vx*dx;
         scu[nn+1] += vy*dx;
         scu[nn+2] += vz*dx;
         mm = nn + N;
         scu[mm] += vx*dy;
         scu[mm+1] += vy*dy;
         scu[mm+2] += vz*dy;
/* advance position half a time-step */
         dx = x + vx*dt;
         dy = y + vy*dt;
/* find particles going out of bounds */
         mm = 0;
/* count how many particles are going in each direction in ncl   */
/* save their address and destination in ihole                   */
/* use periodic boundary conditions and check for roundoff error */
/* mm = direction particle is going                              */
         if (dx >= edgerx) {
            if (dx >= anx)
               dx -= anx;
            mm = 2;
         }
         else if (dx < edgelx) {
            if (dx < 0.0f) {
               dx += anx;
               if (dx < anx)
                  mm = 1;
               else
                  dx = 0.0;
            }
            else {
               mm = 1;
            }
         }
         if (dy >= edgery) {
            if (dy >= any)
               dy -= any;
            mm += 6;
         }
         else if (dy < edgely) {
            if (dy < 0.0) {
               dy += any;
               if (dy < any)
                  mm += 3;
               else
                  dy = 0.0;
            }
            else {
               mm += 3;
            }
         }
/* set new position */
         ppart[j+npoff] = dx;
         ppart[j+nppmx+npoff] = dy;
/* increment counters */
         if (mm > 0) {
            ncl[mm+8*k-1] += 1;
            ih += 1;
/* holes are stored 1-based; slot 0 of ihole holds the hole count */
            if (ih <= ntmax) {
               ihole[2*(ih+(ntmax+1)*k)] = j + 1;
               ihole[1+2*(ih+(ntmax+1)*k)] = mm;
            }
            else {
/* nh flags ihole overflow for this tile */
               nh = 1;
            }
         }
      }
/* deposit current to interior points in global array */
/* interior points belong to exactly one tile, so no atomics needed */
      nn = nxv - noff;
      mm = nyv - moff;
      nn = mx < nn ? mx : nn;
      mm = my < mm ? my : mm;
      for (j = 1; j < mm; j++) {
         for (i = 1; i < nn; i++) {
            cu[N*(i+noff+nxv*(j+moff))] += scu[N*(i+mxv*j)];
            cu[1+N*(i+noff+nxv*(j+moff))] += scu[1+N*(i+mxv*j)];
            cu[2+N*(i+noff+nxv*(j+moff))] += scu[2+N*(i+mxv*j)];
         }
      }
/* deposit current to edge points in global array */
/* edge rows/columns are shared with neighboring tiles: use atomics */
      mm = nyv - moff;
      mm = my+1 < mm ? my+1 : mm;
      for (i = 1; i < nn; i++) {
#pragma omp atomic
         cu[N*(i+noff+nxv*moff)] += scu[N*i];
#pragma omp atomic
         cu[1+N*(i+noff+nxv*moff)] += scu[1+N*i];
#pragma omp atomic
         cu[2+N*(i+noff+nxv*moff)] += scu[2+N*i];
         if (mm > my) {
#pragma omp atomic
            cu[N*(i+noff+nxv*(mm+moff-1))] += scu[N*(i+mxv*(mm-1))];
#pragma omp atomic
            cu[1+N*(i+noff+nxv*(mm+moff-1))] += scu[1+N*(i+mxv*(mm-1))];
#pragma omp atomic
            cu[2+N*(i+noff+nxv*(mm+moff-1))] += scu[2+N*(i+mxv*(mm-1))];
         }
      }
      nn = nxv - noff;
      nn = mx+1 < nn ? mx+1 : nn;
      for (j = 0; j < mm; j++) {
#pragma omp atomic
         cu[N*(noff+nxv*(j+moff))] += scu[N*mxv*j];
#pragma omp atomic
         cu[1+N*(noff+nxv*(j+moff))] += scu[1+N*mxv*j];
#pragma omp atomic
         cu[2+N*(noff+nxv*(j+moff))] += scu[2+N*mxv*j];
         if (nn > mx) {
#pragma omp atomic
            cu[N*(nn+noff-1+nxv*(j+moff))] += scu[N*((nn-1)+mxv*j)];
#pragma omp atomic
            cu[1+N*(nn+noff-1+nxv*(j+moff))] += scu[1+N*((nn-1)+mxv*j)];
#pragma omp atomic
            cu[2+N*(nn+noff-1+nxv*(j+moff))] += scu[2+N*((nn-1)+mxv*j)];
         }
      }
/* set error and end of file flag */
/* ihole overflow */
      if (nh > 0) {
         *irc = ih;
/* negative hole count signals the overflow error to the caller */
         ih = -ih;
      }
      ihole[2*(ntmax+1)*k] = ih;
   }
   return;
#undef N
#undef MXV
#undef MYV
}
/*--------------------------------------------------------------------*/
void cvgjppost2lt(float ppart[], float cu[], int kpic[], float qm,
                  float dt, int nppmx, int idimp, int nx, int ny,
                  int mx, int my, int nxv, int nyv, int mx1, int mxy1,
                  int ipbc) {
/* for 2-1/2d code, this subroutine calculates particle current density
   using first-order linear interpolation
   in addition, particle positions are advanced a half time-step
   vectorizable/OpenMP version using guard cells
   data deposited in tiles
   particles stored segmented array
   41 flops/particle, 17 loads, 14 stores
   input: all, output: ppart, cu
   current density is approximated by values at the nearest grid points
   cu(i,n,m)=qci*(1.-dx)*(1.-dy)
   cu(i,n+1,m)=qci*dx*(1.-dy)
   cu(i,n,m+1)=qci*(1.-dx)*dy
   cu(i,n+1,m+1)=qci*dx*dy
   where n,m = leftmost grid points and dx = x-n, dy = y-m
   and qci = qm*vi, where i = x,y,z
   ppart[m][0][n] = position x of particle n in tile m
   ppart[m][1][n] = position y of particle n in tile m
   ppart[m][2][n] = x velocity of particle n in tile m
   ppart[m][3][n] = y velocity of particle n in tile m
   ppart[m][4][n] = z velocity of particle n in tile m
   cu[k][j][i] = ith component of current density at grid point j,k
   kpic = number of particles per tile
   qm = charge on particle, in units of e
   dt = time interval between successive calculations
   nppmx = maximum number of particles in tile
   idimp = size of phase space = 5
   nx/ny = system length in x/y direction
   mx/my = number of grids in sorting cell in x/y
   nxv = second dimension of current array, must be >= nx+1
   nyv = third dimension of current array, must be >= ny+1
   mx1 = (system length in x direction - 1)/mx + 1
   mxy1 = mx1*my1, where my1 = (system length in y direction - 1)/my + 1
   ipbc = particle boundary condition = (0,1,2,3) =
   (none,2d periodic,2d reflecting,mixed reflecting/periodic)
local data */
#define MXV             33
#define MYV             33
#define NPBLK           32
#define LVECT           4
#define N               4
   int noff, moff, npoff, npp, lxv;
   int i, j, k, m, ipp, joff, nps, nn, mm;
   float edgelx, edgely, edgerx, edgery, dxp, dyp, amx, amy;
   float x, y, dx, dy, vx, vy, vz;
   /* scu = per-thread local current accumulator for one tile */
   float scu[N*MXV*MYV];
/* float scu[N*(mx+1)*(my+1)]; */
/* scratch arrays */
/* n = deposit base index, s1 = weights, s2 = velocities, t = positions,
   gathered per block of NPBLK particles to enable vectorization */
   int n[NPBLK];
   float s1[NPBLK*LVECT], s2[NPBLK*LVECT], t[NPBLK*2];
   lxv = mx + 1;
/* set boundary values */
   edgelx = 0.0f;
   edgely = 0.0f;
   edgerx = (float) nx;
   edgery = (float) ny;
   if (ipbc==2) {
      edgelx = 1.0f;
      edgely = 1.0f;
      edgerx = (float) (nx-1);
      edgery = (float) (ny-1);
   }
   else if (ipbc==3) {
      edgelx = 1.0f;
      edgerx = (float) (nx-1);
   }
/* error if local array is too small */
/* if ((mx >= MXV) || (my >= MYV)) */
/*    return;                      */
/* loop over tiles */
#pragma omp parallel for \
private(i,j,k,m,noff,moff,npp,npoff,ipp,joff,nps,nn,mm,x,y,dxp,dyp,amx, \
amy,dx,dy,vx,vy,vz,scu,n,s1,s2,t)
   for (k = 0; k < mxy1; k++) {
/* noff/moff become the global x/y grid offset of tile k */
      noff = k/mx1;
      moff = my*noff;
      noff = mx*(k - mx1*noff);
      npp = kpic[k];
      npoff = idimp*nppmx*k;
/* zero out local accumulator */
      for (j = 0; j < N*lxv*(my+1); j++) {
         scu[j] = 0.0f;
      }
/* ipp = number of full NPBLK-particle blocks in this tile */
      ipp = npp/NPBLK;
/* outer loop over number of full blocks */
      for (m = 0; m < ipp; m++) {
         joff = NPBLK*m;
/* inner loop over particles in block */
         for (j = 0; j < NPBLK; j++) {
/* find interpolation weights */
            x = ppart[j+joff+npoff];
            y = ppart[j+joff+nppmx+npoff];
            nn = x;
            mm = y;
/* charge qm is folded into the x weights (dxp and amx) */
            dxp = qm*(x - (float) nn);
            dyp = y - (float) mm;
            n[j] = N*(nn - noff + lxv*(mm - moff));
            amx = qm - dxp;
            amy = 1.0f - dyp;
            s1[j] = amx*amy;
            s1[j+NPBLK] = dxp*amy;
            s1[j+2*NPBLK] = amx*dyp;
            s1[j+3*NPBLK] = dxp*dyp;
            t[j] = x;
            t[j+NPBLK] = y;
            s2[j] = ppart[j+joff+2*nppmx+npoff];
            s2[j+NPBLK] = ppart[j+joff+3*nppmx+npoff];
            s2[j+2*NPBLK] = ppart[j+joff+4*nppmx+npoff];
         }
/* deposit current */
         for (j = 0; j < NPBLK; j++) {
            nn = n[j];
/* mm = base index of the row above (i = 2,3 of the 4 deposit points) */
            mm = nn + N*(lxv - 2);
            vx = s2[j];
            vy = s2[j+NPBLK];
            vz = s2[j+2*NPBLK];
#pragma ivdep
            for (i = 0; i < LVECT; i++) {
               if (i > 1)
                  nn = mm;
               scu[N*i+nn] += vx*s1[j+NPBLK*i];
               scu[1+N*i+nn] += vy*s1[j+NPBLK*i];
               scu[2+N*i+nn] += vz*s1[j+NPBLK*i];
            }
         }
/* advance position half a time-step */
         for (j = 0; j < NPBLK; j++) {
            x = t[j];
            y = t[j+NPBLK];
            vx = s2[j];
            vy = s2[j+NPBLK];
            dx = x + vx*dt;
            dy = y + vy*dt;
/* reflecting boundary conditions */
            if (ipbc==2) {
               if ((dx < edgelx) || (dx >= edgerx)) {
                  dx = x;
                  ppart[j+joff+2*nppmx+npoff] = -vx;
               }
               if ((dy < edgely) || (dy >= edgery)) {
                  dy = y;
                  ppart[j+joff+3*nppmx+npoff] = -vy;
               }
            }
/* mixed reflecting/periodic boundary conditions */
            else if (ipbc==3) {
               if ((dx < edgelx) || (dx >= edgerx)) {
                  dx = x;
                  ppart[j+joff+2*nppmx+npoff] = -vx;
               }
            }
/* set new position */
            ppart[j+joff+npoff] = dx;
            ppart[j+joff+nppmx+npoff] = dy;
         }
      }
      nps = NPBLK*ipp;
/* loop over remaining particles */
/* scalar tail loop for the npp%NPBLK particles not covered by blocks */
      for (j = nps; j < npp; j++) {
/* find interpolation weights */
         x = ppart[j+npoff];
         y = ppart[j+nppmx+npoff];
         nn = x;
         mm = y;
         dxp = qm*(x - (float) nn);
         dyp = y - (float) mm;
         nn = N*(nn - noff + lxv*(mm - moff));
         amx = qm - dxp;
         amy = 1.0f - dyp;
/* deposit current */
         dx = amx*amy;
         dy = dxp*amy;
         vx = ppart[j+2*nppmx+npoff];
         vy = ppart[j+3*nppmx+npoff];
         vz = ppart[j+4*nppmx+npoff];
         scu[nn] += vx*dx;
         scu[nn+1] += vy*dx;
         scu[nn+2] += vz*dx;
         dx = amx*dyp;
         mm = nn + N;
         scu[mm] += vx*dy;
         scu[mm+1] += vy*dy;
         scu[mm+2] += vz*dy;
         dy = dxp*dyp;
         nn += N*lxv;
         scu[nn] += vx*dx;
         scu[nn+1] += vy*dx;
         scu[nn+2] += vz*dx;
         mm = nn + N;
         scu[mm] += vx*dy;
         scu[mm+1] += vy*dy;
         scu[mm+2] += vz*dy;
/* advance position half a time-step */
         dx = x + vx*dt;
         dy = y + vy*dt;
/* reflecting boundary conditions */
         if (ipbc==2) {
            if ((dx < edgelx) || (dx >= edgerx)) {
               dx = x;
               ppart[j+2*nppmx+npoff] = -vx;
            }
            if ((dy < edgely) || (dy >= edgery)) {
               dy = y;
               ppart[j+3*nppmx+npoff] = -vy;
            }
         }
/* mixed reflecting/periodic boundary conditions */
         else if (ipbc==3) {
            if ((dx < edgelx) || (dx >= edgerx)) {
               dx = x;
               ppart[j+2*nppmx+npoff] = -vx;
            }
         }
/* set new position */
         ppart[j+npoff] = dx;
         ppart[j+nppmx+npoff] = dy;
      }
/* deposit current to interior points in global array */
/* interior points belong to exactly one tile, so no atomics needed */
      nn = nxv - noff;
      mm = nyv - moff;
      nn = mx < nn ? mx : nn;
      mm = my < mm ? my : mm;
      for (j = 1; j < mm; j++) {
         for (i = 1; i < nn; i++) {
            cu[N*(i+noff+nxv*(j+moff))] += scu[N*(i+lxv*j)];
            cu[1+N*(i+noff+nxv*(j+moff))] += scu[1+N*(i+lxv*j)];
            cu[2+N*(i+noff+nxv*(j+moff))] += scu[2+N*(i+lxv*j)];
         }
      }
/* deposit current to edge points in global array */
/* edge rows/columns are shared with neighboring tiles: use atomics */
      mm = nyv - moff;
      mm = my+1 < mm ? my+1 : mm;
      for (i = 1; i < nn; i++) {
#pragma omp atomic
         cu[N*(i+noff+nxv*moff)] += scu[N*i];
#pragma omp atomic
         cu[1+N*(i+noff+nxv*moff)] += scu[1+N*i];
#pragma omp atomic
         cu[2+N*(i+noff+nxv*moff)] += scu[2+N*i];
         if (mm > my) {
#pragma omp atomic
            cu[N*(i+noff+nxv*(mm+moff-1))] += scu[N*(i+lxv*(mm-1))];
#pragma omp atomic
            cu[1+N*(i+noff+nxv*(mm+moff-1))] += scu[1+N*(i+lxv*(mm-1))];
#pragma omp atomic
            cu[2+N*(i+noff+nxv*(mm+moff-1))] += scu[2+N*(i+lxv*(mm-1))];
         }
      }
      nn = nxv - noff;
      nn = mx+1 < nn ? mx+1 : nn;
      for (j = 0; j < mm; j++) {
#pragma omp atomic
         cu[N*(noff+nxv*(j+moff))] += scu[N*lxv*j];
#pragma omp atomic
         cu[1+N*(noff+nxv*(j+moff))] += scu[1+N*lxv*j];
#pragma omp atomic
         cu[2+N*(noff+nxv*(j+moff))] += scu[2+N*lxv*j];
         if (nn > mx) {
#pragma omp atomic
            cu[N*(nn+noff-1+nxv*(j+moff))] += scu[N*((nn-1)+lxv*j)];
#pragma omp atomic
            cu[1+N*(nn+noff-1+nxv*(j+moff))] += scu[1+N*((nn-1)+lxv*j)];
#pragma omp atomic
            cu[2+N*(nn+noff-1+nxv*(j+moff))] += scu[2+N*((nn-1)+lxv*j)];
         }
      }
   }
   return;
#undef N
#undef LVECT
#undef NPBLK
#undef MXV
#undef MYV
}
/*--------------------------------------------------------------------*/
void cvgjppostf2lt(float ppart[], float cu[], int kpic[], int ncl[],
int ihole[], float qm, float dt, int nppmx,
int idimp, int nx, int ny, int mx, int my, int nxv,
int nyv, int mx1, int mxy1, int ntmax, int *irc) {
/* for 2-1/2d code, this subroutine calculates particle current density
using first-order linear interpolation
in addition, particle positions are advanced a half time-step
with periodic boundary conditions.
also determines list of particles which are leaving this tile
vectorizable/OpenMP version using guard cells
data deposited in tiles
particles stored segmented array
41 flops/particle, 17 loads, 14 stores
input: all except ncl, ihole, irc,
output: ppart, cu, ncl, ihole, irc
current density is approximated by values at the nearest grid points
cu(i,n,m)=qci*(1.-dx)*(1.-dy)
cu(i,n+1,m)=qci*dx*(1.-dy)
cu(i,n,m+1)=qci*(1.-dx)*dy
cu(i,n+1,m+1)=qci*dx*dy
where n,m = leftmost grid points and dx = x-n, dy = y-m
and qci = qm*vi, where i = x,y,z
ppart[m][0][n] = position x of particle n in tile m
ppart[m][1][n] = position y of particle n in tile m
ppart[m][2][n] = x velocity of particle n in tile m
ppart[m][3][n] = y velocity of particle n in tile m
ppart[m][4][n] = z velocity of particle n in tile m
cu[k][j][i] = ith component of current density at grid point j,k
kpic[k] = number of particles in tile k
ncl[k][i] = number of particles going to destination i, tile k
ihole[k][:][0] = location of hole in array left by departing particle
ihole[k][:][1] = destination of particle leaving hole
ihole[k][0][0] = ih, number of holes left (error, if negative)
qm = charge on particle, in units of e
dt = time interval between successive calculations
nppmx = maximum number of particles in tile
idimp = size of phase space = 5
nx/ny = system length in x/y direction
mx/my = number of grids in sorting cell in x/y
nxv = second dimension of current array, must be >= nx+1
nyv = third dimension of current array, must be >= ny+1
mx1 = (system length in x direction - 1)/mx + 1
mxy1 = mx1*my1, where my1 = (system length in y direction - 1)/my + 1
ntmax = size of hole array for particles leaving tiles
irc = maximum overflow, returned only if error occurs, when irc > 0
optimized version
local data */
#define MXV 33
#define MYV 33
#define NPBLK 32
#define LVECT 4
#define N 4
int noff, moff, npoff, npp, lxv;
int i, j, k, m, ih, nh, ipp, joff, nps, nn, mm;
float dxp, dyp, amx, amy;
float x, y, dx, dy, vx, vy, vz;
float anx, any, edgelx, edgely, edgerx, edgery;
float scu[N*MXV*MYV];
/* float scu[N*(mx+1)*(my+1)]; */
/* scratch arrays */
int n[NPBLK];
float s1[NPBLK*LVECT], s2[NPBLK*LVECT], t[NPBLK*2];
lxv = mx + 1;
anx = (float) nx;
any = (float) ny;
/* error if local array is too small */
/* if ((mx >= MXV) || (my >= MYV)) */
/* return; */
/* loop over tiles */
#pragma omp parallel for \
private(i,j,k,m,noff,moff,npp,npoff,ipp,joff,nps,nn,mm,ih,nh,x,y,dxp, \
dyp,amx,amy,dx,dy,vx,vy,vz,edgelx,edgely,edgerx,edgery,scu,n,s1,s2,t)
for (k = 0; k < mxy1; k++) {
noff = k/mx1;
moff = my*noff;
noff = mx*(k - mx1*noff);
npp = kpic[k];
npoff = idimp*nppmx*k;
nn = nx - noff;
nn = mx < nn ? mx : nn;
mm = ny - moff;
mm = my < mm ? my : mm;
edgelx = noff;
edgerx = noff + nn;
edgely = moff;
edgery = moff + mm;
ih = 0;
nh = 0;
nn += 1;
mm += 1;
/* zero out local accumulator */
for (j = 0; j < N*lxv*(my+1); j++) {
scu[j] = 0.0f;
}
/* clear counters */
for (j = 0; j < 8; j++) {
ncl[j+8*k] = 0;
}
ipp = npp/NPBLK;
/* outer loop over number of full blocks */
for (m = 0; m < ipp; m++) {
joff = NPBLK*m;
/* inner loop over particles in block */
for (j = 0; j < NPBLK; j++) {
/* find interpolation weights */
x = ppart[j+joff+npoff];
y = ppart[j+joff+nppmx+npoff];
nn = x;
mm = y;
dxp = qm*(x - (float) nn);
dyp = y - (float) mm;
n[j] = N*(nn - noff + lxv*(mm - moff));
amx = qm - dxp;
amy = 1.0f - dyp;
s1[j] = amx*amy;
s1[j+NPBLK] = dxp*amy;
s1[j+2*NPBLK] = amx*dyp;
s1[j+3*NPBLK] = dxp*dyp;
t[j] = x;
t[j+NPBLK] = y;
s2[j] = ppart[j+joff+2*nppmx+npoff];
s2[j+NPBLK] = ppart[j+joff+3*nppmx+npoff];
s2[j+2*NPBLK] = ppart[j+joff+4*nppmx+npoff];
}
/* deposit current */
for (j = 0; j < NPBLK; j++) {
nn = n[j];
mm = nn + N*(lxv - 2);
vx = s2[j];
vy = s2[j+NPBLK];
vz = s2[j+2*NPBLK];
#pragma ivdep
for (i = 0; i < LVECT; i++) {
if (i > 1)
nn = mm;
scu[N*i+nn] += vx*s1[j+NPBLK*i];
scu[1+N*i+nn] += vy*s1[j+NPBLK*i];
scu[2+N*i+nn] += vz*s1[j+NPBLK*i];
}
}
/* advance position half a time-step */
for (j = 0; j < NPBLK; j++) {
dx = t[j] + s2[j]*dt;
dy = t[j+NPBLK] + s2[j+NPBLK]*dt;
/* find particles going out of bounds */
mm = 0;
/* count how many particles are going in each direction in ncl */
/* save their address and destination in ihole */
/* use periodic boundary conditions and check for roundoff error */
/* mm = direction particle is going */
if (dx >= edgerx) {
if (dx >= anx)
dx -= anx;
mm = 2;
}
else if (dx < edgelx) {
if (dx < 0.0f) {
dx += anx;
if (dx < anx)
mm = 1;
else
dx = 0.0;
}
else {
mm = 1;
}
}
if (dy >= edgery) {
if (dy >= any)
dy -= any;
mm += 6;
}
else if (dy < edgely) {
if (dy < 0.0) {
dy += any;
if (dy < any)
mm += 3;
else
dy = 0.0;
}
else {
mm += 3;
}
}
/* set new position */
ppart[j+joff+npoff] = dx;
ppart[j+joff+nppmx+npoff] = dy;
/* increment counters */
if (mm > 0) {
ncl[mm+8*k-1] += 1;
ih += 1;
if (ih <= ntmax) {
ihole[2*(ih+(ntmax+1)*k)] = j + joff + 1;
ihole[1+2*(ih+(ntmax+1)*k)] = mm;
}
else {
nh = 1;
}
}
}
}
nps = NPBLK*ipp;
/* loop over remaining particles */
for (j = nps; j < npp; j++) {
/* find interpolation weights */
x = ppart[j+npoff];
y = ppart[j+nppmx+npoff];
nn = x;
mm = y;
dxp = qm*(x - (float) nn);
dyp = y - (float) mm;
nn = N*(nn - noff + lxv*(mm - moff));
amx = qm - dxp;
amy = 1.0f - dyp;
/* deposit current */
dx = amx*amy;
dy = dxp*amy;
vx = ppart[j+2*nppmx+npoff];
vy = ppart[j+3*nppmx+npoff];
vz = ppart[j+4*nppmx+npoff];
scu[nn] += vx*dx;
scu[nn+1] += vy*dx;
scu[nn+2] += vz*dx;
dx = amx*dyp;
mm = nn + N;
scu[mm] += vx*dy;
scu[mm+1] += vy*dy;
scu[mm+2] += vz*dy;
dy = dxp*dyp;
nn += N*lxv;
scu[nn] += vx*dx;
scu[nn+1] += vy*dx;
scu[nn+2] += vz*dx;
mm = nn + N;
scu[mm] += vx*dy;
scu[mm+1] += vy*dy;
scu[mm+2] += vz*dy;
/* advance position half a time-step */
dx = x + vx*dt;
dy = y + vy*dt;
/* find particles going out of bounds */
mm = 0;
/* count how many particles are going in each direction in ncl */
/* save their address and destination in ihole */
/* use periodic boundary conditions and check for roundoff error */
/* mm = direction particle is going */
if (dx >= edgerx) {
if (dx >= anx)
dx -= anx;
mm = 2;
}
else if (dx < edgelx) {
if (dx < 0.0f) {
dx += anx;
if (dx < anx)
mm = 1;
else
dx = 0.0;
}
else {
mm = 1;
}
}
if (dy >= edgery) {
if (dy >= any)
dy -= any;
mm += 6;
}
else if (dy < edgely) {
if (dy < 0.0) {
dy += any;
if (dy < any)
mm += 3;
else
dy = 0.0;
}
else {
mm += 3;
}
}
/* set new position */
ppart[j+npoff] = dx;
ppart[j+nppmx+npoff] = dy;
/* increment counters */
if (mm > 0) {
ncl[mm+8*k-1] += 1;
ih += 1;
if (ih <= ntmax) {
ihole[2*(ih+(ntmax+1)*k)] = j + 1;
ihole[1+2*(ih+(ntmax+1)*k)] = mm;
}
else {
nh = 1;
}
}
}
/* deposit current to interior points in global array */
nn = nxv - noff;
mm = nyv - moff;
nn = mx < nn ? mx : nn;
mm = my < mm ? my : mm;
for (j = 1; j < mm; j++) {
for (i = 1; i < nn; i++) {
cu[N*(i+noff+nxv*(j+moff))] += scu[N*(i+lxv*j)];
cu[1+N*(i+noff+nxv*(j+moff))] += scu[1+N*(i+lxv*j)];
cu[2+N*(i+noff+nxv*(j+moff))] += scu[2+N*(i+lxv*j)];
}
}
/* deposit current to edge points in global array */
mm = nyv - moff;
mm = my+1 < mm ? my+1 : mm;
for (i = 1; i < nn; i++) {
#pragma omp atomic
cu[N*(i+noff+nxv*moff)] += scu[N*i];
#pragma omp atomic
cu[1+N*(i+noff+nxv*moff)] += scu[1+N*i];
#pragma omp atomic
cu[2+N*(i+noff+nxv*moff)] += scu[2+N*i];
if (mm > my) {
#pragma omp atomic
cu[N*(i+noff+nxv*(mm+moff-1))] += scu[N*(i+lxv*(mm-1))];
#pragma omp atomic
cu[1+N*(i+noff+nxv*(mm+moff-1))] += scu[1+N*(i+lxv*(mm-1))];
#pragma omp atomic
cu[2+N*(i+noff+nxv*(mm+moff-1))] += scu[2+N*(i+lxv*(mm-1))];
}
}
nn = nxv - noff;
nn = mx+1 < nn ? mx+1 : nn;
for (j = 0; j < mm; j++) {
#pragma omp atomic
cu[N*(noff+nxv*(j+moff))] += scu[N*lxv*j];
#pragma omp atomic
cu[1+N*(noff+nxv*(j+moff))] += scu[1+N*lxv*j];
#pragma omp atomic
cu[2+N*(noff+nxv*(j+moff))] += scu[2+N*lxv*j];
if (nn > mx) {
#pragma omp atomic
cu[N*(nn+noff-1+nxv*(j+moff))] += scu[N*((nn-1)+lxv*j)];
#pragma omp atomic
cu[1+N*(nn+noff-1+nxv*(j+moff))] += scu[1+N*((nn-1)+lxv*j)];
#pragma omp atomic
cu[2+N*(nn+noff-1+nxv*(j+moff))] += scu[2+N*((nn-1)+lxv*j)];
}
}
/* set error and end of file flag */
/* ihole overflow */
if (nh > 0) {
*irc = ih;
ih = -ih;
}
ihole[2*(ntmax+1)*k] = ih;
}
return;
#undef N
#undef LVECT
#undef NPBLK
#undef MXV
#undef MYV
}
/*--------------------------------------------------------------------*/
void cvgrjppost2lt(float ppart[], float cu[], int kpic[], float qm,
float dt, float ci, int nppmx, int idimp, int nx,
int ny, int mx, int my, int nxv, int nyv, int mx1,
int mxy1, int ipbc) {
/* for 2-1/2d code, this subroutine calculates particle current density
using first-order linear interpolation
in addition, particle positions are advanced a half time-step
vectorizable/OpenMP version using guard cells
data deposited in tiles
particles stored segmented array
47 flops/particle, 1 divide, 1 sqrt, 17 loads, 14 stores
input: all, output: ppart, cu
current density is approximated by values at the nearest grid points
cu(i,n,m)=qci*(1.-dx)*(1.-dy)
cu(i,n+1,m)=qci*dx*(1.-dy)
cu(i,n,m+1)=qci*(1.-dx)*dy
cu(i,n+1,m+1)=qci*dx*dy
where n,m = leftmost grid points and dx = x-n, dy = y-m
and qci = qm*pi*gami, where i = x,y,z
where gami = 1./sqrt(1.+sum(pi**2)*ci*ci)
ppart[m][0][n] = position x of particle n in tile m
ppart[m][1][n] = position y of particle n in tile m
ppart[m][2][n] = x momentum of particle n in tile m
ppart[m][3][n] = y momentum of particle n in tile m
ppart[m][4][n] = z momentum of particle n in tile m
cu[k][j][i] = ith component of current density at grid point j,k
kpic = number of particles per tile
qm = charge on particle, in units of e
dt = time interval between successive calculations
ci = reciprocal of velocity of light
nppmx = maximum number of particles in tile
idimp = size of phase space = 5
nx/ny = system length in x/y direction
mx/my = number of grids in sorting cell in x/y
nxv = second dimension of current array, must be >= nx+1
nyv = third dimension of current array, must be >= ny+1
mx1 = (system length in x direction - 1)/mx + 1
mxy1 = mx1*my1, where my1 = (system length in y direction - 1)/my + 1
ipbc = particle boundary condition = (0,1,2,3) =
(none,2d periodic,2d reflecting,mixed reflecting/periodic)
local data */
/* MXV/MYV = maximum tile size (+1 guard cell) supported by the local
   accumulator scu; NPBLK = particle block size for the vectorized inner
   loops; LVECT = number of deposit points per particle (2x2 stencil);
   N = per-grid-point stride in cu/scu -- only components 0..2 (x,y,z)
   are written, the 4th slot appears to be padding */
#define MXV 33
#define MYV 33
#define NPBLK 32
#define LVECT 4
#define N 4
/* noff/moff = grid offsets of the current tile; npoff = offset of the
   tile's particles in ppart; npp = particle count; lxv = scu row stride */
int noff, moff, npoff, npp, lxv;
int i, j, k, m, ipp, joff, nps, nn, mm;
float ci2, edgelx, edgely, edgerx, edgery, dxp, dyp, amx, amy;
float x, y, dx, dy, vx, vy, vz, ux, uy, uz, p2, gami;
/* per-thread local current accumulator for one tile (private below) */
float scu[N*MXV*MYV];
/* float scu[N*(mx+1)*(my+1)]; */
/* scratch arrays */
/* n = cell index per particle; s1 = the 4 interpolation weights;
   s2 = velocities (momentum * gami); t = saved positions and momenta */
int n[NPBLK];
float s1[NPBLK*LVECT], s2[NPBLK*LVECT], t[NPBLK*4];
lxv = mx + 1;
ci2 = ci*ci;
/* set boundary values */
edgelx = 0.0f;
edgely = 0.0f;
edgerx = (float) nx;
edgery = (float) ny;
if (ipbc==2) {
edgelx = 1.0f;
edgely = 1.0f;
edgerx = (float) (nx-1);
edgery = (float) (ny-1);
}
else if (ipbc==3) {
edgelx = 1.0f;
edgerx = (float) (nx-1);
}
/* error if local array is too small */
/* if ((mx >= MXV) || (my >= MYV)) */
/* return; */
/* loop over tiles */
#pragma omp parallel for \
private(i,j,k,m,noff,moff,npp,npoff,ipp,joff,nps,nn,mm,x,y,dxp,dyp,amx, \
amy,dx,dy,vx,vy,vz,ux,uy,uz,p2,gami,scu,n,s1,s2,t)
for (k = 0; k < mxy1; k++) {
/* decode tile index k into (noff,moff) = lower-left grid coordinates */
noff = k/mx1;
moff = my*noff;
noff = mx*(k - mx1*noff);
npp = kpic[k];
npoff = idimp*nppmx*k;
/* zero out local accumulator */
for (j = 0; j < N*lxv*(my+1); j++) {
scu[j] = 0.0f;
}
ipp = npp/NPBLK;
/* outer loop over number of full blocks */
for (m = 0; m < ipp; m++) {
joff = NPBLK*m;
/* inner loop over particles in block */
for (j = 0; j < NPBLK; j++) {
/* find interpolation weights */
x = ppart[j+joff+npoff];
y = ppart[j+joff+nppmx+npoff];
nn = x;
mm = y;
dxp = qm*(x - (float) nn);
dyp = y - (float) mm;
n[j] = N*(nn - noff + lxv*(mm - moff));
amx = qm - dxp;
amy = 1.0f - dyp;
s1[j] = amx*amy;
s1[j+NPBLK] = dxp*amy;
s1[j+2*NPBLK] = amx*dyp;
s1[j+3*NPBLK] = dxp*dyp;
t[j] = x;
t[j+NPBLK] = y;
/* find inverse gamma */
ux = ppart[j+joff+2*nppmx+npoff];
uy = ppart[j+joff+3*nppmx+npoff];
uz = ppart[j+joff+4*nppmx+npoff];
p2 = ux*ux + uy*uy + uz*uz;
gami = 1.0f/sqrtf(1.0f + p2*ci2);
s2[j] = ux*gami;
s2[j+NPBLK] = uy*gami;
s2[j+2*NPBLK] = uz*gami;
t[j+2*NPBLK] = ux;
t[j+3*NPBLK] = uy;
}
/* deposit current */
for (j = 0; j < NPBLK; j++) {
nn = n[j];
/* mm = index of the (m+1) row of the stencil; iterations i>1 of the
   LVECT loop switch to it, so the 4 deposits land on the 2x2 cell */
mm = nn + N*(lxv - 2);
vx = s2[j];
vy = s2[j+NPBLK];
vz = s2[j+2*NPBLK];
#pragma ivdep
for (i = 0; i < LVECT; i++) {
if (i > 1)
nn = mm;
scu[N*i+nn] += vx*s1[j+NPBLK*i];
scu[1+N*i+nn] += vy*s1[j+NPBLK*i];
scu[2+N*i+nn] += vz*s1[j+NPBLK*i];
}
}
/* advance position half a time-step */
for (j = 0; j < NPBLK; j++) {
x = t[j];
y = t[j+NPBLK];
vx = s2[j];
vy = s2[j+NPBLK];
ux = t[j+2*NPBLK];
uy = t[j+3*NPBLK];
dx = x + vx*dt;
dy = y + vy*dt;
/* reflecting boundary conditions */
if (ipbc==2) {
if ((dx < edgelx) || (dx >= edgerx)) {
dx = x;
ppart[j+joff+2*nppmx+npoff] = -ux;
}
if ((dy < edgely) || (dy >= edgery)) {
dy = y;
ppart[j+joff+3*nppmx+npoff] = -uy;
}
}
/* mixed reflecting/periodic boundary conditions */
else if (ipbc==3) {
if ((dx < edgelx) || (dx >= edgerx)) {
dx = x;
ppart[j+joff+2*nppmx+npoff] = -ux;
}
}
/* set new position */
ppart[j+joff+npoff] = dx;
ppart[j+joff+nppmx+npoff] = dy;
}
}
nps = NPBLK*ipp;
/* loop over remaining particles */
/* same algorithm as the blocked loops above, scalar form */
for (j = nps; j < npp; j++) {
/* find interpolation weights */
x = ppart[j+npoff];
y = ppart[j+nppmx+npoff];
nn = x;
mm = y;
dxp = qm*(x - (float) nn);
dyp = y - (float) mm;
/* find inverse gamma */
ux = ppart[j+2*nppmx+npoff];
uy = ppart[j+3*nppmx+npoff];
uz = ppart[j+4*nppmx+npoff];
p2 = ux*ux + uy*uy + uz*uz;
gami = 1.0f/sqrtf(1.0f + p2*ci2);
/* calculate weights */
nn = N*(nn - noff + lxv*(mm - moff));
amx = qm - dxp;
amy = 1.0f - dyp;
/* deposit current */
dx = amx*amy;
dy = dxp*amy;
vx = ux*gami;
vy = uy*gami;
vz = uz*gami;
scu[nn] += vx*dx;
scu[nn+1] += vy*dx;
scu[nn+2] += vz*dx;
dx = amx*dyp;
mm = nn + N;
scu[mm] += vx*dy;
scu[mm+1] += vy*dy;
scu[mm+2] += vz*dy;
dy = dxp*dyp;
nn += N*lxv;
scu[nn] += vx*dx;
scu[nn+1] += vy*dx;
scu[nn+2] += vz*dx;
mm = nn + N;
scu[mm] += vx*dy;
scu[mm+1] += vy*dy;
scu[mm+2] += vz*dy;
/* advance position half a time-step */
dx = x + vx*dt;
dy = y + vy*dt;
/* reflecting boundary conditions */
if (ipbc==2) {
if ((dx < edgelx) || (dx >= edgerx)) {
dx = x;
ppart[j+2*nppmx+npoff] = -ux;
}
if ((dy < edgely) || (dy >= edgery)) {
dy = y;
ppart[j+3*nppmx+npoff] = -uy;
}
}
/* mixed reflecting/periodic boundary conditions */
else if (ipbc==3) {
if ((dx < edgelx) || (dx >= edgerx)) {
dx = x;
ppart[j+2*nppmx+npoff] = -ux;
}
}
/* set new position */
ppart[j+npoff] = dx;
ppart[j+nppmx+npoff] = dy;
}
/* deposit current to interior points in global array */
/* interior points belong to this tile only, so no atomics are needed */
nn = nxv - noff;
mm = nyv - moff;
nn = mx < nn ? mx : nn;
mm = my < mm ? my : mm;
for (j = 1; j < mm; j++) {
for (i = 1; i < nn; i++) {
cu[N*(i+noff+nxv*(j+moff))] += scu[N*(i+lxv*j)];
cu[1+N*(i+noff+nxv*(j+moff))] += scu[1+N*(i+lxv*j)];
cu[2+N*(i+noff+nxv*(j+moff))] += scu[2+N*(i+lxv*j)];
}
}
/* deposit current to edge points in global array */
/* edge/guard cells are shared with neighboring tiles, so updates must
   be atomic to avoid races between threads */
mm = nyv - moff;
mm = my+1 < mm ? my+1 : mm;
for (i = 1; i < nn; i++) {
#pragma omp atomic
cu[N*(i+noff+nxv*moff)] += scu[N*i];
#pragma omp atomic
cu[1+N*(i+noff+nxv*moff)] += scu[1+N*i];
#pragma omp atomic
cu[2+N*(i+noff+nxv*moff)] += scu[2+N*i];
if (mm > my) {
#pragma omp atomic
cu[N*(i+noff+nxv*(mm+moff-1))] += scu[N*(i+lxv*(mm-1))];
#pragma omp atomic
cu[1+N*(i+noff+nxv*(mm+moff-1))] += scu[1+N*(i+lxv*(mm-1))];
#pragma omp atomic
cu[2+N*(i+noff+nxv*(mm+moff-1))] += scu[2+N*(i+lxv*(mm-1))];
}
}
nn = nxv - noff;
nn = mx+1 < nn ? mx+1 : nn;
for (j = 0; j < mm; j++) {
#pragma omp atomic
cu[N*(noff+nxv*(j+moff))] += scu[N*lxv*j];
#pragma omp atomic
cu[1+N*(noff+nxv*(j+moff))] += scu[1+N*lxv*j];
#pragma omp atomic
cu[2+N*(noff+nxv*(j+moff))] += scu[2+N*lxv*j];
if (nn > mx) {
#pragma omp atomic
cu[N*(nn+noff-1+nxv*(j+moff))] += scu[N*((nn-1)+lxv*j)];
#pragma omp atomic
cu[1+N*(nn+noff-1+nxv*(j+moff))] += scu[1+N*((nn-1)+lxv*j)];
#pragma omp atomic
cu[2+N*(nn+noff-1+nxv*(j+moff))] += scu[2+N*((nn-1)+lxv*j)];
}
}
}
return;
#undef N
#undef LVECT
#undef NPBLK
#undef MXV
#undef MYV
}
/*--------------------------------------------------------------------*/
void cvgrjppostf2lt(float ppart[], float cu[], int kpic[], int ncl[],
int ihole[], float qm, float dt, float ci,
int nppmx, int idimp, int nx, int ny, int mx,
int my, int nxv, int nyv, int mx1, int mxy1,
int ntmax, int *irc) {
/* for 2-1/2d code, this subroutine calculates particle current density
using first-order linear interpolation for relativistic particles
in addition, particle positions are advanced a half time-step
with periodic boundary conditions.
also determines list of particles which are leaving this tile
vectorizable/OpenMP version using guard cells
data deposited in tiles
particles stored segmented array
47 flops/particle, 1 divide, 1 sqrt, 17 loads, 14 stores
input: all except ncl, ihole, irc,
output: ppart, cu, ncl, ihole, irc
current density is approximated by values at the nearest grid points
cu(i,n,m)=qci*(1.-dx)*(1.-dy)
cu(i,n+1,m)=qci*dx*(1.-dy)
cu(i,n,m+1)=qci*(1.-dx)*dy
cu(i,n+1,m+1)=qci*dx*dy
where n,m = leftmost grid points and dx = x-n, dy = y-m
and qci = qm*pi*gami, where i = x,y,z
where gami = 1./sqrt(1.+sum(pi**2)*ci*ci)
ppart[m][0][n] = position x of particle n in tile m
ppart[m][1][n] = position y of particle n in tile m
ppart[m][2][n] = x momentum of particle n in tile m
ppart[m][3][n] = y momentum of particle n in tile m
ppart[m][4][n] = z momentum of particle n in tile m
cu[k][j][i] = ith component of current density at grid point j,k
kpic[k] = number of particles in tile k
ncl[k][i] = number of particles going to destination i, tile k
ihole[k][:][0] = location of hole in array left by departing particle
ihole[k][:][1] = destination of particle leaving hole
ihole[k][0][0] = ih, number of holes left (error, if negative)
qm = charge on particle, in units of e
dt = time interval between successive calculations
ci = reciprocal of velocity of light
nppmx = maximum number of particles in tile
idimp = size of phase space = 5
nx/ny = system length in x/y direction
mx/my = number of grids in sorting cell in x/y
nxv = second dimension of current array, must be >= nx+1
nyv = third dimension of current array, must be >= ny+1
mx1 = (system length in x direction - 1)/mx + 1
mxy1 = mx1*my1, where my1 = (system length in y direction - 1)/my + 1
ntmax = size of hole array for particles leaving tiles
irc = maximum overflow, returned only if error occurs, when irc > 0
optimized version
local data */
/* MXV/MYV = maximum tile size (+1 guard cell) supported by scu;
   NPBLK = particle block size for vectorization; LVECT = deposit points
   per particle (2x2 stencil); N = per-grid-point stride in cu/scu
   (only components 0..2 are written, the 4th slot appears to be padding) */
#define MXV 33
#define MYV 33
#define NPBLK 32
#define LVECT 4
#define N 4
/* noff/moff = grid offsets of the current tile; npoff = offset of the
   tile's particles in ppart; npp = particle count; lxv = scu row stride */
int noff, moff, npoff, npp, lxv;
/* ih = number of holes recorded so far; nh = overflow flag (ih > ntmax) */
int i, j, k, m, ih, nh, ipp, joff, nps, nn, mm;
float ci2, dxp, dyp, amx, amy;
float x, y, dx, dy, vx, vy, vz, p2, gami;
float anx, any, edgelx, edgely, edgerx, edgery;
/* per-thread local current accumulator for one tile (private below) */
float scu[N*MXV*MYV];
/* float scu[N*(mx+1)*(my+1)]; */
/* scratch arrays */
/* n = cell index per particle; s1 = the 4 interpolation weights;
   s2 = velocities (momentum * gami); t = saved positions */
int n[NPBLK];
float s1[NPBLK*LVECT], s2[NPBLK*LVECT], t[NPBLK*2];
lxv = mx + 1;
ci2 = ci*ci;
anx = (float) nx;
any = (float) ny;
/* error if local array is too small */
/* if ((mx >= MXV) || (my >= MYV)) */
/* return; */
/* loop over tiles */
#pragma omp parallel for \
private(i,j,k,m,noff,moff,npp,npoff,ipp,joff,nps,nn,mm,ih,nh,x,y,dxp, \
dyp,amx,amy,dx,dy,vx,vy,vz,edgelx,edgely,edgerx,edgery,p2,gami,scu,n, \
s1,s2,t)
for (k = 0; k < mxy1; k++) {
/* decode tile index k into (noff,moff) = lower-left grid coordinates */
noff = k/mx1;
moff = my*noff;
noff = mx*(k - mx1*noff);
npp = kpic[k];
npoff = idimp*nppmx*k;
/* per-tile edges, clipped at the global domain boundary */
nn = nx - noff;
nn = mx < nn ? mx : nn;
mm = ny - moff;
mm = my < mm ? my : mm;
edgelx = noff;
edgerx = noff + nn;
edgely = moff;
edgery = moff + mm;
ih = 0;
nh = 0;
nn += 1;
mm += 1;
/* zero out local accumulator */
for (j = 0; j < N*lxv*(my+1); j++) {
scu[j] = 0.0f;
}
/* clear counters */
for (j = 0; j < 8; j++) {
ncl[j+8*k] = 0;
}
ipp = npp/NPBLK;
/* outer loop over number of full blocks */
for (m = 0; m < ipp; m++) {
joff = NPBLK*m;
/* inner loop over particles in block */
for (j = 0; j < NPBLK; j++) {
/* find interpolation weights */
x = ppart[j+joff+npoff];
y = ppart[j+joff+nppmx+npoff];
nn = x;
mm = y;
dxp = qm*(x - (float) nn);
dyp = y - (float) mm;
n[j] = N*(nn - noff + lxv*(mm - moff));
amx = qm - dxp;
amy = 1.0f - dyp;
s1[j] = amx*amy;
s1[j+NPBLK] = dxp*amy;
s1[j+2*NPBLK] = amx*dyp;
s1[j+3*NPBLK] = dxp*dyp;
t[j] = x;
t[j+NPBLK] = y;
/* find inverse gamma */
vx = ppart[j+joff+2*nppmx+npoff];
vy = ppart[j+joff+3*nppmx+npoff];
vz = ppart[j+joff+4*nppmx+npoff];
p2 = vx*vx + vy*vy + vz*vz;
gami = 1.0f/sqrtf(1.0f + p2*ci2);
s2[j] = vx*gami;
s2[j+NPBLK] = vy*gami;
s2[j+2*NPBLK] = vz*gami;
}
/* deposit current */
for (j = 0; j < NPBLK; j++) {
nn = n[j];
/* mm = index of the (m+1) row of the stencil; iterations i>1 of the
   LVECT loop switch to it, so the 4 deposits land on the 2x2 cell */
mm = nn + N*(lxv - 2);
vx = s2[j];
vy = s2[j+NPBLK];
vz = s2[j+2*NPBLK];
#pragma ivdep
for (i = 0; i < LVECT; i++) {
if (i > 1)
nn = mm;
scu[N*i+nn] += vx*s1[j+NPBLK*i];
scu[1+N*i+nn] += vy*s1[j+NPBLK*i];
scu[2+N*i+nn] += vz*s1[j+NPBLK*i];
}
}
/* advance position half a time-step */
for (j = 0; j < NPBLK; j++) {
dx = t[j] + s2[j]*dt;
dy = t[j+NPBLK] + s2[j+NPBLK]*dt;
/* find particles going out of bounds */
mm = 0;
/* count how many particles are going in each direction in ncl */
/* save their address and destination in ihole */
/* use periodic boundary conditions and check for roundoff error */
/* mm = direction particle is going */
/* encoding: x contributes 1 (low side) or 2 (high side); */
/* y adds 3 (low side) or 6 (high side); 0 = staying in tile */
if (dx >= edgerx) {
if (dx >= anx)
dx -= anx;
mm = 2;
}
else if (dx < edgelx) {
if (dx < 0.0f) {
dx += anx;
if (dx < anx)
mm = 1;
else
dx = 0.0;
}
else {
mm = 1;
}
}
if (dy >= edgery) {
if (dy >= any)
dy -= any;
mm += 6;
}
else if (dy < edgely) {
if (dy < 0.0) {
dy += any;
if (dy < any)
mm += 3;
else
dy = 0.0;
}
else {
mm += 3;
}
}
/* set new position */
ppart[j+joff+npoff] = dx;
ppart[j+joff+nppmx+npoff] = dy;
/* increment counters */
if (mm > 0) {
ncl[mm+8*k-1] += 1;
ih += 1;
if (ih <= ntmax) {
/* ihole stores 1-based particle locations */
ihole[2*(ih+(ntmax+1)*k)] = j + joff + 1;
ihole[1+2*(ih+(ntmax+1)*k)] = mm;
}
else {
nh = 1;
}
}
}
}
nps = NPBLK*ipp;
/* loop over remaining particles */
/* same algorithm as the blocked loops above, scalar form */
for (j = nps; j < npp; j++) {
/* find interpolation weights */
x = ppart[j+npoff];
y = ppart[j+nppmx+npoff];
nn = x;
mm = y;
dxp = qm*(x - (float) nn);
dyp = y - (float) mm;
/* find inverse gamma */
vx = ppart[j+2*nppmx+npoff];
vy = ppart[j+3*nppmx+npoff];
vz = ppart[j+4*nppmx+npoff];
p2 = vx*vx + vy*vy + vz*vz;
gami = 1.0f/sqrtf(1.0f + p2*ci2);
/* calculate weights */
nn = N*(nn - noff + lxv*(mm - moff));
amx = qm - dxp;
amy = 1.0f - dyp;
/* deposit current */
dx = amx*amy;
dy = dxp*amy;
vx *= gami;
vy *= gami;
vz *= gami;
scu[nn] += vx*dx;
scu[nn+1] += vy*dx;
scu[nn+2] += vz*dx;
dx = amx*dyp;
mm = nn + N;
scu[mm] += vx*dy;
scu[mm+1] += vy*dy;
scu[mm+2] += vz*dy;
dy = dxp*dyp;
nn += N*lxv;
scu[nn] += vx*dx;
scu[nn+1] += vy*dx;
scu[nn+2] += vz*dx;
mm = nn + N;
scu[mm] += vx*dy;
scu[mm+1] += vy*dy;
scu[mm+2] += vz*dy;
/* advance position half a time-step */
dx = x + vx*dt;
dy = y + vy*dt;
/* find particles going out of bounds */
mm = 0;
/* count how many particles are going in each direction in ncl */
/* save their address and destination in ihole */
/* use periodic boundary conditions and check for roundoff error */
/* mm = direction particle is going */
if (dx >= edgerx) {
if (dx >= anx)
dx -= anx;
mm = 2;
}
else if (dx < edgelx) {
if (dx < 0.0f) {
dx += anx;
if (dx < anx)
mm = 1;
else
dx = 0.0;
}
else {
mm = 1;
}
}
if (dy >= edgery) {
if (dy >= any)
dy -= any;
mm += 6;
}
else if (dy < edgely) {
if (dy < 0.0) {
dy += any;
if (dy < any)
mm += 3;
else
dy = 0.0;
}
else {
mm += 3;
}
}
/* set new position */
ppart[j+npoff] = dx;
ppart[j+nppmx+npoff] = dy;
/* increment counters */
if (mm > 0) {
ncl[mm+8*k-1] += 1;
ih += 1;
if (ih <= ntmax) {
ihole[2*(ih+(ntmax+1)*k)] = j + 1;
ihole[1+2*(ih+(ntmax+1)*k)] = mm;
}
else {
nh = 1;
}
}
}
/* deposit current to interior points in global array */
/* interior points belong to this tile only, so no atomics are needed */
nn = nxv - noff;
mm = nyv - moff;
nn = mx < nn ? mx : nn;
mm = my < mm ? my : mm;
for (j = 1; j < mm; j++) {
for (i = 1; i < nn; i++) {
cu[N*(i+noff+nxv*(j+moff))] += scu[N*(i+lxv*j)];
cu[1+N*(i+noff+nxv*(j+moff))] += scu[1+N*(i+lxv*j)];
cu[2+N*(i+noff+nxv*(j+moff))] += scu[2+N*(i+lxv*j)];
}
}
/* deposit current to edge points in global array */
/* edge/guard cells are shared with neighboring tiles, so updates must
   be atomic to avoid races between threads */
mm = nyv - moff;
mm = my+1 < mm ? my+1 : mm;
for (i = 1; i < nn; i++) {
#pragma omp atomic
cu[N*(i+noff+nxv*moff)] += scu[N*i];
#pragma omp atomic
cu[1+N*(i+noff+nxv*moff)] += scu[1+N*i];
#pragma omp atomic
cu[2+N*(i+noff+nxv*moff)] += scu[2+N*i];
if (mm > my) {
#pragma omp atomic
cu[N*(i+noff+nxv*(mm+moff-1))] += scu[N*(i+lxv*(mm-1))];
#pragma omp atomic
cu[1+N*(i+noff+nxv*(mm+moff-1))] += scu[1+N*(i+lxv*(mm-1))];
#pragma omp atomic
cu[2+N*(i+noff+nxv*(mm+moff-1))] += scu[2+N*(i+lxv*(mm-1))];
}
}
nn = nxv - noff;
nn = mx+1 < nn ? mx+1 : nn;
for (j = 0; j < mm; j++) {
#pragma omp atomic
cu[N*(noff+nxv*(j+moff))] += scu[N*lxv*j];
#pragma omp atomic
cu[1+N*(noff+nxv*(j+moff))] += scu[1+N*lxv*j];
#pragma omp atomic
cu[2+N*(noff+nxv*(j+moff))] += scu[2+N*lxv*j];
if (nn > mx) {
#pragma omp atomic
cu[N*(nn+noff-1+nxv*(j+moff))] += scu[N*((nn-1)+lxv*j)];
#pragma omp atomic
cu[1+N*(nn+noff-1+nxv*(j+moff))] += scu[1+N*((nn-1)+lxv*j)];
#pragma omp atomic
cu[2+N*(nn+noff-1+nxv*(j+moff))] += scu[2+N*((nn-1)+lxv*j)];
}
}
/* set error and end of file flag */
/* ihole overflow */
/* on overflow, report ih via irc and store it negated as an error mark */
if (nh > 0) {
*irc = ih;
ih = -ih;
}
ihole[2*(ntmax+1)*k] = ih;
}
return;
#undef N
#undef LVECT
#undef NPBLK
#undef MXV
#undef MYV
}
/*--------------------------------------------------------------------*/
void cpporder2lt(float ppart[], float ppbuff[], int kpic[], int ncl[],
int ihole[], int idimp, int nppmx, int nx, int ny,
int mx, int my, int mx1, int my1, int npbmx, int ntmax,
int *irc) {
/* this subroutine sorts particles by x,y grid in tiles of mx, my
linear interpolation, with periodic boundary conditions
tiles are assumed to be arranged in 2D linear memory
algorithm has 3 steps. first, one finds particles leaving tile and
stores their number in each directon, location, and destination in ncl
and ihole. second, a prefix scan of ncl is performed and departing
particles are buffered in ppbuff in direction order. finally, we copy
the incoming particles from other tiles into ppart.
input: all except ppbuff, ncl, ihole, irc
output: ppart, ppbuff, kpic, ncl, ihole, irc
ppart[k][0][n] = position x of particle n in tile k
ppart[k][1][n] = position y of particle n in tile k
ppbuff[k][i][n] = i co-ordinate of particle n in tile k
kpic[k] = number of particles in tile k
ncl[k][i] = number of particles going to destination i, tile k
ihole[k][:][0] = location of hole in array left by departing particle
ihole[k][:][1] = direction destination of particle leaving hole
all for tile k
ihole[k][0][0] = ih, number of holes left (error, if negative)
idimp = size of phase space = 4
nppmx = maximum number of particles in tile
nx/ny = system length in x/y direction
mx/my = number of grids in sorting cell in x/y
mx1 = (system length in x direction - 1)/mx + 1
my1 = (system length in y direction - 1)/my + 1
npbmx = size of buffer array ppbuff
ntmax = size of hole array for particles leaving tiles
irc = maximum overflow, returned only if error occurs, when irc > 0
local data */
int mxy1, noff, moff, npoff, npp, nboff, ncoff;
int i, j, k, ii, kx, ky, ih, nh, ist, nn, mm, isum;
int ip, j1, j2, kxl, kxr, kk, kl, kr;
float anx, any, edgelx, edgely, edgerx, edgery, dx, dy;
/* ks = tile numbers of the 8 periodic neighbors, in direction order */
int ks[8];
mxy1 = mx1*my1;
anx = (float) nx;
any = (float) ny;
/* find and count particles leaving tiles and determine destination */
/* update ppart, ihole, ncl */
/* loop over tiles */
#pragma omp parallel for \
private(j,k,noff,moff,npp,npoff,nn,mm,ih,nh,ist,dx,dy,edgelx,edgely, \
edgerx,edgery)
for (k = 0; k < mxy1; k++) {
/* decode tile index k into (noff,moff) = lower-left grid coordinates */
noff = k/mx1;
moff = my*noff;
noff = mx*(k - mx1*noff);
npp = kpic[k];
npoff = idimp*nppmx*k;
/* per-tile edges, clipped at the global domain boundary */
nn = nx - noff;
nn = mx < nn ? mx : nn;
mm = ny - moff;
mm = my < mm ? my : mm;
ih = 0;
nh = 0;
edgelx = noff;
edgerx = noff + nn;
edgely = moff;
edgery = moff + mm;
/* clear counters */
for (j = 0; j < 8; j++) {
ncl[j+8*k] = 0;
}
/* loop over particles in tile */
for (j = 0; j < npp; j++) {
dx = ppart[j+npoff];
dy = ppart[j+nppmx+npoff];
/* find particles going out of bounds */
ist = 0;
/* count how many particles are going in each direction in ncl */
/* save their address and destination in ihole */
/* use periodic boundary conditions and check for roundoff error */
/* ist = direction particle is going */
/* encoding: x contributes 1 (low side) or 2 (high side); */
/* y adds 3 (low side) or 6 (high side); 0 = staying in tile */
if (dx >= edgerx) {
if (dx >= anx)
ppart[j+npoff] = dx - anx;
ist = 2;
}
else if (dx < edgelx) {
if (dx < 0.0) {
dx += anx;
if (dx < anx)
ist = 1;
else
dx = 0.0;
ppart[j+npoff] = dx;
}
else {
ist = 1;
}
}
if (dy >= edgery) {
if (dy >= any)
ppart[j+nppmx+npoff] = dy - any;
ist += 6;
}
else if (dy < edgely) {
if (dy < 0.0) {
dy += any;
if (dy < any)
ist += 3;
else
dy = 0.0;
ppart[j+nppmx+npoff] = dy;
}
else {
ist += 3;
}
}
if (ist > 0) {
ncl[ist+8*k-1] += 1;
ih += 1;
if (ih <= ntmax) {
/* ihole stores 1-based particle locations */
ihole[2*(ih+(ntmax+1)*k)] = j + 1;
ihole[1+2*(ih+(ntmax+1)*k)] = ist;
}
else {
nh = 1;
}
}
}
/* set error and end of file flag */
/* a negative hole count marks overflow of the ihole array */
if (nh > 0) {
*irc = ih;
ih = -ih;
}
ihole[2*(ntmax+1)*k] = ih;
}
/* ihole overflow */
if (*irc > 0)
return;
/* buffer particles that are leaving tile: update ppbuff, ncl */
/* loop over tiles */
#pragma omp parallel for \
private(i,j,k,npoff,nboff,isum,ist,nh,ip,j1,ii)
for (k = 0; k < mxy1; k++) {
npoff = idimp*nppmx*k;
nboff = idimp*npbmx*k;
/* find address offset for ordered ppbuff array */
/* exclusive prefix scan: ncl[k][i] becomes the start offset of
   direction i in ppbuff, then is advanced back to a running count */
isum = 0;
for (j = 0; j < 8; j++) {
ist = ncl[j+8*k];
ncl[j+8*k] = isum;
isum += ist;
}
nh = ihole[2*(ntmax+1)*k];
ip = 0;
/* loop over particles leaving tile */
for (j = 0; j < nh; j++) {
/* buffer particles that are leaving tile, in direction order */
j1 = ihole[2*(j+1+(ntmax+1)*k)] - 1;
ist = ihole[1+2*(j+1+(ntmax+1)*k)];
ii = ncl[ist+8*k-1];
if (ii < npbmx) {
for (i = 0; i < idimp; i++) {
ppbuff[ii+npbmx*i+nboff]
= ppart[j1+nppmx*i+npoff];
}
}
else {
ip = 1;
}
ncl[ist+8*k-1] = ii + 1;
}
/* set error */
if (ip > 0)
*irc = ncl[7+8*k];
}
/* ppbuff overflow */
if (*irc > 0)
return;
/* copy incoming particles from buffer into ppart: update ppart, kpic */
/* loop over tiles */
#pragma omp parallel for \
private(i,j,k,ii,kk,npp,npoff,nboff,kx,ky,kl,kr,kxl,kxr,ih,nh,nn, \
ncoff,ist,j1,j2,ip,ks)
for (k = 0; k < mxy1; k++) {
npp = kpic[k];
npoff = idimp*nppmx*k;
ky = k/mx1;
/* loop over tiles in y, assume periodic boundary conditions */
kk = ky*mx1;
/* find tile above */
kl = ky - 1;
if (kl < 0)
kl += my1;
kl = kl*mx1;
/* find tile below */
kr = ky + 1;
if (kr >= my1)
kr -= my1;
kr = kr*mx1;
/* loop over tiles in x, assume periodic boundary conditions */
kx = k - ky*mx1;
kxl = kx - 1;
if (kxl < 0)
kxl += mx1;
kxr = kx + 1;
if (kxr >= mx1)
kxr -= mx1;
/* find tile number for different directions */
/* ks order mirrors the ist direction encoding used in step 1 */
ks[0] = kxr + kk;
ks[1] = kxl + kk;
ks[2] = kx + kr;
ks[3] = kxr + kr;
ks[4] = kxl + kr;
ks[5] = kx + kl;
ks[6] = kxr + kl;
ks[7] = kxl + kl;
/* loop over directions */
nh = ihole[2*(ntmax+1)*k];
ncoff = 0;
ih = 0;
ist = 0;
j1 = 0;
for (ii = 0; ii < 8; ii++) {
nboff = idimp*npbmx*ks[ii];
if (ii > 0)
ncoff = ncl[ii-1+8*ks[ii]];
/* ip = number of particles coming from direction ii */
ip = ncl[ii+8*ks[ii]] - ncoff;
for (j = 0; j < ip; j++) {
ih += 1;
/* insert incoming particles into holes */
if (ih <= nh) {
j1 = ihole[2*(ih+(ntmax+1)*k)] - 1;
}
/* place overflow at end of array */
else {
j1 = npp;
npp += 1;
}
if (j1 < nppmx) {
for (i = 0; i < idimp; i++) {
ppart[j1+nppmx*i+npoff]
= ppbuff[j+ncoff+npbmx*i+nboff];
}
}
else {
/* ppart overflow: remember via ist, reported below */
ist = 1;
}
}
}
/* set error */
if (ist > 0)
*irc = j1+1;
/* fill up remaining holes in particle array with particles from bottom */
/* holes with locations great than npp-ip do not need to be filled */
if (ih < nh) {
ip = nh - ih;
ii = nh;
nn = ihole[2*(ii+(ntmax+1)*k)] - 1;
ih += 1;
j2 = ihole[2*(ih+(ntmax+1)*k)] - 1;
/* move particles from end into remaining holes */
/* holes are processed in increasing order */
for (j = 0; j < ip; j++) {
j1 = npp - j - 1;
/* if the end slot is itself a hole, skip it and advance */
if (j1==nn) {
ii -= 1;
nn = ihole[2*(ii+(ntmax+1)*k)] - 1;
}
else {
for (i = 0; i < idimp; i++) {
ppart[j2+nppmx*i+npoff]
= ppart[j1+nppmx*i+npoff];
}
ih += 1;
j2 = ihole[2*(ih+(ntmax+1)*k)] - 1;
}
}
npp -= ip;
}
kpic[k] = npp;
}
return;
}
/*--------------------------------------------------------------------*/
void cpporderf2lt(float ppart[], float ppbuff[], int kpic[], int ncl[],
int ihole[], int idimp, int nppmx, int mx1, int my1,
int npbmx, int ntmax, int *irc) {
/* this subroutine sorts particles by x,y grid in tiles of mx, my
linear interpolation, with periodic boundary conditions
tiles are assumed to be arranged in 2D linear memory
the algorithm has 2 steps. first, a prefix scan of ncl is performed
and departing particles are buffered in ppbuff in direction order.
then we copy the incoming particles from other tiles into ppart.
it assumes that the number, location, and destination of particles
leaving a tile have been previously stored in ncl and ihole by the
cgppushf2lt procedure.
input: all except ppbuff, irc
output: ppart, ppbuff, kpic, ncl, irc
ppart[k][0][n] = position x of particle n in tile k
ppart[k][1][n] = position y of particle n in tile k
ppbuff[k][i][n] = i co-ordinate of particle n in tile k
kpic[k] = number of particles in tile k
ncl[k][i] = number of particles going to destination i, tile k
ihole[k][:][0] = location of hole in array left by departing particle
ihole[k][:][1] = direction destination of particle leaving hole
all for tile k
ihole[k][0][0] = ih, number of holes left (error, if negative)
idimp = size of phase space = 4
nppmx = maximum number of particles in tile
mx1 = (system length in x direction - 1)/mx + 1
my1 = (system length in y direction - 1)/my + 1
npbmx = size of buffer array ppbuff
ntmax = size of hole array for particles leaving tiles
irc = maximum overflow, returned only if error occurs, when irc > 0
local data */
int mxy1, npoff, npp, nboff, ncoff;
int i, j, k, ii, kx, ky, ih, nh, ist, nn, isum;
int ip, j1, j2, kxl, kxr, kk, kl, kr;
int ks[8];
mxy1 = mx1*my1;
/* buffer particles that are leaving tile: update ppbuff, ncl */
/* loop over tiles */
#pragma omp parallel for \
private(i,j,k,npoff,nboff,isum,ist,nh,ip,j1,ii)
for (k = 0; k < mxy1; k++) {
npoff = idimp*nppmx*k;
nboff = idimp*npbmx*k;
/* find address offset for ordered ppbuff array */
isum = 0;
for (j = 0; j < 8; j++) {
ist = ncl[j+8*k];
ncl[j+8*k] = isum;
isum += ist;
}
nh = ihole[2*(ntmax+1)*k];
ip = 0;
/* loop over particles leaving tile */
for (j = 0; j < nh; j++) {
/* buffer particles that are leaving tile, in direction order */
j1 = ihole[2*(j+1+(ntmax+1)*k)] - 1;
ist = ihole[1+2*(j+1+(ntmax+1)*k)];
ii = ncl[ist+8*k-1];
if (ii < npbmx) {
for (i = 0; i < idimp; i++) {
ppbuff[ii+npbmx*i+nboff]
= ppart[j1+nppmx*i+npoff];
}
}
else {
ip = 1;
}
ncl[ist+8*k-1] = ii + 1;
}
/* set error */
if (ip > 0)
*irc = ncl[7+8*k];
}
/* ppbuff overflow */
if (*irc > 0)
return;
/* copy incoming particles from buffer into ppart: update ppart, kpic */
/* loop over tiles */
#pragma omp parallel for \
private(i,j,k,ii,kk,npp,npoff,nboff,kx,ky,kl,kr,kxl,kxr,ih,nh,nn, \
ncoff,ist,j1,j2,ip,ks)
for (k = 0; k < mxy1; k++) {
npp = kpic[k];
npoff = idimp*nppmx*k;
ky = k/mx1;
/* loop over tiles in y, assume periodic boundary conditions */
kk = ky*mx1;
/* find tile above */
kl = ky - 1;
if (kl < 0)
kl += my1;
kl = kl*mx1;
/* find tile below */
kr = ky + 1;
if (kr >= my1)
kr -= my1;
kr = kr*mx1;
/* loop over tiles in x, assume periodic boundary conditions */
kx = k - ky*mx1;
kxl = kx - 1;
if (kxl < 0)
kxl += mx1;
kxr = kx + 1;
if (kxr >= mx1)
kxr -= mx1;
/* find tile number for different directions */
ks[0] = kxr + kk;
ks[1] = kxl + kk;
ks[2] = kx + kr;
ks[3] = kxr + kr;
ks[4] = kxl + kr;
ks[5] = kx + kl;
ks[6] = kxr + kl;
ks[7] = kxl + kl;
/* loop over directions */
nh = ihole[2*(ntmax+1)*k];
ncoff = 0;
ih = 0;
ist = 0;
j1 = 0;
for (ii = 0; ii < 8; ii++) {
nboff = idimp*npbmx*ks[ii];
if (ii > 0)
ncoff = ncl[ii-1+8*ks[ii]];
/* ip = number of particles coming from direction ii */
ip = ncl[ii+8*ks[ii]] - ncoff;
for (j = 0; j < ip; j++) {
ih += 1;
/* insert incoming particles into holes */
if (ih <= nh) {
j1 = ihole[2*(ih+(ntmax+1)*k)] - 1;
}
/* place overflow at end of array */
else {
j1 = npp;
npp += 1;
}
if (j1 < nppmx) {
for (i = 0; i < idimp; i++) {
ppart[j1+nppmx*i+npoff]
= ppbuff[j+ncoff+npbmx*i+nboff];
}
}
else {
ist = 1;
}
}
}
/* set error */
if (ist > 0)
*irc = j1+1;
/* fill up remaining holes in particle array with particles from bottom */
/* holes with locations great than npp-ip do not need to be filled */
if (ih < nh) {
ip = nh - ih;
ii = nh;
nn = ihole[2*(ii+(ntmax+1)*k)] - 1;
ih += 1;
j2 = ihole[2*(ih+(ntmax+1)*k)] - 1;
/* move particles from end into remaining holes */
/* holes are processed in increasing order */
for (j = 0; j < ip; j++) {
j1 = npp - j - 1;
if (j1==nn) {
ii -= 1;
nn = ihole[2*(ii+(ntmax+1)*k)] - 1;
}
else {
for (i = 0; i < idimp; i++) {
ppart[j2+nppmx*i+npoff]
= ppart[j1+nppmx*i+npoff];
}
ih += 1;
j2 = ihole[2*(ih+(ntmax+1)*k)] - 1;
}
}
npp -= ip;
}
kpic[k] = npp;
}
return;
}
/*--------------------------------------------------------------------*/
void cvpporder2lt(float ppart[], float ppbuff[], int kpic[], int ncl[],
                  int ihole[], int idimp, int nppmx, int nx, int ny,
                  int mx, int my, int mx1, int my1, int npbmx,
                  int ntmax, int *irc) {
/* this subroutine sorts particles by x,y grid in tiles of mx, my
   linear interpolation, with periodic boundary conditions
   tiles are assumed to be arranged in 2D linear memory
   algorithm has 3 steps. first, one finds particles leaving tile and
   stores their number in each direction, location, and destination in
   ncl and ihole. second, a prefix scan of ncl is performed and departing
   particles are buffered in ppbuff in direction order. finally, we copy
   the incoming particles from other tiles into ppart.
   this is the vectorized/blocked variant: hole and buffer bookkeeping is
   processed in blocks of NPBLK particles through the scratch array n
   input: all except ppbuff, ncl, ihole, irc
   output: ppart, ppbuff, kpic, ncl, ihole, irc
   ppart[k][0][n] = position x of particle n in tile k
   ppart[k][1][n] = position y of particle n in tile k
   ppbuff[k][i][n] = i co-ordinate of particle n in tile k
   kpic[k] = number of particles in tile k
   ncl[k][i] = number of particles going to destination i, tile k
   ihole[k][:][0] = location of hole in array left by departing particle
   ihole[k][:][1] = direction destination of particle leaving hole
   all for tile k
   ihole[k][0][0] = ih, number of holes left (error, if negative)
   idimp = size of phase space = 4
   nppmx = maximum number of particles in tile
   nx/ny = system length in x/y direction
   mx/my = number of grids in sorting cell in x/y
   mx1 = (system length in x direction - 1)/mx + 1
   my1 = (system length in y direction - 1)/my + 1
   npbmx = size of buffer array ppbuff
   ntmax = size of hole array for particles leaving tiles
   irc = maximum overflow, returned only if error occurs, when irc > 0
local data */
/* NPBLK = blocking factor for the gather/scatter scratch array n */
#define NPBLK 16
   int mxy1, noff, moff, npoff, npp, ipp, joff, nps, nboff, ncoff;
   int i, j, k, m, ii, kx, ky, ih, nh, ist, nn, mm, in;
   int ip, j1, j2, kxl, kxr, kk, kl, kr, lb, kxs;
   float anx, any, edgelx, edgely, edgerx, edgery, dx, dy;
   int sncl[8], ks[8];
/* scratch arrays */
   int n[NPBLK*3];
   mxy1 = mx1*my1;
   anx = (float) nx;
   any = (float) ny;
/* find and count particles leaving tiles and determine destination */
/* update ppart, ihole, ncl */
/* loop over tiles */
#pragma omp parallel for \
private(j,k,noff,moff,npp,npoff,nn,mm,ih,nh,ist,dx,dy,edgelx,edgely, \
edgerx,edgery)
   for (k = 0; k < mxy1; k++) {
      noff = k/mx1;
      moff = my*noff;
      noff = mx*(k - mx1*noff);
      npp = kpic[k];
      npoff = idimp*nppmx*k;
/* nn/mm = actual tile extent (last tile in each direction may be short) */
      nn = nx - noff;
      nn = mx < nn ? mx : nn;
      mm = ny - moff;
      mm = my < mm ? my : mm;
      ih = 0;
      nh = 0;
      edgelx = noff;
      edgerx = noff + nn;
      edgely = moff;
      edgery = moff + mm;
/* clear counters */
      for (j = 0; j < 8; j++) {
         ncl[j+8*k] = 0;
      }
/* loop over particles in tile */
      for (j = 0; j < npp; j++) {
         dx = ppart[j+npoff];
         dy = ppart[j+nppmx+npoff];
/* find particles going out of bounds */
         ist = 0;
/* count how many particles are going in each direction in ncl */
/* save their address and destination in ihole */
/* use periodic boundary conditions and check for roundoff error */
/* ist = direction particle is going */
         if (dx >= edgerx) {
            if (dx >= anx)
               ppart[j+npoff] = dx - anx;
            ist = 2;
         }
         else if (dx < edgelx) {
            if (dx < 0.0) {
               dx += anx;
/* roundoff guard: dx + anx can equal anx in floating point */
               if (dx < anx)
                  ist = 1;
               else
                  dx = 0.0;
               ppart[j+npoff] = dx;
            }
            else {
               ist = 1;
            }
         }
         if (dy >= edgery) {
            if (dy >= any)
               ppart[j+nppmx+npoff] = dy - any;
            ist += 6;
         }
         else if (dy < edgely) {
            if (dy < 0.0) {
               dy += any;
/* roundoff guard: dy + any can equal any in floating point */
               if (dy < any)
                  ist += 3;
               else
                  dy = 0.0;
               ppart[j+nppmx+npoff] = dy;
            }
            else {
               ist += 3;
            }
         }
         if (ist > 0) {
            ncl[ist+8*k-1] += 1;
            ih += 1;
            if (ih <= ntmax) {
               ihole[2*(ih+(ntmax+1)*k)] = j + 1;
               ihole[1+2*(ih+(ntmax+1)*k)] = ist;
            }
            else {
               nh = 1;
            }
         }
      }
/* set error and end of file flag */
      if (nh > 0) {
         *irc = ih;
         ih = -ih;
      }
      ihole[2*(ntmax+1)*k] = ih;
   }
/* ihole overflow */
   if (*irc > 0)
      return;
/* buffer particles that are leaving tile: update ppbuff, ncl */
/* loop over tiles */
#pragma omp parallel for \
private(i,j,k,m,kxs,lb,npoff,nboff,ist,nh,ip,ipp,nps,joff,j1,ii,sncl, \
ks,n)
   for (k = 0; k < mxy1; k++) {
      npoff = idimp*nppmx*k;
      nboff = idimp*npbmx*k;
/* find address offset for ordered ppbuff array */
      for (j = 0; j < 8; j++) {
         sncl[j] = ncl[j+8*k];
         ks[j] = j;
      }
/* vectorizable log2(8)-pass inclusive prefix scan of sncl */
      kxs = 1;
      while (kxs < 8) {
#pragma ivdep
         for (j = 0; j < 4; j++) {
            lb = kxs*ks[j];
            sncl[j+lb+kxs] += sncl[2*lb+kxs-1];
            ks[j] >>= 1;
         }
         kxs <<= 1;
      }
/* convert inclusive scan to exclusive starting offsets */
      for (j = 0; j < 8; j++) {
         sncl[j] -= ncl[j+8*k];
      }
      nh = ihole[2*(ntmax+1)*k];
      ip = 0;
/* buffer particles that are leaving tile, in direction order */
/* loop over particles leaving tile */
      ipp = nh/NPBLK;
/* outer loop over number of full blocks */
      for (m = 0; m < ipp; m++) {
         joff = NPBLK*m + 1;
/* inner loop over particles in block */
         for (j = 0; j < NPBLK; j++) {
            n[j] = ihole[2*(j+joff+(ntmax+1)*k)] - 1;
            n[j+NPBLK] = ihole[1+2*(j+joff+(ntmax+1)*k)];
         }
/* calculate offsets */
         for (j = 0; j < NPBLK; j++) {
            ist = n[j+NPBLK];
            ii = sncl[ist-1];
            n[j+NPBLK] = ii;
            sncl[ist-1] = ii + 1;
         }
/* buffer particles that are leaving tile, in direction order */
         for (i = 0; i < idimp; i++) {
            for (j = 0; j < NPBLK; j++) {
               j1 = n[j];
               ii = n[j+NPBLK];
               if (ii < npbmx) {
                  ppbuff[ii+npbmx*i+nboff]
                  = ppart[j1+nppmx*i+npoff];
               }
               else {
                  ip = 1;
               }
            }
         }
      }
      nps = NPBLK*ipp;
/* loop over remaining particles */
      for (j = nps; j < nh; j++) {
/* buffer particles that are leaving tile, in direction order */
         j1 = ihole[2*(j+1+(ntmax+1)*k)] - 1;
         ist = ihole[1+2*(j+1+(ntmax+1)*k)];
         ii = sncl[ist-1];
         if (ii < npbmx) {
            for (i = 0; i < idimp; i++) {
               ppbuff[ii+npbmx*i+nboff]
               = ppart[j1+nppmx*i+npoff];
            }
         }
         else {
            ip = 1;
         }
         sncl[ist-1] = ii + 1;
      }
/* write final (cumulative) counts back to ncl */
      for (j = 0; j < 8; j++) {
         ncl[j+8*k] = sncl[j];
      }
/* set error */
      if (ip > 0)
         *irc = ncl[7+8*k];
   }
/* ppbuff overflow */
   if (*irc > 0)
      return;
/* copy incoming particles from buffer into ppart: update ppart, kpic */
/* loop over tiles */
#pragma omp parallel for \
private(i,j,k,m,ii,kk,in,npp,npoff,nboff,ipp,joff,nps,kx,ky,kl,kr,kxl, \
kxr,ih,nh,nn,mm,ncoff,ist,j1,j2,ip,ks,n)
   for (k = 0; k < mxy1; k++) {
      npp = kpic[k];
      npoff = idimp*nppmx*k;
      ky = k/mx1;
/* loop over tiles in y, assume periodic boundary conditions */
      kk = ky*mx1;
/* find tile above */
      kl = ky - 1;
      if (kl < 0)
         kl += my1;
      kl = kl*mx1;
/* find tile below */
      kr = ky + 1;
      if (kr >= my1)
         kr -= my1;
      kr = kr*mx1;
/* loop over tiles in x, assume periodic boundary conditions */
      kx = k - ky*mx1;
      kxl = kx - 1;
      if (kxl < 0)
         kxl += mx1;
      kxr = kx + 1;
      if (kxr >= mx1)
         kxr -= mx1;
/* find tile number for different directions */
      ks[0] = kxr + kk;
      ks[1] = kxl + kk;
      ks[2] = kx + kr;
      ks[3] = kxr + kr;
      ks[4] = kxl + kr;
      ks[5] = kx + kl;
      ks[6] = kxr + kl;
      ks[7] = kxl + kl;
/* loop over directions */
      nh = ihole[2*(ntmax+1)*k];
      ncoff = 0;
      ih = 0;
      ist = 0;
      j1 = 0;
      for (ii = 0; ii < 8; ii++) {
         nboff = idimp*npbmx*ks[ii];
         if (ii > 0)
            ncoff = ncl[ii-1+8*ks[ii]];
/* ip = number of particles coming from direction ii */
         ip = ncl[ii+8*ks[ii]] - ncoff;
/* loop over particles coming from direction ii */
         ipp = ip/NPBLK;
/* outer loop over number of full blocks */
         for (m = 0; m < ipp; m++) {
            joff = NPBLK*m;
/* inner loop over particles in block */
            for (j = 0; j < NPBLK; j++) {
/* insert incoming particles into holes */
               if ((j+ih) < nh) {
                  j1 = ihole[2*(j+ih+1+(ntmax+1)*k)] - 1;
               }
/* place overflow at end of array */
               else {
                  j1 = npp + j + ih - nh;
               }
               n[j] = j1;
            }
            for (i = 0; i < idimp; i++) {
               for (j = 0; j < NPBLK; j++) {
                  j1 = n[j];
                  if (j1 < nppmx) {
                     ppart[j1+nppmx*i+npoff]
                     = ppbuff[j+joff+ncoff+npbmx*i+nboff];
                  }
                  else {
                     ist = 1;
                  }
               }
            }
            ih += NPBLK;
         }
         nps = NPBLK*ipp;
/* loop over remaining particles */
         for (j = nps; j < ip; j++) {
            ih += 1;
/* insert incoming particles into holes */
            if (ih <= nh) {
               j1 = ihole[2*(ih+(ntmax+1)*k)] - 1;
            }
/* place overflow at end of array */
            else {
               j1 = npp + ih - nh - 1;
            }
            if (j1 < nppmx) {
               for (i = 0; i < idimp; i++) {
                  ppart[j1+nppmx*i+npoff]
                  = ppbuff[j+ncoff+npbmx*i+nboff];
               }
            }
            else {
               ist = 1;
            }
         }
      }
      if (ih > nh)
         npp = npp + ih - nh;
/* set error */
      if (ist > 0)
         *irc = j1+1;
/* fill up remaining holes in particle array with particles from bottom */
/* holes with locations greater than npp-ip do not need to be filled */
      if (ih < nh) {
         ip = nh - ih;
/* move particles from end into remaining holes */
/* holes are processed in increasing order */
         ii = nh;
         ipp = ip/NPBLK;
/* outer loop over number of full blocks */
         for (m = 0; m < ipp; m++) {
            joff = NPBLK*m;
/* inner loop over particles in block */
            for (j = 0; j < NPBLK; j++) {
               n[j+NPBLK] = ihole[2*(ih+j+1+(ntmax+1)*k)] - 1;
               n[j+2*NPBLK] = ihole[2*(ii-j+(ntmax+1)*k)] - 1;
            }
            in = 0;
            mm = 0;
            nn = n[in+2*NPBLK];
/* mark source slots that are themselves holes with -1 (skip the copy) */
            for (j = 0; j < NPBLK; j++) {
               j1 = npp - j - joff - 1;
               n[j] = n[mm+NPBLK];
               if (j1==nn) {
                  in += 1;
                  nn = n[in+2*NPBLK];
                  n[j] = -1;
               }
               else {
                  mm += 1;
               }
            }
            for (i = 0; i < idimp; i++) {
#pragma ivdep
               for (j = 0; j < NPBLK; j++) {
                  j1 = npp - j - joff - 1;
                  j2 = n[j];
                  if (j2 >= 0) {
                     ppart[j2+nppmx*i+npoff]
                     = ppart[j1+nppmx*i+npoff];
                  }
               }
            }
            ii -= in;
            ih += mm;
         }
         nps = NPBLK*ipp;
         nn = ihole[2*(ii+(ntmax+1)*k)] - 1;
         ih += 1;
         j2 = ihole[2*(ih+(ntmax+1)*k)] - 1;
/* loop over remaining particles */
         for (j = nps; j < ip; j++) {
            j1 = npp - j - 1;
            if (j1==nn) {
               ii -= 1;
               nn = ihole[2*(ii+(ntmax+1)*k)] - 1;
            }
            else {
               for (i = 0; i < idimp; i++) {
                  ppart[j2+nppmx*i+npoff]
                  = ppart[j1+nppmx*i+npoff];
               }
               ih += 1;
               j2 = ihole[2*(ih+(ntmax+1)*k)] - 1;
            }
         }
         npp -= ip;
      }
      kpic[k] = npp;
   }
   return;
#undef NPBLK
}
/*--------------------------------------------------------------------*/
void cvpporderf2lt(float ppart[], float ppbuff[], int kpic[], int ncl[],
                   int ihole[], int idimp, int nppmx, int mx1, int my1,
                   int npbmx, int ntmax, int *irc) {
/* this subroutine sorts particles by x,y grid in tiles of mx, my
   linear interpolation, with periodic boundary conditions
   tiles are assumed to be arranged in 2D linear memory
   the algorithm has 2 steps. first, a prefix scan of ncl is performed
   and departing particles are buffered in ppbuff in direction order.
   then we copy the incoming particles from other tiles into ppart.
   it assumes that the number, location, and destination of particles
   leaving a tile have been previously stored in ncl and ihole by the
   cgppushf2lt procedure.
   this is the vectorized/blocked variant: hole and buffer bookkeeping is
   processed in blocks of NPBLK particles through the scratch array n
   input: all except ppbuff, irc
   output: ppart, ppbuff, kpic, ncl, irc
   ppart[k][0][n] = position x of particle n in tile k
   ppart[k][1][n] = position y of particle n in tile k
   ppbuff[k][i][n] = i co-ordinate of particle n in tile k
   kpic[k] = number of particles in tile k
   ncl[k][i] = number of particles going to destination i, tile k
   ihole[k][:][0] = location of hole in array left by departing particle
   ihole[k][:][1] = direction destination of particle leaving hole
   all for tile k
   ihole[k][0][0] = ih, number of holes left (error, if negative)
   idimp = size of phase space = 4
   nppmx = maximum number of particles in tile
   mx1 = (system length in x direction - 1)/mx + 1
   my1 = (system length in y direction - 1)/my + 1
   npbmx = size of buffer array ppbuff
   ntmax = size of hole array for particles leaving tiles
   irc = maximum overflow, returned only if error occurs, when irc > 0
local data */
/* NPBLK = blocking factor for the gather/scatter scratch array n */
#define NPBLK 16
   int mxy1, npoff, npp, nboff, ncoff;
   int i, j, k, ii, kx, ky, ih, nh, ist, nn, mm, in;
   int ip, j1, j2, kxl, kxr, kk, kl, kr;
   int lb, kxs, m, ipp, nps, joff;
   int sncl[8], ks[8];
/* scratch arrays */
   int n[NPBLK*3];
   mxy1 = mx1*my1;
/* buffer particles that are leaving tile: update ppbuff, ncl */
/* loop over tiles */
#pragma omp parallel for \
private(i,j,k,m,kxs,lb,npoff,nboff,ist,nh,ip,ipp,nps,joff,j1,ii,sncl, \
ks,n)
   for (k = 0; k < mxy1; k++) {
      npoff = idimp*nppmx*k;
      nboff = idimp*npbmx*k;
/* find address offset for ordered ppbuff array */
      for (j = 0; j < 8; j++) {
         sncl[j] = ncl[j+8*k];
         ks[j] = j;
      }
/* vectorizable log2(8)-pass inclusive prefix scan of sncl */
      kxs = 1;
      while (kxs < 8) {
#pragma ivdep
         for (j = 0; j < 4; j++) {
            lb = kxs*ks[j];
            sncl[j+lb+kxs] += sncl[2*lb+kxs-1];
            ks[j] >>= 1;
         }
         kxs <<= 1;
      }
/* convert inclusive scan to exclusive starting offsets */
      for (j = 0; j < 8; j++) {
         sncl[j] -= ncl[j+8*k];
      }
      nh = ihole[2*(ntmax+1)*k];
      ip = 0;
/* buffer particles that are leaving tile, in direction order */
/* loop over particles leaving tile */
      ipp = nh/NPBLK;
/* outer loop over number of full blocks */
      for (m = 0; m < ipp; m++) {
         joff = NPBLK*m + 1;
/* inner loop over particles in block */
         for (j = 0; j < NPBLK; j++) {
            n[j] = ihole[2*(j+joff+(ntmax+1)*k)] - 1;
            n[j+NPBLK] = ihole[1+2*(j+joff+(ntmax+1)*k)];
         }
/* calculate offsets */
         for (j = 0; j < NPBLK; j++) {
            ist = n[j+NPBLK];
            ii = sncl[ist-1];
            n[j+NPBLK] = ii;
            sncl[ist-1] = ii + 1;
         }
/* buffer particles that are leaving tile, in direction order */
         for (i = 0; i < idimp; i++) {
            for (j = 0; j < NPBLK; j++) {
               j1 = n[j];
               ii = n[j+NPBLK];
               if (ii < npbmx) {
                  ppbuff[ii+npbmx*i+nboff]
                  = ppart[j1+nppmx*i+npoff];
               }
               else {
                  ip = 1;
               }
            }
         }
      }
      nps = NPBLK*ipp;
/* loop over remaining particles */
      for (j = nps; j < nh; j++) {
/* buffer particles that are leaving tile, in direction order */
         j1 = ihole[2*(j+1+(ntmax+1)*k)] - 1;
         ist = ihole[1+2*(j+1+(ntmax+1)*k)];
         ii = sncl[ist-1];
         if (ii < npbmx) {
            for (i = 0; i < idimp; i++) {
               ppbuff[ii+npbmx*i+nboff]
               = ppart[j1+nppmx*i+npoff];
            }
         }
         else {
            ip = 1;
         }
         sncl[ist-1] = ii + 1;
      }
/* write final (cumulative) counts back to ncl */
      for (j = 0; j < 8; j++) {
         ncl[j+8*k] = sncl[j];
      }
/* set error */
      if (ip > 0)
         *irc = ncl[7+8*k];
   }
/* ppbuff overflow */
   if (*irc > 0)
      return;
/* copy incoming particles from buffer into ppart: update ppart, kpic */
/* loop over tiles */
#pragma omp parallel for \
private(i,j,k,m,ii,kk,in,npp,npoff,nboff,ipp,joff,nps,kx,ky,kl,kr,kxl, \
kxr,ih,nh,nn,mm,ncoff,ist,j1,j2,ip,ks,n)
   for (k = 0; k < mxy1; k++) {
      npp = kpic[k];
      npoff = idimp*nppmx*k;
      ky = k/mx1;
/* loop over tiles in y, assume periodic boundary conditions */
      kk = ky*mx1;
/* find tile above */
      kl = ky - 1;
      if (kl < 0)
         kl += my1;
      kl = kl*mx1;
/* find tile below */
      kr = ky + 1;
      if (kr >= my1)
         kr -= my1;
      kr = kr*mx1;
/* loop over tiles in x, assume periodic boundary conditions */
      kx = k - ky*mx1;
      kxl = kx - 1;
      if (kxl < 0)
         kxl += mx1;
      kxr = kx + 1;
      if (kxr >= mx1)
         kxr -= mx1;
/* find tile number for different directions */
      ks[0] = kxr + kk;
      ks[1] = kxl + kk;
      ks[2] = kx + kr;
      ks[3] = kxr + kr;
      ks[4] = kxl + kr;
      ks[5] = kx + kl;
      ks[6] = kxr + kl;
      ks[7] = kxl + kl;
/* loop over directions */
      nh = ihole[2*(ntmax+1)*k];
      ncoff = 0;
      ih = 0;
      ist = 0;
      j1 = 0;
      for (ii = 0; ii < 8; ii++) {
         nboff = idimp*npbmx*ks[ii];
         if (ii > 0)
            ncoff = ncl[ii-1+8*ks[ii]];
/* ip = number of particles coming from direction ii */
         ip = ncl[ii+8*ks[ii]] - ncoff;
/* loop over particles coming from direction ii */
         ipp = ip/NPBLK;
/* outer loop over number of full blocks */
         for (m = 0; m < ipp; m++) {
            joff = NPBLK*m;
/* inner loop over particles in block */
            for (j = 0; j < NPBLK; j++) {
/* insert incoming particles into holes */
               if ((j+ih) < nh) {
                  j1 = ihole[2*(j+ih+1+(ntmax+1)*k)] - 1;
               }
/* place overflow at end of array */
               else {
                  j1 = npp + j + ih - nh;
               }
               n[j] = j1;
            }
            for (i = 0; i < idimp; i++) {
               for (j = 0; j < NPBLK; j++) {
                  j1 = n[j];
                  if (j1 < nppmx) {
                     ppart[j1+nppmx*i+npoff]
                     = ppbuff[j+joff+ncoff+npbmx*i+nboff];
                  }
                  else {
                     ist = 1;
                  }
               }
            }
            ih += NPBLK;
         }
         nps = NPBLK*ipp;
/* loop over remaining particles */
         for (j = nps; j < ip; j++) {
            ih += 1;
/* insert incoming particles into holes */
            if (ih <= nh) {
               j1 = ihole[2*(ih+(ntmax+1)*k)] - 1;
            }
/* place overflow at end of array */
            else {
               j1 = npp + ih - nh - 1;
            }
            if (j1 < nppmx) {
               for (i = 0; i < idimp; i++) {
                  ppart[j1+nppmx*i+npoff]
                  = ppbuff[j+ncoff+npbmx*i+nboff];
               }
            }
            else {
               ist = 1;
            }
         }
      }
      if (ih > nh)
         npp = npp + ih - nh;
/* set error */
      if (ist > 0)
         *irc = j1+1;
/* fill up remaining holes in particle array with particles from bottom */
/* holes with locations greater than npp-ip do not need to be filled */
      if (ih < nh) {
         ip = nh - ih;
/* move particles from end into remaining holes */
/* holes are processed in increasing order */
         ii = nh;
         ipp = ip/NPBLK;
/* outer loop over number of full blocks */
         for (m = 0; m < ipp; m++) {
            joff = NPBLK*m;
/* inner loop over particles in block */
            for (j = 0; j < NPBLK; j++) {
               n[j+NPBLK] = ihole[2*(ih+j+1+(ntmax+1)*k)] - 1;
               n[j+2*NPBLK] = ihole[2*(ii-j+(ntmax+1)*k)] - 1;
            }
            in = 0;
            mm = 0;
            nn = n[in+2*NPBLK];
/* mark source slots that are themselves holes with -1 (skip the copy) */
            for (j = 0; j < NPBLK; j++) {
               j1 = npp - j - joff - 1;
               n[j] = n[mm+NPBLK];
               if (j1==nn) {
                  in += 1;
                  nn = n[in+2*NPBLK];
                  n[j] = -1;
               }
               else {
                  mm += 1;
               }
            }
            for (i = 0; i < idimp; i++) {
#pragma ivdep
               for (j = 0; j < NPBLK; j++) {
                  j1 = npp - j - joff - 1;
                  j2 = n[j];
                  if (j2 >= 0) {
                     ppart[j2+nppmx*i+npoff]
                     = ppart[j1+nppmx*i+npoff];
                  }
               }
            }
            ii -= in;
            ih += mm;
         }
         nps = NPBLK*ipp;
         nn = ihole[2*(ii+(ntmax+1)*k)] - 1;
         ih += 1;
         j2 = ihole[2*(ih+(ntmax+1)*k)] - 1;
/* loop over remaining particles */
         for (j = nps; j < ip; j++) {
            j1 = npp - j - 1;
            if (j1==nn) {
               ii -= 1;
               nn = ihole[2*(ii+(ntmax+1)*k)] - 1;
            }
            else {
               for (i = 0; i < idimp; i++) {
                  ppart[j2+nppmx*i+npoff]
                  = ppart[j1+nppmx*i+npoff];
               }
               ih += 1;
               j2 = ihole[2*(ih+(ntmax+1)*k)] - 1;
            }
         }
         npp -= ip;
      }
      kpic[k] = npp;
   }
   return;
#undef NPBLK
}
/*--------------------------------------------------------------------*/
void cbguard2l(float bxy[], int nx, int ny, int nxe, int nye) {
/* replicate extended periodic vector field bxy, linear interpolation:
   the guard column at x = nx and the guard row at y = ny are copied
   from column 0 and row 0 respectively; only the first 3 of the N = 4
   padded components per grid point are replicated
   nx/ny = system length in x/y direction
   nxe = first dimension of field arrays, must be >= nx+1
   nye = second dimension of field arrays, must be >= ny+1
local data */
#define N 4
   int i, j, k, kk;
/* copy guard column at x = nx from column x = 0, row by row */
   for (k = 0; k < ny; k++) {
      kk = N*nxe*k;
      for (i = 0; i < 3; i++) {
         bxy[i+N*nx+kk] = bxy[i+kk];
      }
   }
/* copy guard row at y = ny from row y = 0 */
   kk = N*nxe*ny;
   for (j = 0; j < nx; j++) {
      for (i = 0; i < 3; i++) {
         bxy[i+N*j+kk] = bxy[i+N*j];
      }
   }
/* corner guard point replicates the origin */
   for (i = 0; i < 3; i++) {
      bxy[i+N*nx+kk] = bxy[i];
   }
   return;
#undef N
}
/*--------------------------------------------------------------------*/
void cacguard2l(float cu[], int nx, int ny, int nxe, int nye) {
/* accumulate extended periodic vector field cu, linear interpolation:
   guard cells at x = nx and y = ny are folded back into column 0 and
   row 0 and then cleared; only the first 3 of the N = 4 padded
   components per grid point are accumulated
   nx/ny = system length in x/y direction
   nxe = first dimension of field arrays, must be >= nx+1
   nye = second dimension of field arrays, must be >= ny+1
local data */
#define N 4
   int i, j, k, kk;
/* fold guard column at x = nx into column x = 0, row by row */
   for (k = 0; k < ny; k++) {
      kk = N*nxe*k;
      for (i = 0; i < 3; i++) {
         cu[i+kk] += cu[i+N*nx+kk];
         cu[i+N*nx+kk] = 0.0;
      }
   }
/* fold guard row at y = ny into row y = 0 */
   kk = N*nxe*ny;
   for (j = 0; j < nx; j++) {
      for (i = 0; i < 3; i++) {
         cu[i+N*j] += cu[i+N*j+kk];
         cu[i+N*j+kk] = 0.0;
      }
   }
/* fold corner guard point into the origin */
   for (i = 0; i < 3; i++) {
      cu[i] += cu[i+N*nx+kk];
      cu[i+N*nx+kk] = 0.0;
   }
   return;
#undef N
}
/*--------------------------------------------------------------------*/
void caguard2l(float q[], int nx, int ny, int nxe, int nye) {
/* accumulate extended periodic scalar field q, linear interpolation:
   guard cells at x = nx and y = ny are folded back into column 0 and
   row 0 and then cleared
   nx/ny = system length in x/y direction
   nxe = first dimension of field array, must be >= nx+1
   nye = second dimension of field array, must be >= ny+1
local data */
   int j, k, koff, kedge;
/* fold guard column at x = nx into column x = 0 */
   for (k = 0; k < ny; k++) {
      koff = nxe*k;
      q[koff] += q[nx+koff];
      q[nx+koff] = 0.0;
   }
/* fold guard row at y = ny into row y = 0 */
   kedge = nxe*ny;
   for (j = 0; j < nx; j++) {
      q[j] += q[j+kedge];
      q[j+kedge] = 0.0;
   }
/* fold corner guard point into the origin */
   q[0] += q[nx+kedge];
   q[nx+kedge] = 0.0;
   return;
}
/*--------------------------------------------------------------------*/
void cvmpois23(float complex q[], float complex fxy[], int isign,
               float complex ffc[], float ax, float ay, float affp,
               float *we, int nx, int ny, int nxvh, int nyv, int nxhd,
               int nyhd) {
/* this subroutine solves 2-1/2d poisson's equation in fourier space for
   force/charge (or convolution of electric field over particle shape)
   with periodic boundary conditions. Zeros out z component.
   for isign = 0, input: isign,ax,ay,affp,nx,ny,nxvh,nyhd, output: ffc
   for isign /= 0, input: q,ffc,isign,nx,ny,nxvh,nyhd, output: fxy,we
   approximate flop count is: 26*nxc*nyc + 12*(nxc + nyc)
   where nxc = nx/2 - 1, nyc = ny/2 - 1
   equation used is:
   fx[ky][kx] = -sqrt(-1)*kx*g[ky][kx]*s[ky][kx]*q[ky][kx],
   fy[ky][kx] = -sqrt(-1)*ky*g[ky][kx]*s[ky][kx]*q[ky][kx],
   fz[ky][kx] = zero,
   where kx = 2pi*j/nx, ky = 2pi*k/ny, and j,k = fourier mode numbers,
   g[ky][kx] = (affp/(kx**2+ky**2))*s[ky][kx],
   s[ky][kx] = exp(-((kx*ax)**2+(ky*ay)**2)/2), except for
   fx(kx=pi) = fy(kx=pi) = fx(ky=pi) = fy(ky=pi) = 0, and
   fx(kx=0,ky=0) = fy(kx=0,ky=0) = 0.
   q[k][j] = complex charge density for fourier mode (j,k)
   fxy[k][j][0] = x component of complex force/charge,
   fxy[k][j][1] = y component of complex force/charge,
   fxy[k][j][2] = zero,
   all for fourier mode (j,k)
   if isign = 0, form factor array is prepared
   if isign is not equal to 0, force/charge is calculated
   cimag(ffc[k][j]) = finite-size particle shape factor s
   for fourier mode (j,k)
   creal(ffc[k][j]) = potential green's function g
   for fourier mode (j,k)
   ax/ay = half-width of particle in x/y direction
   affp = normalization constant = nx*ny/np, where np=number of particles
   electric field energy is also calculated, using
   we = nx*ny*sum((affp/(kx**2+ky**2))*|q[ky][kx]*s[ky][kx]|**2)
   nx/ny = system length in x/y direction
   nxvh = second dimension of field arrays, must be >= nxh
   nyv = third dimension of field arrays, must be >= ny
   nxhd = first dimension of form factor array, must be >= nxh
   nyhd = second dimension of form factor array, must be >= nyh
local data */
/* N = number of padded components per grid point in fxy */
#define N 4
   int nxh, nyh, j, k, k1, kk, kj;
   float dnx, dny, dkx, dky, at1, at2, at3, at4;
   float complex zero, zt1, zt2;
   double wp, sum1;
   nxh = nx/2;
   nyh = 1 > ny/2 ? 1 : ny/2;
   dnx = 6.28318530717959/(float) nx;
   dny = 6.28318530717959/(float) ny;
   zero = 0.0 + 0.0*_Complex_I;
   if (isign != 0)
      goto L30;
/* prepare form factor array */
   for (k = 0; k < nyh; k++) {
      dky = dny*(float) k;
      kk = nxhd*k;
      at1 = dky*dky;
      at2 = pow((dky*ay),2);
      for (j = 0; j < nxh; j++) {
         dkx = dnx*(float) j;
         at3 = dkx*dkx + at1;
         at4 = exp(-0.5*(pow((dkx*ax),2) + at2));
/* kx = ky = 0 mode: store affp and unit shape factor */
         if (at3==0.0) {
            ffc[j+kk] = affp + 1.0*_Complex_I;
         }
         else {
            ffc[j+kk] = (affp*at4/at3) + at4*_Complex_I;
         }
      }
   }
   return;
/* calculate force/charge and sum field energy */
L30: sum1 = 0.0;
/* mode numbers 0 < kx < nx/2 and 0 < ky < ny/2 */
#pragma omp parallel for \
private(j,k,k1,kk,kj,dky,at1,at2,at3,zt1,zt2,wp) \
reduction(+:sum1)
   for (k = 1; k < nyh; k++) {
      dky = dny*(float) k;
      kk = nxhd*k;
      kj = nxvh*k;
/* k1 = row index of the conjugate (negative ky) modes */
      k1 = nxvh*ny - kj;
      wp = 0.0;
#pragma ivdep
      for (j = 1; j < nxh; j++) {
         at1 = crealf(ffc[j+kk])*cimagf(ffc[j+kk]);
         at2 = at1*dnx*(float) j;
         at3 = dky*at1;
/* zt1, zt2 = -sqrt(-1)*q at modes (j,k) and (j,ny-k) */
         zt1 = cimagf(q[j+kj]) - crealf(q[j+kj])*_Complex_I;
         zt2 = cimagf(q[j+k1]) - crealf(q[j+k1])*_Complex_I;
         fxy[N*(j+kj)] = at2*zt1;
         fxy[1+N*(j+kj)] = at3*zt1;
         fxy[2+N*(j+kj)] = zero;
         fxy[N*(j+k1)] = at2*zt2;
         fxy[1+N*(j+k1)] = -at3*zt2;
         fxy[2+N*(j+k1)] = zero;
         at1 = at1*(q[j+kj]*conjf(q[j+kj]) + q[j+k1]*conjf(q[j+k1]));
         wp += (double) at1;
      }
/* mode numbers kx = 0, nx/2 */
      at1 = crealf(ffc[kk])*cimagf(ffc[kk]);
      at3 = at1*dny*(float) k;
      zt1 = cimagf(q[kj]) - crealf(q[kj])*_Complex_I;
      fxy[N*kj] = zero;
      fxy[1+N*kj] = at3*zt1;
      fxy[2+N*kj] = zero;
      fxy[N*k1] = zero;
      fxy[1+N*k1] = zero;
      fxy[2+N*k1] = zero;
      at1 = at1*(q[kj]*conjf(q[kj]));
      wp += (double) at1;
      sum1 += wp;
   }
   wp = 0.0;
/* mode numbers ky = 0, ny/2 */
   k1 = N*nxvh*nyh;
#pragma ivdep
   for (j = 1; j < nxh; j++) {
      at1 = crealf(ffc[j])*cimagf(ffc[j]);
      at2 = at1*dnx*(float) j;
      zt1 = cimagf(q[j]) - crealf(q[j])*_Complex_I;
      fxy[N*j] = at2*zt1;
      fxy[1+N*j] = zero;
      fxy[2+N*j] = zero;
      fxy[N*j+k1] = zero;
      fxy[1+N*j+k1] = zero;
      fxy[2+N*j+k1] = zero;
      at1 = at1*(q[j]*conjf(q[j]));
      wp += (double) at1;
   }
/* kx = ky = 0 and zone boundary modes are zero */
   fxy[0] = zero;
   fxy[1] = zero;
   fxy[2] = zero;
   fxy[k1] = zero;
   fxy[1+k1] = zero;
   fxy[2+k1] = zero;
   sum1 += wp;
   *we = sum1*(float) (nx*ny);
   return;
#undef N
}
/*--------------------------------------------------------------------*/
void cmcuperp2(float complex cu[], int nx, int ny, int nxvh, int nyv) {
/* this subroutine calculates the transverse current in fourier space
   by removing the longitudinal projection from each mode:
   cu = cu - k*(k.cu)/|k|**2, for mode (kx,ky),
   where kx = 2pi*j/nx, ky = 2pi*k/ny, j,k = fourier mode numbers
   modes with kx = pi or ky = pi, and the kx = ky = 0 mode, are zeroed
   input: all, output: cu
   cu[k][j][i] = complex current density for fourier mode (j,k)
   nx/ny = system length in x/y direction
   nxvh = second dimension of current array, must be >= nx/2
   nyv = third dimension of current array, must be >= ny
local data */
#define N 4
   int nxh, nyh, j, k, krow, kconj;
   float dnx, dny, dkx, dky, dky2, denom;
   float complex czero, proj;
   nxh = nx/2;
   nyh = ny/2 > 1 ? ny/2 : 1;
   dnx = 6.28318530717959/(float) nx;
   dny = 6.28318530717959/(float) ny;
   czero = 0.0 + 0.0*_Complex_I;
/* remove longitudinal part for modes 0 < kx < nx/2 and 0 < ky < ny/2 */
#pragma omp parallel for private(j,k,krow,kconj,dky,dky2,dkx,denom,proj)
   for (k = 1; k < nyh; k++) {
      dky = dny*(float) k;
      dky2 = dky*dky;
      krow = N*nxvh*k;
/* rows k and ny-k hold modes ky > 0 and ky < 0, respectively */
      kconj = N*nxvh*ny - krow;
#pragma ivdep
      for (j = 1; j < nxh; j++) {
         dkx = dnx*(float) j;
         denom = 1./(dkx*dkx + dky2);
         proj = denom*(dkx*cu[N*j+krow] + dky*cu[1+N*j+krow]);
         cu[N*j+krow] -= dkx*proj;
         cu[1+N*j+krow] -= dky*proj;
         proj = denom*(dkx*cu[N*j+kconj] - dky*cu[1+N*j+kconj]);
         cu[N*j+kconj] -= dkx*proj;
         cu[1+N*j+kconj] += dky*proj;
      }
/* mode numbers kx = 0, nx/2 */
      cu[1+krow] = czero;
      cu[kconj] = czero;
      cu[1+kconj] = czero;
   }
/* mode numbers ky = 0, ny/2 */
   kconj = N*nxvh*nyh;
#pragma ivdep
   for (j = 1; j < nxh; j++) {
      cu[N*j] = czero;
      cu[N*j+kconj] = czero;
      cu[1+N*j+kconj] = czero;
   }
   cu[0] = czero;
   cu[1] = czero;
   cu[kconj] = czero;
   cu[1+kconj] = czero;
   return;
#undef N
}
/*--------------------------------------------------------------------*/
void cvmibpois23(float complex cu[], float complex bxy[],
                 float complex ffc[], float ci, float *wm, int nx,
                 int ny, int nxvh, int nyv, int nxhd, int nyhd) {
/* this subroutine solves 2-1/2d poisson's equation in fourier space for
   magnetic field, with periodic boundary conditions.
   input: cu,ffc,ci,nx,ny,nxvh,nyhd, output: bxy,wm
   approximate flop count is: 90*nxc*nyc + 40*(nxc + nyc)
   where nxc = nx/2 - 1, nyc = ny/2 - 1
   the magnetic field is calculated using the equations:
   bx[ky][kx] = ci*ci*sqrt(-1)*g[ky][kx]*ky*cuz[ky][kx],
   by[ky][kx] = -ci*ci*sqrt(-1)*g[ky][kx]*kx*cuz[ky][kx],
   bz[ky][kx] = ci*ci*sqrt(-1)*g[ky][kx]*(kx*cuy[ky][kx]-ky*cux[ky][kx]),
   where kx = 2pi*j/nx, ky = 2pi*k/ny, and j,k = fourier mode numbers,
   g[ky][kx] = (affp/(kx**2+ky**2))*s[ky][kx],
   s[ky][kx] = exp(-((kx*ax)**2+(ky*ay)**2)/2), except for
   bx(kx=pi) = by(kx=pi) = bz(kx=pi) = bx(ky=pi) = by(ky=pi) = bz(ky=pi)
   = 0, and bx(kx=0,ky=0) = by(kx=0,ky=0) = bz(kx=0,ky=0) = 0.
   cu[k][j][i] = complex current density for fourier mode (j,k)
   bxy[k][j][i] = i component of complex magnetic field
   all for fourier mode (j,k)
   cimag(ffc[k][j]) = finite-size particle shape factor s
   for fourier mode (j,k)
   creal(ffc[k][j]) = potential green's function g
   for fourier mode (j,k)
   ci = reciprocal of velocity of light
   magnetic field energy is also calculated, using
   wm = nx*ny*sum((affp/(kx**2+ky**2))*ci*ci*
   |cu[ky][kx]*s[ky][kx]|**2), where
   affp = normalization constant = nx*ny/np, where np=number of particles
   this expression is valid only if the current is divergence-free
   nx/ny = system length in x/y direction
   nxvh = second dimension of field arrays, must be >= nxh
   nyv = third dimension of field arrays, must be >= ny
   nxhd = first dimension of form factor array, must be >= nxh
   nyhd = second dimension of form factor array, must be >= nyh
local data */
/* N = number of padded components per grid point in cu and bxy */
#define N 4
   int nxh, nyh, j, k, k1, kk, kj;
   float dnx, dny, dky, ci2, at1, at2, at3;
   float complex zero, zt1, zt2, zt3;
   double wp, sum1;
   nxh = nx/2;
   nyh = 1 > ny/2 ? 1 : ny/2;
   dnx = 6.28318530717959/(float) nx;
   dny = 6.28318530717959/(float) ny;
   zero = 0.0 + 0.0*_Complex_I;
   ci2 = ci*ci;
/* calculate magnetic field and sum field energy */
   sum1 = 0.0;
/* mode numbers 0 < kx < nx/2 and 0 < ky < ny/2 */
#pragma omp parallel for \
private(j,k,k1,kk,kj,dky,at1,at2,at3,zt1,zt2,zt3,wp) \
reduction(+:sum1)
   for (k = 1; k < nyh; k++) {
      dky = dny*(float) k;
      kk = nxhd*k;
      kj = N*nxvh*k;
/* k1 = row offset of the conjugate (negative ky) modes */
      k1 = N*nxvh*ny - kj;
      wp = 0.0;
#pragma ivdep
      for (j = 1; j < nxh; j++) {
         at1 = ci2*crealf(ffc[j+kk]);
         at2 = at1*dnx*(float) j;
         at3 = dky*at1;
         at1 = at1*cimagf(ffc[j+kk]);
/* zt1, zt2, zt3 = sqrt(-1)*(cuz, cuy, cux) at mode (j,k) */
         zt1 = -cimagf(cu[2+N*j+kj])
               + crealf(cu[2+N*j+kj])*_Complex_I;
         zt2 = -cimagf(cu[1+N*j+kj])
               + crealf(cu[1+N*j+kj])*_Complex_I;
         zt3 = -cimagf(cu[N*j+kj]) + crealf(cu[N*j+kj])*_Complex_I;
         bxy[N*j+kj] = at3*zt1;
         bxy[1+N*j+kj] = -at2*zt1;
         bxy[2+N*j+kj] = at2*zt2 - at3*zt3;
/* conjugate (negative ky) modes: sign of the ky terms flips */
         zt1 = -cimagf(cu[2+N*j+k1])
               + crealf(cu[2+N*j+k1])*_Complex_I;
         zt2 = -cimagf(cu[1+N*j+k1])
               + crealf(cu[1+N*j+k1])*_Complex_I;
         zt3 = -cimagf(cu[N*j+k1]) + crealf(cu[N*j+k1])*_Complex_I;
         bxy[N*j+k1] = -at3*zt1;
         bxy[1+N*j+k1] = -at2*zt1;
         bxy[2+N*j+k1] = at2*zt2 + at3*zt3;
         at1 = at1*(cu[N*j+kj]*conjf(cu[N*j+kj])
               + cu[1+N*j+kj]*conjf(cu[1+N*j+kj])
               + cu[2+N*j+kj]*conjf(cu[2+N*j+kj])
               + cu[N*j+k1]*conjf(cu[N*j+k1])
               + cu[1+N*j+k1]*conjf(cu[1+N*j+k1])
               + cu[2+N*j+k1]*conjf(cu[2+N*j+k1]));
         wp += (double) at1;
      }
/* mode numbers kx = 0, nx/2 */
      at1 = ci2*crealf(ffc[kk]);
      at3 = at1*dny*(float) k;
      at1 = at1*cimagf(ffc[kk]);
      zt1 = -cimagf(cu[2+kj]) + crealf(cu[2+kj])*_Complex_I;
      zt3 = -cimagf(cu[kj]) + crealf(cu[kj])*_Complex_I;
      bxy[kj] = at3*zt1;
      bxy[1+kj] = zero;
      bxy[2+kj] = -at3*zt3;
      bxy[k1] = zero;
      bxy[1+k1] = zero;
      bxy[2+k1] = zero;
      at1 = at1*(cu[kj]*conjf(cu[kj]) + cu[1+kj]*conjf(cu[1+kj])
            + cu[2+kj]*conjf(cu[2+kj]));
      wp += (double) at1;
      sum1 += wp;
   }
   wp = 0.0;
/* mode numbers ky = 0, ny/2 */
   k1 = N*nxvh*nyh;
#pragma ivdep
   for (j = 1; j < nxh; j++) {
      at1 = ci2*crealf(ffc[j]);
      at2 = at1*dnx*(float) j;
      at1 = at1*cimagf(ffc[j]);
      zt1 = -cimagf(cu[2+N*j]) + crealf(cu[2+N*j])*_Complex_I;
      zt2 = -cimagf(cu[1+N*j]) + crealf(cu[1+N*j])*_Complex_I;
      bxy[N*j] = zero;
      bxy[1+N*j] = -at2*zt1;
      bxy[2+N*j] = at2*zt2;
      bxy[N*j+k1] = zero;
      bxy[1+N*j+k1] = zero;
      bxy[2+N*j+k1] = zero;
      at1 = at1*(cu[N*j]*conjf(cu[N*j]) + cu[1+N*j]*conjf(cu[1+N*j])
            + cu[2+N*j]*conjf(cu[2+N*j]));
      wp += (double) at1;
   }
/* kx = ky = 0 and zone boundary modes are zero */
   bxy[0] = zero;
   bxy[1] = zero;
   bxy[2] = zero;
   bxy[k1] = zero;
   bxy[1+k1] = zero;
   bxy[2+k1] = zero;
   sum1 += wp;
   *wm = sum1*(float) (nx*ny);
   return;
#undef N
}
/*--------------------------------------------------------------------*/
void cvmmaxwel2(float complex exy[], float complex bxy[],
                float complex cu[], float complex ffc[], float ci,
                float dt, float *wf, float *wm, int nx, int ny,
                int nxvh, int nyv, int nxhd, int nyhd) {
/* this subroutine solves 2-1/2d maxwell's equation in fourier space for
   transverse electric and magnetic fields with periodic boundary
   conditions
   input: all, output: wf, wm, exy, bxy
   approximate flop count is: 286*nxc*nyc + 84*(nxc + nyc)
   where nxc = nx/2 - 1, nyc = ny/2 - 1
   the magnetic field is first updated half a step using the equations:
   bx[ky][kx] = bx[ky][kx] - .5*dt*sqrt(-1)*ky*ez[ky][kx]
   by[ky][kx] = by[ky][kx] + .5*dt*sqrt(-1)*kx*ez[ky][kx]
   bz[ky][kx] = bz[ky][kx] - .5*dt*sqrt(-1)*(kx*ey[ky][kx]-ky*ex[ky][kx])
   the electric field is then updated a whole step using the equations:
   ex[ky][kx] = ex[ky][kx] + c2*dt*sqrt(-1)*ky*bz[ky][kx]
                           - affp*dt*cux[ky][kx]*s[ky][kx]
   ey[ky][kx] = ey[ky][kx] - c2*dt*sqrt(-1)*kx*bz[ky][kx]
                           - affp*dt*cuy[ky][kx]*s[ky][kx]
   ez[ky][kx] = ez[ky][kx] + c2*dt*sqrt(-1)*(kx*by[ky][kx]-ky*bx[ky][kx])
                           - affp*dt*cuz[ky][kx]*s[ky][kx]
   the magnetic field is finally updated the remaining half step with
   the new electric field and the previous magnetic field equations.
   where kx = 2pi*j/nx, ky = 2pi*k/ny, c2 = 1./(ci*ci)
   and s[ky][kx] = exp(-((kx*ax)**2+(ky*ay)**2)
   j,k = fourier mode numbers, except for
   ex(kx=pi) = ey(kx=pi) = ez(kx=pi) = 0,
   ex(ky=pi) = ey(ky=pi) = ez(ky=pi) = 0,
   ex(kx=0,ky=0) = ey(kx=0,ky=0) = ez(kx=0,ky=0) = 0.
   and similarly for bx, by, bz.
   cu[k][j][i] = complex current density
   exy[k][j][i] = complex transverse electric field
   bxy[k][j][i] = complex magnetic field
   for component i, all for fourier mode (j,k)
   creal(ffc[0][0]) = affp = normalization constant = nx*ny/np,
   where np=number of particles
   cimag(ffc[k][j]) = finite-size particle shape factor s.
   s[ky][kx] = exp(-((kx*ax)**2+(ky*ay)**2)/2)
   for fourier mode (j-1,k-1)
   ci = reciprocal of velocity of light
   dt = time interval between successive calculations
   transverse electric field energy is also calculated, using
   wf = nx*ny*sum((1/affp)*|exy[ky][kx]|**2)
   magnetic field energy is also calculated, using
   wm = nx*ny*sum((c2/affp)*|bxy[ky][kx]|**2)
   nx/ny = system length in x/y direction
   nxvh = second dimension of field arrays, must be >= nxh
   nyv = third dimension of field arrays, must be >= ny
   nxhd = first dimension of form factor array, must be >= nxh
   nyhd = second dimension of form factor array, must be >= nyh
local data */
#define N 4
   int nxh, nyh, j, k, k1, kk, kj;
   float dnx, dny, dth, c2, cdt, affp, anorm, dkx, dky, afdt, adt;
   float at1;
   float complex zero, zt1, zt2, zt3, zt4, zt5, zt6, zt7, zt8, zt9;
   double wp, ws, sum1, sum2;
/* guard: c2 = 1/(ci*ci) below requires ci > 0 */
   if (ci <= 0.0)
      return;
   nxh = nx/2;
   nyh = 1 > ny/2 ? 1 : ny/2;
   dnx = 6.28318530717959/(float) nx;
   dny = 6.28318530717959/(float) ny;
/* dth = half time step, used for the two magnetic half-step updates */
   dth = 0.5*dt;
/* c2 = square of the speed of light (ci is its reciprocal) */
   c2 = 1.0/(ci*ci);
   cdt = c2*dt;
/* affp = normalization constant, stored in real part of ffc[0] */
   affp = creal(ffc[0]);
   adt = affp*dt;
   zero = 0.0 + 0.0*_Complex_I;
   anorm = 1.0/affp;
/* update electromagnetic field and sum field energies */
   sum1 = 0.0;
   sum2 = 0.0;
/* calculate the electromagnetic fields */
/* mode numbers 0 < kx < nx/2 and 0 < ky < ny/2 */
#pragma omp parallel for \
private(j,k,k1,kk,kj,dky,dkx,afdt,at1,zt1,zt2,zt3,zt4,zt5,zt6,zt7,zt8, \
zt9,ws,wp) \
reduction(+:sum1,sum2)
   for (k = 1; k < nyh; k++) {
      dky = dny*(float) k;
      kk = nxhd*k;
      kj = N*nxvh*k;
/* k1 indexes the conjugate modes with ky < 0 */
      k1 = N*nxvh*ny - kj;
      ws = 0.0;
      wp = 0.0;
#pragma ivdep
      for (j = 1; j < nxh; j++) {
         dkx = dnx*(float) j;
         afdt = adt*cimagf(ffc[j+kk]);
/* update magnetic field half time step, ky > 0 */
         zt1 = -cimagf(exy[2+N*j+kj])
               + crealf(exy[2+N*j+kj])*_Complex_I;
         zt2 = -cimagf(exy[1+N*j+kj])
               + crealf(exy[1+N*j+kj])*_Complex_I;
         zt3 = -cimagf(exy[N*j+kj]) + crealf(exy[N*j+kj])*_Complex_I;
         zt4 = bxy[N*j+kj] - dth*(dky*zt1);
         zt5 = bxy[1+N*j+kj] + dth*(dkx*zt1);
         zt6 = bxy[2+N*j+kj] - dth*(dkx*zt2 - dky*zt3);
/* update electric field whole time step */
         zt1 = -cimagf(zt6) + crealf(zt6)*_Complex_I;
         zt2 = -cimagf(zt5) + crealf(zt5)*_Complex_I;
         zt3 = -cimagf(zt4) + crealf(zt4)*_Complex_I;
         zt7 = exy[N*j+kj] + cdt*(dky*zt1) - afdt*cu[N*j+kj];
         zt8 = exy[1+N*j+kj] - cdt*(dkx*zt1) - afdt*cu[1+N*j+kj];
         zt9 = exy[2+N*j+kj] + cdt*(dkx*zt2 - dky*zt3)
               - afdt*cu[2+N*j+kj];
/* update magnetic field half time step and store electric field */
         zt1 = -cimagf(zt9) + crealf(zt9)*_Complex_I;
         zt2 = -cimagf(zt8) + crealf(zt8)*_Complex_I;
         zt3 = -cimagf(zt7) + crealf(zt7)*_Complex_I;
         exy[N*j+kj] = zt7;
         exy[1+N*j+kj] = zt8;
         exy[2+N*j+kj] = zt9;
/* accumulate electric field energy for this mode */
         at1 = anorm*(zt7*conjf(zt7) + zt8*conjf(zt8) + zt9*conjf(zt9));
         ws += (double) at1;
         zt4 -= dth*(dky*zt1);
         zt5 += dth*(dkx*zt1);
         zt6 -= dth*(dkx*zt2 - dky*zt3);
         bxy[N*j+kj] = zt4;
         bxy[1+N*j+kj] = zt5;
         bxy[2+N*j+kj] = zt6;
/* accumulate magnetic field energy for this mode */
         at1 = anorm*(zt4*conjf(zt4) + zt5*conjf(zt5) + zt6*conjf(zt6));
         wp += (double) at1;
/* update magnetic field half time step, ky < 0 */
         zt1 = -cimagf(exy[2+N*j+k1])
               + crealf(exy[2+N*j+k1])*_Complex_I;
         zt2 = -cimagf(exy[1+N*j+k1])
               + crealf(exy[1+N*j+k1])*_Complex_I;
         zt3 = -cimagf(exy[N*j+k1]) + crealf(exy[N*j+k1])*_Complex_I;
         zt4 = bxy[N*j+k1] + dth*(dky*zt1);
         zt5 = bxy[1+N*j+k1] + dth*(dkx*zt1);
         zt6 = bxy[2+N*j+k1] - dth*(dkx*zt2 + dky*zt3);
/* update electric field whole time step */
         zt1 = -cimagf(zt6) + crealf(zt6)*_Complex_I;
         zt2 = -cimagf(zt5) + crealf(zt5)*_Complex_I;
         zt3 = -cimagf(zt4) + crealf(zt4)*_Complex_I;
         zt7 = exy[N*j+k1] - cdt*(dky*zt1) - afdt*cu[N*j+k1];
         zt8 = exy[1+N*j+k1] - cdt*(dkx*zt1) - afdt*cu[1+N*j+k1];
         zt9 = exy[2+N*j+k1] + cdt*(dkx*zt2 + dky*zt3)
               - afdt*cu[2+N*j+k1];
/* update magnetic field half time step and store electric field */
         zt1 = -cimagf(zt9) + crealf(zt9)*_Complex_I;
         zt2 = -cimagf(zt8) + crealf(zt8)*_Complex_I;
         zt3 = -cimagf(zt7) + crealf(zt7)*_Complex_I;
         exy[N*j+k1] = zt7;
         exy[1+N*j+k1] = zt8;
         exy[2+N*j+k1] = zt9;
         at1 = anorm*(zt7*conjf(zt7) + zt8*conjf(zt8) + zt9*conjf(zt9));
         ws += (double) at1;
         zt4 += dth*(dky*zt1);
         zt5 += dth*(dkx*zt1);
         zt6 -= dth*(dkx*zt2 + dky*zt3);
         bxy[N*j+k1] = zt4;
         bxy[1+N*j+k1] = zt5;
         bxy[2+N*j+k1] = zt6;
         at1 = anorm*(zt4*conjf(zt4) + zt5*conjf(zt5) + zt6*conjf(zt6));
         wp += (double) at1;
      }
/* mode numbers kx = 0, nx/2 */
      afdt = adt*cimagf(ffc[kk]);
/* update magnetic field half time step */
      zt1 = -cimagf(exy[2+kj]) + crealf(exy[2+kj])*_Complex_I;
      zt3 = -cimagf(exy[kj]) + crealf(exy[kj])*_Complex_I;
      zt4 = bxy[kj] - dth*(dky*zt1);
      zt6 = bxy[2+kj] + dth*(dky*zt3);
/* update electric field whole time step */
      zt1 = -cimagf(zt6) + crealf(zt6)*_Complex_I;
      zt3 = -cimagf(zt4) + crealf(zt4)*_Complex_I;
      zt7 = exy[kj] + cdt*(dky*zt1) - afdt*cu[kj];
      zt9 = exy[2+kj] - cdt*(dky*zt3) - afdt*cu[2+kj];
/* update magnetic field half time step and store electric field */
      zt1 = -cimagf(zt9) + crealf(zt9)*_Complex_I;
      zt3 = -cimagf(zt7) + crealf(zt7)*_Complex_I;
      exy[kj] = zt7;
      exy[1+kj] = zero;
      exy[2+kj] = zt9;
      at1 = anorm*(zt7*conjf(zt7) + zt9*conjf(zt9));
      ws += (double) at1;
      zt4 -= dth*(dky*zt1);
      zt6 += dth*(dky*zt3);
      bxy[kj] = zt4;
      bxy[1+kj] = zero;
      bxy[2+kj] = zt6;
      at1 = anorm*(zt4*conjf(zt4) + zt6*conjf(zt6));
      wp += (double) at1;
/* kx = nx/2 modes are zeroed out */
      bxy[k1] = zero;
      bxy[1+k1] = zero;
      bxy[2+k1] = zero;
      exy[k1] = zero;
      exy[1+k1] = zero;
      exy[2+k1] = zero;
      sum1 += ws;
      sum2 += wp;
   }
   ws = 0.0;
   wp = 0.0;
/* mode numbers ky = 0, ny/2 */
   k1 = N*nxvh*nyh;
#pragma ivdep
   for (j = 1; j < nxh; j++) {
      dkx = dnx*(float) j;
      afdt = adt*cimagf(ffc[j]);
/* update magnetic field half time step */
      zt1 = -cimagf(exy[2+N*j]) + crealf(exy[2+N*j])*_Complex_I;
      zt2 = -cimagf(exy[1+N*j]) + crealf(exy[1+N*j])*_Complex_I;
      zt5 = bxy[1+N*j] + dth*(dkx*zt1);
      zt6 = bxy[2+N*j] - dth*(dkx*zt2);
/* update electric field whole time step */
      zt1 = -cimagf(zt6) + crealf(zt6)*_Complex_I;
      zt2 = -cimagf(zt5) + crealf(zt5)*_Complex_I;
      zt8 = exy[1+N*j] - cdt*(dkx*zt1) - afdt*cu[1+N*j];
      zt9 = exy[2+N*j] + cdt*(dkx*zt2) - afdt*cu[2+N*j];
/* update magnetic field half time step and store electric field */
      zt1 = -cimagf(zt9) + crealf(zt9)*_Complex_I;
      zt2 = -cimagf(zt8) + crealf(zt8)*_Complex_I;
      exy[N*j] = zero;
      exy[1+N*j] = zt8;
      exy[2+N*j] = zt9;
      at1 = anorm*(zt8*conjf(zt8) + zt9*conjf(zt9));
      ws += (double) at1;
      zt5 += dth*(dkx*zt1);
      zt6 -= dth*(dkx*zt2);
      bxy[N*j] = zero;
      bxy[1+N*j] = zt5;
      bxy[2+N*j] = zt6;
      at1 = anorm*(zt5*conjf(zt5) + zt6*conjf(zt6));
      wp += (double) at1;
/* ky = ny/2 modes are zeroed out */
      bxy[N*j+k1] = zero;
      bxy[1+N*j+k1] = zero;
      bxy[2+N*j+k1] = zero;
      exy[N*j+k1] = zero;
      exy[1+N*j+k1] = zero;
      exy[2+N*j+k1] = zero;
   }
/* zero out the kx = ky = 0 and edge modes */
   bxy[0] = zero;
   bxy[1] = zero;
   bxy[2] = zero;
   exy[0] = zero;
   exy[1] = zero;
   exy[2] = zero;
   bxy[k1] = zero;
   bxy[1+k1] = zero;
   bxy[2+k1] = zero;
   exy[k1] = zero;
   exy[1+k1] = zero;
   exy[2+k1] = zero;
   sum1 += ws;
   sum2 += wp;
/* store total field energies, scaled to physical units */
   *wf = sum1*(float) (nx*ny);
   *wm = sum2*c2*(float) (nx*ny);
   return;
#undef N
}
/*--------------------------------------------------------------------*/
void cvmemfield2(float complex fxy[], float complex exy[],
                 float complex ffc[], int isign, int nx, int ny,
                 int nxvh, int nyv, int nxhd, int nyhd) {
/* this subroutine either adds complex vector fields if isign > 0
   or copies complex vector fields if isign < 0, applying the
   finite-size particle smoothing factor s = cimag(ffc[k][j])
   fxy = destination field, exy = source field; both hold 3 used
   components packed with a stride of N (= 4) complex words; the
   4th component of fxy is never touched
   isign = 0 leaves fxy unchanged
   nx/ny = system length in x/y direction
   nxvh = second dimension of field arrays, must be >= nx/2
   nyv = third dimension of field arrays, must be >= ny
   nxhd = first dimension of form factor array, must be >= nx/2
   nyhd = second dimension of form factor array, must be >= ny/2
local data */
#define N 4
   int j, k, nxh, nyh, k1, kk, kj;
   float at1;
   nxh = nx/2;
   nyh = 1 > ny/2 ? 1 : ny/2;
/* add the fields */
   if (isign > 0) {
#pragma omp parallel for private(j,k,k1,kk,kj,at1)
      for (k = 1; k < nyh; k++) {
         kk = nxhd*k;
         kj = N*nxvh*k;
/* k1 indexes the conjugate modes with ky < 0 */
         k1 = N*nxvh*ny - kj;
#pragma ivdep
         for (j = 0; j < nxh; j++) {
            at1 = cimagf(ffc[j+kk]);
            fxy[N*j+kj] += exy[N*j+kj]*at1;
            fxy[1+N*j+kj] += exy[1+N*j+kj]*at1;
            fxy[2+N*j+kj] += exy[2+N*j+kj]*at1;
            fxy[N*j+k1] += exy[N*j+k1]*at1;
            fxy[1+N*j+k1] += exy[1+N*j+k1]*at1;
            fxy[2+N*j+k1] += exy[2+N*j+k1]*at1;
         }
      }
/* rows k = 0 and k = ny/2 */
      k1 = N*nxvh*nyh;
#pragma ivdep
      for (j = 0; j < nxh; j++) {
         at1 = cimagf(ffc[j]);
         fxy[N*j] += exy[N*j]*at1;
         fxy[1+N*j] += exy[1+N*j]*at1;
         fxy[2+N*j] += exy[2+N*j]*at1;
         fxy[N*j+k1] += exy[N*j+k1]*at1;
         fxy[1+N*j+k1] += exy[1+N*j+k1]*at1;
         fxy[2+N*j+k1] += exy[2+N*j+k1]*at1;
      }
   }
/* copy the fields */
   else if (isign < 0) {
#pragma omp parallel for private(j,k,k1,kk,kj,at1)
      for (k = 1; k < nyh; k++) {
         kk = nxhd*k;
         kj = N*nxvh*k;
         k1 = N*nxvh*ny - kj;
#pragma ivdep
         for (j = 0; j < nxh; j++) {
            at1 = cimagf(ffc[j+kk]);
            fxy[N*j+kj] = exy[N*j+kj]*at1;
            fxy[1+N*j+kj] = exy[1+N*j+kj]*at1;
            fxy[2+N*j+kj] = exy[2+N*j+kj]*at1;
            fxy[N*j+k1] = exy[N*j+k1]*at1;
            fxy[1+N*j+k1] = exy[1+N*j+k1]*at1;
            fxy[2+N*j+k1] = exy[2+N*j+k1]*at1;
         }
      }
/* rows k = 0 and k = ny/2 */
      k1 = N*nxvh*nyh;
/* ivdep added for consistency with the matching isign > 0 loop */
#pragma ivdep
      for (j = 0; j < nxh; j++) {
         at1 = cimagf(ffc[j]);
         fxy[N*j] = exy[N*j]*at1;
         fxy[1+N*j] = exy[1+N*j]*at1;
         fxy[2+N*j] = exy[2+N*j]*at1;
         fxy[N*j+k1] = exy[N*j+k1]*at1;
         fxy[1+N*j+k1] = exy[1+N*j+k1]*at1;
         fxy[2+N*j+k1] = exy[2+N*j+k1]*at1;
      }
   }
   return;
#undef N
}
/*--------------------------------------------------------------------*/
void cwfft2rinit(int mixup[], float complex sct[], int indx, int indy,
int nxhyd, int nxyhd) {
/* this subroutine calculates tables needed by a two dimensional
real to complex fast fourier transform and its inverse.
input: indx, indy, nxhyd, nxyhd
output: mixup, sct
mixup = array of bit reversed addresses
sct = sine/cosine table
indx/indy = exponent which determines length in x/y direction,
where nx=2**indx, ny=2**indy
nxhyd = maximum of (nx/2,ny)
nxyhd = one half of maximum of (nx,ny)
written by viktor k. decyk, ucla
local data */
int indx1, indx1y, nx, ny, nxy, nxhy, nxyh;
int j, k, lb, ll, jb, it;
float dnxy, arg;
indx1 = indx - 1;
indx1y = indx1 > indy ? indx1 : indy;
nx = 1L<<indx;
ny = 1L<<indy;
nxy = nx > ny ? nx : ny;
nxhy = 1L<<indx1y;
/* bit-reverse index table: mixup[j] = 1 + reversed bits of j */
for (j = 0; j < nxhy; j++) {
lb = j;
ll = 0;
for (k = 0; k < indx1y; k++) {
jb = lb/2;
it = lb - 2*jb;
lb = jb;
ll = 2*ll + it;
}
mixup[j] = ll + 1;
}
/* sine/cosine table for the angles 2*n*pi/nxy */
nxyh = nxy/2;
dnxy = 6.28318530717959/(float) nxy;
for (j = 0; j < nxyh; j++) {
arg = dnxy*(float) j;
sct[j] = cosf(arg) - sinf(arg)*_Complex_I;
}
return;
}
/*--------------------------------------------------------------------*/
void cfft2rvmxx(float complex f[], int isign, int mixup[],
                float complex sct[], int indx, int indy, int nyi,
                int nyp, int nxhd, int nyd, int nxhyd, int nxyhd) {
/* this subroutine performs the x part of a two dimensional real to
   complex fast fourier transform and its inverse, for a subset of y,
   using complex arithmetic, with OpenMP
   for isign = (-1,1), input: all, output: f
   for isign = -1, approximate flop count: N*(5*log2(N) + 19/2)
   for isign = 1, approximate flop count: N*(5*log2(N) + 15/2)
   where N = (nx/2)*ny
   indx/indy = exponent which determines length in x/y direction,
   where nx=2**indx, ny=2**indy
   if isign = -1, an inverse fourier transform in x is performed
   f[m][n] = (1/nx*ny)*sum(f[k][j]*exp(-sqrt(-1)*2pi*n*j/nx))
   if isign = 1, a forward fourier transform in x is performed
   f[k][j] = sum(f[m][n]*exp(sqrt(-1)*2pi*n*j/nx))
   mixup = array of bit reversed addresses
   sct = sine/cosine table
   nyi = initial y index used
   nyp = number of y indices used
   nxhd = first dimension of f >= nx/2
   nyd = second dimension of f >= ny
   nxhyd = maximum of (nx/2,ny)
   nxyhd = maximum of (nx,ny)/2
   fourier coefficients are stored as follows:
   f[k][j] = real, imaginary part of mode j,k, where
   0 <= j < nx/2 and 0 <= k < ny, except for
   f[k][1] = real, imaginary part of mode nx/2,k, where
   ny/2+1 <= k < ny, and
   imag(f[0][0]) = real part of mode nx/2,0 and
   imag(f[0][ny/2]) = real part of mode nx/2,ny/2
   written by viktor k. decyk, ucla
local data */
   int indx1, indx1y, nx, nxh, nxhh, ny, nxy, nxhy, nyt;
   int nrx, i, j, k, l, j1, k1, k2, ns, ns2, km, kmr, nrxb, joff;
   float ani;
   float complex t1, t2, t3;
/* isign = 0 is a no-op */
   if (isign==0)
      return;
   indx1 = indx - 1;
   indx1y = indx1 > indy ? indx1 : indy;
   nx = 1L<<indx;
   nxh = nx/2;
   nxhh = nx/4;
   ny = 1L<<indy;
   nxy = nx > ny ? nx : ny;
   nxhy = 1L<<indx1y;
   nyt = nyi + nyp - 1;
/* isign > 0: jump to the forward transform below */
   if (isign > 0)
      goto L70;
/* inverse fourier transform */
   nrxb = nxhy/nxh;
   nrx = nxy/nxh;
/* each thread transforms an independent row i of f */
#pragma omp parallel for \
private(i,j,k,l,ns,ns2,km,kmr,k1,k2,j1,joff,ani,t1,t2,t3)
   for (i = nyi-1; i < nyt; i++) {
      joff = nxhd*i;
/* bit-reverse array elements in x */
      for (j = 0; j < nxh; j++) {
         j1 = (mixup[j] - 1)/nrxb;
         if (j < j1) {
            t1 = f[j1+joff];
            f[j1+joff] = f[j+joff];
            f[j+joff] = t1;
         }
      }
/* then transform in x */
/* radix-2 decimation-in-time butterfly stages */
      ns = 1;
      for (l = 0; l < indx1; l++) {
         ns2 = ns + ns;
         km = nxhh/ns;
         kmr = km*nrx;
         for (k = 0; k < km; k++) {
            k1 = ns2*k;
            k2 = k1 + ns;
            for (j = 0; j < ns; j++) {
               t1 = sct[kmr*j];
               t2 = t1*f[j+k2+joff];
               f[j+k2+joff] = f[j+k1+joff] - t2;
               f[j+k1+joff] += t2;
            }
         }
         ns = ns2;
      }
/* unscramble coefficients and normalize */
/* recovers the real-input transform from the half-length complex */
/* transform and applies the 1/(nx*ny) normalization */
      kmr = nxy/nx;
      ani = 0.5/(((float) nx)*((float) ny));
      for (j = 1; j < nxhh; j++) {
         t3 = cimagf(sct[kmr*j]) - crealf(sct[kmr*j])*_Complex_I;
         t2 = conjf(f[nxh-j+joff]);
         t1 = f[j+joff] + t2;
         t2 = (f[j+joff] - t2)*t3;
         f[j+joff] = ani*(t1 + t2);
         f[nxh-j+joff] = ani*conjf(t1 - t2);
      }
      ani = 2.0*ani;
      f[nxhh+joff] = ani*conjf(f[nxhh+joff]);
/* pack real parts of modes 0 and nx/2 into f[joff] */
      f[joff] = ani*((crealf(f[joff]) + cimagf(f[joff]))
                + (crealf(f[joff]) - cimagf(f[joff]))*_Complex_I);
   }
   return;
/* forward fourier transform */
L70: nrxb = nxhy/nxh;
   nrx = nxy/nxh;
#pragma omp parallel for \
private(i,j,k,l,ns,ns2,km,kmr,k1,k2,j1,joff,t1,t2,t3)
   for (i = nyi-1; i < nyt; i++) {
      joff = nxhd*i;
/* scramble coefficients */
/* inverse of the unscramble step above, without normalization */
      kmr = nxy/nx;
      for (j = 1; j < nxhh; j++) {
         t3 = cimagf(sct[kmr*j]) + crealf(sct[kmr*j])*_Complex_I;
         t2 = conjf(f[nxh-j+joff]);
         t1 = f[j+joff] + t2;
         t2 = (f[j+joff] - t2)*t3;
         f[j+joff] = t1 + t2;
         f[nxh-j+joff] = conjf(t1 - t2);
      }
      f[nxhh+joff] = 2.0*conjf(f[nxhh+joff]);
      f[joff] = (crealf(f[joff]) + cimagf(f[joff]))
                + (crealf(f[joff]) - cimagf(f[joff]))*_Complex_I;
/* bit-reverse array elements in x */
      for (j = 0; j < nxh; j++) {
         j1 = (mixup[j] - 1)/nrxb;
         if (j < j1) {
            t1 = f[j1+joff];
            f[j1+joff] = f[j+joff];
            f[j+joff] = t1;
         }
      }
/* then transform in x */
      ns = 1;
      for (l = 0; l < indx1; l++) {
         ns2 = ns + ns;
         km = nxhh/ns;
         kmr = km*nrx;
         for (k = 0; k < km; k++) {
            k1 = ns2*k;
            k2 = k1 + ns;
            for (j = 0; j < ns; j++) {
/* conjugate twiddle factor gives the forward direction */
               t1 = conjf(sct[kmr*j]);
               t2 = t1*f[j+k2+joff];
               f[j+k2+joff] = f[j+k1+joff] - t2;
               f[j+k1+joff] += t2;
            }
         }
         ns = ns2;
      }
   }
   return;
}
/*--------------------------------------------------------------------*/
void cfft2rmxy(float complex f[], int isign, int mixup[],
               float complex sct[], int indx, int indy, int nxi,
               int nxp, int nxhd, int nyd, int nxhyd, int nxyhd) {
/* this subroutine performs the y part of a two dimensional real to
   complex fast fourier transform and its inverse, for a subset of x,
   using complex arithmetic, with OpenMP
   for isign = (-1,1), input: all, output: f
   for isign = -1, approximate flop count: N*(5*log2(N) + 19/2)
   for isign = 1, approximate flop count: N*(5*log2(N) + 15/2)
   where N = (nx/2)*ny
   indx/indy = exponent which determines length in x/y direction,
   where nx=2**indx, ny=2**indy
   if isign = -1, an inverse fourier transform in y is performed
   f[m][n] = sum(f[k][j]*exp(-sqrt(-1)*2pi*m*k/ny))
   if isign = 1, a forward fourier transform in y is performed
   f[k][j] = sum(f[m][n]*exp(sqrt(-1)*2pi*m*k/ny))
   mixup = array of bit reversed addresses
   sct = sine/cosine table
   nxi = initial x index used
   nxp = number of x indices used
   nxhd = first dimension of f >= nx/2
   nyd = second dimension of f >= ny
   nxhyd = maximum of (nx/2,ny)
   nxyhd = maximum of (nx,ny)/2
   fourier coefficients are stored as follows:
   f[k][j] = real, imaginary part of mode j,k, where
   0 <= j < nx/2 and 0 <= k < ny, except for
   f[k][1] = real, imaginary part of mode nx/2,k, where
   ny/2+1 <= k < ny, and
   imag(f[0][0]) = real part of mode nx/2,0 and
   imag(f[0][ny/2]) = real part of mode nx/2,ny/2
   written by viktor k. decyk, ucla
local data */
   int indx1, indx1y, nx, ny, nyh, nxy, nxhy, nxt;
   int nry, i, j, k, l, j1, j2, k1, k2, ns, ns2, km, kmr, nryb, koff;
   float complex t1, t2;
/* isign = 0 is a no-op */
   if (isign==0)
      return;
   indx1 = indx - 1;
   indx1y = indx1 > indy ? indx1 : indy;
   nx = 1L<<indx;
   ny = 1L<<indy;
   nyh = ny/2;
   nxy = nx > ny ? nx : ny;
   nxhy = 1L<<indx1y;
   nxt = nxi + nxp - 1;
/* isign > 0: jump to the forward transform below */
   if (isign > 0)
      goto L70;
/* inverse fourier transform */
   nryb = nxhy/ny;
   nry = nxy/ny;
/* each thread transforms an independent column i of f */
#pragma omp parallel for \
private(i,j,k,l,ns,ns2,km,kmr,k1,k2,j1,j2,koff,t1,t2)
   for (i = nxi-1; i < nxt; i++) {
/* bit-reverse array elements in y */
      for (k = 0; k < ny; k++) {
         koff = nxhd*k;
         k1 = (mixup[k] - 1)/nryb;
         if (k < k1) {
            k1 = nxhd*k1;
            t1 = f[i+k1];
            f[i+k1] = f[i+koff];
            f[i+koff] = t1;
         }
      }
/* then transform in y */
/* radix-2 decimation-in-time butterfly stages */
      ns = 1;
      for (l = 0; l < indy; l++) {
         ns2 = ns + ns;
         km = nyh/ns;
         kmr = km*nry;
         for (k = 0; k < km; k++) {
            k1 = ns2*k;
            k2 = k1 + ns;
            for (j = 0; j < ns; j++) {
               j1 = nxhd*(j + k1);
               j2 = nxhd*(j + k2);
               t1 = sct[kmr*j];
               t2 = t1*f[i+j2];
               f[i+j2] = f[i+j1] - t2;
               f[i+j1] += t2;
            }
         }
         ns = ns2;
      }
   }
/* unscramble modes kx = 0, nx/2 */
/* these two real modes share column 0 and are only separated when */
/* this call covers x index 0 (nxi == 1) */
   if (nxi==1) {
      for (k = 1; k < nyh; k++) {
         koff = nxhd*k;
         k1 = nxhd*ny - koff;
         t1 = f[k1];
         f[k1] = 0.5*(cimagf(f[koff] + t1)
                  + crealf(f[koff] - t1)*_Complex_I);
         f[koff] = 0.5*(crealf(f[koff] + t1)
                    + cimagf(f[koff] - t1)*_Complex_I);
      }
   }
   return;
/* forward fourier transform */
L70: nryb = nxhy/ny;
   nry = nxy/ny;
/* scramble modes kx = 0, nx/2 */
   if (nxi==1) {
      for (k = 1; k < nyh; k++) {
         koff = nxhd*k;
         k1 = nxhd*ny - koff;
         t1 = cimagf(f[k1]) + crealf(f[k1])*_Complex_I;
         f[k1] = conjf(f[koff] - t1);
         f[koff] += t1;
      }
   }
#pragma omp parallel for \
private(i,j,k,l,ns,ns2,km,kmr,k1,k2,j1,j2,koff,t1,t2)
   for (i = nxi-1; i < nxt; i++) {
/* bit-reverse array elements in y */
      for (k = 0; k < ny; k++) {
         koff = nxhd*k;
         k1 = (mixup[k] - 1)/nryb;
         if (k < k1) {
            k1 = nxhd*k1;
            t1 = f[i+k1];
            f[i+k1] = f[i+koff];
            f[i+koff] = t1;
         }
      }
/* then transform in y */
      ns = 1;
      for (l = 0; l < indy; l++) {
         ns2 = ns + ns;
         km = nyh/ns;
         kmr = km*nry;
         for (k = 0; k < km; k++) {
            k1 = ns2*k;
            k2 = k1 + ns;
            for (j = 0; j < ns; j++) {
               j1 = nxhd*(j + k1);
               j2 = nxhd*(j + k2);
/* conjugate twiddle factor gives the forward direction */
               t1 = conjf(sct[kmr*j]);
               t2 = t1*f[i+j2];
               f[i+j2] = f[i+j1] - t2;
               f[i+j1] += t2;
            }
         }
         ns = ns2;
      }
   }
   return;
}
/*--------------------------------------------------------------------*/
void cfft2rvm3x(float complex f[], int isign, int mixup[],
                float complex sct[], int indx, int indy, int nyi,
                int nyp, int nxhd, int nyd, int nxhyd, int nxyhd) {
/* this subroutine performs the x part of 3 two dimensional real to
   complex fast fourier transforms, and their inverses, for a subset of
   y, using complex arithmetic, with OpenMP
   for isign = (-1,1), input: all, output: f
   for isign = -1, approximate flop count: N*(5*log2(N) + 19/2)
   for isign = 1, approximate flop count: N*(5*log2(N) + 15/2)
   where N = (nx/2)*ny
   indx/indy = exponent which determines length in x/y direction,
   where nx=2**indx, ny=2**indy
   if isign = -1, two inverse fourier transforms are performed
   f[m][n][0:2] = (1/nx*ny)*sum(f[k][j][0:2]*
         exp(-sqrt(-1)*2pi*n*j/nx)*exp(-sqrt(-1)*2pi*m*k/ny))
   if isign = 1, two forward fourier transforms are performed
   f[k][j][0:2] = sum(f[m][n][0:2]*exp(sqrt(-1)*2pi*n*j/nx)*
         exp(sqrt(-1)*2pi*m*k/ny))
   mixup = array of bit reversed addresses
   sct = sine/cosine table
   nyi = initial y index used
   nyp = number of y indices used
   nxhd = second dimension of f >= nx/2
   nyd = third dimension of f >= ny
   nxhyd = maximum of (nx/2,ny)
   nxyhd = maximum of (nx,ny)/2
   fourier coefficients are stored as follows:
   f[k][j][0:2] = real, imaginary part of mode j,k, where
   0 <= j < nx/2 and 0 <= k < ny, except for
   f[k][1][0:2] = real, imaginary part of mode nx/2,k, where
   ny/2+1 <= k < ny, and
   imag(f[0][0][0:2]) = real part of mode nx/2,0 and
   imag(f[0][ny/2][0:2]) = real part of mode nx/2,ny/2
   written by viktor k. decyk, ucla
local data */
   int indx1, indx1y, nx, nxh, nxhh, ny, nxy, nxhy, nyt;
   int nrx, i, j, k, l, jj, j1, k1, k2, ns, ns2, km, kmr, joff;
   int nrxb;
   float at1, at2, ani;
   float complex t1, t2, t3, t4;
/* isign = 0 is a no-op */
   if (isign==0)
      return;
   indx1 = indx - 1;
   indx1y = indx1 > indy ? indx1 : indy;
   nx = 1L<<indx;
   nxh = nx/2;
   nxhh = nx/4;
   ny = 1L<<indy;
   nxy = nx > ny ? nx : ny;
   nxhy = 1L<<indx1y;
   nyt = nyi + nyp - 1;
/* isign > 0: jump to the forward transform below */
   if (isign > 0)
      goto L100;
/* inverse fourier transform */
   nrxb = nxhy/nxh;
   nrx = nxy/nxh;
/* each thread transforms an independent row i of f */
#pragma omp parallel for \
private(i,j,k,l,ns,ns2,km,kmr,k1,k2,jj,j1,joff,at1,at2,ani,t1,t2,t3,t4)
   for (i = nyi-1; i < nyt; i++) {
      joff = 4*nxhd*i;
/* swap complex components */
/* repacks the 4-word vector layout so each of the 3 components */
/* forms a contiguous complex value */
      for (j = 0; j < nxh; j++) {
         at1 = cimagf(f[2+4*j+joff]);
         at2 = crealf(f[2+4*j+joff]);
         f[2+4*j+joff] = crealf(f[1+4*j+joff])
                         + crealf(f[3+4*j+joff])*_Complex_I;
         f[1+4*j+joff] = cimagf(f[4*j+joff]) + at1*_Complex_I;
         f[4*j+joff] = crealf(f[4*j+joff]) + at2*_Complex_I;
      }
/* bit-reverse array elements in x */
      for (j = 0; j < nxh; j++) {
         j1 = (mixup[j] - 1)/nrxb;
         if (j < j1) {
            t1 = f[4*j1+joff];
            t2 = f[1+4*j1+joff];
            t3 = f[2+4*j1+joff];
            f[4*j1+joff] = f[4*j+joff];
            f[1+4*j1+joff] = f[1+4*j+joff];
            f[2+4*j1+joff] = f[2+4*j+joff];
            f[4*j+joff] = t1;
            f[1+4*j+joff] = t2;
            f[2+4*j+joff] = t3;
         }
      }
/* then transform in x */
/* radix-2 butterfly stages applied to all 3 components at once */
      ns = 1;
      for (l = 0; l < indx1; l++) {
         ns2 = ns + ns;
         km = nxhh/ns;
         kmr = km*nrx;
         for (k = 0; k < km; k++) {
            k1 = 4*ns2*k;
            k2 = k1 + 4*ns;
            for (j = 0; j < ns; j++) {
               t1 = sct[kmr*j];
               t2 = t1*f[4*j+k2+joff];
               t3 = t1*f[1+4*j+k2+joff];
               t4 = t1*f[2+4*j+k2+joff];
               f[4*j+k2+joff] = f[4*j+k1+joff] - t2;
               f[1+4*j+k2+joff] = f[1+4*j+k1+joff] - t3;
               f[2+4*j+k2+joff] = f[2+4*j+k1+joff] - t4;
               f[4*j+k1+joff] += t2;
               f[1+4*j+k1+joff] += t3;
               f[2+4*j+k1+joff] += t4;
            }
         }
         ns = ns2;
      }
/* unscramble coefficients and normalize */
/* recovers the real-input transforms from the half-length complex */
/* transforms and applies the 1/(nx*ny) normalization */
      kmr = nxy/nx;
      ani = 0.5/(((float) nx)*((float) ny));
      for (j = 1; j < nxhh; j++) {
         t3 = cimagf(sct[kmr*j]) - crealf(sct[kmr*j])*_Complex_I;
         for (jj = 0; jj < 3; jj++) {
            t2 = conjf(f[jj+4*(nxh-j)+joff]);
            t1 = f[jj+4*j+joff] + t2;
            t2 = (f[jj+4*j+joff] - t2)*t3;
            f[jj+4*j+joff] = ani*(t1 + t2);
            f[jj+4*(nxh-j)+joff] = ani*conjf(t1 - t2);
         }
      }
      ani = 2.0*ani;
      for (jj = 0; jj < 3; jj++) {
         f[jj+4*nxhh+joff] = ani*conjf(f[jj+4*nxhh+joff]);
         f[jj+joff] = ani*((crealf(f[jj+joff]) + cimagf(f[jj+joff]))
                      + (crealf(f[jj+joff]) - cimagf(f[jj+joff]))*_Complex_I);
      }
   }
   return;
/* forward fourier transform */
L100: nrxb = nxhy/nxh;
   nrx = nxy/nxh;
#pragma omp parallel for \
private(i,j,k,l,ns,ns2,km,kmr,k1,k2,jj,j1,joff,at1,at2,t1,t2,t3,t4)
   for (i = nyi-1; i < nyt; i++) {
      joff = 4*nxhd*i;
/* scramble coefficients */
/* inverse of the unscramble step above, without normalization */
      kmr = nxy/nx;
      for (j = 1; j < nxhh; j++) {
         t3 = cimagf(sct[kmr*j]) + crealf(sct[kmr*j])*_Complex_I;
         for (jj = 0; jj < 3; jj++) {
            t2 = conjf(f[jj+4*(nxh-j)+joff]);
            t1 = f[jj+4*j+joff] + t2;
            t2 = (f[jj+4*j+joff] - t2)*t3;
            f[jj+4*j+joff] = t1 + t2;
            f[jj+4*(nxh-j)+joff] = conjf(t1 - t2);
         }
      }
      for (jj = 0; jj < 3; jj++) {
         f[jj+4*nxhh+joff] = 2.0*conjf(f[jj+4*nxhh+joff]);
         f[jj+joff] = (crealf(f[jj+joff]) + cimagf(f[jj+joff]))
                      + (crealf(f[jj+joff]) - cimagf(f[jj+joff]))*_Complex_I;
      }
/* bit-reverse array elements in x */
      for (j = 0; j < nxh; j++) {
         j1 = (mixup[j] - 1)/nrxb;
         if (j < j1) {
            t1 = f[4*j1+joff];
            t2 = f[1+4*j1+joff];
            t3 = f[2+4*j1+joff];
            f[4*j1+joff] = f[4*j+joff];
            f[1+4*j1+joff] = f[1+4*j+joff];
            f[2+4*j1+joff] = f[2+4*j+joff];
            f[4*j+joff] = t1;
            f[1+4*j+joff] = t2;
            f[2+4*j+joff] = t3;
         }
      }
/* then transform in x */
      ns = 1;
      for (l = 0; l < indx1; l++) {
         ns2 = ns + ns;
         km = nxhh/ns;
         kmr = km*nrx;
         for (k = 0; k < km; k++) {
            k1 = 4*ns2*k;
            k2 = k1 + 4*ns;
            for (j = 0; j < ns; j++) {
/* conjugate twiddle factor gives the forward direction */
               t1 = conjf(sct[kmr*j]);
               t2 = t1*f[4*j+k2+joff];
               t3 = t1*f[1+4*j+k2+joff];
               t4 = t1*f[2+4*j+k2+joff];
               f[4*j+k2+joff] = f[4*j+k1+joff] - t2;
               f[1+4*j+k2+joff] = f[1+4*j+k1+joff] - t3;
               f[2+4*j+k2+joff] = f[2+4*j+k1+joff] - t4;
               f[4*j+k1+joff] += t2;
               f[1+4*j+k1+joff] += t3;
               f[2+4*j+k1+joff] += t4;
            }
         }
         ns = ns2;
      }
/* swap complex components */
/* inverse of the repacking done at the start of the inverse branch */
      for (j = 0; j < nxh; j++) {
         f[3+4*j+joff] = cimagf(f[2+4*j+joff])
                         + cimagf(f[3+4*j+joff])*_Complex_I;
         at1 = crealf(f[2+4*j+joff]);
         f[2+4*j+joff] = cimagf(f[4*j+joff])
                         + cimagf(f[1+4*j+joff])*_Complex_I;
         at2 = crealf(f[1+4*j+joff]);
         f[1+4*j+joff] = at1 + 0.0*_Complex_I;
         f[4*j+joff] = crealf(f[4*j+joff]) + at2*_Complex_I;
      }
   }
   return;
}
/*--------------------------------------------------------------------*/
void cfft2rvm3y(float complex f[], int isign, int mixup[],
                float complex sct[], int indx, int indy, int nxi,
                int nxp, int nxhd, int nyd, int nxhyd, int nxyhd) {
/* this subroutine performs the y part of 3 two dimensional real to
   complex fast fourier transforms, and their inverses, for a subset of
   x, using complex arithmetic, with OpenMP
   for isign = (-1,1), input: all, output: f
   for isign = -1, approximate flop count: N*(5*log2(N) + 19/2)
   for isign = 1, approximate flop count: N*(5*log2(N) + 15/2)
   where N = (nx/2)*ny
   indx/indy = exponent which determines length in x/y direction,
   where nx=2**indx, ny=2**indy
   if isign = -1, two inverse fourier transforms are performed
   f[m][n][0:2] = (1/nx*ny)*sum(f[k][j][0:2] *
         exp(-sqrt(-1)*2pi*n*j/nx)*exp(-sqrt(-1)*2pi*m*k/ny))
   if isign = 1, two forward fourier transforms are performed
   f[k][j][0:2] = sum(f[m][n][0:2]*exp(sqrt(-1)*2pi*n*j/nx)*
         exp(sqrt(-1)*2pi*m*k/ny))
   mixup = array of bit reversed addresses
   sct = sine/cosine table
   nxi = initial x index used
   nxp = number of x indices used
   nxhd = second dimension of f >= nx/2
   nyd = third dimension of f >= ny
   nxhyd = maximum of (nx/2,ny)
   nxyhd = maximum of (nx,ny)/2
   fourier coefficients are stored as follows:
   f[k][j][0:2] = real, imaginary part of mode j,k, where
   0 <= j < nx/2 and 0 <= k < ny, except for
   f[k][1][0:2] = real, imaginary part of mode nx/2,k, where
   ny/2+1 <= k < ny, and
   imag(f[0][0][0:2]) = real part of mode nx/2,0 and
   imag(f[0][ny/2][0:2]) = real part of mode nx/2,ny/2
   written by viktor k. decyk, ucla
local data */
   int indx1, indx1y, nx, ny, nyh, nxy, nxhy, nxt;
   int nry, i, j, k, l, jj, j1, j2, k1, k2, ns, ns2, km, kmr, koff;
   int nryb;
   float complex t1, t2, t3, t4;
/* isign = 0 is a no-op */
   if (isign==0)
      return;
   indx1 = indx - 1;
   indx1y = indx1 > indy ? indx1 : indy;
   nx = 1L<<indx;
   ny = 1L<<indy;
   nyh = ny/2;
   nxy = nx > ny ? nx : ny;
   nxhy = 1L<<indx1y;
   nxt = nxi + nxp - 1;
/* isign > 0: jump to the forward transform below */
   if (isign > 0)
      goto L80;
/* inverse fourier transform */
   nryb = nxhy/ny;
   nry = nxy/ny;
/* each thread transforms an independent column i of f */
#pragma omp parallel for \
private(i,j,k,l,ns,ns2,km,kmr,k1,k2,jj,j1,j2,koff,t1,t2,t3,t4)
   for (i = nxi-1; i < nxt; i++) {
/* bit-reverse array elements in y */
      for (k = 0; k < ny; k++) {
         koff = 4*nxhd*k;
         k1 = (mixup[k] - 1)/nryb;
         if (k < k1) {
            k1 = 4*nxhd*k1;
            t1 = f[4*i+k1];
            t2 = f[1+4*i+k1];
            t3 = f[2+4*i+k1];
            f[4*i+k1] = f[4*i+koff];
            f[1+4*i+k1] = f[1+4*i+koff];
            f[2+4*i+k1] = f[2+4*i+koff];
            f[4*i+koff] = t1;
            f[1+4*i+koff] = t2;
            f[2+4*i+koff] = t3;
         }
      }
/* then transform in y */
/* radix-2 butterfly stages applied to all 3 components at once */
      ns = 1;
      for (l = 0; l < indy; l++) {
         ns2 = ns + ns;
         km = nyh/ns;
         kmr = km*nry;
         for (k = 0; k < km; k++) {
            k1 = ns2*k;
            k2 = k1 + ns;
            for (j = 0; j < ns; j++) {
               j1 = 4*nxhd*(j + k1);
               j2 = 4*nxhd*(j + k2);
               t1 = sct[kmr*j];
               t2 = t1*f[4*i+j2];
               t3 = t1*f[1+4*i+j2];
               t4 = t1*f[2+4*i+j2];
               f[4*i+j2] = f[4*i+j1] - t2;
               f[1+4*i+j2] = f[1+4*i+j1] - t3;
               f[2+4*i+j2] = f[2+4*i+j1] - t4;
               f[4*i+j1] += t2;
               f[1+4*i+j1] += t3;
               f[2+4*i+j1] += t4;
            }
         }
         ns = ns2;
      }
   }
/* unscramble modes kx = 0, nx/2 */
/* these two real modes share column 0 and are only separated when */
/* this call covers x index 0 (nxi == 1) */
   if (nxi==1) {
      for (k = 1; k < nyh; k++) {
         koff = 4*nxhd*k;
         k1 = 4*nxhd*ny - koff;
         for (jj = 0; jj < 3; jj++) {
            t1 = f[jj+k1];
            f[jj+k1] = 0.5*(cimagf(f[jj+koff] + t1)
                        + crealf(f[jj+koff] - t1)*_Complex_I);
            f[jj+koff] = 0.5*(crealf(f[jj+koff] + t1)
                          + cimagf(f[jj+koff] - t1)*_Complex_I);
         }
      }
   }
   return;
/* forward fourier transform */
L80: nryb = nxhy/ny;
   nry = nxy/ny;
/* scramble modes kx = 0, nx/2 */
   if (nxi==1) {
      for (k = 1; k < nyh; k++) {
         koff = 4*nxhd*k;
         k1 = 4*nxhd*ny - koff;
         for (jj = 0; jj < 3; jj++) {
            t1 = cimagf(f[jj+k1]) + crealf(f[jj+k1])*_Complex_I;
            f[jj+k1] = conjf(f[jj+koff] - t1);
            f[jj+koff] += t1;
         }
      }
   }
#pragma omp parallel for \
private(i,j,k,l,ns,ns2,km,kmr,k1,k2,jj,j1,j2,koff,t1,t2,t3,t4)
   for (i = nxi-1; i < nxt; i++) {
/* bit-reverse array elements in y */
      for (k = 0; k < ny; k++) {
         koff = 4*nxhd*k;
         k1 = (mixup[k] - 1)/nryb;
         if (k < k1) {
            k1 = 4*nxhd*k1;
            t1 = f[4*i+k1];
            t2 = f[1+4*i+k1];
            t3 = f[2+4*i+k1];
            f[4*i+k1] = f[4*i+koff];
            f[1+4*i+k1] = f[1+4*i+koff];
            f[2+4*i+k1] = f[2+4*i+koff];
            f[4*i+koff] = t1;
            f[1+4*i+koff] = t2;
            f[2+4*i+koff] = t3;
         }
      }
/* then transform in y */
      ns = 1;
      for (l = 0; l < indy; l++) {
         ns2 = ns + ns;
         km = nyh/ns;
         kmr = km*nry;
         for (k = 0; k < km; k++) {
            k1 = ns2*k;
            k2 = k1 + ns;
            for (j = 0; j < ns; j++) {
               j1 = 4*nxhd*(j + k1);
               j2 = 4*nxhd*(j + k2);
/* conjugate twiddle factor gives the forward direction */
               t1 = conjf(sct[kmr*j]);
               t2 = t1*f[4*i+j2];
               t3 = t1*f[1+4*i+j2];
               t4 = t1*f[2+4*i+j2];
               f[4*i+j2] = f[4*i+j1] - t2;
               f[1+4*i+j2] = f[1+4*i+j1] - t3;
               f[2+4*i+j2] = f[2+4*i+j1] - t4;
               f[4*i+j1] += t2;
               f[1+4*i+j1] += t3;
               f[2+4*i+j1] += t4;
            }
         }
         ns = ns2;
      }
   }
   return;
}
/*--------------------------------------------------------------------*/
void cwfft2rvmx(float complex f[], int isign, int mixup[],
                float complex sct[], int indx, int indy, int nxhd,
                int nyd, int nxhyd, int nxyhd) {
/* wrapper function for the 2d real to complex fft with packed data,
   parallelized with OpenMP: dispatches the x and y passes in the
   order required by the transform direction; isign = 0 does nothing
local data */
   int nxh, ny;
   static int nxi = 1, nyi = 1;
/* calculate range of indices */
   nxh = 1L<<(indx - 1);
   ny = 1L<<indy;
/* forward fourier transform: y fft first, then x fft */
   if (isign > 0) {
      cfft2rmxy(f,isign,mixup,sct,indx,indy,nxi,nxh,nxhd,nyd,nxhyd,
                nxyhd);
      cfft2rvmxx(f,isign,mixup,sct,indx,indy,nyi,ny,nxhd,nyd,nxhyd,
                 nxyhd);
   }
/* inverse fourier transform: x fft first, then y fft */
   else if (isign < 0) {
      cfft2rvmxx(f,isign,mixup,sct,indx,indy,nyi,ny,nxhd,nyd,nxhyd,
                 nxyhd);
      cfft2rmxy(f,isign,mixup,sct,indx,indy,nxi,nxh,nxhd,nyd,nxhyd,
                nxyhd);
   }
}
/*--------------------------------------------------------------------*/
void cwfft2rvm3(float complex f[], int isign, int mixup[],
                float complex sct[], int indx, int indy, int nxhd,
                int nyd, int nxhyd, int nxyhd) {
/* wrapper function for 3 simultaneous 2d real to complex ffts:
   dispatches the x and y passes in the order required by the
   transform direction; isign = 0 does nothing
local data */
   int nxh, ny;
   static int nxi = 1, nyi = 1;
/* calculate range of indices */
   nxh = 1L<<(indx - 1);
   ny = 1L<<indy;
/* forward fourier transform: y fft first, then x fft */
   if (isign > 0) {
      cfft2rvm3y(f,isign,mixup,sct,indx,indy,nxi,nxh,nxhd,nyd,nxhyd,
                 nxyhd);
      cfft2rvm3x(f,isign,mixup,sct,indx,indy,nyi,ny,nxhd,nyd,nxhyd,
                 nxyhd);
   }
/* inverse fourier transform: x fft first, then y fft */
   else if (isign < 0) {
      cfft2rvm3x(f,isign,mixup,sct,indx,indy,nyi,ny,nxhd,nyd,nxhyd,
                 nxyhd);
      cfft2rvm3y(f,isign,mixup,sct,indx,indy,nxi,nxh,nxhd,nyd,nxhyd,
                 nxyhd);
   }
}
/* Interfaces to Fortran */
/*--------------------------------------------------------------------*/
void cdistr2h_(float *part, float *vtx, float *vty, float *vtz,
               float *vdx, float *vdy, float *vdz, int *npx, int *npy,
               int *idimp, int *nop, int *nx, int *ny, int *ipbc) {
/* Fortran-callable interface: dereference the scalar arguments and
   forward to the C implementation */
   cdistr2h(part,*vtx,*vty,*vtz,*vdx,*vdy,*vdz,*npx,*npy,*idimp,*nop,
            *nx,*ny,*ipbc);
}
/*--------------------------------------------------------------------*/
void cdblkp2l_(float *part, int *kpic, int *nppmx, int *idimp, int *nop,
int *mx, int *my, int *mx1, int *mxy1, int *irc) {
cdblkp2l(part,kpic,nppmx,*idimp,*nop,*mx,*my,*mx1,*mxy1,irc);
return;
}
/*--------------------------------------------------------------------*/
void cppmovin2lt_(float *part, float *ppart, int *kpic, int *nppmx,
int *idimp, int *nop, int *mx, int *my, int *mx1,
int *mxy1, int *irc) {
cppmovin2lt(part,ppart,kpic,*nppmx,*idimp,*nop,*mx,*my,*mx1,*mxy1,
irc);
return;
}
/*--------------------------------------------------------------------*/
void cppmovin2ltp_(float *part, float *ppart, int *kpic, int *kp,
int *nppmx, int *idimp, int *nop, int *mx, int *my,
int *mx1, int *mxy1, int *irc) {
cppmovin2ltp(part,ppart,kpic,kp,*nppmx,*idimp,*nop,*mx,*my,*mx1,
*mxy1,irc);
return;
}
/*--------------------------------------------------------------------*/
void cppcheck2lt_(float *ppart, int *kpic, int *idimp, int *nppmx,
int *nx, int *ny, int *mx, int *my, int *mx1,
int *my1, int *irc) {
cppcheck2lt(ppart,kpic,*idimp,*nppmx,*nx,*ny,*mx,*my,*mx1,*my1,irc);
return;
}
/*--------------------------------------------------------------------*/
/* Fortran shims for the particle-push routines.  Scalar inputs are
   dereferenced; ek (and, in the *f* variants, ncl/ihole/irc) are
   forwarded as pointers. */
/*--------------------------------------------------------------------*/
/* Fortran-callable shim for cgbppush23lt */
void cgbppush23lt_(float *ppart, float *fxy, float *bxy, int *kpic,
float *qbm, float *dt, float *dtc, float *ek,
int *idimp, int *nppmx, int *nx, int *ny, int *mx,
int *my, int *nxv, int *nyv, int *mx1, int *mxy1,
int *ipbc) {
cgbppush23lt(ppart,fxy,bxy,kpic,*qbm,*dt,*dtc,ek,*idimp,*nppmx,*nx,
*ny,*mx,*my,*nxv,*nyv,*mx1,*mxy1,*ipbc);
return;
}
/*--------------------------------------------------------------------*/
/* Fortran-callable shim for cgbppushf23lt */
void cgbppushf23lt_(float *ppart, float *fxy, float *bxy, int *kpic,
int *ncl, int *ihole, float *qbm, float *dt,
float *dtc, float *ek, int *idimp, int *nppmx,
int *nx, int *ny, int *mx, int *my, int *nxv,
int *nyv, int *mx1, int *mxy1, int *ntmax,
int *irc) {
cgbppushf23lt(ppart,fxy,bxy,kpic,ncl,ihole,*qbm,*dt,*dtc,ek,*idimp,
*nppmx,*nx,*ny,*mx,*my,*nxv,*nyv,*mx1,*mxy1,*ntmax,irc);
return;
}
/*--------------------------------------------------------------------*/
/* Fortran-callable shim for cgrbppush23lt */
void cgrbppush23lt_(float *ppart, float *fxy, float *bxy, int *kpic,
float *qbm, float *dt, float *dtc, float *ci,
float *ek, int *idimp, int *nppmx, int *nx, int *ny,
int *mx, int *my, int *nxv, int *nyv, int *mx1,
int *mxy1, int *ipbc) {
cgrbppush23lt(ppart,fxy,bxy,kpic,*qbm,*dt,*dtc,*ci,ek,*idimp,*nppmx,
*nx,*ny,*mx,*my,*nxv,*nyv,*mx1,*mxy1,*ipbc);
return;
}
/*--------------------------------------------------------------------*/
/* Fortran-callable shim for cgrbppushf23lt */
void cgrbppushf23lt_(float *ppart, float *fxy, float *bxy, int *kpic,
int *ncl, int *ihole, float *qbm, float *dt,
float *dtc, float *ci, float *ek, int *idimp,
int *nppmx, int *nx, int *ny, int *mx, int *my,
int *nxv, int *nyv, int *mx1, int *mxy1,
int *ntmax, int *irc) {
cgrbppushf23lt(ppart,fxy,bxy,kpic,ncl,ihole,*qbm,*dt,*dtc,*ci,ek,
*idimp,*nppmx,*nx,*ny,*mx,*my,*nxv,*nyv,*mx1,*mxy1,
*ntmax,irc);
return;
}
/*--------------------------------------------------------------------*/
/* Fortran-callable shim for cvgbppush23lt */
void cvgbppush23lt_(float *ppart, float *fxy, float *bxy, int *kpic,
float *qbm, float *dt, float *dtc, float *ek,
int *idimp, int *nppmx, int *nx, int *ny, int *mx,
int *my, int *nxv, int *nyv, int *mx1, int *mxy1,
int *ipbc) {
cvgbppush23lt(ppart,fxy,bxy,kpic,*qbm,*dt,*dtc,ek,*idimp,*nppmx,*nx,
*ny,*mx,*my,*nxv,*nyv,*mx1,*mxy1,*ipbc);
return;
}
/*--------------------------------------------------------------------*/
/* Fortran-callable shim for cvgbppushf23lt */
void cvgbppushf23lt_(float *ppart, float *fxy, float *bxy, int *kpic,
int *ncl, int *ihole, float *qbm, float *dt,
float *dtc, float *ek, int *idimp, int *nppmx,
int *nx, int *ny, int *mx, int *my, int *nxv,
int *nyv, int *mx1, int *mxy1, int *ntmax,
int *irc) {
cvgbppushf23lt(ppart,fxy,bxy,kpic,ncl,ihole,*qbm,*dt,*dtc,ek,*idimp,
*nppmx,*nx,*ny,*mx,*my,*nxv,*nyv,*mx1,*mxy1,*ntmax,
irc);
return;
}
/*--------------------------------------------------------------------*/
/* Fortran-callable shim for cvgrbppush23lt */
void cvgrbppush23lt_(float *ppart, float *fxy, float *bxy, int *kpic,
float *qbm, float *dt, float *dtc, float *ci,
float *ek, int *idimp, int *nppmx, int *nx,
int *ny, int *mx, int *my, int *nxv, int *nyv,
int *mx1, int *mxy1, int *ipbc) {
cvgrbppush23lt(ppart,fxy,bxy,kpic,*qbm,*dt,*dtc,*ci,ek,*idimp,*nppmx,
*nx,*ny,*mx,*my,*nxv,*nyv,*mx1,*mxy1,*ipbc);
return;
}
/*--------------------------------------------------------------------*/
/* Fortran-callable shim for cvgrbppushf23lt */
void cvgrbppushf23lt_(float *ppart, float *fxy, float *bxy, int *kpic,
int *ncl, int *ihole, float *qbm, float *dt,
float *dtc, float *ci, float *ek, int *idimp,
int *nppmx, int *nx, int *ny, int *mx, int *my,
int *nxv, int *nyv, int *mx1, int *mxy1,
int *ntmax, int *irc) {
cvgrbppushf23lt(ppart,fxy,bxy,kpic,ncl,ihole,*qbm,*dt,*dtc,*ci,ek,
*idimp,*nppmx,*nx,*ny,*mx,*my,*nxv,*nyv,*mx1,*mxy1,
*ntmax,irc);
return;
}
/*--------------------------------------------------------------------*/
/* Fortran shims for the charge/current deposit routines.  Scalar
   inputs are dereferenced; array arguments and the ncl/ihole/irc
   pointers in the *f* variants pass through unchanged. */
/*--------------------------------------------------------------------*/
/* Fortran-callable shim for cgppost2lt */
void cgppost2lt_(float *ppart, float *q, int *kpic, float *qm,
int *nppmx, int *idimp, int *mx, int *my, int *nxv,
int *nyv, int *mx1, int *mxy1) {
cgppost2lt(ppart,q,kpic,*qm,*nppmx,*idimp,*mx,*my,*nxv,*nyv,*mx1,
*mxy1);
return;
}
/*--------------------------------------------------------------------*/
/* Fortran-callable shim for cvgppost2lt */
void cvgppost2lt_(float *ppart, float *q, int *kpic, float *qm,
int *nppmx, int *idimp, int *mx, int *my, int *nxv,
int *nyv, int *mx1, int *mxy1) {
cvgppost2lt(ppart,q,kpic,*qm,*nppmx,*idimp,*mx,*my,*nxv,*nyv,*mx1,
*mxy1);
return;
}
/*--------------------------------------------------------------------*/
/* Fortran-callable shim for cgjppost2lt */
void cgjppost2lt_(float *ppart, float *cu, int *kpic, float *qm,
float *dt, int *nppmx, int *idimp, int *nx, int *ny,
int *mx, int *my, int *nxv, int *nyv, int *mx1,
int *mxy1, int *ipbc) {
cgjppost2lt(ppart,cu,kpic,*qm,*dt,*nppmx,*idimp,*nx,*ny,*mx,*my,*nxv,
*nyv,*mx1,*mxy1,*ipbc);
return;
}
/*--------------------------------------------------------------------*/
/* Fortran-callable shim for cgjppostf2lt */
void cgjppostf2lt_(float *ppart, float *cu, int *kpic, int *ncl,
int *ihole, float *qm, float *dt, int *nppmx,
int *idimp, int *nx, int *ny, int *mx, int *my,
int *nxv, int *nyv, int *mx1, int *mxy1, int *ntmax,
int *irc) {
cgjppostf2lt(ppart,cu,kpic,ncl,ihole,*qm,*dt,*nppmx,*idimp,*nx,*ny,
*mx,*my,*nxv,*nyv,*mx1,*mxy1,*ntmax,irc);
return;
}
/*--------------------------------------------------------------------*/
/* Fortran-callable shim for cgrjppost2lt */
void cgrjppost2lt_(float *ppart, float *cu, int *kpic, float *qm,
float *dt, float *ci, int *nppmx, int *idimp,
int *nx, int *ny, int *mx, int *my, int *nxv,
int *nyv, int *mx1, int *mxy1, int *ipbc) {
cgrjppost2lt(ppart,cu,kpic,*qm,*dt,*ci,*nppmx,*idimp,*nx,*ny,*mx,*my,
*nxv,*nyv,*mx1,*mxy1,*ipbc);
return;
}
/*--------------------------------------------------------------------*/
/* Fortran-callable shim for cgrjppostf2lt */
void cgrjppostf2lt_(float *ppart, float *cu, int *kpic, int *ncl,
int *ihole, float *qm, float *dt, float *ci,
int *nppmx, int *idimp, int *nx, int *ny, int *mx,
int *my, int *nxv, int *nyv, int *mx1, int *mxy1,
int *ntmax, int *irc) {
cgrjppostf2lt(ppart,cu,kpic,ncl,ihole,*qm,*dt,*ci,*nppmx,*idimp,*nx,
*ny,*mx,*my,*nxv,*nyv,*mx1,*mxy1,*ntmax,irc);
return;
}
/*--------------------------------------------------------------------*/
/* Fortran-callable shim for cvgjppost2lt */
void cvgjppost2lt_(float *ppart, float *cu, int *kpic, float *qm,
float *dt, int *nppmx, int *idimp, int *nx, int *ny,
int *mx, int *my, int *nxv, int *nyv, int *mx1,
int *mxy1, int *ipbc) {
cvgjppost2lt(ppart,cu,kpic,*qm,*dt,*nppmx,*idimp,*nx,*ny,*mx,*my,
*nxv,*nyv,*mx1,*mxy1,*ipbc);
return;
}
/*--------------------------------------------------------------------*/
/* Fortran-callable shim for cvgjppostf2lt */
void cvgjppostf2lt_(float *ppart, float *cu, int *kpic, int *ncl,
int *ihole, float *qm, float *dt, int *nppmx,
int *idimp, int *nx, int *ny, int *mx, int *my,
int *nxv, int *nyv, int *mx1, int *mxy1, int *ntmax,
int *irc) {
cvgjppostf2lt(ppart,cu,kpic,ncl,ihole,*qm,*dt,*nppmx,*idimp,*nx,*ny,
*mx,*my,*nxv,*nyv,*mx1,*mxy1,*ntmax,irc);
return;
}
/*--------------------------------------------------------------------*/
/* Fortran-callable shim for cvgrjppost2lt */
void cvgrjppost2lt_(float *ppart, float *cu, int *kpic, float *qm,
float *dt, float *ci, int *nppmx, int *idimp,
int *nx, int *ny, int *mx, int *my, int *nxv,
int *nyv, int *mx1, int *mxy1, int *ipbc) {
cvgrjppost2lt(ppart,cu,kpic,*qm,*dt,*ci,*nppmx,*idimp,*nx,*ny,*mx,
*my,*nxv,*nyv,*mx1,*mxy1,*ipbc);
return;
}
/*--------------------------------------------------------------------*/
/* Fortran-callable shim for cvgrjppostf2lt */
void cvgrjppostf2lt_(float *ppart, float *cu, int *kpic, int *ncl,
int *ihole, float *qm, float *dt, float *ci,
int *nppmx, int *idimp, int *nx, int *ny, int *mx,
int *my, int *nxv, int *nyv, int *mx1, int *mxy1,
int *ntmax, int *irc) {
cvgrjppostf2lt(ppart,cu,kpic,ncl,ihole,*qm,*dt,*ci,*nppmx,*idimp,*nx,
*ny,*mx,*my,*nxv,*nyv,*mx1,*mxy1,*ntmax,irc);
return;
}
/*--------------------------------------------------------------------*/
/* Fortran shims for the particle reordering routines.  Scalar inputs
   are dereferenced; ppbuff/kpic/ncl/ihole/irc pass through as
   pointers. */
/*--------------------------------------------------------------------*/
/* Fortran-callable shim for cpporder2lt */
void cpporder2lt_(float *ppart, float *ppbuff, int *kpic, int *ncl,
int *ihole, int *idimp, int *nppmx, int *nx, int *ny,
int *mx, int *my, int *mx1, int *my1, int *npbmx,
int *ntmax, int *irc) {
cpporder2lt(ppart,ppbuff,kpic,ncl,ihole,*idimp,*nppmx,*nx,*ny,*mx,
*my,*mx1,*my1,*npbmx,*ntmax,irc);
return;
}
/*--------------------------------------------------------------------*/
/* Fortran-callable shim for cpporderf2lt */
void cpporderf2lt_(float *ppart, float *ppbuff, int *kpic, int *ncl,
int *ihole, int *idimp, int *nppmx, int *mx1,
int *my1, int *npbmx, int *ntmax, int *irc) {
cpporderf2lt(ppart,ppbuff,kpic,ncl,ihole,*idimp,*nppmx,*mx1,*my1,
*npbmx,*ntmax,irc);
return;
}
/*--------------------------------------------------------------------*/
/* Fortran-callable shim for cvpporder2lt */
void cvpporder2lt_(float *ppart, float *ppbuff, int *kpic, int *ncl,
int *ihole, int *idimp, int *nppmx, int *nx, int *ny,
int *mx, int *my, int *mx1, int *my1, int *npbmx,
int *ntmax, int *irc) {
cvpporder2lt(ppart,ppbuff,kpic,ncl,ihole,*idimp,*nppmx,*nx,*ny,*mx,
*my,*mx1,*my1,*npbmx,*ntmax,irc);
return;
}
/*--------------------------------------------------------------------*/
/* Fortran-callable shim for cvpporderf2lt */
void cvpporderf2lt_(float *ppart, float *ppbuff, int *kpic, int *ncl,
int *ihole, int *idimp, int *nppmx, int *mx1,
int *my1, int *npbmx, int *ntmax, int *irc) {
cvpporderf2lt(ppart,ppbuff,kpic,ncl,ihole,*idimp,*nppmx,*mx1,*my1,
*npbmx,*ntmax,irc);
return;
}
/*--------------------------------------------------------------------*/
/* Fortran shims for the guard-cell and spectral field routines.
   Scalar inputs are dereferenced; complex field arrays and the
   we/wm/wf energy pointers pass through unchanged. */
/*--------------------------------------------------------------------*/
/* Fortran-callable shim for cbguard2l */
void cbguard2l_(float *bxy, int *nx, int *ny, int *nxe, int *nye) {
cbguard2l(bxy,*nx,*ny,*nxe,*nye);
return;
}
/*--------------------------------------------------------------------*/
/* Fortran-callable shim for cacguard2l */
void cacguard2l_(float *cu, int *nx, int *ny, int *nxe, int *nye) {
cacguard2l(cu,*nx,*ny,*nxe,*nye);
return;
}
/*--------------------------------------------------------------------*/
/* Fortran-callable shim for caguard2l */
void caguard2l_(float *q, int *nx, int *ny, int *nxe, int *nye) {
caguard2l(q,*nx,*ny,*nxe,*nye);
return;
}
/*--------------------------------------------------------------------*/
/* Fortran-callable shim for cvmpois23; we is forwarded as a pointer */
void cvmpois23_(float complex *q, float complex *fxy, int *isign,
float complex *ffc, float *ax, float *ay, float *affp,
float *we, int *nx, int *ny, int *nxvh, int *nyv,
int *nxhd, int *nyhd) {
cvmpois23(q,fxy,*isign,ffc,*ax,*ay,*affp,we,*nx,*ny,*nxvh,*nyv,
*nxhd,*nyhd);
return;
}
/*--------------------------------------------------------------------*/
/* Fortran-callable shim for cmcuperp2 */
void cmcuperp2_(float complex *cu, int *nx, int *ny, int *nxvh,
int *nyv) {
cmcuperp2(cu,*nx,*ny,*nxvh,*nyv);
return;
}
/*--------------------------------------------------------------------*/
/* Fortran-callable shim for cvmibpois23; wm is forwarded as a pointer */
void cvmibpois23_(float complex *cu, float complex *bxy,
float complex *ffc, float *ci, float *wm, int *nx,
int *ny, int *nxvh, int *nyv, int *nxhd, int *nyhd) {
cvmibpois23(cu,bxy,ffc,*ci,wm,*nx,*ny,*nxvh,*nyv,*nxhd,*nyhd);
return;
}
/*--------------------------------------------------------------------*/
/* Fortran-callable shim for cvmmaxwel2; wf and wm forwarded as
   pointers */
void cvmmaxwel2_(float complex *exy, float complex *bxy,
float complex *cu, float complex *ffc, float *ci,
float *dt, float *wf, float *wm, int *nx, int *ny,
int *nxvh, int *nyv, int *nxhd, int *nyhd) {
cvmmaxwel2(exy,bxy,cu,ffc,*ci,*dt,wf,wm,*nx,*ny,*nxvh,*nyv,*nxhd,
*nyhd);
return;
}
/*--------------------------------------------------------------------*/
/* Fortran-callable shim for cvmemfield2 */
void cvmemfield2_(float complex *fxy, float complex *exy,
float complex *ffc, int *isign, int *nx, int *ny,
int *nxvh, int *nyv, int *nxhd, int *nyhd) {
cvmemfield2(fxy,exy,ffc,*isign,*nx,*ny,*nxvh,*nyv,*nxhd,*nyhd);
return;
}
/*--------------------------------------------------------------------*/
/* Fortran-callable shim for cwfft2rinit (builds mixup/sct tables) */
void cwfft2rinit_(int *mixup, float complex *sct, int *indx, int *indy,
int *nxhyd, int *nxyhd) {
cwfft2rinit(mixup,sct,*indx,*indy,*nxhyd,*nxyhd);
return;
}
/*--------------------------------------------------------------------*/
/* Fortran shims for the FFT pass routines and their wrappers.
   Scalar inputs are dereferenced; the data array f and tables
   mixup/sct pass through unchanged. */
/*--------------------------------------------------------------------*/
/* Fortran-callable shim for cfft2rvmxx (x pass) */
void cfft2rvmxx_(float complex *f, int *isign, int *mixup,
float complex *sct, int *indx, int *indy, int *nyi,
int *nyp, int *nxhd, int *nyd, int *nxhyd,
int *nxyhd) {
cfft2rvmxx(f,*isign,mixup,sct,*indx,*indy,*nyi,*nyp,*nxhd,*nyd,
*nxhyd,*nxyhd);
return;
}
/*--------------------------------------------------------------------*/
/* Fortran-callable shim for cfft2rmxy (y pass) */
void cfft2rmxy_(float complex *f, int *isign, int *mixup,
float complex *sct, int *indx, int *indy, int *nxi,
int *nxp, int *nxhd, int *nyd, int *nxhyd, int *nxyhd) {
cfft2rmxy(f,*isign,mixup,sct,*indx,*indy,*nxi,*nxp,*nxhd,*nyd,*nxhyd,
*nxyhd);
return;
}
/*--------------------------------------------------------------------*/
/* Fortran-callable shim for cfft2rvm3x (x pass, 3-component data) */
void cfft2rvm3x_(float complex *f, int *isign, int *mixup,
float complex *sct, int *indx, int *indy, int *nyi,
int *nyp, int *nxhd, int *nyd, int *nxhyd,
int *nxyhd) {
cfft2rvm3x(f,*isign,mixup,sct,*indx,*indy,*nyi,*nyp,*nxhd,*nyd,
*nxhyd,*nxyhd);
return;
}
/*--------------------------------------------------------------------*/
/* Fortran-callable shim for cfft2rvm3y (y pass, 3-component data) */
void cfft2rvm3y_(float complex *f, int *isign, int *mixup,
float complex *sct, int *indx, int *indy, int *nxi,
int *nxp, int *nxhd, int *nyd, int *nxhyd,
int *nxyhd) {
cfft2rvm3y(f,*isign,mixup,sct,*indx,*indy,*nxi,*nxp,*nxhd,*nyd,
*nxhyd,*nxyhd);
return;
}
/*--------------------------------------------------------------------*/
/* Fortran-callable shim for the cwfft2rvmx wrapper */
void cwfft2rvmx_(float complex *f, int *isign, int *mixup,
float complex *sct, int *indx, int *indy, int *nxhd,
int *nyd, int *nxhyd, int *nxyhd) {
cwfft2rvmx(f,*isign,mixup,sct,*indx,*indy,*nxhd,*nyd,*nxhyd,*nxyhd);
return;
}
/*--------------------------------------------------------------------*/
/* Fortran-callable shim for the cwfft2rvm3 wrapper */
void cwfft2rvm3_(float complex *f, int *isign, int *mixup,
float complex *sct, int *indx, int *indy, int *nxhd,
int *nyd, int *nxhyd, int *nxyhd) {
cwfft2rvm3(f,*isign,mixup,sct,*indx,*indy,*nxhd,*nyd,*nxhyd,*nxyhd);
return;
}
compiler_cgen.c | /* Generated by Nim Compiler v0.15.0 */
/* (c) 2016 Andreas Rumpf */
/* The generated code is subject to the original license. */
#define NIM_INTBITS 32
#include "nimbase.h"
#include <string.h>
typedef struct Tcgen529027 Tcgen529027;
typedef struct TNimType TNimType;
typedef struct TNimNode TNimNode;
typedef struct Ropeobj178006 Ropeobj178006;
typedef struct NimStringDesc NimStringDesc;
typedef struct TGenericSeq TGenericSeq;
typedef struct Cell47705 Cell47705;
typedef struct Cellseq47721 Cellseq47721;
typedef struct Gcheap50218 Gcheap50218;
typedef struct Gcstack50216 Gcstack50216;
typedef struct Memregion29885 Memregion29885;
typedef struct Smallchunk29839 Smallchunk29839;
typedef struct Llchunk29879 Llchunk29879;
typedef struct Bigchunk29841 Bigchunk29841;
typedef struct Intset29814 Intset29814;
typedef struct Trunk29810 Trunk29810;
typedef struct Avlnode29883 Avlnode29883;
typedef struct Gcstat50214 Gcstat50214;
typedef struct Cellset47717 Cellset47717;
typedef struct Pagedesc47713 Pagedesc47713;
typedef struct Ttypeseq292836 Ttypeseq292836;
typedef struct Ttype292840 Ttype292840;
typedef struct Intset268030 Intset268030;
typedef struct Trunk268026 Trunk268026;
typedef struct Trunkseq268028 Trunkseq268028;
typedef struct Tpasscontext341002 Tpasscontext341002;
typedef struct Tsym292834 Tsym292834;
typedef struct Tidobj199004 Tidobj199004;
typedef struct TNimObject TNimObject;
typedef struct TY292929 TY292929;
typedef struct Tstrtable292806 Tstrtable292806;
typedef struct Tsymseq292804 Tsymseq292804;
typedef struct Tident199010 Tident199010;
typedef struct Tlineinfo191336 Tlineinfo191336;
typedef struct Tnode292802 Tnode292802;
typedef struct Tloc292816 Tloc292816;
typedef struct Tlib292820 Tlib292820;
typedef struct TY529153 TY529153;
typedef struct TY203018 TY203018;
typedef struct Tidtable292850 Tidtable292850;
typedef struct Tidpairseq292848 Tidpairseq292848;
typedef struct Tlinkedlist147013 Tlinkedlist147013;
typedef struct Tlistentry147007 Tlistentry147007;
typedef struct Tcproc529021 Tcproc529021;
typedef struct Tnodetable292862 Tnodetable292862;
typedef struct Tnodepairseq292860 Tnodepairseq292860;
typedef struct Debuginfo203009 Debuginfo203009;
typedef struct TY203021 TY203021;
typedef struct TY203023 TY203023;
typedef struct Tnodeseq292796 Tnodeseq292796;
typedef struct TY191350 TY191350;
typedef struct TY529095 TY529095;
typedef struct Trodreader332021 Trodreader332021;
typedef struct TY292960 TY292960;
typedef struct TY203017 TY203017;
typedef struct Enumdesc203007 Enumdesc203007;
typedef struct Tinfocc273008 Tinfocc273008;
typedef struct Tblock529019 Tblock529019;
typedef struct Ttraversalclosure537019 Ttraversalclosure537019;
typedef struct TY135002 TY135002;
typedef struct Tbitset339004 Tbitset339004;
typedef struct TY191612 TY191612;
typedef struct Tfileinfo191334 Tfileinfo191334;
typedef struct Tinfoos176035 Tinfoos176035;
typedef struct Tinfocpu176476 Tinfocpu176476;
typedef struct Tstrentry147009 Tstrentry147009;
typedef struct TY128506 TY128506;
typedef struct Basechunk29837 Basechunk29837;
typedef struct Freecell29829 Freecell29829;
typedef struct Tinstantiation292824 Tinstantiation292824;
typedef struct Tidpair292846 Tidpair292846;
typedef struct Tnodepair292858 Tnodepair292858;
typedef struct Filenamemapping203005 Filenamemapping203005;
typedef struct TY332033 TY332033;
typedef struct Tindex332019 Tindex332019;
typedef struct Tiitable299142 Tiitable299142;
typedef struct Tiipairseq299140 Tiipairseq299140;
typedef struct Table332054 Table332054;
typedef struct Keyvaluepairseq332057 Keyvaluepairseq332057;
typedef struct Memfile330202 Memfile330202;
typedef struct TY292961 TY292961;
typedef struct Tiipair299138 Tiipair299138;
typedef struct Keyvaluepair332060 Keyvaluepair332060;
typedef NU8 Tnimkind3403;
typedef NU8 Tnimtypeflag3409Set;
typedef N_NIMCALL_PTR(void, TY3489) (void* p0, NI op0);
typedef N_NIMCALL_PTR(void*, TY3494) (void* p0);
struct TNimType {
NI size;
Tnimkind3403 kind;
Tnimtypeflag3409Set flags;
TNimType* base;
TNimNode* node;
void* finalizer;
TY3489 marker;
TY3494 deepcopy;
};
typedef NU8 Tnimnodekind3405;
struct TNimNode {
Tnimnodekind3405 kind;
NI offset;
TNimType* typ;
NCSTRING name;
NI len;
TNimNode** sons;
};
typedef N_NIMCALL_PTR(void, Globalmarkerproc56202) (void);
struct TGenericSeq {
NI len;
NI reserved;
};
struct NimStringDesc {
TGenericSeq Sup;
NIM_CHAR data[SEQ_DECL_SIZE];
};
struct Cell47705 {
NI refcount;
TNimType* typ;
};
struct Cellseq47721 {
NI len;
NI cap;
Cell47705** d;
};
typedef Smallchunk29839* TY29900[512];
typedef Trunk29810* Trunkbuckets29812[256];
struct Intset29814 {
Trunkbuckets29812 data;
};
struct Memregion29885 {
NI minlargeobj;
NI maxlargeobj;
TY29900 freesmallchunks;
Llchunk29879* llmem;
NI currmem;
NI maxmem;
NI freemem;
NI lastsize;
Bigchunk29841* freechunkslist;
Intset29814 chunkstarts;
Avlnode29883* root;
Avlnode29883* deleted;
Avlnode29883* last;
Avlnode29883* freeavlnodes;
NIM_BOOL locked;
};
struct Gcstat50214 {
NI stackscans;
NI cyclecollections;
NI maxthreshold;
NI maxstacksize;
NI maxstackcells;
NI cycletablesize;
NI64 maxpause;
};
struct Cellset47717 {
NI counter;
NI max;
Pagedesc47713* head;
Pagedesc47713** data;
};
struct Gcheap50218 {
Gcstack50216* stack;
void* stackbottom;
NI cyclethreshold;
Cellseq47721 zct;
Cellseq47721 decstack;
Cellseq47721 tempstack;
NI recgclock;
Memregion29885 region;
Gcstat50214 stat;
Cellset47717 marked;
Cellseq47721 additionalroots;
};
struct Intset268030 {
NI counter;
NI max;
Trunk268026* head;
Trunkseq268028* data;
};
struct TNimObject {
TNimType* m_type;
};
struct Tidobj199004 {
TNimObject Sup;
NI id;
};
typedef NU8 Tsymkind292435;
struct Tstrtable292806 {
NI counter;
Tsymseq292804* data;
};
typedef NU16 Tmagic292524;
struct Tlineinfo191336 {
NI16 line;
NI16 col;
NI32 fileindex;
};
typedef NU32 Tsymflag292184Set;
typedef NU32 Toption169009Set;
typedef NU8 Tlockind292808;
typedef NU8 Tstorageloc292812;
typedef NU16 Tlocflag292810Set;
struct Tloc292816 {
Tlockind292808 k;
Tstorageloc292812 s;
Tlocflag292810Set flags;
Ttype292840* t;
Ropeobj178006* r;
};
struct Tsym292834 {
Tidobj199004 Sup;
Tsymkind292435 kind;
union{
struct {Ttypeseq292836* typeinstcache;
} S1;
struct {TY292929* procinstcache;
Tsym292834* gcunsafetyreason;
} S2;
struct {TY292929* usedgenerics;
Tstrtable292806 tab;
} S3;
struct {Tsym292834* guard;
NI bitsize;
} S4;
} kindU;
Tmagic292524 magic;
Ttype292840* typ;
Tident199010* name;
Tlineinfo191336 info;
Tsym292834* owner;
Tsymflag292184Set flags;
Tnode292802* ast;
Toption169009Set options;
NI position;
NI offset;
Tloc292816 loc;
Tlib292820* annex;
Tnode292802* constraint;
};
struct TY203018 {
NimStringDesc* Field0;
NI Field1;
};
struct Tpasscontext341002 {
TNimObject Sup;
NIM_BOOL fromcache;
};
typedef Ropeobj178006* Tcfilesections529009[18];
typedef NU8 Codegenflag529025Set;
struct Tidtable292850 {
NI counter;
Tidpairseq292848* data;
};
struct Tlinkedlist147013 {
Tlistentry147007* head;
Tlistentry147007* tail;
NI counter;
};
struct Tnodetable292862 {
NI counter;
Tnodepairseq292860* data;
};
typedef Ropeobj178006* TY529136[10];
struct Tcgen529027 {
Tpasscontext341002 Sup;
Tcfilesections529009 s;
Codegenflag529025Set flags;
Tsym292834* module;
NimStringDesc* filename;
NimStringDesc* cfilename;
Ropeobj178006* tmpbase;
Tidtable292850 typecache;
Tidtable292850 forwtypecache;
Intset268030 declaredthings;
Intset268030 declaredprotos;
Tlinkedlist147013 headerfiles;
Intset268030 typeinfomarker;
Tcproc529021* initproc;
Tcproc529021* postinitproc;
Tcproc529021* preinitproc;
Ttypeseq292836* typestack;
Tnodetable292862 datacache;
Tsymseq292804* forwardedprocs;
NI typenodes;
NI nimtypes;
Ropeobj178006* typenodesname;
Ropeobj178006* nimtypesname;
NI labels;
TY529136 extensionloaders;
Ropeobj178006* injectstmt;
};
struct Debuginfo203009 {
NI version;
TY203021* files;
TY203023* enums;
NIM_BOOL conflicts;
};
struct Tident199010 {
Tidobj199004 Sup;
NimStringDesc* s;
Tident199010* next;
NI h;
};
struct Tcproc529021 {
Tsym292834* prc;
NIM_BOOL beforeretneeded;
NIM_BOOL threadvaraccessed;
Tlineinfo191336 lastlineinfo;
Tnodeseq292796* nestedtrystmts;
NI inexceptblock;
TY191350* finallysafepoints;
NI labels;
TY529095* blocks;
NI breakidx;
Toption169009Set options;
NI maxframelen;
Tcgen529027* module;
NI withinloop;
NI splitdecls;
NI gcframeid;
Ropeobj178006* gcframetype;
};
typedef NU8 Tsymflag292184;
typedef NU8 Codegenflag529025;
typedef NU8 Toption169009;
typedef NU64 Tglobaloption169013Set;
typedef NU8 Tglobaloption169013;
typedef NU8 Tcommands169076;
typedef NU16 Tnodeflag292427Set;
typedef NU8 Tnodekind292020;
struct Tnode292802 {
Ttype292840* typ;
Tlineinfo191336 info;
Tnodeflag292427Set flags;
Tnodekind292020 kind;
union{
struct {NI64 intval;
} S1;
struct {NF floatval;
} S2;
struct {NimStringDesc* strval;
} S3;
struct {Tsym292834* sym;
} S4;
struct {Tident199010* ident;
} S5;
struct {Tnodeseq292796* sons;
} S6;
} kindU;
NimStringDesc* comment;
};
typedef Ropeobj178006* TY533289[1];
typedef NU8 Tlocflag292810;
struct Tlistentry147007 {
TNimObject Sup;
Tlistentry147007* prev;
Tlistentry147007* next;
};
typedef NU8 Tlibkind292818;
struct Tlib292820 {
Tlistentry147007 Sup;
Tlibkind292818 kind;
NIM_BOOL generated;
NIM_BOOL isoverriden;
Ropeobj178006* name;
Tnode292802* path;
};
typedef NU8 Tcfilesection529005;
typedef NU8 Ttypekind292244;
typedef NU8 Tcallingconvention292002;
typedef NU32 Ttypeflag292431Set;
struct Ttype292840 {
Tidobj199004 Sup;
Ttypekind292244 kind;
Tcallingconvention292002 callconv;
Ttypeflag292431Set flags;
Ttypeseq292836* sons;
Tnode292802* n;
Tsym292834* owner;
Tsym292834* sym;
Tsym292834* destructor;
Tsym292834* deepcopy;
Tsym292834* assignment;
TY292960* methods;
NI64 size;
NI16 align;
NI16 locklevel;
Tloc292816 loc;
};
typedef Ropeobj178006* TY532811[2];
typedef NU8 Tctypekind529007;
typedef NU64 Ttypekind292244Set;
typedef NU8 Ttypeflag292431;
typedef NimStringDesc* TY533943[14];
typedef NU8 Tprefereddesc320011;
typedef Ropeobj178006* TY178507[1];
struct Enumdesc203007 {
NI size;
NU32 owner;
NI id;
NimStringDesc* name;
TY203017* values;
};
typedef Ropeobj178006* TY535235[4];
typedef NimStringDesc* TY292016[10];
typedef Ropeobj178006* TY535238[3];
struct Ropeobj178006 {
TNimObject Sup;
Ropeobj178006* left;
Ropeobj178006* right;
NI length;
NimStringDesc* data;
};
typedef NU8 Tinfoccprop273004Set;
struct Tinfocc273008 {
NimStringDesc* Field0;
NimStringDesc* Field1;
NimStringDesc* Field2;
NimStringDesc* Field3;
NimStringDesc* Field4;
NimStringDesc* Field5;
NimStringDesc* Field6;
NimStringDesc* Field7;
NimStringDesc* Field8;
NimStringDesc* Field9;
NimStringDesc* Field10;
NimStringDesc* Field11;
NimStringDesc* Field12;
NimStringDesc* Field13;
NimStringDesc* Field14;
NimStringDesc* Field15;
NimStringDesc* Field16;
NimStringDesc* Field17;
NimStringDesc* Field18;
NimStringDesc* Field19;
Tinfoccprop273004Set Field20;
};
typedef Tinfocc273008 TY273427[13];
typedef NU8 Tsystemcc273002;
typedef NU8 Tnodeflag292427;
typedef NU8 Tcprocsection529011;
typedef Ropeobj178006* Tcprocsections529013[3];
struct Tblock529019 {
NI id;
Ropeobj178006* label;
Tcprocsections529013 sections;
NIM_BOOL isloop;
NI16 nestedtrystmts;
NI16 nestedexceptstmts;
NI16 framelen;
};
typedef NU8 Tgcmode169080;
typedef NU8 Ttypeinforeason537016;
struct Ttraversalclosure537019 {
Tcproc529021* p;
NimStringDesc* visitorfrmt;
};
typedef NU8 Ttypefieldresult320145;
typedef NU8 Tinfoccprop273004;
typedef Ropeobj178006* TY536847[6];
typedef Ropeobj178006* TY536401[7];
typedef Ropeobj178006* TY536475[5];
typedef NU16 Tmsgkind191002;
typedef NU8 Tassignmentflag538302Set;
typedef NU8 Tassignmentflag538302;
typedef NimStringDesc* TY552655[19];
typedef NimStringDesc* TY551642[3];
typedef NimStringDesc* TY556764[4];
typedef NimStringDesc* TY551828[42];
typedef NimStringDesc* TY551281[7];
typedef NU8 Trenderflag311004Set;
typedef NimStringDesc* TY557052[2];
typedef NU8 Tclosuretypekind535679;
typedef NimStringDesc* TY556428[6];
typedef NU8 Tanalysisresult473003;
typedef NU8 char136Set[32];
typedef NU8 Tdistinctcompare324427;
typedef NU8 Ttypecmpflag324429Set;
typedef NU16 Tspecialword275003;
typedef NU8 Tsystemos176004;
struct Tfileinfo191334 {
NimStringDesc* fullpath;
NimStringDesc* projpath;
NimStringDesc* shortname;
Ropeobj178006* quotedname;
Ropeobj178006* quotedfullname;
TY191350* lines;
NimStringDesc* dirtyfile;
};
typedef NU8 Tinfoosprop176031Set;
struct Tinfoos176035 {
NimStringDesc* Field0;
NimStringDesc* Field1;
NimStringDesc* Field2;
NimStringDesc* Field3;
NimStringDesc* Field4;
NimStringDesc* Field5;
NimStringDesc* Field6;
NimStringDesc* Field7;
NimStringDesc* Field8;
NimStringDesc* Field9;
NimStringDesc* Field10;
NimStringDesc* Field11;
Tinfoosprop176031Set Field12;
};
typedef Tinfoos176035 TY176082[24];
typedef NU8 Tendian176474;
struct Tinfocpu176476 {
NimStringDesc* Field0;
NI Field1;
Tendian176474 Field2;
NI Field3;
NI Field4;
};
typedef Tinfocpu176476 TY176510[19];
typedef NU8 Tsystemcpu176452;
struct Tstrentry147009 {
Tlistentry147007 Sup;
NimStringDesc* data;
};
struct TY128506 {
NimStringDesc* Field0;
NimStringDesc* Field1;
NimStringDesc* Field2;
};
struct Gcstack50216 {
Gcstack50216* prev;
Gcstack50216* next;
void* starts;
void* pos;
NI maxstacksize;
};
struct Basechunk29837 {
NI prevsize;
NI size;
NIM_BOOL used;
};
struct Smallchunk29839 {
Basechunk29837 Sup;
Smallchunk29839* next;
Smallchunk29839* prev;
Freecell29829* freelist;
NI free;
NI acc;
NF data;
};
struct Llchunk29879 {
NI size;
NI acc;
Llchunk29879* next;
};
struct Bigchunk29841 {
Basechunk29837 Sup;
Bigchunk29841* next;
Bigchunk29841* prev;
NI align;
NF data;
};
typedef NI TY29818[16];
struct Trunk29810 {
Trunk29810* next;
NI key;
TY29818 bits;
};
typedef Avlnode29883* TY29890[2];
struct Avlnode29883 {
TY29890 link;
NI key;
NI upperbound;
NI level;
};
struct Pagedesc47713 {
Pagedesc47713* next;
NI key;
TY29818 bits;
};
struct Trunk268026 {
Trunk268026* next;
NI key;
TY29818 bits;
};
struct Tidpair292846 {
Tidobj199004* key;
TNimObject* val;
};
struct Tnodepair292858 {
NI h;
Tnode292802* key;
NI val;
};
struct Filenamemapping203005 {
NimStringDesc* package;
NimStringDesc* file;
NU32 mangled;
};
typedef NU8 Treasonforrecompile332002;
struct Tiitable299142 {
NI counter;
Tiipairseq299140* data;
};
struct Tindex332019 {
NI lastidxkey;
NI lastidxval;
Tiitable299142 tab;
NimStringDesc* r;
NI offset;
};
struct Table332054 {
Keyvaluepairseq332057* data;
NI counter;
};
struct Memfile330202 {
void* mem;
NI size;
int handle;
};
struct Trodreader332021 {
TNimObject Sup;
NI pos;
NCSTRING s;
Toption169009Set options;
Treasonforrecompile332002 reason;
TY332033* moddeps;
TY332033* files;
NI dataidx;
NI convertersidx;
NI initidx;
NI interfidx;
NI compilerprocsidx;
NI methodsidx;
NimStringDesc* filename;
Tindex332019 index;
Tindex332019 imports;
NI readerindex;
NI line;
NI moduleid;
Table332054 syms;
Memfile330202 memfile;
Tsymseq292804* methods;
NimStringDesc* origfile;
NIM_BOOL inviewmode;
};
struct TY292961 {
NI Field0;
Tsym292834* Field1;
};
struct Freecell29829 {
Freecell29829* next;
NI zerofield;
};
struct Tinstantiation292824 {
Tsym292834* sym;
Ttypeseq292836* concretetypes;
NI compilesid;
};
struct Tiipair299138 {
NI key;
NI val;
};
struct Keyvaluepair332060 {
NI Field0;
NI Field1;
Tsym292834* Field2;
};
struct Ttypeseq292836 {
TGenericSeq Sup;
Ttype292840* data[SEQ_DECL_SIZE];
};
struct TY529153 {
TGenericSeq Sup;
Tcgen529027* data[SEQ_DECL_SIZE];
};
struct Tsymseq292804 {
TGenericSeq Sup;
Tsym292834* data[SEQ_DECL_SIZE];
};
struct TY203017 {
TGenericSeq Sup;
TY203018 data[SEQ_DECL_SIZE];
};
struct TY135002 {
TGenericSeq Sup;
NimStringDesc* data[SEQ_DECL_SIZE];
};
struct Tbitset339004 {
TGenericSeq Sup;
NI8 data[SEQ_DECL_SIZE];
};
struct TY529095 {
TGenericSeq Sup;
Tblock529019 data[SEQ_DECL_SIZE];
};
struct TY191350 {
TGenericSeq Sup;
Ropeobj178006* data[SEQ_DECL_SIZE];
};
struct Tnodeseq292796 {
TGenericSeq Sup;
Tnode292802* data[SEQ_DECL_SIZE];
};
struct TY191612 {
TGenericSeq Sup;
Tfileinfo191334 data[SEQ_DECL_SIZE];
};
struct Trunkseq268028 {
TGenericSeq Sup;
Trunk268026* data[SEQ_DECL_SIZE];
};
struct TY292929 {
TGenericSeq Sup;
Tinstantiation292824* data[SEQ_DECL_SIZE];
};
struct Tidpairseq292848 {
TGenericSeq Sup;
Tidpair292846 data[SEQ_DECL_SIZE];
};
struct Tnodepairseq292860 {
TGenericSeq Sup;
Tnodepair292858 data[SEQ_DECL_SIZE];
};
struct TY203021 {
TGenericSeq Sup;
Filenamemapping203005 data[SEQ_DECL_SIZE];
};
struct TY203023 {
TGenericSeq Sup;
Enumdesc203007 data[SEQ_DECL_SIZE];
};
struct TY292960 {
TGenericSeq Sup;
TY292961 data[SEQ_DECL_SIZE];
};
struct TY332033 {
TGenericSeq Sup;
NI32 data[SEQ_DECL_SIZE];
};
struct Tiipairseq299140 {
TGenericSeq Sup;
Tiipair299138 data[SEQ_DECL_SIZE];
};
struct Keyvaluepairseq332057 {
TGenericSeq Sup;
Keyvaluepair332060 data[SEQ_DECL_SIZE];
};
N_NIMCALL(void, nimGCvisit)(void* d0, NI op0);
N_NIMCALL(void, T839829468_2)(void);
N_NIMCALL(void, nimRegisterGlobalMarker)(Globalmarkerproc56202 markerproc0);
N_NIMCALL(void, T839829468_3)(void);
N_NIMCALL(Ropeobj178006*, rope_178277_2381377266)(NimStringDesc* s0);
static N_INLINE(void, asgnRefNoCycle)(void** dest0, void* src0);
static N_INLINE(Cell47705*, usrtocell_51840_1689653243)(void* usr0);
static N_INLINE(void, rtladdzct_53001_1689653243)(Cell47705* c0);
N_NOINLINE(void, addzct_51817_1689653243)(Cellseq47721* s0, Cell47705* c0);
N_NIMCALL(void, T839829468_5)(void);
N_NIMCALL(void, T839829468_6)(void);
static N_INLINE(void, nimGCunrefNoCycle)(void* p0);
N_NIMCALL(void*, newSeqRC1)(TNimType* typ0, NI len0);
N_NIMCALL(void, T839829468_7)(void);
N_NIMCALL(void, initintset_268885_2627731572)(Intset268030* Result);
N_NOINLINE(void, chckNil)(void* p0);
N_NIMCALL(void, genericReset)(void* dest0, TNimType* mt0);
N_NIMCALL(void, T839829468_8)(void);
N_NIMCALL(Tcgen529027*, newmodule_563045_839829468)(Tsym292834* module0);
N_NIMCALL(Tcgen529027*, getcgenmodule_532226_839829468)(Tsym292834* s0);
N_NIMCALL(void, internalerror_196113_155036129)(NimStringDesc* errmsg0);
N_NIMCALL(NimStringDesc*, HEX24_196185_1689653243)(TY203018 x0);
N_NIMCALL(Tcgen529027*, rawnewmodule_563038_839829468)(Tsym292834* module0);
N_NIMCALL(Tcgen529027*, rawnewmodule_562663_839829468)(Tsym292834* module0, NimStringDesc* filename0);
N_NIMCALL(void*, newObj)(TNimType* typ0, NI size0);
static N_INLINE(void, appendString)(NimStringDesc* dest0, NimStringDesc* src0);
static N_INLINE(void, copymem_7485_1689653243)(void* dest0, void* source0, NI size0);
N_NIMCALL(NimStringDesc*, HEX24_8401_1689653243)(NU64 x0);
N_NIMCALL(NU32, hashowner_532977_839829468)(Tsym292834* s0);
N_NIMCALL(NU32, register_203121_1926258066)(Debuginfo203009* self0, NimStringDesc* package0, NimStringDesc* file0);
N_NIMCALL(NimStringDesc*, rawNewString)(NI space0);
N_NIMCALL(void, initlinkedlist_147031_3771138726)(Tlinkedlist147013* list0);
N_NIMCALL(NimStringDesc*, copyStringRC1)(NimStringDesc* src0);
N_NIMCALL(void, initidtable_296019_850551059)(Tidtable292850* x0);
N_NIMCALL(Tcproc529021*, newproc_529206_3723162438)(Tsym292834* prc0, Tcgen529027* module0);
static N_INLINE(void, asgnRef)(void** dest0, void* src0);
static N_INLINE(void, incref_53819_1689653243)(Cell47705* c0);
static N_INLINE(void, decref_53401_1689653243)(Cell47705* c0);
N_NIMCALL(Toption169009Set, initprocoptions_562635_839829468)(Tcgen529027* m0);
N_NIMCALL(Tcproc529021*, newpreinitproc_562625_839829468)(Tcgen529027* m0);
N_NIMCALL(Tcproc529021*, newpostinitproc_562630_839829468)(Tcgen529027* m0);
N_NIMCALL(void, initnodetable_296085_850551059)(Tnodetable292862* x0);
N_NIMCALL(Ropeobj178006*, gettempname_533596_839829468)(Tcgen529027* m0);
N_NIMCALL(Ropeobj178006*, HEX26_178418_2381377266)(Ropeobj178006* a0, Ropeobj178006* b0);
N_NIMCALL(Ropeobj178006*, rope_178401_2381377266)(NI64 i0);
N_NIMCALL(NimStringDesc*, tofullpath_192264_155036129)(NI32 fileidx0);
N_NIMCALL(TGenericSeq*, setLengthSeq)(TGenericSeq* seq0, NI elemsize0, NI newlen0);
N_NIMCALL(NimStringDesc*, tofilename_192260_155036129)(NI32 fileidx0);
N_NIMCALL(NimStringDesc*, noschangeFileExt)(NimStringDesc* filename0, NimStringDesc* ext0);
N_NIMCALL(NimStringDesc*, completecfilepath_273854_2528170400)(NimStringDesc* cfile0, NIM_BOOL createsubdir0);
N_NIMCALL(void, readmergeinfo_530613_2760143328)(NimStringDesc* cfilename0, Tcgen529027* m0);
N_NIMCALL(NimStringDesc*, getcfile_563204_839829468)(Tcgen529027* m0);
N_NIMCALL(NimStringDesc*, copyString)(NimStringDesc* src0);
N_NIMCALL(NimStringDesc*, withpackagename_170073_2607990831)(NimStringDesc* path0);
static N_INLINE(NIM_BOOL, skipcodegen_341085_2355241294)(Tnode292802* n0);
N_NIMCALL(void, genstmts_539244_839829468)(Tcproc529021* p0, Tnode292802* t0);
N_NIMCALL(void, expr_539248_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0);
N_NIMCALL(void, fillprocloc_539201_839829468)(Tsym292834* sym0);
N_NIMCALL(void, fillloc_532282_839829468)(Tloc292816* a0, Tlockind292808 k0, Ttype292840* typ0, Ropeobj178006* r0, Tstorageloc292812 s0);
N_NIMCALL(void, unsureAsgnRef)(void** dest0, void* src0);
N_NIMCALL(Ropeobj178006*, manglename_533205_839829468)(Tsym292834* s0);
N_NIMCALL(NIM_BOOL, iskeyword_532960_839829468)(Tident199010* w0);
N_NIMCALL(NimStringDesc*, mangle_528847_2036603609)(NimStringDesc* name0);
N_NIMCALL(void, add_178487_2381377266)(Ropeobj178006** a0, NimStringDesc* b0);
N_NIMCALL(void, add_178482_2381377266)(Ropeobj178006** a0, Ropeobj178006* b0);
N_NIMCALL(Ropeobj178006*, HEX25_178905_2381377266)(NimStringDesc* frmt0, Ropeobj178006** args0, NI args0Len0);
N_NIMCALL(void, genprocprototype_539254_839829468)(Tcgen529027* m0, Tsym292834* sym0);
N_NIMCALL(void, useheader_532369_839829468)(Tcgen529027* m0, Tsym292834* sym0);
N_NIMCALL(NIM_BOOL, includestr_147249_3771138726)(Tlinkedlist147013* list0, NimStringDesc* data0);
N_NIMCALL(NimStringDesc*, getstr_297230_850551059)(Tnode292802* a0);
N_NIMCALL(Tsym292834*, getmodule_299123_2984716966)(Tsym292834* s0);
N_NIMCALL(NIM_BOOL, containsorincl_268862_2627731572)(Intset268030* s0, NI key0);
N_NIMCALL(Ropeobj178006*, ropecg_532407_839829468)(Tcgen529027* m0, NimStringDesc* frmt0, Ropeobj178006** args0, NI args0Len0);
N_NIMCALL(NimStringDesc*, nimIntToStr)(NI x0);
static N_INLINE(void, appendChar)(NimStringDesc* dest0, NIM_CHAR c0);
N_NIMCALL(NimStringDesc*, copyStrLast)(NimStringDesc* s0, NI start_79610_1689653243, NI last0);
N_NIMCALL(NimStringDesc*, copyStrLast)(NimStringDesc* s0, NI first0, NI last0);
N_NIMCALL(Ropeobj178006*, cgsym_532403_839829468)(Tcgen529027* m0, NimStringDesc* name0);
N_NIMCALL(Tsym292834*, getcompilerproc_338746_3937434831)(NimStringDesc* name0);
N_NIMCALL(void, genproc_532951_839829468)(Tcgen529027* m0, Tsym292834* prc0);
N_NIMCALL(NIM_BOOL, isactivated_561431_839829468)(Tsym292834* prc0);
N_NIMCALL(void, addforwardedproc_532203_839829468)(Tcgen529027* m0, Tsym292834* prc0);
N_NIMCALL(TGenericSeq*, incrSeqV2)(TGenericSeq* seq0, NI elemsize0);
N_NIMCALL(void, genprocnoforward_560906_839829468)(Tcgen529027* m0, Tsym292834* prc0);
N_NIMCALL(void, genprocaux_560284_839829468)(Tcgen529027* m0, Tsym292834* prc0);
N_NIMCALL(Ropeobj178006*, genprocheader_535867_839829468)(Tcgen529027* m0, Tsym292834* prc0);
N_NIMCALL(void, genclinedir_532813_839829468)(Ropeobj178006** r0, Tlineinfo191336 info0);
N_NIMCALL(void, genclinedir_532725_839829468)(Ropeobj178006** r0, NimStringDesc* filename0, NI line0);
N_NIMCALL(void, addf_179205_2381377266)(Ropeobj178006** c0, NimStringDesc* frmt0, Ropeobj178006** args0, NI args0Len0);
N_NIMCALL(NimStringDesc*, makesinglelinecstring_528835_2036603609)(NimStringDesc* s0);
N_NIMCALL(NI, safelinenm_532721_839829468)(Tlineinfo191336 info0);
static N_INLINE(NI, tolinenumber_192415_155036129)(Tlineinfo191336 info0);
N_NIMCALL(void, genprocparams_534115_839829468)(Tcgen529027* m0, Ttype292840* t0, Ropeobj178006** rettype0, Ropeobj178006** params0, Intset268030* check0, NIM_BOOL declareenvironment0, NIM_BOOL weakdep0);
N_NIMCALL(NIM_BOOL, isinvalidreturntype_533548_839829468)(Ttype292840* rettype0);
N_NIMCALL(Tctypekind529007, maptype_533393_839829468)(Ttype292840* typ0);
N_NIMCALL(Tctypekind529007, mapsettype_533389_839829468)(Ttype292840* typ0);
N_NIMCALL(NI64, getsize_320135_3876443242)(Ttype292840* typ0);
N_NIMCALL(Ttype292840*, lastson_295377_850551059)(Ttype292840* n0);
N_NIMCALL(NI64, firstord_320001_3876443242)(Ttype292840* t0);
N_NIMCALL(Ttype292840*, skiptypes_296099_850551059)(Ttype292840* t0, Ttypekind292244Set kinds0);
N_NIMCALL(NIM_BOOL, isimportedcpptype_533476_839829468)(Ttype292840* t0);
N_NIMCALL(NIM_BOOL, needscomplexassignment_533509_839829468)(Ttype292840* typ0);
N_NIMCALL(NIM_BOOL, containsgarbagecollectedref_320117_3876443242)(Ttype292840* typ0);
static N_INLINE(NIM_BOOL, isobjlackingtypefield_533513_839829468)(Ttype292840* typ0);
N_NIMCALL(NIM_BOOL, ispureobject_320138_3876443242)(Ttype292840* typ0);
N_NIMCALL(Ropeobj178006*, gettypedescaux_533503_839829468)(Tcgen529027* m0, Ttype292840* typ0, Intset268030* check0);
N_NIMCALL(Ttype292840*, getuniquetype_528640_2036603609)(Ttype292840* key0);
N_NIMCALL(Ropeobj178006*, gettypepre_533972_839829468)(Tcgen529027* m0, Ttype292840* typ0);
N_NIMCALL(Ropeobj178006*, getsimpletypedesc_533936_839829468)(Tcgen529027* m0, Ttype292840* typ0);
N_NIMCALL(Ropeobj178006*, typenameorliteral_533898_839829468)(Ttype292840* t0, NimStringDesc* literal0);
N_NIMCALL(Ropeobj178006*, gettypename_533313_839829468)(Ttype292840* typ0);
N_NIMCALL(Ropeobj178006*, typename_533292_839829468)(Ttype292840* typ0);
N_NIMCALL(NimStringDesc*, reprEnum)(NI e0, TNimType* typ0);
N_NIMCALL(Ropeobj178006*, cachegettype_533591_839829468)(Tidtable292850 tab0, Ttype292840* key0);
N_NIMCALL(TNimObject*, idtableget_299086_2984716966)(Tidtable292850 t0, Tidobj199004* key0);
N_NIMCALL(NimStringDesc*, typetostring_320017_3876443242)(Ttype292840* typ0, Tprefereddesc320011 prefer0);
N_NIMCALL(Ttype292840*, elemtype_320394_3876443242)(Ttype292840* t0);
N_NIMCALL(Ropeobj178006*, HEX26_178447_2381377266)(Ropeobj178006* a0, NimStringDesc* b0);
N_NIMCALL(Ropeobj178006*, gettypeforward_534039_839829468)(Tcgen529027* m0, Ttype292840* typ0);
N_NIMCALL(NIM_BOOL, isimportedtype_533449_839829468)(Ttype292840* t0);
N_NIMCALL(NimStringDesc*, getforwardstructformat_534015_839829468)(Tcgen529027* m0);
N_NIMCALL(Ropeobj178006*, structorunion_534001_839829468)(Ttype292840* t0);
N_NIMCALL(void, idtableput_299094_2984716966)(Tidtable292850* t0, Tidobj199004* key0, TNimObject* val0);
N_NIMCALL(void, pushtype_533958_839829468)(Tcgen529027* m0, Ttype292840* typ0);
N_NIMCALL(Ropeobj178006*, gettypedescweak_534079_839829468)(Tcgen529027* m0, Ttype292840* t0, Intset268030* check0);
N_NIMCALL(void, internalerror_196100_155036129)(Tlineinfo191336 info0, NimStringDesc* errmsg0);
N_NIMCALL(NIM_BOOL, hasenum_203230_1926258066)(Debuginfo203009 self0, NimStringDesc* ename0, NI id0, NU32 owner0);
N_NIMCALL(void*, newSeq)(TNimType* typ0, NI len0);
static N_INLINE(NI, len_293081_850551059)(Tnode292802* n0);
N_NIMCALL(void, registerenum_203419_1926258066)(Debuginfo203009* self0, Enumdesc203007* ed0);
N_NIMCALL(void, genericSeqAssign)(void* dest0, void* src_86804_1689653243, TNimType* mt0);
N_NIMCALL(void, appcg_532632_839829468)(Tcgen529027* m0, Ropeobj178006** c0, NimStringDesc* frmt0, Ropeobj178006** args0, NI args0Len0);
N_NIMCALL(NI64, lengthord_320007_3876443242)(Ttype292840* t0);
N_NIMCALL(NIM_BOOL, scancppgenericslot_534827_839829468)(NimStringDesc* pat0, NI* cursor0, NI* outidx0, NI* outstars0);
N_NIMCALL(Ttype292840*, resolvestarsincpptype_534891_839829468)(Ttype292840* typ0, NI idx0, NI stars0);
N_NIMCALL(NI, len_295339_850551059)(Ttype292840* n0);
N_NIMCALL(NimStringDesc*, copyStr)(NimStringDesc* s0, NI start0);
N_NIMCALL(NimStringDesc*, copyStr)(NimStringDesc* s0, NI first0);
N_NIMCALL(Ropeobj178006*, getrecorddesc_534643_839829468)(Tcgen529027* m0, Ttype292840* typ0, Ropeobj178006* name0, Intset268030* check0);
N_NIMCALL(Ropeobj178006*, getrecordfields_534636_839829468)(Tcgen529027* m0, Ttype292840* typ0, Intset268030* check0);
N_NIMCALL(Ropeobj178006*, genrecordfieldsaux_534421_839829468)(Tcgen529027* m0, Tnode292802* n0, Ropeobj178006* accessexpr0, Ttype292840* rectype0, Intset268030* check0);
N_NIMCALL(NI, sonslen_295351_850551059)(Tnode292802* n0);
N_NIMCALL(Tnode292802*, lastson_295364_850551059)(Tnode292802* n0);
N_NIMCALL(Ropeobj178006*, HEX26_178452_2381377266)(NimStringDesc* a0, Ropeobj178006* b0);
N_NIMCALL(Ropeobj178006*, manglerecfieldname_534361_839829468)(Tsym292834* field0, Ttype292840* rectype0);
N_NIMCALL(NimStringDesc*, manglefield_532973_839829468)(Tident199010* name0);
N_NIMCALL(NIM_CHAR, nsuToUpperAsciiChar)(NIM_CHAR c0);
N_NIMCALL(Ropeobj178006*, gettupledesc_534777_839829468)(Tcgen529027* m0, Ttype292840* typ0, Ropeobj178006* name0, Intset268030* check0);
N_NIMCALL(NI, sonslen_295327_850551059)(Ttype292840* n0);
N_NIMCALL(void, excl_268841_2627731572)(Intset268030* s0, NI key0);
static N_INLINE(NIM_BOOL, iscompiletimeonly_328706_3876443242)(Ttype292840* t0);
N_NIMCALL(Tstorageloc292812, paramstorageloc_534098_839829468)(Tsym292834* param0);
N_NIMCALL(NIM_BOOL, ccgintroducedptr_533609_839829468)(Tsym292834* s0);
N_NIMCALL(Tctypekind529007, mapreturntype_533445_839829468)(Ttype292840* typ0);
N_NIMCALL(Tnode292802*, easyresultasgn_560191_839829468)(Tnode292802* n0);
static N_INLINE(Tnode292802*, HEX5BHEX5D_293238_850551059)(Tnode292802* n0, NI i0);
N_NIMCALL(Tnode292802*, getbody_335227_1724185294)(Tsym292834* s0);
N_NIMCALL(Ropeobj178006*, localvardecl_538532_839829468)(Tcproc529021* p0, Tsym292834* s0);
N_NIMCALL(Ropeobj178006*, gettypedesc_535671_839829468)(Tcgen529027* m0, Ttype292840* typ0);
N_NIMCALL(void, initlocexprsingleuse_539289_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* result0);
N_NIMCALL(void, initloc_532273_839829468)(Tloc292816* result0, Tlockind292808 k0, Ttype292840* typ0, Tstorageloc292812 s0);
N_NIMCALL(void, linefmt_532714_839829468)(Tcproc529021* p0, Tcprocsection529011 s0, NimStringDesc* frmt0, Ropeobj178006** args0, NI args0Len0);
static N_INLINE(Ropeobj178006**, s_529179_3723162438)(Tcproc529021* p0, Tcprocsection529011 s0);
N_NIMCALL(Ropeobj178006*, indentline_532656_839829468)(Tcproc529021* p0, Ropeobj178006* r0);
N_NIMCALL(void, prepend_178893_2381377266)(Ropeobj178006** a0, Ropeobj178006* b0);
N_NIMCALL(Ropeobj178006*, rdloc_538188_839829468)(Tloc292816 a0);
N_NIMCALL(void, assignlocalvar_538614_839829468)(Tcproc529021* p0, Tsym292834* s0);
N_NIMCALL(void, line_532690_839829468)(Tcproc529021* p0, Tcprocsection529011 s0, Ropeobj178006* r0);
N_NIMCALL(void, localdebuginfo_538449_839829468)(Tcproc529021* p0, Tsym292834* s0);
N_NIMCALL(void, linef_532700_839829468)(Tcproc529021* p0, Tcprocsection529011 s0, NimStringDesc* frmt0, Ropeobj178006** args0, NI args0Len0);
N_NIMCALL(Ropeobj178006*, makecstring_191638_155036129)(NimStringDesc* s0);
N_NIMCALL(NimStringDesc*, nsuNormalize)(NimStringDesc* s0);
N_NIMCALL(Ropeobj178006*, gentypeinfo_535941_839829468)(Tcgen529027* m0, Ttype292840* t_535944_839829468);
N_NIMCALL(Tcgen529027*, bmod_529201_3723162438)(Tsym292834* module0);
N_NIMCALL(void, gentypeinfoauxbase_535960_839829468)(Tcgen529027* m0, Ttype292840* typ0, Ttype292840* origtype0, Ropeobj178006* name0, Ropeobj178006* base0);
N_NIMCALL(NIM_BOOL, canformacycle_320123_3876443242)(Ttype292840* typ0);
N_NIMCALL(void, gentupleinfo_536549_839829468)(Tcgen529027* m0, Ttype292840* typ0, Ropeobj178006* name0);
N_NIMCALL(Ropeobj178006*, getnimnode_535945_839829468)(Tcgen529027* m0);
N_NIMCALL(Ttype292840*, fakeclosuretype_537010_839829468)(Tsym292834* owner0);
N_NIMCALL(Ttype292840*, newtype_295107_850551059)(Ttypekind292244 kind0, Tsym292834* owner0);
N_NIMCALL(void, rawaddson_296394_850551059)(Ttype292840* father0, Ttype292840* son0);
N_NIMCALL(void, gentypeinfoaux_536027_839829468)(Tcgen529027* m0, Ttype292840* typ0, Ttype292840* origtype0, Ropeobj178006* name0);
N_NIMCALL(Ropeobj178006*, gentraverseproc_537632_839829468)(Tcgen529027* m0, Ttype292840* typ0, Ttypeinforeason537016 reason0);
N_NIMCALL(void, gentraverseprocseq_537399_839829468)(Ttraversalclosure537019* c0, Ropeobj178006* accessor0, Ttype292840* typ0);
N_NIMCALL(void, gettemp_537032_839829468)(Tcproc529021* p0, Ttype292840* t0, Tloc292816* result0, NIM_BOOL needsinit0);
N_NIMCALL(void, constructloc_538388_839829468)(Tcproc529021* p0, Tloc292816 loc0, NIM_BOOL istemp0);
static N_INLINE(NIM_BOOL, iscomplexvaluetype_538317_839829468)(Ttype292840* t0);
N_NIMCALL(void, usestringh_532345_839829468)(Tcgen529027* m0);
N_NIMCALL(Ropeobj178006*, addrloc_538204_839829468)(Tloc292816 a0);
N_NIMCALL(void, genobjectinit_538242_839829468)(Tcproc529021* p0, Tcprocsection529011 section0, Ttype292840* t0, Tloc292816 a0, NIM_BOOL takeaddr0);
N_NIMCALL(Ttypefieldresult320145, analyseobjectwithtypefield_320149_3876443242)(Ttype292840* t0);
N_NIMCALL(Ttype292840*, getsystype_338150_3937434831)(Ttypekind292244 kind0);
N_NIMCALL(void, gentraverseproc_537022_839829468)(Ttraversalclosure537019* c0, Ropeobj178006* accessor0, Ttype292840* typ_537027_839829468);
static N_INLINE(Ropeobj178006*, parentobj_537257_839829468)(Ropeobj178006* accessor0, Tcgen529027* m0);
N_NIMCALL(void, gentraverseproc_537039_839829468)(Ttraversalclosure537019* c0, Ropeobj178006* accessor0, Tnode292802* n0);
N_NIMCALL(void, gencaserange_537028_839829468)(Tcproc529021* p0, Tnode292802* branch0);
N_NIMCALL(Ropeobj178006*, genliteral_539273_839829468)(Tcproc529021* p0, Tnode292802* n0);
N_NIMCALL(Ropeobj178006*, genliteral_549476_839829468)(Tcproc529021* p0, Tnode292802* n0, Ttype292840* ty0);
N_NIMCALL(Ropeobj178006*, intliteral_539270_839829468)(NI64 i0);
N_NIMCALL(Ropeobj178006*, int64literal_549430_839829468)(NI64 i0);
N_NIMCALL(Ropeobj178006*, uint64literal_549442_839829468)(NU64 i0);
N_NIMCALL(NI, nodetabletestorset_342682_1142335848)(Tnodetable292862* t0, Tnode292802* key0, NI val0);
N_NIMCALL(Ropeobj178006*, getstrlit_549468_839829468)(Tcgen529027* m0, NimStringDesc* s0);
N_NIMCALL(NimStringDesc*, tostrmaxprecision_298007_3471544153)(NF f0);
N_NIMCALL(Tnode292802*, copynode_296528_850551059)(Tnode292802* src0);
N_NIMCALL(void, linecg_532707_839829468)(Tcproc529021* p0, Tcprocsection529011 s0, NimStringDesc* frmt0, Ropeobj178006** args0, NI args0Len0);
N_NIMCALL(void, genarrayinfo_537005_839829468)(Tcgen529027* m0, Ttype292840* typ0, Ropeobj178006* name0);
N_NIMCALL(void, gensetinfo_536867_839829468)(Tcgen529027* m0, Ttype292840* typ0, Ropeobj178006* name0);
N_NIMCALL(void, genenuminfo_536597_839829468)(Tcgen529027* m0, Ttype292840* typ0, Ropeobj178006* name0);
N_NIMCALL(void, genobjectinfo_536506_839829468)(Tcgen529027* m0, Ttype292840* typ0, Ttype292840* origtype0, Ropeobj178006* name0);
N_NIMCALL(void, genobjectfields_536104_839829468)(Tcgen529027* m0, Ttype292840* typ0, Tnode292802* n0, Ropeobj178006* expr0);
N_NIMCALL(Ropeobj178006*, discriminatortablename_536057_839829468)(Tcgen529027* m0, Ttype292840* objtype_536060_839829468, Tsym292834* d0);
N_NIMCALL(Tsym292834*, lookupinrecord_299119_2984716966)(Tnode292802* n0, Tident199010* field0);
N_NIMCALL(NI64, getordvalue_320129_3876443242)(Tnode292802* n0);
N_NIMCALL(void, gendeepcopyproc_538066_839829468)(Tcgen529027* m0, Tsym292834* s0, Ropeobj178006* result0);
N_NIMCALL(void, initlocalvar_538398_839829468)(Tcproc529021* p0, Tsym292834* v0, NIM_BOOL immediateasgn0);
N_NIMCALL(void, fillresult_533865_839829468)(Tsym292834* param0);
N_NIMCALL(void, assignparam_538994_839829468)(Tcproc529021* p0, Tsym292834* s0);
N_NIMCALL(void, closuresetup_560158_839829468)(Tcproc529021* p0, Tsym292834* prc0);
N_NIMCALL(Ropeobj178006*, initgcframe_538435_839829468)(Tcproc529021* p0);
N_NIMCALL(Ropeobj178006*, initframe_560140_839829468)(Tcproc529021* p0, Ropeobj178006* procname0, Ropeobj178006* filename0);
N_NIMCALL(Ropeobj178006*, quotedfilename_196818_155036129)(Tlineinfo191336 i0);
N_NIMCALL(void, appcg_532648_839829468)(Tcproc529021* p0, Tcprocsection529011 s0, NimStringDesc* frmt0, Ropeobj178006** args0, NI args0Len0);
N_NIMCALL(Ropeobj178006*, deinitgcframe_538441_839829468)(Tcproc529021* p0);
N_NIMCALL(Ropeobj178006*, deinitframe_560150_839829468)(Tcproc529021* p0);
N_NIMCALL(Tcgen529027*, findpendingmodule_532241_839829468)(Tcgen529027* m0, Tsym292834* s0);
N_NIMCALL(void, symindynamiclib_559929_839829468)(Tcgen529027* m0, Tsym292834* sym0);
N_NIMCALL(NIM_BOOL, isgetprocaddr_559442_839829468)(Tlib292820* lib0);
N_NIMCALL(void, loaddynamiclib_559480_839829468)(Tcgen529027* m0, Tlib292820* lib0);
N_NIMCALL(void, libcandidates_170605_2607990831)(NimStringDesc* s0, TY135002** dest0);
N_NIMCALL(void, rawmessage_194612_155036129)(Tmsgkind191002 msg0, NimStringDesc* arg0);
N_NIMCALL(void, initlocexpr_539283_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* result0);
N_NIMCALL(Ropeobj178006*, mangledynlibproc_538816_839829468)(Tsym292834* sym0);
N_NIMCALL(NimStringDesc*, HEX24_178856_2381377266)(Ropeobj178006* r0);
N_NIMCALL(void, symindynamiclibpartial_560071_839829468)(Tcgen529027* m0, Tsym292834* sym0);
N_NIMCALL(void, genvarprototype_539236_839829468)(Tcgen529027* m0, Tsym292834* sym0);
N_NIMCALL(void, genvarprototypeaux_544254_839829468)(Tcgen529027* m0, Tsym292834* sym0);
N_NIMCALL(void, declarethreadvar_538676_839829468)(Tcgen529027* m0, Tsym292834* s0, NIM_BOOL isextern0);
static N_INLINE(NIM_BOOL, emulatedthreadvars_532949_839829468)(void);
static N_INLINE(NIM_BOOL, crossescppboundary_560754_839829468)(Tcgen529027* m0, Tsym292834* sym0);
N_NIMCALL(void, putlocintodest_539258_839829468)(Tcproc529021* p0, Tloc292816* d0, Tloc292816 s0);
N_NIMCALL(void, genassignment_539264_839829468)(Tcproc529021* p0, Tloc292816 dest0, Tloc292816 src0, Tassignmentflag538302Set flags0);
N_NIMCALL(void, genrefassign_538311_839829468)(Tcproc529021* p0, Tloc292816 dest0, Tloc292816 src0, Tassignmentflag538302Set flags0);
static N_INLINE(NIM_BOOL, usesnativegc_169177_2607990831)(void);
N_NIMCALL(void, optasgnloc_549788_839829468)(Tloc292816 a0, Ttype292840* t0, Ropeobj178006* field0, Tloc292816* Result);
N_NIMCALL(void, genoptasgntuple_550001_839829468)(Tcproc529021* p0, Tloc292816 dest0, Tloc292816 src0, Tassignmentflag538302Set flags0);
N_NIMCALL(void, gengenericasgn_550167_839829468)(Tcproc529021* p0, Tloc292816 dest0, Tloc292816 src0, Tassignmentflag538302Set flags0);
N_NIMCALL(NI, asgncomplexity_549750_839829468)(Tnode292802* n0);
N_NIMCALL(void, genoptasgnobject_550084_839829468)(Tcproc529021* p0, Tloc292816 dest0, Tloc292816 src0, Tassignmentflag538302Set flags0, Tnode292802* t0);
N_NIMCALL(void, genericAssign)(void* dest0, void* src0, TNimType* mt0);
N_NIMCALL(void, localerror_196085_155036129)(Tlineinfo191336 info0, NimStringDesc* arg0);
N_NIMCALL(NIM_BOOL, issimpleconst_532311_839829468)(Ttype292840* typ0);
N_NIMCALL(void, putintodest_550468_839829468)(Tcproc529021* p0, Tloc292816* d0, Ttype292840* t0, Ropeobj178006* r0, Tstorageloc292812 s0);
N_NIMCALL(void, gencomplexconst_558249_839829468)(Tcproc529021* p0, Tsym292834* sym0, Tloc292816* d0);
N_NIMCALL(void, requestconstimpl_539240_839829468)(Tcproc529021* p0, Tsym292834* sym0);
N_NIMCALL(Ropeobj178006*, genconstexpr_554849_839829468)(Tcproc529021* p0, Tnode292802* n0);
N_NIMCALL(void, tobitset_340001_452470228)(Tnode292802* s0, Tbitset339004** b0);
N_NIMCALL(Ropeobj178006*, genrawsetdata_549629_839829468)(Tbitset339004* cs0, NI size0);
N_NIMCALL(NimStringDesc*, nsuToHex)(NI64 x0, NI len0);
N_NIMCALL(NI64, bitsettoword_549578_839829468)(Tbitset339004* s0, NI size0);
N_NIMCALL(Ropeobj178006*, genconstseq_559371_839829468)(Tcproc529021* p0, Tnode292802* n0, Ttype292840* t0);
N_NIMCALL(void, appcg_532640_839829468)(Tcgen529027* m0, Tcfilesection529005 s0, NimStringDesc* frmt0, Ropeobj178006** args0, NI args0Len0);
N_NIMCALL(Ropeobj178006*, genconstsimplelist_559299_839829468)(Tcproc529021* p0, Tnode292802* n0);
N_NIMCALL(Ropeobj178006*, gennamedconstexpr_559284_839829468)(Tcproc529021* p0, Tnode292802* n0);
N_NIMCALL(void, accessthreadlocalvar_532945_839829468)(Tcproc529021* p0, Tsym292834* s0);
static N_INLINE(Ropeobj178006**, procsec_529194_3723162438)(Tcproc529021* p0, Tcprocsection529011 s0);
static N_INLINE(NIM_BOOL, isemptytype_297440_850551059)(Ttype292840* t0);
N_NIMCALL(void, putdataintodest_550436_839829468)(Tcproc529021* p0, Tloc292816* d0, Ttype292840* t0, Ropeobj178006* r0);
N_NIMCALL(void, genlinedir_532823_839829468)(Tcproc529021* p0, Tnode292802* t0);
N_NIMCALL(Ropeobj178006*, sourceline_192068_155036129)(Tlineinfo191336 i0);
N_NIMCALL(NIM_BOOL, freshlineinfo_532818_839829468)(Tcproc529021* p0, Tlineinfo191336 info0);
N_NIMCALL(void, genmagicexpr_557033_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, Tmagic292524 op0);
N_NIMCALL(void, genandor_554311_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, Tmagic292524 m0);
N_NIMCALL(Ropeobj178006*, getlabel_539217_839829468)(Tcproc529021* p0);
N_NIMCALL(void, fixlabel_539230_839829468)(Tcproc529021* p0, Ropeobj178006* labl0);
N_NIMCALL(void, unaryarith_552646_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, Tmagic292524 op0);
N_NIMCALL(void, unaryarithoverflow_551633_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, Tmagic292524 m0);
N_NIMCALL(void, binaryfloatarith_556728_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, Tmagic292524 m0);
N_NIMCALL(void, binaryarith_551819_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, Tmagic292524 op0);
N_NIMCALL(void, geneqproc_552214_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0);
N_NIMCALL(void, binaryarithoverflow_551262_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, Tmagic292524 m0);
N_NIMCALL(Ropeobj178006*, binaryarithoverflowraw_551235_839829468)(Tcproc529021* p0, Ttype292840* t0, Tloc292816 a0, Tloc292816 b0, NimStringDesc* frmt0);
N_NIMCALL(Ropeobj178006*, rdcharloc_538227_839829468)(Tloc292816 a0);
N_NIMCALL(NI64, lastord_320004_3876443242)(Ttype292840* t0);
N_NIMCALL(void, genrepr_555339_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0);
N_NIMCALL(Ropeobj178006*, lenfield_539305_839829468)(Tcproc529021* p0);
N_NIMCALL(void, gcusage_554439_839829468)(Tnode292802* n0);
N_NIMCALL(void, message_196095_155036129)(Tlineinfo191336 info0, Tmsgkind191002 msg0, NimStringDesc* arg0);
N_NIMCALL(NimStringDesc*, rendertree_311044_382274130)(Tnode292802* n0, Trenderflag311004Set renderflags0);
N_NIMCALL(void, gengettypeinfo_555383_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0);
N_NIMCALL(void, genswap_555638_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0);
N_NIMCALL(void, unaryexpr_551209_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, NimStringDesc* frmt0);
N_NIMCALL(void, binarystmt_550501_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, NimStringDesc* frmt0);
N_NIMCALL(void, genstrconcat_554452_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0);
N_NIMCALL(void, genstrappend_554554_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0);
N_NIMCALL(void, genseqelemappend_554683_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0);
N_NIMCALL(void, genstrequals_556666_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0);
N_NIMCALL(void, binaryexpr_550549_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, NimStringDesc* frmt0);
N_NIMCALL(void, genisnil_552620_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0);
N_NIMCALL(void, gendollar_555391_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0, NimStringDesc* frmt0);
N_NIMCALL(void, genof_555331_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0);
N_NIMCALL(void, genof_555201_839829468)(Tcproc529021* p0, Tnode292802* x0, Ttype292840* typ0, Tloc292816* d0);
N_NIMCALL(void, globalerror_196071_155036129)(Tlineinfo191336 info0, Tmsgkind191002 msg0, NimStringDesc* arg0);
N_NIMCALL(Ropeobj178006*, genofhelper_555139_839829468)(Tcproc529021* p0, Ttype292840* dest0, Ropeobj178006* a0);
N_NIMCALL(void, gennew_554782_839829468)(Tcproc529021* p0, Tnode292802* e0);
N_NIMCALL(void, rawgennew_554741_839829468)(Tcproc529021* p0, Tloc292816 a0, Ropeobj178006* sizeexpr_554745_839829468);
N_NIMCALL(void, gennewfinalize_555110_839829468)(Tcproc529021* p0, Tnode292802* e0);
N_NIMCALL(void, gennewseq_554824_839829468)(Tcproc529021* p0, Tnode292802* e0);
N_NIMCALL(void, gennewseqaux_554795_839829468)(Tcproc529021* p0, Tloc292816 dest0, Ropeobj178006* length0);
N_NIMCALL(void, gennewseqofcap_554836_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0);
N_NIMCALL(void, gensomecast_556480_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0);
N_NIMCALL(Ropeobj178006*, getclosuretype_535683_839829468)(Tcgen529027* m0, Ttype292840* t0, Tclosuretypekind535679 kind0);
N_NIMCALL(void, genord_556474_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0);
N_NIMCALL(void, unaryexprchar_551222_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, NimStringDesc* frmt0);
N_NIMCALL(void, genarraylen_555415_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, Tmagic292524 op0);
N_NIMCALL(void, unarystmt_550527_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, NimStringDesc* frmt0);
N_NIMCALL(void, gensetlengthstr_555632_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0);
N_NIMCALL(void, gensetlengthseq_555500_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0);
N_NIMCALL(void, gensetop_556419_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, Tmagic292524 op0);
N_NIMCALL(void, binarystmtinexcl_555857_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, NimStringDesc* frmt0);
N_NIMCALL(Ropeobj178006*, rdsetelemloc_555662_839829468)(Tloc292816 a0, Ttype292840* settype0);
N_NIMCALL(void, binaryexprchar_550809_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, NimStringDesc* frmt0);
N_NIMCALL(void, geninop_556009_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0);
N_NIMCALL(NIM_BOOL, fewcmps_555803_839829468)(Tnode292802* s0);
N_NIMCALL(void, geninexpraux_553496_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* a0, Tloc292816* b0, Tloc292816* d0);
N_NIMCALL(void, binaryexprin_555837_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* a0, Tloc292816* b0, Tloc292816* d0, NimStringDesc* frmt0);
N_NIMCALL(void, gencall_543632_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0);
N_NIMCALL(void, genclosurecall_540452_839829468)(Tcproc529021* p0, Tnode292802* le0, Tnode292802* ri0, Tloc292816* d0);
N_NIMCALL(Ropeobj178006*, genarg_539787_839829468)(Tcproc529021* p0, Tnode292802* n_539790_839829468, Tsym292834* param0, Tnode292802* call0);
static N_INLINE(Ropeobj178006*, genargstringtocstring_539776_839829468)(Tcproc529021* p0, Tnode292802* n0);
N_NIMCALL(Ropeobj178006*, openarrayloc_539665_839829468)(Tcproc529021* p0, Tnode292802* n0);
N_NIMCALL(Tnode292802*, skipconv_328882_3876443242)(Tnode292802* n0);
N_NIMCALL(Tmagic292524, getmagic_318502_2616423590)(Tnode292802* op0);
N_NIMCALL(Ropeobj178006*, genargnoparam_539938_839829468)(Tcproc529021* p0, Tnode292802* n0);
N_NIMCALL(Ropeobj178006*, getrawproctype_540459_839829468)(Tcproc529021* p0, Ttype292840* t0);
N_NIMCALL(NIM_BOOL, leftappearsonrightside_539329_839829468)(Tnode292802* le0, Tnode292802* ri0);
N_NIMCALL(Tanalysisresult473003, ispartof_473340_788060399)(Tnode292802* a0, Tnode292802* b0);
static N_INLINE(NIM_BOOL, hasnoinit_539383_839829468)(Tnode292802* call0);
N_NIMCALL(void, resetloc_538350_839829468)(Tcproc529021* p0, Tloc292816* loc0);
N_NIMCALL(Ropeobj178006*, addcomma_540464_839829468)(Ropeobj178006* r0);
N_NIMCALL(void, geninfixcall_541929_839829468)(Tcproc529021* p0, Tnode292802* le0, Tnode292802* ri0, Tloc292816* d0);
N_NIMCALL(NIM_BOOL, contains_110056_4286263276)(NimStringDesc* s0, char136Set chars0);
N_NIMCALL(Ropeobj178006*, genpatterncall_541699_839829468)(Tcproc529021* p0, Tnode292802* ri_541702_839829468, NimStringDesc* pat0, Ttype292840* typ_541704_839829468);
N_NIMCALL(Ropeobj178006*, genotherarg_539277_839829468)(Tcproc529021* p0, Tnode292802* ri0, NI i0, Ttype292840* typ0);
N_NIMCALL(Ropeobj178006*, genthisarg_541475_839829468)(Tcproc529021* p0, Tnode292802* ri_541478_839829468, NI i0, Ttype292840* typ0);
N_NIMCALL(Tnode292802*, skipaddrderef_541433_839829468)(Tnode292802* node0);
N_NIMCALL(void, fixupcall_539410_839829468)(Tcproc529021* p0, Tnode292802* le0, Tnode292802* ri0, Tloc292816* d0, Ropeobj178006* callee0, Ropeobj178006* params0);
N_NIMCALL(void, gennamedparamcall_542616_839829468)(Tcproc529021* p0, Tnode292802* ri0, Tloc292816* d0);
N_NIMCALL(NIM_BOOL, contains_110046_4286263276)(NimStringDesc* s0, NIM_CHAR c0);
N_NIMCALL(void, genprefixcall_539960_839829468)(Tcproc529021* p0, Tnode292802* le0, Tnode292802* ri0, Tloc292816* d0);
static N_INLINE(void, poststmtactions_532942_839829468)(Tcproc529021* p0);
N_NIMCALL(void, genreset_554731_839829468)(Tcproc529021* p0, Tnode292802* n0);
N_NIMCALL(void, genecho_554369_839829468)(Tcproc529021* p0, Tnode292802* n0);
N_NIMCALL(NimStringDesc*, nsuRepeatStr)(NimStringDesc* s0, NI n0);
N_NIMCALL(void, genarrtoseq_555046_839829468)(Tcproc529021* p0, Tnode292802* t0, Tloc292816* d0);
N_NIMCALL(void, genseqconstr_555004_839829468)(Tcproc529021* p0, Tnode292802* t0, Tloc292816* d0);
N_NIMCALL(void, localerror_196080_155036129)(Tlineinfo191336 info0, Tmsgkind191002 msg0, NimStringDesc* arg0);
N_NIMCALL(Tnode292802*, wrapprocforspawn_435501_2218250499)(Tsym292834* owner0, Tnode292802* spawnexpr0, Ttype292840* rettype0, Tnode292802* barrier0, Tnode292802* dest0);
N_NIMCALL(Tnode292802*, liftparallel_478822_1773027539)(Tsym292834* owner0, Tnode292802* n0);
N_NIMCALL(void, gendeepcopy_550374_839829468)(Tcproc529021* p0, Tloc292816 dest0, Tloc292816 src0);
N_NIMCALL(NIM_BOOL, isdeepconstexpr_318566_2616423590)(Tnode292802* n0);
N_NIMCALL(Ropeobj178006*, gensetnode_549664_839829468)(Tcproc529021* p0, Tnode292802* n0);
N_NIMCALL(void, gensetconstr_557496_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0);
N_NIMCALL(NimStringDesc*, nimInt64ToStr)(NI64 x0);
N_NIMCALL(void, exprcomplexconst_558684_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0);
N_NIMCALL(void, genarrayconstr_558207_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0);
N_NIMCALL(NIM_BOOL, handleconstexpr_554853_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0);
N_NIMCALL(void, gentupleconstr_557618_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0);
N_NIMCALL(void, genobjconstr_554903_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0);
N_NIMCALL(Tsym292834*, lookupfieldagain_553153_839829468)(Tcproc529021* p0, Ttype292840* ty_553156_839829468, Tsym292834* field0, Ropeobj178006** r0);
N_NIMCALL(void, genfieldcheck_553504_839829468)(Tcproc529021* p0, Tnode292802* e0, Ropeobj178006* obj0, Tsym292834* field0, Ttype292840* origty0);
N_NIMCALL(Tnode292802*, newstrnode_293678_850551059)(Tnodekind292020 kind0, NimStringDesc* strval0);
N_NIMCALL(void, gencast_556537_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0);
N_NIMCALL(void, genconv_556632_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0);
N_NIMCALL(NIM_BOOL, comparetypes_326214_3876443242)(Ttype292840* x0, Ttype292840* y0, Tdistinctcompare324427 cmp0, Ttypecmpflag324429Set flags0);
N_NIMCALL(void, genaddr_553051_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0);
static N_INLINE(NIM_BOOL, iscppref_552807_839829468)(Tcproc529021* p0, Ttype292840* typ0);
N_NIMCALL(void, genbracketexpr_554277_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0);
N_NIMCALL(void, genarrayelem_554093_839829468)(Tcproc529021* p0, Tnode292802* x0, Tnode292802* y0, Tloc292816* d0);
N_NIMCALL(NIM_BOOL, isconstexpr_318510_2616423590)(Tnode292802* n0);
N_NIMCALL(void, genopenarrayelem_554169_839829468)(Tcproc529021* p0, Tnode292802* x0, Tnode292802* y0, Tloc292816* d0);
N_NIMCALL(void, genseqelem_554205_839829468)(Tcproc529021* p0, Tnode292802* x0, Tnode292802* y0, Tloc292816* d0);
N_NIMCALL(void, gencstringelem_554144_839829468)(Tcproc529021* p0, Tnode292802* x0, Tnode292802* y0, Tloc292816* d0);
N_NIMCALL(void, gentupleelem_553124_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0);
N_NIMCALL(void, genderef_543921_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, NIM_BOOL enforcederef0);
N_NIMCALL(void, genrecordfield_553448_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0);
N_NIMCALL(Ttype292840*, genrecordfieldaux_553096_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, Tloc292816* a0);
N_NIMCALL(void, gencheckedrecordfield_554046_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0);
N_NIMCALL(void, genblock_546083_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0);
N_NIMCALL(NI, startblock_543978_839829468)(Tcproc529021* p0, NimStringDesc* start0, Ropeobj178006** args0, NI args0Len0);
N_NIMCALL(void, endblock_544060_839829468)(Tcproc529021* p0);
N_NIMCALL(void, endblock_544035_839829468)(Tcproc529021* p0, Ropeobj178006* blockend0);
N_NIMCALL(Ropeobj178006*, blockbody_544025_839829468)(Tblock529019* b0);
N_NIMCALL(void, genstmtlistexpr_558402_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0);
N_NIMCALL(void, genif_544982_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0);
N_NIMCALL(void, downconv_558581_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0);
N_NIMCALL(NI, inheritancediff_326252_3876443242)(Ttype292840* a0, Ttype292840* b0);
N_NIMCALL(void, upconv_558431_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0);
N_NIMCALL(void, genrangechck_556590_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0, NimStringDesc* magic0);
N_NIMCALL(void, convstrtocstr_556642_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0);
N_NIMCALL(void, convcstrtostr_556654_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0);
N_NIMCALL(void, genclosure_557836_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0);
static N_INLINE(NIM_BOOL, isconstclosure_557810_839829468)(Tnode292802* n0);
static N_INLINE(NIM_BOOL, isroutine_297323_850551059)(Tsym292834* s0);
N_NIMCALL(void, genwhilestmt_545984_839829468)(Tcproc529021* p0, Tnode292802* t0);
static N_INLINE(Ropeobj178006*, assignlabel_544020_839829468)(Tblock529019* b0);
N_NIMCALL(NIM_BOOL, stmtscontainpragma_528083_2036603609)(Tnode292802* n0, Tspecialword275003 w0);
N_NIMCALL(void, gencomputedgoto_545744_839829468)(Tcproc529021* p0, Tnode292802* n0);
N_NIMCALL(void, genvarstmt_544854_839829468)(Tcproc529021* p0, Tnode292802* n0);
N_NIMCALL(void, gensinglevar_544276_839829468)(Tcproc529021* p0, Tnode292802* a0);
N_NIMCALL(void, gengotovar_544258_839829468)(Tcproc529021* p0, Tnode292802* value0);
N_NIMCALL(void, assignglobalvar_538819_839829468)(Tcproc529021* p0, Tsym292834* s0);
N_NIMCALL(void, varindynamiclib_538812_839829468)(Tcgen529027* m0, Tsym292834* sym0);
N_NIMCALL(void, registergcroot_543762_839829468)(Tcproc529021* p0, Tsym292834* v0);
N_NIMCALL(Ropeobj178006*, gentraverseprocforglobal_538032_839829468)(Tcgen529027* m0, Tsym292834* s0);
static N_INLINE(NIM_BOOL, isassignedimmediately_543781_839829468)(Tnode292802* n0);
N_NIMCALL(NIM_BOOL, containshiddenpointer_320120_3876443242)(Ttype292840* typ0);
static N_INLINE(void, loadinto_543928_839829468)(Tcproc529021* p0, Tnode292802* le0, Tnode292802* ri0, Tloc292816* a0);
N_NIMCALL(void, genasgncall_543695_839829468)(Tcproc529021* p0, Tnode292802* le0, Tnode292802* ri0, Tloc292816* d0);
N_NIMCALL(void, genclosurevar_544832_839829468)(Tcproc529021* p0, Tnode292802* a0);
N_NIMCALL(void, genvartuple_543794_839829468)(Tcproc529021* p0, Tnode292802* n0);
N_NIMCALL(Tnode292802*, lowertupleunpacking_433037_2218250499)(Tnode292802* n0, Tsym292834* owner0);
N_NIMCALL(void, genconststmt_544909_839829468)(Tcproc529021* p0, Tnode292802* t0);
N_NIMCALL(NIM_BOOL, containscompiletimeonly_328721_3876443242)(Ttype292840* t0);
static N_INLINE(NIM_BOOL, emitlazily_532248_839829468)(Tsym292834* s0);
N_NIMCALL(void, gencase_547826_839829468)(Tcproc529021* p0, Tnode292802* t0, Tloc292816* d0);
N_NIMCALL(void, genstringcase_547416_839829468)(Tcproc529021* p0, Tnode292802* t0, Tloc292816* d0);
N_NIMCALL(NI, nextpoweroftwo_101629_1009420244)(NI x0);
N_NIMCALL(void, gencasestringbranch_547100_839829468)(Tcproc529021* p0, Tnode292802* b0, Tloc292816 e0, Ropeobj178006* labl0, Ropeobj178006** branches0, NI branches0Len0);
N_NIMCALL(NI64, hashstring_528100_2036603609)(NimStringDesc* s0);
N_NIMCALL(Ropeobj178006*, gencasesecondpass_546965_839829468)(Tcproc529021* p0, Tnode292802* t0, Tloc292816* d0, NI labid0, NI until0);
N_NIMCALL(void, exprblock_544103_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0);
N_NIMCALL(void, gencasegeneric_547087_839829468)(Tcproc529021* p0, Tnode292802* t0, Tloc292816* d0, NimStringDesc* rangeformat0, NimStringDesc* eqformat0);
N_NIMCALL(Ropeobj178006*, genifforcaseuntil_547021_839829468)(Tcproc529021* p0, Tnode292802* t0, Tloc292816* d0, NimStringDesc* rangeformat0, NimStringDesc* eqformat0, NI until0, Tloc292816 a0);
N_NIMCALL(void, gencasegenericbranch_546910_839829468)(Tcproc529021* p0, Tnode292802* b0, Tloc292816 e0, NimStringDesc* rangeformat0, NimStringDesc* eqformat0, Ropeobj178006* labl0);
N_NIMCALL(void, gengotoforcase_545673_839829468)(Tcproc529021* p0, Tnode292802* casestmt0);
N_NIMCALL(void, genordinalcase_547724_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0);
N_NIMCALL(NI, ifswitchsplitpoint_547615_839829468)(Tcproc529021* p0, Tnode292802* n0);
N_NIMCALL(NIM_BOOL, branchhastoobigrange_547575_839829468)(Tnode292802* b0);
N_NIMCALL(void, genreturnstmt_545617_839829468)(Tcproc529021* p0, Tnode292802* t0);
N_NIMCALL(void, blockleaveactions_545442_839829468)(Tcproc529021* p0, NI howmanytrys0, NI howmanyexcepts0);
static N_INLINE(Tnode292802*, pop_318246_1689653243)(Tnodeseq292796** s0);
N_NIMCALL(void, genbreakstmt_546444_839829468)(Tcproc529021* p0, Tnode292802* t0);
N_NIMCALL(void, genasgn_549239_839829468)(Tcproc529021* p0, Tnode292802* e0, NIM_BOOL fastasgn0);
N_NIMCALL(NIM_BOOL, fielddiscriminantcheckneeded_549080_839829468)(Tcproc529021* p0, Tnode292802* asgn0);
N_NIMCALL(void, asgnfielddiscriminant_549209_839829468)(Tcproc529021* p0, Tnode292802* e0);
N_NIMCALL(void, gendiscriminantcheck_549144_839829468)(Tcproc529021* p0, Tloc292816 a0, Tloc292816 tmp0, Ttype292840* objtype0, Tsym292834* field0);
N_NIMCALL(Ropeobj178006*, discriminatortabledecl_536094_839829468)(Tcgen529027* m0, Ttype292840* objtype0, Tsym292834* d0);
N_NIMCALL(void, genasmstmt_548659_839829468)(Tcproc529021* p0, Tnode292802* t0);
N_NIMCALL(Ropeobj178006*, genasmoremitstmt_548529_839829468)(Tcproc529021* p0, Tnode292802* t0, NIM_BOOL isasmstmt0);
N_NIMCALL(NimStringDesc*, resizeString)(NimStringDesc* dest0, NI addlen0);
N_NIMCALL(void, gentrycpp_547865_839829468)(Tcproc529021* p0, Tnode292802* t0, Tloc292816* d0);
static N_INLINE(void, gensimpleblock_544095_839829468)(Tcproc529021* p0, Tnode292802* stmts0);
N_NIMCALL(void, gentry_548114_839829468)(Tcproc529021* p0, Tnode292802* t0, Tloc292816* d0);
N_NIMCALL(NIM_BOOL, isdefined_200011_1967573533)(NimStringDesc* symbol0);
N_NIMCALL(void, line_532695_839829468)(Tcproc529021* p0, Tcprocsection529011 s0, NimStringDesc* r0);
static N_INLINE(Ropeobj178006*, pop_178530_1689653243)(TY191350** s0);
N_NIMCALL(void, genraisestmt_546828_839829468)(Tcproc529021* p0, Tnode292802* t0);
N_NIMCALL(NimStringDesc*, getraisefrmt_546824_839829468)(Tcproc529021* p0);
N_NIMCALL(void, gentypesection_538184_839829468)(Tcgen529027* m0, Tnode292802* n0);
N_NIMCALL(void, genpragma_549039_839829468)(Tcproc529021* p_549041_839829468, Tnode292802* n0);
N_NIMCALL(Tspecialword275003, whichpragma_318911_2616423590)(Tnode292802* n0);
N_NIMCALL(void, genemit_548839_839829468)(Tcproc529021* p0, Tnode292802* t0);
N_NIMCALL(Tcfilesection529005, determinesection_548819_839829468)(Tnode292802* n0);
N_NIMCALL(NIM_BOOL, nsuStartsWith)(NimStringDesc* s0, NimStringDesc* prefix0);
N_NIMCALL(void, genbreakpoint_548862_839829468)(Tcproc529021* p0, Tnode292802* t0);
N_NIMCALL(void, genwatchpoint_549016_839829468)(Tcproc529021* p0, Tnode292802* n0);
N_NIMCALL(Tsym292834*, skipgenericowner_297279_850551059)(Tsym292834* s0);
N_NIMCALL(void, genparforstmt_546208_839829468)(Tcproc529021* p0, Tnode292802* t0);
N_NIMCALL(void, genstate_544117_839829468)(Tcproc529021* p0, Tnode292802* n0);
N_NIMCALL(void, gengotostate_544144_839829468)(Tcproc529021* p0, Tnode292802* n0);
N_NIMCALL(void, genbreakstate_544229_839829468)(Tcproc529021* p0, Tnode292802* n0);
N_NIMCALL(void, registermoduletomain_562243_839829468)(Tsym292834* m0);
N_NIMCALL(Ropeobj178006*, getinitname_562235_839829468)(Tsym292834* m0);
N_NIMCALL(Ropeobj178006*, getsomeinitname_561904_839829468)(Tsym292834* m0, NimStringDesc* suffix0);
N_NIMCALL(Ropeobj178006*, getdatinitname_562239_839829468)(Tsym292834* m0);
N_NIMCALL(Tnode292802*, generatemethoddispatchers_432151_3853300031)(void);
N_NIMCALL(void, genmainproc_561729_839829468)(Tcgen529027* m0);
N_NIMCALL(Ropeobj178006*, genfilenames_561688_839829468)(Tcgen529027* m0);
N_NIMCALL(void, finishmodule_563420_839829468)(Tcgen529027* m0);
N_NIMCALL(void, updatecachedmodule_563813_839829468)(Tcgen529027* m0);
N_NIMCALL(NIM_BOOL, mergerequired_530832_2760143328)(Tcgen529027* m0);
N_NIMCALL(void, mergefiles_531241_2760143328)(NimStringDesc* cfilename0, Tcgen529027* m0);
N_NIMCALL(void, geninitcode_562286_839829468)(Tcgen529027* m0);
N_NIMCALL(Ropeobj178006*, gensectionstart_530081_2760143328)(Tcprocsection529011 ps0);
N_NIMCALL(Ropeobj178006*, gensectionend_530116_2760143328)(Tcprocsection529011 ps0);
N_NIMCALL(Ropeobj178006*, gensectionstart_530015_2760143328)(Tcfilesection529005 fs0);
N_NIMCALL(Ropeobj178006*, gensectionend_530050_2760143328)(Tcfilesection529005 fs0);
N_NIMCALL(void, finishtypedescriptions_535842_839829468)(Tcgen529027* m0);
N_NIMCALL(Ropeobj178006*, genmodule_562491_839829468)(Tcgen529027* m0, NimStringDesc* cfile0);
N_NIMCALL(Ropeobj178006*, getfileheader_561683_839829468)(NimStringDesc* cfile0);
N_NIMCALL(Ropeobj178006*, getcopyright_561665_839829468)(NimStringDesc* cfile0);
N_NIMCALL(NimStringDesc*, getcompilecfilecmd_274284_2528170400)(NimStringDesc* cfilename0, NIM_BOOL isexternal0);
static N_INLINE(void, addinttypes_561659_839829468)(Ropeobj178006** result0);
N_NIMCALL(Ropeobj178006*, genmergeinfo_530203_2760143328)(Tcgen529027* m0);
N_NIMCALL(void, generatethreadlocalstorage_538717_839829468)(Tcgen529027* m0);
N_NIMCALL(void, generateheaders_560104_839829468)(Tcgen529027* m0);
N_NIMCALL(NimStringDesc*, nsuReplaceChar)(NimStringDesc* s0, NIM_CHAR sub0, NIM_CHAR by0);
N_NIMCALL(void, writerope_178836_2381377266)(Ropeobj178006* head0, NimStringDesc* filename0, NIM_BOOL usewarning0);
N_NIMCALL(void, addfiletocompile_273863_2528170400)(NimStringDesc* filename0);
N_NIMCALL(void, addfiletolink_273872_2528170400)(NimStringDesc* filename0);
N_NIMCALL(void, writemodule_563637_839829468)(Tcgen529027* m0, NIM_BOOL pending0);
N_NIMCALL(void, generatethreadvarssize_538771_839829468)(Tcgen529027* m0);
N_NIMCALL(NIM_BOOL, shouldrecompile_563621_839829468)(Ropeobj178006* code0, NimStringDesc* cfile0);
N_NIMCALL(NimStringDesc*, toobjfile_273859_2528170400)(NimStringDesc* filename0);
N_NIMCALL(NIM_BOOL, writeropeifnotequal_179511_2381377266)(Ropeobj178006* r0, NimStringDesc* filename0);
N_NIMCALL(NIM_BOOL, nosexistsFile)(NimStringDesc* filename0);
N_NIMCALL(NIM_BOOL, nosfileNewer)(NimStringDesc* a0, NimStringDesc* b0);
N_NIMCALL(void, writemapping_274789_2528170400)(Ropeobj178006* gsymbolmapping0);
N_NIMCALL(void, writeheader_563152_839829468)(Tcgen529027* m0);
N_NIMCALL(void, nossplitFile)(NimStringDesc* path0, TY128506* Result);
N_NIMCALL(void, resetmodule_562763_839829468)(Tcgen529027* m0);
N_NIMCALL(void, nullify_562833_839829468)(Ropeobj178006** arr0);
N_NIMCALL(void, nullify_562858_839829468)(Ropeobj178006** arr0);
STRING_LITERAL(T839829468_4, "\011", 1);
STRING_LITERAL(T839829468_10, "compiler/cgen.nim", 17);
NIM_CONST TY203018 T839829468_9 = {((NimStringDesc*) &T839829468_10),
((NI) 1158)}
;
STRING_LITERAL(T839829468_11, "T", 1);
STRING_LITERAL(T839829468_12, "_", 1);
STRING_LITERAL(T839829468_13, "added pending module twice: ", 28);
STRING_LITERAL(T839829468_14, ".h", 2);
STRING_LITERAL(T839829468_15, ".cpp", 4);
STRING_LITERAL(T839829468_16, ".m", 2);
STRING_LITERAL(T839829468_17, ".c", 2);
STRING_LITERAL(T839829468_18, "0", 1);
STRING_LITERAL(T839829468_19, "$", 1);
STRING_LITERAL(T839829468_20, "ropes: invalid format string $", 30);
STRING_LITERAL(T839829468_21, "$N#line $2 $1$N", 15);
STRING_LITERAL(T839829468_22, "N_LIB_IMPORT ", 13);
STRING_LITERAL(T839829468_23, "N_LIB_EXPORT ", 13);
STRING_LITERAL(T839829468_24, "static ", 7);
STRING_LITERAL(T839829468_25, "mapType", 7);
STRING_LITERAL(T839829468_26, "void", 4);
STRING_LITERAL(T839829468_27, "getTypeDescAux: t == nil", 24);
STRING_LITERAL(T839829468_28, "TY", 2);
STRING_LITERAL(T839829468_29, "getTypeName: ", 13);
STRING_LITERAL(T839829468_30, "void*", 5);
STRING_LITERAL(T839829468_31, "NimStringDesc", 13);
STRING_LITERAL(T839829468_32, "NimStringDesc*", 14);
STRING_LITERAL(T839829468_33, "NCSTRING", 8);
STRING_LITERAL(T839829468_34, "NIM_BOOL", 8);
STRING_LITERAL(T839829468_35, "NIM_CHAR", 8);
STRING_LITERAL(T839829468_36, "NI", 2);
STRING_LITERAL(T839829468_37, "NI8", 3);
STRING_LITERAL(T839829468_38, "NI16", 4);
STRING_LITERAL(T839829468_39, "NI32", 4);
STRING_LITERAL(T839829468_40, "NI64", 4);
STRING_LITERAL(T839829468_41, "NF", 2);
STRING_LITERAL(T839829468_42, "NF32", 4);
STRING_LITERAL(T839829468_43, "NF64", 4);
STRING_LITERAL(T839829468_44, "NF128", 5);
STRING_LITERAL(T839829468_45, "NU", 2);
STRING_LITERAL(T839829468_46, "NU8", 3);
STRING_LITERAL(T839829468_47, "NU16", 4);
STRING_LITERAL(T839829468_48, "NU32", 4);
STRING_LITERAL(T839829468_49, "NU64", 4);
NIM_CONST TY533943 Numericaltypetostr_533941_839829468 = {((NimStringDesc*) &T839829468_36),
((NimStringDesc*) &T839829468_37),
((NimStringDesc*) &T839829468_38),
((NimStringDesc*) &T839829468_39),
((NimStringDesc*) &T839829468_40),
((NimStringDesc*) &T839829468_41),
((NimStringDesc*) &T839829468_42),
((NimStringDesc*) &T839829468_43),
((NimStringDesc*) &T839829468_44),
((NimStringDesc*) &T839829468_45),
((NimStringDesc*) &T839829468_46),
((NimStringDesc*) &T839829468_47),
((NimStringDesc*) &T839829468_48),
((NimStringDesc*) &T839829468_49)}
;
STRING_LITERAL(T839829468_50, "tyStatic for getSimpleTypeDesc", 30);
STRING_LITERAL(T839829468_51, "cannot generate C type for: ", 28);
STRING_LITERAL(T839829468_52, "&", 1);
STRING_LITERAL(T839829468_53, "*", 1);
STRING_LITERAL(T839829468_54, "$1 $2;$n", 8);
STRING_LITERAL(T839829468_55, "typedef $1 $2 $2;$n", 19);
STRING_LITERAL(T839829468_56, "union", 5);
STRING_LITERAL(T839829468_57, "struct", 6);
STRING_LITERAL(T839829468_58, "getTypeForward(", 15);
STRING_LITERAL(T839829468_59, "typedef NI32 $1;$n", 18);
STRING_LITERAL(T839829468_60, "typedef NU8 $1;$n", 17);
STRING_LITERAL(T839829468_61, "typedef NU16 $1;$n", 18);
STRING_LITERAL(T839829468_62, "typedef NI64 $1;$n", 18);
STRING_LITERAL(T839829468_63, "getTypeDescAux: enum", 20);
STRING_LITERAL(T839829468_64, "typedef $1_PTR($2, $3) $4;$n", 28);
STRING_LITERAL(T839829468_65, "N_NIMCALL", 9);
STRING_LITERAL(T839829468_66, "N_STDCALL", 9);
STRING_LITERAL(T839829468_67, "N_CDECL", 7);
STRING_LITERAL(T839829468_68, "N_SAFECALL", 10);
STRING_LITERAL(T839829468_69, "N_SYSCALL", 9);
STRING_LITERAL(T839829468_70, "N_INLINE", 8);
STRING_LITERAL(T839829468_71, "N_NOINLINE", 10);
STRING_LITERAL(T839829468_72, "N_FASTCALL", 10);
STRING_LITERAL(T839829468_73, "N_CLOSURE", 9);
STRING_LITERAL(T839829468_74, "N_NOCONV", 8);
NIM_CONST TY292016 Callingconvtostr_533585_839829468 = {((NimStringDesc*) &T839829468_65),
((NimStringDesc*) &T839829468_66),
((NimStringDesc*) &T839829468_67),
((NimStringDesc*) &T839829468_68),
((NimStringDesc*) &T839829468_69),
((NimStringDesc*) &T839829468_70),
((NimStringDesc*) &T839829468_71),
((NimStringDesc*) &T839829468_72),
((NimStringDesc*) &T839829468_73),
((NimStringDesc*) &T839829468_74)}
;
STRING_LITERAL(T839829468_75, "typedef struct {$nN_NIMCALL_PTR($2, ClPrc) $3;$nvoid* ClEnv;$n}"
" $1;$n", 69);
STRING_LITERAL(T839829468_76, "struct $2 : #TGenericSeq {$n", 28);
STRING_LITERAL(T839829468_77, "struct $2 {$n #TGenericSeq Sup;$n", 34);
STRING_LITERAL(T839829468_78, " $1 data[SEQ_DECL_SIZE];$n};$n", 31);
STRING_LITERAL(T839829468_79, "TGenericSeq", 11);
STRING_LITERAL(T839829468_80, "typedef $1 $2[$3];$n", 20);
STRING_LITERAL(T839829468_81, "invalid apostrophe type parameter index", 39);
STRING_LITERAL(T839829468_82, "<", 1);
STRING_LITERAL(T839829468_83, " COMMA ", 7);
STRING_LITERAL(T839829468_84, "> ", 2);
extern NIM_CONST TY273427 Cc_273413_2528170400;
STRING_LITERAL(T839829468_85, " {$n", 4);
STRING_LITERAL(T839829468_86, " {$n#TNimType* m_type;$n", 24);
STRING_LITERAL(T839829468_87, " : public $1 {$n", 16);
STRING_LITERAL(T839829468_88, " {$n $1 Sup;$n", 15);
STRING_LITERAL(T839829468_89, "genRecordFieldsAux", 18);
STRING_LITERAL(T839829468_90, "$1.$2", 5);
STRING_LITERAL(T839829468_91, "S", 1);
STRING_LITERAL(T839829468_92, "struct {", 8);
STRING_LITERAL(T839829468_93, "} $1;$n", 7);
STRING_LITERAL(T839829468_94, "genRecordFieldsAux(record case branch)", 38);
STRING_LITERAL(T839829468_95, "union{$n$1} $2;$n", 17);
STRING_LITERAL(T839829468_96, "mangleRecFieldName", 18);
STRING_LITERAL(T839829468_97, "$1 $2[SEQ_DECL_SIZE];$n", 23);
STRING_LITERAL(T839829468_98, "$1 $2:$3;$n", 11);
STRING_LITERAL(T839829468_99, "genRecordFieldsAux()", 20);
STRING_LITERAL(T839829468_100, "char dummy;$n", 13);
STRING_LITERAL(T839829468_101, "};", 2);
STRING_LITERAL(T839829468_102, "$1 $2 {$n", 9);
STRING_LITERAL(T839829468_103, "$1 Field$2;$n", 13);
STRING_LITERAL(T839829468_104, "char dummy;", 11);
STRING_LITERAL(T839829468_105, "Set", 3);
STRING_LITERAL(T839829468_106, "typedef NU$2 $1;$n", 18);
STRING_LITERAL(T839829468_107, "typedef NU8 $1[$2];$n", 21);
STRING_LITERAL(T839829468_108, "getTypeDescAux(", 15);
STRING_LITERAL(T839829468_109, "genProcParams", 13);
STRING_LITERAL(T839829468_110, ", ", 2);
STRING_LITERAL(T839829468_111, " ", 1);
STRING_LITERAL(T839829468_112, ", NI $1Len$2", 12);
STRING_LITERAL(T839829468_113, " Result", 7);
STRING_LITERAL(T839829468_114, "void* ClEnv", 11);
STRING_LITERAL(T839829468_115, "...", 3);
STRING_LITERAL(T839829468_116, "void)", 5);
STRING_LITERAL(T839829468_117, ")", 1);
STRING_LITERAL(T839829468_118, "(", 1);
STRING_LITERAL(T839829468_119, "$1($2, $3)$4", 12);
STRING_LITERAL(T839829468_120, "proc has no result symbol", 25);
STRING_LITERAL(T839829468_121, " register", 9);
STRING_LITERAL(T839829468_122, " volatile", 9);
STRING_LITERAL(T839829468_123, "$1 = $2;$n", 10);
STRING_LITERAL(T839829468_124, "(*$1)", 5);
STRING_LITERAL(T839829468_125, ";", 1);
STRING_LITERAL(T839829468_126, "FR.s[$1].address = (void*)$3; FR.s[$1].typ = $4; FR.s[$1].name "
"= $2;$n", 70);
STRING_LITERAL(T839829468_127, "NTI$1", 5);
STRING_LITERAL(T839829468_128, "(&", 2);
STRING_LITERAL(T839829468_129, "TNimType", 8);
STRING_LITERAL(T839829468_130, "TNimNode", 8);
STRING_LITERAL(T839829468_131, "extern TNimType $1; /* $2 */$n", 30);
STRING_LITERAL(T839829468_132, "0", 1);
STRING_LITERAL(T839829468_133, "void*", 5);
STRING_LITERAL(T839829468_134, "$1.size = sizeof($2);$n$1.kind = $3;$n$1.base = $4;$n", 53);
STRING_LITERAL(T839829468_135, "$1.flags = $2;$n", 16);
STRING_LITERAL(T839829468_136, "TNimType $1; /* $2 */$n", 23);
STRING_LITERAL(T839829468_137, "genTypeInfo(", 12);
STRING_LITERAL(T839829468_138, "$1[$2]", 6);
STRING_LITERAL(T839829468_139, "static TNimNode* $1[$2];$n", 26);
STRING_LITERAL(T839829468_140, "$1[$2] = &$3;$n", 15);
STRING_LITERAL(T839829468_141, "$1.kind = 1;$n$1.offset = offsetof($2, Field$3);$n$1.typ = $4;$"
"n$1.name = \"Field$3\";$n", 86);
STRING_LITERAL(T839829468_142, "$1.len = $2; $1.kind = 2; $1.sons = &$3[0];$n", 45);
STRING_LITERAL(T839829468_143, "$1.len = $2; $1.kind = 2;$n", 27);
STRING_LITERAL(T839829468_144, "$1.node = &$2;$n", 16);
STRING_LITERAL(T839829468_145, "#nimGCvisit((void*)$1, op);$n", 29);
STRING_LITERAL(T839829468_146, "N_NIMCALL(void, $1)(void* p, NI op)", 35);
STRING_LITERAL(T839829468_147, "$1 a;$n", 7);
STRING_LITERAL(T839829468_148, "a = ($1)p;$n", 12);
STRING_LITERAL(T839829468_149, "LOC", 3);
STRING_LITERAL(T839829468_150, "$1 = ($2)0;$n", 13);
STRING_LITERAL(T839829468_151, "<string.h>", 10);
STRING_LITERAL(T839829468_152, "memset((void*)$1, 0, sizeof($2));$n", 35);
STRING_LITERAL(T839829468_153, ".Sup", 4);
STRING_LITERAL(T839829468_154, "$1.m_type = $2;$n", 17);
STRING_LITERAL(T839829468_155, "#objectInit($1, $2);$n", 22);
STRING_LITERAL(T839829468_156, "for ($1 = 0; $1 < $2->$3; $1++) {$n", 35);
STRING_LITERAL(T839829468_157, "len", 3);
STRING_LITERAL(T839829468_158, "Sup.len", 7);
STRING_LITERAL(T839829468_159, "for ($1 = 0; $1 < $2; $1++) {$n", 31);
STRING_LITERAL(T839829468_160, "}$n", 3);
STRING_LITERAL(T839829468_161, "$1.Sup", 6);
STRING_LITERAL(T839829468_162, "genTraverseProc", 15);
STRING_LITERAL(T839829468_163, "switch ($1.$2) {$n", 18);
STRING_LITERAL(T839829468_164, "case $1 ... $2:$n", 17);
STRING_LITERAL(T839829468_165, "genLiteral: ty is nil", 21);
STRING_LITERAL(T839829468_166, "(-2147483647 -1)", 16);
STRING_LITERAL(T839829468_167, "IL64($1)", 8);
STRING_LITERAL(T839829468_168, "(IL64(-9223372036854775807) - IL64(1))", 38);
STRING_LITERAL(T839829468_169, "NIM_TRUE", 8);
STRING_LITERAL(T839829468_170, "NIM_FALSE", 9);
STRING_LITERAL(T839829468_171, "ULL", 3);
STRING_LITERAL(T839829468_172, "(($1) $2)", 9);
STRING_LITERAL(T839829468_173, "static NIM_CONST $1 $2 = {NIM_NIL,NIM_NIL};$n", 45);
STRING_LITERAL(T839829468_174, "NIM_NIL", 7);
STRING_LITERAL(T839829468_175, "((#NimStringDesc*) NIM_NIL)", 27);
STRING_LITERAL(T839829468_176, "((#NimStringDesc*) &$1)", 23);
STRING_LITERAL(T839829468_177, "STRING_LITERAL($1, $2, $3);$n", 29);
STRING_LITERAL(T839829468_178, "((#NimStringDesc*) &$1$2)", 25);
STRING_LITERAL(T839829468_179, "genLiteral(", 11);
STRING_LITERAL(T839829468_180, "case $1:$n", 10);
STRING_LITERAL(T839829468_181, "default:$n", 10);
STRING_LITERAL(T839829468_182, "break;$n", 8);
STRING_LITERAL(T839829468_183, "} $n", 4);
STRING_LITERAL(T839829468_184, "genTraverseProc()", 17);
STRING_LITERAL(T839829468_185, "$1.Field$2", 10);
STRING_LITERAL(T839829468_186, "$1.ClEnv", 8);
STRING_LITERAL(T839829468_187, "$1->data[$2]", 12);
STRING_LITERAL(T839829468_188, "a", 1);
STRING_LITERAL(T839829468_189, "(*a)", 4);
STRING_LITERAL(T839829468_190, "$1 {$n$2$3$4}$n", 15);
STRING_LITERAL(T839829468_191, "$1;$n", 5);
STRING_LITERAL(T839829468_192, "$1.marker = $2;$n", 17);
STRING_LITERAL(T839829468_193, "$1.len = $2; $1.kind = 0;$n$3.node = &$1;$n", 43);
STRING_LITERAL(T839829468_194, "$1.offset = $2;$n", 17);
STRING_LITERAL(T839829468_195, "NI $1;$n", 8);
STRING_LITERAL(T839829468_196, "static char* NIM_CONST $1[$2] = {$n$3};$n", 41);
STRING_LITERAL(T839829468_197, "for ($1 = 0; $1 < $2; $1++) {$n$3[$1+$4].kind = 1;$n$3[$1+$4].o"
"ffset = $1;$n$3[$1+$4].name = $5[$1];$n$6[$1] = &$3[$1+$4];$n}$n", 127);
STRING_LITERAL(T839829468_198, "$1.len = $2; $1.kind = 2; $1.sons = &$3[0];$n$4.node = &$1;$n", 61);
STRING_LITERAL(T839829468_199, "$1.flags = 1<<2;$n", 18);
STRING_LITERAL(T839829468_200, "anonymous obj with discriminator", 32);
STRING_LITERAL(T839829468_201, "NimDT_$1_$2", 11);
STRING_LITERAL(T839829468_202, "$1.kind = 3;$n$1.offset = offsetof($2, $3);$n$1.typ = $4;$n$1.n"
"ame = $5;$n$1.sons = &$6[0];$n$1.len = $7;$n", 107);
STRING_LITERAL(T839829468_203, "TNimNode* $1[$2];$n", 19);
STRING_LITERAL(T839829468_204, "genObjectFields; nkOfBranch broken", 34);
STRING_LITERAL(T839829468_205, "genObjectFields(nkRecCase)", 26);
STRING_LITERAL(T839829468_206, "$1.kind = 1;$n$1.offset = offsetof($2, $3);$n$1.typ = $4;$n$1.n"
"ame = $5;$n", 74);
STRING_LITERAL(T839829468_207, "genObjectFields", 15);
STRING_LITERAL(T839829468_208, "$1.deepcopy =(void* (N_RAW_NIMCALL*)(void*))$2;$n", 49);
STRING_LITERAL(T839829468_209, "\011return $1;$n", 13);
STRING_LITERAL(T839829468_210, "Result", 6);
STRING_LITERAL(T839829468_211, "closure generation failed", 25);
STRING_LITERAL(T839829468_212, "$1 = ($2) ClEnv;$n", 18);
STRING_LITERAL(T839829468_213, "__declspec(noreturn) ", 21);
STRING_LITERAL(T839829468_214, "__declspec(naked) ", 18);
STRING_LITERAL(T839829468_215, "$N$1 {$n$2$3$4}$N$N", 19);
STRING_LITERAL(T839829468_216, "$N$1 {$N", 8);
STRING_LITERAL(T839829468_217, "struct {$1} GCFRAME;$n", 22);
STRING_LITERAL(T839829468_218, "nimFrame", 8);
STRING_LITERAL(T839829468_219, "VarSlot", 7);
STRING_LITERAL(T839829468_220, "\011nimfrs($1, $2, $3, $4)$N", 25);
STRING_LITERAL(T839829468_221, "\011nimfr($1, $2)$N", 16);
STRING_LITERAL(T839829468_222, "\011#nimProfile();$n", 17);
STRING_LITERAL(T839829468_223, "{", 1);
STRING_LITERAL(T839829468_224, "\011}BeforeRet: ;$n", 16);
STRING_LITERAL(T839829468_225, "if (((NU)&GCFRAME) < 4096) #nimGCFrame(&GCFRAME);$n", 51);
STRING_LITERAL(T839829468_226, "\011#popFrame();$n", 15);
STRING_LITERAL(T839829468_227, "}$N", 3);
STRING_LITERAL(T839829468_228, "static void* $1;$n", 18);
STRING_LITERAL(T839829468_229, "||", 2);
STRING_LITERAL(T839829468_230, "($1 = #nimLoadLibrary((#NimStringDesc*) &$2))$n", 47);
STRING_LITERAL(T839829468_231, "if (!($1)) #nimLoadLibraryError((#NimStringDesc*) &$2);$n", 57);
STRING_LITERAL(T839829468_232, "if (!($1 = #nimLoadLibrary($2))) #nimLoadLibraryError($2);$n", 60);
STRING_LITERAL(T839829468_233, "loadDynamicLib", 14);
STRING_LITERAL(T839829468_234, "Dl_$1", 5);
STRING_LITERAL(T839829468_235, "\011$1 = ($2) ($3$4));$n", 21);
NIM_CONST TY203018 T839829468_236 = {((NimStringDesc*) &T839829468_10),
((NI) 535)}
;
STRING_LITERAL(T839829468_237, "wrong index: ", 13);
STRING_LITERAL(T839829468_238, "\011$1 = ($2) #nimGetProcAddr($3, $4);$n", 37);
STRING_LITERAL(T839829468_239, "$2 $1;$n", 8);
STRING_LITERAL(T839829468_240, "extern ", 7);
STRING_LITERAL(T839829468_241, "NIM_THREADVAR ", 14);
STRING_LITERAL(T839829468_242, " $1;$n", 6);
STRING_LITERAL(T839829468_243, "cgsym: ", 7);
STRING_LITERAL(T839829468_244, ": ", 2);
STRING_LITERAL(T839829468_245, "extern $1 $2;$n", 15);
STRING_LITERAL(T839829468_246, "extern \"C\" ", 11);
STRING_LITERAL(T839829468_247, " __attribute__((naked))", 23);
STRING_LITERAL(T839829468_248, " __attribute__((noreturn))", 26);
STRING_LITERAL(T839829468_249, "#asgnRef((void**) $1, $2);$n", 28);
STRING_LITERAL(T839829468_250, "#asgnRefNoCycle((void**) $1, $2);$n", 35);
STRING_LITERAL(T839829468_251, "#unsureAsgnRef((void**) $1, $2);$n", 34);
STRING_LITERAL(T839829468_252, "#genericSeqAssign($1, $2, $3);$n", 32);
STRING_LITERAL(T839829468_253, "$1 = #copyString($2);$n", 23);
STRING_LITERAL(T839829468_254, "$3 = $1; $1 = #copyStringRC1($2);$n", 35);
STRING_LITERAL(T839829468_255, "if ($1) #nimGCunrefNoCycle($1);$n", 33);
STRING_LITERAL(T839829468_256, "#unsureAsgnRef((void**) $1, #copyString($2));$n", 47);
STRING_LITERAL(T839829468_257, ".", 1);
STRING_LITERAL(T839829468_258, "ClEnv", 5);
STRING_LITERAL(T839829468_259, "$1.ClPrc = $2.ClPrc;$n", 22);
STRING_LITERAL(T839829468_260, "Field$1", 7);
STRING_LITERAL(T839829468_261, "memcpy((void*)$1, (NIM_CONST void*)$2, sizeof($3));$n", 53);
STRING_LITERAL(T839829468_262, "#genericShallowAssign((void*)$1, (void*)$2, $3);$n", 50);
STRING_LITERAL(T839829468_263, "#genericAssign((void*)$1, (void*)$2, $3);$n", 43);
STRING_LITERAL(T839829468_265, "compiler/ccgexprs.nim", 21);
NIM_CONST TY203018 T839829468_264 = {((NimStringDesc*) &T839829468_265),
((NI) 320)}
;
STRING_LITERAL(T839829468_266, "#genericAssignOpenArray((void*)$1, (void*)$2, $1Len0, $3);$n", 60);
STRING_LITERAL(T839829468_267, "memcpy((void*)$1, (NIM_CONST void*)$2, sizeof($1[0])*$1Len0);$n", 63);
STRING_LITERAL(T839829468_268, "memcpy((void*)$1, (NIM_CONST void*)$2, $3);$n", 45);
STRING_LITERAL(T839829468_269, "genAssignment: ", 15);
STRING_LITERAL(T839829468_270, "request to generate code for .compileTime proc: ", 48);
STRING_LITERAL(T839829468_271, "expr: proc not init ", 20);
STRING_LITERAL(T839829468_272, "NIM_CONST $1 $2 = $3;$n", 23);
STRING_LITERAL(T839829468_273, "{$n", 3);
STRING_LITERAL(T839829468_274, "0x$1,$n", 7);
STRING_LITERAL(T839829468_275, "0x$1, ", 6);
STRING_LITERAL(T839829468_276, "0x$1}$n", 7);
STRING_LITERAL(T839829468_277, "{{$1, $1}", 9);
STRING_LITERAL(T839829468_278, ", {", 3);
STRING_LITERAL(T839829468_279, ",$n", 3);
STRING_LITERAL(T839829468_280, "}", 1);
STRING_LITERAL(T839829468_281, "NIM_CONST struct {$n #TGenericSeq Sup;$n $1 data[$2];$n} $3 ="
" $4;$n", 69);
STRING_LITERAL(T839829468_282, "(($1)&$2)", 9);
STRING_LITERAL(T839829468_283, "$1,$n", 5);
STRING_LITERAL(T839829468_284, "extern NIM_CONST $1 $2;$n", 25);
STRING_LITERAL(T839829468_285, "expr: var not init ", 19);
STRING_LITERAL(T839829468_286, "\011NimThreadVars* NimTV;$n", 24);
STRING_LITERAL(T839829468_287, "\011NimTV = (NimThreadVars*) #GetThreadLocalVars();$n", 50);
STRING_LITERAL(T839829468_288, "NimTV->", 7);
STRING_LITERAL(T839829468_289, "expr: temp not init ", 20);
STRING_LITERAL(T839829468_290, "expr: param not init ", 21);
STRING_LITERAL(T839829468_291, "expr(", 5);
STRING_LITERAL(T839829468_292, "); unknown symbol", 17);
STRING_LITERAL(T839829468_293, "//", 2);
STRING_LITERAL(T839829468_294, "#endb($1, $2);$n", 16);
STRING_LITERAL(T839829468_295, "nimln($1, $2);$n", 16);
STRING_LITERAL(T839829468_296, "LA", 2);
STRING_LITERAL(T839829468_297, "if ($1) goto $2;$n", 18);
STRING_LITERAL(T839829468_298, "if (!($1)) goto $2;$n", 21);
STRING_LITERAL(T839829468_299, "$1: ;$n", 7);
STRING_LITERAL(T839829468_300, "!($1)", 5);
STRING_LITERAL(T839829468_301, "$1", 2);
STRING_LITERAL(T839829468_302, "($3)((NU$2) ~($1))", 18);
STRING_LITERAL(T839829468_303, "-($1)", 5);
STRING_LITERAL(T839829468_304, "($1 > 0? ($1) : -($1))", 22);
STRING_LITERAL(T839829468_305, "(($3)(NU)(NU8)($1))", 19);
STRING_LITERAL(T839829468_306, "(($3)(NU64)(NU8)($1))", 21);
STRING_LITERAL(T839829468_307, "(($3)(NU)(NU16)($1))", 20);
STRING_LITERAL(T839829468_308, "(($3)(NU64)(NU16)($1))", 22);
STRING_LITERAL(T839829468_309, "(($3)(NU64)(NU32)($1))", 22);
STRING_LITERAL(T839829468_310, "(($3)(NU64)(NU)($1))", 20);
STRING_LITERAL(T839829468_311, "(($3)(NU8)(NU)($1))", 19);
STRING_LITERAL(T839829468_312, "(($3)(NU16)(NU)($1))", 20);
STRING_LITERAL(T839829468_313, "(($3)(NU32)(NU64)($1))", 22);
STRING_LITERAL(T839829468_314, "((double) ($1))", 15);
STRING_LITERAL(T839829468_315, "float64ToInt32($1)", 18);
STRING_LITERAL(T839829468_316, "float64ToInt64($1)", 18);
NIM_CONST TY552655 unarithtab_552653_839829468 = {((NimStringDesc*) &T839829468_300),
((NimStringDesc*) &T839829468_301),
((NimStringDesc*) &T839829468_302),
((NimStringDesc*) &T839829468_301),
((NimStringDesc*) &T839829468_303),
((NimStringDesc*) &T839829468_304),
((NimStringDesc*) &T839829468_305),
((NimStringDesc*) &T839829468_306),
((NimStringDesc*) &T839829468_307),
((NimStringDesc*) &T839829468_308),
((NimStringDesc*) &T839829468_309),
((NimStringDesc*) &T839829468_310),
((NimStringDesc*) &T839829468_311),
((NimStringDesc*) &T839829468_312),
((NimStringDesc*) &T839829468_313),
((NimStringDesc*) &T839829468_314),
((NimStringDesc*) &T839829468_314),
((NimStringDesc*) &T839829468_315),
((NimStringDesc*) &T839829468_316)}
;
STRING_LITERAL(T839829468_317, "if ($1 == $2) #raiseOverflow();$n", 33);
STRING_LITERAL(T839829468_318, "((NI$2)-($1))", 13);
NIM_CONST TY551642 opr_551640_839829468 = {((NimStringDesc*) &T839829468_318),
((NimStringDesc*) &T839829468_303),
((NimStringDesc*) &T839829468_304)}
;
STRING_LITERAL(T839829468_319, "(($4)($2) $1 ($4)($3))", 22);
STRING_LITERAL(T839829468_320, "+", 1);
STRING_LITERAL(T839829468_321, "-", 1);
STRING_LITERAL(T839829468_322, "/", 1);
NIM_CONST TY556764 opr_556762_839829468 = {((NimStringDesc*) &T839829468_320),
((NimStringDesc*) &T839829468_321),
((NimStringDesc*) &T839829468_53),
((NimStringDesc*) &T839829468_322)}
;
STRING_LITERAL(T839829468_323, "#nanCheck($1);$n", 16);
STRING_LITERAL(T839829468_324, "#infCheck($1);$n", 16);
STRING_LITERAL(T839829468_325, "(($4)($1) + ($4)($2))", 21);
STRING_LITERAL(T839829468_326, "(($4)($1) - ($4)($2))", 21);
STRING_LITERAL(T839829468_327, "(($4)($1) * ($4)($2))", 21);
STRING_LITERAL(T839829468_328, "(($4)($1) / ($4)($2))", 21);
STRING_LITERAL(T839829468_329, "($4)((NU$3)($1) >> (NU$3)($2))", 30);
STRING_LITERAL(T839829468_330, "($4)((NU$3)($1) << (NU$3)($2))", 30);
STRING_LITERAL(T839829468_331, "($4)($1 & $2)", 13);
STRING_LITERAL(T839829468_332, "($4)($1 | $2)", 13);
STRING_LITERAL(T839829468_333, "($4)($1 ^ $2)", 13);
STRING_LITERAL(T839829468_334, "(($1 <= $2) ? $1 : $2)", 22);
STRING_LITERAL(T839829468_335, "(($1 >= $2) ? $1 : $2)", 22);
STRING_LITERAL(T839829468_336, "($4)((NU$3)($1) + (NU$3)($2))", 29);
STRING_LITERAL(T839829468_337, "($4)((NU$3)($1) - (NU$3)($2))", 29);
STRING_LITERAL(T839829468_338, "($4)((NU$3)($1) * (NU$3)($2))", 29);
STRING_LITERAL(T839829468_339, "($4)((NU$3)($1) / (NU$3)($2))", 29);
STRING_LITERAL(T839829468_340, "($4)((NU$3)($1) % (NU$3)($2))", 29);
STRING_LITERAL(T839829468_341, "($1 == $2)", 10);
STRING_LITERAL(T839829468_342, "($1 <= $2)", 10);
STRING_LITERAL(T839829468_343, "($1 < $2)", 9);
STRING_LITERAL(T839829468_344, "((NU$3)($1) <= (NU$3)($2))", 26);
STRING_LITERAL(T839829468_345, "((NU$3)($1) < (NU$3)($2))", 25);
STRING_LITERAL(T839829468_346, "((NU64)($1) <= (NU64)($2))", 26);
STRING_LITERAL(T839829468_347, "((NU64)($1) < (NU64)($2))", 25);
STRING_LITERAL(T839829468_348, "((NU8)($1) == (NU8)($2))", 24);
STRING_LITERAL(T839829468_349, "((NU8)($1) <= (NU8)($2))", 24);
STRING_LITERAL(T839829468_350, "((NU8)($1) < (NU8)($2))", 23);
STRING_LITERAL(T839829468_351, "($1 != $2)", 10);
NIM_CONST TY551828 binarithtab_551826_839829468 = {((NimStringDesc*) &T839829468_325),
((NimStringDesc*) &T839829468_326),
((NimStringDesc*) &T839829468_327),
((NimStringDesc*) &T839829468_328),
((NimStringDesc*) &T839829468_329),
((NimStringDesc*) &T839829468_330),
((NimStringDesc*) &T839829468_331),
((NimStringDesc*) &T839829468_332),
((NimStringDesc*) &T839829468_333),
((NimStringDesc*) &T839829468_334),
((NimStringDesc*) &T839829468_335),
((NimStringDesc*) &T839829468_334),
((NimStringDesc*) &T839829468_335),
((NimStringDesc*) &T839829468_336),
((NimStringDesc*) &T839829468_337),
((NimStringDesc*) &T839829468_338),
((NimStringDesc*) &T839829468_339),
((NimStringDesc*) &T839829468_340),
((NimStringDesc*) &T839829468_341),
((NimStringDesc*) &T839829468_342),
((NimStringDesc*) &T839829468_343),
((NimStringDesc*) &T839829468_341),
((NimStringDesc*) &T839829468_342),
((NimStringDesc*) &T839829468_343),
((NimStringDesc*) &T839829468_344),
((NimStringDesc*) &T839829468_345),
((NimStringDesc*) &T839829468_346),
((NimStringDesc*) &T839829468_347),
((NimStringDesc*) &T839829468_341),
((NimStringDesc*) &T839829468_342),
((NimStringDesc*) &T839829468_343),
((NimStringDesc*) &T839829468_348),
((NimStringDesc*) &T839829468_349),
((NimStringDesc*) &T839829468_350),
((NimStringDesc*) &T839829468_341),
((NimStringDesc*) &T839829468_342),
((NimStringDesc*) &T839829468_343),
((NimStringDesc*) &T839829468_341),
((NimStringDesc*) &T839829468_341),
((NimStringDesc*) &T839829468_342),
((NimStringDesc*) &T839829468_343),
((NimStringDesc*) &T839829468_351)}
;
STRING_LITERAL(T839829468_352, "($1.ClPrc == $2.ClPrc && $1.ClEnv == $2.ClEnv)", 46);
STRING_LITERAL(T839829468_353, "($#)($# + $#)", 13);
STRING_LITERAL(T839829468_354, "($#)($# - $#)", 13);
STRING_LITERAL(T839829468_355, "($#)($# * $#)", 13);
STRING_LITERAL(T839829468_356, "($#)($# / $#)", 13);
STRING_LITERAL(T839829468_357, "($#)($# % $#)", 13);
NIM_CONST TY551281 opr_551279_839829468 = {((NimStringDesc*) &T839829468_353),
((NimStringDesc*) &T839829468_354),
((NimStringDesc*) &T839829468_355),
((NimStringDesc*) &T839829468_356),
((NimStringDesc*) &T839829468_357),
((NimStringDesc*) &T839829468_353),
((NimStringDesc*) &T839829468_354)}
;
STRING_LITERAL(T839829468_358, "((NU8)($1))", 11);
STRING_LITERAL(T839829468_359, "if ($1 < $2 || $1 > $3) #raiseOverflow();$n", 43);
STRING_LITERAL(T839829468_360, "$# = #addInt64($#, $#);$n", 25);
STRING_LITERAL(T839829468_361, "$# = #subInt64($#, $#);$n", 25);
STRING_LITERAL(T839829468_362, "$# = #mulInt64($#, $#);$n", 25);
STRING_LITERAL(T839829468_363, "$# = #divInt64($#, $#);$n", 25);
STRING_LITERAL(T839829468_364, "$# = #modInt64($#, $#);$n", 25);
NIM_CONST TY551281 prc64_551274_839829468 = {((NimStringDesc*) &T839829468_360),
((NimStringDesc*) &T839829468_361),
((NimStringDesc*) &T839829468_362),
((NimStringDesc*) &T839829468_363),
((NimStringDesc*) &T839829468_364),
((NimStringDesc*) &T839829468_360),
((NimStringDesc*) &T839829468_361)}
;
STRING_LITERAL(T839829468_365, "$# = #addInt($#, $#);$n", 23);
STRING_LITERAL(T839829468_366, "$# = #subInt($#, $#);$n", 23);
STRING_LITERAL(T839829468_367, "$# = #mulInt($#, $#);$n", 23);
STRING_LITERAL(T839829468_368, "$# = #divInt($#, $#);$n", 23);
STRING_LITERAL(T839829468_369, "$# = #modInt($#, $#);$n", 23);
NIM_CONST TY551281 prc_551269_839829468 = {((NimStringDesc*) &T839829468_365),
((NimStringDesc*) &T839829468_366),
((NimStringDesc*) &T839829468_367),
((NimStringDesc*) &T839829468_368),
((NimStringDesc*) &T839829468_369),
((NimStringDesc*) &T839829468_365),
((NimStringDesc*) &T839829468_366)}
;
STRING_LITERAL(T839829468_370, "($#)($#)", 8);
STRING_LITERAL(T839829468_371, "#reprInt((NI64)$1)", 18);
STRING_LITERAL(T839829468_372, "#reprFloat($1)", 14);
STRING_LITERAL(T839829468_373, "#reprBool($1)", 13);
STRING_LITERAL(T839829468_374, "#reprChar($1)", 13);
STRING_LITERAL(T839829468_375, "#reprEnum((NI)$1, $2)", 21);
STRING_LITERAL(T839829468_376, "#reprStr($1)", 12);
STRING_LITERAL(T839829468_377, "#reprSet($1, $2)", 16);
STRING_LITERAL(T839829468_378, "$1, $1Len0", 10);
STRING_LITERAL(T839829468_379, "$1->data, $1->$2", 16);
STRING_LITERAL(T839829468_380, "$1, $2", 6);
STRING_LITERAL(T839829468_381, "genRepr()", 9);
STRING_LITERAL(T839829468_382, "#reprOpenArray($1, $2)", 22);
STRING_LITERAL(T839829468_383, "#reprAny($1, $2)", 16);
STRING_LITERAL(T839829468_384, "\'repr\' doesn\'t support \'void\' type", 34);
STRING_LITERAL(T839829468_385, "($1 - 1)", 8);
STRING_LITERAL(T839829468_386, "#subInt($1, 1)", 14);
STRING_LITERAL(T839829468_387, "binaryStmt", 10);
STRING_LITERAL(T839829468_388, "$1 += $2;$n", 11);
STRING_LITERAL(T839829468_389, "$1 -= $2;$n", 11);
NIM_CONST TY557052 opr_557050_839829468 = {((NimStringDesc*) &T839829468_388),
((NimStringDesc*) &T839829468_389)}
;
NIM_CONST TY557052 fun64_557055_839829468 = {((NimStringDesc*) &T839829468_360),
((NimStringDesc*) &T839829468_361)}
;
NIM_CONST TY557052 fun_557060_839829468 = {((NimStringDesc*) &T839829468_365),
((NimStringDesc*) &T839829468_366)}
;
STRING_LITERAL(T839829468_390, "#appendChar($1, $2);$n", 22);
STRING_LITERAL(T839829468_391, "$1->$2 + ", 9);
STRING_LITERAL(T839829468_392, "#appendString($1, $2);$n", 24);
STRING_LITERAL(T839829468_393, "$1 = #rawNewString($2$3);$n", 27);
STRING_LITERAL(T839829468_394, "$1 = #addChar($1, $2);$n", 24);
STRING_LITERAL(T839829468_395, "$1 = #resizeString($1, $2$3);$n", 31);
STRING_LITERAL(T839829468_396, "$1 = ($2) #incrSeqV2(&($1)->Sup, sizeof($3));$n", 47);
STRING_LITERAL(T839829468_397, "$1 = ($2) #incrSeqV2($1, sizeof($3));$n", 39);
STRING_LITERAL(T839829468_398, "$1->data[$1->$2]", 16);
STRING_LITERAL(T839829468_399, "++$1->$2;$n", 11);
STRING_LITERAL(T839829468_400, "(($1) && ($1)->$2 == 0)", 23);
STRING_LITERAL(T839829468_401, "#eqStrings($1, $2)", 18);
STRING_LITERAL(T839829468_402, "(#cmpStrings($1, $2) <= 0)", 26);
STRING_LITERAL(T839829468_403, "(#cmpStrings($1, $2) < 0)", 25);
STRING_LITERAL(T839829468_404, "$1.ClPrc == 0", 13);
STRING_LITERAL(T839829468_405, "$1 == 0", 7);
STRING_LITERAL(T839829468_406, "#nimIntToStr($1)", 16);
STRING_LITERAL(T839829468_407, "#nimInt64ToStr($1)", 18);
STRING_LITERAL(T839829468_408, "#nimBoolToStr($1)", 17);
STRING_LITERAL(T839829468_409, "#nimCharToStr($1)", 17);
STRING_LITERAL(T839829468_410, "#nimFloatToStr($1)", 18);
STRING_LITERAL(T839829468_411, "#cstrToNimstr($1)", 17);
STRING_LITERAL(T839829468_412, "no \'of\' operator available for pure objects", 43);
STRING_LITERAL(T839829468_413, "(($1) && ($2))", 14);
STRING_LITERAL(T839829468_414, "$1.m_type == $2", 15);
STRING_LITERAL(T839829468_415, "Nim_OfCheck_CACHE", 17);
STRING_LITERAL(T839829468_416, "static TNimType* $#[2];$n", 25);
STRING_LITERAL(T839829468_417, "#isObjWithCache($#.m_type, $#, $#)", 34);
STRING_LITERAL(T839829468_418, "($1)", 4);
STRING_LITERAL(T839829468_419, "sizeof($1)", 10);
STRING_LITERAL(T839829468_420, "if ($1) #nimGCunref($1);$n", 26);
STRING_LITERAL(T839829468_421, "($1) #newObjRC1($2, $3)", 23);
STRING_LITERAL(T839829468_422, "($1) #newObj($2, $3)", 20);
STRING_LITERAL(T839829468_423, "$1->finalizer = (void*)$2;$n", 28);
STRING_LITERAL(T839829468_424, "($1) #newObj($2, sizeof($3))", 28);
STRING_LITERAL(T839829468_425, "($1) #newSeqRC1($2, $3)", 23);
STRING_LITERAL(T839829468_426, "($1) #newSeq($2, $3)", 20);
STRING_LITERAL(T839829468_427, "($1)#nimNewSeqOfCap($2, $3)", 27);
STRING_LITERAL(T839829468_428, "((NI)sizeof($1))", 16);
STRING_LITERAL(T839829468_429, "(*($1*) ($2))", 13);
STRING_LITERAL(T839829468_430, "(($1) ($2))", 11);
STRING_LITERAL(T839829468_431, "($1Len0-1)", 10);
STRING_LITERAL(T839829468_432, "$1Len0", 6);
STRING_LITERAL(T839829468_433, "($1 ? (strlen($1)-1) : -1)", 26);
STRING_LITERAL(T839829468_434, "($1 ? strlen($1) : 0)", 21);
STRING_LITERAL(T839829468_435, "($1 ? ($1->Sup.len-1) : -1)", 27);
STRING_LITERAL(T839829468_436, "($1 ? $1->Sup.len : 0)", 22);
STRING_LITERAL(T839829468_437, "($1 ? ($1->len-1) : -1)", 23);
STRING_LITERAL(T839829468_438, "($1 ? $1->len : 0)", 18);
STRING_LITERAL(T839829468_439, "genArrayLen()", 13);
STRING_LITERAL(T839829468_440, "($1->Sup.len)", 13);
STRING_LITERAL(T839829468_441, "$1->len", 7);
STRING_LITERAL(T839829468_442, "unaryStmt", 9);
STRING_LITERAL(T839829468_443, "#nimGCref($1);$n", 16);
STRING_LITERAL(T839829468_444, "#nimGCunref($1);$n", 18);
STRING_LITERAL(T839829468_445, "$1 = #setLengthStr($1, $2);$n", 29);
STRING_LITERAL(T839829468_446, "$1 = ($3) #setLengthSeq(&($1)->Sup, sizeof($4), $2);$n", 54);
STRING_LITERAL(T839829468_447, "$1 = ($3) #setLengthSeq($1, sizeof($4), $2);$n", 46);
STRING_LITERAL(T839829468_448, "($1- $2)", 8);
STRING_LITERAL(T839829468_449, "$1 |= ((", 8);
STRING_LITERAL(T839829468_450, ")1)<<(($2)%(sizeof(", 19);
STRING_LITERAL(T839829468_451, ")*8));$n", 8);
STRING_LITERAL(T839829468_452, "$1 &= ~(((", 10);
STRING_LITERAL(T839829468_453, ")1) << (($2) % (sizeof(", 23);
STRING_LITERAL(T839829468_454, ")*8)));$n", 9);
STRING_LITERAL(T839829468_455, "#countBits32($1)", 16);
STRING_LITERAL(T839829468_456, "#countBits64($1)", 16);
STRING_LITERAL(T839829468_457, "(($1 & ~ $2 ==0)&&($1 != $2))", 29);
STRING_LITERAL(T839829468_458, "(($1 & ~ $2)==0)", 16);
STRING_LITERAL(T839829468_459, "($1 & $2)", 9);
STRING_LITERAL(T839829468_460, "($1 | $2)", 9);
STRING_LITERAL(T839829468_461, "($1 & ~ $2)", 11);
STRING_LITERAL(T839829468_462, "($1 ^ $2)", 9);
STRING_LITERAL(T839829468_463, "fewCmps", 7);
STRING_LITERAL(T839829468_464, "$1 >= $2 && $1 <= $3", 20);
STRING_LITERAL(T839829468_465, "$1 == $2", 8);
STRING_LITERAL(T839829468_466, " || ", 4);
STRING_LITERAL(T839829468_467, "(($1 &(1U<<((NU)($2)&7U)))!=0)", 30);
STRING_LITERAL(T839829468_468, "(($1 &(1U<<((NU)($2)&15U)))!=0)", 31);
STRING_LITERAL(T839829468_469, "(($1 &(1U<<((NU)($2)&31U)))!=0)", 31);
STRING_LITERAL(T839829468_470, "(($1 &((NU64)1<<((NU)($2)&63U)))!=0)", 36);
STRING_LITERAL(T839829468_471, "(($1[(NU)($2)>>3] &(1U<<((NU)($2)&7U)))!=0)", 43);
STRING_LITERAL(T839829468_472, "genSetOp()", 10);
STRING_LITERAL(T839829468_473, "$1[(NU)($2)>>3] |=(1U<<($2&7U));$n", 34);
STRING_LITERAL(T839829468_474, "$1[(NU)($2)>>3] &= ~(1U<<($2&7U));$n", 36);
STRING_LITERAL(T839829468_475, "#cardSet($1, ", 13);
STRING_LITERAL(T839829468_476, "for ($1 = 0; $1 < $2; $1++) { $n $3 = (($4[$1] & ~ $5[$1]) == "
"0);$n if (!$3) break;}$n", 88);
STRING_LITERAL(T839829468_477, "for ($1 = 0; $1 < $2; $1++) { $n $3 = (($4[$1] & ~ $5[$1]) == "
"0);$n if (!$3) break;}$nif ($3) $3 = (memcmp($4, $5, $2) != 0);"
"$n", 129);
STRING_LITERAL(T839829468_478, "|", 1);
STRING_LITERAL(T839829468_479, "& ~", 3);
STRING_LITERAL(T839829468_480, "^", 1);
NIM_CONST TY556428 lookupopr_556426_839829468 = {((NimStringDesc*) &T839829468_476),
((NimStringDesc*) &T839829468_477),
((NimStringDesc*) &T839829468_52),
((NimStringDesc*) &T839829468_478),
((NimStringDesc*) &T839829468_479),
((NimStringDesc*) &T839829468_480)}
;
STRING_LITERAL(T839829468_481, "(memcmp($1, $2, ", 16);
STRING_LITERAL(T839829468_482, ")==0)", 5);
STRING_LITERAL(T839829468_483, "for ($1 = 0; $1 < $2; $1++) $n $3[$1] = $4[$1] $6 $5[$1];$n", 60);
STRING_LITERAL(T839829468_484, "genSetOp", 8);
STRING_LITERAL(T839829468_485, "$1->data", 8);
STRING_LITERAL(T839829468_486, "($1)+($2), ($3)-($2)+1", 22);
STRING_LITERAL(T839829468_487, "(*$1)->data+($2), ($3)-($2)+1", 29);
STRING_LITERAL(T839829468_488, "$1->data+($2), ($3)-($2)+1", 26);
STRING_LITERAL(T839829468_489, "openArrayLoc: ", 14);
STRING_LITERAL(T839829468_490, "", 0);
STRING_LITERAL(T839829468_491, "(*$1)->data, (*$1)->$2", 22);
STRING_LITERAL(T839829468_492, "$1.ClPrc($3$1.ClEnv)", 20);
STRING_LITERAL(T839829468_493, "$1.ClEnv? $1.ClPrc($3$1.ClEnv):(($4)($1.ClPrc))($2)", 51);
STRING_LITERAL(T839829468_494, "$1 = 0;$n", 9);
STRING_LITERAL(T839829468_495, "#chckNil((void*)$1);$n", 22);
STRING_LITERAL(T839829468_496, "#genericReset((void*)$1, $2);$n", 31);
STRING_LITERAL(T839829468_497, ";$n", 3);
STRING_LITERAL(T839829468_499, "compiler/ccgcalls.nim", 21);
NIM_CONST TY203018 T839829468_498 = {((NimStringDesc*) &T839829468_499),
((NI) 423)}
;
static NIM_CONST char136Set T839829468_500 = {
0x00, 0x00, 0x00, 0x00, 0x88, 0x01, 0x00, 0x00,
0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
;
STRING_LITERAL(T839829468_501, "wrong argument count", 20);
STRING_LITERAL(T839829468_502, "call expression expected for C++ pattern", 40);
NIM_CONST TY203018 T839829468_503 = {((NimStringDesc*) &T839829468_499),
((NI) 328)}
;
STRING_LITERAL(T839829468_504, "->", 2);
STRING_LITERAL(T839829468_505, ");$n", 4);
STRING_LITERAL(T839829468_506, "[", 1);
NIM_CONST TY203018 T839829468_507 = {((NimStringDesc*) &T839829468_499),
((NI) 472)}
;
STRING_LITERAL(T839829468_508, "varargs for objective C method?", 31);
STRING_LITERAL(T839829468_509, "Result: ", 8);
STRING_LITERAL(T839829468_510, "];$n", 4);
STRING_LITERAL(T839829468_511, "]", 1);
NIM_CONST TY203018 T839829468_512 = {((NimStringDesc*) &T839829468_265),
((NI) 925)}
;
STRING_LITERAL(T839829468_513, "<stdio.h>", 9);
STRING_LITERAL(T839829468_514, ", \"nil\"", 7);
STRING_LITERAL(T839829468_515, ", $1? ($1)->data:\"nil\"", 22);
STRING_LITERAL(T839829468_516, "printf($1$2);$n", 15);
STRING_LITERAL(T839829468_517, "%s", 2);
STRING_LITERAL(T839829468_518, "fflush(stdout);$n", 17);
STRING_LITERAL(T839829468_519, "#genericDeepCopy((void*)$1, (void*)$2, $3);$n", 45);
STRING_LITERAL(T839829468_520, "#genericSeqDeepCopy($1, $2, $3);$n", 34);
STRING_LITERAL(T839829468_521, "#genericDeepCopyOpenArray((void*)$1, (void*)$2, $1Len0, $3);$n", 62);
STRING_LITERAL(T839829468_522, "genDeepCopy: ", 13);
STRING_LITERAL(T839829468_523, "genMagicExpr: ", 14);
STRING_LITERAL(T839829468_524, "static NIM_CONST $1 $2 = $3;$n", 30);
STRING_LITERAL(T839829468_525, "memset($1, 0, sizeof($1));$n", 28);
STRING_LITERAL(T839829468_526, "for ($1 = $3; $1 <= $4; $1++) $n$2[(NU)($1)>>3] |=(1U<<((NU)($1"
")&7U));$n", 72);
STRING_LITERAL(T839829468_527, "$1[(NU)($2)>>3] |=(1U<<((NU)($2)&7U));$n", 40);
STRING_LITERAL(T839829468_528, "for ($1 = $3; $1 <= $4; $1++) $n$2 |=((", 39);
STRING_LITERAL(T839829468_529, ")(1)<<(($1)%(sizeof(", 20);
STRING_LITERAL(T839829468_530, "$1 |=((", 7);
STRING_LITERAL(T839829468_531, ")(1)<<(($2)%(sizeof(", 20);
STRING_LITERAL(T839829468_532, "genCheckedRecordField", 21);
STRING_LITERAL(T839829468_533, "genObjConstr", 12);
STRING_LITERAL(T839829468_534, "if ($1) #raiseFieldError(((#NimStringDesc*) &$2));$n", 52);
STRING_LITERAL(T839829468_535, "if (!($1)) #raiseFieldError(((#NimStringDesc*) &$2));$n", 55);
STRING_LITERAL(T839829468_536, "LOC$1.source", 12);
STRING_LITERAL(T839829468_537, "union { $1 source; $2 dest; } LOC$3;$n", 38);
STRING_LITERAL(T839829468_538, "LOC$#.dest", 10);
STRING_LITERAL(T839829468_539, "if ((NU)($1) > (NU)($2)) #raiseIndexError();$n", 46);
STRING_LITERAL(T839829468_540, "if ($1 < $2 || $1 > $3) #raiseIndexError();$n", 45);
STRING_LITERAL(T839829468_541, "$1[($2)- $3]", 12);
STRING_LITERAL(T839829468_542, "if ((NU)($1) >= (NU)($2Len0)) #raiseIndexError();$n", 51);
STRING_LITERAL(T839829468_543, "if ((NU)($1) > (NU)($2->$3)) #raiseIndexError();$n", 50);
STRING_LITERAL(T839829468_544, "if ((NU)($1) >= (NU)($2->$3)) #raiseIndexError();$n", 51);
STRING_LITERAL(T839829468_545, "genTupleElem", 12);
STRING_LITERAL(T839829468_546, ".Field$1", 8);
STRING_LITERAL(T839829468_547, "expr(nkBracketExpr, ", 20);
STRING_LITERAL(T839829468_548, "genDeref ", 9);
STRING_LITERAL(T839829468_549, "genRecordFieldAux", 17);
STRING_LITERAL(T839829468_550, "genRecordField 3", 16);
STRING_LITERAL(T839829468_551, ".$1", 3);
STRING_LITERAL(T839829468_552, "} $1: ;$n", 9);
STRING_LITERAL(T839829468_553, "FR.len-=$1;$n", 13);
STRING_LITERAL(T839829468_554, "FR.len+=$1;$n", 13);
STRING_LITERAL(T839829468_555, "if (!$1) goto $2;$n", 19);
STRING_LITERAL(T839829468_556, "goto $1;$n", 10);
STRING_LITERAL(T839829468_557, "genIf()", 7);
STRING_LITERAL(T839829468_558, "->Sup", 5);
STRING_LITERAL(T839829468_559, "$1 = &$2;$n", 11);
STRING_LITERAL(T839829468_560, "if ($1) #chckObj($2.m_type, $3);$n", 34);
STRING_LITERAL(T839829468_561, "#chckObj($1.m_type, $2);$n", 26);
STRING_LITERAL(T839829468_562, "(($1)#$5($2, $3, $4))", 21);
STRING_LITERAL(T839829468_563, "chckRangeF", 10);
STRING_LITERAL(T839829468_564, "chckRange64", 11);
STRING_LITERAL(T839829468_565, "chckRange", 9);
STRING_LITERAL(T839829468_566, "CNSTCLOSURE", 11);
STRING_LITERAL(T839829468_567, "closure to closure created", 26);
STRING_LITERAL(T839829468_568, "$1.ClPrc = $2; $1.ClEnv = $3;$n", 31);
STRING_LITERAL(T839829468_569, "while (1) {$n", 13);
STRING_LITERAL(T839829468_570, "case statement must be exhaustive for computed goto", 51);
STRING_LITERAL(T839829468_571, "case statement has too many cases for computed goto", 51);
STRING_LITERAL(T839829468_572, "case statement has to start at 0 for computed goto", 50);
STRING_LITERAL(T839829468_573, "no case statement found for computed goto", 41);
STRING_LITERAL(T839829468_574, "TMP$1", 5);
STRING_LITERAL(T839829468_575, "static void* $#[$#] = {", 23);
STRING_LITERAL(T839829468_576, "&&TMP$#, ", 9);
STRING_LITERAL(T839829468_577, "&&TMP$#};$n", 11);
STRING_LITERAL(T839829468_578, "goto *$#[$#];$n", 15);
STRING_LITERAL(T839829468_579, "range notation not available for computed goto", 46);
STRING_LITERAL(T839829468_580, "TMP$#:$n", 8);
STRING_LITERAL(T839829468_581, "#nimProfile();$n", 16);
STRING_LITERAL(T839829468_582, "\'goto\' target must be a literal value", 37);
STRING_LITERAL(T839829468_583, "goto NIMSTATE_$#;$n", 19);
STRING_LITERAL(T839829468_584, "$1 = ($2*) #nimGetProcAddr($3, $4);$n", 37);
STRING_LITERAL(T839829468_585, "$2* $1;$n", 9);
STRING_LITERAL(T839829468_586, "#dbgRegisterGlobal($1, &$2, $3);$n", 34);
STRING_LITERAL(T839829468_587, "#nimGCvisit((void*)$1, 0);$n", 28);
STRING_LITERAL(T839829468_588, "N_NIMCALL(void, $1)(void)", 25);
STRING_LITERAL(T839829468_589, "#nimRegisterGlobalMarker($1);$n", 31);
STRING_LITERAL(T839829468_590, "$#($#);$n", 9);
STRING_LITERAL(T839829468_591, "$# = $#;$n", 10);
STRING_LITERAL(T839829468_592, "genVarTuple", 11);
STRING_LITERAL(T839829468_593, "genConstStmt", 12);
STRING_LITERAL(T839829468_594, "for statement not eliminated", 28);
STRING_LITERAL(T839829468_595, "if (#eqStrings($1, $2)) goto $3;$n", 34);
STRING_LITERAL(T839829468_596, "switch (#hashString($1) & $2) {$n", 33);
STRING_LITERAL(T839829468_597, "case $1: $n$2break;$n", 21);
STRING_LITERAL(T839829468_598, "goto LA$1;$n", 12);
STRING_LITERAL(T839829468_599, "LA$1: ;$n", 9);
STRING_LITERAL(T839829468_600, "if ($1 >= $2 && $1 <= $3) goto $4;$n", 36);
STRING_LITERAL(T839829468_601, "if ($1 == $2) goto $3;$n", 24);
STRING_LITERAL(T839829468_602, "NIMSTATE_$#:$n", 14);
STRING_LITERAL(T839829468_603, "switch ($1) {$n", 15);
STRING_LITERAL(T839829468_604, "default: __assume(0);$n", 23);
STRING_LITERAL(T839829468_605, "#popSafePoint();$n", 18);
STRING_LITERAL(T839829468_606, "#popCurrentException();$n", 25);
STRING_LITERAL(T839829468_607, "if ($1.status != 0) #popCurrentException();$n", 45);
STRING_LITERAL(T839829468_608, "goto BeforeRet;$n", 17);
STRING_LITERAL(T839829468_609, "no loop to break", 16);
STRING_LITERAL(T839829468_610, "extern $1", 9);
STRING_LITERAL(T839829468_611, "#FieldDiscriminantCheck((NI)(NU)($1), (NI)(NU)($2), $3, $4);$n", 62);
STRING_LITERAL(T839829468_612, "genAsmOrEmitStmt()", 18);
STRING_LITERAL(T839829468_613, "\"", 1);
STRING_LITERAL(T839829468_614, "\\n\"\012", 4);
STRING_LITERAL(T839829468_615, "Exception", 9);
STRING_LITERAL(T839829468_616, "E_Base", 6);
STRING_LITERAL(T839829468_617, "try {$n", 7);
STRING_LITERAL(T839829468_618, "} catch (NimException& $1) {$n", 30);
STRING_LITERAL(T839829468_619, "#setFrame((TFrame*)&FR);$n", 26);
STRING_LITERAL(T839829468_620, "else ", 5);
STRING_LITERAL(T839829468_621, "#isObj($1.exp->m_type, $2)", 26);
STRING_LITERAL(T839829468_622, "if ($1) ", 8);
STRING_LITERAL(T839829468_623, "throw;$n", 8);
STRING_LITERAL(T839829468_624, "<setjmp.h>", 10);
STRING_LITERAL(T839829468_625, "#TSafePoint $1;$n", 17);
STRING_LITERAL(T839829468_626, "#pushSafePoint(&$1);$n", 22);
STRING_LITERAL(T839829468_627, "nimStdSetjmp", 12);
STRING_LITERAL(T839829468_628, "$1.status = setjmp($1.context);$n", 33);
STRING_LITERAL(T839829468_629, "nimSigSetjmp", 12);
STRING_LITERAL(T839829468_630, "$1.status = sigsetjmp($1.context, 0);$n", 39);
STRING_LITERAL(T839829468_631, "nimRawSetjmp", 12);
STRING_LITERAL(T839829468_632, "$1.status = _setjmp($1.context);$n", 34);
STRING_LITERAL(T839829468_633, "if ($1.status == 0) {$n", 23);
STRING_LITERAL(T839829468_634, "else {$n", 8);
STRING_LITERAL(T839829468_635, "else", 4);
STRING_LITERAL(T839829468_636, "$1.status = 0;$n", 16);
STRING_LITERAL(T839829468_637, "#isObj(#getCurrentException()->Sup.m_type, $1)", 46);
STRING_LITERAL(T839829468_638, "#isObj(#getCurrentException()->m_type, $1)", 42);
STRING_LITERAL(T839829468_639, "if ($1) {$n", 11);
STRING_LITERAL(T839829468_640, "if ($1.status != 0) #reraiseException();$n", 42);
STRING_LITERAL(T839829468_641, "#raiseException((#Exception*)$1, $2);$n", 39);
STRING_LITERAL(T839829468_642, "#reraiseException();$n", 22);
STRING_LITERAL(T839829468_643, "/*TYPESECTION*/", 15);
STRING_LITERAL(T839829468_644, "/*VARSECTION*/", 14);
STRING_LITERAL(T839829468_645, "/*INCLUDESECTION*/", 18);
STRING_LITERAL(T839829468_646, "bp", 2);
STRING_LITERAL(T839829468_647, "#dbgRegisterBreakpoint($1, (NCSTRING)$2, (NCSTRING)$3);$n", 57);
STRING_LITERAL(T839829468_648, "#dbgRegisterWatchpoint($1, (NCSTRING)$2, $3);$n", 47);
STRING_LITERAL(T839829468_649, "#pragma omp parallel for $4$nfor ($1 = $2; $1 <= $3; ++$1)", 58);
STRING_LITERAL(T839829468_651, "compiler/ccgstmts.nim", 21);
NIM_CONST TY203018 T839829468_650 = {((NimStringDesc*) &T839829468_651),
((NI) 145)}
;
STRING_LITERAL(T839829468_652, "STATE$1: ;$n", 12);
STRING_LITERAL(T839829468_653, "case -1: goto BeforeRet;$n", 26);
STRING_LITERAL(T839829468_654, "case $1: goto STATE$1;$n", 24);
STRING_LITERAL(T839829468_655, "if (((NI*) $1)[0] < 0) break;$n", 31);
STRING_LITERAL(T839829468_656, "if ((((NI*) $1.ClEnv)[0]) < 0) break;$n", 39);
STRING_LITERAL(T839829468_657, "); unknown node kind", 20);
NIM_CONST TY203018 T839829468_658 = {((NimStringDesc*) &T839829468_651),
((NI) 1122)}
;
STRING_LITERAL(T839829468_659, "Init000", 7);
STRING_LITERAL(T839829468_660, "DatInit000", 10);
STRING_LITERAL(T839829468_661, "NIM_EXTERNC N_NOINLINE(void, $1)(void);$N", 41);
STRING_LITERAL(T839829468_662, "\011$1();$N", 8);
STRING_LITERAL(T839829468_663, "N_CDECL(void, NimMainInner)(void) {$N$1}$N$NN_CDECL(void, NimMa"
"in)(void) {$N\011void (*volatile inner)();$N\011PreMain();$N\011inner = N"
"imMainInner;$N$2\011(*inner)();$N}$N$N", 162);
STRING_LITERAL(T839829468_664, "N_STDCALL(int, WinMain)(HINSTANCE hCurInstance, $N "
" HINSTANCE hPrevInstance, $N LP"
"STR lpCmdLine, int nCmdShow) {$N\011NimMain();$N\011return nim_program"
"_result;$N}$N$N", 206);
STRING_LITERAL(T839829468_665, "N_LIB_EXPORT N_CDECL(void, NimMainInner)(void) {$N$1}$N$NN_CDEC"
"L(void, NimMain)(void) {$N\011void (*volatile inner)();$N\011PreMain()"
";$N\011inner = NimMainInner;$N$2\011(*inner)();$N}$N$N", 175);
STRING_LITERAL(T839829468_666, "BOOL WINAPI DllMain(HINSTANCE hinstDLL, DWORD fwdreason, $N "
" LPVOID lpvReserved) {$N\011if(fwdreason == DLL_PROC"
"ESS_ATTACH) {$N\011NimMain();$N}$N\011return 1;$N}$N$N", 175);
STRING_LITERAL(T839829468_667, "<windows.h>", 11);
STRING_LITERAL(T839829468_668, "void NIM_POSIX_INIT NimMainInit(void) {$N\011NimMain();$N}$N$N", 59);
STRING_LITERAL(T839829468_669, "int cmdCount;$Nchar** cmdLine;$Nchar** gEnv;$NN_CDECL(void, Nim"
"MainInner)(void) {$N$1}$N$NN_CDECL(void, NimMain)(void) {$N\011void"
" (*volatile inner)();$N\011PreMain();$N\011inner = NimMainInner;$N$2\011("
"*inner)();$N}$N$N", 208);
STRING_LITERAL(T839829468_670, "int main(void) {$N\011NimMain();$N\011return 0;$N}$N$N", 48);
STRING_LITERAL(T839829468_671, "int main(int argc, char** args, char** env) {$N\011cmdLine = args;"
"$N\011cmdCount = argc;$N\011gEnv = env;$N\011NimMain();$N\011return nim_prog"
"ram_result;$N}$N$N", 145);
STRING_LITERAL(T839829468_672, "dbgRegisterBreakpoint", 21);
STRING_LITERAL(T839829468_673, "dbgRegisterFilename", 19);
STRING_LITERAL(T839829468_674, "dbgRegisterFilename($1);$N", 26);
STRING_LITERAL(T839829468_675, "\011#initStackBottomWith((void *)&inner);$N", 40);
STRING_LITERAL(T839829468_676, "void PreMainInner() {$N\011systemInit000();$N$1$2$3}$N$Nvoid PreMa"
"in() {$N\011void (*volatile inner)();$N\011systemDatInit000();$N\011inner"
" = PreMainInner;$N$4$5\011(*inner)();$N}$N$N", 168);
STRING_LITERAL(T839829468_677, "\011#initThreadVarsEmulation();$N", 30);
STRING_LITERAL(T839829468_678, "still forwarded: ", 17);
STRING_LITERAL(T839829468_679, "NIM_EXTERNC N_NOINLINE(void, $1)(void) {$N", 42);
STRING_LITERAL(T839829468_680, "static #TNimNode $1[$2];$n", 26);
STRING_LITERAL(T839829468_681, "static #TNimType $1[$2];$n", 26);
STRING_LITERAL(T839829468_682, "\011TFrame FR; FR.len = 0;$N", 25);
STRING_LITERAL(T839829468_683, "}$N$N", 5);
STRING_LITERAL(T839829468_684, "N_NIMCALL(void, nimLoadProcs$1)(void) {$2}$N$N", 46);
STRING_LITERAL(T839829468_685, "/* Generated by Nim Compiler v$1 */$N/* (c) 2016 Andreas Rump"
"f */$N/* The generated code is subject to the original license. "
"*/$N", 131);
STRING_LITERAL(T839829468_686, "0.15.0", 6);
STRING_LITERAL(T839829468_687, "/* Generated by Nim Compiler v$1 */$N/* (c) 2016 Andreas Rump"
"f */$N/* The generated code is subject to the original license. "
"*/$N/* Compiled for: $2, $3, $4 */$N/* Command for C compiler:$n"
" $5 */$N", 201);
extern NIM_CONST TY176082 Os_176068_4151366050;
extern NIM_CONST TY176510 Cpu_176496_4151366050;
STRING_LITERAL(T839829468_688, "#define NIM_INTBITS $1", 22);
STRING_LITERAL(T839829468_689, "typedef struct {$1} NimThreadVars;$n", 36);
STRING_LITERAL(T839829468_690, "#include \"nimbase.h\"", 20);
STRING_LITERAL(T839829468_691, "#include \"$1\"$N", 15);
STRING_LITERAL(T839829468_692, "#include $1$N", 13);
STRING_LITERAL(T839829468_693, "extern \"C\"", 10);
STRING_LITERAL(T839829468_694, "$#NI NimThreadVarsSize(){return (NI)sizeof(NimThreadVars);}$n", 61);
STRING_LITERAL(T839829468_695, "__$1__", 6);
STRING_LITERAL(T839829468_696, "#ifndef $1$n#define $1$n", 24);
STRING_LITERAL(T839829468_697, "N_CDECL(void, NimMain)(void);$n", 31);
STRING_LITERAL(T839829468_698, "#endif /* $1 */$n", 17);
Tcgen529027* generatedheader_532201_839829468;
extern TNimType NTI529015; /* BModule */
Ropeobj178006* indent_532655_839829468;
extern TNimType NTI178004; /* Rope */
extern Gcheap50218 gch_50258_1689653243;
Ropeobj178006* nimtv_538656_839829468;
Ttypeseq292836* nimtvdeps_538674_839829468;
extern TNimType NTI292836; /* TTypeSeq */
Intset268030 nimtvdeclared_538675_839829468;
extern TNimType NTI268030; /* IntSet */
NI breakpointid_548860_839829468;
Ropeobj178006* gbreakpoints_548861_839829468;
extern TY529153* gmodules_529170_3723162438;
extern TNimType NTI529027; /* TCGen */
extern Debuginfo203009 gdebuginfo_203470_1926258066;
extern Toption169009Set goptions_169128_2607990831;
extern TNimType NTI292804; /* TSymSeq */
extern Tglobaloption169013Set gglobaloptions_169130_2607990831;
extern NimStringDesc* headerfile_169138_2607990831;
extern NimStringDesc* gprojectfull_169211_2607990831;
extern Tcommands169076 gcmd_169132_2607990831;
extern NI gerrorcounter_192072_155036129;
extern Ropeobj178006* rnl_178903_2381377266;
extern NI gforwardedprocscounter_529171_3723162438;
extern TNimType NTI292244; /* TTypeKind */
extern TNimType NTI203017; /* seq[(string, int)] */
extern Tsystemcc273002 ccompiler_273431_2528170400;
extern NimStringDesc* tnl_176644_4151366050;
extern NI floatsize_176642_4151366050;
extern Tgcmode169080 gselectedgc_169133_2607990831;
extern TNimType NTI292020; /* TNodeKind */
extern TNimType NTI135002; /* seq[string] */
extern TNimType NTI292435; /* TSymKind */
extern TNimType NTI292816; /* TLoc */
extern NI intsize_176641_4151366050;
extern TNimType NTI292524; /* TMagic */
extern TNimType NTI191350; /* seq[Rope] */
extern TNimType NTI292796; /* TNodeSeq */
extern Ropeobj178006* mainmodprocs_529148_3723162438;
extern Ropeobj178006* maindatinit_529151_3723162438;
extern Ropeobj178006* mainmodinit_529149_3723162438;
extern Ropeobj178006* othermodsinit_529150_3723162438;
extern Tsystemos176004 targetos_176629_4151366050;
extern TY191612* fileinfos_191629_155036129;
extern Tsystemcpu176452 targetcpu_176627_4151366050;
extern Ropeobj178006* gmapping_529152_3723162438;
/* Generated GC marker proc: makes the global 'generatedheader' pointer
 * visible to the Nim GC's trace phase via nimGCvisit.  (Mangled name;
 * presumably registered as a global marker elsewhere -- not visible here.) */
N_NIMCALL(void, T839829468_2)(void) {
nimGCvisit((void*)generatedheader_532201_839829468, 0);
}
/* Generated GC marker proc for the global 'indent' rope pointer. */
N_NIMCALL(void, T839829468_3)(void) {
nimGCvisit((void*)indent_532655_839829468, 0);
}
/* Translate a user-visible heap pointer into the GC cell that owns it.
   The cell header is laid out immediately before the user data, so the
   conversion steps back by sizeof(Cell47705); the subtraction is done in
   32-bit unsigned arithmetic exactly as the original codegen emits it. */
static N_INLINE(Cell47705*, usrtocell_51840_1689653243)(void* usr0) {
NU32 raw0 = (NU32)(((NI) (usr0)));
raw0 = raw0 - (NU32)(((NI)sizeof(Cell47705)));
return (Cell47705*) ((NI) raw0);
}
/* Queue cell 'c0' on the GC's zero-count table (zct) for deferred
   reclamation once its refcount has dropped to zero. */
static N_INLINE(void, rtladdzct_53001_1689653243)(Cell47705* c0) {
addzct_51817_1689653243((&gch_50258_1689653243.zct), c0);
}
/* Reference assignment for refs the compiler knows cannot form cycles:
   retain the new referent, release the old one (refcounts are stored
   scaled by 8; a count below one unit queues the cell on the ZCT),
   then store the new pointer. Retain-before-release order guards the
   self-assignment case. */
static N_INLINE(void, asgnRefNoCycle)(void** dest0, void* src0) {
{
Cell47705* c0;
if (!!((src0 == NIM_NIL))) goto LA3;
c0 = usrtocell_51840_1689653243(src0);
(*c0).refcount += ((NI) 8);  /* retain new referent */
}
LA3: ;
{
Cell47705* c0;
if (!!(((*dest0) == NIM_NIL))) goto LA7;
c0 = usrtocell_51840_1689653243((*dest0));
{
(*c0).refcount -= ((NI) 8);  /* release old referent */
if (!((NU32)((*c0).refcount) < (NU32)(((NI) 8)))) goto LA11;
rtladdzct_53001_1689653243(c0);  /* count reached zero: defer collection */
}
LA11: ;
}
LA7: ;
(*dest0) = src0;
}
/* GC visit hook for the global 'nimtv' rope (declared elsewhere). */
N_NIMCALL(void, T839829468_5)(void) {
nimGCvisit((void*)nimtv_538656_839829468, 0);
}
/* GC visit hook for the global 'nimtvdeps' seq (declared elsewhere). */
N_NIMCALL(void, T839829468_6)(void) {
nimGCvisit((void*)nimtvdeps_538674_839829468, 0);
}
/* Release one reference (refcounts are stored scaled by 8) from the cell
   owning 'p0'; when the count falls below one unit, queue the cell on
   the zero-count table. The acyclic variant: no cycle-candidate marking. */
static N_INLINE(void, nimGCunrefNoCycle)(void* p0) {
Cell47705* cell0 = usrtocell_51840_1689653243(p0);
(*cell0).refcount -= ((NI) 8);
if ((NU32)((*cell0).refcount) < (NU32)(((NI) 8))) {
rtladdzct_53001_1689653243(cell0);
}
}
/* GC visit hook for the 'nimtvdeclared' IntSet: both of its GC'ed
   fields (head, data) must be reported. */
N_NIMCALL(void, T839829468_7)(void) {
nimGCvisit((void*)nimtvdeclared_538675_839829468.head, 0);
nimGCvisit((void*)nimtvdeclared_538675_839829468.data, 0);
}
/* GC visit hook for the global 'gbreakpoints' rope. */
N_NIMCALL(void, T839829468_8)(void) {
nimGCvisit((void*)gbreakpoints_548861_839829468, 0);
}
/* Look up the codegen module registered for symbol 's0': modules are
   indexed by their 'position' into the global 'gmodules' seq. Returns
   NULL when the position is out of the seq's current range. */
N_NIMCALL(Tcgen529027*, getcgenmodule_532226_839829468)(Tsym292834* s0) {
Tcgen529027* result0;
result0 = (Tcgen529027*)0;
{
NIM_BOOL LOC3;
LOC3 = (NIM_BOOL)0;
/* short-circuit bounds check: 0 <= position < gmodules.len */
LOC3 = (((NI) 0) <= (*s0).position);
if (!(LOC3)) goto LA4;
LOC3 = ((*s0).position < (gmodules_529170_3723162438 ? gmodules_529170_3723162438->Sup.len : 0));
LA4: ;
if (!LOC3) goto LA5;
result0 = gmodules_529170_3723162438->data[(*s0).position];
}
goto LA1;
LA5: ;
{
result0 = NIM_NIL;
}
LA1: ;
return result0;
}
/* Thin memcpy wrapper for non-overlapping regions; the memcpy return
   value is intentionally discarded. */
static N_INLINE(void, copymem_7485_1689653243)(void* dest0, void* source0, NI size0) {
(void) memcpy(dest0, source0, ((size_t) (size0)));
}
/* Append 'src0' (including its NUL terminator) onto 'dest0' and bump the
   length. The caller must already have reserved enough capacity. */
static N_INLINE(void, appendString)(NimStringDesc* dest0, NimStringDesc* src0) {
NI srclen0 = (*src0).Sup.len;
copymem_7485_1689653243(((void*) (&(*dest0).data[(*dest0).Sup.len])), ((void*) ((*src0).data)), ((NI) (srclen0 + ((NI) 1))));
(*dest0).Sup.len += srclen0;
}
/* Compute a 32-bit id for the owner of symbol 's0': walk the owner chain
   up to the enclosing module (symbol kind 6), then register the pair
   (owner-of-module name — presumably the package; TODO confirm — and
   module name) with the global debug info, whose id is returned. */
N_NIMCALL(NU32, hashowner_532977_839829468)(Tsym292834* s0) {
NU32 result0;
Tsym292834* m0;
Tsym292834* p0;
result0 = (NU32)0;
m0 = s0;
{
while (1) {
if (!!(((*m0).kind == ((Tsymkind292435) 6)))) goto LA2;
m0 = (*m0).owner;
} LA2: ;
}
p0 = (*m0).owner;
result0 = register_203121_1926258066((&gdebuginfo_203470_1926258066), (*(*p0).name).s, (*(*m0).name).s);
return result0;
}
/* Add one reference unit (counts are stored scaled by 8), using
   unsigned arithmetic to sidestep signed-overflow UB. */
static N_INLINE(void, incref_53819_1689653243)(Cell47705* c0) {
NU32 rc0 = (NU32)((*c0).refcount);
(*c0).refcount = (NI)(rc0 + (NU32)(((NI) 8)));
}
/* Drop one reference unit (counts are stored scaled by 8); when the
   count falls below one unit, queue the cell on the zero-count table. */
static N_INLINE(void, decref_53401_1689653243)(Cell47705* c0) {
(*c0).refcount -= ((NI) 8);
if ((NU32)((*c0).refcount) < (NU32)(((NI) 8))) {
rtladdzct_53001_1689653243(c0);
}
}
/* General reference assignment with full RC maintenance: retain the new
   referent first, then release the old one (retain-before-release keeps
   self-assignment safe), finally store the new pointer. */
static N_INLINE(void, asgnRef)(void** dest0, void* src0) {
if (src0 != NIM_NIL) {
incref_53819_1689653243(usrtocell_51840_1689653243(src0));
}
if ((*dest0) != NIM_NIL) {
decref_53401_1689653243(usrtocell_51840_1689653243((*dest0)));
}
(*dest0) = src0;
}
/* Option set for a module's init proc: start from the global 'goptions';
   when the module symbol carries flag 13, mask out option bit 15
   (32768 == 1 << 15). */
N_NIMCALL(Toption169009Set, initprocoptions_562635_839829468)(Tcgen529027* m0) {
Toption169009Set result0;
memset((void*)(&result0), 0, sizeof(result0));
{
if (!(((*(*m0).module).flags &(1U<<((NU)(((Tsymflag292184) 13))&31U)))!=0)) goto LA3;
result0 = (goptions_169128_2607990831 & ~ 32768);
}
goto LA1;
LA3: ;
{
result0 = goptions_169128_2607990831;
}
LA1: ;
return result0;
}
/* Allocate the module's pre-init proc. Its label counter starts at
   100000 — presumably so its generated labels stay disjoint from the
   init proc's (TODO confirm against newpostinitproc's 200000 base). */
N_NIMCALL(Tcproc529021*, newpreinitproc_562625_839829468)(Tcgen529027* m0) {
Tcproc529021* proc0 = newproc_529206_3723162438(NIM_NIL, m0);
(*proc0).labels = ((NI) 100000);
return proc0;
}
/* Allocate the module's post-init proc; its label counter starts at
   200000, above the pre-init proc's 100000 base. */
N_NIMCALL(Tcproc529021*, newpostinitproc_562630_839829468)(Tcgen529027* m0) {
Tcproc529021* proc0 = newproc_529206_3723162438(NIM_NIL, m0);
(*proc0).labels = ((NI) 200000);
return proc0;
}
/* Produce a fresh unique temporary name for module 'm0': concatenate the
   module's tmpbase rope with the current label counter, then bump the
   counter so the next call yields a different name. */
N_NIMCALL(Ropeobj178006*, gettempname_533596_839829468)(Tcgen529027* m0) {
Ropeobj178006* result0;
Ropeobj178006* LOC1;
result0 = (Ropeobj178006*)0;
LOC1 = (Ropeobj178006*)0;
LOC1 = rope_178401_2381377266(((NI64) ((*m0).labels)));
result0 = HEX26_178418_2381377266((*m0).tmpbase, LOC1);
(*m0).labels += ((NI) 1);
return result0;
}
/* Allocate and initialize a fresh codegen module for 'module0' writing
   to 'filename0': builds the tmpbase prefix from the owner hash, resets
   all caches/sets, creates the init/pre-init/post-init procs and seeds
   the type stacks. When the module carries symbol flag 13, sets codegen
   flag 0 and clears option bit 15 on the pre/post init procs (mirrors
   initprocoptions). */
N_NIMCALL(Tcgen529027*, rawnewmodule_562663_839829468)(Tsym292834* module0, NimStringDesc* filename0) {
Tcgen529027* result0;
NimStringDesc* LOC1;
NU32 LOC2;
NimStringDesc* LOC3;
NimStringDesc* LOC4;
NimStringDesc* LOC5;
result0 = (Tcgen529027*)0;
result0 = (Tcgen529027*) newObj((&NTI529015), sizeof(Tcgen529027));
(*result0).Sup.Sup.m_type = (&NTI529027);
/* tmpbase = T11 & $hashowner(module) & T12 */
LOC1 = (NimStringDesc*)0;
LOC2 = (NU32)0;
LOC2 = hashowner_532977_839829468(module0);
LOC3 = (NimStringDesc*)0;
LOC3 = HEX24_8401_1689653243(((NU64) (LOC2)));
LOC1 = rawNewString(LOC3->Sup.len + 2);
appendString(LOC1, ((NimStringDesc*) &T839829468_11));
appendString(LOC1, LOC3);
appendString(LOC1, ((NimStringDesc*) &T839829468_12));
asgnRefNoCycle((void**) (&(*result0).tmpbase), rope_178277_2381377266(LOC1));
initlinkedlist_147031_3771138726((&(*result0).headerfiles));
initintset_268885_2627731572((&(*result0).declaredthings));
initintset_268885_2627731572((&(*result0).declaredprotos));
/* both cfilename and filename are set to the same input path */
LOC4 = (NimStringDesc*)0;
LOC4 = (*result0).cfilename; (*result0).cfilename = copyStringRC1(filename0);
if (LOC4) nimGCunrefNoCycle(LOC4);
LOC5 = (NimStringDesc*)0;
LOC5 = (*result0).filename; (*result0).filename = copyStringRC1(filename0);
if (LOC5) nimGCunrefNoCycle(LOC5);
initidtable_296019_850551059((&(*result0).typecache));
initidtable_296019_850551059((&(*result0).forwtypecache));
asgnRefNoCycle((void**) (&(*result0).module), module0);
initintset_268885_2627731572((&(*result0).typeinfomarker));
asgnRef((void**) (&(*result0).initproc), newproc_529206_3723162438(NIM_NIL, result0));
(*(*result0).initproc).options = initprocoptions_562635_839829468(result0);
asgnRef((void**) (&(*result0).preinitproc), newpreinitproc_562625_839829468(result0));
asgnRef((void**) (&(*result0).postinitproc), newpostinitproc_562630_839829468(result0));
initnodetable_296085_850551059((&(*result0).datacache));
(*result0).typestack = (Ttypeseq292836*) newSeqRC1((&NTI292836), 0);
(*result0).forwardedprocs = (Tsymseq292804*) newSeqRC1((&NTI292804), 0);
asgnRefNoCycle((void**) (&(*result0).typenodesname), gettempname_533596_839829468(result0));
asgnRefNoCycle((void**) (&(*result0).nimtypesname), gettempname_533596_839829468(result0));
{
if (!(((*module0).flags &(1U<<((NU)(((Tsymflag292184) 13))&31U)))!=0)) goto LA8;
(*result0).flags |= ((NU8)1)<<((((Codegenflag529025) 0))%(sizeof(NU8)*8));
(*(*result0).preinitproc).options &= ~(((NU32)1) << ((((Toption169009) 15)) % (sizeof(NU32)*8)));
(*(*result0).postinitproc).options &= ~(((NU32)1) << ((((Toption169009) 15)) % (sizeof(NU32)*8)));
}
LA8: ;
return result0;
}
/* Convenience overload: derive the module's full path from its position
   in the file table, then delegate to the two-argument constructor. */
N_NIMCALL(Tcgen529027*, rawnewmodule_563038_839829468)(Tsym292834* module0) {
NimStringDesc* path0 = tofullpath_192264_155036129(((NI32) ((*module0).position)));
return rawnewmodule_562663_839829468(module0, path0);
}
/* Create the codegen module for 'module0' and register it in the global
   'gmodules' seq (growing the seq if the module's position is beyond its
   current length). Reports an internal error if a module already exists
   at that position, and — under global option bit 2 — if the module
   carries symbol flag 25. */
N_NIMCALL(Tcgen529027*, newmodule_563045_839829468)(Tsym292834* module0) {
Tcgen529027* result0;
result0 = (Tcgen529027*)0;
{
Tcgen529027* LOC3;
NimStringDesc* LOC6;
LOC3 = (Tcgen529027*)0;
LOC3 = getcgenmodule_532226_839829468(module0);
if (!!((LOC3 == NIM_NIL))) goto LA4;
/* a module for this position already exists */
LOC6 = (NimStringDesc*)0;
LOC6 = HEX24_196185_1689653243(T839829468_9);
internalerror_196113_155036129(LOC6);
}
LA4: ;
result0 = rawnewmodule_563038_839829468(module0);
{
/* grow gmodules so position is addressable */
if (!((gmodules_529170_3723162438 ? gmodules_529170_3723162438->Sup.len : 0) <= (*module0).position)) goto LA9;
gmodules_529170_3723162438 = (TY529153*) setLengthSeq(&(gmodules_529170_3723162438)->Sup, sizeof(Tcgen529027*), ((NI) ((NI)((*module0).position + ((NI) 1)))));
}
LA9: ;
asgnRef((void**) (&gmodules_529170_3723162438->data[(*module0).position]), result0);
{
if (!((gglobaloptions_169130_2607990831 &((NU64)1<<((NU)(((Tglobaloption169013) 2))&63U)))!=0)) goto LA13;
{
NimStringDesc* LOC19;
NimStringDesc* LOC20;
if (!(((*module0).flags &(1U<<((NU)(((Tsymflag292184) 25))&31U)))!=0)) goto LA17;
LOC19 = (NimStringDesc*)0;
LOC20 = (NimStringDesc*)0;
LOC20 = tofilename_192260_155036129(((NI32) ((*module0).position)));
LOC19 = rawNewString(LOC20->Sup.len + 28);
appendString(LOC19, ((NimStringDesc*) &T839829468_13));
appendString(LOC19, LOC20);
internalerror_196113_155036129(LOC19);
}
LA17: ;
}
LA13: ;
return result0;
}
/* Codegen pass "open" callback: create the codegen module for 'module0'
   and return it as the pass context. On the first call with global
   option bit 27 set, additionally create the singleton header-emitting
   module ('generatedheader'), named after the configured header file or,
   failing that, the project's full path, with extension T14. */
N_NIMCALL(Tpasscontext341002*, myopen_563115_839829468)(Tsym292834* module0) {
Tpasscontext341002* result0;
Tcgen529027* LOC1;
result0 = (Tpasscontext341002*)0;
LOC1 = (Tcgen529027*)0;
LOC1 = newmodule_563045_839829468(module0);
result0 = &LOC1->Sup;
{
NIM_BOOL LOC4;
NimStringDesc* f0;
NimStringDesc* LOC13;
NimStringDesc* LOC14;
LOC4 = (NIM_BOOL)0;
LOC4 = ((gglobaloptions_169130_2607990831 &((NU64)1<<((NU)(((Tglobaloption169013) 27))&63U)))!=0);
if (!(LOC4)) goto LA5;
LOC4 = (generatedheader_532201_839829468 == NIM_NIL);  /* only once */
LA5: ;
if (!LOC4) goto LA6;
{
/* prefer an explicitly configured header file name */
if (!(((NI) 0) < (headerfile_169138_2607990831 ? headerfile_169138_2607990831->Sup.len : 0))) goto LA10;
f0 = headerfile_169138_2607990831;
}
goto LA8;
LA10: ;
{
f0 = gprojectfull_169211_2607990831;
}
LA8: ;
LOC13 = (NimStringDesc*)0;
LOC13 = completecfilepath_273854_2528170400(f0, NIM_TRUE);
LOC14 = (NimStringDesc*)0;
LOC14 = noschangeFileExt(LOC13, ((NimStringDesc*) &T839829468_14));
asgnRef((void**) (&generatedheader_532201_839829468), rawnewmodule_562663_839829468(module0, LOC14));
(*generatedheader_532201_839829468).flags |= ((NU8)1)<<((((Codegenflag529025) 3))%(sizeof(NU8)*8));
}
LA6: ;
return result0;
}
/* Compute the output C/C++/ObjC file path for module 'm0': pick the
   extension by command (cmd 2 -> T15, cmd 3 -> T16, otherwise T17) or by
   module symbol flags 27/28, then route the module's cfilename through
   the package-name and compile-file-path helpers and swap the extension. */
N_NIMCALL(NimStringDesc*, getcfile_563204_839829468)(Tcgen529027* m0) {
NimStringDesc* result0;
NimStringDesc* ext0;
NimStringDesc* LOC13;
NimStringDesc* LOC14;
result0 = (NimStringDesc*)0;
{
NIM_BOOL LOC3;
LOC3 = (NIM_BOOL)0;
LOC3 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC3) goto LA4;
LOC3 = (((*(*m0).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA4: ;
if (!LOC3) goto LA5;
ext0 = copyString(((NimStringDesc*) &T839829468_15));
}
goto LA1;
LA5: ;
{
NIM_BOOL LOC8;
LOC8 = (NIM_BOOL)0;
LOC8 = (gcmd_169132_2607990831 == ((Tcommands169076) 3));
if (LOC8) goto LA9;
LOC8 = (((*(*m0).module).flags &(1U<<((NU)(((Tsymflag292184) 28))&31U)))!=0);
LA9: ;
if (!LOC8) goto LA10;
ext0 = copyString(((NimStringDesc*) &T839829468_16));
}
goto LA1;
LA10: ;
{
ext0 = copyString(((NimStringDesc*) &T839829468_17));
}
LA1: ;
LOC13 = (NimStringDesc*)0;
LOC13 = withpackagename_170073_2607990831((*m0).cfilename);
LOC14 = (NimStringDesc*)0;
LOC14 = completecfilepath_273854_2528170400(LOC13, NIM_TRUE);
result0 = noschangeFileExt(LOC14, ext0);
return result0;
}
/* Codegen pass "open cached" callback: build a fresh codegen module,
   merge the previously generated C file's sections back into it, and
   return it as the pass context. 'rd0' is accepted for the pass
   interface but not used here. */
N_NIMCALL(Tpasscontext341002*, myopencached_563249_839829468)(Tsym292834* module0, Trodreader332021* rd0) {
Tcgen529027* mod0 = newmodule_563045_839829468(module0);
NimStringDesc* cfile0 = getcfile_563204_839829468(mod0);
readmergeinfo_530613_2760143328(cfile0, mod0);
return &mod0->Sup;
}
/* Code generation is skipped as soon as any error has been reported;
   the node argument is unused here. */
static N_INLINE(NIM_BOOL, skipcodegen_341085_2355241294)(Tnode292802* n0) {
return (((NI) 0) < gerrorcounter_192072_155036129);
}
/* Initialize a location descriptor, but only if it is still unset
   (kind 0): set kind, type and storage, and fill the name rope 'r0'
   only when no name was assigned earlier. Never overwrites existing
   location data. */
N_NIMCALL(void, fillloc_532282_839829468)(Tloc292816* a0, Tlockind292808 k0, Ttype292840* typ0, Ropeobj178006* r0, Tstorageloc292812 s0) {
{
if (!((*a0).k == ((Tlockind292808) 0))) goto LA3;
(*a0).k = k0;
unsureAsgnRef((void**) (&(*a0).t), typ0);
(*a0).s = s0;
{
if (!((*a0).r == NIM_NIL)) goto LA7;
unsureAsgnRef((void**) (&(*a0).r), r0);
}
LA7: ;
}
LA3: ;
}
/* True when the identifier's id lies in one of the compiler's reserved
   id ranges (200..262, 4..70) or equals 138 — the ids the codegen must
   not emit verbatim as C identifiers. */
N_NIMCALL(NIM_BOOL, iskeyword_532960_839829468)(Tident199010* w0) {
NI id0 = (*w0).Sup.id;
if (((NI) 200) <= id0 && id0 <= ((NI) 262)) return NIM_TRUE;
if (((NI) 4) <= id0 && id0 <= ((NI) 70)) return NIM_TRUE;
if (id0 == ((NI) 138)) return NIM_TRUE;
return NIM_FALSE;
}
/* Compute (and cache in s.loc.r) the mangled C name for symbol 's0'.
   The name is kept as the plain mangled identifier only for symbol kinds
   in mask 2824 with no flags from mask 2149580812 and not colliding with
   a reserved keyword; otherwise uniquifying suffixes (separator T12, the
   symbol id, another separator, and the owner hash) are appended.
   Returns the cached rope unchanged when already computed. */
N_NIMCALL(Ropeobj178006*, manglename_533205_839829468)(Tsym292834* s0) {
Ropeobj178006* result0;
result0 = (Ropeobj178006*)0;
result0 = (*s0).loc.r;
{
NIM_BOOL keeporigname0;
NIM_BOOL LOC5;
NIM_BOOL LOC6;
NIM_BOOL LOC9;
NimStringDesc* LOC10;
if (!(result0 == NIM_NIL)) goto LA3;
/* decide whether the original (unsuffixed) name can be kept */
LOC5 = (NIM_BOOL)0;
LOC6 = (NIM_BOOL)0;
LOC6 = ((2824 &(1U<<((NU)((*s0).kind)&31U)))!=0);
if (!(LOC6)) goto LA7;
LOC6 = ((IL64(2149580812) & (*s0).flags) == 0);
LA7: ;
LOC5 = LOC6;
if (!(LOC5)) goto LA8;
LOC9 = (NIM_BOOL)0;
LOC9 = iskeyword_532960_839829468((*s0).name);
LOC5 = !(LOC9);
LA8: ;
keeporigname0 = LOC5;
LOC10 = (NimStringDesc*)0;
LOC10 = mangle_528847_2036603609((*(*s0).name).s);
result0 = rope_178277_2381377266(LOC10);
{
if (!keeporigname0) goto LA13;
add_178487_2381377266(&result0, ((NimStringDesc*) &T839829468_18));
}
goto LA11;
LA13: ;
{
/* append separator, symbol id, separator, owner hash for uniqueness */
TY533289 LOC16;
Ropeobj178006* LOC17;
Ropeobj178006* LOC18;
TY533289 LOC19;
Ropeobj178006* LOC20;
NU32 LOC21;
Ropeobj178006* LOC22;
memset((void*)LOC16, 0, sizeof(LOC16));
LOC17 = (Ropeobj178006*)0;
LOC17 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_12), LOC16, 0);
add_178482_2381377266(&result0, LOC17);
LOC18 = (Ropeobj178006*)0;
LOC18 = rope_178401_2381377266(((NI64) ((*s0).Sup.id)));
add_178482_2381377266(&result0, LOC18);
memset((void*)LOC19, 0, sizeof(LOC19));
LOC20 = (Ropeobj178006*)0;
LOC20 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_12), LOC19, 0);
add_178482_2381377266(&result0, LOC20);
LOC21 = (NU32)0;
LOC21 = hashowner_532977_839829468(s0);
LOC22 = (Ropeobj178006*)0;
LOC22 = rope_178401_2381377266(((NI64) (LOC21)));
add_178482_2381377266(&result0, LOC22);
}
LA11: ;
asgnRefNoCycle((void**) (&(*s0).loc.r), result0);
}
LA3: ;
return result0;
}
/* Lazily set up a proc symbol's location: if still unset (kind 0),
   mangle its name and fill the loc as kind 7 with storage class 2. */
N_NIMCALL(void, fillprocloc_539201_839829468)(Tsym292834* sym0) {
{
Ropeobj178006* LOC5;
if (!((*sym0).loc.k == ((Tlockind292808) 0))) goto LA3;
LOC5 = (Ropeobj178006*)0;
LOC5 = manglename_533205_839829468(sym0);
fillloc_532282_839829468((&(*sym0).loc), ((Tlockind292808) 7), (*sym0).typ, LOC5, ((Tstorageloc292812) 2));
}
LA3: ;
}
/* If 'sym0' carries loc flag 6 (header annotation), record its annex
   path in the module's header-file list. The includestr result is
   intentionally discarded — it only reports whether the entry was new. */
N_NIMCALL(void, useheader_532369_839829468)(Tcgen529027* m0, Tsym292834* sym0) {
{
NimStringDesc* LOC5;
NIM_BOOL LOC6;
if (!(((*sym0).loc.flags &(1U<<((NU)(((Tlocflag292810) 6))&15U)))!=0)) goto LA3;
LOC5 = (NimStringDesc*)0;
LOC5 = getstr_297230_850551059((*(*sym0).annex).path);
LOC6 = (NIM_BOOL)0;
LOC6 = includestr_147249_3771138726((&(*m0).headerfiles), LOC5);
}
LA3: ;
}
/* Append one character plus a fresh NUL terminator to 'dest0' and bump
   its length; the caller must already have reserved the capacity. */
static N_INLINE(void, appendChar)(NimStringDesc* dest0, NIM_CHAR c0) {
NI len0 = (*dest0).Sup.len;
(*dest0).data[len0] = c0;
(*dest0).data[len0 + ((NI) 1)] = 0;
(*dest0).Sup.len = len0 + ((NI) 1);
}
/* A proc counts as activated once it has a resolved (non-nil) type. */
N_NIMCALL(NIM_BOOL, isactivated_561431_839829468)(Tsym292834* prc0) {
return !(((*prc0).typ == NIM_NIL));
}
/* Append 'prc0' to the module's forwardedprocs seq (grow storage, store
   with RC bookkeeping, bump the length) and bump the global counter of
   still-forwarded procs. */
N_NIMCALL(void, addforwardedproc_532203_839829468)(Tcgen529027* m0, Tsym292834* prc0) {
(*m0).forwardedprocs = (Tsymseq292804*) incrSeqV2(&((*m0).forwardedprocs)->Sup, sizeof(Tsym292834*));
asgnRefNoCycle((void**) (&(*m0).forwardedprocs->data[(*m0).forwardedprocs->Sup.len]), prc0);
++(*m0).forwardedprocs->Sup.len;
gforwardedprocscounter_529171_3723162438 += ((NI) 1);
}
/* When option bit 10 (line-directive generation) is set, append a
   formatted line directive (format T21, with the single-line-escaped
   file name and the line number) to the rope '*r0'. No-op otherwise. */
N_NIMCALL(void, genclinedir_532725_839829468)(Ropeobj178006** r0, NimStringDesc* filename0, NI line0) {
{
TY532811 LOC5;
NimStringDesc* LOC6;
if (!((goptions_169128_2607990831 &(1U<<((NU)(((Toption169009) 10))&31U)))!=0)) goto LA3;
memset((void*)LOC5, 0, sizeof(LOC5));
LOC6 = (NimStringDesc*)0;
LOC6 = makesinglelinecstring_528835_2036603609(filename0);
LOC5[0] = rope_178277_2381377266(LOC6);
LOC5[1] = rope_178401_2381377266(((NI64) (line0)));
addf_179205_2381377266(r0, ((NimStringDesc*) &T839829468_21), LOC5, 2);
}
LA3: ;
}
/* Widen a TLineInfo's line field to the native integer type. */
static N_INLINE(NI, tolinenumber_192415_155036129)(Tlineinfo191336 info0) {
return ((NI) (info0.line));
}
/* Line number of 'info0' clamped at zero, so negative (unknown) line
   values never leak into generated line directives. */
N_NIMCALL(NI, safelinenm_532721_839829468)(Tlineinfo191336 info0) {
NI line0 = tolinenumber_192415_155036129(info0);
return (line0 < ((NI) 0)) ? ((NI) 0) : line0;
}
/* TLineInfo overload: resolve the full file path and clamped line
   number, then delegate to the (rope, filename, line) variant. */
N_NIMCALL(void, genclinedir_532813_839829468)(Ropeobj178006** r0, Tlineinfo191336 info0) {
NimStringDesc* path0 = tofullpath_192264_155036129(info0.fileindex);
NI line0 = safelinenm_532721_839829468(info0);
genclinedir_532725_839829468(r0, path0, line0);
}
/* Map a Nim set type onto a C type kind by its computed byte size:
   sizes 1/2/4/8 map to kinds 4/5/6/7; any other size falls back to
   kind 17. */
N_NIMCALL(Tctypekind529007, mapsettype_533389_839829468)(Ttype292840* typ0) {
NI64 size0 = getsize_320135_3876443242(typ0);
if (((NI) (size0)) == ((NI) 1)) return ((Tctypekind529007) 4);
if (((NI) (size0)) == ((NI) 2)) return ((Tctypekind529007) 5);
if (((NI) (size0)) == ((NI) 4)) return ((Tctypekind529007) 6);
if (((NI) (size0)) == ((NI) 8)) return ((Tctypekind529007) 7);
return ((Tctypekind529007) 17);
}
/* Map a Nim type kind onto the C-level type kind used by the code
   generator. Transparent wrappers (ranges, distincts, var, generic
   instances, etc.) recurse into their last son; sets delegate to
   mapsettype; enums choose a signed/unsigned kind by size (or kind 6
   when the first ordinal is negative); pointer-ish kinds discriminate
   on their skipped base; unknown/unsupported kinds raise an internal
   error (message T25). */
N_NIMCALL(Tctypekind529007, maptype_533393_839829468)(Ttype292840* typ0) {
Tctypekind529007 result0;
result0 = (Tctypekind529007)0;
switch ((*typ0).kind) {
case ((Ttypekind292244) 0):
case ((Ttypekind292244) 7):
{
result0 = ((Tctypekind529007) 0);
}
break;
case ((Ttypekind292244) 1):
{
result0 = ((Tctypekind529007) 2);
}
break;
case ((Ttypekind292244) 2):
{
result0 = ((Tctypekind529007) 1);
}
break;
case ((Ttypekind292244) 19):
{
result0 = mapsettype_533389_839829468(typ0);
}
break;
case ((Ttypekind292244) 27):
case ((Ttypekind292244) 4):
case ((Ttypekind292244) 16):
case ((Ttypekind292244) 48):
{
result0 = ((Tctypekind529007) 17);
}
break;
case ((Ttypekind292244) 17):
case ((Ttypekind292244) 18):
{
result0 = ((Tctypekind529007) 19);
}
break;
case ((Ttypekind292244) 10):
case ((Ttypekind292244) 11):
case ((Ttypekind292244) 12):
case ((Ttypekind292244) 13):
case ((Ttypekind292244) 15):
case ((Ttypekind292244) 46):
case ((Ttypekind292244) 47):
case ((Ttypekind292244) 49):
case ((Ttypekind292244) 8):
{
/* transparent wrapper kinds: recurse into the last son */
Ttype292840* LOC8;
LOC8 = (Ttype292840*)0;
LOC8 = lastson_295377_850551059(typ0);
result0 = maptype_533393_839829468(LOC8);
}
break;
case ((Ttypekind292244) 14):
{
/* enum: negative first ordinal forces kind 6, otherwise pick by size */
{
NI64 LOC12;
LOC12 = (NI64)0;
LOC12 = firstord_320001_3876443242(typ0);
if (!(LOC12 < IL64(0))) goto LA13;
result0 = ((Tctypekind529007) 6);
}
goto LA10;
LA13: ;
{
NI64 LOC16;
LOC16 = (NI64)0;
LOC16 = getsize_320135_3876443242(typ0);
switch (((NI) (LOC16))) {
case ((NI) 1):
{
result0 = ((Tctypekind529007) 13);
}
break;
case ((NI) 2):
{
result0 = ((Tctypekind529007) 14);
}
break;
case ((NI) 4):
{
result0 = ((Tctypekind529007) 6);
}
break;
case ((NI) 8):
{
result0 = ((Tctypekind529007) 7);
}
break;
default:
{
internalerror_196113_155036129(((NimStringDesc*) &T839829468_25));
}
break;
}
}
LA10: ;
}
break;
case ((Ttypekind292244) 20):
{
result0 = maptype_533393_839829468((*typ0).sons->data[((NI) 0)]);
}
break;
case ((Ttypekind292244) 21):
case ((Ttypekind292244) 23):
case ((Ttypekind292244) 22):
{
/* pointer-ish kinds: discriminate on the skipped base type */
Ttype292840* base0;
Ttype292840* LOC24;
LOC24 = (Ttype292840*)0;
LOC24 = lastson_295377_850551059(typ0);
base0 = skiptypes_296099_850551059(LOC24, IL64(211106232576256));
switch ((*base0).kind) {
case ((Ttypekind292244) 27):
case ((Ttypekind292244) 4):
case ((Ttypekind292244) 16):
case ((Ttypekind292244) 48):
{
result0 = ((Tctypekind529007) 18);
}
break;
default:
{
result0 = ((Tctypekind529007) 20);
}
break;
}
}
break;
case ((Ttypekind292244) 26):
{
result0 = ((Tctypekind529007) 20);
}
break;
case ((Ttypekind292244) 24):
{
result0 = ((Tctypekind529007) 22);
}
break;
case ((Ttypekind292244) 25):
{
/* proc type: calling convention 8 gets kind 19, others kind 23 */
{
if (!!(((*typ0).callconv == ((Tcallingconvention292002) 8)))) goto LA32;
result0 = ((Tctypekind529007) 23);
}
goto LA30;
LA32: ;
{
result0 = ((Tctypekind529007) 19);
}
LA30: ;
}
break;
case ((Ttypekind292244) 28):
{
result0 = ((Tctypekind529007) 21);
}
break;
case ((Ttypekind292244) 29):
{
result0 = ((Tctypekind529007) 24);
}
break;
case ((Ttypekind292244) 31) ... ((Ttypekind292244) 44):
{
/* numeric kinds map linearly: kind 31 -> ctype 3, 32 -> 4, ... */
result0 = ((Tctypekind529007) ((NI)(((NI) ((NI)(((NI) ((*typ0).kind)) - ((NI) 31)))) + ((NI) 3))));
}
break;
case ((Ttypekind292244) 59):
{
{
Ttype292840* LOC43;
if (!!(((*typ0).n == NIM_NIL))) goto LA41;
LOC43 = (Ttype292840*)0;
LOC43 = lastson_295377_850551059(typ0);
result0 = maptype_533393_839829468(LOC43);
}
goto LA39;
LA41: ;
{
internalerror_196113_155036129(((NimStringDesc*) &T839829468_25));
}
LA39: ;
}
break;
default:
{
internalerror_196113_155036129(((NimStringDesc*) &T839829468_25));
}
break;
}
return result0;
}
/* True when the type has a symbol whose flags include bit 27 (from the
   function name: the imported-C++ marker). */
N_NIMCALL(NIM_BOOL, isimportedcpptype_533476_839829468)(Ttype292840* t0) {
NIM_BOOL hassym0 = !(((*t0).sym == NIM_NIL));
if (hassym0) {
hassym0 = (((*(*t0).sym).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
}
return hassym0;
}
/* A type needs a non-trivial assignment exactly when it contains
   garbage-collected references. */
N_NIMCALL(NIM_BOOL, needscomplexassignment_533509_839829468)(Ttype292840* typ0) {
return containsgarbagecollectedref_320117_3876443242(typ0);
}
/* An object type lacks an embedded type field when it is kind 17
   (object) and either carries type flag 2 with a nil base (sons[0]) or
   is a pure object. Short-circuit order matches the original: sons[0]
   is only read when flag 2 is set. */
static N_INLINE(NIM_BOOL, isobjlackingtypefield_533513_839829468)(Ttype292840* typ0) {
NIM_BOOL res0 = ((*typ0).kind == ((Ttypekind292244) 17));
if (res0) {
NIM_BOOL nobase0 = (((*typ0).flags &(1U<<((NU)(((Ttypeflag292431) 2))&31U)))!=0);
if (nobase0) {
nobase0 = ((*typ0).sons->data[((NI) 0)] == NIM_NIL);
}
if (nobase0) {
res0 = NIM_TRUE;
} else {
res0 = ispureobject_320138_3876443242(typ0);
}
}
return res0;
}
/* Decide whether 'rettype0' cannot be returned by value from a generated
   C function (and must become an out-parameter instead): nil types are
   invalid; ctype kind 17 (array-like) is invalid unless the skipped type
   is a pointer kind (21/22/23); ctype kind 19 (struct-like) is invalid
   when it needs complex assignment, or is an object with an embedded
   type field — but imported C++ types are always valid. Everything else
   is fine to return by value. */
N_NIMCALL(NIM_BOOL, isinvalidreturntype_533548_839829468)(Ttype292840* rettype0) {
NIM_BOOL result0;
{ result0 = (NIM_BOOL)0;
{
if (!(rettype0 == NIM_NIL)) goto LA3;
result0 = NIM_TRUE;
}
goto LA1;
LA3: ;
{
Tctypekind529007 LOC6;
LOC6 = (Tctypekind529007)0;
LOC6 = maptype_533393_839829468(rettype0);
switch (LOC6) {
case ((Tctypekind529007) 17):
{
Ttype292840* LOC8;
LOC8 = (Ttype292840*)0;
LOC8 = skiptypes_296099_850551059(rettype0, IL64(211106232576256));
result0 = !(((*LOC8).kind == ((Ttypekind292244) 23) || (*LOC8).kind == ((Ttypekind292244) 22) || (*LOC8).kind == ((Ttypekind292244) 21)));
}
break;
case ((Tctypekind529007) 19):
{
Ttype292840* t0;
NIM_BOOL LOC16;
NIM_BOOL LOC18;
NIM_BOOL LOC20;
t0 = skiptypes_296099_850551059(rettype0, IL64(211106232576256));
{
/* imported C++ types are returned by value regardless */
NIM_BOOL LOC12;
LOC12 = (NIM_BOOL)0;
LOC12 = isimportedcpptype_533476_839829468(rettype0);
if (LOC12) goto LA13;
LOC12 = isimportedcpptype_533476_839829468(t0);
LA13: ;
if (!LOC12) goto LA14;
result0 = NIM_FALSE;
goto BeforeRet;
}
LA14: ;
LOC16 = (NIM_BOOL)0;
LOC16 = needscomplexassignment_533509_839829468(t0);
if (LOC16) goto LA17;
LOC18 = (NIM_BOOL)0;
LOC18 = ((*t0).kind == ((Ttypekind292244) 17));
if (!(LOC18)) goto LA19;
LOC20 = (NIM_BOOL)0;
LOC20 = isobjlackingtypefield_533513_839829468(t0);
LOC18 = !(LOC20);
LA19: ;
LOC16 = LOC18;
LA17: ;
result0 = LOC16;
}
break;
default:
{
result0 = NIM_FALSE;
}
break;
}
}
LA1: ;
}BeforeRet: ;
return result0;
}
/* Base rope for a type's generated name: the mangled symbol name when
   the type has one, otherwise the anonymous-type literal T28 (formatted
   with no arguments). */
N_NIMCALL(Ropeobj178006*, typename_533292_839829468)(Ttype292840* typ0) {
Ropeobj178006* result0;
result0 = (Ropeobj178006*)0;
{
NimStringDesc* LOC5;
if (!!(((*typ0).sym == NIM_NIL))) goto LA3;
LOC5 = (NimStringDesc*)0;
LOC5 = mangle_528847_2036603609((*(*(*typ0).sym).name).s);
result0 = rope_178277_2381377266(LOC5);
}
goto LA1;
LA3: ;
{
TY533289 LOC7;
memset((void*)LOC7, 0, sizeof(LOC7));
result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_28), LOC7, 0);
}
LA1: ;
return result0;
}
/* Resolve (and cache in typ.loc.r) the C name of a type. If the type's
   symbol has a flag in mask 96 (an externally fixed name), its loc rope
   is used directly; otherwise the cached name "basename & id" is built
   on first use. A still-nil result is an internal error (message T29
   plus the type-kind name). */
N_NIMCALL(Ropeobj178006*, gettypename_533313_839829468)(Ttype292840* typ0) {
Ropeobj178006* result0;
result0 = (Ropeobj178006*)0;
{
NIM_BOOL LOC3;
LOC3 = (NIM_BOOL)0;
LOC3 = !(((*typ0).sym == NIM_NIL));
if (!(LOC3)) goto LA4;
LOC3 = !(((96 & (*(*typ0).sym).flags) == 0));
LA4: ;
if (!LOC3) goto LA5;
result0 = (*(*typ0).sym).loc.r;
}
goto LA1;
LA5: ;
{
{
/* first use: cache "typename & id" in the type's loc */
Ropeobj178006* LOC12;
Ropeobj178006* LOC13;
if (!((*typ0).loc.r == NIM_NIL)) goto LA10;
LOC12 = (Ropeobj178006*)0;
LOC12 = typename_533292_839829468(typ0);
LOC13 = (Ropeobj178006*)0;
LOC13 = rope_178401_2381377266(((NI64) ((*typ0).Sup.id)));
asgnRefNoCycle((void**) (&(*typ0).loc.r), HEX26_178418_2381377266(LOC12, LOC13));
}
LA10: ;
result0 = (*typ0).loc.r;
}
LA1: ;
{
NimStringDesc* LOC18;
if (!(result0 == NIM_NIL)) goto LA16;
LOC18 = (NimStringDesc*)0;
LOC18 = rawNewString(reprEnum((NI)(*typ0).kind, (&NTI292244))->Sup.len + 13);
appendString(LOC18, ((NimStringDesc*) &T839829468_29));
appendString(LOC18, reprEnum((NI)(*typ0).kind, (&NTI292244)));
internalerror_196113_155036129(LOC18);
}
LA16: ;
return result0;
}
/* Use the type's own generated name when it has a symbol carrying
   symbol flag 5 and no magic; otherwise fall back to the supplied
   C literal. Short-circuit order matches the original. */
N_NIMCALL(Ropeobj178006*, typenameorliteral_533898_839829468)(Ttype292840* t0, NimStringDesc* literal0) {
NIM_BOOL named0 = !(((*t0).sym == NIM_NIL));
if (named0) {
named0 = (((*(*t0).sym).flags &(1U<<((NU)(((Tsymflag292184) 5))&31U)))!=0);
}
if (named0) {
named0 = ((*(*t0).sym).magic == ((Tmagic292524) 0));
}
if (named0) {
return gettypename_533313_839829468(t0);
}
return rope_178277_2381377266(literal0);
}
/* Return the C type name for "simple" types that need no generated
   declaration: basic kinds map to fixed literals (or their own name via
   typenameorliteral), numeric kinds 31..44 use the Numericaltypetostr
   table, wrapper kinds recurse, and everything else yields NULL so the
   caller falls back to full type generation. Kind 28 additionally pulls
   in the runtime symbol T31 via cgsym (result intentionally unused). */
N_NIMCALL(Ropeobj178006*, getsimpletypedesc_533936_839829468)(Tcgen529027* m0, Ttype292840* typ0) {
Ropeobj178006* result0;
result0 = (Ropeobj178006*)0;
switch ((*typ0).kind) {
case ((Ttypekind292244) 26):
{
result0 = typenameorliteral_533898_839829468(typ0, ((NimStringDesc*) &T839829468_30));
}
break;
case ((Ttypekind292244) 28):
{
Ropeobj178006* LOC3;
LOC3 = (Ropeobj178006*)0;
LOC3 = cgsym_532403_839829468(m0, ((NimStringDesc*) &T839829468_31));
result0 = typenameorliteral_533898_839829468(typ0, ((NimStringDesc*) &T839829468_32));
}
break;
case ((Ttypekind292244) 29):
{
result0 = typenameorliteral_533898_839829468(typ0, ((NimStringDesc*) &T839829468_33));
}
break;
case ((Ttypekind292244) 1):
{
result0 = typenameorliteral_533898_839829468(typ0, ((NimStringDesc*) &T839829468_34));
}
break;
case ((Ttypekind292244) 2):
{
result0 = typenameorliteral_533898_839829468(typ0, ((NimStringDesc*) &T839829468_35));
}
break;
case ((Ttypekind292244) 5):
{
result0 = typenameorliteral_533898_839829468(typ0, ((NimStringDesc*) &T839829468_18));
}
break;
case ((Ttypekind292244) 31) ... ((Ttypekind292244) 44):
{
result0 = typenameorliteral_533898_839829468(typ0, Numericaltypetostr_533941_839829468[((*typ0).kind)- 31]);
}
break;
case ((Ttypekind292244) 13):
case ((Ttypekind292244) 20):
case ((Ttypekind292244) 15):
{
result0 = getsimpletypedesc_533936_839829468(m0, (*typ0).sons->data[((NI) 0)]);
}
break;
case ((Ttypekind292244) 59):
{
{
Ttype292840* LOC15;
if (!!(((*typ0).n == NIM_NIL))) goto LA13;
LOC15 = (Ttype292840*)0;
LOC15 = lastson_295377_850551059(typ0);
result0 = getsimpletypedesc_533936_839829468(m0, LOC15);
}
goto LA11;
LA13: ;
{
internalerror_196113_155036129(((NimStringDesc*) &T839829468_50));
}
LA11: ;
}
break;
case ((Ttypekind292244) 11):
{
Ttype292840* LOC18;
LOC18 = (Ttype292840*)0;
LOC18 = lastson_295377_850551059(typ0);
result0 = getsimpletypedesc_533936_839829468(m0, LOC18);
}
break;
default:
{
result0 = NIM_NIL;
}
break;
}
return result0;
}
/* Look the type up in a type-keyed id table and reinterpret the stored
   object as a name rope; NULL when the type has no cached entry. */
N_NIMCALL(Ropeobj178006*, cachegettype_533591_839829468)(Tidtable292850 tab0, Ttype292840* key0) {
TNimObject* hit0 = idtableget_299086_2984716966(tab0, (&key0->Sup));
return ((Ropeobj178006*) (hit0));
}
/* First-chance type-name lookup: a nil type maps to the fallback
   literal T26; otherwise try the simple-type mapping and, when that
   yields nothing, the module's type cache. May return NULL, signalling
   that the caller must generate the type. */
N_NIMCALL(Ropeobj178006*, gettypepre_533972_839829468)(Tcgen529027* m0, Ttype292840* typ0) {
Ropeobj178006* name0;
if (typ0 == NIM_NIL) {
return rope_178277_2381377266(((NimStringDesc*) &T839829468_26));
}
name0 = getsimpletypedesc_533936_839829468(m0, typ0);
if (name0 == NIM_NIL) {
name0 = cachegettype_533591_839829468((*m0).typecache, typ0);
}
return name0;
}
/* True when the type has a symbol whose flags include bit 5 (from the
   function name: the imported marker). */
N_NIMCALL(NIM_BOOL, isimportedtype_533449_839829468)(Ttype292840* t0) {
NIM_BOOL hassym0 = !(((*t0).sym == NIM_NIL));
if (hassym0) {
hassym0 = (((*(*t0).sym).flags &(1U<<((NU)(((Tsymflag292184) 5))&31U)))!=0);
}
return hassym0;
}
/* Pick the forward-declaration format string: literal T54 when the
   current command is 2 or the module carries symbol flag 27 (NOTE
   (review): presumably the C++ backend variant — confirm), otherwise
   literal T55. */
N_NIMCALL(NimStringDesc*, getforwardstructformat_534015_839829468)(Tcgen529027* m0) {
NIM_BOOL alt0 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (!(alt0)) {
alt0 = (((*(*m0).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
}
if (alt0) {
return copyString(((NimStringDesc*) &T839829468_54));
}
return copyString(((NimStringDesc*) &T839829468_55));
}
/* Select the aggregate keyword rope by type flag 1: literal T56 when
   the flag is set, literal T57 otherwise (from the function name:
   union vs struct). */
N_NIMCALL(Ropeobj178006*, structorunion_534001_839829468)(Ttype292840* t0) {
NimStringDesc* kw0;
if ((((*t0).flags &(1U<<((NU)(((Ttypeflag292431) 1))&31U)))!=0)) {
kw0 = ((NimStringDesc*) &T839829468_56);
} else {
kw0 = ((NimStringDesc*) &T839829468_57);
}
return rope_178277_2381377266(kw0);
}
/* Return the name of 'typ0' as a forward declaration, caching it in the
   module's forwtypecache: try that cache, then the first-chance lookup;
   for object/tuple/seq-like kinds (24/18/17) generate the name, emit a
   forward struct/union declaration into file section 2 (unless the type
   is imported), and cache it. Any other kind is an internal error
   (message T58 plus the type-kind name). */
N_NIMCALL(Ropeobj178006*, gettypeforward_534039_839829468)(Tcgen529027* m0, Ttype292840* typ0) {
Ropeobj178006* result0;
{ result0 = (Ropeobj178006*)0;
result0 = cachegettype_533591_839829468((*m0).forwtypecache, typ0);
{
if (!!((result0 == NIM_NIL))) goto LA3;
goto BeforeRet;
}
LA3: ;
result0 = gettypepre_533972_839829468(m0, typ0);
{
if (!!((result0 == NIM_NIL))) goto LA7;
goto BeforeRet;
}
LA7: ;
switch ((*typ0).kind) {
case ((Ttypekind292244) 24):
case ((Ttypekind292244) 18):
case ((Ttypekind292244) 17):
{
Tidobj199004* LOC17;
TNimObject* LOC18;
result0 = gettypename_533313_839829468(typ0);
{
/* imported types already have a declaration elsewhere */
NIM_BOOL LOC12;
NimStringDesc* LOC15;
TY532811 LOC16;
LOC12 = (NIM_BOOL)0;
LOC12 = isimportedtype_533449_839829468(typ0);
if (!!(LOC12)) goto LA13;
LOC15 = (NimStringDesc*)0;
LOC15 = getforwardstructformat_534015_839829468(m0);
memset((void*)LOC16, 0, sizeof(LOC16));
LOC16[0] = structorunion_534001_839829468(typ0);
LOC16[1] = result0;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 2))- 0], LOC15, LOC16, 2);
}
LA13: ;
LOC17 = (Tidobj199004*)0;
LOC17 = &typ0->Sup;
LOC18 = (TNimObject*)0;
LOC18 = &result0->Sup;
idtableput_299094_2984716966((&(*m0).forwtypecache), LOC17, LOC18);
}
break;
default:
{
NimStringDesc* LOC20;
LOC20 = (NimStringDesc*)0;
LOC20 = rawNewString(reprEnum((NI)(*typ0).kind, (&NTI292244))->Sup.len + 16);
appendString(LOC20, ((NimStringDesc*) &T839829468_58));
appendString(LOC20, reprEnum((NI)(*typ0).kind, (&NTI292244)));
appendChar(LOC20, 41);
internalerror_196113_155036129(LOC20);
}
break;
}
}BeforeRet: ;
return result0;
}
/* Append 'typ0' to the module's typestack seq (grow storage, store with
   RC bookkeeping, bump the length) for later full generation. */
N_NIMCALL(void, pushtype_533958_839829468)(Tcgen529027* m0, Ttype292840* typ0) {
(*m0).typestack = (Ttypeseq292836*) incrSeqV2(&((*m0).typestack)->Sup, sizeof(Ttype292840*));
asgnRefNoCycle((void**) (&(*m0).typestack->data[(*m0).typestack->Sup.len]), typ0);
++(*m0).typestack->Sup.len;
}
/* Produce the C name of 't0' where a forward declaration suffices:
   objects/tuples (kinds 17/18) use their unique type's forward name and
   push the type for later full generation — except imported C++ types
   reached via kind 11, which need the full descriptor; seqs (kind 24)
   use the forward name with the T53 suffix appended; all other kinds
   delegate to full descriptor generation. */
N_NIMCALL(Ropeobj178006*, gettypedescweak_534079_839829468)(Tcgen529027* m0, Ttype292840* t0, Intset268030* check0) {
Ropeobj178006* result0;
Ttype292840* etb0;
result0 = (Ropeobj178006*)0;
etb0 = skiptypes_296099_850551059(t0, IL64(211106232576256));
switch ((*etb0).kind) {
case ((Ttypekind292244) 17):
case ((Ttypekind292244) 18):
{
{
NIM_BOOL LOC4;
LOC4 = (NIM_BOOL)0;
LOC4 = isimportedcpptype_533476_839829468(etb0);
if (!(LOC4)) goto LA5;
LOC4 = ((*t0).kind == ((Ttypekind292244) 11));
LA5: ;
if (!LOC4) goto LA6;
result0 = gettypedescaux_533503_839829468(m0, t0, check0);
}
goto LA2;
LA6: ;
{
Ttype292840* x0;
x0 = getuniquetype_528640_2036603609(etb0);
result0 = gettypeforward_534039_839829468(m0, x0);
pushtype_533958_839829468(m0, x0);
}
LA2: ;
}
break;
case ((Ttypekind292244) 24):
{
Ttype292840* x0;
Ropeobj178006* LOC10;
x0 = getuniquetype_528640_2036603609(etb0);
LOC10 = (Ropeobj178006*)0;
LOC10 = gettypeforward_534039_839829468(m0, x0);
result0 = HEX26_178447_2381377266(LOC10, ((NimStringDesc*) &T839829468_53));
pushtype_533958_839829468(m0, x0);
}
break;
default:
{
result0 = gettypedescaux_533503_839829468(m0, t0, check0);
}
break;
}
return result0;
}
/* Number of sons of node `n0`; 0 when the sons seq is nil. */
static N_INLINE(NI, len_293081_850551059)(Tnode292802* n0) {
	NI result0 = ((NI) 0);
	if (!((*n0).kindU.S6.sons == 0)) {
		result0 = ((*n0).kindU.S6.sons ? (*n0).kindU.S6.sons->Sup.len : 0);
	}
	return result0;
}
/* Formats `args0` with format rope `frmt0` via ropecg and appends the
   result to the rope pointed to by `c0`. */
N_NIMCALL(void, appcg_532632_839829468)(Tcgen529027* m0, Ropeobj178006** c0, NimStringDesc* frmt0, Ropeobj178006** args0, NI args0Len0) {
	Ropeobj178006* formatted0 = ropecg_532407_839829468(m0, frmt0, args0, args0Len0);
	add_178482_2381377266(c0, formatted0);
}
/* Scans a generic-slot reference inside an importcpp pattern string:
   advances past the marker char at `*cursor0`, counts a run of '*' chars,
   then expects a single decimal digit. On success writes the digit into
   `*outidx0`, the star count into `*outstars0`, leaves `*cursor0` one past
   the digit, and returns true; otherwise returns false. */
N_NIMCALL(NIM_BOOL, scancppgenericslot_534827_839829468)(NimStringDesc* pat0, NI* cursor0, NI* outidx0, NI* outstars0) {
	(*cursor0) += ((NI) 1);
	NI begin0 = (*cursor0);
	/* consume the run of '*' (ASCII 42) pointer markers */
	while ((NU8)(pat0->data[(*cursor0)]) == (NU8)(42)) {
		(*cursor0) += ((NI) 1);
	}
	NU8 ch0 = (NU8)(pat0->data[(*cursor0)]);
	if (((NU8)(48)) <= ch0 && ch0 <= ((NU8)(57))) {
		/* single ASCII digit: slot index */
		(*outidx0) = ((NI) (((NI) (ch0)) - ((NI) 48)));
		(*outstars0) = (NI)((*cursor0) - begin0);
		(*cursor0) += ((NI) 1);
		return NIM_TRUE;
	}
	return NIM_FALSE;
}
/* Resolves a generic-slot reference from an importcpp pattern: selects son
   `idx0` of `typ0` (internal error when out of range), then strips `stars0`
   levels of indirection. NOTE(review): kind 11 presumably tyPtr/tyVar-like;
   its element is son 1 -- confirm against the Nim sources. */
N_NIMCALL(Ttype292840*, resolvestarsincpptype_534891_839829468)(Ttype292840* typ0, NI idx0, NI stars0) {
	if (len_295339_850551059(typ0) <= idx0) {
		internalerror_196113_155036129(((NimStringDesc*) &T839829468_81));
	}
	Ttype292840* result0 = (*typ0).sons->data[idx0];
	for (NI level0 = ((NI) 1); level0 <= stars0; level0 += ((NI) 1)) {
		/* only descend while there is a non-nil type with at least one son */
		if (!(result0 == NIM_NIL) && (((NI) 0) < len_295339_850551059(result0))) {
			if ((*result0).kind == ((Ttypekind292244) 11)) {
				result0 = (*result0).sons->data[((NI) 1)];
			} else {
				result0 = elemtype_320394_3876443242(result0);
			}
		}
	}
	return result0;
}
/* Mangles a field identifier into a valid C name; when the mangled name
   collides with a C keyword, its first character is uppercased. */
N_NIMCALL(NimStringDesc*, manglefield_532973_839829468)(Tident199010* name0) {
	NimStringDesc* result0 = mangle_528847_2036603609((*name0).s);
	if (iskeyword_532960_839829468(name0)) {
		result0->data[((NI) 0)] = nsuToUpperAsciiChar(result0->data[((NI) 0)]);
	}
	return result0;
}
/* Returns the C name rope for record field `field0`. When the record's
   symbol carries the relevant flags (mask 96, presumably importc/header
   related -- confirm against the Nim sources) the field's existing location
   rope is reused verbatim; otherwise the field name is mangled. A nil
   result is an internal error. */
N_NIMCALL(Ropeobj178006*, manglerecfieldname_534361_839829468)(Tsym292834* field0, Ttype292840* rectype0) {
	Ropeobj178006* result0;
	if (!((*rectype0).sym == NIM_NIL) && !(((96 & (*(*rectype0).sym).flags) == 0))) {
		result0 = (*field0).loc.r;
	} else {
		NimStringDesc* mangled0 = manglefield_532973_839829468((*field0).name);
		result0 = rope_178277_2381377266(mangled0);
	}
	if (result0 == NIM_NIL) {
		internalerror_196100_155036129((*field0).info, ((NimStringDesc*) &T839829468_96));
	}
	return result0;
}
/* Recursively emits the C field declarations for one record AST node `n0`
   of record type `rectype0`, prefixing nested accesses with `accessexpr0`.
   Nim-compiler-generated code (ccgtypes genRecordFieldsAux); do not
   hand-edit -- the goto labels encode Nim's if/elif lowering.
   NOTE(review): node kinds 138/139/3 presumably correspond to
   nkRecList/nkRecCase/nkSym -- confirm against the Nim sources. */
N_NIMCALL(Ropeobj178006*, genrecordfieldsaux_534421_839829468)(Tcgen529027* m0, Tnode292802* n0, Ropeobj178006* accessexpr0, Ttype292840* rectype0, Intset268030* check0) {
Ropeobj178006* result0;
Ropeobj178006* ae0;
Ropeobj178006* uname0;
Ropeobj178006* sname0;
Ropeobj178006* a0;
Tnode292802* k0;
Tsym292834* field0;
{ result0 = (Ropeobj178006*)0;
ae0 = (Ropeobj178006*)0;
uname0 = (Ropeobj178006*)0;
sname0 = (Ropeobj178006*)0;
a0 = (Ropeobj178006*)0;
k0 = (Tnode292802*)0;
field0 = (Tsym292834*)0;
result0 = NIM_NIL;
switch ((*n0).kind) {
/* record list: concatenate the declarations of every son */
case ((Tnodekind292020) 138):
{
{
NI i_534447_839829468;
NI HEX3Atmp_534620_839829468;
NI LOC3;
NI res_534623_839829468;
i_534447_839829468 = (NI)0;
HEX3Atmp_534620_839829468 = (NI)0;
LOC3 = (NI)0;
LOC3 = sonslen_295351_850551059(n0);
HEX3Atmp_534620_839829468 = (NI)(LOC3 - ((NI) 1));
res_534623_839829468 = ((NI) 0);
{
while (1) {
Ropeobj178006* LOC6;
if (!(res_534623_839829468 <= HEX3Atmp_534620_839829468)) goto LA5;
i_534447_839829468 = res_534623_839829468;
LOC6 = (Ropeobj178006*)0;
LOC6 = genrecordfieldsaux_534421_839829468(m0, (*n0).kindU.S6.sons->data[i_534447_839829468], accessexpr0, rectype0, check0);
add_178482_2381377266(&result0, LOC6);
res_534623_839829468 += ((NI) 1);
} LA5: ;
}
}
}
break;
/* record case (variant part): discriminator field plus a C union over the branches */
case ((Tnodekind292020) 139):
{
Ropeobj178006* LOC12;
NimStringDesc* LOC13;
NimStringDesc* LOC14;
Ropeobj178006* unionbody0;
/* son 0 must be the discriminator symbol */
{
if (!!(((*(*n0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind292020) 3)))) goto LA10;
internalerror_196100_155036129((*n0).info, ((NimStringDesc*) &T839829468_89));
}
LA10: ;
/* emit the discriminator itself */
LOC12 = (Ropeobj178006*)0;
LOC12 = genrecordfieldsaux_534421_839829468(m0, (*n0).kindU.S6.sons->data[((NI) 0)], accessexpr0, rectype0, check0);
add_178482_2381377266(&result0, LOC12);
/* union name: mangled discriminator name + 'U' (ASCII 85) */
LOC13 = (NimStringDesc*)0;
LOC14 = (NimStringDesc*)0;
LOC14 = mangle_528847_2036603609((*(*(*(*n0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym).name).s);
LOC13 = rawNewString(LOC14->Sup.len + 1);
appendString(LOC13, LOC14);
appendChar(LOC13, 85);
uname0 = rope_178277_2381377266(LOC13);
/* access expression for union members: "<prefix>.<uname>" when prefixed */
{
TY532811 LOC19;
if (!!((accessexpr0 == NIM_NIL))) goto LA17;
memset((void*)LOC19, 0, sizeof(LOC19));
LOC19[0] = accessexpr0;
LOC19[1] = uname0;
ae0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_90), LOC19, 2);
}
goto LA15;
LA17: ;
{
ae0 = uname0;
}
LA15: ;
unionbody0 = NIM_NIL;
/* iterate the of/else branches (sons 1..last) */
{
NI i_534491_839829468;
NI HEX3Atmp_534629_839829468;
NI LOC22;
NI res_534632_839829468;
i_534491_839829468 = (NI)0;
HEX3Atmp_534629_839829468 = (NI)0;
LOC22 = (NI)0;
LOC22 = sonslen_295351_850551059(n0);
HEX3Atmp_534629_839829468 = (NI)(LOC22 - ((NI) 1));
res_534632_839829468 = ((NI) 1);
{
while (1) {
if (!(res_534632_839829468 <= HEX3Atmp_534629_839829468)) goto LA24;
i_534491_839829468 = res_534632_839829468;
switch ((*(*n0).kindU.S6.sons->data[i_534491_839829468]).kind) {
case ((Tnodekind292020) 85):
case ((Tnodekind292020) 88):
{
k0 = lastson_295364_850551059((*n0).kindU.S6.sons->data[i_534491_839829468]);
{
Ropeobj178006* LOC30;
TY532811 LOC31;
Ropeobj178006* LOC32;
/* a branch with multiple fields becomes a named sub-struct "S<i>" */
if (!!(((*k0).kind == ((Tnodekind292020) 3)))) goto LA28;
LOC30 = (Ropeobj178006*)0;
LOC30 = rope_178401_2381377266(((NI64) (i_534491_839829468)));
sname0 = HEX26_178452_2381377266(((NimStringDesc*) &T839829468_91), LOC30);
memset((void*)LOC31, 0, sizeof(LOC31));
LOC31[0] = ae0;
LOC31[1] = sname0;
LOC32 = (Ropeobj178006*)0;
LOC32 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_90), LOC31, 2);
a0 = genrecordfieldsaux_534421_839829468(m0, k0, LOC32, rectype0, check0);
{
TY178507 LOC37;
if (!!((a0 == NIM_NIL))) goto LA35;
add_178487_2381377266(&unionbody0, ((NimStringDesc*) &T839829468_92));
add_178482_2381377266(&unionbody0, a0);
memset((void*)LOC37, 0, sizeof(LOC37));
LOC37[0] = sname0;
addf_179205_2381377266(&unionbody0, ((NimStringDesc*) &T839829468_93), LOC37, 1);
}
LA35: ;
}
goto LA26;
LA28: ;
/* a single-symbol branch goes straight into the union body */
{
Ropeobj178006* LOC39;
LOC39 = (Ropeobj178006*)0;
LOC39 = genrecordfieldsaux_534421_839829468(m0, k0, ae0, rectype0, check0);
add_178482_2381377266(&unionbody0, LOC39);
}
LA26: ;
}
break;
default:
{
internalerror_196113_155036129(((NimStringDesc*) &T839829468_94));
}
break;
}
res_534632_839829468 += ((NI) 1);
} LA24: ;
}
}
/* wrap the accumulated branches in "union { ... } <uname>;" */
{
TY532811 LOC45;
if (!!((unionbody0 == NIM_NIL))) goto LA43;
memset((void*)LOC45, 0, sizeof(LOC45));
LOC45[0] = unionbody0;
LOC45[1] = uname0;
addf_179205_2381377266(&result0, ((NimStringDesc*) &T839829468_95), LOC45, 2);
}
LA43: ;
}
break;
/* a single field symbol */
case ((Tnodekind292020) 3):
{
field0 = (*n0).kindU.S4.sym;
/* fields of kind 62 (presumably tyVoid/tyEmpty -- confirm) emit nothing */
{
if (!((*(*field0).typ).kind == ((Ttypekind292244) 62))) goto LA49;
goto BeforeRet;
}
LA49: ;
sname0 = manglerecfieldname_534361_839829468(field0, rectype0);
{
TY532811 LOC55;
if (!!((accessexpr0 == NIM_NIL))) goto LA53;
memset((void*)LOC55, 0, sizeof(LOC55));
LOC55[0] = accessexpr0;
LOC55[1] = sname0;
ae0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_90), LOC55, 2);
}
goto LA51;
LA53: ;
{
ae0 = sname0;
}
LA51: ;
/* record the field's C access expression in its location */
fillloc_532282_839829468((&(*field0).loc), ((Tlockind292808) 5), (*field0).typ, ae0, ((Tstorageloc292812) 0));
/* imported C++ record types bring their own field declarations */
{
NIM_BOOL LOC59;
Ttype292840* fieldtype0;
LOC59 = (NIM_BOOL)0;
LOC59 = isimportedcpptype_533476_839829468(rectype0);
if (!!(LOC59)) goto LA60;
fieldtype0 = skiptypes_296099_850551059((*field0).loc.t, IL64(211106232576256));
{
NIM_BOOL LOC64;
TY532811 LOC68;
Ttype292840* LOC69;
LOC64 = (NIM_BOOL)0;
LOC64 = ((*fieldtype0).kind == ((Ttypekind292244) 16));
if (!(LOC64)) goto LA65;
LOC64 = (((*fieldtype0).flags &(1U<<((NU)(((Ttypeflag292431) 0))&31U)))!=0);
LA65: ;
if (!LOC64) goto LA66;
/* array type with flag bit 0 set: declared via its element type */
memset((void*)LOC68, 0, sizeof(LOC68));
LOC69 = (Ttype292840*)0;
LOC69 = elemtype_320394_3876443242(fieldtype0);
LOC68[0] = gettypedescaux_533503_839829468(m0, LOC69, check0);
LOC68[1] = sname0;
addf_179205_2381377266(&result0, ((NimStringDesc*) &T839829468_97), LOC68, 2);
}
goto LA62;
LA66: ;
/* seq-like field: a weak (forward) type reference suffices */
{
TY532811 LOC73;
if (!((*fieldtype0).kind == ((Ttypekind292244) 24))) goto LA71;
memset((void*)LOC73, 0, sizeof(LOC73));
LOC73[0] = gettypedescweak_534079_839829468(m0, (*field0).loc.t, check0);
LOC73[1] = sname0;
addf_179205_2381377266(&result0, ((NimStringDesc*) &T839829468_54), LOC73, 2);
}
goto LA62;
LA71: ;
/* field with a nonzero bitsize: emit as a C bitfield */
{
TY535238 LOC77;
NimStringDesc* LOC78;
if (!!(((*field0).kindU.S4.bitsize == ((NI) 0)))) goto LA75;
memset((void*)LOC77, 0, sizeof(LOC77));
LOC77[0] = gettypedescaux_533503_839829468(m0, (*field0).loc.t, check0);
LOC77[1] = sname0;
LOC78 = (NimStringDesc*)0;
LOC78 = nimIntToStr((*field0).kindU.S4.bitsize);
LOC77[2] = rope_178277_2381377266(LOC78);
addf_179205_2381377266(&result0, ((NimStringDesc*) &T839829468_98), LOC77, 3);
}
goto LA62;
LA75: ;
/* ordinary field: "<type> <name>;" */
{
TY532811 LOC80;
memset((void*)LOC80, 0, sizeof(LOC80));
LOC80[0] = gettypedescaux_533503_839829468(m0, (*field0).loc.t, check0);
LOC80[1] = sname0;
addf_179205_2381377266(&result0, ((NimStringDesc*) &T839829468_54), LOC80, 2);
}
LA62: ;
}
LA60: ;
}
break;
default:
{
internalerror_196100_155036129((*n0).info, ((NimStringDesc*) &T839829468_99));
}
break;
}
}BeforeRet: ;
return result0;
}
/* Emits the C field declarations for record type `typ0` by walking its
   record AST (`typ0.n`) with no access-expression prefix. */
N_NIMCALL(Ropeobj178006*, getrecordfields_534636_839829468)(Tcgen529027* m0, Ttype292840* typ0, Intset268030* check0) {
	return genrecordfieldsaux_534421_839829468(m0, (*typ0).n, NIM_NIL, typ0, check0);
}
/* Builds the complete C struct/union declaration for record type `typ0`
   named `name0`: header (with an optional compiler-specific attribute),
   an inheritance/supertype prelude for object kinds, the field list, and
   the closing brace. Nim-compiler-generated code (ccgtypes getRecordDesc);
   do not hand-edit -- the goto labels encode Nim's if/elif lowering. */
N_NIMCALL(Ropeobj178006*, getrecorddesc_534643_839829468)(Tcgen529027* m0, Ttype292840* typ0, Ropeobj178006* name0, Intset268030* check0) {
Ropeobj178006* result0;
NIM_BOOL hasfield0;
Ropeobj178006* attribute0;
TY535238 LOC6;
Ropeobj178006* desc0;
NimStringDesc* LOC46;
result0 = (Ropeobj178006*)0;
hasfield0 = NIM_FALSE;
/* type flag bit 21 set (presumably tfPacked -- confirm): use the active
   C compiler's packed attribute string, else no attribute */
{
if (!(((*typ0).flags &(1U<<((NU)(((Ttypeflag292431) 21))&31U)))!=0)) goto LA3;
attribute0 = rope_178277_2381377266(Cc_273413_2528170400[(ccompiler_273431_2528170400)- 1].Field19);
}
goto LA1;
LA3: ;
{
attribute0 = NIM_NIL;
}
LA1: ;
/* emit the "struct|union <name> <attr>" header via the compiler's format */
memset((void*)LOC6, 0, sizeof(LOC6));
LOC6[0] = structorunion_534001_839829468(typ0);
LOC6[1] = name0;
LOC6[2] = attribute0;
result0 = ropecg_532407_839829468(m0, Cc_273413_2528170400[(ccompiler_273431_2528170400)- 1].Field18, LOC6, 3);
/* kind 17 (presumably tyObject -- confirm): handle root vs. inherited objects */
{
if (!((*typ0).kind == ((Ttypekind292244) 17))) goto LA9;
{
/* son 0 nil: a root object with no supertype */
if (!((*typ0).sons->data[((NI) 0)] == NIM_NIL)) goto LA13;
{
NIM_BOOL LOC17;
NIM_BOOL LOC18;
TY533289 LOC23;
LOC17 = (NIM_BOOL)0;
LOC18 = (NIM_BOOL)0;
LOC18 = !(((*typ0).sym == NIM_NIL));
if (!(LOC18)) goto LA19;
LOC18 = (((*(*typ0).sym).flags &(1U<<((NU)(((Tsymflag292184) 9))&31U)))!=0);
LA19: ;
LOC17 = LOC18;
if (LOC17) goto LA20;
LOC17 = (((*typ0).flags &(1U<<((NU)(((Ttypeflag292431) 2))&31U)))!=0);
LA20: ;
if (!LOC17) goto LA21;
/* pure / final object: open the body with no implicit RTTI field */
memset((void*)LOC23, 0, sizeof(LOC23));
appcg_532632_839829468(m0, &result0, ((NimStringDesc*) &T839829468_85), LOC23, 0);
}
goto LA15;
LA21: ;
{
/* root object with RTTI: emit the implicit type field */
TY532811 LOC25;
memset((void*)LOC25, 0, sizeof(LOC25));
LOC25[0] = name0;
LOC25[1] = attribute0;
appcg_532632_839829468(m0, &result0, ((NimStringDesc*) &T839829468_86), LOC25, 2);
hasfield0 = NIM_TRUE;
}
LA15: ;
}
goto LA11;
LA13: ;
{
/* inherited object in C++ mode: emit the supertype as a base class */
NIM_BOOL LOC27;
TY178507 LOC31;
Ttype292840* LOC32;
LOC27 = (NIM_BOOL)0;
LOC27 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC27) goto LA28;
LOC27 = (((*(*m0).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA28: ;
if (!LOC27) goto LA29;
memset((void*)LOC31, 0, sizeof(LOC31));
LOC32 = (Ttype292840*)0;
LOC32 = skiptypes_296099_850551059((*typ0).sons->data[((NI) 0)], IL64(211106247215360));
LOC31[0] = gettypedescaux_533503_839829468(m0, LOC32, check0);
appcg_532632_839829468(m0, &result0, ((NimStringDesc*) &T839829468_87), LOC31, 1);
hasfield0 = NIM_TRUE;
}
goto LA11;
LA29: ;
{
/* inherited object in C mode: embed the supertype as the first member */
TY178507 LOC34;
Ttype292840* LOC35;
memset((void*)LOC34, 0, sizeof(LOC34));
LOC35 = (Ttype292840*)0;
LOC35 = skiptypes_296099_850551059((*typ0).sons->data[((NI) 0)], IL64(211106247215360));
LOC34[0] = gettypedescaux_533503_839829468(m0, LOC35, check0);
appcg_532632_839829468(m0, &result0, ((NimStringDesc*) &T839829468_88), LOC34, 1);
hasfield0 = NIM_TRUE;
}
LA11: ;
}
goto LA7;
LA9: ;
{
/* non-object record (e.g. a plain tuple/union): just open the body */
TY178507 LOC37;
memset((void*)LOC37, 0, sizeof(LOC37));
LOC37[0] = name0;
addf_179205_2381377266(&result0, ((NimStringDesc*) &T839829468_85), LOC37, 1);
}
LA7: ;
desc0 = getrecordfields_534636_839829468(m0, typ0, check0);
/* a completely empty C struct is invalid; emit a dummy member */
{
NIM_BOOL LOC40;
TY533289 LOC44;
LOC40 = (NIM_BOOL)0;
LOC40 = (desc0 == NIM_NIL);
if (!(LOC40)) goto LA41;
LOC40 = !(hasfield0);
LA41: ;
if (!LOC40) goto LA42;
memset((void*)LOC44, 0, sizeof(LOC44));
addf_179205_2381377266(&result0, ((NimStringDesc*) &T839829468_100), LOC44, 0);
}
goto LA38;
LA42: ;
{
add_178482_2381377266(&result0, desc0);
}
LA38: ;
/* close the declaration: "};" + platform newline */
LOC46 = (NimStringDesc*)0;
LOC46 = rawNewString(tnl_176644_4151366050->Sup.len + 2);
appendString(LOC46, ((NimStringDesc*) &T839829468_101));
appendString(LOC46, tnl_176644_4151366050);
add_178487_2381377266(&result0, LOC46);
return result0;
}
/* Builds the C struct declaration for tuple type `typ0` named `name0`:
   header, one "Field<i>" member per son, a dummy member when the tuple is
   empty (an empty C struct is invalid), and the closing "};" + newline. */
N_NIMCALL(Ropeobj178006*, gettupledesc_534777_839829468)(Tcgen529027* m0, Ttype292840* typ0, Ropeobj178006* name0, Intset268030* check0) {
	TY532811 hdrargs0;
	memset((void*)hdrargs0, 0, sizeof(hdrargs0));
	hdrargs0[0] = structorunion_534001_839829468(typ0);
	hdrargs0[1] = name0;
	Ropeobj178006* result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_102), hdrargs0, 2);
	Ropeobj178006* desc0 = NIM_NIL;
	NI last0 = (NI)(sonslen_295327_850551059(typ0) - ((NI) 1));
	/* one member per tuple component, named by its index */
	for (NI i0 = ((NI) 0); i0 <= last0; i0 += ((NI) 1)) {
		TY532811 fieldargs0;
		memset((void*)fieldargs0, 0, sizeof(fieldargs0));
		fieldargs0[0] = gettypedescaux_533503_839829468(m0, (*typ0).sons->data[i0], check0);
		fieldargs0[1] = rope_178401_2381377266(((NI64) (i0)));
		addf_179205_2381377266(&desc0, ((NimStringDesc*) &T839829468_103), fieldargs0, 2);
	}
	if (desc0 == NIM_NIL) {
		/* empty tuple: emit the dummy-member line instead */
		NimStringDesc* dummy0 = rawNewString(tnl_176644_4151366050->Sup.len + 11);
		appendString(dummy0, ((NimStringDesc*) &T839829468_104));
		appendString(dummy0, tnl_176644_4151366050);
		add_178487_2381377266(&result0, dummy0);
	} else {
		add_178482_2381377266(&result0, desc0);
	}
	/* close the declaration: "};" + platform newline */
	NimStringDesc* closing0 = rawNewString(tnl_176644_4151366050->Sup.len + 2);
	appendString(closing0, ((NimStringDesc*) &T839829468_101));
	appendString(closing0, tnl_176644_4151366050);
	add_178487_2381377266(&result0, closing0);
	return result0;
}
N_NIMCALL(Ropeobj178006*, gettypedescaux_533503_839829468)(Tcgen529027* m0, Ttype292840* typ0, Intset268030* check0) {
Ropeobj178006* result0;
Ttype292840* t_534942_839829468;
{ result0 = (Ropeobj178006*)0;
t_534942_839829468 = getuniquetype_528640_2036603609(typ0);
{
if (!(t_534942_839829468 == NIM_NIL)) goto LA3;
internalerror_196113_155036129(((NimStringDesc*) &T839829468_27));
}
LA3: ;
{
if (!!(((*t_534942_839829468).sym == NIM_NIL))) goto LA7;
useheader_532369_839829468(m0, (*t_534942_839829468).sym);
}
LA7: ;
result0 = gettypepre_533972_839829468(m0, t_534942_839829468);
{
if (!!((result0 == NIM_NIL))) goto LA11;
goto BeforeRet;
}
LA11: ;
{
NIM_BOOL LOC15;
LOC15 = (NIM_BOOL)0;
LOC15 = containsorincl_268862_2627731572(check0, (*t_534942_839829468).Sup.id);
if (!LOC15) goto LA16;
{
NIM_BOOL LOC20;
NimStringDesc* LOC24;
NimStringDesc* LOC25;
LOC20 = (NIM_BOOL)0;
LOC20 = isimportedcpptype_533476_839829468(typ0);
if (LOC20) goto LA21;
LOC20 = isimportedcpptype_533476_839829468(t_534942_839829468);
LA21: ;
if (!!(LOC20)) goto LA22;
LOC24 = (NimStringDesc*)0;
LOC25 = (NimStringDesc*)0;
LOC25 = typetostring_320017_3876443242(typ0, ((Tprefereddesc320011) 0));
LOC24 = rawNewString(LOC25->Sup.len + 28);
appendString(LOC24, ((NimStringDesc*) &T839829468_51));
appendString(LOC24, LOC25);
internalerror_196113_155036129(LOC24);
}
LA22: ;
}
LA16: ;
switch ((*t_534942_839829468).kind) {
case ((Ttypekind292244) 22):
case ((Ttypekind292244) 21):
case ((Ttypekind292244) 23):
{
NimStringDesc* star0;
Ttype292840* et0;
Ttype292840* LOC38;
Ttype292840* etb0;
{
NIM_BOOL LOC29;
NIM_BOOL LOC30;
NIM_BOOL LOC33;
LOC29 = (NIM_BOOL)0;
LOC30 = (NIM_BOOL)0;
LOC30 = ((*t_534942_839829468).kind == ((Ttypekind292244) 23));
if (!(LOC30)) goto LA31;
LOC30 = !((((*typ0).flags &(1U<<((NU)(((Ttypeflag292431) 18))&31U)))!=0));
LA31: ;
LOC29 = LOC30;
if (!(LOC29)) goto LA32;
LOC33 = (NIM_BOOL)0;
LOC33 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC33) goto LA34;
LOC33 = (((*(*m0).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA34: ;
LOC29 = LOC33;
LA32: ;
if (!LOC29) goto LA35;
star0 = copyString(((NimStringDesc*) &T839829468_52));
}
goto LA27;
LA35: ;
{
star0 = copyString(((NimStringDesc*) &T839829468_53));
}
LA27: ;
LOC38 = (Ttype292840*)0;
LOC38 = skiptypes_296099_850551059(typ0, IL64(211106232576256));
et0 = lastson_295377_850551059(LOC38);
etb0 = skiptypes_296099_850551059(et0, IL64(211106232576256));
{
if (!((*etb0).kind == ((Ttypekind292244) 4) || (*etb0).kind == ((Ttypekind292244) 16) || (*etb0).kind == ((Ttypekind292244) 27) || (*etb0).kind == ((Ttypekind292244) 48))) goto LA41;
et0 = elemtype_320394_3876443242(etb0);
etb0 = skiptypes_296099_850551059(et0, IL64(211106232576256));
star0->data[((NI) 0)] = 42;
}
LA41: ;
switch ((*etb0).kind) {
case ((Ttypekind292244) 17):
case ((Ttypekind292244) 18):
{
{
NIM_BOOL LOC46;
Ropeobj178006* LOC50;
LOC46 = (NIM_BOOL)0;
LOC46 = isimportedcpptype_533476_839829468(etb0);
if (!(LOC46)) goto LA47;
LOC46 = ((*et0).kind == ((Ttypekind292244) 11));
LA47: ;
if (!LOC46) goto LA48;
LOC50 = (Ropeobj178006*)0;
LOC50 = gettypedescaux_533503_839829468(m0, et0, check0);
result0 = HEX26_178447_2381377266(LOC50, star0);
}
goto LA44;
LA48: ;
{
Ttype292840* x0;
Ropeobj178006* name0;
Tidobj199004* LOC52;
TNimObject* LOC53;
x0 = getuniquetype_528640_2036603609(etb0);
name0 = gettypeforward_534039_839829468(m0, x0);
result0 = HEX26_178447_2381377266(name0, star0);
LOC52 = (Tidobj199004*)0;
LOC52 = &t_534942_839829468->Sup;
LOC53 = (TNimObject*)0;
LOC53 = &result0->Sup;
idtableput_299094_2984716966((&(*m0).typecache), LOC52, LOC53);
pushtype_533958_839829468(m0, x0);
}
LA44: ;
}
break;
case ((Ttypekind292244) 24):
{
Ttype292840* x0;
Ropeobj178006* name0;
Ropeobj178006* LOC55;
Tidobj199004* LOC56;
TNimObject* LOC57;
x0 = getuniquetype_528640_2036603609(etb0);
name0 = gettypeforward_534039_839829468(m0, x0);
LOC55 = (Ropeobj178006*)0;
LOC55 = HEX26_178447_2381377266(name0, ((NimStringDesc*) &T839829468_53));
result0 = HEX26_178447_2381377266(LOC55, star0);
LOC56 = (Tidobj199004*)0;
LOC56 = &t_534942_839829468->Sup;
LOC57 = (TNimObject*)0;
LOC57 = &result0->Sup;
idtableput_299094_2984716966((&(*m0).typecache), LOC56, LOC57);
pushtype_533958_839829468(m0, x0);
}
break;
default:
{
Ropeobj178006* LOC59;
Tidobj199004* LOC60;
TNimObject* LOC61;
LOC59 = (Ropeobj178006*)0;
LOC59 = gettypedescaux_533503_839829468(m0, et0, check0);
result0 = HEX26_178447_2381377266(LOC59, star0);
LOC60 = (Tidobj199004*)0;
LOC60 = &t_534942_839829468->Sup;
LOC61 = (TNimObject*)0;
LOC61 = &result0->Sup;
idtableput_299094_2984716966((&(*m0).typecache), LOC60, LOC61);
}
break;
}
}
break;
case ((Ttypekind292244) 27):
case ((Ttypekind292244) 48):
{
Ropeobj178006* LOC63;
Tidobj199004* LOC64;
TNimObject* LOC65;
LOC63 = (Ropeobj178006*)0;
LOC63 = gettypedescweak_534079_839829468(m0, (*t_534942_839829468).sons->data[((NI) 0)], check0);
result0 = HEX26_178447_2381377266(LOC63, ((NimStringDesc*) &T839829468_53));
LOC64 = (Tidobj199004*)0;
LOC64 = &t_534942_839829468->Sup;
LOC65 = (TNimObject*)0;
LOC65 = &result0->Sup;
idtableput_299094_2984716966((&(*m0).typecache), LOC64, LOC65);
}
break;
case ((Ttypekind292244) 20):
case ((Ttypekind292244) 14):
{
Ttype292840* t0;
{
if (!((*t_534942_839829468).kind == ((Ttypekind292244) 20))) goto LA69;
t0 = lastson_295377_850551059(t_534942_839829468);
}
goto LA67;
LA69: ;
{
t0 = t_534942_839829468;
}
LA67: ;
result0 = cachegettype_533591_839829468((*m0).typecache, t0);
{
if (!(result0 == NIM_NIL)) goto LA74;
result0 = gettypename_533313_839829468(t0);
{
NIM_BOOL LOC78;
NIM_BOOL LOC80;
Tidobj199004* LOC84;
TNimObject* LOC85;
NI size0;
NU32 owner0;
LOC78 = (NIM_BOOL)0;
LOC78 = isimportedcpptype_533476_839829468(t0);
if (LOC78) goto LA79;
LOC80 = (NIM_BOOL)0;
LOC80 = (((*(*t0).sym).flags &(1U<<((NU)(((Tsymflag292184) 5))&31U)))!=0);
if (!(LOC80)) goto LA81;
LOC80 = ((*(*t0).sym).magic == ((Tmagic292524) 0));
LA81: ;
LOC78 = LOC80;
LA79: ;
if (!!(LOC78)) goto LA82;
LOC84 = (Tidobj199004*)0;
LOC84 = &t0->Sup;
LOC85 = (TNimObject*)0;
LOC85 = &result0->Sup;
idtableput_299094_2984716966((&(*m0).typecache), LOC84, LOC85);
size0 = (NI)0;
{
NI64 LOC88;
TY178507 LOC91;
LOC88 = (NI64)0;
LOC88 = firstord_320001_3876443242(t0);
if (!(LOC88 < IL64(0))) goto LA89;
memset((void*)LOC91, 0, sizeof(LOC91));
LOC91[0] = result0;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 3))- 0], ((NimStringDesc*) &T839829468_59), LOC91, 1);
size0 = ((NI) 4);
}
goto LA86;
LA89: ;
{
NI64 LOC93;
LOC93 = (NI64)0;
LOC93 = getsize_320135_3876443242(t0);
size0 = ((NI) (LOC93));
switch (size0) {
case ((NI) 1):
{
TY178507 LOC95;
memset((void*)LOC95, 0, sizeof(LOC95));
LOC95[0] = result0;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 3))- 0], ((NimStringDesc*) &T839829468_60), LOC95, 1);
}
break;
case ((NI) 2):
{
TY178507 LOC97;
memset((void*)LOC97, 0, sizeof(LOC97));
LOC97[0] = result0;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 3))- 0], ((NimStringDesc*) &T839829468_61), LOC97, 1);
}
break;
case ((NI) 4):
{
TY178507 LOC99;
memset((void*)LOC99, 0, sizeof(LOC99));
LOC99[0] = result0;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 3))- 0], ((NimStringDesc*) &T839829468_59), LOC99, 1);
}
break;
case ((NI) 8):
{
TY178507 LOC101;
memset((void*)LOC101, 0, sizeof(LOC101));
LOC101[0] = result0;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 3))- 0], ((NimStringDesc*) &T839829468_62), LOC101, 1);
}
break;
default:
{
internalerror_196100_155036129((*(*t0).sym).info, ((NimStringDesc*) &T839829468_63));
}
break;
}
}
LA86: ;
owner0 = hashowner_532977_839829468((*t0).sym);
{
NIM_BOOL LOC105;
TY203017* vals0;
Enumdesc203007 LOC114;
LOC105 = (NIM_BOOL)0;
LOC105 = hasenum_203230_1926258066(gdebuginfo_203470_1926258066, (*(*(*t0).sym).name).s, ((NI) ((*(*t0).sym).info.line)), owner0);
if (!!(LOC105)) goto LA106;
vals0 = (TY203017*) newSeq((&NTI203017), 0);
{
NI i_535144_839829468;
NI HEX3Atmp_535648_839829468;
NI LOC109;
NI res_535651_839829468;
i_535144_839829468 = (NI)0;
HEX3Atmp_535648_839829468 = (NI)0;
LOC109 = (NI)0;
LOC109 = len_293081_850551059((*t0).n);
HEX3Atmp_535648_839829468 = (NI)(LOC109 - ((NI) 1));
res_535651_839829468 = ((NI) 0);
{
while (1) {
Tsym292834* field0;
TY203018 LOC112;
NimStringDesc* LOC113;
if (!(res_535651_839829468 <= HEX3Atmp_535648_839829468)) goto LA111;
i_535144_839829468 = res_535651_839829468;
field0 = (*(*(*t0).n).kindU.S6.sons->data[i_535144_839829468]).kindU.S4.sym;
memset((void*)(&LOC112), 0, sizeof(LOC112));
LOC112.Field0 = copyString((*(*field0).name).s);
LOC112.Field1 = (*field0).position;
vals0 = (TY203017*) incrSeqV2(&(vals0)->Sup, sizeof(TY203018));
LOC113 = (NimStringDesc*)0;
LOC113 = vals0->data[vals0->Sup.len].Field0; vals0->data[vals0->Sup.len].Field0 = copyStringRC1(LOC112.Field0);
if (LOC113) nimGCunrefNoCycle(LOC113);
vals0->data[vals0->Sup.len].Field1 = LOC112.Field1;
++vals0->Sup.len;
res_535651_839829468 += ((NI) 1);
} LA111: ;
}
}
memset((void*)(&LOC114), 0, sizeof(LOC114));
memset((void*)(&LOC114), 0, sizeof(LOC114));
LOC114.size = size0;
LOC114.owner = owner0;
LOC114.id = (*(*t0).sym).Sup.id;
LOC114.name = copyString((*(*(*t0).sym).name).s);
genericSeqAssign((&LOC114.values), vals0, (&NTI203017));
registerenum_203419_1926258066((&gdebuginfo_203470_1926258066), (&LOC114));
}
LA106: ;
}
LA82: ;
}
LA74: ;
}
break;
case ((Ttypekind292244) 25):
{
Tidobj199004* LOC116;
TNimObject* LOC117;
Ropeobj178006* rettype0;
Ropeobj178006* desc0;
result0 = gettypename_533313_839829468(t_534942_839829468);
LOC116 = (Tidobj199004*)0;
LOC116 = &t_534942_839829468->Sup;
LOC117 = (TNimObject*)0;
LOC117 = &result0->Sup;
idtableput_299094_2984716966((&(*m0).typecache), LOC116, LOC117);
rettype0 = (Ropeobj178006*)0;
desc0 = (Ropeobj178006*)0;
genprocparams_534115_839829468(m0, t_534942_839829468, &rettype0, &desc0, check0, NIM_TRUE, NIM_TRUE);
{
NIM_BOOL LOC120;
LOC120 = (NIM_BOOL)0;
LOC120 = isimportedtype_533449_839829468(t_534942_839829468);
if (!!(LOC120)) goto LA121;
{
TY535235 LOC127;
if (!!(((*t_534942_839829468).callconv == ((Tcallingconvention292002) 8)))) goto LA125;
memset((void*)LOC127, 0, sizeof(LOC127));
LOC127[0] = rope_178277_2381377266(Callingconvtostr_533585_839829468[((*t_534942_839829468).callconv)- 0]);
LOC127[1] = rettype0;
LOC127[2] = result0;
LOC127[3] = desc0;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 3))- 0], ((NimStringDesc*) &T839829468_64), LOC127, 4);
}
goto LA123;
LA125: ;
{
TY535238 LOC129;
memset((void*)LOC129, 0, sizeof(LOC129));
LOC129[0] = result0;
LOC129[1] = rettype0;
LOC129[2] = desc0;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 3))- 0], ((NimStringDesc*) &T839829468_75), LOC129, 3);
}
LA123: ;
}
LA121: ;
}
break;
case ((Ttypekind292244) 24):
{
Tidobj199004* LOC144;
Ropeobj178006* LOC145;
TNimObject* LOC146;
result0 = cachegettype_533591_839829468((*m0).forwtypecache, t_534942_839829468);
{
Tidobj199004* LOC142;
TNimObject* LOC143;
if (!(result0 == NIM_NIL)) goto LA133;
result0 = gettypename_533313_839829468(t_534942_839829468);
{
NIM_BOOL LOC137;
NimStringDesc* LOC140;
TY532811 LOC141;
LOC137 = (NIM_BOOL)0;
LOC137 = isimportedtype_533449_839829468(t_534942_839829468);
if (!!(LOC137)) goto LA138;
LOC140 = (NimStringDesc*)0;
LOC140 = getforwardstructformat_534015_839829468(m0);
memset((void*)LOC141, 0, sizeof(LOC141));
LOC141[0] = structorunion_534001_839829468(t_534942_839829468);
LOC141[1] = result0;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 2))- 0], LOC140, LOC141, 2);
}
LA138: ;
LOC142 = (Tidobj199004*)0;
LOC142 = &t_534942_839829468->Sup;
LOC143 = (TNimObject*)0;
LOC143 = &result0->Sup;
idtableput_299094_2984716966((&(*m0).forwtypecache), LOC142, LOC143);
}
LA133: ;
LOC144 = (Tidobj199004*)0;
LOC144 = &t_534942_839829468->Sup;
LOC145 = (Ropeobj178006*)0;
LOC145 = HEX26_178447_2381377266(result0, ((NimStringDesc*) &T839829468_53));
LOC146 = (TNimObject*)0;
LOC146 = &LOC145->Sup;
idtableput_299094_2984716966((&(*m0).typecache), LOC144, LOC146);
{
NIM_BOOL LOC149;
LOC149 = (NIM_BOOL)0;
LOC149 = isimportedtype_533449_839829468(t_534942_839829468);
if (!!(LOC149)) goto LA150;
{
Ttype292840* LOC154;
NimStringDesc* LOC157;
NimStringDesc* LOC158;
TY532811 LOC166;
LOC154 = (Ttype292840*)0;
LOC154 = skiptypes_296099_850551059((*t_534942_839829468).sons->data[((NI) 0)], IL64(211106232576256));
if (!!(((*LOC154).kind == ((Ttypekind292244) 3)))) goto LA155;
LOC157 = (NimStringDesc*)0;
LOC158 = (NimStringDesc*)0;
{
NIM_BOOL LOC161;
LOC161 = (NIM_BOOL)0;
LOC161 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC161) goto LA162;
LOC161 = (((*(*m0).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA162: ;
if (!LOC161) goto LA163;
LOC158 = copyString(((NimStringDesc*) &T839829468_76));
}
goto LA159;
LA163: ;
{
LOC158 = copyString(((NimStringDesc*) &T839829468_77));
}
LA159: ;
LOC157 = rawNewString(LOC158->Sup.len + 31);
appendString(LOC157, LOC158);
appendString(LOC157, ((NimStringDesc*) &T839829468_78));
memset((void*)LOC166, 0, sizeof(LOC166));
LOC166[0] = gettypedescaux_533503_839829468(m0, (*t_534942_839829468).sons->data[((NI) 0)], check0);
LOC166[1] = result0;
appcg_532632_839829468(m0, &(*m0).s[(((Tcfilesection529005) 4))- 0], LOC157, LOC166, 2);
}
goto LA152;
LA155: ;
{
result0 = rope_178277_2381377266(((NimStringDesc*) &T839829468_79));
}
LA152: ;
}
LA150: ;
add_178487_2381377266(&result0, ((NimStringDesc*) &T839829468_53));
}
break;
case ((Ttypekind292244) 4):
case ((Ttypekind292244) 16):
{
NI64 n0;
Tidobj199004* LOC173;
TNimObject* LOC174;
n0 = lengthord_320007_3876443242(t_534942_839829468);
{
if (!(n0 <= IL64(0))) goto LA171;
n0 = IL64(1);
}
LA171: ;
result0 = gettypename_533313_839829468(t_534942_839829468);
LOC173 = (Tidobj199004*)0;
LOC173 = &t_534942_839829468->Sup;
LOC174 = (TNimObject*)0;
LOC174 = &result0->Sup;
idtableput_299094_2984716966((&(*m0).typecache), LOC173, LOC174);
{
NIM_BOOL LOC177;
Ropeobj178006* foo0;
TY535238 LOC180;
LOC177 = (NIM_BOOL)0;
LOC177 = isimportedtype_533449_839829468(t_534942_839829468);
if (!!(LOC177)) goto LA178;
foo0 = gettypedescaux_533503_839829468(m0, (*t_534942_839829468).sons->data[((NI) 1)], check0);
memset((void*)LOC180, 0, sizeof(LOC180));
LOC180[0] = foo0;
LOC180[1] = result0;
LOC180[2] = rope_178401_2381377266(n0);
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 3))- 0], ((NimStringDesc*) &T839829468_80), LOC180, 3);
}
LA178: ;
}
break;
case ((Ttypekind292244) 17):
case ((Ttypekind292244) 18):
{
{
NIM_BOOL LOC184;
Ropeobj178006* cppname0;
NI i0;
NI chunkstart0;
Ropeobj178006* LOC226;
LOC184 = (NIM_BOOL)0;
LOC184 = isimportedcpptype_533476_839829468(t_534942_839829468);
if (!(LOC184)) goto LA185;
LOC184 = ((*typ0).kind == ((Ttypekind292244) 11));
LA185: ;
if (!LOC184) goto LA186;
cppname0 = gettypename_533313_839829468(t_534942_839829468);
i0 = ((NI) 0);
chunkstart0 = ((NI) 0);
{
while (1) {
if (!(i0 < ((*cppname0).data ? (*cppname0).data->Sup.len : 0))) goto LA189;
{
NI chunkend0;
NI idx0;
NI stars0;
if (!((NU8)((*cppname0).data->data[i0]) == (NU8)(39))) goto LA192;
chunkend0 = (i0 - 1);
idx0 = (NI)0;
stars0 = (NI)0;
{
NIM_BOOL LOC196;
NimStringDesc* LOC199;
Ttype292840* typeinslot0;
LOC196 = (NIM_BOOL)0;
LOC196 = scancppgenericslot_534827_839829468((*cppname0).data, (&i0), (&idx0), (&stars0));
if (!LOC196) goto LA197;
LOC199 = (NimStringDesc*)0;
LOC199 = copyStrLast((*cppname0).data, chunkstart0, chunkend0);
add_178487_2381377266(&result0, LOC199);
chunkstart0 = i0;
typeinslot0 = resolvestarsincpptype_534891_839829468(typ0, (NI)(idx0 + ((NI) 1)), stars0);
{
NIM_BOOL LOC202;
TY533289 LOC206;
Ropeobj178006* LOC207;
LOC202 = (NIM_BOOL)0;
LOC202 = (typeinslot0 == NIM_NIL);
if (LOC202) goto LA203;
LOC202 = ((*typeinslot0).kind == ((Ttypekind292244) 62));
LA203: ;
if (!LOC202) goto LA204;
memset((void*)LOC206, 0, sizeof(LOC206));
LOC207 = (Ropeobj178006*)0;
LOC207 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_26), LOC206, 0);
add_178482_2381377266(&result0, LOC207);
}
goto LA200;
LA204: ;
{
Ropeobj178006* LOC209;
LOC209 = (Ropeobj178006*)0;
LOC209 = gettypedescaux_533503_839829468(m0, typeinslot0, check0);
add_178482_2381377266(&result0, LOC209);
}
LA200: ;
}
LA197: ;
}
goto LA190;
LA192: ;
{
i0 += ((NI) 1);
}
LA190: ;
} LA189: ;
}
{
NimStringDesc* LOC215;
if (!!((chunkstart0 == ((NI) 0)))) goto LA213;
LOC215 = (NimStringDesc*)0;
LOC215 = copyStr((*cppname0).data, chunkstart0);
add_178487_2381377266(&result0, LOC215);
}
goto LA211;
LA213: ;
{
result0 = HEX26_178447_2381377266(cppname0, ((NimStringDesc*) &T839829468_82));
{
NI i_535516_839829468;
NI HEX3Atmp_535664_839829468;
NI LOC218;
NI res_535667_839829468;
i_535516_839829468 = (NI)0;
HEX3Atmp_535664_839829468 = (NI)0;
LOC218 = (NI)0;
LOC218 = len_295339_850551059(typ0);
HEX3Atmp_535664_839829468 = (NI)(LOC218 - ((NI) 2));
res_535667_839829468 = ((NI) 1);
{
while (1) {
Ropeobj178006* LOC225;
if (!(res_535667_839829468 <= HEX3Atmp_535664_839829468)) goto LA220;
i_535516_839829468 = res_535667_839829468;
{
if (!(((NI) 1) < i_535516_839829468)) goto LA223;
add_178487_2381377266(&result0, ((NimStringDesc*) &T839829468_83));
}
LA223: ;
LOC225 = (Ropeobj178006*)0;
LOC225 = gettypedescaux_533503_839829468(m0, (*typ0).sons->data[i_535516_839829468], check0);
add_178482_2381377266(&result0, LOC225);
res_535667_839829468 += ((NI) 1);
} LA220: ;
}
}
add_178487_2381377266(&result0, ((NimStringDesc*) &T839829468_84));
}
LA211: ;
LOC226 = (Ropeobj178006*)0;
LOC226 = getrecorddesc_534643_839829468(m0, t_534942_839829468, result0, check0);
}
goto LA182;
LA186: ;
{
Tidobj199004* LOC241;
TNimObject* LOC242;
Ropeobj178006* recdesc0;
result0 = cachegettype_533591_839829468((*m0).forwtypecache, t_534942_839829468);
{
Tidobj199004* LOC239;
TNimObject* LOC240;
if (!(result0 == NIM_NIL)) goto LA230;
result0 = gettypename_533313_839829468(t_534942_839829468);
{
NIM_BOOL LOC234;
NimStringDesc* LOC237;
TY532811 LOC238;
LOC234 = (NIM_BOOL)0;
LOC234 = isimportedtype_533449_839829468(t_534942_839829468);
if (!!(LOC234)) goto LA235;
LOC237 = (NimStringDesc*)0;
LOC237 = getforwardstructformat_534015_839829468(m0);
memset((void*)LOC238, 0, sizeof(LOC238));
LOC238[0] = structorunion_534001_839829468(t_534942_839829468);
LOC238[1] = result0;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 2))- 0], LOC237, LOC238, 2);
}
LA235: ;
LOC239 = (Tidobj199004*)0;
LOC239 = &t_534942_839829468->Sup;
LOC240 = (TNimObject*)0;
LOC240 = &result0->Sup;
idtableput_299094_2984716966((&(*m0).forwtypecache), LOC239, LOC240);
}
LA230: ;
LOC241 = (Tidobj199004*)0;
LOC241 = &t_534942_839829468->Sup;
LOC242 = (TNimObject*)0;
LOC242 = &result0->Sup;
idtableput_299094_2984716966((&(*m0).typecache), LOC241, LOC242);
{
if (!!(((*t_534942_839829468).kind == ((Ttypekind292244) 18)))) goto LA245;
recdesc0 = getrecorddesc_534643_839829468(m0, t_534942_839829468, result0, check0);
}
goto LA243;
LA245: ;
{
recdesc0 = gettupledesc_534777_839829468(m0, t_534942_839829468, result0, check0);
}
LA243: ;
{
NIM_BOOL LOC250;
LOC250 = (NIM_BOOL)0;
LOC250 = isimportedtype_533449_839829468(t_534942_839829468);
if (!!(LOC250)) goto LA251;
add_178482_2381377266(&(*m0).s[(((Tcfilesection529005) 3))- 0], recdesc0);
}
LA251: ;
}
LA182: ;
}
break;
case ((Ttypekind292244) 19):
{
Ttype292840* LOC254;
Ropeobj178006* LOC255;
Tidobj199004* LOC256;
TNimObject* LOC257;
LOC254 = (Ttype292840*)0;
LOC254 = lastson_295377_850551059(t_534942_839829468);
LOC255 = (Ropeobj178006*)0;
LOC255 = gettypename_533313_839829468(LOC254);
result0 = HEX26_178447_2381377266(LOC255, ((NimStringDesc*) &T839829468_105));
LOC256 = (Tidobj199004*)0;
LOC256 = &t_534942_839829468->Sup;
LOC257 = (TNimObject*)0;
LOC257 = &result0->Sup;
idtableput_299094_2984716966((&(*m0).typecache), LOC256, LOC257);
{
NIM_BOOL LOC260;
NI s0;
NI64 LOC263;
LOC260 = (NIM_BOOL)0;
LOC260 = isimportedtype_533449_839829468(t_534942_839829468);
if (!!(LOC260)) goto LA261;
LOC263 = (NI64)0;
LOC263 = getsize_320135_3876443242(t_534942_839829468);
s0 = ((NI) (LOC263));
switch (s0) {
case ((NI) 1):
case ((NI) 2):
case ((NI) 4):
case ((NI) 8):
{
TY532811 LOC265;
memset((void*)LOC265, 0, sizeof(LOC265));
LOC265[0] = result0;
LOC265[1] = rope_178401_2381377266(((NI64) ((NI)(s0 * ((NI) 8)))));
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 3))- 0], ((NimStringDesc*) &T839829468_106), LOC265, 2);
}
break;
default:
{
TY532811 LOC267;
NI64 LOC268;
memset((void*)LOC267, 0, sizeof(LOC267));
LOC267[0] = result0;
LOC268 = (NI64)0;
LOC268 = getsize_320135_3876443242(t_534942_839829468);
LOC267[1] = rope_178401_2381377266(LOC268);
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 3))- 0], ((NimStringDesc*) &T839829468_107), LOC267, 2);
}
break;
}
}
LA261: ;
}
break;
case ((Ttypekind292244) 11):
case ((Ttypekind292244) 13):
case ((Ttypekind292244) 15):
case ((Ttypekind292244) 46):
case ((Ttypekind292244) 47):
case ((Ttypekind292244) 49):
case ((Ttypekind292244) 8):
{
Ttype292840* LOC270;
LOC270 = (Ttype292840*)0;
LOC270 = lastson_295377_850551059(t_534942_839829468);
result0 = gettypedescaux_533503_839829468(m0, LOC270, check0);
}
break;
default:
{
NimStringDesc* LOC272;
LOC272 = (NimStringDesc*)0;
LOC272 = rawNewString(reprEnum((NI)(*t_534942_839829468).kind, (&NTI292244))->Sup.len + 16);
appendString(LOC272, ((NimStringDesc*) &T839829468_108));
appendString(LOC272, reprEnum((NI)(*t_534942_839829468).kind, (&NTI292244)));
appendChar(LOC272, 41);
internalerror_196113_155036129(LOC272);
result0 = NIM_NIL;
}
break;
}
excl_268841_2627731572(check0, (*t_534942_839829468).Sup.id);
}BeforeRet: ;
return result0;
}
/* True when the type exists only at compile time: its kind is enum value 8
 * or 59 of Ttypekind292244. (Generated code -- the symbolic enum names are
 * not visible in this translation unit; presumably typedesc/static-like
 * kinds, confirm against the compiler's enum definition.) */
static N_INLINE(NIM_BOOL, iscompiletimeonly_328706_3876443242)(Ttype292840* t0) {
Ttypekind292244 kind = (*t0).kind;
return (NIM_BOOL)(kind == ((Ttypekind292244) 8) || kind == ((Ttypekind292244) 59));
}
/* Chooses the storage location for a proc parameter.
 * After skipping wrapper types (skiptypes with flag mask 8388864), kinds
 * 16, 27, 48 and 4 of Ttypekind292244 get storage class 0; every other kind
 * gets storage class 2. (Enum names are mangled away in generated code.) */
N_NIMCALL(Tstorageloc292812, paramstorageloc_534098_839829468)(Tsym292834* param0) {
Ttype292840* stripped = skiptypes_296099_850551059((*param0).typ, 8388864);
Ttypekind292244 kind = (*stripped).kind;
if (kind == ((Ttypekind292244) 16) || kind == ((Ttypekind292244) 27) || kind == ((Ttypekind292244) 48) || kind == ((Ttypekind292244) 4)) {
return ((Tstorageloc292812) 0);
}
return ((Tstorageloc292812) 2);
}
/* Decides whether the C code generator passes parameter s0 by hidden
 * pointer. Two type flags (bits 13 and 12 of Ttypeflag292431) force the
 * answer to true / false respectively; otherwise the decision depends on
 * the type kind: kind 17 and 18 (presumably object and tuple -- confirm
 * against the enum) are passed by pointer when they are large (bigger than
 * 2 * floatsize) or when option bit 18 is set on the symbol; everything
 * else is passed by value. Generated goto-based code; statement order is
 * significant and left untouched. */
N_NIMCALL(NIM_BOOL, ccgintroducedptr_533609_839829468)(Tsym292834* s0) {
NIM_BOOL result0;
Ttype292840* pt0;
{ result0 = (NIM_BOOL)0;
/* Strip wrapper types (mask constant encodes a set of type kinds). */
pt0 = skiptypes_296099_850551059((*s0).typ, IL64(211106232576256));
{
/* Type flag bit 13 set: always by pointer. */
if (!(((*pt0).flags &(1U<<((NU)(((Ttypeflag292431) 13))&31U)))!=0)) goto LA3;
result0 = NIM_TRUE;
goto BeforeRet;
}
goto LA1;
LA3: ;
{
/* Type flag bit 12 set: never by pointer. */
if (!(((*pt0).flags &(1U<<((NU)(((Ttypeflag292431) 12))&31U)))!=0)) goto LA6;
result0 = NIM_FALSE;
goto BeforeRet;
}
goto LA1;
LA6: ;
LA1: ;
switch ((*pt0).kind) {
case ((Ttypekind292244) 17):
{
{
NIM_BOOL LOC11;
NI64 LOC13;
LOC11 = (NIM_BOOL)0;
/* Option bit 18 on the symbol, OR size > 2 * floatsize => by pointer. */
LOC11 = (((*s0).options &(1U<<((NU)(((Toption169009) 18))&31U)))!=0);
if (LOC11) goto LA12;
LOC13 = (NI64)0;
LOC13 = getsize_320135_3876443242(pt0);
LOC11 = (((NI64) ((NI)(floatsize_176642_4151366050 * ((NI) 2)))) < LOC13);
LA12: ;
if (!LOC11) goto LA14;
result0 = NIM_TRUE;
}
goto LA9;
LA14: ;
{
NIM_BOOL LOC17;
LOC17 = (NIM_BOOL)0;
/* Flag bit 2 set and no base type (sons[0] == nil): by value. */
LOC17 = (((*pt0).flags &(1U<<((NU)(((Ttypeflag292431) 2))&31U)))!=0);
if (!(LOC17)) goto LA18;
LOC17 = ((*pt0).sons->data[((NI) 0)] == NIM_NIL);
LA18: ;
if (!LOC17) goto LA19;
result0 = NIM_FALSE;
}
goto LA9;
LA19: ;
{
/* All other kind-17 types: by pointer. */
result0 = NIM_TRUE;
}
LA9: ;
}
break;
case ((Ttypekind292244) 18):
{
NIM_BOOL LOC23;
NI64 LOC24;
LOC23 = (NIM_BOOL)0;
LOC24 = (NI64)0;
/* By pointer when large or when option bit 18 is set. */
LOC24 = getsize_320135_3876443242(pt0);
LOC23 = (((NI64) ((NI)(floatsize_176642_4151366050 * ((NI) 2)))) < LOC24);
if (LOC23) goto LA25;
LOC23 = (((*s0).options &(1U<<((NU)(((Toption169009) 18))&31U)))!=0);
LA25: ;
result0 = LOC23;
}
break;
default:
{
result0 = NIM_FALSE;
}
break;
}
}BeforeRet: ;
return result0;
}
/* The C-type kind of a return type; currently a thin wrapper over maptype. */
N_NIMCALL(Tctypekind529007, mapreturntype_533445_839829468)(Ttype292840* typ0) {
return maptype_533393_839829468(typ0);
}
/* Emits the C return type (*rettype0) and parameter list (*params0) for the
 * proc type t0. "Invalid" return types (per isinvalidreturntype) are turned
 * into void plus a hidden by-pointer result parameter appended at the end;
 * open-array-like parameters get extra length parameters; a closure
 * environment pointer and C varargs ("...") may be appended. Generated,
 * heavily order-dependent code; only comments added. */
N_NIMCALL(void, genprocparams_534115_839829468)(Tcgen529027* m0, Ttype292840* t0, Ropeobj178006** rettype0, Ropeobj178006** params0, Intset268030* check0, NIM_BOOL declareenvironment0, NIM_BOOL weakdep0) {
unsureAsgnRef((void**) (&(*params0)), NIM_NIL);
/* Return type: sons[0] of t0. Nil or "invalid" return types become the
 * void-ish format string T839829468_26; otherwise the real C type. */
{
NIM_BOOL LOC3;
TY533289 LOC7;
LOC3 = (NIM_BOOL)0;
LOC3 = ((*t0).sons->data[((NI) 0)] == NIM_NIL);
if (LOC3) goto LA4;
LOC3 = isinvalidreturntype_533548_839829468((*t0).sons->data[((NI) 0)]);
LA4: ;
if (!LOC3) goto LA5;
memset((void*)LOC7, 0, sizeof(LOC7));
unsureAsgnRef((void**) (&(*rettype0)), HEX25_178905_2381377266(((NimStringDesc*) &T839829468_26), LOC7, 0));
}
goto LA1;
LA5: ;
{
unsureAsgnRef((void**) (&(*rettype0)), gettypedescaux_533503_839829468(m0, (*t0).sons->data[((NI) 0)], check0));
}
LA1: ;
/* One C parameter per formal parameter: sons 1 .. len-1 of t0->n. */
{
NI i_534152_839829468;
NI HEX3Atmp_534353_839829468;
NI LOC10;
NI res_534356_839829468;
i_534152_839829468 = (NI)0;
HEX3Atmp_534353_839829468 = (NI)0;
LOC10 = (NI)0;
LOC10 = sonslen_295351_850551059((*t0).n);
HEX3Atmp_534353_839829468 = (NI)(LOC10 - ((NI) 1));
res_534356_839829468 = ((NI) 1);
{
while (1) {
if (!(res_534356_839829468 <= HEX3Atmp_534353_839829468)) goto LA12;
i_534152_839829468 = res_534356_839829468;
{
Tsym292834* param0;
Ropeobj178006* LOC29;
Tstorageloc292812 LOC30;
TY533289 LOC45;
Ropeobj178006* LOC46;
Ttype292840* arr0;
NI j0;
{
/* Each son must be a symbol node (Tnodekind292020 value 3). */
if (!!(((*(*(*t0).n).kindU.S6.sons->data[i_534152_839829468]).kind == ((Tnodekind292020) 3)))) goto LA16;
internalerror_196100_155036129((*(*t0).n).info, ((NimStringDesc*) &T839829468_109));
}
LA16: ;
param0 = (*(*(*t0).n).kindU.S6.sons->data[i_534152_839829468]).kindU.S4.sym;
{
/* Compile-time-only parameters produce no C parameter. */
NIM_BOOL LOC20;
LOC20 = (NIM_BOOL)0;
LOC20 = iscompiletimeonly_328706_3876443242((*param0).typ);
if (!LOC20) goto LA21;
goto LA13;
}
LA21: ;
{
/* Separator (", " presumably; literal defined elsewhere) if not first. */
TY533289 LOC27;
Ropeobj178006* LOC28;
if (!!(((*params0) == NIM_NIL))) goto LA25;
memset((void*)LOC27, 0, sizeof(LOC27));
LOC28 = (Ropeobj178006*)0;
LOC28 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_110), LOC27, 0);
add_178482_2381377266(params0, LOC28);
}
LA25: ;
LOC29 = (Ropeobj178006*)0;
LOC29 = manglename_533205_839829468(param0);
LOC30 = (Tstorageloc292812)0;
LOC30 = paramstorageloc_534098_839829468(param0);
fillloc_532282_839829468((&(*param0).loc), ((Tlockind292808) 4), (*param0).typ, LOC29, LOC30);
{
/* Pass-by-hidden-pointer case: emit "type *" and mark the loc
 * indirect (flag bit 0). */
NIM_BOOL LOC33;
Ropeobj178006* LOC36;
TY533289 LOC37;
Ropeobj178006* LOC38;
LOC33 = (NIM_BOOL)0;
LOC33 = ccgintroducedptr_533609_839829468(param0);
if (!LOC33) goto LA34;
LOC36 = (Ropeobj178006*)0;
LOC36 = gettypedescweak_534079_839829468(m0, (*param0).typ, check0);
add_178482_2381377266(params0, LOC36);
memset((void*)LOC37, 0, sizeof(LOC37));
LOC38 = (Ropeobj178006*)0;
LOC38 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_53), LOC37, 0);
add_178482_2381377266(params0, LOC38);
(*param0).loc.flags |= ((NU16)1)<<((((Tlocflag292810) 0))%(sizeof(NU16)*8));
(*param0).loc.s = ((Tstorageloc292812) 0);
}
goto LA31;
LA34: ;
{
/* Weak dependency: use the weak (forward-declarable) type desc. */
Ropeobj178006* LOC42;
if (!weakdep0) goto LA40;
LOC42 = (Ropeobj178006*)0;
LOC42 = gettypedescweak_534079_839829468(m0, (*param0).typ, check0);
add_178482_2381377266(params0, LOC42);
}
goto LA31;
LA40: ;
{
/* Plain by-value parameter. */
Ropeobj178006* LOC44;
LOC44 = (Ropeobj178006*)0;
LOC44 = gettypedescaux_533503_839829468(m0, (*param0).typ, check0);
add_178482_2381377266(params0, LOC44);
}
LA31: ;
memset((void*)LOC45, 0, sizeof(LOC45));
LOC46 = (Ropeobj178006*)0;
LOC46 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_111), LOC45, 0);
add_178482_2381377266(params0, LOC46);
add_178482_2381377266(params0, (*param0).loc.r);
arr0 = (*param0).typ;
{
/* Kind 23 wraps its element type in sons[0]; unwrap once. */
if (!((*arr0).kind == ((Ttypekind292244) 23))) goto LA49;
arr0 = (*arr0).sons->data[((NI) 0)];
}
LA49: ;
j0 = ((NI) 0);
{
/* For each nested open-array-like level (kinds 27/48) emit an extra
 * length parameter (format T839829468_112 with the name and j0). */
while (1) {
TY532811 LOC57;
if (!((*arr0).kind == ((Ttypekind292244) 27) || (*arr0).kind == ((Ttypekind292244) 48))) goto LA52;
{
if (!((*(*param0).typ).kind == ((Ttypekind292244) 23))) goto LA55;
(*param0).loc.s = ((Tstorageloc292812) 0);
}
LA55: ;
memset((void*)LOC57, 0, sizeof(LOC57));
LOC57[0] = (*param0).loc.r;
LOC57[1] = rope_178401_2381377266(((NI64) (j0)));
addf_179205_2381377266(params0, ((NimStringDesc*) &T839829468_112), LOC57, 2);
j0 += ((NI) 1);
arr0 = (*arr0).sons->data[((NI) 0)];
} LA52: ;
}
} LA13: ;
res_534356_839829468 += ((NI) 1);
} LA12: ;
}
}
/* Hidden result parameter: when the return type exists but is "invalid"
 * as a C return value, append it as a by-pointer parameter instead. */
{
NIM_BOOL LOC60;
Ttype292840* arr0;
TY533289 LOC76;
LOC60 = (NIM_BOOL)0;
LOC60 = !(((*t0).sons->data[((NI) 0)] == NIM_NIL));
if (!(LOC60)) goto LA61;
LOC60 = isinvalidreturntype_533548_839829468((*t0).sons->data[((NI) 0)]);
LA61: ;
if (!LOC60) goto LA62;
arr0 = (*t0).sons->data[((NI) 0)];
{
if (!!(((*params0) == NIM_NIL))) goto LA66;
add_178487_2381377266(params0, ((NimStringDesc*) &T839829468_110));
}
LA66: ;
{
/* Non-array C types (C-type kind != 17) get an explicit "*". */
Tctypekind529007 LOC70;
Ropeobj178006* LOC73;
LOC70 = (Tctypekind529007)0;
LOC70 = mapreturntype_533445_839829468((*t0).sons->data[((NI) 0)]);
if (!!((LOC70 == ((Tctypekind529007) 17)))) goto LA71;
LOC73 = (Ropeobj178006*)0;
LOC73 = gettypedescweak_534079_839829468(m0, arr0, check0);
add_178482_2381377266(params0, LOC73);
add_178487_2381377266(params0, ((NimStringDesc*) &T839829468_53));
}
goto LA68;
LA71: ;
{
Ropeobj178006* LOC75;
LOC75 = (Ropeobj178006*)0;
LOC75 = gettypedescaux_533503_839829468(m0, arr0, check0);
add_178482_2381377266(params0, LOC75);
}
LA68: ;
memset((void*)LOC76, 0, sizeof(LOC76));
addf_179205_2381377266(params0, ((NimStringDesc*) &T839829468_113), LOC76, 0);
}
LA62: ;
/* Closure calling convention (value 8) with declareenvironment0: append
 * the hidden environment-pointer parameter (literal T839829468_114). */
{
NIM_BOOL LOC79;
LOC79 = (NIM_BOOL)0;
LOC79 = ((*t0).callconv == ((Tcallingconvention292002) 8));
if (!(LOC79)) goto LA80;
LOC79 = declareenvironment0;
LA80: ;
if (!LOC79) goto LA81;
{
if (!!(((*params0) == NIM_NIL))) goto LA85;
add_178487_2381377266(params0, ((NimStringDesc*) &T839829468_110));
}
LA85: ;
add_178487_2381377266(params0, ((NimStringDesc*) &T839829468_114));
}
LA81: ;
/* Type flag bit 0 (varargs, presumably): append "..." (T839829468_115). */
{
if (!(((*t0).flags &(1U<<((NU)(((Ttypeflag292431) 0))&31U)))!=0)) goto LA89;
{
if (!!(((*params0) == NIM_NIL))) goto LA93;
add_178487_2381377266(params0, ((NimStringDesc*) &T839829468_110));
}
LA93: ;
add_178487_2381377266(params0, ((NimStringDesc*) &T839829468_115));
}
LA89: ;
/* Close the list: empty list gets "void)" style literal, otherwise ")". */
{
if (!((*params0) == NIM_NIL)) goto LA97;
add_178487_2381377266(params0, ((NimStringDesc*) &T839829468_116));
}
goto LA95;
LA97: ;
{
add_178487_2381377266(params0, ((NimStringDesc*) &T839829468_117));
}
LA95: ;
/* Prepend the opening "(". */
unsureAsgnRef((void**) (&(*params0)), HEX26_178452_2381377266(((NimStringDesc*) &T839829468_118), (*params0)));
}
/* Builds the full C prototype rope for proc prc0: optional line directive,
 * export/import or no-decl attributes, then either the standard
 * "callconv rettype name(params)" layout (format T839829468_119) or, when
 * the symbol carries a codegen constraint string, that user-supplied format
 * with rettype/name/params substituted. Also fills prc0->loc. Generated
 * code; only comments added. */
N_NIMCALL(Ropeobj178006*, genprocheader_535867_839829468)(Tcgen529027* m0, Tsym292834* prc0) {
Ropeobj178006* result0;
Ropeobj178006* rettype0;
Ropeobj178006* params0;
Intset268030 check0;
Ropeobj178006* LOC13;
result0 = (Ropeobj178006*)0;
rettype0 = (Ropeobj178006*)0;
params0 = (Ropeobj178006*)0;
genclinedir_532813_839829468(&result0, (*prc0).info);
{
/* Loc flag bit 5: exported/imported symbol; pick attribute by module
 * codegen flag bit 3 (literals T839829468_22 / _23). */
if (!(((*prc0).loc.flags &(1U<<((NU)(((Tlocflag292810) 5))&15U)))!=0)) goto LA3;
{
if (!(((*m0).flags &(1U<<((NU)(((Codegenflag529025) 3))&7U)))!=0)) goto LA7;
add_178487_2381377266(&result0, ((NimStringDesc*) &T839829468_22));
}
goto LA5;
LA7: ;
{
add_178487_2381377266(&result0, ((NimStringDesc*) &T839829468_23));
}
LA5: ;
}
goto LA1;
LA3: ;
{
/* Calling convention 5: emit literal T839829468_24 instead. */
if (!((*(*prc0).typ).callconv == ((Tcallingconvention292002) 5))) goto LA11;
add_178487_2381377266(&result0, ((NimStringDesc*) &T839829468_24));
}
goto LA1;
LA11: ;
LA1: ;
/* Fresh int-set for cycle detection while emitting type descriptors.
 * (The memset/chckNil/memset sequence is the generator's standard
 * zero-init idiom; left as emitted.) */
memset((void*)(&check0), 0, sizeof(check0));
chckNil((void*)(&check0));
memset((void*)(&check0), 0, sizeof(check0));
initintset_268885_2627731572((&check0));
LOC13 = (Ropeobj178006*)0;
LOC13 = manglename_533205_839829468(prc0);
fillloc_532282_839829468((&(*prc0).loc), ((Tlockind292808) 7), (*prc0).typ, LOC13, ((Tstorageloc292812) 0));
genprocparams_534115_839829468(m0, (*prc0).typ, &rettype0, &params0, (&check0), NIM_TRUE, NIM_FALSE);
{
/* No constraint: standard header layout. */
TY535235 LOC18;
if (!(*prc0).constraint == 0) goto LA16;
memset((void*)LOC18, 0, sizeof(LOC18));
LOC18[0] = rope_178277_2381377266(Callingconvtostr_533585_839829468[((*(*prc0).typ).callconv)- 0]);
LOC18[1] = rettype0;
LOC18[2] = (*prc0).loc.r;
LOC18[3] = params0;
addf_179205_2381377266(&result0, ((NimStringDesc*) &T839829468_119), LOC18, 4);
}
goto LA14;
LA16: ;
{
/* Constraint present: its string value is the format template. */
TY535238 LOC20;
memset((void*)LOC20, 0, sizeof(LOC20));
LOC20[0] = rettype0;
LOC20[1] = (*prc0).loc.r;
LOC20[2] = params0;
result0 = HEX25_178905_2381377266((*(*prc0).constraint).kindU.S3.strval, LOC20, 3);
}
LA14: ;
return result0;
}
/* `n[i]`: fetch the i-th son of an AST node. No bounds check is performed
 * here; callers are expected to stay within sons->len. */
static N_INLINE(Tnode292802*, HEX5BHEX5D_293238_850551059)(Tnode292802* n0, NI i0) {
return (*n0).kindU.S6.sons->data[i0];
}
/* Searches a proc body n0 for a trivially extractable `result = expr`
 * assignment; returns the expression node, or nil if none. When found, a
 * node flag (bit 14 of Tnodeflag292427) is set on the containing node.
 * Recursive over statement-list-like and block-like node kinds; the kind
 * numbers are enum values of Tnodekind292020 (mangled away here). */
N_NIMCALL(Tnode292802*, easyresultasgn_560191_839829468)(Tnode292802* n0) {
Tnode292802* result0;
{ result0 = (Tnode292802*)0;
switch ((*n0).kind) {
case ((Tnodekind292020) 115):
case ((Tnodekind292020) 126):
{
/* Statement-list-like: skip leading "ignorable" nodes (kinds 1,
 * 79..81, 84, 98, 101, 125 -- comments/declarations presumably),
 * then recurse into the first significant statement. */
NI i0;
i0 = ((NI) 0);
{
while (1) {
NIM_BOOL LOC4;
NI LOC5;
Tnode292802* LOC7;
LOC4 = (NIM_BOOL)0;
LOC5 = (NI)0;
LOC5 = len_293081_850551059(n0);
LOC4 = (i0 < LOC5);
if (!(LOC4)) goto LA6;
LOC7 = (Tnode292802*)0;
LOC7 = HEX5BHEX5D_293238_850551059(n0, i0);
LOC4 = ((*LOC7).kind == ((Tnodekind292020) 1) || (*LOC7).kind >= ((Tnodekind292020) 79) && (*LOC7).kind <= ((Tnodekind292020) 81) || (*LOC7).kind == ((Tnodekind292020) 84) || (*LOC7).kind == ((Tnodekind292020) 98) || (*LOC7).kind == ((Tnodekind292020) 101) || (*LOC7).kind == ((Tnodekind292020) 125));
LA6: ;
if (!LOC4) goto LA3;
i0 += ((NI) 1);
} LA3: ;
}
{
NI LOC10;
Tnode292802* LOC13;
LOC10 = (NI)0;
LOC10 = len_293081_850551059(n0);
if (!(i0 < LOC10)) goto LA11;
LOC13 = (Tnode292802*)0;
LOC13 = HEX5BHEX5D_293238_850551059(n0, i0);
result0 = easyresultasgn_560191_839829468(LOC13);
}
LA11: ;
}
break;
case ((Tnodekind292020) 73):
case ((Tnodekind292020) 74):
{
/* Assignment-like: match when the LHS is a symbol node (kind 3)
 * whose symbol kind is 11 (presumably skResult); mark the node and
 * return the RHS (son 1). */
{
NIM_BOOL LOC17;
Tnode292802* LOC18;
Tnode292802* LOC20;
LOC17 = (NIM_BOOL)0;
LOC18 = (Tnode292802*)0;
LOC18 = HEX5BHEX5D_293238_850551059(n0, ((NI) 0));
LOC17 = ((*LOC18).kind == ((Tnodekind292020) 3));
if (!(LOC17)) goto LA19;
LOC20 = (Tnode292802*)0;
LOC20 = HEX5BHEX5D_293238_850551059(n0, ((NI) 0));
LOC17 = (((Tsymkind292435) 11) == (*(*LOC20).kindU.S4.sym).kind);
LA19: ;
if (!LOC17) goto LA21;
(*n0).flags |= ((NU16)1)<<((((Tnodeflag292427) 14))%(sizeof(NU16)*8));
result0 = HEX5BHEX5D_293238_850551059(n0, ((NI) 1));
goto BeforeRet;
}
LA21: ;
}
break;
case ((Tnodekind292020) 109):
{
/* Block-like: recurse into son 0; on success mark this node too. */
{
NI LOC26;
Tnode292802* LOC29;
LOC26 = (NI)0;
LOC26 = len_293081_850551059(n0);
if (!(((NI) 0) < LOC26)) goto LA27;
LOC29 = (Tnode292802*)0;
LOC29 = HEX5BHEX5D_293238_850551059(n0, ((NI) 0));
result0 = easyresultasgn_560191_839829468(LOC29);
{
if (!!((result0 == NIM_NIL))) goto LA32;
(*n0).flags |= ((NU16)1)<<((((Tnodeflag292427) 14))%(sizeof(NU16)*8));
}
LA32: ;
}
LA27: ;
}
break;
default:
{
}
break;
}
}BeforeRet: ;
return result0;
}
/* Public entry point: C type descriptor rope for typ0, using a fresh
 * int-set as the recursion guard for gettypedescaux. (The duplicated
 * memset around chckNil mirrors the generator's zero-init idiom.) */
N_NIMCALL(Ropeobj178006*, gettypedesc_535671_839829468)(Tcgen529027* m0, Ttype292840* typ0) {
Intset268030 visited;
memset((void*)(&visited), 0, sizeof(visited));
chckNil((void*)(&visited));
memset((void*)(&visited), 0, sizeof(visited));
initintset_268885_2627731572((&visited));
return gettypedescaux_533503_839829468(m0, typ0, (&visited));
}
/* Emits the C declaration rope for local variable s0: fills its loc on
 * first use, then either "type [NIM_CONST] [register-ish] name" or, when a
 * codegen constraint string is attached, that template with type and name
 * substituted. Generated code; only comments added. */
N_NIMCALL(Ropeobj178006*, localvardecl_538532_839829468)(Tcproc529021* p0, Tsym292834* s0) {
Ropeobj178006* result0;
result0 = (Ropeobj178006*)0;
{
/* Loc not yet initialised (kind 0): mangle the name and fill it in. */
Ropeobj178006* LOC5;
if (!((*s0).loc.k == ((Tlockind292808) 0))) goto LA3;
LOC5 = (Ropeobj178006*)0;
LOC5 = manglename_533205_839829468(s0);
fillloc_532282_839829468((&(*s0).loc), ((Tlockind292808) 2), (*s0).typ, LOC5, ((Tstorageloc292812) 2));
{
/* Symbol kind 9 (presumably `let`/immutable): set loc flag bit 2. */
if (!((*s0).kind == ((Tsymkind292435) 9))) goto LA8;
(*s0).loc.flags |= ((NU16)1)<<((((Tlocflag292810) 2))%(sizeof(NU16)*8));
}
LA8: ;
}
LA3: ;
result0 = gettypedesc_535671_839829468((*p0).module, (*s0).loc.t);
{
if (!(*s0).constraint == 0) goto LA12;
{
/* Symbol flag bit 8: append qualifier literal T839829468_121. */
if (!(((*s0).flags &(1U<<((NU)(((Tsymflag292184) 8))&31U)))!=0)) goto LA16;
add_178487_2381377266(&result0, ((NimStringDesc*) &T839829468_121));
}
LA16: ;
{
/* Symbol flag bit 7: append qualifier literal T839829468_122. */
if (!(((*s0).flags &(1U<<((NU)(((Tsymflag292184) 7))&31U)))!=0)) goto LA20;
add_178487_2381377266(&result0, ((NimStringDesc*) &T839829468_122));
}
LA20: ;
add_178487_2381377266(&result0, ((NimStringDesc*) &T839829468_111));
add_178482_2381377266(&result0, (*s0).loc.r);
}
goto LA10;
LA12: ;
{
/* Constraint present: use its string value as the format template. */
TY532811 LOC23;
memset((void*)LOC23, 0, sizeof(LOC23));
LOC23[0] = result0;
LOC23[1] = (*s0).loc.r;
result0 = HEX25_178905_2381377266((*(*s0).constraint).kindU.S3.strval, LOC23, 2);
}
LA10: ;
return result0;
}
/* Initialises a Tloc in place: kind, storage class and flags are plain
 * stores; the GC-visible ref fields (type and rope) go through
 * unsureAsgnRef as everywhere else in this generated module. */
N_NIMCALL(void, initloc_532273_839829468)(Tloc292816* result0, Tlockind292808 k0, Ttype292840* typ0, Tstorageloc292812 s0) {
result0->k = k0;
result0->s = s0;
result0->flags = 0;
unsureAsgnRef((void**) (&result0->t), typ0);
unsureAsgnRef((void**) (&result0->r), NIM_NIL);
}
N_NIMCALL(void, initlocexprsingleuse_539289_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* result0) {
/* Initialise a fresh loc for expression e0, mark it single-use (loc flag
 * bit 8 of Tlocflag292810), then generate code for the expression into
 * it. The three calls are order-dependent. */
initloc_532273_839829468(result0, ((Tlockind292808) 0), (*e0).typ, ((Tstorageloc292812) 0));
(*result0).flags |= ((NU16)1)<<((((Tlocflag292810) 8))%(sizeof(NU16)*8));
expr_539248_839829468(p0, e0, result0);
}
/* Returns a pointer to section s0 of the innermost (last) open block of
 * proc context p0. The `? :` guards against a nil blocks sequence. */
static N_INLINE(Ropeobj178006**, s_529179_3723162438)(Tcproc529021* p0, Tcprocsection529011 s0) {
NI lastBlock = (NI)(((*p0).blocks ? (*p0).blocks->Sup.len : 0) - ((NI) 1));
return &(*p0).blocks->data[lastBlock].sections[(s0)- 0];
}
/* Prepends one indentation unit per open block of p0 in front of rope r0
 * (i.e. blocks->len prepends) and returns the resulting rope. */
N_NIMCALL(Ropeobj178006*, indentline_532656_839829468)(Tcproc529021* p0, Ropeobj178006* r0) {
Ropeobj178006* result0 = r0;
NI depth = (NI)(((*p0).blocks ? (*p0).blocks->Sup.len : 0) - ((NI) 1));
NI level;
for (level = ((NI) 0); level <= depth; level += ((NI) 1)) {
prepend_178893_2381377266(&result0, indent_532655_839829468);
}
return result0;
}
/* Formats frmt0/args0 through the codegen formatter (ropecg), indents the
 * result to the current block depth and appends it to section s0 of p0. */
N_NIMCALL(void, linefmt_532714_839829468)(Tcproc529021* p0, Tcprocsection529011 s0, NimStringDesc* frmt0, Ropeobj178006** args0, NI args0Len0) {
Ropeobj178006** section = s_529179_3723162438(p0, s0);
Ropeobj178006* formatted = ropecg_532407_839829468((*p0).module, frmt0, args0, args0Len0);
add_178482_2381377266(section, indentline_532656_839829468(p0, formatted));
}
/* Read-access rope for a loc: its rope as-is, or -- when flag bit 0
 * (indirection) is set -- wrapped through format T839829468_124
 * (presumably a dereference like "(*$1)"; literal defined elsewhere). */
N_NIMCALL(Ropeobj178006*, rdloc_538188_839829468)(Tloc292816 a0) {
Ropeobj178006* result0 = a0.r;
if ((a0.flags &(1U<<((NU)(((Tlocflag292810) 0))&15U)))!=0) {
TY178507 fmtargs;
memset((void*)fmtargs, 0, sizeof(fmtargs));
fmtargs[0] = result0;
result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_124), fmtargs, 1);
}
return result0;
}
/* Appends rope r0, indented to the current block depth, to section s0 of
 * proc context p0. */
N_NIMCALL(void, line_532690_839829468)(Tcproc529021* p0, Tcprocsection529011 s0, Ropeobj178006* r0) {
Ropeobj178006** section = s_529179_3723162438(p0, s0);
add_178482_2381377266(section, indentline_532656_839829468(p0, r0));
}
/* Like linefmt, but formats with the plain `%` formatter (HEX25) instead
 * of the codegen-aware ropecg; indents and appends to section s0 of p0. */
N_NIMCALL(void, linef_532700_839829468)(Tcproc529021* p0, Tcprocsection529011 s0, NimStringDesc* frmt0, Ropeobj178006** args0, NI args0Len0) {
Ropeobj178006** section = s_529179_3723162438(p0, s0);
Ropeobj178006* formatted = HEX25_178905_2381377266(frmt0, args0, args0Len0);
add_178482_2381377266(section, indentline_532656_839829468(p0, formatted));
}
/* Emits the base part of a runtime type-info (RTTI) record for typ0 into
 * the module's sections: the TNimType initialiser (name, size expression,
 * numeric kind, base), an optional flags line, and a debug name line.
 * Generated code; only comments added. */
N_NIMCALL(void, gentypeinfoauxbase_535960_839829468)(Tcgen529027* m0, Ttype292840* typ0, Ttype292840* origtype0, Ropeobj178006* name0, Ropeobj178006* base0) {
NI nimtypekind0;
Ropeobj178006* size0;
TY535235 LOC17;
NI flags0;
Ropeobj178006* LOC33;
TY532811 LOC34;
NimStringDesc* LOC35;
nimtypekind0 = (NI)0;
{
/* Objects lacking a type field are reported with fixed kind 18. */
NIM_BOOL LOC3;
LOC3 = (NIM_BOOL)0;
LOC3 = isobjlackingtypefield_533513_839829468(typ0);
if (!LOC3) goto LA4;
nimtypekind0 = ((NI) 18);
}
goto LA1;
LA4: ;
{
nimtypekind0 = ((NI) ((*typ0).kind));
}
LA1: ;
size0 = (Ropeobj178006*)0;
{
/* Type flag bit 0: size comes from the fixed literal T839829468_133. */
if (!(((*typ0).flags &(1U<<((NU)(((Ttypeflag292431) 0))&31U)))!=0)) goto LA9;
size0 = rope_178277_2381377266(((NimStringDesc*) &T839829468_133));
}
goto LA7;
LA9: ;
{
/* Command mode 2 or module flag bit 27: size the original type;
 * otherwise size the (possibly reduced) typ0. */
NIM_BOOL LOC12;
LOC12 = (NIM_BOOL)0;
LOC12 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC12) goto LA13;
LOC12 = (((*(*m0).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA13: ;
if (!LOC12) goto LA14;
size0 = gettypedesc_535671_839829468(m0, origtype0);
}
goto LA7;
LA14: ;
{
size0 = gettypedesc_535671_839829468(m0, typ0);
}
LA7: ;
/* Main RTTI init line (format T839829468_134) into section 14. */
memset((void*)LOC17, 0, sizeof(LOC17));
LOC17[0] = name0;
LOC17[1] = size0;
LOC17[2] = rope_178401_2381377266(((NI64) (nimtypekind0)));
LOC17[3] = base0;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_134), LOC17, 4);
flags0 = ((NI) 0);
{
/* No GC'd refs inside: set flag bit 1. */
NIM_BOOL LOC20;
LOC20 = (NIM_BOOL)0;
LOC20 = containsgarbagecollectedref_320117_3876443242(typ0);
if (!!(LOC20)) goto LA21;
flags0 = (NI)(flags0 | ((NI) 1));
}
LA21: ;
{
/* Cannot form a reference cycle: set flag bit 2. */
NIM_BOOL LOC25;
LOC25 = (NIM_BOOL)0;
LOC25 = canformacycle_320123_3876443242(typ0);
if (!!(LOC25)) goto LA26;
flags0 = (NI)(flags0 | ((NI) 2));
}
LA26: ;
{
/* Emit the flags assignment only when non-zero (format T..._135). */
TY532811 LOC32;
if (!!((flags0 == ((NI) 0)))) goto LA30;
memset((void*)LOC32, 0, sizeof(LOC32));
LOC32[0] = name0;
LOC32[1] = rope_178401_2381377266(((NI64) (flags0)));
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_135), LOC32, 2);
}
LA30: ;
/* Force the runtime symbol T839829468_129 into the module, then emit the
 * human-readable type name (section 9, format T839829468_136). */
LOC33 = (Ropeobj178006*)0;
LOC33 = cgsym_532403_839829468(m0, ((NimStringDesc*) &T839829468_129));
memset((void*)LOC34, 0, sizeof(LOC34));
LOC34[0] = name0;
LOC35 = (NimStringDesc*)0;
LOC35 = typetostring_320017_3876443242(typ0, ((Tprefereddesc320011) 0));
LOC34[1] = rope_178277_2381377266(LOC35);
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 9))- 0], ((NimStringDesc*) &T839829468_136), LOC34, 2);
}
/* Returns a rope referring to the next free slot of the module's type-node
 * array (format T839829468_138 over the array name and current index) and
 * advances the index counter. */
N_NIMCALL(Ropeobj178006*, getnimnode_535945_839829468)(Tcgen529027* m0) {
TY532811 fmtargs;
Ropeobj178006* slotRef;
memset((void*)fmtargs, 0, sizeof(fmtargs));
fmtargs[0] = (*m0).typenodesname;
fmtargs[1] = rope_178401_2381377266(((NI64) ((*m0).typenodes)));
slotRef = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_138), fmtargs, 2);
(*m0).typenodes += ((NI) 1);
return slotRef;
}
/* Emits runtime type-info for a tuple type typ0 under RTTI symbol name0:
 * the base record (with literal T839829468_18 as base), then one node per
 * field collected into a temporary node array, and finally the node-list
 * header wired into name0. Generated code; only comments added. */
N_NIMCALL(void, gentupleinfo_536549_839829468)(Tcgen529027* m0, Ttype292840* typ0, Ropeobj178006* name0) {
Ropeobj178006* LOC1;
Ropeobj178006* expr0;
NI length0;
TY532811 LOC15;
LOC1 = (Ropeobj178006*)0;
LOC1 = rope_178277_2381377266(((NimStringDesc*) &T839829468_18));
gentypeinfoauxbase_535960_839829468(m0, typ0, typ0, name0, LOC1);
expr0 = getnimnode_535945_839829468(m0);
length0 = sonslen_295327_850551059(typ0);
{
/* Non-empty tuple: declare a temp node-pointer array (format T..._139,
 * section 12) and fill one entry per field. */
Ropeobj178006* tmp0;
TY532811 LOC6;
TY535238 LOC12;
if (!(((NI) 0) < length0)) goto LA4;
tmp0 = gettempname_533596_839829468(m0);
memset((void*)LOC6, 0, sizeof(LOC6));
LOC6[0] = tmp0;
LOC6[1] = rope_178401_2381377266(((NI64) (length0)));
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 12))- 0], ((NimStringDesc*) &T839829468_139), LOC6, 2);
{
NI i_536571_839829468;
NI HEX3Atmp_536590_839829468;
NI res_536593_839829468;
i_536571_839829468 = (NI)0;
HEX3Atmp_536590_839829468 = (NI)0;
HEX3Atmp_536590_839829468 = (NI)(length0 - ((NI) 1));
res_536593_839829468 = ((NI) 0);
{
while (1) {
Ttype292840* a0;
Ropeobj178006* tmp20;
TY535238 LOC10;
TY535235 LOC11;
if (!(res_536593_839829468 <= HEX3Atmp_536590_839829468)) goto LA9;
i_536571_839829468 = res_536593_839829468;
a0 = (*typ0).sons->data[i_536571_839829468];
tmp20 = getnimnode_535945_839829468(m0);
/* tmp[i] = node; then fill the node (offset/type/typeinfo) via
 * formats T839829468_140 and _141 into section 14. */
memset((void*)LOC10, 0, sizeof(LOC10));
LOC10[0] = tmp0;
LOC10[1] = rope_178401_2381377266(((NI64) (i_536571_839829468)));
LOC10[2] = tmp20;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_140), LOC10, 3);
memset((void*)LOC11, 0, sizeof(LOC11));
LOC11[0] = tmp20;
LOC11[1] = gettypedesc_535671_839829468(m0, typ0);
LOC11[2] = rope_178401_2381377266(((NI64) (i_536571_839829468)));
LOC11[3] = gentypeinfo_535941_839829468(m0, a0);
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_141), LOC11, 4);
res_536593_839829468 += ((NI) 1);
} LA9: ;
}
}
/* Node-list header for a populated tuple (format T839829468_142). */
memset((void*)LOC12, 0, sizeof(LOC12));
LOC12[0] = expr0;
LOC12[1] = rope_178401_2381377266(((NI64) (length0)));
LOC12[2] = tmp0;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_142), LOC12, 3);
}
goto LA2;
LA4: ;
{
/* Empty tuple: header only (format T839829468_143). */
TY532811 LOC14;
memset((void*)LOC14, 0, sizeof(LOC14));
LOC14[0] = expr0;
LOC14[1] = rope_178401_2381377266(((NI64) (length0)));
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_143), LOC14, 2);
}
LA2: ;
/* Attach the node to the RTTI record (format T839829468_144). */
memset((void*)LOC15, 0, sizeof(LOC15));
LOC15[0] = name0;
LOC15[1] = expr0;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_144), LOC15, 2);
}
/* Builds the synthetic type that describes a closure at runtime: a kind-18
 * type (presumably tuple) whose first son is a kind-26 type and whose
 * second son is a kind-22 type (presumably ref) wrapping an empty kind-18
 * type. Kind numbers are enum values of Ttypekind292244; the symbolic
 * names are not visible in this generated unit. */
N_NIMCALL(Ttype292840*, fakeclosuretype_537010_839829468)(Tsym292834* owner0) {
Ttype292840* closureTuple;
Ttype292840* procPart;
Ttype292840* envRef;
Ttype292840* envBody;
closureTuple = newtype_295107_850551059(((Ttypekind292244) 18), owner0);
procPart = newtype_295107_850551059(((Ttypekind292244) 26), owner0);
rawaddson_296394_850551059(closureTuple, procPart);
envRef = newtype_295107_850551059(((Ttypekind292244) 22), owner0);
envBody = newtype_295107_850551059(((Ttypekind292244) 18), owner0);
rawaddson_296394_850551059(envRef, envBody);
rawaddson_296394_850551059(closureTuple, envRef);
return closureTuple;
}
/* Emits RTTI for typ0: resolves the base type's RTTI first (sons[0] when
 * present and non-nil, skipping wrappers for kind-17 types), falling back
 * to the nil literal T839829468_18, then delegates to gentypeinfoauxbase.
 * Generated code; only comments added. */
N_NIMCALL(void, gentypeinfoaux_536027_839829468)(Tcgen529027* m0, Ttype292840* typ0, Ttype292840* origtype0, Ropeobj178006* name0) {
Ropeobj178006* base0;
base0 = (Ropeobj178006*)0;
{
NIM_BOOL LOC3;
NI LOC4;
Ttype292840* x0;
LOC3 = (NIM_BOOL)0;
LOC4 = (NI)0;
LOC4 = sonslen_295327_850551059(typ0);
LOC3 = (((NI) 0) < LOC4);
if (!(LOC3)) goto LA5;
LOC3 = !(((*typ0).sons->data[((NI) 0)] == NIM_NIL));
LA5: ;
if (!LOC3) goto LA6;
x0 = (*typ0).sons->data[((NI) 0)];
{
/* For kind-17 types (presumably object), skip wrapper kinds around
 * the base (mask constant encodes the skipped set). */
if (!((*typ0).kind == ((Ttypekind292244) 17))) goto LA10;
x0 = skiptypes_296099_850551059(x0, IL64(211106247215360));
}
LA10: ;
base0 = gentypeinfo_535941_839829468(m0, x0);
}
goto LA1;
LA6: ;
{
/* No base type: use the nil literal. */
base0 = rope_178277_2381377266(((NimStringDesc*) &T839829468_18));
}
LA1: ;
gentypeinfoauxbase_535960_839829468(m0, typ0, origtype0, name0, base0);
}
/* A value type is "complex" — i.e. needs aggregate construction rather than
   a scalar zero assignment (see constructloc below) — when its kind is one
   of {16, 4, 19, 18, 17}, or it is a proc type (kind 25) whose calling
   convention is 8 (presumably a closure, a two-word aggregate — TODO
   confirm against the Tcallingconvention enum). */
static N_INLINE(NIM_BOOL, iscomplexvaluetype_538317_839829468)(Ttype292840* t0) {
NIM_BOOL result0;
if ((*t0).kind == ((Ttypekind292244) 16) || (*t0).kind == ((Ttypekind292244) 4) || (*t0).kind == ((Ttypekind292244) 19) || (*t0).kind == ((Ttypekind292244) 18) || (*t0).kind == ((Ttypekind292244) 17)) {
result0 = NIM_TRUE;
} else {
result0 = ((*t0).kind == ((Ttypekind292244) 25) && (*t0).callconv == ((Tcallingconvention292002) 8));
}
return result0;
}
/* One-shot helper: the first call adds the header T839829468_151 (presumably
   "<string.h>" — TODO confirm the string constant) to the module's header
   list and sets flag bit 4 so subsequent calls become no-ops. */
N_NIMCALL(void, usestringh_532345_839829468)(Tcgen529027* m0) {
if (!((((*m0).flags &(1U<<((NU)(((Codegenflag529025) 4))&7U)))!=0))) {
(*m0).flags |= ((NU8)1)<<((((Codegenflag529025) 4))%(sizeof(NU8)*8));
/* inclusion result is deliberately ignored, as in the original */
(void) includestr_147249_3771138726((&(*m0).headerfiles), ((NimStringDesc*) &T839829468_151));
}
}
/* Returns the C expression rope for the ADDRESS of location `a0`.
   Starts from the loc's rope and wraps it between T839829468_128 and
   T839829468_117 (presumably "(&" and ")" — string constants not visible
   here) unless loc-flag bit 0 is set or the loc's mapped C type kind is 17,
   in which cases the rope is used unwrapped.  NOTE(review): exact flag
   semantics assumed from usage — confirm against the Tlocflag enum. */
N_NIMCALL(Ropeobj178006*, addrloc_538204_839829468)(Tloc292816 a0) {
Ropeobj178006* result0;
result0 = (Ropeobj178006*)0;
result0 = a0.r;
{
NIM_BOOL LOC3;
Tctypekind529007 LOC5;
Ropeobj178006* LOC8;
LOC3 = (NIM_BOOL)0;
/* short-circuit AND: flag bit 0 clear AND mapped C type kind != 17 */
LOC3 = !(((a0.flags &(1U<<((NU)(((Tlocflag292810) 0))&15U)))!=0));
if (!(LOC3)) goto LA4;
LOC5 = (Tctypekind529007)0;
LOC5 = maptype_533393_839829468(a0.t);
LOC3 = !((LOC5 == ((Tctypekind529007) 17)));
LA4: ;
if (!LOC3) goto LA6;
LOC8 = (Ropeobj178006*)0;
LOC8 = HEX26_178452_2381377266(((NimStringDesc*) &T839829468_128), result0);
result0 = HEX26_178447_2381377266(LOC8, ((NimStringDesc*) &T839829468_117));
}
LA6: ;
return result0;
}
/* Emits initialization of the hidden type field(s) of an object located at
   `a0` into section `section0` of proc `p0`.  Dispatches on
   analyseobjectwithtypefield(t0): case 0 = nothing to initialize;
   case 1 = the object itself carries a type field; case 2 = embedded type
   fields handled by a runtime call (format T839829468_155). */
N_NIMCALL(void, genobjectinit_538242_839829468)(Tcproc529021* p0, Tcprocsection529011 section0, Ttype292840* t0, Tloc292816 a0, NIM_BOOL takeaddr0) {
Ttypefieldresult320145 LOC1;
LOC1 = (Ttypefieldresult320145)0;
LOC1 = analyseobjectwithtypefield_320149_3876443242(t0);
switch (LOC1) {
case ((Ttypefieldresult320145) 0):
{
}
break;
case ((Ttypefieldresult320145) 1):
{
Ropeobj178006* r0;
Ttype292840* s0;
TY532811 LOC19;
r0 = rdloc_538188_839829468(a0);
{
TY178507 LOC8;
/* when the loc is not to be taken by address, wrap the rvalue via
   format T839829468_124 first */
if (!!(takeaddr0)) goto LA6;
memset((void*)LOC8, 0, sizeof(LOC8));
LOC8[0] = r0;
r0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_124), LOC8, 1);
}
LA6: ;
s0 = skiptypes_296099_850551059(t0, IL64(211106232576256));
{
NIM_BOOL LOC11;
LOC11 = (NIM_BOOL)0;
/* unless compiling in mode gcmd==2 or the module has sym-flag bit 27
   set, walk up the inheritance chain (kind 17 with non-nil son #0),
   appending T839829468_153 per level (presumably a ".Sup" accessor —
   TODO confirm the string constant) */
LOC11 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC11) goto LA12;
LOC11 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA12: ;
if (!!(LOC11)) goto LA13;
{
while (1) {
NIM_BOOL LOC17;
LOC17 = (NIM_BOOL)0;
LOC17 = ((*s0).kind == ((Ttypekind292244) 17));
if (!(LOC17)) goto LA18;
LOC17 = !(((*s0).sons->data[((NI) 0)] == NIM_NIL));
LA18: ;
if (!LOC17) goto LA16;
add_178487_2381377266(&r0, ((NimStringDesc*) &T839829468_153));
s0 = skiptypes_296099_850551059((*s0).sons->data[((NI) 0)], IL64(211106247215360));
} LA16: ;
}
}
LA13: ;
/* assign the type info (format T839829468_154) */
memset((void*)LOC19, 0, sizeof(LOC19));
LOC19[0] = r0;
LOC19[1] = gentypeinfo_535941_839829468((*p0).module, t0);
linefmt_532714_839829468(p0, section0, ((NimStringDesc*) &T839829468_154), LOC19, 2);
}
break;
case ((Ttypefieldresult320145) 2):
{
Ropeobj178006* r0;
TY532811 LOC26;
{
/* pass the object by address or by value depending on takeaddr0 */
if (!takeaddr0) goto LA23;
r0 = addrloc_538204_839829468(a0);
}
goto LA21;
LA23: ;
{
r0 = rdloc_538188_839829468(a0);
}
LA21: ;
memset((void*)LOC26, 0, sizeof(LOC26));
LOC26[0] = r0;
LOC26[1] = gentypeinfo_535941_839829468((*p0).module, t0);
linefmt_532714_839829468(p0, section0, ((NimStringDesc*) &T839829468_155), LOC26, 2);
}
break;
}
}
/* Emits code into proc section 2 that (re)initializes location `loc0`.
   Simple value types get a scalar zero assignment (format T839829468_150);
   complex value types (see iscomplexvaluetype) get an aggregate zeroing
   (format T839829468_152, with string.h pulled in) when the loc is not a
   temp or contains GC'd refs, followed by type-field initialization. */
N_NIMCALL(void, constructloc_538388_839829468)(Tcproc529021* p0, Tloc292816 loc0, NIM_BOOL istemp0) {
Ttype292840* typ0;
typ0 = skiptypes_296099_850551059(loc0.t, IL64(211106233624832));
{
NIM_BOOL LOC3;
TY532811 LOC6;
LOC3 = (NIM_BOOL)0;
LOC3 = iscomplexvaluetype_538317_839829468(typ0);
if (!!(LOC3)) goto LA4;
/* simple scalar: plain zero assignment */
memset((void*)LOC6, 0, sizeof(LOC6));
LOC6[0] = rdloc_538188_839829468(loc0);
LOC6[1] = gettypedesc_535671_839829468((*p0).module, typ0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_150), LOC6, 2);
}
goto LA1;
LA4: ;
{
{
NIM_BOOL LOC10;
LOC10 = (NIM_BOOL)0;
/* zero the aggregate when it is not a temp OR contains GC'd refs */
LOC10 = !(istemp0);
if (LOC10) goto LA11;
LOC10 = containsgarbagecollectedref_320117_3876443242(loc0.t);
LA11: ;
if (!LOC10) goto LA12;
{
NIM_BOOL LOC16;
TY532811 LOC19;
LOC16 = (NIM_BOOL)0;
/* imported C++ types must not be raw-zeroed */
LOC16 = isimportedcpptype_533476_839829468(typ0);
if (!!(LOC16)) goto LA17;
usestringh_532345_839829468((*p0).module);
memset((void*)LOC19, 0, sizeof(LOC19));
LOC19[0] = addrloc_538204_839829468(loc0);
LOC19[1] = rdloc_538188_839829468(loc0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_152), LOC19, 2);
}
LA17: ;
}
LA12: ;
/* (re)establish the hidden type field(s) */
genobjectinit_538242_839829468(p0, ((Tcprocsection529011) 2), loc0.t, loc0, NIM_TRUE);
}
LA1: ;
}
/* Allocates a fresh local temporary of type `t0` inside proc `p0`:
   bumps the proc's label counter to build a unique name (prefix
   T839829468_149), emits its declaration into section 0 (format
   T839829468_54), fills in `*result0` as a local loc (kind 1, storage 2),
   and constructs it — note istemp is passed as !needsinit0. */
N_NIMCALL(void, gettemp_537032_839829468)(Tcproc529021* p0, Ttype292840* t0, Tloc292816* result0, NIM_BOOL needsinit0) {
Ropeobj178006* LOC1;
TY532811 LOC2;
(*p0).labels += ((NI) 1);
LOC1 = (Ropeobj178006*)0;
LOC1 = rope_178401_2381377266(((NI64) ((*p0).labels)));
unsureAsgnRef((void**) (&(*result0).r), HEX26_178452_2381377266(((NimStringDesc*) &T839829468_149), LOC1));
memset((void*)LOC2, 0, sizeof(LOC2));
LOC2[0] = gettypedesc_535671_839829468((*p0).module, t0);
LOC2[1] = (*result0).r;
linefmt_532714_839829468(p0, ((Tcprocsection529011) 0), ((NimStringDesc*) &T839829468_54), LOC2, 2);
(*result0).k = ((Tlockind292808) 1);
unsureAsgnRef((void**) (&(*result0).t), t0);
(*result0).s = ((Tstorageloc292812) 2);
(*result0).flags = 0;
constructloc_538388_839829468(p0, (*result0), !(needsinit0));
}
/* Wraps `accessor0` in a base-object access (format T839829468_161,
   presumably appending ".Sup" — string constant not visible here) unless
   gcmd==2 or the module has sym-flag bit 27 set, in which case inheritance
   is expressed natively and the accessor passes through unchanged. */
static N_INLINE(Ropeobj178006*, parentobj_537257_839829468)(Ropeobj178006* accessor0, Tcgen529027* m0) {
Ropeobj178006* result0;
result0 = (Ropeobj178006*)0;
{
NIM_BOOL LOC3;
TY178507 LOC7;
LOC3 = (NIM_BOOL)0;
LOC3 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC3) goto LA4;
LOC3 = (((*(*m0).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA4: ;
if (!!(LOC3)) goto LA5;
memset((void*)LOC7, 0, sizeof(LOC7));
LOC7[0] = accessor0;
result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_161), LOC7, 1);
}
goto LA1;
LA5: ;
{
result0 = accessor0;
}
LA1: ;
return result0;
}
/* Renders a 64-bit signed integer as a C literal rope.  Values strictly
   inside (-2^31, 2^31-1] become plain decimal; exactly -2147483648 and
   INT64_MIN go through dedicated format strings (a bare negative literal
   of that magnitude is not representable in C, since the unary minus
   applies to an out-of-range positive constant); remaining 64-bit values
   use format T839829468_167 (presumably an IL64()-style wrapper). */
N_NIMCALL(Ropeobj178006*, intliteral_539270_839829468)(NI64 i0) {
Ropeobj178006* result0;
result0 = (Ropeobj178006*)0;
{
NIM_BOOL LOC3;
LOC3 = (NIM_BOOL)0;
/* -2147483648 < i0 && i0 <= 2147483647: fits as a plain int literal */
LOC3 = (IL64(-2147483648) < i0);
if (!(LOC3)) goto LA4;
LOC3 = (i0 <= IL64(2147483647));
LA4: ;
if (!LOC3) goto LA5;
result0 = rope_178401_2381377266(i0);
}
goto LA1;
LA5: ;
{
TY533289 LOC10;
/* exactly INT32_MIN: special-cased expression (format T839829468_166) */
if (!(i0 == IL64(-2147483648))) goto LA8;
memset((void*)LOC10, 0, sizeof(LOC10));
result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_166), LOC10, 0);
}
goto LA1;
LA8: ;
{
TY178507 LOC14;
/* any value above INT64_MIN: 64-bit literal format */
if (!((IL64(-9223372036854775807) - IL64(1)) < i0)) goto LA12;
memset((void*)LOC14, 0, sizeof(LOC14));
LOC14[0] = rope_178401_2381377266(i0);
result0 = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_167), LOC14, 1);
}
goto LA1;
LA12: ;
{
TY533289 LOC16;
/* exactly INT64_MIN: special-cased expression (format T839829468_168) */
memset((void*)LOC16, 0, sizeof(LOC16));
result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_168), LOC16, 0);
}
LA1: ;
return result0;
}
/* Renders a 64-bit signed integer as an explicitly 64-bit C literal rope:
   values above INT64_MIN via format T839829468_167; INT64_MIN itself via
   the dedicated expression T839829468_168 (its magnitude is not a valid
   positive literal in C).  Same edge handling as intliteral above, minus
   the 32-bit fast path. */
N_NIMCALL(Ropeobj178006*, int64literal_549430_839829468)(NI64 i0) {
Ropeobj178006* result0;
result0 = (Ropeobj178006*)0;
{
TY178507 LOC5;
if (!((IL64(-9223372036854775807) - IL64(1)) < i0)) goto LA3;
memset((void*)LOC5, 0, sizeof(LOC5));
LOC5[0] = rope_178401_2381377266(i0);
result0 = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_167), LOC5, 1);
}
goto LA1;
LA3: ;
{
TY533289 LOC7;
memset((void*)LOC7, 0, sizeof(LOC7));
result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_168), LOC7, 0);
}
LA1: ;
return result0;
}
/* Renders an unsigned 64-bit value as a C literal rope: decimal digits
   followed by the 3-character suffix T839829468_171 (presumably "ULL" —
   the +3 in the allocation matches that length; TODO confirm). */
N_NIMCALL(Ropeobj178006*, uint64literal_549442_839829468)(NU64 i0) {
NimStringDesc* digits;
NimStringDesc* text;
digits = HEX24_8401_1689653243(i0);
text = rawNewString(digits->Sup.len + 3);
appendString(text, digits);
appendString(text, ((NimStringDesc*) &T839829468_171));
return rope_178277_2381377266(text);
}
/* Emits a Nim string literal as module data: ensures the runtime symbol
   named by T839829468_79 is available (cgsym), allocates a fresh temp
   name, and appends a definition (format T839829468_177) carrying the
   escaped C string plus its length to module section 8.  Returns the
   temp name for use at the literal's occurrence. */
N_NIMCALL(Ropeobj178006*, getstrlit_549468_839829468)(Tcgen529027* m0, NimStringDesc* s0) {
Ropeobj178006* result0;
Ropeobj178006* LOC1;
TY535238 LOC2;
result0 = (Ropeobj178006*)0;
LOC1 = (Ropeobj178006*)0;
LOC1 = cgsym_532403_839829468(m0, ((NimStringDesc*) &T839829468_79));
result0 = gettempname_533596_839829468(m0);
memset((void*)LOC2, 0, sizeof(LOC2));
LOC2[0] = result0;
LOC2[1] = makecstring_191638_155036129(s0);
/* nil-safe length: 0 for a nil NimStringDesc */
LOC2[2] = rope_178401_2381377266(((NI64) ((s0 ? s0->Sup.len : 0))));
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 8))- 0], ((NimStringDesc*) &T839829468_177), LOC2, 3);
return result0;
}
/* Renders literal node `n0` as a C expression rope, interpreting it at the
   explicitly supplied type `ty0`.  Dispatches on the node kind: integer
   kinds (5..15), nil literal (23), string-like kinds (20..22), float kinds
   (16..18); anything else is an internal error. */
N_NIMCALL(Ropeobj178006*, genliteral_549476_839829468)(Tcproc529021* p0, Tnode292802* n0, Ttype292840* ty0) {
Ropeobj178006* result0;
result0 = (Ropeobj178006*)0;
{
if (!(ty0 == NIM_NIL)) goto LA3;
internalerror_196100_155036129((*n0).info, ((NimStringDesc*) &T839829468_165));
}
LA3: ;
switch ((*n0).kind) {
case ((Tnodekind292020) 5) ... ((Tnodekind292020) 15):
{
/* integer literal: choose rendering by the (skipped) type kind */
Ttype292840* LOC6;
LOC6 = (Ttype292840*)0;
LOC6 = skiptypes_296099_850551059(ty0, IL64(211106242013440));
switch ((*LOC6).kind) {
case ((Ttypekind292244) 2):
case ((Ttypekind292244) 5):
{
result0 = intliteral_539270_839829468((*n0).kindU.S1.intval);
}
break;
case ((Ttypekind292244) 1):
{
/* type kind 1: boolean-like — intval != 0 selects T839829468_169,
   else T839829468_170 (presumably NIM_TRUE/NIM_FALSE) */
{
TY533289 LOC13;
if (!!(((*n0).kindU.S1.intval == IL64(0)))) goto LA11;
memset((void*)LOC13, 0, sizeof(LOC13));
result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_169), LOC13, 0);
}
goto LA9;
LA11: ;
{
TY533289 LOC15;
memset((void*)LOC15, 0, sizeof(LOC15));
result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_170), LOC15, 0);
}
LA9: ;
}
break;
case ((Ttypekind292244) 35):
{
result0 = int64literal_549430_839829468((*n0).kindU.S1.intval);
}
break;
case ((Ttypekind292244) 44):
{
result0 = uint64literal_549442_839829468(((NU64) ((*n0).kindU.S1.intval)));
}
break;
default:
{
/* other ordinal types: cast expression "(type)(value)" via T839829468_172 */
TY532811 LOC19;
Ttype292840* LOC20;
memset((void*)LOC19, 0, sizeof(LOC19));
LOC20 = (Ttype292840*)0;
LOC20 = skiptypes_296099_850551059(ty0, IL64(211106242013440));
LOC19[0] = gettypedesc_535671_839829468((*p0).module, LOC20);
LOC19[1] = intliteral_539270_839829468((*n0).kindU.S1.intval);
result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_172), LOC19, 2);
}
break;
}
}
break;
case ((Tnodekind292020) 23):
{
/* nil literal */
Ttype292840* t0;
t0 = skiptypes_296099_850551059(ty0, IL64(211106242013440));
{
NIM_BOOL LOC24;
NI id0;
Ropeobj178006* LOC28;
LOC24 = (NIM_BOOL)0;
/* closure (proc type, callconv 8): nil closures are emitted as cached
   module data (tmpbase + id), defined once via format T839829468_173 */
LOC24 = ((*t0).kind == ((Ttypekind292244) 25));
if (!(LOC24)) goto LA25;
LOC24 = ((*t0).callconv == ((Tcallingconvention292002) 8));
LA25: ;
if (!LOC24) goto LA26;
id0 = nodetabletestorset_342682_1142335848((&(*(*p0).module).datacache), n0, ((NI) ((*(*p0).module).labels)));
LOC28 = (Ropeobj178006*)0;
LOC28 = rope_178401_2381377266(((NI64) (id0)));
result0 = HEX26_178418_2381377266((*(*p0).module).tmpbase, LOC28);
{
TY532811 LOC33;
/* id == labels means this cache entry is new: emit its definition */
if (!(id0 == ((NI) ((*(*p0).module).labels)))) goto LA31;
(*(*p0).module).labels += ((NI) 1);
memset((void*)LOC33, 0, sizeof(LOC33));
LOC33[0] = gettypedesc_535671_839829468((*p0).module, t0);
LOC33[1] = result0;
addf_179205_2381377266(&(*(*p0).module).s[(((Tcfilesection529005) 8))- 0], ((NimStringDesc*) &T839829468_173), LOC33, 2);
}
LA31: ;
}
goto LA22;
LA26: ;
{
/* plain pointer-like nil: fixed rope T839829468_174 */
result0 = rope_178277_2381377266(((NimStringDesc*) &T839829468_174));
}
LA22: ;
}
break;
case ((Tnodekind292020) 20) ... ((Tnodekind292020) 22):
{
/* string literal */
{
TY533289 LOC40;
/* note: parses as ((!strval) == 0), i.e. the goto is taken when strval
   is non-nil; this branch body handles the nil string */
if (!(*n0).kindU.S3.strval == 0) goto LA38;
memset((void*)LOC40, 0, sizeof(LOC40));
result0 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_175), LOC40, 0);
}
goto LA36;
LA38: ;
{
Ttype292840* LOC42;
NI id0;
LOC42 = (Ttype292840*)0;
/* Nim string type (kind 28): cache the literal in module data */
LOC42 = skiptypes_296099_850551059(ty0, IL64(211106242013440));
if (!((*LOC42).kind == ((Ttypekind292244) 28))) goto LA43;
id0 = nodetabletestorset_342682_1142335848((&(*(*p0).module).datacache), n0, ((NI) ((*(*p0).module).labels)));
{
TY178507 LOC49;
/* fresh entry: emit the literal's data (getstrlit) and reference it */
if (!(id0 == ((NI) ((*(*p0).module).labels)))) goto LA47;
memset((void*)LOC49, 0, sizeof(LOC49));
LOC49[0] = getstrlit_549468_839829468((*p0).module, (*n0).kindU.S3.strval);
result0 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_176), LOC49, 1);
}
goto LA45;
LA47: ;
{
/* already emitted: reference the cached temp (tmpbase + id) */
TY532811 LOC51;
memset((void*)LOC51, 0, sizeof(LOC51));
LOC51[0] = (*(*p0).module).tmpbase;
LOC51[1] = rope_178401_2381377266(((NI64) (id0)));
result0 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_178), LOC51, 2);
}
LA45: ;
}
goto LA36;
LA43: ;
{
/* cstring-like: inline escaped C string */
result0 = makecstring_191638_155036129((*n0).kindU.S3.strval);
}
LA36: ;
}
break;
case ((Tnodekind292020) 16) ... ((Tnodekind292020) 18):
{
/* float literal: max-precision decimal text */
NimStringDesc* LOC54;
LOC54 = (NimStringDesc*)0;
LOC54 = tostrmaxprecision_298007_3471544153((*n0).kindU.S2.floatval);
result0 = rope_178277_2381377266(LOC54);
}
break;
default:
{
/* unsupported node kind: report and return nil */
NimStringDesc* LOC56;
LOC56 = (NimStringDesc*)0;
LOC56 = rawNewString(reprEnum((NI)(*n0).kind, (&NTI292020))->Sup.len + 12);
appendString(LOC56, ((NimStringDesc*) &T839829468_179));
appendString(LOC56, reprEnum((NI)(*n0).kind, (&NTI292020)));
appendChar(LOC56, 41);
internalerror_196100_155036129((*n0).info, LOC56);
result0 = NIM_NIL;
}
break;
}
return result0;
}
/* Convenience overload: render the literal node at its own type. */
N_NIMCALL(Ropeobj178006*, genliteral_539273_839829468)(Tcproc529021* p0, Tnode292802* n0) {
return genliteral_549476_839829468(p0, n0, (*n0).typ);
}
/* Emits the `case` labels for one of-branch `branch0` (all sons except the
   last, which is the branch body).  Range labels (node kind 44) use the
   GNU `case a ... b:` extension (format T839829468_164) when the selected
   C compiler advertises support (Field20 bit 0); otherwise the range is
   expanded into one `case v:` line per value by incrementing a copied
   node.  Plain labels emit a single `case v:` via format T839829468_180. */
N_NIMCALL(void, gencaserange_537028_839829468)(Tcproc529021* p0, Tnode292802* branch0) {
NI length0;
length0 = len_293081_850551059(branch0);
{
NI j_547676_839829468;
NI HEX3Atmp_547717_839829468;
NI res_547720_839829468;
j_547676_839829468 = (NI)0;
HEX3Atmp_547717_839829468 = (NI)0;
/* iterate sons 0 .. length-2: the last son is the branch body */
HEX3Atmp_547717_839829468 = (NI)(length0 - ((NI) 2));
res_547720_839829468 = ((NI) 0);
{
while (1) {
if (!(res_547720_839829468 <= HEX3Atmp_547717_839829468)) goto LA3;
j_547676_839829468 = res_547720_839829468;
{
Tnode292802* LOC6;
LOC6 = (Tnode292802*)0;
LOC6 = HEX5BHEX5D_293238_850551059(branch0, j_547676_839829468);
if (!((*LOC6).kind == ((Tnodekind292020) 44))) goto LA7;
{
TY532811 LOC13;
Tnode292802* LOC14;
Tnode292802* LOC15;
Tnode292802* LOC16;
Tnode292802* LOC17;
/* compiler supports case ranges: emit `case lo ... hi:` directly */
if (!((Cc_273413_2528170400[(ccompiler_273431_2528170400)- 1].Field20 &(1U<<((NU)(((Tinfoccprop273004) 0))&7U)))!=0)) goto LA11;
memset((void*)LOC13, 0, sizeof(LOC13));
LOC14 = (Tnode292802*)0;
LOC14 = HEX5BHEX5D_293238_850551059(branch0, j_547676_839829468);
LOC15 = (Tnode292802*)0;
LOC15 = HEX5BHEX5D_293238_850551059(LOC14, ((NI) 0));
LOC13[0] = genliteral_539273_839829468(p0, LOC15);
LOC16 = (Tnode292802*)0;
LOC16 = HEX5BHEX5D_293238_850551059(branch0, j_547676_839829468);
LOC17 = (Tnode292802*)0;
LOC17 = HEX5BHEX5D_293238_850551059(LOC16, ((NI) 1));
LOC13[1] = genliteral_539273_839829468(p0, LOC17);
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_164), LOC13, 2);
}
goto LA9;
LA11: ;
{
Tnode292802* v0;
Tnode292802* LOC19;
Tnode292802* LOC20;
LOC19 = (Tnode292802*)0;
LOC19 = HEX5BHEX5D_293238_850551059(branch0, j_547676_839829468);
LOC20 = (Tnode292802*)0;
LOC20 = HEX5BHEX5D_293238_850551059(LOC19, ((NI) 0));
/* portable fallback: copy the low bound and emit one case label per
   value, bumping the copied node's intval until it passes the high
   bound */
v0 = copynode_296528_850551059(LOC20);
{
while (1) {
Tnode292802* LOC23;
Tnode292802* LOC24;
TY178507 LOC25;
LOC23 = (Tnode292802*)0;
LOC23 = HEX5BHEX5D_293238_850551059(branch0, j_547676_839829468);
LOC24 = (Tnode292802*)0;
LOC24 = HEX5BHEX5D_293238_850551059(LOC23, ((NI) 1));
if (!((*v0).kindU.S1.intval <= (*LOC24).kindU.S1.intval)) goto LA22;
memset((void*)LOC25, 0, sizeof(LOC25));
LOC25[0] = genliteral_539273_839829468(p0, v0);
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_180), LOC25, 1);
(*v0).kindU.S1.intval += ((NI) 1);
} LA22: ;
}
}
LA9: ;
}
goto LA4;
LA7: ;
{
/* simple (non-range) label */
TY178507 LOC27;
Tnode292802* LOC28;
memset((void*)LOC27, 0, sizeof(LOC27));
LOC28 = (Tnode292802*)0;
LOC28 = HEX5BHEX5D_293238_850551059(branch0, j_547676_839829468);
LOC27[0] = genliteral_539273_839829468(p0, LOC28);
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_180), LOC27, 1);
}
LA4: ;
res_547720_839829468 += ((NI) 1);
} LA3: ;
}
}
}
/* Recursively emits GC-traversal code for the record/field tree `n0`,
   prefixing every field access with `accessor0`.  Node kind 138 is a list
   of sub-nodes; kind 139 is a record-case (emits a C switch on the
   discriminator); kind 3 is a field symbol (emits traversal of that
   field's type). */
N_NIMCALL(void, gentraverseproc_537039_839829468)(Ttraversalclosure537019* c0, Ropeobj178006* accessor0, Tnode292802* n0) {
{ {
if (!(n0 == NIM_NIL)) goto LA3;
goto BeforeRet;
}
LA3: ;
switch ((*n0).kind) {
case ((Tnodekind292020) 138):
{
/* record list: recurse into every son */
{
NI i_537068_839829468;
NI HEX3Atmp_537239_839829468;
NI LOC7;
NI res_537242_839829468;
i_537068_839829468 = (NI)0;
HEX3Atmp_537239_839829468 = (NI)0;
LOC7 = (NI)0;
LOC7 = sonslen_295351_850551059(n0);
HEX3Atmp_537239_839829468 = (NI)(LOC7 - ((NI) 1));
res_537242_839829468 = ((NI) 0);
{
while (1) {
if (!(res_537242_839829468 <= HEX3Atmp_537239_839829468)) goto LA9;
i_537068_839829468 = res_537242_839829468;
gentraverseproc_537039_839829468(c0, accessor0, (*n0).kindU.S6.sons->data[i_537068_839829468]);
res_537242_839829468 += ((NI) 1);
} LA9: ;
}
}
}
break;
case ((Tnodekind292020) 139):
{
/* record case: son #0 must be the discriminator symbol */
Tcproc529021* p0;
Tsym292834* disc0;
TY532811 LOC15;
TY533289 LOC28;
{
if (!!(((*(*n0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind292020) 3)))) goto LA13;
internalerror_196100_155036129((*n0).info, ((NimStringDesc*) &T839829468_162));
}
LA13: ;
p0 = (*c0).p;
disc0 = (*(*n0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym;
/* open `switch (accessor.disc)` via format T839829468_163 */
memset((void*)LOC15, 0, sizeof(LOC15));
LOC15[0] = accessor0;
LOC15[1] = (*disc0).loc.r;
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_163), LOC15, 2);
{
NI i_537098_839829468;
NI HEX3Atmp_537249_839829468;
NI LOC17;
NI res_537252_839829468;
i_537098_839829468 = (NI)0;
HEX3Atmp_537249_839829468 = (NI)0;
LOC17 = (NI)0;
LOC17 = sonslen_295351_850551059(n0);
HEX3Atmp_537249_839829468 = (NI)(LOC17 - ((NI) 1));
/* branches start at son #1 (son #0 is the discriminator) */
res_537252_839829468 = ((NI) 1);
{
while (1) {
Tnode292802* branch0;
Tnode292802* LOC26;
TY533289 LOC27;
if (!(res_537252_839829468 <= HEX3Atmp_537249_839829468)) goto LA19;
i_537098_839829468 = res_537252_839829468;
branch0 = (*n0).kindU.S6.sons->data[i_537098_839829468];
{
/* kind 85 = of-branch (emit its case labels); else default: */
if (!((*branch0).kind == ((Tnodekind292020) 85))) goto LA22;
gencaserange_537028_839829468((*c0).p, branch0);
}
goto LA20;
LA22: ;
{
TY533289 LOC25;
memset((void*)LOC25, 0, sizeof(LOC25));
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_181), LOC25, 0);
}
LA20: ;
/* branch body = last son; then close with break (T839829468_182) */
LOC26 = (Tnode292802*)0;
LOC26 = lastson_295364_850551059(branch0);
gentraverseproc_537039_839829468(c0, accessor0, LOC26);
memset((void*)LOC27, 0, sizeof(LOC27));
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_182), LOC27, 0);
res_537252_839829468 += ((NI) 1);
} LA19: ;
}
}
/* close the switch (T839829468_183) */
memset((void*)LOC28, 0, sizeof(LOC28));
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_183), LOC28, 0);
}
break;
case ((Tnodekind292020) 3):
{
/* field symbol: traverse accessor.field at the field's type */
Tsym292834* field0;
TY532811 LOC34;
Ropeobj178006* LOC35;
field0 = (*n0).kindU.S4.sym;
{
if (!((*field0).loc.t == NIM_NIL)) goto LA32;
internalerror_196100_155036129((*n0).info, ((NimStringDesc*) &T839829468_184));
}
LA32: ;
memset((void*)LOC34, 0, sizeof(LOC34));
LOC34[0] = accessor0;
LOC34[1] = (*field0).loc.r;
LOC35 = (Ropeobj178006*)0;
LOC35 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_90), LOC34, 2);
gentraverseproc_537022_839829468(c0, LOC35, (*field0).loc.t);
}
break;
default:
{
internalerror_196100_155036129((*n0).info, ((NimStringDesc*) &T839829468_184));
}
break;
}
}BeforeRet: ;
}
/* Formats `args0` through ropecg with format `frmt0`, indents the
   resulting line for proc `p0`, and appends it to section `s0`. */
N_NIMCALL(void, linecg_532707_839829468)(Tcproc529021* p0, Tcprocsection529011 s0, NimStringDesc* frmt0, Ropeobj178006** args0, NI args0Len0) {
Ropeobj178006** section;
Ropeobj178006* formatted;
Ropeobj178006* indented;
section = s_529179_3723162438(p0, s0);
formatted = ropecg_532407_839829468((*p0).module, frmt0, args0, args0Len0);
indented = indentline_532656_839829468(p0, formatted);
add_178482_2381377266(section, indented);
}
/* Emits GC-traversal code for a value of type `typ_537027_839829468`
   reachable through the C expression `accessor0`.  Dispatches on the
   unique type's kind: wrapper kinds recurse into the last son; array-like
   kinds emit an index loop; objects (17) traverse bases and the field
   tree; tuples (18) traverse per-index; ref/string/seq-like kinds emit
   the closure's visitor call; proc kind 25 with callconv 8 visits the
   closure environment.  Everything else is a no-op. */
N_NIMCALL(void, gentraverseproc_537022_839829468)(Ttraversalclosure537019* c0, Ropeobj178006* accessor0, Ttype292840* typ_537027_839829468) {
Ttype292840* typ_537302_839829468;
Tcproc529021* p0;
{ {
if (!(typ_537027_839829468 == NIM_NIL)) goto LA3;
goto BeforeRet;
}
LA3: ;
typ_537302_839829468 = getuniquetype_528640_2036603609(typ_537027_839829468);
p0 = (*c0).p;
switch ((*typ_537302_839829468).kind) {
case ((Ttypekind292244) 11):
case ((Ttypekind292244) 10):
case ((Ttypekind292244) 8):
{
/* wrapper kinds: traverse the underlying (last) son */
Ttype292840* LOC6;
LOC6 = (Ttype292840*)0;
LOC6 = lastson_295377_850551059(typ_537302_839829468);
gentraverseproc_537022_839829468(c0, accessor0, LOC6);
}
break;
case ((Ttypekind292244) 4):
case ((Ttypekind292244) 16):
{
/* fixed-size array: emit `for (i = 0; i < arraysize; ...)` (format
   T839829468_159), traverse accessor[i] (T839829468_138), then close
   the loop (T839829468_160) */
NI64 arraysize0;
Tloc292816 i0;
Ttype292840* LOC8;
TY532811 LOC9;
TY532811 LOC10;
Ropeobj178006* LOC11;
TY533289 LOC12;
arraysize0 = lengthord_320007_3876443242((*typ_537302_839829468).sons->data[((NI) 0)]);
memset((void*)(&i0), 0, sizeof(i0));
LOC8 = (Ttype292840*)0;
LOC8 = getsystype_338150_3937434831(((Ttypekind292244) 31));
gettemp_537032_839829468(p0, LOC8, (&i0), NIM_FALSE);
memset((void*)LOC9, 0, sizeof(LOC9));
LOC9[0] = i0.r;
LOC9[1] = rope_178401_2381377266(arraysize0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_159), LOC9, 2);
memset((void*)LOC10, 0, sizeof(LOC10));
LOC10[0] = accessor0;
LOC10[1] = i0.r;
LOC11 = (Ropeobj178006*)0;
LOC11 = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_138), LOC10, 2);
gentraverseproc_537022_839829468(c0, LOC11, (*typ_537302_839829468).sons->data[((NI) 1)]);
memset((void*)LOC12, 0, sizeof(LOC12));
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_160), LOC12, 0);
}
break;
case ((Ttypekind292244) 17):
{
/* object: traverse inherited parts (non-nil sons, accessor possibly
   wrapped by parentobj), then the field tree `n` */
{
NI i_537325_839829468;
NI HEX3Atmp_537384_839829468;
NI LOC15;
NI res_537387_839829468;
i_537325_839829468 = (NI)0;
HEX3Atmp_537384_839829468 = (NI)0;
LOC15 = (NI)0;
LOC15 = sonslen_295327_850551059(typ_537302_839829468);
HEX3Atmp_537384_839829468 = (NI)(LOC15 - ((NI) 1));
res_537387_839829468 = ((NI) 0);
{
while (1) {
Ttype292840* x0;
Ropeobj178006* LOC22;
if (!(res_537387_839829468 <= HEX3Atmp_537384_839829468)) goto LA17;
i_537325_839829468 = res_537387_839829468;
x0 = (*typ_537302_839829468).sons->data[i_537325_839829468];
{
if (!!((x0 == NIM_NIL))) goto LA20;
x0 = skiptypes_296099_850551059(x0, IL64(211106247215360));
}
LA20: ;
LOC22 = (Ropeobj178006*)0;
LOC22 = parentobj_537257_839829468(accessor0, (*(*c0).p).module);
gentraverseproc_537022_839829468(c0, LOC22, x0);
res_537387_839829468 += ((NI) 1);
} LA17: ;
}
}
{
if (!!(((*typ_537302_839829468).n == NIM_NIL))) goto LA25;
gentraverseproc_537039_839829468(c0, accessor0, (*typ_537302_839829468).n);
}
LA25: ;
}
break;
case ((Ttypekind292244) 18):
{
/* tuple: traverse each field via an index-based accessor
   (format T839829468_185) */
Ttype292840* typ0;
typ0 = getuniquetype_528640_2036603609(typ_537302_839829468);
{
NI i_537363_839829468;
NI HEX3Atmp_537392_839829468;
NI LOC29;
NI res_537395_839829468;
i_537363_839829468 = (NI)0;
HEX3Atmp_537392_839829468 = (NI)0;
LOC29 = (NI)0;
LOC29 = sonslen_295327_850551059(typ0);
HEX3Atmp_537392_839829468 = (NI)(LOC29 - ((NI) 1));
res_537395_839829468 = ((NI) 0);
{
while (1) {
TY532811 LOC32;
Ropeobj178006* LOC33;
if (!(res_537395_839829468 <= HEX3Atmp_537392_839829468)) goto LA31;
i_537363_839829468 = res_537395_839829468;
memset((void*)LOC32, 0, sizeof(LOC32));
LOC32[0] = accessor0;
LOC32[1] = rope_178401_2381377266(((NI64) (i_537363_839829468)));
LOC33 = (Ropeobj178006*)0;
LOC33 = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_185), LOC32, 2);
gentraverseproc_537022_839829468(c0, LOC33, (*typ0).sons->data[i_537363_839829468]);
res_537395_839829468 += ((NI) 1);
} LA31: ;
}
}
}
break;
case ((Ttypekind292244) 22):
case ((Ttypekind292244) 28):
case ((Ttypekind292244) 24):
{
/* GC'd pointer-like kinds: emit the closure's visitor call on the
   accessor itself */
TY178507 LOC35;
memset((void*)LOC35, 0, sizeof(LOC35));
LOC35[0] = accessor0;
linecg_532707_839829468(p0, ((Tcprocsection529011) 2), (*c0).visitorfrmt, LOC35, 1);
}
break;
case ((Ttypekind292244) 25):
{
{
/* proc type: only closures (callconv 8) carry a GC'd environment;
   visit it via format T839829468_186 */
TY178507 LOC41;
TY178507 LOC42;
if (!((*typ_537302_839829468).callconv == ((Tcallingconvention292002) 8))) goto LA39;
memset((void*)LOC41, 0, sizeof(LOC41));
memset((void*)LOC42, 0, sizeof(LOC42));
LOC42[0] = accessor0;
LOC41[0] = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_186), LOC42, 1);
linecg_532707_839829468(p0, ((Tcprocsection529011) 2), (*c0).visitorfrmt, LOC41, 1);
}
LA39: ;
}
break;
default:
{
}
break;
}
}BeforeRet: ;
}
/* Emits GC-traversal for a seq value reached via `accessor0`: allocates a
   temp index, opens a loop over the seq length (format T839829468_156; the
   length-field spelling differs between T839829468_157 and T839829468_158
   depending on gcmd==2 / module sym-flag 27), traverses each element via
   accessor->data[i] (T839829468_187), and closes the loop (T839829468_160). */
N_NIMCALL(void, gentraverseprocseq_537399_839829468)(Ttraversalclosure537019* c0, Ropeobj178006* accessor0, Ttype292840* typ0) {
Tcproc529021* p0;
Tloc292816 i0;
Ttype292840* LOC1;
TY535238 LOC2;
NimStringDesc* LOC3;
TY532811 LOC11;
Ropeobj178006* LOC12;
TY533289 LOC13;
p0 = (*c0).p;
memset((void*)(&i0), 0, sizeof(i0));
LOC1 = (Ttype292840*)0;
LOC1 = getsystype_338150_3937434831(((Ttypekind292244) 31));
gettemp_537032_839829468(p0, LOC1, (&i0), NIM_FALSE);
memset((void*)LOC2, 0, sizeof(LOC2));
LOC2[0] = i0.r;
LOC2[1] = accessor0;
LOC3 = (NimStringDesc*)0;
{
NIM_BOOL LOC6;
LOC6 = (NIM_BOOL)0;
/* pick the length-field spelling for the current compilation mode */
LOC6 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC6) goto LA7;
LOC6 = (((*(*(*(*c0).p).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA7: ;
if (!LOC6) goto LA8;
LOC3 = copyString(((NimStringDesc*) &T839829468_157));
}
goto LA4;
LA8: ;
{
LOC3 = copyString(((NimStringDesc*) &T839829468_158));
}
LA4: ;
LOC2[2] = rope_178277_2381377266(LOC3);
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_156), LOC2, 3);
/* element access: accessor + index, element type is son #0 */
memset((void*)LOC11, 0, sizeof(LOC11));
LOC11[0] = accessor0;
LOC11[1] = i0.r;
LOC12 = (Ropeobj178006*)0;
LOC12 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_187), LOC11, 2);
gentraverseproc_537022_839829468(c0, LOC12, (*typ0).sons->data[((NI) 0)]);
memset((void*)LOC13, 0, sizeof(LOC13));
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_160), LOC13, 0);
}
/* Generates a complete standalone traversal proc for `typ0` and returns
   its (temp) name.  Builds a fresh proc context, selects the visitor
   format by `reason0` (case 0 uses T839829468_145), emits header and
   local/param declarations, traverses either the seq body (kind 24) or
   the ref'd type (son #0, with array-like targets using a plain accessor),
   assembles the proc text (T839829468_190), and registers prototype
   (section 7) and body (section 10) in the module. */
N_NIMCALL(Ropeobj178006*, gentraverseproc_537632_839829468)(Tcgen529027* m0, Ttype292840* typ0, Ttypeinforeason537016 reason0) {
Ropeobj178006* result0;
Ttraversalclosure537019 c0;
Tcproc529021* p0;
Ropeobj178006* header0;
TY178507 LOC3;
Ropeobj178006* t0;
TY178507 LOC4;
TY178507 LOC5;
Ropeobj178006* generatedproc0;
TY535235 LOC20;
Ropeobj178006** LOC21;
Ropeobj178006** LOC22;
Ropeobj178006** LOC23;
TY178507 LOC24;
result0 = (Ropeobj178006*)0;
memset((void*)(&c0), 0, sizeof(c0));
p0 = newproc_529206_3723162438(NIM_NIL, m0);
result0 = gettempname_533596_839829468(m0);
switch (reason0) {
case ((Ttypeinforeason537016) 0):
{
/* visitor call format for this reason (other reasons leave it unset) */
c0.visitorfrmt = copyString(((NimStringDesc*) &T839829468_145));
}
break;
default:
{
}
break;
}
memset((void*)LOC3, 0, sizeof(LOC3));
LOC3[0] = result0;
header0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_146), LOC3, 1);
t0 = gettypedesc_535671_839829468(m0, typ0);
memset((void*)LOC4, 0, sizeof(LOC4));
LOC4[0] = t0;
linef_532700_839829468(p0, ((Tcprocsection529011) 0), ((NimStringDesc*) &T839829468_147), LOC4, 1);
memset((void*)LOC5, 0, sizeof(LOC5));
LOC5[0] = t0;
linef_532700_839829468(p0, ((Tcprocsection529011) 1), ((NimStringDesc*) &T839829468_148), LOC5, 1);
c0.p = p0;
{
Ropeobj178006* LOC10;
/* seq type (kind 24): loop over the elements */
if (!((*typ0).kind == ((Ttypekind292244) 24))) goto LA8;
LOC10 = (Ropeobj178006*)0;
LOC10 = rope_178277_2381377266(((NimStringDesc*) &T839829468_188));
gentraverseprocseq_537399_839829468((&c0), LOC10, typ0);
}
goto LA6;
LA8: ;
{
{
Ttype292840* LOC14;
Ropeobj178006* LOC17;
/* ref-to-array (target kind 4/16) uses accessor T839829468_188,
   everything else T839829468_189 (exact spellings not visible here) */
LOC14 = (Ttype292840*)0;
LOC14 = skiptypes_296099_850551059((*typ0).sons->data[((NI) 0)], IL64(211106232576256));
if (!((*LOC14).kind == ((Ttypekind292244) 4) || (*LOC14).kind == ((Ttypekind292244) 16))) goto LA15;
LOC17 = (Ropeobj178006*)0;
LOC17 = rope_178277_2381377266(((NimStringDesc*) &T839829468_188));
gentraverseproc_537022_839829468((&c0), LOC17, (*typ0).sons->data[((NI) 0)]);
}
goto LA12;
LA15: ;
{
Ropeobj178006* LOC19;
LOC19 = (Ropeobj178006*)0;
LOC19 = rope_178277_2381377266(((NimStringDesc*) &T839829468_189));
gentraverseproc_537022_839829468((&c0), LOC19, (*typ0).sons->data[((NI) 0)]);
}
LA12: ;
}
LA6: ;
/* stitch header + the three proc sections into the full definition */
memset((void*)LOC20, 0, sizeof(LOC20));
LOC20[0] = header0;
LOC21 = (Ropeobj178006**)0;
LOC21 = s_529179_3723162438(p0, ((Tcprocsection529011) 0));
LOC20[1] = (*LOC21);
LOC22 = (Ropeobj178006**)0;
LOC22 = s_529179_3723162438(p0, ((Tcprocsection529011) 1));
LOC20[2] = (*LOC22);
LOC23 = (Ropeobj178006**)0;
LOC23 = s_529179_3723162438(p0, ((Tcprocsection529011) 2));
LOC20[3] = (*LOC23);
generatedproc0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_190), LOC20, 4);
memset((void*)LOC24, 0, sizeof(LOC24));
LOC24[0] = header0;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 7))- 0], ((NimStringDesc*) &T839829468_191), LOC24, 1);
add_178482_2381377266(&(*m0).s[(((Tcfilesection529005) 10))- 0], generatedproc0);
return result0;
}
/* Type info for an array type: the "base" RTTI is the element type's
   (son #1) type info. */
N_NIMCALL(void, genarrayinfo_537005_839829468)(Tcgen529027* m0, Ttype292840* typ0, Ropeobj178006* name0) {
Ropeobj178006* eleminfo;
eleminfo = gentypeinfo_535941_839829468(m0, (*typ0).sons->data[((NI) 1)]);
gentypeinfoauxbase_535960_839829468(m0, typ0, typ0, name0, eleminfo);
}
/* Type info for a set type: emits the generic aux info, then a node
   recording the set's first ordinal value (format T839829468_193) into
   module section 14. */
N_NIMCALL(void, gensetinfo_536867_839829468)(Tcgen529027* m0, Ttype292840* typ0, Ropeobj178006* name0) {
Ropeobj178006* tmp0;
TY535238 LOC1;
NI64 LOC2;
gentypeinfoaux_536027_839829468(m0, typ0, typ0, name0);
tmp0 = getnimnode_535945_839829468(m0);
memset((void*)LOC1, 0, sizeof(LOC1));
LOC1[0] = tmp0;
LOC2 = (NI64)0;
LOC2 = firstord_320001_3876443242(typ0);
LOC1[1] = rope_178401_2381377266(LOC2);
LOC1[2] = name0;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_193), LOC1, 3);
}
/* Type info for an enum type: emits the generic aux info, a node-pointer
   array (format T839829468_139), per-value display names (from the
   field's ast override when present, else the symbol name), a fill loop
   (T839829468_197), position fix-ups for enums with holes (T839829468_194),
   the final summary node (T839829468_198), and a holes marker
   (T839829468_199) when needed. */
N_NIMCALL(void, genenuminfo_536597_839829468)(Tcgen529027* m0, Ttype292840* typ0, Ropeobj178006* name0) {
Ropeobj178006* nodeptrs0;
NI length0;
TY532811 LOC1;
Ropeobj178006* enumnames0;
Ropeobj178006* specialcases0;
NI firstnimnode0;
NIM_BOOL hasholes0;
Ropeobj178006* enumarray0;
Ropeobj178006* counter0;
TY178507 LOC24;
TY535238 LOC25;
TY536847 LOC26;
TY535235 LOC27;
gentypeinfoaux_536027_839829468(m0, typ0, typ0, name0);
nodeptrs0 = gettempname_533596_839829468(m0);
length0 = sonslen_295351_850551059((*typ0).n);
memset((void*)LOC1, 0, sizeof(LOC1));
LOC1[0] = nodeptrs0;
LOC1[1] = rope_178401_2381377266(((NI64) (length0)));
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 12))- 0], ((NimStringDesc*) &T839829468_139), LOC1, 2);
enumnames0 = (Ropeobj178006*)0;
specialcases0 = (Ropeobj178006*)0;
firstnimnode0 = (*m0).typenodes;
hasholes0 = NIM_FALSE;
{
NI i_536622_839829468;
NI HEX3Atmp_536860_839829468;
NI res_536863_839829468;
i_536622_839829468 = (NI)0;
HEX3Atmp_536860_839829468 = (NI)0;
HEX3Atmp_536860_839829468 = (NI)(length0 - ((NI) 1));
res_536863_839829468 = ((NI) 0);
{
while (1) {
Tsym292834* field0;
Ropeobj178006* elemnode0;
if (!(res_536863_839829468 <= HEX3Atmp_536860_839829468)) goto LA4;
i_536622_839829468 = res_536863_839829468;
field0 = (*(*(*typ0).n).kindU.S6.sons->data[i_536622_839829468]).kindU.S4.sym;
elemnode0 = getnimnode_535945_839829468(m0);
{
Ropeobj178006* LOC9;
/* display name: the symbol's name, unless the field's ast overrides
   it with a string value */
if (!((*field0).ast == NIM_NIL)) goto LA7;
LOC9 = (Ropeobj178006*)0;
LOC9 = makecstring_191638_155036129((*(*field0).name).s);
add_178482_2381377266(&enumnames0, LOC9);
}
goto LA5;
LA7: ;
{
Ropeobj178006* LOC11;
LOC11 = (Ropeobj178006*)0;
LOC11 = makecstring_191638_155036129((*(*field0).ast).kindU.S3.strval);
add_178482_2381377266(&enumnames0, LOC11);
}
LA5: ;
{
NimStringDesc* LOC16;
/* separator between names (not after the last one) */
if (!(i_536622_839829468 < (NI)(length0 - ((NI) 1)))) goto LA14;
LOC16 = (NimStringDesc*)0;
LOC16 = rawNewString(tnl_176644_4151366050->Sup.len + 2);
appendString(LOC16, ((NimStringDesc*) &T839829468_110));
appendString(LOC16, tnl_176644_4151366050);
add_178487_2381377266(&enumnames0, LOC16);
}
LA14: ;
{
NIM_BOOL LOC19;
TY532811 LOC23;
LOC19 = (NIM_BOOL)0;
/* position differs from the index, or the type carries flag bit 5:
   record a per-node position fix-up and mark the enum as holed */
LOC19 = !(((*field0).position == i_536622_839829468));
if (LOC19) goto LA20;
LOC19 = (((*typ0).flags &(1U<<((NU)(((Ttypeflag292431) 5))&31U)))!=0);
LA20: ;
if (!LOC19) goto LA21;
memset((void*)LOC23, 0, sizeof(LOC23));
LOC23[0] = elemnode0;
LOC23[1] = rope_178401_2381377266(((NI64) ((*field0).position)));
addf_179205_2381377266(&specialcases0, ((NimStringDesc*) &T839829468_194), LOC23, 2);
hasholes0 = NIM_TRUE;
}
LA21: ;
res_536863_839829468 += ((NI) 1);
} LA4: ;
}
}
enumarray0 = gettempname_533596_839829468(m0);
counter0 = gettempname_533596_839829468(m0);
memset((void*)LOC24, 0, sizeof(LOC24));
LOC24[0] = counter0;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 12))- 0], ((NimStringDesc*) &T839829468_195), LOC24, 1);
memset((void*)LOC25, 0, sizeof(LOC25));
LOC25[0] = enumarray0;
LOC25[1] = rope_178401_2381377266(((NI64) (length0)));
LOC25[2] = enumnames0;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 12))- 0], ((NimStringDesc*) &T839829468_196), LOC25, 3);
/* fill loop over the per-value nodes, then append the fix-ups */
memset((void*)LOC26, 0, sizeof(LOC26));
LOC26[0] = counter0;
LOC26[1] = rope_178401_2381377266(((NI64) (length0)));
LOC26[2] = (*m0).typenodesname;
LOC26[3] = rope_178401_2381377266(((NI64) (firstnimnode0)));
LOC26[4] = enumarray0;
LOC26[5] = nodeptrs0;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_197), LOC26, 6);
add_178482_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], specialcases0);
memset((void*)LOC27, 0, sizeof(LOC27));
LOC27[0] = getnimnode_535945_839829468(m0);
LOC27[1] = rope_178401_2381377266(((NI64) (length0)));
LOC27[2] = nodeptrs0;
LOC27[3] = name0;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_198), LOC27, 4);
{
TY178507 LOC32;
if (!hasholes0) goto LA30;
memset((void*)LOC32, 0, sizeof(LOC32));
LOC32[0] = name0;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_199), LOC32, 1);
}
LA30: ;
}
/* Nim-compiler-generated C (cgen backend). Builds the mangled C name of the
 * discriminator lookup table for case-object field d0, presumably of the form
 * "<objtype-id>_<mangled-field-name>" (format string T839829468_201) —
 * TODO confirm against the format-string constant defined elsewhere.
 * The label-based gotos encode the Nim source's structured control flow. */
N_NIMCALL(Ropeobj178006*, discriminatortablename_536057_839829468)(Tcgen529027* m0, Ttype292840* objtype_536060_839829468, Tsym292834* d0) {
Ropeobj178006* result0;
Ttype292840* objtype0;
TY532811 LOC8;
NimStringDesc* LOC9;
result0 = (Ropeobj178006*)0;
objtype0 = objtype_536060_839829468;
{
/* Walk up the inheritance chain (sons[0] is the base type) until the
 * record of objtype0 actually contains the discriminator's name. */
while (1) {
Tsym292834* LOC3;
LOC3 = (Tsym292834*)0;
LOC3 = lookupinrecord_299119_2984716966((*objtype0).n, (*d0).name);
if (!(LOC3 == NIM_NIL)) goto LA2;
objtype0 = (*objtype0).sons->data[((NI) 0)];
} LA2: ;
}
{
/* An owning object type without a symbol is an internal error. */
if (!((*objtype0).sym == NIM_NIL)) goto LA6;
internalerror_196100_155036129((*d0).info, ((NimStringDesc*) &T839829468_200));
}
LA6: ;
memset((void*)LOC8, 0, sizeof(LOC8));
LOC8[0] = rope_178401_2381377266(((NI64) ((*objtype0).Sup.id)));
LOC9 = (NimStringDesc*)0;
LOC9 = mangle_528847_2036603609((*(*d0).name).s);
LOC8[1] = rope_178277_2381377266(LOC9);
result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_201), LOC8, 2);
return result0;
}
/* Nim-compiler-generated C (cgen backend). Recursively emits run-time type
 * information (TNimNode trees) for an object's field layout: node n0 of the
 * object's record AST is translated into initialisation code appended to the
 * module's type-init sections (s[12]/s[14]), filling the node named by expr0.
 * The numeric node-kind constants presumably correspond to nkRecList (138),
 * nkRecCase (139) and nkSym (3) — TODO confirm against the Tnodekind enum. */
N_NIMCALL(void, genobjectfields_536104_839829468)(Tcgen529027* m0, Ttype292840* typ0, Tnode292802* n0, Ropeobj178006* expr0) {
switch ((*n0).kind) {
case ((Tnodekind292020) 138):
/* Record list: one child collapses into its parent node; several children
 * get a temporary node-pointer array filled with one sub-node each. */
{
NI L0;
L0 = sonslen_295351_850551059(n0);
{
if (!(L0 == ((NI) 1))) goto LA4;
genobjectfields_536104_839829468(m0, typ0, (*n0).kindU.S6.sons->data[((NI) 0)], expr0);
}
goto LA2;
LA4: ;
{
Ropeobj178006* tmp0;
TY532811 LOC9;
TY535238 LOC14;
if (!(((NI) 0) < L0)) goto LA7;
tmp0 = gettempname_533596_839829468(m0);
memset((void*)LOC9, 0, sizeof(LOC9));
LOC9[0] = tmp0;
LOC9[1] = rope_178401_2381377266(((NI64) (L0)));
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 12))- 0], ((NimStringDesc*) &T839829468_139), LOC9, 2);
{
NI i_536127_839829468;
NI HEX3Atmp_536482_839829468;
NI res_536485_839829468;
i_536127_839829468 = (NI)0;
HEX3Atmp_536482_839829468 = (NI)0;
HEX3Atmp_536482_839829468 = (NI)(L0 - ((NI) 1));
res_536485_839829468 = ((NI) 0);
{
/* For each son: allocate a TNimNode, link it into slot i of tmp0,
 * then recurse to fill it in. */
while (1) {
Ropeobj178006* tmp20;
TY535238 LOC13;
if (!(res_536485_839829468 <= HEX3Atmp_536482_839829468)) goto LA12;
i_536127_839829468 = res_536485_839829468;
tmp20 = getnimnode_535945_839829468(m0);
memset((void*)LOC13, 0, sizeof(LOC13));
LOC13[0] = tmp0;
LOC13[1] = rope_178401_2381377266(((NI64) (i_536127_839829468)));
LOC13[2] = tmp20;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_140), LOC13, 3);
genobjectfields_536104_839829468(m0, typ0, (*n0).kindU.S6.sons->data[i_536127_839829468], tmp20);
res_536485_839829468 += ((NI) 1);
} LA12: ;
}
}
memset((void*)LOC14, 0, sizeof(LOC14));
LOC14[0] = expr0;
LOC14[1] = rope_178401_2381377266(((NI64) (L0)));
LOC14[2] = tmp0;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_142), LOC14, 3);
}
goto LA2;
LA7: ;
{
/* Empty record list: emit a node with length but no sons array. */
TY532811 LOC16;
memset((void*)LOC16, 0, sizeof(LOC16));
LOC16[0] = expr0;
LOC16[1] = rope_178401_2381377266(((NI64) (L0)));
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_143), LOC16, 2);
}
LA2: ;
}
break;
case ((Tnodekind292020) 139):
/* Case branch (variant object): son 0 is the discriminator symbol;
 * a lookup table (tmp0) maps each discriminator value to its branch node. */
{
Tsym292834* field0;
Ropeobj178006* tmp0;
NI64 L0;
TY536401 LOC18;
TY532811 LOC19;
field0 = (*(*n0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym;
tmp0 = discriminatortablename_536057_839829468(m0, typ0, field0);
L0 = lengthord_320007_3876443242((*field0).typ);
memset((void*)LOC18, 0, sizeof(LOC18));
LOC18[0] = expr0;
LOC18[1] = gettypedesc_535671_839829468(m0, typ0);
LOC18[2] = (*field0).loc.r;
LOC18[3] = gentypeinfo_535941_839829468(m0, (*field0).typ);
LOC18[4] = makecstring_191638_155036129((*(*field0).name).s);
LOC18[5] = tmp0;
LOC18[6] = rope_178401_2381377266(L0);
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_202), LOC18, 7);
memset((void*)LOC19, 0, sizeof(LOC19));
LOC19[0] = tmp0;
/* Table holds L0+1 entries — presumably slot 0 or the last slot is the
 * else/uncovered entry; TODO confirm against the emitted format string. */
LOC19[1] = rope_178401_2381377266((NI64)(L0 + IL64(1)));
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 8))- 0], ((NimStringDesc*) &T839829468_203), LOC19, 2);
{
NI i_536421_839829468;
NI HEX3Atmp_536499_839829468;
NI LOC21;
NI res_536502_839829468;
i_536421_839829468 = (NI)0;
HEX3Atmp_536499_839829468 = (NI)0;
LOC21 = (NI)0;
LOC21 = sonslen_295351_850551059(n0);
HEX3Atmp_536499_839829468 = (NI)(LOC21 - ((NI) 1));
res_536502_839829468 = ((NI) 1);
{
/* Iterate over the of/else branches (sons 1..len-1). */
while (1) {
Tnode292802* b0;
Ropeobj178006* tmp20;
Tnode292802* LOC24;
if (!(res_536502_839829468 <= HEX3Atmp_536499_839829468)) goto LA23;
i_536421_839829468 = res_536502_839829468;
b0 = (*n0).kindU.S6.sons->data[i_536421_839829468];
tmp20 = getnimnode_535945_839829468(m0);
LOC24 = (Tnode292802*)0;
LOC24 = lastson_295364_850551059(b0);
genobjectfields_536104_839829468(m0, typ0, LOC24, tmp20);
switch ((*b0).kind) {
case ((Tnodekind292020) 85):
/* "of" branch: fill table entries for every listed value/range. */
{
{
NI LOC28;
LOC28 = (NI)0;
LOC28 = sonslen_295351_850551059(b0);
if (!(LOC28 < ((NI) 2))) goto LA29;
internalerror_196100_155036129((*b0).info, ((NimStringDesc*) &T839829468_204));
}
LA29: ;
{
NI j_536436_839829468;
NI HEX3Atmp_536492_839829468;
NI LOC32;
NI res_536495_839829468;
j_536436_839829468 = (NI)0;
HEX3Atmp_536492_839829468 = (NI)0;
LOC32 = (NI)0;
LOC32 = sonslen_295351_850551059(b0);
HEX3Atmp_536492_839829468 = (NI)(LOC32 - ((NI) 2));
res_536495_839829468 = ((NI) 0);
{
while (1) {
if (!(res_536495_839829468 <= HEX3Atmp_536492_839829468)) goto LA34;
j_536436_839829468 = res_536495_839829468;
{
NI x0;
NI64 LOC39;
NI y0;
NI64 LOC40;
/* Range node (kind 44): expand to one table entry per ordinal value. */
if (!((*(*b0).kindU.S6.sons->data[j_536436_839829468]).kind == ((Tnodekind292020) 44))) goto LA37;
LOC39 = (NI64)0;
LOC39 = getordvalue_320129_3876443242((*(*b0).kindU.S6.sons->data[j_536436_839829468]).kindU.S6.sons->data[((NI) 0)]);
x0 = ((NI) (LOC39));
LOC40 = (NI64)0;
LOC40 = getordvalue_320129_3876443242((*(*b0).kindU.S6.sons->data[j_536436_839829468]).kindU.S6.sons->data[((NI) 1)]);
y0 = ((NI) (LOC40));
{
while (1) {
TY535238 LOC43;
if (!(x0 <= y0)) goto LA42;
memset((void*)LOC43, 0, sizeof(LOC43));
LOC43[0] = tmp0;
LOC43[1] = rope_178401_2381377266(((NI64) (x0)));
LOC43[2] = tmp20;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_140), LOC43, 3);
x0 += ((NI) 1);
} LA42: ;
}
}
goto LA35;
LA37: ;
{
/* Single constant: one table entry at its ordinal value. */
TY535238 LOC45;
NI64 LOC46;
memset((void*)LOC45, 0, sizeof(LOC45));
LOC45[0] = tmp0;
LOC46 = (NI64)0;
LOC46 = getordvalue_320129_3876443242((*b0).kindU.S6.sons->data[j_536436_839829468]);
LOC45[1] = rope_178401_2381377266(LOC46);
LOC45[2] = tmp20;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_140), LOC45, 3);
}
LA35: ;
res_536495_839829468 += ((NI) 1);
} LA34: ;
}
}
}
break;
case ((Tnodekind292020) 88):
/* "else" branch: stored at index L0 (the extra slot). */
{
TY535238 LOC48;
memset((void*)LOC48, 0, sizeof(LOC48));
LOC48[0] = tmp0;
LOC48[1] = rope_178401_2381377266(L0);
LOC48[2] = tmp20;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_140), LOC48, 3);
}
break;
default:
{
internalerror_196100_155036129((*n0).info, ((NimStringDesc*) &T839829468_205));
}
break;
}
res_536502_839829468 += ((NI) 1);
} LA23: ;
}
}
}
break;
case ((Tnodekind292020) 3):
/* Plain field symbol: emit a leaf node, but only for non-bitfield members
 * (bitsize == 0); bitfields have no addressable offset. */
{
Tsym292834* field0;
field0 = (*n0).kindU.S4.sym;
{
TY536475 LOC55;
if (!((*field0).kindU.S4.bitsize == ((NI) 0))) goto LA53;
memset((void*)LOC55, 0, sizeof(LOC55));
LOC55[0] = expr0;
LOC55[1] = gettypedesc_535671_839829468(m0, typ0);
LOC55[2] = (*field0).loc.r;
LOC55[3] = gentypeinfo_535941_839829468(m0, (*field0).typ);
LOC55[4] = makecstring_191638_155036129((*(*field0).name).s);
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_206), LOC55, 5);
}
LA53: ;
}
break;
default:
{
internalerror_196100_155036129((*n0).info, ((NimStringDesc*) &T839829468_207));
}
break;
}
}
/* Nim-compiler-generated C (cgen backend). Emits RTTI for an object type:
 * the base type-info record, then (unless the type is an imported C++ type)
 * a TNimNode tree describing its fields, and finally marks every type in the
 * inheritance chain with a flag (presumably tfHasFields/enum-hole related —
 * TODO confirm Ttypeflag bit 5's meaning against the Nim compiler sources). */
N_NIMCALL(void, genobjectinfo_536506_839829468)(Tcgen529027* m0, Ttype292840* typ0, Ttype292840* origtype0, Ropeobj178006* name0) {
Ropeobj178006* tmp0;
TY532811 LOC12;
Ttype292840* t0;
{
/* Kind 17 (object proper) gets plain aux info; everything else is given
 * an explicit base-type string (constant T839829468_18). */
if (!((*typ0).kind == ((Ttypekind292244) 17))) goto LA3;
gentypeinfoaux_536027_839829468(m0, typ0, origtype0, name0);
}
goto LA1;
LA3: ;
{
Ropeobj178006* LOC6;
LOC6 = (Ropeobj178006*)0;
LOC6 = rope_178277_2381377266(((NimStringDesc*) &T839829468_18));
gentypeinfoauxbase_535960_839829468(m0, typ0, origtype0, name0, LOC6);
}
LA1: ;
tmp0 = getnimnode_535945_839829468(m0);
{
NIM_BOOL LOC9;
LOC9 = (NIM_BOOL)0;
LOC9 = isimportedcpptype_533476_839829468(typ0);
if (!!(LOC9)) goto LA10;
genobjectfields_536104_839829468(m0, typ0, (*typ0).n, tmp0);
}
LA10: ;
memset((void*)LOC12, 0, sizeof(LOC12));
LOC12[0] = name0;
LOC12[1] = tmp0;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_144), LOC12, 2);
/* Propagate the flag up the base-type chain (sons[0] links to the base). */
t0 = (*typ0).sons->data[((NI) 0)];
{
while (1) {
if (!!((t0 == NIM_NIL))) goto LA14;
t0 = skiptypes_296099_850551059(t0, IL64(211106247215360));
(*t0).flags |= ((NU32)1)<<((((Ttypeflag292431) 5))%(sizeof(NU32)*8));
t0 = (*t0).sons->data[((NI) 0)];
} LA14: ;
}
}
/* Generate code for the user-supplied deepCopy proc s0 and register it on
 * the type-info record named by result0 (format string T839829468_208,
 * appended to module section 14). */
N_NIMCALL(void, gendeepcopyproc_538066_839829468)(Tcgen529027* m0, Tsym292834* s0, Ropeobj178006* result0) {
    TY532811 fmtargs;
    genproc_532951_839829468(m0, s0);
    memset((void*)fmtargs, 0, sizeof(fmtargs));
    fmtargs[0] = result0;
    fmtargs[1] = (*s0).loc.r;
    addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_208), fmtargs, 2);
}
/* Nim-compiler-generated C (cgen backend). Returns a rope naming the RTTI
 * object for type t (apparently "(&NTI<id>)" — format T839829468_127 plus the
 * "(&"/")" wrappers built below), generating the type-info definition on
 * first use. Already-generated types (typeinfomarker) and types owned by a
 * different module take early exits via the BeforeRet label. */
N_NIMCALL(Ropeobj178006*, gentypeinfo_535941_839829468)(Tcgen529027* m0, Ttype292840* t_535944_839829468) {
Ropeobj178006* result0;
Ttype292840* origtype0;
Ttype292840* t0;
TY178507 LOC1;
Tsym292834* owner0;
Ttype292840* LOC12;
Ropeobj178006* LOC66;
Ropeobj178006* LOC67;
Ropeobj178006* LOC68;
{ result0 = (Ropeobj178006*)0;
origtype0 = t_535944_839829468;
t0 = getuniquetype_528640_2036603609(t_535944_839829468);
memset((void*)LOC1, 0, sizeof(LOC1));
LOC1[0] = rope_178401_2381377266(((NI64) ((*t0).Sup.id)));
result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_127), LOC1, 1);
{
/* Fast path: RTTI for this unique type id was already emitted. */
NIM_BOOL LOC4;
Ropeobj178006* LOC7;
Ropeobj178006* LOC8;
Ropeobj178006* LOC9;
LOC4 = (NIM_BOOL)0;
LOC4 = containsorincl_268862_2627731572((&(*m0).typeinfomarker), (*t0).Sup.id);
if (!LOC4) goto LA5;
LOC7 = (Ropeobj178006*)0;
LOC7 = rope_178277_2381377266(((NimStringDesc*) &T839829468_128));
LOC8 = (Ropeobj178006*)0;
LOC8 = HEX26_178418_2381377266(LOC7, result0);
LOC9 = (Ropeobj178006*)0;
LOC9 = rope_178277_2381377266(((NimStringDesc*) &T839829468_117));
result0 = HEX26_178418_2381377266(LOC8, LOC9);
goto BeforeRet;
}
LA5: ;
{
/* Skip kind-13 wrappers (presumably generic instantiations — TODO confirm). */
while (1) {
if (!((*t0).kind == ((Ttypekind292244) 13))) goto LA11;
t0 = lastson_295377_850551059(t0);
} LA11: ;
}
LOC12 = (Ttype292840*)0;
LOC12 = skiptypes_296099_850551059(t0, IL64(211106247256320));
owner0 = getmodule_299123_2984716966((*LOC12).owner);
{
/* The type belongs to another module: generate the RTTI there and only
 * emit an extern declaration (section 9) in this module. */
Tcgen529027* LOC17;
Ropeobj178006* LOC18;
Ropeobj178006* LOC19;
Ropeobj178006* LOC20;
TY532811 LOC21;
NimStringDesc* LOC22;
Ropeobj178006* LOC23;
Ropeobj178006* LOC24;
Ropeobj178006* LOC25;
if (!!((owner0 == (*m0).module))) goto LA15;
LOC17 = (Tcgen529027*)0;
LOC17 = bmod_529201_3723162438(owner0);
LOC18 = (Ropeobj178006*)0;
LOC18 = gentypeinfo_535941_839829468(LOC17, t0);
LOC19 = (Ropeobj178006*)0;
LOC19 = cgsym_532403_839829468(m0, ((NimStringDesc*) &T839829468_129));
LOC20 = (Ropeobj178006*)0;
LOC20 = cgsym_532403_839829468(m0, ((NimStringDesc*) &T839829468_130));
memset((void*)LOC21, 0, sizeof(LOC21));
LOC21[0] = result0;
LOC22 = (NimStringDesc*)0;
LOC22 = typetostring_320017_3876443242(t0, ((Tprefereddesc320011) 0));
LOC21[1] = rope_178277_2381377266(LOC22);
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 9))- 0], ((NimStringDesc*) &T839829468_131), LOC21, 2);
LOC23 = (Ropeobj178006*)0;
LOC23 = rope_178277_2381377266(((NimStringDesc*) &T839829468_128));
LOC24 = (Ropeobj178006*)0;
LOC24 = HEX26_178418_2381377266(LOC23, result0);
LOC25 = (Ropeobj178006*)0;
LOC25 = rope_178277_2381377266(((NimStringDesc*) &T839829468_117));
result0 = HEX26_178418_2381377266(LOC24, LOC25);
goto BeforeRet;
}
LA15: ;
/* Dispatch on the type kind to emit the appropriate flavour of RTTI. */
switch ((*t0).kind) {
case ((Ttypekind292244) 3):
case ((Ttypekind292244) 62):
{
result0 = rope_178277_2381377266(((NimStringDesc*) &T839829468_132));
}
break;
case ((Ttypekind292244) 26):
case ((Ttypekind292244) 1):
case ((Ttypekind292244) 2):
case ((Ttypekind292244) 29):
case ((Ttypekind292244) 28):
case ((Ttypekind292244) 31) ... ((Ttypekind292244) 44):
case ((Ttypekind292244) 23):
{
/* Scalar-like kinds: base info only, no node tree. */
Ropeobj178006* LOC28;
LOC28 = (Ropeobj178006*)0;
LOC28 = rope_178277_2381377266(((NimStringDesc*) &T839829468_132));
gentypeinfoauxbase_535960_839829468(m0, t0, t0, result0, LOC28);
}
break;
case ((Ttypekind292244) 59):
{
{
Ttype292840* LOC34;
if (!!(((*t0).n == NIM_NIL))) goto LA32;
LOC34 = (Ttype292840*)0;
LOC34 = lastson_295377_850551059(t0);
result0 = gentypeinfo_535941_839829468(m0, LOC34);
}
goto LA30;
LA32: ;
{
NimStringDesc* LOC36;
LOC36 = (NimStringDesc*)0;
LOC36 = rawNewString(reprEnum((NI)(*t0).kind, (&NTI292244))->Sup.len + 13);
appendString(LOC36, ((NimStringDesc*) &T839829468_137));
appendString(LOC36, reprEnum((NI)(*t0).kind, (&NTI292244)));
appendChar(LOC36, 41);
internalerror_196113_155036129(LOC36);
}
LA30: ;
}
break;
case ((Ttypekind292244) 25):
{
/* Proc type: closures (calling convention 8) are represented by a fake
 * closure tuple; other calling conventions are plain pointers. */
{
Ropeobj178006* LOC42;
if (!!(((*t0).callconv == ((Tcallingconvention292002) 8)))) goto LA40;
LOC42 = (Ropeobj178006*)0;
LOC42 = rope_178277_2381377266(((NimStringDesc*) &T839829468_132));
gentypeinfoauxbase_535960_839829468(m0, t0, t0, result0, LOC42);
}
goto LA38;
LA40: ;
{
Ttype292840* LOC44;
LOC44 = (Ttype292840*)0;
LOC44 = fakeclosuretype_537010_839829468((*t0).owner);
gentupleinfo_536549_839829468(m0, LOC44, result0);
}
LA38: ;
}
break;
case ((Ttypekind292244) 24):
case ((Ttypekind292244) 22):
{
/* Seq/string-like kinds: with GC mode >= 4 also attach a traversal
 * ("marker") proc to the type info. */
gentypeinfoaux_536027_839829468(m0, t0, t0, result0);
{
Ropeobj178006* markerproc0;
TY532811 LOC50;
if (!(((Tgcmode169080) 4) <= gselectedgc_169133_2607990831)) goto LA48;
markerproc0 = gentraverseproc_537632_839829468(m0, t0, ((Ttypeinforeason537016) 0));
memset((void*)LOC50, 0, sizeof(LOC50));
LOC50[0] = result0;
LOC50[1] = markerproc0;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_192), LOC50, 2);
}
LA48: ;
}
break;
case ((Ttypekind292244) 21):
case ((Ttypekind292244) 20):
{
gentypeinfoaux_536027_839829468(m0, t0, t0, result0);
}
break;
case ((Ttypekind292244) 4):
case ((Ttypekind292244) 16):
{
genarrayinfo_537005_839829468(m0, t0, result0);
}
break;
case ((Ttypekind292244) 19):
{
gensetinfo_536867_839829468(m0, t0, result0);
}
break;
case ((Ttypekind292244) 14):
{
genenuminfo_536597_839829468(m0, t0, result0);
}
break;
case ((Ttypekind292244) 17):
{
genobjectinfo_536506_839829468(m0, t0, origtype0, result0);
}
break;
case ((Ttypekind292244) 18):
{
gentupleinfo_536549_839829468(m0, t0, result0);
}
break;
default:
{
/* Unexpected kind: build "genTypeInfo(<kind>)"-style message and abort. */
NimStringDesc* LOC58;
LOC58 = (NimStringDesc*)0;
LOC58 = rawNewString(reprEnum((NI)(*t0).kind, (&NTI292244))->Sup.len + 13);
appendString(LOC58, ((NimStringDesc*) &T839829468_137));
appendString(LOC58, reprEnum((NI)(*t0).kind, (&NTI292244)));
appendChar(LOC58, 41);
internalerror_196113_155036129(LOC58);
}
break;
}
{
/* Attach a deepCopy override if present on the unique or original type. */
if (!!(((*t0).deepcopy == NIM_NIL))) goto LA61;
gendeepcopyproc_538066_839829468(m0, (*t0).deepcopy, result0);
}
goto LA59;
LA61: ;
{
if (!!(((*origtype0).deepcopy == NIM_NIL))) goto LA64;
gendeepcopyproc_538066_839829468(m0, (*origtype0).deepcopy, result0);
}
goto LA59;
LA64: ;
LA59: ;
LOC66 = (Ropeobj178006*)0;
LOC66 = rope_178277_2381377266(((NimStringDesc*) &T839829468_128));
LOC67 = (Ropeobj178006*)0;
LOC67 = HEX26_178418_2381377266(LOC66, result0);
LOC68 = (Ropeobj178006*)0;
LOC68 = rope_178277_2381377266(((NimStringDesc*) &T839829468_117));
result0 = HEX26_178418_2381377266(LOC67, LOC68);
}BeforeRet: ;
return result0;
}
/* Nim-compiler-generated C (cgen backend). Registers local variable s0 in
 * the current proc's debug frame (endb/stack-trace support): emits a frame
 * slot (format T839829468_126) and bumps both the proc's maximum frame
 * length and the current block's frame length. No-ops unless the relevant
 * options bits (0x28000) are both set, and skips untraceable types. */
N_NIMCALL(void, localdebuginfo_538449_839829468)(Tcproc529021* p0, Tsym292834* s0) {
Ropeobj178006* a0;
TY535235 LOC16;
NimStringDesc* LOC17;
{ {
if (!!(((163840 & (*p0).options) == 163840))) goto LA3;
goto BeforeRet;
}
LA3: ;
{
/* Skip types the debugger cannot address (kinds 27 and 48). */
Ttype292840* LOC7;
LOC7 = (Ttype292840*)0;
LOC7 = skiptypes_296099_850551059((*s0).typ, IL64(211106240964864));
if (!((*LOC7).kind == ((Ttypekind292244) 27) || (*LOC7).kind == ((Ttypekind292244) 48))) goto LA8;
goto BeforeRet;
}
LA8: ;
/* Default: take the address of the local ("&" prefix, constant T839829468_52);
 * by-reference parameters (kind 3 with introduced ptr) are used directly. */
a0 = HEX26_178452_2381377266(((NimStringDesc*) &T839829468_52), (*s0).loc.r);
{
NIM_BOOL LOC12;
LOC12 = (NIM_BOOL)0;
LOC12 = ((*s0).kind == ((Tsymkind292435) 3));
if (!(LOC12)) goto LA13;
LOC12 = ccgintroducedptr_533609_839829468(s0);
LA13: ;
if (!LOC12) goto LA14;
a0 = (*s0).loc.r;
}
LA14: ;
memset((void*)LOC16, 0, sizeof(LOC16));
LOC16[0] = rope_178401_2381377266(((NI64) ((*p0).maxframelen)));
LOC17 = (NimStringDesc*)0;
LOC17 = nsuNormalize((*(*s0).name).s);
LOC16[1] = makecstring_191638_155036129(LOC17);
LOC16[2] = a0;
LOC16[3] = gentypeinfo_535941_839829468((*p0).module, (*s0).loc.t);
linef_532700_839829468(p0, ((Tcprocsection529011) 1), ((NimStringDesc*) &T839829468_126), LOC16, 4);
(*p0).maxframelen += ((NI) 1);
(*p0).blocks->data[(NI)(((*p0).blocks ? (*p0).blocks->Sup.len : 0) - ((NI) 1))].framelen += ((NI) 1);
}BeforeRet: ;
}
/* Emit the C declaration for local variable s0 ("<decl>" + ";" constant
 * T839829468_125 + newline) into the proc's declaration section, then
 * register the local with the debug-frame machinery. */
N_NIMCALL(void, assignlocalvar_538614_839829468)(Tcproc529021* p0, Tsym292834* s0) {
    Ropeobj178006* decl0 =
        HEX26_178447_2381377266(
            HEX26_178447_2381377266(localvardecl_538532_839829468(p0, s0),
                                    ((NimStringDesc*) &T839829468_125)),
            tnl_176644_4151366050);
    line_532690_839829468(p0, ((Tcprocsection529011) 0), decl0);
    localdebuginfo_538449_839829468(p0, s0);
}
/* Default-initialise local v0 unless its symbol carries the no-init flag
 * (Tsymflag bit 12) or an immediate assignment will overwrite it anyway.
 * Rewritten from the generated goto-guard form into plain nested ifs;
 * the conditions and the single call are unchanged. */
N_NIMCALL(void, initlocalvar_538398_839829468)(Tcproc529021* p0, Tsym292834* v0, NIM_BOOL immediateasgn0) {
    if (!(((*v0).flags &(1U<<((NU)(((Tsymflag292184) 12))&31U)))!=0)) {
        if (!immediateasgn0) {
            constructloc_538388_839829468(p0, (*v0).loc, NIM_FALSE);
        }
    }
}
/* Nim-compiler-generated C (cgen backend). Sets up the location record of a
 * proc's result parameter: names it from format T839829468_210 (presumably
 * the literal "result" — TODO confirm), kind 4, storage 2. When the mapped
 * C return type is not usable as a direct return value (not kind 17 AND
 * flagged invalid), the result is passed indirectly: flag bit 0 is set and
 * storage is reset to 0. */
N_NIMCALL(void, fillresult_533865_839829468)(Tsym292834* param0) {
TY533289 LOC1;
Ropeobj178006* LOC2;
memset((void*)LOC1, 0, sizeof(LOC1));
LOC2 = (Ropeobj178006*)0;
LOC2 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_210), LOC1, 0);
fillloc_532282_839829468((&(*param0).loc), ((Tlockind292808) 4), (*param0).typ, LOC2, ((Tstorageloc292812) 2));
{
/* Short-circuit AND: mapreturntype(typ) != 17 && isinvalidreturntype(typ). */
NIM_BOOL LOC5;
Tctypekind529007 LOC6;
LOC5 = (NIM_BOOL)0;
LOC6 = (Tctypekind529007)0;
LOC6 = mapreturntype_533445_839829468((*param0).typ);
LOC5 = !((LOC6 == ((Tctypekind529007) 17)));
if (!(LOC5)) goto LA7;
LOC5 = isinvalidreturntype_533548_839829468((*param0).typ);
LA7: ;
if (!LOC5) goto LA8;
(*param0).loc.flags |= ((NU16)1)<<((((Tlocflag292810) 0))%(sizeof(NU16)*8));
(*param0).loc.s = ((Tstorageloc292812) 0);
}
LA8: ;
}
/* Nim-compiler-generated C (cgen backend). Parameters need no declaration
 * (the C prototype already declares them); only the debug-frame entry is
 * emitted. */
N_NIMCALL(void, assignparam_538994_839829468)(Tcproc529021* p0, Tsym292834* s0) {
localdebuginfo_538449_839829468(p0, s0);
}
/* Nim-compiler-generated C (cgen backend). For procs whose type carries the
 * closure flag (Ttypeflag bit 11): locate the hidden environment symbol
 * stored as the last son of ast[3], declare it as a local, and emit the cast
 * that binds it to the incoming ClE environment pointer (format
 * T839829468_212). Non-closure procs return immediately. */
N_NIMCALL(void, closuresetup_560158_839829468)(Tcproc529021* p0, Tsym292834* prc0) {
Tnode292802* ls0;
Tnode292802* LOC5;
Tsym292834* env0;
TY532811 LOC10;
{ {
if (!!((((*(*prc0).typ).flags &(1U<<((NU)(((Ttypeflag292431) 11))&31U)))!=0))) goto LA3;
goto BeforeRet;
}
LA3: ;
LOC5 = (Tnode292802*)0;
LOC5 = HEX5BHEX5D_293238_850551059((*prc0).ast, ((NI) 3));
ls0 = lastson_295364_850551059(LOC5);
{
/* The environment entry must be a symbol node (kind 3). */
if (!!(((*ls0).kind == ((Tnodekind292020) 3)))) goto LA8;
internalerror_196100_155036129((*prc0).info, ((NimStringDesc*) &T839829468_211));
}
LA8: ;
env0 = (*ls0).kindU.S4.sym;
assignlocalvar_538614_839829468(p0, env0);
memset((void*)LOC10, 0, sizeof(LOC10));
LOC10[0] = rdloc_538188_839829468((*env0).loc);
LOC10[1] = gettypedesc_535671_839829468((*p0).module, (*env0).typ);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_212), LOC10, 2);
}BeforeRet: ;
}
/* Build the GC-frame setup snippet (format T839829468_217) for the proc,
 * or return nil when the proc registered no GC-frame slots (gcframeid == 0).
 * Rewritten from the generated goto-guard form into a plain if-block. */
N_NIMCALL(Ropeobj178006*, initgcframe_538435_839829468)(Tcproc529021* p0) {
    Ropeobj178006* result0 = (Ropeobj178006*)0;
    if (((NI) 0) < ((NI) ((*p0).gcframeid))) {
        TY178507 fmtargs;
        memset((void*)fmtargs, 0, sizeof(fmtargs));
        fmtargs[0] = (*p0).gcframetype;
        result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_217), fmtargs, 1);
    }
    return result0;
}
/* Nim-compiler-generated C (cgen backend). Builds the stack-trace frame
 * prologue: pulls in the frame push helper symbol(s) via cgsym, then emits
 * either the variant with local-variable slots (proc has debug locals,
 * maxframelen > 0; format T839829468_220) or the slot-free variant
 * (T839829468_221) with just proc name and file name. */
N_NIMCALL(Ropeobj178006*, initframe_560140_839829468)(Tcproc529021* p0, Ropeobj178006* procname0, Ropeobj178006* filename0) {
Ropeobj178006* result0;
Ropeobj178006* LOC1;
result0 = (Ropeobj178006*)0;
LOC1 = (Ropeobj178006*)0;
LOC1 = cgsym_532403_839829468((*p0).module, ((NimStringDesc*) &T839829468_218));
{
Ropeobj178006* LOC6;
TY535235 LOC7;
if (!(((NI) 0) < (*p0).maxframelen)) goto LA4;
LOC6 = (Ropeobj178006*)0;
LOC6 = cgsym_532403_839829468((*p0).module, ((NimStringDesc*) &T839829468_219));
memset((void*)LOC7, 0, sizeof(LOC7));
LOC7[0] = procname0;
LOC7[1] = filename0;
LOC7[2] = rope_178401_2381377266(((NI64) ((*p0).maxframelen)));
LOC7[3] = rope_178401_2381377266(((NI64) ((*p0).blocks->data[((NI) 0)].framelen)));
result0 = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_220), LOC7, 4);
}
goto LA2;
LA4: ;
{
TY532811 LOC9;
memset((void*)LOC9, 0, sizeof(LOC9));
LOC9[0] = procname0;
LOC9[1] = filename0;
result0 = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_221), LOC9, 2);
}
LA2: ;
return result0;
}
/* Format frmt0 with args0 through ropecg (which resolves #-symbols against
 * the proc's module) and append the result to the proc section s0. */
N_NIMCALL(void, appcg_532648_839829468)(Tcproc529021* p0, Tcprocsection529011 s0, NimStringDesc* frmt0, Ropeobj178006** args0, NI args0Len0) {
    Ropeobj178006** section = s_529179_3723162438(p0, s0);
    Ropeobj178006* formatted = ropecg_532407_839829468((*p0).module, frmt0, args0, args0Len0);
    add_178482_2381377266(section, formatted);
}
/* Build the GC-frame teardown snippet (zero-argument format T839829468_225),
 * or return nil when the proc registered no GC-frame slots. Rewritten from
 * the generated goto-guard form into a plain if-block. */
N_NIMCALL(Ropeobj178006*, deinitgcframe_538441_839829468)(Tcproc529021* p0) {
    Ropeobj178006* result0 = (Ropeobj178006*)0;
    if (((NI) 0) < ((NI) ((*p0).gcframeid))) {
        TY533289 noargs;
        memset((void*)noargs, 0, sizeof(noargs));
        result0 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_225), noargs, 0);
    }
    return result0;
}
/* Build the stack-trace frame-pop epilogue (zero-argument format
 * T839829468_226); always emitted, unlike the GC-frame teardown. */
N_NIMCALL(Ropeobj178006*, deinitframe_560150_839829468)(Tcproc529021* p0) {
    TY533289 noargs;
    memset((void*)noargs, 0, sizeof(noargs));
    return ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_226), noargs, 0);
}
/* Nim-compiler-generated C (cgen backend). Generates the complete C function
 * for proc prc0: header, result-variable setup, parameter debug info,
 * closure-environment binding, the body statements, and the surrounding
 * frame/GC-frame prologue and epilogue; the assembled text is appended to
 * the module's procs section (s[10]). */
N_NIMCALL(void, genprocaux_560284_839829468)(Tcgen529027* m0, Tsym292834* prc0) {
Tcproc529021* p0;
Ropeobj178006* header0;
Ropeobj178006* returnstmt0;
Tnode292802* LOC51;
Ropeobj178006* generatedproc0;
p0 = newproc_529206_3723162438(prc0, m0);
header0 = genprocheader_535867_839829468(m0, prc0);
returnstmt0 = NIM_NIL;
{
/* Result handling: only for non-flagged (bit 9 clear) procs with a
 * non-void return type (sons[0] != nil). */
NIM_BOOL LOC3;
Tsym292834* res0;
LOC3 = (NIM_BOOL)0;
LOC3 = !((((*prc0).flags &(1U<<((NU)(((Tsymflag292184) 9))&31U)))!=0));
if (!(LOC3)) goto LA4;
LOC3 = !(((*(*prc0).typ).sons->data[((NI) 0)] == NIM_NIL));
LA4: ;
if (!LOC3) goto LA5;
{
/* The result symbol lives at ast[7]; shorter ASTs are an internal error. */
NI LOC9;
LOC9 = (NI)0;
LOC9 = len_293081_850551059((*prc0).ast);
if (!(LOC9 <= ((NI) 7))) goto LA10;
internalerror_196100_155036129((*prc0).info, ((NimStringDesc*) &T839829468_120));
}
LA10: ;
res0 = (*(*(*prc0).ast).kindU.S6.sons->data[((NI) 7)]).kindU.S4.sym;
{
NIM_BOOL LOC14;
TY178507 LOC34;
LOC14 = (NIM_BOOL)0;
LOC14 = isinvalidreturntype_533548_839829468((*(*prc0).typ).sons->data[((NI) 0)]);
if (!!(LOC14)) goto LA15;
{
/* Propagate the no-init flag (bit 12) from the proc to its result. */
if (!(((*prc0).flags &(1U<<((NU)(((Tsymflag292184) 12))&31U)))!=0)) goto LA19;
(*res0).flags |= ((NU32)1)<<((((Tsymflag292184) 12))%(sizeof(NU32)*8));
}
LA19: ;
{
/* Optimisation: when the body is a single "result = <expr>" assignment
 * (easyresultasgn), fold declaration and initialisation into one line. */
NIM_BOOL LOC23;
NIM_BOOL LOC24;
NIM_BOOL LOC26;
Tnode292802* val0;
Tnode292802* LOC29;
Ropeobj178006* decl0;
Tloc292816 a0;
TY532811 LOC32;
LOC23 = (NIM_BOOL)0;
LOC24 = (NIM_BOOL)0;
LOC24 = (((*prc0).flags &(1U<<((NU)(((Tsymflag292184) 12))&31U)))!=0);
if (!(LOC24)) goto LA25;
LOC26 = (NIM_BOOL)0;
LOC26 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC26) goto LA27;
LOC26 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA27: ;
LOC24 = LOC26;
LA25: ;
LOC23 = LOC24;
if (!(LOC23)) goto LA28;
LOC29 = (Tnode292802*)0;
LOC29 = getbody_335227_1724185294(prc0);
val0 = easyresultasgn_560191_839829468(LOC29);
LOC23 = !((val0 == NIM_NIL));
LA28: ;
if (!LOC23) goto LA30;
decl0 = localvardecl_538532_839829468(p0, res0);
memset((void*)(&a0), 0, sizeof(a0));
initlocexprsingleuse_539289_839829468(p0, val0, (&a0));
memset((void*)LOC32, 0, sizeof(LOC32));
LOC32[0] = decl0;
LOC32[1] = rdloc_538188_839829468(a0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_123), LOC32, 2);
}
goto LA21;
LA30: ;
{
/* General case: declare the result local and default-initialise it. */
assignlocalvar_538614_839829468(p0, res0);
initlocalvar_538398_839829468(p0, res0, NIM_FALSE);
}
LA21: ;
memset((void*)LOC34, 0, sizeof(LOC34));
LOC34[0] = rdloc_538188_839829468((*res0).loc);
returnstmt0 = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_209), LOC34, 1);
}
goto LA12;
LA15: ;
{
/* Invalid C return type: result becomes a hidden parameter instead. */
fillresult_533865_839829468(res0);
assignparam_538994_839829468(p0, res0);
{
Ttype292840* LOC38;
LOC38 = (Ttype292840*)0;
LOC38 = skiptypes_296099_850551059((*res0).typ, IL64(211106232576256));
if (!((*LOC38).kind == ((Ttypekind292244) 16))) goto LA39;
(*res0).loc.s = ((Tstorageloc292812) 0);
}
LA39: ;
}
LA12: ;
}
LA5: ;
{
/* Register every run-time parameter (skipping compile-time-only ones)
 * with the debug-frame machinery. */
NI i_560627_839829468;
NI HEX3Atmp_560743_839829468;
NI LOC42;
NI res_560746_839829468;
i_560627_839829468 = (NI)0;
HEX3Atmp_560743_839829468 = (NI)0;
LOC42 = (NI)0;
LOC42 = sonslen_295351_850551059((*(*prc0).typ).n);
HEX3Atmp_560743_839829468 = (NI)(LOC42 - ((NI) 1));
res_560746_839829468 = ((NI) 1);
{
while (1) {
if (!(res_560746_839829468 <= HEX3Atmp_560743_839829468)) goto LA44;
i_560627_839829468 = res_560746_839829468;
{
Tsym292834* param0;
param0 = (*(*(*(*prc0).typ).n).kindU.S6.sons->data[i_560627_839829468]).kindU.S4.sym;
{
NIM_BOOL LOC48;
LOC48 = (NIM_BOOL)0;
LOC48 = iscompiletimeonly_328706_3876443242((*param0).typ);
if (!LOC48) goto LA49;
goto LA45;
}
LA49: ;
assignparam_538994_839829468(p0, param0);
} LA45: ;
res_560746_839829468 += ((NI) 1);
} LA44: ;
}
}
closuresetup_560158_839829468(p0, prc0);
LOC51 = (Tnode292802*)0;
LOC51 = getbody_335227_1724185294(prc0);
genstmts_539244_839829468(p0, LOC51);
generatedproc0 = (Ropeobj178006*)0;
{
/* Compiler-specific attribute (Tinfoccprop bit 6) for flag-14 procs,
 * presumably a no-return/naked attribute — TODO confirm. */
if (!(((*prc0).flags &(1U<<((NU)(((Tsymflag292184) 14))&31U)))!=0)) goto LA54;
{
if (!((Cc_273413_2528170400[(ccompiler_273431_2528170400)- 1].Field20 &(1U<<((NU)(((Tinfoccprop273004) 6))&7U)))!=0)) goto LA58;
header0 = HEX26_178452_2381377266(((NimStringDesc*) &T839829468_213), header0);
}
LA58: ;
}
LA54: ;
{
/* Flag-9 procs (presumably .noReturn or similar) get the compact layout:
 * header + all three proc sections glued via format T839829468_215. */
TY535235 LOC68;
Ropeobj178006** LOC69;
Ropeobj178006** LOC70;
Ropeobj178006** LOC71;
if (!(((*prc0).flags &(1U<<((NU)(((Tsymflag292184) 9))&31U)))!=0)) goto LA62;
{
if (!((Cc_273413_2528170400[(ccompiler_273431_2528170400)- 1].Field20 &(1U<<((NU)(((Tinfoccprop273004) 6))&7U)))!=0)) goto LA66;
header0 = HEX26_178452_2381377266(((NimStringDesc*) &T839829468_214), header0);
}
LA66: ;
memset((void*)LOC68, 0, sizeof(LOC68));
LOC68[0] = header0;
LOC69 = (Ropeobj178006**)0;
LOC69 = s_529179_3723162438(p0, ((Tcprocsection529011) 0));
LOC68[1] = (*LOC69);
LOC70 = (Ropeobj178006**)0;
LOC70 = s_529179_3723162438(p0, ((Tcprocsection529011) 1));
LOC68[2] = (*LOC70);
LOC71 = (Ropeobj178006**)0;
LOC71 = s_529179_3723162438(p0, ((Tcprocsection529011) 2));
LOC68[3] = (*LOC71);
generatedproc0 = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_215), LOC68, 4);
}
goto LA60;
LA62: ;
{
/* Normal layout: header, GC frame init, declarations, optional
 * stack-trace frame (option bit 15), init section, optional profiler
 * call (option bit 19), body, BeforeRet label, teardown, return. */
TY178507 LOC73;
Ropeobj178006* LOC74;
Ropeobj178006** LOC93;
Ropeobj178006** LOC94;
Ropeobj178006* LOC101;
TY533289 LOC107;
Ropeobj178006* LOC108;
memset((void*)LOC73, 0, sizeof(LOC73));
LOC73[0] = header0;
generatedproc0 = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_216), LOC73, 1);
LOC74 = (Ropeobj178006*)0;
LOC74 = initgcframe_538435_839829468(p0);
add_178482_2381377266(&generatedproc0, LOC74);
{
Ropeobj178006** LOC79;
Ropeobj178006* procname0;
Ropeobj178006* LOC80;
Ropeobj178006* LOC81;
if (!(((*prc0).options &(1U<<((NU)(((Toption169009) 15))&31U)))!=0)) goto LA77;
LOC79 = (Ropeobj178006**)0;
LOC79 = s_529179_3723162438(p0, ((Tcprocsection529011) 0));
add_178482_2381377266(&generatedproc0, (*LOC79));
procname0 = makecstring_191638_155036129((*(*prc0).name).s);
LOC80 = (Ropeobj178006*)0;
LOC80 = quotedfilename_196818_155036129((*prc0).info);
LOC81 = (Ropeobj178006*)0;
LOC81 = initframe_560140_839829468(p0, procname0, LOC80);
add_178482_2381377266(&generatedproc0, LOC81);
}
goto LA75;
LA77: ;
{
Ropeobj178006** LOC83;
LOC83 = (Ropeobj178006**)0;
LOC83 = s_529179_3723162438(p0, ((Tcprocsection529011) 0));
add_178482_2381377266(&generatedproc0, (*LOC83));
}
LA75: ;
{
TY533289 LOC88;
if (!(((*prc0).options &(1U<<((NU)(((Toption169009) 19))&31U)))!=0)) goto LA86;
memset((void*)LOC88, 0, sizeof(LOC88));
appcg_532648_839829468(p0, ((Tcprocsection529011) 1), ((NimStringDesc*) &T839829468_222), LOC88, 0);
}
LA86: ;
{
/* Body used "return": open the block that BeforeRet will close. */
if (!(*p0).beforeretneeded) goto LA91;
add_178487_2381377266(&generatedproc0, ((NimStringDesc*) &T839829468_223));
}
LA91: ;
LOC93 = (Ropeobj178006**)0;
LOC93 = s_529179_3723162438(p0, ((Tcprocsection529011) 1));
add_178482_2381377266(&generatedproc0, (*LOC93));
LOC94 = (Ropeobj178006**)0;
LOC94 = s_529179_3723162438(p0, ((Tcprocsection529011) 2));
add_178482_2381377266(&generatedproc0, (*LOC94));
{
TY533289 LOC99;
Ropeobj178006* LOC100;
if (!(*p0).beforeretneeded) goto LA97;
memset((void*)LOC99, 0, sizeof(LOC99));
LOC100 = (Ropeobj178006*)0;
LOC100 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_224), LOC99, 0);
add_178482_2381377266(&generatedproc0, LOC100);
}
LA97: ;
LOC101 = (Ropeobj178006*)0;
LOC101 = deinitgcframe_538441_839829468(p0);
add_178482_2381377266(&generatedproc0, LOC101);
{
Ropeobj178006* LOC106;
if (!(((*prc0).options &(1U<<((NU)(((Toption169009) 15))&31U)))!=0)) goto LA104;
LOC106 = (Ropeobj178006*)0;
LOC106 = deinitframe_560150_839829468(p0);
add_178482_2381377266(&generatedproc0, LOC106);
}
LA104: ;
add_178482_2381377266(&generatedproc0, returnstmt0);
memset((void*)LOC107, 0, sizeof(LOC107));
LOC108 = (Ropeobj178006*)0;
LOC108 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_227), LOC107, 0);
add_178482_2381377266(&generatedproc0, LOC108);
}
LA60: ;
add_178482_2381377266(&(*m0).s[(((Tcfilesection529005) 10))- 0], generatedproc0);
}
/* Resolve symbol s0 to the code-generator object of its owning module by
 * indexing the global module list with the module symbol's position.
 * (Parameter m0 is unused; it is kept for interface compatibility.) */
N_NIMCALL(Tcgen529027*, findpendingmodule_532241_839829468)(Tcgen529027* m0, Tsym292834* s0) {
    Tsym292834* owner = getmodule_299123_2984716966(s0);
    return gmodules_529170_3723162438->data[(*owner).position];
}
/* True when the library's path node is a call-like AST node (kinds 26..32)
 * whose type is non-nil and is a pointer-ish/proc type (kind 26 or 25) —
 * i.e. the dynlib path is computed by calling something like getProcAddr.
 * Rewritten from the generated LOC-variable chain into native short-circuit
 * `&&`, which evaluates the same sub-conditions in the same order. */
N_NIMCALL(NIM_BOOL, isgetprocaddr_559442_839829468)(Tlib292820* lib0) {
    Tnode292802* n0 = (*lib0).path;
    NIM_BOOL iscalllike = ((*n0).kind == ((Tnodekind292020) 27) || (*n0).kind == ((Tnodekind292020) 29) || (*n0).kind == ((Tnodekind292020) 30) || (*n0).kind == ((Tnodekind292020) 31) || (*n0).kind == ((Tnodekind292020) 26) || (*n0).kind == ((Tnodekind292020) 28) || (*n0).kind == ((Tnodekind292020) 32));
    return iscalllike
        && !((*n0).typ == NIM_NIL)
        && ((*(*n0).typ).kind == ((Ttypekind292244) 26) || (*(*n0).typ).kind == ((Ttypekind292244) 25));
}
/* Initialize a fresh expression location and evaluate e0 into it.
 * result0 is an out-parameter: it is first set up as loc kind 0 with
 * e0's type and default storage, then filled by the expression
 * generator.  The two calls must stay in this order.
 * NOTE(review): Nim-compiler generated C (cgen backend). */
N_NIMCALL(void, initlocexpr_539283_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* result0) {
initloc_532273_839829468(result0, ((Tlockind292808) 0), (*e0).typ, ((Tstorageloc292812) 0));
expr_539248_839829468(p0, e0, result0);
}
/* Emit the C code that loads a dynamic library at program startup.
 * Runs at most once per lib (guarded by lib0->generated).  Two cases:
 *  - the lib path is a string-literal node (kinds 20..22): expand it into
 *    candidate file names and emit a chained nimLoadLibrary attempt;
 *  - otherwise the path is a runtime expression: evaluate it in a scratch
 *    proc and emit a load from the computed value.
 * Finally asserts that a library handle name was produced.
 * NOTE(review): Nim-compiler generated C; goto labels LAxx are the
 * compiler's structured-control joins — statement order is significant. */
N_NIMCALL(void, loaddynamiclib_559480_839829468)(Tcgen529027* m0, Tlib292820* lib0) {
{
Ropeobj178006* tmp0;
TY178507 LOC5;
/* already generated? then skip straight to the final sanity check */
if (!!((*lib0).generated)) goto LA3;
(*lib0).generated = NIM_TRUE;
/* fresh temp name for the library handle variable */
tmp0 = gettempname_533596_839829468(m0);
asgnRefNoCycle((void**) (&(*lib0).name), tmp0);
memset((void*)LOC5, 0, sizeof(LOC5));
LOC5[0] = tmp0;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 9))- 0], ((NimStringDesc*) &T839829468_228), LOC5, 1);
{
TY135002* s0;
Ropeobj178006* loadlib0;
TY532811 LOC18;
/* string-literal path? (node kinds 20..22) */
if (!((*(*lib0).path).kind >= ((Tnodekind292020) 20) && (*(*lib0).path).kind <= ((Tnodekind292020) 22))) goto LA8;
s0 = (TY135002*) newSeq((&NTI135002), 0);
libcandidates_170605_2607990831((*(*lib0).path).kindU.S3.strval, (&s0));
rawmessage_194612_155036129(((Tmsgkind191002) 286), (*(*lib0).path).kindU.S3.strval);
loadlib0 = NIM_NIL;
/* build a load attempt for every candidate file name */
{
NI i_559847_839829468;
NI HEX3Atmp_559902_839829468;
NI res_559905_839829468;
i_559847_839829468 = (NI)0;
HEX3Atmp_559902_839829468 = (NI)0;
HEX3Atmp_559902_839829468 = (s0 ? (s0->Sup.len-1) : -1);
res_559905_839829468 = ((NI) 0);
{
while (1) {
TY532811 LOC17;
if (!(res_559905_839829468 <= HEX3Atmp_559902_839829468)) goto LA12;
i_559847_839829468 = res_559905_839829468;
(*m0).labels += ((NI) 1);
{
/* separator between successive attempts */
if (!(((NI) 0) < i_559847_839829468)) goto LA15;
add_178487_2381377266(&loadlib0, ((NimStringDesc*) &T839829468_229));
}
LA15: ;
memset((void*)LOC17, 0, sizeof(LOC17));
LOC17[0] = tmp0;
LOC17[1] = getstrlit_549468_839829468(m0, s0->data[i_559847_839829468]);
appcg_532632_839829468(m0, &loadlib0, ((NimStringDesc*) &T839829468_230), LOC17, 2);
res_559905_839829468 += ((NI) 1);
} LA12: ;
}
}
memset((void*)LOC18, 0, sizeof(LOC18));
LOC18[0] = loadlib0;
LOC18[1] = getstrlit_549468_839829468(m0, (*(*lib0).path).kindU.S3.strval);
appcg_532632_839829468(m0, &(*m0).s[(((Tcfilesection529005) 16))- 0], ((NimStringDesc*) &T839829468_231), LOC18, 2);
}
goto LA6;
LA8: ;
{
/* runtime-expression path: evaluate it inside a scratch proc */
Tcproc529021* p0;
Tloc292816 dest0;
Ropeobj178006** LOC20;
Ropeobj178006** LOC21;
Ropeobj178006** LOC22;
TY532811 LOC23;
p0 = newproc_529206_3723162438(NIM_NIL, m0);
(*p0).options = ((*p0).options & ~ 163840);
memset((void*)(&dest0), 0, sizeof(dest0));
initlocexpr_539283_839829468(p0, (*lib0).path, (&dest0));
/* splice the scratch proc's sections into the module's sections */
LOC20 = (Ropeobj178006**)0;
LOC20 = s_529179_3723162438(p0, ((Tcprocsection529011) 0));
add_178482_2381377266(&(*m0).s[(((Tcfilesection529005) 9))- 0], (*LOC20));
LOC21 = (Ropeobj178006**)0;
LOC21 = s_529179_3723162438(p0, ((Tcprocsection529011) 1));
add_178482_2381377266(&(*m0).s[(((Tcfilesection529005) 16))- 0], (*LOC21));
LOC22 = (Ropeobj178006**)0;
LOC22 = s_529179_3723162438(p0, ((Tcprocsection529011) 2));
add_178482_2381377266(&(*m0).s[(((Tcfilesection529005) 16))- 0], (*LOC22));
memset((void*)LOC23, 0, sizeof(LOC23));
LOC23[0] = tmp0;
LOC23[1] = rdloc_538188_839829468(dest0);
appcg_532632_839829468(m0, &(*m0).s[(((Tcfilesection529005) 16))- 0], ((NimStringDesc*) &T839829468_232), LOC23, 2);
}
LA6: ;
}
LA3: ;
{
/* sanity: a handle name must exist by now */
if (!((*lib0).name == NIM_NIL)) goto LA26;
internalerror_196113_155036129(((NimStringDesc*) &T839829468_233));
}
LA26: ;
}
N_NIMCALL(Ropeobj178006*, mangledynlibproc_538816_839829468)(Tsym292834* sym0) {
/* Produce the C identifier used for a dynlib-imported symbol: if symbol
 * flag 16 is set, use the symbol's own name verbatim; otherwise build a
 * compiler-private name from the symbol id via format string T..._234.
 * Behaviour-preserving if/else rewrite of the generated goto pair. */
Ropeobj178006* result0;
if ((((*sym0).flags &(1U<<((NU)(((Tsymflag292184) 16))&31U)))!=0)) {
result0 = rope_178277_2381377266((*(*sym0).name).s);
} else {
TY178507 fmtargs0;
memset((void*)fmtargs0, 0, sizeof(fmtargs0));
fmtargs0[0] = rope_178401_2381377266(((NI64) ((*sym0).Sup.id)));
result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_234), fmtargs0, 1);
}
return result0;
}
/* Generate the code that binds one dynlib-imported symbol at runtime.
 * For a plain dynlib import, first make sure the library itself is loaded,
 * then emit a nimGetProcAddr-style lookup into a mangled proc pointer.
 * If the lib uses the "getProcAddr" pattern (iscall0), instead emit a call
 * through the user-supplied loader expression; a trailing string-literal
 * argument selects either immediate loading (empty), a numbered extension
 * loader bucket ('0'..'9'), or is rejected as an internal error.
 * Finally a variable declaration for the proc pointer is appended.
 * NOTE(review): Nim-compiler generated C; LAxx labels are control-flow
 * joins and the statement order is significant — do not reorder. */
N_NIMCALL(void, symindynamiclib_559929_839829468)(Tcgen529027* m0, Tsym292834* sym0) {
Tlib292820* lib0;
NIM_BOOL iscall0;
Ropeobj178006* extname0;
Ropeobj178006* tmp0;
TY532811 LOC43;
lib0 = (*sym0).annex;
iscall0 = isgetprocaddr_559442_839829468(lib0);
extname0 = (*sym0).loc.r;
{
/* normal dynlib: ensure the library handle exists first */
if (!!(iscall0)) goto LA3;
loaddynamiclib_559480_839829468(m0, lib0);
}
LA3: ;
/* replace the symbol's C name with the dynlib proc-pointer name */
tmp0 = mangledynlibproc_538816_839829468(sym0);
asgnRefNoCycle((void**) (&(*sym0).loc.r), tmp0);
asgnRefNoCycle((void**) (&(*(*sym0).typ).sym), NIM_NIL);
(*m0).labels += ((NI) 2);
{
Tnode292802* n0;
Tloc292816 a0;
Tnode292802* LOC9;
Ropeobj178006* params0;
Ropeobj178006* LOC10;
Ropeobj178006* load0;
TY535235 LOC17;
NimStringDesc* LOC18;
Tnode292802* last0;
NimStringDesc* idx0;
/* "getProcAddr" pattern: build a call through the loader expression */
if (!iscall0) goto LA7;
n0 = (*lib0).path;
memset((void*)(&a0), 0, sizeof(a0));
LOC9 = (Tnode292802*)0;
LOC9 = HEX5BHEX5D_293238_850551059(n0, ((NI) 0));
initlocexpr_539283_839829468((*m0).initproc, LOC9, (&a0));
LOC10 = (Ropeobj178006*)0;
LOC10 = rdloc_538188_839829468(a0);
params0 = HEX26_178447_2381377266(LOC10, ((NimStringDesc*) &T839829468_118));
{
/* evaluate the middle call arguments (1 .. len-2) into the param list */
NI i_559964_839829468;
NI HEX3Atmp_560025_839829468;
NI LOC12;
NI res_560028_839829468;
i_559964_839829468 = (NI)0;
HEX3Atmp_560025_839829468 = (NI)0;
LOC12 = (NI)0;
LOC12 = len_293081_850551059(n0);
HEX3Atmp_560025_839829468 = (NI)(LOC12 - ((NI) 2));
res_560028_839829468 = ((NI) 1);
{
while (1) {
Tnode292802* LOC15;
Ropeobj178006* LOC16;
if (!(res_560028_839829468 <= HEX3Atmp_560025_839829468)) goto LA14;
i_559964_839829468 = res_560028_839829468;
LOC15 = (Tnode292802*)0;
LOC15 = HEX5BHEX5D_293238_850551059(n0, i_559964_839829468);
initlocexpr_539283_839829468((*m0).initproc, LOC15, (&a0));
LOC16 = (Ropeobj178006*)0;
LOC16 = rdloc_538188_839829468(a0);
add_178482_2381377266(&params0, LOC16);
add_178487_2381377266(&params0, ((NimStringDesc*) &T839829468_110));
res_560028_839829468 += ((NI) 1);
} LA14: ;
}
}
/* assemble the full "tmp = (type)(loader(params "extname"))" statement */
memset((void*)LOC17, 0, sizeof(LOC17));
LOC17[0] = tmp0;
LOC17[1] = gettypedesc_535671_839829468(m0, (*sym0).typ);
LOC17[2] = params0;
LOC18 = (NimStringDesc*)0;
LOC18 = HEX24_178856_2381377266(extname0);
LOC17[3] = makecstring_191638_155036129(LOC18);
load0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_235), LOC17, 4);
last0 = lastson_295364_850551059(n0);
{
/* unwrap an hidden-std-conv-like wrapper node (kind 58) */
if (!((*last0).kind == ((Tnodekind292020) 58))) goto LA21;
last0 = (*last0).kindU.S6.sons->data[((NI) 1)];
}
LA21: ;
{
/* the last argument must be a string literal (kind 20) */
NimStringDesc* LOC27;
if (!!(((*last0).kind == ((Tnodekind292020) 20)))) goto LA25;
LOC27 = (NimStringDesc*)0;
LOC27 = HEX24_196185_1689653243(T839829468_236);
internalerror_196113_155036129(LOC27);
}
LA25: ;
idx0 = (*last0).kindU.S3.strval;
{
/* empty index: run the load in the module's init proc */
Ropeobj178006** LOC32;
if (!((idx0 ? idx0->Sup.len : 0) == ((NI) 0))) goto LA30;
LOC32 = (Ropeobj178006**)0;
LOC32 = s_529179_3723162438((*m0).initproc, ((Tcprocsection529011) 2));
add_178482_2381377266(LOC32, load0);
}
goto LA28;
LA30: ;
{
/* single digit '0'..'9': defer into the numbered extension loader */
NIM_BOOL LOC34;
LOC34 = (NIM_BOOL)0;
LOC34 = ((idx0 ? idx0->Sup.len : 0) == ((NI) 1));
if (!(LOC34)) goto LA35;
LOC34 = (((NU8)(idx0->data[((NI) 0)])) >= ((NU8)(48)) && ((NU8)(idx0->data[((NI) 0)])) <= ((NU8)(57)));
LA35: ;
if (!LOC34) goto LA36;
add_178482_2381377266(&(*m0).extensionloaders[(((NU8)(idx0->data[((NI) 0)])))- 48], load0);
}
goto LA28;
LA36: ;
{
/* anything else is an invalid extension index */
NimStringDesc* LOC39;
LOC39 = (NimStringDesc*)0;
LOC39 = rawNewString(idx0->Sup.len + 13);
appendString(LOC39, ((NimStringDesc*) &T839829468_237));
appendString(LOC39, idx0);
internalerror_196100_155036129((*sym0).info, LOC39);
}
LA28: ;
}
goto LA5;
LA7: ;
{
/* plain dynlib import: emit nimGetProcAddr-style lookup */
TY535235 LOC41;
NimStringDesc* LOC42;
memset((void*)LOC41, 0, sizeof(LOC41));
LOC41[0] = tmp0;
LOC41[1] = gettypedesc_535671_839829468(m0, (*sym0).typ);
LOC41[2] = (*lib0).name;
LOC42 = (NimStringDesc*)0;
LOC42 = HEX24_178856_2381377266(extname0);
LOC41[3] = makecstring_191638_155036129(LOC42);
appcg_532632_839829468(m0, &(*m0).s[(((Tcfilesection529005) 16))- 0], ((NimStringDesc*) &T839829468_238), LOC41, 4);
}
LA5: ;
/* declare the proc-pointer variable itself */
memset((void*)LOC43, 0, sizeof(LOC43));
LOC43[0] = (*sym0).loc.r;
LOC43[1] = gettypedesc_535671_839829468(m0, (*sym0).loc.t);
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 9))- 0], ((NimStringDesc*) &T839829468_239), LOC43, 2);
}
/* Partial variant of symindynamiclib: only rename the symbol to its
 * dynlib proc-pointer name and detach the type's symbol link, without
 * emitting any loading code (presumably the owning module emits it).
 * NOTE(review): Nim-compiler generated C. */
N_NIMCALL(void, symindynamiclibpartial_560071_839829468)(Tcgen529027* m0, Tsym292834* sym0) {
asgnRefNoCycle((void**) (&(*sym0).loc.r), mangledynlibproc_538816_839829468(sym0));
asgnRefNoCycle((void**) (&(*(*sym0).typ).sym), NIM_NIL);
}
/* Generate code for a proc that is not (or no longer) forward-declared.
 * Dispatch, in order:
 *  - header-imported proc (loc flag 7): just pull in its header via cgsym
 *    and return;
 *  - "no decl" proc (loc flag 3): prototype only, nothing else;
 *  - inline proc (callconv 5): generate the body here unless this module
 *    already declared it;
 *  - dynlib proc (loc flag 4): route through the pending owning module
 *    (full or partial dynlib binding);
 *  - otherwise: generate the body in the owning module if not yet done.
 * NOTE(review): Nim-compiler generated C; goto/label structure encodes
 * the original case dispatch — keep statement order. */
N_NIMCALL(void, genprocnoforward_560906_839829468)(Tcgen529027* m0, Tsym292834* prc0) {
{ fillprocloc_539201_839829468(prc0);
useheader_532369_839829468(m0, prc0);
{
/* header-imported: cgsym pulls the declaration in, then bail out */
Ropeobj178006* LOC5;
if (!(((*prc0).loc.flags &(1U<<((NU)(((Tlocflag292810) 7))&15U)))!=0)) goto LA3;
LOC5 = (Ropeobj178006*)0;
LOC5 = cgsym_532403_839829468(m0, (*(*prc0).name).s);
goto BeforeRet;
}
LA3: ;
genprocprototype_539254_839829468(m0, prc0);
{
/* "no decl": the prototype above is all we emit */
if (!(((*prc0).loc.flags &(1U<<((NU)(((Tlocflag292810) 3))&15U)))!=0)) goto LA8;
}
goto LA6;
LA8: ;
{
/* inline proc: body goes into every module that uses it (once each) */
if (!((*(*prc0).typ).callconv == ((Tcallingconvention292002) 5))) goto LA11;
{
NIM_BOOL LOC15;
LOC15 = (NIM_BOOL)0;
LOC15 = containsorincl_268862_2627731572((&(*m0).declaredthings), (*prc0).Sup.id);
if (!!(LOC15)) goto LA16;
genprocaux_560284_839829468(m0, prc0);
}
LA16: ;
}
goto LA6;
LA11: ;
{
/* dynlib proc: bind it in the pending owning module */
Tcgen529027* q0;
if (!(((*prc0).loc.flags &(1U<<((NU)(((Tlocflag292810) 4))&15U)))!=0)) goto LA19;
q0 = findpendingmodule_532241_839829468(m0, prc0);
{
NIM_BOOL LOC23;
NIM_BOOL LOC25;
LOC23 = (NIM_BOOL)0;
LOC23 = !((q0 == NIM_NIL));
if (!(LOC23)) goto LA24;
LOC25 = (NIM_BOOL)0;
LOC25 = containsorincl_268862_2627731572((&(*q0).declaredthings), (*prc0).Sup.id);
LOC23 = !(LOC25);
LA24: ;
if (!LOC23) goto LA26;
symindynamiclib_559929_839829468(q0, prc0);
}
goto LA21;
LA26: ;
{
/* already declared (or no pending module): partial binding only */
symindynamiclibpartial_560071_839829468(m0, prc0);
}
LA21: ;
}
goto LA6;
LA19: ;
{
/* ordinary proc: generate the body in its owning module once */
Tcgen529027* q0;
if (!!((((*prc0).flags &(1U<<((NU)(((Tsymflag292184) 5))&31U)))!=0))) goto LA30;
q0 = findpendingmodule_532241_839829468(m0, prc0);
{
NIM_BOOL LOC34;
NIM_BOOL LOC36;
LOC34 = (NIM_BOOL)0;
LOC34 = !((q0 == NIM_NIL));
if (!(LOC34)) goto LA35;
LOC36 = (NIM_BOOL)0;
LOC36 = containsorincl_268862_2627731572((&(*q0).declaredthings), (*prc0).Sup.id);
LOC34 = !(LOC36);
LA35: ;
if (!LOC34) goto LA37;
genprocaux_560284_839829468(q0, prc0);
}
LA37: ;
}
goto LA6;
LA30: ;
LA6: ;
}BeforeRet: ;
}
/* Top-level entry for generating one proc symbol.
 * Skips procs that are flagged (flag 26) or not yet activated.  Forward
 * declarations (flag 4) are queued for later; everything else goes
 * through genprocnoforward.  Afterwards, an exported (header-visible)
 * proc additionally gets its prototype — and, if inline, its body —
 * emitted into the generated C header module.
 * NOTE(review): Nim-compiler generated C; flag masks (65600, 64) are the
 * compiler's symbol-flag encodings — meaning inferred, not verified here. */
N_NIMCALL(void, genproc_532951_839829468)(Tcgen529027* m0, Tsym292834* prc0) {
{ {
NIM_BOOL LOC3;
NIM_BOOL LOC5;
LOC3 = (NIM_BOOL)0;
LOC3 = (((*prc0).flags &(1U<<((NU)(((Tsymflag292184) 26))&31U)))!=0);
if (LOC3) goto LA4;
LOC5 = (NIM_BOOL)0;
LOC5 = isactivated_561431_839829468(prc0);
LOC3 = !(LOC5);
LA4: ;
if (!LOC3) goto LA6;
goto BeforeRet;
}
LA6: ;
fillprocloc_539201_839829468(prc0);
{
/* forward declaration: remember it, generate later */
if (!(((*prc0).flags &(1U<<((NU)(((Tsymflag292184) 4))&31U)))!=0)) goto LA10;
addforwardedproc_532203_839829468(m0, prc0);
}
goto LA8;
LA10: ;
{
genprocnoforward_560906_839829468(m0, prc0);
{
/* exported and a header is being generated and not "no decl"? */
NIM_BOOL LOC15;
NIM_BOOL LOC16;
LOC15 = (NIM_BOOL)0;
LOC16 = (NIM_BOOL)0;
LOC16 = ((65600 & (*prc0).flags) == 64);
if (!(LOC16)) goto LA17;
LOC16 = !((generatedheader_532201_839829468 == NIM_NIL));
LA17: ;
LOC15 = LOC16;
if (!(LOC15)) goto LA18;
LOC15 = !((((*prc0).loc.flags &(1U<<((NU)(((Tlocflag292810) 3))&15U)))!=0));
LA18: ;
if (!LOC15) goto LA19;
genprocprototype_539254_839829468(generatedheader_532201_839829468, prc0);
{
/* inline procs also get their body into the header module */
if (!((*(*prc0).typ).callconv == ((Tcallingconvention292002) 5))) goto LA23;
{
NIM_BOOL LOC27;
LOC27 = (NIM_BOOL)0;
LOC27 = containsorincl_268862_2627731572((&(*generatedheader_532201_839829468).declaredthings), (*prc0).Sup.id);
if (!!(LOC27)) goto LA28;
genprocaux_560284_839829468(generatedheader_532201_839829468, prc0);
}
LA28: ;
}
LA23: ;
}
LA19: ;
}
LA8: ;
}BeforeRet: ;
}
static N_INLINE(NIM_BOOL, emulatedthreadvars_532949_839829468)(void) {
NIM_BOOL result0;
result0 = (NIM_BOOL)0;
result0 = ((71303168 & ~ gglobaloptions_169130_2607990831)==0);
return result0;
}
/* Emit the declaration for a thread-local variable s0.
 * With emulated thread vars, the variable becomes a field of the global
 * NimTV record (registered once per symbol id, with its type recorded in
 * nimtvdeps for the record layout).  Otherwise a real C thread-local
 * declaration is emitted into section 9, prefixed with "extern" when
 * isextern0 and with the TLS-emulation keyword when global option 22 is
 * set.  NOTE(review): Nim-compiler generated C; keep statement order. */
N_NIMCALL(void, declarethreadvar_538676_839829468)(Tcgen529027* m0, Tsym292834* s0, NIM_BOOL isextern0) {
{
NIM_BOOL LOC3;
LOC3 = (NIM_BOOL)0;
LOC3 = emulatedthreadvars_532949_839829468();
if (!LOC3) goto LA4;
{
/* emulated: add a field to the shared NimTV record, once per symbol */
NIM_BOOL LOC8;
TY532811 LOC11;
LOC8 = (NIM_BOOL)0;
LOC8 = containsorincl_268862_2627731572((&nimtvdeclared_538675_839829468), (*s0).Sup.id);
if (!!(LOC8)) goto LA9;
nimtvdeps_538674_839829468 = (Ttypeseq292836*) incrSeqV2(&(nimtvdeps_538674_839829468)->Sup, sizeof(Ttype292840*));
asgnRefNoCycle((void**) (&nimtvdeps_538674_839829468->data[nimtvdeps_538674_839829468->Sup.len]), (*s0).loc.t);
++nimtvdeps_538674_839829468->Sup.len;
memset((void*)LOC11, 0, sizeof(LOC11));
LOC11[0] = gettypedesc_535671_839829468(m0, (*s0).loc.t);
LOC11[1] = (*s0).loc.r;
addf_179205_2381377266(&nimtv_538656_839829468, ((NimStringDesc*) &T839829468_54), LOC11, 2);
}
LA9: ;
}
goto LA1;
LA4: ;
{
/* native TLS declaration */
Ropeobj178006* LOC21;
TY178507 LOC22;
{
if (!isextern0) goto LA15;
add_178487_2381377266(&(*m0).s[(((Tcfilesection529005) 9))- 0], ((NimStringDesc*) &T839829468_240));
}
LA15: ;
{
/* global option 22: emit the TLS-emulation/storage keyword */
if (!((gglobaloptions_169130_2607990831 &((NU64)1<<((NU)(((Tglobaloption169013) 22))&63U)))!=0)) goto LA19;
add_178487_2381377266(&(*m0).s[(((Tcfilesection529005) 9))- 0], ((NimStringDesc*) &T839829468_241));
}
LA19: ;
LOC21 = (Ropeobj178006*)0;
LOC21 = gettypedesc_535671_839829468(m0, (*s0).loc.t);
add_178482_2381377266(&(*m0).s[(((Tcfilesection529005) 9))- 0], LOC21);
memset((void*)LOC22, 0, sizeof(LOC22));
LOC22[0] = (*s0).loc.r;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 9))- 0], ((NimStringDesc*) &T839829468_242), LOC22, 1);
}
LA1: ;
}
/* Emit an extern prototype for a global variable defined elsewhere.
 * Fills the symbol's loc with its mangled name, bails out for "no decl"
 * symbols or ones this module already declared, and only emits anything
 * when the variable's owner is a different module.  Thread vars are
 * delegated to declarethreadvar; ordinary vars get
 * "extern <type> [importc-prefixes] <name>;" in section 9.
 * NOTE(review): Nim-compiler generated C; keep statement order. */
N_NIMCALL(void, genvarprototypeaux_544254_839829468)(Tcgen529027* m0, Tsym292834* sym0) {
Ropeobj178006* LOC1;
{ useheader_532369_839829468(m0, sym0);
LOC1 = (Ropeobj178006*)0;
LOC1 = manglename_533205_839829468(sym0);
fillloc_532282_839829468((&(*sym0).loc), ((Tlockind292808) 3), (*sym0).typ, LOC1, ((Tstorageloc292812) 3));
{
/* "no decl" flag set, or already declared in this module? then done */
NIM_BOOL LOC4;
LOC4 = (NIM_BOOL)0;
LOC4 = (((*sym0).loc.flags &(1U<<((NU)(((Tlocflag292810) 3))&15U)))!=0);
if (LOC4) goto LA5;
LOC4 = containsorincl_268862_2627731572((&(*m0).declaredthings), (*sym0).Sup.id);
LA5: ;
if (!LOC4) goto LA6;
goto BeforeRet;
}
LA6: ;
{
/* only emit extern prototypes for variables owned by other modules */
if (!!(((*(*sym0).owner).Sup.id == (*(*m0).module).Sup.id))) goto LA10;
{
/* thread var (symbol flag 22) */
if (!(((*sym0).flags &(1U<<((NU)(((Tsymflag292184) 22))&31U)))!=0)) goto LA14;
declarethreadvar_538676_839829468(m0, sym0, NIM_TRUE);
}
goto LA12;
LA14: ;
{
Ropeobj178006* LOC17;
TY178507 LOC30;
add_178487_2381377266(&(*m0).s[(((Tcfilesection529005) 9))- 0], ((NimStringDesc*) &T839829468_240));
LOC17 = (Ropeobj178006*)0;
LOC17 = gettypedesc_535671_839829468(m0, (*sym0).loc.t);
add_178482_2381377266(&(*m0).s[(((Tcfilesection529005) 9))- 0], LOC17);
{
/* loc flag 4: dynlib-style indirection marker */
if (!(((*sym0).loc.flags &(1U<<((NU)(((Tlocflag292810) 4))&15U)))!=0)) goto LA20;
add_178487_2381377266(&(*m0).s[(((Tcfilesection529005) 9))- 0], ((NimStringDesc*) &T839829468_53));
}
LA20: ;
{
/* symbol flag 8: extra storage-class keyword */
if (!(((*sym0).flags &(1U<<((NU)(((Tsymflag292184) 8))&31U)))!=0)) goto LA24;
add_178487_2381377266(&(*m0).s[(((Tcfilesection529005) 9))- 0], ((NimStringDesc*) &T839829468_121));
}
LA24: ;
{
/* symbol flag 7: another storage-class keyword */
if (!(((*sym0).flags &(1U<<((NU)(((Tsymflag292184) 7))&31U)))!=0)) goto LA28;
add_178487_2381377266(&(*m0).s[(((Tcfilesection529005) 9))- 0], ((NimStringDesc*) &T839829468_122));
}
LA28: ;
memset((void*)LOC30, 0, sizeof(LOC30));
LOC30[0] = (*sym0).loc.r;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 9))- 0], ((NimStringDesc*) &T839829468_242), LOC30, 1);
}
LA12: ;
}
LA10: ;
}BeforeRet: ;
}
/* Thin public wrapper around genvarprototypeaux (kept separate,
 * presumably to satisfy a forward-declared interface).
 * NOTE(review): Nim-compiler generated C. */
N_NIMCALL(void, genvarprototype_539236_839829468)(Tcgen529027* m0, Tsym292834* sym0) {
genvarprototypeaux_544254_839829468(m0, sym0);
}
/* Look up the compilerproc `name0`, make sure code for it exists in
 * module m0 (proc body, variable prototype, or type descriptor depending
 * on the symbol kind), and return its mangled C name (loc.r).
 * FIX: the original unconditionally executed `result0 = (*sym0).loc.r;`
 * after the lookup-failure branch, dereferencing sym0 == NIM_NIL.  That
 * is undefined behaviour unless rawmessage_* never returns, which this
 * translation unit does not guarantee.  The read is now guarded; on
 * failure the function returns NIM_NIL (result0's initial value). */
N_NIMCALL(Ropeobj178006*, cgsym_532403_839829468)(Tcgen529027* m0, NimStringDesc* name0) {
Ropeobj178006* result0;
Tsym292834* sym0;
result0 = (Ropeobj178006*)0;
sym0 = getcompilerproc_338746_3937434831(name0);
{
if (!!((sym0 == NIM_NIL))) goto LA3;
switch ((*sym0).kind) {
/* proc-like symbol kinds: generate the proc */
case ((Tsymkind292435) 12):
case ((Tsymkind292435) 13):
case ((Tsymkind292435) 15):
case ((Tsymkind292435) 14):
{
genproc_532951_839829468(m0, sym0);
}
break;
/* variable-like symbol kinds: emit an extern prototype */
case ((Tsymkind292435) 8):
case ((Tsymkind292435) 11):
case ((Tsymkind292435) 9):
{
genvarprototype_539236_839829468(m0, sym0);
}
break;
/* type symbol: force the type descriptor to be generated (value unused) */
case ((Tsymkind292435) 7):
{
Ropeobj178006* LOC8;
LOC8 = (Ropeobj178006*)0;
LOC8 = gettypedesc_535671_839829468(m0, (*sym0).typ);
}
break;
default:
{
/* any other kind registered as a compilerproc is a compiler bug */
NimStringDesc* LOC10;
LOC10 = (NimStringDesc*)0;
LOC10 = rawNewString(name0->Sup.len + reprEnum((NI)(*sym0).kind, (&NTI292435))->Sup.len + 9);
appendString(LOC10, ((NimStringDesc*) &T839829468_243));
appendString(LOC10, name0);
appendString(LOC10, ((NimStringDesc*) &T839829468_244));
appendString(LOC10, reprEnum((NI)(*sym0).kind, (&NTI292435)));
internalerror_196113_155036129(LOC10);
}
break;
}
}
goto LA1;
LA3: ;
{
/* compilerproc not found: report and fall through with a nil result */
rawmessage_194612_155036129(((Tmsgkind191002) 68), name0);
}
LA1: ;
{
/* guard added: only read loc.r from a valid symbol */
if (!(sym0 == NIM_NIL)) {
result0 = (*sym0).loc.r;
}
}
return result0;
}
/* Interpret the code-generator format string frmt0 and build a rope:
 *  - "$$"         -> literal '$'
 *  - "$#"         -> next positional argument
 *  - "$<digits>"  -> argument by 1-based index (also updates the cursor)
 *  - "$n"         -> newline unless option 10 suppresses it
 *  - "$N"         -> unconditional newline
 *  - "#ident"     -> cgsym(ident): pull in a compilerproc and splice its name
 *  - "#$<digits>" -> cgsym of the stringified indexed argument
 * Literal runs between '$'/'#' markers are copied through verbatim.
 * An out-of-range index or unknown '$' escape is an internal error.
 * NOTE(review): Nim-compiler generated C; index arithmetic is exact —
 * left byte-identical, comments only. */
N_NIMCALL(Ropeobj178006*, ropecg_532407_839829468)(Tcgen529027* m0, NimStringDesc* frmt0, Ropeobj178006** args0, NI args0Len0) {
Ropeobj178006* result0;
NI i0;
NI length0;
NI num0;
result0 = (Ropeobj178006*)0;
i0 = ((NI) 0);
length0 = (frmt0 ? frmt0->Sup.len : 0);
result0 = NIM_NIL;
num0 = ((NI) 0);
{
while (1) {
NI start0;
if (!(i0 < length0)) goto LA2;
{
/* '$' escape? */
if (!((NU8)(frmt0->data[i0]) == (NU8)(36))) goto LA5;
i0 += ((NI) 1);
switch (((NU8)(frmt0->data[i0]))) {
case 36:
{
/* "$$" -> literal dollar */
add_178487_2381377266(&result0, ((NimStringDesc*) &T839829468_19));
i0 += ((NI) 1);
}
break;
case 35:
{
/* "$#" -> next positional argument */
i0 += ((NI) 1);
add_178482_2381377266(&result0, args0[num0]);
num0 += ((NI) 1);
}
break;
case 48 ... 57:
{
/* "$<digits>" -> argument by 1-based index */
NI j0;
j0 = ((NI) 0);
{
while (1) {
j0 = (NI)((NI)((NI)(j0 * ((NI) 10)) + ((NI) (((NU8)(frmt0->data[i0]))))) - ((NI) 48));
i0 += ((NI) 1);
{
NIM_BOOL LOC14;
LOC14 = (NIM_BOOL)0;
LOC14 = (length0 <= i0);
if (LOC14) goto LA15;
LOC14 = !((((NU8)(frmt0->data[i0])) >= ((NU8)(48)) && ((NU8)(frmt0->data[i0])) <= ((NU8)(57))));
LA15: ;
if (!LOC14) goto LA16;
goto LA10;
}
LA16: ;
}
} LA10: ;
num0 = j0;
{
/* index past the argument list is an internal error */
NimStringDesc* LOC22;
NimStringDesc* LOC23;
if (!((NI)((args0Len0-1) + ((NI) 1)) < j0)) goto LA20;
LOC22 = (NimStringDesc*)0;
LOC23 = (NimStringDesc*)0;
LOC23 = nimIntToStr(j0);
LOC22 = rawNewString(LOC23->Sup.len + 30);
appendString(LOC22, ((NimStringDesc*) &T839829468_20));
appendString(LOC22, LOC23);
internalerror_196113_155036129(LOC22);
}
LA20: ;
add_178482_2381377266(&result0, args0[(NI)(j0 - ((NI) 1))]);
}
break;
case 110:
{
/* "$n" -> newline unless option 10 is set */
{
if (!!(((goptions_169128_2607990831 &(1U<<((NU)(((Toption169009) 10))&31U)))!=0))) goto LA27;
add_178482_2381377266(&result0, rnl_178903_2381377266);
}
LA27: ;
i0 += ((NI) 1);
}
break;
case 78:
{
/* "$N" -> unconditional newline */
add_178482_2381377266(&result0, rnl_178903_2381377266);
i0 += ((NI) 1);
}
break;
default:
{
/* unknown '$' escape */
NimStringDesc* LOC31;
LOC31 = (NimStringDesc*)0;
LOC31 = rawNewString(31);
appendString(LOC31, ((NimStringDesc*) &T839829468_20));
appendChar(LOC31, frmt0->data[i0]);
internalerror_196113_155036129(LOC31);
}
break;
}
}
goto LA3;
LA5: ;
{
/* "#ident" -> splice in cgsym(ident) */
NIM_BOOL LOC33;
NI j0;
NimStringDesc* ident0;
Ropeobj178006* LOC39;
LOC33 = (NIM_BOOL)0;
LOC33 = ((NU8)(frmt0->data[i0]) == (NU8)(35));
if (!(LOC33)) goto LA34;
LOC33 = (((NU8)(frmt0->data[(NI)(i0 + ((NI) 1))])) >= ((NU8)(97)) && ((NU8)(frmt0->data[(NI)(i0 + ((NI) 1))])) <= ((NU8)(122)) || ((NU8)(frmt0->data[(NI)(i0 + ((NI) 1))])) >= ((NU8)(65)) && ((NU8)(frmt0->data[(NI)(i0 + ((NI) 1))])) <= ((NU8)(90)) || ((NU8)(frmt0->data[(NI)(i0 + ((NI) 1))])) == ((NU8)(95)));
LA34: ;
if (!LOC33) goto LA35;
i0 += ((NI) 1);
j0 = i0;
{
/* scan the identifier: [A-Za-z0-9_]+ */
while (1) {
if (!(((NU8)(frmt0->data[j0])) >= ((NU8)(97)) && ((NU8)(frmt0->data[j0])) <= ((NU8)(122)) || ((NU8)(frmt0->data[j0])) >= ((NU8)(65)) && ((NU8)(frmt0->data[j0])) <= ((NU8)(90)) || ((NU8)(frmt0->data[j0])) >= ((NU8)(48)) && ((NU8)(frmt0->data[j0])) <= ((NU8)(57)) || ((NU8)(frmt0->data[j0])) == ((NU8)(95)))) goto LA38;
j0 += ((NI) 1);
} LA38: ;
}
ident0 = copyStrLast(frmt0, i0, (NI)(j0 - ((NI) 1)));
i0 = j0;
LOC39 = (Ropeobj178006*)0;
LOC39 = cgsym_532403_839829468(m0, ident0);
add_178482_2381377266(&result0, LOC39);
}
goto LA3;
LA35: ;
{
/* "#$<digits>" -> cgsym of the stringified indexed argument */
NIM_BOOL LOC41;
NI j0;
NimStringDesc* LOC47;
Ropeobj178006* LOC48;
LOC41 = (NIM_BOOL)0;
LOC41 = ((NU8)(frmt0->data[i0]) == (NU8)(35));
if (!(LOC41)) goto LA42;
LOC41 = ((NU8)(frmt0->data[(NI)(i0 + ((NI) 1))]) == (NU8)(36));
LA42: ;
if (!LOC41) goto LA43;
i0 += ((NI) 2);
j0 = ((NI) 0);
{
while (1) {
if (!(((NU8)(frmt0->data[i0])) >= ((NU8)(48)) && ((NU8)(frmt0->data[i0])) <= ((NU8)(57)))) goto LA46;
j0 = (NI)((NI)((NI)(j0 * ((NI) 10)) + ((NI) (((NU8)(frmt0->data[i0]))))) - ((NI) 48));
i0 += ((NI) 1);
} LA46: ;
}
LOC47 = (NimStringDesc*)0;
LOC47 = HEX24_178856_2381377266(args0[(NI)(j0 - ((NI) 1))]);
LOC48 = (Ropeobj178006*)0;
LOC48 = cgsym_532403_839829468(m0, LOC47);
add_178482_2381377266(&result0, LOC48);
}
goto LA3;
LA43: ;
LA3: ;
/* copy the literal run up to the next '$' or '#' */
start0 = i0;
{
while (1) {
if (!(i0 < length0)) goto LA50;
{
NIM_BOOL LOC53;
LOC53 = (NIM_BOOL)0;
LOC53 = !(((NU8)(frmt0->data[i0]) == (NU8)(36)));
if (!(LOC53)) goto LA54;
LOC53 = !(((NU8)(frmt0->data[i0]) == (NU8)(35)));
LA54: ;
if (!LOC53) goto LA55;
i0 += ((NI) 1);
}
goto LA51;
LA55: ;
{
goto LA49;
}
LA51: ;
} LA50: ;
} LA49: ;
{
NimStringDesc* LOC62;
if (!(start0 <= (NI)(i0 - ((NI) 1)))) goto LA60;
LOC62 = (NimStringDesc*)0;
LOC62 = copyStrLast(frmt0, start0, (NI)(i0 - ((NI) 1)));
add_178487_2381377266(&result0, LOC62);
}
LA60: ;
} LA2: ;
}
return result0;
}
static N_INLINE(NIM_BOOL, crossescppboundary_560754_839829468)(Tcgen529027* m0, Tsym292834* sym0) {
/* True when the current module has symbol flag 27 set, the module owning
 * sym0 does not, and the active command is not command 2 — presumably
 * detecting a C/C++ compilation-mode boundary.  Early-return rewrite of
 * the generated boolean chain; getmodule() is still only called when the
 * first test holds, exactly as in the original. */
Tsym292834* symmod0;
if (!((((*(*m0).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0))) return (NIM_BOOL)0;
symmod0 = getmodule_299123_2984716966(sym0);
if ((((*symmod0).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0)) return (NIM_BOOL)0;
return (NIM_BOOL)(!((gcmd_169132_2607990831 == ((Tcommands169076) 2))));
}
/* Emit the C prototype for proc sym0 into module m0.
 * "No decl" procs (loc flag 3) emit nothing.  Dynlib procs (loc flag 4)
 * owned by another module get an extern proc-pointer declaration instead.
 * Ordinary procs get their header from genprocheader, decorated with
 * compiler-specific attribute prefixes/suffixes depending on symbol
 * flags 9/14 and the current C compiler's capability bits, plus an
 * extern "C"-style prefix when the C++ boundary is crossed; the result
 * goes into file section 7, once per symbol id.
 * NOTE(review): Nim-compiler generated C; keep statement order. */
N_NIMCALL(void, genprocprototype_539254_839829468)(Tcgen529027* m0, Tsym292834* sym0) {
{ useheader_532369_839829468(m0, sym0);
{
/* loc flag 3 ("no decl"): nothing to emit */
if (!(((*sym0).loc.flags &(1U<<((NU)(((Tlocflag292810) 3))&15U)))!=0)) goto LA3;
goto BeforeRet;
}
LA3: ;
{
/* loc flag 4: dynlib proc-pointer, declared extern if foreign-owned */
if (!(((*sym0).loc.flags &(1U<<((NU)(((Tlocflag292810) 4))&15U)))!=0)) goto LA7;
{
NIM_BOOL LOC11;
Tsym292834* LOC12;
NIM_BOOL LOC14;
TY532811 LOC17;
Ropeobj178006* LOC18;
LOC11 = (NIM_BOOL)0;
LOC12 = (Tsym292834*)0;
LOC12 = getmodule_299123_2984716966(sym0);
LOC11 = !(((*LOC12).Sup.id == (*(*m0).module).Sup.id));
if (!(LOC11)) goto LA13;
LOC14 = (NIM_BOOL)0;
LOC14 = containsorincl_268862_2627731572((&(*m0).declaredthings), (*sym0).Sup.id);
LOC11 = !(LOC14);
LA13: ;
if (!LOC11) goto LA15;
memset((void*)LOC17, 0, sizeof(LOC17));
LOC17[0] = gettypedesc_535671_839829468(m0, (*sym0).loc.t);
LOC17[1] = mangledynlibproc_538816_839829468(sym0);
LOC18 = (Ropeobj178006*)0;
LOC18 = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_245), LOC17, 2);
add_178482_2381377266(&(*m0).s[(((Tcfilesection529005) 9))- 0], LOC18);
}
LA15: ;
}
goto LA5;
LA7: ;
{
/* ordinary proc prototype, emitted once per symbol id */
NIM_BOOL LOC20;
Ropeobj178006* header0;
TY178507 LOC47;
Ropeobj178006* LOC48;
LOC20 = (NIM_BOOL)0;
LOC20 = containsorincl_268862_2627731572((&(*m0).declaredprotos), (*sym0).Sup.id);
if (!!(LOC20)) goto LA21;
header0 = genprocheader_535867_839829468(m0, sym0);
{
/* symbol flag 14 + compiler capability bit 6: attribute prefix */
NIM_BOOL LOC25;
LOC25 = (NIM_BOOL)0;
LOC25 = (((*sym0).flags &(1U<<((NU)(((Tsymflag292184) 14))&31U)))!=0);
if (!(LOC25)) goto LA26;
LOC25 = ((Cc_273413_2528170400[(ccompiler_273431_2528170400)- 1].Field20 &(1U<<((NU)(((Tinfoccprop273004) 6))&7U)))!=0);
LA26: ;
if (!LOC25) goto LA27;
header0 = HEX26_178452_2381377266(((NimStringDesc*) &T839829468_213), header0);
}
LA27: ;
{
/* non-inline proc crossing the C/C++ boundary: extern-"C" prefix */
NIM_BOOL LOC31;
LOC31 = (NIM_BOOL)0;
LOC31 = !(((*(*sym0).typ).callconv == ((Tcallingconvention292002) 5)));
if (!(LOC31)) goto LA32;
LOC31 = crossescppboundary_560754_839829468(m0, sym0);
LA32: ;
if (!LOC31) goto LA33;
header0 = HEX26_178452_2381377266(((NimStringDesc*) &T839829468_246), header0);
}
LA33: ;
{
/* symbol flag 9 + compiler capability bit 7: attribute suffix */
NIM_BOOL LOC37;
LOC37 = (NIM_BOOL)0;
LOC37 = (((*sym0).flags &(1U<<((NU)(((Tsymflag292184) 9))&31U)))!=0);
if (!(LOC37)) goto LA38;
LOC37 = ((Cc_273413_2528170400[(ccompiler_273431_2528170400)- 1].Field20 &(1U<<((NU)(((Tinfoccprop273004) 7))&7U)))!=0);
LA38: ;
if (!LOC37) goto LA39;
add_178487_2381377266(&header0, ((NimStringDesc*) &T839829468_247));
}
LA39: ;
{
/* symbol flag 14 + compiler capability bit 7: another suffix */
NIM_BOOL LOC43;
LOC43 = (NIM_BOOL)0;
LOC43 = (((*sym0).flags &(1U<<((NU)(((Tsymflag292184) 14))&31U)))!=0);
if (!(LOC43)) goto LA44;
LOC43 = ((Cc_273413_2528170400[(ccompiler_273431_2528170400)- 1].Field20 &(1U<<((NU)(((Tinfoccprop273004) 7))&7U)))!=0);
LA44: ;
if (!LOC43) goto LA45;
add_178487_2381377266(&header0, ((NimStringDesc*) &T839829468_248));
}
LA45: ;
memset((void*)LOC47, 0, sizeof(LOC47));
LOC47[0] = header0;
LOC48 = (Ropeobj178006*)0;
LOC48 = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_191), LOC47, 1);
add_178482_2381377266(&(*m0).s[(((Tcfilesection529005) 7))- 0], LOC48);
}
goto LA5;
LA21: ;
LA5: ;
}BeforeRet: ;
}
static N_INLINE(NIM_BOOL, usesnativegc_169177_2607990831)(void) {
NIM_BOOL result0;
result0 = (NIM_BOOL)0;
result0 = (((Tgcmode169080) 5) <= gselectedgc_169133_2607990831);
return result0;
}
N_NIMCALL(void, genrefassign_538311_839829468)(Tcproc529021* p0, Tloc292816 dest0, Tloc292816 src0, Tassignmentflag538302Set flags0) {
/* Emit the C statement assigning one traced reference to another,
 * choosing the GC write barrier by destination storage:
 *  - stack destination (storage 2) or no native GC: plain assignment
 *    (format T..._123), no barrier;
 *  - heap/global destination (storage 3): cycle-aware barrier (T..._249)
 *    when the type can form a reference cycle, else the cycle-free
 *    barrier (T..._250);
 *  - anything else: the conservative generic barrier (T..._251).
 * flags0 is unused here but kept for signature compatibility.
 * Behaviour-preserving if/else rewrite of the generated goto dispatch;
 * short-circuit evaluation preserves the original call order. */
if (dest0.s == ((Tstorageloc292812) 2) || !(usesnativegc_169177_2607990831())) {
TY532811 fmtargs0;
memset((void*)fmtargs0, 0, sizeof(fmtargs0));
fmtargs0[0] = rdloc_538188_839829468(dest0);
fmtargs0[1] = rdloc_538188_839829468(src0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_123), fmtargs0, 2);
} else if (dest0.s == ((Tstorageloc292812) 3)) {
if (canformacycle_320123_3876443242(dest0.t)) {
TY532811 fmtargs0;
memset((void*)fmtargs0, 0, sizeof(fmtargs0));
fmtargs0[0] = addrloc_538204_839829468(dest0);
fmtargs0[1] = rdloc_538188_839829468(src0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_249), fmtargs0, 2);
} else {
TY532811 fmtargs0;
memset((void*)fmtargs0, 0, sizeof(fmtargs0));
fmtargs0[0] = addrloc_538204_839829468(dest0);
fmtargs0[1] = rdloc_538188_839829468(src0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_250), fmtargs0, 2);
}
} else {
TY532811 fmtargs0;
memset((void*)fmtargs0, 0, sizeof(fmtargs0));
fmtargs0[0] = addrloc_538204_839829468(dest0);
fmtargs0[1] = rdloc_538188_839829468(src0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_251), fmtargs0, 2);
}
}
N_NIMCALL(void, optasgnloc_549788_839829468)(Tloc292816 a0, Ttype292840* t0, Ropeobj178006* field0, Tloc292816* Result) {
/* Build a field-access location from base location a0: kind 5 loc with
 * the same storage, type t0, and rope "<a0><sep T..._257><field0>".
 * unsureAsgnRef is the GC-safe write used for ref fields of Result.
 * Behaviour-preserving restyle: the two rope temporaries are folded into
 * one expression; the call order rdloc -> & -> & is unchanged. */
Ropeobj178006* baserope0;
(*Result).k = ((Tlockind292808) 5);
(*Result).s = a0.s;
unsureAsgnRef((void**) (&(*Result).t), t0);
baserope0 = HEX26_178447_2381377266(rdloc_538188_839829468(a0), ((NimStringDesc*) &T839829468_257));
unsureAsgnRef((void**) (&(*Result).r), HEX26_178418_2381377266(baserope0, field0));
}
/* Optimized tuple assignment: assign field-by-field instead of as a blob.
 * First adjust the assignment flags (set bit 0 for an rvalue source,
 * clear it when the destination type carries type flag 6), then iterate
 * the unique tuple type's element types, building matching "dest.Field_i"
 * and "src.Field_i" locations and recursively generating each assignment.
 * NOTE(review): Nim-compiler generated C; keep statement order. */
N_NIMCALL(void, genoptasgntuple_550001_839829468)(Tcproc529021* p0, Tloc292816 dest0, Tloc292816 src0, Tassignmentflag538302Set flags0) {
Tassignmentflag538302Set newflags0;
Ttype292840* t_550053_839829468;
Ttype292840* LOC9;
{
/* rvalue source: mark flag bit 0 */
if (!(src0.s == ((Tstorageloc292812) 1))) goto LA3;
newflags0 = (flags0 | 1);
}
goto LA1;
LA3: ;
{
/* destination type flag 6: clear flag bit 0 */
if (!(((*dest0.t).flags &(1U<<((NU)(((Ttypeflag292431) 6))&31U)))!=0)) goto LA6;
newflags0 = (flags0 & ~ 1);
}
goto LA1;
LA6: ;
{
newflags0 = flags0;
}
LA1: ;
/* canonical tuple type after skipping wrapper type kinds */
LOC9 = (Ttype292840*)0;
LOC9 = skiptypes_296099_850551059(dest0.t, IL64(211106232576256));
t_550053_839829468 = getuniquetype_528640_2036603609(LOC9);
{
NI i_550071_839829468;
NI HEX3Atmp_550077_839829468;
NI LOC11;
NI res_550080_839829468;
i_550071_839829468 = (NI)0;
HEX3Atmp_550077_839829468 = (NI)0;
LOC11 = (NI)0;
LOC11 = len_295339_850551059(t_550053_839829468);
HEX3Atmp_550077_839829468 = (LOC11 - 1);
res_550080_839829468 = ((NI) 0);
{
while (1) {
Ttype292840* t0;
Ropeobj178006* field0;
TY178507 LOC14;
Tloc292816 LOC15;
Tloc292816 LOC16;
if (!(res_550080_839829468 <= HEX3Atmp_550077_839829468)) goto LA13;
i_550071_839829468 = res_550080_839829468;
t0 = (*t_550053_839829468).sons->data[i_550071_839829468];
/* field name "Field<i>" via format T..._260 */
memset((void*)LOC14, 0, sizeof(LOC14));
LOC14[0] = rope_178401_2381377266(((NI64) (i_550071_839829468)));
field0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_260), LOC14, 1);
memset((void*)(&LOC15), 0, sizeof(LOC15));
optasgnloc_549788_839829468(dest0, t0, field0, (&LOC15));
memset((void*)(&LOC16), 0, sizeof(LOC16));
optasgnloc_549788_839829468(src0, t0, field0, (&LOC16));
genassignment_539264_839829468(p0, LOC15, LOC16, newflags0);
res_550080_839829468 += ((NI) 1);
} LA13: ;
}
}
}
/* Assign a value of a "complex" (GC-relevant) type generically.
 * If a shallow copy is allowed (flag bit 0 clear, or the skipped type
 * carries type flag 6) and the destination is on the stack or no native
 * GC is used, emit a memcpy-style copy (format T..._261, requires
 * string.h); otherwise emit the runtime shallow-copy call (T..._262).
 * In all other cases emit the full genericAssign runtime call (T..._263),
 * passing the type info so the runtime can walk the type.
 * NOTE(review): Nim-compiler generated C; keep statement order. */
N_NIMCALL(void, gengenericasgn_550167_839829468)(Tcproc529021* p0, Tloc292816 dest0, Tloc292816 src0, Tassignmentflag538302Set flags0) {
{
NIM_BOOL LOC3;
Ttype292840* LOC5;
LOC3 = (NIM_BOOL)0;
LOC3 = !(((flags0 &(1U<<((NU)(((Tassignmentflag538302) 0))&7U)))!=0));
if (LOC3) goto LA4;
LOC5 = (Ttype292840*)0;
LOC5 = skiptypes_296099_850551059(dest0.t, IL64(211106242013440));
LOC3 = (((*LOC5).flags &(1U<<((NU)(((Ttypeflag292431) 6))&31U)))!=0);
LA4: ;
if (!LOC3) goto LA6;
{
/* stack destination or no native GC: raw memory copy */
NIM_BOOL LOC10;
NIM_BOOL LOC12;
TY535238 LOC15;
LOC10 = (NIM_BOOL)0;
LOC10 = (dest0.s == ((Tstorageloc292812) 2));
if (LOC10) goto LA11;
LOC12 = (NIM_BOOL)0;
LOC12 = usesnativegc_169177_2607990831();
LOC10 = !(LOC12);
LA11: ;
if (!LOC10) goto LA13;
usestringh_532345_839829468((*p0).module);
memset((void*)LOC15, 0, sizeof(LOC15));
LOC15[0] = addrloc_538204_839829468(dest0);
LOC15[1] = addrloc_538204_839829468(src0);
LOC15[2] = rdloc_538188_839829468(dest0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_261), LOC15, 3);
}
goto LA8;
LA13: ;
{
/* runtime shallow copy with type info */
TY535238 LOC17;
memset((void*)LOC17, 0, sizeof(LOC17));
LOC17[0] = addrloc_538204_839829468(dest0);
LOC17[1] = addrloc_538204_839829468(src0);
LOC17[2] = gentypeinfo_535941_839829468((*p0).module, dest0.t);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_262), LOC17, 3);
}
LA8: ;
}
goto LA1;
LA6: ;
{
/* full generic deep assignment with type info */
TY535238 LOC19;
memset((void*)LOC19, 0, sizeof(LOC19));
LOC19[0] = addrloc_538204_839829468(dest0);
LOC19[1] = addrloc_538204_839829468(src0);
LOC19[2] = gentypeinfo_535941_839829468((*p0).module, dest0.t);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_263), LOC19, 3);
}
LA1: ;
}
N_NIMCALL(NI, asgncomplexity_549750_839829468)(Tnode292802* n0) {
/* Recursively estimate the cost of assigning AST node n0:
 * a symbol leaf (node kind 3) costs 1, a kind-139 node costs 100, and a
 * kind-138 record-like node costs the sum of its sons; everything else
 * (including a nil node) costs 0.  Behaviour-preserving rewrite of the
 * generated goto/while iteration as a plain counted for-loop. */
NI total0;
total0 = (NI)0;
if (!(n0 == NIM_NIL)) {
switch ((*n0).kind) {
case ((Tnodekind292020) 3):
{
total0 = ((NI) 1);
}
break;
case ((Tnodekind292020) 139):
{
total0 = ((NI) 100);
}
break;
case ((Tnodekind292020) 138):
{
NI sonidx0;
NI soncount0;
soncount0 = len_293081_850551059(n0);
for (sonidx0 = ((NI) 0); sonidx0 < soncount0; sonidx0 += ((NI) 1)) {
total0 += asgncomplexity_549750_839829468((*n0).kindU.S6.sons->data[sonidx0]);
}
}
break;
default:
{
}
break;
}
}
return total0;
}
/* Machine-generated (Nim cgen): emit an optimized, field-by-field assignment
   of object `src0` into `dest0`, walking the record description node `t0`.
   `newflags0` tweaks the assignment flags per the source storage class and a
   destination type flag before recursing; semantics of the numeric enum
   values should be confirmed against the Nim compiler sources. */
N_NIMCALL(void, genoptasgnobject_550084_839829468)(Tcproc529021* p0, Tloc292816 dest0, Tloc292816 src0, Tassignmentflag538302Set flags0, Tnode292802* t0) {
Tassignmentflag538302Set newflags0;
{ {
/* nothing to do for a missing record description */
if (!(t0 == NIM_NIL)) goto LA3;
goto BeforeRet;
}
LA3: ;
{
/* source in storage class 1: force flag bit 0 on */
if (!(src0.s == ((Tstorageloc292812) 1))) goto LA7;
newflags0 = (flags0 | 1);
}
goto LA5;
LA7: ;
{
/* destination type has flag bit 6 set: force flag bit 0 off */
if (!(((*dest0.t).flags &(1U<<((NU)(((Ttypeflag292431) 6))&31U)))!=0)) goto LA10;
newflags0 = (flags0 & ~ 1);
}
goto LA5;
LA10: ;
{
/* otherwise propagate the caller's flags unchanged */
newflags0 = flags0;
}
LA5: ;
switch ((*t0).kind) {
case ((Tnodekind292020) 3):
{
/* single field symbol: build sub-locs for dest.field / src.field and
   emit one assignment for that field */
Tsym292834* field0;
Tloc292816 LOC14;
Tloc292816 LOC15;
field0 = (*t0).kindU.S4.sym;
memset((void*)(&LOC14), 0, sizeof(LOC14));
optasgnloc_549788_839829468(dest0, (*field0).typ, (*field0).loc.r, (&LOC14));
memset((void*)(&LOC15), 0, sizeof(LOC15));
optasgnloc_549788_839829468(src0, (*field0).typ, (*field0).loc.r, (&LOC15));
genassignment_539264_839829468(p0, LOC14, LOC15, newflags0);
}
break;
case ((Tnodekind292020) 138):
{
/* record list: recurse into every son with the adjusted flags */
{
Tnode292802* child_550155_839829468;
child_550155_839829468 = (Tnode292802*)0;
{
NI i_550160_839829468;
NI HEX3Atmp_550162_839829468;
NI LOC19;
NI res_550164_839829468;
i_550160_839829468 = (NI)0;
HEX3Atmp_550162_839829468 = (NI)0;
LOC19 = (NI)0;
LOC19 = len_293081_850551059(t0);
HEX3Atmp_550162_839829468 = (LOC19 - 1);
res_550164_839829468 = ((NI) 0);
{
while (1) {
if (!(res_550164_839829468 <= HEX3Atmp_550162_839829468)) goto LA21;
i_550160_839829468 = res_550164_839829468;
child_550155_839829468 = (*t0).kindU.S6.sons->data[i_550160_839829468];
genoptasgnobject_550084_839829468(p0, dest0, src0, newflags0, child_550155_839829468);
res_550164_839829468 += ((NI) 1);
} LA21: ;
}
}
}
}
break;
default:
{
/* other node kinds are silently ignored */
}
break;
}
}BeforeRet: ;
}
/* Machine-generated (Nim cgen): genAssignment.  Emits C statements into the
   body section of proc `p0` that copy `src0` into `dest0`, dispatching on
   the destination's type kind after skipping type aliases.  The numeric
   Ttypekind values and the T839829468_* format templates come from the Nim
   compiler; per-case intent below is inferred from the helper calls and
   should be confirmed against compiler/ccgexprs.nim. */
N_NIMCALL(void, genassignment_539264_839829468)(Tcproc529021* p0, Tloc292816 dest0, Tloc292816 src0, Tassignmentflag538302Set flags0) {
Ttype292840* ty0;
{ {
/* early out: a source of type kind 21 is handled by the plain
   assignment template (T..._123) */
NIM_BOOL LOC3;
TY532811 LOC7;
LOC3 = (NIM_BOOL)0;
LOC3 = !((src0.t == NIM_NIL));
if (!(LOC3)) goto LA4;
LOC3 = ((*src0.t).kind == ((Ttypekind292244) 21));
LA4: ;
if (!LOC3) goto LA5;
memset((void*)LOC7, 0, sizeof(LOC7));
LOC7[0] = rdloc_538188_839829468(dest0);
LOC7[1] = rdloc_538188_839829468(src0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_123), LOC7, 2);
goto BeforeRet;
}
LA5: ;
ty0 = skiptypes_296099_850551059(dest0.t, IL64(211106233624832));
switch ((*ty0).kind) {
case ((Ttypekind292244) 22):
{
/* ref-like destination: delegate to the ref-assignment emitter */
genrefassign_538311_839829468(p0, dest0, src0, flags0);
}
break;
case ((Ttypekind292244) 24):
{
/* seq-like: shallow ref assign unless flag bit 0 or literal-storage
   source requires a runtime generic assign with RTTI */
{
NIM_BOOL LOC12;
LOC12 = (NIM_BOOL)0;
LOC12 = !(((flags0 &(1U<<((NU)(((Tassignmentflag538302) 0))&7U)))!=0));
if (!(LOC12)) goto LA13;
LOC12 = !((src0.s == ((Tstorageloc292812) 1)));
LA13: ;
if (!LOC12) goto LA14;
genrefassign_538311_839829468(p0, dest0, src0, flags0);
}
goto LA10;
LA14: ;
{
TY535238 LOC17;
memset((void*)LOC17, 0, sizeof(LOC17));
LOC17[0] = addrloc_538204_839829468(dest0);
LOC17[1] = rdloc_538188_839829468(src0);
LOC17[2] = gentypeinfo_535941_839829468((*p0).module, dest0.t);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_252), LOC17, 3);
}
LA10: ;
}
break;
case ((Ttypekind292244) 28):
{
/* string-like: several strategies depending on storage class and GC */
{
NIM_BOOL LOC21;
LOC21 = (NIM_BOOL)0;
LOC21 = !(((flags0 &(1U<<((NU)(((Tassignmentflag538302) 0))&7U)))!=0));
if (!(LOC21)) goto LA22;
LOC21 = !((src0.s == ((Tstorageloc292812) 1)));
LA22: ;
if (!LOC21) goto LA23;
genrefassign_538311_839829468(p0, dest0, src0, flags0);
}
goto LA19;
LA23: ;
{
{
/* heap destination (s==2) or no native GC: direct template _253 */
NIM_BOOL LOC28;
NIM_BOOL LOC30;
TY532811 LOC33;
LOC28 = (NIM_BOOL)0;
LOC28 = (dest0.s == ((Tstorageloc292812) 2));
if (LOC28) goto LA29;
LOC30 = (NIM_BOOL)0;
LOC30 = usesnativegc_169177_2607990831();
LOC28 = !(LOC30);
LA29: ;
if (!LOC28) goto LA31;
memset((void*)LOC33, 0, sizeof(LOC33));
LOC33[0] = rdloc_538188_839829468(dest0);
LOC33[1] = rdloc_538188_839829468(src0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_253), LOC33, 2);
}
goto LA26;
LA31: ;
{
/* storage class 3: route the assignment through a fresh temp
   (templates _254 then _255) */
Tloc292816 tmp0;
TY535238 LOC37;
TY178507 LOC38;
if (!(dest0.s == ((Tstorageloc292812) 3))) goto LA35;
memset((void*)(&tmp0), 0, sizeof(tmp0));
gettemp_537032_839829468(p0, ty0, (&tmp0), NIM_FALSE);
memset((void*)LOC37, 0, sizeof(LOC37));
LOC37[0] = rdloc_538188_839829468(dest0);
LOC37[1] = rdloc_538188_839829468(src0);
LOC37[2] = rdloc_538188_839829468(tmp0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_254), LOC37, 3);
memset((void*)LOC38, 0, sizeof(LOC38));
LOC38[0] = rdloc_538188_839829468(tmp0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_255), LOC38, 1);
}
goto LA26;
LA35: ;
{
/* fallback: assign through the destination's address (template _256) */
TY532811 LOC40;
memset((void*)LOC40, 0, sizeof(LOC40));
LOC40[0] = addrloc_538204_839829468(dest0);
LOC40[1] = rdloc_538188_839829468(src0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_256), LOC40, 2);
}
LA26: ;
}
LA19: ;
}
break;
case ((Ttypekind292244) 25):
{
/* proc type: when a complex assignment is needed (presumably a proc
   with environment), ref-assign the field named by literal _258 and
   then copy the proc pointer via template _259 */
{
NIM_BOOL LOC44;
Tloc292816 a0;
Ropeobj178006* LOC47;
Tloc292816 LOC48;
Tloc292816 b0;
Ropeobj178006* LOC49;
Tloc292816 LOC50;
TY532811 LOC51;
LOC44 = (NIM_BOOL)0;
LOC44 = needscomplexassignment_533509_839829468(dest0.t);
if (!LOC44) goto LA45;
memset((void*)(&a0), 0, sizeof(a0));
LOC47 = (Ropeobj178006*)0;
LOC47 = rope_178277_2381377266(((NimStringDesc*) &T839829468_258));
memset((void*)(&LOC48), 0, sizeof(LOC48));
optasgnloc_549788_839829468(dest0, dest0.t, LOC47, (&LOC48));
memcpy((void*)(&a0), (NIM_CONST void*)(&LOC48), sizeof(a0));
memset((void*)(&b0), 0, sizeof(b0));
LOC49 = (Ropeobj178006*)0;
LOC49 = rope_178277_2381377266(((NimStringDesc*) &T839829468_258));
memset((void*)(&LOC50), 0, sizeof(LOC50));
optasgnloc_549788_839829468(src0, dest0.t, LOC49, (&LOC50));
memcpy((void*)(&b0), (NIM_CONST void*)(&LOC50), sizeof(b0));
genrefassign_538311_839829468(p0, a0, b0, flags0);
memset((void*)LOC51, 0, sizeof(LOC51));
LOC51[0] = rdloc_538188_839829468(dest0);
LOC51[1] = rdloc_538188_839829468(src0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_259), LOC51, 2);
}
goto LA42;
LA45: ;
{
/* simple proc value: plain C assignment */
TY532811 LOC53;
memset((void*)LOC53, 0, sizeof(LOC53));
LOC53[0] = rdloc_538188_839829468(dest0);
LOC53[1] = rdloc_538188_839829468(src0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_123), LOC53, 2);
}
LA42: ;
}
break;
case ((Ttypekind292244) 18):
{
/* tuple-like: small complex tuples (<=4 elements) get an unrolled
   assignment; larger ones fall back to the generic assign */
{
NIM_BOOL LOC57;
LOC57 = (NIM_BOOL)0;
LOC57 = needscomplexassignment_533509_839829468(dest0.t);
if (!LOC57) goto LA58;
{
NI LOC62;
LOC62 = (NI)0;
LOC62 = len_295339_850551059(dest0.t);
if (!(LOC62 <= ((NI) 4))) goto LA63;
genoptasgntuple_550001_839829468(p0, dest0, src0, flags0);
}
goto LA60;
LA63: ;
{
gengenericasgn_550167_839829468(p0, dest0, src0, flags0);
}
LA60: ;
}
goto LA55;
LA58: ;
{
TY532811 LOC67;
memset((void*)LOC67, 0, sizeof(LOC67));
LOC67[0] = rdloc_538188_839829468(dest0);
LOC67[1] = rdloc_538188_839829468(src0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_123), LOC67, 2);
}
LA55: ;
}
break;
case ((Ttypekind292244) 17):
{
/* object-like destination */
{
/* imported C++ type: only a plain C assignment is legal */
NIM_BOOL LOC71;
TY532811 LOC74;
LOC71 = (NIM_BOOL)0;
LOC71 = isimportedcpptype_533476_839829468(ty0);
if (!LOC71) goto LA72;
memset((void*)LOC74, 0, sizeof(LOC74));
LOC74[0] = rdloc_538188_839829468(dest0);
LOC74[1] = rdloc_538188_839829468(src0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_123), LOC74, 2);
}
goto LA69;
LA72: ;
{
/* object that carries a type header field: must use the generic
   runtime assign to keep the header intact */
NIM_BOOL LOC76;
LOC76 = (NIM_BOOL)0;
LOC76 = isobjlackingtypefield_533513_839829468(ty0);
if (!!(LOC76)) goto LA77;
gengenericasgn_550167_839829468(p0, dest0, src0, flags0);
}
goto LA69;
LA77: ;
{
NIM_BOOL LOC80;
LOC80 = (NIM_BOOL)0;
LOC80 = needscomplexassignment_533509_839829468(ty0);
if (!LOC80) goto LA81;
{
/* no base type (sons[0] == 0) and small assignment complexity:
   unroll the copy field-by-field via genoptasgnobject */
NIM_BOOL LOC85;
NI LOC87;
Ropeobj178006* LOC90;
LOC85 = (NIM_BOOL)0;
LOC85 = (*ty0).sons->data[((NI) 0)] == 0;
if (!(LOC85)) goto LA86;
LOC87 = (NI)0;
LOC87 = asgncomplexity_549750_839829468((*ty0).n);
LOC85 = (LOC87 <= ((NI) 4));
LA86: ;
if (!LOC85) goto LA88;
LOC90 = (Ropeobj178006*)0;
LOC90 = gettypedesc_535671_839829468((*p0).module, ty0);
ty0 = getuniquetype_528640_2036603609(ty0);
{
/* internal sanity check: the unique type must carry a record node */
NimStringDesc* LOC95;
if (!!(!(((*ty0).n == NIM_NIL)))) goto LA93;
LOC95 = (NimStringDesc*)0;
LOC95 = HEX24_196185_1689653243(T839829468_264);
internalerror_196113_155036129(LOC95);
}
LA93: ;
genoptasgnobject_550084_839829468(p0, dest0, src0, flags0, (*ty0).n);
}
goto LA83;
LA88: ;
{
gengenericasgn_550167_839829468(p0, dest0, src0, flags0);
}
LA83: ;
}
goto LA69;
LA81: ;
{
/* simple object: plain C assignment */
TY532811 LOC98;
memset((void*)LOC98, 0, sizeof(LOC98));
LOC98[0] = rdloc_538188_839829468(dest0);
LOC98[1] = rdloc_538188_839829468(src0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_123), LOC98, 2);
}
LA69: ;
}
break;
case ((Ttypekind292244) 16):
case ((Ttypekind292244) 4):
{
/* array-like: generic assign when complex, otherwise a memcpy-style
   template (_261) -- hence the <string.h> usage marker */
{
NIM_BOOL LOC102;
LOC102 = (NIM_BOOL)0;
LOC102 = needscomplexassignment_533509_839829468(dest0.t);
if (!LOC102) goto LA103;
gengenericasgn_550167_839829468(p0, dest0, src0, flags0);
}
goto LA100;
LA103: ;
{
TY535238 LOC106;
usestringh_532345_839829468((*p0).module);
memset((void*)LOC106, 0, sizeof(LOC106));
LOC106[0] = rdloc_538188_839829468(dest0);
LOC106[1] = rdloc_538188_839829468(src0);
LOC106[2] = gettypedesc_535671_839829468((*p0).module, ty0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_261), LOC106, 3);
}
LA100: ;
}
break;
case ((Ttypekind292244) 27):
case ((Ttypekind292244) 48):
{
/* open-array-like kinds: RTTI-based copy when complex, else a
   memcpy-style template (_267) */
{
NIM_BOOL LOC110;
TY535238 LOC113;
LOC110 = (NIM_BOOL)0;
LOC110 = needscomplexassignment_533509_839829468(dest0.t);
if (!LOC110) goto LA111;
memset((void*)LOC113, 0, sizeof(LOC113));
LOC113[0] = addrloc_538204_839829468(dest0);
LOC113[1] = addrloc_538204_839829468(src0);
LOC113[2] = gentypeinfo_535941_839829468((*p0).module, dest0.t);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_266), LOC113, 3);
}
goto LA108;
LA111: ;
{
TY532811 LOC115;
usestringh_532345_839829468((*p0).module);
memset((void*)LOC115, 0, sizeof(LOC115));
LOC115[0] = rdloc_538188_839829468(dest0);
LOC115[1] = rdloc_538188_839829468(src0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_267), LOC115, 2);
}
LA108: ;
}
break;
case ((Ttypekind292244) 19):
{
/* set type: when it maps to C type kind 17 (presumably an array-backed
   big set) memcpy getsize() bytes, otherwise assign as a scalar */
{
Tctypekind529007 LOC119;
TY535238 LOC122;
NI64 LOC123;
LOC119 = (Tctypekind529007)0;
LOC119 = maptype_533393_839829468(ty0);
if (!(LOC119 == ((Tctypekind529007) 17))) goto LA120;
usestringh_532345_839829468((*p0).module);
memset((void*)LOC122, 0, sizeof(LOC122));
LOC122[0] = rdloc_538188_839829468(dest0);
LOC122[1] = rdloc_538188_839829468(src0);
LOC123 = (NI64)0;
LOC123 = getsize_320135_3876443242(dest0.t);
LOC122[2] = rope_178401_2381377266(LOC123);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_268), LOC122, 3);
}
goto LA117;
LA120: ;
{
TY532811 LOC125;
memset((void*)LOC125, 0, sizeof(LOC125));
LOC125[0] = rdloc_538188_839829468(dest0);
LOC125[1] = rdloc_538188_839829468(src0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_123), LOC125, 2);
}
LA117: ;
}
break;
case ((Ttypekind292244) 21):
case ((Ttypekind292244) 26):
case ((Ttypekind292244) 2):
case ((Ttypekind292244) 1):
case ((Ttypekind292244) 14):
case ((Ttypekind292244) 29):
case ((Ttypekind292244) 31) ... ((Ttypekind292244) 44):
case ((Ttypekind292244) 20):
case ((Ttypekind292244) 23):
{
/* plain scalar kinds: a straight C assignment (template _123) */
TY532811 LOC127;
memset((void*)LOC127, 0, sizeof(LOC127));
LOC127[0] = rdloc_538188_839829468(dest0);
LOC127[1] = rdloc_538188_839829468(src0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_123), LOC127, 2);
}
break;
default:
{
/* unexpected type kind: abort codegen with an internal error that
   names the offending kind */
NimStringDesc* LOC129;
LOC129 = (NimStringDesc*)0;
LOC129 = rawNewString(reprEnum((NI)(*ty0).kind, (&NTI292244))->Sup.len + 15);
appendString(LOC129, ((NimStringDesc*) &T839829468_269));
appendString(LOC129, reprEnum((NI)(*ty0).kind, (&NTI292244)));
internalerror_196113_155036129(LOC129);
}
break;
}
}BeforeRet: ;
}
/* Machine-generated (Nim cgen): store the value at location `s0` into the
   destination loc `d0`.  A live destination (kind != 0) receives an emitted
   assignment; an empty one simply takes over `s0`'s fields wholesale. */
N_NIMCALL(void, putlocintodest_539258_839829468)(Tcproc529021* p0, Tloc292816* d0, Tloc292816 s0) {
if ((*d0).k == ((Tlockind292808) 0)) {
/* destination not set up yet: shallow-copy the whole Tloc record */
genericAssign((void*)(&(*d0)), (void*)(&s0), (&NTI292816));
} else if ((((*d0).flags &(1U<<((NU)(((Tlocflag292810) 2))&15U)))!=0)) {
/* flag bit 2 on the destination selects assignment flags 0 ... */
genassignment_539264_839829468(p0, (*d0), s0, 0);
} else {
/* ... otherwise flags 1 */
genassignment_539264_839829468(p0, (*d0), s0, 1);
}
}
/* Machine-generated (Nim cgen): decide whether a constant of type `typ0`
   can be emitted as a plain C literal.  True unless the skipped type is one
   of the aggregate kinds (18, 17, 16, 4, 19, 24) or a proc type (25) with
   calling convention 8 (presumably a closure -- confirm against the
   Tcallingconvention enum in the Nim sources). */
N_NIMCALL(NIM_BOOL, issimpleconst_532311_839829468)(Ttype292840* typ0) {
Ttype292840* t0 = skiptypes_296099_850551059(typ0, IL64(211106240964864));
NIM_BOOL aggregate0 = ((*t0).kind == ((Ttypekind292244) 18) || (*t0).kind == ((Ttypekind292244) 17) || (*t0).kind == ((Ttypekind292244) 16) || (*t0).kind == ((Ttypekind292244) 4) || (*t0).kind == ((Ttypekind292244) 19) || (*t0).kind == ((Ttypekind292244) 24));
if (aggregate0) {
return NIM_FALSE;
}
/* callconv is only consulted for proc types, matching the original's
   short-circuit evaluation */
return !((*t0).kind == ((Ttypekind292244) 25) && (*t0).callconv == ((Tcallingconvention292002) 8));
}
/* Machine-generated (Nim cgen): route the C expression rope `r0` of Nim
   type `t0` (storage class `s0`) into destination loc `d0`.  A live
   destination gets an emitted assignment from a temporary expression loc;
   an empty one is simply re-pointed at the expression. */
N_NIMCALL(void, putintodest_550468_839829468)(Tcproc529021* p0, Tloc292816* d0, Ttype292840* t0, Ropeobj178006* r0, Tstorageloc292812 s0) {
if ((*d0).k == ((Tlockind292808) 0)) {
/* no destination yet: turn `d0` itself into an expression loc
   (unsureAsgnRef keeps the GC write barrier satisfied) */
(*d0).k = ((Tlockind292808) 6);
unsureAsgnRef((void**) (&(*d0).t), t0);
unsureAsgnRef((void**) (&(*d0).r), r0);
} else {
Tloc292816 expr0;
memset((void*)(&expr0), 0, sizeof(expr0));
initloc_532273_839829468((&expr0), ((Tlockind292808) 6), t0, s0);
expr0.r = r0;
/* flag bit 2 on the destination selects assignment flags 0 vs 1 */
if ((((*d0).flags &(1U<<((NU)(((Tlocflag292810) 2))&15U)))!=0)) {
genassignment_539264_839829468(p0, (*d0), expr0, 0);
} else {
genassignment_539264_839829468(p0, (*d0), expr0, 1);
}
}
}
/* Machine-generated (Nim cgen): pack the first `size0` bytes of bitset
   `s0` into a single 64-bit word, byte i landing at bit offset i*8
   (little-endian layout).  Bytes beyond the bitset's actual length are
   treated as zero. */
N_NIMCALL(NI64, bitsettoword_549578_839829468)(Tbitset339004* s0, NI size0) {
NI64 word0 = IL64(0);
NI avail0 = (s0 ? s0->Sup.len : 0);
NI byteidx0;
for (byteidx0 = ((NI) 0); byteidx0 <= (NI)(size0 - ((NI) 1)); byteidx0 += ((NI) 1)) {
if (byteidx0 < avail0) {
/* shift performed in unsigned arithmetic, exactly as the original */
word0 = (NI64)(word0 | (NI64)((NU64)(((NI64)(NU64)(NU8)(s0->data[byteidx0]))) << (NU64)(((NI64) ((NI)(byteidx0 * ((NI) 8)))))));
}
}
return word0;
}
/* Machine-generated (Nim cgen): build the C initializer for a set constant.
   Large sets (> 8 bytes) become a brace list of 0xNN byte literals with a
   line break after every 8th byte (templates _273.._276); small sets are
   collapsed into a single integer literal via bitsettoword. */
N_NIMCALL(Ropeobj178006*, genrawsetdata_549629_839829468)(Tbitset339004* cs0, NI size0) {
Ropeobj178006* result0;
NimStringDesc* frmt0;
result0 = (Ropeobj178006*)0;
frmt0 = (NimStringDesc*)0;
{
TY533289 LOC5;
if (!(((NI) 8) < size0)) goto LA3;
/* opening brace of the byte-array initializer */
memset((void*)LOC5, 0, sizeof(LOC5));
result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_273), LOC5, 0);
{
NI i_549649_839829468;
NI HEX3Atmp_549657_839829468;
NI res_549660_839829468;
i_549649_839829468 = (NI)0;
HEX3Atmp_549657_839829468 = (NI)0;
HEX3Atmp_549657_839829468 = (NI)(size0 - ((NI) 1));
res_549660_839829468 = ((NI) 0);
{
while (1) {
TY178507 LOC19;
NimStringDesc* LOC20;
if (!(res_549660_839829468 <= HEX3Atmp_549657_839829468)) goto LA8;
i_549649_839829468 = res_549660_839829468;
{
/* pick the per-byte format: newline separator every 8th byte,
   comma otherwise, and a closing variant for the final byte */
if (!(i_549649_839829468 < (NI)(size0 - ((NI) 1)))) goto LA11;
{
if (!(((NI) ((NI)((NI)(i_549649_839829468 + ((NI) 1)) % ((NI) 8)))) == ((NI) 0))) goto LA15;
frmt0 = copyString(((NimStringDesc*) &T839829468_274));
}
goto LA13;
LA15: ;
{
frmt0 = copyString(((NimStringDesc*) &T839829468_275));
}
LA13: ;
}
goto LA9;
LA11: ;
{
frmt0 = copyString(((NimStringDesc*) &T839829468_276));
}
LA9: ;
/* append this byte as a 2-digit hex literal */
memset((void*)LOC19, 0, sizeof(LOC19));
LOC20 = (NimStringDesc*)0;
LOC20 = nsuToHex(((NI64)(NU64)(NU8)(cs0->data[i_549649_839829468])), ((NI) 2));
LOC19[0] = rope_178277_2381377266(LOC20);
addf_179205_2381377266(&result0, frmt0, LOC19, 1);
res_549660_839829468 += ((NI) 1);
} LA8: ;
}
}
}
goto LA1;
LA3: ;
{
/* small set: a single integer literal holds all the bits */
NI64 LOC22;
LOC22 = (NI64)0;
LOC22 = bitsettoword_549578_839829468(cs0, size0);
result0 = intliteral_539270_839829468(LOC22);
}
LA1: ;
return result0;
}
/* Machine-generated (Nim cgen): format `frmt0` with `args0` through the
   module-aware ropecg formatter and append the result to file section
   `s0` of module `m0`. */
N_NIMCALL(void, appcg_532640_839829468)(Tcgen529027* m0, Tcfilesection529005 s0, NimStringDesc* frmt0, Ropeobj178006** args0, NI args0Len0) {
Ropeobj178006* formatted0 = ropecg_532407_839829468(m0, frmt0, args0, args0Len0);
add_178482_2381377266(&(*m0).s[(s0)- 0], formatted0);
}
/* Machine-generated (Nim cgen): emit a compile-time seq constant.  Builds
   the payload data (length header via template _277, then the elements),
   declares a named static via appcg into file section 8, and returns a
   cast of that name to the seq type (template _282). */
N_NIMCALL(Ropeobj178006*, genconstseq_559371_839829468)(Tcproc529021* p0, Tnode292802* n0, Ttype292840* t0) {
Ropeobj178006* result0;
Ropeobj178006* data0;
TY178507 LOC1;
NI LOC2;
TY535235 LOC18;
NI LOC19;
TY532811 LOC20;
result0 = (Ropeobj178006*)0;
/* length/capacity header of the seq payload */
memset((void*)LOC1, 0, sizeof(LOC1));
LOC2 = (NI)0;
LOC2 = len_293081_850551059(n0);
LOC1[0] = rope_178401_2381377266(((NI64) (LOC2)));
data0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_277), LOC1, 1);
{
/* non-empty seq: emit the element initializer list */
NI LOC5;
LOC5 = (NI)0;
LOC5 = len_293081_850551059(n0);
if (!(((NI) 0) < LOC5)) goto LA6;
add_178487_2381377266(&data0, ((NimStringDesc*) &T839829468_278));
{
NI i_559395_839829468;
NI HEX3Atmp_559411_839829468;
NI LOC9;
NI res_559414_839829468;
i_559395_839829468 = (NI)0;
HEX3Atmp_559411_839829468 = (NI)0;
LOC9 = (NI)0;
LOC9 = len_293081_850551059(n0);
HEX3Atmp_559411_839829468 = (NI)(LOC9 - ((NI) 1));
res_559414_839829468 = ((NI) 0);
{
while (1) {
Ropeobj178006* LOC17;
if (!(res_559414_839829468 <= HEX3Atmp_559411_839829468)) goto LA11;
i_559395_839829468 = res_559414_839829468;
{
/* separator (template _279) before every element but the first */
TY533289 LOC16;
if (!(((NI) 0) < i_559395_839829468)) goto LA14;
memset((void*)LOC16, 0, sizeof(LOC16));
addf_179205_2381377266(&data0, ((NimStringDesc*) &T839829468_279), LOC16, 0);
}
LA14: ;
LOC17 = (Ropeobj178006*)0;
LOC17 = genconstexpr_554849_839829468(p0, (*n0).kindU.S6.sons->data[i_559395_839829468]);
add_178482_2381377266(&data0, LOC17);
res_559414_839829468 += ((NI) 1);
} LA11: ;
}
}
add_178487_2381377266(&data0, ((NimStringDesc*) &T839829468_280));
}
LA6: ;
add_178487_2381377266(&data0, ((NimStringDesc*) &T839829468_280));
/* declare the named static constant holding the payload (template _281) */
result0 = gettempname_533596_839829468((*p0).module);
memset((void*)LOC18, 0, sizeof(LOC18));
LOC18[0] = gettypedesc_535671_839829468((*p0).module, (*t0).sons->data[((NI) 0)]);
LOC19 = (NI)0;
LOC19 = len_293081_850551059(n0);
LOC18[1] = rope_178401_2381377266(((NI64) (LOC19)));
LOC18[2] = result0;
LOC18[3] = data0;
appcg_532640_839829468((*p0).module, ((Tcfilesection529005) 8), ((NimStringDesc*) &T839829468_281), LOC18, 4);
/* the usable expression is a cast of the static's name to the seq type */
memset((void*)LOC20, 0, sizeof(LOC20));
LOC20[0] = gettypedesc_535671_839829468((*p0).module, t0);
LOC20[1] = result0;
result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_282), LOC20, 2);
return result0;
}
/* Machine-generated (Nim cgen): emit a constituent of a constant aggregate.
   For node kind 34 (presumably a name:value pair -- confirm against
   Tnodekind) only the value part (son 1) is emitted; any other node is
   emitted as-is. */
N_NIMCALL(Ropeobj178006*, gennamedconstexpr_559284_839829468)(Tcproc529021* p0, Tnode292802* n0) {
Tnode292802* value0 = n0;
if ((*n0).kind == ((Tnodekind292020) 34)) {
value0 = (*n0).kindU.S6.sons->data[((NI) 1)];
}
return genconstexpr_554849_839829468(p0, value0);
}
/* Machine-generated (Nim cgen): emit a brace-enclosed C initializer list
   for a constant aggregate node.  The start index skips son 0 when the
   node is kind 38 (presumably an object construction carrying its type as
   the first son); all but the last element use the comma template (_283),
   the last is appended bare, then the closing template (_160). */
N_NIMCALL(Ropeobj178006*, genconstsimplelist_559299_839829468)(Tcproc529021* p0, Tnode292802* n0) {
Ropeobj178006* result0;
NI length0;
TY533289 LOC10;
result0 = (Ropeobj178006*)0;
length0 = sonslen_295351_850551059(n0);
result0 = rope_178277_2381377266(((NimStringDesc*) &T839829468_223));
{
NI i_559333_839829468;
NI HEX3Atmp_559362_839829468;
NI HEX3Atmp_559363_839829468;
NI res_559366_839829468;
i_559333_839829468 = (NI)0;
HEX3Atmp_559362_839829468 = (NI)0;
HEX3Atmp_559363_839829468 = (NI)0;
/* start at 1 for kind 38, at 0 otherwise; stop before the last son */
HEX3Atmp_559362_839829468 = ((*n0).kind == ((Tnodekind292020) 38));
HEX3Atmp_559363_839829468 = (NI)(length0 - ((NI) 2));
res_559366_839829468 = ((NI) (HEX3Atmp_559362_839829468));
{
while (1) {
TY178507 LOC4;
if (!(res_559366_839829468 <= HEX3Atmp_559363_839829468)) goto LA3;
i_559333_839829468 = res_559366_839829468;
memset((void*)LOC4, 0, sizeof(LOC4));
LOC4[0] = gennamedconstexpr_559284_839829468(p0, (*n0).kindU.S6.sons->data[i_559333_839829468]);
addf_179205_2381377266(&result0, ((NimStringDesc*) &T839829468_283), LOC4, 1);
res_559366_839829468 += ((NI) 1);
} LA3: ;
}
}
{
/* append the last element without a trailing separator */
Ropeobj178006* LOC9;
if (!(((NI) (((*n0).kind == ((Tnodekind292020) 38)))) < length0)) goto LA7;
LOC9 = (Ropeobj178006*)0;
LOC9 = gennamedconstexpr_559284_839829468(p0, (*n0).kindU.S6.sons->data[(NI)(length0 - ((NI) 1))]);
add_178482_2381377266(&result0, LOC9);
}
LA7: ;
memset((void*)LOC10, 0, sizeof(LOC10));
addf_179205_2381377266(&result0, ((NimStringDesc*) &T839829468_160), LOC10, 0);
return result0;
}
/* Machine-generated (Nim cgen): turn a constant-expression node into a C
   literal rope.  Conversion nodes (kinds 58/59) recurse into their operand;
   set constructors (39) go through the bitset emitter; aggregate kinds
   (41/37/155/38) become either a seq constant or a brace list depending on
   the skipped type; everything else is evaluated as a regular expression
   and read back from its loc. */
N_NIMCALL(Ropeobj178006*, genconstexpr_554849_839829468)(Tcproc529021* p0, Tnode292802* n0) {
Ropeobj178006* result0;
result0 = (Ropeobj178006*)0;
switch ((*n0).kind) {
case ((Tnodekind292020) 58):
case ((Tnodekind292020) 59):
{
/* conversion-like wrapper: emit the wrapped value (son 1) */
result0 = genconstexpr_554849_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 1)]);
}
break;
case ((Tnodekind292020) 39):
{
/* set constructor: materialize the bitset, then format it */
Tbitset339004* cs0;
NI64 LOC3;
cs0 = (Tbitset339004*)0;
tobitset_340001_452470228(n0, (&cs0));
LOC3 = (NI64)0;
LOC3 = getsize_320135_3876443242((*n0).typ);
result0 = genrawsetdata_549629_839829468(cs0, ((NI) (LOC3)));
}
break;
case ((Tnodekind292020) 41):
case ((Tnodekind292020) 37):
case ((Tnodekind292020) 155):
case ((Tnodekind292020) 38):
{
/* aggregate constructor: type kind 24 (seq-like) needs the heap-shaped
   constant, anything else is a simple brace list */
Ttype292840* t0;
t0 = skiptypes_296099_850551059((*n0).typ, IL64(211106232576256));
{
if (!((*t0).kind == ((Ttypekind292244) 24))) goto LA7;
result0 = genconstseq_559371_839829468(p0, n0, t0);
}
goto LA5;
LA7: ;
{
result0 = genconstsimplelist_559299_839829468(p0, n0);
}
LA5: ;
}
break;
default:
{
/* fallback: evaluate as an ordinary expression and read its C form */
Tloc292816 d0;
memset((void*)(&d0), 0, sizeof(d0));
initlocexpr_539283_839829468(p0, n0, (&d0));
result0 = rdloc_538188_839829468(d0);
}
break;
}
return result0;
}
/* Machine-generated (Nim cgen): make sure the C definition of complex
   constant `sym0` exists.  Fills the symbol's loc on first use, emits the
   actual definition (template _272) into the owning module `q0` once, and
   emits an extern declaration (template _284) into the requesting module
   (and the generated header when the symbol is flagged for export). */
N_NIMCALL(void, requestconstimpl_539240_839829468)(Tcproc529021* p0, Tsym292834* sym0) {
Tcgen529027* m0;
Tcgen529027* q0;
{ m0 = (*p0).module;
useheader_532369_839829468(m0, sym0);
{
/* first sighting: mangle the name and fill in the symbol's loc */
Ropeobj178006* LOC5;
if (!((*sym0).loc.k == ((Tlockind292808) 0))) goto LA3;
LOC5 = (Ropeobj178006*)0;
LOC5 = manglename_533205_839829468(sym0);
fillloc_532282_839829468((&(*sym0).loc), ((Tlockind292808) 8), (*sym0).typ, LOC5, ((Tstorageloc292812) 1));
}
LA3: ;
{
/* loc flag bit 3 set: nothing further to emit */
if (!(((*sym0).loc.flags &(1U<<((NU)(((Tlocflag292810) 3))&15U)))!=0)) goto LA8;
goto BeforeRet;
}
LA8: ;
q0 = findpendingmodule_532241_839829468(m0, sym0);
{
/* emit the definition once into the owning module's section 8 */
NIM_BOOL LOC12;
NIM_BOOL LOC14;
TY535238 LOC17;
LOC12 = (NIM_BOOL)0;
LOC12 = !((q0 == NIM_NIL));
if (!(LOC12)) goto LA13;
LOC14 = (NIM_BOOL)0;
LOC14 = containsorincl_268862_2627731572((&(*q0).declaredthings), (*sym0).Sup.id);
LOC12 = !(LOC14);
LA13: ;
if (!LOC12) goto LA15;
memset((void*)LOC17, 0, sizeof(LOC17));
LOC17[0] = gettypedesc_535671_839829468(q0, (*sym0).typ);
LOC17[1] = (*sym0).loc.r;
LOC17[2] = genconstexpr_554849_839829468((*q0).initproc, (*sym0).ast);
addf_179205_2381377266(&(*q0).s[(((Tcfilesection529005) 8))- 0], ((NimStringDesc*) &T839829468_272), LOC17, 3);
}
LA15: ;
{
/* requesting module differs from the owner: emit an extern decl once */
NIM_BOOL LOC20;
NIM_BOOL LOC22;
Ropeobj178006* headerdecl0;
TY532811 LOC25;
LOC20 = (NIM_BOOL)0;
LOC20 = !((q0 == m0));
if (!(LOC20)) goto LA21;
LOC22 = (NIM_BOOL)0;
LOC22 = containsorincl_268862_2627731572((&(*m0).declaredthings), (*sym0).Sup.id);
LOC20 = !(LOC22);
LA21: ;
if (!LOC20) goto LA23;
memset((void*)LOC25, 0, sizeof(LOC25));
LOC25[0] = gettypedesc_535671_839829468(m0, (*sym0).loc.t);
LOC25[1] = (*sym0).loc.r;
headerdecl0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_284), LOC25, 2);
add_178482_2381377266(&(*m0).s[(((Tcfilesection529005) 8))- 0], headerdecl0);
{
/* symbol flagged for export and a header is being generated:
   mirror the declaration into the generated header */
NIM_BOOL LOC28;
LOC28 = (NIM_BOOL)0;
LOC28 = (((*sym0).flags &(1U<<((NU)(((Tsymflag292184) 6))&31U)))!=0);
if (!(LOC28)) goto LA29;
LOC28 = !((generatedheader_532201_839829468 == NIM_NIL));
LA29: ;
if (!LOC28) goto LA30;
add_178482_2381377266(&(*generatedheader_532201_839829468).s[(((Tcfilesection529005) 8))- 0], headerdecl0);
}
LA30: ;
}
LA23: ;
}BeforeRet: ;
}
/* Machine-generated (Nim cgen): use a complex constant -- make sure its C
   definition exists, then route the symbol's (now filled) loc into the
   expression destination `d0`. */
N_NIMCALL(void, gencomplexconst_558249_839829468)(Tcproc529021* p0, Tsym292834* sym0, Tloc292816* d0) {
requestconstimpl_539240_839829468(p0, sym0);
putlocintodest_539258_839829468(p0, d0, (*sym0).loc);
}
/* Machine-generated (Nim cgen): address of code section `s0` inside the
   outermost (index 0) block of proc `p0`; callers append ropes to it. */
static N_INLINE(Ropeobj178006**, procsec_529194_3723162438)(Tcproc529021* p0, Tcprocsection529011 s0) {
Ropeobj178006** result0;
result0 = (Ropeobj178006**)0;
result0 = &(*p0).blocks->data[((NI) 0)].sections[(s0)- 0];
return result0;
}
/* Machine-generated (Nim cgen): first access to a thread-local variable
   inside proc `p0` when thread vars are emulated.  Marks the proc and its
   module, then injects setup code (templates _286/_287) into the proc's
   declaration (section 0) and init (section 1) areas -- done at most once
   per proc via the threadvaraccessed latch. */
N_NIMCALL(void, accessthreadlocalvar_532945_839829468)(Tcproc529021* p0, Tsym292834* s0) {
{
NIM_BOOL LOC3;
Ropeobj178006** LOC7;
TY533289 LOC8;
Ropeobj178006** LOC9;
TY533289 LOC10;
Ropeobj178006* LOC11;
LOC3 = (NIM_BOOL)0;
LOC3 = emulatedthreadvars_532949_839829468();
if (!(LOC3)) goto LA4;
LOC3 = !((*p0).threadvaraccessed);
LA4: ;
if (!LOC3) goto LA5;
(*p0).threadvaraccessed = NIM_TRUE;
/* record the module-level codegen flag bit 1 */
(*(*p0).module).flags |= ((NU8)1)<<((((Codegenflag529025) 1))%(sizeof(NU8)*8));
LOC7 = (Ropeobj178006**)0;
LOC7 = procsec_529194_3723162438(p0, ((Tcprocsection529011) 0));
memset((void*)LOC8, 0, sizeof(LOC8));
addf_179205_2381377266(LOC7, ((NimStringDesc*) &T839829468_286), LOC8, 0);
LOC9 = (Ropeobj178006**)0;
LOC9 = procsec_529194_3723162438(p0, ((Tcprocsection529011) 1));
memset((void*)LOC10, 0, sizeof(LOC10));
LOC11 = (Ropeobj178006*)0;
LOC11 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_287), LOC10, 0);
add_178482_2381377266(LOC9, LOC11);
}
LA5: ;
}
/* Machine-generated (Nim cgen): true when `t0` is nil or one of the two
   "empty" type kinds (62 / 7 -- presumably tyVoid-like; confirm against
   the Ttypekind enum in the Nim sources). */
static N_INLINE(NIM_BOOL, isemptytype_297440_850551059)(Ttype292840* t0) {
if (t0 == NIM_NIL) {
return NIM_TRUE;
}
return ((*t0).kind == ((Ttypekind292244) 62) || (*t0).kind == ((Ttypekind292244) 7));
}
/* Machine-generated (Nim cgen): route the static-data rope `r0` of Nim
   type `t0` into destination loc `d0`.  Mirrors putintodest but uses loc
   kind 8 and fixed storage class 1 (static/literal data). */
N_NIMCALL(void, putdataintodest_550436_839829468)(Tcproc529021* p0, Tloc292816* d0, Ttype292840* t0, Ropeobj178006* r0) {
if ((*d0).k == ((Tlockind292808) 0)) {
/* no destination yet: turn `d0` itself into a data loc
   (unsureAsgnRef keeps the GC write barrier satisfied) */
(*d0).k = ((Tlockind292808) 8);
unsureAsgnRef((void**) (&(*d0).t), t0);
unsureAsgnRef((void**) (&(*d0).r), r0);
} else {
Tloc292816 data0;
memset((void*)(&data0), 0, sizeof(data0));
initloc_532273_839829468((&data0), ((Tlockind292808) 8), t0, ((Tstorageloc292812) 1));
data0.r = r0;
/* flag bit 2 on the destination selects assignment flags 0 vs 1 */
if ((((*d0).flags &(1U<<((NU)(((Tlocflag292810) 2))&15U)))!=0)) {
genassignment_539264_839829468(p0, (*d0), data0, 0);
} else {
genassignment_539264_839829468(p0, (*d0), data0, 1);
}
}
}
/* Machine-generated (Nim cgen): dedupe line-info emission.  Returns true
   (after recording the new position in the proc) only when `info0` differs
   from the last emitted line/file, so callers skip redundant line
   directives. */
N_NIMCALL(NIM_BOOL, freshlineinfo_532818_839829468)(Tcproc529021* p0, Tlineinfo191336 info0) {
NIM_BOOL changed0 = (!((*p0).lastlineinfo.line == info0.line) || !((*p0).lastlineinfo.fileindex == info0.fileindex));
if (changed0) {
(*p0).lastlineinfo.line = info0.line;
(*p0).lastlineinfo.fileindex = info0.fileindex;
}
return changed0;
}
/* Machine-generated (Nim cgen): emit source-position bookkeeping for node
   `t0` into the statement section of proc `p0`: optionally the original
   source line as a comment (global option bit 28), always a C #line
   directive, and -- depending on the proc's option bits and whether the
   enclosing proc is flagged (sym flag bit 9) -- one of two instrumentation
   templates (_294 / _295, presumably debugger vs stack-trace support;
   confirm against compiler/ccgstmts). */
N_NIMCALL(void, genlinedir_532823_839829468)(Tcproc529021* p0, Tnode292802* t0) {
NI line0;
Ropeobj178006** LOC11;
NimStringDesc* LOC12;
line0 = safelinenm_532721_839829468((*t0).info);
{
/* global option bit 28: embed the original source line as text */
Ropeobj178006** LOC5;
TY533289 LOC6;
Ropeobj178006* LOC7;
Ropeobj178006* LOC8;
Ropeobj178006* LOC9;
Ropeobj178006* LOC10;
if (!((gglobaloptions_169130_2607990831 &((NU64)1<<((NU)(((Tglobaloption169013) 28))&63U)))!=0)) goto LA3;
LOC5 = (Ropeobj178006**)0;
LOC5 = s_529179_3723162438(p0, ((Tcprocsection529011) 2));
memset((void*)LOC6, 0, sizeof(LOC6));
LOC7 = (Ropeobj178006*)0;
LOC7 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_293), LOC6, 0);
LOC8 = (Ropeobj178006*)0;
LOC8 = sourceline_192068_155036129((*t0).info);
LOC9 = (Ropeobj178006*)0;
LOC9 = HEX26_178418_2381377266(LOC7, LOC8);
LOC10 = (Ropeobj178006*)0;
LOC10 = HEX26_178418_2381377266(LOC9, rnl_178903_2381377266);
add_178482_2381377266(LOC5, LOC10);
}
LA3: ;
/* always emit the C line directive for the node's file/line */
LOC11 = (Ropeobj178006**)0;
LOC11 = s_529179_3723162438(p0, ((Tcprocsection529011) 2));
LOC12 = (NimStringDesc*)0;
LOC12 = tofullpath_192264_155036129((*t0).info.fileindex);
genclinedir_532725_839829468(LOC11, LOC12, line0);
{
/* option mask 163840 fully set (and proc not flagged): template _294 */
NIM_BOOL LOC15;
NIM_BOOL LOC17;
LOC15 = (NIM_BOOL)0;
LOC15 = ((163840 & (*p0).options) == 163840);
if (!(LOC15)) goto LA16;
LOC17 = (NIM_BOOL)0;
LOC17 = ((*p0).prc == NIM_NIL);
if (LOC17) goto LA18;
LOC17 = !((((*(*p0).prc).flags &(1U<<((NU)(((Tsymflag292184) 9))&31U)))!=0));
LA18: ;
LOC15 = LOC17;
LA16: ;
if (!LOC15) goto LA19;
{
NIM_BOOL LOC23;
TY532811 LOC26;
NimStringDesc* LOC27;
LOC23 = (NIM_BOOL)0;
LOC23 = freshlineinfo_532818_839829468(p0, (*t0).info);
if (!LOC23) goto LA24;
memset((void*)LOC26, 0, sizeof(LOC26));
LOC26[0] = rope_178401_2381377266(((NI64) (line0)));
LOC27 = (NimStringDesc*)0;
LOC27 = tofilename_192260_155036129((*t0).info.fileindex);
LOC26[1] = makecstring_191638_155036129(LOC27);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_294), LOC26, 2);
}
LA24: ;
}
goto LA13;
LA19: ;
{
/* weaker option mask 98304 and a valid file index: template _295 */
NIM_BOOL LOC29;
NIM_BOOL LOC30;
NIM_BOOL LOC32;
LOC29 = (NIM_BOOL)0;
LOC30 = (NIM_BOOL)0;
LOC30 = ((98304 & (*p0).options) == 98304);
if (!(LOC30)) goto LA31;
LOC32 = (NIM_BOOL)0;
LOC32 = ((*p0).prc == NIM_NIL);
if (LOC32) goto LA33;
LOC32 = !((((*(*p0).prc).flags &(1U<<((NU)(((Tsymflag292184) 9))&31U)))!=0));
LA33: ;
LOC30 = LOC32;
LA31: ;
LOC29 = LOC30;
if (!(LOC29)) goto LA34;
LOC29 = (((NI32) 0) <= (*t0).info.fileindex);
LA34: ;
if (!LOC29) goto LA35;
{
NIM_BOOL LOC39;
TY532811 LOC42;
LOC39 = (NIM_BOOL)0;
LOC39 = freshlineinfo_532818_839829468(p0, (*t0).info);
if (!LOC39) goto LA40;
memset((void*)LOC42, 0, sizeof(LOC42));
LOC42[0] = rope_178401_2381377266(((NI64) (line0)));
LOC42[1] = quotedfilename_196818_155036129((*t0).info);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_295), LOC42, 2);
}
LA40: ;
}
goto LA13;
LA35: ;
LA13: ;
}
/* Machine-generated (Nim cgen): bump the proc's label counter and build a
   fresh numbered label rope (prefix held in literal T839829468_296). */
N_NIMCALL(Ropeobj178006*, getlabel_539217_839829468)(Tcproc529021* p0) {
Ropeobj178006* number0;
(*p0).labels += ((NI) 1);
number0 = rope_178401_2381377266(((NI64) ((*p0).labels)));
return HEX26_178452_2381377266(((NimStringDesc*) &T839829468_296), number0);
}
/* Machine-generated (Nim cgen): emit the definition of label `labl0`
   (template _299, presumably "LABEL: ;") into the statement section. */
N_NIMCALL(void, fixlabel_539230_839829468)(Tcproc529021* p0, Ropeobj178006* labl0) {
TY178507 LOC1;
memset((void*)LOC1, 0, sizeof(LOC1));
LOC1[0] = labl0;
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_299), LOC1, 1);
}
/* Machine-generated (Nim cgen): lower a short-circuit `and`/`or` (magic
   `m0`; 127 selects one of two branch templates _297/_298) into: evaluate
   operand 1 into a temp, conditionally jump past operand 2 via a fresh
   label, evaluate operand 2 into the same temp, then move the temp into
   destination `d0`.  splitdecls is raised while emitting so declarations
   stay outside the branch. */
N_NIMCALL(void, genandor_554311_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, Tmagic292524 m0) {
Ropeobj178006* L0;
Tloc292816 tmp0;
L0 = (Ropeobj178006*)0;
memset((void*)(&tmp0), 0, sizeof(tmp0));
gettemp_537032_839829468(p0, (*e0).typ, (&tmp0), NIM_FALSE);
(*p0).splitdecls += ((NI) 1);
expr_539248_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&tmp0));
L0 = getlabel_539217_839829468(p0);
{
/* magic 127: jump template _297 (the other magic gets _298) */
TY532811 LOC5;
if (!(m0 == ((Tmagic292524) 127))) goto LA3;
memset((void*)LOC5, 0, sizeof(LOC5));
LOC5[0] = rdloc_538188_839829468(tmp0);
LOC5[1] = L0;
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_297), LOC5, 2);
}
goto LA1;
LA3: ;
{
TY532811 LOC7;
memset((void*)LOC7, 0, sizeof(LOC7));
LOC7[0] = rdloc_538188_839829468(tmp0);
LOC7[1] = L0;
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_298), LOC7, 2);
}
LA1: ;
/* second operand lands in the same temp; then place the label */
expr_539248_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&tmp0));
fixlabel_539230_839829468(p0, L0);
{
/* unset destination adopts the temp loc wholesale ... */
if (!((*d0).k == ((Tlockind292808) 0))) goto LA10;
genericAssign((void*)(&(*d0)), (void*)(&tmp0), (&NTI292816));
}
goto LA8;
LA10: ;
{
/* ... an existing one receives an emitted assignment */
genassignment_539264_839829468(p0, (*d0), tmp0, 0);
}
LA8: ;
(*p0).splitdecls -= ((NI) 1);
}
/* Machine-generated (Nim cgen): non-overflow-checked unary arithmetic.
   Evaluates the operand (son 1), then formats the operation template from
   unarithtab (indexed by magic, offset 99) with the operand, the type's
   bit width (getsize * 8) and the C type descriptor, and routes the result
   into destination `d0`. */
N_NIMCALL(void, unaryarith_552646_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, Tmagic292524 op0) {
Tloc292816 a0;
Ttype292840* t0;
TY535238 LOC1;
NI64 LOC2;
Ropeobj178006* LOC3;
memset((void*)(&a0), 0, sizeof(a0));
t0 = (Ttype292840*)0;
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0));
t0 = skiptypes_296099_850551059((*e0).typ, IL64(211106233624832));
memset((void*)LOC1, 0, sizeof(LOC1));
LOC1[0] = rdloc_538188_839829468(a0);
LOC2 = (NI64)0;
LOC2 = getsize_320135_3876443242(t0);
/* template argument 1 is the operand width in bits */
LOC1[1] = rope_178401_2381377266((NI64)(LOC2 * IL64(8)));
LOC1[2] = getsimpletypedesc_533936_839829468((*p0).module, (*e0).typ);
LOC3 = (Ropeobj178006*)0;
LOC3 = HEX25_178905_2381377266(unarithtab_552653_839829468[(op0)- 99], LOC1, 3);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC3, ((Tstorageloc292812) 0));
}
/* unaryarithoverflow: like unaryarith but, when option bit 5 is set in
 * p0->options (presumably overflow checks enabled — confirm against Nim's
 * TOption enum), first emits a runtime guard comparing the operand against
 * firstOrd(t) via template T839829468_317. */
N_NIMCALL(void, unaryarithoverflow_551633_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, Tmagic292524 m0) {
Tloc292816 a0;
Ttype292840* t0;
TY532811 LOC7;
NI64 LOC8;
Ropeobj178006* LOC9;
memset((void*)(&a0), 0, sizeof(a0));
t0 = (Ttype292840*)0;
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0));
t0 = skiptypes_296099_850551059((*e0).typ, IL64(211106233624832));
{
TY532811 LOC5;
NI64 LOC6;
/* guard emitted only when option bit 5 is set */
if (!(((*p0).options &(1U<<((NU)(((Toption169009) 5))&31U)))!=0)) goto LA3;
memset((void*)LOC5, 0, sizeof(LOC5));
LOC5[0] = rdloc_538188_839829468(a0);
LOC6 = (NI64)0;
LOC6 = firstord_320001_3876443242(t0);
LOC5[1] = intliteral_539270_839829468(LOC6);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_317), LOC5, 2);
}
LA3: ;
memset((void*)LOC7, 0, sizeof(LOC7));
LOC7[0] = rdloc_538188_839829468(a0);
LOC8 = (NI64)0;
LOC8 = getsize_320135_3876443242(t0);
LOC7[1] = rope_178401_2381377266((NI64)(LOC8 * IL64(8)));
LOC9 = (Ropeobj178006*)0;
LOC9 = HEX25_178905_2381377266(opr_551640_839829468[(m0)- 96], LOC7, 2);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC9, ((Tstorageloc292812) 0));
}
/* binaryarith: generates C for a binary arithmetic magic `op0` without
 * overflow checks.  s0 = max(size(a), size(b)) * 8 bits is passed to the
 * format template binarithtab[op0-52] together with both operands and the
 * simple type descriptor of the result type. */
N_NIMCALL(void, binaryarith_551819_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, Tmagic292524 op0) {
Tloc292816 a0;
Tloc292816 b0;
NI64 s0;
NI64 LOC1;
NI64 LOC2;
TY535235 LOC3;
Ropeobj178006* LOC4;
memset((void*)(&a0), 0, sizeof(a0));
memset((void*)(&b0), 0, sizeof(b0));
s0 = (NI64)0;
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0));
LOC1 = (NI64)0;
LOC1 = getsize_320135_3876443242(a0.t);
LOC2 = (NI64)0;
LOC2 = getsize_320135_3876443242(b0.t);
/* widest operand size, converted to bits */
s0 = (NI64)(((LOC1 >= LOC2) ? LOC1 : LOC2) * IL64(8));
memset((void*)LOC3, 0, sizeof(LOC3));
LOC3[0] = rdloc_538188_839829468(a0);
LOC3[1] = rdloc_538188_839829468(b0);
LOC3[2] = rope_178401_2381377266(s0);
LOC3[3] = getsimpletypedesc_533936_839829468((*p0).module, (*e0).typ);
LOC4 = (Ropeobj178006*)0;
LOC4 = HEX25_178905_2381377266(binarithtab_551826_839829468[(op0)- 52], LOC3, 4);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC4, ((Tstorageloc292812) 0));
}
/* binaryfloatarith: float binary arithmetic.  When any of option bits in
 * mask 0x180 (384) is set, emits the operation through template T839829468_319
 * with an explicit operator string (opr table), then optionally appends
 * NaN/Inf runtime checks (option bits 7 and 8, templates _323/_324).
 * Otherwise falls back to the generic binaryarith path. */
N_NIMCALL(void, binaryfloatarith_556728_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, Tmagic292524 m0) {
{
Tloc292816 a0;
Tloc292816 b0;
TY535235 LOC5;
Tnode292802* LOC6;
Ropeobj178006* LOC7;
/* 384 == bits 7|8 of p0->options; checked-float path taken when nonzero */
if (!!(((384 & (*p0).options) == 0))) goto LA3;
memset((void*)(&a0), 0, sizeof(a0));
memset((void*)(&b0), 0, sizeof(b0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0));
memset((void*)LOC5, 0, sizeof(LOC5));
LOC5[0] = rope_178277_2381377266(opr_556762_839829468[(m0)- 52]);
LOC5[1] = rdloc_538188_839829468(a0);
LOC5[2] = rdloc_538188_839829468(b0);
LOC6 = (Tnode292802*)0;
LOC6 = HEX5BHEX5D_293238_850551059(e0, ((NI) 1));
LOC5[3] = getsimpletypedesc_533936_839829468((*p0).module, (*LOC6).typ);
LOC7 = (Ropeobj178006*)0;
LOC7 = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_319), LOC5, 4);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC7, ((Tstorageloc292812) 0));
{
TY178507 LOC12;
/* option bit 7: emit first post-check on the freshly written destination */
if (!(((*p0).options &(1U<<((NU)(((Toption169009) 7))&31U)))!=0)) goto LA10;
memset((void*)LOC12, 0, sizeof(LOC12));
LOC12[0] = rdloc_538188_839829468((*d0));
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_323), LOC12, 1);
}
LA10: ;
{
TY178507 LOC17;
/* option bit 8: emit second post-check */
if (!(((*p0).options &(1U<<((NU)(((Toption169009) 8))&31U)))!=0)) goto LA15;
memset((void*)LOC17, 0, sizeof(LOC17));
LOC17[0] = rdloc_538188_839829468((*d0));
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_324), LOC17, 1);
}
LA15: ;
}
goto LA1;
LA3: ;
{
/* unchecked path: ordinary binary arithmetic codegen */
binaryarith_551819_839829468(p0, e0, d0, m0);
}
LA1: ;
}
/* geneqproc: equality of proc values.  If the (type-skipped) lhs type has
 * calling convention 8 (presumably ccClosure — confirm against Nim's
 * TCallingConvention), uses the closure-compare template _352, else the
 * plain pointer-compare template _341. */
N_NIMCALL(void, geneqproc_552214_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) {
Tloc292816 a0;
Tloc292816 b0;
memset((void*)(&a0), 0, sizeof(a0));
memset((void*)(&b0), 0, sizeof(b0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0));
{
Ttype292840* LOC3;
TY532811 LOC6;
Ropeobj178006* LOC7;
LOC3 = (Ttype292840*)0;
LOC3 = skiptypes_296099_850551059(a0.t, IL64(211106232576256));
if (!((*LOC3).callconv == ((Tcallingconvention292002) 8))) goto LA4;
memset((void*)LOC6, 0, sizeof(LOC6));
LOC6[0] = rdloc_538188_839829468(a0);
LOC6[1] = rdloc_538188_839829468(b0);
LOC7 = (Ropeobj178006*)0;
LOC7 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_352), LOC6, 2);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC7, ((Tstorageloc292812) 0));
}
goto LA1;
LA4: ;
{
TY532811 LOC9;
Ropeobj178006* LOC10;
memset((void*)LOC9, 0, sizeof(LOC9));
LOC9[0] = rdloc_538188_839829468(a0);
LOC9[1] = rdloc_538188_839829468(b0);
LOC10 = (Ropeobj178006*)0;
LOC10 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_341), LOC9, 2);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC10, ((Tstorageloc292812) 0));
}
LA1: ;
}
/* rdcharloc: reads a location like rdloc, but when the skipped type kind
 * is 2 (presumably tyChar — confirm) wraps the result in template _358,
 * which likely adds a cast suitable for char arithmetic. */
N_NIMCALL(Ropeobj178006*, rdcharloc_538227_839829468)(Tloc292816 a0) {
Ropeobj178006* result0;
result0 = (Ropeobj178006*)0;
result0 = rdloc_538188_839829468(a0);
{
Ttype292840* LOC3;
TY178507 LOC6;
LOC3 = (Ttype292840*)0;
LOC3 = skiptypes_296099_850551059(a0.t, IL64(211106233624832));
if (!((*LOC3).kind == ((Ttypekind292244) 2))) goto LA4;
memset((void*)LOC6, 0, sizeof(LOC6));
LOC6[0] = result0;
result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_358), LOC6, 1);
}
LA4: ;
return result0;
}
/* binaryarithoverflowraw: helper for checked binary arithmetic.  Declares a
 * temp of a storage type (generic "NI"-like template _36 for small sizes,
 * else the full type descriptor), emits `frmt0` computing into the temp from
 * the two (char-aware) operand reads, then — for small types or kinds 20/14
 * (presumably range-like/enum-like kinds, confirm) — emits a range check
 * against firstOrd/lastOrd (template _359).  Returns the temp's name rope. */
N_NIMCALL(Ropeobj178006*, binaryarithoverflowraw_551235_839829468)(Tcproc529021* p0, Ttype292840* t0, Tloc292816 a0, Tloc292816 b0, NimStringDesc* frmt0) {
Ropeobj178006* result0;
NI64 size0;
Ropeobj178006* storage0;
TY532811 LOC6;
TY535238 LOC7;
result0 = (Ropeobj178006*)0;
size0 = getsize_320135_3876443242(t0);
{
/* narrower than the platform int: compute in the generic storage type */
if (!(size0 < ((NI64) (intsize_176641_4151366050)))) goto LA3;
storage0 = rope_178277_2381377266(((NimStringDesc*) &T839829468_36));
}
goto LA1;
LA3: ;
{
storage0 = gettypedesc_535671_839829468((*p0).module, t0);
}
LA1: ;
result0 = gettempname_533596_839829468((*p0).module);
memset((void*)LOC6, 0, sizeof(LOC6));
LOC6[0] = storage0;
LOC6[1] = result0;
/* declaration goes to section 0 (presumably the proc's locals section) */
linefmt_532714_839829468(p0, ((Tcprocsection529011) 0), ((NimStringDesc*) &T839829468_54), LOC6, 2);
memset((void*)LOC7, 0, sizeof(LOC7));
LOC7[0] = result0;
LOC7[1] = rdcharloc_538227_839829468(a0);
LOC7[2] = rdcharloc_538227_839829468(b0);
linecg_532707_839829468(p0, ((Tcprocsection529011) 2), frmt0, LOC7, 3);
{
NIM_BOOL LOC10;
TY535238 LOC14;
NI64 LOC15;
NI64 LOC16;
LOC10 = (NIM_BOOL)0;
LOC10 = (size0 < ((NI64) (intsize_176641_4151366050)));
if (LOC10) goto LA11;
LOC10 = ((*t0).kind == ((Ttypekind292244) 20) || (*t0).kind == ((Ttypekind292244) 14));
LA11: ;
if (!LOC10) goto LA12;
memset((void*)LOC14, 0, sizeof(LOC14));
LOC14[0] = result0;
LOC15 = (NI64)0;
LOC15 = firstord_320001_3876443242(t0);
LOC14[1] = intliteral_539270_839829468(LOC15);
LOC16 = (NI64)0;
LOC16 = lastord_320004_3876443242(t0);
LOC14[2] = intliteral_539270_839829468(LOC16);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_359), LOC14, 3);
}
LA12: ;
return result0;
}
/* binaryarithoverflow: binary arithmetic with optional overflow checking.
 * When option bit 5 is clear (checks disabled), formats the unchecked
 * operator template directly.  Otherwise calls binaryarithoverflowraw with
 * the 64-bit helper name table (type kind 35, presumably tyInt64 — confirm)
 * or the default table, then casts the raw result via template _370. */
N_NIMCALL(void, binaryarithoverflow_551262_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, Tmagic292524 m0) {
Tloc292816 a0;
Tloc292816 b0;
Ttype292840* t0;
memset((void*)(&a0), 0, sizeof(a0));
memset((void*)(&b0), 0, sizeof(b0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0));
t0 = skiptypes_296099_850551059((*e0).typ, IL64(211106233624832));
{
Ropeobj178006* res0;
TY535238 LOC5;
/* checks disabled: plain operator template, no helper call */
if (!!((((*p0).options &(1U<<((NU)(((Toption169009) 5))&31U)))!=0))) goto LA3;
memset((void*)LOC5, 0, sizeof(LOC5));
LOC5[0] = gettypedesc_535671_839829468((*p0).module, t0);
LOC5[1] = rdloc_538188_839829468(a0);
LOC5[2] = rdloc_538188_839829468(b0);
res0 = HEX25_178905_2381377266(opr_551279_839829468[(m0)- 45], LOC5, 3);
putintodest_550468_839829468(p0, d0, (*e0).typ, res0, ((Tstorageloc292812) 0));
}
goto LA1;
LA3: ;
{
Ropeobj178006* res0;
NimStringDesc* LOC7;
TY532811 LOC13;
Ropeobj178006* LOC14;
LOC7 = (NimStringDesc*)0;
{
/* pick the 64-bit runtime helper for kind 35 */
if (!((*t0).kind == ((Ttypekind292244) 35))) goto LA10;
LOC7 = copyString(prc64_551274_839829468[(m0)- 45]);
}
goto LA8;
LA10: ;
{
LOC7 = copyString(prc_551269_839829468[(m0)- 45]);
}
LA8: ;
res0 = binaryarithoverflowraw_551235_839829468(p0, t0, a0, b0, LOC7);
memset((void*)LOC13, 0, sizeof(LOC13));
LOC13[0] = gettypedesc_535671_839829468((*p0).module, t0);
LOC13[1] = res0;
LOC14 = (Ropeobj178006*)0;
LOC14 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_370), LOC13, 2);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC14, ((Tstorageloc292812) 0));
}
LA1: ;
}
/* lenfield: returns the rope naming the seq/string length field.  Chooses
 * string _157 vs _158 depending on whether the compile command is 2
 * (presumably cmdCompileToCpp — confirm) or the main module has symbol
 * flag 27 set; likely the ".Sup.len" vs ".len" distinction. */
N_NIMCALL(Ropeobj178006*, lenfield_539305_839829468)(Tcproc529021* p0) {
Ropeobj178006* result0;
NimStringDesc* LOC1;
result0 = (Ropeobj178006*)0;
LOC1 = (NimStringDesc*)0;
{
NIM_BOOL LOC4;
LOC4 = (NIM_BOOL)0;
LOC4 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC4) goto LA5;
LOC4 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA5: ;
if (!LOC4) goto LA6;
LOC1 = copyString(((NimStringDesc*) &T839829468_157));
}
goto LA2;
LA6: ;
{
LOC1 = copyString(((NimStringDesc*) &T839829468_158));
}
LA2: ;
result0 = rope_178277_2381377266(LOC1);
return result0;
}
/* gcusage: when the selected GC mode is 0 (presumably gcNone — confirm),
 * emits diagnostic message 263 with the rendered tree of node n0, warning
 * that the construct needs the GC. */
N_NIMCALL(void, gcusage_554439_839829468)(Tnode292802* n0) {
{
NimStringDesc* LOC5;
if (!(gselectedgc_169133_2607990831 == ((Tgcmode169080) 0))) goto LA3;
LOC5 = (NimStringDesc*)0;
LOC5 = rendertree_311044_382274130(n0, 0);
message_196095_155036129((*n0).info, ((Tmsgkind191002) 263), LOC5);
}
LA3: ;
}
/* genrepr: code generation for the `repr` magic.  Dispatches on the
 * skipped operand type kind and formats the matching runtime `repr*`
 * call template (_371.._383).  Numeric kind ranges map to reprInt/reprFloat
 * style helpers; kinds 14/15 and 19 pass a typeinfo; kinds 27/48
 * (presumably openArray/varargs — confirm) first normalize the operand to a
 * (pointer, length) pair in `b0` based on the concrete argument kind;
 * kinds 3/62 are rejected with a localerror (_384).  Ends with a gcusage
 * diagnostic since repr allocates. */
N_NIMCALL(void, genrepr_555339_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) {
Tloc292816 a0;
Ttype292840* t0;
memset((void*)(&a0), 0, sizeof(a0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0));
t0 = skiptypes_296099_850551059((*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, IL64(211106242013440));
switch ((*t0).kind) {
case ((Ttypekind292244) 31) ... ((Ttypekind292244) 35):
case ((Ttypekind292244) 40) ... ((Ttypekind292244) 44):
{
/* integer-like kinds */
TY178507 LOC2;
Ropeobj178006* LOC3;
memset((void*)LOC2, 0, sizeof(LOC2));
LOC2[0] = rdloc_538188_839829468(a0);
LOC3 = (Ropeobj178006*)0;
LOC3 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_371), LOC2, 1);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC3, a0.s);
}
break;
case ((Ttypekind292244) 36) ... ((Ttypekind292244) 39):
{
/* float-like kinds */
TY178507 LOC5;
Ropeobj178006* LOC6;
memset((void*)LOC5, 0, sizeof(LOC5));
LOC5[0] = rdloc_538188_839829468(a0);
LOC6 = (Ropeobj178006*)0;
LOC6 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_372), LOC5, 1);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC6, a0.s);
}
break;
case ((Ttypekind292244) 1):
{
TY178507 LOC8;
Ropeobj178006* LOC9;
memset((void*)LOC8, 0, sizeof(LOC8));
LOC8[0] = rdloc_538188_839829468(a0);
LOC9 = (Ropeobj178006*)0;
LOC9 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_373), LOC8, 1);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC9, a0.s);
}
break;
case ((Ttypekind292244) 2):
{
TY178507 LOC11;
Ropeobj178006* LOC12;
memset((void*)LOC11, 0, sizeof(LOC11));
LOC11[0] = rdloc_538188_839829468(a0);
LOC12 = (Ropeobj178006*)0;
LOC12 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_374), LOC11, 1);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC12, a0.s);
}
break;
case ((Ttypekind292244) 14):
case ((Ttypekind292244) 15):
{
/* needs runtime typeinfo alongside the value */
TY532811 LOC14;
Ropeobj178006* LOC15;
memset((void*)LOC14, 0, sizeof(LOC14));
LOC14[0] = rdloc_538188_839829468(a0);
LOC14[1] = gentypeinfo_535941_839829468((*p0).module, t0);
LOC15 = (Ropeobj178006*)0;
LOC15 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_375), LOC14, 2);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC15, a0.s);
}
break;
case ((Ttypekind292244) 28):
{
TY178507 LOC17;
Ropeobj178006* LOC18;
memset((void*)LOC17, 0, sizeof(LOC17));
LOC17[0] = rdloc_538188_839829468(a0);
LOC18 = (Ropeobj178006*)0;
LOC18 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_376), LOC17, 1);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC18, a0.s);
}
break;
case ((Ttypekind292244) 19):
{
/* passes the address of the location, not its value */
TY532811 LOC20;
Ropeobj178006* LOC21;
memset((void*)LOC20, 0, sizeof(LOC20));
LOC20[0] = addrloc_538204_839829468(a0);
LOC20[1] = gentypeinfo_535941_839829468((*p0).module, t0);
LOC21 = (Ropeobj178006*)0;
LOC21 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_377), LOC20, 2);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC21, a0.s);
}
break;
case ((Ttypekind292244) 27):
case ((Ttypekind292244) 48):
{
/* open-array-like: first build (data, len) pair b0 from the actual arg */
Tloc292816 b0;
TY532811 LOC34;
Ttype292840* LOC35;
Ropeobj178006* LOC36;
memset((void*)(&b0), 0, sizeof(b0));
switch ((*a0.t).kind) {
case ((Ttypekind292244) 27):
case ((Ttypekind292244) 48):
{
/* already an open array: pass through via template _378 */
TY178507 LOC24;
Ropeobj178006* LOC25;
memset((void*)LOC24, 0, sizeof(LOC24));
LOC24[0] = rdloc_538188_839829468(a0);
LOC25 = (Ropeobj178006*)0;
LOC25 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_378), LOC24, 1);
putintodest_550468_839829468(p0, (&b0), (*e0).typ, LOC25, a0.s);
}
break;
case ((Ttypekind292244) 28):
case ((Ttypekind292244) 24):
{
/* seq/string-like: data + length-field access */
TY532811 LOC27;
Ropeobj178006* LOC28;
memset((void*)LOC27, 0, sizeof(LOC27));
LOC27[0] = rdloc_538188_839829468(a0);
LOC27[1] = lenfield_539305_839829468(p0);
LOC28 = (Ropeobj178006*)0;
LOC28 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_379), LOC27, 2);
putintodest_550468_839829468(p0, (&b0), (*e0).typ, LOC28, a0.s);
}
break;
case ((Ttypekind292244) 16):
case ((Ttypekind292244) 4):
{
/* fixed-size array: static length via lengthord */
TY532811 LOC30;
NI64 LOC31;
Ropeobj178006* LOC32;
memset((void*)LOC30, 0, sizeof(LOC30));
LOC30[0] = rdloc_538188_839829468(a0);
LOC31 = (NI64)0;
LOC31 = lengthord_320007_3876443242(a0.t);
LOC30[1] = rope_178401_2381377266(LOC31);
LOC32 = (Ropeobj178006*)0;
LOC32 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_380), LOC30, 2);
putintodest_550468_839829468(p0, (&b0), (*e0).typ, LOC32, a0.s);
}
break;
default:
{
internalerror_196100_155036129((*(*e0).kindU.S6.sons->data[((NI) 0)]).info, ((NimStringDesc*) &T839829468_381));
}
break;
}
memset((void*)LOC34, 0, sizeof(LOC34));
LOC34[0] = rdloc_538188_839829468(b0);
LOC35 = (Ttype292840*)0;
LOC35 = elemtype_320394_3876443242(t0);
LOC34[1] = gentypeinfo_535941_839829468((*p0).module, LOC35);
LOC36 = (Ropeobj178006*)0;
LOC36 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_382), LOC34, 2);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC36, a0.s);
}
break;
case ((Ttypekind292244) 29):
case ((Ttypekind292244) 16):
case ((Ttypekind292244) 4):
case ((Ttypekind292244) 22):
case ((Ttypekind292244) 21):
case ((Ttypekind292244) 26):
case ((Ttypekind292244) 5):
case ((Ttypekind292244) 24):
{
TY532811 LOC38;
Ropeobj178006* LOC39;
memset((void*)LOC38, 0, sizeof(LOC38));
LOC38[0] = rdloc_538188_839829468(a0);
LOC38[1] = gentypeinfo_535941_839829468((*p0).module, t0);
LOC39 = (Ropeobj178006*)0;
LOC39 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_383), LOC38, 2);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC39, a0.s);
}
break;
case ((Ttypekind292244) 3):
case ((Ttypekind292244) 62):
{
/* repr not supported for these kinds: report at the node's source pos */
localerror_196085_155036129((*e0).info, ((NimStringDesc*) &T839829468_384));
}
break;
default:
{
/* fallback: generic repr by address + typeinfo */
TY532811 LOC42;
Ropeobj178006* LOC43;
memset((void*)LOC42, 0, sizeof(LOC42));
LOC42[0] = addrloc_538204_839829468(a0);
LOC42[1] = gentypeinfo_535941_839829468((*p0).module, t0);
LOC43 = (Ropeobj178006*)0;
LOC43 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_383), LOC42, 2);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC43, a0.s);
}
break;
}
gcusage_554439_839829468(e0);
}
/* gengettypeinfo: generates the `getTypeInfo` magic — emits the typeinfo
 * rope for the operand's skipped type straight into destination d0. */
N_NIMCALL(void, gengettypeinfo_555383_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) {
Ttype292840* t0;
Ropeobj178006* LOC1;
t0 = skiptypes_296099_850551059((*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, IL64(211106242013440));
LOC1 = (Ropeobj178006*)0;
LOC1 = gentypeinfo_535941_839829468((*p0).module, t0);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC1, ((Tstorageloc292812) 0));
}
/* genswap: generates `swap(a, b)` as the classic three-assignment sequence
 * tmp = a; a = b; b = tmp, using a fresh temp of the operand's type. */
N_NIMCALL(void, genswap_555638_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) {
Tloc292816 a0;
Tloc292816 b0;
Tloc292816 tmp0;
Ttype292840* LOC1;
memset((void*)(&a0), 0, sizeof(a0));
memset((void*)(&b0), 0, sizeof(b0));
memset((void*)(&tmp0), 0, sizeof(tmp0));
LOC1 = (Ttype292840*)0;
LOC1 = skiptypes_296099_850551059((*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, IL64(211106240964864));
gettemp_537032_839829468(p0, LOC1, (&tmp0), NIM_FALSE);
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0));
genassignment_539264_839829468(p0, tmp0, a0, 0);
genassignment_539264_839829468(p0, a0, b0, 0);
genassignment_539264_839829468(p0, b0, tmp0, 0);
}
/* unaryexpr: generic one-operand expression generator — formats `frmt0`
 * with the single read operand and stores the rope into d0. */
N_NIMCALL(void, unaryexpr_551209_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, NimStringDesc* frmt0) {
Tloc292816 a0;
TY178507 LOC1;
Ropeobj178006* LOC2;
memset((void*)(&a0), 0, sizeof(a0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0));
memset((void*)LOC1, 0, sizeof(LOC1));
LOC1[0] = rdloc_538188_839829468(a0);
LOC2 = (Ropeobj178006*)0;
LOC2 = ropecg_532407_839829468((*p0).module, frmt0, LOC1, 1);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC2, ((Tstorageloc292812) 0));
}
/* binarystmt: two-operand statement generator (no result value).  Asserts
 * via internalerror that d0 has no destination (k != 0 is a bug), then
 * formats `frmt0` with both operands into the statement section. */
N_NIMCALL(void, binarystmt_550501_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, NimStringDesc* frmt0) {
Tloc292816 a0;
Tloc292816 b0;
TY532811 LOC5;
memset((void*)(&a0), 0, sizeof(a0));
memset((void*)(&b0), 0, sizeof(b0));
{
/* a statement must not be asked to produce a value */
if (!!(((*d0).k == ((Tlockind292808) 0)))) goto LA3;
internalerror_196100_155036129((*e0).info, ((NimStringDesc*) &T839829468_387));
}
LA3: ;
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0));
memset((void*)LOC5, 0, sizeof(LOC5));
LOC5[0] = rdloc_538188_839829468(a0);
LOC5[1] = rdloc_538188_839829468(b0);
linecg_532707_839829468(p0, ((Tcprocsection529011) 2), frmt0, LOC5, 2);
}
/* genstrconcat: generates string concatenation `a & b & ...`.  Walks the
 * argument sons, accumulating:
 *   - L0:       static length contribution (1 per char operand, literal
 *               lengths for string literals),
 *   - lens0:    rope of runtime ".len" additions for non-literal strings,
 *   - appends0: rope of per-argument append calls (_390 for chars,
 *               _392 for strings).
 * Then emits one allocation sized L0 + lens0 (template _393), splices the
 * appends after it, and moves the temp into d0. */
N_NIMCALL(void, genstrconcat_554452_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) {
Tloc292816 a0;
Tloc292816 tmp0;
NI L0;
Ropeobj178006* appends0;
Ropeobj178006* lens0;
TY535238 LOC21;
Ropeobj178006** LOC22;
memset((void*)(&a0), 0, sizeof(a0));
memset((void*)(&tmp0), 0, sizeof(tmp0));
gettemp_537032_839829468(p0, (*e0).typ, (&tmp0), NIM_FALSE);
L0 = ((NI) 0);
appends0 = NIM_NIL;
lens0 = NIM_NIL;
{
/* iterate over sons [1 .. sonslen-1] (son 0 is the operator symbol) */
NI i_554475_839829468;
NI HEX3Atmp_554547_839829468;
NI LOC2;
NI res_554550_839829468;
i_554475_839829468 = (NI)0;
HEX3Atmp_554547_839829468 = (NI)0;
LOC2 = (NI)0;
LOC2 = sonslen_295351_850551059(e0);
HEX3Atmp_554547_839829468 = (NI)(LOC2 - ((NI) 2));
res_554550_839829468 = ((NI) 0);
{
while (1) {
if (!(res_554550_839829468 <= HEX3Atmp_554547_839829468)) goto LA4;
i_554475_839829468 = res_554550_839829468;
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[(NI)(i_554475_839829468 + ((NI) 1))], (&a0));
{
Ttype292840* LOC7;
TY532811 LOC10;
Ropeobj178006* LOC11;
LOC7 = (Ttype292840*)0;
LOC7 = skiptypes_296099_850551059((*(*e0).kindU.S6.sons->data[(NI)(i_554475_839829468 + ((NI) 1))]).typ, IL64(211106242013440));
/* type kind 2: char operand — contributes exactly 1 to the length */
if (!((*LOC7).kind == ((Ttypekind292244) 2))) goto LA8;
L0 += ((NI) 1);
memset((void*)LOC10, 0, sizeof(LOC10));
LOC10[0] = tmp0.r;
LOC10[1] = rdloc_538188_839829468(a0);
LOC11 = (Ropeobj178006*)0;
LOC11 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_390), LOC10, 2);
add_178482_2381377266(&appends0, LOC11);
}
goto LA5;
LA8: ;
{
TY532811 LOC19;
Ropeobj178006* LOC20;
{
/* node kinds 20..22: literal — its length is known statically */
if (!((*(*e0).kindU.S6.sons->data[(NI)(i_554475_839829468 + ((NI) 1))]).kind >= ((Tnodekind292020) 20) && (*(*e0).kindU.S6.sons->data[(NI)(i_554475_839829468 + ((NI) 1))]).kind <= ((Tnodekind292020) 22))) goto LA15;
L0 += ((*(*e0).kindU.S6.sons->data[(NI)(i_554475_839829468 + ((NI) 1))]).kindU.S3.strval ? (*(*e0).kindU.S6.sons->data[(NI)(i_554475_839829468 + ((NI) 1))]).kindU.S3.strval->Sup.len : 0);
}
goto LA13;
LA15: ;
{
/* non-literal string: add its runtime length term to lens0 */
TY532811 LOC18;
memset((void*)LOC18, 0, sizeof(LOC18));
LOC18[0] = rdloc_538188_839829468(a0);
LOC18[1] = lenfield_539305_839829468(p0);
addf_179205_2381377266(&lens0, ((NimStringDesc*) &T839829468_391), LOC18, 2);
}
LA13: ;
memset((void*)LOC19, 0, sizeof(LOC19));
LOC19[0] = tmp0.r;
LOC19[1] = rdloc_538188_839829468(a0);
LOC20 = (Ropeobj178006*)0;
LOC20 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_392), LOC19, 2);
add_178482_2381377266(&appends0, LOC20);
}
LA5: ;
res_554550_839829468 += ((NI) 1);
} LA4: ;
}
}
memset((void*)LOC21, 0, sizeof(LOC21));
LOC21[0] = tmp0.r;
LOC21[1] = lens0;
LOC21[2] = rope_178401_2381377266(((NI64) (L0)));
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_393), LOC21, 3);
LOC22 = (Ropeobj178006**)0;
LOC22 = s_529179_3723162438(p0, ((Tcprocsection529011) 2));
add_178482_2381377266(LOC22, appends0);
{
if (!((*d0).k == ((Tlockind292808) 0))) goto LA25;
genericAssign((void*)(&(*d0)), (void*)(&tmp0), (&NTI292816));
}
goto LA23;
LA25: ;
{
genassignment_539264_839829468(p0, (*d0), tmp0, 0);
}
LA23: ;
gcusage_554439_839829468(e0);
}
/* genstrappend: generates `dest.add(...)` / `dest &= ...` for strings.
 * Same length-accumulation scheme as genstrconcat (char = +1, literal =
 * static length, other = runtime ".len" term), but arguments start at
 * son[2] (son[1] is the destination) and the grow call uses template _395
 * on the existing destination instead of allocating a fresh temp. */
N_NIMCALL(void, genstrappend_554554_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) {
Tloc292816 a0;
Tloc292816 dest0;
Ropeobj178006* appends0;
Ropeobj178006* lens0;
NI L0;
TY535238 LOC21;
Ropeobj178006** LOC22;
memset((void*)(&a0), 0, sizeof(a0));
memset((void*)(&dest0), 0, sizeof(dest0));
appends0 = (Ropeobj178006*)0;
lens0 = (Ropeobj178006*)0;
L0 = ((NI) 0);
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&dest0));
{
/* iterate over sons [2 .. sonslen-1] */
NI i_554615_839829468;
NI HEX3Atmp_554676_839829468;
NI LOC2;
NI res_554679_839829468;
i_554615_839829468 = (NI)0;
HEX3Atmp_554676_839829468 = (NI)0;
LOC2 = (NI)0;
LOC2 = sonslen_295351_850551059(e0);
HEX3Atmp_554676_839829468 = (NI)(LOC2 - ((NI) 3));
res_554679_839829468 = ((NI) 0);
{
while (1) {
if (!(res_554679_839829468 <= HEX3Atmp_554676_839829468)) goto LA4;
i_554615_839829468 = res_554679_839829468;
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[(NI)(i_554615_839829468 + ((NI) 2))], (&a0));
{
Ttype292840* LOC7;
TY532811 LOC10;
Ropeobj178006* LOC11;
LOC7 = (Ttype292840*)0;
LOC7 = skiptypes_296099_850551059((*(*e0).kindU.S6.sons->data[(NI)(i_554615_839829468 + ((NI) 2))]).typ, IL64(211106242013440));
/* char operand */
if (!((*LOC7).kind == ((Ttypekind292244) 2))) goto LA8;
L0 += ((NI) 1);
memset((void*)LOC10, 0, sizeof(LOC10));
LOC10[0] = rdloc_538188_839829468(dest0);
LOC10[1] = rdloc_538188_839829468(a0);
LOC11 = (Ropeobj178006*)0;
LOC11 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_390), LOC10, 2);
add_178482_2381377266(&appends0, LOC11);
}
goto LA5;
LA8: ;
{
TY532811 LOC19;
Ropeobj178006* LOC20;
{
/* literal string: static length */
if (!((*(*e0).kindU.S6.sons->data[(NI)(i_554615_839829468 + ((NI) 2))]).kind >= ((Tnodekind292020) 20) && (*(*e0).kindU.S6.sons->data[(NI)(i_554615_839829468 + ((NI) 2))]).kind <= ((Tnodekind292020) 22))) goto LA15;
L0 += ((*(*e0).kindU.S6.sons->data[(NI)(i_554615_839829468 + ((NI) 2))]).kindU.S3.strval ? (*(*e0).kindU.S6.sons->data[(NI)(i_554615_839829468 + ((NI) 2))]).kindU.S3.strval->Sup.len : 0);
}
goto LA13;
LA15: ;
{
/* runtime length term */
TY532811 LOC18;
memset((void*)LOC18, 0, sizeof(LOC18));
LOC18[0] = rdloc_538188_839829468(a0);
LOC18[1] = lenfield_539305_839829468(p0);
addf_179205_2381377266(&lens0, ((NimStringDesc*) &T839829468_391), LOC18, 2);
}
LA13: ;
memset((void*)LOC19, 0, sizeof(LOC19));
LOC19[0] = rdloc_538188_839829468(dest0);
LOC19[1] = rdloc_538188_839829468(a0);
LOC20 = (Ropeobj178006*)0;
LOC20 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_392), LOC19, 2);
add_178482_2381377266(&appends0, LOC20);
}
LA5: ;
res_554679_839829468 += ((NI) 1);
} LA4: ;
}
}
memset((void*)LOC21, 0, sizeof(LOC21));
LOC21[0] = rdloc_538188_839829468(dest0);
LOC21[1] = lens0;
LOC21[2] = rope_178401_2381377266(((NI64) (L0)));
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_395), LOC21, 3);
LOC22 = (Ropeobj178006**)0;
LOC22 = s_529179_3723162438(p0, ((Tcprocsection529011) 2));
add_178482_2381377266(LOC22, appends0);
gcusage_554439_839829468(e0);
}
/* genseqelemappend: generates `seq.add(elem)`.  Picks the grow-call pattern
 * (_396 vs _397) by the same cmd/flag test as lenfield, emits the grow call
 * with the seq and both type descriptors, then assigns the element into the
 * slot at the old length (dest0 built with the _398 indexing template) and
 * finally increments the length field (template _399). */
N_NIMCALL(void, genseqelemappend_554683_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) {
NimStringDesc* seqappendpattern0;
Tloc292816 a0;
Tloc292816 b0;
Tloc292816 dest0;
Ttype292840* bt0;
TY535238 LOC8;
Ttype292840* LOC9;
TY532811 LOC10;
TY532811 LOC11;
{
NIM_BOOL LOC3;
LOC3 = (NIM_BOOL)0;
LOC3 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC3) goto LA4;
LOC3 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA4: ;
/* note the negation: _396 is the pattern when NEITHER condition holds */
if (!!(LOC3)) goto LA5;
seqappendpattern0 = copyString(((NimStringDesc*) &T839829468_396));
}
goto LA1;
LA5: ;
{
seqappendpattern0 = copyString(((NimStringDesc*) &T839829468_397));
}
LA1: ;
memset((void*)(&a0), 0, sizeof(a0));
memset((void*)(&b0), 0, sizeof(b0));
memset((void*)(&dest0), 0, sizeof(dest0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0));
bt0 = skiptypes_296099_850551059((*(*e0).kindU.S6.sons->data[((NI) 2)]).typ, IL64(211106240964864));
memset((void*)LOC8, 0, sizeof(LOC8));
LOC8[0] = rdloc_538188_839829468(a0);
LOC9 = (Ttype292840*)0;
LOC9 = skiptypes_296099_850551059((*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, IL64(211106240964864));
LOC8[1] = gettypedesc_535671_839829468((*p0).module, LOC9);
LOC8[2] = gettypedesc_535671_839829468((*p0).module, bt0);
linecg_532707_839829468(p0, ((Tcprocsection529011) 2), seqappendpattern0, LOC8, 3);
/* dest0 becomes an lvalue for seq[seq.len] (lockind 6, storage 3) */
initloc_532273_839829468((&dest0), ((Tlockind292808) 6), bt0, ((Tstorageloc292812) 3));
memset((void*)LOC10, 0, sizeof(LOC10));
LOC10[0] = rdloc_538188_839829468(a0);
LOC10[1] = lenfield_539305_839829468(p0);
dest0.r = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_398), LOC10, 2);
genassignment_539264_839829468(p0, dest0, b0, 3);
memset((void*)LOC11, 0, sizeof(LOC11));
LOC11[0] = rdloc_538188_839829468(a0);
LOC11[1] = lenfield_539305_839829468(p0);
linecg_532707_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_399), LOC11, 2);
gcusage_554439_839829468(e0);
}
/* binaryexpr: generic two-operand expression generator — formats `frmt0`
 * with both read operands and stores the resulting rope into d0. */
N_NIMCALL(void, binaryexpr_550549_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, NimStringDesc* frmt0) {
Tloc292816 a0;
Tloc292816 b0;
TY532811 LOC1;
Ropeobj178006* LOC2;
memset((void*)(&a0), 0, sizeof(a0));
memset((void*)(&b0), 0, sizeof(b0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0));
memset((void*)LOC1, 0, sizeof(LOC1));
LOC1[0] = rdloc_538188_839829468(a0);
LOC1[1] = rdloc_538188_839829468(b0);
LOC2 = (Ropeobj178006*)0;
LOC2 = ropecg_532407_839829468((*p0).module, frmt0, LOC1, 2);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC2, ((Tstorageloc292812) 0));
}
/* genstrequals: string equality with special cases.
 *   - either side is node kind 23 (presumably nkNilLit — confirm):
 *     pointer compare (template _341);
 *   - either side is an EMPTY string literal (kinds 20..22 with len 0):
 *     compare the other side's length field against 0 (template _400);
 *   - otherwise: full runtime string comparison (template _401). */
N_NIMCALL(void, genstrequals_556666_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) {
Tloc292816 x0;
Tnode292802* a0;
Tnode292802* b0;
memset((void*)(&x0), 0, sizeof(x0));
a0 = (*e0).kindU.S6.sons->data[((NI) 1)];
b0 = (*e0).kindU.S6.sons->data[((NI) 2)];
{
NIM_BOOL LOC3;
LOC3 = (NIM_BOOL)0;
LOC3 = ((*a0).kind == ((Tnodekind292020) 23));
if (LOC3) goto LA4;
LOC3 = ((*b0).kind == ((Tnodekind292020) 23));
LA4: ;
if (!LOC3) goto LA5;
binaryexpr_550549_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_341));
}
goto LA1;
LA5: ;
{
NIM_BOOL LOC8;
TY532811 LOC12;
Ropeobj178006* LOC13;
LOC8 = (NIM_BOOL)0;
LOC8 = ((*a0).kind >= ((Tnodekind292020) 20) && (*a0).kind <= ((Tnodekind292020) 22));
if (!(LOC8)) goto LA9;
/* literal with zero length: `x == ""` becomes a length test on b */
LOC8 = (((*a0).kindU.S3.strval) && ((*a0).kindU.S3.strval)->Sup.len == 0);
LA9: ;
if (!LOC8) goto LA10;
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&x0));
memset((void*)LOC12, 0, sizeof(LOC12));
LOC12[0] = rdloc_538188_839829468(x0);
LOC12[1] = lenfield_539305_839829468(p0);
LOC13 = (Ropeobj178006*)0;
LOC13 = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_400), LOC12, 2);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC13, ((Tstorageloc292812) 0));
}
goto LA1;
LA10: ;
{
NIM_BOOL LOC15;
TY532811 LOC19;
Ropeobj178006* LOC20;
LOC15 = (NIM_BOOL)0;
LOC15 = ((*b0).kind >= ((Tnodekind292020) 20) && (*b0).kind <= ((Tnodekind292020) 22));
if (!(LOC15)) goto LA16;
/* mirror case: `"" == x` becomes a length test on a */
LOC15 = (((*b0).kindU.S3.strval) && ((*b0).kindU.S3.strval)->Sup.len == 0);
LA16: ;
if (!LOC15) goto LA17;
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&x0));
memset((void*)LOC19, 0, sizeof(LOC19));
LOC19[0] = rdloc_538188_839829468(x0);
LOC19[1] = lenfield_539305_839829468(p0);
LOC20 = (Ropeobj178006*)0;
LOC20 = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_400), LOC19, 2);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC20, ((Tstorageloc292812) 0));
}
goto LA1;
LA17: ;
{
binaryexpr_550549_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_401));
}
LA1: ;
}
/* genisnil: generates `isNil(x)`.  For closure procs (type kind 25 with
 * calling convention 8 — presumably tyProc/ccClosure, confirm) tests the
 * closure's proc field (template _404); otherwise a plain null-pointer
 * test (template _405). */
N_NIMCALL(void, genisnil_552620_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) {
Ttype292840* t0;
t0 = skiptypes_296099_850551059((*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, IL64(211106233624832));
{
NIM_BOOL LOC3;
LOC3 = (NIM_BOOL)0;
LOC3 = ((*t0).kind == ((Ttypekind292244) 25));
if (!(LOC3)) goto LA4;
LOC3 = ((*t0).callconv == ((Tcallingconvention292002) 8));
LA4: ;
if (!LOC3) goto LA5;
unaryexpr_551209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_404));
}
goto LA1;
LA5: ;
{
unaryexpr_551209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_405));
}
LA1: ;
}
/* gendollar: generates the `$` (stringify) magic — formats `frmt0` with
 * the operand, reuses a0 as the source loc for the result, creates a temp
 * destination if d0 is empty, assigns, and emits a gcusage note since
 * `$` allocates. */
N_NIMCALL(void, gendollar_555391_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0, NimStringDesc* frmt0) {
Tloc292816 a0;
TY178507 LOC1;
memset((void*)(&a0), 0, sizeof(a0));
initlocexpr_539283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 1)], (&a0));
memset((void*)LOC1, 0, sizeof(LOC1));
LOC1[0] = rdloc_538188_839829468(a0);
/* a0.r is overwritten: a0 now denotes the stringified expression */
a0.r = ropecg_532407_839829468((*p0).module, frmt0, LOC1, 1);
{
if (!((*d0).k == ((Tlockind292808) 0))) goto LA4;
gettemp_537032_839829468(p0, (*n0).typ, d0, NIM_FALSE);
}
LA4: ;
genassignment_539264_839829468(p0, (*d0), a0, 0);
gcusage_554439_839829468(n0);
}
/* genofhelper: builds the runtime-type test expression for `of`.
 * Fast path (template _414, direct subtype check) when the destination
 * type has flag 2 set, or when module codegen flag 5 is set and type flag
 * 5 is clear.  Slow path: declares a per-module static cache variable
 * (fresh label-numbered name via template _415/_416 into file section 9)
 * and emits the cached check (template _417).  Returns the check rope. */
N_NIMCALL(Ropeobj178006*, genofhelper_555139_839829468)(Tcproc529021* p0, Ttype292840* dest0, Ropeobj178006* a0) {
Ropeobj178006* result0;
Ropeobj178006* ti0;
result0 = (Ropeobj178006*)0;
ti0 = gentypeinfo_535941_839829468((*p0).module, dest0);
{
NIM_BOOL LOC3;
NIM_BOOL LOC5;
TY532811 LOC9;
LOC3 = (NIM_BOOL)0;
LOC3 = (((*dest0).flags &(1U<<((NU)(((Ttypeflag292431) 2))&31U)))!=0);
if (LOC3) goto LA4;
LOC5 = (NIM_BOOL)0;
LOC5 = (((*(*p0).module).flags &(1U<<((NU)(((Codegenflag529025) 5))&7U)))!=0);
if (!(LOC5)) goto LA6;
LOC5 = !((((*dest0).flags &(1U<<((NU)(((Ttypeflag292431) 5))&31U)))!=0));
LA6: ;
LOC3 = LOC5;
LA4: ;
if (!LOC3) goto LA7;
memset((void*)LOC9, 0, sizeof(LOC9));
LOC9[0] = a0;
LOC9[1] = ti0;
result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_414), LOC9, 2);
}
goto LA1;
LA7: ;
{
Ropeobj178006* LOC11;
Ropeobj178006* cache0;
Ropeobj178006* LOC12;
TY178507 LOC13;
TY535238 LOC14;
LOC11 = (Ropeobj178006*)0;
/* ensure the runtime helper symbol (_129) is emitted in this module */
LOC11 = cgsym_532403_839829468((*p0).module, ((NimStringDesc*) &T839829468_129));
(*(*p0).module).labels += ((NI) 1);
LOC12 = (Ropeobj178006*)0;
LOC12 = rope_178401_2381377266(((NI64) ((*(*p0).module).labels)));
cache0 = HEX26_178452_2381377266(((NimStringDesc*) &T839829468_415), LOC12);
memset((void*)LOC13, 0, sizeof(LOC13));
LOC13[0] = cache0;
addf_179205_2381377266(&(*(*p0).module).s[(((Tcfilesection529005) 9))- 0], ((NimStringDesc*) &T839829468_416), LOC13, 1);
memset((void*)LOC14, 0, sizeof(LOC14));
LOC14[0] = a0;
LOC14[1] = ti0;
LOC14[2] = cache0;
result0 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_417), LOC14, 3);
}
LA1: ;
return result0;
}
/* Nim-compiler-generated C. Generates code for an `of` expression: tests
 * whether x0's dynamic type is (a subtype of) typ0 and puts a boolean
 * (system type kind 1) into d0.
 * Structure: (1) walk the chain of indirection type kinds (21/22/23),
 * remembering a nil-check operand and wrapping r0 in a dereference format
 * (T839829468_124) when required; (2) unless running as command kind 2 or
 * with module-symbol flag 27, also skip through object kinds (17) with a
 * base, appending an access suffix per level; (3) reject objects lacking a
 * type field with a global error; (4) wrap the helper-generated check in a
 * nil guard (T839829468_413) when a nil-check operand was recorded.
 * Exact enum semantics (which Ttypekind is ref/ptr/var etc.) are not
 * visible from this file — TODO confirm. */
N_NIMCALL(void, genof_555201_839829468)(Tcproc529021* p0, Tnode292802* x0, Ttype292840* typ0, Tloc292816* d0) {
Tloc292816 a0;
Ttype292840* dest0;
Ropeobj178006* r0;
Ropeobj178006* nilcheck0;
Ttype292840* t0;
Ttype292840* LOC41;
memset((void*)(&a0), 0, sizeof(a0));
initlocexpr_539283_839829468(p0, x0, (&a0));
dest0 = skiptypes_296099_850551059(typ0, IL64(211106247256320));
r0 = rdloc_538188_839829468(a0);
nilcheck0 = NIM_NIL;
t0 = skiptypes_296099_850551059(a0.t, IL64(211106232576256));
{
/* (1) unwrap indirection kinds 21/22/23, tracking nil-check + derefs */
while (1) {
Ttype292840* LOC16;
if (!((*t0).kind == ((Ttypekind292244) 23) || (*t0).kind == ((Ttypekind292244) 21) || (*t0).kind == ((Ttypekind292244) 22))) goto LA2;
{
/* any kind other than 23 makes the current r0 the nil-check operand */
if (!!(((*t0).kind == ((Ttypekind292244) 23)))) goto LA5;
nilcheck0 = r0;
}
LA5: ;
{
NIM_BOOL LOC9;
NIM_BOOL LOC11;
TY178507 LOC15;
LOC9 = (NIM_BOOL)0;
/* deref r0 unless kind == 23 and (cmd == 2 or module-symbol flag 27) */
LOC9 = !(((*t0).kind == ((Ttypekind292244) 23)));
if (LOC9) goto LA10;
LOC11 = (NIM_BOOL)0;
LOC11 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC11) goto LA12;
LOC11 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA12: ;
LOC9 = !(LOC11);
LA10: ;
if (!LOC9) goto LA13;
memset((void*)LOC15, 0, sizeof(LOC15));
LOC15[0] = r0;
r0 = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_124), LOC15, 1);
}
LA13: ;
LOC16 = (Ttype292840*)0;
LOC16 = lastson_295377_850551059(t0);
t0 = skiptypes_296099_850551059(LOC16, IL64(211106232576256));
} LA2: ;
}
{
/* (2) unless cmd == 2 or module-symbol flag 27: walk object base chain */
NIM_BOOL LOC19;
LOC19 = (NIM_BOOL)0;
LOC19 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC19) goto LA20;
LOC19 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA20: ;
if (!!(LOC19)) goto LA21;
{
while (1) {
NIM_BOOL LOC25;
TY533289 LOC27;
Ropeobj178006* LOC28;
LOC25 = (NIM_BOOL)0;
LOC25 = ((*t0).kind == ((Ttypekind292244) 17));
if (!(LOC25)) goto LA26;
LOC25 = !(((*t0).sons->data[((NI) 0)] == NIM_NIL));
LA26: ;
if (!LOC25) goto LA24;
memset((void*)LOC27, 0, sizeof(LOC27));
LOC28 = (Ropeobj178006*)0;
LOC28 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_153), LOC27, 0);
add_178482_2381377266(&r0, LOC28);
t0 = skiptypes_296099_850551059((*t0).sons->data[((NI) 0)], IL64(211106247215360));
} LA24: ;
}
}
LA21: ;
{
/* (3) an object without a type header cannot be tested at runtime */
NIM_BOOL LOC31;
LOC31 = (NIM_BOOL)0;
LOC31 = isobjlackingtypefield_533513_839829468(t0);
if (!LOC31) goto LA32;
globalerror_196071_155036129((*x0).info, ((Tmsgkind191002) 4), ((NimStringDesc*) &T839829468_412));
}
LA32: ;
{
/* (4a) nil-guarded check */
TY532811 LOC38;
if (!!((nilcheck0 == NIM_NIL))) goto LA36;
memset((void*)LOC38, 0, sizeof(LOC38));
LOC38[0] = nilcheck0;
LOC38[1] = genofhelper_555139_839829468(p0, dest0, r0);
r0 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_413), LOC38, 2);
}
goto LA34;
LA36: ;
{
/* (4b) plain check */
TY178507 LOC40;
memset((void*)LOC40, 0, sizeof(LOC40));
LOC40[0] = genofhelper_555139_839829468(p0, dest0, r0);
r0 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_418), LOC40, 1);
}
LA34: ;
LOC41 = (Ttype292840*)0;
LOC41 = getsystype_338150_3937434831(((Ttypekind292244) 1));
putintodest_550468_839829468(p0, d0, LOC41, r0, a0.s);
}
/* Nim-cgen wrapper for the `of` node form: forwards son[1] as the tested
 * expression and son[2]'s type as the target type to genof_555201_839829468. */
N_NIMCALL(void, genof_555331_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0) {
Tnode292802* operand0;
Ttype292840* desttyp0;
operand0 = (*n0).kindU.S6.sons->data[((NI) 1)];
desttyp0 = (*(*n0).kindU.S6.sons->data[((NI) 2)]).typ;
genof_555201_839829468(p0, operand0, desttyp0, d0);
}
/* Nim-compiler-generated C. Emits the allocation for a `new` of ref loc a0.
 * When sizeexpr is NIL, a default size expression is derived from the ref'd
 * type. Under native-GC storage (a0.s == 3) the result is assigned via a
 * write-barrier form (with a cycle-aware variant when the type can form a
 * reference cycle); otherwise a plain generated assignment is used. Finally
 * the object header of the allocated value is initialized. */
N_NIMCALL(void, rawgennew_554741_839829468)(Tcproc529021* p0, Tloc292816 a0, Ropeobj178006* sizeexpr_554745_839829468) {
Ropeobj178006* sizeexpr0;
Ttype292840* reftype0;
Tloc292816 b0;
TY535238 args0;
Ttype292840* bt0;
sizeexpr0 = sizeexpr_554745_839829468;
reftype0 = skiptypes_296099_850551059(a0.t, IL64(211106242013440));
memset((void*)(&b0), 0, sizeof(b0));
initloc_532273_839829468((&b0), ((Tlockind292808) 6), a0.t, ((Tstorageloc292812) 3));
{
TY178507 LOC5;
Ttype292840* LOC6;
/* NOTE(review): `!sizeexpr0 == 0` parses as `(!sizeexpr0) == 0`, i.e. the
 * goto fires when sizeexpr0 != NULL — so this block runs only for a NULL
 * sizeexpr, computing the default sizeof-based expression. Correct, but
 * the precedence makes it read backwards. */
if (!sizeexpr0 == 0) goto LA3;
memset((void*)LOC5, 0, sizeof(LOC5));
LOC6 = (Ttype292840*)0;
LOC6 = skiptypes_296099_850551059((*reftype0).sons->data[((NI) 0)], IL64(211106233624832));
LOC5[0] = gettypedesc_535671_839829468((*p0).module, LOC6);
sizeexpr0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_419), LOC5, 1);
}
LA3: ;
/* args for the allocator call: typedesc, typeinfo, size */
memset((void*)args0, 0, sizeof(args0));
args0[0] = gettypedesc_535671_839829468((*p0).module, reftype0);
args0[1] = gentypeinfo_535941_839829468((*p0).module, reftype0);
args0[2] = sizeexpr0;
{
NIM_BOOL LOC9;
TY532811 LOC21;
LOC9 = (NIM_BOOL)0;
LOC9 = (a0.s == ((Tstorageloc292812) 3));
if (!(LOC9)) goto LA10;
LOC9 = usesnativegc_169177_2607990831();
LA10: ;
if (!LOC9) goto LA11;
{
/* cycle-capable ref types get a different pre-assignment line (T..._420) */
NIM_BOOL LOC15;
TY178507 LOC18;
LOC15 = (NIM_BOOL)0;
LOC15 = canformacycle_320123_3876443242(a0.t);
if (!LOC15) goto LA16;
memset((void*)LOC18, 0, sizeof(LOC18));
LOC18[0] = rdloc_538188_839829468(a0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_420), LOC18, 1);
}
goto LA13;
LA16: ;
{
TY178507 LOC20;
memset((void*)LOC20, 0, sizeof(LOC20));
LOC20[0] = rdloc_538188_839829468(a0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_255), LOC20, 1);
}
LA13: ;
b0.r = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_421), args0, 3);
memset((void*)LOC21, 0, sizeof(LOC21));
LOC21[0] = rdloc_538188_839829468(a0);
LOC21[1] = rdloc_538188_839829468(b0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_123), LOC21, 2);
}
goto LA7;
LA11: ;
{
/* non-native-GC path: allocator variant T..._422 plus generic assignment */
b0.r = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_422), args0, 3);
genassignment_539264_839829468(p0, a0, b0, 0);
}
LA7: ;
bt0 = skiptypes_296099_850551059((*reftype0).sons->data[((NI) 0)], IL64(211106233624832));
genobjectinit_538242_839829468(p0, ((Tcprocsection529011) 2), bt0, a0, NIM_FALSE);
}
/* Nim-compiler-generated C. `new` magic entry point: evaluates the ref
 * operand (son[1]); when the call has 3 sons, son[2] supplies an explicit
 * size expression, otherwise rawgennew receives NIL and derives the size. */
N_NIMCALL(void, gennew_554782_839829468)(Tcproc529021* p0, Tnode292802* e0) {
Tloc292816 a0;
memset((void*)(&a0), 0, sizeof(a0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0));
{
NI LOC3;
Tloc292816 se0;
Ropeobj178006* LOC6;
LOC3 = (NI)0;
LOC3 = len_293081_850551059(e0);
if (!(LOC3 == ((NI) 3))) goto LA4;
/* explicit size argument present */
memset((void*)(&se0), 0, sizeof(se0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&se0));
LOC6 = (Ropeobj178006*)0;
LOC6 = rdloc_538188_839829468(se0);
rawgennew_554741_839829468(p0, a0, LOC6);
}
goto LA1;
LA4: ;
{
rawgennew_554741_839829468(p0, a0, NIM_NIL);
}
LA1: ;
gcusage_554439_839829468(e0);
}
/* Nim-compiler-generated C. `new(ref, finalizer)` form: evaluates the ref
 * (son[1]) and the finalizer (son[2]), registers {typeinfo, finalizer} in
 * module section 14, allocates via an allocator format that takes
 * {typedesc, typeinfo, elem typedesc}, assigns the result, and initializes
 * the allocated object's header. */
N_NIMCALL(void, gennewfinalize_555110_839829468)(Tcproc529021* p0, Tnode292802* e0) {
Tloc292816 a0;
Tloc292816 b0;
Tloc292816 f0;
Ttype292840* reftype0;
Ttype292840* bt0;
Ropeobj178006* ti0;
TY532811 LOC1;
TY535238 LOC2;
Ttype292840* LOC3;
Ttype292840* LOC4;
Ttype292840* LOC5;
memset((void*)(&a0), 0, sizeof(a0));
memset((void*)(&b0), 0, sizeof(b0));
memset((void*)(&f0), 0, sizeof(f0));
reftype0 = (Ttype292840*)0;
bt0 = (Ttype292840*)0;
ti0 = (Ropeobj178006*)0;
reftype0 = skiptypes_296099_850551059((*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, IL64(211106242013440));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&f0));
initloc_532273_839829468((&b0), ((Tlockind292808) 6), a0.t, ((Tstorageloc292812) 3));
ti0 = gentypeinfo_535941_839829468((*p0).module, reftype0);
/* attach the finalizer to the type's RTTI in module section 14 */
memset((void*)LOC1, 0, sizeof(LOC1));
LOC1[0] = ti0;
LOC1[1] = rdloc_538188_839829468(f0);
addf_179205_2381377266(&(*(*p0).module).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_423), LOC1, 2);
memset((void*)LOC2, 0, sizeof(LOC2));
LOC2[0] = gettypedesc_535671_839829468((*p0).module, reftype0);
LOC2[1] = ti0;
LOC3 = (Ttype292840*)0;
LOC3 = lastson_295377_850551059(reftype0);
LOC4 = (Ttype292840*)0;
LOC4 = skiptypes_296099_850551059(LOC3, IL64(211106233624832));
LOC2[2] = gettypedesc_535671_839829468((*p0).module, LOC4);
b0.r = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_424), LOC2, 3);
genassignment_539264_839829468(p0, a0, b0, 0);
LOC5 = (Ttype292840*)0;
LOC5 = lastson_295377_850551059(reftype0);
bt0 = skiptypes_296099_850551059(LOC5, IL64(211106233624832));
genobjectinit_538242_839829468(p0, ((Tcprocsection529011) 2), bt0, a0, NIM_FALSE);
gcusage_554439_839829468(e0);
}
/* Nim-compiler-generated C. Shared helper for seq allocation: builds the
 * allocator call {typedesc, typeinfo, length} and assigns it into dest0.
 * Mirrors rawgennew's split: native-GC storage (s == 3) uses write-barrier
 * line formats (cycle-aware variant when the type can form a cycle);
 * otherwise a plain generated assignment of the allocator call. */
N_NIMCALL(void, gennewseqaux_554795_839829468)(Tcproc529021* p0, Tloc292816 dest0, Ropeobj178006* length0) {
Ttype292840* seqtype0;
TY535238 args0;
Tloc292816 call0;
seqtype0 = skiptypes_296099_850551059(dest0.t, IL64(211106242013440));
memset((void*)args0, 0, sizeof(args0));
args0[0] = gettypedesc_535671_839829468((*p0).module, seqtype0);
args0[1] = gentypeinfo_535941_839829468((*p0).module, seqtype0);
args0[2] = length0;
memset((void*)(&call0), 0, sizeof(call0));
initloc_532273_839829468((&call0), ((Tlockind292808) 6), dest0.t, ((Tstorageloc292812) 3));
{
NIM_BOOL LOC3;
TY532811 LOC15;
LOC3 = (NIM_BOOL)0;
LOC3 = (dest0.s == ((Tstorageloc292812) 3));
if (!(LOC3)) goto LA4;
LOC3 = usesnativegc_169177_2607990831();
LA4: ;
if (!LOC3) goto LA5;
{
/* cycle-capable element types take the T..._420 pre-assignment line */
NIM_BOOL LOC9;
TY178507 LOC12;
LOC9 = (NIM_BOOL)0;
LOC9 = canformacycle_320123_3876443242(dest0.t);
if (!LOC9) goto LA10;
memset((void*)LOC12, 0, sizeof(LOC12));
LOC12[0] = rdloc_538188_839829468(dest0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_420), LOC12, 1);
}
goto LA7;
LA10: ;
{
TY178507 LOC14;
memset((void*)LOC14, 0, sizeof(LOC14));
LOC14[0] = rdloc_538188_839829468(dest0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_255), LOC14, 1);
}
LA7: ;
call0.r = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_425), args0, 3);
memset((void*)LOC15, 0, sizeof(LOC15));
LOC15[0] = rdloc_538188_839829468(dest0);
LOC15[1] = rdloc_538188_839829468(call0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_123), LOC15, 2);
}
goto LA1;
LA5: ;
{
/* non-native-GC path */
call0.r = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_426), args0, 3);
genassignment_539264_839829468(p0, dest0, call0, 0);
}
LA1: ;
}
/* Nim-cgen: `newSeq(s, len)` entry point — evaluates the seq destination
 * (son[1]) and the length expression (son[2]), then hands the allocation
 * off to gennewseqaux and records GC usage for the node. */
N_NIMCALL(void, gennewseq_554824_839829468)(Tcproc529021* p0, Tnode292802* e0) {
Tloc292816 seqloc0;
Tloc292816 lenloc0;
Ropeobj178006* lenrope0;
memset((void*)(&seqloc0), 0, sizeof(seqloc0));
memset((void*)(&lenloc0), 0, sizeof(lenloc0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&seqloc0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&lenloc0));
lenrope0 = rdloc_538188_839829468(lenloc0);
gennewseqaux_554795_839829468(p0, seqloc0, lenrope0);
gcusage_554439_839829468(e0);
}
/* Nim-compiler-generated C. `newSeqOfCap(cap)` magic: evaluates the
 * capacity expression (son[1]), formats an allocator call with
 * {typedesc, typeinfo, cap} and places the resulting expression into d0. */
N_NIMCALL(void, gennewseqofcap_554836_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) {
Ttype292840* seqtype0;
Tloc292816 a0;
TY535238 LOC1;
Ropeobj178006* LOC2;
seqtype0 = skiptypes_296099_850551059((*e0).typ, IL64(211106242013440));
memset((void*)(&a0), 0, sizeof(a0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0));
memset((void*)LOC1, 0, sizeof(LOC1));
LOC1[0] = gettypedesc_535671_839829468((*p0).module, seqtype0);
LOC1[1] = gentypeinfo_535941_839829468((*p0).module, seqtype0);
LOC1[2] = rdloc_538188_839829468(a0);
LOC2 = (Ropeobj178006*)0;
LOC2 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_427), LOC1, 3);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC2, ((Tstorageloc292812) 0));
gcusage_554439_839829468(e0);
}
/* Nim-compiler-generated C. Emits a C typedef for a closure/proc type t0
 * into module section 3 and returns its (temp) name.
 * For non-imported types: when the calling convention is 8 (closure,
 * presumably — TODO confirm) and kind0 != 2, a plain function-pointer
 * typedef (T839829468_64) is emitted with the rendered calling convention;
 * otherwise the closure-struct form (T839829468_75) is used. */
N_NIMCALL(Ropeobj178006*, getclosuretype_535683_839829468)(Tcgen529027* m0, Ttype292840* t0, Tclosuretypekind535679 kind0) {
Ropeobj178006* result0;
Intset268030 check0;
Ropeobj178006* rettype0;
Ropeobj178006* desc0;
result0 = (Ropeobj178006*)0;
/* NOTE(review): check0 is zeroed, nil-checked, then zeroed again — a
 * generated-code artifact; harmless but redundant. */
memset((void*)(&check0), 0, sizeof(check0));
chckNil((void*)(&check0));
memset((void*)(&check0), 0, sizeof(check0));
initintset_268885_2627731572((&check0));
result0 = gettempname_533596_839829468(m0);
rettype0 = (Ropeobj178006*)0;
desc0 = (Ropeobj178006*)0;
genprocparams_534115_839829468(m0, t0, &rettype0, &desc0, (&check0), !((kind0 == ((Tclosuretypekind535679) 0))), NIM_FALSE);
{
NIM_BOOL LOC3;
LOC3 = (NIM_BOOL)0;
LOC3 = isimportedtype_533449_839829468(t0);
if (!!(LOC3)) goto LA4;
{
NIM_BOOL LOC8;
TY535235 LOC12;
LOC8 = (NIM_BOOL)0;
/* LOC8 := callconv != 8  OR  kind0 != 2 */
LOC8 = !(((*t0).callconv == ((Tcallingconvention292002) 8)));
if (LOC8) goto LA9;
LOC8 = !((kind0 == ((Tclosuretypekind535679) 2)));
LA9: ;
if (!LOC8) goto LA10;
memset((void*)LOC12, 0, sizeof(LOC12));
LOC12[0] = rope_178277_2381377266(Callingconvtostr_533585_839829468[((*t0).callconv)- 0]);
LOC12[1] = rettype0;
LOC12[2] = result0;
LOC12[3] = desc0;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 3))- 0], ((NimStringDesc*) &T839829468_64), LOC12, 4);
}
goto LA6;
LA10: ;
{
TY535238 LOC14;
memset((void*)LOC14, 0, sizeof(LOC14));
LOC14[0] = result0;
LOC14[1] = rettype0;
LOC14[2] = desc0;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 3))- 0], ((NimStringDesc*) &T839829468_75), LOC14, 3);
}
LA6: ;
}
LA4: ;
return result0;
}
/* Nim-compiler-generated C. Generates a `cast` expression. Three shapes:
 * (1) for aggregate-ish type kinds {18,17,16,27,48,4} with an addressable
 *     operand (loc flag 0 clear): cast through the address (T839829468_429);
 * (2) for proc type (kind 25) with calling convention 8: cast through the
 *     generated closure type (T839829468_430);
 * (3) otherwise: plain value cast to the target typedesc (T839829468_430).
 * Exact Ttypekind meanings are not visible here — TODO confirm. */
N_NIMCALL(void, gensomecast_556480_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) {
Tloc292816 a0;
Ttype292840* etyp0;
memset((void*)(&a0), 0, sizeof(a0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0));
etyp0 = skiptypes_296099_850551059((*e0).typ, IL64(211106233624832));
{
NIM_BOOL LOC3;
TY532811 LOC7;
Ropeobj178006* LOC8;
LOC3 = (NIM_BOOL)0;
LOC3 = ((*etyp0).kind == ((Ttypekind292244) 18) || (*etyp0).kind == ((Ttypekind292244) 17) || (*etyp0).kind == ((Ttypekind292244) 16) || (*etyp0).kind == ((Ttypekind292244) 27) || (*etyp0).kind == ((Ttypekind292244) 48) || (*etyp0).kind == ((Ttypekind292244) 4));
if (!(LOC3)) goto LA4;
LOC3 = !(((a0.flags &(1U<<((NU)(((Tlocflag292810) 0))&15U)))!=0));
LA4: ;
if (!LOC3) goto LA5;
/* (1) cast via address of the operand */
memset((void*)LOC7, 0, sizeof(LOC7));
LOC7[0] = gettypedesc_535671_839829468((*p0).module, (*e0).typ);
LOC7[1] = addrloc_538204_839829468(a0);
LOC8 = (Ropeobj178006*)0;
LOC8 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_429), LOC7, 2);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC8, a0.s);
}
goto LA1;
LA5: ;
{
NIM_BOOL LOC10;
TY532811 LOC14;
Ropeobj178006* LOC15;
LOC10 = (NIM_BOOL)0;
LOC10 = ((*etyp0).kind == ((Ttypekind292244) 25));
if (!(LOC10)) goto LA11;
LOC10 = ((*etyp0).callconv == ((Tcallingconvention292002) 8));
LA11: ;
if (!LOC10) goto LA12;
/* (2) proc type with callconv 8: cast through its closure type */
memset((void*)LOC14, 0, sizeof(LOC14));
LOC14[0] = getclosuretype_535683_839829468((*p0).module, etyp0, ((Tclosuretypekind535679) 1));
LOC14[1] = rdcharloc_538227_839829468(a0);
LOC15 = (Ropeobj178006*)0;
LOC15 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_430), LOC14, 2);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC15, a0.s);
}
goto LA1;
LA12: ;
{
/* (3) plain value cast */
TY532811 LOC17;
Ropeobj178006* LOC18;
memset((void*)LOC17, 0, sizeof(LOC17));
LOC17[0] = gettypedesc_535671_839829468((*p0).module, (*e0).typ);
LOC17[1] = rdcharloc_538227_839829468(a0);
LOC18 = (Ropeobj178006*)0;
LOC18 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_430), LOC17, 2);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC18, a0.s);
}
LA1: ;
}
/* Nim-cgen helper: evaluates son[1] of e0, reads it with the char-loc
 * reader, substitutes it into the one-argument format string frmt0 and
 * places the resulting expression into destination d0. */
N_NIMCALL(void, unaryexprchar_551222_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, NimStringDesc* frmt0) {
Tloc292816 operand0;
TY178507 fmtargs0;
Ropeobj178006* code0;
memset((void*)(&operand0), 0, sizeof(operand0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&operand0));
memset((void*)fmtargs0, 0, sizeof(fmtargs0));
fmtargs0[0] = rdcharloc_538227_839829468(operand0);
code0 = ropecg_532407_839829468((*p0).module, frmt0, fmtargs0, 1);
putintodest_550468_839829468(p0, d0, (*e0).typ, code0, ((Tstorageloc292812) 0));
}
/* Nim-cgen: `ord` magic — a thin forward to unaryexprchar with the fixed
 * generated format constant T839829468_301. */
N_NIMCALL(void, genord_556474_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) {
NimStringDesc* fmt0;
fmt0 = ((NimStringDesc*) &T839829468_301);
unaryexprchar_551222_839829468(p0, e0, d0, fmt0);
}
/* Nim-compiler-generated C. Implements the `len`/`high` family (op0
 * distinguishes the two: op0 == magic 8 selects one variant — presumably
 * `high`, TODO confirm). Dispatches on the (skipped) type kind of son[1]:
 *   27/48: formats _431/_432; 29: strlen-based (_433/_434, pulls string.h
 *   into the module); 28/24: one of four formats depending on the
 *   cmd-2 / module-symbol-flag-27 mode; 16/4: fully constant-folded via
 *   lastord/lengthord; anything else is an internal error. */
N_NIMCALL(void, genarraylen_555415_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, Tmagic292524 op0) {
Tnode292802* a0;
Ttype292840* typ0;
a0 = (*e0).kindU.S6.sons->data[((NI) 1)];
{
/* unwrap a node of kind 64 to its first son before typing */
if (!((*a0).kind == ((Tnodekind292020) 64))) goto LA3;
a0 = (*a0).kindU.S6.sons->data[((NI) 0)];
}
LA3: ;
typ0 = skiptypes_296099_850551059((*a0).typ, IL64(211106240964864));
switch ((*typ0).kind) {
case ((Ttypekind292244) 27):
case ((Ttypekind292244) 48):
{
{
if (!(op0 == ((Tmagic292524) 8))) goto LA8;
unaryexpr_551209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_431));
}
goto LA6;
LA8: ;
{
unaryexpr_551209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_432));
}
LA6: ;
}
break;
case ((Ttypekind292244) 29):
{
/* needs <string.h> in the emitted module (strlen-based length) */
usestringh_532345_839829468((*p0).module);
{
if (!(op0 == ((Tmagic292524) 8))) goto LA14;
unaryexpr_551209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_433));
}
goto LA12;
LA14: ;
{
unaryexpr_551209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_434));
}
LA12: ;
}
break;
case ((Ttypekind292244) 28):
case ((Ttypekind292244) 24):
{
{
/* mode split: cmd == 2 or module-symbol flag 27 selects the second set */
NIM_BOOL LOC20;
LOC20 = (NIM_BOOL)0;
LOC20 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC20) goto LA21;
LOC20 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA21: ;
if (!!(LOC20)) goto LA22;
{
if (!(op0 == ((Tmagic292524) 8))) goto LA26;
unaryexpr_551209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_435));
}
goto LA24;
LA26: ;
{
unaryexpr_551209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_436));
}
LA24: ;
}
goto LA18;
LA22: ;
{
{
if (!(op0 == ((Tmagic292524) 8))) goto LA32;
unaryexpr_551209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_437));
}
goto LA30;
LA32: ;
{
unaryexpr_551209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_438));
}
LA30: ;
}
LA18: ;
}
break;
case ((Ttypekind292244) 16):
case ((Ttypekind292244) 4):
{
/* statically-sized: fold to a literal via lastord / lengthord */
{
NI64 LOC40;
Ropeobj178006* LOC41;
if (!(op0 == ((Tmagic292524) 8))) goto LA38;
LOC40 = (NI64)0;
LOC40 = lastord_320004_3876443242(typ0);
LOC41 = (Ropeobj178006*)0;
LOC41 = rope_178401_2381377266(LOC40);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC41, ((Tstorageloc292812) 0));
}
goto LA36;
LA38: ;
{
NI64 LOC43;
Ropeobj178006* LOC44;
LOC43 = (NI64)0;
LOC43 = lengthord_320007_3876443242(typ0);
LOC44 = (Ropeobj178006*)0;
LOC44 = rope_178401_2381377266(LOC43);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC44, ((Tstorageloc292812) 0));
}
LA36: ;
}
break;
default:
{
internalerror_196100_155036129((*e0).info, ((NimStringDesc*) &T839829468_439));
}
break;
}
}
/* Nim-cgen helper for a one-operand statement-level magic: asserts that no
 * destination loc is bound (d0.k must be 0, otherwise internal error),
 * evaluates son[1], and emits the formatted statement line into proc
 * section 2. */
N_NIMCALL(void, unarystmt_550527_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, NimStringDesc* frmt0) {
Tloc292816 operand0;
TY178507 fmtargs0;
memset((void*)(&operand0), 0, sizeof(operand0));
/* statement magics must not have a pre-bound destination */
if (!((*d0).k == ((Tlockind292808) 0))) {
internalerror_196100_155036129((*e0).info, ((NimStringDesc*) &T839829468_442));
}
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&operand0));
memset((void*)fmtargs0, 0, sizeof(fmtargs0));
fmtargs0[0] = rdloc_538188_839829468(operand0);
linecg_532707_839829468(p0, ((Tcprocsection529011) 2), frmt0, fmtargs0, 1);
}
/* Nim-cgen: `setLen` on strings — forwards to the generic binary-statement
 * generator with the fixed format constant T839829468_445, then records
 * GC usage for the node. */
N_NIMCALL(void, gensetlengthstr_555632_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) {
NimStringDesc* fmt0;
fmt0 = ((NimStringDesc*) &T839829468_445);
binarystmt_550501_839829468(p0, e0, d0, fmt0);
gcusage_554439_839829468(e0);
}
/* Nim-compiler-generated C. `setLen` on seqs: evaluates the seq (son[1])
 * and the new length (son[2]), picks one of two setLen format strings
 * depending on the cmd-2 / module-symbol-flag-27 mode, and emits a line
 * with {seq, len, seq typedesc, elem typedesc}. */
N_NIMCALL(void, gensetlengthseq_555500_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) {
Tloc292816 a0;
Tloc292816 b0;
Ttype292840* t0;
NimStringDesc* setlenpattern0;
TY535235 LOC8;
memset((void*)(&a0), 0, sizeof(a0));
memset((void*)(&b0), 0, sizeof(b0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0));
t0 = skiptypes_296099_850551059((*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, IL64(211106240964864));
{
NIM_BOOL LOC3;
LOC3 = (NIM_BOOL)0;
LOC3 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC3) goto LA4;
LOC3 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA4: ;
if (!!(LOC3)) goto LA5;
setlenpattern0 = copyString(((NimStringDesc*) &T839829468_446));
}
goto LA1;
LA5: ;
{
setlenpattern0 = copyString(((NimStringDesc*) &T839829468_447));
}
LA1: ;
memset((void*)LOC8, 0, sizeof(LOC8));
LOC8[0] = rdloc_538188_839829468(a0);
LOC8[1] = rdloc_538188_839829468(b0);
LOC8[2] = gettypedesc_535671_839829468((*p0).module, t0);
LOC8[3] = gettypedesc_535671_839829468((*p0).module, (*t0).sons->data[((NI) 0)]);
linecg_532707_839829468(p0, ((Tcprocsection529011) 2), setlenpattern0, LOC8, 4);
gcusage_554439_839829468(e0);
}
/* Nim-compiler-generated C. Reads loc a0 as a set element expression:
 * returns the char-loc reading, rebased by the set base type's first
 * ordinal when that is non-zero (format T839829468_448 over
 * {elem, firstOrd}).
 * Improvement over the generated original: firstord_… was called twice
 * with the same settype0 (a pure type query, as its other uses in this
 * file show); the value is now computed once and reused. */
N_NIMCALL(Ropeobj178006*, rdsetelemloc_555662_839829468)(Tloc292816 a0, Ttype292840* settype0) {
Ropeobj178006* result0;
NI64 first0;
result0 = rdcharloc_538227_839829468(a0);
first0 = firstord_320001_3876443242(settype0);
{
TY532811 LOC6;
/* only rebase when the set's range does not start at ordinal 0 */
if (!!((first0 == IL64(0)))) goto LA4;
memset((void*)LOC6, 0, sizeof(LOC6));
LOC6[0] = result0;
LOC6[1] = rope_178401_2381377266(first0);
result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_448), LOC6, 2);
}
LA4: ;
return result0;
}
/* Nim-compiler-generated C. Statement-level incl/excl on sets: evaluates
 * the set (son[1]) and the element (son[2]), reads the element through
 * rdsetelemloc (which rebases by the set's first ordinal), and emits the
 * formatted line into proc section 2. */
N_NIMCALL(void, binarystmtinexcl_555857_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, NimStringDesc* frmt0) {
Tloc292816 a0;
Tloc292816 b0;
TY532811 LOC1;
memset((void*)(&a0), 0, sizeof(a0));
memset((void*)(&b0), 0, sizeof(b0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0));
memset((void*)LOC1, 0, sizeof(LOC1));
LOC1[0] = rdloc_538188_839829468(a0);
LOC1[1] = rdsetelemloc_555662_839829468(b0, a0.t);
linef_532700_839829468(p0, ((Tcprocsection529011) 2), frmt0, LOC1, 2);
}
/* Nim-cgen helper: evaluates son[1] and son[2] of e0, reads both with the
 * char-loc reader, substitutes them into the two-argument format string
 * frmt0 and places the resulting expression into destination d0. */
N_NIMCALL(void, binaryexprchar_550809_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, NimStringDesc* frmt0) {
Tloc292816 lhs0;
Tloc292816 rhs0;
TY532811 fmtargs0;
Ropeobj178006* code0;
memset((void*)(&lhs0), 0, sizeof(lhs0));
memset((void*)(&rhs0), 0, sizeof(rhs0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&lhs0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&rhs0));
memset((void*)fmtargs0, 0, sizeof(fmtargs0));
fmtargs0[0] = rdcharloc_538227_839829468(lhs0);
fmtargs0[1] = rdcharloc_538227_839829468(rhs0);
code0 = ropecg_532407_839829468((*p0).module, frmt0, fmtargs0, 2);
putintodest_550468_839829468(p0, d0, (*e0).typ, code0, ((Tstorageloc292812) 0));
}
/* Nim-compiler-generated C. Decides whether a set-literal node s0 (must be
 * node kind 39, else internal error) should be compiled as a short chain
 * of comparisons instead of a real set operation:
 *   false  — set fits in a machine word (size <= intsize) and node flag 4
 *            is set (presumably "all elements constant" — TODO confirm);
 *   true   — element type is kind 31 or in kinds 33..35;
 *   else   — true iff the literal has at most 8 sons. */
N_NIMCALL(NIM_BOOL, fewcmps_555803_839829468)(Tnode292802* s0) {
NIM_BOOL result0;
result0 = (NIM_BOOL)0;
{
if (!!(((*s0).kind == ((Tnodekind292020) 39)))) goto LA3;
internalerror_196100_155036129((*s0).info, ((NimStringDesc*) &T839829468_463));
}
LA3: ;
{
NIM_BOOL LOC7;
NI64 LOC8;
LOC7 = (NIM_BOOL)0;
LOC8 = (NI64)0;
LOC8 = getsize_320135_3876443242((*s0).typ);
LOC7 = (LOC8 <= ((NI64) (intsize_176641_4151366050)));
if (!(LOC7)) goto LA9;
LOC7 = (((*s0).flags &(1U<<((NU)(((Tnodeflag292427) 4))&15U)))!=0);
LA9: ;
if (!LOC7) goto LA10;
result0 = NIM_FALSE;
}
goto LA5;
LA10: ;
{
Ttype292840* LOC13;
LOC13 = (Ttype292840*)0;
LOC13 = elemtype_320394_3876443242((*s0).typ);
if (!((*LOC13).kind == ((Ttypekind292244) 31) || (*LOC13).kind >= ((Ttypekind292244) 33) && (*LOC13).kind <= ((Ttypekind292244) 35))) goto LA14;
result0 = NIM_TRUE;
}
goto LA5;
LA14: ;
{
NI LOC17;
LOC17 = (NI)0;
LOC17 = sonslen_295351_850551059(s0);
result0 = (LOC17 <= ((NI) 8));
}
LA5: ;
return result0;
}
/* Nim-cgen helper for set membership: formats frmt0 with the set (a0) and
 * the element (b0, read via rdsetelemloc so it is rebased to the set's
 * base ordinal) and places the resulting expression into d0. */
N_NIMCALL(void, binaryexprin_555837_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* a0, Tloc292816* b0, Tloc292816* d0, NimStringDesc* frmt0) {
TY532811 fmtargs0;
Ropeobj178006* code0;
memset((void*)fmtargs0, 0, sizeof(fmtargs0));
fmtargs0[0] = rdloc_538188_839829468((*a0));
fmtargs0[1] = rdsetelemloc_555662_839829468((*b0), (*a0).t);
code0 = HEX25_178905_2381377266(frmt0, fmtargs0, 2);
putintodest_550468_839829468(p0, d0, (*e0).typ, code0, ((Tstorageloc292812) 0));
}
/* Nim-cgen: `in` expression dispatch — picks the membership-test format
 * string by the byte size of the set type (1/2/4/8 bytes use the word-set
 * variants _467.._470; anything else falls back to the big-set variant
 * _471), then emits via binaryexprin. Behavior is identical to the
 * generated per-case calls; only the selection is factored out. */
N_NIMCALL(void, geninexpraux_553496_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* a0, Tloc292816* b0, Tloc292816* d0) {
Ttype292840* settype0;
NI64 setsize0;
NimStringDesc* fmt0;
settype0 = skiptypes_296099_850551059((*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, IL64(211106240964864));
setsize0 = getsize_320135_3876443242(settype0);
switch (((NI) (setsize0))) {
case ((NI) 1):
fmt0 = ((NimStringDesc*) &T839829468_467);
break;
case ((NI) 2):
fmt0 = ((NimStringDesc*) &T839829468_468);
break;
case ((NI) 4):
fmt0 = ((NimStringDesc*) &T839829468_469);
break;
case ((NI) 8):
fmt0 = ((NimStringDesc*) &T839829468_470);
break;
default:
fmt0 = ((NimStringDesc*) &T839829468_471);
break;
}
binaryexprin_555837_839829468(p0, e0, a0, b0, d0, fmt0);
}
/* Nim-compiler-generated C. `in` operator: when son[1] is a small set
 * literal (node kind 39 passing fewcmps), compiles it to a parenthesized
 * chain of comparisons — range sons (kind 44) emit a two-bound test
 * (T839829468_464), plain sons an equality test (T839829468_465), joined
 * by T839829468_466. Otherwise falls back to the generic set-membership
 * path via geninexpraux. */
N_NIMCALL(void, geninop_556009_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) {
Tloc292816 a0;
Tloc292816 b0;
Tloc292816 x0;
Tloc292816 y0;
memset((void*)(&a0), 0, sizeof(a0));
memset((void*)(&b0), 0, sizeof(b0));
memset((void*)(&x0), 0, sizeof(x0));
memset((void*)(&y0), 0, sizeof(y0));
{
NIM_BOOL LOC3;
Tnode292802* ea0;
NI length0;
LOC3 = (NIM_BOOL)0;
LOC3 = ((*(*e0).kindU.S6.sons->data[((NI) 1)]).kind == ((Tnodekind292020) 39));
if (!(LOC3)) goto LA4;
LOC3 = fewcmps_555803_839829468((*e0).kindU.S6.sons->data[((NI) 1)]);
LA4: ;
if (!LOC3) goto LA5;
{
/* unwrap a conversion node (kind 69/70) around the tested element */
if (!((*(*e0).kindU.S6.sons->data[((NI) 2)]).kind == ((Tnodekind292020) 70) || (*(*e0).kindU.S6.sons->data[((NI) 2)]).kind == ((Tnodekind292020) 69))) goto LA9;
ea0 = (*(*e0).kindU.S6.sons->data[((NI) 2)]).kindU.S6.sons->data[((NI) 0)];
}
goto LA7;
LA9: ;
{
ea0 = (*e0).kindU.S6.sons->data[((NI) 2)];
}
LA7: ;
initlocexpr_539283_839829468(p0, ea0, (&a0));
initloc_532273_839829468((&b0), ((Tlockind292808) 6), (*e0).typ, ((Tstorageloc292812) 0));
/* b0.r accumulates the comparison chain, opened/closed with _118/_117 */
b0.r = rope_178277_2381377266(((NimStringDesc*) &T839829468_118));
length0 = sonslen_295351_850551059((*e0).kindU.S6.sons->data[((NI) 1)]);
{
NI i_556061_839829468;
NI HEX3Atmp_556412_839829468;
NI res_556415_839829468;
i_556061_839829468 = (NI)0;
HEX3Atmp_556412_839829468 = (NI)0;
HEX3Atmp_556412_839829468 = (NI)(length0 - ((NI) 1));
res_556415_839829468 = ((NI) 0);
{
while (1) {
if (!(res_556415_839829468 <= HEX3Atmp_556412_839829468)) goto LA14;
i_556061_839829468 = res_556415_839829468;
{
/* range son (kind 44): lower- and upper-bound comparison */
TY535238 LOC19;
if (!((*(*(*e0).kindU.S6.sons->data[((NI) 1)]).kindU.S6.sons->data[i_556061_839829468]).kind == ((Tnodekind292020) 44))) goto LA17;
initlocexpr_539283_839829468(p0, (*(*(*e0).kindU.S6.sons->data[((NI) 1)]).kindU.S6.sons->data[i_556061_839829468]).kindU.S6.sons->data[((NI) 0)], (&x0));
initlocexpr_539283_839829468(p0, (*(*(*e0).kindU.S6.sons->data[((NI) 1)]).kindU.S6.sons->data[i_556061_839829468]).kindU.S6.sons->data[((NI) 1)], (&y0));
memset((void*)LOC19, 0, sizeof(LOC19));
LOC19[0] = rdcharloc_538227_839829468(a0);
LOC19[1] = rdcharloc_538227_839829468(x0);
LOC19[2] = rdcharloc_538227_839829468(y0);
addf_179205_2381377266(&b0.r, ((NimStringDesc*) &T839829468_464), LOC19, 3);
}
goto LA15;
LA17: ;
{
/* single value: equality comparison */
TY532811 LOC21;
initlocexpr_539283_839829468(p0, (*(*e0).kindU.S6.sons->data[((NI) 1)]).kindU.S6.sons->data[i_556061_839829468], (&x0));
memset((void*)LOC21, 0, sizeof(LOC21));
LOC21[0] = rdcharloc_538227_839829468(a0);
LOC21[1] = rdcharloc_538227_839829468(x0);
addf_179205_2381377266(&b0.r, ((NimStringDesc*) &T839829468_465), LOC21, 2);
}
LA15: ;
{
/* join all but the last comparison with the separator */
if (!(i_556061_839829468 < (NI)(length0 - ((NI) 1)))) goto LA24;
add_178487_2381377266(&b0.r, ((NimStringDesc*) &T839829468_466));
}
LA24: ;
res_556415_839829468 += ((NI) 1);
} LA14: ;
}
}
add_178487_2381377266(&b0.r, ((NimStringDesc*) &T839829468_117));
putintodest_550468_839829468(p0, d0, (*e0).typ, b0.r, ((Tstorageloc292812) 0));
}
goto LA1;
LA5: ;
{
/* generic path: real set-membership test */
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0));
geninexpraux_553496_839829468(p0, e0, (&a0), (&b0), d0);
}
LA1: ;
}
/* Nim-compiler-generated C. Central dispatcher for set operations (magic
 * op0) on the set type of son[1]. Sets of 1/2/4/8 bytes are compiled as
 * word operations (shift/mask formats built from the bit width); larger
 * sets go through byte-array loops and memcmp/memcpy-style formats.
 * Magic numbers map to operations (incl/excl/card/eq/le/lt/union/... —
 * exact Tmagic meanings not visible from this file, TODO confirm). */
N_NIMCALL(void, gensetop_556419_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, Tmagic292524 op0) {
Tloc292816 a0;
Tloc292816 b0;
Tloc292816 i0;
Ttype292840* settype0;
NI size0;
NI64 LOC1;
memset((void*)(&a0), 0, sizeof(a0));
memset((void*)(&b0), 0, sizeof(b0));
memset((void*)(&i0), 0, sizeof(i0));
settype0 = skiptypes_296099_850551059((*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, IL64(211106240964864));
LOC1 = (NI64)0;
LOC1 = getsize_320135_3876443242(settype0);
size0 = ((NI) (LOC1));
switch (size0) {
/* ---- word-sized sets (1/2/4/8 bytes) ---- */
case ((NI) 1):
case ((NI) 2):
case ((NI) 4):
case ((NI) 8):
{
switch (op0) {
case ((Tmagic292524) 39):
{
/* build "NU<bits>" type suffix, then the incl bit-set format */
NimStringDesc* ts0;
NimStringDesc* LOC4;
NimStringDesc* LOC5;
NimStringDesc* LOC6;
LOC4 = (NimStringDesc*)0;
LOC5 = (NimStringDesc*)0;
LOC5 = nimIntToStr((NI)(size0 * ((NI) 8)));
LOC4 = rawNewString(LOC5->Sup.len + 2);
appendString(LOC4, ((NimStringDesc*) &T839829468_45));
appendString(LOC4, LOC5);
ts0 = LOC4;
LOC6 = (NimStringDesc*)0;
LOC6 = rawNewString(ts0->Sup.len + ts0->Sup.len + 35);
appendString(LOC6, ((NimStringDesc*) &T839829468_449));
appendString(LOC6, ts0);
appendString(LOC6, ((NimStringDesc*) &T839829468_450));
appendString(LOC6, ts0);
appendString(LOC6, ((NimStringDesc*) &T839829468_451));
binarystmtinexcl_555857_839829468(p0, e0, d0, LOC6);
}
break;
case ((Tmagic292524) 40):
{
/* same suffix construction, excl variant */
NimStringDesc* ts0;
NimStringDesc* LOC8;
NimStringDesc* LOC9;
NimStringDesc* LOC10;
LOC8 = (NimStringDesc*)0;
LOC9 = (NimStringDesc*)0;
LOC9 = nimIntToStr((NI)(size0 * ((NI) 8)));
LOC8 = rawNewString(LOC9->Sup.len + 2);
appendString(LOC8, ((NimStringDesc*) &T839829468_45));
appendString(LOC8, LOC9);
ts0 = LOC8;
LOC10 = (NimStringDesc*)0;
LOC10 = rawNewString(ts0->Sup.len + ts0->Sup.len + 42);
appendString(LOC10, ((NimStringDesc*) &T839829468_452));
appendString(LOC10, ts0);
appendString(LOC10, ((NimStringDesc*) &T839829468_453));
appendString(LOC10, ts0);
appendString(LOC10, ((NimStringDesc*) &T839829468_454));
binarystmtinexcl_555857_839829468(p0, e0, d0, LOC10);
}
break;
case ((Tmagic292524) 41):
{
/* popcount-style op: 32-bit vs 64-bit format variant */
{
if (!(size0 <= ((NI) 4))) goto LA14;
unaryexprchar_551222_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_455));
}
goto LA12;
LA14: ;
{
unaryexprchar_551222_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_456));
}
LA12: ;
}
break;
case ((Tmagic292524) 133):
{
binaryexprchar_550809_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_457));
}
break;
case ((Tmagic292524) 132):
{
binaryexprchar_550809_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_458));
}
break;
case ((Tmagic292524) 131):
{
binaryexpr_550549_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_341));
}
break;
case ((Tmagic292524) 134):
{
binaryexpr_550549_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_459));
}
break;
case ((Tmagic292524) 135):
{
binaryexpr_550549_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_460));
}
break;
case ((Tmagic292524) 136):
{
binaryexpr_550549_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_461));
}
break;
case ((Tmagic292524) 137):
{
binaryexpr_550549_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_462));
}
break;
case ((Tmagic292524) 148):
{
geninop_556009_839829468(p0, e0, d0);
}
break;
default:
{
internalerror_196100_155036129((*e0).info, ((NimStringDesc*) &T839829468_472));
}
break;
}
}
break;
/* ---- big sets (byte arrays) ---- */
default:
{
switch (op0) {
case ((Tmagic292524) 39):
{
binarystmtinexcl_555857_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_473));
}
break;
case ((Tmagic292524) 40):
{
binarystmtinexcl_555857_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_474));
}
break;
case ((Tmagic292524) 41):
{
/* card over a byte array: format "...(<size>)" built by string concat */
NimStringDesc* LOC30;
NimStringDesc* LOC31;
LOC30 = (NimStringDesc*)0;
LOC31 = (NimStringDesc*)0;
LOC31 = nimIntToStr(size0);
LOC30 = rawNewString(LOC31->Sup.len + 14);
appendString(LOC30, ((NimStringDesc*) &T839829468_475));
appendString(LOC30, LOC31);
appendChar(LOC30, 41);
unaryexprchar_551222_839829468(p0, e0, d0, LOC30);
}
break;
case ((Tmagic292524) 133):
case ((Tmagic292524) 132):
{
/* comparison ops: loop over bytes with temp index i0, bool result temp
 * allocated on demand; per-op loop body from lookupopr_[op-132] */
Ttype292840* LOC33;
TY536475 LOC39;
LOC33 = (Ttype292840*)0;
LOC33 = getsystype_338150_3937434831(((Ttypekind292244) 31));
gettemp_537032_839829468(p0, LOC33, (&i0), NIM_FALSE);
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0));
{
Ttype292840* LOC38;
if (!((*d0).k == ((Tlockind292808) 0))) goto LA36;
LOC38 = (Ttype292840*)0;
LOC38 = getsystype_338150_3937434831(((Ttypekind292244) 1));
gettemp_537032_839829468(p0, LOC38, d0, NIM_FALSE);
}
LA36: ;
memset((void*)LOC39, 0, sizeof(LOC39));
LOC39[0] = rdloc_538188_839829468(i0);
LOC39[1] = rope_178401_2381377266(((NI64) (size0)));
LOC39[2] = rdloc_538188_839829468((*d0));
LOC39[3] = rdloc_538188_839829468(a0);
LOC39[4] = rdloc_538188_839829468(b0);
linef_532700_839829468(p0, ((Tcprocsection529011) 2), lookupopr_556426_839829468[(op0)- 132], LOC39, 5);
}
break;
case ((Tmagic292524) 131):
{
/* equality: memcmp over <size> bytes (needs <string.h> in the module) */
NimStringDesc* LOC41;
NimStringDesc* LOC42;
usestringh_532345_839829468((*p0).module);
LOC41 = (NimStringDesc*)0;
LOC42 = (NimStringDesc*)0;
LOC42 = nimIntToStr(size0);
LOC41 = rawNewString(LOC42->Sup.len + 21);
appendString(LOC41, ((NimStringDesc*) &T839829468_481));
appendString(LOC41, LOC42);
appendString(LOC41, ((NimStringDesc*) &T839829468_482));
binaryexprchar_550809_839829468(p0, e0, d0, LOC41);
}
break;
case ((Tmagic292524) 134):
case ((Tmagic292524) 135):
case ((Tmagic292524) 136):
case ((Tmagic292524) 137):
{
/* element-wise binary ops: loop with the per-op C operator from
 * lookupopr_[op-132] spliced into the common loop format _483 */
Ttype292840* LOC44;
TY536847 LOC49;
LOC44 = (Ttype292840*)0;
LOC44 = getsystype_338150_3937434831(((Ttypekind292244) 31));
gettemp_537032_839829468(p0, LOC44, (&i0), NIM_FALSE);
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0));
{
if (!((*d0).k == ((Tlockind292808) 0))) goto LA47;
gettemp_537032_839829468(p0, a0.t, d0, NIM_FALSE);
}
LA47: ;
memset((void*)LOC49, 0, sizeof(LOC49));
LOC49[0] = rdloc_538188_839829468(i0);
LOC49[1] = rope_178401_2381377266(((NI64) (size0)));
LOC49[2] = rdloc_538188_839829468((*d0));
LOC49[3] = rdloc_538188_839829468(a0);
LOC49[4] = rdloc_538188_839829468(b0);
LOC49[5] = rope_178277_2381377266(lookupopr_556426_839829468[(op0)- 132]);
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_483), LOC49, 6);
}
break;
case ((Tmagic292524) 148):
{
geninop_556009_839829468(p0, e0, d0);
}
break;
default:
{
internalerror_196100_155036129((*e0).info, ((NimStringDesc*) &T839829468_484));
}
break;
}
}
break;
}
}
/*
 * Nim-compiler-generated C (cgen call section).
 * Renders a string-to-cstring conversion argument: evaluates the node's
 * first son into a Tloc, then formats its rdLoc through the pattern
 * string T839829468_485 (pattern contents not visible here -- presumably
 * the "$1 ? $1->data : ..." style cstring access; TODO confirm against
 * the emitted string table).
 * Returns a rope with the C expression for the argument.
 */
static N_INLINE(Ropeobj178006*, genargstringtocstring_539776_839829468)(Tcproc529021* p0, Tnode292802* n0) {
Ropeobj178006* result0;
Tloc292816 a0;
TY178507 LOC1;
result0 = (Ropeobj178006*)0;
memset((void*)(&a0), 0, sizeof(a0));
/* evaluate son[0] (the string operand of the conversion) into a0 */
initlocexpr_539283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (&a0));
memset((void*)LOC1, 0, sizeof(LOC1));
LOC1[0] = rdloc_538188_839829468(a0);
result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_485), LOC1, 1);
return result0;
}
/*
 * Nim-compiler-generated C (cgen call section).
 * Produces the C argument text for passing a value to an openArray
 * parameter, i.e. the "(pointer, length)" pair expansion.
 *
 * Two top-level paths:
 *  - If the (conversion-skipped) expression is a call to magic 139
 *    (presumably mSlice / toOpenArray -- TODO confirm the enum value),
 *    it evaluates the collection plus the two bounds and picks a format
 *    string by the collection's type kind.
 *  - Otherwise it evaluates the expression itself and switches on the
 *    skipped type kind to emit data-pointer + length.
 * Unknown type kinds fall through to internalerror with a
 * "cannot generate code for: <type>"-style message (T839829468_489).
 */
N_NIMCALL(Ropeobj178006*, openarrayloc_539665_839829468)(Tcproc529021* p0, Tnode292802* n0) {
Ropeobj178006* result0;
Tloc292816 a0;
Tnode292802* q0;
result0 = (Ropeobj178006*)0;
memset((void*)(&a0), 0, sizeof(a0));
q0 = skipconv_328882_3876443242(n0);
{
Tmagic292524 LOC3;
Tloc292816 b0;
Tloc292816 c0;
Tnode292802* LOC6;
Tnode292802* LOC7;
Tnode292802* LOC8;
NimStringDesc* fmt0;
Ttype292840* LOC9;
TY535238 LOC25;
LOC3 = (Tmagic292524)0;
LOC3 = getmagic_318502_2616423590(q0);
/* slice/toOpenArray magic: args are (collection, first, last) */
if (!(LOC3 == ((Tmagic292524) 139))) goto LA4;
memset((void*)(&b0), 0, sizeof(b0));
memset((void*)(&c0), 0, sizeof(c0));
LOC6 = (Tnode292802*)0;
LOC6 = HEX5BHEX5D_293238_850551059(q0, ((NI) 1));
initlocexpr_539283_839829468(p0, LOC6, (&a0));
LOC7 = (Tnode292802*)0;
LOC7 = HEX5BHEX5D_293238_850551059(q0, ((NI) 2));
initlocexpr_539283_839829468(p0, LOC7, (&b0));
LOC8 = (Tnode292802*)0;
LOC8 = HEX5BHEX5D_293238_850551059(q0, ((NI) 3));
initlocexpr_539283_839829468(p0, LOC8, (&c0));
LOC9 = (Ttype292840*)0;
LOC9 = skiptypes_296099_850551059(a0.t, IL64(211106243062016));
/* choose the slice format string by collection type kind */
switch ((*LOC9).kind) {
case ((Ttypekind292244) 27):
case ((Ttypekind292244) 48):
case ((Ttypekind292244) 16):
case ((Ttypekind292244) 4):
{
fmt0 = copyString(((NimStringDesc*) &T839829468_486));
}
break;
case ((Ttypekind292244) 28):
case ((Ttypekind292244) 24):
{
{
NIM_BOOL LOC14;
Ttype292840* LOC15;
NIM_BOOL LOC17;
LOC14 = (NIM_BOOL)0;
LOC15 = (Ttype292840*)0;
LOC15 = skiptypes_296099_850551059((*n0).typ, IL64(211106232576256));
LOC14 = ((*LOC15).kind == ((Ttypekind292244) 23));
if (!(LOC14)) goto LA16;
LOC17 = (NIM_BOOL)0;
/* gcmd == 2 or module flag 27 set: looks like a "compile to C++"
   or similar mode check -- TODO confirm what command 2 is */
LOC17 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC17) goto LA18;
LOC17 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA18: ;
LOC14 = !(LOC17);
LA16: ;
if (!LOC14) goto LA19;
fmt0 = copyString(((NimStringDesc*) &T839829468_487));
}
goto LA12;
LA19: ;
{
fmt0 = copyString(((NimStringDesc*) &T839829468_488));
}
LA12: ;
}
break;
default:
{
/* unsupported collection type: report internal error */
NimStringDesc* LOC23;
NimStringDesc* LOC24;
LOC23 = (NimStringDesc*)0;
LOC24 = (NimStringDesc*)0;
LOC24 = typetostring_320017_3876443242(a0.t, ((Tprefereddesc320011) 0));
LOC23 = rawNewString(LOC24->Sup.len + 14);
appendString(LOC23, ((NimStringDesc*) &T839829468_489));
appendString(LOC23, LOC24);
internalerror_196113_155036129(LOC23);
fmt0 = copyString(((NimStringDesc*) &T839829468_490));
}
break;
}
/* format: (collection, first, last) */
memset((void*)LOC25, 0, sizeof(LOC25));
LOC25[0] = rdloc_538188_839829468(a0);
LOC25[1] = rdloc_538188_839829468(b0);
LOC25[2] = rdloc_538188_839829468(c0);
result0 = HEX25_178905_2381377266(fmt0, LOC25, 3);
}
goto LA1;
LA4: ;
{
/* plain expression: emit data pointer + length by type kind */
Ttype292840* LOC27;
initlocexpr_539283_839829468(p0, n0, (&a0));
LOC27 = (Ttype292840*)0;
LOC27 = skiptypes_296099_850551059(a0.t, IL64(211106240964864));
switch ((*LOC27).kind) {
case ((Ttypekind292244) 27):
case ((Ttypekind292244) 48):
{
/* already an openArray-like value: pass through with one operand */
TY178507 LOC29;
memset((void*)LOC29, 0, sizeof(LOC29));
LOC29[0] = rdloc_538188_839829468(a0);
result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_378), LOC29, 1);
}
break;
case ((Ttypekind292244) 28):
case ((Ttypekind292244) 24):
{
/* string/seq-like: pointer plus runtime length field */
{
NIM_BOOL LOC33;
Ttype292840* LOC34;
NIM_BOOL LOC36;
TY532811 LOC40;
LOC33 = (NIM_BOOL)0;
LOC34 = (Ttype292840*)0;
LOC34 = skiptypes_296099_850551059((*n0).typ, IL64(211106232576256));
LOC33 = ((*LOC34).kind == ((Ttypekind292244) 23));
if (!(LOC33)) goto LA35;
LOC36 = (NIM_BOOL)0;
LOC36 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC36) goto LA37;
LOC36 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA37: ;
LOC33 = !(LOC36);
LA35: ;
if (!LOC33) goto LA38;
memset((void*)LOC40, 0, sizeof(LOC40));
LOC40[0] = rdloc_538188_839829468(a0);
LOC40[1] = lenfield_539305_839829468(p0);
result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_491), LOC40, 2);
}
goto LA31;
LA38: ;
{
TY532811 LOC42;
memset((void*)LOC42, 0, sizeof(LOC42));
LOC42[0] = rdloc_538188_839829468(a0);
LOC42[1] = lenfield_539305_839829468(p0);
result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_379), LOC42, 2);
}
LA31: ;
}
break;
case ((Ttypekind292244) 16):
case ((Ttypekind292244) 4):
{
/* fixed-size array: length is a compile-time ordinal count */
TY532811 LOC44;
NI64 LOC45;
memset((void*)LOC44, 0, sizeof(LOC44));
LOC44[0] = rdloc_538188_839829468(a0);
LOC45 = (NI64)0;
LOC45 = lengthord_320007_3876443242(a0.t);
LOC44[1] = rope_178401_2381377266(LOC45);
result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_380), LOC44, 2);
}
break;
case ((Ttypekind292244) 21):
case ((Ttypekind292244) 22):
{
/* ptr/ref indirection: dispatch again on the element type */
Ttype292840* LOC47;
LOC47 = (Ttype292840*)0;
LOC47 = lastson_295377_850551059(a0.t);
switch ((*LOC47).kind) {
case ((Ttypekind292244) 28):
case ((Ttypekind292244) 24):
{
TY532811 LOC49;
memset((void*)LOC49, 0, sizeof(LOC49));
LOC49[0] = rdloc_538188_839829468(a0);
LOC49[1] = lenfield_539305_839829468(p0);
result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_491), LOC49, 2);
}
break;
case ((Ttypekind292244) 16):
case ((Ttypekind292244) 4):
{
TY532811 LOC51;
Ttype292840* LOC52;
NI64 LOC53;
memset((void*)LOC51, 0, sizeof(LOC51));
LOC51[0] = rdloc_538188_839829468(a0);
LOC52 = (Ttype292840*)0;
LOC52 = lastson_295377_850551059(a0.t);
LOC53 = (NI64)0;
LOC53 = lengthord_320007_3876443242(LOC52);
LOC51[1] = rope_178401_2381377266(LOC53);
result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_380), LOC51, 2);
}
break;
default:
{
NimStringDesc* LOC55;
NimStringDesc* LOC56;
LOC55 = (NimStringDesc*)0;
LOC56 = (NimStringDesc*)0;
LOC56 = typetostring_320017_3876443242(a0.t, ((Tprefereddesc320011) 0));
LOC55 = rawNewString(LOC56->Sup.len + 14);
appendString(LOC55, ((NimStringDesc*) &T839829468_489));
appendString(LOC55, LOC56);
internalerror_196113_155036129(LOC55);
}
break;
}
}
break;
default:
{
NimStringDesc* LOC58;
NimStringDesc* LOC59;
LOC58 = (NimStringDesc*)0;
LOC59 = (NimStringDesc*)0;
LOC59 = typetostring_320017_3876443242(a0.t, ((Tprefereddesc320011) 0));
LOC58 = rawNewString(LOC59->Sup.len + 14);
appendString(LOC58, ((NimStringDesc*) &T839829468_489));
appendString(LOC58, LOC59);
internalerror_196113_155036129(LOC58);
}
break;
}
}
LA1: ;
return result0;
}
/*
 * Nim-compiler-generated C (cgen call section).
 * Generates the C text for ONE call argument, matched against its
 * formal parameter `param0`. Branches, in order:
 *  1. node kind 71: string-to-cstring conversion helper.
 *  2. formal type skips to kind 27/48 (openArray-like): expand via
 *     openarrayloc, unwrapping one son if the node is kind 64
 *     (presumably a hidden addr node -- TODO confirm).
 *  3. the backend introduced a pointer for this parameter
 *     (ccgIntroducedPtr): pass the address of the evaluated loc.
 *  4. special mode (gcmd==2 or module flag 27) + var param (kind 23) +
 *     hidden-addr node: pass addr only when the callee symbol's
 *     flag/loc tests say so, else rdLoc of the unwrapped son.
 *  5. default: evaluate (single-use) and pass rdLoc.
 */
N_NIMCALL(Ropeobj178006*, genarg_539787_839829468)(Tcproc529021* p0, Tnode292802* n_539790_839829468, Tsym292834* param0, Tnode292802* call0) {
Ropeobj178006* result0;
Tloc292816 a0;
result0 = (Ropeobj178006*)0;
memset((void*)(&a0), 0, sizeof(a0));
{
if (!((*n_539790_839829468).kind == ((Tnodekind292020) 71))) goto LA3;
result0 = genargstringtocstring_539776_839829468(p0, n_539790_839829468);
}
goto LA1;
LA3: ;
{
Ttype292840* LOC6;
Tnode292802* n0;
LOC6 = (Ttype292840*)0;
LOC6 = skiptypes_296099_850551059((*param0).typ, IL64(211106240964864));
if (!((*LOC6).kind == ((Ttypekind292244) 27) || (*LOC6).kind == ((Ttypekind292244) 48))) goto LA7;
{
/* unwrap a wrapper node (kind 64) before openArray expansion */
if (!!(((*n_539790_839829468).kind == ((Tnodekind292020) 64)))) goto LA11;
n0 = n_539790_839829468;
}
goto LA9;
LA11: ;
{
n0 = (*n_539790_839829468).kindU.S6.sons->data[((NI) 0)];
}
LA9: ;
result0 = openarrayloc_539665_839829468(p0, n0);
}
goto LA1;
LA7: ;
{
NIM_BOOL LOC15;
LOC15 = (NIM_BOOL)0;
LOC15 = ccgintroducedptr_533609_839829468(param0);
if (!LOC15) goto LA16;
/* backend passes this parameter by hidden pointer */
initlocexpr_539283_839829468(p0, n_539790_839829468, (&a0));
result0 = addrloc_538204_839829468(a0);
}
goto LA1;
LA16: ;
{
NIM_BOOL LOC19;
NIM_BOOL LOC20;
NIM_BOOL LOC21;
Tnode292802* callee0;
LOC19 = (NIM_BOOL)0;
LOC20 = (NIM_BOOL)0;
LOC21 = (NIM_BOOL)0;
LOC21 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC21) goto LA22;
LOC21 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA22: ;
LOC20 = LOC21;
if (!(LOC20)) goto LA23;
LOC20 = ((*(*param0).typ).kind == ((Ttypekind292244) 23));
LA23: ;
LOC19 = LOC20;
if (!(LOC19)) goto LA24;
LOC19 = ((*n_539790_839829468).kind == ((Tnodekind292020) 64));
LA24: ;
if (!LOC19) goto LA25;
initlocexprsingleuse_539289_839829468(p0, (*n_539790_839829468).kindU.S6.sons->data[((NI) 0)], (&a0));
callee0 = (*call0).kindU.S6.sons->data[((NI) 0)];
{
NIM_BOOL LOC29;
NIM_BOOL LOC30;
LOC29 = (NIM_BOOL)0;
LOC30 = (NIM_BOOL)0;
/* callee is a symbol node (kind 3) with a specific flag/loc combo:
   then the var param still needs an explicit address */
LOC30 = ((*callee0).kind == ((Tnodekind292020) 3));
if (!(LOC30)) goto LA31;
LOC30 = ((134283296 & (*(*callee0).kindU.S4.sym).flags) == 32);
LA31: ;
LOC29 = LOC30;
if (!(LOC29)) goto LA32;
LOC29 = !(((72 & (*(*callee0).kindU.S4.sym).loc.flags) == 0));
LA32: ;
if (!LOC29) goto LA33;
result0 = addrloc_538204_839829468(a0);
}
goto LA27;
LA33: ;
{
result0 = rdloc_538188_839829468(a0);
}
LA27: ;
}
goto LA1;
LA25: ;
{
/* default: pass the value itself */
initlocexprsingleuse_539289_839829468(p0, n_539790_839829468, (&a0));
result0 = rdloc_538188_839829468(a0);
}
LA1: ;
return result0;
}
/*
 * Nim-compiler-generated C (cgen call section).
 * Generates a call argument when no formal parameter info is available
 * (e.g. extra varargs). Same first branch as genarg (node kind 71 =
 * string-to-cstring conversion), otherwise a plain single-use
 * evaluation passed as rdLoc.
 */
N_NIMCALL(Ropeobj178006*, genargnoparam_539938_839829468)(Tcproc529021* p0, Tnode292802* n0) {
Ropeobj178006* result0;
Tloc292816 a0;
result0 = (Ropeobj178006*)0;
memset((void*)(&a0), 0, sizeof(a0));
{
if (!((*n0).kind == ((Tnodekind292020) 71))) goto LA3;
result0 = genargstringtocstring_539776_839829468(p0, n0);
}
goto LA1;
LA3: ;
{
initlocexprsingleuse_539289_839829468(p0, n0, (&a0));
result0 = rdloc_538188_839829468(a0);
}
LA1: ;
return result0;
}
/*
 * Nim-compiler-generated C (cgen call section).
 * Returns the C type name rope for the raw (environment-less) procedure
 * pointer of closure type t0 -- thin wrapper over getclosuretype with
 * kind 0 (presumably "clHalf"/raw variant; TODO confirm enum meaning).
 */
N_NIMCALL(Ropeobj178006*, getrawproctype_540459_839829468)(Tcproc529021* p0, Ttype292840* t0) {
Ropeobj178006* result0;
result0 = (Ropeobj178006*)0;
result0 = getclosuretype_535683_839829468((*p0).module, t0, ((Tclosuretypekind535679) 0));
return result0;
}
/*
 * Nim-compiler-generated C (cgen call section).
 * Aliasing guard for call assignments: returns true if the assignment
 * destination `le0` may be part of any call argument ri0[1..len-1]
 * (ispartof != 0 for some argument). Used to decide whether the call
 * result can be written directly into the destination or needs a temp.
 * A NIL le0 trivially yields false.
 */
N_NIMCALL(NIM_BOOL, leftappearsonrightside_539329_839829468)(Tnode292802* le0, Tnode292802* ri0) {
NIM_BOOL result0;
{ result0 = (NIM_BOOL)0;
{
if (!!((le0 == NIM_NIL))) goto LA3;
{
NI i_539364_839829468;
NI HEX3Atmp_539376_839829468;
NI LOC6;
NI res_539379_839829468;
i_539364_839829468 = (NI)0;
HEX3Atmp_539376_839829468 = (NI)0;
LOC6 = (NI)0;
LOC6 = len_293081_850551059(ri0);
HEX3Atmp_539376_839829468 = (LOC6 - 1);
res_539379_839829468 = ((NI) 1);
{
/* iterate over arguments ri0[1 .. len-1]; son 0 is the callee */
while (1) {
Tnode292802* r0;
if (!(res_539379_839829468 <= HEX3Atmp_539376_839829468)) goto LA8;
i_539364_839829468 = res_539379_839829468;
r0 = HEX5BHEX5D_293238_850551059(ri0, i_539364_839829468);
{
Tanalysisresult473003 LOC11;
LOC11 = (Tanalysisresult473003)0;
LOC11 = ispartof_473340_788060399(le0, r0);
/* any non-zero analysis result counts as possible aliasing */
if (!!((LOC11 == ((Tanalysisresult473003) 0)))) goto LA12;
result0 = NIM_TRUE;
goto BeforeRet;
}
LA12: ;
res_539379_839829468 += ((NI) 1);
} LA8: ;
}
}
}
LA3: ;
}BeforeRet: ;
return result0;
}
/*
 * Nim-compiler-generated C (cgen call section).
 * True when the callee of `call0` is a symbol node (kind 3) whose
 * symbol carries flag 12 -- presumably sfNoInit, i.e. the result
 * location does not need zero-initialization before the call
 * (TODO confirm flag 12 == sfNoInit in this compiler version).
 */
static N_INLINE(NIM_BOOL, hasnoinit_539383_839829468)(Tnode292802* call0) {
NIM_BOOL result0;
NIM_BOOL LOC1;
result0 = (NIM_BOOL)0;
LOC1 = (NIM_BOOL)0;
LOC1 = ((*(*call0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind292020) 3));
if (!(LOC1)) goto LA2;
LOC1 = (((*(*(*call0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym).flags &(1U<<((NU)(((Tsymflag292184) 12))&31U)))!=0);
LA2: ;
result0 = LOC1;
return result0;
}
/*
 * Nim-compiler-generated C (cgen call section).
 * Emits code to reset a location to its default state before it is
 * reused (e.g. as a call's out-parameter). Paths:
 *  - imported C++ type: nothing to do, return immediately.
 *  - simple (non-complex) value type: either a GC'd-ref nil assignment
 *    (when the type contains GC references) or a plain "= 0/NIL"-style
 *    line (format T839829468_494).
 *  - complex value type: optional memory-tracking hook (option bit 6),
 *    then either genericReset with type info (non-local storage) or a
 *    memset-style zeroing (T839829468_152, needs <string.h>), followed
 *    by object (re)initialization in both cases.
 */
N_NIMCALL(void, resetloc_538350_839829468)(Tcproc529021* p0, Tloc292816* loc0) {
NIM_BOOL containsgcref0;
Ttype292840* typ0;
{ containsgcref0 = containsgarbagecollectedref_320117_3876443242((*loc0).t);
typ0 = skiptypes_296099_850551059((*loc0).t, IL64(211106242013440));
{
NIM_BOOL LOC3;
LOC3 = (NIM_BOOL)0;
LOC3 = isimportedcpptype_533476_839829468(typ0);
if (!LOC3) goto LA4;
/* imported C++ types manage their own state */
goto BeforeRet;
}
LA4: ;
{
NIM_BOOL LOC8;
LOC8 = (NIM_BOOL)0;
LOC8 = iscomplexvaluetype_538317_839829468(typ0);
if (!!(LOC8)) goto LA9;
{
Tloc292816 nilloc0;
if (!containsgcref0) goto LA13;
/* GC-visible ref: assign a literal NIM_NIL loc via the write barrier */
memset((void*)(&nilloc0), 0, sizeof(nilloc0));
initloc_532273_839829468((&nilloc0), ((Tlockind292808) 1), (*loc0).t, ((Tstorageloc292812) 2));
nilloc0.r = rope_178277_2381377266(((NimStringDesc*) &T839829468_174));
genrefassign_538311_839829468(p0, (*loc0), nilloc0, 8);
}
goto LA11;
LA13: ;
{
TY178507 LOC16;
memset((void*)LOC16, 0, sizeof(LOC16));
LOC16[0] = rdloc_538188_839829468((*loc0));
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_494), LOC16, 1);
}
LA11: ;
}
goto LA6;
LA9: ;
{
{
TY178507 LOC22;
/* option bit 6 set: emit the extra tracking/debug hook first */
if (!(((*p0).options &(1U<<((NU)(((Toption169009) 6))&31U)))!=0)) goto LA20;
memset((void*)LOC22, 0, sizeof(LOC22));
LOC22[0] = addrloc_538204_839829468((*loc0));
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_495), LOC22, 1);
}
LA20: ;
{
TY532811 LOC27;
/* storage != 2 (non-local): reset via runtime with RTTI */
if (!!(((*loc0).s == ((Tstorageloc292812) 2)))) goto LA25;
memset((void*)LOC27, 0, sizeof(LOC27));
LOC27[0] = addrloc_538204_839829468((*loc0));
LOC27[1] = gentypeinfo_535941_839829468((*p0).module, (*loc0).t);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_496), LOC27, 2);
genobjectinit_538242_839829468(p0, ((Tcprocsection529011) 2), (*loc0).t, (*loc0), NIM_TRUE);
}
goto LA23;
LA25: ;
{
TY532811 LOC29;
/* local storage: plain memset-style zeroing (requires string.h) */
usestringh_532345_839829468((*p0).module);
memset((void*)LOC29, 0, sizeof(LOC29));
LOC29[0] = addrloc_538204_839829468((*loc0));
LOC29[1] = rdloc_538188_839829468((*loc0));
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_152), LOC29, 2);
genobjectinit_538242_839829468(p0, ((Tcprocsection529011) 2), (*loc0).t, (*loc0), NIM_TRUE);
}
LA23: ;
}
LA6: ;
}BeforeRet: ;
}
/*
 * Nim-compiler-generated C (cgen call section).
 * If rope r0 is nil, returns it unchanged; otherwise returns r0 with a
 * separator appended (pattern T839829468_110 with zero args -- the
 * literal is presumably ", "). Used to join call argument lists.
 */
N_NIMCALL(Ropeobj178006*, addcomma_540464_839829468)(Ropeobj178006* r0) {
Ropeobj178006* result0;
result0 = (Ropeobj178006*)0;
{
if (!(r0 == NIM_NIL)) goto LA3;
result0 = r0;
}
goto LA1;
LA3: ;
{
TY533289 LOC6;
Ropeobj178006* LOC7;
memset((void*)LOC6, 0, sizeof(LOC6));
LOC7 = (Ropeobj178006*)0;
LOC7 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_110), LOC6, 0);
result0 = HEX26_178418_2381377266(r0, LOC7);
}
LA1: ;
return result0;
}
/*
 * Nim-compiler-generated C (cgen call section).
 * Emits the C code for calling a CLOSURE value:
 *  - evaluates the closure expression (ri0[0]) into op0,
 *  - builds the comma-joined argument list `pl0` (typed parameters via
 *    genarg, extra arguments beyond the proc type's formals via
 *    genargnoparam; compile-time-only params are skipped),
 *  - selects a call pattern by type flag 14 (two fixed pattern
 *    strings T839829468_492 / _493 -- presumably with/without the
 *    environment-pointer dispatch; TODO confirm),
 *  - if the proc has a return type: "invalid return type" results are
 *    passed via an extra address out-argument (into d0 directly when
 *    no aliasing with le0, else through a temp + assignment); normal
 *    returns produce an expression loc assigned into d0,
 *  - a void proc emits the call as a plain statement line.
 */
N_NIMCALL(void, genclosurecall_540452_839829468)(Tcproc529021* p0, Tnode292802* le0, Tnode292802* ri0, Tloc292816* d0) {
Tloc292816 op0;
Ropeobj178006* pl0;
Ttype292840* typ0;
NI length0;
Ropeobj178006* rawproc0;
NimStringDesc* callpattern0;
memset((void*)(&op0), 0, sizeof(op0));
/* evaluate the closure expression itself */
initlocexpr_539283_839829468(p0, (*ri0).kindU.S6.sons->data[((NI) 0)], (&op0));
pl0 = (Ropeobj178006*)0;
typ0 = skiptypes_296099_850551059((*(*ri0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106232576256));
length0 = sonslen_295351_850551059(ri0);
{
NI i_540613_839829468;
NI HEX3Atmp_541214_839829468;
NI res_541217_839829468;
i_540613_839829468 = (NI)0;
HEX3Atmp_541214_839829468 = (NI)0;
HEX3Atmp_541214_839829468 = (NI)(length0 - ((NI) 1));
res_541217_839829468 = ((NI) 1);
{
/* build the argument list over ri0[1 .. length-1] */
while (1) {
if (!(res_541217_839829468 <= HEX3Atmp_541214_839829468)) goto LA3;
i_540613_839829468 = res_541217_839829468;
{
NI LOC6;
Tnode292802* paramtype0;
LOC6 = (NI)0;
LOC6 = sonslen_295327_850551059(typ0);
if (!(i_540613_839829468 < LOC6)) goto LA7;
/* argument has a matching formal parameter */
paramtype0 = (*(*typ0).n).kindU.S6.sons->data[i_540613_839829468];
{
NIM_BOOL LOC11;
Ropeobj178006* LOC20;
LOC11 = (NIM_BOOL)0;
LOC11 = iscompiletimeonly_328706_3876443242((*paramtype0).typ);
if (!!(LOC11)) goto LA12;
{
TY533289 LOC18;
Ropeobj178006* LOC19;
/* non-first argument: prepend separator */
if (!!((pl0 == NIM_NIL))) goto LA16;
memset((void*)LOC18, 0, sizeof(LOC18));
LOC19 = (Ropeobj178006*)0;
LOC19 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_110), LOC18, 0);
add_178482_2381377266(&pl0, LOC19);
}
LA16: ;
LOC20 = (Ropeobj178006*)0;
LOC20 = genarg_539787_839829468(p0, (*ri0).kindU.S6.sons->data[i_540613_839829468], (*paramtype0).kindU.S4.sym, ri0);
add_178482_2381377266(&pl0, LOC20);
}
LA12: ;
}
goto LA4;
LA7: ;
{
/* extra argument beyond the formals (varargs) */
Ropeobj178006* LOC28;
{
TY533289 LOC26;
Ropeobj178006* LOC27;
if (!!((pl0 == NIM_NIL))) goto LA24;
memset((void*)LOC26, 0, sizeof(LOC26));
LOC27 = (Ropeobj178006*)0;
LOC27 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_110), LOC26, 0);
add_178482_2381377266(&pl0, LOC27);
}
LA24: ;
LOC28 = (Ropeobj178006*)0;
LOC28 = genargnoparam_539938_839829468(p0, (*ri0).kindU.S6.sons->data[i_540613_839829468]);
add_178482_2381377266(&pl0, LOC28);
}
LA4: ;
res_541217_839829468 += ((NI) 1);
} LA3: ;
}
}
rawproc0 = getrawproctype_540459_839829468(p0, typ0);
{
/* type flag 14 selects one of two call patterns */
if (!(((*typ0).flags &(1U<<((NU)(((Ttypeflag292431) 14))&31U)))!=0)) goto LA31;
callpattern0 = copyString(((NimStringDesc*) &T839829468_492));
}
goto LA29;
LA31: ;
{
callpattern0 = copyString(((NimStringDesc*) &T839829468_493));
}
LA29: ;
{
/* sons[0] of the proc type is the return type; non-nil => has result */
if (!!(((*typ0).sons->data[((NI) 0)] == NIM_NIL))) goto LA36;
{
NIM_BOOL LOC40;
LOC40 = (NIM_BOOL)0;
LOC40 = isinvalidreturntype_533548_839829468((*typ0).sons->data[((NI) 0)]);
if (!LOC40) goto LA41;
{
NI LOC45;
TY533289 LOC48;
Ropeobj178006* LOC49;
LOC45 = (NI)0;
LOC45 = sonslen_295351_850551059(ri0);
/* separator before the hidden result out-argument */
if (!(((NI) 1) < LOC45)) goto LA46;
memset((void*)LOC48, 0, sizeof(LOC48));
LOC49 = (Ropeobj178006*)0;
LOC49 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_110), LOC48, 0);
add_178482_2381377266(&pl0, LOC49);
}
LA46: ;
{
NIM_BOOL LOC52;
NIM_BOOL LOC54;
Ropeobj178006* LOC67;
NimStringDesc* LOC68;
TY535235 LOC69;
LOC52 = (NIM_BOOL)0;
/* d0 is a fresh/simple loc, or the destination cannot alias an
   argument: safe to write the result into d0 directly */
LOC52 = ((3 &(1U<<((NU)((*d0).k)&15U)))!=0);
if (LOC52) goto LA53;
LOC54 = (NIM_BOOL)0;
LOC54 = leftappearsonrightside_539329_839829468(le0, ri0);
LOC52 = !(LOC54);
LA53: ;
if (!LOC52) goto LA55;
{
if (!((*d0).k == ((Tlockind292808) 0))) goto LA59;
gettemp_537032_839829468(p0, (*typ0).sons->data[((NI) 0)], d0, NIM_TRUE);
}
goto LA57;
LA59: ;
{
NIM_BOOL LOC62;
NIM_BOOL LOC64;
LOC62 = (NIM_BOOL)0;
LOC62 = !(((66 &(1U<<((NU)((*d0).k)&15U)))!=0));
if (!(LOC62)) goto LA63;
LOC64 = (NIM_BOOL)0;
LOC64 = hasnoinit_539383_839829468(ri0);
LOC62 = !(LOC64);
LA63: ;
if (!LOC62) goto LA65;
/* destination must be reset before the call writes into it */
resetloc_538350_839829468(p0, d0);
}
goto LA57;
LA65: ;
LA57: ;
LOC67 = (Ropeobj178006*)0;
LOC67 = addrloc_538204_839829468((*d0));
add_178482_2381377266(&pl0, LOC67);
LOC68 = (NimStringDesc*)0;
LOC68 = rawNewString(callpattern0->Sup.len + 3);
appendString(LOC68, callpattern0);
appendString(LOC68, ((NimStringDesc*) &T839829468_497));
memset((void*)LOC69, 0, sizeof(LOC69));
LOC69[0] = op0.r;
LOC69[1] = pl0;
LOC69[2] = addcomma_540464_839829468(pl0);
LOC69[3] = rawproc0;
linef_532700_839829468(p0, ((Tcprocsection529011) 2), LOC68, LOC69, 4);
}
goto LA50;
LA55: ;
{
/* possible aliasing: call into a temp, then assign to d0 */
Tloc292816 tmp0;
Ropeobj178006* LOC71;
NimStringDesc* LOC72;
TY535235 LOC73;
memset((void*)(&tmp0), 0, sizeof(tmp0));
gettemp_537032_839829468(p0, (*typ0).sons->data[((NI) 0)], (&tmp0), NIM_TRUE);
LOC71 = (Ropeobj178006*)0;
LOC71 = addrloc_538204_839829468(tmp0);
add_178482_2381377266(&pl0, LOC71);
LOC72 = (NimStringDesc*)0;
LOC72 = rawNewString(callpattern0->Sup.len + 3);
appendString(LOC72, callpattern0);
appendString(LOC72, ((NimStringDesc*) &T839829468_497));
memset((void*)LOC73, 0, sizeof(LOC73));
LOC73[0] = op0.r;
LOC73[1] = pl0;
LOC73[2] = addcomma_540464_839829468(pl0);
LOC73[3] = rawproc0;
linef_532700_839829468(p0, ((Tcprocsection529011) 2), LOC72, LOC73, 4);
genassignment_539264_839829468(p0, (*d0), tmp0, 0);
}
LA50: ;
}
goto LA38;
LA41: ;
{
/* normal return type: build the call as an expression loc */
Tloc292816 list0;
TY535235 LOC79;
{
if (!((*d0).k == ((Tlockind292808) 0))) goto LA77;
gettemp_537032_839829468(p0, (*typ0).sons->data[((NI) 0)], d0, NIM_FALSE);
}
LA77: ;
memset((void*)(&list0), 0, sizeof(list0));
initloc_532273_839829468((&list0), ((Tlockind292808) 9), (*d0).t, ((Tstorageloc292812) 0));
memset((void*)LOC79, 0, sizeof(LOC79));
LOC79[0] = op0.r;
LOC79[1] = pl0;
LOC79[2] = addcomma_540464_839829468(pl0);
LOC79[3] = rawproc0;
list0.r = HEX25_178905_2381377266(callpattern0, LOC79, 4);
genassignment_539264_839829468(p0, (*d0), list0, 0);
}
LA38: ;
}
goto LA34;
LA36: ;
{
/* void proc: emit the call as a statement */
NimStringDesc* LOC81;
TY535235 LOC82;
LOC81 = (NimStringDesc*)0;
LOC81 = rawNewString(callpattern0->Sup.len + 3);
appendString(LOC81, callpattern0);
appendString(LOC81, ((NimStringDesc*) &T839829468_497));
memset((void*)LOC82, 0, sizeof(LOC82));
LOC82[0] = op0.r;
LOC82[1] = pl0;
LOC82[2] = addcomma_540464_839829468(pl0);
LOC82[3] = rawproc0;
linef_532700_839829468(p0, ((Tcprocsection529011) 2), LOC81, LOC82, 4);
}
LA34: ;
}
/*
 * Nim-compiler-generated C (cgen call section).
 * Generates argument i0 of call ri0 for pattern-based (importc/importcpp)
 * calls. If a formal parameter exists: compile-time-only params produce
 * NIL; a var (kind 23) formal with a hidden-addr (kind 64) actual
 * unwraps the addr and passes the value; otherwise the value is passed
 * as-is. If i0 is beyond the formals, the proc type must be varargs
 * (type flag 0) or a local error T839829468_501 is reported.
 */
N_NIMCALL(Ropeobj178006*, genotherarg_539277_839829468)(Tcproc529021* p0, Tnode292802* ri0, NI i0, Ttype292840* typ0) {
Ropeobj178006* result0;
result0 = (Ropeobj178006*)0;
{
NI LOC3;
Tnode292802* paramtype0;
LOC3 = (NI)0;
LOC3 = sonslen_295327_850551059(typ0);
if (!(i0 < LOC3)) goto LA4;
paramtype0 = (*(*typ0).n).kindU.S6.sons->data[i0];
{
NIM_BOOL LOC8;
LOC8 = (NIM_BOOL)0;
LOC8 = iscompiletimeonly_328706_3876443242((*paramtype0).typ);
if (!LOC8) goto LA9;
/* compile-time-only parameter: nothing emitted */
result0 = NIM_NIL;
}
goto LA6;
LA9: ;
{
NIM_BOOL LOC12;
Tnode292802* LOC16;
LOC12 = (NIM_BOOL)0;
LOC12 = ((*(*typ0).sons->data[i0]).kind == ((Ttypekind292244) 23));
if (!(LOC12)) goto LA13;
LOC12 = ((*(*ri0).kindU.S6.sons->data[i0]).kind == ((Tnodekind292020) 64));
LA13: ;
if (!LOC12) goto LA14;
/* var formal + hidden addr actual: strip the addr wrapper */
LOC16 = (Tnode292802*)0;
LOC16 = HEX5BHEX5D_293238_850551059((*ri0).kindU.S6.sons->data[i0], ((NI) 0));
result0 = genargnoparam_539938_839829468(p0, LOC16);
}
goto LA6;
LA14: ;
{
result0 = genargnoparam_539938_839829468(p0, (*ri0).kindU.S6.sons->data[i0]);
}
LA6: ;
}
goto LA1;
LA4: ;
{
{
/* beyond the formals: only valid for varargs proc types */
if (!!((((*typ0).flags &(1U<<((NU)(((Ttypeflag292431) 0))&31U)))!=0))) goto LA21;
localerror_196085_155036129((*ri0).info, ((NimStringDesc*) &T839829468_501));
result0 = NIM_NIL;
}
goto LA19;
LA21: ;
{
result0 = genargnoparam_539938_839829468(p0, (*ri0).kindU.S6.sons->data[i0]);
}
LA19: ;
}
LA1: ;
return result0;
}
/*
 * Nim-compiler-generated C (cgen call section).
 * Cancels addr/deref node pairs around an expression:
 *  - node kinds 63/64 look like addr / hidden-addr, 47/65 like deref /
 *    hidden-deref, 66 like an object conversion (TODO confirm the
 *    Tnodekind values against the compiler's ast module).
 *  - addr(deref(x)) and deref(addr(x)) both collapse to x, skipping
 *    one intervening conversion node; any other shape returns the
 *    original node unchanged.
 */
N_NIMCALL(Tnode292802*, skipaddrderef_541433_839829468)(Tnode292802* node0) {
Tnode292802* result0;
Tnode292802* n0;
NIM_BOOL isaddr0;
{ result0 = (Tnode292802*)0;
n0 = node0;
isaddr0 = NIM_FALSE;
switch ((*n0).kind) {
case ((Tnodekind292020) 63):
case ((Tnodekind292020) 64):
{
/* outer node is addr-like */
n0 = (*n0).kindU.S6.sons->data[((NI) 0)];
isaddr0 = NIM_TRUE;
}
break;
case ((Tnodekind292020) 47):
case ((Tnodekind292020) 65):
{
/* outer node is deref-like */
n0 = (*n0).kindU.S6.sons->data[((NI) 0)];
}
break;
default:
{
result0 = n0;
goto BeforeRet;
}
break;
}
{
/* skip one conversion node in between, if present */
if (!((*n0).kind == ((Tnodekind292020) 66))) goto LA6;
n0 = (*n0).kindU.S6.sons->data[((NI) 0)];
}
LA6: ;
{
NIM_BOOL LOC10;
LOC10 = (NIM_BOOL)0;
LOC10 = isaddr0;
if (!(LOC10)) goto LA11;
LOC10 = ((*n0).kind == ((Tnodekind292020) 47) || (*n0).kind == ((Tnodekind292020) 65));
LA11: ;
if (!LOC10) goto LA12;
/* addr(deref(x)) => x */
result0 = (*n0).kindU.S6.sons->data[((NI) 0)];
}
goto LA8;
LA12: ;
{
if (!((*n0).kind == ((Tnodekind292020) 63) || (*n0).kind == ((Tnodekind292020) 64))) goto LA15;
/* deref(addr(x)) => x */
result0 = (*n0).kindU.S6.sons->data[((NI) 0)];
}
goto LA8;
LA15: ;
{
/* no cancellation possible: keep the original */
result0 = node0;
}
LA8: ;
}BeforeRet: ;
return result0;
}
/*
 * Nim-compiler-generated C (cgen call section).
 * Generates the receiver ("this") argument for an importcpp method-call
 * pattern ("#."): picks argument i0, skips conversion nodes, and emits
 * either "value." or "(*ptr)."-style access depending on whether the
 * formal is a var (kind 23), a ptr (kind 21), or a plain value type.
 * The suffix ropes T839829468_504 / _257 are presumably "." and "->"
 * (or the reverse) -- TODO confirm against the string table.
 * Internal error T839829468_503 if i0 is out of the formals' range.
 */
N_NIMCALL(Ropeobj178006*, genthisarg_541475_839829468)(Tcproc529021* p0, Tnode292802* ri_541478_839829468, NI i0, Ttype292840* typ0) {
Ropeobj178006* result0;
Tnode292802* ri0;
Ttype292840* t0;
result0 = (Ropeobj178006*)0;
{
NI LOC3;
NimStringDesc* LOC6;
LOC3 = (NI)0;
LOC3 = sonslen_295327_850551059(typ0);
/* receiver index must be within the formals */
if (!!((i0 < LOC3))) goto LA4;
LOC6 = (NimStringDesc*)0;
LOC6 = HEX24_196185_1689653243(T839829468_503);
internalerror_196113_155036129(LOC6);
}
LA4: ;
ri0 = HEX5BHEX5D_293238_850551059(ri_541478_839829468, i0);
{
/* skip chained conversion nodes (kind 66) */
while (1) {
if (!((*ri0).kind == ((Tnodekind292020) 66))) goto LA8;
ri0 = HEX5BHEX5D_293238_850551059(ri0, ((NI) 0));
} LA8: ;
}
t0 = skiptypes_296099_850551059((*typ0).sons->data[i0], 2048);
{
Tnode292802* x0;
/* formal is a var parameter (kind 23) */
if (!((*t0).kind == ((Ttypekind292244) 23))) goto LA11;
{
if (!((*ri0).kind == ((Tnodekind292020) 64))) goto LA15;
x0 = HEX5BHEX5D_293238_850551059(ri0, ((NI) 0));
}
goto LA13;
LA15: ;
{
x0 = ri0;
}
LA13: ;
{
if (!((*(*x0).typ).kind == ((Ttypekind292244) 21))) goto LA20;
result0 = genargnoparam_539938_839829468(p0, x0);
add_178487_2381377266(&result0, ((NimStringDesc*) &T839829468_504));
}
goto LA18;
LA20: ;
{
NIM_BOOL LOC23;
Tnode292802* LOC25;
Tnode292802* LOC28;
LOC23 = (NIM_BOOL)0;
LOC23 = ((*x0).kind == ((Tnodekind292020) 65) || (*x0).kind == ((Tnodekind292020) 47));
if (!(LOC23)) goto LA24;
LOC25 = (Tnode292802*)0;
LOC25 = HEX5BHEX5D_293238_850551059(x0, ((NI) 0));
LOC23 = ((*(*LOC25).typ).kind == ((Ttypekind292244) 21));
LA24: ;
if (!LOC23) goto LA26;
/* deref of a ptr: use the underlying pointer directly */
LOC28 = (Tnode292802*)0;
LOC28 = HEX5BHEX5D_293238_850551059(x0, ((NI) 0));
result0 = genargnoparam_539938_839829468(p0, LOC28);
add_178487_2381377266(&result0, ((NimStringDesc*) &T839829468_504));
}
goto LA18;
LA26: ;
{
result0 = genargnoparam_539938_839829468(p0, x0);
add_178487_2381377266(&result0, ((NimStringDesc*) &T839829468_257));
}
LA18: ;
}
goto LA9;
LA11: ;
{
/* formal is a ptr type (kind 21) */
if (!((*t0).kind == ((Ttypekind292244) 21))) goto LA31;
{
Tnode292802* LOC37;
if (!((*ri0).kind == ((Tnodekind292020) 63) || (*ri0).kind == ((Tnodekind292020) 64))) goto LA35;
LOC37 = (Tnode292802*)0;
LOC37 = HEX5BHEX5D_293238_850551059(ri0, ((NI) 0));
result0 = genargnoparam_539938_839829468(p0, LOC37);
add_178487_2381377266(&result0, ((NimStringDesc*) &T839829468_257));
}
goto LA33;
LA35: ;
{
result0 = genargnoparam_539938_839829468(p0, ri0);
add_178487_2381377266(&result0, ((NimStringDesc*) &T839829468_504));
}
LA33: ;
}
goto LA9;
LA31: ;
{
/* plain value receiver: cancel addr/deref pairs first */
ri0 = skipaddrderef_541433_839829468(ri0);
{
if (!((*ri0).kind == ((Tnodekind292020) 63) || (*ri0).kind == ((Tnodekind292020) 64))) goto LA42;
ri0 = HEX5BHEX5D_293238_850551059(ri0, ((NI) 0));
}
LA42: ;
result0 = genargnoparam_539938_839829468(p0, ri0);
add_178487_2381377266(&result0, ((NimStringDesc*) &T839829468_257));
}
LA9: ;
return result0;
}
/*
 * Nim-compiler-generated C (cgen call section).
 * Expands an importc/importcpp pattern string `pat0` for call `ri0`:
 *  - '@' (64): paste the remaining arguments j.. comma-separated.
 *  - '#' (35): next argument. Modifiers read one char ahead:
 *      '#+' / '#@' (43/64): the argument must itself be a call-ish node
 *        (kinds 26..32); its callee and arguments are spliced inline,
 *        wrapped by T839829468_118/_117 ropes (presumably "(" and ")").
 *        Otherwise local error T839829468_502.
 *      '#.' (46): receiver argument via genthisarg.
 *      '#[' (91): argument with addr/deref/conv wrappers stripped.
 *      plain '#': genotherarg.
 *  - '\'' (39): C++ generic type slot -- scancppgenericslot parses
 *    'N with stars, resolvestarsincpptype maps it to a type; NIL type
 *    renders pattern T839829468_26, else the type's C name.
 *  - anything else: literal text copied through verbatim.
 * i0 walks the pattern, j0 counts consumed call arguments.
 */
N_NIMCALL(Ropeobj178006*, genpatterncall_541699_839829468)(Tcproc529021* p0, Tnode292802* ri_541702_839829468, NimStringDesc* pat0, Ttype292840* typ_541704_839829468) {
Ropeobj178006* result0;
NI i0;
NI j0;
result0 = (Ropeobj178006*)0;
i0 = ((NI) 0);
j0 = ((NI) 1);
{
while (1) {
if (!(i0 < (pat0 ? pat0->Sup.len : 0))) goto LA2;
switch (((NU8)(pat0->data[i0]))) {
case 64:
{
/* '@': all remaining arguments, comma-separated */
{
NI LOC6;
Ropeobj178006* LOC9;
LOC6 = (NI)0;
LOC6 = len_293081_850551059(ri_541702_839829468);
if (!(j0 < LOC6)) goto LA7;
LOC9 = (Ropeobj178006*)0;
LOC9 = genotherarg_539277_839829468(p0, ri_541702_839829468, j0, typ_541704_839829468);
add_178482_2381377266(&result0, LOC9);
{
NI k_541728_839829468;
NI HEX3Atmp_541904_839829468;
NI HEX3Atmp_541905_839829468;
NI LOC11;
NI res_541908_839829468;
k_541728_839829468 = (NI)0;
HEX3Atmp_541904_839829468 = (NI)0;
HEX3Atmp_541905_839829468 = (NI)0;
HEX3Atmp_541904_839829468 = (NI)(j0 + ((NI) 1));
LOC11 = (NI)0;
LOC11 = len_293081_850551059(ri_541702_839829468);
HEX3Atmp_541905_839829468 = (LOC11 - 1);
res_541908_839829468 = HEX3Atmp_541904_839829468;
{
while (1) {
TY533289 LOC14;
Ropeobj178006* LOC15;
Ropeobj178006* LOC16;
if (!(res_541908_839829468 <= HEX3Atmp_541905_839829468)) goto LA13;
k_541728_839829468 = res_541908_839829468;
memset((void*)LOC14, 0, sizeof(LOC14));
LOC15 = (Ropeobj178006*)0;
LOC15 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_110), LOC14, 0);
add_178482_2381377266(&result0, LOC15);
LOC16 = (Ropeobj178006*)0;
LOC16 = genotherarg_539277_839829468(p0, ri_541702_839829468, k_541728_839829468, typ_541704_839829468);
add_178482_2381377266(&result0, LOC16);
res_541908_839829468 += ((NI) 1);
} LA13: ;
}
}
}
LA7: ;
i0 += ((NI) 1);
}
break;
case 35:
{
/* '#': next argument, possibly with a one-char modifier */
{
Tnode292802* ri0;
if (!(((NU8)(pat0->data[(NI)(i0 + ((NI) 1))])) == ((NU8)(43)) || ((NU8)(pat0->data[(NI)(i0 + ((NI) 1))])) == ((NU8)(64)))) goto LA20;
ri0 = HEX5BHEX5D_293238_850551059(ri_541702_839829468, j0);
{
Ttype292840* typ0;
TY533289 LOC31;
Ropeobj178006* LOC32;
TY533289 LOC46;
Ropeobj178006* LOC47;
/* '#+' / '#@' require a call-like node (kinds 26..32) */
if (!((*ri0).kind == ((Tnodekind292020) 27) || (*ri0).kind == ((Tnodekind292020) 29) || (*ri0).kind == ((Tnodekind292020) 30) || (*ri0).kind == ((Tnodekind292020) 31) || (*ri0).kind == ((Tnodekind292020) 26) || (*ri0).kind == ((Tnodekind292020) 28) || (*ri0).kind == ((Tnodekind292020) 32))) goto LA24;
typ0 = skiptypes_296099_850551059((*(*ri0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106232576256));
{
Ropeobj178006* LOC30;
/* '#+' additionally emits the nested callee itself */
if (!((NU8)(pat0->data[(NI)(i0 + ((NI) 1))]) == (NU8)(43))) goto LA28;
LOC30 = (Ropeobj178006*)0;
LOC30 = genargnoparam_539938_839829468(p0, (*ri0).kindU.S6.sons->data[((NI) 0)]);
add_178482_2381377266(&result0, LOC30);
}
LA28: ;
memset((void*)LOC31, 0, sizeof(LOC31));
LOC32 = (Ropeobj178006*)0;
LOC32 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_118), LOC31, 0);
add_178482_2381377266(&result0, LOC32);
{
NI LOC35;
Ropeobj178006* LOC38;
LOC35 = (NI)0;
LOC35 = len_293081_850551059(ri0);
if (!(((NI) 1) < LOC35)) goto LA36;
LOC38 = (Ropeobj178006*)0;
LOC38 = genotherarg_539277_839829468(p0, ri0, ((NI) 1), typ0);
add_178482_2381377266(&result0, LOC38);
}
LA36: ;
{
NI k_541793_839829468;
NI HEX3Atmp_541915_839829468;
NI HEX3Atmp_541916_839829468;
NI LOC40;
NI res_541919_839829468;
k_541793_839829468 = (NI)0;
HEX3Atmp_541915_839829468 = (NI)0;
HEX3Atmp_541916_839829468 = (NI)0;
HEX3Atmp_541915_839829468 = (NI)(j0 + ((NI) 1));
LOC40 = (NI)0;
LOC40 = len_293081_850551059(ri0);
HEX3Atmp_541916_839829468 = (LOC40 - 1);
res_541919_839829468 = HEX3Atmp_541915_839829468;
{
while (1) {
TY533289 LOC43;
Ropeobj178006* LOC44;
Ropeobj178006* LOC45;
if (!(res_541919_839829468 <= HEX3Atmp_541916_839829468)) goto LA42;
k_541793_839829468 = res_541919_839829468;
memset((void*)LOC43, 0, sizeof(LOC43));
LOC44 = (Ropeobj178006*)0;
LOC44 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_110), LOC43, 0);
add_178482_2381377266(&result0, LOC44);
LOC45 = (Ropeobj178006*)0;
LOC45 = genotherarg_539277_839829468(p0, ri0, k_541793_839829468, typ0);
add_178482_2381377266(&result0, LOC45);
res_541919_839829468 += ((NI) 1);
} LA42: ;
}
}
memset((void*)LOC46, 0, sizeof(LOC46));
LOC47 = (Ropeobj178006*)0;
LOC47 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_117), LOC46, 0);
add_178482_2381377266(&result0, LOC47);
}
goto LA22;
LA24: ;
{
localerror_196085_155036129((*ri0).info, ((NimStringDesc*) &T839829468_502));
}
LA22: ;
i0 += ((NI) 1);
}
goto LA18;
LA20: ;
{
Ropeobj178006* LOC52;
/* '#.': receiver argument */
if (!((NU8)(pat0->data[(NI)(i0 + ((NI) 1))]) == (NU8)(46))) goto LA50;
LOC52 = (Ropeobj178006*)0;
LOC52 = genthisarg_541475_839829468(p0, ri_541702_839829468, j0, typ_541704_839829468);
add_178482_2381377266(&result0, LOC52);
i0 += ((NI) 1);
}
goto LA18;
LA50: ;
{
Tnode292802* arg0;
Ropeobj178006* LOC58;
/* '#[': fully strip addr/deref/conversion wrappers */
if (!((NU8)(pat0->data[(NI)(i0 + ((NI) 1))]) == (NU8)(91))) goto LA54;
arg0 = skipaddrderef_541433_839829468((*ri_541702_839829468).kindU.S6.sons->data[j0]);
{
while (1) {
if (!((*arg0).kind == ((Tnodekind292020) 63) || (*arg0).kind == ((Tnodekind292020) 64) || (*arg0).kind == ((Tnodekind292020) 66))) goto LA57;
arg0 = HEX5BHEX5D_293238_850551059(arg0, ((NI) 0));
} LA57: ;
}
LOC58 = (Ropeobj178006*)0;
LOC58 = genargnoparam_539938_839829468(p0, arg0);
add_178482_2381377266(&result0, LOC58);
}
goto LA18;
LA54: ;
{
Ropeobj178006* LOC60;
LOC60 = (Ropeobj178006*)0;
LOC60 = genotherarg_539277_839829468(p0, ri_541702_839829468, j0, typ_541704_839829468);
add_178482_2381377266(&result0, LOC60);
}
LA18: ;
j0 += ((NI) 1);
i0 += ((NI) 1);
}
break;
case 39:
{
/* '\'': C++ generic type slot, e.g. '1 or '*2 */
NI idx0;
NI stars0;
idx0 = (NI)0;
stars0 = (NI)0;
{
NIM_BOOL LOC64;
Ttype292840* t0;
LOC64 = (NIM_BOOL)0;
LOC64 = scancppgenericslot_534827_839829468(pat0, (&i0), (&idx0), (&stars0));
if (!LOC64) goto LA65;
t0 = resolvestarsincpptype_534891_839829468(typ_541704_839829468, idx0, stars0);
{
TY533289 LOC71;
Ropeobj178006* LOC72;
if (!(t0 == NIM_NIL)) goto LA69;
memset((void*)LOC71, 0, sizeof(LOC71));
LOC72 = (Ropeobj178006*)0;
LOC72 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_26), LOC71, 0);
add_178482_2381377266(&result0, LOC72);
}
goto LA67;
LA69: ;
{
Ropeobj178006* LOC74;
LOC74 = (Ropeobj178006*)0;
LOC74 = gettypedesc_535671_839829468((*p0).module, t0);
add_178482_2381377266(&result0, LOC74);
}
LA67: ;
}
LA65: ;
}
break;
default:
{
/* literal run: copy up to the next '@', '#' or '\'' verbatim */
NI start0;
start0 = i0;
{
while (1) {
if (!(i0 < (pat0 ? pat0->Sup.len : 0))) goto LA77;
{
if (!!((((NU8)(pat0->data[i0])) == ((NU8)(64)) || ((NU8)(pat0->data[i0])) == ((NU8)(35)) || ((NU8)(pat0->data[i0])) == ((NU8)(39))))) goto LA80;
i0 += ((NI) 1);
}
goto LA78;
LA80: ;
{
goto LA76;
}
LA78: ;
} LA77: ;
} LA76: ;
{
NimStringDesc* LOC87;
if (!(start0 <= (NI)(i0 - ((NI) 1)))) goto LA85;
LOC87 = (NimStringDesc*)0;
LOC87 = copyStrLast(pat0, start0, (NI)(i0 - ((NI) 1)));
add_178487_2381377266(&result0, LOC87);
}
LA85: ;
}
break;
}
} LA2: ;
}
return result0;
}
/* Machine-generated C (Nim compiler codegen output; the mangling suggests
 * this corresponds to fixupCall in the Nim compiler's ccgcalls module --
 * TODO confirm against the Nim sources).
 *
 * Builds the textual call `pl0 = callee0 <fmt T839829468_118> params0`
 * (the format constant presumably contributes the "(" -- verify), then
 * decides, from the callee's return type, how the call result reaches the
 * destination loc `d0`:
 *   - return type nil (void): close the call and emit it as a statement;
 *   - "invalid" return type (cannot be returned by value in C): append the
 *     destination's address as a hidden extra argument and emit;
 *   - otherwise: treat the finished rope as an expression and either store
 *     it directly into d0 or assign it via genassignment.
 * Emits into proc section 2 via line_532690 (presumably the statement
 * section -- confirm against Tcprocsection). */
N_NIMCALL(void, fixupcall_539410_839829468)(Tcproc529021* p0, Tnode292802* le0, Tnode292802* ri0, Tloc292816* d0, Ropeobj178006* callee0, Ropeobj178006* params0) {
Ropeobj178006* pl0;
TY533289 LOC1;
Ropeobj178006* LOC2;
Ropeobj178006* LOC3;
Ttype292840* typ0;
memset((void*)LOC1, 0, sizeof(LOC1));
LOC2 = (Ropeobj178006*)0;
LOC2 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_118), LOC1, 0);
LOC3 = (Ropeobj178006*)0;
LOC3 = HEX26_178418_2381377266(callee0, LOC2);
pl0 = HEX26_178418_2381377266(LOC3, params0);
/* typ0 = callee's proc type, with irrelevant type wrappers skipped. */
typ0 = skiptypes_296099_850551059((*(*ri0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106232576256));
{
/* sons[0] of a proc type is its return type; nil means void. */
if (!!(((*typ0).sons->data[((NI) 0)] == NIM_NIL))) goto LA6;
{
NIM_BOOL LOC10;
LOC10 = (NIM_BOOL)0;
LOC10 = isinvalidreturntype_533548_839829468((*typ0).sons->data[((NI) 0)]);
if (!LOC10) goto LA11;
/* Return type cannot travel by value: pass &dest as an extra argument. */
{
TY533289 LOC17;
Ropeobj178006* LOC18;
/* Add a separator only if there were preceding parameters. */
if (!!((params0 == NIM_NIL))) goto LA15;
memset((void*)LOC17, 0, sizeof(LOC17));
LOC18 = (Ropeobj178006*)0;
LOC18 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_110), LOC17, 0);
add_178482_2381377266(&pl0, LOC18);
}
LA15: ;
{
NIM_BOOL LOC21;
NIM_BOOL LOC23;
Ropeobj178006* LOC36;
TY533289 LOC37;
Ropeobj178006* LOC38;
LOC21 = (NIM_BOOL)0;
LOC21 = ((3 &(1U<<((NU)((*d0).k)&15U)))!=0);
if (LOC21) goto LA22;
LOC23 = (NIM_BOOL)0;
LOC23 = leftappearsonrightside_539329_839829468(le0, ri0);
LOC21 = !(LOC23);
LA22: ;
if (!LOC21) goto LA24;
/* Safe to write straight into d0 (lhs does not alias the rhs). */
{
if (!((*d0).k == ((Tlockind292808) 0))) goto LA28;
gettemp_537032_839829468(p0, (*typ0).sons->data[((NI) 0)], d0, NIM_TRUE);
}
goto LA26;
LA28: ;
{
NIM_BOOL LOC31;
NIM_BOOL LOC33;
LOC31 = (NIM_BOOL)0;
LOC31 = !(((66 &(1U<<((NU)((*d0).k)&15U)))!=0));
if (!(LOC31)) goto LA32;
LOC33 = (NIM_BOOL)0;
LOC33 = hasnoinit_539383_839829468(ri0);
LOC31 = !(LOC33);
LA32: ;
if (!LOC31) goto LA34;
resetloc_538350_839829468(p0, d0);
}
goto LA26;
LA34: ;
LA26: ;
LOC36 = (Ropeobj178006*)0;
LOC36 = addrloc_538204_839829468((*d0));
add_178482_2381377266(&pl0, LOC36);
memset((void*)LOC37, 0, sizeof(LOC37));
LOC38 = (Ropeobj178006*)0;
LOC38 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_505), LOC37, 0);
add_178482_2381377266(&pl0, LOC38);
line_532690_839829468(p0, ((Tcprocsection529011) 2), pl0);
}
goto LA19;
LA24: ;
/* Otherwise route the result through a fresh temporary, then assign. */
{
Tloc292816 tmp0;
Ropeobj178006* LOC40;
TY533289 LOC41;
Ropeobj178006* LOC42;
memset((void*)(&tmp0), 0, sizeof(tmp0));
gettemp_537032_839829468(p0, (*typ0).sons->data[((NI) 0)], (&tmp0), NIM_TRUE);
LOC40 = (Ropeobj178006*)0;
LOC40 = addrloc_538204_839829468(tmp0);
add_178482_2381377266(&pl0, LOC40);
memset((void*)LOC41, 0, sizeof(LOC41));
LOC42 = (Ropeobj178006*)0;
LOC42 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_505), LOC41, 0);
add_178482_2381377266(&pl0, LOC42);
line_532690_839829468(p0, ((Tcprocsection529011) 2), pl0);
genassignment_539264_839829468(p0, (*d0), tmp0, 0);
}
LA19: ;
}
goto LA8;
LA11: ;
/* Normal value-returning call: close the call expression ... */
{
TY533289 LOC44;
Ropeobj178006* LOC45;
memset((void*)LOC44, 0, sizeof(LOC44));
LOC45 = (Ropeobj178006*)0;
LOC45 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_117), LOC44, 0);
add_178482_2381377266(&pl0, LOC45);
{
NIM_BOOL LOC48;
NIM_BOOL LOC49;
LOC48 = (NIM_BOOL)0;
LOC49 = (NIM_BOOL)0;
LOC49 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC49) goto LA50;
LOC49 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA50: ;
LOC48 = LOC49;
if (!(LOC48)) goto LA51;
LOC48 = (((*d0).flags &(1U<<((NU)(((Tlocflag292810) 8))&15U)))!=0);
LA51: ;
if (!LOC48) goto LA52;
/* ... and, when permitted, store the call rope itself as d0's value. */
(*d0).k = ((Tlockind292808) 9);
unsureAsgnRef((void**) (&(*d0).r), pl0);
(*d0).flags &= ~(((NU16)1) << ((((Tlocflag292810) 8)) % (sizeof(NU16)*8)));
}
goto LA46;
LA52: ;
{
Tloc292816 list0;
{
if (!((*d0).k == ((Tlockind292808) 0))) goto LA57;
gettemp_537032_839829468(p0, (*typ0).sons->data[((NI) 0)], d0, NIM_FALSE);
}
LA57: ;
memset((void*)(&list0), 0, sizeof(list0));
initloc_532273_839829468((&list0), ((Tlockind292808) 9), (*d0).t, ((Tstorageloc292812) 0));
list0.r = pl0;
genassignment_539264_839829468(p0, (*d0), list0, 0);
}
LA46: ;
}
LA8: ;
}
goto LA4;
LA6: ;
/* Void call: just terminate the expression and emit it as a statement. */
{
TY533289 LOC60;
Ropeobj178006* LOC61;
memset((void*)LOC60, 0, sizeof(LOC60));
LOC61 = (Ropeobj178006*)0;
LOC61 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_505), LOC60, 0);
add_178482_2381377266(&pl0, LOC61);
line_532690_839829468(p0, ((Tcprocsection529011) 2), pl0);
}
LA4: ;
}
/* Machine-generated C (Nim compiler codegen; presumably genInfixCall in
 * ccgcalls -- TODO confirm against the Nim sources).
 *
 * Generates an infix-style call (e.g. for .importcpp-like callees -- the
 * callee symbol carries a pattern string in its loc). If the pattern
 * contains the marker T839829468_500, delegates to genpatterncall and then
 * stores/assigns the resulting expression into d0; otherwise builds
 * "thisArg.op(args...)" manually and hands the pieces to fixupcall.
 *
 * FIX(review): the two `add_178482_2381377266(&params0, ...)` calls in the
 * argument loop were corrupted to `¶ms0` (an HTML-entity mangling of
 * `&params0`, `&para` -> `¶`), which does not compile. Restored `&params0`;
 * every other token is unchanged. */
N_NIMCALL(void, geninfixcall_541929_839829468)(Tcproc529021* p0, Tnode292802* le0, Tnode292802* ri0, Tloc292816* d0) {
Tloc292816 op0;
Ttype292840* typ_541940_839829468;
NI length0;
NimStringDesc* pat0;
memset((void*)(&op0), 0, sizeof(op0));
initlocexpr_539283_839829468(p0, (*ri0).kindU.S6.sons->data[((NI) 0)], (&op0));
typ_541940_839829468 = skiptypes_296099_850551059((*(*ri0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106232576256));
length0 = sonslen_295351_850551059(ri0);
/* pat0 = the callee symbol's pattern string (loc.r rope data). */
pat0 = (*(*(*(*ri0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym).loc.r).data;
{
NimStringDesc* LOC5;
/* internal error if the callee has no pattern attached */
if (!!(!((pat0 == NIM_NIL)))) goto LA3;
LOC5 = (NimStringDesc*)0;
LOC5 = HEX24_196185_1689653243(T839829468_498);
internalerror_196113_155036129(LOC5);
}
LA3: ;
{
NIM_BOOL LOC8;
Ropeobj178006* pl0;
Ttype292840* typ0;
LOC8 = (NIM_BOOL)0;
LOC8 = contains_110056_4286263276(pat0, T839829468_500);
if (!LOC8) goto LA9;
/* Pattern-based expansion of the whole call. */
pl0 = genpatterncall_541699_839829468(p0, ri0, pat0, typ_541940_839829468);
typ0 = skiptypes_296099_850551059((*(*ri0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106232576256));
{
if (!!(((*typ0).sons->data[((NI) 0)] == NIM_NIL))) goto LA13;
{
NIM_BOOL LOC17;
NIM_BOOL LOC18;
LOC17 = (NIM_BOOL)0;
LOC18 = (NIM_BOOL)0;
LOC18 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC18) goto LA19;
LOC18 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA19: ;
LOC17 = LOC18;
if (!(LOC17)) goto LA20;
LOC17 = (((*d0).flags &(1U<<((NU)(((Tlocflag292810) 8))&15U)))!=0);
LA20: ;
if (!LOC17) goto LA21;
/* Store the generated expression rope directly as d0's value. */
(*d0).k = ((Tlockind292808) 9);
unsureAsgnRef((void**) (&(*d0).r), pl0);
(*d0).flags &= ~(((NU16)1) << ((((Tlocflag292810) 8)) % (sizeof(NU16)*8)));
}
goto LA15;
LA21: ;
{
Tloc292816 list0;
{
if (!((*d0).k == ((Tlockind292808) 0))) goto LA26;
gettemp_537032_839829468(p0, (*typ0).sons->data[((NI) 0)], d0, NIM_FALSE);
}
LA26: ;
memset((void*)(&list0), 0, sizeof(list0));
initloc_532273_839829468((&list0), ((Tlockind292808) 9), (*d0).t, ((Tstorageloc292812) 0));
list0.r = pl0;
genassignment_539264_839829468(p0, (*d0), list0, 0);
}
LA15: ;
}
goto LA11;
LA13: ;
/* Void result: terminate and emit as a statement. */
{
TY533289 LOC29;
Ropeobj178006* LOC30;
memset((void*)LOC29, 0, sizeof(LOC29));
LOC30 = (Ropeobj178006*)0;
LOC30 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_497), LOC29, 0);
add_178482_2381377266(&pl0, LOC30);
line_532690_839829468(p0, ((Tcprocsection529011) 2), pl0);
}
LA11: ;
}
goto LA6;
LA9: ;
/* No pattern marker: build "this.op" + comma-joined args, then fixupcall. */
{
Ropeobj178006* pl0;
Ropeobj178006* params0;
pl0 = NIM_NIL;
{
NI LOC34;
Ropeobj178006* LOC37;
LOC34 = (NI)0;
LOC34 = len_293081_850551059(ri0);
if (!(((NI) 1) < LOC34)) goto LA35;
LOC37 = (Ropeobj178006*)0;
LOC37 = genthisarg_541475_839829468(p0, ri0, ((NI) 1), typ_541940_839829468);
add_178482_2381377266(&pl0, LOC37);
}
LA35: ;
add_178482_2381377266(&pl0, op0.r);
params0 = (Ropeobj178006*)0;
{
NI i_542425_839829468;
NI HEX3Atmp_542609_839829468;
NI res_542612_839829468;
i_542425_839829468 = (NI)0;
HEX3Atmp_542609_839829468 = (NI)0;
HEX3Atmp_542609_839829468 = (NI)(length0 - ((NI) 1));
res_542612_839829468 = ((NI) 2);
{
while (1) {
Ropeobj178006* LOC47;
if (!(res_542612_839829468 <= HEX3Atmp_542609_839829468)) goto LA40;
i_542425_839829468 = res_542612_839829468;
{
TY533289 LOC45;
Ropeobj178006* LOC46;
/* comma separator between parameters */
if (!!((params0 == NIM_NIL))) goto LA43;
memset((void*)LOC45, 0, sizeof(LOC45));
LOC46 = (Ropeobj178006*)0;
LOC46 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_110), LOC45, 0);
add_178482_2381377266(&params0, LOC46);
}
LA43: ;
LOC47 = (Ropeobj178006*)0;
LOC47 = genotherarg_539277_839829468(p0, ri0, i_542425_839829468, typ_541940_839829468);
add_178482_2381377266(&params0, LOC47);
res_542612_839829468 += ((NI) 1);
} LA40: ;
}
}
fixupcall_539410_839829468(p0, le0, ri0, d0, pl0, params0);
}
LA6: ;
}
/* Machine-generated C (Nim compiler codegen; presumably genNamedParamCall
 * in ccgcalls, used for Objective-C-style named-parameter calls -- TODO
 * confirm against the Nim sources).
 *
 * Builds a bracketed message-send rope: receiver, selector, then
 * "name: arg" pairs for the remaining parameters (start0 selects where the
 * positional prefix ends), and finally routes the result into d0 the same
 * three ways as fixupcall (hidden out-parameter for invalid return types,
 * expression store/assign otherwise, plain statement for void). */
N_NIMCALL(void, gennamedparamcall_542616_839829468)(Tcproc529021* p0, Tnode292802* ri0, Tloc292816* d0) {
Tloc292816 op0;
Ropeobj178006* pl0;
TY533289 LOC1;
Ttype292840* typ0;
NI length0;
NimStringDesc* pat0;
NI start0;
memset((void*)(&op0), 0, sizeof(op0));
initlocexpr_539283_839829468(p0, (*ri0).kindU.S6.sons->data[((NI) 0)], (&op0));
memset((void*)LOC1, 0, sizeof(LOC1));
/* pl0 starts with format constant T839829468_506 (presumably "[" -- verify). */
pl0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_506), LOC1, 0);
typ0 = skiptypes_296099_850551059((*(*ri0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106232576256));
length0 = sonslen_295351_850551059(ri0);
pat0 = (*(*(*(*ri0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym).loc.r).data;
{
NimStringDesc* LOC6;
/* internal error if the callee symbol has no pattern string */
if (!!(!((pat0 == NIM_NIL)))) goto LA4;
LOC6 = (NimStringDesc*)0;
LOC6 = HEX24_196185_1689653243(T839829468_507);
internalerror_196113_155036129(LOC6);
}
LA4: ;
start0 = ((NI) 3);
{
NIM_BOOL LOC9;
LOC9 = (NIM_BOOL)0;
/* 32 == ' ': pattern with a space changes the receiver/selector layout */
LOC9 = contains_110046_4286263276(pat0, 32);
if (!LOC9) goto LA10;
start0 = ((NI) 1);
add_178482_2381377266(&pl0, op0.r);
{
TY533289 LOC16;
Ropeobj178006* LOC17;
Ropeobj178006* LOC18;
if (!(((NI) 1) < length0)) goto LA14;
memset((void*)LOC16, 0, sizeof(LOC16));
LOC17 = (Ropeobj178006*)0;
LOC17 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_244), LOC16, 0);
add_178482_2381377266(&pl0, LOC17);
LOC18 = (Ropeobj178006*)0;
LOC18 = genarg_539787_839829468(p0, (*ri0).kindU.S6.sons->data[((NI) 1)], (*(*(*typ0).n).kindU.S6.sons->data[((NI) 1)]).kindU.S4.sym, ri0);
add_178482_2381377266(&pl0, LOC18);
start0 = ((NI) 2);
}
LA14: ;
}
goto LA7;
LA10: ;
/* No space in pattern: "arg1 selector" ordering, optional second arg. */
{
{
Ropeobj178006* LOC24;
TY533289 LOC25;
Ropeobj178006* LOC26;
if (!(((NI) 1) < length0)) goto LA22;
LOC24 = (Ropeobj178006*)0;
LOC24 = genarg_539787_839829468(p0, (*ri0).kindU.S6.sons->data[((NI) 1)], (*(*(*typ0).n).kindU.S6.sons->data[((NI) 1)]).kindU.S4.sym, ri0);
add_178482_2381377266(&pl0, LOC24);
memset((void*)LOC25, 0, sizeof(LOC25));
LOC26 = (Ropeobj178006*)0;
LOC26 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_111), LOC25, 0);
add_178482_2381377266(&pl0, LOC26);
}
LA22: ;
add_178482_2381377266(&pl0, op0.r);
{
TY533289 LOC31;
Ropeobj178006* LOC32;
Ropeobj178006* LOC33;
if (!(((NI) 2) < length0)) goto LA29;
memset((void*)LOC31, 0, sizeof(LOC31));
LOC32 = (Ropeobj178006*)0;
LOC32 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_244), LOC31, 0);
add_178482_2381377266(&pl0, LOC32);
LOC33 = (Ropeobj178006*)0;
LOC33 = genarg_539787_839829468(p0, (*ri0).kindU.S6.sons->data[((NI) 2)], (*(*(*typ0).n).kindU.S6.sons->data[((NI) 2)]).kindU.S4.sym, ri0);
add_178482_2381377266(&pl0, LOC33);
}
LA29: ;
}
LA7: ;
/* Remaining arguments: emit "paramName: arg" for each, from start0 on. */
{
NI i_543051_839829468;
NI HEX3Atmp_543617_839829468;
NI res_543620_839829468;
i_543051_839829468 = (NI)0;
HEX3Atmp_543617_839829468 = (NI)0;
HEX3Atmp_543617_839829468 = (NI)(length0 - ((NI) 1));
res_543620_839829468 = start0;
{
while (1) {
Tsym292834* param0;
TY533289 LOC42;
Ropeobj178006* LOC43;
TY533289 LOC44;
Ropeobj178006* LOC45;
Ropeobj178006* LOC46;
if (!(res_543620_839829468 <= HEX3Atmp_543617_839829468)) goto LA36;
i_543051_839829468 = res_543620_839829468;
{
NI LOC39;
LOC39 = (NI)0;
LOC39 = sonslen_295327_850551059(typ0);
/* more call arguments than declared formal parameters -> internal error */
if (!(LOC39 <= i_543051_839829468)) goto LA40;
internalerror_196100_155036129((*ri0).info, ((NimStringDesc*) &T839829468_508));
}
LA40: ;
param0 = (*(*(*typ0).n).kindU.S6.sons->data[i_543051_839829468]).kindU.S4.sym;
memset((void*)LOC42, 0, sizeof(LOC42));
LOC43 = (Ropeobj178006*)0;
LOC43 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_111), LOC42, 0);
add_178482_2381377266(&pl0, LOC43);
add_178487_2381377266(&pl0, (*(*param0).name).s);
memset((void*)LOC44, 0, sizeof(LOC44));
LOC45 = (Ropeobj178006*)0;
LOC45 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_244), LOC44, 0);
add_178482_2381377266(&pl0, LOC45);
LOC46 = (Ropeobj178006*)0;
LOC46 = genarg_539787_839829468(p0, (*ri0).kindU.S6.sons->data[i_543051_839829468], param0, ri0);
add_178482_2381377266(&pl0, LOC46);
res_543620_839829468 += ((NI) 1);
} LA36: ;
}
}
{
/* Result handling; sons[0] is the return type, nil for void. */
if (!!(((*typ0).sons->data[((NI) 0)] == NIM_NIL))) goto LA49;
{
NIM_BOOL LOC53;
LOC53 = (NIM_BOOL)0;
LOC53 = isinvalidreturntype_533548_839829468((*typ0).sons->data[((NI) 0)]);
if (!LOC53) goto LA54;
/* Invalid return type: pass the destination address as extra argument. */
{
NI LOC58;
TY533289 LOC61;
Ropeobj178006* LOC62;
LOC58 = (NI)0;
LOC58 = sonslen_295351_850551059(ri0);
if (!(((NI) 1) < LOC58)) goto LA59;
memset((void*)LOC61, 0, sizeof(LOC61));
LOC62 = (Ropeobj178006*)0;
LOC62 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_111), LOC61, 0);
add_178482_2381377266(&pl0, LOC62);
}
LA59: ;
{
TY533289 LOC71;
Ropeobj178006* LOC72;
Ropeobj178006* LOC73;
TY533289 LOC74;
Ropeobj178006* LOC75;
if (!((3 &(1U<<((NU)((*d0).k)&15U)))!=0)) goto LA65;
{
if (!((*d0).k == ((Tlockind292808) 0))) goto LA69;
gettemp_537032_839829468(p0, (*typ0).sons->data[((NI) 0)], d0, NIM_TRUE);
}
LA69: ;
memset((void*)LOC71, 0, sizeof(LOC71));
LOC72 = (Ropeobj178006*)0;
LOC72 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_509), LOC71, 0);
add_178482_2381377266(&pl0, LOC72);
LOC73 = (Ropeobj178006*)0;
LOC73 = addrloc_538204_839829468((*d0));
add_178482_2381377266(&pl0, LOC73);
memset((void*)LOC74, 0, sizeof(LOC74));
LOC75 = (Ropeobj178006*)0;
LOC75 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_510), LOC74, 0);
add_178482_2381377266(&pl0, LOC75);
line_532690_839829468(p0, ((Tcprocsection529011) 2), pl0);
}
goto LA63;
LA65: ;
/* Destination is not addressable directly: go through a temporary. */
{
Tloc292816 tmp0;
Ropeobj178006* LOC77;
TY533289 LOC78;
Ropeobj178006* LOC79;
memset((void*)(&tmp0), 0, sizeof(tmp0));
gettemp_537032_839829468(p0, (*typ0).sons->data[((NI) 0)], (&tmp0), NIM_TRUE);
LOC77 = (Ropeobj178006*)0;
LOC77 = addrloc_538204_839829468(tmp0);
add_178482_2381377266(&pl0, LOC77);
memset((void*)LOC78, 0, sizeof(LOC78));
LOC79 = (Ropeobj178006*)0;
LOC79 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_510), LOC78, 0);
add_178482_2381377266(&pl0, LOC79);
line_532690_839829468(p0, ((Tcprocsection529011) 2), pl0);
genassignment_539264_839829468(p0, (*d0), tmp0, 0);
}
LA63: ;
}
goto LA51;
LA54: ;
/* Normal value result: close the expression, assign into d0. */
{
TY533289 LOC81;
Ropeobj178006* LOC82;
Tloc292816 list0;
memset((void*)LOC81, 0, sizeof(LOC81));
LOC82 = (Ropeobj178006*)0;
LOC82 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_511), LOC81, 0);
add_178482_2381377266(&pl0, LOC82);
{
if (!((*d0).k == ((Tlockind292808) 0))) goto LA85;
gettemp_537032_839829468(p0, (*typ0).sons->data[((NI) 0)], d0, NIM_FALSE);
}
LA85: ;
memset((void*)(&list0), 0, sizeof(list0));
/* NOTE(review): type arg is NIM_NIL here, unlike the sibling code paths
 * which pass (*d0).t -- looks intentional in the generator, but verify. */
initloc_532273_839829468((&list0), ((Tlockind292808) 9), NIM_NIL, ((Tstorageloc292812) 0));
list0.r = pl0;
genassignment_539264_839829468(p0, (*d0), list0, 0);
}
LA51: ;
}
goto LA47;
LA49: ;
/* Void call: terminate and emit as a statement. */
{
TY533289 LOC88;
Ropeobj178006* LOC89;
memset((void*)LOC88, 0, sizeof(LOC88));
LOC89 = (Ropeobj178006*)0;
LOC89 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_510), LOC88, 0);
add_178482_2381377266(&pl0, LOC89);
line_532690_839829468(p0, ((Tcprocsection529011) 2), pl0);
}
LA47: ;
}
/* Machine-generated C (Nim compiler codegen; presumably genPrefixCall in
 * ccgcalls -- TODO confirm against the Nim sources).
 *
 * Generates an ordinary prefix call: for each argument from index 1 on,
 * appends it to params0 (comma-separated), skipping compile-time-only
 * parameters; arguments beyond the declared formal parameters go through
 * genargnoparam. The callee rope (op0.r) and params0 are handed to
 * fixupcall, which emits the call and handles the result destination.
 *
 * FIX(review): all four `add_178482_2381377266(&params0, ...)` calls were
 * corrupted to `¶ms0` (HTML-entity mangling of `&params0`,
 * `&para` -> `¶`), which does not compile. Restored `&params0`; every
 * other token is unchanged. */
N_NIMCALL(void, genprefixcall_539960_839829468)(Tcproc529021* p0, Tnode292802* le0, Tnode292802* ri0, Tloc292816* d0) {
Tloc292816 op0;
Ropeobj178006* params0;
Ttype292840* typ0;
NI length0;
memset((void*)(&op0), 0, sizeof(op0));
initlocexpr_539283_839829468(p0, (*ri0).kindU.S6.sons->data[((NI) 0)], (&op0));
params0 = (Ropeobj178006*)0;
typ0 = skiptypes_296099_850551059((*(*ri0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106232576256));
length0 = sonslen_295351_850551059(ri0);
{
NI i_540213_839829468;
NI HEX3Atmp_540445_839829468;
NI res_540448_839829468;
i_540213_839829468 = (NI)0;
HEX3Atmp_540445_839829468 = (NI)0;
HEX3Atmp_540445_839829468 = (NI)(length0 - ((NI) 1));
res_540448_839829468 = ((NI) 1);
{
while (1) {
if (!(res_540448_839829468 <= HEX3Atmp_540445_839829468)) goto LA3;
i_540213_839829468 = res_540448_839829468;
{
NI LOC6;
Tnode292802* paramtype0;
LOC6 = (NI)0;
LOC6 = sonslen_295327_850551059(typ0);
if (!(i_540213_839829468 < LOC6)) goto LA7;
/* Argument matches a declared formal parameter. */
paramtype0 = (*(*typ0).n).kindU.S6.sons->data[i_540213_839829468];
{
NIM_BOOL LOC11;
Ropeobj178006* LOC20;
LOC11 = (NIM_BOOL)0;
/* compile-time-only parameters produce no runtime argument */
LOC11 = iscompiletimeonly_328706_3876443242((*paramtype0).typ);
if (!!(LOC11)) goto LA12;
{
TY533289 LOC18;
Ropeobj178006* LOC19;
/* comma separator between parameters */
if (!!((params0 == NIM_NIL))) goto LA16;
memset((void*)LOC18, 0, sizeof(LOC18));
LOC19 = (Ropeobj178006*)0;
LOC19 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_110), LOC18, 0);
add_178482_2381377266(&params0, LOC19);
}
LA16: ;
LOC20 = (Ropeobj178006*)0;
LOC20 = genarg_539787_839829468(p0, (*ri0).kindU.S6.sons->data[i_540213_839829468], (*paramtype0).kindU.S4.sym, ri0);
add_178482_2381377266(&params0, LOC20);
}
LA12: ;
}
goto LA4;
LA7: ;
/* Extra (varargs-like) argument beyond the declared parameters. */
{
Ropeobj178006* LOC28;
{
TY533289 LOC26;
Ropeobj178006* LOC27;
if (!!((params0 == NIM_NIL))) goto LA24;
memset((void*)LOC26, 0, sizeof(LOC26));
LOC27 = (Ropeobj178006*)0;
LOC27 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_110), LOC26, 0);
add_178482_2381377266(&params0, LOC27);
}
LA24: ;
LOC28 = (Ropeobj178006*)0;
LOC28 = genargnoparam_539938_839829468(p0, (*ri0).kindU.S6.sons->data[i_540213_839829468]);
add_178482_2381377266(&params0, LOC28);
}
LA4: ;
res_540448_839829468 += ((NI) 1);
} LA3: ;
}
}
fixupcall_539410_839829468(p0, le0, ri0, d0, op0.r, params0);
}
/* After emitting a statement, append the module's injected-statement rope
 * ((*p0).module->injectstmt) to the proc's section 2 (presumably the
 * statement section of the generated C proc -- verify against
 * Tcprocsection). Behaviorally identical restyle of generated code. */
static N_INLINE(void, poststmtactions_532942_839829468)(Tcproc529021* p0) {
Ropeobj178006** section0 = s_529179_3723162438(p0, ((Tcprocsection529011) 2));
add_178482_2381377266(section0, (*(*p0).module).injectstmt);
}
/* Dispatch a call node e0 to the matching code generator (generated code;
 * presumably genCall in the Nim compiler's ccgcalls). The tests are, in
 * order: closure calling convention; callee is a symbol (node kind 3) with
 * symbol flag 27 set -> infix call; symbol with flag 28 set -> named-param
 * call; otherwise an ordinary prefix call. Afterwards the per-statement
 * hook runs. Behaviorally identical restyle: the original's goto chain is
 * written as if/else with the same tests, same short-circuiting, and the
 * same call order. */
N_NIMCALL(void, gencall_543632_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) {
Tnode292802* callee0 = (*e0).kindU.S6.sons->data[((NI) 0)];
Ttype292840* calleetype0 = skiptypes_296099_850551059((*callee0).typ, 2048);
if ((*calleetype0).callconv == ((Tcallingconvention292002) 8)) {
	/* closure calling convention */
	genclosurecall_540452_839829468(p0, NIM_NIL, e0, d0);
} else if ((*callee0).kind == ((Tnodekind292020) 3)
		&& (((*(*callee0).kindU.S4.sym).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0)) {
	geninfixcall_541929_839829468(p0, NIM_NIL, e0, d0);
} else if ((*callee0).kind == ((Tnodekind292020) 3)
		&& (((*(*callee0).kindU.S4.sym).flags &(1U<<((NU)(((Tsymflag292184) 28))&31U)))!=0)) {
	gennamedparamcall_542616_839829468(p0, e0, d0);
} else {
	genprefixcall_539960_839829468(p0, NIM_NIL, e0, d0);
}
poststmtactions_532942_839829468(p0);
}
/* Generate a `reset(x)` call for node n0 (generated code; presumably
 * genReset in the Nim compiler's ccgstmts/ccgexprs). Evaluates operand
 * n0[1] into a loc, then emits format T839829468_496 with two arguments:
 * the operand's address and its runtime type info. Behaviorally identical
 * restyle of the generated original (renamed locals, reordered independent
 * declarations). */
N_NIMCALL(void, genreset_554731_839829468)(Tcproc529021* p0, Tnode292802* n0) {
Tloc292816 operand0;
Ttype292840* skipped0;
TY532811 fmtargs0;
memset((void*)(&operand0), 0, sizeof(operand0));
initlocexpr_539283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 1)], (&operand0));
memset((void*)fmtargs0, 0, sizeof(fmtargs0));
fmtargs0[0] = addrloc_538204_839829468(operand0);
skipped0 = skiptypes_296099_850551059(operand0.t, IL64(211106242013440));
fmtargs0[1] = gentypeinfo_535941_839829468((*p0).module, skipped0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_496), fmtargs0, 2);
}
/* Machine-generated C (Nim compiler codegen; presumably genEcho -- TODO
 * confirm). Expects n0 to be a bracket/construction node (kind 41, checked
 * below). Builds a printf-style format from the echo arguments: nil-like
 * operands (kind 23 after skipconv) contribute constant T839829468_514,
 * all others are evaluated and rendered via format T839829468_515. The
 * final format string is repeated-per-argument T839829468_517 plus the
 * platform newline rope (tnl_...), and is emitted with linefmt into proc
 * section 2, followed by a second zero-argument linefmt (presumably a
 * flush -- verify). Also registers header T839829468_513 (stdio?) for the
 * module; the include call's bool result is intentionally unused. */
N_NIMCALL(void, genecho_554369_839829468)(Tcproc529021* p0, Tnode292802* n0) {
NIM_BOOL LOC6;
Ropeobj178006* args0;
Tloc292816 a0;
TY532811 LOC18;
NimStringDesc* LOC19;
NI LOC20;
NimStringDesc* LOC21;
TY533289 LOC22;
{
NimStringDesc* LOC5;
/* internal error unless n0 has node kind 41 */
if (!!(((*n0).kind == ((Tnodekind292020) 41)))) goto LA3;
LOC5 = (NimStringDesc*)0;
LOC5 = HEX24_196185_1689653243(T839829468_512);
internalerror_196113_155036129(LOC5);
}
LA3: ;
LOC6 = (NIM_BOOL)0;
LOC6 = includestr_147249_3771138726((&(*(*p0).module).headerfiles), ((NimStringDesc*) &T839829468_513));
args0 = NIM_NIL;
memset((void*)(&a0), 0, sizeof(a0));
/* Collect the argument list rope: one entry per son of n0. */
{
NI i_554404_839829468;
NI HEX3Atmp_554431_839829468;
NI LOC8;
NI res_554434_839829468;
i_554404_839829468 = (NI)0;
HEX3Atmp_554431_839829468 = (NI)0;
LOC8 = (NI)0;
LOC8 = len_293081_850551059(n0);
HEX3Atmp_554431_839829468 = (NI)(LOC8 - ((NI) 1));
res_554434_839829468 = ((NI) 0);
{
while (1) {
if (!(res_554434_839829468 <= HEX3Atmp_554431_839829468)) goto LA10;
i_554404_839829468 = res_554434_839829468;
{
Tnode292802* LOC13;
LOC13 = (Tnode292802*)0;
LOC13 = skipconv_328882_3876443242((*n0).kindU.S6.sons->data[i_554404_839829468]);
if (!((*LOC13).kind == ((Tnodekind292020) 23))) goto LA14;
add_178487_2381377266(&args0, ((NimStringDesc*) &T839829468_514));
}
goto LA11;
LA14: ;
{
TY178507 LOC17;
initlocexpr_539283_839829468(p0, (*n0).kindU.S6.sons->data[i_554404_839829468], (&a0));
memset((void*)LOC17, 0, sizeof(LOC17));
LOC17[0] = rdloc_538188_839829468(a0);
addf_179205_2381377266(&args0, ((NimStringDesc*) &T839829468_515), LOC17, 1);
}
LA11: ;
res_554434_839829468 += ((NI) 1);
} LA10: ;
}
}
/* Format string: T839829468_517 repeated len(n0) times, then a newline. */
memset((void*)LOC18, 0, sizeof(LOC18));
LOC19 = (NimStringDesc*)0;
LOC20 = (NI)0;
LOC20 = len_293081_850551059(n0);
LOC21 = (NimStringDesc*)0;
LOC21 = nsuRepeatStr(((NimStringDesc*) &T839829468_517), ((NI) (LOC20)));
LOC19 = rawNewString(LOC21->Sup.len + tnl_176644_4151366050->Sup.len + 0);
appendString(LOC19, LOC21);
appendString(LOC19, tnl_176644_4151366050);
LOC18[0] = makecstring_191638_155036129(LOC19);
LOC18[1] = args0;
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_516), LOC18, 2);
memset((void*)LOC22, 0, sizeof(LOC22));
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_518), LOC22, 0);
}
/* Machine-generated C (Nim compiler codegen; presumably genSeqConstr).
 * Generates code for a seq constructor `@[...]`: allocates the sequence
 * (gennewseqaux with the son count as length), then generates one
 * assignment per element into `dest->data[i]` (format T839829468_187 --
 * presumably the data-index access pattern, verify), and finally records
 * GC usage for the node. */
N_NIMCALL(void, genseqconstr_555004_839829468)(Tcproc529021* p0, Tnode292802* t0, Tloc292816* d0) {
Tloc292816 arr0;
NI LOC5;
Ropeobj178006* LOC6;
memset((void*)(&arr0), 0, sizeof(arr0));
{
/* destination not yet materialized (loc kind 0): allocate a temp */
if (!((*d0).k == ((Tlockind292808) 0))) goto LA3;
gettemp_537032_839829468(p0, (*t0).typ, d0, NIM_FALSE);
}
LA3: ;
LOC5 = (NI)0;
LOC5 = sonslen_295351_850551059(t0);
LOC6 = (Ropeobj178006*)0;
LOC6 = intliteral_539270_839829468(((NI64) (LOC5)));
gennewseqaux_554795_839829468(p0, (*d0), LOC6);
/* Element loop: build a loc for each target slot, then expr() into it. */
{
NI i_555031_839829468;
NI HEX3Atmp_555039_839829468;
NI LOC8;
NI res_555042_839829468;
i_555031_839829468 = (NI)0;
HEX3Atmp_555039_839829468 = (NI)0;
LOC8 = (NI)0;
LOC8 = sonslen_295351_850551059(t0);
HEX3Atmp_555039_839829468 = (NI)(LOC8 - ((NI) 1));
res_555042_839829468 = ((NI) 0);
{
while (1) {
Ttype292840* LOC11;
Ttype292840* LOC12;
TY532811 LOC13;
if (!(res_555042_839829468 <= HEX3Atmp_555039_839829468)) goto LA10;
i_555031_839829468 = res_555042_839829468;
LOC11 = (Ttype292840*)0;
LOC11 = skiptypes_296099_850551059((*t0).typ, IL64(211106232576256));
LOC12 = (Ttype292840*)0;
LOC12 = elemtype_320394_3876443242(LOC11);
/* slot loc: kind 6 (indexed access?), storage 3 -- verify enum meanings */
initloc_532273_839829468((&arr0), ((Tlockind292808) 6), LOC12, ((Tstorageloc292812) 3));
memset((void*)LOC13, 0, sizeof(LOC13));
LOC13[0] = rdloc_538188_839829468((*d0));
LOC13[1] = intliteral_539270_839829468(((NI64) (i_555031_839829468)));
arr0.r = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_187), LOC13, 2);
arr0.s = ((Tstorageloc292812) 3);
expr_539248_839829468(p0, (*t0).kindU.S6.sons->data[i_555031_839829468], (&arr0));
res_555042_839829468 += ((NI) 1);
} LA10: ;
}
}
gcusage_554439_839829468(t0);
}
/* Machine-generated C (Nim compiler codegen; presumably genArrToSeq).
 * Converts an array to a seq. If the operand t0[1] is itself a bracket
 * constructor (node kind 41), retags it with t0's type and delegates to
 * genseqconstr (early return via BeforeRet). Otherwise: allocates a seq of
 * the array's ordinal length, evaluates the array operand once, and emits
 * one genassignment per index copying arr[i] (format T839829468_138) into
 * dest->data[i] (format T839829468_187). */
N_NIMCALL(void, genarrtoseq_555046_839829468)(Tcproc529021* p0, Tnode292802* t0, Tloc292816* d0) {
Tloc292816 elem0;
Tloc292816 a0;
Tloc292816 arr0;
NI L0;
NI64 LOC9;
Ropeobj178006* LOC10;
{ memset((void*)(&elem0), 0, sizeof(elem0));
memset((void*)(&a0), 0, sizeof(a0));
memset((void*)(&arr0), 0, sizeof(arr0));
{
/* literal bracket constructor: reuse the seq-constructor path */
if (!((*t0).kind == ((Tnodekind292020) 41))) goto LA3;
asgnRefNoCycle((void**) (&(*t0).kindU.S6.sons->data[((NI) 1)]).typ), (*t0).typ);
genseqconstr_555004_839829468(p0, (*t0).kindU.S6.sons->data[((NI) 1)], d0);
goto BeforeRet;
}
LA3: ;
{
if (!((*d0).k == ((Tlockind292808) 0))) goto LA7;
gettemp_537032_839829468(p0, (*t0).typ, d0, NIM_FALSE);
}
LA7: ;
/* L0 = ordinal length of the source array type */
LOC9 = (NI64)0;
LOC9 = lengthord_320007_3876443242((*(*t0).kindU.S6.sons->data[((NI) 1)]).typ);
L0 = ((NI) (LOC9));
LOC10 = (Ropeobj178006*)0;
LOC10 = intliteral_539270_839829468(((NI64) (L0)));
gennewseqaux_554795_839829468(p0, (*d0), LOC10);
initlocexpr_539283_839829468(p0, (*t0).kindU.S6.sons->data[((NI) 1)], (&a0));
/* Per-index copy loop: elem0 = dest slot, arr0 = source slot. */
{
NI i_555090_839829468;
NI HEX3Atmp_555103_839829468;
NI res_555106_839829468;
i_555090_839829468 = (NI)0;
HEX3Atmp_555103_839829468 = (NI)0;
HEX3Atmp_555103_839829468 = (NI)(L0 - ((NI) 1));
res_555106_839829468 = ((NI) 0);
{
while (1) {
Ttype292840* LOC14;
Ttype292840* LOC15;
TY532811 LOC16;
Ttype292840* LOC17;
Ttype292840* LOC18;
TY532811 LOC19;
if (!(res_555106_839829468 <= HEX3Atmp_555103_839829468)) goto LA13;
i_555090_839829468 = res_555106_839829468;
LOC14 = (Ttype292840*)0;
LOC14 = skiptypes_296099_850551059((*t0).typ, IL64(211106232576256));
LOC15 = (Ttype292840*)0;
LOC15 = elemtype_320394_3876443242(LOC14);
initloc_532273_839829468((&elem0), ((Tlockind292808) 6), LOC15, ((Tstorageloc292812) 3));
memset((void*)LOC16, 0, sizeof(LOC16));
LOC16[0] = rdloc_538188_839829468((*d0));
LOC16[1] = intliteral_539270_839829468(((NI64) (i_555090_839829468)));
elem0.r = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_187), LOC16, 2);
elem0.s = ((Tstorageloc292812) 3);
LOC17 = (Ttype292840*)0;
LOC17 = skiptypes_296099_850551059((*(*t0).kindU.S6.sons->data[((NI) 1)]).typ, IL64(211106232576256));
LOC18 = (Ttype292840*)0;
LOC18 = elemtype_320394_3876443242(LOC17);
initloc_532273_839829468((&arr0), ((Tlockind292808) 6), LOC18, a0.s);
memset((void*)LOC19, 0, sizeof(LOC19));
LOC19[0] = rdloc_538188_839829468(a0);
LOC19[1] = intliteral_539270_839829468(((NI64) (i_555090_839829468)));
arr0.r = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_138), LOC19, 2);
genassignment_539264_839829468(p0, elem0, arr0, 3);
res_555106_839829468 += ((NI) 1);
} LA13: ;
}
}
}BeforeRet: ;
}
/* Machine-generated C (Nim compiler codegen; presumably genDeepCopy).
 * Emits the deep-copy of src0 into dest0 based on dest0's (skipped) type
 * kind. The case groups dispatch on Ttypekind enum values to one of
 * several runtime helpers via format constants: generic deep copy with
 * type info (T839829468_519), a by-value/ref variant (T839829468_520), an
 * object/tuple variant (T839829468_521), a memcpy for big sets
 * (T839829468_268), plain C assignment for scalars (T839829468_123); the
 * enum meanings are inferred from structure, not confirmed -- verify
 * against the Ttypekind declaration. Unknown kinds raise an internal
 * error naming the type kind. */
N_NIMCALL(void, gendeepcopy_550374_839829468)(Tcproc529021* p0, Tloc292816 dest0, Tloc292816 src0) {
Ttype292840* ty0;
ty0 = skiptypes_296099_850551059(dest0.t, IL64(211106242013440));
switch ((*ty0).kind) {
case ((Ttypekind292244) 21):
case ((Ttypekind292244) 22):
case ((Ttypekind292244) 25):
case ((Ttypekind292244) 18):
case ((Ttypekind292244) 17):
case ((Ttypekind292244) 16):
case ((Ttypekind292244) 4):
/* deep copy by address pair + runtime type info */
{
TY535238 LOC2;
memset((void*)LOC2, 0, sizeof(LOC2));
LOC2[0] = addrloc_538204_839829468(dest0);
LOC2[1] = addrloc_538204_839829468(src0);
LOC2[2] = gentypeinfo_535941_839829468((*p0).module, dest0.t);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_519), LOC2, 3);
}
break;
case ((Ttypekind292244) 24):
case ((Ttypekind292244) 28):
/* source read by value (rdloc) rather than by address */
{
TY535238 LOC4;
memset((void*)LOC4, 0, sizeof(LOC4));
LOC4[0] = addrloc_538204_839829468(dest0);
LOC4[1] = rdloc_538188_839829468(src0);
LOC4[2] = gentypeinfo_535941_839829468((*p0).module, dest0.t);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_520), LOC4, 3);
}
break;
case ((Ttypekind292244) 27):
case ((Ttypekind292244) 48):
{
TY535238 LOC6;
memset((void*)LOC6, 0, sizeof(LOC6));
LOC6[0] = addrloc_538204_839829468(dest0);
LOC6[1] = addrloc_538204_839829468(src0);
LOC6[2] = gentypeinfo_535941_839829468((*p0).module, dest0.t);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_521), LOC6, 3);
}
break;
case ((Ttypekind292244) 19):
/* set type: memcpy when mapped to an array-like C type, else assign */
{
{
Tctypekind529007 LOC10;
TY535238 LOC13;
NI64 LOC14;
LOC10 = (Tctypekind529007)0;
LOC10 = maptype_533393_839829468(ty0);
if (!(LOC10 == ((Tctypekind529007) 17))) goto LA11;
usestringh_532345_839829468((*p0).module);
memset((void*)LOC13, 0, sizeof(LOC13));
LOC13[0] = rdloc_538188_839829468(dest0);
LOC13[1] = rdloc_538188_839829468(src0);
LOC14 = (NI64)0;
LOC14 = getsize_320135_3876443242(dest0.t);
LOC13[2] = rope_178401_2381377266(LOC14);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_268), LOC13, 3);
}
goto LA8;
LA11: ;
{
TY532811 LOC16;
memset((void*)LOC16, 0, sizeof(LOC16));
LOC16[0] = rdloc_538188_839829468(dest0);
LOC16[1] = rdloc_538188_839829468(src0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_123), LOC16, 2);
}
LA8: ;
}
break;
case ((Ttypekind292244) 26):
case ((Ttypekind292244) 2):
case ((Ttypekind292244) 1):
case ((Ttypekind292244) 14):
case ((Ttypekind292244) 29):
case ((Ttypekind292244) 31) ... ((Ttypekind292244) 44):
case ((Ttypekind292244) 20):
case ((Ttypekind292244) 23):
/* scalar-like kinds: plain assignment suffices */
{
TY532811 LOC18;
memset((void*)LOC18, 0, sizeof(LOC18));
LOC18[0] = rdloc_538188_839829468(dest0);
LOC18[1] = rdloc_538188_839829468(src0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_123), LOC18, 2);
}
break;
default:
/* unsupported type kind: internal error with the enum's repr */
{
NimStringDesc* LOC20;
LOC20 = (NimStringDesc*)0;
LOC20 = rawNewString(reprEnum((NI)(*ty0).kind, (&NTI292244))->Sup.len + 13);
appendString(LOC20, ((NimStringDesc*) &T839829468_522));
appendString(LOC20, reprEnum((NI)(*ty0).kind, (&NTI292244)));
internalerror_196113_155036129(LOC20);
}
break;
}
}
N_NIMCALL(void, genmagicexpr_557033_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, Tmagic292524 op0) {
switch (op0) {
case ((Tmagic292524) 127):
case ((Tmagic292524) 126):
{
genandor_554311_839829468(p0, e0, d0, op0);
}
break;
case ((Tmagic292524) 99) ... ((Tmagic292524) 117):
{
unaryarith_552646_839829468(p0, e0, d0, op0);
}
break;
case ((Tmagic292524) 96) ... ((Tmagic292524) 98):
{
unaryarithoverflow_551633_839829468(p0, e0, d0, op0);
}
break;
case ((Tmagic292524) 52) ... ((Tmagic292524) 55):
{
binaryfloatarith_556728_839829468(p0, e0, d0, op0);
}
break;
case ((Tmagic292524) 56) ... ((Tmagic292524) 93):
{
binaryarith_551819_839829468(p0, e0, d0, op0);
}
break;
case ((Tmagic292524) 95):
{
geneqproc_552214_839829468(p0, e0, d0);
}
break;
case ((Tmagic292524) 45) ... ((Tmagic292524) 51):
{
binaryarithoverflow_551262_839829468(p0, e0, d0, op0);
}
break;
case ((Tmagic292524) 149):
{
genrepr_555339_839829468(p0, e0, d0);
}
break;
case ((Tmagic292524) 259):
{
gengettypeinfo_555383_839829468(p0, e0, d0);
}
break;
case ((Tmagic292524) 156):
{
genswap_555638_839829468(p0, e0, d0);
}
break;
case ((Tmagic292524) 25):
{
{
if (!!((((*p0).options &(1U<<((NU)(((Toption169009) 5))&31U)))!=0))) goto LA14;
unaryexpr_551209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_385));
}
goto LA12;
LA14: ;
{
unaryexpr_551209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_386));
}
LA12: ;
}
break;
case ((Tmagic292524) 26):
case ((Tmagic292524) 27):
{
Ttype292840* underlying0;
underlying0 = skiptypes_296099_850551059((*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, 9439232);
{
NIM_BOOL LOC20;
LOC20 = (NIM_BOOL)0;
LOC20 = !((((*p0).options &(1U<<((NU)(((Toption169009) 5))&31U)))!=0));
if (LOC20) goto LA21;
LOC20 = ((*underlying0).kind >= ((Ttypekind292244) 40) && (*underlying0).kind <= ((Ttypekind292244) 44));
LA21: ;
if (!LOC20) goto LA22;
binarystmt_550501_839829468(p0, e0, d0, opr_557050_839829468[(op0)- 26]);
}
goto LA18;
LA22: ;
{
Tloc292816 a0;
Tloc292816 b0;
Ttype292840* ranged0;
Ropeobj178006* res0;
NimStringDesc* LOC25;
TY532811 LOC31;
Ropeobj178006* LOC32;
memset((void*)(&a0), 0, sizeof(a0));
memset((void*)(&b0), 0, sizeof(b0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0));
ranged0 = skiptypes_296099_850551059((*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, 8390656);
LOC25 = (NimStringDesc*)0;
{
if (!((*underlying0).kind == ((Ttypekind292244) 35))) goto LA28;
LOC25 = copyString(fun64_557055_839829468[(op0)- 26]);
}
goto LA26;
LA28: ;
{
LOC25 = copyString(fun_557060_839829468[(op0)- 26]);
}
LA26: ;
res0 = binaryarithoverflowraw_551235_839829468(p0, ranged0, a0, b0, LOC25);
memset((void*)LOC31, 0, sizeof(LOC31));
LOC31[0] = gettypedesc_535671_839829468((*p0).module, ranged0);
LOC31[1] = res0;
LOC32 = (Ropeobj178006*)0;
LOC32 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_370), LOC31, 2);
putintodest_550468_839829468(p0, (&a0), ranged0, LOC32, ((Tstorageloc292812) 0));
}
LA18: ;
}
break;
case ((Tmagic292524) 138):
{
genstrconcat_554452_839829468(p0, e0, d0);
}
break;
case ((Tmagic292524) 144):
{
binarystmt_550501_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_394));
}
break;
case ((Tmagic292524) 145):
{
genstrappend_554554_839829468(p0, e0, d0);
}
break;
case ((Tmagic292524) 146):
{
genseqelemappend_554683_839829468(p0, e0, d0);
}
break;
case ((Tmagic292524) 128):
{
genstrequals_556666_839829468(p0, e0, d0);
}
break;
case ((Tmagic292524) 129):
{
binaryexpr_550549_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_402));
}
break;
case ((Tmagic292524) 130):
{
binaryexpr_550549_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_403));
}
break;
case ((Tmagic292524) 157):
{
genisnil_552620_839829468(p0, e0, d0);
}
break;
case ((Tmagic292524) 120):
{
gendollar_555391_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_406));
}
break;
case ((Tmagic292524) 121):
{
gendollar_555391_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_407));
}
break;
case ((Tmagic292524) 119):
{
gendollar_555391_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_408));
}
break;
case ((Tmagic292524) 118):
{
gendollar_555391_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_409));
}
break;
case ((Tmagic292524) 122):
{
gendollar_555391_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_410));
}
break;
case ((Tmagic292524) 123):
{
gendollar_555391_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_411));
}
break;
case ((Tmagic292524) 124):
{
expr_539248_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], d0);
}
break;
case ((Tmagic292524) 125):
{
genrepr_555339_839829468(p0, e0, d0);
}
break;
case ((Tmagic292524) 12):
{
genof_555331_839829468(p0, e0, d0);
}
break;
case ((Tmagic292524) 29):
{
gennew_554782_839829468(p0, e0);
}
break;
case ((Tmagic292524) 30):
{
gennewfinalize_555110_839829468(p0, e0);
}
break;
case ((Tmagic292524) 31):
{
gennewseq_554824_839829468(p0, e0);
}
break;
case ((Tmagic292524) 32):
{
gennewseqofcap_554836_839829468(p0, e0, d0);
}
break;
case ((Tmagic292524) 9):
{
Ttype292840* t0;
TY178507 LOC55;
Ropeobj178006* LOC56;
t0 = skiptypes_296099_850551059((*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, 256);
memset((void*)LOC55, 0, sizeof(LOC55));
LOC55[0] = gettypedesc_535671_839829468((*p0).module, t0);
LOC56 = (Ropeobj178006*)0;
LOC56 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_428), LOC55, 1);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC56, ((Tstorageloc292812) 0));
}
break;
case ((Tmagic292524) 42):
{
gensomecast_556480_839829468(p0, e0, d0);
}
break;
case ((Tmagic292524) 28):
{
genord_556474_839829468(p0, e0, d0);
}
break;
case ((Tmagic292524) 35):
case ((Tmagic292524) 8):
case ((Tmagic292524) 34):
case ((Tmagic292524) 36):
case ((Tmagic292524) 33):
{
genarraylen_555415_839829468(p0, e0, d0, op0);
}
break;
case ((Tmagic292524) 37):
case ((Tmagic292524) 38):
{
{
NIM_BOOL LOC63;
LOC63 = (NIM_BOOL)0;
LOC63 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC63) goto LA64;
LOC63 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA64: ;
if (!!(LOC63)) goto LA65;
unaryexpr_551209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_440));
}
goto LA61;
LA65: ;
{
unaryexpr_551209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_441));
}
LA61: ;
}
break;
case ((Tmagic292524) 43):
{
unarystmt_550527_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_443));
}
break;
case ((Tmagic292524) 44):
{
unarystmt_550527_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_444));
}
break;
case ((Tmagic292524) 151):
{
gensetlengthstr_555632_839829468(p0, e0, d0);
}
break;
case ((Tmagic292524) 152):
{
gensetlengthseq_555500_839829468(p0, e0, d0);
}
break;
case ((Tmagic292524) 39):
case ((Tmagic292524) 40):
case ((Tmagic292524) 41):
case ((Tmagic292524) 133):
case ((Tmagic292524) 132):
case ((Tmagic292524) 131):
case ((Tmagic292524) 134):
case ((Tmagic292524) 135):
case ((Tmagic292524) 136):
case ((Tmagic292524) 148):
{
gensetop_556419_839829468(p0, e0, d0, op0);
}
break;
case ((Tmagic292524) 161):
case ((Tmagic292524) 162):
case ((Tmagic292524) 159):
case ((Tmagic292524) 160):
case ((Tmagic292524) 150):
case ((Tmagic292524) 163):
{
Tsym292834* opr0;
opr0 = (*(*e0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym;
{
NimStringDesc* LOC78;
Ropeobj178006* LOC79;
if (!!((((*opr0).loc.flags &(1U<<((NU)(((Tlocflag292810) 3))&15U)))!=0))) goto LA76;
LOC78 = (NimStringDesc*)0;
LOC78 = HEX24_178856_2381377266((*opr0).loc.r);
LOC79 = (Ropeobj178006*)0;
LOC79 = cgsym_532403_839829468((*p0).module, LOC78);
}
LA76: ;
gencall_543632_839829468(p0, e0, d0);
}
break;
case ((Tmagic292524) 164):
{
genreset_554731_839829468(p0, e0);
}
break;
case ((Tmagic292524) 17):
{
Tnode292802* LOC82;
Tnode292802* LOC83;
LOC82 = (Tnode292802*)0;
LOC82 = HEX5BHEX5D_293238_850551059(e0, ((NI) 1));
LOC83 = (Tnode292802*)0;
LOC83 = skipconv_328882_3876443242(LOC82);
genecho_554369_839829468(p0, LOC83);
}
break;
case ((Tmagic292524) 158):
{
genarrtoseq_555046_839829468(p0, e0, d0);
}
break;
case ((Tmagic292524) 223) ... ((Tmagic292524) 257):
case ((Tmagic292524) 19) ... ((Tmagic292524) 24):
{
localerror_196080_155036129((*e0).info, ((Tmsgkind191002) 229), (*(*(*(*e0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym).name).s);
}
break;
case ((Tmagic292524) 208):
{
Tnode292802* n0;
n0 = wrapprocforspawn_435501_2218250499((*(*p0).module).module, e0, (*e0).typ, NIM_NIL, NIM_NIL);
expr_539248_839829468(p0, n0, d0);
}
break;
case ((Tmagic292524) 155):
{
Tnode292802* n0;
n0 = liftparallel_478822_1773027539((*(*p0).module).module, e0);
expr_539248_839829468(p0, n0, d0);
}
break;
case ((Tmagic292524) 209):
{
Tloc292816 a0;
Tloc292816 b0;
Tnode292802* x0;
memset((void*)(&a0), 0, sizeof(a0));
memset((void*)(&b0), 0, sizeof(b0));
{
Tnode292802* LOC91;
Tnode292802* LOC94;
LOC91 = (Tnode292802*)0;
LOC91 = HEX5BHEX5D_293238_850551059(e0, ((NI) 1));
if (!((*LOC91).kind == ((Tnodekind292020) 63) || (*LOC91).kind == ((Tnodekind292020) 64))) goto LA92;
LOC94 = (Tnode292802*)0;
LOC94 = HEX5BHEX5D_293238_850551059(e0, ((NI) 1));
x0 = HEX5BHEX5D_293238_850551059(LOC94, ((NI) 0));
}
goto LA89;
LA92: ;
{
x0 = HEX5BHEX5D_293238_850551059(e0, ((NI) 1));
}
LA89: ;
initlocexpr_539283_839829468(p0, x0, (&a0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0));
gendeepcopy_550374_839829468(p0, a0, b0);
}
break;
case ((Tmagic292524) 140):
case ((Tmagic292524) 94):
{
gencall_543632_839829468(p0, e0, d0);
}
break;
default:
{
NimStringDesc* LOC98;
LOC98 = (NimStringDesc*)0;
LOC98 = rawNewString(reprEnum((NI)op0, (&NTI292524))->Sup.len + 14);
appendString(LOC98, ((NimStringDesc*) &T839829468_523));
appendString(LOC98, reprEnum((NI)op0, (&NTI292524)));
internalerror_196100_155036129((*e0).info, LOC98);
}
break;
}
}
/* Emit C data for a constant Nim set literal node `n0`; returns the rope
 * (string fragment) that names or spells the emitted value.
 * NOTE(review): this file is machine-generated output of the Nim compiler's
 * C backend; identifiers are mangled as name_id_checksum. Comments below
 * describe only what the visible code does.
 * Sets larger than 8 bytes become a cached module-level data definition;
 * smaller sets are returned inline as raw set data. */
N_NIMCALL(Ropeobj178006*, gensetnode_549664_839829468)(Tcproc529021* p0, Tnode292802* n0) {
Ropeobj178006* result0;
Tbitset339004* cs0;
NI size0;
NI64 LOC1;
result0 = (Ropeobj178006*)0;
cs0 = (Tbitset339004*)0;
LOC1 = (NI64)0;
/* size in bytes of the set's type selects the emission strategy */
LOC1 = getsize_320135_3876443242((*n0).typ);
size0 = ((NI) (LOC1));
/* convert the literal AST node into a bitset representation in cs0 */
tobitset_340001_452470228(n0, (&cs0));
{
NI id0;
Ropeobj178006* LOC6;
if (!(((NI) 8) < size0)) goto LA4;
/* large set: test-or-insert the node in the module data cache so
 * identical literals share one emitted global */
id0 = nodetabletestorset_342682_1142335848((&(*(*p0).module).datacache), n0, ((NI) ((*(*p0).module).labels)));
LOC6 = (Ropeobj178006*)0;
LOC6 = rope_178401_2381377266(((NI64) (id0)));
result0 = HEX26_178418_2381377266((*(*p0).module).tmpbase, LOC6);
{
TY535238 LOC11;
/* id equal to the current label counter means this is a fresh cache
 * entry: bump the counter and emit the definition exactly once */
if (!(id0 == ((NI) ((*(*p0).module).labels)))) goto LA9;
(*(*p0).module).labels += ((NI) 1);
memset((void*)LOC11, 0, sizeof(LOC11));
LOC11[0] = gettypedesc_535671_839829468((*p0).module, (*n0).typ);
LOC11[1] = result0;
LOC11[2] = genrawsetdata_549629_839829468(cs0, size0);
/* append the formatted definition to module file section 8 */
addf_179205_2381377266(&(*(*p0).module).s[(((Tcfilesection529005) 8))- 0], ((NimStringDesc*) &T839829468_524), LOC11, 3);
}
LA9: ;
}
goto LA2;
LA4: ;
{
/* small set (<= 8 bytes): return the raw data literal directly */
result0 = genrawsetdata_549629_839829468(cs0, size0);
}
LA2: ;
return result0;
}
/* Generate code for a set-construction expression `e0` into destination
 * loc `d0`.
 * NOTE(review): machine-generated Nim C backend output; mangled names.
 * Three paths are visible in the code:
 *  - node flag bit 4 set: the whole constructor is constant data, emitted
 *    via gensetnode and copied into the destination;
 *  - runtime construction, set size > 8 bytes: memset the destination to
 *    zero (template T..._525) then incl each element / range;
 *  - runtime construction, small set: initialize via template T..._494
 *    then OR in each element using bit-shift expressions built from the
 *    set's bit width string ts0. */
N_NIMCALL(void, gensetconstr_557496_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) {
Tloc292816 a0;
Tloc292816 b0;
Tloc292816 idx0;
memset((void*)(&a0), 0, sizeof(a0));
memset((void*)(&b0), 0, sizeof(b0));
memset((void*)(&idx0), 0, sizeof(idx0));
{
Ropeobj178006* LOC5;
/* constant-folded constructor: emit as data and store into d0 */
if (!(((*e0).flags &(1U<<((NU)(((Tnodeflag292427) 4))&15U)))!=0)) goto LA3;
LOC5 = (Ropeobj178006*)0;
LOC5 = gensetnode_549664_839829468(p0, e0);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC5, ((Tstorageloc292812) 0));
}
goto LA1;
LA3: ;
{
{
/* no destination yet: allocate a temporary for the result */
if (!((*d0).k == ((Tlockind292808) 0))) goto LA9;
gettemp_537032_839829468(p0, (*e0).typ, d0, NIM_FALSE);
}
LA9: ;
{
NI64 LOC13;
TY178507 LOC16;
LOC13 = (NI64)0;
LOC13 = getsize_320135_3876443242((*e0).typ);
if (!(IL64(8) < LOC13)) goto LA14;
/* big set: needs <string.h> for the emitted memset */
usestringh_532345_839829468((*p0).module);
memset((void*)LOC16, 0, sizeof(LOC16));
LOC16[0] = rdloc_538188_839829468((*d0));
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_525), LOC16, 1);
{
NI i_557537_839829468;
NI HEX3Atmp_557603_839829468;
NI LOC18;
NI res_557606_839829468;
i_557537_839829468 = (NI)0;
HEX3Atmp_557603_839829468 = (NI)0;
LOC18 = (NI)0;
LOC18 = sonslen_295351_850551059(e0);
HEX3Atmp_557603_839829468 = (NI)(LOC18 - ((NI) 1));
res_557606_839829468 = ((NI) 0);
{
/* for each son of the constructor node */
while (1) {
if (!(res_557606_839829468 <= HEX3Atmp_557603_839829468)) goto LA20;
i_557537_839829468 = res_557606_839829468;
{
Ttype292840* LOC25;
TY535235 LOC26;
/* node kind 44 is presumably a range element (a..b) — emitted as a
 * loop over an index temp; TODO confirm against Tnodekind decl */
if (!((*(*e0).kindU.S6.sons->data[i_557537_839829468]).kind == ((Tnodekind292020) 44))) goto LA23;
LOC25 = (Ttype292840*)0;
LOC25 = getsystype_338150_3937434831(((Ttypekind292244) 31));
gettemp_537032_839829468(p0, LOC25, (&idx0), NIM_FALSE);
initlocexpr_539283_839829468(p0, (*(*e0).kindU.S6.sons->data[i_557537_839829468]).kindU.S6.sons->data[((NI) 0)], (&a0));
initlocexpr_539283_839829468(p0, (*(*e0).kindU.S6.sons->data[i_557537_839829468]).kindU.S6.sons->data[((NI) 1)], (&b0));
memset((void*)LOC26, 0, sizeof(LOC26));
LOC26[0] = rdloc_538188_839829468(idx0);
LOC26[1] = rdloc_538188_839829468((*d0));
LOC26[2] = rdsetelemloc_555662_839829468(a0, (*e0).typ);
LOC26[3] = rdsetelemloc_555662_839829468(b0, (*e0).typ);
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_526), LOC26, 4);
}
goto LA21;
LA23: ;
{
TY532811 LOC28;
/* single element: emit one inclusion statement (template T..._527) */
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[i_557537_839829468], (&a0));
memset((void*)LOC28, 0, sizeof(LOC28));
LOC28[0] = rdloc_538188_839829468((*d0));
LOC28[1] = rdsetelemloc_555662_839829468(a0, (*e0).typ);
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_527), LOC28, 2);
}
LA21: ;
res_557606_839829468 += ((NI) 1);
} LA20: ;
}
}
}
goto LA11;
LA14: ;
{
NimStringDesc* ts0;
NimStringDesc* LOC30;
NI64 LOC31;
NimStringDesc* LOC32;
TY178507 LOC33;
LOC30 = (NimStringDesc*)0;
LOC31 = (NI64)0;
/* small set: ts0 := T..._45 & (size_in_bytes * 8), i.e. a string
 * holding the set's bit width, spliced into the emitted templates */
LOC31 = getsize_320135_3876443242((*e0).typ);
LOC32 = (NimStringDesc*)0;
LOC32 = nimInt64ToStr((NI64)(LOC31 * IL64(8)));
LOC30 = rawNewString(LOC32->Sup.len + 2);
appendString(LOC30, ((NimStringDesc*) &T839829468_45));
appendString(LOC30, LOC32);
ts0 = LOC30;
memset((void*)LOC33, 0, sizeof(LOC33));
LOC33[0] = rdloc_538188_839829468((*d0));
/* initialize the destination (template T..._494) */
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_494), LOC33, 1);
{
NI i_557575_839829468;
NI HEX3Atmp_557611_839829468;
NI LOC35;
NI res_557614_839829468;
i_557575_839829468 = (NI)0;
HEX3Atmp_557611_839829468 = (NI)0;
LOC35 = (NI)0;
LOC35 = sonslen_295351_850551059(e0);
HEX3Atmp_557611_839829468 = (NI)(LOC35 - ((NI) 1));
res_557614_839829468 = ((NI) 0);
{
while (1) {
if (!(res_557614_839829468 <= HEX3Atmp_557611_839829468)) goto LA37;
i_557575_839829468 = res_557614_839829468;
{
Ttype292840* LOC42;
NimStringDesc* LOC43;
TY535235 LOC44;
/* range element: build the loop template from T..._528/529/454
 * with the bit-width string interpolated twice */
if (!((*(*e0).kindU.S6.sons->data[i_557575_839829468]).kind == ((Tnodekind292020) 44))) goto LA40;
LOC42 = (Ttype292840*)0;
LOC42 = getsystype_338150_3937434831(((Ttypekind292244) 31));
gettemp_537032_839829468(p0, LOC42, (&idx0), NIM_FALSE);
initlocexpr_539283_839829468(p0, (*(*e0).kindU.S6.sons->data[i_557575_839829468]).kindU.S6.sons->data[((NI) 0)], (&a0));
initlocexpr_539283_839829468(p0, (*(*e0).kindU.S6.sons->data[i_557575_839829468]).kindU.S6.sons->data[((NI) 1)], (&b0));
LOC43 = (NimStringDesc*)0;
LOC43 = rawNewString(ts0->Sup.len + ts0->Sup.len + 68);
appendString(LOC43, ((NimStringDesc*) &T839829468_528));
appendString(LOC43, ts0);
appendString(LOC43, ((NimStringDesc*) &T839829468_529));
appendString(LOC43, ts0);
appendString(LOC43, ((NimStringDesc*) &T839829468_454));
memset((void*)LOC44, 0, sizeof(LOC44));
LOC44[0] = rdloc_538188_839829468(idx0);
LOC44[1] = rdloc_538188_839829468((*d0));
LOC44[2] = rdsetelemloc_555662_839829468(a0, (*e0).typ);
LOC44[3] = rdsetelemloc_555662_839829468(b0, (*e0).typ);
linef_532700_839829468(p0, ((Tcprocsection529011) 2), LOC43, LOC44, 4);
}
goto LA38;
LA40: ;
{
NimStringDesc* LOC46;
TY532811 LOC47;
/* single element: OR-in one bit (templates T..._530/531/454) */
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[i_557575_839829468], (&a0));
LOC46 = (NimStringDesc*)0;
LOC46 = rawNewString(ts0->Sup.len + ts0->Sup.len + 36);
appendString(LOC46, ((NimStringDesc*) &T839829468_530));
appendString(LOC46, ts0);
appendString(LOC46, ((NimStringDesc*) &T839829468_531));
appendString(LOC46, ts0);
appendString(LOC46, ((NimStringDesc*) &T839829468_454));
memset((void*)LOC47, 0, sizeof(LOC47));
LOC47[0] = rdloc_538188_839829468((*d0));
LOC47[1] = rdsetelemloc_555662_839829468(a0, (*e0).typ);
linef_532700_839829468(p0, ((Tcprocsection529011) 2), LOC46, LOC47, 2);
}
LA38: ;
res_557614_839829468 += ((NI) 1);
} LA37: ;
}
}
}
LA11: ;
}
LA1: ;
}
/* Emit a complex (aggregate) constant expression `n0` as a cached
 * module-level data definition and make `d0` refer to it.
 * NOTE(review): machine-generated Nim C backend output; mangled names.
 * The node is keyed in the module data cache so repeated identical
 * constants reuse one emitted definition. */
N_NIMCALL(void, exprcomplexconst_558684_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0) {
Ttype292840* t0;
Ropeobj178006* LOC1;
NI id0;
Ropeobj178006* tmp0;
Ropeobj178006* LOC2;
t0 = getuniquetype_528640_2036603609((*n0).typ);
LOC1 = (Ropeobj178006*)0;
/* LOC1 is computed but only used for its side effects on the module's
 * type-descriptor emission, as far as this code shows */
LOC1 = gettypedesc_535671_839829468((*p0).module, t0);
id0 = nodetabletestorset_342682_1142335848((&(*(*p0).module).datacache), n0, ((NI) ((*(*p0).module).labels)));
LOC2 = (Ropeobj178006*)0;
LOC2 = rope_178401_2381377266(((NI64) (id0)));
/* tmp0 names the (possibly shared) constant: tmpbase & id */
tmp0 = HEX26_178418_2381377266((*(*p0).module).tmpbase, LOC2);
{
TY535238 LOC7;
/* fresh cache entry: emit the definition once into file section 8 */
if (!(id0 == ((NI) ((*(*p0).module).labels)))) goto LA5;
(*(*p0).module).labels += ((NI) 1);
memset((void*)LOC7, 0, sizeof(LOC7));
LOC7[0] = gettypedesc_535671_839829468((*p0).module, t0);
LOC7[1] = tmp0;
LOC7[2] = genconstexpr_554849_839829468(p0, n0);
addf_179205_2381377266(&(*(*p0).module).s[(((Tcfilesection529005) 8))- 0], ((NimStringDesc*) &T839829468_272), LOC7, 3);
}
LA5: ;
{
/* empty destination: point d0 directly at the emitted constant */
if (!((*d0).k == ((Tlockind292808) 0))) goto LA10;
fillloc_532282_839829468(d0, ((Tlockind292808) 8), t0, tmp0, ((Tstorageloc292812) 1));
}
goto LA8;
LA10: ;
{
/* destination already exists: copy the data into it */
putdataintodest_550436_839829468(p0, d0, t0, tmp0);
{
/* for type kinds other than 24/28, mark the destination's storage
 * class as 1 (meaning depends on Tstorageloc — not visible here) */
if (!!(((*t0).kind == ((Ttypekind292244) 24) || (*t0).kind == ((Ttypekind292244) 28)))) goto LA15;
(*d0).s = ((Tstorageloc292812) 1);
}
LA15: ;
}
LA8: ;
}
/* If `n0` is a deep constant expression and `d0` is still unassigned,
 * emit it as a cached module-level constant, point `d0` at it, and
 * return true; otherwise return false so the caller generates runtime
 * construction code instead.
 * NOTE(review): machine-generated Nim C backend output; mangled names. */
N_NIMCALL(NIM_BOOL, handleconstexpr_554853_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0) {
NIM_BOOL result0;
result0 = (NIM_BOOL)0;
{
NIM_BOOL LOC3;
NIM_BOOL LOC4;
NI LOC6;
Ttype292840* t0;
Ropeobj178006* LOC10;
NI id0;
Ropeobj178006* LOC11;
Ropeobj178006* LOC12;
LOC3 = (NIM_BOOL)0;
LOC4 = (NIM_BOOL)0;
/* condition 1: destination loc is still empty (kind 0) */
LOC4 = ((*d0).k == ((Tlockind292808) 0));
if (!(LOC4)) goto LA5;
LOC6 = (NI)0;
LOC6 = len_293081_850551059(n0);
/* condition 2: node has sons beyond a possible leading name child
 * (the bool (kind == 38) is reused as 0/1 offset) */
LOC4 = (((NI) (((*n0).kind == ((Tnodekind292020) 38)))) < LOC6);
LA5: ;
LOC3 = LOC4;
if (!(LOC3)) goto LA7;
/* condition 3: the whole tree is a deep constant expression */
LOC3 = isdeepconstexpr_318566_2616423590(n0);
LA7: ;
if (!LOC3) goto LA8;
t0 = getuniquetype_528640_2036603609((*n0).typ);
LOC10 = (Ropeobj178006*)0;
LOC10 = gettypedesc_535671_839829468((*p0).module, t0);
/* cache lookup: identical constants map to the same label id */
id0 = nodetabletestorset_342682_1142335848((&(*(*p0).module).datacache), n0, ((NI) ((*(*p0).module).labels)));
LOC11 = (Ropeobj178006*)0;
LOC11 = rope_178401_2381377266(((NI64) (id0)));
LOC12 = (Ropeobj178006*)0;
LOC12 = HEX26_178418_2381377266((*(*p0).module).tmpbase, LOC11);
fillloc_532282_839829468(d0, ((Tlockind292808) 8), t0, LOC12, ((Tstorageloc292812) 1));
{
TY535238 LOC17;
/* fresh cache entry: emit the constant's definition exactly once */
if (!(id0 == ((NI) ((*(*p0).module).labels)))) goto LA15;
(*(*p0).module).labels += ((NI) 1);
memset((void*)LOC17, 0, sizeof(LOC17));
LOC17[0] = gettypedesc_535671_839829468((*p0).module, t0);
LOC17[1] = (*d0).r;
LOC17[2] = genconstexpr_554849_839829468(p0, n0);
addf_179205_2381377266(&(*(*p0).module).s[(((Tcfilesection529005) 8))- 0], ((NimStringDesc*) &T839829468_272), LOC17, 3);
}
LA15: ;
result0 = NIM_TRUE;
}
goto LA1;
LA8: ;
{
result0 = NIM_FALSE;
}
LA1: ;
return result0;
}
/* Generate code for an array constructor `n0` into destination `d0`.
 * NOTE(review): machine-generated Nim C backend output; mangled names.
 * Tries the constant-expression fast path first; otherwise allocates a
 * temp (if needed) and emits one element assignment per son. */
N_NIMCALL(void, genarrayconstr_558207_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0) {
Tloc292816 arr0;
memset((void*)(&arr0), 0, sizeof(arr0));
{
NIM_BOOL LOC3;
LOC3 = (NIM_BOOL)0;
/* fast path: whole constructor is a constant; nothing more to do */
LOC3 = handleconstexpr_554853_839829468(p0, n0, d0);
if (!!(LOC3)) goto LA4;
{
/* no destination yet: allocate a temporary */
if (!((*d0).k == ((Tlockind292808) 0))) goto LA8;
gettemp_537032_839829468(p0, (*n0).typ, d0, NIM_FALSE);
}
LA8: ;
{
NI i_558234_839829468;
NI HEX3Atmp_558242_839829468;
NI LOC11;
NI res_558245_839829468;
i_558234_839829468 = (NI)0;
HEX3Atmp_558242_839829468 = (NI)0;
LOC11 = (NI)0;
LOC11 = sonslen_295351_850551059(n0);
HEX3Atmp_558242_839829468 = (NI)(LOC11 - ((NI) 1));
res_558245_839829468 = ((NI) 0);
{
/* for each element i: build the loc "<dest>[<i>]" (template
 * T..._138) and generate the element expression into it */
while (1) {
Ttype292840* LOC14;
Ttype292840* LOC15;
TY532811 LOC16;
if (!(res_558245_839829468 <= HEX3Atmp_558242_839829468)) goto LA13;
i_558234_839829468 = res_558245_839829468;
LOC14 = (Ttype292840*)0;
LOC14 = skiptypes_296099_850551059((*n0).typ, IL64(211106232576256));
LOC15 = (Ttype292840*)0;
LOC15 = elemtype_320394_3876443242(LOC14);
initloc_532273_839829468((&arr0), ((Tlockind292808) 6), LOC15, (*d0).s);
memset((void*)LOC16, 0, sizeof(LOC16));
LOC16[0] = rdloc_538188_839829468((*d0));
LOC16[1] = intliteral_539270_839829468(((NI64) (i_558234_839829468)));
arr0.r = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_138), LOC16, 2);
expr_539248_839829468(p0, (*n0).kindU.S6.sons->data[i_558234_839829468], (&arr0));
res_558245_839829468 += ((NI) 1);
} LA13: ;
}
}
}
LA4: ;
}
/* Generate code for a tuple constructor `n0` into destination `d0`.
 * NOTE(review): machine-generated Nim C backend output; mangled names.
 * Mirrors genarrayconstr: constant fast path, then temp allocation, then
 * per-field assignment through the "Field<i>" access template T..._185. */
N_NIMCALL(void, gentupleconstr_557618_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0) {
Tloc292816 rec0;
memset((void*)(&rec0), 0, sizeof(rec0));
{
NIM_BOOL LOC3;
Ttype292840* t0;
Ropeobj178006* LOC6;
LOC3 = (NIM_BOOL)0;
/* fast path: constant tuple handled entirely by handleconstexpr */
LOC3 = handleconstexpr_554853_839829468(p0, n0, d0);
if (!!(LOC3)) goto LA4;
t0 = getuniquetype_528640_2036603609((*n0).typ);
LOC6 = (Ropeobj178006*)0;
/* ensure the tuple's C type descriptor is emitted for this module */
LOC6 = gettypedesc_535671_839829468((*p0).module, t0);
{
if (!((*d0).k == ((Tlockind292808) 0))) goto LA9;
gettemp_537032_839829468(p0, t0, d0, NIM_FALSE);
}
LA9: ;
{
NI i_557646_839829468;
NI HEX3Atmp_557803_839829468;
NI LOC12;
NI res_557806_839829468;
i_557646_839829468 = (NI)0;
HEX3Atmp_557803_839829468 = (NI)0;
LOC12 = (NI)0;
LOC12 = sonslen_295351_850551059(n0);
HEX3Atmp_557803_839829468 = (NI)(LOC12 - ((NI) 1));
res_557806_839829468 = ((NI) 0);
{
while (1) {
Tnode292802* it0;
TY532811 LOC19;
if (!(res_557806_839829468 <= HEX3Atmp_557803_839829468)) goto LA14;
i_557646_839829468 = res_557806_839829468;
it0 = (*n0).kindU.S6.sons->data[i_557646_839829468];
{
/* node kind 34 is presumably a (name: value) pair — unwrap to the
 * value child; TODO confirm against Tnodekind decl */
if (!((*it0).kind == ((Tnodekind292020) 34))) goto LA17;
it0 = (*it0).kindU.S6.sons->data[((NI) 1)];
}
LA17: ;
initloc_532273_839829468((&rec0), ((Tlockind292808) 6), (*it0).typ, (*d0).s);
memset((void*)LOC19, 0, sizeof(LOC19));
LOC19[0] = rdloc_538188_839829468((*d0));
LOC19[1] = rope_178401_2381377266(((NI64) (i_557646_839829468)));
/* rec0 refers to the i-th field of the destination tuple */
rec0.r = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_185), LOC19, 2);
expr_539248_839829468(p0, it0, (&rec0));
res_557806_839829468 += ((NI) 1);
} LA14: ;
}
}
}
LA4: ;
}
/* Walk up the object type hierarchy starting at `ty_553156...` looking
 * for `field0` by name; returns the resolved field symbol.
 * NOTE(review): machine-generated Nim C backend output; mangled names.
 * For each inheritance level that does not contain the field, the string
 * T..._153 (presumably a ".Sup"-style superclass accessor — verify) is
 * appended to the access rope `*r0`, except in the mode gated by
 * gcmd == 2 / module flag 27 (presumably the C++ backend — verify).
 * Reports an internal error if the field is not found at any level. */
N_NIMCALL(Tsym292834*, lookupfieldagain_553153_839829468)(Tcproc529021* p0, Ttype292840* ty_553156_839829468, Tsym292834* field0, Ropeobj178006** r0) {
Tsym292834* result0;
Ttype292840* ty0;
result0 = (Tsym292834*)0;
ty0 = ty_553156_839829468;
{
while (1) {
if (!!((ty0 == NIM_NIL))) goto LA2;
ty0 = skiptypes_296099_850551059(ty0, IL64(211106247215360));
/* search this level's record node for the field's name */
result0 = lookupinrecord_299119_2984716966((*ty0).n, (*field0).name);
{
/* found: break out of the search loop (LA1 is past it) */
if (!!((result0 == NIM_NIL))) goto LA5;
goto LA1;
}
LA5: ;
{
NIM_BOOL LOC9;
LOC9 = (NIM_BOOL)0;
LOC9 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC9) goto LA10;
LOC9 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA10: ;
/* not in C++-compat mode: append the superclass accessor text */
if (!!(LOC9)) goto LA11;
add_178487_2381377266(r0, ((NimStringDesc*) &T839829468_153));
}
LA11: ;
/* ascend to the base type (sons[0]) and retry */
ty0 = getuniquetype_528640_2036603609((*ty0).sons->data[((NI) 0)]);
} LA2: ;
} LA1: ;
{
if (!(result0 == NIM_NIL)) goto LA15;
internalerror_196100_155036129((*field0).info, ((NimStringDesc*) &T839829468_532));
}
LA15: ;
return result0;
}
/* Emit runtime discriminant checks for accessing a field of an object
 * variant: for each check clause in `e0` (sons 1..n-1), evaluate the
 * set-membership test against the discriminator and emit the raising
 * template (T..._534 or T..._535 depending on magic 99 negation).
 * NOTE(review): machine-generated Nim C backend output; mangled names. */
N_NIMCALL(void, genfieldcheck_553504_839829468)(Tcproc529021* p0, Tnode292802* e0, Ropeobj178006* obj0, Tsym292834* field0, Ttype292840* origty0) {
Tloc292816 test0;
Tloc292816 u0;
Tloc292816 v0;
memset((void*)(&test0), 0, sizeof(test0));
memset((void*)(&u0), 0, sizeof(u0));
memset((void*)(&v0), 0, sizeof(v0));
{
NI i_553525_839829468;
NI HEX3Atmp_554039_839829468;
NI LOC2;
NI res_554042_839829468;
i_553525_839829468 = (NI)0;
HEX3Atmp_554039_839829468 = (NI)0;
LOC2 = (NI)0;
LOC2 = sonslen_295351_850551059(e0);
HEX3Atmp_554039_839829468 = (NI)(LOC2 - ((NI) 1));
/* starts at son 1: son 0 is not a check clause */
res_554042_839829468 = ((NI) 1);
{
while (1) {
Tnode292802* it0;
Tsym292834* op0;
Tnode292802* disc0;
Ropeobj178006* o0;
Tsym292834* d0;
NI id0;
Tnode292802* LOC9;
Ropeobj178006* strlit0;
if (!(res_554042_839829468 <= HEX3Atmp_554039_839829468)) goto LA4;
i_553525_839829468 = res_554042_839829468;
it0 = (*e0).kindU.S6.sons->data[i_553525_839829468];
op0 = (*(*it0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym;
{
/* magic 99 wraps the real check in a negation node: unwrap it
 * (the negation is honored later when picking the template) */
if (!((*op0).magic == ((Tmagic292524) 99))) goto LA7;
it0 = (*it0).kindU.S6.sons->data[((NI) 1)];
}
LA7: ;
disc0 = skipconv_328882_3876443242((*it0).kindU.S6.sons->data[((NI) 2)]);
initloc_532273_839829468((&test0), ((Tlockind292808) 0), (*it0).typ, ((Tstorageloc292812) 2));
/* u0 := the valid-value set operand of the check */
initlocexpr_539283_839829468(p0, (*it0).kindU.S6.sons->data[((NI) 1)], (&u0));
o0 = obj0;
/* resolve the discriminator field on the (possibly inherited) type,
 * extending the access rope o0 as needed */
d0 = lookupfieldagain_553153_839829468(p0, origty0, (*disc0).kindU.S4.sym, &o0);
initloc_532273_839829468((&v0), ((Tlockind292808) 6), (*d0).typ, ((Tstorageloc292812) 0));
v0.r = o0;
add_178487_2381377266(&v0.r, ((NimStringDesc*) &T839829468_257));
add_178482_2381377266(&v0.r, (*d0).loc.r);
/* test0 := (u0 contains v0) membership expression */
geninexpraux_553496_839829468(p0, it0, (&u0), (&v0), (&test0));
LOC9 = (Tnode292802*)0;
/* cache a string literal holding the field's name for the error msg */
LOC9 = newstrnode_293678_850551059(((Tnodekind292020) 20), (*(*field0).name).s);
id0 = nodetabletestorset_342682_1142335848((&(*(*p0).module).datacache), LOC9, ((NI) ((*(*p0).module).labels)));
{
if (!(id0 == ((NI) ((*(*p0).module).labels)))) goto LA12;
strlit0 = getstrlit_549468_839829468((*p0).module, (*(*field0).name).s);
}
goto LA10;
LA12: ;
{
Ropeobj178006* LOC15;
LOC15 = (Ropeobj178006*)0;
LOC15 = rope_178401_2381377266(((NI64) (id0)));
strlit0 = HEX26_178418_2381377266((*(*p0).module).tmpbase, LOC15);
}
LA10: ;
{
TY532811 LOC20;
/* magic 99 (negated check): raise when the test IS true */
if (!((*op0).magic == ((Tmagic292524) 99))) goto LA18;
memset((void*)LOC20, 0, sizeof(LOC20));
LOC20[0] = rdloc_538188_839829468(test0);
LOC20[1] = strlit0;
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_534), LOC20, 2);
}
goto LA16;
LA18: ;
{
TY532811 LOC22;
/* plain check: raise when the test is false */
memset((void*)LOC22, 0, sizeof(LOC22));
LOC22[0] = rdloc_538188_839829468(test0);
LOC22[1] = strlit0;
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_535), LOC22, 2);
}
LA16: ;
res_554042_839829468 += ((NI) 1);
} LA4: ;
}
}
}
/* Generate code for an object constructor `e0` into destination `d0`.
 * NOTE(review): machine-generated Nim C backend output; mangled names.
 * Path: try constant fast path; else build into a temp (heap-allocating
 * first if the constructed type is a ref, kind 22), assign each field
 * (sons 1..n-1) with optional discriminant checks, then copy/assign the
 * temp into the destination. */
N_NIMCALL(void, genobjconstr_554903_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) {
Tloc292816 tmp0;
Ttype292840* t0;
NIM_BOOL isref0;
Ropeobj178006* r0;
Ropeobj178006* LOC13;
Ttype292840* ty0;
{ {
NIM_BOOL LOC3;
LOC3 = (NIM_BOOL)0;
/* constant fast path: handleconstexpr fills d0 and we are done */
LOC3 = handleconstexpr_554853_839829468(p0, e0, d0);
if (!LOC3) goto LA4;
goto BeforeRet;
}
LA4: ;
memset((void*)(&tmp0), 0, sizeof(tmp0));
t0 = skiptypes_296099_850551059((*e0).typ, IL64(211106232576256));
gettemp_537032_839829468(p0, t0, (&tmp0), NIM_FALSE);
isref0 = ((*t0).kind == ((Ttypekind292244) 22));
r0 = rdloc_538188_839829468(tmp0);
{
Ttype292840* LOC10;
TY178507 LOC11;
if (!isref0) goto LA8;
/* ref object: allocate on the heap, then deref t0 to the object
 * type and wrap the access rope with template T..._124 */
rawgennew_554741_839829468(p0, tmp0, NIM_NIL);
LOC10 = (Ttype292840*)0;
LOC10 = lastson_295377_850551059(t0);
t0 = skiptypes_296099_850551059(LOC10, IL64(211106232576256));
memset((void*)LOC11, 0, sizeof(LOC11));
LOC11[0] = r0;
r0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_124), LOC11, 1);
gcusage_554439_839829468(e0);
}
goto LA6;
LA8: ;
{
/* value object: default-construct the temp in place */
constructloc_538388_839829468(p0, tmp0, NIM_FALSE);
}
LA6: ;
LOC13 = (Ropeobj178006*)0;
LOC13 = gettypedesc_535671_839829468((*p0).module, t0);
ty0 = getuniquetype_528640_2036603609(t0);
{
NI i_554944_839829468;
NI HEX3Atmp_554997_839829468;
NI LOC15;
NI res_555000_839829468;
i_554944_839829468 = (NI)0;
HEX3Atmp_554997_839829468 = (NI)0;
LOC15 = (NI)0;
LOC15 = len_293081_850551059(e0);
HEX3Atmp_554997_839829468 = (LOC15 - 1);
/* field initializers start at son 1 */
res_555000_839829468 = ((NI) 1);
{
while (1) {
Tnode292802* it0;
Tloc292816 tmp20;
Tsym292834* field0;
if (!(res_555000_839829468 <= HEX3Atmp_554997_839829468)) goto LA17;
i_554944_839829468 = res_555000_839829468;
it0 = (*e0).kindU.S6.sons->data[i_554944_839829468];
memset((void*)(&tmp20), 0, sizeof(tmp20));
tmp20.r = r0;
/* resolve the target field, possibly through base types; extends
 * tmp20.r with superclass accessors as needed */
field0 = lookupfieldagain_553153_839829468(p0, ty0, (*(*it0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym, &tmp20.r);
{
if (!((*field0).loc.r == NIM_NIL)) goto LA20;
internalerror_196100_155036129((*e0).info, ((NimStringDesc*) &T839829468_533));
}
LA20: ;
{
NIM_BOOL LOC24;
NI LOC25;
LOC24 = (NIM_BOOL)0;
LOC25 = (NI)0;
LOC25 = len_293081_850551059(it0);
LOC24 = (LOC25 == ((NI) 3));
if (!(LOC24)) goto LA26;
/* option bit 2 gates emitting discriminant checks (presumably
 * field checks enabled — verify against Toption decl) */
LOC24 = (((*p0).options &(1U<<((NU)(((Toption169009) 2))&31U)))!=0);
LA26: ;
if (!LOC24) goto LA27;
genfieldcheck_553504_839829468(p0, (*it0).kindU.S6.sons->data[((NI) 2)], r0, field0, ty0);
}
LA27: ;
add_178487_2381377266(&tmp20.r, ((NimStringDesc*) &T839829468_257));
add_178482_2381377266(&tmp20.r, (*field0).loc.r);
tmp20.k = ((Tlockind292808) 1);
tmp20.t = (*field0).loc.t;
{
/* heap vs. stack storage class on the field loc */
if (!isref0) goto LA31;
tmp20.s = ((Tstorageloc292812) 3);
}
goto LA29;
LA31: ;
{
tmp20.s = ((Tstorageloc292812) 2);
}
LA29: ;
/* generate the field's value expression into the field loc */
expr_539248_839829468(p0, (*it0).kindU.S6.sons->data[((NI) 1)], (&tmp20));
res_555000_839829468 += ((NI) 1);
} LA17: ;
}
}
{
/* empty destination: take over the temp loc wholesale */
if (!((*d0).k == ((Tlockind292808) 0))) goto LA36;
genericAssign((void*)(&(*d0)), (void*)(&tmp0), (&NTI292816));
}
goto LA34;
LA36: ;
{
/* destination exists: emit a regular assignment from the temp */
genassignment_539264_839829468(p0, (*d0), tmp0, 0);
}
LA34: ;
}BeforeRet: ;
}
/* Generate code for a `cast` expression `e0` into destination `d0`.
 * NOTE(review): machine-generated Nim C backend output; mangled names.
 * When either side is an aggregate-like kind (the ranges/kinds tested
 * below), the cast is emitted through a local union temp (templates
 * T..._536/537/538) to avoid pointer type-punning; otherwise the plain
 * numeric/pointer cast path gensomecast is used. */
N_NIMCALL(void, gencast_556537_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) {
Ttype292840* destt0;
Ttype292840* srct0;
destt0 = skiptypes_296099_850551059((*e0).typ, IL64(211106233624832));
srct0 = skiptypes_296099_850551059((*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, IL64(211106233624832));
{
NIM_BOOL LOC3;
Ropeobj178006* lbl0;
Tloc292816 tmp0;
TY178507 LOC7;
TY535238 LOC8;
TY178507 LOC9;
Ropeobj178006* LOC10;
LOC3 = (NIM_BOOL)0;
/* aggregate-kind test on the destination type... */
LOC3 = ((*destt0).kind >= ((Ttypekind292244) 36) && (*destt0).kind <= ((Ttypekind292244) 39) || (*destt0).kind == ((Ttypekind292244) 18) || (*destt0).kind == ((Ttypekind292244) 17) || (*destt0).kind == ((Ttypekind292244) 16) || (*destt0).kind == ((Ttypekind292244) 4));
if (LOC3) goto LA4;
/* ...or the same test on the source type */
LOC3 = ((*srct0).kind >= ((Ttypekind292244) 36) && (*srct0).kind <= ((Ttypekind292244) 39) || (*srct0).kind == ((Ttypekind292244) 18) || (*srct0).kind == ((Ttypekind292244) 17) || (*srct0).kind == ((Ttypekind292244) 16) || (*srct0).kind == ((Ttypekind292244) 4));
LA4: ;
if (!LOC3) goto LA5;
/* fresh label number names the emitted union temp */
(*p0).labels += ((NI) 1);
lbl0 = rope_178401_2381377266(((NI64) ((*p0).labels)));
memset((void*)(&tmp0), 0, sizeof(tmp0));
memset((void*)LOC7, 0, sizeof(LOC7));
LOC7[0] = lbl0;
/* tmp0.r := source-member access of the union (template T..._536) */
tmp0.r = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_536), LOC7, 1);
memset((void*)LOC8, 0, sizeof(LOC8));
LOC8[0] = gettypedesc_535671_839829468((*p0).module, srct0);
LOC8[1] = gettypedesc_535671_839829468((*p0).module, destt0);
LOC8[2] = lbl0;
/* declare the union { src; dest; } temp in proc section 0 */
linefmt_532714_839829468(p0, ((Tcprocsection529011) 0), ((NimStringDesc*) &T839829468_537), LOC8, 3);
tmp0.k = ((Tlockind292808) 6);
tmp0.t = srct0;
tmp0.s = ((Tstorageloc292812) 2);
tmp0.flags = 0;
/* evaluate the operand into the union's source member */
expr_539248_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&tmp0));
memset((void*)LOC9, 0, sizeof(LOC9));
LOC9[0] = lbl0;
LOC10 = (Ropeobj178006*)0;
/* read back through the destination member (template T..._538) */
LOC10 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_538), LOC9, 1);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC10, tmp0.s);
}
goto LA1;
LA5: ;
{
/* scalar cast: delegate to the generic cast emitter */
gensomecast_556480_839829468(p0, e0, d0);
}
LA1: ;
}
/* Generate code for a type-conversion expression `e0` into `d0`.
 * NOTE(review): machine-generated Nim C backend output; this routine has
 * been hand-flattened from the generated goto form into plain if/else —
 * behavior is unchanged.
 * When the destination type (after skipping the kinds selected by mask
 * 8390656) compares equal to the operand's type under Tdistinctcompare
 * mode 1, the conversion is a no-op and the operand is generated
 * directly; otherwise the generic cast path is taken. */
N_NIMCALL(void, genconv_556632_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) {
Ttype292840* dstType;
Tnode292802* operand;
NIM_BOOL sameType;
dstType = skiptypes_296099_850551059((*e0).typ, 8390656);
operand = (*e0).kindU.S6.sons->data[((NI) 1)];
sameType = comparetypes_326214_3876443242(dstType, (*operand).typ, ((Tdistinctcompare324427) 1), 0);
if (sameType) {
/* identical representation: just evaluate the operand into d0 */
expr_539248_839829468(p0, operand, d0);
} else {
/* representations differ: emit an actual cast/conversion */
gensomecast_556480_839829468(p0, e0, d0);
}
}
/* Machine-generated (Nim C backend; appears to be `isCppRef`).
 * Returns true when `typ0` should be treated as a C++ reference: we must be
 * in C++ codegen mode (gcmd == command 2, or the module carries sym flag 27
 * -- presumably the "compile as C++" flag; TODO confirm enum meanings
 * against the Nim sources), the skipped type has kind 23, and type flag 18
 * is NOT set. The three short-circuit `&&` legs of the Nim expression are
 * lowered into the LOC1/LOC2/LOC3 + goto chain below. */
static N_INLINE(NIM_BOOL, iscppref_552807_839829468)(Tcproc529021* p0, Ttype292840* typ0) {
NIM_BOOL result0;
NIM_BOOL LOC1;
NIM_BOOL LOC2;
NIM_BOOL LOC3;
Ttype292840* LOC6;
Ttype292840* LOC8;
result0 = (NIM_BOOL)0;
LOC1 = (NIM_BOOL)0;
LOC2 = (NIM_BOOL)0;
LOC3 = (NIM_BOOL)0;
/* Leg 1: C++ target? (command check OR module flag check, short-circuited) */
LOC3 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC3) goto LA4;
LOC3 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA4: ;
LOC2 = LOC3;
if (!(LOC2)) goto LA5;
/* Leg 2: skipped type kind is 23 (ref-like). */
LOC6 = (Ttype292840*)0;
LOC6 = skiptypes_296099_850551059(typ0, IL64(211106232576256));
LOC2 = ((*LOC6).kind == ((Ttypekind292244) 23));
LA5: ;
LOC1 = LOC2;
if (!(LOC1)) goto LA7;
/* Leg 3: type flag 18 must be absent. */
LOC8 = (Ttype292840*)0;
LOC8 = skiptypes_296099_850551059(typ0, IL64(211106232576256));
LOC1 = !((((*LOC8).flags &(1U<<((NU)(((Ttypeflag292431) 18))&31U)))!=0));
LA7: ;
result0 = LOC1;
return result0;
}
/* Machine-generated (Nim C backend; appears to be `genAddr`).
 * Emits the address-of operation for node e0 into d0. Three cases:
 *  1) operand type kind is 22 or 21 (var/ptr-like -- TODO confirm): the
 *     operand already denotes an address, so emit `&`-wrapped rope via
 *     format string T839829468_52;
 *  2) the mapped C type is kind 17 or the operand is a C++ ref: the C-level
 *     value IS the address, so just emit the operand expression;
 *  3) otherwise: evaluate the operand into a loc and take addrloc of it. */
N_NIMCALL(void, genaddr_553051_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) {
{
Ttype292840* LOC3;
Tloc292816 a0;
Ropeobj178006* LOC6;
LOC3 = (Ttype292840*)0;
LOC3 = skiptypes_296099_850551059((*(*e0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106232576256));
if (!((*LOC3).kind == ((Ttypekind292244) 22) || (*LOC3).kind == ((Ttypekind292244) 21))) goto LA4;
memset((void*)(&a0), 0, sizeof(a0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 0)], (&a0));
LOC6 = (Ropeobj178006*)0;
LOC6 = HEX26_178452_2381377266(((NimStringDesc*) &T839829468_52), a0.r);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC6, a0.s);
}
goto LA1;
LA4: ;
{
NIM_BOOL LOC8;
Tctypekind529007 LOC9;
LOC8 = (NIM_BOOL)0;
LOC9 = (Tctypekind529007)0;
LOC9 = maptype_533393_839829468((*(*e0).kindU.S6.sons->data[((NI) 0)]).typ);
LOC8 = (LOC9 == ((Tctypekind529007) 17));
if (LOC8) goto LA10;
LOC8 = iscppref_552807_839829468(p0, (*(*e0).kindU.S6.sons->data[((NI) 0)]).typ);
LA10: ;
if (!LOC8) goto LA11;
/* Already an address at the C level -- emit operand as-is. */
expr_539248_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 0)], d0);
}
goto LA1;
LA11: ;
{
Tloc292816 a0;
Ropeobj178006* LOC14;
memset((void*)(&a0), 0, sizeof(a0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 0)], (&a0));
LOC14 = (Ropeobj178006*)0;
LOC14 = addrloc_538204_839829468(a0);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC14, a0.s);
}
LA1: ;
}
/* Machine-generated (Nim C backend; appears to be `genArrayElem`).
 * Emits indexing `x0[y0]` for fixed-size arrays into d0.
 * Steps: evaluate array (a0) and index (b0) locs; compute the array's
 * first ordinal (`first0`) so indices are rebased to 0; when bounds
 * checking is on (p0->options bit 4 -- presumably optBoundsCheck; TODO
 * confirm) and the type is not flagged exempt, emit a runtime range check
 * (or a compile-time error for a constant out-of-range index); finally
 * build the C index expression rope via format T839829468_541. */
N_NIMCALL(void, genarrayelem_554093_839829468)(Tcproc529021* p0, Tnode292802* x0, Tnode292802* y0, Tloc292816* d0) {
Tloc292816 a0;
Tloc292816 b0;
Ttype292840* ty0;
Ttype292840* LOC1;
Ropeobj178006* first0;
NI64 LOC2;
Ttype292840* LOC47;
Ttype292840* LOC48;
TY535238 LOC49;
Ropeobj178006* LOC50;
memset((void*)(&a0), 0, sizeof(a0));
memset((void*)(&b0), 0, sizeof(b0));
initlocexpr_539283_839829468(p0, x0, (&a0));
initlocexpr_539283_839829468(p0, y0, (&b0));
LOC1 = (Ttype292840*)0;
LOC1 = skiptypes_296099_850551059(a0.t, IL64(211106242013440));
ty0 = skiptypes_296099_850551059(LOC1, IL64(211106247256320));
LOC2 = (NI64)0;
LOC2 = firstord_320001_3876443242(ty0);
first0 = intliteral_539270_839829468(LOC2);
{
NIM_BOOL LOC5;
LOC5 = (NIM_BOOL)0;
/* Bounds checking enabled and array type not exempt (flag 0)? */
LOC5 = (((*p0).options &(1U<<((NU)(((Toption169009) 4))&31U)))!=0);
if (!(LOC5)) goto LA6;
LOC5 = !((((*ty0).flags &(1U<<((NU)(((Ttypeflag292431) 0))&31U)))!=0));
LA6: ;
if (!LOC5) goto LA7;
{
NIM_BOOL LOC11;
LOC11 = (NIM_BOOL)0;
LOC11 = isconstexpr_318510_2616423590(y0);
if (!!(LOC11)) goto LA12;
/* Non-constant index: emit a runtime check. */
{
NI64 LOC16;
LOC16 = (NI64)0;
LOC16 = firstord_320001_3876443242(ty0);
if (!(LOC16 == IL64(0))) goto LA17;
/* Zero-based array: only check when the index type's ordinal range
 * can actually exceed the array's range (upper-bound-only check,
 * format T839829468_539). */
{
NIM_BOOL LOC21;
NI64 LOC22;
NI64 LOC23;
NI64 LOC25;
NI64 LOC26;
TY532811 LOC29;
NI64 LOC30;
LOC21 = (NIM_BOOL)0;
LOC22 = (NI64)0;
LOC22 = firstord_320001_3876443242(b0.t);
LOC23 = (NI64)0;
LOC23 = firstord_320001_3876443242(ty0);
LOC21 = (LOC22 < LOC23);
if (LOC21) goto LA24;
LOC25 = (NI64)0;
LOC25 = lastord_320004_3876443242(ty0);
LOC26 = (NI64)0;
LOC26 = lastord_320004_3876443242(b0.t);
LOC21 = (LOC25 < LOC26);
LA24: ;
if (!LOC21) goto LA27;
memset((void*)LOC29, 0, sizeof(LOC29));
LOC29[0] = rdcharloc_538227_839829468(b0);
LOC30 = (NI64)0;
LOC30 = lastord_320004_3876443242(ty0);
LOC29[1] = intliteral_539270_839829468(LOC30);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_539), LOC29, 2);
}
LA27: ;
}
goto LA14;
LA17: ;
/* Non-zero first ordinal: emit a two-sided range check
 * (format T839829468_540). */
{
TY535238 LOC32;
NI64 LOC33;
memset((void*)LOC32, 0, sizeof(LOC32));
LOC32[0] = rdcharloc_538227_839829468(b0);
LOC32[1] = first0;
LOC33 = (NI64)0;
LOC33 = lastord_320004_3876443242(ty0);
LOC32[2] = intliteral_539270_839829468(LOC33);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_540), LOC32, 3);
}
LA14: ;
}
goto LA9;
LA12: ;
/* Constant index: validate at compile time; out-of-range is a
 * localerror (msg kind 86 -- presumably "index out of bounds"). */
{
NI64 idx0;
idx0 = getordvalue_320129_3876443242(y0);
{
NIM_BOOL LOC37;
NI64 LOC38;
NI64 LOC40;
LOC37 = (NIM_BOOL)0;
LOC38 = (NI64)0;
LOC38 = firstord_320001_3876443242(ty0);
LOC37 = (idx0 < LOC38);
if (LOC37) goto LA39;
LOC40 = (NI64)0;
LOC40 = lastord_320004_3876443242(ty0);
LOC37 = (LOC40 < idx0);
LA39: ;
if (!LOC37) goto LA41;
localerror_196080_155036129((*x0).info, ((Tmsgkind191002) 86), ((NimStringDesc*) &T839829468_490));
}
LA41: ;
}
LA9: ;
}
LA7: ;
{
/* Propagate the array's storage class to an undecided destination. */
if (!((*d0).k == ((Tlockind292808) 0))) goto LA45;
(*d0).s = a0.s;
}
LA45: ;
LOC47 = (Ttype292840*)0;
LOC47 = skiptypes_296099_850551059(ty0, IL64(211106240964864));
LOC48 = (Ttype292840*)0;
LOC48 = elemtype_320394_3876443242(LOC47);
memset((void*)LOC49, 0, sizeof(LOC49));
LOC49[0] = rdloc_538188_839829468(a0);
LOC49[1] = rdcharloc_538227_839829468(b0);
LOC49[2] = first0;
LOC50 = (Ropeobj178006*)0;
LOC50 = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_541), LOC49, 3);
putintodest_550468_839829468(p0, d0, LOC48, LOC50, a0.s);
}
/* Machine-generated (Nim C backend; appears to be `genOpenArrayElem`).
 * Emits indexing into an openarray: open arrays carry their length at
 * runtime, so when bounds checking is on (options bit 4) a runtime check
 * against the passed-in length is emitted (format T839829468_542), then the
 * plain `a[b]` rope is built with format T839829468_138. */
N_NIMCALL(void, genopenarrayelem_554169_839829468)(Tcproc529021* p0, Tnode292802* x0, Tnode292802* y0, Tloc292816* d0) {
Tloc292816 a0;
Tloc292816 b0;
Ttype292840* LOC10;
Ttype292840* LOC11;
TY532811 LOC12;
Ropeobj178006* LOC13;
memset((void*)(&a0), 0, sizeof(a0));
memset((void*)(&b0), 0, sizeof(b0));
initlocexpr_539283_839829468(p0, x0, (&a0));
initlocexpr_539283_839829468(p0, y0, (&b0));
{
TY532811 LOC5;
if (!(((*p0).options &(1U<<((NU)(((Toption169009) 4))&31U)))!=0)) goto LA3;
memset((void*)LOC5, 0, sizeof(LOC5));
LOC5[0] = rdloc_538188_839829468(b0);
LOC5[1] = rdloc_538188_839829468(a0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_542), LOC5, 2);
}
LA3: ;
{
/* Propagate storage class to an undecided destination. */
if (!((*d0).k == ((Tlockind292808) 0))) goto LA8;
(*d0).s = a0.s;
}
LA8: ;
LOC10 = (Ttype292840*)0;
LOC10 = skiptypes_296099_850551059(a0.t, IL64(211106240964864));
LOC11 = (Ttype292840*)0;
LOC11 = elemtype_320394_3876443242(LOC10);
memset((void*)LOC12, 0, sizeof(LOC12));
LOC12[0] = rdloc_538188_839829468(a0);
LOC12[1] = rdcharloc_538227_839829468(b0);
LOC13 = (Ropeobj178006*)0;
LOC13 = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_138), LOC12, 2);
putintodest_550468_839829468(p0, d0, LOC11, LOC13, a0.s);
}
/* Machine-generated (Nim C backend; appears to be `genSeqElem`).
 * Emits indexing into a seq or string. Skips a leading var/ptr wrapper
 * (kind 22/21) to the payload type; with bounds checking on, emits a
 * runtime check -- string types (kind 28) use a check that tolerates the
 * terminating index (format T839829468_543), other seqs use the strict
 * form (T839829468_544). Destination storage defaults to class 3 (heap --
 * TODO confirm) since seq data lives on the heap. If the accessor loc is
 * still behind a var/ptr, it is dereferenced (format T839829468_124)
 * before the final `->data[i]` rope (T839829468_187) is produced. */
N_NIMCALL(void, genseqelem_554205_839829468)(Tcproc529021* p0, Tnode292802* x0, Tnode292802* y0, Tloc292816* d0) {
Tloc292816 a0;
Tloc292816 b0;
Ttype292840* ty0;
Ttype292840* LOC27;
Ttype292840* LOC28;
TY532811 LOC29;
Ropeobj178006* LOC30;
memset((void*)(&a0), 0, sizeof(a0));
memset((void*)(&b0), 0, sizeof(b0));
initlocexpr_539283_839829468(p0, x0, (&a0));
initlocexpr_539283_839829468(p0, y0, (&b0));
ty0 = skiptypes_296099_850551059(a0.t, IL64(211106242013440));
{
Ttype292840* LOC5;
if (!((*ty0).kind == ((Ttypekind292244) 22) || (*ty0).kind == ((Ttypekind292244) 21))) goto LA3;
LOC5 = (Ttype292840*)0;
LOC5 = lastson_295377_850551059(ty0);
ty0 = skiptypes_296099_850551059(LOC5, IL64(211106242013440));
}
LA3: ;
{
if (!(((*p0).options &(1U<<((NU)(((Toption169009) 4))&31U)))!=0)) goto LA8;
{
TY535238 LOC14;
if (!((*ty0).kind == ((Ttypekind292244) 28))) goto LA12;
memset((void*)LOC14, 0, sizeof(LOC14));
LOC14[0] = rdloc_538188_839829468(b0);
LOC14[1] = rdloc_538188_839829468(a0);
LOC14[2] = lenfield_539305_839829468(p0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_543), LOC14, 3);
}
goto LA10;
LA12: ;
{
TY535238 LOC16;
memset((void*)LOC16, 0, sizeof(LOC16));
LOC16[0] = rdloc_538188_839829468(b0);
LOC16[1] = rdloc_538188_839829468(a0);
LOC16[2] = lenfield_539305_839829468(p0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_544), LOC16, 3);
}
LA10: ;
}
LA8: ;
{
if (!((*d0).k == ((Tlockind292808) 0))) goto LA19;
(*d0).s = ((Tstorageloc292812) 3);
}
LA19: ;
{
Ttype292840* LOC23;
TY178507 LOC26;
LOC23 = (Ttype292840*)0;
LOC23 = skiptypes_296099_850551059(a0.t, IL64(211106240964864));
if (!((*LOC23).kind == ((Ttypekind292244) 22) || (*LOC23).kind == ((Ttypekind292244) 21))) goto LA24;
memset((void*)LOC26, 0, sizeof(LOC26));
LOC26[0] = a0.r;
a0.r = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_124), LOC26, 1);
}
LA24: ;
LOC27 = (Ttype292840*)0;
LOC27 = skiptypes_296099_850551059(a0.t, IL64(211106240964864));
LOC28 = (Ttype292840*)0;
LOC28 = elemtype_320394_3876443242(LOC27);
memset((void*)LOC29, 0, sizeof(LOC29));
LOC29[0] = rdloc_538188_839829468(a0);
LOC29[1] = rdcharloc_538227_839829468(b0);
LOC30 = (Ropeobj178006*)0;
LOC30 = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_187), LOC29, 2);
putintodest_550468_839829468(p0, d0, LOC28, LOC30, a0.s);
}
/* Machine-generated (Nim C backend; appears to be `genCStringElem`).
 * Emits indexing into a cstring: no bounds check is possible (C strings
 * carry no length), so this just builds `a[b]` with format T839829468_138
 * and forwards the source's storage class to an undecided destination. */
N_NIMCALL(void, gencstringelem_554144_839829468)(Tcproc529021* p0, Tnode292802* x0, Tnode292802* y0, Tloc292816* d0) {
Tloc292816 a0;
Tloc292816 b0;
Ttype292840* ty0;
Ttype292840* LOC5;
Ttype292840* LOC6;
TY532811 LOC7;
Ropeobj178006* LOC8;
memset((void*)(&a0), 0, sizeof(a0));
memset((void*)(&b0), 0, sizeof(b0));
initlocexpr_539283_839829468(p0, x0, (&a0));
initlocexpr_539283_839829468(p0, y0, (&b0));
ty0 = skiptypes_296099_850551059(a0.t, IL64(211106242013440));
{
if (!((*d0).k == ((Tlockind292808) 0))) goto LA3;
(*d0).s = a0.s;
}
LA3: ;
LOC5 = (Ttype292840*)0;
LOC5 = skiptypes_296099_850551059(ty0, IL64(211106240964864));
LOC6 = (Ttype292840*)0;
LOC6 = elemtype_320394_3876443242(LOC5);
memset((void*)LOC7, 0, sizeof(LOC7));
LOC7[0] = rdloc_538188_839829468(a0);
LOC7[1] = rdcharloc_538227_839829468(b0);
LOC8 = (Ropeobj178006*)0;
LOC8 = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_138), LOC7, 2);
putintodest_550468_839829468(p0, d0, LOC6, LOC8, a0.s);
}
/* Machine-generated (Nim C backend; appears to be `genTupleElem`).
 * Emits access to tuple field `e0[1]` of tuple expression `e0[0]`.
 * The index must be an integer literal node (kinds 6..15); anything else
 * is an internal error (T839829468_545). The access is rendered as the
 * positional field name `Field<i>` via format T839829468_546, and the
 * field's type is looked up from the unique tuple type's sons. */
N_NIMCALL(void, gentupleelem_553124_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) {
Tloc292816 a0;
NI i0;
Ropeobj178006* LOC5;
Ttype292840* ty0;
Ropeobj178006* r0;
TY178507 LOC8;
memset((void*)(&a0), 0, sizeof(a0));
i0 = (NI)0;
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 0)], (&a0));
{
if (!((*d0).k == ((Tlockind292808) 0))) goto LA3;
(*d0).s = a0.s;
}
LA3: ;
/* Side effect only: ensures the tuple's C type declaration is emitted. */
LOC5 = (Ropeobj178006*)0;
LOC5 = gettypedesc_535671_839829468((*p0).module, a0.t);
ty0 = getuniquetype_528640_2036603609(a0.t);
r0 = rdloc_538188_839829468(a0);
switch ((*(*e0).kindU.S6.sons->data[((NI) 1)]).kind) {
case ((Tnodekind292020) 6) ... ((Tnodekind292020) 15):
{
i0 = ((NI) ((*(*e0).kindU.S6.sons->data[((NI) 1)]).kindU.S1.intval));
}
break;
default:
{
internalerror_196100_155036129((*e0).info, ((NimStringDesc*) &T839829468_545));
}
break;
}
memset((void*)LOC8, 0, sizeof(LOC8));
LOC8[0] = rope_178401_2381377266(((NI64) (i0)));
addf_179205_2381377266(&r0, ((NimStringDesc*) &T839829468_546), LOC8, 1);
putintodest_550468_839829468(p0, d0, (*ty0).sons->data[i0], r0, a0.s);
}
/* Machine-generated (Nim C backend; appears to be `genBracketExpr`).
 * Dispatcher for `a[i]`: skips a var/ptr wrapper (kind 22/21), then routes
 * by the container's type kind -- array-like (16, 4) -> genarrayelem,
 * openarray-like (27, 48) -> genopenarrayelem, seq/string (24, 28) ->
 * genseqelem, cstring (29) -> gencstringelem, tuple (18) -> gentupleelem.
 * Any other kind is an internal error with the kind name appended
 * (prefix T839829468_547, closing ')'). Enum-value/kind mappings are
 * inferred -- confirm against Nim's TTypeKind if auditing. */
N_NIMCALL(void, genbracketexpr_554277_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0) {
Ttype292840* ty0;
ty0 = skiptypes_296099_850551059((*(*n0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106242013440));
{
Ttype292840* LOC5;
if (!((*ty0).kind == ((Ttypekind292244) 22) || (*ty0).kind == ((Ttypekind292244) 21))) goto LA3;
LOC5 = (Ttype292840*)0;
LOC5 = lastson_295377_850551059(ty0);
ty0 = skiptypes_296099_850551059(LOC5, IL64(211106242013440));
}
LA3: ;
switch ((*ty0).kind) {
case ((Ttypekind292244) 16):
case ((Ttypekind292244) 4):
{
genarrayelem_554093_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (*n0).kindU.S6.sons->data[((NI) 1)], d0);
}
break;
case ((Ttypekind292244) 27):
case ((Ttypekind292244) 48):
{
genopenarrayelem_554169_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (*n0).kindU.S6.sons->data[((NI) 1)], d0);
}
break;
case ((Ttypekind292244) 24):
case ((Ttypekind292244) 28):
{
genseqelem_554205_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (*n0).kindU.S6.sons->data[((NI) 1)], d0);
}
break;
case ((Ttypekind292244) 29):
{
gencstringelem_554144_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (*n0).kindU.S6.sons->data[((NI) 1)], d0);
}
break;
case ((Ttypekind292244) 18):
{
gentupleelem_553124_839829468(p0, n0, d0);
}
break;
default:
{
NimStringDesc* LOC12;
LOC12 = (NimStringDesc*)0;
LOC12 = rawNewString(reprEnum((NI)(*ty0).kind, (&NTI292244))->Sup.len + 21);
appendString(LOC12, ((NimStringDesc*) &T839829468_547));
appendString(LOC12, reprEnum((NI)(*ty0).kind, (&NTI292244)));
appendChar(LOC12, 41);
internalerror_196100_155036129((*n0).info, LOC12);
}
break;
}
}
/* Machine-generated (Nim C backend; appears to be `genDeref`).
 * Emits the dereference of `e0[0]` into d0, with C++-interop special cases.
 * Overall shape (the goto/label net encodes nested Nim if/elif/case):
 *  - If the mapped C type is in mask 393216 (presumably array-like C types
 *    where deref is a no-op -- TODO confirm) and deref is not enforced,
 *    emit the operand directly; a var type (kind 22) forces storage 3.
 *  - Otherwise evaluate the operand. In C++ mode (gcmd == 2 or module sym
 *    flag 27) a C++ ref (kind 23 without type flag 18) wrapping a node of
 *    kind 64/65 is unwrapped/forwarded directly (the two near-identical
 *    flag-check clusters below handle the d0-undecided and d0-decided
 *    paths), exiting via BeforeRet.
 *  - Destination storage class is chosen by pointee kind: 22 -> 3,
 *    23 -> 0, 21 -> 0; other kinds are an internal error (T839829468_548).
 *  - Finally either forward the loc (enforced deref of C type 18) or wrap
 *    it in the deref format T839829468_124.
 * NOTE(review): statement/label order is load-bearing generated code; do
 * not restructure by hand -- change the Nim compiler source instead. */
N_NIMCALL(void, genderef_543921_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, NIM_BOOL enforcederef0) {
Tctypekind529007 mt0;
{ mt0 = maptype_533393_839829468((*(*e0).kindU.S6.sons->data[((NI) 0)]).typ);
{
NIM_BOOL LOC3;
LOC3 = (NIM_BOOL)0;
LOC3 = ((393216 &(1U<<((NU)(mt0)&31U)))!=0);
if (!(LOC3)) goto LA4;
LOC3 = !(enforcederef0);
LA4: ;
if (!LOC3) goto LA5;
expr_539248_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 0)], d0);
{
Ttype292840* LOC9;
LOC9 = (Ttype292840*)0;
LOC9 = skiptypes_296099_850551059((*(*e0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106232576256));
if (!((*LOC9).kind == ((Ttypekind292244) 22))) goto LA10;
(*d0).s = ((Tstorageloc292812) 3);
}
LA10: ;
}
goto LA1;
LA5: ;
{
Tloc292816 a0;
Ttype292840* typ0;
memset((void*)(&a0), 0, sizeof(a0));
typ0 = skiptypes_296099_850551059((*(*e0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106232576256));
{
NIM_BOOL LOC15;
NIM_BOOL LOC16;
NIM_BOOL LOC17;
NIM_BOOL LOC20;
Tnode292802* LOC25;
Tnode292802* LOC26;
LOC15 = (NIM_BOOL)0;
LOC16 = (NIM_BOOL)0;
LOC17 = (NIM_BOOL)0;
/* C++ ref wrapping an addr node (kind 64): collapse deref(addr(x)). */
LOC17 = ((*typ0).kind == ((Ttypekind292244) 23));
if (!(LOC17)) goto LA18;
LOC17 = !((((*typ0).flags &(1U<<((NU)(((Ttypeflag292431) 18))&31U)))!=0));
LA18: ;
LOC16 = LOC17;
if (!(LOC16)) goto LA19;
LOC20 = (NIM_BOOL)0;
LOC20 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC20) goto LA21;
LOC20 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA21: ;
LOC16 = LOC20;
LA19: ;
LOC15 = LOC16;
if (!(LOC15)) goto LA22;
LOC15 = ((*(*e0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind292020) 64));
LA22: ;
if (!LOC15) goto LA23;
LOC25 = (Tnode292802*)0;
LOC25 = HEX5BHEX5D_293238_850551059(e0, ((NI) 0));
LOC26 = (Tnode292802*)0;
LOC26 = HEX5BHEX5D_293238_850551059(LOC25, ((NI) 0));
initlocexprsingleuse_539289_839829468(p0, LOC26, d0);
goto BeforeRet;
}
goto LA13;
LA23: ;
{
initlocexprsingleuse_539289_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 0)], (&a0));
}
LA13: ;
{
if (!((*d0).k == ((Tlockind292808) 0))) goto LA30;
switch ((*typ0).kind) {
case ((Ttypekind292244) 22):
{
(*d0).s = ((Tstorageloc292812) 3);
}
break;
case ((Ttypekind292244) 23):
{
(*d0).s = ((Tstorageloc292812) 0);
{
NIM_BOOL LOC36;
NIM_BOOL LOC37;
NIM_BOOL LOC39;
Ropeobj178006* LOC44;
LOC36 = (NIM_BOOL)0;
LOC37 = (NIM_BOOL)0;
LOC37 = !((((*typ0).flags &(1U<<((NU)(((Ttypeflag292431) 18))&31U)))!=0));
if (!(LOC37)) goto LA38;
LOC39 = (NIM_BOOL)0;
LOC39 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC39) goto LA40;
LOC39 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA40: ;
LOC37 = LOC39;
LA38: ;
LOC36 = LOC37;
if (!(LOC36)) goto LA41;
LOC36 = ((*e0).kind == ((Tnodekind292020) 65));
LA41: ;
if (!LOC36) goto LA42;
LOC44 = (Ropeobj178006*)0;
LOC44 = rdloc_538188_839829468(a0);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC44, a0.s);
goto BeforeRet;
}
LA42: ;
}
break;
case ((Ttypekind292244) 21):
{
(*d0).s = ((Tstorageloc292812) 0);
}
break;
default:
{
NimStringDesc* LOC47;
LOC47 = (NimStringDesc*)0;
LOC47 = rawNewString(reprEnum((NI)(*typ0).kind, (&NTI292244))->Sup.len + 9);
appendString(LOC47, ((NimStringDesc*) &T839829468_548));
appendString(LOC47, reprEnum((NI)(*typ0).kind, (&NTI292244)));
internalerror_196100_155036129((*e0).info, LOC47);
}
break;
}
}
goto LA28;
LA30: ;
{
NIM_BOOL LOC49;
LOC49 = (NIM_BOOL)0;
LOC49 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC49) goto LA50;
LOC49 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA50: ;
if (!LOC49) goto LA51;
{
NIM_BOOL LOC55;
NIM_BOOL LOC56;
Ropeobj178006* LOC61;
LOC55 = (NIM_BOOL)0;
LOC56 = (NIM_BOOL)0;
LOC56 = ((*typ0).kind == ((Ttypekind292244) 23));
if (!(LOC56)) goto LA57;
LOC56 = !((((*typ0).flags &(1U<<((NU)(((Ttypeflag292431) 18))&31U)))!=0));
LA57: ;
LOC55 = LOC56;
if (!(LOC55)) goto LA58;
LOC55 = ((*e0).kind == ((Tnodekind292020) 65));
LA58: ;
if (!LOC55) goto LA59;
LOC61 = (Ropeobj178006*)0;
LOC61 = rdloc_538188_839829468(a0);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC61, a0.s);
goto BeforeRet;
}
LA59: ;
}
goto LA28;
LA51: ;
LA28: ;
{
NIM_BOOL LOC64;
Ropeobj178006* LOC68;
LOC64 = (NIM_BOOL)0;
LOC64 = enforcederef0;
if (!(LOC64)) goto LA65;
LOC64 = (mt0 == ((Tctypekind529007) 18));
LA65: ;
if (!LOC64) goto LA66;
LOC68 = (Ropeobj178006*)0;
LOC68 = rdloc_538188_839829468(a0);
putintodest_550468_839829468(p0, d0, (*a0.t).sons->data[((NI) 0)], LOC68, a0.s);
}
goto LA62;
LA66: ;
{
TY178507 LOC70;
Ropeobj178006* LOC71;
memset((void*)LOC70, 0, sizeof(LOC70));
LOC70[0] = rdloc_538188_839829468(a0);
LOC71 = (Ropeobj178006*)0;
LOC71 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_124), LOC70, 1);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC71, a0.s);
}
LA62: ;
}
LA1: ;
}BeforeRet: ;
}
/* Machine-generated (Nim C backend; appears to be `genRecordFieldAux`).
 * Shared setup for record-field access: evaluates the record expression
 * `e0[0]` into *a0, requires `e0[1]` to be a symbol node (kind 3 --
 * internal error T839829468_549 otherwise), forwards storage class to an
 * undecided destination, forces emission of the record's C type
 * declaration, and returns the record's unique type. */
N_NIMCALL(Ttype292840*, genrecordfieldaux_553096_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, Tloc292816* a0) {
Ttype292840* result0;
Ropeobj178006* LOC9;
result0 = (Ttype292840*)0;
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 0)], a0);
{
if (!!(((*(*e0).kindU.S6.sons->data[((NI) 1)]).kind == ((Tnodekind292020) 3)))) goto LA3;
internalerror_196100_155036129((*e0).info, ((NimStringDesc*) &T839829468_549));
}
LA3: ;
{
if (!((*d0).k == ((Tlockind292808) 0))) goto LA7;
(*d0).s = (*a0).s;
}
LA7: ;
/* Side effect only: ensure the record's C type is declared. */
LOC9 = (Ropeobj178006*)0;
LOC9 = gettypedesc_535671_839829468((*p0).module, (*a0).t);
result0 = getuniquetype_528640_2036603609((*a0).t);
return result0;
}
/* Machine-generated (Nim C backend; appears to be `genRecordField`).
 * Emits `record.field` into d0. Tuples (type kind 18) use positional field
 * names `Field<position>` (format T839829468_546); objects resolve the
 * field symbol through the inheritance chain with lookupfieldagain --
 * which may rewrite r0 to insert `.Sup` hops -- then append the field's
 * mangled C name (format T839829468_551). A field with no C name is an
 * internal error (T839829468_550). */
N_NIMCALL(void, genrecordfield_553448_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) {
Tloc292816 a0;
Ttype292840* ty0;
Ropeobj178006* r0;
Tsym292834* f0;
memset((void*)(&a0), 0, sizeof(a0));
ty0 = genrecordfieldaux_553096_839829468(p0, e0, d0, (&a0));
r0 = rdloc_538188_839829468(a0);
f0 = (*(*e0).kindU.S6.sons->data[((NI) 1)]).kindU.S4.sym;
{
TY178507 LOC5;
if (!((*ty0).kind == ((Ttypekind292244) 18))) goto LA3;
memset((void*)LOC5, 0, sizeof(LOC5));
LOC5[0] = rope_178401_2381377266(((NI64) ((*f0).position)));
addf_179205_2381377266(&r0, ((NimStringDesc*) &T839829468_546), LOC5, 1);
putintodest_550468_839829468(p0, d0, (*f0).typ, r0, a0.s);
}
goto LA1;
LA3: ;
{
Tsym292834* field0;
TY178507 LOC11;
field0 = lookupfieldagain_553153_839829468(p0, ty0, f0, &r0);
{
if (!((*field0).loc.r == NIM_NIL)) goto LA9;
internalerror_196100_155036129((*e0).info, ((NimStringDesc*) &T839829468_550));
}
LA9: ;
memset((void*)LOC11, 0, sizeof(LOC11));
LOC11[0] = (*field0).loc.r;
addf_179205_2381377266(&r0, ((NimStringDesc*) &T839829468_551), LOC11, 1);
putintodest_550468_839829468(p0, d0, (*field0).typ, r0, a0.s);
}
LA1: ;
}
/* Machine-generated (Nim C backend; appears to be `genCheckedRecordField`).
 * Access to a field in a case-object variant branch. When field checks are
 * enabled (p0->options bit 2 -- presumably optFieldCheck; TODO confirm),
 * emit a runtime discriminant check via genfieldcheck before the field
 * access; otherwise delegate to the unchecked genrecordfield on the inner
 * node `e0[0]`. */
N_NIMCALL(void, gencheckedrecordfield_554046_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) {
{
Tloc292816 a0;
Ttype292840* ty0;
Ropeobj178006* r0;
Tsym292834* f0;
Tsym292834* field0;
TY178507 LOC9;
Ropeobj178006* LOC10;
if (!(((*p0).options &(1U<<((NU)(((Toption169009) 2))&31U)))!=0)) goto LA3;
memset((void*)(&a0), 0, sizeof(a0));
ty0 = genrecordfieldaux_553096_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 0)], d0, (&a0));
r0 = rdloc_538188_839829468(a0);
f0 = (*(*(*e0).kindU.S6.sons->data[((NI) 0)]).kindU.S6.sons->data[((NI) 1)]).kindU.S4.sym;
field0 = lookupfieldagain_553153_839829468(p0, ty0, f0, &r0);
{
if (!((*field0).loc.r == NIM_NIL)) goto LA7;
internalerror_196100_155036129((*e0).info, ((NimStringDesc*) &T839829468_532));
}
LA7: ;
genfieldcheck_553504_839829468(p0, e0, r0, field0, ty0);
memset((void*)LOC9, 0, sizeof(LOC9));
LOC9[0] = (*field0).loc.r;
LOC10 = (Ropeobj178006*)0;
LOC10 = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_551), LOC9, 1);
add_178482_2381377266(&r0, LOC10);
putintodest_550468_839829468(p0, d0, (*field0).typ, r0, a0.s);
}
goto LA1;
LA3: ;
{
genrecordfield_553448_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 0)], d0);
}
LA1: ;
}
/* Machine-generated (Nim C backend; appears to be `startBlock`).
 * Opens a new codegen block: writes the block's opening line (e.g. `{`)
 * into the statements section, allocates a fresh label id, appends a
 * Tblock entry to p0->blocks (recording the label id and the current
 * try/except nesting depth so `break` can unwind correctly), and returns
 * the new block's index. Paired with endblock_544060. */
N_NIMCALL(NI, startblock_543978_839829468)(Tcproc529021* p0, NimStringDesc* start0, Ropeobj178006** args0, NI args0Len0) {
NI result0;
result0 = (NI)0;
linecg_532707_839829468(p0, ((Tcprocsection529011) 2), start0, args0, args0Len0);
(*p0).labels += ((NI) 1);
result0 = ((*p0).blocks ? (*p0).blocks->Sup.len : 0);
(*p0).blocks = (TY529095*) setLengthSeq(&((*p0).blocks)->Sup, sizeof(Tblock529019), ((NI) ((NI)(result0 + ((NI) 1)))));
(*p0).blocks->data[result0].id = ((NI) ((*p0).labels));
(*p0).blocks->data[result0].nestedtrystmts = ((NI16) (((*p0).nestedtrystmts ? (*p0).nestedtrystmts->Sup.len : 0)));
(*p0).blocks->data[result0].nestedexceptstmts = ((NI16) ((*p0).inexceptblock));
return result0;
}
/* Machine-generated (Nim C backend; appears to be `blockBody`).
 * Concatenates a block's three output sections (0, 1, 2) into one rope;
 * if the block opened stack frames (framelen > 0), a frame-setup snippet
 * (format T839829468_554) is inserted after section 0. */
N_NIMCALL(Ropeobj178006*, blockbody_544025_839829468)(Tblock529019* b0) {
Ropeobj178006* result0;
result0 = (Ropeobj178006*)0;
result0 = (*b0).sections[(((Tcprocsection529011) 0))- 0];
{
TY178507 LOC5;
if (!(((NI16) 0) < (*b0).framelen)) goto LA3;
memset((void*)LOC5, 0, sizeof(LOC5));
LOC5[0] = rope_178401_2381377266(((NI64) ((*b0).framelen)));
addf_179205_2381377266(&result0, ((NimStringDesc*) &T839829468_554), LOC5, 1);
}
LA3: ;
add_178482_2381377266(&result0, (*b0).sections[(((Tcprocsection529011) 1))- 0]);
add_178482_2381377266(&result0, (*b0).sections[(((Tcprocsection529011) 2))- 0]);
return result0;
}
/* Machine-generated (Nim C backend; appears to be the two-arg `endBlock`).
 * Closes the topmost codegen block: renders its accumulated body, splices
 * it into the parent block's statements section, pops the block stack,
 * and emits the caller-supplied closing text (`blockend0`). */
N_NIMCALL(void, endblock_544035_839829468)(Tcproc529021* p0, Ropeobj178006* blockend0) {
NI topblock0;
Ropeobj178006* LOC1;
topblock0 = (NI)(((*p0).blocks ? (*p0).blocks->Sup.len : 0) - ((NI) 1));
LOC1 = (Ropeobj178006*)0;
LOC1 = blockbody_544025_839829468((&(*p0).blocks->data[topblock0]));
add_178482_2381377266(&(*p0).blocks->data[(NI)(topblock0 - ((NI) 1))].sections[(((Tcprocsection529011) 2))- 0], LOC1);
(*p0).blocks = (TY529095*) setLengthSeq(&((*p0).blocks)->Sup, sizeof(Tblock529019), ((NI) (topblock0)));
line_532690_839829468(p0, ((Tcprocsection529011) 2), blockend0);
}
/* Machine-generated (Nim C backend; appears to be the no-arg `endBlock`).
 * Builds the closing text for the topmost block -- a labelled close
 * (format T839829468_552) if the block carries a `break` label, else a
 * plain close (T839829468_160); appends frame-teardown (T839829468_553)
 * when the block opened stack frames -- then delegates to the two-arg
 * endblock to pop and emit. */
N_NIMCALL(void, endblock_544060_839829468)(Tcproc529021* p0) {
NI topblock0;
Ropeobj178006* blockend0;
NI16 framelen0;
topblock0 = (NI)(((*p0).blocks ? (*p0).blocks->Sup.len : 0) - ((NI) 1));
{
TY178507 LOC5;
if (!!(((*p0).blocks->data[topblock0].label == NIM_NIL))) goto LA3;
memset((void*)LOC5, 0, sizeof(LOC5));
LOC5[0] = (*p0).blocks->data[topblock0].label;
blockend0 = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_552), LOC5, 1);
}
goto LA1;
LA3: ;
{
TY533289 LOC7;
memset((void*)LOC7, 0, sizeof(LOC7));
blockend0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_160), LOC7, 0);
}
LA1: ;
framelen0 = (*p0).blocks->data[topblock0].framelen;
{
TY178507 LOC12;
if (!(((NI16) 0) < framelen0)) goto LA10;
memset((void*)LOC12, 0, sizeof(LOC12));
LOC12[0] = rope_178401_2381377266(((NI64) (framelen0)));
addf_179205_2381377266(&blockend0, ((NimStringDesc*) &T839829468_553), LOC12, 1);
}
LA10: ;
endblock_544035_839829468(p0, blockend0);
}
/* Machine-generated (Nim C backend; appears to be `genBlock`).
 * Emits a Nim `block` statement/expression: allocates a temp for the
 * block's value if it has a non-empty type and d0 is undecided; opens a
 * codegen block, saving/restoring p0->breakidx so nested `break` targets
 * resolve correctly; if the block is labelled (child 0 is not nkEmpty,
 * kind 1), records the block index (+1) on the label symbol so `break
 * label` can find it; then emits the body (child 1) and closes the block. */
N_NIMCALL(void, genblock_546083_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0) {
NI oldbreakidx_546099_839829468;
TY533289 LOC8;
{
NIM_BOOL LOC3;
NIM_BOOL LOC4;
LOC3 = (NIM_BOOL)0;
LOC4 = (NIM_BOOL)0;
LOC4 = isemptytype_297440_850551059((*n0).typ);
LOC3 = !(LOC4);
if (!(LOC3)) goto LA5;
LOC3 = ((*d0).k == ((Tlockind292808) 0));
LA5: ;
if (!LOC3) goto LA6;
gettemp_537032_839829468(p0, (*n0).typ, d0, NIM_FALSE);
}
LA6: ;
oldbreakidx_546099_839829468 = (*p0).breakidx;
memset((void*)LOC8, 0, sizeof(LOC8));
(*p0).breakidx = startblock_543978_839829468(p0, ((NimStringDesc*) &T839829468_273), LOC8, 0);
{
Tsym292834* sym0;
if (!!(((*(*n0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind292020) 1)))) goto LA11;
sym0 = (*(*n0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym;
(*sym0).loc.k = ((Tlockind292808) 10);
(*sym0).position = (NI)((*p0).breakidx + ((NI) 1));
}
LA11: ;
expr_539248_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 1)], d0);
endblock_544060_839829468(p0);
(*p0).breakidx = oldbreakidx_546099_839829468;
}
/* Machine-generated (Nim C backend; appears to be `genStmtListExpr`).
 * Emits a statement-list expression: all children except the last are
 * generated as plain statements; the last child (if any) is generated as
 * an expression whose value goes into d0. The while/goto shape is the
 * lowered form of a Nim `for i in 0 .. length-2` loop. */
N_NIMCALL(void, genstmtlistexpr_558402_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0) {
NI length0;
length0 = sonslen_295351_850551059(n0);
{
NI i_558420_839829468;
NI HEX3Atmp_558424_839829468;
NI res_558427_839829468;
i_558420_839829468 = (NI)0;
HEX3Atmp_558424_839829468 = (NI)0;
HEX3Atmp_558424_839829468 = (NI)(length0 - ((NI) 2));
res_558427_839829468 = ((NI) 0);
{
while (1) {
if (!(res_558427_839829468 <= HEX3Atmp_558424_839829468)) goto LA3;
i_558420_839829468 = res_558427_839829468;
genstmts_539244_839829468(p0, (*n0).kindU.S6.sons->data[i_558420_839829468]);
res_558427_839829468 += ((NI) 1);
} LA3: ;
}
}
{
if (!(((NI) 0) < length0)) goto LA6;
expr_539248_839829468(p0, (*n0).kindU.S6.sons->data[(NI)(length0 - ((NI) 1))], d0);
}
LA6: ;
}
/* Machine-generated (Nim C backend; appears to be `genIf`).
 * Emits an if/elif/else chain (statement or expression form) using labels
 * and gotos in the target C. Per branch:
 *  - 2-son child = (condition, body): open a block, evaluate the
 *    condition, emit `if (!cond) goto lelse;` (format T839829468_555),
 *    emit the body, close the block, jump to lend (T839829468_556) if
 *    there are further branches, then place lelse;
 *  - 1-son child = else body: open a block, emit it, close;
 *  - anything else is an internal error (T839829468_557).
 * A temp is allocated up front for the if-expression value when needed;
 * the C++-mode branch (gcmd == 2 / module flag 27) wraps each body in
 * extra text T839829468_223 / T839829468_280 -- presumably try/catch
 * scaffolding; TODO confirm. lend is only emitted when there is more than
 * one branch. Do not restructure this label net by hand. */
N_NIMCALL(void, genif_544982_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0) {
Tloc292816 a0;
Ropeobj178006* lelse0;
Ropeobj178006* lend0;
memset((void*)(&a0), 0, sizeof(a0));
lelse0 = (Ropeobj178006*)0;
{
NIM_BOOL LOC3;
NIM_BOOL LOC4;
LOC3 = (NIM_BOOL)0;
LOC4 = (NIM_BOOL)0;
LOC4 = isemptytype_297440_850551059((*n0).typ);
LOC3 = !(LOC4);
if (!(LOC3)) goto LA5;
LOC3 = ((*d0).k == ((Tlockind292808) 0));
LA5: ;
if (!LOC3) goto LA6;
gettemp_537032_839829468(p0, (*n0).typ, d0, NIM_FALSE);
}
LA6: ;
genlinedir_532823_839829468(p0, n0);
lend0 = getlabel_539217_839829468(p0);
{
NI i_545011_839829468;
NI HEX3Atmp_545435_839829468;
NI LOC9;
NI res_545438_839829468;
i_545011_839829468 = (NI)0;
HEX3Atmp_545435_839829468 = (NI)0;
LOC9 = (NI)0;
LOC9 = sonslen_295351_850551059(n0);
HEX3Atmp_545435_839829468 = (NI)(LOC9 - ((NI) 1));
res_545438_839829468 = ((NI) 0);
{
while (1) {
Tnode292802* it0;
if (!(res_545438_839829468 <= HEX3Atmp_545435_839829468)) goto LA11;
i_545011_839829468 = res_545438_839829468;
{
NIM_BOOL LOC14;
LOC14 = (NIM_BOOL)0;
LOC14 = ((*d0).k == ((Tlockind292808) 1));
if (!(LOC14)) goto LA15;
LOC14 = isemptytype_297440_850551059((*n0).typ);
LA15: ;
if (!LOC14) goto LA16;
(*d0).k = ((Tlockind292808) 0);
}
LA16: ;
it0 = (*n0).kindU.S6.sons->data[i_545011_839829468];
{
NI LOC20;
TY533289 LOC23;
NI LOC24;
TY532811 LOC25;
LOC20 = (NI)0;
LOC20 = len_293081_850551059(it0);
if (!(LOC20 == ((NI) 2))) goto LA21;
memset((void*)LOC23, 0, sizeof(LOC23));
LOC24 = (NI)0;
LOC24 = startblock_543978_839829468(p0, ((NimStringDesc*) &T839829468_273), LOC23, 0);
initlocexprsingleuse_539289_839829468(p0, (*it0).kindU.S6.sons->data[((NI) 0)], (&a0));
lelse0 = getlabel_539217_839829468(p0);
(*p0).labels += ((NI) 1);
memset((void*)LOC25, 0, sizeof(LOC25));
LOC25[0] = rdloc_538188_839829468(a0);
LOC25[1] = lelse0;
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_555), LOC25, 2);
{
NIM_BOOL LOC28;
Ropeobj178006** LOC32;
Ropeobj178006** LOC33;
LOC28 = (NIM_BOOL)0;
LOC28 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC28) goto LA29;
LOC28 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA29: ;
if (!LOC28) goto LA30;
LOC32 = (Ropeobj178006**)0;
LOC32 = s_529179_3723162438(p0, ((Tcprocsection529011) 2));
add_178487_2381377266(LOC32, ((NimStringDesc*) &T839829468_223));
expr_539248_839829468(p0, (*it0).kindU.S6.sons->data[((NI) 1)], d0);
LOC33 = (Ropeobj178006**)0;
LOC33 = s_529179_3723162438(p0, ((Tcprocsection529011) 2));
add_178487_2381377266(LOC33, ((NimStringDesc*) &T839829468_280));
}
goto LA26;
LA30: ;
{
expr_539248_839829468(p0, (*it0).kindU.S6.sons->data[((NI) 1)], d0);
}
LA26: ;
endblock_544060_839829468(p0);
{
NI LOC37;
TY178507 LOC40;
LOC37 = (NI)0;
LOC37 = sonslen_295351_850551059(n0);
if (!(((NI) 1) < LOC37)) goto LA38;
memset((void*)LOC40, 0, sizeof(LOC40));
LOC40[0] = lend0;
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_556), LOC40, 1);
}
LA38: ;
fixlabel_539230_839829468(p0, lelse0);
}
goto LA18;
LA21: ;
{
NI LOC42;
TY533289 LOC45;
NI LOC46;
LOC42 = (NI)0;
LOC42 = len_293081_850551059(it0);
if (!(LOC42 == ((NI) 1))) goto LA43;
memset((void*)LOC45, 0, sizeof(LOC45));
LOC46 = (NI)0;
LOC46 = startblock_543978_839829468(p0, ((NimStringDesc*) &T839829468_273), LOC45, 0);
expr_539248_839829468(p0, (*it0).kindU.S6.sons->data[((NI) 0)], d0);
endblock_544060_839829468(p0);
}
goto LA18;
LA43: ;
{
internalerror_196100_155036129((*n0).info, ((NimStringDesc*) &T839829468_557));
}
LA18: ;
res_545438_839829468 += ((NI) 1);
} LA11: ;
}
}
{
NI LOC50;
LOC50 = (NI)0;
LOC50 = sonslen_295351_850551059(n0);
if (!(((NI) 1) < LOC50)) goto LA51;
fixlabel_539230_839829468(p0, lend0);
}
LA51: ;
}
/* Machine-generated (Nim C backend; appears to be `downConv`).
 * Emits an object down-conversion (supertype -> subtype view). In C++
 * mode (gcmd == 2 / module flag 27) the conversion is a no-op and the
 * operand is emitted directly. Otherwise: nested object-conversion nodes
 * (kind 66) are collapsed, the operand is evaluated, and the access path
 * is built by appending `->Sup` for refs (T839829468_558) or `.Sup`
 * (T839829468_153) repeatedly -- once per inheritance level, counted by
 * |inheritancediff(dest, src)| (the loop starts at 2 because the first
 * hop was appended above). For a ref result into an undecided ref-like
 * destination, a temp is allocated and assigned via T839829468_559 so the
 * address-of the Sup field can be taken; otherwise the rope is wrapped in
 * `&` (T839829468_52) or emitted as-is for value conversions. */
N_NIMCALL(void, downconv_558581_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0) {
{
NIM_BOOL LOC3;
LOC3 = (NIM_BOOL)0;
LOC3 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC3) goto LA4;
LOC3 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA4: ;
if (!LOC3) goto LA5;
expr_539248_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], d0);
}
goto LA1;
LA5: ;
{
Ttype292840* dest0;
Tnode292802* arg0;
Ttype292840* src0;
Tloc292816 a0;
Ropeobj178006* r0;
NIM_BOOL isref0;
Ttype292840* LOC10;
dest0 = skiptypes_296099_850551059((*n0).typ, IL64(211106247256320));
arg0 = (*n0).kindU.S6.sons->data[((NI) 0)];
{
while (1) {
if (!((*arg0).kind == ((Tnodekind292020) 66))) goto LA9;
arg0 = (*arg0).kindU.S6.sons->data[((NI) 0)];
} LA9: ;
}
src0 = skiptypes_296099_850551059((*arg0).typ, IL64(211106247256320));
memset((void*)(&a0), 0, sizeof(a0));
initlocexpr_539283_839829468(p0, arg0, (&a0));
r0 = rdloc_538188_839829468(a0);
LOC10 = (Ttype292840*)0;
LOC10 = skiptypes_296099_850551059((*arg0).typ, IL64(211106232576256));
isref0 = ((*LOC10).kind == ((Ttypekind292244) 22) || (*LOC10).kind == ((Ttypekind292244) 21) || (*LOC10).kind == ((Ttypekind292244) 23));
{
if (!isref0) goto LA13;
add_178487_2381377266(&r0, ((NimStringDesc*) &T839829468_558));
}
goto LA11;
LA13: ;
{
add_178487_2381377266(&r0, ((NimStringDesc*) &T839829468_153));
}
LA11: ;
{
NI i_558650_839829468;
NI HEX3Atmp_558677_839829468;
NI LOC17;
NI res_558680_839829468;
i_558650_839829468 = (NI)0;
HEX3Atmp_558677_839829468 = (NI)0;
LOC17 = (NI)0;
LOC17 = inheritancediff_326252_3876443242(dest0, src0);
HEX3Atmp_558677_839829468 = (LOC17 > 0? (LOC17) : -(LOC17));
res_558680_839829468 = ((NI) 2);
{
while (1) {
if (!(res_558680_839829468 <= HEX3Atmp_558677_839829468)) goto LA19;
i_558650_839829468 = res_558680_839829468;
add_178487_2381377266(&r0, ((NimStringDesc*) &T839829468_153));
res_558680_839829468 += ((NI) 1);
} LA19: ;
}
}
{
if (!isref0) goto LA22;
{
NIM_BOOL LOC26;
Ttype292840* LOC28;
TY532811 LOC31;
LOC26 = (NIM_BOOL)0;
LOC26 = ((*d0).k == ((Tlockind292808) 0));
if (!(LOC26)) goto LA27;
LOC28 = (Ttype292840*)0;
LOC28 = skiptypes_296099_850551059((*n0).typ, IL64(211106232576256));
LOC26 = ((*LOC28).kind == ((Ttypekind292244) 22) || (*LOC28).kind == ((Ttypekind292244) 21) || (*LOC28).kind == ((Ttypekind292244) 23));
LA27: ;
if (!LOC26) goto LA29;
gettemp_537032_839829468(p0, (*n0).typ, d0, NIM_FALSE);
memset((void*)LOC31, 0, sizeof(LOC31));
LOC31[0] = rdloc_538188_839829468((*d0));
LOC31[1] = r0;
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_559), LOC31, 2);
}
goto LA24;
LA29: ;
{
r0 = HEX26_178452_2381377266(((NimStringDesc*) &T839829468_52), r0);
putintodest_550468_839829468(p0, d0, (*n0).typ, r0, a0.s);
}
LA24: ;
}
goto LA20;
LA22: ;
{
putintodest_550468_839829468(p0, d0, (*n0).typ, r0, a0.s);
}
LA20: ;
}
LA1: ;
}
N_NIMCALL(void, upconv_558431_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0) {
Tloc292816 a0;
Ttype292840* dest0;
memset((void*)(&a0), 0, sizeof(a0));
initlocexpr_539283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (&a0));
dest0 = skiptypes_296099_850551059((*n0).typ, IL64(211106247256320));
{
NIM_BOOL LOC3;
NIM_BOOL LOC5;
Ropeobj178006* r0;
Ropeobj178006* nilcheck0;
Ttype292840* t0;
LOC3 = (NIM_BOOL)0;
LOC3 = (((*p0).options &(1U<<((NU)(((Toption169009) 1))&31U)))!=0);
if (!(LOC3)) goto LA4;
LOC5 = (NIM_BOOL)0;
LOC5 = isobjlackingtypefield_533513_839829468(dest0);
LOC3 = !(LOC5);
LA4: ;
if (!LOC3) goto LA6;
r0 = rdloc_538188_839829468(a0);
nilcheck0 = NIM_NIL;
t0 = skiptypes_296099_850551059(a0.t, IL64(211106232576256));
{
while (1) {
Ttype292840* LOC23;
if (!((*t0).kind == ((Ttypekind292244) 23) || (*t0).kind == ((Ttypekind292244) 21) || (*t0).kind == ((Ttypekind292244) 22))) goto LA9;
{
if (!!(((*t0).kind == ((Ttypekind292244) 23)))) goto LA12;
nilcheck0 = r0;
}
LA12: ;
{
NIM_BOOL LOC16;
NIM_BOOL LOC18;
TY178507 LOC22;
LOC16 = (NIM_BOOL)0;
LOC16 = !(((*t0).kind == ((Ttypekind292244) 23)));
if (LOC16) goto LA17;
LOC18 = (NIM_BOOL)0;
LOC18 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC18) goto LA19;
LOC18 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA19: ;
LOC16 = !(LOC18);
LA17: ;
if (!LOC16) goto LA20;
memset((void*)LOC22, 0, sizeof(LOC22));
LOC22[0] = r0;
r0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_124), LOC22, 1);
}
LA20: ;
LOC23 = (Ttype292840*)0;
LOC23 = lastson_295377_850551059(t0);
t0 = skiptypes_296099_850551059(LOC23, IL64(211106232576256));
} LA9: ;
}
{
NIM_BOOL LOC26;
LOC26 = (NIM_BOOL)0;
LOC26 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC26) goto LA27;
LOC26 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA27: ;
if (!!(LOC26)) goto LA28;
{
while (1) {
NIM_BOOL LOC32;
LOC32 = (NIM_BOOL)0;
LOC32 = ((*t0).kind == ((Ttypekind292244) 17));
if (!(LOC32)) goto LA33;
LOC32 = !(((*t0).sons->data[((NI) 0)] == NIM_NIL));
LA33: ;
if (!LOC32) goto LA31;
add_178487_2381377266(&r0, ((NimStringDesc*) &T839829468_153));
t0 = skiptypes_296099_850551059((*t0).sons->data[((NI) 0)], IL64(211106247215360));
} LA31: ;
}
}
LA28: ;
{
TY535238 LOC38;
if (!!((nilcheck0 == NIM_NIL))) goto LA36;
memset((void*)LOC38, 0, sizeof(LOC38));
LOC38[0] = nilcheck0;
LOC38[1] = r0;
LOC38[2] = gentypeinfo_535941_839829468((*p0).module, dest0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_560), LOC38, 3);
}
goto LA34;
LA36: ;
{
TY532811 LOC40;
memset((void*)LOC40, 0, sizeof(LOC40));
LOC40[0] = r0;
LOC40[1] = gentypeinfo_535941_839829468((*p0).module, dest0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_561), LOC40, 2);
}
LA34: ;
}
LA6: ;
{
TY532811 LOC45;
Ropeobj178006* LOC46;
if (!!(((*(*(*n0).kindU.S6.sons->data[((NI) 0)]).typ).kind == ((Ttypekind292244) 17)))) goto LA43;
memset((void*)LOC45, 0, sizeof(LOC45));
LOC45[0] = gettypedesc_535671_839829468((*p0).module, (*n0).typ);
LOC45[1] = rdloc_538188_839829468(a0);
LOC46 = (Ropeobj178006*)0;
LOC46 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_430), LOC45, 2);
putintodest_550468_839829468(p0, d0, (*n0).typ, LOC46, a0.s);
}
goto LA41;
LA43: ;
{
TY532811 LOC48;
Ropeobj178006* LOC49;
memset((void*)LOC48, 0, sizeof(LOC48));
LOC48[0] = gettypedesc_535671_839829468((*p0).module, dest0);
LOC48[1] = addrloc_538204_839829468(a0);
LOC49 = (Ropeobj178006*)0;
LOC49 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_429), LOC48, 2);
putintodest_550468_839829468(p0, d0, (*n0).typ, LOC49, a0.s);
}
LA41: ;
}
N_NIMCALL(void, genrangechck_556590_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0, NimStringDesc* magic0) {
Tloc292816 a0;
Ttype292840* dest0;
memset((void*)(&a0), 0, sizeof(a0));
dest0 = skiptypes_296099_850551059((*n0).typ, IL64(211106240964864));
{
NIM_BOOL LOC3;
Ttype292840* LOC5;
TY532811 LOC8;
Ropeobj178006* LOC9;
LOC3 = (NIM_BOOL)0;
LOC3 = !((((*p0).options &(1U<<((NU)(((Toption169009) 3))&31U)))!=0));
if (LOC3) goto LA4;
LOC5 = (Ttype292840*)0;
LOC5 = skiptypes_296099_850551059(dest0, 1048576);
LOC3 = ((*LOC5).kind >= ((Ttypekind292244) 40) && (*LOC5).kind <= ((Ttypekind292244) 44));
LA4: ;
if (!LOC3) goto LA6;
initlocexpr_539283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (&a0));
memset((void*)LOC8, 0, sizeof(LOC8));
LOC8[0] = gettypedesc_535671_839829468((*p0).module, dest0);
LOC8[1] = rdcharloc_538227_839829468(a0);
LOC9 = (Ropeobj178006*)0;
LOC9 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_430), LOC8, 2);
putintodest_550468_839829468(p0, d0, (*n0).typ, LOC9, a0.s);
}
goto LA1;
LA6: ;
{
TY536475 LOC11;
Ropeobj178006* LOC12;
initlocexpr_539283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (&a0));
memset((void*)LOC11, 0, sizeof(LOC11));
LOC11[0] = gettypedesc_535671_839829468((*p0).module, dest0);
LOC11[1] = rdcharloc_538227_839829468(a0);
LOC11[2] = genliteral_549476_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 1)], dest0);
LOC11[3] = genliteral_549476_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 2)], dest0);
LOC11[4] = rope_178277_2381377266(magic0);
LOC12 = (Ropeobj178006*)0;
LOC12 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_562), LOC11, 5);
putintodest_550468_839829468(p0, d0, dest0, LOC12, a0.s);
}
LA1: ;
}
N_NIMCALL(void, convstrtocstr_556642_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0) {
Tloc292816 a0;
Ttype292840* LOC1;
TY178507 LOC2;
Ropeobj178006* LOC3;
memset((void*)(&a0), 0, sizeof(a0));
initlocexpr_539283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (&a0));
LOC1 = (Ttype292840*)0;
LOC1 = skiptypes_296099_850551059((*n0).typ, IL64(211106240964864));
memset((void*)LOC2, 0, sizeof(LOC2));
LOC2[0] = rdloc_538188_839829468(a0);
LOC3 = (Ropeobj178006*)0;
LOC3 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_485), LOC2, 1);
putintodest_550468_839829468(p0, d0, LOC1, LOC3, a0.s);
}
N_NIMCALL(void, convcstrtostr_556654_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0) {
Tloc292816 a0;
Ttype292840* LOC1;
TY178507 LOC2;
Ropeobj178006* LOC3;
memset((void*)(&a0), 0, sizeof(a0));
initlocexpr_539283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (&a0));
LOC1 = (Ttype292840*)0;
LOC1 = skiptypes_296099_850551059((*n0).typ, IL64(211106240964864));
memset((void*)LOC2, 0, sizeof(LOC2));
LOC2[0] = rdloc_538188_839829468(a0);
LOC3 = (Ropeobj178006*)0;
LOC3 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_411), LOC2, 1);
putintodest_550468_839829468(p0, d0, LOC1, LOC3, a0.s);
gcusage_554439_839829468(n0);
}
static N_INLINE(NIM_BOOL, isroutine_297323_850551059)(Tsym292834* s0) {
NIM_BOOL result0;
result0 = (NIM_BOOL)0;
result0 = ((258048 &(1U<<((NU)((*s0).kind)&31U)))!=0);
return result0;
}
static N_INLINE(NIM_BOOL, isconstclosure_557810_839829468)(Tnode292802* n0) {
NIM_BOOL result0;
NIM_BOOL LOC1;
NIM_BOOL LOC2;
result0 = (NIM_BOOL)0;
LOC1 = (NIM_BOOL)0;
LOC2 = (NIM_BOOL)0;
LOC2 = ((*(*n0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind292020) 3));
if (!(LOC2)) goto LA3;
LOC2 = isroutine_297323_850551059((*(*n0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym);
LA3: ;
LOC1 = LOC2;
if (!(LOC1)) goto LA4;
LOC1 = ((*(*n0).kindU.S6.sons->data[((NI) 1)]).kind == ((Tnodekind292020) 23));
LA4: ;
result0 = LOC1;
return result0;
}
N_NIMCALL(void, genclosure_557836_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0) {
{
NIM_BOOL LOC3;
Ropeobj178006* tmp0;
Ropeobj178006* LOC6;
TY535238 LOC7;
LOC3 = (NIM_BOOL)0;
LOC3 = isconstclosure_557810_839829468(n0);
if (!LOC3) goto LA4;
(*(*p0).module).labels += ((NI) 1);
LOC6 = (Ropeobj178006*)0;
LOC6 = rope_178401_2381377266(((NI64) ((*(*p0).module).labels)));
tmp0 = HEX26_178452_2381377266(((NimStringDesc*) &T839829468_566), LOC6);
memset((void*)LOC7, 0, sizeof(LOC7));
LOC7[0] = gettypedesc_535671_839829468((*p0).module, (*n0).typ);
LOC7[1] = tmp0;
LOC7[2] = genconstexpr_554849_839829468(p0, n0);
addf_179205_2381377266(&(*(*p0).module).s[(((Tcfilesection529005) 8))- 0], ((NimStringDesc*) &T839829468_524), LOC7, 3);
putintodest_550468_839829468(p0, d0, (*n0).typ, tmp0, ((Tstorageloc292812) 1));
}
goto LA1;
LA4: ;
{
Tloc292816 tmp0;
Tloc292816 a0;
Tloc292816 b0;
TY535238 LOC14;
memset((void*)(&tmp0), 0, sizeof(tmp0));
memset((void*)(&a0), 0, sizeof(a0));
memset((void*)(&b0), 0, sizeof(b0));
initlocexpr_539283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (&a0));
initlocexpr_539283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 1)], (&b0));
{
Tnode292802* LOC11;
LOC11 = (Tnode292802*)0;
LOC11 = skipconv_328882_3876443242((*n0).kindU.S6.sons->data[((NI) 0)]);
if (!((*LOC11).kind == ((Tnodekind292020) 155))) goto LA12;
internalerror_196100_155036129((*n0).info, ((NimStringDesc*) &T839829468_567));
}
LA12: ;
gettemp_537032_839829468(p0, (*n0).typ, (&tmp0), NIM_FALSE);
memset((void*)LOC14, 0, sizeof(LOC14));
LOC14[0] = rdloc_538188_839829468(tmp0);
LOC14[1] = rdloc_538188_839829468(a0);
LOC14[2] = rdloc_538188_839829468(b0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_568), LOC14, 3);
putlocintodest_539258_839829468(p0, d0, tmp0);
}
LA1: ;
}
static N_INLINE(Ropeobj178006*, assignlabel_544020_839829468)(Tblock529019* b0) {
Ropeobj178006* result0;
Ropeobj178006* LOC1;
result0 = (Ropeobj178006*)0;
LOC1 = (Ropeobj178006*)0;
LOC1 = rope_178401_2381377266(((NI64) ((*b0).id)));
unsureAsgnRef((void**) (&(*b0).label), HEX26_178452_2381377266(((NimStringDesc*) &T839829468_296), LOC1));
result0 = (*b0).label;
return result0;
}
N_NIMCALL(void, gencomputedgoto_545744_839829468)(Tcproc529021* p0, Tnode292802* n0) {
NI casepos0;
NI arraysize0;
NI id0;
Ropeobj178006* tmp0;
TY178507 LOC27;
Ropeobj178006* gotoarray0;
TY532811 LOC28;
TY178507 LOC33;
NI topblock0;
Ropeobj178006* oldbody0;
Ropeobj178006* tailb0;
Ropeobj178006* taila0;
Tnode292802* casestmt0;
Tloc292816 a_545871_839829468;
TY532811 LOC41;
{ casepos0 = ((NI) -1);
arraysize0 = (NI)0;
{
NI i_545768_839829468;
NI HEX3Atmp_545933_839829468;
NI LOC2;
NI res_545936_839829468;
i_545768_839829468 = (NI)0;
HEX3Atmp_545933_839829468 = (NI)0;
LOC2 = (NI)0;
LOC2 = len_293081_850551059(n0);
HEX3Atmp_545933_839829468 = (LOC2 - 1);
res_545936_839829468 = ((NI) 0);
{
while (1) {
Tnode292802* it0;
if (!(res_545936_839829468 <= HEX3Atmp_545933_839829468)) goto LA4;
i_545768_839829468 = res_545936_839829468;
it0 = (*n0).kindU.S6.sons->data[i_545768_839829468];
{
NI64 asize0;
if (!((*it0).kind == ((Tnodekind292020) 97))) goto LA7;
{
Tnode292802* LOC11;
LOC11 = (Tnode292802*)0;
LOC11 = lastson_295364_850551059(it0);
if (!!(((*LOC11).kind == ((Tnodekind292020) 85)))) goto LA12;
localerror_196085_155036129((*it0).info, ((NimStringDesc*) &T839829468_570));
goto BeforeRet;
}
LA12: ;
casepos0 = i_545768_839829468;
asize0 = lengthord_320007_3876443242((*(*it0).kindU.S6.sons->data[((NI) 0)]).typ);
{
if (!(IL64(10000) < asize0)) goto LA16;
localerror_196085_155036129((*it0).info, ((NimStringDesc*) &T839829468_571));
goto BeforeRet;
}
LA16: ;
arraysize0 = ((NI) (asize0));
{
NI64 LOC20;
LOC20 = (NI64)0;
LOC20 = firstord_320001_3876443242((*(*it0).kindU.S6.sons->data[((NI) 0)]).typ);
if (!!((LOC20 == IL64(0)))) goto LA21;
localerror_196085_155036129((*it0).info, ((NimStringDesc*) &T839829468_572));
goto BeforeRet;
}
LA21: ;
}
LA7: ;
res_545936_839829468 += ((NI) 1);
} LA4: ;
}
}
{
if (!(casepos0 < ((NI) 0))) goto LA25;
localerror_196085_155036129((*n0).info, ((NimStringDesc*) &T839829468_573));
goto BeforeRet;
}
LA25: ;
id0 = (NI)(((NI) ((*p0).labels)) + ((NI) 1));
(*p0).labels += (NI)(arraysize0 + ((NI) 1));
memset((void*)LOC27, 0, sizeof(LOC27));
LOC27[0] = rope_178401_2381377266(((NI64) (id0)));
tmp0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_574), LOC27, 1);
memset((void*)LOC28, 0, sizeof(LOC28));
LOC28[0] = tmp0;
LOC28[1] = rope_178401_2381377266(((NI64) (arraysize0)));
gotoarray0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_575), LOC28, 2);
{
NI i_545819_839829468;
NI HEX3Atmp_545941_839829468;
NI res_545944_839829468;
i_545819_839829468 = (NI)0;
HEX3Atmp_545941_839829468 = (NI)0;
HEX3Atmp_545941_839829468 = (NI)(arraysize0 - ((NI) 1));
res_545944_839829468 = ((NI) 1);
{
while (1) {
TY178507 LOC32;
if (!(res_545944_839829468 <= HEX3Atmp_545941_839829468)) goto LA31;
i_545819_839829468 = res_545944_839829468;
memset((void*)LOC32, 0, sizeof(LOC32));
LOC32[0] = rope_178401_2381377266(((NI64) ((NI)(((NI) (id0)) + i_545819_839829468))));
addf_179205_2381377266(&gotoarray0, ((NimStringDesc*) &T839829468_576), LOC32, 1);
res_545944_839829468 += ((NI) 1);
} LA31: ;
}
}
memset((void*)LOC33, 0, sizeof(LOC33));
LOC33[0] = rope_178401_2381377266(((NI64) ((NI)(((NI) (id0)) + arraysize0))));
addf_179205_2381377266(&gotoarray0, ((NimStringDesc*) &T839829468_577), LOC33, 1);
line_532690_839829468(p0, ((Tcprocsection529011) 0), gotoarray0);
topblock0 = (NI)(((*p0).blocks ? (*p0).blocks->Sup.len : 0) - ((NI) 1));
oldbody0 = (*p0).blocks->data[topblock0].sections[(((Tcprocsection529011) 2))- 0];
asgnRefNoCycle((void**) (&(*p0).blocks->data[topblock0].sections[(((Tcprocsection529011) 2))- 0]), NIM_NIL);
{
NI j_545854_839829468;
NI HEX3Atmp_545949_839829468;
NI HEX3Atmp_545950_839829468;
NI LOC35;
NI res_545953_839829468;
j_545854_839829468 = (NI)0;
HEX3Atmp_545949_839829468 = (NI)0;
HEX3Atmp_545950_839829468 = (NI)0;
HEX3Atmp_545949_839829468 = (NI)(casepos0 + ((NI) 1));
LOC35 = (NI)0;
LOC35 = len_293081_850551059(n0);
HEX3Atmp_545950_839829468 = (LOC35 - 1);
res_545953_839829468 = HEX3Atmp_545949_839829468;
{
while (1) {
if (!(res_545953_839829468 <= HEX3Atmp_545950_839829468)) goto LA37;
j_545854_839829468 = res_545953_839829468;
genstmts_539244_839829468(p0, (*n0).kindU.S6.sons->data[j_545854_839829468]);
res_545953_839829468 += ((NI) 1);
} LA37: ;
}
}
tailb0 = (*p0).blocks->data[topblock0].sections[(((Tcprocsection529011) 2))- 0];
asgnRefNoCycle((void**) (&(*p0).blocks->data[topblock0].sections[(((Tcprocsection529011) 2))- 0]), NIM_NIL);
{
NI j_545866_839829468;
NI HEX3Atmp_545958_839829468;
NI res_545961_839829468;
j_545866_839829468 = (NI)0;
HEX3Atmp_545958_839829468 = (NI)0;
HEX3Atmp_545958_839829468 = (NI)(casepos0 - ((NI) 1));
res_545961_839829468 = ((NI) 0);
{
while (1) {
if (!(res_545961_839829468 <= HEX3Atmp_545958_839829468)) goto LA40;
j_545866_839829468 = res_545961_839829468;
genstmts_539244_839829468(p0, (*n0).kindU.S6.sons->data[j_545866_839829468]);
res_545961_839829468 += ((NI) 1);
} LA40: ;
}
}
taila0 = (*p0).blocks->data[topblock0].sections[(((Tcprocsection529011) 2))- 0];
asgnRefNoCycle((void**) (&(*p0).blocks->data[topblock0].sections[(((Tcprocsection529011) 2))- 0]), HEX26_178418_2381377266(oldbody0, taila0));
casestmt0 = (*n0).kindU.S6.sons->data[casepos0];
memset((void*)(&a_545871_839829468), 0, sizeof(a_545871_839829468));
initlocexpr_539283_839829468(p0, (*casestmt0).kindU.S6.sons->data[((NI) 0)], (&a_545871_839829468));
memset((void*)LOC41, 0, sizeof(LOC41));
LOC41[0] = tmp0;
LOC41[1] = rdloc_538188_839829468(a_545871_839829468);
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_578), LOC41, 2);
{
NI i_545894_839829468;
NI HEX3Atmp_545977_839829468;
NI LOC43;
NI res_545980_839829468;
i_545894_839829468 = (NI)0;
HEX3Atmp_545977_839829468 = (NI)0;
LOC43 = (NI)0;
LOC43 = len_293081_850551059(casestmt0);
HEX3Atmp_545977_839829468 = (LOC43 - 1);
res_545980_839829468 = ((NI) 1);
{
while (1) {
TY533289 LOC46;
NI LOC47;
Tnode292802* it0;
Tnode292802* LOC57;
Ropeobj178006** LOC58;
Ropeobj178006** LOC59;
Tloc292816 a0;
TY532811 LOC60;
if (!(res_545980_839829468 <= HEX3Atmp_545977_839829468)) goto LA45;
i_545894_839829468 = res_545980_839829468;
memset((void*)LOC46, 0, sizeof(LOC46));
LOC47 = (NI)0;
LOC47 = startblock_543978_839829468(p0, ((NimStringDesc*) &T839829468_273), LOC46, 0);
it0 = (*casestmt0).kindU.S6.sons->data[i_545894_839829468];
{
NI j_545910_839829468;
NI HEX3Atmp_545969_839829468;
NI LOC49;
NI res_545972_839829468;
j_545910_839829468 = (NI)0;
HEX3Atmp_545969_839829468 = (NI)0;
LOC49 = (NI)0;
LOC49 = len_293081_850551059(it0);
HEX3Atmp_545969_839829468 = (NI)(LOC49 - ((NI) 2));
res_545972_839829468 = ((NI) 0);
{
while (1) {
NI64 val0;
TY178507 LOC56;
if (!(res_545972_839829468 <= HEX3Atmp_545969_839829468)) goto LA51;
j_545910_839829468 = res_545972_839829468;
{
if (!((*(*it0).kindU.S6.sons->data[j_545910_839829468]).kind == ((Tnodekind292020) 44))) goto LA54;
localerror_196085_155036129((*it0).info, ((NimStringDesc*) &T839829468_579));
goto BeforeRet;
}
LA54: ;
val0 = getordvalue_320129_3876443242((*it0).kindU.S6.sons->data[j_545910_839829468]);
memset((void*)LOC56, 0, sizeof(LOC56));
LOC56[0] = intliteral_539270_839829468((NI64)((NI64)(val0 + ((NI64) (id0))) + IL64(1)));
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_580), LOC56, 1);
res_545972_839829468 += ((NI) 1);
} LA51: ;
}
}
LOC57 = (Tnode292802*)0;
LOC57 = lastson_295364_850551059(it0);
genstmts_539244_839829468(p0, LOC57);
LOC58 = (Ropeobj178006**)0;
LOC58 = s_529179_3723162438(p0, ((Tcprocsection529011) 2));
add_178482_2381377266(LOC58, tailb0);
LOC59 = (Ropeobj178006**)0;
LOC59 = s_529179_3723162438(p0, ((Tcprocsection529011) 2));
add_178482_2381377266(LOC59, taila0);
memset((void*)(&a0), 0, sizeof(a0));
initlocexpr_539283_839829468(p0, (*casestmt0).kindU.S6.sons->data[((NI) 0)], (&a0));
memset((void*)LOC60, 0, sizeof(LOC60));
LOC60[0] = tmp0;
LOC60[1] = rdloc_538188_839829468(a0);
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_578), LOC60, 2);
endblock_544060_839829468(p0);
res_545980_839829468 += ((NI) 1);
} LA45: ;
}
}
}BeforeRet: ;
}
N_NIMCALL(void, genwhilestmt_545984_839829468)(Tcproc529021* p0, Tnode292802* t0) {
Tloc292816 a0;
NI oldbreakidx_546011_839829468;
TY533289 LOC1;
Tnode292802* loopbody0;
memset((void*)(&a0), 0, sizeof(a0));
(*p0).withinloop += ((NI) 1);
genlinedir_532823_839829468(p0, t0);
oldbreakidx_546011_839829468 = (*p0).breakidx;
memset((void*)LOC1, 0, sizeof(LOC1));
(*p0).breakidx = startblock_543978_839829468(p0, ((NimStringDesc*) &T839829468_569), LOC1, 0);
(*p0).blocks->data[(*p0).breakidx].isloop = NIM_TRUE;
initlocexpr_539283_839829468(p0, (*t0).kindU.S6.sons->data[((NI) 0)], (&a0));
{
NIM_BOOL LOC4;
Ropeobj178006* label0;
TY532811 LOC8;
LOC4 = (NIM_BOOL)0;
LOC4 = !(((*(*t0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind292020) 6)));
if (LOC4) goto LA5;
LOC4 = ((*(*t0).kindU.S6.sons->data[((NI) 0)]).kindU.S1.intval == IL64(0));
LA5: ;
if (!LOC4) goto LA6;
label0 = assignlabel_544020_839829468((&(*p0).blocks->data[(*p0).breakidx]));
memset((void*)LOC8, 0, sizeof(LOC8));
LOC8[0] = rdloc_538188_839829468(a0);
LOC8[1] = label0;
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_555), LOC8, 2);
}
LA6: ;
loopbody0 = (*t0).kindU.S6.sons->data[((NI) 1)];
{
NIM_BOOL LOC11;
LOC11 = (NIM_BOOL)0;
LOC11 = stmtscontainpragma_528083_2036603609(loopbody0, ((Tspecialword275003) 182));
if (!(LOC11)) goto LA12;
LOC11 = ((Cc_273413_2528170400[(ccompiler_273431_2528170400)- 1].Field20 &(1U<<((NU)(((Tinfoccprop273004) 1))&7U)))!=0);
LA12: ;
if (!LOC11) goto LA13;
{
NIM_BOOL LOC17;
NI LOC18;
LOC17 = (NIM_BOOL)0;
LOC18 = (NI)0;
LOC18 = len_293081_850551059(loopbody0);
LOC17 = (LOC18 == ((NI) 2));
if (!(LOC17)) goto LA19;
LOC17 = ((*(*loopbody0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind292020) 1));
LA19: ;
if (!LOC17) goto LA20;
loopbody0 = (*loopbody0).kindU.S6.sons->data[((NI) 1)];
}
LA20: ;
gencomputedgoto_545744_839829468(p0, loopbody0);
}
goto LA9;
LA13: ;
{
genstmts_539244_839829468(p0, loopbody0);
}
LA9: ;
{
TY533289 LOC27;
if (!(((*p0).options &(1U<<((NU)(((Toption169009) 19))&31U)))!=0)) goto LA25;
memset((void*)LOC27, 0, sizeof(LOC27));
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_581), LOC27, 0);
}
LA25: ;
endblock_544060_839829468(p0);
(*p0).breakidx = oldbreakidx_546011_839829468;
(*p0).withinloop -= ((NI) 1);
}
N_NIMCALL(void, gengotovar_544258_839829468)(Tcproc529021* p0, Tnode292802* value0) {
{
if (!!(((*value0).kind >= ((Tnodekind292020) 5) && (*value0).kind <= ((Tnodekind292020) 15)))) goto LA3;
localerror_196085_155036129((*value0).info, ((NimStringDesc*) &T839829468_582));
}
goto LA1;
LA3: ;
{
TY178507 LOC6;
memset((void*)LOC6, 0, sizeof(LOC6));
LOC6[0] = rope_178401_2381377266((*value0).kindU.S1.intval);
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_583), LOC6, 1);
}
LA1: ;
}
N_NIMCALL(void, varindynamiclib_538812_839829468)(Tcgen529027* m0, Tsym292834* sym0) {
Tlib292820* lib0;
Ropeobj178006* extname0;
Ropeobj178006* tmp0;
TY535235 LOC1;
NimStringDesc* LOC2;
TY532811 LOC3;
lib0 = (*sym0).annex;
extname0 = (*sym0).loc.r;
loaddynamiclib_559480_839829468(m0, lib0);
(*sym0).loc.flags |= ((NU16)1)<<((((Tlocflag292810) 0))%(sizeof(NU16)*8));
tmp0 = mangledynlibproc_538816_839829468(sym0);
asgnRefNoCycle((void**) (&(*sym0).loc.r), tmp0);
(*m0).labels += ((NI) 2);
memset((void*)LOC1, 0, sizeof(LOC1));
LOC1[0] = tmp0;
LOC1[1] = gettypedesc_535671_839829468(m0, (*sym0).typ);
LOC1[2] = (*lib0).name;
LOC2 = (NimStringDesc*)0;
LOC2 = HEX24_178856_2381377266(extname0);
LOC1[3] = makecstring_191638_155036129(LOC2);
appcg_532632_839829468(m0, &(*m0).s[(((Tcfilesection529005) 16))- 0], ((NimStringDesc*) &T839829468_584), LOC1, 4);
memset((void*)LOC3, 0, sizeof(LOC3));
LOC3[0] = (*sym0).loc.r;
LOC3[1] = gettypedesc_535671_839829468(m0, (*sym0).loc.t);
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 9))- 0], ((NimStringDesc*) &T839829468_585), LOC3, 2);
}
N_NIMCALL(void, assignglobalvar_538819_839829468)(Tcproc529021* p0, Tsym292834* s0) {
{ {
Ropeobj178006* LOC5;
if (!((*s0).loc.k == ((Tlockind292808) 0))) goto LA3;
LOC5 = (Ropeobj178006*)0;
LOC5 = manglename_533205_839829468(s0);
fillloc_532282_839829468((&(*s0).loc), ((Tlockind292808) 3), (*s0).typ, LOC5, ((Tstorageloc292812) 3));
}
LA3: ;
{
Tcgen529027* q0;
if (!(((*s0).loc.flags &(1U<<((NU)(((Tlocflag292810) 4))&15U)))!=0)) goto LA8;
q0 = findpendingmodule_532241_839829468((*p0).module, s0);
{
NIM_BOOL LOC12;
NIM_BOOL LOC14;
LOC12 = (NIM_BOOL)0;
LOC12 = !((q0 == NIM_NIL));
if (!(LOC12)) goto LA13;
LOC14 = (NIM_BOOL)0;
LOC14 = containsorincl_268862_2627731572((&(*q0).declaredthings), (*s0).Sup.id);
LOC12 = !(LOC14);
LA13: ;
if (!LOC12) goto LA15;
varindynamiclib_538812_839829468(q0, s0);
}
goto LA10;
LA15: ;
{
asgnRefNoCycle((void**) (&(*s0).loc.r), mangledynlibproc_538816_839829468(s0));
}
LA10: ;
goto BeforeRet;
}
LA8: ;
useheader_532369_839829468((*p0).module, s0);
{
if (!(((*s0).loc.flags &(1U<<((NU)(((Tlocflag292810) 3))&15U)))!=0)) goto LA20;
goto BeforeRet;
}
LA20: ;
{
if (!(((*s0).flags &(1U<<((NU)(((Tsymflag292184) 22))&31U)))!=0)) goto LA24;
declarethreadvar_538676_839829468((*p0).module, s0, (((*s0).flags &(1U<<((NU)(((Tsymflag292184) 5))&31U)))!=0));
}
goto LA22;
LA24: ;
{
Ropeobj178006* decl0;
Ropeobj178006* td0;
decl0 = NIM_NIL;
td0 = gettypedesc_535671_839829468((*p0).module, (*s0).loc.t);
{
TY178507 LOC43;
if (!(*s0).constraint == 0) goto LA29;
{
if (!(((*s0).flags &(1U<<((NU)(((Tsymflag292184) 5))&31U)))!=0)) goto LA33;
add_178487_2381377266(&decl0, ((NimStringDesc*) &T839829468_240));
}
LA33: ;
add_178482_2381377266(&decl0, td0);
{
if (!(((*s0).flags &(1U<<((NU)(((Tsymflag292184) 8))&31U)))!=0)) goto LA37;
add_178487_2381377266(&decl0, ((NimStringDesc*) &T839829468_121));
}
LA37: ;
{
if (!(((*s0).flags &(1U<<((NU)(((Tsymflag292184) 7))&31U)))!=0)) goto LA41;
add_178487_2381377266(&decl0, ((NimStringDesc*) &T839829468_122));
}
LA41: ;
memset((void*)LOC43, 0, sizeof(LOC43));
LOC43[0] = (*s0).loc.r;
addf_179205_2381377266(&decl0, ((NimStringDesc*) &T839829468_242), LOC43, 1);
}
goto LA27;
LA29: ;
{
NimStringDesc* LOC45;
TY532811 LOC46;
LOC45 = (NimStringDesc*)0;
LOC45 = rawNewString((*(*s0).constraint).kindU.S3.strval->Sup.len + 3);
appendString(LOC45, (*(*s0).constraint).kindU.S3.strval);
appendString(LOC45, ((NimStringDesc*) &T839829468_497));
memset((void*)LOC46, 0, sizeof(LOC46));
LOC46[0] = td0;
LOC46[1] = (*s0).loc.r;
decl0 = HEX25_178905_2381377266(LOC45, LOC46, 2);
}
LA27: ;
add_178482_2381377266(&(*(*p0).module).s[(((Tcfilesection529005) 9))- 0], decl0);
}
LA22: ;
{
if (!(((NI) 0) < (*p0).withinloop)) goto LA49;
resetloc_538350_839829468(p0, (&(*s0).loc));
}
LA49: ;
{
TY535238 LOC55;
NimStringDesc* LOC56;
NimStringDesc* LOC57;
if (!(((*(*(*p0).module).module).options & 163840) == 163840)) goto LA53;
memset((void*)LOC55, 0, sizeof(LOC55));
LOC56 = (NimStringDesc*)0;
LOC56 = rawNewString((*(*(*s0).owner).name).s->Sup.len + (*(*s0).name).s->Sup.len + 1);
appendString(LOC56, (*(*(*s0).owner).name).s);
appendChar(LOC56, 46);
appendString(LOC56, (*(*s0).name).s);
LOC57 = (NimStringDesc*)0;
LOC57 = nsuNormalize(LOC56);
LOC55[0] = makecstring_191638_155036129(LOC57);
LOC55[1] = (*s0).loc.r;
LOC55[2] = gentypeinfo_535941_839829468((*p0).module, (*s0).typ);
appcg_532632_839829468((*p0).module, &(*(*p0).module).s[(((Tcfilesection529005) 15))- 0], ((NimStringDesc*) &T839829468_586), LOC55, 3);
}
LA53: ;
}BeforeRet: ;
}
N_NIMCALL(Ropeobj178006*, gentraverseprocforglobal_538032_839829468)(Tcgen529027* m0, Tsym292834* s0) {
Ropeobj178006* result0;
Ropeobj178006* LOC1;
Ttraversalclosure537019 c0;
Tcproc529021* p0;
Ropeobj178006* sloc0;
Ropeobj178006* header0;
TY178507 LOC8;
Ropeobj178006* generatedproc0;
TY535235 LOC9;
Ropeobj178006** LOC10;
Ropeobj178006** LOC11;
Ropeobj178006** LOC12;
TY178507 LOC13;
result0 = (Ropeobj178006*)0;
LOC1 = (Ropeobj178006*)0;
LOC1 = gentypeinfo_535941_839829468(m0, (*s0).loc.t);
memset((void*)(&c0), 0, sizeof(c0));
p0 = newproc_529206_3723162438(NIM_NIL, m0);
sloc0 = (*s0).loc.r;
result0 = gettempname_533596_839829468(m0);
{
NIM_BOOL LOC4;
LOC4 = (NIM_BOOL)0;
LOC4 = (((*s0).flags &(1U<<((NU)(((Tsymflag292184) 22))&31U)))!=0);
if (!(LOC4)) goto LA5;
LOC4 = emulatedthreadvars_532949_839829468();
LA5: ;
if (!LOC4) goto LA6;
accessthreadlocalvar_532945_839829468(p0, s0);
sloc0 = HEX26_178452_2381377266(((NimStringDesc*) &T839829468_288), sloc0);
}
LA6: ;
c0.visitorfrmt = copyString(((NimStringDesc*) &T839829468_587));
c0.p = p0;
memset((void*)LOC8, 0, sizeof(LOC8));
LOC8[0] = result0;
header0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_588), LOC8, 1);
gentraverseproc_537022_839829468((&c0), sloc0, (*s0).loc.t);
memset((void*)LOC9, 0, sizeof(LOC9));
LOC9[0] = header0;
LOC10 = (Ropeobj178006**)0;
LOC10 = s_529179_3723162438(p0, ((Tcprocsection529011) 0));
LOC9[1] = (*LOC10);
LOC11 = (Ropeobj178006**)0;
LOC11 = s_529179_3723162438(p0, ((Tcprocsection529011) 1));
LOC9[2] = (*LOC11);
LOC12 = (Ropeobj178006**)0;
LOC12 = s_529179_3723162438(p0, ((Tcprocsection529011) 2));
LOC9[3] = (*LOC12);
generatedproc0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_190), LOC9, 4);
memset((void*)LOC13, 0, sizeof(LOC13));
LOC13[0] = header0;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 7))- 0], ((NimStringDesc*) &T839829468_191), LOC13, 1);
add_178482_2381377266(&(*m0).s[(((Tcfilesection529005) 10))- 0], generatedproc0);
return result0;
}
N_NIMCALL(void, registergcroot_543762_839829468)(Tcproc529021* p0, Tsym292834* v0) {
{
NIM_BOOL LOC3;
Ropeobj178006* prc0;
Ropeobj178006** LOC7;
TY178507 LOC8;
LOC3 = (NIM_BOOL)0;
LOC3 = ((240 &(1U<<((NU)(gselectedgc_169133_2607990831)&7U)))!=0);
if (!(LOC3)) goto LA4;
LOC3 = containsgarbagecollectedref_320117_3876443242((*v0).loc.t);
LA4: ;
if (!LOC3) goto LA5;
prc0 = gentraverseprocforglobal_538032_839829468((*p0).module, v0);
LOC7 = (Ropeobj178006**)0;
LOC7 = procsec_529194_3723162438((*(*p0).module).initproc, ((Tcprocsection529011) 1));
memset((void*)LOC8, 0, sizeof(LOC8));
LOC8[0] = prc0;
appcg_532632_839829468((*p0).module, LOC7, ((NimStringDesc*) &T839829468_589), LOC8, 1);
}
LA5: ;
}
static N_INLINE(NIM_BOOL, isassignedimmediately_543781_839829468)(Tnode292802* n0) {
NIM_BOOL result0;
{ result0 = (NIM_BOOL)0;
{
if (!((*n0).kind == ((Tnodekind292020) 1))) goto LA3;
result0 = NIM_FALSE;
goto BeforeRet;
}
LA3: ;
{
NIM_BOOL LOC7;
LOC7 = (NIM_BOOL)0;
LOC7 = isinvalidreturntype_533548_839829468((*n0).typ);
if (!LOC7) goto LA8;
result0 = NIM_FALSE;
goto BeforeRet;
}
LA8: ;
result0 = NIM_TRUE;
}BeforeRet: ;
return result0;
}
N_NIMCALL(void, genasgncall_543695_839829468)(Tcproc529021* p0, Tnode292802* le0, Tnode292802* ri0, Tloc292816* d0) {
{
Ttype292840* LOC3;
LOC3 = (Ttype292840*)0;
LOC3 = skiptypes_296099_850551059((*(*ri0).kindU.S6.sons->data[((NI) 0)]).typ, 2048);
if (!((*LOC3).callconv == ((Tcallingconvention292002) 8))) goto LA4;
genclosurecall_540452_839829468(p0, le0, ri0, d0);
}
goto LA1;
LA4: ;
{
NIM_BOOL LOC7;
LOC7 = (NIM_BOOL)0;
LOC7 = ((*(*ri0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind292020) 3));
if (!(LOC7)) goto LA8;
LOC7 = (((*(*(*ri0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA8: ;
if (!LOC7) goto LA9;
geninfixcall_541929_839829468(p0, le0, ri0, d0);
}
goto LA1;
LA9: ;
{
NIM_BOOL LOC12;
LOC12 = (NIM_BOOL)0;
LOC12 = ((*(*ri0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind292020) 3));
if (!(LOC12)) goto LA13;
LOC12 = (((*(*(*ri0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym).flags &(1U<<((NU)(((Tsymflag292184) 28))&31U)))!=0);
LA13: ;
if (!LOC12) goto LA14;
gennamedparamcall_542616_839829468(p0, ri0, d0);
}
goto LA1;
LA14: ;
{
genprefixcall_539960_839829468(p0, le0, ri0, d0);
}
LA1: ;
poststmtactions_532942_839829468(p0);
}
/* NOTE(review): Nim-compiler-generated C. Loads the value of expression ri0
 * into loc a0 (lhs node le0 is passed through for call generation):
 *   - ri0 is one of the call node kinds (26..32) AND its callee is either not
 *     a sym node or a sym with magic == 0 (no compiler magic)
 *       -> genasgncall (emit as a call whose result lands in a0)
 *   - ri0 kind is 47 or 65 (presumably deref/hidden-deref -- TODO confirm)
 *       -> genderef
 *   - otherwise -> generic expr codegen. */
static N_INLINE(void, loadinto_543928_839829468)(Tcproc529021* p0, Tnode292802* le0, Tnode292802* ri0, Tloc292816* a0) {
	{
		NIM_BOOL LOC3;
		NIM_BOOL LOC5;
		LOC3 = (NIM_BOOL)0;
		LOC3 = ((*ri0).kind == ((Tnodekind292020) 27) || (*ri0).kind == ((Tnodekind292020) 29) || (*ri0).kind == ((Tnodekind292020) 30) || (*ri0).kind == ((Tnodekind292020) 31) || (*ri0).kind == ((Tnodekind292020) 26) || (*ri0).kind == ((Tnodekind292020) 28) || (*ri0).kind == ((Tnodekind292020) 32));
		if (!(LOC3)) goto LA4;
		LOC5 = (NIM_BOOL)0;
		LOC5 = !(((*(*ri0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind292020) 3)));
		if (LOC5) goto LA6;
		LOC5 = ((*(*(*ri0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym).magic == ((Tmagic292524) 0));
		LA6: ;
		LOC3 = LOC5;
		LA4: ;
		if (!LOC3) goto LA7;
		genasgncall_543695_839829468(p0, le0, ri0, a0);
	}
	goto LA1;
	LA7: ;
	{
		if (!((*ri0).kind == ((Tnodekind292020) 47) || (*ri0).kind == ((Tnodekind292020) 65))) goto LA10;
		genderef_543921_839829468(p0, ri0, a0, NIM_TRUE);
	}
	goto LA1;
	LA10: ;
	{
		expr_539248_839829468(p0, ri0, a0);
	}
	LA1: ;
}
/* NOTE(review): Nim-compiler-generated C. Generates code for one `var x = v`
 * identdefs node a0 (sons[0] = sym, sons[2] = init value).
 * Rough flow as read from the code:
 *   - if the sym has any of the flag bits in mask 0x40800000|... (1082130432)
 *     it is handled specially: with flag bit 30 set, a goto-variable is
 *     emitted, then the routine returns early;
 *   - flag bit 3 (presumably sfGlobal -- TODO confirm) -> global path:
 *     possibly redirect emission to module preinitproc (flag bit 9),
 *     assignglobalvar + genobjectinit, optional header prototype when flag
 *     bit 6 and a generated header exist, then registergcroot;
 *   - otherwise local path: when the value is assigned immediately, the
 *     module is compiled for C-source debugging or "noforward" (flag bit 27
 *     on the module sym -- TODO confirm flag meaning), no split decls are
 *     pending, and the type contains no hidden pointer, the declaration and
 *     initializer are fused into a single C statement (with a dedicated
 *     fast path for closure-construction calls, flag bit 24 on the callee);
 *     else plain assignlocalvar + initlocalvar.
 *   - finally, unless the init value is empty (kind 1), emit the actual
 *     assignment via loadinto. */
N_NIMCALL(void, gensinglevar_544276_839829468)(Tcproc529021* p0, Tnode292802* a0) {
	Tsym292834* v0;
	Tcproc529021* targetproc0;
	{	v0 = (*(*a0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym;
	{
		if (!!(((1082130432 & (*v0).flags) == 0))) goto LA3;
		{
			if (!(((*v0).flags &(1U<<((NU)(((Tsymflag292184) 30))&31U)))!=0)) goto LA7;
			gengotovar_544258_839829468(p0, (*a0).kindU.S6.sons->data[((NI) 2)]);
		}
		LA7: ;
		goto BeforeRet;
	}
	LA3: ;
	targetproc0 = p0;
	{
		if (!(((*v0).flags &(1U<<((NU)(((Tsymflag292184) 3))&31U)))!=0)) goto LA11;
		{
			NIM_BOOL LOC15;
			NIM_BOOL LOC16;
			LOC15 = (NIM_BOOL)0;
			LOC16 = (NIM_BOOL)0;
			LOC16 = (((*v0).flags & 96) == 32);
			if (!(LOC16)) goto LA17;
			LOC16 = ((*(*a0).kindU.S6.sons->data[((NI) 2)]).kind == ((Tnodekind292020) 1));
			LA17: ;
			LOC15 = LOC16;
			if (!(LOC15)) goto LA18;
			LOC15 = !((((*v0).loc.flags & 72) == 0));
			LA18: ;
			if (!LOC15) goto LA19;
			goto BeforeRet;
		}
		LA19: ;
		{
			if (!(((*v0).flags &(1U<<((NU)(((Tsymflag292184) 9))&31U)))!=0)) goto LA23;
			targetproc0 = (*(*p0).module).preinitproc;
		}
		LA23: ;
		assignglobalvar_538819_839829468(targetproc0, v0);
		genobjectinit_538242_839829468((*(*p0).module).preinitproc, ((Tcprocsection529011) 1), (*v0).typ, (*v0).loc, NIM_TRUE);
		{
			NIM_BOOL LOC27;
			LOC27 = (NIM_BOOL)0;
			LOC27 = (((*v0).flags &(1U<<((NU)(((Tsymflag292184) 6))&31U)))!=0);
			if (!(LOC27)) goto LA28;
			LOC27 = !((generatedheader_532201_839829468 == NIM_NIL));
			LA28: ;
			if (!LOC27) goto LA29;
			genvarprototypeaux_544254_839829468(generatedheader_532201_839829468, v0);
		}
		LA29: ;
		registergcroot_543762_839829468(p0, v0);
	}
	goto LA9;
	LA11: ;
	{
		Tnode292802* value0;
		NIM_BOOL imm0;
		value0 = (*a0).kindU.S6.sons->data[((NI) 2)];
		imm0 = isassignedimmediately_543781_839829468(value0);
		{
			NIM_BOOL LOC34;
			NIM_BOOL LOC35;
			NIM_BOOL LOC36;
			NIM_BOOL LOC38;
			NIM_BOOL LOC42;
			Ropeobj178006* decl0;
			Tloc292816 tmp0;
			LOC34 = (NIM_BOOL)0;
			LOC35 = (NIM_BOOL)0;
			LOC36 = (NIM_BOOL)0;
			LOC36 = imm0;
			if (!(LOC36)) goto LA37;
			LOC38 = (NIM_BOOL)0;
			LOC38 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
			if (LOC38) goto LA39;
			LOC38 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
			LA39: ;
			LOC36 = LOC38;
			LA37: ;
			LOC35 = LOC36;
			if (!(LOC35)) goto LA40;
			LOC35 = ((*p0).splitdecls == ((NI) 0));
			LA40: ;
			LOC34 = LOC35;
			if (!(LOC34)) goto LA41;
			LOC42 = (NIM_BOOL)0;
			LOC42 = containshiddenpointer_320120_3876443242((*v0).typ);
			LOC34 = !(LOC42);
			LA41: ;
			if (!LOC34) goto LA43;
			genlinedir_532823_839829468(p0, a0);
			decl0 = localvardecl_538532_839829468(p0, v0);
			memset((void*)(&tmp0), 0, sizeof(tmp0));
			{
				NIM_BOOL LOC47;
				NIM_BOOL LOC48;
				Tnode292802* LOC50;
				Tnode292802* LOC52;
				Ropeobj178006* params0;
				Ttype292840* typ0;
				TY532811 LOC66;
				LOC47 = (NIM_BOOL)0;
				LOC48 = (NIM_BOOL)0;
				LOC48 = ((*value0).kind == ((Tnodekind292020) 27) || (*value0).kind == ((Tnodekind292020) 29) || (*value0).kind == ((Tnodekind292020) 30) || (*value0).kind == ((Tnodekind292020) 31) || (*value0).kind == ((Tnodekind292020) 26) || (*value0).kind == ((Tnodekind292020) 28) || (*value0).kind == ((Tnodekind292020) 32));
				if (!(LOC48)) goto LA49;
				LOC50 = (Tnode292802*)0;
				LOC50 = HEX5BHEX5D_293238_850551059(value0, ((NI) 0));
				LOC48 = ((*LOC50).kind == ((Tnodekind292020) 3));
				LA49: ;
				LOC47 = LOC48;
				if (!(LOC47)) goto LA51;
				LOC52 = (Tnode292802*)0;
				LOC52 = HEX5BHEX5D_293238_850551059(value0, ((NI) 0));
				LOC47 = (((*(*LOC52).kindU.S4.sym).flags &(1U<<((NU)(((Tsymflag292184) 24))&31U)))!=0);
				LA51: ;
				if (!LOC47) goto LA53;
				params0 = (Ropeobj178006*)0;
				typ0 = skiptypes_296099_850551059((*(*value0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106232576256));
				{
					NI i_544619_839829468;
					NI HEX3Atmp_544825_839829468;
					NI LOC56;
					NI res_544828_839829468;
					i_544619_839829468 = (NI)0;
					HEX3Atmp_544825_839829468 = (NI)0;
					LOC56 = (NI)0;
					LOC56 = len_293081_850551059(value0);
					HEX3Atmp_544825_839829468 = (LOC56 - 1);
					res_544828_839829468 = ((NI) 1);
					{
						while (1) {
							Ropeobj178006* LOC65;
							if (!(res_544828_839829468 <= HEX3Atmp_544825_839829468)) goto LA58;
							i_544619_839829468 = res_544828_839829468;
							{
								TY533289 LOC63;
								Ropeobj178006* LOC64;
								if (!!((params0 == NIM_NIL))) goto LA61;
								memset((void*)LOC63, 0, sizeof(LOC63));
								LOC64 = (Ropeobj178006*)0;
								LOC64 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_110), LOC63, 0);
								add_178482_2381377266(¶ms0, LOC64);
							}
							LA61: ;
							LOC65 = (Ropeobj178006*)0;
							LOC65 = genotherarg_539277_839829468(p0, value0, i_544619_839829468, typ0);
							add_178482_2381377266(¶ms0, LOC65);
							res_544828_839829468 += ((NI) 1);
						} LA58: ;
					}
				}
				memset((void*)LOC66, 0, sizeof(LOC66));
				LOC66[0] = decl0;
				LOC66[1] = params0;
				linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_590), LOC66, 2);
			}
			goto LA45;
			LA53: ;
			{
				TY532811 LOC68;
				initlocexprsingleuse_539289_839829468(p0, value0, (&tmp0));
				memset((void*)LOC68, 0, sizeof(LOC68));
				LOC68[0] = decl0;
				LOC68[1] = rdloc_538188_839829468(tmp0);
				linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_591), LOC68, 2);
			}
			LA45: ;
			goto BeforeRet;
		}
		LA43: ;
		assignlocalvar_538614_839829468(p0, v0);
		initlocalvar_538398_839829468(p0, v0, imm0);
	}
	LA9: ;
	{
		if (!!(((*(*a0).kindU.S6.sons->data[((NI) 2)]).kind == ((Tnodekind292020) 1)))) goto LA71;
		genlinedir_532823_839829468(targetproc0, a0);
		loadinto_543928_839829468(targetproc0, (*a0).kindU.S6.sons->data[((NI) 0)], (*a0).kindU.S6.sons->data[((NI) 2)], (&(*v0).loc));
	}
	LA71: ;
	}BeforeRet: ;
}
/* NOTE(review): Nim-compiler-generated C. Generates a `var` definition whose
 * lhs is not a plain symbol (a closure/environment access). Only emits code
 * when an initializer is present (sons[2] kind != 1, presumably nkEmpty):
 * evaluates the lhs into loc v0 and assigns the rhs via loadinto. */
N_NIMCALL(void, genclosurevar_544832_839829468)(Tcproc529021* p0, Tnode292802* a0) {
	NIM_BOOL immediateasgn0;
	immediateasgn0 = !(((*(*a0).kindU.S6.sons->data[((NI) 2)]).kind == ((Tnodekind292020) 1)));
	{
		Tloc292816 v0;
		if (!immediateasgn0) goto LA3;
		memset((void*)(&v0), 0, sizeof(v0));
		initlocexpr_539283_839829468(p0, (*a0).kindU.S6.sons->data[((NI) 0)], (&v0));
		genlinedir_532823_839829468(p0, a0);
		loadinto_543928_839829468(p0, (*a0).kindU.S6.sons->data[((NI) 0)], (*a0).kindU.S6.sons->data[((NI) 2)], (&v0));
	}
	LA3: ;
}
/* NOTE(review): Nim-compiler-generated C. Generates tuple unpacking for a
 * `var (a, b, ...) = tup` node n0 (must be kind 36, presumably nkVarTuple --
 * internal error otherwise). If any destination son is not a plain symbol,
 * falls back to lowertupleunpacking + genstmts. Otherwise it evaluates the
 * tuple expression once into tup0 and, for each element symbol: skips
 * compile-time symbols (flag bit 23), emits a global (flag bit 3) or local
 * variable, builds a field access rope -- positional `.FieldN` style for
 * anonymous tuples (type kind 18 -- TODO confirm this is tyTuple without an
 * `n`), mangled record-field name otherwise -- and stores it with
 * putlocintodest. */
N_NIMCALL(void, genvartuple_543794_839829468)(Tcproc529021* p0, Tnode292802* n0) {
	Tloc292816 tup0;
	Tloc292816 field0;
	NI L0;
	NIM_BOOL uselowering0;
	Ttype292840* t0;
	{	memset((void*)(&tup0), 0, sizeof(tup0));
	memset((void*)(&field0), 0, sizeof(field0));
	{
		if (!!(((*n0).kind == ((Tnodekind292020) 36)))) goto LA3;
		internalerror_196100_155036129((*n0).info, ((NimStringDesc*) &T839829468_592));
	}
	LA3: ;
	L0 = sonslen_295351_850551059(n0);
	uselowering0 = NIM_FALSE;
	{
		NI i_543822_839829468;
		NI HEX3Atmp_543905_839829468;
		NI res_543908_839829468;
		i_543822_839829468 = (NI)0;
		HEX3Atmp_543905_839829468 = (NI)0;
		HEX3Atmp_543905_839829468 = (NI)(L0 - ((NI) 3));
		res_543908_839829468 = ((NI) 0);
		{
			while (1) {
				if (!(res_543908_839829468 <= HEX3Atmp_543905_839829468)) goto LA7;
				i_543822_839829468 = res_543908_839829468;
				{
					Tnode292802* LOC10;
					LOC10 = (Tnode292802*)0;
					LOC10 = HEX5BHEX5D_293238_850551059(n0, i_543822_839829468);
					if (!!(((*LOC10).kind == ((Tnodekind292020) 3)))) goto LA11;
					uselowering0 = NIM_TRUE;
					goto LA5;
				}
				LA11: ;
				res_543908_839829468 += ((NI) 1);
			} LA7: ;
		}
	} LA5: ;
	{
		Tnode292802* LOC17;
		if (!uselowering0) goto LA15;
		LOC17 = (Tnode292802*)0;
		LOC17 = lowertupleunpacking_433037_2218250499(n0, (*p0).prc);
		genstmts_539244_839829468(p0, LOC17);
		goto BeforeRet;
	}
	LA15: ;
	genlinedir_532823_839829468(p0, n0);
	initlocexpr_539283_839829468(p0, (*n0).kindU.S6.sons->data[(NI)(L0 - ((NI) 1))], (&tup0));
	t0 = getuniquetype_528640_2036603609(tup0.t);
	{
		NI i_543846_839829468;
		NI HEX3Atmp_543914_839829468;
		NI res_543917_839829468;
		i_543846_839829468 = (NI)0;
		HEX3Atmp_543914_839829468 = (NI)0;
		HEX3Atmp_543914_839829468 = (NI)(L0 - ((NI) 3));
		res_543917_839829468 = ((NI) 0);
		{
			while (1) {
				if (!(res_543917_839829468 <= HEX3Atmp_543914_839829468)) goto LA20;
				i_543846_839829468 = res_543917_839829468;
				{
					Tsym292834* v0;
					v0 = (*(*n0).kindU.S6.sons->data[i_543846_839829468]).kindU.S4.sym;
					{
						if (!(((*v0).flags &(1U<<((NU)(((Tsymflag292184) 23))&31U)))!=0)) goto LA24;
						goto LA21;
					}
					LA24: ;
					{
						if (!(((*v0).flags &(1U<<((NU)(((Tsymflag292184) 3))&31U)))!=0)) goto LA28;
						assignglobalvar_538819_839829468(p0, v0);
						genobjectinit_538242_839829468(p0, ((Tcprocsection529011) 1), (*v0).typ, (*v0).loc, NIM_TRUE);
						registergcroot_543762_839829468(p0, v0);
					}
					goto LA26;
					LA28: ;
					{
						Tnode292802* LOC31;
						NIM_BOOL LOC32;
						assignlocalvar_538614_839829468(p0, v0);
						LOC31 = (Tnode292802*)0;
						LOC31 = HEX5BHEX5D_293238_850551059(n0, (NI)(L0 - ((NI) 1)));
						LOC32 = (NIM_BOOL)0;
						LOC32 = isassignedimmediately_543781_839829468(LOC31);
						initlocalvar_538398_839829468(p0, v0, LOC32);
					}
					LA26: ;
					initloc_532273_839829468((&field0), ((Tlockind292808) 6), (*t0).sons->data[i_543846_839829468], tup0.s);
					{
						TY532811 LOC37;
						if (!((*t0).kind == ((Ttypekind292244) 18))) goto LA35;
						memset((void*)LOC37, 0, sizeof(LOC37));
						LOC37[0] = rdloc_538188_839829468(tup0);
						LOC37[1] = rope_178401_2381377266(((NI64) (i_543846_839829468)));
						field0.r = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_185), LOC37, 2);
					}
					goto LA33;
					LA35: ;
					{
						TY532811 LOC43;
						{
							if (!!(((*(*(*t0).n).kindU.S6.sons->data[i_543846_839829468]).kind == ((Tnodekind292020) 3)))) goto LA41;
							internalerror_196100_155036129((*n0).info, ((NimStringDesc*) &T839829468_592));
						}
						LA41: ;
						memset((void*)LOC43, 0, sizeof(LOC43));
						LOC43[0] = rdloc_538188_839829468(tup0);
						LOC43[1] = manglerecfieldname_534361_839829468((*(*(*t0).n).kindU.S6.sons->data[i_543846_839829468]).kindU.S4.sym, t0);
						field0.r = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_90), LOC43, 2);
					}
					LA33: ;
					putlocintodest_539258_839829468(p0, (&(*v0).loc), field0);
				} LA21: ;
				res_543917_839829468 += ((NI) 1);
			} LA20: ;
		}
	}
	}BeforeRet: ;
}
/* NOTE(review): Nim-compiler-generated C. Generates a whole `var` section:
 * iterates the sons of n0 and dispatches each entry:
 *   - kind 125 (presumably nkCommentStmt -- TODO confirm): skipped;
 *   - kind 35 (presumably nkIdentDefs): gensinglevar when the definition's
 *     lhs is a plain symbol (kind 3), genclosurevar otherwise;
 *   - anything else is treated as a tuple unpacking -> genvartuple. */
N_NIMCALL(void, genvarstmt_544854_839829468)(Tcproc529021* p0, Tnode292802* n0) {
	{
		NI i_544869_839829468;
		NI HEX3Atmp_544902_839829468;
		NI LOC2;
		NI res_544905_839829468;
		i_544869_839829468 = (NI)0;
		HEX3Atmp_544902_839829468 = (NI)0;
		LOC2 = (NI)0;
		LOC2 = sonslen_295351_850551059(n0);
		HEX3Atmp_544902_839829468 = (NI)(LOC2 - ((NI) 1));
		res_544905_839829468 = ((NI) 0);
		{
			while (1) {
				if (!(res_544905_839829468 <= HEX3Atmp_544902_839829468)) goto LA4;
				i_544869_839829468 = res_544905_839829468;
				{
					Tnode292802* a0;
					a0 = (*n0).kindU.S6.sons->data[i_544869_839829468];
					{
						if (!((*a0).kind == ((Tnodekind292020) 125))) goto LA8;
						goto LA5;
					}
					LA8: ;
					{
						if (!((*a0).kind == ((Tnodekind292020) 35))) goto LA12;
						{
							if (!((*(*a0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind292020) 3))) goto LA16;
							gensinglevar_544276_839829468(p0, a0);
						}
						goto LA14;
						LA16: ;
						{
							genclosurevar_544832_839829468(p0, a0);
						}
						LA14: ;
					}
					goto LA10;
					LA12: ;
					{
						genvartuple_543794_839829468(p0, a0);
					}
					LA10: ;
				} LA5: ;
				res_544905_839829468 += ((NI) 1);
			} LA4: ;
		}
	}
}
/* NOTE(review): Nim-compiler-generated C. Predicate: should symbol s0 be
 * emitted lazily (on first use)? True when global option bit 2 is set
 * (presumably optDeadCodeElim -- TODO confirm) or when the symbol's owning
 * module has flag bit 25 set. */
static N_INLINE(NIM_BOOL, emitlazily_532248_839829468)(Tsym292834* s0) {
	NIM_BOOL result0;
	NIM_BOOL LOC1;
	Tsym292834* LOC3;
	result0 = (NIM_BOOL)0;
	LOC1 = (NIM_BOOL)0;
	LOC1 = ((gglobaloptions_169130_2607990831 &((NU64)1<<((NU)(((Tglobaloption169013) 2))&63U)))!=0);
	if (LOC1) goto LA2;
	LOC3 = (Tsym292834*)0;
	LOC3 = getmodule_299123_2984716966(s0);
	LOC1 = (((*LOC3).flags &(1U<<((NU)(((Tsymflag292184) 25))&31U)))!=0);
	LA2: ;
	result0 = LOC1;
	return result0;
}
/* NOTE(review): Nim-compiler-generated C. Generates a `const` section t0.
 * Per entry: skips comment nodes (kind 125); requires kind 102 (presumably
 * nkConstDef -- internal error otherwise); skips constants whose type
 * contains compile-time-only parts. For complex constant types (type kinds
 * 4, 16, 19, 18, 24 -- presumably array/set/seq/tuple-ish aggregates, TODO
 * confirm) with a non-empty AST and without loc flag bit 3, requests a C
 * implementation eagerly unless the symbol is marked for lazy emission. */
N_NIMCALL(void, genconststmt_544909_839829468)(Tcproc529021* p0, Tnode292802* t0) {
	{
		NI i_544924_839829468;
		NI HEX3Atmp_544975_839829468;
		NI LOC2;
		NI res_544978_839829468;
		i_544924_839829468 = (NI)0;
		HEX3Atmp_544975_839829468 = (NI)0;
		LOC2 = (NI)0;
		LOC2 = sonslen_295351_850551059(t0);
		HEX3Atmp_544975_839829468 = (NI)(LOC2 - ((NI) 1));
		res_544978_839829468 = ((NI) 0);
		{
			while (1) {
				if (!(res_544978_839829468 <= HEX3Atmp_544975_839829468)) goto LA4;
				i_544924_839829468 = res_544978_839829468;
				{
					Tnode292802* it0;
					Tsym292834* c0;
					it0 = (*t0).kindU.S6.sons->data[i_544924_839829468];
					{
						if (!((*it0).kind == ((Tnodekind292020) 125))) goto LA8;
						goto LA5;
					}
					LA8: ;
					{
						if (!!(((*it0).kind == ((Tnodekind292020) 102)))) goto LA12;
						internalerror_196100_155036129((*t0).info, ((NimStringDesc*) &T839829468_593));
					}
					LA12: ;
					c0 = (*(*it0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym;
					{
						NIM_BOOL LOC16;
						LOC16 = (NIM_BOOL)0;
						LOC16 = containscompiletimeonly_328721_3876443242((*c0).typ);
						if (!LOC16) goto LA17;
						goto LA5;
					}
					goto LA14;
					LA17: ;
					{
						NIM_BOOL LOC20;
						NIM_BOOL LOC21;
						NI LOC24;
						LOC20 = (NIM_BOOL)0;
						LOC21 = (NIM_BOOL)0;
						LOC21 = ((*(*c0).typ).kind == ((Ttypekind292244) 4) || (*(*c0).typ).kind == ((Ttypekind292244) 16) || (*(*c0).typ).kind == ((Ttypekind292244) 19) || (*(*c0).typ).kind == ((Ttypekind292244) 18) || (*(*c0).typ).kind == ((Ttypekind292244) 24));
						if (!(LOC21)) goto LA22;
						LOC21 = !((((*c0).loc.flags &(1U<<((NU)(((Tlocflag292810) 3))&15U)))!=0));
						LA22: ;
						LOC20 = LOC21;
						if (!(LOC20)) goto LA23;
						LOC24 = (NI)0;
						LOC24 = len_293081_850551059((*c0).ast);
						LOC20 = !((LOC24 == ((NI) 0)));
						LA23: ;
						if (!LOC20) goto LA25;
						{
							NIM_BOOL LOC29;
							LOC29 = (NIM_BOOL)0;
							LOC29 = emitlazily_532248_839829468(c0);
							if (!!(LOC29)) goto LA30;
							requestconstimpl_539240_839829468(p0, c0);
						}
						LA30: ;
					}
					goto LA14;
					LA25: ;
					LA14: ;
				} LA5: ;
				res_544978_839829468 += ((NI) 1);
			} LA4: ;
		}
	}
}
/* NOTE(review): Nim-compiler-generated C. For one branch b0 of a string
 * `case`, appends a hash-bucketed comparison to the `branches0` rope array:
 * each literal son (all but the last, which is the branch body) is hashed
 * with hashstring and masked by branches0Len0-1 (branches0Len0 is assumed to
 * be a power of two -- TODO confirm at the call site) to pick bucket j0,
 * where an equality test against `e0` jumping to labl0 is emitted. */
N_NIMCALL(void, gencasestringbranch_547100_839829468)(Tcproc529021* p0, Tnode292802* b0, Tloc292816 e0, Ropeobj178006* labl0, Ropeobj178006** branches0, NI branches0Len0) {
	Tloc292816 x0;
	NI length0;
	memset((void*)(&x0), 0, sizeof(x0));
	length0 = sonslen_295351_850551059(b0);
	{
		NI i_547122_839829468;
		NI HEX3Atmp_547409_839829468;
		NI res_547412_839829468;
		i_547122_839829468 = (NI)0;
		HEX3Atmp_547409_839829468 = (NI)0;
		HEX3Atmp_547409_839829468 = (NI)(length0 - ((NI) 2));
		res_547412_839829468 = ((NI) 0);
		{
			while (1) {
				NI j0;
				NI64 LOC4;
				TY535238 LOC5;
				if (!(res_547412_839829468 <= HEX3Atmp_547409_839829468)) goto LA3;
				i_547122_839829468 = res_547412_839829468;
				initlocexpr_539283_839829468(p0, (*b0).kindU.S6.sons->data[i_547122_839829468], (&x0));
				LOC4 = (NI64)0;
				LOC4 = hashstring_528100_2036603609((*(*b0).kindU.S6.sons->data[i_547122_839829468]).kindU.S3.strval);
				j0 = ((NI) ((NI64)(LOC4 & ((NI64) ((branches0Len0-1))))));
				memset((void*)LOC5, 0, sizeof(LOC5));
				LOC5[0] = rdloc_538188_839829468(e0);
				LOC5[1] = rdloc_538188_839829468(x0);
				LOC5[2] = labl0;
				appcg_532632_839829468((*p0).module, &branches0[j0], ((NimStringDesc*) &T839829468_595), LOC5, 3);
				res_547412_839829468 += ((NI) 1);
			} LA3: ;
		}
	}
}
/* NOTE(review): Nim-compiler-generated C. Wraps expression codegen for n0
 * inside a fresh C block scope: startblock / expr / endblock. The block id
 * returned by startblock is intentionally unused here. */
N_NIMCALL(void, exprblock_544103_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0) {
	TY533289 LOC1;
	NI LOC2;
	memset((void*)LOC1, 0, sizeof(LOC1));
	LOC2 = (NI)0;
	LOC2 = startblock_543978_839829468(p0, ((NimStringDesc*) &T839829468_273), LOC1, 0);
	expr_539248_839829468(p0, n0, d0);
	endblock_544060_839829468(p0);
}
/* NOTE(review): Nim-compiler-generated C. Second pass of `case` codegen:
 * emits the branch bodies. For each branch 1..until0 it emits the numbered
 * label (labid0 + i), resets d0 to "no location" when the case produces no
 * value, then generates the branch body -- of-branches (kind 85, presumably
 * nkOfBranch) get a trailing jump to the shared end label; else-branches
 * fall through. Returns the end label for the caller to fix up. */
N_NIMCALL(Ropeobj178006*, gencasesecondpass_546965_839829468)(Tcproc529021* p0, Tnode292802* t0, Tloc292816* d0, NI labid0, NI until0) {
	Ropeobj178006* result0;
	Ropeobj178006* lend0;
	result0 = (Ropeobj178006*)0;
	lend0 = getlabel_539217_839829468(p0);
	{
		NI i_546984_839829468;
		NI res_547017_839829468;
		i_546984_839829468 = (NI)0;
		res_547017_839829468 = ((NI) 1);
		{
			while (1) {
				TY178507 LOC10;
				if (!(res_547017_839829468 <= until0)) goto LA3;
				i_546984_839829468 = res_547017_839829468;
				{
					NIM_BOOL LOC6;
					LOC6 = (NIM_BOOL)0;
					LOC6 = ((*d0).k == ((Tlockind292808) 1));
					if (!(LOC6)) goto LA7;
					LOC6 = isemptytype_297440_850551059((*t0).typ);
					LA7: ;
					if (!LOC6) goto LA8;
					(*d0).k = ((Tlockind292808) 0);
				}
				LA8: ;
				memset((void*)LOC10, 0, sizeof(LOC10));
				LOC10[0] = rope_178401_2381377266(((NI64) ((NI)(labid0 + i_546984_839829468))));
				linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_599), LOC10, 1);
				{
					NI length0;
					TY178507 LOC15;
					if (!((*(*t0).kindU.S6.sons->data[i_546984_839829468]).kind == ((Tnodekind292020) 85))) goto LA13;
					length0 = sonslen_295351_850551059((*t0).kindU.S6.sons->data[i_546984_839829468]);
					exprblock_544103_839829468(p0, (*(*t0).kindU.S6.sons->data[i_546984_839829468]).kindU.S6.sons->data[(NI)(length0 - ((NI) 1))], d0);
					memset((void*)LOC15, 0, sizeof(LOC15));
					LOC15[0] = lend0;
					linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_556), LOC15, 1);
				}
				goto LA11;
				LA13: ;
				{
					exprblock_544103_839829468(p0, (*(*t0).kindU.S6.sons->data[i_546984_839829468]).kindU.S6.sons->data[((NI) 0)], d0);
				}
				LA11: ;
				res_547017_839829468 += ((NI) 1);
			} LA3: ;
		}
	}
	result0 = lend0;
	return result0;
}
/* NOTE(review): Nim-compiler-generated C. Emits the comparisons for one
 * branch b0 of a generic (if-chain based) `case`: range sons (kind 44,
 * presumably nkRange) use rangeformat0 with lower/upper bounds; plain value
 * sons use eqformat0. All comparisons jump to labl0 on match. The last son
 * (branch body) is skipped -- handled by the second pass. */
N_NIMCALL(void, gencasegenericbranch_546910_839829468)(Tcproc529021* p0, Tnode292802* b0, Tloc292816 e0, NimStringDesc* rangeformat0, NimStringDesc* eqformat0, Ropeobj178006* labl0) {
	Tloc292816 x0;
	Tloc292816 y0;
	NI length0;
	memset((void*)(&x0), 0, sizeof(x0));
	memset((void*)(&y0), 0, sizeof(y0));
	length0 = sonslen_295351_850551059(b0);
	{
		NI i_546932_839829468;
		NI HEX3Atmp_546958_839829468;
		NI res_546961_839829468;
		i_546932_839829468 = (NI)0;
		HEX3Atmp_546958_839829468 = (NI)0;
		HEX3Atmp_546958_839829468 = (NI)(length0 - ((NI) 2));
		res_546961_839829468 = ((NI) 0);
		{
			while (1) {
				if (!(res_546961_839829468 <= HEX3Atmp_546958_839829468)) goto LA3;
				i_546932_839829468 = res_546961_839829468;
				{
					TY535235 LOC8;
					if (!((*(*b0).kindU.S6.sons->data[i_546932_839829468]).kind == ((Tnodekind292020) 44))) goto LA6;
					initlocexpr_539283_839829468(p0, (*(*b0).kindU.S6.sons->data[i_546932_839829468]).kindU.S6.sons->data[((NI) 0)], (&x0));
					initlocexpr_539283_839829468(p0, (*(*b0).kindU.S6.sons->data[i_546932_839829468]).kindU.S6.sons->data[((NI) 1)], (&y0));
					memset((void*)LOC8, 0, sizeof(LOC8));
					LOC8[0] = rdcharloc_538227_839829468(e0);
					LOC8[1] = rdcharloc_538227_839829468(x0);
					LOC8[2] = rdcharloc_538227_839829468(y0);
					LOC8[3] = labl0;
					linecg_532707_839829468(p0, ((Tcprocsection529011) 2), rangeformat0, LOC8, 4);
				}
				goto LA4;
				LA6: ;
				{
					TY535238 LOC10;
					initlocexpr_539283_839829468(p0, (*b0).kindU.S6.sons->data[i_546932_839829468], (&x0));
					memset((void*)LOC10, 0, sizeof(LOC10));
					LOC10[0] = rdcharloc_538227_839829468(e0);
					LOC10[1] = rdcharloc_538227_839829468(x0);
					LOC10[2] = labl0;
					linecg_532707_839829468(p0, ((Tcprocsection529011) 2), eqformat0, LOC10, 3);
				}
				LA4: ;
				res_546961_839829468 += ((NI) 1);
			} LA3: ;
		}
	}
}
/* NOTE(review): Nim-compiler-generated C. First pass for a `case` lowered to
 * an if-chain: for branches 1..until0 it allocates a fresh label per branch
 * and emits either the branch's comparisons (of-branch, kind 85) jumping to
 * that label, or an unconditional jump (else-branch). If more branches
 * follow beyond until0, it additionally emits a jump over the bodies to a
 * goto target, runs the second pass, then places the target label. Returns
 * the end label produced by gencasesecondpass. */
N_NIMCALL(Ropeobj178006*, genifforcaseuntil_547021_839829468)(Tcproc529021* p0, Tnode292802* t0, Tloc292816* d0, NimStringDesc* rangeformat0, NimStringDesc* eqformat0, NI until0, Tloc292816 a0) {
	Ropeobj178006* result0;
	NI labid0;
	result0 = (Ropeobj178006*)0;
	labid0 = (*p0).labels;
	{
		NI i_547042_839829468;
		NI res_547083_839829468;
		i_547042_839829468 = (NI)0;
		res_547083_839829468 = ((NI) 1);
		{
			while (1) {
				if (!(res_547083_839829468 <= until0)) goto LA3;
				i_547042_839829468 = res_547083_839829468;
				(*p0).labels += ((NI) 1);
				{
					Ropeobj178006* LOC8;
					Ropeobj178006* LOC9;
					if (!((*(*t0).kindU.S6.sons->data[i_547042_839829468]).kind == ((Tnodekind292020) 85))) goto LA6;
					LOC8 = (Ropeobj178006*)0;
					LOC8 = rope_178401_2381377266(((NI64) ((*p0).labels)));
					LOC9 = (Ropeobj178006*)0;
					LOC9 = HEX26_178452_2381377266(((NimStringDesc*) &T839829468_296), LOC8);
					gencasegenericbranch_546910_839829468(p0, (*t0).kindU.S6.sons->data[i_547042_839829468], a0, rangeformat0, eqformat0, LOC9);
				}
				goto LA4;
				LA6: ;
				{
					TY178507 LOC11;
					memset((void*)LOC11, 0, sizeof(LOC11));
					LOC11[0] = rope_178401_2381377266(((NI64) ((*p0).labels)));
					linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_598), LOC11, 1);
				}
				LA4: ;
				res_547083_839829468 += ((NI) 1);
			} LA3: ;
		}
	}
	{
		NI LOC14;
		NI gototarget0;
		TY178507 LOC17;
		TY178507 LOC18;
		LOC14 = (NI)0;
		LOC14 = len_293081_850551059(t0);
		if (!(until0 < (NI)(LOC14 - ((NI) 1)))) goto LA15;
		(*p0).labels += ((NI) 1);
		gototarget0 = (*p0).labels;
		memset((void*)LOC17, 0, sizeof(LOC17));
		LOC17[0] = rope_178401_2381377266(((NI64) (gototarget0)));
		linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_598), LOC17, 1);
		result0 = gencasesecondpass_546965_839829468(p0, t0, d0, ((NI) (labid0)), until0);
		memset((void*)LOC18, 0, sizeof(LOC18));
		LOC18[0] = rope_178401_2381377266(((NI64) (gototarget0)));
		linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_599), LOC18, 1);
	}
	goto LA12;
	LA15: ;
	{
		result0 = gencasesecondpass_546965_839829468(p0, t0, d0, ((NI) (labid0)), until0);
	}
	LA12: ;
	return result0;
}
/* NOTE(review): Nim-compiler-generated C. Generic `case` codegen driver:
 * evaluates the selector into a0, runs the if-chain lowering over ALL
 * branches (until = len-1), then fixes up the shared end label. */
N_NIMCALL(void, gencasegeneric_547087_839829468)(Tcproc529021* p0, Tnode292802* t0, Tloc292816* d0, NimStringDesc* rangeformat0, NimStringDesc* eqformat0) {
	Tloc292816 a0;
	Ropeobj178006* lend0;
	NI LOC1;
	memset((void*)(&a0), 0, sizeof(a0));
	initlocexpr_539283_839829468(p0, (*t0).kindU.S6.sons->data[((NI) 0)], (&a0));
	LOC1 = (NI)0;
	LOC1 = sonslen_295351_850551059(t0);
	lend0 = genifforcaseuntil_547021_839829468(p0, t0, d0, rangeformat0, eqformat0, (NI)(LOC1 - ((NI) 1)), a0);
	fixlabel_539230_839829468(p0, lend0);
}
/* NOTE(review): Nim-compiler-generated C. `case` over strings. First counts
 * the string literals across all of-branches; when more than 8, builds a
 * hash-switch: bucket count = nextPowerOfTwo(count), per-branch labels
 * emitted into hash buckets (gencasestringbranch), then a C `switch` on the
 * masked hash with one `case` per non-empty bucket, a default jump when the
 * last branch is an of-branch (no `else`), and finally the shared body pass
 * (gencasesecondpass) + end-label fixup. With <= 8 strings it falls back to
 * the linear if-chain via gencasegeneric. */
N_NIMCALL(void, genstringcase_547416_839829468)(Tcproc529021* p0, Tnode292802* t0, Tloc292816* d0) {
	NI strings0;
	strings0 = ((NI) 0);
	{
		NI i_547434_839829468;
		NI HEX3Atmp_547549_839829468;
		NI LOC2;
		NI res_547552_839829468;
		i_547434_839829468 = (NI)0;
		HEX3Atmp_547549_839829468 = (NI)0;
		LOC2 = (NI)0;
		LOC2 = sonslen_295351_850551059(t0);
		HEX3Atmp_547549_839829468 = (NI)(LOC2 - ((NI) 1));
		res_547552_839829468 = ((NI) 1);
		{
			while (1) {
				if (!(res_547552_839829468 <= HEX3Atmp_547549_839829468)) goto LA4;
				i_547434_839829468 = res_547552_839829468;
				{
					NI LOC9;
					if (!((*(*t0).kindU.S6.sons->data[i_547434_839829468]).kind == ((Tnodekind292020) 85))) goto LA7;
					LOC9 = (NI)0;
					LOC9 = sonslen_295351_850551059((*t0).kindU.S6.sons->data[i_547434_839829468]);
					strings0 += (NI)(LOC9 - ((NI) 1));
				}
				LA7: ;
				res_547552_839829468 += ((NI) 1);
			} LA4: ;
		}
	}
	{
		NI bitmask0;
		NI LOC14;
		TY191350* branches0;
		Tloc292816 a0;
		NI labid0;
		TY532811 LOC26;
		TY533289 LOC35;
		Ropeobj178006* lend0;
		NI LOC42;
		if (!(((NI) 8) < strings0)) goto LA12;
		LOC14 = (NI)0;
		LOC14 = nextpoweroftwo_101629_1009420244(strings0);
		bitmask0 = (NI)(LOC14 - ((NI) 1));
		branches0 = (TY191350*)0;
		branches0 = (TY191350*) newSeq((&NTI191350), ((NI) ((NI)(bitmask0 + ((NI) 1)))));
		memset((void*)(&a0), 0, sizeof(a0));
		initlocexpr_539283_839829468(p0, (*t0).kindU.S6.sons->data[((NI) 0)], (&a0));
		labid0 = (*p0).labels;
		{
			NI i_547483_839829468;
			NI HEX3Atmp_547559_839829468;
			NI LOC16;
			NI res_547562_839829468;
			i_547483_839829468 = (NI)0;
			HEX3Atmp_547559_839829468 = (NI)0;
			LOC16 = (NI)0;
			LOC16 = sonslen_295351_850551059(t0);
			HEX3Atmp_547559_839829468 = (NI)(LOC16 - ((NI) 1));
			res_547562_839829468 = ((NI) 1);
			{
				while (1) {
					if (!(res_547562_839829468 <= HEX3Atmp_547559_839829468)) goto LA18;
					i_547483_839829468 = res_547562_839829468;
					(*p0).labels += ((NI) 1);
					{
						Ropeobj178006* LOC23;
						Ropeobj178006* LOC24;
						if (!((*(*t0).kindU.S6.sons->data[i_547483_839829468]).kind == ((Tnodekind292020) 85))) goto LA21;
						LOC23 = (Ropeobj178006*)0;
						LOC23 = rope_178401_2381377266(((NI64) ((*p0).labels)));
						LOC24 = (Ropeobj178006*)0;
						LOC24 = HEX26_178452_2381377266(((NimStringDesc*) &T839829468_296), LOC23);
						gencasestringbranch_547100_839829468(p0, (*t0).kindU.S6.sons->data[i_547483_839829468], a0, LOC24, branches0->data, branches0->Sup.len);
					}
					goto LA19;
					LA21: ;
					{
					}
					LA19: ;
					res_547562_839829468 += ((NI) 1);
				} LA18: ;
			}
		}
		memset((void*)LOC26, 0, sizeof(LOC26));
		LOC26[0] = rdloc_538188_839829468(a0);
		LOC26[1] = rope_178401_2381377266(((NI64) (bitmask0)));
		linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_596), LOC26, 2);
		{
			NI j_547517_839829468;
			NI HEX3Atmp_547567_839829468;
			NI res_547570_839829468;
			j_547517_839829468 = (NI)0;
			HEX3Atmp_547567_839829468 = (NI)0;
			HEX3Atmp_547567_839829468 = (branches0 ? (branches0->Sup.len-1) : -1);
			res_547570_839829468 = ((NI) 0);
			{
				while (1) {
					if (!(res_547570_839829468 <= HEX3Atmp_547567_839829468)) goto LA29;
					j_547517_839829468 = res_547570_839829468;
					{
						TY532811 LOC34;
						if (!!((branches0->data[j_547517_839829468] == NIM_NIL))) goto LA32;
						memset((void*)LOC34, 0, sizeof(LOC34));
						LOC34[0] = intliteral_539270_839829468(((NI64) (j_547517_839829468)));
						LOC34[1] = branches0->data[j_547517_839829468];
						linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_597), LOC34, 2);
					}
					LA32: ;
					res_547570_839829468 += ((NI) 1);
				} LA29: ;
			}
		}
		memset((void*)LOC35, 0, sizeof(LOC35));
		linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_160), LOC35, 0);
		{
			NI LOC38;
			TY178507 LOC41;
			LOC38 = (NI)0;
			LOC38 = sonslen_295351_850551059(t0);
			if (!!(((*(*t0).kindU.S6.sons->data[(NI)(LOC38 - ((NI) 1))]).kind == ((Tnodekind292020) 85)))) goto LA39;
			memset((void*)LOC41, 0, sizeof(LOC41));
			LOC41[0] = rope_178401_2381377266(((NI64) ((*p0).labels)));
			linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_598), LOC41, 1);
		}
		LA39: ;
		LOC42 = (NI)0;
		LOC42 = sonslen_295351_850551059(t0);
		lend0 = gencasesecondpass_546965_839829468(p0, t0, d0, ((NI) (labid0)), (NI)(LOC42 - ((NI) 1)));
		fixlabel_539230_839829468(p0, lend0);
	}
	goto LA10;
	LA12: ;
	{
		gencasegeneric_547087_839829468(p0, t0, d0, ((NimStringDesc*) &T839829468_490), ((NimStringDesc*) &T839829468_595));
	}
	LA10: ;
}
/* NOTE(review): Nim-compiler-generated C. Generates a computed-goto style
 * `case` body: for every branch (sons 1..len-1) it opens a C block, emits a
 * label per ordinal value in the branch (ranges are rejected with a local
 * error and an early return), then generates the branch body. Selector
 * dispatch itself is emitted elsewhere -- this only lays out the targets. */
N_NIMCALL(void, gengotoforcase_545673_839829468)(Tcproc529021* p0, Tnode292802* casestmt0) {
	{	{
		NI i_545695_839829468;
		NI HEX3Atmp_545737_839829468;
		NI LOC2;
		NI res_545740_839829468;
		i_545695_839829468 = (NI)0;
		HEX3Atmp_545737_839829468 = (NI)0;
		LOC2 = (NI)0;
		LOC2 = len_293081_850551059(casestmt0);
		HEX3Atmp_545737_839829468 = (LOC2 - 1);
		res_545740_839829468 = ((NI) 1);
		{
			while (1) {
				TY533289 LOC5;
				NI LOC6;
				Tnode292802* it0;
				Tnode292802* LOC16;
				if (!(res_545740_839829468 <= HEX3Atmp_545737_839829468)) goto LA4;
				i_545695_839829468 = res_545740_839829468;
				memset((void*)LOC5, 0, sizeof(LOC5));
				LOC6 = (NI)0;
				LOC6 = startblock_543978_839829468(p0, ((NimStringDesc*) &T839829468_273), LOC5, 0);
				it0 = (*casestmt0).kindU.S6.sons->data[i_545695_839829468];
				{
					NI j_545711_839829468;
					NI HEX3Atmp_545730_839829468;
					NI LOC8;
					NI res_545733_839829468;
					j_545711_839829468 = (NI)0;
					HEX3Atmp_545730_839829468 = (NI)0;
					LOC8 = (NI)0;
					LOC8 = len_293081_850551059(it0);
					HEX3Atmp_545730_839829468 = (NI)(LOC8 - ((NI) 2));
					res_545733_839829468 = ((NI) 0);
					{
						while (1) {
							NI64 val0;
							TY178507 LOC15;
							if (!(res_545733_839829468 <= HEX3Atmp_545730_839829468)) goto LA10;
							j_545711_839829468 = res_545733_839829468;
							{
								if (!((*(*it0).kindU.S6.sons->data[j_545711_839829468]).kind == ((Tnodekind292020) 44))) goto LA13;
								localerror_196085_155036129((*it0).info, ((NimStringDesc*) &T839829468_579));
								goto BeforeRet;
							}
							LA13: ;
							val0 = getordvalue_320129_3876443242((*it0).kindU.S6.sons->data[j_545711_839829468]);
							memset((void*)LOC15, 0, sizeof(LOC15));
							LOC15[0] = rope_178401_2381377266(val0);
							linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_602), LOC15, 1);
							res_545733_839829468 += ((NI) 1);
						} LA10: ;
					}
				}
				LOC16 = (Tnode292802*)0;
				LOC16 = lastson_295364_850551059(it0);
				genstmts_539244_839829468(p0, LOC16);
				endblock_544060_839829468(p0);
				res_545740_839829468 += ((NI) 1);
			} LA4: ;
		}
	}
	}BeforeRet: ;
}
/* NOTE(review): Nim-compiler-generated C. Returns true when branch b0
 * contains a range son (kind 44) spanning more than 256 values -- used to
 * decide whether a C `case x ... y:` range label would be too large for
 * compilers that expand it. */
N_NIMCALL(NIM_BOOL, branchhastoobigrange_547575_839829468)(Tnode292802* b0) {
	NIM_BOOL result0;
	{	result0 = (NIM_BOOL)0;
	{
		NI i_547590_839829468;
		NI HEX3Atmp_547608_839829468;
		NI LOC2;
		NI res_547611_839829468;
		i_547590_839829468 = (NI)0;
		HEX3Atmp_547608_839829468 = (NI)0;
		LOC2 = (NI)0;
		LOC2 = sonslen_295351_850551059(b0);
		HEX3Atmp_547608_839829468 = (NI)(LOC2 - ((NI) 2));
		res_547611_839829468 = ((NI) 0);
		{
			while (1) {
				if (!(res_547611_839829468 <= HEX3Atmp_547608_839829468)) goto LA4;
				i_547590_839829468 = res_547611_839829468;
				{
					NIM_BOOL LOC7;
					LOC7 = (NIM_BOOL)0;
					LOC7 = ((*(*b0).kindU.S6.sons->data[i_547590_839829468]).kind == ((Tnodekind292020) 44));
					if (!(LOC7)) goto LA8;
					LOC7 = (IL64(256) < (NI64)((*(*(*b0).kindU.S6.sons->data[i_547590_839829468]).kindU.S6.sons->data[((NI) 1)]).kindU.S1.intval - (*(*(*b0).kindU.S6.sons->data[i_547590_839829468]).kindU.S6.sons->data[((NI) 0)]).kindU.S1.intval));
					LA8: ;
					if (!LOC7) goto LA9;
					result0 = NIM_TRUE;
					goto BeforeRet;
				}
				LA9: ;
				res_547611_839829468 += ((NI) 1);
			} LA4: ;
		}
	}
	}BeforeRet: ;
	return result0;
}
/* NOTE(review): Nim-compiler-generated C. Scans the branches of case node n0
 * and returns the LAST branch index that must be handled with an if-chain
 * rather than a C `switch` (0 when none): a branch qualifies when its body
 * contains a specific pragma (Tspecialword 181 -- TODO confirm which), or,
 * for compilers lacking ccprop bit 0 (presumably GNU case-range support),
 * when an of-branch has a range wider than 256 values. */
N_NIMCALL(NI, ifswitchsplitpoint_547615_839829468)(Tcproc529021* p0, Tnode292802* n0) {
	NI result0;
	result0 = (NI)0;
	{
		NI i_547630_839829468;
		NI HEX3Atmp_547654_839829468;
		NI LOC2;
		NI res_547657_839829468;
		i_547630_839829468 = (NI)0;
		HEX3Atmp_547654_839829468 = (NI)0;
		LOC2 = (NI)0;
		LOC2 = len_293081_850551059(n0);
		HEX3Atmp_547654_839829468 = (NI)(LOC2 - ((NI) 1));
		res_547657_839829468 = ((NI) 1);
		{
			while (1) {
				Tnode292802* branch0;
				Tnode292802* stmtblock0;
				if (!(res_547657_839829468 <= HEX3Atmp_547654_839829468)) goto LA4;
				i_547630_839829468 = res_547657_839829468;
				branch0 = HEX5BHEX5D_293238_850551059(n0, i_547630_839829468);
				stmtblock0 = lastson_295364_850551059(branch0);
				{
					NIM_BOOL LOC7;
					LOC7 = (NIM_BOOL)0;
					LOC7 = stmtscontainpragma_528083_2036603609(stmtblock0, ((Tspecialword275003) 181));
					if (!LOC7) goto LA8;
					result0 = i_547630_839829468;
				}
				goto LA5;
				LA8: ;
				{
					if (!!(((Cc_273413_2528170400[(ccompiler_273431_2528170400)- 1].Field20 &(1U<<((NU)(((Tinfoccprop273004) 0))&7U)))!=0))) goto LA11;
					{
						NIM_BOOL LOC15;
						LOC15 = (NIM_BOOL)0;
						LOC15 = ((*branch0).kind == ((Tnodekind292020) 85));
						if (!(LOC15)) goto LA16;
						LOC15 = branchhastoobigrange_547575_839829468(branch0);
						LA16: ;
						if (!LOC15) goto LA17;
						result0 = i_547630_839829468;
					}
					LA17: ;
				}
				goto LA5;
				LA11: ;
				LA5: ;
				res_547657_839829468 += ((NI) 1);
			} LA4: ;
		}
	}
	return result0;
}
/* NOTE(review): Nim-compiler-generated C. Ordinal `case` codegen: branches
 * up to the split point (see ifswitchsplitpoint) are emitted as an if-chain
 * (genifforcaseuntil); the remaining branches become a native C `switch`
 * with `case`/`case ... :` labels (gencaserange) and `default:` for the
 * else-branch. When the compiler needs it (ccprop bit 3 -- presumably
 * "needs default to silence warnings", TODO confirm) and no else-branch
 * exists, an empty default is appended. Finishes by fixing up the if-chain
 * end label if one was produced. */
N_NIMCALL(void, genordinalcase_547724_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0) {
	NI splitpoint0;
	Tloc292816 a0;
	Ropeobj178006* lend0;
	splitpoint0 = ifswitchsplitpoint_547615_839829468(p0, n0);
	memset((void*)(&a0), 0, sizeof(a0));
	initlocexpr_539283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (&a0));
	{
		if (!(((NI) 0) < splitpoint0)) goto LA3;
		lend0 = genifforcaseuntil_547021_839829468(p0, n0, d0, ((NimStringDesc*) &T839829468_600), ((NimStringDesc*) &T839829468_601), splitpoint0, a0);
	}
	goto LA1;
	LA3: ;
	{
		lend0 = NIM_NIL;
	}
	LA1: ;
	{
		NI LOC8;
		TY178507 LOC11;
		NIM_BOOL hasdefault0;
		TY533289 LOC37;
		LOC8 = (NI)0;
		LOC8 = len_293081_850551059(n0);
		if (!((NI)(splitpoint0 + ((NI) 1)) < LOC8)) goto LA9;
		memset((void*)LOC11, 0, sizeof(LOC11));
		LOC11[0] = rdcharloc_538227_839829468(a0);
		linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_603), LOC11, 1);
		hasdefault0 = NIM_FALSE;
		{
			NI i_547757_839829468;
			NI HEX3Atmp_547816_839829468;
			NI HEX3Atmp_547817_839829468;
			NI LOC13;
			NI res_547820_839829468;
			i_547757_839829468 = (NI)0;
			HEX3Atmp_547816_839829468 = (NI)0;
			HEX3Atmp_547817_839829468 = (NI)0;
			HEX3Atmp_547816_839829468 = (NI)(splitpoint0 + ((NI) 1));
			LOC13 = (NI)0;
			LOC13 = len_293081_850551059(n0);
			HEX3Atmp_547817_839829468 = (LOC13 - 1);
			res_547820_839829468 = HEX3Atmp_547816_839829468;
			{
				while (1) {
					Tnode292802* branch0;
					Tnode292802* LOC28;
					TY533289 LOC29;
					if (!(res_547820_839829468 <= HEX3Atmp_547817_839829468)) goto LA15;
					i_547757_839829468 = res_547820_839829468;
					{
						NIM_BOOL LOC18;
						LOC18 = (NIM_BOOL)0;
						LOC18 = ((*d0).k == ((Tlockind292808) 1));
						if (!(LOC18)) goto LA19;
						LOC18 = isemptytype_297440_850551059((*n0).typ);
						LA19: ;
						if (!LOC18) goto LA20;
						(*d0).k = ((Tlockind292808) 0);
					}
					LA20: ;
					branch0 = HEX5BHEX5D_293238_850551059(n0, i_547757_839829468);
					{
						if (!((*branch0).kind == ((Tnodekind292020) 85))) goto LA24;
						gencaserange_537028_839829468(p0, branch0);
					}
					goto LA22;
					LA24: ;
					{
						TY533289 LOC27;
						memset((void*)LOC27, 0, sizeof(LOC27));
						linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_181), LOC27, 0);
						hasdefault0 = NIM_TRUE;
					}
					LA22: ;
					LOC28 = (Tnode292802*)0;
					LOC28 = lastson_295364_850551059(branch0);
					exprblock_544103_839829468(p0, LOC28, d0);
					memset((void*)LOC29, 0, sizeof(LOC29));
					linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_182), LOC29, 0);
					res_547820_839829468 += ((NI) 1);
				} LA15: ;
			}
		}
		{
			NIM_BOOL LOC32;
			TY533289 LOC36;
			LOC32 = (NIM_BOOL)0;
			LOC32 = ((Cc_273413_2528170400[(ccompiler_273431_2528170400)- 1].Field20 &(1U<<((NU)(((Tinfoccprop273004) 3))&7U)))!=0);
			if (!(LOC32)) goto LA33;
			LOC32 = !(hasdefault0);
			LA33: ;
			if (!LOC32) goto LA34;
			memset((void*)LOC36, 0, sizeof(LOC36));
			linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_604), LOC36, 0);
		}
		LA34: ;
		memset((void*)LOC37, 0, sizeof(LOC37));
		linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_160), LOC37, 0);
	}
	LA9: ;
	{
		if (!!((lend0 == NIM_NIL))) goto LA40;
		fixlabel_539230_839829468(p0, lend0);
	}
	LA40: ;
}
/* NOTE(review): Nim-compiler-generated C. Top-level `case` dispatcher:
 * allocates a temp destination when the case yields a value and d0 is empty,
 * then switches on the (skipped) selector type:
 *   - type kind 28 (presumably tyString)       -> genstringcase
 *   - type kinds 36..39 (presumably float-ish) -> gencasegeneric if-chain
 *   - default: computed-goto path when the selector sym carries flag bit 30,
 *     otherwise the ordinal switch (genordinalcase). */
N_NIMCALL(void, gencase_547826_839829468)(Tcproc529021* p0, Tnode292802* t0, Tloc292816* d0) {
	Ttype292840* LOC8;
	genlinedir_532823_839829468(p0, t0);
	{
		NIM_BOOL LOC3;
		NIM_BOOL LOC4;
		LOC3 = (NIM_BOOL)0;
		LOC4 = (NIM_BOOL)0;
		LOC4 = isemptytype_297440_850551059((*t0).typ);
		LOC3 = !(LOC4);
		if (!(LOC3)) goto LA5;
		LOC3 = ((*d0).k == ((Tlockind292808) 0));
		LA5: ;
		if (!LOC3) goto LA6;
		gettemp_537032_839829468(p0, (*t0).typ, d0, NIM_FALSE);
	}
	LA6: ;
	LOC8 = (Ttype292840*)0;
	LOC8 = skiptypes_296099_850551059((*(*t0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106242013440));
	switch ((*LOC8).kind) {
	case ((Ttypekind292244) 28):
	{
		genstringcase_547416_839829468(p0, t0, d0);
	}
	break;
	case ((Ttypekind292244) 36) ... ((Ttypekind292244) 39):
	{
		gencasegeneric_547087_839829468(p0, t0, d0, ((NimStringDesc*) &T839829468_600), ((NimStringDesc*) &T839829468_601));
	}
	break;
	default:
	{
		{
			NIM_BOOL LOC14;
			LOC14 = (NIM_BOOL)0;
			LOC14 = ((*(*t0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind292020) 3));
			if (!(LOC14)) goto LA15;
			LOC14 = (((*(*(*t0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym).flags &(1U<<((NU)(((Tsymflag292184) 30))&31U)))!=0);
			LA15: ;
			if (!LOC14) goto LA16;
			gengotoforcase_545673_839829468(p0, t0);
		}
		goto LA12;
		LA16: ;
		{
			genordinalcase_547724_839829468(p0, t0, d0);
		}
		LA12: ;
	}
	break;
	}
}
/* Removes and returns the last element of a Nim node seq, shrinking the
 * seq by one via the runtime's setLengthSeq. Assumes the seq is non-empty
 * (a nil/empty seq would index at -1, as in the original). */
static N_INLINE(Tnode292802*, pop_318246_1689653243)(Tnodeseq292796** s0) {
	NI lastIdx;
	Tnode292802* top;
	lastIdx = (NI)(((*s0) ? (*s0)->Sup.len : 0) - ((NI) 1));
	top = (*s0)->data[lastIdx];
	(*s0) = (Tnodeseq292796*) setLengthSeq(&((*s0))->Sup, sizeof(Tnode292802*), ((NI) (lastIdx)));
	return top;
}
/* Machine-generated (Nim compiler C backend).
 * Emits the cleanup code needed when control leaves `howmanytrys0`
 * enclosing try statements and `howmanyexcepts0` enclosing except blocks:
 * pops each try frame from p0->nestedtrystmts (emitting the pop-safe-point
 * template T839829468_605 unless suppressed), runs each try's `finally`
 * section, then pushes the frames back so the enclosing context is
 * unchanged, and finally emits template T839829468_606 once per except
 * block being left. */
N_NIMCALL(void, blockleaveactions_545442_839829468)(Tcproc529021* p0, NI howmanytrys0, NI howmanyexcepts0) {
Tnodeseq292796* stack0;
NI alreadypoppedcnt0;
stack0 = (Tnodeseq292796*)0;
stack0 = (Tnodeseq292796*) newSeq((&NTI292796), ((NI) 0));
alreadypoppedcnt0 = (*p0).inexceptblock;
/* First pass: pop the innermost howmanytrys0 try frames, saving them on
 * stack0, and run their finally sections. */
{
NI i_545471_839829468;
NI res_545596_839829468;
i_545471_839829468 = (NI)0;
res_545596_839829468 = ((NI) 1);
{
while (1) {
Tnode292802* trystmt0;
Tnode292802* finallystmt0;
if (!(res_545596_839829468 <= howmanytrys0)) goto LA3;
i_545471_839829468 = res_545596_839829468;
{
NIM_BOOL LOC6;
LOC6 = (NIM_BOOL)0;
/* Skip safe-point emission when command == (Tcommands169076) 2 or the
 * module has symbol-flag bit 27 set (presumably the compile-to-C++ /
 * native-EH path — verify against Nim's ccgstmts). */
LOC6 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC6) goto LA7;
LOC6 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA7: ;
if (!!(LOC6)) goto LA8;
{
/* Frames already popped by an enclosing except block need no emit. */
if (!(((NI) 0) < alreadypoppedcnt0)) goto LA12;
alreadypoppedcnt0 -= ((NI) 1);
}
goto LA10;
LA12: ;
{
TY533289 LOC15;
memset((void*)LOC15, 0, sizeof(LOC15));
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_605), LOC15, 0);
}
LA10: ;
}
LA8: ;
trystmt0 = pop_318246_1689653243((&(*p0).nestedtrystmts));
stack0 = (Tnodeseq292796*) incrSeqV2(&(stack0)->Sup, sizeof(Tnode292802*));
asgnRefNoCycle((void**) (&stack0->data[stack0->Sup.len]), trystmt0);
++stack0->Sup.len;
finallystmt0 = lastson_295364_850551059(trystmt0);
{
/* Node kind 107: the try's last son is a `finally` clause — emit it. */
if (!((*finallystmt0).kind == ((Tnodekind292020) 107))) goto LA18;
genstmts_539244_839829468(p0, (*finallystmt0).kindU.S6.sons->data[((NI) 0)]);
}
LA18: ;
res_545596_839829468 += ((NI) 1);
} LA3: ;
}
}
/* Second pass: push the saved frames back (in reverse iteration order)
 * so p0->nestedtrystmts is restored for the surrounding code. */
{
NI i_545546_839829468;
NI HEX3Atmp_545601_839829468;
NI res_545604_839829468;
i_545546_839829468 = (NI)0;
HEX3Atmp_545601_839829468 = (NI)0;
HEX3Atmp_545601_839829468 = (NI)(howmanytrys0 - ((NI) 1));
res_545604_839829468 = HEX3Atmp_545601_839829468;
{
while (1) {
if (!(((NI) 0) <= res_545604_839829468)) goto LA22;
i_545546_839829468 = res_545604_839829468;
(*p0).nestedtrystmts = (Tnodeseq292796*) incrSeqV2(&((*p0).nestedtrystmts)->Sup, sizeof(Tnode292802*));
asgnRefNoCycle((void**) (&(*p0).nestedtrystmts->data[(*p0).nestedtrystmts->Sup.len]), stack0->data[i_545546_839829468]);
++(*p0).nestedtrystmts->Sup.len;
res_545604_839829468 -= ((NI) 1);
} LA22: ;
}
}
/* Third pass: emit template T839829468_606 once per except block being
 * left — skipped under the same suppression condition as above. */
{
NIM_BOOL LOC25;
LOC25 = (NIM_BOOL)0;
LOC25 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC25) goto LA26;
LOC25 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA26: ;
if (!!(LOC25)) goto LA27;
{
NI i_545587_839829468;
NI HEX3Atmp_545610_839829468;
NI res_545613_839829468;
i_545587_839829468 = (NI)0;
HEX3Atmp_545610_839829468 = (NI)0;
HEX3Atmp_545610_839829468 = (NI)(howmanyexcepts0 - ((NI) 1));
res_545613_839829468 = HEX3Atmp_545610_839829468;
{
while (1) {
TY533289 LOC32;
if (!(((NI) 0) <= res_545613_839829468)) goto LA31;
i_545587_839829468 = res_545613_839829468;
memset((void*)LOC32, 0, sizeof(LOC32));
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_606), LOC32, 0);
res_545613_839829468 -= ((NI) 1);
} LA31: ;
}
}
}
LA27: ;
}
/* Machine-generated (Nim compiler C backend).
 * Generates code for a `return` statement: bails out early if node-flag
 * bit 14 is set, otherwise marks that a BeforeRet label is needed, emits
 * the optional result assignment (son 0), runs all enclosing try/except
 * leave actions, emits the innermost finally safe-point template
 * (T839829468_607) when one is active, and ends with the goto-BeforeRet
 * template T839829468_608. */
N_NIMCALL(void, genreturnstmt_545617_839829468)(Tcproc529021* p0, Tnode292802* t0) {
TY533289 LOC14;
{ {
if (!(((*t0).flags &(1U<<((NU)(((Tnodeflag292427) 14))&15U)))!=0)) goto LA3;
goto BeforeRet;
}
LA3: ;
(*p0).beforeretneeded = NIM_TRUE;
genlinedir_532823_839829468(p0, t0);
{
/* Son 0 is the (optional) result expression; kind 1 means "empty". */
if (!!(((*(*t0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind292020) 1)))) goto LA7;
genstmts_539244_839829468(p0, (*t0).kindU.S6.sons->data[((NI) 0)]);
}
LA7: ;
/* Leave every enclosing try and except block on the way out. */
blockleaveactions_545442_839829468(p0, ((*p0).nestedtrystmts ? (*p0).nestedtrystmts->Sup.len : 0), (*p0).inexceptblock);
{
Ropeobj178006* safepoint0;
TY178507 LOC13;
if (!(((NI) 0) < ((*p0).finallysafepoints ? (*p0).finallysafepoints->Sup.len : 0))) goto LA11;
safepoint0 = (*p0).finallysafepoints->data[(NI)(((*p0).finallysafepoints ? (*p0).finallysafepoints->Sup.len : 0) - ((NI) 1))];
memset((void*)LOC13, 0, sizeof(LOC13));
LOC13[0] = safepoint0;
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_607), LOC13, 1);
}
LA11: ;
memset((void*)LOC14, 0, sizeof(LOC14));
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_608), LOC14, 0);
}BeforeRet: ;
}
/* Machine-generated (Nim compiler C backend).
 * Generates a `break` statement: resolves the target block index either
 * from an explicit label symbol (son 0) or by searching inward for the
 * nearest loop block; reports an internal error (T839829468_609) if no
 * loop is found; then emits the leave actions for the try/except frames
 * being exited and a goto to the block's assigned label. */
N_NIMCALL(void, genbreakstmt_546444_839829468)(Tcproc529021* p0, Tnode292802* t0) {
NI idx0;
Ropeobj178006* label0;
TY178507 LOC16;
idx0 = (*p0).breakidx;
{
Tsym292834* sym0;
/* Labeled break: son 0 is a symbol whose position encodes the block. */
if (!!(((*(*t0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind292020) 1)))) goto LA3;
sym0 = (*(*t0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym;
idx0 = (NI)((*sym0).position - ((NI) 1));
}
goto LA1;
LA3: ;
{
{
/* Unlabeled break: walk outward until a loop block is found. */
while (1) {
NIM_BOOL LOC8;
LOC8 = (NIM_BOOL)0;
LOC8 = (((NI) 0) <= idx0);
if (!(LOC8)) goto LA9;
LOC8 = !((*p0).blocks->data[idx0].isloop);
LA9: ;
if (!LOC8) goto LA7;
idx0 -= ((NI) 1);
} LA7: ;
}
{
NIM_BOOL LOC12;
LOC12 = (NIM_BOOL)0;
LOC12 = (idx0 < ((NI) 0));
if (LOC12) goto LA13;
LOC12 = !((*p0).blocks->data[idx0].isloop);
LA13: ;
if (!LOC12) goto LA14;
internalerror_196100_155036129((*t0).info, ((NimStringDesc*) &T839829468_609));
}
LA14: ;
}
LA1: ;
label0 = assignlabel_544020_839829468((&(*p0).blocks->data[idx0]));
/* Leave only the try/except frames between here and the target block. */
blockleaveactions_545442_839829468(p0, (NI)(((*p0).nestedtrystmts ? (*p0).nestedtrystmts->Sup.len : 0) - ((NI) ((*p0).blocks->data[idx0].nestedtrystmts))), (NI)((*p0).inexceptblock - ((NI) ((*p0).blocks->data[idx0].nestedexceptstmts))));
genlinedir_532823_839829468(p0, t0);
memset((void*)LOC16, 0, sizeof(LOC16));
LOC16[0] = label0;
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_556), LOC16, 1);
}
/* Machine-generated (Nim compiler C backend).
 * Returns true when the assignment `asgn0` targets an object field whose
 * symbol carries flag bit 18 (presumably the "is a case discriminator"
 * flag — verify against Nim's ast definitions) and field checks are
 * enabled in p0->options (bit 2). Handles both checked (kind 46) and
 * plain dot-expression (kind 45) LHS node shapes. */
N_NIMCALL(NIM_BOOL, fielddiscriminantcheckneeded_549080_839829468)(Tcproc529021* p0, Tnode292802* asgn0) {
NIM_BOOL result0;
result0 = (NIM_BOOL)0;
{
Tnode292802* le0;
/* Option bit 2: field-check generation enabled. */
if (!(((*p0).options &(1U<<((NU)(((Toption169009) 2))&31U)))!=0)) goto LA3;
le0 = (*asgn0).kindU.S6.sons->data[((NI) 0)];
{
Tsym292834* field0;
/* LHS wrapped in a check node (kind 46): field sym is at [0][1]. */
if (!((*le0).kind == ((Tnodekind292020) 46))) goto LA7;
field0 = (*(*(*le0).kindU.S6.sons->data[((NI) 0)]).kindU.S6.sons->data[((NI) 1)]).kindU.S4.sym;
result0 = (((*field0).flags &(1U<<((NU)(((Tsymflag292184) 18))&31U)))!=0);
}
goto LA5;
LA7: ;
{
Tsym292834* field0;
/* Plain dot expression (kind 45): field sym is at son 1. */
if (!((*le0).kind == ((Tnodekind292020) 45))) goto LA10;
field0 = (*(*le0).kindU.S6.sons->data[((NI) 1)]).kindU.S4.sym;
result0 = (((*field0).flags &(1U<<((NU)(((Tsymflag292184) 18))&31U)))!=0);
}
goto LA5;
LA10: ;
LA5: ;
}
LA3: ;
return result0;
}
/* Machine-generated (Nim compiler C backend).
 * Builds the C declaration (as a rope) for the discriminator lookup table
 * of field d0 inside object type objtype0: formats template T839829468_203
 * with the table's mangled name and its size, lengthord(d0.typ) + 1. */
N_NIMCALL(Ropeobj178006*, discriminatortabledecl_536094_839829468)(Tcgen529027* m0, Ttype292840* objtype0, Tsym292834* d0) {
Ropeobj178006* result0;
Ropeobj178006* LOC1;
Ropeobj178006* tmp0;
TY532811 LOC2;
NI64 LOC3;
result0 = (Ropeobj178006*)0;
LOC1 = (Ropeobj178006*)0;
/* Ensure the runtime symbol named by T839829468_130 is emitted/linked. */
LOC1 = cgsym_532403_839829468(m0, ((NimStringDesc*) &T839829468_130));
tmp0 = discriminatortablename_536057_839829468(m0, objtype0, d0);
memset((void*)LOC2, 0, sizeof(LOC2));
LOC2[0] = tmp0;
LOC3 = (NI64)0;
LOC3 = lengthord_320007_3876443242((*d0).typ);
LOC2[1] = rope_178401_2381377266((NI64)(LOC3 + IL64(1)));
result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_203), LOC2, 2);
return result0;
}
/* Machine-generated (Nim compiler C backend).
 * Emits the runtime check for assigning a new value to an object case
 * discriminator field: declares the discriminator table once per field
 * (guarded by module->declaredthings) via template T839829468_610, then
 * emits the check call (template T839829468_611) with the destination loc,
 * the new-value temp, the table name, and lengthord(field.typ) + 1. */
N_NIMCALL(void, gendiscriminantcheck_549144_839829468)(Tcproc529021* p0, Tloc292816 a0, Tloc292816 tmp0, Ttype292840* objtype0, Tsym292834* field0) {
Ttype292840* t0;
Ropeobj178006* LOC1;
NI64 L0;
TY535235 LOC8;
t0 = skiptypes_296099_850551059(objtype0, IL64(211106240964864));
LOC1 = (Ropeobj178006*)0;
LOC1 = gentypeinfo_535941_839829468((*p0).module, t0);
L0 = lengthord_320007_3876443242((*field0).typ);
{
NIM_BOOL LOC4;
TY178507 LOC7;
LOC4 = (NIM_BOOL)0;
/* containsorincl: true if already declared; otherwise marks and emits. */
LOC4 = containsorincl_268862_2627731572((&(*(*p0).module).declaredthings), (*field0).Sup.id);
if (!!(LOC4)) goto LA5;
memset((void*)LOC7, 0, sizeof(LOC7));
LOC7[0] = discriminatortabledecl_536094_839829468((*p0).module, t0, field0);
appcg_532640_839829468((*p0).module, ((Tcfilesection529005) 9), ((NimStringDesc*) &T839829468_610), LOC7, 1);
}
LA5: ;
memset((void*)LOC8, 0, sizeof(LOC8));
LOC8[0] = rdloc_538188_839829468(a0);
LOC8[1] = rdloc_538188_839829468(tmp0);
LOC8[2] = discriminatortablename_536057_839829468((*p0).module, t0, field0);
LOC8[3] = intliteral_539270_839829468((NI64)(L0 + IL64(1)));
linecg_532707_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_611), LOC8, 4);
}
/* Machine-generated (Nim compiler C backend).
 * Generates an assignment to a case-object discriminator field: evaluates
 * the LHS loc, computes the RHS into a temp, emits the discriminant
 * runtime check, then performs the actual assignment. */
N_NIMCALL(void, asgnfielddiscriminant_549209_839829468)(Tcproc529021* p0, Tnode292802* e0) {
Tloc292816 a0;
Tloc292816 tmp0;
Tnode292802* dotexpr0;
memset((void*)(&a0), 0, sizeof(a0));
memset((void*)(&tmp0), 0, sizeof(tmp0));
dotexpr0 = (*e0).kindU.S6.sons->data[((NI) 0)];
{
/* Unwrap a check node (kind 46) to reach the underlying dot expr. */
if (!((*dotexpr0).kind == ((Tnodekind292020) 46))) goto LA3;
dotexpr0 = (*dotexpr0).kindU.S6.sons->data[((NI) 0)];
}
LA3: ;
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 0)], (&a0));
/* RHS goes into a temp first so the check can inspect old vs new value. */
gettemp_537032_839829468(p0, a0.t, (&tmp0), NIM_FALSE);
expr_539248_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&tmp0));
gendiscriminantcheck_549144_839829468(p0, a0, tmp0, (*(*dotexpr0).kindU.S6.sons->data[((NI) 0)]).typ, (*(*dotexpr0).kindU.S6.sons->data[((NI) 1)]).kindU.S4.sym);
genassignment_539264_839829468(p0, a0, tmp0, 0);
}
/* Machine-generated (Nim compiler C backend).
 * Generates a Nim assignment `lhs = rhs` (e0 sons 0 and 1). Three paths:
 * (1) LHS is a symbol with flag bit 30 set -> goto-variable assignment;
 * (2) no discriminant check needed -> evaluate LHS loc (dereferencing
 *     kinds 47/65 specially), optionally mark it for fast (shallow)
 *     assignment, and load the RHS into it;
 * (3) otherwise -> checked discriminator-field assignment. */
N_NIMCALL(void, genasgn_549239_839829468)(Tcproc529021* p0, Tnode292802* e0, NIM_BOOL fastasgn0) {
genlinedir_532823_839829468(p0, e0);
{
NIM_BOOL LOC3;
LOC3 = (NIM_BOOL)0;
LOC3 = ((*(*e0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind292020) 3));
if (!(LOC3)) goto LA4;
LOC3 = (((*(*(*e0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym).flags &(1U<<((NU)(((Tsymflag292184) 30))&31U)))!=0);
LA4: ;
if (!LOC3) goto LA5;
gengotovar_544258_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)]);
}
goto LA1;
LA5: ;
{
NIM_BOOL LOC8;
Tloc292816 a0;
LOC8 = (NIM_BOOL)0;
LOC8 = fielddiscriminantcheckneeded_549080_839829468(p0, e0);
if (!!(LOC8)) goto LA9;
memset((void*)(&a0), 0, sizeof(a0));
{
Tnode292802* LOC13;
Tnode292802* LOC16;
LOC13 = (Tnode292802*)0;
LOC13 = HEX5BHEX5D_293238_850551059(e0, ((NI) 0));
/* Deref-like LHS node kinds (47, 65) need genderef for the address. */
if (!((*LOC13).kind == ((Tnodekind292020) 47) || (*LOC13).kind == ((Tnodekind292020) 65))) goto LA14;
LOC16 = (Tnode292802*)0;
LOC16 = HEX5BHEX5D_293238_850551059(e0, ((NI) 0));
genderef_543921_839829468(p0, LOC16, (&a0), NIM_TRUE);
}
goto LA11;
LA14: ;
{
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 0)], (&a0));
}
LA11: ;
{
/* fastasgn0: set loc-flag bit 2 to request a shallow assignment. */
if (!fastasgn0) goto LA20;
a0.flags |= ((NU16)1)<<((((Tlocflag292810) 2))%(sizeof(NU16)*8));
}
LA20: ;
loadinto_543928_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 0)], (*e0).kindU.S6.sons->data[((NI) 1)], (&a0));
}
goto LA1;
LA9: ;
{
asgnfielddiscriminant_549209_839829468(p0, e0);
}
LA1: ;
}
/* Machine-generated (Nim compiler C backend).
 * Renders an `asm`/`emit` statement into a rope. First concatenates the
 * statement's pieces into res0: string-literal sons (kinds 20..22) are
 * copied verbatim; symbol sons (kind 3) are substituted with their C loc,
 * their C type name (for type symbols, kind 7), or their mangled name;
 * anything else is an internal error (T839829468_612). Then, when this is
 * an asm statement and the active C compiler's property bit 5 is set,
 * res0 is re-emitted line by line with GCC-style quoting (wrapping each
 * line in T839829468_613 / T839829468_614 unless it already starts with a
 * quote or colon); otherwise res0 plus a newline is returned as-is. */
N_NIMCALL(Ropeobj178006*, genasmoremitstmt_548529_839829468)(Tcproc529021* p0, Tnode292802* t0, NIM_BOOL isasmstmt0) {
Ropeobj178006* result0;
NimStringDesc* res0;
result0 = (Ropeobj178006*)0;
res0 = copyString(((NimStringDesc*) &T839829468_490));
{
NI i_548547_839829468;
NI HEX3Atmp_548644_839829468;
NI LOC2;
NI res_548647_839829468;
i_548547_839829468 = (NI)0;
HEX3Atmp_548644_839829468 = (NI)0;
LOC2 = (NI)0;
LOC2 = sonslen_295351_850551059(t0);
HEX3Atmp_548644_839829468 = (NI)(LOC2 - ((NI) 1));
res_548647_839829468 = ((NI) 0);
{
while (1) {
if (!(res_548647_839829468 <= HEX3Atmp_548644_839829468)) goto LA4;
i_548547_839829468 = res_548647_839829468;
switch ((*(*t0).kindU.S6.sons->data[i_548547_839829468]).kind) {
case ((Tnodekind292020) 20) ... ((Tnodekind292020) 22):
{
/* String-literal piece: append verbatim. */
res0 = resizeString(res0, (*(*t0).kindU.S6.sons->data[i_548547_839829468]).kindU.S3.strval->Sup.len + 0);
appendString(res0, (*(*t0).kindU.S6.sons->data[i_548547_839829468]).kindU.S3.strval);
}
break;
case ((Tnodekind292020) 3):
{
Tsym292834* sym0;
sym0 = (*(*t0).kindU.S6.sons->data[i_548547_839829468]).kindU.S4.sym;
{
Tloc292816 a0;
Ropeobj178006* LOC11;
NimStringDesc* LOC12;
/* Symbol-kind bitset 28672: substitute the symbol's C lvalue. */
if (!((28672 &(1U<<((NU)((*sym0).kind)&31U)))!=0)) goto LA9;
memset((void*)(&a0), 0, sizeof(a0));
initlocexpr_539283_839829468(p0, (*t0).kindU.S6.sons->data[i_548547_839829468], (&a0));
LOC11 = (Ropeobj178006*)0;
LOC11 = rdloc_538188_839829468(a0);
LOC12 = (NimStringDesc*)0;
LOC12 = HEX24_178856_2381377266(LOC11);
res0 = resizeString(res0, LOC12->Sup.len + 0);
appendString(res0, LOC12);
}
goto LA7;
LA9: ;
{
Ropeobj178006* LOC16;
NimStringDesc* LOC17;
/* Type symbol (kind 7): substitute the C type descriptor. */
if (!((*sym0).kind == ((Tsymkind292435) 7))) goto LA14;
LOC16 = (Ropeobj178006*)0;
LOC16 = gettypedesc_535671_839829468((*p0).module, (*sym0).typ);
LOC17 = (NimStringDesc*)0;
LOC17 = HEX24_178856_2381377266(LOC16);
res0 = resizeString(res0, LOC17->Sup.len + 0);
appendString(res0, LOC17);
}
goto LA7;
LA14: ;
{
Ropeobj178006* r0;
NimStringDesc* LOC23;
/* Any other symbol: substitute its (lazily computed) mangled name. */
r0 = (*sym0).loc.r;
{
if (!(r0 == NIM_NIL)) goto LA21;
r0 = manglename_533205_839829468(sym0);
asgnRefNoCycle((void**) (&(*sym0).loc.r), r0);
}
LA21: ;
LOC23 = (NimStringDesc*)0;
LOC23 = HEX24_178856_2381377266(r0);
res0 = resizeString(res0, LOC23->Sup.len + 0);
appendString(res0, LOC23);
}
LA7: ;
}
break;
default:
{
internalerror_196100_155036129((*(*t0).kindU.S6.sons->data[i_548547_839829468]).info, ((NimStringDesc*) &T839829468_612));
}
break;
}
res_548647_839829468 += ((NI) 1);
} LA4: ;
}
}
{
NIM_BOOL LOC27;
LOC27 = (NIM_BOOL)0;
LOC27 = isasmstmt0;
if (!(LOC27)) goto LA28;
/* Compiler property bit 5: asm lines need per-line quoting. */
LOC27 = ((Cc_273413_2528170400[(ccompiler_273431_2528170400)- 1].Field20 &(1U<<((NU)(((Tinfoccprop273004) 5))&7U)))!=0);
LA28: ;
if (!LOC27) goto LA29;
{
NimStringDesc* x_548604_839829468;
NI first_548656_839829468;
NI last_548658_839829468;
x_548604_839829468 = (NimStringDesc*)0;
first_548656_839829468 = ((NI) 0);
last_548658_839829468 = ((NI) 0);
{
while (1) {
NI j0;
{
/* Scan to the next NUL, CR, or LF: one logical source line. */
while (1) {
if (!!((((NU8)(res0->data[last_548658_839829468])) == ((NU8)(0)) || ((NU8)(res0->data[last_548658_839829468])) == ((NU8)(13)) || ((NU8)(res0->data[last_548658_839829468])) == ((NU8)(10))))) goto LA35;
last_548658_839829468 += ((NI) 1);
} LA35: ;
}
x_548604_839829468 = copyStrLast(res0, first_548656_839829468, (NI)(last_548658_839829468 - ((NI) 1)));
j0 = ((NI) 0);
{
/* Skip leading spaces/tabs of the extracted line. */
while (1) {
if (!(((NU8)(x_548604_839829468->data[j0])) == ((NU8)(32)) || ((NU8)(x_548604_839829468->data[j0])) == ((NU8)(9)))) goto LA37;
j0 += ((NI) 1);
} LA37: ;
}
{
/* Line already starts with '"' or ':' -> pass through unquoted. */
if (!(((NU8)(x_548604_839829468->data[j0])) == ((NU8)(34)) || ((NU8)(x_548604_839829468->data[j0])) == ((NU8)(58)))) goto LA40;
add_178487_2381377266(&result0, x_548604_839829468);
add_178487_2381377266(&result0, tnl_176644_4151366050);
}
goto LA38;
LA40: ;
{
/* Non-empty line -> wrap in the quoting templates. */
if (!!(((NU8)(x_548604_839829468->data[j0]) == (NU8)(0)))) goto LA43;
add_178487_2381377266(&result0, ((NimStringDesc*) &T839829468_613));
add_178487_2381377266(&result0, x_548604_839829468);
add_178487_2381377266(&result0, ((NimStringDesc*) &T839829468_614));
}
goto LA38;
LA43: ;
LA38: ;
{
/* Consume the line terminator: LF, or CR optionally followed by LF;
 * NUL means end of input -> leave the outer loop. */
if (!((NU8)(res0->data[last_548658_839829468]) == (NU8)(10))) goto LA47;
last_548658_839829468 += ((NI) 1);
}
goto LA45;
LA47: ;
{
if (!((NU8)(res0->data[last_548658_839829468]) == (NU8)(13))) goto LA50;
last_548658_839829468 += ((NI) 1);
{
if (!((NU8)(res0->data[last_548658_839829468]) == (NU8)(10))) goto LA54;
last_548658_839829468 += ((NI) 1);
}
LA54: ;
}
goto LA45;
LA50: ;
{
goto LA32;
}
LA45: ;
first_548656_839829468 = last_548658_839829468;
}
} LA32: ;
}
}
goto LA25;
LA29: ;
{
/* No quoting needed: append a newline and convert to a rope. */
res0 = resizeString(res0, tnl_176644_4151366050->Sup.len + 0);
appendString(res0, tnl_176644_4151366050);
result0 = rope_178277_2381377266(res0);
}
LA25: ;
return result0;
}
/* Machine-generated (Nim compiler C backend).
 * Generates an `asm` statement: renders it via genasmoremitstmt, then
 * writes it with the active C compiler's asm format (Field17) either to
 * the module's file section 7 (when outside any proc, p0->prc == nil) or
 * into the current proc's statement section. */
N_NIMCALL(void, genasmstmt_548659_839829468)(Tcproc529021* p0, Tnode292802* t0) {
Ropeobj178006* s0;
genlinedir_532823_839829468(p0, t0);
s0 = genasmoremitstmt_548529_839829468(p0, t0, NIM_TRUE);
{
TY178507 LOC5;
/* Top-level asm: goes into the module file section. */
if (!((*p0).prc == NIM_NIL)) goto LA3;
memset((void*)LOC5, 0, sizeof(LOC5));
LOC5[0] = s0;
addf_179205_2381377266(&(*(*p0).module).s[(((Tcfilesection529005) 7))- 0], Cc_273413_2528170400[(ccompiler_273431_2528170400)- 1].Field17, LOC5, 1);
}
goto LA1;
LA3: ;
{
TY178507 LOC7;
memset((void*)LOC7, 0, sizeof(LOC7));
LOC7[0] = s0;
linef_532700_839829468(p0, ((Tcprocsection529011) 2), Cc_273413_2528170400[(ccompiler_273431_2528170400)- 1].Field17, LOC7, 1);
}
LA1: ;
}
/* Machine-generated (Nim compiler C backend).
 * Wraps code generation for `stmts0` in an anonymous C block:
 * startblock (with the empty-args template T839829468_273), the
 * statements, then endblock. The startblock return value is unused. */
static N_INLINE(void, gensimpleblock_544095_839829468)(Tcproc529021* p0, Tnode292802* stmts0) {
TY533289 LOC1;
NI LOC2;
memset((void*)LOC1, 0, sizeof(LOC1));
LOC2 = (NI)0;
LOC2 = startblock_543978_839829468(p0, ((NimStringDesc*) &T839829468_273), LOC1, 0);
genstmts_539244_839829468(p0, stmts0);
endblock_544060_839829468(p0);
}
/* Machine-generated (Nim compiler C backend).
 * Generates a Nim `try` statement using native C++ exception handling:
 * emits a `try` block (template T839829468_617) for son 0, closes it with
 * a catch header built from template T839829468_618 and a fresh temp name
 * for the caught exception, then one branch per `except` clause (node
 * kind 87) — a catch-all when the clause has a single son, otherwise an
 * or-chain of type tests (template T839829468_621). If no catch-all was
 * present, a trailing branch re-raises (template T839829468_623). A final
 * `finally` clause (node kind 107), when present, is generated after the
 * handler chain. Maintains p0->nestedtrystmts and p0->inexceptblock
 * around the handlers. */
N_NIMCALL(void, gentrycpp_547865_839829468)(Tcproc529021* p0, Tnode292802* t0, Tloc292816* d0) {
Ropeobj178006* exc0;
TY533289 LOC16;
NI LOC17;
NI length0;
TY178507 LOC18;
Ropeobj178006* LOC19;
NI i0;
NIM_BOOL catchallpresent0;
TY533289 LOC78;
Tnode292802* LOC79;
{
NIM_BOOL LOC3;
NIM_BOOL LOC4;
LOC3 = (NIM_BOOL)0;
LOC4 = (NIM_BOOL)0;
LOC4 = isemptytype_297440_850551059((*t0).typ);
LOC3 = !(LOC4);
if (!(LOC3)) goto LA5;
LOC3 = ((*d0).k == ((Tlockind292808) 0));
LA5: ;
/* Non-empty result type but no destination yet: allocate a temp loc. */
if (!LOC3) goto LA6;
gettemp_537032_839829468(p0, (*t0).typ, d0, NIM_FALSE);
}
LA6: ;
genlinedir_532823_839829468(p0, t0);
exc0 = gettempname_533596_839829468((*p0).module);
{
Tsym292834* LOC10;
Ropeobj178006* LOC13;
LOC10 = (Tsym292834*)0;
/* Pull in the exception-handling runtime symbol: T839829468_615 if it
 * is a registered compilerproc, otherwise the fallback T839829468_616. */
LOC10 = getcompilerproc_338746_3937434831(((NimStringDesc*) &T839829468_615));
if (!!((LOC10 == NIM_NIL))) goto LA11;
LOC13 = (Ropeobj178006*)0;
LOC13 = cgsym_532403_839829468((*p0).module, ((NimStringDesc*) &T839829468_615));
}
goto LA8;
LA11: ;
{
Ropeobj178006* LOC15;
LOC15 = (Ropeobj178006*)0;
LOC15 = cgsym_532403_839829468((*p0).module, ((NimStringDesc*) &T839829468_616));
}
LA8: ;
(*p0).nestedtrystmts = (Tnodeseq292796*) incrSeqV2(&((*p0).nestedtrystmts)->Sup, sizeof(Tnode292802*));
asgnRefNoCycle((void**) (&(*p0).nestedtrystmts->data[(*p0).nestedtrystmts->Sup.len]), t0);
++(*p0).nestedtrystmts->Sup.len;
memset((void*)LOC16, 0, sizeof(LOC16));
LOC17 = (NI)0;
LOC17 = startblock_543978_839829468(p0, ((NimStringDesc*) &T839829468_617), LOC16, 0);
expr_539248_839829468(p0, (*t0).kindU.S6.sons->data[((NI) 0)], d0);
length0 = sonslen_295351_850551059(t0);
memset((void*)LOC18, 0, sizeof(LOC18));
LOC18[0] = exc0;
LOC19 = (Ropeobj178006*)0;
LOC19 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_618), LOC18, 1);
endblock_544035_839829468(p0, LOC19);
{
TY533289 LOC24;
/* Option bit 15 set: emit the extra template T839829468_619 (presumably
 * stack-trace bookkeeping — verify against Nim's ccgstmts). */
if (!(((*p0).options &(1U<<((NU)(((Toption169009) 15))&31U)))!=0)) goto LA22;
memset((void*)LOC24, 0, sizeof(LOC24));
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_619), LOC24, 0);
}
LA22: ;
(*p0).inexceptblock += ((NI) 1);
i0 = ((NI) 1);
catchallpresent0 = NIM_FALSE;
{
/* One iteration per `except` clause (node kind 87). */
while (1) {
NIM_BOOL LOC27;
NI blen0;
LOC27 = (NIM_BOOL)0;
LOC27 = (i0 < length0);
if (!(LOC27)) goto LA28;
LOC27 = ((*(*t0).kindU.S6.sons->data[i0]).kind == ((Tnodekind292020) 87));
LA28: ;
if (!LOC27) goto LA26;
{
NIM_BOOL LOC31;
LOC31 = (NIM_BOOL)0;
LOC31 = ((*d0).k == ((Tlockind292808) 1));
if (!(LOC31)) goto LA32;
LOC31 = isemptytype_297440_850551059((*t0).typ);
LA32: ;
if (!LOC31) goto LA33;
(*d0).k = ((Tlockind292808) 0);
}
LA33: ;
blen0 = sonslen_295351_850551059((*t0).kindU.S6.sons->data[i0]);
{
Ropeobj178006** LOC39;
TY533289 LOC40;
/* Chain with `else` (template T839829468_620) after the first branch. */
if (!(((NI) 1) < i0)) goto LA37;
LOC39 = (Ropeobj178006**)0;
LOC39 = s_529179_3723162438(p0, ((Tcprocsection529011) 2));
memset((void*)LOC40, 0, sizeof(LOC40));
addf_179205_2381377266(LOC39, ((NimStringDesc*) &T839829468_620), LOC40, 0);
}
LA37: ;
{
TY533289 LOC45;
NI LOC46;
TY533289 LOC47;
/* Single son => bare `except:` catch-all branch. */
if (!(blen0 == ((NI) 1))) goto LA43;
catchallpresent0 = NIM_TRUE;
memset((void*)LOC45, 0, sizeof(LOC45));
LOC46 = (NI)0;
LOC46 = startblock_543978_839829468(p0, ((NimStringDesc*) &T839829468_273), LOC45, 0);
expr_539248_839829468(p0, (*(*t0).kindU.S6.sons->data[i0]).kindU.S6.sons->data[((NI) 0)], d0);
memset((void*)LOC47, 0, sizeof(LOC47));
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_606), LOC47, 0);
endblock_544060_839829468(p0);
}
goto LA41;
LA43: ;
{
Ropeobj178006* orexpr0;
TY178507 LOC57;
TY533289 LOC58;
NI LOC59;
TY533289 LOC60;
orexpr0 = NIM_NIL;
/* Build `test1 || test2 || ...` over the clause's exception types
 * (sons 0..blen-2); the handler body is the last son. */
{
NI j_547978_839829468;
NI HEX3Atmp_548101_839829468;
NI res_548104_839829468;
j_547978_839829468 = (NI)0;
HEX3Atmp_548101_839829468 = (NI)0;
HEX3Atmp_548101_839829468 = (NI)(blen0 - ((NI) 2));
res_548104_839829468 = ((NI) 0);
{
while (1) {
TY532811 LOC56;
if (!(res_548104_839829468 <= HEX3Atmp_548101_839829468)) goto LA51;
j_547978_839829468 = res_548104_839829468;
{
if (!!((orexpr0 == NIM_NIL))) goto LA54;
add_178487_2381377266(&orexpr0, ((NimStringDesc*) &T839829468_229));
}
LA54: ;
memset((void*)LOC56, 0, sizeof(LOC56));
LOC56[0] = exc0;
LOC56[1] = gentypeinfo_535941_839829468((*p0).module, (*(*(*t0).kindU.S6.sons->data[i0]).kindU.S6.sons->data[j_547978_839829468]).typ);
appcg_532632_839829468((*p0).module, &orexpr0, ((NimStringDesc*) &T839829468_621), LOC56, 2);
res_548104_839829468 += ((NI) 1);
} LA51: ;
}
}
memset((void*)LOC57, 0, sizeof(LOC57));
LOC57[0] = orexpr0;
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_622), LOC57, 1);
memset((void*)LOC58, 0, sizeof(LOC58));
LOC59 = (NI)0;
LOC59 = startblock_543978_839829468(p0, ((NimStringDesc*) &T839829468_273), LOC58, 0);
expr_539248_839829468(p0, (*(*t0).kindU.S6.sons->data[i0]).kindU.S6.sons->data[(NI)(blen0 - ((NI) 1))], d0);
memset((void*)LOC60, 0, sizeof(LOC60));
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_606), LOC60, 0);
endblock_544060_839829468(p0);
}
LA41: ;
i0 += ((NI) 1);
} LA26: ;
}
{
TY533289 LOC70;
NI LOC71;
Tnode292802* finallyblock0;
TY533289 LOC76;
Ropeobj178006* LOC77;
/* No catch-all branch: add a fallback branch that runs the finally
 * section (if any) and re-raises via template T839829468_623. */
if (!!(catchallpresent0)) goto LA63;
{
TY533289 LOC69;
if (!(((NI) 1) < i0)) goto LA67;
memset((void*)LOC69, 0, sizeof(LOC69));
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_620), LOC69, 0);
}
LA67: ;
memset((void*)LOC70, 0, sizeof(LOC70));
LOC71 = (NI)0;
LOC71 = startblock_543978_839829468(p0, ((NimStringDesc*) &T839829468_273), LOC70, 0);
finallyblock0 = lastson_295364_850551059(t0);
{
if (!((*finallyblock0).kind == ((Tnodekind292020) 107))) goto LA74;
genstmts_539244_839829468(p0, (*finallyblock0).kindU.S6.sons->data[((NI) 0)]);
}
LA74: ;
memset((void*)LOC76, 0, sizeof(LOC76));
LOC77 = (Ropeobj178006*)0;
LOC77 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_623), LOC76, 0);
line_532690_839829468(p0, ((Tcprocsection529011) 2), LOC77);
endblock_544060_839829468(p0);
}
LA63: ;
memset((void*)LOC78, 0, sizeof(LOC78));
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_160), LOC78, 0);
(*p0).inexceptblock -= ((NI) 1);
LOC79 = (Tnode292802*)0;
LOC79 = pop_318246_1689653243((&(*p0).nestedtrystmts));
{
NIM_BOOL LOC82;
LOC82 = (NIM_BOOL)0;
/* Trailing `finally` clause (node kind 107): generate it last. */
LOC82 = (i0 < length0);
if (!(LOC82)) goto LA83;
LOC82 = ((*(*t0).kindU.S6.sons->data[i0]).kind == ((Tnodekind292020) 107));
LA83: ;
if (!LOC82) goto LA84;
gensimpleblock_544095_839829468(p0, (*(*t0).kindU.S6.sons->data[i0]).kindU.S6.sons->data[((NI) 0)]);
}
LA84: ;
}
/* Appends the string r0, converted to a rope and indented to the current
 * indentation level, to section s0 of proc p0's generated code. */
N_NIMCALL(void, line_532695_839829468)(Tcproc529021* p0, Tcprocsection529011 s0, NimStringDesc* r0) {
	Ropeobj178006** section;
	Ropeobj178006* indented;
	section = s_529179_3723162438(p0, s0);
	indented = indentline_532656_839829468(p0, rope_178277_2381377266(r0));
	add_178482_2381377266(section, indented);
}
/* Removes and returns the last element of a Nim rope seq, shrinking the
 * seq by one via the runtime's setLengthSeq. Assumes the seq is non-empty
 * (a nil/empty seq would index at -1, as in the original). */
static N_INLINE(Ropeobj178006*, pop_178530_1689653243)(TY191350** s0) {
	NI lastIdx;
	Ropeobj178006* top;
	lastIdx = (NI)(((*s0) ? (*s0)->Sup.len : 0) - ((NI) 1));
	top = (*s0)->data[lastIdx];
	(*s0) = (TY191350*) setLengthSeq(&((*s0))->Sup, sizeof(Ropeobj178006*), ((NI) (lastIdx)));
	return top;
}
/* Machine-generated (Nim compiler C backend).
 * Generates a Nim `try` statement using setjmp-based safe points (the
 * plain-C exception model): declares a safe-point temp, pushes it, emits
 * the setjmp call with one of several templates chosen by defined()
 * symbols (T839829468_627/629/631 checks), generates son 0 inside the
 * "no exception" block, then the handler block with one branch per
 * `except` clause (kind 87) — catch-all for single-son clauses, otherwise
 * an or-chain of exception-type tests whose template depends on the
 * EH-suppression condition. A trailing `finally` clause (kind 107) is
 * generated with the safe point temporarily pushed on
 * p0->finallysafepoints. Ends by emitting the re-raise-if-pending
 * template T839829468_640. Maintains p0->nestedtrystmts and
 * p0->inexceptblock around the handlers. */
N_NIMCALL(void, gentry_548114_839829468)(Tcproc529021* p0, Tnode292802* t0, Tloc292816* d0) {
NIM_BOOL LOC8;
Ropeobj178006* safepoint0;
TY178507 LOC17;
TY178507 LOC18;
TY178507 LOC37;
NI LOC38;
NI length0;
TY533289 LOC39;
TY533289 LOC40;
NI LOC41;
TY533289 LOC42;
NI i0;
Tnode292802* LOC95;
TY178507 LOC103;
{
NIM_BOOL LOC3;
NIM_BOOL LOC4;
LOC3 = (NIM_BOOL)0;
LOC4 = (NIM_BOOL)0;
LOC4 = isemptytype_297440_850551059((*t0).typ);
LOC3 = !(LOC4);
if (!(LOC3)) goto LA5;
LOC3 = ((*d0).k == ((Tlockind292808) 0));
LA5: ;
/* Non-empty result type but no destination yet: allocate a temp loc. */
if (!LOC3) goto LA6;
gettemp_537032_839829468(p0, (*t0).typ, d0, NIM_FALSE);
}
LA6: ;
LOC8 = (NIM_BOOL)0;
/* Ensure the header named by T839829468_624 (setjmp support) is included. */
LOC8 = includestr_147249_3771138726((&(*(*p0).module).headerfiles), ((NimStringDesc*) &T839829468_624));
genlinedir_532823_839829468(p0, t0);
safepoint0 = gettempname_533596_839829468((*p0).module);
{
Tsym292834* LOC11;
Ropeobj178006* LOC14;
LOC11 = (Tsym292834*)0;
/* Pull in the EH runtime symbol: T839829468_615 if registered as a
 * compilerproc, otherwise the fallback T839829468_616. */
LOC11 = getcompilerproc_338746_3937434831(((NimStringDesc*) &T839829468_615));
if (!!((LOC11 == NIM_NIL))) goto LA12;
LOC14 = (Ropeobj178006*)0;
LOC14 = cgsym_532403_839829468((*p0).module, ((NimStringDesc*) &T839829468_615));
}
goto LA9;
LA12: ;
{
Ropeobj178006* LOC16;
LOC16 = (Ropeobj178006*)0;
LOC16 = cgsym_532403_839829468((*p0).module, ((NimStringDesc*) &T839829468_616));
}
LA9: ;
/* Declare the safe point (section 0 = locals) and push it (section 2). */
memset((void*)LOC17, 0, sizeof(LOC17));
LOC17[0] = safepoint0;
linefmt_532714_839829468(p0, ((Tcprocsection529011) 0), ((NimStringDesc*) &T839829468_625), LOC17, 1);
memset((void*)LOC18, 0, sizeof(LOC18));
LOC18[0] = safepoint0;
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_626), LOC18, 1);
{
NIM_BOOL LOC21;
TY178507 LOC24;
LOC21 = (NIM_BOOL)0;
/* Pick the setjmp flavor by compile-time defines (templates 628/630/632,
 * with 628 as the default). */
LOC21 = isdefined_200011_1967573533(((NimStringDesc*) &T839829468_627));
if (!LOC21) goto LA22;
memset((void*)LOC24, 0, sizeof(LOC24));
LOC24[0] = safepoint0;
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_628), LOC24, 1);
}
goto LA19;
LA22: ;
{
NIM_BOOL LOC26;
TY178507 LOC29;
LOC26 = (NIM_BOOL)0;
LOC26 = isdefined_200011_1967573533(((NimStringDesc*) &T839829468_629));
if (!LOC26) goto LA27;
memset((void*)LOC29, 0, sizeof(LOC29));
LOC29[0] = safepoint0;
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_630), LOC29, 1);
}
goto LA19;
LA27: ;
{
NIM_BOOL LOC31;
TY178507 LOC34;
LOC31 = (NIM_BOOL)0;
LOC31 = isdefined_200011_1967573533(((NimStringDesc*) &T839829468_631));
if (!LOC31) goto LA32;
memset((void*)LOC34, 0, sizeof(LOC34));
LOC34[0] = safepoint0;
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_632), LOC34, 1);
}
goto LA19;
LA32: ;
{
TY178507 LOC36;
memset((void*)LOC36, 0, sizeof(LOC36));
LOC36[0] = safepoint0;
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_628), LOC36, 1);
}
LA19: ;
/* "if (setjmp(...) == 0)" success block: generate the try body. */
memset((void*)LOC37, 0, sizeof(LOC37));
LOC37[0] = safepoint0;
LOC38 = (NI)0;
LOC38 = startblock_543978_839829468(p0, ((NimStringDesc*) &T839829468_633), LOC37, 1);
length0 = sonslen_295351_850551059(t0);
(*p0).nestedtrystmts = (Tnodeseq292796*) incrSeqV2(&((*p0).nestedtrystmts)->Sup, sizeof(Tnode292802*));
asgnRefNoCycle((void**) (&(*p0).nestedtrystmts->data[(*p0).nestedtrystmts->Sup.len]), t0);
++(*p0).nestedtrystmts->Sup.len;
expr_539248_839829468(p0, (*t0).kindU.S6.sons->data[((NI) 0)], d0);
memset((void*)LOC39, 0, sizeof(LOC39));
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_605), LOC39, 0);
endblock_544060_839829468(p0);
/* "else" block: an exception was raised — generate the handlers. */
memset((void*)LOC40, 0, sizeof(LOC40));
LOC41 = (NI)0;
LOC41 = startblock_543978_839829468(p0, ((NimStringDesc*) &T839829468_634), LOC40, 0);
memset((void*)LOC42, 0, sizeof(LOC42));
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_605), LOC42, 0);
{
TY533289 LOC47;
/* Option bit 15 set: emit the extra template T839829468_619 (presumably
 * stack-trace bookkeeping — verify against Nim's ccgstmts). */
if (!(((*p0).options &(1U<<((NU)(((Toption169009) 15))&31U)))!=0)) goto LA45;
memset((void*)LOC47, 0, sizeof(LOC47));
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_619), LOC47, 0);
}
LA45: ;
(*p0).inexceptblock += ((NI) 1);
i0 = ((NI) 1);
{
/* One iteration per `except` clause (node kind 87). */
while (1) {
NIM_BOOL LOC50;
NI blen0;
LOC50 = (NIM_BOOL)0;
LOC50 = (i0 < length0);
if (!(LOC50)) goto LA51;
LOC50 = ((*(*t0).kindU.S6.sons->data[i0]).kind == ((Tnodekind292020) 87));
LA51: ;
if (!LOC50) goto LA49;
{
NIM_BOOL LOC54;
LOC54 = (NIM_BOOL)0;
LOC54 = ((*d0).k == ((Tlockind292808) 1));
if (!(LOC54)) goto LA55;
LOC54 = isemptytype_297440_850551059((*t0).typ);
LA55: ;
if (!LOC54) goto LA56;
(*d0).k = ((Tlockind292808) 0);
}
LA56: ;
blen0 = sonslen_295351_850551059((*t0).kindU.S6.sons->data[i0]);
{
TY533289 LOC67;
NI LOC68;
TY178507 LOC69;
TY533289 LOC70;
/* Single son => bare `except:` catch-all branch: mark the exception
 * handled (template T839829468_636) and run the body. */
if (!(blen0 == ((NI) 1))) goto LA60;
{
TY533289 LOC66;
if (!(((NI) 1) < i0)) goto LA64;
memset((void*)LOC66, 0, sizeof(LOC66));
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_635), LOC66, 0);
}
LA64: ;
memset((void*)LOC67, 0, sizeof(LOC67));
LOC68 = (NI)0;
LOC68 = startblock_543978_839829468(p0, ((NimStringDesc*) &T839829468_273), LOC67, 0);
memset((void*)LOC69, 0, sizeof(LOC69));
LOC69[0] = safepoint0;
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_636), LOC69, 1);
expr_539248_839829468(p0, (*(*t0).kindU.S6.sons->data[i0]).kindU.S6.sons->data[((NI) 0)], d0);
memset((void*)LOC70, 0, sizeof(LOC70));
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_606), LOC70, 0);
endblock_544060_839829468(p0);
}
goto LA58;
LA60: ;
{
Ropeobj178006* orexpr0;
TY178507 LOC91;
NI LOC92;
TY178507 LOC93;
TY533289 LOC94;
orexpr0 = NIM_NIL;
/* Build `test1 || test2 || ...` over the clause's exception types
 * (sons 0..blen-2); the handler body is the last son. The per-type
 * test template (637 vs 638) depends on the EH-suppression check. */
{
NI j_548247_839829468;
NI HEX3Atmp_548521_839829468;
NI res_548524_839829468;
j_548247_839829468 = (NI)0;
HEX3Atmp_548521_839829468 = (NI)0;
HEX3Atmp_548521_839829468 = (NI)(blen0 - ((NI) 2));
res_548524_839829468 = ((NI) 0);
{
while (1) {
NimStringDesc* isobjformat0;
TY178507 LOC86;
if (!(res_548524_839829468 <= HEX3Atmp_548521_839829468)) goto LA74;
j_548247_839829468 = res_548524_839829468;
{
if (!!((orexpr0 == NIM_NIL))) goto LA77;
add_178487_2381377266(&orexpr0, ((NimStringDesc*) &T839829468_229));
}
LA77: ;
{
NIM_BOOL LOC81;
LOC81 = (NIM_BOOL)0;
LOC81 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC81) goto LA82;
LOC81 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA82: ;
if (!!(LOC81)) goto LA83;
isobjformat0 = copyString(((NimStringDesc*) &T839829468_637));
}
goto LA79;
LA83: ;
{
isobjformat0 = copyString(((NimStringDesc*) &T839829468_638));
}
LA79: ;
memset((void*)LOC86, 0, sizeof(LOC86));
LOC86[0] = gentypeinfo_535941_839829468((*p0).module, (*(*(*t0).kindU.S6.sons->data[i0]).kindU.S6.sons->data[j_548247_839829468]).typ);
appcg_532632_839829468((*p0).module, &orexpr0, isobjformat0, LOC86, 1);
res_548524_839829468 += ((NI) 1);
} LA74: ;
}
}
{
/* Chain with `else` (template T839829468_620) after the first branch. */
if (!(((NI) 1) < i0)) goto LA89;
line_532695_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_620));
}
LA89: ;
memset((void*)LOC91, 0, sizeof(LOC91));
LOC91[0] = orexpr0;
LOC92 = (NI)0;
LOC92 = startblock_543978_839829468(p0, ((NimStringDesc*) &T839829468_639), LOC91, 1);
memset((void*)LOC93, 0, sizeof(LOC93));
LOC93[0] = safepoint0;
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_636), LOC93, 1);
expr_539248_839829468(p0, (*(*t0).kindU.S6.sons->data[i0]).kindU.S6.sons->data[(NI)(blen0 - ((NI) 1))], d0);
memset((void*)LOC94, 0, sizeof(LOC94));
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_606), LOC94, 0);
endblock_544060_839829468(p0);
}
LA58: ;
i0 += ((NI) 1);
} LA49: ;
}
(*p0).inexceptblock -= ((NI) 1);
LOC95 = (Tnode292802*)0;
LOC95 = pop_318246_1689653243((&(*p0).nestedtrystmts));
endblock_544060_839829468(p0);
{
NIM_BOOL LOC98;
Ropeobj178006* LOC102;
LOC98 = (NIM_BOOL)0;
/* Trailing `finally` clause (kind 107): generate it with the safe point
 * pushed on finallysafepoints so nested returns can reference it. */
LOC98 = (i0 < length0);
if (!(LOC98)) goto LA99;
LOC98 = ((*(*t0).kindU.S6.sons->data[i0]).kind == ((Tnodekind292020) 107));
LA99: ;
if (!LOC98) goto LA100;
(*p0).finallysafepoints = (TY191350*) incrSeqV2(&((*p0).finallysafepoints)->Sup, sizeof(Ropeobj178006*));
asgnRefNoCycle((void**) (&(*p0).finallysafepoints->data[(*p0).finallysafepoints->Sup.len]), safepoint0);
++(*p0).finallysafepoints->Sup.len;
gensimpleblock_544095_839829468(p0, (*(*t0).kindU.S6.sons->data[i0]).kindU.S6.sons->data[((NI) 0)]);
LOC102 = (Ropeobj178006*)0;
LOC102 = pop_178530_1689653243((&(*p0).finallysafepoints));
}
LA100: ;
memset((void*)LOC103, 0, sizeof(LOC103));
LOC103[0] = safepoint0;
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_640), LOC103, 1);
}
/* Returns the C format template used when emitting a `raise` statement
 * (a fresh copy of the string constant T839829468_641). The proc context
 * p0 is accepted for interface uniformity but not read here. */
N_NIMCALL(NimStringDesc*, getraisefrmt_546824_839829468)(Tcproc529021* p0) {
return copyString(((NimStringDesc*) &T839829468_641));
}
/* Emits C code for a `raise` statement node t0 into proc context p0.
 * Generated Nim-compiler output: the LAnn labels/gotos encode the original
 * structured if/elif/else control flow; do not reorder. */
N_NIMCALL(void, genraisestmt_546828_839829468)(Tcproc529021* p0, Tnode292802* t0) {
{
Tnode292802* finallyblock0;
/* If raising from inside an except block, first emit the innermost
 * enclosing try's finally block (node kind 107), if present. */
if (!(((NI) 0) < (*p0).inexceptblock)) goto LA3;
finallyblock0 = lastson_295364_850551059((*p0).nestedtrystmts->data[(NI)(((*p0).nestedtrystmts ? (*p0).nestedtrystmts->Sup.len : 0) - ((NI) 1))]);
{
if (!((*finallyblock0).kind == ((Tnodekind292020) 107))) goto LA7;
gensimpleblock_544095_839829468(p0, (*finallyblock0).kindU.S6.sons->data[((NI) 0)]);
}
LA7: ;
}
LA3: ;
{
Tloc292816 a0;
Ropeobj178006* e0;
Ttype292840* typ0;
NimStringDesc* LOC13;
TY532811 LOC14;
/* `raise <expr>` branch: son 0 is not empty (kind 1 = nkEmpty-like). */
if (!!(((*(*t0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind292020) 1)))) goto LA11;
memset((void*)(&a0), 0, sizeof(a0));
initlocexpr_539283_839829468(p0, (*t0).kindU.S6.sons->data[((NI) 0)], (&a0));
e0 = rdloc_538188_839829468(a0);
typ0 = skiptypes_296099_850551059((*(*t0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106247256320));
genlinedir_532823_839829468(p0, t0);
LOC13 = (NimStringDesc*)0;
LOC13 = getraisefrmt_546824_839829468(p0);
memset((void*)LOC14, 0, sizeof(LOC14));
LOC14[0] = e0;
/* Second format argument: the C string of the raised exception's type name. */
LOC14[1] = makecstring_191638_155036129((*(*(*typ0).sym).name).s);
linecg_532707_839829468(p0, ((Tcprocsection529011) 2), LOC13, LOC14, 2);
}
goto LA9;
LA11: ;
{
/* Bare `raise` (re-raise) branch. */
genlinedir_532823_839829468(p0, t0);
{
NIM_BOOL LOC18;
NIM_BOOL LOC19;
TY533289 LOC24;
Ropeobj178006* LOC25;
LOC18 = (NIM_BOOL)0;
LOC19 = (NIM_BOOL)0;
/* Condition: (compiling as C++ command OR module flag 27 set) AND
 * global option 31 not set — presumably selects C++-style re-raise. */
LOC19 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC19) goto LA20;
LOC19 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA20: ;
LOC18 = LOC19;
if (!(LOC18)) goto LA21;
LOC18 = !(((gglobaloptions_169130_2607990831 &((NU64)1<<((NU)(((Tglobaloption169013) 31))&63U)))!=0));
LA21: ;
if (!LOC18) goto LA22;
memset((void*)LOC24, 0, sizeof(LOC24));
LOC25 = (Ropeobj178006*)0;
LOC25 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_623), LOC24, 0);
line_532690_839829468(p0, ((Tcprocsection529011) 2), LOC25);
}
goto LA16;
LA22: ;
{
TY533289 LOC27;
memset((void*)LOC27, 0, sizeof(LOC27));
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_642), LOC27, 0);
}
LA16: ;
}
LA9: ;
}
/* Intentionally empty: type sections produce no statement-level C code
 * (types are emitted elsewhere by the code generator). */
N_NIMCALL(void, gentypesection_538184_839829468)(Tcgen529027* m0, Tnode292802* n0) {
}
/* Maps an emit-pragma argument node to the C file section it belongs in.
 * Default is section 7; if the node's first son is a string literal
 * (node kinds 20..22) its prefix selects section 3, 9 or 1 instead.
 * The three prefix constants T839829468_643/644/645 are emitted elsewhere
 * (likely "/*TYPESECTION*" -style markers — TODO confirm against constants). */
N_NIMCALL(Tcfilesection529005, determinesection_548819_839829468)(Tnode292802* n0) {
Tcfilesection529005 result0;
result0 = (Tcfilesection529005)0;
result0 = ((Tcfilesection529005) 7);
{
NIM_BOOL LOC3;
NI LOC4;
NimStringDesc* sec0;
LOC3 = (NIM_BOOL)0;
LOC4 = (NI)0;
LOC4 = len_293081_850551059(n0);
LOC3 = (((NI) 1) <= LOC4);
if (!(LOC3)) goto LA5;
LOC3 = ((*(*n0).kindU.S6.sons->data[((NI) 0)]).kind >= ((Tnodekind292020) 20) && (*(*n0).kindU.S6.sons->data[((NI) 0)]).kind <= ((Tnodekind292020) 22));
LA5: ;
if (!LOC3) goto LA6;
sec0 = (*(*n0).kindU.S6.sons->data[((NI) 0)]).kindU.S3.strval;
{
NIM_BOOL LOC10;
LOC10 = (NIM_BOOL)0;
LOC10 = nsuStartsWith(sec0, ((NimStringDesc*) &T839829468_643));
if (!LOC10) goto LA11;
result0 = ((Tcfilesection529005) 3);
}
goto LA8;
LA11: ;
{
NIM_BOOL LOC14;
LOC14 = (NIM_BOOL)0;
LOC14 = nsuStartsWith(sec0, ((NimStringDesc*) &T839829468_644));
if (!LOC14) goto LA15;
result0 = ((Tcfilesection529005) 9);
}
goto LA8;
LA15: ;
{
NIM_BOOL LOC18;
LOC18 = (NIM_BOOL)0;
LOC18 = nsuStartsWith(sec0, ((NimStringDesc*) &T839829468_645));
if (!LOC18) goto LA19;
result0 = ((Tcfilesection529005) 1);
}
goto LA8;
LA19: ;
LA8: ;
}
LA6: ;
return result0;
}
/* Emits the code of an `emit` pragma node t0. At module level
 * ((*p0).prc == NIM_NIL) the text goes into the file section chosen by
 * determinesection; inside a proc it is emitted into the proc body. */
N_NIMCALL(void, genemit_548839_839829468)(Tcproc529021* p0, Tnode292802* t0) {
Ropeobj178006* s0;
s0 = genasmoremitstmt_548529_839829468(p0, (*t0).kindU.S6.sons->data[((NI) 1)], NIM_FALSE);
{
Tcfilesection529005 section0;
Tnode292802* LOC5;
if (!((*p0).prc == NIM_NIL)) goto LA3;
LOC5 = (Tnode292802*)0;
LOC5 = HEX5BHEX5D_293238_850551059(t0, ((NI) 1));
section0 = determinesection_548819_839829468(LOC5);
genclinedir_532813_839829468(&(*(*p0).module).s[(section0)- 0], (*t0).info);
add_178482_2381377266(&(*(*p0).module).s[(section0)- 0], s0);
}
goto LA1;
LA3: ;
{
genlinedir_532823_839829468(p0, t0);
line_532690_839829468(p0, ((Tcprocsection529011) 2), s0);
}
LA1: ;
}
/* Emits a debugger breakpoint registration for pragma node t0, guarded by
 * proc option bit 17 (presumably the endb/debugger option — TODO confirm).
 * The breakpoint name is either the pragma's string argument (normalized)
 * or an auto-generated "<prefix><counter>" from the global breakpointid. */
N_NIMCALL(void, genbreakpoint_548862_839829468)(Tcproc529021* p0, Tnode292802* t0) {
NimStringDesc* name0;
name0 = (NimStringDesc*)0;
{
TY535238 LOC12;
NI LOC13;
NimStringDesc* LOC14;
if (!(((*p0).options &(1U<<((NU)(((Toption169009) 17))&31U)))!=0)) goto LA3;
{
/* Node kind 34: pragma carries an explicit name argument in son 1. */
if (!((*t0).kind == ((Tnodekind292020) 34))) goto LA7;
name0 = nsuNormalize((*(*t0).kindU.S6.sons->data[((NI) 1)]).kindU.S3.strval);
}
goto LA5;
LA7: ;
{
NimStringDesc* LOC10;
NimStringDesc* LOC11;
breakpointid_548860_839829468 += ((NI) 1);
LOC10 = (NimStringDesc*)0;
LOC11 = (NimStringDesc*)0;
LOC11 = nimIntToStr(breakpointid_548860_839829468);
LOC10 = rawNewString(LOC11->Sup.len + 2);
appendString(LOC10, ((NimStringDesc*) &T839829468_646));
appendString(LOC10, LOC11);
name0 = LOC10;
}
LA5: ;
genlinedir_532823_839829468(p0, t0);
/* Registration arguments: line number, file name, breakpoint name. */
memset((void*)LOC12, 0, sizeof(LOC12));
LOC13 = (NI)0;
LOC13 = tolinenumber_192415_155036129((*t0).info);
LOC12[0] = rope_178401_2381377266(((NI64) (LOC13)));
LOC14 = (NimStringDesc*)0;
LOC14 = tofilename_192260_155036129((*t0).info.fileindex);
LOC12[1] = makecstring_191638_155036129(LOC14);
LOC12[2] = makecstring_191638_155036129(name0);
appcg_532632_839829468((*p0).module, &gbreakpoints_548861_839829468, ((NimStringDesc*) &T839829468_647), LOC12, 3);
}
LA3: ;
}
/* Emits a debugger watchpoint registration for pragma node n0: address of
 * the watched expression, its rendered source text, and its type info.
 * No-op unless proc option bit 17 (debugger support) is set. */
N_NIMCALL(void, genwatchpoint_549016_839829468)(Tcproc529021* p0, Tnode292802* n0) {
Tloc292816 a0;
Ttype292840* typ0;
TY535238 LOC5;
NimStringDesc* LOC6;
{ {
if (!!((((*p0).options &(1U<<((NU)(((Toption169009) 17))&31U)))!=0))) goto LA3;
goto BeforeRet;
}
LA3: ;
memset((void*)(&a0), 0, sizeof(a0));
initlocexpr_539283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 1)], (&a0));
typ0 = skiptypes_296099_850551059((*(*n0).kindU.S6.sons->data[((NI) 1)]).typ, IL64(211106242013440));
memset((void*)LOC5, 0, sizeof(LOC5));
LOC5[0] = addrloc_538204_839829468(a0);
LOC6 = (NimStringDesc*)0;
LOC6 = rendertree_311044_382274130((*n0).kindU.S6.sons->data[((NI) 1)], 0);
LOC5[1] = makecstring_191638_155036129(LOC6);
LOC5[2] = gentypeinfo_535941_839829468((*p0).module, typ0);
linecg_532707_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_648), LOC5, 3);
}BeforeRet: ;
}
/* Walks the sons of a pragma statement node n0 and dispatches each child
 * on its special-word id: 191 -> emit, 131 -> breakpoint, 176 -> watchpoint,
 * 183 -> an "inject statement" compiled in a fresh proc context whose body
 * rope is stored in the module's injectstmt. Unknown pragmas are ignored. */
N_NIMCALL(void, genpragma_549039_839829468)(Tcproc529021* p_549041_839829468, Tnode292802* n0) {
{
NI i_549054_839829468;
NI HEX3Atmp_549073_839829468;
NI LOC2;
NI res_549076_839829468;
i_549054_839829468 = (NI)0;
HEX3Atmp_549073_839829468 = (NI)0;
LOC2 = (NI)0;
LOC2 = sonslen_295351_850551059(n0);
HEX3Atmp_549073_839829468 = (NI)(LOC2 - ((NI) 1));
res_549076_839829468 = ((NI) 0);
{
while (1) {
Tnode292802* it0;
Tspecialword275003 LOC5;
if (!(res_549076_839829468 <= HEX3Atmp_549073_839829468)) goto LA4;
i_549054_839829468 = res_549076_839829468;
it0 = (*n0).kindU.S6.sons->data[i_549054_839829468];
LOC5 = (Tspecialword275003)0;
LOC5 = whichpragma_318911_2616423590(it0);
switch (LOC5) {
case ((Tspecialword275003) 191):
{
genemit_548839_839829468(p_549041_839829468, it0);
}
break;
case ((Tspecialword275003) 131):
{
genbreakpoint_548862_839829468(p_549041_839829468, it0);
}
break;
case ((Tspecialword275003) 176):
{
genwatchpoint_549016_839829468(p_549041_839829468, it0);
}
break;
case ((Tspecialword275003) 183):
{
Tcproc529021* p0;
Ropeobj178006** LOC10;
/* Compile the pragma's statement in a throw-away proc with
 * options bits 15..16 cleared (mask 98304 == 0x18000). */
p0 = newproc_529206_3723162438(NIM_NIL, (*p_549041_839829468).module);
(*p0).options = ((*p0).options & ~ 98304);
genstmts_539244_839829468(p0, (*it0).kindU.S6.sons->data[((NI) 1)]);
LOC10 = (Ropeobj178006**)0;
LOC10 = s_529179_3723162438(p0, ((Tcprocsection529011) 2));
asgnRefNoCycle((void**) (&(*(*p0).module).injectstmt), (*LOC10));
}
break;
default:
{
}
break;
}
res_549076_839829468 += ((NI) 1);
} LA4: ;
}
}
}
/* Emits a `parallel for` statement: declares the loop variable, evaluates
 * the range bounds from the `||` call (sons 1 and 2), emits an OpenMP-style
 * loop header via format T839829468_649 (son 3 supplies the annotation
 * string), then compiles the loop body inside a fresh break block. */
N_NIMCALL(void, genparforstmt_546208_839829468)(Tcproc529021* p0, Tnode292802* t0) {
NI oldbreakidx_546411_839829468;
Tsym292834* forloopvar0;
Tloc292816 rangea0;
Tloc292816 rangeb0;
Tnode292802* call0;
TY535235 LOC1;
NimStringDesc* LOC2;
TY533289 LOC3;
(*p0).withinloop += ((NI) 1);
genlinedir_532823_839829468(p0, t0);
oldbreakidx_546411_839829468 = (*p0).breakidx;
forloopvar0 = (*(*t0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym;
memset((void*)(&rangea0), 0, sizeof(rangea0));
memset((void*)(&rangeb0), 0, sizeof(rangeb0));
assignlocalvar_538614_839829468(p0, forloopvar0);
call0 = (*t0).kindU.S6.sons->data[((NI) 1)];
initlocexpr_539283_839829468(p0, (*call0).kindU.S6.sons->data[((NI) 1)], (&rangea0));
initlocexpr_539283_839829468(p0, (*call0).kindU.S6.sons->data[((NI) 2)], (&rangeb0));
memset((void*)LOC1, 0, sizeof(LOC1));
LOC1[0] = rdloc_538188_839829468((*forloopvar0).loc);
LOC1[1] = rdloc_538188_839829468(rangea0);
LOC1[2] = rdloc_538188_839829468(rangeb0);
LOC2 = (NimStringDesc*)0;
LOC2 = getstr_297230_850551059((*call0).kindU.S6.sons->data[((NI) 3)]);
LOC1[3] = rope_178277_2381377266(LOC2);
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_649), LOC1, 4);
memset((void*)LOC3, 0, sizeof(LOC3));
(*p0).breakidx = startblock_543978_839829468(p0, ((NimStringDesc*) &T839829468_273), LOC3, 0);
(*p0).blocks->data[(*p0).breakidx].isloop = NIM_TRUE;
genstmts_539244_839829468(p0, (*t0).kindU.S6.sons->data[((NI) 2)]);
endblock_544060_839829468(p0);
/* Restore outer break target and loop depth. */
(*p0).breakidx = oldbreakidx_546411_839829468;
(*p0).withinloop -= ((NI) 1);
}
/* Emits a state label for closure-iterator state machines. Expects n0 to
 * have exactly one son that is an integer literal (kind 6); otherwise
 * raises an internal compiler error. The literal becomes the state index
 * interpolated into format T839829468_652. */
N_NIMCALL(void, genstate_544117_839829468)(Tcproc529021* p0, Tnode292802* n0) {
NI64 idx0;
TY178507 LOC9;
{
NIM_BOOL LOC3;
NI LOC4;
NimStringDesc* LOC8;
LOC3 = (NIM_BOOL)0;
LOC4 = (NI)0;
LOC4 = len_293081_850551059(n0);
LOC3 = (LOC4 == ((NI) 1));
if (!(LOC3)) goto LA5;
LOC3 = ((*(*n0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind292020) 6));
LA5: ;
if (!!(LOC3)) goto LA6;
LOC8 = (NimStringDesc*)0;
LOC8 = HEX24_196185_1689653243(T839829468_650);
internalerror_196113_155036129(LOC8);
}
LA6: ;
idx0 = (*(*n0).kindU.S6.sons->data[((NI) 0)]).kindU.S1.intval;
memset((void*)LOC9, 0, sizeof(LOC9));
LOC9[0] = rope_178401_2381377266(idx0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_652), LOC9, 1);
}
/* Emits a computed goto to a state label: assigns the state expression,
 * opens a switch (format T839829468_653), emits one case-goto per possible
 * state value from 0..lastord of the expression's type, then closes the
 * switch. Also marks that a BeforeRet label is needed in this proc. */
N_NIMCALL(void, gengotostate_544144_839829468)(Tcproc529021* p0, Tnode292802* n0) {
Tloc292816 a0;
TY178507 LOC1;
TY533289 LOC2;
TY533289 LOC7;
memset((void*)(&a0), 0, sizeof(a0));
initlocexpr_539283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (&a0));
memset((void*)LOC1, 0, sizeof(LOC1));
LOC1[0] = rdloc_538188_839829468(a0);
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_603), LOC1, 1);
(*p0).beforeretneeded = NIM_TRUE;
memset((void*)LOC2, 0, sizeof(LOC2));
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_653), LOC2, 0);
{
NI64 i_544214_839829468;
NI64 HEX3Atmp_544223_839829468;
NI64 res_544226_839829468;
i_544214_839829468 = (NI64)0;
HEX3Atmp_544223_839829468 = (NI64)0;
HEX3Atmp_544223_839829468 = lastord_320004_3876443242((*(*n0).kindU.S6.sons->data[((NI) 0)]).typ);
res_544226_839829468 = IL64(0);
{
while (1) {
TY178507 LOC6;
if (!(res_544226_839829468 <= HEX3Atmp_544223_839829468)) goto LA5;
i_544214_839829468 = res_544226_839829468;
memset((void*)LOC6, 0, sizeof(LOC6));
LOC6[0] = rope_178401_2381377266(i_544214_839829468);
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_654), LOC6, 1);
res_544226_839829468 += ((NI) 1);
} LA5: ;
}
}
memset((void*)LOC7, 0, sizeof(LOC7));
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_160), LOC7, 0);
}
/* Emits the "break out of state machine" test. If the state operand is a
 * closure construction (node kind 155) its environment field (son 1) is
 * read through format T839829468_655; otherwise the operand itself is
 * read through format T839829468_656. */
N_NIMCALL(void, genbreakstate_544229_839829468)(Tcproc529021* p0, Tnode292802* n0) {
Tloc292816 a0;
memset((void*)(&a0), 0, sizeof(a0));
{
TY178507 LOC5;
if (!((*(*n0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind292020) 155))) goto LA3;
initlocexpr_539283_839829468(p0, (*(*n0).kindU.S6.sons->data[((NI) 0)]).kindU.S6.sons->data[((NI) 1)], (&a0));
memset((void*)LOC5, 0, sizeof(LOC5));
LOC5[0] = rdloc_538188_839829468(a0);
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_655), LOC5, 1);
}
goto LA1;
LA3: ;
{
TY178507 LOC7;
initlocexpr_539283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (&a0));
memset((void*)LOC7, 0, sizeof(LOC7));
LOC7[0] = rdloc_538188_839829468(a0);
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_656), LOC7, 1);
}
LA1: ;
}
/* Central expression/statement code-generation dispatcher: switches on the
 * AST node kind of n0 and emits C code into proc context p0, leaving the
 * result location (if any) in d0. Generated Nim-compiler output — the
 * numeric node/symbol-kind constants and LAnn goto labels mirror the
 * original structured source; do not reorder or renumber. */
N_NIMCALL(void, expr_539248_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0) {
switch ((*n0).kind) {
/* Symbol node: dispatch further on the symbol's kind. */
case ((Tnodekind292020) 3):
{
Tsym292834* sym0;
sym0 = (*n0).kindU.S4.sym;
switch ((*sym0).kind) {
case ((Tsymkind292435) 13):
{
{
if (!!(((33554448 & (*sym0).flags) == 0))) goto LA5;
fillprocloc_539201_839829468(sym0);
genprocprototype_539254_839829468((*p0).module, sym0);
}
goto LA3;
LA5: ;
{
genproc_532951_839829468((*p0).module, sym0);
}
LA3: ;
putlocintodest_539258_839829468(p0, d0, (*sym0).loc);
}
break;
case ((Tsymkind292435) 12):
case ((Tsymkind292435) 15):
case ((Tsymkind292435) 14):
{
{
NimStringDesc* LOC13;
if (!(((*sym0).flags &(1U<<((NU)(((Tsymflag292184) 23))&31U)))!=0)) goto LA11;
LOC13 = (NimStringDesc*)0;
LOC13 = rawNewString((*(*sym0).name).s->Sup.len + 48);
appendString(LOC13, ((NimStringDesc*) &T839829468_270));
appendString(LOC13, (*(*sym0).name).s);
localerror_196085_155036129((*n0).info, LOC13);
}
LA11: ;
genproc_532951_839829468((*p0).module, sym0);
{
NIM_BOOL LOC16;
NimStringDesc* LOC20;
LOC16 = (NIM_BOOL)0;
LOC16 = ((*sym0).loc.r == NIM_NIL);
if (LOC16) goto LA17;
LOC16 = ((*sym0).loc.t == NIM_NIL);
LA17: ;
if (!LOC16) goto LA18;
LOC20 = (NimStringDesc*)0;
LOC20 = rawNewString((*(*sym0).name).s->Sup.len + 20);
appendString(LOC20, ((NimStringDesc*) &T839829468_271));
appendString(LOC20, (*(*sym0).name).s);
internalerror_196100_155036129((*n0).info, LOC20);
}
LA18: ;
putlocintodest_539258_839829468(p0, d0, (*sym0).loc);
}
break;
case ((Tsymkind292435) 10):
{
{
NIM_BOOL LOC24;
Ropeobj178006* LOC27;
LOC24 = (NIM_BOOL)0;
LOC24 = issimpleconst_532311_839829468((*sym0).typ);
if (!LOC24) goto LA25;
LOC27 = (Ropeobj178006*)0;
LOC27 = genliteral_549476_839829468(p0, (*sym0).ast, (*sym0).typ);
putintodest_550468_839829468(p0, d0, (*n0).typ, LOC27, ((Tstorageloc292812) 1));
}
goto LA22;
LA25: ;
{
gencomplexconst_558249_839829468(p0, sym0, d0);
}
LA22: ;
}
break;
case ((Tsymkind292435) 19):
{
Ropeobj178006* LOC30;
LOC30 = (Ropeobj178006*)0;
LOC30 = rope_178401_2381377266(((NI64) ((*sym0).position)));
putintodest_550468_839829468(p0, d0, (*n0).typ, LOC30, ((Tstorageloc292812) 0));
}
break;
case ((Tsymkind292435) 8):
case ((Tsymkind292435) 20):
case ((Tsymkind292435) 11):
case ((Tsymkind292435) 9):
{
{
if (!!(((4194312 & (*sym0).flags) == 0))) goto LA34;
genvarprototype_539236_839829468((*p0).module, sym0);
}
LA34: ;
{
NIM_BOOL LOC38;
NimStringDesc* LOC42;
NimStringDesc* LOC43;
LOC38 = (NIM_BOOL)0;
LOC38 = ((*sym0).loc.r == NIM_NIL);
if (LOC38) goto LA39;
LOC38 = ((*sym0).loc.t == NIM_NIL);
LA39: ;
if (!LOC38) goto LA40;
LOC42 = (NimStringDesc*)0;
LOC43 = (NimStringDesc*)0;
LOC43 = nimIntToStr((*sym0).Sup.id);
LOC42 = rawNewString((*(*sym0).name).s->Sup.len + LOC43->Sup.len + 20);
appendString(LOC42, ((NimStringDesc*) &T839829468_285));
appendString(LOC42, (*(*sym0).name).s);
appendString(LOC42, ((NimStringDesc*) &T839829468_12));
appendString(LOC42, LOC43);
internalerror_196100_155036129((*n0).info, LOC42);
}
LA40: ;
{
/* Thread-local variable access (symbol flag 22). */
if (!(((*sym0).flags &(1U<<((NU)(((Tsymflag292184) 22))&31U)))!=0)) goto LA46;
accessthreadlocalvar_532945_839829468(p0, sym0);
{
NIM_BOOL LOC50;
Ropeobj178006* LOC53;
LOC50 = (NIM_BOOL)0;
LOC50 = emulatedthreadvars_532949_839829468();
if (!LOC50) goto LA51;
LOC53 = (Ropeobj178006*)0;
LOC53 = HEX26_178452_2381377266(((NimStringDesc*) &T839829468_288), (*sym0).loc.r);
putintodest_550468_839829468(p0, d0, (*sym0).loc.t, LOC53, ((Tstorageloc292812) 0));
}
goto LA48;
LA51: ;
{
putlocintodest_539258_839829468(p0, d0, (*sym0).loc);
}
LA48: ;
}
goto LA44;
LA46: ;
{
putlocintodest_539258_839829468(p0, d0, (*sym0).loc);
}
LA44: ;
}
break;
case ((Tsymkind292435) 5):
{
{
NIM_BOOL LOC59;
NimStringDesc* LOC63;
NimStringDesc* LOC64;
LOC59 = (NIM_BOOL)0;
LOC59 = ((*sym0).loc.r == NIM_NIL);
if (LOC59) goto LA60;
LOC59 = ((*sym0).loc.t == NIM_NIL);
LA60: ;
if (!LOC59) goto LA61;
LOC63 = (NimStringDesc*)0;
LOC64 = (NimStringDesc*)0;
LOC64 = nimIntToStr((*sym0).Sup.id);
LOC63 = rawNewString((*(*sym0).name).s->Sup.len + LOC64->Sup.len + 21);
appendString(LOC63, ((NimStringDesc*) &T839829468_289));
appendString(LOC63, (*(*sym0).name).s);
appendString(LOC63, ((NimStringDesc*) &T839829468_12));
appendString(LOC63, LOC64);
internalerror_196100_155036129((*n0).info, LOC63);
}
LA61: ;
putlocintodest_539258_839829468(p0, d0, (*sym0).loc);
}
break;
case ((Tsymkind292435) 3):
{
{
NIM_BOOL LOC68;
NimStringDesc* LOC72;
NimStringDesc* LOC73;
LOC68 = (NIM_BOOL)0;
LOC68 = ((*sym0).loc.r == NIM_NIL);
if (LOC68) goto LA69;
LOC68 = ((*sym0).loc.t == NIM_NIL);
LA69: ;
if (!LOC68) goto LA70;
LOC72 = (NimStringDesc*)0;
LOC73 = (NimStringDesc*)0;
LOC73 = nimIntToStr((*sym0).Sup.id);
LOC72 = rawNewString((*(*sym0).name).s->Sup.len + LOC73->Sup.len + 22);
appendString(LOC72, ((NimStringDesc*) &T839829468_290));
appendString(LOC72, (*(*sym0).name).s);
appendString(LOC72, ((NimStringDesc*) &T839829468_12));
appendString(LOC72, LOC73);
internalerror_196100_155036129((*n0).info, LOC72);
}
LA70: ;
putlocintodest_539258_839829468(p0, d0, (*sym0).loc);
}
break;
default:
{
NimStringDesc* LOC75;
LOC75 = (NimStringDesc*)0;
LOC75 = rawNewString(reprEnum((NI)(*sym0).kind, (&NTI292435))->Sup.len + 22);
appendString(LOC75, ((NimStringDesc*) &T839829468_291));
appendString(LOC75, reprEnum((NI)(*sym0).kind, (&NTI292435)));
appendString(LOC75, ((NimStringDesc*) &T839829468_292));
internalerror_196100_155036129((*n0).info, LOC75);
}
break;
}
}
break;
/* Literal nodes. */
case ((Tnodekind292020) 23):
{
{
NIM_BOOL LOC79;
Ropeobj178006* LOC82;
LOC79 = (NIM_BOOL)0;
LOC79 = isemptytype_297440_850551059((*n0).typ);
if (!!(LOC79)) goto LA80;
LOC82 = (Ropeobj178006*)0;
LOC82 = genliteral_539273_839829468(p0, n0);
putintodest_550468_839829468(p0, d0, (*n0).typ, LOC82, ((Tstorageloc292812) 0));
}
LA80: ;
}
break;
case ((Tnodekind292020) 20) ... ((Tnodekind292020) 22):
{
Ropeobj178006* LOC84;
LOC84 = (Ropeobj178006*)0;
LOC84 = genliteral_539273_839829468(p0, n0);
putdataintodest_550436_839829468(p0, d0, (*n0).typ, LOC84);
}
break;
case ((Tnodekind292020) 6) ... ((Tnodekind292020) 15):
case ((Tnodekind292020) 16) ... ((Tnodekind292020) 19):
case ((Tnodekind292020) 5):
{
Ropeobj178006* LOC86;
LOC86 = (Ropeobj178006*)0;
LOC86 = genliteral_539273_839829468(p0, n0);
putintodest_550468_839829468(p0, d0, (*n0).typ, LOC86, ((Tstorageloc292812) 0));
}
break;
/* Call-like nodes: magic calls go through genmagicexpr, the rest through
 * gencall; a void call (typ == 0) uses a throw-away destination loc. */
case ((Tnodekind292020) 27):
case ((Tnodekind292020) 32):
case ((Tnodekind292020) 29):
case ((Tnodekind292020) 30):
case ((Tnodekind292020) 31):
case ((Tnodekind292020) 26):
case ((Tnodekind292020) 28):
{
Tnode292802* op0;
genlinedir_532823_839829468(p0, n0);
op0 = (*n0).kindU.S6.sons->data[((NI) 0)];
{
Tloc292816 a0;
if (!(*n0).typ == 0) goto LA90;
memset((void*)(&a0), 0, sizeof(a0));
{
NIM_BOOL LOC94;
LOC94 = (NIM_BOOL)0;
LOC94 = ((*op0).kind == ((Tnodekind292020) 3));
if (!(LOC94)) goto LA95;
LOC94 = !(((*(*op0).kindU.S4.sym).magic == ((Tmagic292524) 0)));
LA95: ;
if (!LOC94) goto LA96;
genmagicexpr_557033_839829468(p0, n0, (&a0), (*(*op0).kindU.S4.sym).magic);
}
goto LA92;
LA96: ;
{
gencall_543632_839829468(p0, n0, (&a0));
}
LA92: ;
}
goto LA88;
LA90: ;
{
{
NIM_BOOL LOC102;
LOC102 = (NIM_BOOL)0;
LOC102 = ((*op0).kind == ((Tnodekind292020) 3));
if (!(LOC102)) goto LA103;
LOC102 = !(((*(*op0).kindU.S4.sym).magic == ((Tmagic292524) 0)));
LA103: ;
if (!LOC102) goto LA104;
genmagicexpr_557033_839829468(p0, n0, d0, (*(*op0).kindU.S4.sym).magic);
}
goto LA100;
LA104: ;
{
gencall_543632_839829468(p0, n0, d0);
}
LA100: ;
}
LA88: ;
}
break;
/* Set constructor. */
case ((Tnodekind292020) 39):
{
{
NIM_BOOL LOC110;
NI LOC112;
Ropeobj178006* LOC115;
LOC110 = (NIM_BOOL)0;
LOC110 = isdeepconstexpr_318566_2616423590(n0);
if (!(LOC110)) goto LA111;
LOC112 = (NI)0;
LOC112 = len_293081_850551059(n0);
LOC110 = !((LOC112 == ((NI) 0)));
LA111: ;
if (!LOC110) goto LA113;
LOC115 = (Ropeobj178006*)0;
LOC115 = gensetnode_549664_839829468(p0, n0);
putintodest_550468_839829468(p0, d0, (*n0).typ, LOC115, ((Tstorageloc292812) 0));
}
goto LA108;
LA113: ;
{
gensetconstr_557496_839829468(p0, n0, d0);
}
LA108: ;
}
break;
/* Array/seq constructor. */
case ((Tnodekind292020) 41):
{
{
NIM_BOOL LOC120;
NI LOC122;
LOC120 = (NIM_BOOL)0;
LOC120 = isdeepconstexpr_318566_2616423590(n0);
if (!(LOC120)) goto LA121;
LOC122 = (NI)0;
LOC122 = len_293081_850551059(n0);
LOC120 = !((LOC122 == ((NI) 0)));
LA121: ;
if (!LOC120) goto LA123;
exprcomplexconst_558684_839829468(p0, n0, d0);
}
goto LA118;
LA123: ;
{
Ttype292840* LOC126;
LOC126 = (Ttype292840*)0;
LOC126 = skiptypes_296099_850551059((*n0).typ, IL64(211106242013440));
if (!((*LOC126).kind == ((Ttypekind292244) 24))) goto LA127;
genseqconstr_555004_839829468(p0, n0, d0);
}
goto LA118;
LA127: ;
{
genarrayconstr_558207_839829468(p0, n0, d0);
}
LA118: ;
}
break;
/* Tuple constructor. */
case ((Tnodekind292020) 37):
{
{
NIM_BOOL LOC133;
NI LOC135;
LOC133 = (NIM_BOOL)0;
LOC133 = isdeepconstexpr_318566_2616423590(n0);
if (!(LOC133)) goto LA134;
LOC135 = (NI)0;
LOC135 = len_293081_850551059(n0);
LOC133 = !((LOC135 == ((NI) 0)));
LA134: ;
if (!LOC133) goto LA136;
exprcomplexconst_558684_839829468(p0, n0, d0);
}
goto LA131;
LA136: ;
{
gentupleconstr_557618_839829468(p0, n0, d0);
}
LA131: ;
}
break;
case ((Tnodekind292020) 38):
{
genobjconstr_554903_839829468(p0, n0, d0);
}
break;
case ((Tnodekind292020) 61):
{
gencast_556537_839829468(p0, n0, d0);
}
break;
case ((Tnodekind292020) 58):
case ((Tnodekind292020) 59):
case ((Tnodekind292020) 60):
{
genconv_556632_839829468(p0, n0, d0);
}
break;
case ((Tnodekind292020) 64):
case ((Tnodekind292020) 63):
{
genaddr_553051_839829468(p0, n0, d0);
}
break;
case ((Tnodekind292020) 42):
{
genbracketexpr_554277_839829468(p0, n0, d0);
}
break;
case ((Tnodekind292020) 47):
case ((Tnodekind292020) 65):
{
genderef_543921_839829468(p0, n0, d0, NIM_FALSE);
}
break;
case ((Tnodekind292020) 45):
{
genrecordfield_553448_839829468(p0, n0, d0);
}
break;
case ((Tnodekind292020) 46):
{
gencheckedrecordfield_554046_839829468(p0, n0, d0);
}
break;
case ((Tnodekind292020) 127):
case ((Tnodekind292020) 112):
{
genblock_546083_839829468(p0, n0, d0);
}
break;
case ((Tnodekind292020) 126):
{
genstmtlistexpr_558402_839829468(p0, n0, d0);
}
break;
/* Statement list: generate each son as a statement. */
case ((Tnodekind292020) 115):
{
{
NI i_559023_839829468;
NI HEX3Atmp_559276_839829468;
NI LOC151;
NI res_559279_839829468;
i_559023_839829468 = (NI)0;
HEX3Atmp_559276_839829468 = (NI)0;
LOC151 = (NI)0;
LOC151 = sonslen_295351_850551059(n0);
HEX3Atmp_559276_839829468 = (NI)(LOC151 - ((NI) 1));
res_559279_839829468 = ((NI) 0);
{
while (1) {
if (!(res_559279_839829468 <= HEX3Atmp_559276_839829468)) goto LA153;
i_559023_839829468 = res_559279_839829468;
genstmts_539244_839829468(p0, (*n0).kindU.S6.sons->data[i_559023_839829468]);
res_559279_839829468 += ((NI) 1);
} LA153: ;
}
}
}
break;
case ((Tnodekind292020) 48):
case ((Tnodekind292020) 92):
{
genif_544982_839829468(p0, n0, d0);
}
break;
case ((Tnodekind292020) 93):
{
expr_539248_839829468(p0, (*(*n0).kindU.S6.sons->data[((NI) 1)]).kindU.S6.sons->data[((NI) 0)], d0);
}
break;
case ((Tnodekind292020) 66):
{
downconv_558581_839829468(p0, n0, d0);
}
break;
case ((Tnodekind292020) 67):
{
upconv_558431_839829468(p0, n0, d0);
}
break;
case ((Tnodekind292020) 68):
{
genrangechck_556590_839829468(p0, n0, d0, ((NimStringDesc*) &T839829468_563));
}
break;
case ((Tnodekind292020) 69):
{
genrangechck_556590_839829468(p0, n0, d0, ((NimStringDesc*) &T839829468_564));
}
break;
case ((Tnodekind292020) 70):
{
genrangechck_556590_839829468(p0, n0, d0, ((NimStringDesc*) &T839829468_565));
}
break;
case ((Tnodekind292020) 71):
{
convstrtocstr_556642_839829468(p0, n0, d0);
}
break;
case ((Tnodekind292020) 72):
{
convcstrtostr_556654_839829468(p0, n0, d0);
}
break;
case ((Tnodekind292020) 51):
case ((Tnodekind292020) 52):
{
Tsym292834* sym0;
sym0 = (*(*n0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym;
genproc_532951_839829468((*p0).module, sym0);
{
NIM_BOOL LOC166;
NimStringDesc* LOC170;
LOC166 = (NIM_BOOL)0;
LOC166 = ((*sym0).loc.r == NIM_NIL);
if (LOC166) goto LA167;
LOC166 = ((*sym0).loc.t == NIM_NIL);
LA167: ;
if (!LOC166) goto LA168;
LOC170 = (NimStringDesc*)0;
LOC170 = rawNewString((*(*sym0).name).s->Sup.len + 20);
appendString(LOC170, ((NimStringDesc*) &T839829468_271));
appendString(LOC170, (*(*sym0).name).s);
internalerror_196100_155036129((*n0).info, LOC170);
}
LA168: ;
putlocintodest_539258_839829468(p0, d0, (*sym0).loc);
}
break;
case ((Tnodekind292020) 155):
{
genclosure_557836_839829468(p0, n0, d0);
}
break;
case ((Tnodekind292020) 1):
{
}
break;
case ((Tnodekind292020) 96):
{
genwhilestmt_545984_839829468(p0, n0);
}
break;
case ((Tnodekind292020) 99):
case ((Tnodekind292020) 100):
{
genvarstmt_544854_839829468(p0, n0);
}
break;
case ((Tnodekind292020) 101):
{
genconststmt_544909_839829468(p0, n0);
}
break;
case ((Tnodekind292020) 94):
{
internalerror_196100_155036129((*n0).info, ((NimStringDesc*) &T839829468_594));
}
break;
case ((Tnodekind292020) 97):
{
gencase_547826_839829468(p0, n0, d0);
}
break;
case ((Tnodekind292020) 109):
{
genreturnstmt_545617_839829468(p0, n0);
}
break;
case ((Tnodekind292020) 110):
{
genbreakstmt_546444_839829468(p0, n0);
}
break;
/* Assignments: skipped when node flag 14 is set. */
case ((Tnodekind292020) 73):
{
{
if (!!((((*n0).flags &(1U<<((NU)(((Tnodeflag292427) 14))&15U)))!=0))) goto LA183;
genasgn_549239_839829468(p0, n0, NIM_FALSE);
}
LA183: ;
}
break;
case ((Tnodekind292020) 74):
{
{
if (!!((((*n0).flags &(1U<<((NU)(((Tnodeflag292427) 14))&15U)))!=0))) goto LA188;
genasgn_549239_839829468(p0, n0, !(((*p0).prc == NIM_NIL)));
}
LA188: ;
}
break;
case ((Tnodekind292020) 114):
{
{
Tloc292816 a0;
if (!!(((*(*n0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind292020) 1)))) goto LA193;
genlinedir_532823_839829468(p0, n0);
memset((void*)(&a0), 0, sizeof(a0));
initlocexpr_539283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (&a0));
}
LA193: ;
}
break;
case ((Tnodekind292020) 89):
{
genasmstmt_548659_839829468(p0, n0);
}
break;
/* try statement: C++-style vs setjmp-style, same guard as genraisestmt. */
case ((Tnodekind292020) 106):
{
{
NIM_BOOL LOC199;
NIM_BOOL LOC200;
LOC199 = (NIM_BOOL)0;
LOC200 = (NIM_BOOL)0;
LOC200 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC200) goto LA201;
LOC200 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA201: ;
LOC199 = LOC200;
if (!(LOC199)) goto LA202;
LOC199 = !(((gglobaloptions_169130_2607990831 &((NU64)1<<((NU)(((Tglobaloption169013) 31))&63U)))!=0));
LA202: ;
if (!LOC199) goto LA203;
gentrycpp_547865_839829468(p0, n0, d0);
}
goto LA197;
LA203: ;
{
gentry_548114_839829468(p0, n0, d0);
}
LA197: ;
}
break;
case ((Tnodekind292020) 108):
{
genraisestmt_546828_839829468(p0, n0);
}
break;
case ((Tnodekind292020) 98):
{
gentypesection_538184_839829468((*p0).module, n0);
}
break;
/* Declarative nodes that emit no code. */
case ((Tnodekind292020) 125):
case ((Tnodekind292020) 84):
case ((Tnodekind292020) 121):
case ((Tnodekind292020) 116):
case ((Tnodekind292020) 117):
case ((Tnodekind292020) 118):
case ((Tnodekind292020) 119):
case ((Tnodekind292020) 120):
case ((Tnodekind292020) 83):
case ((Tnodekind292020) 82):
{
}
break;
case ((Tnodekind292020) 90):
{
genpragma_549039_839829468(p0, n0);
}
break;
case ((Tnodekind292020) 91):
{
Tnode292802* LOC211;
LOC211 = (Tnode292802*)0;
LOC211 = lastson_295364_850551059(n0);
expr_539248_839829468(p0, LOC211, d0);
}
break;
/* proc/func/converter definitions: possibly force codegen of the proc
 * when it is an exported/used forward with an empty body exception. */
case ((Tnodekind292020) 79):
case ((Tnodekind292020) 80):
case ((Tnodekind292020) 81):
{
{
Tsym292834* prc0;
if (!((*(*n0).kindU.S6.sons->data[((NI) 2)]).kind == ((Tnodekind292020) 1))) goto LA215;
prc0 = (*(*n0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym;
{
NIM_BOOL LOC219;
Tsym292834* LOC220;
LOC219 = (NIM_BOOL)0;
LOC220 = (Tsym292834*)0;
LOC220 = skipgenericowner_297279_850551059(prc0);
LOC219 = ((*LOC220).kind == ((Tsymkind292435) 6));
if (!(LOC219)) goto LA221;
LOC219 = !((((*prc0).flags &(1U<<((NU)(((Tsymflag292184) 23))&31U)))!=0));
LA221: ;
if (!LOC219) goto LA222;
{
NIM_BOOL LOC226;
NIM_BOOL LOC227;
NIM_BOOL LOC228;
NIM_BOOL LOC229;
Tsym292834* LOC231;
NIM_BOOL LOC234;
LOC226 = (NIM_BOOL)0;
LOC227 = (NIM_BOOL)0;
LOC228 = (NIM_BOOL)0;
LOC229 = (NIM_BOOL)0;
LOC229 = !(((gglobaloptions_169130_2607990831 &((NU64)1<<((NU)(((Tglobaloption169013) 2))&63U)))!=0));
if (!(LOC229)) goto LA230;
LOC231 = (Tsym292834*)0;
LOC231 = getmodule_299123_2984716966(prc0);
LOC229 = !((((*LOC231).flags &(1U<<((NU)(((Tsymflag292184) 25))&31U)))!=0));
LA230: ;
LOC228 = LOC229;
if (LOC228) goto LA232;
LOC228 = ((65600 & (*prc0).flags) == 64);
LA232: ;
LOC227 = LOC228;
if (LOC227) goto LA233;
LOC234 = (NIM_BOOL)0;
LOC234 = (((*prc0).flags &(1U<<((NU)(((Tsymflag292184) 6))&31U)))!=0);
if (!(LOC234)) goto LA235;
LOC234 = (((*prc0).loc.flags &(1U<<((NU)(((Tlocflag292810) 5))&15U)))!=0);
LA235: ;
LOC227 = LOC234;
LA233: ;
LOC226 = LOC227;
if (LOC226) goto LA236;
LOC226 = ((*prc0).kind == ((Tsymkind292435) 13));
LA236: ;
if (!LOC226) goto LA237;
{
NIM_BOOL LOC241;
Tnode292802* LOC242;
LOC241 = (NIM_BOOL)0;
LOC242 = (Tnode292802*)0;
LOC242 = getbody_335227_1724185294(prc0);
LOC241 = !(((*LOC242).kind == ((Tnodekind292020) 1)));
if (LOC241) goto LA243;
LOC241 = (((*prc0).loc.flags &(1U<<((NU)(((Tlocflag292810) 4))&15U)))!=0);
LA243: ;
if (!LOC241) goto LA244;
genproc_532951_839829468((*p0).module, prc0);
}
LA244: ;
}
LA237: ;
}
LA222: ;
}
LA215: ;
}
break;
case ((Tnodekind292020) 95):
{
genparforstmt_546208_839829468(p0, n0);
}
break;
case ((Tnodekind292020) 157):
{
genstate_544117_839829468(p0, n0);
}
break;
case ((Tnodekind292020) 156):
{
gengotostate_544144_839829468(p0, n0);
}
break;
case ((Tnodekind292020) 158):
{
genbreakstate_544229_839829468(p0, n0);
}
break;
default:
{
NimStringDesc* LOC251;
LOC251 = (NimStringDesc*)0;
LOC251 = rawNewString(reprEnum((NI)(*n0).kind, (&NTI292020))->Sup.len + 25);
appendString(LOC251, ((NimStringDesc*) &T839829468_291));
appendString(LOC251, reprEnum((NI)(*n0).kind, (&NTI292020)));
appendString(LOC251, ((NimStringDesc*) &T839829468_657));
internalerror_196100_155036129((*n0).info, LOC251);
}
break;
}
}
/* Generates t0 as a statement: runs expr with a scratch destination and
 * raises an internal error if the expression left a value behind
 * (destination loc kind not in the mask 7, i.e. not "no result"). */
N_NIMCALL(void, genstmts_539244_839829468)(Tcproc529021* p0, Tnode292802* t0) {
Tloc292816 a0;
memset((void*)(&a0), 0, sizeof(a0));
expr_539248_839829468(p0, t0, (&a0));
{
NimStringDesc* LOC5;
if (!!(((7 &(1U<<((NU)(a0.k)&15U)))!=0))) goto LA3;
LOC5 = (NimStringDesc*)0;
LOC5 = HEX24_196185_1689653243(T839829468_658);
internalerror_196113_155036129(LOC5);
}
LA3: ;
}
/* Compiler-pass entry point: compiles top-level node n0 into the module's
 * init proc and returns n0 unchanged. Skips work when the pass context is
 * nil or skipcodegen says the node needs no code. */
N_NIMCALL(Tnode292802*, myprocess_563402_839829468)(Tpasscontext341002* b0, Tnode292802* n0) {
Tnode292802* result0;
Tcgen529027* m0;
{ result0 = (Tnode292802*)0;
result0 = n0;
{
NIM_BOOL LOC3;
LOC3 = (NIM_BOOL)0;
LOC3 = (b0 == NIM_NIL);
if (LOC3) goto LA4;
LOC3 = skipcodegen_341085_2355241294(n0);
LA4: ;
if (!LOC3) goto LA5;
goto BeforeRet;
}
LA5: ;
m0 = ((Tcgen529027*) (b0));
(*(*m0).initproc).options = initprocoptions_562635_839829468(m0);
genstmts_539244_839829468((*m0).initproc, n0);
}BeforeRet: ;
return result0;
}
/* Builds "[owner_]name<suffix0>" as a rope for module symbol m0. The owner
 * prefix (mangled owner name + separator T839829468_12) is added only when
 * flag bits 12..13 (mask 12288 == 0x3000) are both clear. */
N_NIMCALL(Ropeobj178006*, getsomeinitname_561904_839829468)(Tsym292834* m0, NimStringDesc* suffix0) {
Ropeobj178006* result0;
result0 = (Ropeobj178006*)0;
{
NimStringDesc* LOC5;
if (!((12288 & (*m0).flags) == 0)) goto LA3;
LOC5 = (NimStringDesc*)0;
LOC5 = mangle_528847_2036603609((*(*(*m0).owner).name).s);
result0 = rope_178277_2381377266(LOC5);
add_178487_2381377266(&result0, ((NimStringDesc*) &T839829468_12));
}
LA3: ;
add_178487_2381377266(&result0, (*(*m0).name).s);
add_178487_2381377266(&result0, suffix0);
return result0;
}
/* Name of the module's init proc: getsomeinitname with the init suffix
 * constant T839829468_659. */
N_NIMCALL(Ropeobj178006*, getinitname_562235_839829468)(Tsym292834* m0) {
return getsomeinitname_561904_839829468(m0, ((NimStringDesc*) &T839829468_659));
}
/* Nim-compiler-generated: the module's data-init-proc name — getsomeinitname
   with the string constant T839829468_660 (likely "DatInit" — confirm). */
N_NIMCALL(Ropeobj178006*, getdatinitname_562239_839829468)(Tsym292834* m0) {
Ropeobj178006* result0;
result0 = (Ropeobj178006*)0;
result0 = getsomeinitname_561904_839829468(m0, ((NimStringDesc*) &T839829468_660));
return result0;
}
/* Nim-compiler-generated: registers a module's init and datinit procs with
   the global main-module rope accumulators (forward decls into
   mainmodprocs; calls into maindatinit and mainmodinit/othermodsinit).
   The flag tests on bits 13 and 12 of m0->flags presumably distinguish
   "no main registration" and "is main module" — TODO confirm. */
N_NIMCALL(void, registermoduletomain_562243_839829468)(Tsym292834* m0) {
Ropeobj178006* init0;
Ropeobj178006* datinit0;
TY178507 LOC1;
TY178507 LOC2;
init0 = getinitname_562235_839829468(m0);
datinit0 = getdatinitname_562239_839829468(m0);
memset((void*)LOC1, 0, sizeof(LOC1));
LOC1[0] = init0;
addf_179205_2381377266(&mainmodprocs_529148_3723162438, ((NimStringDesc*) &T839829468_661), LOC1, 1);
memset((void*)LOC2, 0, sizeof(LOC2));
LOC2[0] = datinit0;
addf_179205_2381377266(&mainmodprocs_529148_3723162438, ((NimStringDesc*) &T839829468_661), LOC2, 1);
{
TY178507 LOC7;
Ropeobj178006* initcall0;
TY178507 LOC8;
if (!!((((*m0).flags &(1U<<((NU)(((Tsymflag292184) 13))&31U)))!=0))) goto LA5;
memset((void*)LOC7, 0, sizeof(LOC7));
LOC7[0] = datinit0;
addf_179205_2381377266(&maindatinit_529151_3723162438, ((NimStringDesc*) &T839829468_662), LOC7, 1);
memset((void*)LOC8, 0, sizeof(LOC8));
LOC8[0] = init0;
initcall0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_662), LOC8, 1);
{
/* main module's init call goes first; other modules are appended */
if (!(((*m0).flags &(1U<<((NU)(((Tsymflag292184) 12))&31U)))!=0)) goto LA11;
add_178482_2381377266(&mainmodinit_529149_3723162438, initcall0);
}
goto LA9;
LA11: ;
{
add_178482_2381377266(&othermodsinit_529150_3723162438, initcall0);
}
LA9: ;
}
LA5: ;
}
/* Nim-compiler-generated: emits one formatted entry per compiler file-info
   record (its projpath as a C string), for debugger/breakpoint support.
   Also pulls in one runtime symbol via cgsym (result discarded — the call
   is made for its registration side effect, presumably). */
N_NIMCALL(Ropeobj178006*, genfilenames_561688_839829468)(Tcgen529027* m0) {
Ropeobj178006* result0;
Ropeobj178006* LOC1;
result0 = (Ropeobj178006*)0;
LOC1 = (Ropeobj178006*)0;
LOC1 = cgsym_532403_839829468(m0, ((NimStringDesc*) &T839829468_673));
result0 = NIM_NIL;
{
NI i_561717_839829468;
NI HEX3Atmp_561722_839829468;
NI res_561725_839829468;
i_561717_839829468 = (NI)0;
HEX3Atmp_561722_839829468 = (NI)0;
HEX3Atmp_561722_839829468 = ((fileinfos_191629_155036129 ? fileinfos_191629_155036129->Sup.len : 0) - 1);
res_561725_839829468 = ((NI) 0);
{
while (1) {
TY178507 LOC5;
if (!(res_561725_839829468 <= HEX3Atmp_561722_839829468)) goto LA4;
i_561717_839829468 = res_561725_839829468;
memset((void*)LOC5, 0, sizeof(LOC5));
LOC5[0] = makecstring_191638_155036129(fileinfos_191629_155036129->data[i_561717_839829468].projpath);
addf_179205_2381377266(&result0, ((NimStringDesc*) &T839829468_674), LOC5, 1);
res_561725_839829468 += ((NI) 1);
} LA4: ;
}
}
return result0;
}
/* Nim-compiler-generated: emits the program's main entry points. Chooses
   the NimMain/otherMain template pair from target OS and global options
   (the Windows GUI/DLL branch also registers a header file), optionally
   emits breakpoint/filename support, picks a stack-bottom init call, then
   formats the NimMain body (datinit + breakpoints + module inits) and the
   platform main into the module's section 10. Enum ordinals (Tsystemos 2 /
   24, option bit numbers) are opaque here — TODO confirm against
   platform.nim / options.nim. */
N_NIMCALL(void, genmainproc_561729_839829468)(Tcgen529027* m0) {
NimStringDesc* nimmain0;
NimStringDesc* othermain0;
Ropeobj178006* initstackbottomcall0;
TY536475 LOC38;
TY535238 LOC47;
nimmain0 = (NimStringDesc*)0;
othermain0 = (NimStringDesc*)0;
{
NIM_BOOL LOC3;
NIM_BOOL LOC12;
LOC3 = (NIM_BOOL)0;
LOC3 = (targetos_176629_4151366050 == ((Tsystemos176004) 2));
if (!(LOC3)) goto LA4;
LOC3 = !(((gglobaloptions_169130_2607990831 & 1280) == 0));
LA4: ;
if (!LOC3) goto LA5;
{
if (!((gglobaloptions_169130_2607990831 &((NU64)1<<((NU)(((Tglobaloption169013) 10))&63U)))!=0)) goto LA9;
nimmain0 = copyString(((NimStringDesc*) &T839829468_663));
othermain0 = copyString(((NimStringDesc*) &T839829468_664));
}
goto LA7;
LA9: ;
{
nimmain0 = copyString(((NimStringDesc*) &T839829468_665));
othermain0 = copyString(((NimStringDesc*) &T839829468_666));
}
LA7: ;
LOC12 = (NIM_BOOL)0;
LOC12 = includestr_147249_3771138726((&(*m0).headerfiles), ((NimStringDesc*) &T839829468_667));
}
goto LA1;
LA5: ;
{
if (!((gglobaloptions_169130_2607990831 &((NU64)1<<((NU)(((Tglobaloption169013) 8))&63U)))!=0)) goto LA14;
nimmain0 = copyString(((NimStringDesc*) &T839829468_665));
othermain0 = copyString(((NimStringDesc*) &T839829468_668));
}
goto LA1;
LA14: ;
{
if (!(targetos_176629_4151366050 == ((Tsystemos176004) 24))) goto LA17;
nimmain0 = copyString(((NimStringDesc*) &T839829468_669));
othermain0 = copyString(((NimStringDesc*) &T839829468_670));
}
goto LA1;
LA17: ;
{
nimmain0 = copyString(((NimStringDesc*) &T839829468_669));
othermain0 = copyString(((NimStringDesc*) &T839829468_671));
}
LA1: ;
{
Ropeobj178006* LOC24;
/* pull in a runtime symbol when breakpoints are present */
if (!!((gbreakpoints_548861_839829468 == NIM_NIL))) goto LA22;
LOC24 = (Ropeobj178006*)0;
LOC24 = cgsym_532403_839829468(m0, ((NimStringDesc*) &T839829468_672));
}
LA22: ;
{
Ropeobj178006* LOC29;
if (!((goptions_169128_2607990831 &(1U<<((NU)(((Toption169009) 17))&31U)))!=0)) goto LA27;
LOC29 = (Ropeobj178006*)0;
LOC29 = genfilenames_561688_839829468(m0);
add_178482_2381377266(&gbreakpoints_548861_839829468, LOC29);
}
LA27: ;
{
NIM_BOOL LOC32;
LOC32 = (NIM_BOOL)0;
LOC32 = (targetos_176629_4151366050 == ((Tsystemos176004) 24));
if (LOC32) goto LA33;
LOC32 = (gselectedgc_169133_2607990831 == ((Tgcmode169080) 0));
LA33: ;
if (!LOC32) goto LA34;
/* no GC / standalone target: empty stack-bottom call */
initstackbottomcall0 = rope_178277_2381377266(((NimStringDesc*) &T839829468_490));
}
goto LA30;
LA34: ;
{
TY533289 LOC37;
memset((void*)LOC37, 0, sizeof(LOC37));
initstackbottomcall0 = ropecg_532407_839829468(m0, ((NimStringDesc*) &T839829468_675), LOC37, 0);
}
LA30: ;
(*m0).labels += ((NI) 1);
memset((void*)LOC38, 0, sizeof(LOC38));
LOC38[0] = maindatinit_529151_3723162438;
LOC38[1] = gbreakpoints_548861_839829468;
LOC38[2] = othermodsinit_529150_3723162438;
{
NIM_BOOL LOC41;
TY533289 LOC45;
LOC41 = (NIM_BOOL)0;
LOC41 = emulatedthreadvars_532949_839829468();
if (!(LOC41)) goto LA42;
LOC41 = !((targetos_176629_4151366050 == ((Tsystemos176004) 24)));
LA42: ;
if (!LOC41) goto LA43;
memset((void*)LOC45, 0, sizeof(LOC45));
LOC38[3] = ropecg_532407_839829468(m0, ((NimStringDesc*) &T839829468_677), LOC45, 0);
}
goto LA39;
LA43: ;
{
LOC38[3] = rope_178277_2381377266(((NimStringDesc*) &T839829468_490));
}
LA39: ;
LOC38[4] = initstackbottomcall0;
appcg_532632_839829468(m0, &(*m0).s[(((Tcfilesection529005) 10))- 0], ((NimStringDesc*) &T839829468_676), LOC38, 5);
memset((void*)LOC47, 0, sizeof(LOC47));
LOC47[0] = mainmodinit_529149_3723162438;
LOC47[1] = initstackbottomcall0;
LOC47[2] = rope_178401_2381377266(((NI64) ((*m0).labels)));
appcg_532632_839829468(m0, &(*m0).s[(((Tcfilesection529005) 10))- 0], nimmain0, LOC47, 3);
{
TY533289 LOC52;
if (!!(((gglobaloptions_169130_2607990831 &((NU64)1<<((NU)(((Tglobaloption169013) 20))&63U)))!=0))) goto LA50;
memset((void*)LOC52, 0, sizeof(LOC52));
appcg_532632_839829468(m0, &(*m0).s[(((Tcfilesection529005) 10))- 0], othermain0, LOC52, 0);
}
LA50: ;
}
/* Nim-compiler-generated: pass close callback (presumably `myClose` of the
   cgen pass — confirm). Generates the final statements (if any), registers
   the module with main, and — for the main module (flag bit 12, presumably
   sfMainModule) — generates method dispatchers and the main proc. */
N_NIMCALL(Tnode292802*, myclose_563830_839829468)(Tpasscontext341002* b0, Tnode292802* n0) {
Tnode292802* result0;
Tcgen529027* m0;
{ result0 = (Tnode292802*)0;
result0 = n0;
{
NIM_BOOL LOC3;
LOC3 = (NIM_BOOL)0;
LOC3 = (b0 == NIM_NIL);
if (LOC3) goto LA4;
LOC3 = skipcodegen_341085_2355241294(n0);
LA4: ;
if (!LOC3) goto LA5;
goto BeforeRet;
}
LA5: ;
m0 = ((Tcgen529027*) (b0));
{
if (!!((n0 == NIM_NIL))) goto LA9;
(*(*m0).initproc).options = initprocoptions_562635_839829468(m0);
genstmts_539244_839829468((*m0).initproc, n0);
}
LA9: ;
registermoduletomain_562243_839829468((*m0).module);
{
Tnode292802* disp0;
if (!(((*(*m0).module).flags &(1U<<((NU)(((Tsymflag292184) 12))&31U)))!=0)) goto LA13;
(*m0).flags |= ((NU8)1)<<((((Codegenflag529025) 5))%(sizeof(NU8)*8));
disp0 = generatemethoddispatchers_432151_3853300031();
{
NI i_563891_839829468;
NI HEX3Atmp_563895_839829468;
NI LOC16;
NI res_563898_839829468;
i_563891_839829468 = (NI)0;
HEX3Atmp_563895_839829468 = (NI)0;
LOC16 = (NI)0;
LOC16 = sonslen_295351_850551059(disp0);
HEX3Atmp_563895_839829468 = (NI)(LOC16 - ((NI) 1));
res_563898_839829468 = ((NI) 0);
{
while (1) {
if (!(res_563898_839829468 <= HEX3Atmp_563895_839829468)) goto LA18;
i_563891_839829468 = res_563898_839829468;
genprocaux_560284_839829468(m0, (*(*disp0).kindU.S6.sons->data[i_563891_839829468]).kindU.S4.sym);
res_563898_839829468 += ((NI) 1);
} LA18: ;
}
}
genmainproc_561729_839829468(m0);
}
LA13: ;
}BeforeRet: ;
return result0;
}
/* Nim-compiler-generated: drains the module's forwardedprocs seq, code-
   generating each entry (internal error if a proc is still marked with
   flag bit 4 — presumably "forward-declared but never implemented";
   confirm). Note the length is re-read each iteration, so procs appended
   during generation are also processed. Decrements the global counter and
   truncates the seq when done. */
N_NIMCALL(void, finishmodule_563420_839829468)(Tcgen529027* m0) {
NI i0;
i0 = ((NI) 0);
{
while (1) {
Tsym292834* prc0;
if (!(i0 <= ((*m0).forwardedprocs ? ((*m0).forwardedprocs->Sup.len-1) : -1))) goto LA2;
prc0 = (*m0).forwardedprocs->data[i0];
{
NimStringDesc* LOC7;
if (!(((*prc0).flags &(1U<<((NU)(((Tsymflag292184) 4))&31U)))!=0)) goto LA5;
LOC7 = (NimStringDesc*)0;
LOC7 = rawNewString((*(*prc0).name).s->Sup.len + 17);
appendString(LOC7, ((NimStringDesc*) &T839829468_678));
appendString(LOC7, (*(*prc0).name).s);
internalerror_196100_155036129((*prc0).info, LOC7);
}
LA5: ;
genprocnoforward_560906_839829468(m0, prc0);
i0 += ((NI) 1);
} LA2: ;
}
gforwardedprocscounter_529171_3723162438 -= i0;
(*m0).forwardedprocs = (Tsymseq292804*) setLengthSeq(&((*m0).forwardedprocs)->Sup, sizeof(Tsym292834*), ((NI) 0));
}
/* Nim-compiler-generated: assembles the module's init procedure. Emits
   optional TNimNode/TNimType array declarations, then concatenates the
   three proc-sections (0, 1, 2) of preinitproc/initproc/postinitproc in
   order, bracketed by GC-frame and (optionally) stack-trace frame setup/
   teardown. Appends an empty DatInit body, the merge-marked file sections
   12..16, and finally per-digit extension-loader stubs ('0'..'9'). All
   enum ordinals are opaque here — confirm against cgen.nim/cgendata.nim. */
N_NIMCALL(void, geninitcode_562286_839829468)(Tcgen529027* m0) {
Ropeobj178006* initname0;
Ropeobj178006* prc0;
TY178507 LOC1;
Ropeobj178006* LOC12;
Ropeobj178006* LOC13;
Ropeobj178006** LOC14;
Ropeobj178006** LOC15;
Ropeobj178006** LOC16;
Ropeobj178006* LOC17;
Ropeobj178006* LOC33;
Ropeobj178006** LOC34;
Ropeobj178006** LOC35;
Ropeobj178006** LOC36;
Ropeobj178006* LOC37;
Ropeobj178006* LOC38;
Ropeobj178006** LOC39;
Ropeobj178006** LOC40;
Ropeobj178006** LOC41;
Ropeobj178006* LOC42;
Ropeobj178006* LOC50;
TY533289 LOC51;
TY178507 LOC52;
TY533289 LOC58;
initname0 = getinitname_562235_839829468((*m0).module);
memset((void*)LOC1, 0, sizeof(LOC1));
LOC1[0] = initname0;
prc0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_679), LOC1, 1);
{
TY532811 LOC6;
/* declare the TNimNode array if any type nodes were allocated */
if (!(((NI) 0) < (*m0).typenodes)) goto LA4;
memset((void*)LOC6, 0, sizeof(LOC6));
LOC6[0] = (*m0).typenodesname;
LOC6[1] = rope_178401_2381377266(((NI64) ((*m0).typenodes)));
appcg_532632_839829468(m0, &(*m0).s[(((Tcfilesection529005) 12))- 0], ((NimStringDesc*) &T839829468_680), LOC6, 2);
}
LA4: ;
{
TY532811 LOC11;
/* declare the TNimType array if any runtime types were allocated */
if (!(((NI) 0) < (*m0).nimtypes)) goto LA9;
memset((void*)LOC11, 0, sizeof(LOC11));
LOC11[0] = (*m0).nimtypesname;
LOC11[1] = rope_178401_2381377266(((NI64) ((*m0).nimtypes)));
appcg_532632_839829468(m0, &(*m0).s[(((Tcfilesection529005) 12))- 0], ((NimStringDesc*) &T839829468_681), LOC11, 2);
}
LA9: ;
LOC12 = (Ropeobj178006*)0;
LOC12 = initgcframe_538435_839829468((*m0).initproc);
add_178482_2381377266(&prc0, LOC12);
LOC13 = (Ropeobj178006*)0;
LOC13 = gensectionstart_530081_2760143328(((Tcprocsection529011) 0));
add_178482_2381377266(&prc0, LOC13);
LOC14 = (Ropeobj178006**)0;
LOC14 = s_529179_3723162438((*m0).preinitproc, ((Tcprocsection529011) 0));
add_178482_2381377266(&prc0, (*LOC14));
LOC15 = (Ropeobj178006**)0;
LOC15 = s_529179_3723162438((*m0).initproc, ((Tcprocsection529011) 0));
add_178482_2381377266(&prc0, (*LOC15));
LOC16 = (Ropeobj178006**)0;
LOC16 = s_529179_3723162438((*m0).postinitproc, ((Tcprocsection529011) 0));
add_178482_2381377266(&prc0, (*LOC16));
LOC17 = (Ropeobj178006*)0;
LOC17 = gensectionend_530116_2760143328(((Tcprocsection529011) 0));
add_178482_2381377266(&prc0, LOC17);
{
NIM_BOOL LOC20;
LOC20 = (NIM_BOOL)0;
LOC20 = (((*(*m0).initproc).options &(1U<<((NU)(((Toption169009) 15))&31U)))!=0);
if (!(LOC20)) goto LA21;
LOC20 = !((((*m0).flags &(1U<<((NU)(((Codegenflag529025) 2))&7U)))!=0));
LA21: ;
if (!LOC20) goto LA22;
(*m0).flags |= ((NU8)1)<<((((Codegenflag529025) 2))%(sizeof(NU8)*8));
{
Ropeobj178006* procname0;
Ropeobj178006* LOC28;
Ropeobj178006* LOC29;
if (!!((((*m0).flags &(1U<<((NU)(((Codegenflag529025) 0))&7U)))!=0))) goto LA26;
procname0 = makecstring_191638_155036129((*(*(*m0).module).name).s);
LOC28 = (Ropeobj178006*)0;
LOC28 = quotedfilename_196818_155036129((*(*m0).module).info);
LOC29 = (Ropeobj178006*)0;
LOC29 = initframe_560140_839829468((*m0).initproc, procname0, LOC28);
add_178482_2381377266(&prc0, LOC29);
}
goto LA24;
LA26: ;
{
TY533289 LOC31;
Ropeobj178006* LOC32;
memset((void*)LOC31, 0, sizeof(LOC31));
LOC32 = (Ropeobj178006*)0;
LOC32 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_682), LOC31, 0);
add_178482_2381377266(&prc0, LOC32);
}
LA24: ;
}
LA22: ;
LOC33 = (Ropeobj178006*)0;
LOC33 = gensectionstart_530081_2760143328(((Tcprocsection529011) 1));
add_178482_2381377266(&prc0, LOC33);
LOC34 = (Ropeobj178006**)0;
LOC34 = s_529179_3723162438((*m0).preinitproc, ((Tcprocsection529011) 1));
add_178482_2381377266(&prc0, (*LOC34));
LOC35 = (Ropeobj178006**)0;
LOC35 = s_529179_3723162438((*m0).initproc, ((Tcprocsection529011) 1));
add_178482_2381377266(&prc0, (*LOC35));
LOC36 = (Ropeobj178006**)0;
LOC36 = s_529179_3723162438((*m0).postinitproc, ((Tcprocsection529011) 1));
add_178482_2381377266(&prc0, (*LOC36));
LOC37 = (Ropeobj178006*)0;
LOC37 = gensectionend_530116_2760143328(((Tcprocsection529011) 1));
add_178482_2381377266(&prc0, LOC37);
LOC38 = (Ropeobj178006*)0;
LOC38 = gensectionstart_530081_2760143328(((Tcprocsection529011) 2));
add_178482_2381377266(&prc0, LOC38);
LOC39 = (Ropeobj178006**)0;
LOC39 = s_529179_3723162438((*m0).preinitproc, ((Tcprocsection529011) 2));
add_178482_2381377266(&prc0, (*LOC39));
LOC40 = (Ropeobj178006**)0;
LOC40 = s_529179_3723162438((*m0).initproc, ((Tcprocsection529011) 2));
add_178482_2381377266(&prc0, (*LOC40));
LOC41 = (Ropeobj178006**)0;
LOC41 = s_529179_3723162438((*m0).postinitproc, ((Tcprocsection529011) 2));
add_178482_2381377266(&prc0, (*LOC41));
LOC42 = (Ropeobj178006*)0;
LOC42 = gensectionend_530116_2760143328(((Tcprocsection529011) 2));
add_178482_2381377266(&prc0, LOC42);
{
NIM_BOOL LOC45;
Ropeobj178006* LOC49;
LOC45 = (NIM_BOOL)0;
LOC45 = (((*(*m0).initproc).options &(1U<<((NU)(((Toption169009) 15))&31U)))!=0);
if (!(LOC45)) goto LA46;
LOC45 = !((((*m0).flags &(1U<<((NU)(((Codegenflag529025) 0))&7U)))!=0));
LA46: ;
if (!LOC45) goto LA47;
LOC49 = (Ropeobj178006*)0;
LOC49 = deinitframe_560150_839829468((*m0).initproc);
add_178482_2381377266(&prc0, LOC49);
}
LA47: ;
LOC50 = (Ropeobj178006*)0;
LOC50 = deinitgcframe_538441_839829468((*m0).initproc);
add_178482_2381377266(&prc0, LOC50);
memset((void*)LOC51, 0, sizeof(LOC51));
addf_179205_2381377266(&prc0, ((NimStringDesc*) &T839829468_683), LOC51, 0);
memset((void*)LOC52, 0, sizeof(LOC52));
LOC52[0] = getdatinitname_562239_839829468((*m0).module);
addf_179205_2381377266(&prc0, ((NimStringDesc*) &T839829468_679), LOC52, 1);
{
Tcfilesection529005 i_562401_839829468;
NI res_562482_839829468;
i_562401_839829468 = (Tcfilesection529005)0;
res_562482_839829468 = ((NI) 12);
{
while (1) {
Ropeobj178006* LOC56;
Ropeobj178006* LOC57;
if (!(res_562482_839829468 <= ((NI) 16))) goto LA55;
i_562401_839829468 = ((Tcfilesection529005) (res_562482_839829468));
LOC56 = (Ropeobj178006*)0;
LOC56 = gensectionstart_530015_2760143328(i_562401_839829468);
add_178482_2381377266(&prc0, LOC56);
add_178482_2381377266(&prc0, (*m0).s[(i_562401_839829468)- 0]);
LOC57 = (Ropeobj178006*)0;
LOC57 = gensectionend_530050_2760143328(i_562401_839829468);
add_178482_2381377266(&prc0, LOC57);
res_562482_839829468 += ((NI) 1);
} LA55: ;
}
}
memset((void*)LOC58, 0, sizeof(LOC58));
addf_179205_2381377266(&prc0, ((NimStringDesc*) &T839829468_683), LOC58, 0);
add_178482_2381377266(&(*m0).s[(((Tcfilesection529005) 11))- 0], prc0);
{
NIM_CHAR i_562442_839829468;
Ropeobj178006* el_562443_839829468;
TY529136 HEX3Atmp_562487_839829468;
NIM_CHAR i_562490_839829468;
i_562442_839829468 = (NIM_CHAR)0;
el_562443_839829468 = (Ropeobj178006*)0;
memset((void*)HEX3Atmp_562487_839829468, 0, sizeof(HEX3Atmp_562487_839829468));
memcpy((void*)HEX3Atmp_562487_839829468, (NIM_CONST void*)(*m0).extensionloaders, sizeof(HEX3Atmp_562487_839829468));
i_562490_839829468 = 48;
{
if (!((NU8)(((NIM_CHAR) (((NU8)(i_562490_839829468))))) <= (NU8)(57))) goto LA62;
{
while (1) {
i_562442_839829468 = i_562490_839829468;
el_562443_839829468 = HEX3Atmp_562487_839829468[(((NU8)(i_562490_839829468)))- 48];
{
Ropeobj178006* ex0;
TY532811 LOC70;
if (!!((el_562443_839829468 == NIM_NIL))) goto LA68;
memset((void*)LOC70, 0, sizeof(LOC70));
LOC70[0] = rope_178401_2381377266(((NI64) ((NI)(((NI) (((NU8)(i_562442_839829468)))) - ((NI) 48)))));
LOC70[1] = el_562443_839829468;
ex0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_684), LOC70, 2);
add_178482_2381377266(&(*m0).s[(((Tcfilesection529005) 11))- 0], ex0);
}
LA68: ;
{
if (!((NU8)(57) <= (NU8)(((NIM_CHAR) (((NU8)(i_562490_839829468))))))) goto LA73;
goto LA64;
}
LA73: ;
i_562490_839829468 += ((NI) 1);
}
} LA64: ;
}
LA62: ;
}
}
/* Nim-compiler-generated: forces generation of a C type descriptor for
   every entry on the module's typestack (result ropes are discarded; the
   call is made for its side effect of emitting the descriptor). The length
   is re-read each iteration, so types pushed during generation are also
   processed. */
N_NIMCALL(void, finishtypedescriptions_535842_839829468)(Tcgen529027* m0) {
NI i0;
i0 = ((NI) 0);
{
while (1) {
Ropeobj178006* LOC3;
if (!(i0 < ((*m0).typestack ? (*m0).typestack->Sup.len : 0))) goto LA2;
LOC3 = (Ropeobj178006*)0;
LOC3 = gettypedesc_535671_839829468(m0, (*m0).typestack->data[i0]);
i0 += ((NI) 1);
} LA2: ;
}
}
/* Nim-compiler-generated: builds the generated-C-file banner. With global
   option bit 4 set (presumably "compact header" — confirm) only the version
   is formatted; otherwise OS/CPU/compiler names and the compile command for
   cfile0 are also included. */
N_NIMCALL(Ropeobj178006*, getcopyright_561665_839829468)(NimStringDesc* cfile0) {
Ropeobj178006* result0;
result0 = (Ropeobj178006*)0;
{
TY178507 LOC5;
if (!((gglobaloptions_169130_2607990831 &((NU64)1<<((NU)(((Tglobaloption169013) 4))&63U)))!=0)) goto LA3;
memset((void*)LOC5, 0, sizeof(LOC5));
LOC5[0] = rope_178277_2381377266(((NimStringDesc*) &T839829468_686));
result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_685), LOC5, 1);
}
goto LA1;
LA3: ;
{
TY536475 LOC7;
NimStringDesc* LOC8;
memset((void*)LOC7, 0, sizeof(LOC7));
LOC7[0] = rope_178277_2381377266(((NimStringDesc*) &T839829468_686));
LOC7[1] = rope_178277_2381377266(Os_176068_4151366050[(targetos_176629_4151366050)- 1].Field0);
LOC7[2] = rope_178277_2381377266(Cpu_176496_4151366050[(targetcpu_176627_4151366050)- 1].Field0);
LOC7[3] = rope_178277_2381377266(Cc_273413_2528170400[(ccompiler_273431_2528170400)- 1].Field0);
LOC8 = (NimStringDesc*)0;
LOC8 = getcompilecfilecmd_274284_2528170400(cfile0, NIM_FALSE);
LOC7[4] = rope_178277_2381377266(LOC8);
result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_687), LOC7, 5);
}
LA1: ;
return result0;
}
/* Nim-compiler-generated: appends a formatted line to *result0 that embeds
   the target CPU's integer-size field (Cpu[...].Field1) — presumably the
   NIM_INTBITS define for the emitted C — followed by the target newline
   sequence. */
static N_INLINE(void, addinttypes_561659_839829468)(Ropeobj178006** result0) {
NimStringDesc* LOC1;
TY178507 LOC2;
LOC1 = (NimStringDesc*)0;
LOC1 = rawNewString(tnl_176644_4151366050->Sup.len + 22);
appendString(LOC1, ((NimStringDesc*) &T839829468_688));
appendString(LOC1, tnl_176644_4151366050);
memset((void*)LOC2, 0, sizeof(LOC2));
LOC2[0] = rope_178401_2381377266(((NI64) (Cpu_176496_4151366050[(targetcpu_176627_4151366050)- 1].Field1)));
addf_179205_2381377266(result0, LOC1, LOC2, 1);
}
/* Nim-compiler-generated: the full per-file header = copyright banner for
   cfile0 plus the integer-size defines. */
N_NIMCALL(Ropeobj178006*, getfileheader_561683_839829468)(NimStringDesc* cfile0) {
Ropeobj178006* result0;
result0 = (Ropeobj178006*)0;
result0 = getcopyright_561665_839829468(cfile0);
addinttypes_561659_839829468(&result0);
return result0;
}
/* Nim-compiler-generated: when emulated thread-vars exist (nimtv rope is
   non-nil) and this module either uses them (Codegenflag bit 1) or is the
   main module (Tsymflag bit 12), forces type descriptors for all thread-var
   dependency types and emits the thread-var struct into file section 4. */
N_NIMCALL(void, generatethreadlocalstorage_538717_839829468)(Tcgen529027* m0) {
{
NIM_BOOL LOC3;
NIM_BOOL LOC5;
TY178507 LOC13;
LOC3 = (NIM_BOOL)0;
LOC3 = !((nimtv_538656_839829468 == NIM_NIL));
if (!(LOC3)) goto LA4;
LOC5 = (NIM_BOOL)0;
LOC5 = (((*m0).flags &(1U<<((NU)(((Codegenflag529025) 1))&7U)))!=0);
if (LOC5) goto LA6;
LOC5 = (((*(*m0).module).flags &(1U<<((NU)(((Tsymflag292184) 12))&31U)))!=0);
LA6: ;
LOC3 = LOC5;
LA4: ;
if (!LOC3) goto LA7;
{
Ttype292840* t_538761_839829468;
NI i_538768_839829468;
NI L_538770_839829468;
t_538761_839829468 = (Ttype292840*)0;
i_538768_839829468 = ((NI) 0);
L_538770_839829468 = (nimtvdeps_538674_839829468 ? nimtvdeps_538674_839829468->Sup.len : 0);
{
while (1) {
Ropeobj178006* LOC12;
if (!(i_538768_839829468 < L_538770_839829468)) goto LA11;
t_538761_839829468 = nimtvdeps_538674_839829468->data[i_538768_839829468];
LOC12 = (Ropeobj178006*)0;
LOC12 = gettypedesc_535671_839829468(m0, t_538761_839829468);
i_538768_839829468 += ((NI) 1);
} LA11: ;
}
}
memset((void*)LOC13, 0, sizeof(LOC13));
LOC13[0] = nimtv_538656_839829468;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 4))- 0], ((NimStringDesc*) &T839829468_689), LOC13, 1);
}
LA7: ;
}
/* Nim-compiler-generated: writes the #include lines into file section 1.
   Header-file entries starting with '#' are emitted verbatim (with '`'
   replaced by '"'); entries already quoted with '"' or '<' use one format
   template; bare names use another (presumably wrapped in quotes/brackets
   by the template — confirm). */
N_NIMCALL(void, generateheaders_560104_839829468)(Tcgen529027* m0) {
NimStringDesc* LOC1;
Tstrentry147009* it0;
LOC1 = (NimStringDesc*)0;
LOC1 = rawNewString(tnl_176644_4151366050->Sup.len + tnl_176644_4151366050->Sup.len + 20);
appendString(LOC1, tnl_176644_4151366050);
appendString(LOC1, ((NimStringDesc*) &T839829468_690));
appendString(LOC1, tnl_176644_4151366050);
add_178487_2381377266(&(*m0).s[(((Tcfilesection529005) 1))- 0], LOC1);
it0 = ((Tstrentry147009*) ((*m0).headerfiles.head));
{
while (1) {
if (!!((it0 == NIM_NIL))) goto LA3;
{
NimStringDesc* LOC8;
NimStringDesc* LOC9;
Ropeobj178006* LOC10;
/* first char '#' (35): raw directive line */
if (!((NU8)((*it0).data->data[((NI) 0)]) == (NU8)(35))) goto LA6;
LOC8 = (NimStringDesc*)0;
LOC9 = (NimStringDesc*)0;
LOC9 = nsuReplaceChar((*it0).data, 96, 34);
LOC8 = rawNewString(LOC9->Sup.len + tnl_176644_4151366050->Sup.len + 0);
appendString(LOC8, LOC9);
appendString(LOC8, tnl_176644_4151366050);
LOC10 = (Ropeobj178006*)0;
LOC10 = rope_178277_2381377266(LOC8);
add_178482_2381377266(&(*m0).s[(((Tcfilesection529005) 1))- 0], LOC10);
}
goto LA4;
LA6: ;
{
TY178507 LOC14;
/* first char not '"' (34) nor '<' (60): bare header name */
if (!!((((NU8)((*it0).data->data[((NI) 0)])) == ((NU8)(34)) || ((NU8)((*it0).data->data[((NI) 0)])) == ((NU8)(60))))) goto LA12;
memset((void*)LOC14, 0, sizeof(LOC14));
LOC14[0] = rope_178277_2381377266((*it0).data);
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 1))- 0], ((NimStringDesc*) &T839829468_691), LOC14, 1);
}
goto LA4;
LA12: ;
{
TY178507 LOC16;
memset((void*)LOC16, 0, sizeof(LOC16));
LOC16[0] = rope_178277_2381377266((*it0).data);
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 1))- 0], ((NimStringDesc*) &T839829468_692), LOC16, 1);
}
LA4: ;
it0 = ((Tstrentry147009*) ((*it0).Sup.next));
} LA3: ;
}
}
/* Nim-compiler-generated: assembles the complete C source for a module:
   file header, merge info, thread-local storage, includes, then file
   sections 1..10 each bracketed by merge-section markers, and finally
   section 11 (the init/datinit procs). */
N_NIMCALL(Ropeobj178006*, genmodule_562491_839829468)(Tcgen529027* m0, NimStringDesc* cfile0) {
Ropeobj178006* result0;
Ropeobj178006* LOC1;
result0 = (Ropeobj178006*)0;
result0 = getfileheader_561683_839829468(cfile0);
LOC1 = (Ropeobj178006*)0;
LOC1 = genmergeinfo_530203_2760143328(m0);
add_178482_2381377266(&result0, LOC1);
generatethreadlocalstorage_538717_839829468(m0);
generateheaders_560104_839829468(m0);
{
Tcfilesection529005 i_562614_839829468;
NI res_562622_839829468;
i_562614_839829468 = (Tcfilesection529005)0;
res_562622_839829468 = ((NI) 1);
{
while (1) {
Ropeobj178006* LOC5;
Ropeobj178006* LOC6;
if (!(res_562622_839829468 <= ((NI) 10))) goto LA4;
i_562614_839829468 = ((Tcfilesection529005) (res_562622_839829468));
LOC5 = (Ropeobj178006*)0;
LOC5 = gensectionstart_530015_2760143328(i_562614_839829468);
add_178482_2381377266(&result0, LOC5);
add_178482_2381377266(&result0, (*m0).s[(i_562614_839829468)- 0]);
LOC6 = (Ropeobj178006*)0;
LOC6 = gensectionend_530050_2760143328(i_562614_839829468);
add_178482_2381377266(&result0, LOC6);
res_562622_839829468 += ((NI) 1);
} LA4: ;
}
}
add_178482_2381377266(&result0, (*m0).s[(((Tcfilesection529005) 11))- 0]);
return result0;
}
/* Nim-compiler-generated: for a module restored from the symbol-file cache,
   re-merge and regenerate its C file only when a merge is required and it
   is not the main module (flag bit 12); the object is always re-linked. */
N_NIMCALL(void, updatecachedmodule_563813_839829468)(Tcgen529027* m0) {
NimStringDesc* cfile0;
NimStringDesc* cfilenoext0;
cfile0 = getcfile_563204_839829468(m0);
cfilenoext0 = noschangeFileExt(cfile0, ((NimStringDesc*) &T839829468_490));
{
NIM_BOOL LOC3;
Ropeobj178006* code0;
LOC3 = (NIM_BOOL)0;
LOC3 = mergerequired_530832_2760143328(m0);
if (!(LOC3)) goto LA4;
LOC3 = !((((*(*m0).module).flags &(1U<<((NU)(((Tsymflag292184) 12))&31U)))!=0));
LA4: ;
if (!LOC3) goto LA5;
mergefiles_531241_2760143328(cfile0, m0);
geninitcode_562286_839829468(m0);
finishtypedescriptions_535842_839829468(m0);
code0 = genmodule_562491_839829468(m0, cfile0);
writerope_178836_2381377266(code0, cfile0, NIM_FALSE);
addfiletocompile_273863_2528170400(cfile0);
}
LA5: ;
addfiletolink_273872_2528170400(cfilenoext0);
}
/* Nim-compiler-generated: when emulated thread-vars exist, emits a helper
   (into section 10) exposing their total size; prefixed with an extern-"C"
   marker when not compiling to C directly and the module is flagged
   accordingly (Tsymflag bit 27 — meaning unconfirmed). */
N_NIMCALL(void, generatethreadvarssize_538771_839829468)(Tcgen529027* m0) {
{
NimStringDesc* externc0;
TY178507 LOC12;
if (!!((nimtv_538656_839829468 == NIM_NIL))) goto LA3;
{
NIM_BOOL LOC7;
LOC7 = (NIM_BOOL)0;
LOC7 = !((gcmd_169132_2607990831 == ((Tcommands169076) 2)));
if (!(LOC7)) goto LA8;
LOC7 = (((*(*m0).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA8: ;
if (!LOC7) goto LA9;
externc0 = copyString(((NimStringDesc*) &T839829468_693));
}
goto LA5;
LA9: ;
{
externc0 = copyString(((NimStringDesc*) &T839829468_490));
}
LA5: ;
memset((void*)LOC12, 0, sizeof(LOC12));
LOC12[0] = rope_178277_2381377266(externc0);
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 10))- 0], ((NimStringDesc*) &T839829468_694), LOC12, 1);
}
LA3: ;
}
/* Nim-compiler-generated: decides whether the emitted C file must be
   recompiled. Defaults to true. Without global option bit 1 (presumably
   "force/compile-only" — confirm): writes cfile0 only if its content
   changed (early true if it did), and returns false when an object file
   exists and is newer than the C file. With the option set the rope is
   written unconditionally and true is returned. */
N_NIMCALL(NIM_BOOL, shouldrecompile_563621_839829468)(Ropeobj178006* code0, NimStringDesc* cfile0) {
NIM_BOOL result0;
{ result0 = (NIM_BOOL)0;
result0 = NIM_TRUE;
{
NimStringDesc* objfile0;
if (!!(((gglobaloptions_169130_2607990831 &((NU64)1<<((NU)(((Tglobaloption169013) 1))&63U)))!=0))) goto LA3;
objfile0 = toobjfile_273859_2528170400(cfile0);
{
NIM_BOOL LOC7;
LOC7 = (NIM_BOOL)0;
LOC7 = writeropeifnotequal_179511_2381377266(code0, cfile0);
if (!LOC7) goto LA8;
goto BeforeRet;
}
LA8: ;
{
NIM_BOOL LOC12;
LOC12 = (NIM_BOOL)0;
LOC12 = nosexistsFile(objfile0);
if (!(LOC12)) goto LA13;
LOC12 = nosfileNewer(objfile0, cfile0);
LA13: ;
if (!LOC12) goto LA14;
result0 = NIM_FALSE;
}
LA14: ;
}
goto LA1;
LA3: ;
{
writerope_178836_2381377266(code0, cfile0, NIM_FALSE);
}
LA1: ;
}BeforeRet: ;
return result0;
}
/* Nim-compiler-generated: writes a module's C file. Three paths: (1) fresh
   module (or forced) — finish init code, add the main-module proc list for
   the main module, write only if shouldrecompile says so; (2) cached +
   pending + merge required (non-main) — merge then regenerate and always
   compile; (3) otherwise compile only if the object file is missing. The
   object is always added to the link step. */
N_NIMCALL(void, writemodule_563637_839829468)(Tcgen529027* m0, NIM_BOOL pending0) {
NimStringDesc* cfile0;
NimStringDesc* cfilenoext0;
cfile0 = getcfile_563204_839829468(m0);
cfilenoext0 = noschangeFileExt(cfile0, ((NimStringDesc*) &T839829468_490));
{
NIM_BOOL LOC3;
Ropeobj178006* code0;
LOC3 = (NIM_BOOL)0;
LOC3 = !((*m0).Sup.fromcache);
if (LOC3) goto LA4;
LOC3 = ((gglobaloptions_169130_2607990831 &((NU64)1<<((NU)(((Tglobaloption169013) 1))&63U)))!=0);
LA4: ;
if (!LOC3) goto LA5;
geninitcode_562286_839829468(m0);
finishtypedescriptions_535842_839829468(m0);
{
if (!(((*(*m0).module).flags &(1U<<((NU)(((Tsymflag292184) 12))&31U)))!=0)) goto LA9;
add_178482_2381377266(&(*m0).s[(((Tcfilesection529005) 7))- 0], mainmodprocs_529148_3723162438);
generatethreadvarssize_538771_839829468(m0);
}
LA9: ;
code0 = genmodule_562491_839829468(m0, cfile0);
{
NIM_BOOL LOC13;
LOC13 = (NIM_BOOL)0;
LOC13 = shouldrecompile_563621_839829468(code0, cfile0);
if (!LOC13) goto LA14;
addfiletocompile_273863_2528170400(cfile0);
}
LA14: ;
}
goto LA1;
LA5: ;
{
NIM_BOOL LOC17;
NIM_BOOL LOC18;
Ropeobj178006* code0;
LOC17 = (NIM_BOOL)0;
LOC18 = (NIM_BOOL)0;
LOC18 = pending0;
if (!(LOC18)) goto LA19;
LOC18 = mergerequired_530832_2760143328(m0);
LA19: ;
LOC17 = LOC18;
if (!(LOC17)) goto LA20;
LOC17 = !((((*(*m0).module).flags &(1U<<((NU)(((Tsymflag292184) 12))&31U)))!=0));
LA20: ;
if (!LOC17) goto LA21;
mergefiles_531241_2760143328(cfile0, m0);
geninitcode_562286_839829468(m0);
finishtypedescriptions_535842_839829468(m0);
code0 = genmodule_562491_839829468(m0, cfile0);
writerope_178836_2381377266(code0, cfile0, NIM_FALSE);
addfiletocompile_273863_2528170400(cfile0);
}
goto LA1;
LA21: ;
{
NimStringDesc* LOC24;
NIM_BOOL LOC25;
LOC24 = (NimStringDesc*)0;
LOC24 = toobjfile_273859_2528170400(cfilenoext0);
LOC25 = (NIM_BOOL)0;
LOC25 = nosexistsFile(LOC24);
if (!!(LOC25)) goto LA26;
addfiletocompile_273863_2528170400(cfile0);
}
goto LA1;
LA26: ;
LA1: ;
addfiletolink_273872_2528170400(cfilenoext0);
}
/* Nim-compiler-generated: writes the optional generated C header file:
   copyright, an include guard derived from the module's file name, int-size
   defines, headers, thread-local storage, file sections 1..10, section 11,
   an optional N_CDECL(main) declaration when option bit 8 is set, then the
   guard's closing #endif. */
N_NIMCALL(void, writeheader_563152_839829468)(Tcgen529027* m0) {
Ropeobj178006* result0;
Ropeobj178006* guard0;
TY178507 LOC1;
TY128506 LOC2;
TY178507 LOC3;
TY533289 LOC13;
TY178507 LOC14;
result0 = getcopyright_561665_839829468((*m0).filename);
memset((void*)LOC1, 0, sizeof(LOC1));
memset((void*)(&LOC2), 0, sizeof(LOC2));
nossplitFile((*m0).filename, (&LOC2));
LOC1[0] = rope_178277_2381377266(LOC2.Field1);
guard0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_695), LOC1, 1);
memset((void*)LOC3, 0, sizeof(LOC3));
LOC3[0] = guard0;
addf_179205_2381377266(&result0, ((NimStringDesc*) &T839829468_696), LOC3, 1);
addinttypes_561659_839829468(&result0);
generateheaders_560104_839829468(m0);
generatethreadlocalstorage_538717_839829468(m0);
{
Tcfilesection529005 i_563174_839829468;
NI res_563200_839829468;
i_563174_839829468 = (Tcfilesection529005)0;
res_563200_839829468 = ((NI) 1);
{
while (1) {
Ropeobj178006* LOC7;
Ropeobj178006* LOC8;
if (!(res_563200_839829468 <= ((NI) 10))) goto LA6;
i_563174_839829468 = ((Tcfilesection529005) (res_563200_839829468));
LOC7 = (Ropeobj178006*)0;
LOC7 = gensectionstart_530015_2760143328(i_563174_839829468);
add_178482_2381377266(&result0, LOC7);
add_178482_2381377266(&result0, (*m0).s[(i_563174_839829468)- 0]);
LOC8 = (Ropeobj178006*)0;
LOC8 = gensectionend_530050_2760143328(i_563174_839829468);
add_178482_2381377266(&result0, LOC8);
res_563200_839829468 += ((NI) 1);
} LA6: ;
}
}
add_178482_2381377266(&result0, (*m0).s[(((Tcfilesection529005) 11))- 0]);
{
if (!((gglobaloptions_169130_2607990831 &((NU64)1<<((NU)(((Tglobaloption169013) 8))&63U)))!=0)) goto LA11;
add_178487_2381377266(&result0, ((NimStringDesc*) &T839829468_22));
}
LA11: ;
memset((void*)LOC13, 0, sizeof(LOC13));
addf_179205_2381377266(&result0, ((NimStringDesc*) &T839829468_697), LOC13, 0);
memset((void*)LOC14, 0, sizeof(LOC14));
LOC14[0] = guard0;
addf_179205_2381377266(&result0, ((NimStringDesc*) &T839829468_698), LOC14, 1);
writerope_178836_2381377266(result0, (*m0).filename, NIM_FALSE);
}
/* Nim-compiler-generated: final write-out of all modules. Repeatedly
   finishes non-cached modules while forwarded procs remain (finishing one
   module can forward procs into another, hence the outer while), then
   writes or cache-updates every module, dumps the file mapping, and writes
   the generated header if one was requested. */
N_NIMCALL(void, cgenwritemodules_563902_839829468)(void) {
{
if (!!((generatedheader_532201_839829468 == NIM_NIL))) goto LA3;
finishmodule_563420_839829468(generatedheader_532201_839829468);
}
LA3: ;
{
while (1) {
if (!(((NI) 0) < gforwardedprocscounter_529171_3723162438)) goto LA6;
{
Tcgen529027* m_563916_839829468;
m_563916_839829468 = (Tcgen529027*)0;
{
NI i_563935_839829468;
NI HEX3Atmp_563937_839829468;
NI res_563939_839829468;
i_563935_839829468 = (NI)0;
HEX3Atmp_563937_839829468 = (NI)0;
HEX3Atmp_563937_839829468 = (gmodules_529170_3723162438 ? (gmodules_529170_3723162438->Sup.len-1) : -1);
res_563939_839829468 = ((NI) 0);
{
while (1) {
if (!(res_563939_839829468 <= HEX3Atmp_563937_839829468)) goto LA10;
i_563935_839829468 = res_563939_839829468;
{
if (!!((gmodules_529170_3723162438->data[i_563935_839829468] == NIM_NIL))) goto LA13;
m_563916_839829468 = gmodules_529170_3723162438->data[i_563935_839829468];
{
if (!!((*m_563916_839829468).Sup.fromcache)) goto LA17;
finishmodule_563420_839829468(m_563916_839829468);
}
LA17: ;
}
LA13: ;
res_563939_839829468 += ((NI) 1);
} LA10: ;
}
}
}
} LA6: ;
}
{
Tcgen529027* m_563917_839829468;
m_563917_839829468 = (Tcgen529027*)0;
{
NI i_563946_839829468;
NI HEX3Atmp_563948_839829468;
NI res_563950_839829468;
i_563946_839829468 = (NI)0;
HEX3Atmp_563948_839829468 = (NI)0;
HEX3Atmp_563948_839829468 = (gmodules_529170_3723162438 ? (gmodules_529170_3723162438->Sup.len-1) : -1);
res_563950_839829468 = ((NI) 0);
{
while (1) {
if (!(res_563950_839829468 <= HEX3Atmp_563948_839829468)) goto LA22;
i_563946_839829468 = res_563950_839829468;
{
if (!!((gmodules_529170_3723162438->data[i_563946_839829468] == NIM_NIL))) goto LA25;
m_563917_839829468 = gmodules_529170_3723162438->data[i_563946_839829468];
{
if (!(*m_563917_839829468).Sup.fromcache) goto LA29;
updatecachedmodule_563813_839829468(m_563917_839829468);
}
goto LA27;
LA29: ;
{
writemodule_563637_839829468(m_563917_839829468, NIM_TRUE);
}
LA27: ;
}
LA25: ;
res_563950_839829468 += ((NI) 1);
} LA22: ;
}
}
}
writemapping_274789_2528170400(gmapping_529152_3723162438);
{
if (!!((generatedheader_532201_839829468 == NIM_NIL))) goto LA34;
writeheader_563152_839829468(generatedheader_532201_839829468);
}
LA34: ;
}
/* Nim-compiler-generated: clears all 18 file-section rope slots (indices
   0..17, Tcfilesection-indexed array) with GC-aware nil assignment. */
N_NIMCALL(void, nullify_562833_839829468)(Ropeobj178006** arr0) {
{
Tcfilesection529005 i_562848_839829468;
NI res_562853_839829468;
i_562848_839829468 = (Tcfilesection529005)0;
res_562853_839829468 = ((NI) 0);
{
while (1) {
if (!(res_562853_839829468 <= ((NI) 17))) goto LA3;
i_562848_839829468 = ((Tcfilesection529005) (res_562853_839829468));
unsureAsgnRef((void**) (&arr0[(i_562848_839829468)- 0]), NIM_NIL);
res_562853_839829468 += ((NI) 1);
} LA3: ;
}
}
}
/* Nim-compiler-generated: clears the extension-loader rope slots, an array
   indexed by the characters '0'..'9' (ASCII 48..57, offset -48). */
N_NIMCALL(void, nullify_562858_839829468)(Ropeobj178006** arr0) {
{
NIM_CHAR i_563014_839829468;
NI res_563019_839829468;
i_563014_839829468 = (NIM_CHAR)0;
res_563019_839829468 = ((NI) 48);
{
while (1) {
if (!(res_563019_839829468 <= ((NI) 57))) goto LA3;
i_563014_839829468 = ((NIM_CHAR) (res_563019_839829468));
unsureAsgnRef((void**) (&arr0[(((NU8)(i_563014_839829468)))- 48]), NIM_NIL);
res_563019_839829468 += ((NI) 1);
} LA3: ;
}
}
}
/* Nim-compiler-generated: reinitializes a cgen module object for reuse
   with the symbol-file cache: fresh header list, proto/type caches,
   init/preinit/postinit procs, empty type/proc seqs, new temp names,
   recomputed "no stack frame" flag (bit 0, from module flag 13), cleared
   sections and extension loaders; marks the module as fromcache. */
N_NIMCALL(void, resetmodule_562763_839829468)(Tcgen529027* m0) {
initlinkedlist_147031_3771138726((&(*m0).headerfiles));
initintset_268885_2627731572((&(*m0).declaredprotos));
initidtable_296019_850551059((&(*m0).forwtypecache));
asgnRef((void**) (&(*m0).initproc), newproc_529206_3723162438(NIM_NIL, m0));
(*(*m0).initproc).options = initprocoptions_562635_839829468(m0);
asgnRef((void**) (&(*m0).preinitproc), newpreinitproc_562625_839829468(m0));
asgnRef((void**) (&(*m0).postinitproc), newpostinitproc_562630_839829468(m0));
initnodetable_296085_850551059((&(*m0).datacache));
if ((*m0).typestack) nimGCunrefNoCycle((*m0).typestack);
(*m0).typestack = (Ttypeseq292836*) newSeqRC1((&NTI292836), 0);
if ((*m0).forwardedprocs) nimGCunrefNoCycle((*m0).forwardedprocs);
(*m0).forwardedprocs = (Tsymseq292804*) newSeqRC1((&NTI292804), 0);
asgnRefNoCycle((void**) (&(*m0).typenodesname), gettempname_533596_839829468(m0));
asgnRefNoCycle((void**) (&(*m0).nimtypesname), gettempname_533596_839829468(m0));
{
if (!(((*(*m0).module).flags &(1U<<((NU)(((Tsymflag292184) 13))&31U)))!=0)) goto LA3;
(*m0).flags |= ((NU8)1)<<((((Codegenflag529025) 0))%(sizeof(NU8)*8));
}
goto LA1;
LA3: ;
{
(*m0).flags &= ~(((NU8)1) << ((((Codegenflag529025) 0)) % (sizeof(NU8)*8)));
}
LA1: ;
nullify_562833_839829468((*m0).s);
(*m0).typenodes = ((NI) 0);
(*m0).nimtypes = ((NI) 0);
nullify_562858_839829468((*m0).extensionloaders);
(*m0).Sup.fromcache = NIM_TRUE;
}
/* Nim-compiler-generated: resets every non-nil entry of the global module
   list (resetmodule on each). */
N_NIMCALL(void, resetcgenmodules_563024_839829468)(void) {
{
Tcgen529027* m_563026_839829468;
m_563026_839829468 = (Tcgen529027*)0;
{
NI i_563031_839829468;
NI HEX3Atmp_563033_839829468;
NI res_563035_839829468;
i_563031_839829468 = (NI)0;
HEX3Atmp_563033_839829468 = (NI)0;
HEX3Atmp_563033_839829468 = (gmodules_529170_3723162438 ? (gmodules_529170_3723162438->Sup.len-1) : -1);
res_563035_839829468 = ((NI) 0);
{
while (1) {
if (!(res_563035_839829468 <= HEX3Atmp_563033_839829468)) goto LA4;
i_563031_839829468 = res_563035_839829468;
{
if (!!((gmodules_529170_3723162438->data[i_563031_839829468] == NIM_NIL))) goto LA7;
m_563026_839829468 = gmodules_529170_3723162438->data[i_563031_839829468];
resetmodule_562763_839829468(m_563026_839829468);
}
LA7: ;
res_563035_839829468 += ((NI) 1);
} LA4: ;
}
}
}
}
/* Nim-compiler-generated module initializer: registers GC markers for the
   module's global ropes/seqs, seeds the indentation rope, allocates the
   thread-var dependency seq, and zeroes the declared-thread-var set and
   breakpoint counter. */
NIM_EXTERNC N_NOINLINE(void, compiler_cgenInit000)(void) {
nimRegisterGlobalMarker(T839829468_2);
nimRegisterGlobalMarker(T839829468_3);
nimRegisterGlobalMarker(T839829468_5);
nimRegisterGlobalMarker(T839829468_6);
nimRegisterGlobalMarker(T839829468_7);
nimRegisterGlobalMarker(T839829468_8);
asgnRefNoCycle((void**) (&indent_532655_839829468), rope_178277_2381377266(((NimStringDesc*) &T839829468_4)));
if (nimtvdeps_538674_839829468) nimGCunrefNoCycle(nimtvdeps_538674_839829468);
nimtvdeps_538674_839829468 = (Ttypeseq292836*) newSeqRC1((&NTI292836), 0);
chckNil((void*)(&nimtvdeclared_538675_839829468));
genericReset((void*)(&nimtvdeclared_538675_839829468), (&NTI268030));
initintset_268885_2627731572((&nimtvdeclared_538675_839829468));
breakpointid_548860_839829468 = ((NI) 0);
}
/* Nim-compiler-generated data initializer for this module — intentionally
   empty (no static data to initialize). */
NIM_EXTERNC N_NOINLINE(void, compiler_cgenDatInit000)(void) {
}
|
conv2d.c | #include "conv2d.h"
/* Evaluate the nodal flux components of the scalar field c advected by
 * the velocity (u, v):  E = u*c along x, G = v*c along y.
 * Results are written through E and G; always returns 0 (success). */
int nodal_flux(double c, double u, double v,
               double *E, double *G)
{
    *E = c * u;
    *G = c * v;
    return 0;
}
/* Upwind numerical flux across a face with outward normal (nx, ny).
 * Project the minus-side velocity (uM, vM) onto the normal; when the
 * flow leaves the element (positive normal velocity) carry the local
 * value f_M, otherwise carry the neighbour value f_P. */
void upwind_flux(double f_M, double f_P, double uM, double vM,
                 double nx, double ny, double *numflux)
{
    const double un = uM * nx + vM * ny;
    *numflux = (un > 0 ? f_M : f_P) * un;
}
/* Determine the exterior ("plus") trace value *f_P for a face subject to
 * the boundary condition `type`.
 *
 * varM  - interior (minus-side) value
 * varP  - neighbour (plus-side) value, meaningful for interior faces
 * f_ext - externally prescribed value for Dirichlet-like conditions
 * nx,ny - outward normal components (unused by these scalar conditions,
 *         kept for a uniform call signature)
 * f_P   - output: exterior trace value
 *
 * Returns 0 on success. */
int bound_cond(double varM, double varP, double f_ext,
               double nx, double ny, bc_type type,
               double *f_P)
{
    (void)nx; /* normal is not needed for these scalar conditions */
    (void)ny;
    switch (type)
    {
    case Inner:
    case SlipWall:
    case NSlipWall:
        /* interior face / wall: use the neighbour's value as-is */
        *f_P = varP;
        break;
    case ZeroGrad:
        /* zero-gradient (outflow): mirror the interior value */
        *f_P = varM;
        break;
    case Clamped:
        /* strong Dirichlet: impose the external value directly */
        *f_P = f_ext;
        break;
    case ClampedDepth:
    case ClampedVel:
    case Flather:
        /* weak Dirichlet: reflect the interior value about the target
           so the face average equals f_ext */
        *f_P = 2 * f_ext - varM;
        break;
    default:
        /* Defensive fix: an out-of-range bc_type previously left *f_P
           uninitialized.  Fall back to zero-gradient treatment. */
        *f_P = varM;
        break;
    }
    return 0;
}
/*
 * @brief double precision element-wise multiply-accumulate:
 *        t += alpha * (x .* y)
 *
 * Fixes the previous header comment, which claimed "t = x .* y" while the
 * code accumulates alpha-scaled products into t.
 *
 * @param N     number of elements (double for interface compatibility;
 *              truncated to int internally)
 * @param alpha scalar multiplier
 * @param x,y   input vectors of at least N elements
 * @param t     in/out accumulator vector of at least N elements
 */
void dvecm(double N, double alpha, double *x, double *y, double *t)
{
    const int n = (int)N; /* hoist the double->int bound conversion */
    int i;
    for (i = 0; i < n; i++)
    {
        t[i] += alpha * x[i] * y[i];
    }
}
/*
 * @brief double precision element-wise divide-accumulate:
 *        t += alpha * (x ./ y)
 *
 * Fixes the previous header comment, which said "t = x .* y" (copied from
 * the multiply routine) although the code divides and accumulates.
 *
 * @param N     number of elements (double for interface compatibility;
 *              truncated to int internally)
 * @param alpha scalar multiplier
 * @param x,y   input vectors of at least N elements; y[i] must be nonzero
 *              (a zero divisor yields inf/nan, caller's responsibility)
 * @param t     in/out accumulator vector of at least N elements
 */
void dvecd(double N, double alpha, double *x, double *y, double *t)
{
    const int n = (int)N; /* hoist the double->int bound conversion */
    int i;
    for (i = 0; i < n; i++)
    {
        t[i] += alpha * x[i] / y[i];
    }
}
DRB112-linear-orig-no.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
omp for loop is allowed to use the linear clause, an OpenMP 4.5 addition.
*/
#include <stdio.h>
/* DataRaceBench kernel: demonstrates the OpenMP 4.5 `linear` clause.
   The second loop writes c[j] while incrementing j each iteration;
   `linear(j)` declares that j equals its initial value plus the logical
   iteration number, so every thread gets a private, correctly offset
   copy of j -- the loop is race-free by construction (hence "-no"). */
int main()
{
int len=100;
/* C99 variable-length arrays sized by len. */
double a[len], b[len], c[len];
int i,j=0;
/* Independent element-wise initialization: trivially parallel. */
#pragma omp parallel for
for (i=0;i<len;i++)
{
a[i]=((double)i)/2.0;
b[i]=((double)i)/3.0;
c[i]=((double)i)/7.0;
}
/* j is linear in i with step 1, so c[j] is effectively c[i] here;
   without linear(j) the shared j++ would be a data race. */
#pragma omp parallel for linear(j)
for (i=0;i<len;i++)
{
c[j]+=a[i]*b[i];
j++;
}
printf ("c[50]=%f\n",c[50]);
return 0;
}
|
parser.c | /* -*- C++ -*- Parser.
Copyright (C) 2000-2015 Free Software Foundation, Inc.
Written by Mark Mitchell <mark@codesourcery.com>.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.
GCC is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "timevar.h"
#include "cpplib.h"
#include "hash-set.h"
#include "machmode.h"
#include "vec.h"
#include "double-int.h"
#include "input.h"
#include "alias.h"
#include "symtab.h"
#include "wide-int.h"
#include "inchash.h"
#include "tree.h"
#include "print-tree.h"
#include "stringpool.h"
#include "attribs.h"
#include "trans-mem.h"
#include "cp-tree.h"
#include "intl.h"
#include "c-family/c-pragma.h"
#include "decl.h"
#include "flags.h"
#include "diagnostic-core.h"
#include "target.h"
#include "hash-map.h"
#include "is-a.h"
#include "plugin-api.h"
#include "hard-reg-set.h"
#include "input.h"
#include "function.h"
#include "ipa-ref.h"
#include "cgraph.h"
#include "c-family/c-common.h"
#include "c-family/c-objc.h"
#include "plugin.h"
#include "tree-pretty-print.h"
#include "parser.h"
#include "type-utils.h"
#include "omp-low.h"
#include "gomp-constants.h"
/* The lexer. */
/* The cp_lexer_* routines mediate between the lexer proper (in libcpp
and c-lex.c) and the C++ parser. */
/* Shared sentinel returned once a lexer's token stream is exhausted.
   next_token points here at end-of-stream instead of past the buffer;
   it is compared by address and never consumed or modified. */
static cp_token eof_token =
{
CPP_EOF, RID_MAX, 0, PRAGMA_NONE, false, false, false, 0, { NULL }
};
/* The various kinds of non integral constant we encounter.
   Each value names the construct that disqualified an expression from
   being an integral constant-expression; the %<...%> markers follow the
   GCC diagnostic quoting convention, suggesting these feed diagnostics
   (emitting code is outside this excerpt -- confirm at the use sites). */
typedef enum non_integral_constant {
NIC_NONE,
/* floating-point literal */
NIC_FLOAT,
/* %<this%> */
NIC_THIS,
/* %<__FUNCTION__%> */
NIC_FUNC_NAME,
/* %<__PRETTY_FUNCTION__%> */
NIC_PRETTY_FUNC,
/* %<__func__%> */
NIC_C99_FUNC,
/* %<va_arg%> */
NIC_VA_ARG,
/* a cast */
NIC_CAST,
/* %<typeid%> operator */
NIC_TYPEID,
/* non-constant compound literals */
NIC_NCC,
/* a function call */
NIC_FUNC_CALL,
/* an increment */
NIC_INC,
/* a decrement */
NIC_DEC,
/* an array reference */
NIC_ARRAY_REF,
/* %<->%> */
NIC_ARROW,
/* %<.%> */
NIC_POINT,
/* the address of a label */
NIC_ADDR_LABEL,
/* %<*%> */
NIC_STAR,
/* %<&%> */
NIC_ADDR,
/* %<++%> */
NIC_PREINCREMENT,
/* %<--%> */
NIC_PREDECREMENT,
/* %<new%> */
NIC_NEW,
/* %<delete%> */
NIC_DEL,
/* calls to overloaded operators */
NIC_OVERLOADED,
/* an assignment */
NIC_ASSIGNMENT,
/* a comma operator */
NIC_COMMA,
/* a call to a constructor */
NIC_CONSTRUCTOR,
/* a transaction expression */
NIC_TRANSACTION
} non_integral_constant;
/* The various kinds of errors about name-lookup failing.
   Distinguishes the message wording required by C++98 versus later
   standards (NLE_CXX98 vs. NLE_NOT_CXX98). */
typedef enum name_lookup_error {
/* NULL */
NLE_NULL,
/* is not a type */
NLE_TYPE,
/* is not a class or namespace */
NLE_CXX98,
/* is not a class, namespace, or enumeration */
NLE_NOT_CXX98
} name_lookup_error;
/* The various kinds of required token.
   Used when the parser demands a specific token and wants a uniform way
   to describe what was expected if it is missing. */
typedef enum required_token {
RT_NONE,
RT_SEMICOLON, /* ';' */
RT_OPEN_PAREN, /* '(' */
RT_CLOSE_BRACE, /* '}' */
RT_OPEN_BRACE, /* '{' */
RT_CLOSE_SQUARE, /* ']' */
RT_OPEN_SQUARE, /* '[' */
RT_COMMA, /* ',' */
RT_SCOPE, /* '::' */
RT_LESS, /* '<' */
RT_GREATER, /* '>' */
RT_EQ, /* '=' */
RT_ELLIPSIS, /* '...' */
RT_MULT, /* '*' */
RT_COMPL, /* '~' */
RT_COLON, /* ':' */
RT_COLON_SCOPE, /* ':' or '::' */
RT_CLOSE_PAREN, /* ')' */
RT_COMMA_CLOSE_PAREN, /* ',' or ')' */
RT_PRAGMA_EOL, /* end of line */
RT_NAME, /* identifier */
/* The type is CPP_KEYWORD */
RT_NEW, /* new */
RT_DELETE, /* delete */
RT_RETURN, /* return */
RT_WHILE, /* while */
RT_EXTERN, /* extern */
RT_STATIC_ASSERT, /* static_assert */
RT_DECLTYPE, /* decltype */
RT_OPERATOR, /* operator */
RT_CLASS, /* class */
RT_TEMPLATE, /* template */
RT_NAMESPACE, /* namespace */
RT_USING, /* using */
RT_ASM, /* asm */
RT_TRY, /* try */
RT_CATCH, /* catch */
RT_THROW, /* throw */
RT_LABEL, /* __label__ */
RT_AT_TRY, /* @try */
RT_AT_SYNCHRONIZED, /* @synchronized */
RT_AT_THROW, /* @throw */
RT_SELECT, /* selection-statement */
RT_INTERATION, /* iteration-statement (enumerator name is a historical
typo for "ITERATION"; kept -- renaming would break users) */
RT_JUMP, /* jump-statement */
RT_CLASS_KEY, /* class-key */
RT_CLASS_TYPENAME_TEMPLATE, /* class, typename, or template */
RT_TRANSACTION_ATOMIC, /* __transaction_atomic */
RT_TRANSACTION_RELAXED, /* __transaction_relaxed */
RT_TRANSACTION_CANCEL /* __transaction_cancel */
} required_token;
/* Prototypes. */
static cp_lexer *cp_lexer_new_main
(void);
static cp_lexer *cp_lexer_new_from_tokens
(cp_token_cache *tokens);
static void cp_lexer_destroy
(cp_lexer *);
static int cp_lexer_saving_tokens
(const cp_lexer *);
static cp_token *cp_lexer_token_at
(cp_lexer *, cp_token_position);
static void cp_lexer_get_preprocessor_token
(cp_lexer *, cp_token *);
static inline cp_token *cp_lexer_peek_token
(cp_lexer *);
static cp_token *cp_lexer_peek_nth_token
(cp_lexer *, size_t);
static inline bool cp_lexer_next_token_is
(cp_lexer *, enum cpp_ttype);
static bool cp_lexer_next_token_is_not
(cp_lexer *, enum cpp_ttype);
static bool cp_lexer_next_token_is_keyword
(cp_lexer *, enum rid);
static cp_token *cp_lexer_consume_token
(cp_lexer *);
static void cp_lexer_purge_token
(cp_lexer *);
static void cp_lexer_purge_tokens_after
(cp_lexer *, cp_token_position);
static void cp_lexer_save_tokens
(cp_lexer *);
static void cp_lexer_commit_tokens
(cp_lexer *);
static void cp_lexer_rollback_tokens
(cp_lexer *);
static void cp_lexer_print_token
(FILE *, cp_token *);
static inline bool cp_lexer_debugging_p
(cp_lexer *);
static void cp_lexer_start_debugging
(cp_lexer *) ATTRIBUTE_UNUSED;
static void cp_lexer_stop_debugging
(cp_lexer *) ATTRIBUTE_UNUSED;
static cp_token_cache *cp_token_cache_new
(cp_token *, cp_token *);
static void cp_parser_initial_pragma
(cp_token *);
static tree cp_literal_operator_id
(const char *);
static void cp_parser_cilk_simd
(cp_parser *, cp_token *);
static tree cp_parser_cilk_for
(cp_parser *, tree);
static bool cp_parser_omp_declare_reduction_exprs
(tree, cp_parser *);
static tree cp_parser_cilk_simd_vectorlength
(cp_parser *, tree, bool);
/* Manifest constants. */
/* Initial capacity of the main lexer buffer: as many cp_tokens as fit
   in 256 kB. */
#define CP_LEXER_BUFFER_SIZE ((256 * 1024) / sizeof (cp_token))
/* Initial depth of the saved-token stack used for backtracking. */
#define CP_SAVED_TOKEN_STACK 5
/* Variables. */
/* The stream to which debugging output should be written. */
static FILE *cp_lexer_debug_stream;
/* Nonzero if we are parsing an unevaluated operand: an operand to
sizeof, typeof, or alignof. */
int cp_unevaluated_operand;
/* Dump up to NUM tokens in BUFFER to FILE starting with token
START_TOKEN. If START_TOKEN is NULL, the dump starts with the
first token in BUFFER. If NUM is 0, dump all the tokens. If
CURR_TOKEN is set and it is one of the tokens in BUFFER, it will be
highlighted by surrounding it in [[ ]]. */
static void
cp_lexer_dump_tokens (FILE *file, vec<cp_token, va_gc> *buffer,
cp_token *start_token, unsigned num,
cp_token *curr_token)
{
unsigned i, nprinted;
cp_token *token;
bool do_print;
fprintf (file, "%u tokens\n", vec_safe_length (buffer));
if (buffer == NULL)
return;
if (num == 0)
num = buffer->length ();
if (start_token == NULL)
start_token = buffer->address ();
/* When the window does not begin at the buffer's first token, show that
   first token plus an ellipsis for context.  */
if (start_token > buffer->address ())
{
cp_lexer_print_token (file, &(*buffer)[0]);
fprintf (file, " ... ");
}
do_print = false;
nprinted = 0;
/* Iterate the whole buffer but only start printing at START_TOKEN,
   stopping once NUM tokens have been printed.  */
for (i = 0; buffer->iterate (i, &token) && nprinted < num; i++)
{
if (token == start_token)
do_print = true;
if (!do_print)
continue;
nprinted++;
if (token == curr_token)
fprintf (file, "[[");
cp_lexer_print_token (file, token);
if (token == curr_token)
fprintf (file, "]]");
/* Break the line after tokens that typically end a statement or
   block, to keep the dump readable.  */
switch (token->type)
{
case CPP_SEMICOLON:
case CPP_OPEN_BRACE:
case CPP_CLOSE_BRACE:
case CPP_EOF:
fputc ('\n', file);
break;
default:
fputc (' ', file);
}
}
/* If tokens remain after the window, show an ellipsis and the buffer's
   final token.  */
if (i == num && i < buffer->length ())
{
fprintf (file, " ... ");
cp_lexer_print_token (file, &buffer->last ());
}
fprintf (file, "\n");
}
/* Dump all tokens in BUFFER to stderr.  Convenience wrapper around
   cp_lexer_dump_tokens with no window, no start token and no
   highlighted token; intended for use from a debugger.  */
void
cp_lexer_debug_tokens (vec<cp_token, va_gc> *buffer)
{
cp_lexer_dump_tokens (stderr, buffer, NULL, 0, NULL);
}
/* Debugger entry point: dump every token in REF to stderr. */
DEBUG_FUNCTION void
debug (vec<cp_token, va_gc> &ref)
{
cp_lexer_dump_tokens (stderr, &ref, NULL, 0, NULL);
}
/* Debugger entry point: dump every token in *PTR to stderr, or print
   "<nil>" when PTR is null. */
DEBUG_FUNCTION void
debug (vec<cp_token, va_gc> *ptr)
{
  if (!ptr)
    {
      fprintf (stderr, "<nil>\n");
      return;
    }
  debug (*ptr);
}
/* Dump the cp_parser tree field T to FILE if T is non-NULL. DESC is the
description for T.  Prints nothing at all when T is NULL, so unset
fields do not clutter the dump. */
static void
cp_debug_print_tree_if_set (FILE *file, const char *desc, tree t)
{
if (t)
{
fprintf (file, "%s: ", desc);
print_node_brief (file, "", t, 0);
}
}
/* Dump parser context C to FILE as "{ status = ..., scope = <node> }". */
static void
cp_debug_print_context (FILE *file, cp_parser_context *c)
{
const char *status_s[] = { "OK", "ERROR", "COMMITTED" };
fprintf (file, "{ status = %s, scope = ", status_s[c->status]);
/* NOTE(review): the label says "scope" but the field printed is
   c->object_type -- looks intentional for this dump, verify against
   the cp_parser_context definition. */
print_node_brief (file, "", c->object_type, 0);
fprintf (file, "}\n");
}
/* Print the stack of parsing contexts to FILE starting with FIRST,
   one numbered line per context. */
static void
cp_debug_print_context_stack (FILE *file, cp_parser_context *first)
{
  unsigned depth = 0;
  fprintf (file, "Parsing context stack:\n");
  for (cp_parser_context *c = first; c; c = c->next)
    {
      fprintf (file, "\t#%u: ", depth++);
      cp_debug_print_context (file, c);
    }
}
/* Print the value of FLAG to FILE. DESC is a string describing the flag.
   Flags that are false are omitted entirely to keep the dump short. */
static void
cp_debug_print_flag (FILE *file, const char *desc, bool flag)
{
  if (!flag)
    return;
  fprintf (file, "%s: true\n", desc);
}
/* Print an unparsed function entry UF to FILE: its deferred
   default-argument functions, deferred function definitions, and
   deferred non-static data member initializers. */
static void
cp_debug_print_unparsed_function (FILE *file, cp_unparsed_functions_entry *uf)
{
unsigned i;
cp_default_arg_entry *default_arg_fn;
tree fn;
fprintf (file, "\tFunctions with default args:\n");
for (i = 0;
vec_safe_iterate (uf->funs_with_default_args, i, &default_arg_fn);
i++)
{
fprintf (file, "\t\tClass type: ");
print_node_brief (file, "", default_arg_fn->class_type, 0);
fprintf (file, "\t\tDeclaration: ");
print_node_brief (file, "", default_arg_fn->decl, 0);
fprintf (file, "\n");
}
fprintf (file, "\n\tFunctions with definitions that require "
"post-processing\n\t\t");
for (i = 0; vec_safe_iterate (uf->funs_with_definitions, i, &fn); i++)
{
print_node_brief (file, "", fn, 0);
fprintf (file, " ");
}
fprintf (file, "\n");
fprintf (file, "\n\tNon-static data members with initializers that require "
"post-processing\n\t\t");
for (i = 0; vec_safe_iterate (uf->nsdmis, i, &fn); i++)
{
print_node_brief (file, "", fn, 0);
fprintf (file, " ");
}
fprintf (file, "\n");
}
/* Print the stack of unparsed member functions S to FILE, one numbered
   entry per element via cp_debug_print_unparsed_function. */
static void
cp_debug_print_unparsed_queues (FILE *file,
vec<cp_unparsed_functions_entry, va_gc> *s)
{
unsigned i;
cp_unparsed_functions_entry *uf;
fprintf (file, "Unparsed functions\n");
for (i = 0; vec_safe_iterate (s, i, &uf); i++)
{
fprintf (file, "#%u:\n", i);
cp_debug_print_unparsed_function (file, uf);
}
}
/* Dump the tokens in a window of size WINDOW_SIZE around the next_token for
the given PARSER. If FILE is NULL, the output is printed on stderr. */
static void
cp_debug_parser_tokens (FILE *file, cp_parser *parser, int window_size)
{
cp_token *next_token, *first_token, *start_token;
if (file == NULL)
file = stderr;
next_token = parser->lexer->next_token;
first_token = parser->lexer->buffer->address ();
/* Center the window on next_token, clamping at the buffer start. */
start_token = (next_token > first_token + window_size / 2)
? next_token - window_size / 2
: first_token;
cp_lexer_dump_tokens (file, parser->lexer->buffer, start_token, window_size,
next_token);
}
/* Dump debugging information for the given PARSER. If FILE is NULL,
the output is printed on stderr.  Prints the token count, the three
lookup scopes, the context stack, every boolean parser flag that is
set, the unparsed-function queues, a token window around the current
position, and the next token with its source location. */
void
cp_debug_parser (FILE *file, cp_parser *parser)
{
const size_t window_size = 20;
cp_token *token;
expanded_location eloc;
if (file == NULL)
file = stderr;
fprintf (file, "Parser state\n\n");
fprintf (file, "Number of tokens: %u\n",
vec_safe_length (parser->lexer->buffer));
cp_debug_print_tree_if_set (file, "Lookup scope", parser->scope);
cp_debug_print_tree_if_set (file, "Object scope",
parser->object_scope);
cp_debug_print_tree_if_set (file, "Qualifying scope",
parser->qualifying_scope);
cp_debug_print_context_stack (file, parser->context);
/* Boolean flags: cp_debug_print_flag prints only the ones that are
   currently true. */
cp_debug_print_flag (file, "Allow GNU extensions",
parser->allow_gnu_extensions_p);
cp_debug_print_flag (file, "'>' token is greater-than",
parser->greater_than_is_operator_p);
cp_debug_print_flag (file, "Default args allowed in current "
"parameter list", parser->default_arg_ok_p);
cp_debug_print_flag (file, "Parsing integral constant-expression",
parser->integral_constant_expression_p);
cp_debug_print_flag (file, "Allow non-constant expression in current "
"constant-expression",
parser->allow_non_integral_constant_expression_p);
cp_debug_print_flag (file, "Seen non-constant expression",
parser->non_integral_constant_expression_p);
cp_debug_print_flag (file, "Local names and 'this' forbidden in "
"current context",
parser->local_variables_forbidden_p);
cp_debug_print_flag (file, "In unbraced linkage specification",
parser->in_unbraced_linkage_specification_p);
cp_debug_print_flag (file, "Parsing a declarator",
parser->in_declarator_p);
cp_debug_print_flag (file, "In template argument list",
parser->in_template_argument_list_p);
cp_debug_print_flag (file, "Parsing an iteration statement",
parser->in_statement & IN_ITERATION_STMT);
cp_debug_print_flag (file, "Parsing a switch statement",
parser->in_statement & IN_SWITCH_STMT);
cp_debug_print_flag (file, "Parsing a structured OpenMP block",
parser->in_statement & IN_OMP_BLOCK);
cp_debug_print_flag (file, "Parsing a Cilk Plus for loop",
parser->in_statement & IN_CILK_SIMD_FOR);
/* NOTE(review): "a an" typo below is in runtime output; left as-is
   here since this change only adds comments. */
cp_debug_print_flag (file, "Parsing a an OpenMP loop",
parser->in_statement & IN_OMP_FOR);
cp_debug_print_flag (file, "Parsing an if statement",
parser->in_statement & IN_IF_STMT);
cp_debug_print_flag (file, "Parsing a type-id in an expression "
"context", parser->in_type_id_in_expr_p);
cp_debug_print_flag (file, "Declarations are implicitly extern \"C\"",
parser->implicit_extern_c);
cp_debug_print_flag (file, "String expressions should be translated "
"to execution character set",
parser->translate_strings_p);
cp_debug_print_flag (file, "Parsing function body outside of a "
"local class", parser->in_function_body);
cp_debug_print_flag (file, "Auto correct a colon to a scope operator",
parser->colon_corrects_to_scope_p);
cp_debug_print_flag (file, "Colon doesn't start a class definition",
parser->colon_doesnt_start_class_def_p);
if (parser->type_definition_forbidden_message)
fprintf (file, "Error message for forbidden type definitions: %s\n",
parser->type_definition_forbidden_message);
cp_debug_print_unparsed_queues (file, parser->unparsed_queues);
fprintf (file, "Number of class definitions in progress: %u\n",
parser->num_classes_being_defined);
fprintf (file, "Number of template parameter lists for the current "
"declaration: %u\n", parser->num_template_parameter_lists);
/* Show a window of tokens around the parse position, then full detail
   on the very next token. */
cp_debug_parser_tokens (file, parser, window_size);
token = parser->lexer->next_token;
fprintf (file, "Next token to parse:\n");
fprintf (file, "\tToken: ");
cp_lexer_print_token (file, token);
eloc = expand_location (token->location);
fprintf (file, "\n\tFile: %s\n", eloc.file);
fprintf (file, "\tLine: %d\n", eloc.line);
fprintf (file, "\tColumn: %d\n", eloc.column);
}
/* Debugger entry point: dump the state of parser REF to stderr. */
DEBUG_FUNCTION void
debug (cp_parser &ref)
{
cp_debug_parser (stderr, &ref);
}
/* Debugger entry point: dump the state of *PTR to stderr, or print
   "<nil>" when PTR is null. */
DEBUG_FUNCTION void
debug (cp_parser *ptr)
{
  if (!ptr)
    {
      fprintf (stderr, "<nil>\n");
      return;
    }
  debug (*ptr);
}
/* Allocate memory for a new lexer object and return it.  The lexer is
   GC-allocated and zero-initialized, with debugging off, an empty
   saved-token stack, and a freshly allocated token buffer.  Also tells
   the C front end no further PCH can be loaded. */
static cp_lexer *
cp_lexer_alloc (void)
{
cp_lexer *lexer;
c_common_no_more_pch ();
/* Allocate the memory. */
lexer = ggc_cleared_alloc<cp_lexer> ();
/* Initially we are not debugging. */
lexer->debugging_p = false;
lexer->saved_tokens.create (CP_SAVED_TOKEN_STACK);
/* Create the buffer. */
vec_alloc (lexer->buffer, CP_LEXER_BUFFER_SIZE);
return lexer;
}
/* Create a new main C++ lexer, the lexer that gets tokens from the
preprocessor.  Reads the entire translation unit into the buffer up
front, up to and including the CPP_EOF token. */
static cp_lexer *
cp_lexer_new_main (void)
{
cp_lexer *lexer;
cp_token token;
/* It's possible that parsing the first pragma will load a PCH file,
which is a GC collection point. So we have to do that before
allocating any memory. */
cp_parser_initial_pragma (&token);
lexer = cp_lexer_alloc ();
/* Put the first token in the buffer. */
lexer->buffer->quick_push (token);
/* Get the remaining tokens from the preprocessor. */
while (token.type != CPP_EOF)
{
cp_lexer_get_preprocessor_token (lexer, &token);
vec_safe_push (lexer->buffer, token);
}
/* last_token points at the final (CPP_EOF) token; the traversal
   routines treat it as the end-of-buffer sentinel position. */
lexer->last_token = lexer->buffer->address ()
+ lexer->buffer->length ()
- 1;
lexer->next_token = lexer->buffer->length ()
? lexer->buffer->address ()
: &eof_token;
/* Subsequent preprocessor diagnostics should use compiler
diagnostic functions to get the compiler source location. */
done_lexing = true;
gcc_assert (!lexer->next_token->purged_p);
return lexer;
}
/* Create a new lexer whose token stream is primed with the tokens in
CACHE. When these tokens are exhausted, no new tokens will be read.
The new lexer borrows CACHE's storage (buffer stays NULL so it is
never freed by cp_lexer_destroy). */
static cp_lexer *
cp_lexer_new_from_tokens (cp_token_cache *cache)
{
cp_token *first = cache->first;
cp_token *last = cache->last;
cp_lexer *lexer = ggc_cleared_alloc<cp_lexer> ();
/* We do not own the buffer. */
lexer->buffer = NULL;
/* An empty cache starts at the EOF sentinel immediately. */
lexer->next_token = first == last ? &eof_token : first;
lexer->last_token = last;
lexer->saved_tokens.create (CP_SAVED_TOKEN_STACK);
/* Initially we are not debugging. */
lexer->debugging_p = false;
gcc_assert (!lexer->next_token->purged_p);
return lexer;
}
/* Frees all resources associated with LEXER: the token buffer (a no-op
   for cache-backed lexers, whose buffer is NULL), the saved-token
   stack, and the lexer object itself. */
static void
cp_lexer_destroy (cp_lexer *lexer)
{
vec_free (lexer->buffer);
lexer->saved_tokens.release ();
ggc_free (lexer);
}
/* Returns nonzero if debugging information should be output for LEXER. */
static inline bool
cp_lexer_debugging_p (cp_lexer *lexer)
{
return lexer->debugging_p;
}
/* Return the position of LEXER's next token, or of the token just
   before it when PREVIOUS_P.  Asking for the previous position at the
   EOF sentinel is invalid (it does not live in the buffer). */
static inline cp_token_position
cp_lexer_token_position (cp_lexer *lexer, bool previous_p)
{
gcc_assert (!previous_p || lexer->next_token != &eof_token);
return lexer->next_token - previous_p;
}
/* Return the token at position POS.  cp_token_position is currently a
   plain token pointer, so this is the identity; the LEXER parameter is
   kept for interface symmetry. */
static inline cp_token *
cp_lexer_token_at (cp_lexer * /*lexer*/, cp_token_position pos)
{
return pos;
}
/* Reposition LEXER so that the token at POS becomes the next token. */
static inline void
cp_lexer_set_token_position (cp_lexer *lexer, cp_token_position pos)
{
lexer->next_token = cp_lexer_token_at (lexer, pos);
}
/* Return the position of the token preceding LEXER's next token.
   At end-of-stream the EOF sentinel has no buffer position, so step
   back from last_token instead. */
static inline cp_token_position
cp_lexer_previous_token_position (cp_lexer *lexer)
{
if (lexer->next_token == &eof_token)
return lexer->last_token - 1;
else
return cp_lexer_token_position (lexer, true);
}
/* Return the token preceding LEXER's next token (the most recently
   consumed one). */
static inline cp_token *
cp_lexer_previous_token (cp_lexer *lexer)
{
cp_token_position tp = cp_lexer_previous_token_position (lexer);
return cp_lexer_token_at (lexer, tp);
}
/* nonzero if we are presently saving tokens (i.e. at least one
   cp_lexer_save_tokens call has not yet been committed or rolled
   back). */
static inline int
cp_lexer_saving_tokens (const cp_lexer* lexer)
{
return lexer->saved_tokens.length () != 0;
}
/* Store the next token from the preprocessor in *TOKEN. Return true
if we reach EOF. If LEXER is NULL, assume we are handling an
initial #pragma pch_preprocess, and thus want the lexer to return
processed strings.

NOTE(review): despite the comment above, the function returns void;
callers detect EOF via token->type == CPP_EOF instead. */
static void
cp_lexer_get_preprocessor_token (cp_lexer *lexer, cp_token *token)
{
static int is_extern_c = 0;
/* Get a new token from the preprocessor. */
token->type
= c_lex_with_flags (&token->u.value, &token->location, &token->flags,
lexer == NULL ? 0 : C_LEX_STRING_NO_JOIN);
token->keyword = RID_MAX;
token->pragma_kind = PRAGMA_NONE;
token->purged_p = false;
token->error_reported = false;
/* On some systems, some header files are surrounded by an
implicit extern "C" block. Set a flag in the token if it
comes from such a header. */
is_extern_c += pending_lang_change;
pending_lang_change = 0;
token->implicit_extern_c = is_extern_c > 0;
/* Check to see if this token is a keyword. */
if (token->type == CPP_NAME)
{
if (C_IS_RESERVED_WORD (token->u.value))
{
/* Mark this token as a keyword. */
token->type = CPP_KEYWORD;
/* Record which keyword. */
token->keyword = C_RID_CODE (token->u.value);
}
else
{
/* The identifier is not currently reserved, but may still carry a
   C++11 RID code (e.g. when compiling with -std=c++98). */
if (warn_cxx0x_compat
&& C_RID_CODE (token->u.value) >= RID_FIRST_CXX0X
&& C_RID_CODE (token->u.value) <= RID_LAST_CXX0X)
{
/* Warn about the C++0x keyword (but still treat it as
an identifier). */
warning (OPT_Wc__0x_compat,
"identifier %qE is a keyword in C++11",
token->u.value);
/* Clear out the C_RID_CODE so we don't warn about this
particular identifier-turned-keyword again. */
C_SET_RID_CODE (token->u.value, RID_MAX);
}
token->keyword = RID_MAX;
}
}
else if (token->type == CPP_AT_NAME)
{
/* This only happens in Objective-C++; it must be a keyword. */
token->type = CPP_KEYWORD;
switch (C_RID_CODE (token->u.value))
{
/* Replace 'class' with '@class', 'private' with '@private',
etc. This prevents confusion with the C++ keyword
'class', and makes the tokens consistent with other
Objective-C 'AT' keywords. For example '@class' is
reported as RID_AT_CLASS which is consistent with
'@synchronized', which is reported as
RID_AT_SYNCHRONIZED.
*/
case RID_CLASS: token->keyword = RID_AT_CLASS; break;
case RID_PRIVATE: token->keyword = RID_AT_PRIVATE; break;
case RID_PROTECTED: token->keyword = RID_AT_PROTECTED; break;
case RID_PUBLIC: token->keyword = RID_AT_PUBLIC; break;
case RID_THROW: token->keyword = RID_AT_THROW; break;
case RID_TRY: token->keyword = RID_AT_TRY; break;
case RID_CATCH: token->keyword = RID_AT_CATCH; break;
default: token->keyword = C_RID_CODE (token->u.value);
}
}
else if (token->type == CPP_PRAGMA)
{
/* We smuggled the cpp_token->u.pragma value in an INTEGER_CST. */
token->pragma_kind = ((enum pragma_kind)
TREE_INT_CST_LOW (token->u.value));
token->u.value = NULL_TREE;
}
}
/* Update the globals input_location and the input file stack from TOKEN.
   The EOF token carries no useful location, so it is ignored. */
static inline void
cp_lexer_set_source_position_from_token (cp_token *token)
{
if (token->type != CPP_EOF)
{
input_location = token->location;
}
}
/* Update the globals input_location and the input file stack from LEXER,
   using its next (unconsumed) token's location. */
static inline void
cp_lexer_set_source_position (cp_lexer *lexer)
{
cp_token *token = cp_lexer_peek_token (lexer);
cp_lexer_set_source_position_from_token (token);
}
/* Return a pointer to the next token in the token stream, but do not
consume it.  May return the shared EOF sentinel at end-of-stream. */
static inline cp_token *
cp_lexer_peek_token (cp_lexer *lexer)
{
if (cp_lexer_debugging_p (lexer))
{
fputs ("cp_lexer: peeking at token: ", cp_lexer_debug_stream);
cp_lexer_print_token (cp_lexer_debug_stream, lexer->next_token);
putc ('\n', cp_lexer_debug_stream);
}
return lexer->next_token;
}
/* Return true if the next token has the indicated TYPE.  Does not
   consume the token. */
static inline bool
cp_lexer_next_token_is (cp_lexer* lexer, enum cpp_ttype type)
{
return cp_lexer_peek_token (lexer)->type == type;
}
/* Return true if the next token does not have the indicated TYPE.
   Does not consume the token. */
static inline bool
cp_lexer_next_token_is_not (cp_lexer* lexer, enum cpp_ttype type)
{
  return cp_lexer_peek_token (lexer)->type != type;
}
/* Return true if the next token is the indicated KEYWORD.  Does not
   consume the token. */
static inline bool
cp_lexer_next_token_is_keyword (cp_lexer* lexer, enum rid keyword)
{
return cp_lexer_peek_token (lexer)->keyword == keyword;
}
/* Return true if the Nth token ahead (1-based) has the indicated TYPE. */
static inline bool
cp_lexer_nth_token_is (cp_lexer* lexer, size_t n, enum cpp_ttype type)
{
return cp_lexer_peek_nth_token (lexer, n)->type == type;
}
/* Return true if the Nth token ahead (1-based) is the indicated KEYWORD. */
static inline bool
cp_lexer_nth_token_is_keyword (cp_lexer* lexer, size_t n, enum rid keyword)
{
return cp_lexer_peek_nth_token (lexer, n)->keyword == keyword;
}
/* Return true if the next token is not the indicated KEYWORD.  Does not
   consume the token. */
static inline bool
cp_lexer_next_token_is_not_keyword (cp_lexer* lexer, enum rid keyword)
{
return cp_lexer_peek_token (lexer)->keyword != keyword;
}
/* Return true if the next token is a keyword for a decl-specifier:
   a storage class, an elaborated-type keyword, a simple type specifier
   (including the configurable __intN types), or one of the listed
   GNU/C++0x extensions.  Does not consume the token. */
static bool
cp_lexer_next_token_is_decl_specifier_keyword (cp_lexer *lexer)
{
cp_token *token;
token = cp_lexer_peek_token (lexer);
switch (token->keyword)
{
/* auto specifier: storage-class-specifier in C++,
simple-type-specifier in C++0x. */
case RID_AUTO:
/* Storage classes. */
case RID_REGISTER:
case RID_STATIC:
case RID_EXTERN:
case RID_MUTABLE:
case RID_THREAD:
/* Elaborated type specifiers. */
case RID_ENUM:
case RID_CLASS:
case RID_STRUCT:
case RID_UNION:
case RID_TYPENAME:
/* Simple type specifiers. */
case RID_CHAR:
case RID_CHAR16:
case RID_CHAR32:
case RID_WCHAR:
case RID_BOOL:
case RID_SHORT:
case RID_INT:
case RID_LONG:
case RID_SIGNED:
case RID_UNSIGNED:
case RID_FLOAT:
case RID_DOUBLE:
case RID_VOID:
/* GNU extensions. */
case RID_ATTRIBUTE:
case RID_TYPEOF:
/* C++0x extensions. */
case RID_DECLTYPE:
case RID_UNDERLYING_TYPE:
return true;
default:
/* Target-specific __intN types count only when enabled for this
   configuration. */
if (token->keyword >= RID_FIRST_INT_N
&& token->keyword < RID_FIRST_INT_N + NUM_INT_N_ENTS
&& int_n_enabled_p[token->keyword - RID_FIRST_INT_N])
return true;
return false;
}
}
/* Returns TRUE iff the token T begins a decltype type: either the
   `decltype' keyword or an already-parsed CPP_DECLTYPE token. */
static bool
token_is_decltype (cp_token *t)
{
return (t->keyword == RID_DECLTYPE
|| t->type == CPP_DECLTYPE);
}
/* Returns TRUE iff the next token begins a decltype type.  Does not
   consume the token. */
static bool
cp_lexer_next_token_is_decltype (cp_lexer *lexer)
{
cp_token *t = cp_lexer_peek_token (lexer);
return token_is_decltype (t);
}
/* Return a pointer to the Nth token in the token stream. If N is 1,
then this is precisely equivalent to cp_lexer_peek_token (except
that it is not inline). One would like to disallow that case, but
there is one case (cp_parser_nth_token_starts_template_id) where
the caller passes a variable for N and it might be 1.
Purged tokens are skipped; hitting last_token yields the EOF
sentinel. */
static cp_token *
cp_lexer_peek_nth_token (cp_lexer* lexer, size_t n)
{
cp_token *token;
/* N is 1-based, not zero-based. */
gcc_assert (n > 0);
if (cp_lexer_debugging_p (lexer))
fprintf (cp_lexer_debug_stream,
"cp_lexer: peeking ahead %ld at token: ", (long)n);
--n;
token = lexer->next_token;
gcc_assert (!n || token != &eof_token);
/* Walk forward, counting only non-purged tokens. */
while (n != 0)
{
++token;
if (token == lexer->last_token)
{
token = &eof_token;
break;
}
if (!token->purged_p)
--n;
}
if (cp_lexer_debugging_p (lexer))
{
cp_lexer_print_token (cp_lexer_debug_stream, token);
putc ('\n', cp_lexer_debug_stream);
}
return token;
}
/* Return the next token, and advance the lexer's next_token pointer
to point to the next non-purged token.  Must not be called at EOF,
nor on a CPP_PRAGMA_EOL while inside a pragma.  Also updates the
global input_location from the consumed token. */
static cp_token *
cp_lexer_consume_token (cp_lexer* lexer)
{
cp_token *token = lexer->next_token;
gcc_assert (token != &eof_token);
gcc_assert (!lexer->in_pragma || token->type != CPP_PRAGMA_EOL);
/* Advance past any purged tokens; stop at the EOF sentinel when the
   buffer is exhausted. */
do
{
lexer->next_token++;
if (lexer->next_token == lexer->last_token)
{
lexer->next_token = &eof_token;
break;
}
}
while (lexer->next_token->purged_p);
cp_lexer_set_source_position_from_token (token);
/* Provide debugging output. */
if (cp_lexer_debugging_p (lexer))
{
fputs ("cp_lexer: consuming token: ", cp_lexer_debug_stream);
cp_lexer_print_token (cp_lexer_debug_stream, token);
putc ('\n', cp_lexer_debug_stream);
}
return token;
}
/* Permanently remove the next token from the token stream, and
advance the next_token pointer to refer to the next non-purged
token.  The token's payload is cleared so the GC can reclaim it;
purged slots stay in the buffer but are skipped by all traversal. */
static void
cp_lexer_purge_token (cp_lexer *lexer)
{
cp_token *tok = lexer->next_token;
gcc_assert (tok != &eof_token);
tok->purged_p = true;
tok->location = UNKNOWN_LOCATION;
tok->u.value = NULL_TREE;
tok->keyword = RID_MAX;
/* Find the following non-purged token (or the EOF sentinel). */
do
{
tok++;
if (tok == lexer->last_token)
{
tok = &eof_token;
break;
}
}
while (tok->purged_p);
lexer->next_token = tok;
}
/* Permanently remove all tokens after TOK, up to, but not
including, the token that will be returned next by
cp_lexer_peek_token.  TOK itself is left intact; each purged
token's payload is cleared for the GC. */
static void
cp_lexer_purge_tokens_after (cp_lexer *lexer, cp_token *tok)
{
cp_token *peek = lexer->next_token;
/* At EOF the sentinel has no buffer address; purge to the end. */
if (peek == &eof_token)
peek = lexer->last_token;
gcc_assert (tok < peek);
for ( tok += 1; tok != peek; tok += 1)
{
tok->purged_p = true;
tok->location = UNKNOWN_LOCATION;
tok->u.value = NULL_TREE;
tok->keyword = RID_MAX;
}
}
/* Begin saving tokens. All tokens consumed after this point will be
preserved.  Pushes the current position onto the saved-token stack;
pair with cp_lexer_commit_tokens or cp_lexer_rollback_tokens. */
static void
cp_lexer_save_tokens (cp_lexer* lexer)
{
/* Provide debugging output. */
if (cp_lexer_debugging_p (lexer))
fprintf (cp_lexer_debug_stream, "cp_lexer: saving tokens\n");
lexer->saved_tokens.safe_push (lexer->next_token);
}
/* Commit to the portion of the token stream most recently saved. */
static void
cp_lexer_commit_tokens (cp_lexer* lexer)
{
  /* Provide debugging output.  */
  if (cp_lexer_debugging_p (lexer))
    fprintf (cp_lexer_debug_stream, "cp_lexer: committing tokens\n");

  /* Forget the most recently saved position; the tokens consumed
     since then are now permanent.  */
  lexer->saved_tokens.pop ();
}
/* Return all tokens saved since the last call to cp_lexer_save_tokens
to the token stream. Stop saving tokens. */
static void
cp_lexer_rollback_tokens (cp_lexer* lexer)
{
  /* Provide debugging output.  */
  if (cp_lexer_debugging_p (lexer))
    fprintf (cp_lexer_debug_stream, "cp_lexer: restoring tokens\n");

  /* Rewind NEXT_TOKEN to the position recorded by the matching
     cp_lexer_save_tokens, popping that save off the stack.  */
  lexer->next_token = lexer->saved_tokens.pop ();
}
/* RAII wrapper around the above functions, with sanity checking. Creating
a variable saves tokens, which are committed when the variable is
destroyed unless they are explicitly rolled back by calling the rollback
member function. */
struct saved_token_sentinel
{
  cp_lexer *lexer;
  unsigned len;	 /* Depth of the saved-token stack at construction.  */
  bool commit;	 /* Commit (rather than keep the save) on destruction?  */

  saved_token_sentinel (cp_lexer *lexer)
    : lexer (lexer), len (lexer->saved_tokens.length ()), commit (true)
  {
    cp_lexer_save_tokens (lexer);
  }

  /* Return the lexer to the position saved at construction, and tell
     the destructor not to commit.  */
  void rollback ()
  {
    cp_lexer_rollback_tokens (lexer);
    commit = false;
  }

  ~saved_token_sentinel ()
  {
    if (commit)
      cp_lexer_commit_tokens (lexer);
    /* Whether we committed or rolled back, the save made at
       construction must be gone again.  */
    gcc_assert (lexer->saved_tokens.length () == len);
  }
};
/* Print a representation of the TOKEN on the STREAM. */
static void
cp_lexer_print_token (FILE * stream, cp_token *token)
{
  /* We don't use cpp_type2name here because the parser defines
     a few tokens of its own.  */
  /* Indexed by token->type; the OP/TK expansion of TTYPE_TABLE emits
     one name string per cpplib token type, in enumeration order.  */
  static const char *const token_names[] = {
    /* cpplib-defined token types */
#define OP(e, s) #e,
#define TK(e, s) #e,
    TTYPE_TABLE
#undef OP
#undef TK
    /* C++ parser token types - see "Manifest constants", above.  */
    "KEYWORD",
    "TEMPLATE_ID",
    "NESTED_NAME_SPECIFIER",
  };

  /* For some tokens, print the associated data.  */
  switch (token->type)
    {
    case CPP_KEYWORD:
      /* Some keywords have a value that is not an IDENTIFIER_NODE.
	 For example, `struct' is mapped to an INTEGER_CST.  */
      if (!identifier_p (token->u.value))
	break;
      /* else fall through */
    case CPP_NAME:
      fputs (IDENTIFIER_POINTER (token->u.value), stream);
      break;

    case CPP_STRING:
    case CPP_STRING16:
    case CPP_STRING32:
    case CPP_WSTRING:
    case CPP_UTF8STRING:
      /* NOTE(review): unlike CPP_NAME above, string contents are
	 printed with a leading space and surrounding quotes.  */
      fprintf (stream, " \"%s\"", TREE_STRING_POINTER (token->u.value));
      break;

    case CPP_NUMBER:
      print_generic_expr (stream, token->u.value, 0);
      break;

    default:
      /* If we have a name for the token, print it out.  Otherwise, we
	 simply give the numeric code.  */
      if (token->type < ARRAY_SIZE(token_names))
	fputs (token_names[token->type], stream);
      else
	fprintf (stream, "[%d]", token->type);
      break;
    }
}
/* Dump the token REF to stderr, followed by a newline.  */
DEBUG_FUNCTION void
debug (cp_token &ref)
{
  cp_lexer_print_token (stderr, &ref);
  fprintf (stderr, "\n");
}
/* Dump the token pointed to by PTR to stderr, tolerating NULL.  */
DEBUG_FUNCTION void
debug (cp_token *ptr)
{
  if (!ptr)
    {
      fprintf (stderr, "<nil>\n");
      return;
    }
  debug (*ptr);
}
/* Start emitting debugging information. */
static void
cp_lexer_start_debugging (cp_lexer* lexer)
{
  /* All lexer debug output goes to stderr.  */
  cp_lexer_debug_stream = stderr;
  lexer->debugging_p = true;
}
/* Stop emitting debugging information. */
static void
cp_lexer_stop_debugging (cp_lexer* lexer)
{
  /* Clear the stream as well so stray output is an obvious crash
     rather than silent noise.  */
  cp_lexer_debug_stream = NULL;
  lexer->debugging_p = false;
}
/* Create a new cp_token_cache, representing a range of tokens. */
static cp_token_cache *
cp_token_cache_new (cp_token *first, cp_token *last)
{
  /* GC-allocated: the cache lives as long as something refers to it.  */
  cp_token_cache *c = ggc_alloc<cp_token_cache> ();
  c->first = first;
  c->last = last;
  return c;
}
/* Diagnose if #pragma omp declare simd isn't followed immediately
by function declaration or definition. */
static inline void
cp_ensure_no_omp_declare_simd (cp_parser *parser)
{
  /* Nothing pending, or we already complained about it.  */
  if (parser->omp_declare_simd == NULL
      || parser->omp_declare_simd->error_seen)
    return;

  error ("%<#pragma omp declare simd%> not immediately followed by "
	 "function declaration or definition");
  parser->omp_declare_simd = NULL;
}
/* Finalize #pragma omp declare simd clauses after FNDECL has been parsed,
and put that into "omp declare simd" attribute. */
static inline void
cp_finalize_omp_declare_simd (cp_parser *parser, tree fndecl)
{
  /* The common case is that no declare-simd pragma is pending.  */
  if (!__builtin_expect (parser->omp_declare_simd != NULL, 0))
    return;

  if (fndecl == error_mark_node)
    /* Erroneous declaration: quietly drop the pending pragma.  */
    parser->omp_declare_simd = NULL;
  else if (TREE_CODE (fndecl) != FUNCTION_DECL)
    /* The pragma was not followed by a function after all; diagnose.  */
    cp_ensure_no_omp_declare_simd (parser);
}
/* Decl-specifiers. */
/* Set *DECL_SPECS to represent an empty decl-specifier-seq. */
static void
clear_decl_specs (cp_decl_specifier_seq *decl_specs)
{
  /* An all-zero cp_decl_specifier_seq represents the empty sequence.  */
  memset (decl_specs, 0, sizeof (*decl_specs));
}
/* Declarators. */
/* Nothing other than the parser should be creating declarators;
declarators are a semi-syntactic representation of C++ entities.
Other parts of the front end that need to create entities (like
VAR_DECLs or FUNCTION_DECLs) should do that directly. */
static cp_declarator *make_call_declarator
(cp_declarator *, tree, cp_cv_quals, cp_virt_specifiers, cp_ref_qualifier, tree, tree);
static cp_declarator *make_array_declarator
(cp_declarator *, tree);
static cp_declarator *make_pointer_declarator
(cp_cv_quals, cp_declarator *, tree);
static cp_declarator *make_reference_declarator
(cp_cv_quals, cp_declarator *, bool, tree);
static cp_parameter_declarator *make_parameter_declarator
(cp_decl_specifier_seq *, cp_declarator *, tree);
static cp_declarator *make_ptrmem_declarator
(cp_cv_quals, tree, cp_declarator *, tree);
/* An erroneous declarator. */
static cp_declarator *cp_error_declarator;
/* The obstack on which declarators and related data structures are
allocated. */
static struct obstack declarator_obstack;
/* Alloc BYTES from the declarator memory pool. */
static inline void *
alloc_declarator (size_t bytes)
{
  /* Declarators live on DECLARATOR_OBSTACK and are freed en masse;
     there is no per-object free for this allocation.  */
  return obstack_alloc (&declarator_obstack, bytes);
}
/* Allocate a declarator of the indicated KIND. Clear fields that are
common to all declarators. */
static cp_declarator *
make_declarator (cp_declarator_kind kind)
{
  cp_declarator *d
    = (cp_declarator *) alloc_declarator (sizeof (cp_declarator));

  /* Fill in the fields shared by every declarator kind; callers set
     the kind-specific union members afterwards.  */
  d->kind = kind;
  d->declarator = NULL;
  d->attributes = NULL_TREE;
  d->std_attributes = NULL_TREE;
  d->parameter_pack_p = false;
  d->id_loc = UNKNOWN_LOCATION;

  return d;
}
/* Make a declarator for a generalized identifier. If
QUALIFYING_SCOPE is non-NULL, the identifier is
QUALIFYING_SCOPE::UNQUALIFIED_NAME; otherwise, it is just
UNQUALIFIED_NAME. SFK indicates the kind of special function this
is, if any. */
static cp_declarator *
make_id_declarator (tree qualifying_scope, tree unqualified_name,
		    special_function_kind sfk)
{
  /* It is valid to write:

       class C { void f(); };
       typedef C D;
       void D::f();

     The standard is not clear about whether `typedef const C D' is
     legal; as of 2002-09-15 the committee is considering that
     question.  EDG 3.0 allows that syntax.  Therefore, we do as
     well.  */
  if (qualifying_scope && TYPE_P (qualifying_scope))
    qualifying_scope = TYPE_MAIN_VARIANT (qualifying_scope);

  /* Only plain identifiers, destructor names (BIT_NOT_EXPR), and
     template-ids may serve as the unqualified name.  */
  gcc_assert (identifier_p (unqualified_name)
	      || TREE_CODE (unqualified_name) == BIT_NOT_EXPR
	      || TREE_CODE (unqualified_name) == TEMPLATE_ID_EXPR);

  cp_declarator *d = make_declarator (cdk_id);
  d->u.id.qualifying_scope = qualifying_scope;
  d->u.id.unqualified_name = unqualified_name;
  d->u.id.sfk = sfk;
  return d;
}
/* Make a declarator for a pointer to TARGET. CV_QUALIFIERS is a list
of modifiers such as const or volatile to apply to the pointer
type, represented as identifiers. ATTRIBUTES represent the attributes that
appertain to the pointer or reference. */
cp_declarator *
make_pointer_declarator (cp_cv_quals cv_qualifiers, cp_declarator *target,
			 tree attributes)
{
  cp_declarator *d = make_declarator (cdk_pointer);

  d->declarator = target;
  d->u.pointer.qualifiers = cv_qualifiers;
  d->u.pointer.class_type = NULL_TREE;
  d->std_attributes = attributes;

  /* The parameter-pack marking migrates outward from TARGET to the
     enclosing declarator.  */
  d->parameter_pack_p = false;
  if (target)
    {
      d->id_loc = target->id_loc;
      d->parameter_pack_p = target->parameter_pack_p;
      target->parameter_pack_p = false;
    }

  return d;
}
/* Like make_pointer_declarator -- but for references. ATTRIBUTES
represent the attributes that appertain to the pointer or
reference. */
cp_declarator *
make_reference_declarator (cp_cv_quals cv_qualifiers, cp_declarator *target,
			   bool rvalue_ref, tree attributes)
{
  cp_declarator *d = make_declarator (cdk_reference);

  d->declarator = target;
  d->u.reference.qualifiers = cv_qualifiers;
  d->u.reference.rvalue_ref = rvalue_ref;
  d->std_attributes = attributes;

  /* The parameter-pack marking migrates outward from TARGET to the
     enclosing declarator.  */
  d->parameter_pack_p = false;
  if (target)
    {
      d->id_loc = target->id_loc;
      d->parameter_pack_p = target->parameter_pack_p;
      target->parameter_pack_p = false;
    }

  return d;
}
/* Like make_pointer_declarator -- but for a pointer to a non-static
member of CLASS_TYPE. ATTRIBUTES represent the attributes that
appertain to the pointer or reference. */
cp_declarator *
make_ptrmem_declarator (cp_cv_quals cv_qualifiers, tree class_type,
			cp_declarator *pointee,
			tree attributes)
{
  cp_declarator *d = make_declarator (cdk_ptrmem);

  d->declarator = pointee;
  d->u.pointer.qualifiers = cv_qualifiers;
  d->u.pointer.class_type = class_type;
  d->std_attributes = attributes;

  /* The parameter-pack marking migrates outward from POINTEE to the
     enclosing declarator.  (Unlike the pointer/reference cases, no
     id_loc is propagated here.)  */
  d->parameter_pack_p = false;
  if (pointee)
    {
      d->parameter_pack_p = pointee->parameter_pack_p;
      pointee->parameter_pack_p = false;
    }

  return d;
}
/* Make a declarator for the function given by TARGET, with the
   indicated PARMS.  The CV_QUALIFIERS apply to the function, as in a
   "const"-qualified member function.  The EXCEPTION_SPECIFICATION
   indicates what exceptions can be thrown.  */
cp_declarator *
make_call_declarator (cp_declarator *target,
		      tree parms,
		      cp_cv_quals cv_qualifiers,
		      cp_virt_specifiers virt_specifiers,
		      cp_ref_qualifier ref_qualifier,
		      tree exception_specification,
		      tree late_return_type)
{
  cp_declarator *d = make_declarator (cdk_function);

  d->declarator = target;
  d->u.function.parameters = parms;
  d->u.function.qualifiers = cv_qualifiers;
  d->u.function.virt_specifiers = virt_specifiers;
  d->u.function.ref_qualifier = ref_qualifier;
  d->u.function.exception_specification = exception_specification;
  d->u.function.late_return_type = late_return_type;

  /* The parameter-pack marking migrates outward from TARGET to the
     enclosing declarator.  */
  d->parameter_pack_p = false;
  if (target)
    {
      d->id_loc = target->id_loc;
      d->parameter_pack_p = target->parameter_pack_p;
      target->parameter_pack_p = false;
    }

  return d;
}
/* Make a declarator for an array of BOUNDS elements, each of which is
defined by ELEMENT. */
cp_declarator *
make_array_declarator (cp_declarator *element, tree bounds)
{
  cp_declarator *d = make_declarator (cdk_array);

  d->declarator = element;
  d->u.array.bounds = bounds;

  /* The parameter-pack marking migrates outward from ELEMENT to the
     enclosing declarator.  */
  d->parameter_pack_p = false;
  if (element)
    {
      d->id_loc = element->id_loc;
      d->parameter_pack_p = element->parameter_pack_p;
      element->parameter_pack_p = false;
    }

  return d;
}
/* Determine whether the declarator we've seen so far can be a
parameter pack, when followed by an ellipsis. */
static bool
declarator_can_be_parameter_pack (cp_declarator *declarator)
{
  /* Walk inward looking for a declarator name, or any other declarator
     that goes after the point where the ellipsis could appear in a
     parameter pack.  Finding one means this declarator cannot be made
     into a parameter pack.  */
  for (cp_declarator *d = declarator; d != NULL; )
    switch ((int) d->kind)
      {
      case cdk_id:
      case cdk_array:
	/* A name or array bound would precede the ellipsis.  */
	return false;

      case cdk_error:
	return true;

      default:
	d = d->declarator;
	break;
      }

  return true;
}
cp_parameter_declarator *no_parameters;
/* Create a parameter declarator with the indicated DECL_SPECIFIERS,
DECLARATOR and DEFAULT_ARGUMENT. */
cp_parameter_declarator *
make_parameter_declarator (cp_decl_specifier_seq *decl_specifiers,
			   cp_declarator *declarator,
			   tree default_argument)
{
  cp_parameter_declarator *parameter;

  parameter = ((cp_parameter_declarator *)
	       alloc_declarator (sizeof (cp_parameter_declarator)));
  parameter->next = NULL;
  if (decl_specifiers)
    parameter->decl_specifiers = *decl_specifiers;
  else
    /* No specifiers given: start from the empty sequence.
       (Fixed: "&parameter" had been mangled into a pilcrow character,
       "¶meter", which does not compile.)  */
    clear_decl_specs (&parameter->decl_specifiers);
  parameter->declarator = declarator;
  parameter->default_argument = default_argument;
  parameter->ellipsis_p = false;

  return parameter;
}
/* Returns true iff DECLARATOR is a declaration for a function. */
static bool
function_declarator_p (const cp_declarator *declarator)
{
while (declarator)
{
if (declarator->kind == cdk_function
&& declarator->declarator->kind == cdk_id)
return true;
if (declarator->kind == cdk_id
|| declarator->kind == cdk_error)
return false;
declarator = declarator->declarator;
}
return false;
}
/* The parser. */
/* Overview
--------
A cp_parser parses the token stream as specified by the C++
grammar. Its job is purely parsing, not semantic analysis. For
example, the parser breaks the token stream into declarators,
expressions, statements, and other similar syntactic constructs.
It does not check that the types of the expressions on either side
of an assignment-statement are compatible, or that a function is
not declared with a parameter of type `void'.
The parser invokes routines elsewhere in the compiler to perform
semantic analysis and to build up the abstract syntax tree for the
code processed.
The parser (and the template instantiation code, which is, in a
way, a close relative of parsing) are the only parts of the
compiler that should be calling push_scope and pop_scope, or
related functions. The parser (and template instantiation code)
keeps track of what scope is presently active; everything else
should simply honor that. (The code that generates static
initializers may also need to set the scope, in order to check
access control correctly when emitting the initializers.)
Methodology
-----------
The parser is of the standard recursive-descent variety. Upcoming
tokens in the token stream are examined in order to determine which
production to use when parsing a non-terminal. Some C++ constructs
require arbitrary look ahead to disambiguate. For example, it is
impossible, in the general case, to tell whether a statement is an
expression or declaration without scanning the entire statement.
Therefore, the parser is capable of "parsing tentatively." When the
parser is not sure what construct comes next, it enters this mode.
Then, while we attempt to parse the construct, the parser queues up
error messages, rather than issuing them immediately, and saves the
tokens it consumes. If the construct is parsed successfully, the
parser "commits", i.e., it issues any queued error messages and
the tokens that were being preserved are permanently discarded.
If, however, the construct is not parsed successfully, the parser
rolls back its state completely so that it can resume parsing using
a different alternative.
Future Improvements
-------------------
The performance of the parser could probably be improved substantially.
We could often eliminate the need to parse tentatively by looking ahead
a little bit. In some places, this approach might not entirely eliminate
the need to parse tentatively, but it might still speed up the average
case. */
/* Flags that are passed to some parsing functions. These values can
be bitwise-ored together. */
enum
{
  /* Each flag is a distinct bit so that flags can be OR'ed together
     into a cp_parser_flags value.  */
  /* No flags.  */
  CP_PARSER_FLAGS_NONE = 0x0,
  /* The construct is optional.  If it is not present, then no error
     should be issued.  */
  CP_PARSER_FLAGS_OPTIONAL = 0x1,
  /* When parsing a type-specifier, treat user-defined type-names
     as non-type identifiers.  */
  CP_PARSER_FLAGS_NO_USER_DEFINED_TYPES = 0x2,
  /* When parsing a type-specifier, do not try to parse a class-specifier
     or enum-specifier.  */
  CP_PARSER_FLAGS_NO_TYPE_DEFINITIONS = 0x4,
  /* When parsing a decl-specifier-seq, only allow type-specifier or
     constexpr.  */
  CP_PARSER_FLAGS_ONLY_TYPE_OR_CONSTEXPR = 0x8
};
/* This type is used for parameters and variables which hold
combinations of the above flags. */
typedef int cp_parser_flags;
/* The different kinds of declarators we want to parse. */
typedef enum cp_parser_declarator_kind
{
  /* Selects which declarator forms a parsing routine will accept.  */
  /* We want an abstract declarator.  */
  CP_PARSER_DECLARATOR_ABSTRACT,
  /* We want a named declarator.  */
  CP_PARSER_DECLARATOR_NAMED,
  /* We don't mind (abstract or named), but the name must be an
     unqualified-id.  */
  CP_PARSER_DECLARATOR_EITHER
} cp_parser_declarator_kind;
/* The precedence values used to parse binary expressions. The minimum value
of PREC must be 1, because zero is reserved to quickly discriminate
binary operators from other tokens. */
enum cp_parser_prec
{
  /* Zero: marks tokens that are not binary operators at all (see the
     comment above about discriminating quickly).  */
  PREC_NOT_OPERATOR,
  /* Listed from loosest to tightest binding.  */
  PREC_LOGICAL_OR_EXPRESSION,
  PREC_LOGICAL_AND_EXPRESSION,
  PREC_INCLUSIVE_OR_EXPRESSION,
  PREC_EXCLUSIVE_OR_EXPRESSION,
  PREC_AND_EXPRESSION,
  PREC_EQUALITY_EXPRESSION,
  PREC_RELATIONAL_EXPRESSION,
  PREC_SHIFT_EXPRESSION,
  PREC_ADDITIVE_EXPRESSION,
  PREC_MULTIPLICATIVE_EXPRESSION,
  PREC_PM_EXPRESSION,
  /* Highest precedence value; also sizes cp_parser_expression_stack.  */
  NUM_PREC_VALUES = PREC_PM_EXPRESSION
};
/* A mapping from a token type to a corresponding tree node type, with a
precedence value. */
typedef struct cp_parser_binary_operations_map_node
{
  /* The token type.  */
  enum cpp_ttype token_type;
  /* The corresponding tree code.  */
  enum tree_code tree_type;
  /* The precedence of this operator.  */
  enum cp_parser_prec prec;
  /* One row of the BINOPS table below; also re-indexed by token type
     into BINOPS_BY_TOKEN.  */
} cp_parser_binary_operations_map_node;
/* One level of the operator-precedence parser's pending-operation
   stack.  */
typedef struct cp_parser_expression_stack_entry
{
  /* Left hand side of the binary operation we are currently
     parsing.  */
  tree lhs;
  /* Original tree code for left hand side, if it was itself a binary
     expression (used for -Wparentheses).  */
  enum tree_code lhs_type;
  /* Tree code for the binary operation we are parsing.  */
  enum tree_code tree_type;
  /* Precedence of the binary operation we are parsing.  */
  enum cp_parser_prec prec;
  /* Location of the binary operation we are parsing.  */
  location_t loc;
} cp_parser_expression_stack_entry;

/* The stack for storing partial expressions.  We only need NUM_PREC_VALUES
   entries because precedence levels on the stack are monotonically
   increasing.  */
typedef struct cp_parser_expression_stack_entry
  cp_parser_expression_stack[NUM_PREC_VALUES];
/* Prototypes. */
/* Constructors and destructors. */
static cp_parser_context *cp_parser_context_new
(cp_parser_context *);
/* Class variables. */
static GTY((deletable)) cp_parser_context* cp_parser_context_free_list;
/* The operator-precedence table used by cp_parser_binary_expression.
Transformed into an associative array (binops_by_token) by
cp_parser_new. */
static const cp_parser_binary_operations_map_node binops[] = {
  /* Entries are grouped by precedence level (see enum cp_parser_prec);
     within a group the order is not significant.  */
  { CPP_DEREF_STAR, MEMBER_REF, PREC_PM_EXPRESSION },
  { CPP_DOT_STAR, DOTSTAR_EXPR, PREC_PM_EXPRESSION },

  { CPP_MULT, MULT_EXPR, PREC_MULTIPLICATIVE_EXPRESSION },
  { CPP_DIV, TRUNC_DIV_EXPR, PREC_MULTIPLICATIVE_EXPRESSION },
  { CPP_MOD, TRUNC_MOD_EXPR, PREC_MULTIPLICATIVE_EXPRESSION },

  { CPP_PLUS, PLUS_EXPR, PREC_ADDITIVE_EXPRESSION },
  { CPP_MINUS, MINUS_EXPR, PREC_ADDITIVE_EXPRESSION },

  { CPP_LSHIFT, LSHIFT_EXPR, PREC_SHIFT_EXPRESSION },
  { CPP_RSHIFT, RSHIFT_EXPR, PREC_SHIFT_EXPRESSION },

  { CPP_LESS, LT_EXPR, PREC_RELATIONAL_EXPRESSION },
  { CPP_GREATER, GT_EXPR, PREC_RELATIONAL_EXPRESSION },
  { CPP_LESS_EQ, LE_EXPR, PREC_RELATIONAL_EXPRESSION },
  { CPP_GREATER_EQ, GE_EXPR, PREC_RELATIONAL_EXPRESSION },

  { CPP_EQ_EQ, EQ_EXPR, PREC_EQUALITY_EXPRESSION },
  { CPP_NOT_EQ, NE_EXPR, PREC_EQUALITY_EXPRESSION },

  { CPP_AND, BIT_AND_EXPR, PREC_AND_EXPRESSION },
  { CPP_XOR, BIT_XOR_EXPR, PREC_EXCLUSIVE_OR_EXPRESSION },
  { CPP_OR, BIT_IOR_EXPR, PREC_INCLUSIVE_OR_EXPRESSION },

  { CPP_AND_AND, TRUTH_ANDIF_EXPR, PREC_LOGICAL_AND_EXPRESSION },
  { CPP_OR_OR, TRUTH_ORIF_EXPR, PREC_LOGICAL_OR_EXPRESSION }
};
/* The same as binops, but initialized by cp_parser_new so that
binops_by_token[N].token_type == N. Used in cp_parser_binary_expression
for speed. */
static cp_parser_binary_operations_map_node binops_by_token[N_CP_TTYPES];
/* Constructors and destructors. */
/* Construct a new context. The context below this one on the stack
is given by NEXT. */
static cp_parser_context *
cp_parser_context_new (cp_parser_context* next)
{
  cp_parser_context *context;

  /* Recycle a context from the free list if one is available;
     otherwise GC-allocate a fresh, zeroed one.  */
  if (cp_parser_context_free_list != NULL)
    {
      context = cp_parser_context_free_list;
      cp_parser_context_free_list = context->next;
      memset (context, 0, sizeof (*context));
    }
  else
    context = ggc_cleared_alloc<cp_parser_context> ();

  /* No errors have occurred yet in this context.  */
  context->status = CP_PARSER_STATUS_KIND_NO_ERROR;

  /* If this is not the bottommost context, copy information that we
     need from the previous context.  */
  if (next)
    {
      /* If, in the NEXT context, we are parsing an `x->' or `x.'
	 expression, then we are parsing one in this context, too.  */
      context->object_type = next->object_type;

      /* Thread the stack.  */
      context->next = next;
    }

  return context;
}
/* Managing the unparsed function queues. */
/* Accessors for the fields of the innermost (most recently pushed)
   unparsed-functions queue entry.  NB: each macro expands to an
   expression that uses a local variable named PARSER at the point of
   use.  */
#define unparsed_funs_with_default_args \
  parser->unparsed_queues->last ().funs_with_default_args
#define unparsed_funs_with_definitions \
  parser->unparsed_queues->last ().funs_with_definitions
#define unparsed_nsdmis \
  parser->unparsed_queues->last ().nsdmis
#define unparsed_classes \
  parser->unparsed_queues->last ().classes
/* Push a fresh, empty unparsed-functions queue entry; only the
   funs_with_definitions vector is allocated up front (from the shared
   tree-vector pool).  */
static void
push_unparsed_function_queues (cp_parser *parser)
{
  cp_unparsed_functions_entry entry = {NULL, make_tree_vector (), NULL, NULL};
  vec_safe_push (parser->unparsed_queues, entry);
}
static void
pop_unparsed_function_queues (cp_parser *parser)
{
  /* Return the funs_with_definitions vector (allocated in
     push_unparsed_function_queues) to the shared tree-vector pool
     before discarding the queue entry.  */
  release_tree_vector (unparsed_funs_with_definitions);
  parser->unparsed_queues->pop ();
}
/* Prototypes. */
/* Constructors and destructors. */
static cp_parser *cp_parser_new
(void);
/* Routines to parse various constructs.
Those that return `tree' will return the error_mark_node (rather
than NULL_TREE) if a parse error occurs, unless otherwise noted.
Sometimes, they will return an ordinary node if error-recovery was
attempted, even though a parse error occurred. So, to check
whether or not a parse error occurred, you should always use
cp_parser_error_occurred. If the construct is optional (indicated
either by an `_opt' in the name of the function that does the
parsing or via a FLAGS parameter), then NULL_TREE is returned if
the construct is not present. */
/* Lexical conventions [gram.lex] */
static tree cp_parser_identifier
(cp_parser *);
static tree cp_parser_string_literal
(cp_parser *, bool, bool, bool);
static tree cp_parser_userdef_char_literal
(cp_parser *);
static tree cp_parser_userdef_string_literal
(tree);
static tree cp_parser_userdef_numeric_literal
(cp_parser *);
/* Basic concepts [gram.basic] */
static bool cp_parser_translation_unit
(cp_parser *);
/* Expressions [gram.expr] */
static tree cp_parser_primary_expression
(cp_parser *, bool, bool, bool, cp_id_kind *);
static tree cp_parser_id_expression
(cp_parser *, bool, bool, bool *, bool, bool);
static tree cp_parser_unqualified_id
(cp_parser *, bool, bool, bool, bool);
static tree cp_parser_nested_name_specifier_opt
(cp_parser *, bool, bool, bool, bool);
static tree cp_parser_nested_name_specifier
(cp_parser *, bool, bool, bool, bool);
static tree cp_parser_qualifying_entity
(cp_parser *, bool, bool, bool, bool, bool);
static tree cp_parser_postfix_expression
(cp_parser *, bool, bool, bool, bool, cp_id_kind *);
static tree cp_parser_postfix_open_square_expression
(cp_parser *, tree, bool, bool);
static tree cp_parser_postfix_dot_deref_expression
(cp_parser *, enum cpp_ttype, tree, bool, cp_id_kind *, location_t);
static vec<tree, va_gc> *cp_parser_parenthesized_expression_list
(cp_parser *, int, bool, bool, bool *, bool = false);
/* Values for the second parameter of cp_parser_parenthesized_expression_list. */
enum { non_attr = 0, normal_attr = 1, id_attr = 2 };
static void cp_parser_pseudo_destructor_name
(cp_parser *, tree, tree *, tree *);
static tree cp_parser_unary_expression
(cp_parser *, cp_id_kind * = NULL, bool = false, bool = false, bool = false);
static enum tree_code cp_parser_unary_operator
(cp_token *);
static tree cp_parser_new_expression
(cp_parser *);
static vec<tree, va_gc> *cp_parser_new_placement
(cp_parser *);
static tree cp_parser_new_type_id
(cp_parser *, tree *);
static cp_declarator *cp_parser_new_declarator_opt
(cp_parser *);
static cp_declarator *cp_parser_direct_new_declarator
(cp_parser *);
static vec<tree, va_gc> *cp_parser_new_initializer
(cp_parser *);
static tree cp_parser_delete_expression
(cp_parser *);
static tree cp_parser_cast_expression
(cp_parser *, bool, bool, bool, cp_id_kind *);
static tree cp_parser_binary_expression
(cp_parser *, bool, bool, enum cp_parser_prec, cp_id_kind *);
static tree cp_parser_question_colon_clause
(cp_parser *, tree);
static tree cp_parser_assignment_expression
(cp_parser *, cp_id_kind * = NULL, bool = false, bool = false);
static enum tree_code cp_parser_assignment_operator_opt
(cp_parser *);
static tree cp_parser_expression
(cp_parser *, cp_id_kind * = NULL, bool = false, bool = false);
static tree cp_parser_constant_expression
(cp_parser *, bool = false, bool * = NULL);
static tree cp_parser_builtin_offsetof
(cp_parser *);
static tree cp_parser_lambda_expression
(cp_parser *);
static void cp_parser_lambda_introducer
(cp_parser *, tree);
static bool cp_parser_lambda_declarator_opt
(cp_parser *, tree);
static void cp_parser_lambda_body
(cp_parser *, tree);
/* Statements [gram.stmt.stmt] */
static void cp_parser_statement
(cp_parser *, tree, bool, bool *);
static void cp_parser_label_for_labeled_statement
(cp_parser *, tree);
static tree cp_parser_expression_statement
(cp_parser *, tree);
static tree cp_parser_compound_statement
(cp_parser *, tree, bool, bool);
static void cp_parser_statement_seq_opt
(cp_parser *, tree);
static tree cp_parser_selection_statement
(cp_parser *, bool *);
static tree cp_parser_condition
(cp_parser *);
static tree cp_parser_iteration_statement
(cp_parser *, bool);
static bool cp_parser_for_init_statement
(cp_parser *, tree *decl);
static tree cp_parser_for
(cp_parser *, bool);
static tree cp_parser_c_for
(cp_parser *, tree, tree, bool);
static tree cp_parser_range_for
(cp_parser *, tree, tree, tree, bool);
static void do_range_for_auto_deduction
(tree, tree);
static tree cp_parser_perform_range_for_lookup
(tree, tree *, tree *);
static tree cp_parser_range_for_member_function
(tree, tree);
static tree cp_parser_jump_statement
(cp_parser *);
static void cp_parser_declaration_statement
(cp_parser *);
static tree cp_parser_implicitly_scoped_statement
(cp_parser *, bool *);
static void cp_parser_already_scoped_statement
(cp_parser *);
/* Declarations [gram.dcl.dcl] */
static void cp_parser_declaration_seq_opt
(cp_parser *);
static void cp_parser_declaration
(cp_parser *);
static void cp_parser_block_declaration
(cp_parser *, bool);
static void cp_parser_simple_declaration
(cp_parser *, bool, tree *);
static void cp_parser_decl_specifier_seq
(cp_parser *, cp_parser_flags, cp_decl_specifier_seq *, int *);
static tree cp_parser_storage_class_specifier_opt
(cp_parser *);
static tree cp_parser_function_specifier_opt
(cp_parser *, cp_decl_specifier_seq *);
static tree cp_parser_type_specifier
(cp_parser *, cp_parser_flags, cp_decl_specifier_seq *, bool,
int *, bool *);
static tree cp_parser_simple_type_specifier
(cp_parser *, cp_decl_specifier_seq *, cp_parser_flags);
static tree cp_parser_type_name
(cp_parser *);
static tree cp_parser_nonclass_name
(cp_parser* parser);
static tree cp_parser_elaborated_type_specifier
(cp_parser *, bool, bool);
static tree cp_parser_enum_specifier
(cp_parser *);
static void cp_parser_enumerator_list
(cp_parser *, tree);
static void cp_parser_enumerator_definition
(cp_parser *, tree);
static tree cp_parser_namespace_name
(cp_parser *);
static void cp_parser_namespace_definition
(cp_parser *);
static void cp_parser_namespace_body
(cp_parser *);
static tree cp_parser_qualified_namespace_specifier
(cp_parser *);
static void cp_parser_namespace_alias_definition
(cp_parser *);
static bool cp_parser_using_declaration
(cp_parser *, bool);
static void cp_parser_using_directive
(cp_parser *);
static tree cp_parser_alias_declaration
(cp_parser *);
static void cp_parser_asm_definition
(cp_parser *);
static void cp_parser_linkage_specification
(cp_parser *);
static void cp_parser_static_assert
(cp_parser *, bool);
static tree cp_parser_decltype
(cp_parser *);
/* Declarators [gram.dcl.decl] */
static tree cp_parser_init_declarator
(cp_parser *, cp_decl_specifier_seq *, vec<deferred_access_check, va_gc> *,
bool, bool, int, bool *, tree *, location_t *);
static cp_declarator *cp_parser_declarator
(cp_parser *, cp_parser_declarator_kind, int *, bool *, bool, bool);
static cp_declarator *cp_parser_direct_declarator
(cp_parser *, cp_parser_declarator_kind, int *, bool, bool);
static enum tree_code cp_parser_ptr_operator
(cp_parser *, tree *, cp_cv_quals *, tree *);
static cp_cv_quals cp_parser_cv_qualifier_seq_opt
(cp_parser *);
static cp_virt_specifiers cp_parser_virt_specifier_seq_opt
(cp_parser *);
static cp_ref_qualifier cp_parser_ref_qualifier_opt
(cp_parser *);
static tree cp_parser_late_return_type_opt
(cp_parser *, cp_declarator *, cp_cv_quals);
static tree cp_parser_declarator_id
(cp_parser *, bool);
static tree cp_parser_type_id
(cp_parser *);
static tree cp_parser_template_type_arg
(cp_parser *);
static tree cp_parser_trailing_type_id (cp_parser *);
static tree cp_parser_type_id_1
(cp_parser *, bool, bool);
static void cp_parser_type_specifier_seq
(cp_parser *, bool, bool, cp_decl_specifier_seq *);
static tree cp_parser_parameter_declaration_clause
(cp_parser *);
static tree cp_parser_parameter_declaration_list
(cp_parser *, bool *);
static cp_parameter_declarator *cp_parser_parameter_declaration
(cp_parser *, bool, bool *);
static tree cp_parser_default_argument
(cp_parser *, bool);
static void cp_parser_function_body
(cp_parser *, bool);
static tree cp_parser_initializer
(cp_parser *, bool *, bool *);
static tree cp_parser_initializer_clause
(cp_parser *, bool *);
static tree cp_parser_braced_list
(cp_parser*, bool*);
static vec<constructor_elt, va_gc> *cp_parser_initializer_list
(cp_parser *, bool *);
static bool cp_parser_ctor_initializer_opt_and_function_body
(cp_parser *, bool);
static tree cp_parser_late_parsing_omp_declare_simd
(cp_parser *, tree);
static tree cp_parser_late_parsing_cilk_simd_fn_info
(cp_parser *, tree);
static tree synthesize_implicit_template_parm
(cp_parser *);
static tree finish_fully_implicit_template
(cp_parser *, tree);
/* Classes [gram.class] */
static tree cp_parser_class_name
(cp_parser *, bool, bool, enum tag_types, bool, bool, bool);
static tree cp_parser_class_specifier
(cp_parser *);
static tree cp_parser_class_head
(cp_parser *, bool *);
static enum tag_types cp_parser_class_key
(cp_parser *);
static void cp_parser_type_parameter_key
(cp_parser* parser);
static void cp_parser_member_specification_opt
(cp_parser *);
static void cp_parser_member_declaration
(cp_parser *);
static tree cp_parser_pure_specifier
(cp_parser *);
static tree cp_parser_constant_initializer
(cp_parser *);
/* Derived classes [gram.class.derived] */
static tree cp_parser_base_clause
(cp_parser *);
static tree cp_parser_base_specifier
(cp_parser *);
/* Special member functions [gram.special] */
static tree cp_parser_conversion_function_id
(cp_parser *);
static tree cp_parser_conversion_type_id
(cp_parser *);
static cp_declarator *cp_parser_conversion_declarator_opt
(cp_parser *);
static bool cp_parser_ctor_initializer_opt
(cp_parser *);
static void cp_parser_mem_initializer_list
(cp_parser *);
static tree cp_parser_mem_initializer
(cp_parser *);
static tree cp_parser_mem_initializer_id
(cp_parser *);
/* Overloading [gram.over] */
static tree cp_parser_operator_function_id
(cp_parser *);
static tree cp_parser_operator
(cp_parser *);
/* Templates [gram.temp] */
static void cp_parser_template_declaration
(cp_parser *, bool);
static tree cp_parser_template_parameter_list
(cp_parser *);
static tree cp_parser_template_parameter
(cp_parser *, bool *, bool *);
static tree cp_parser_type_parameter
(cp_parser *, bool *);
static tree cp_parser_template_id
(cp_parser *, bool, bool, enum tag_types, bool);
static tree cp_parser_template_name
(cp_parser *, bool, bool, bool, enum tag_types, bool *);
static tree cp_parser_template_argument_list
(cp_parser *);
static tree cp_parser_template_argument
(cp_parser *);
static void cp_parser_explicit_instantiation
(cp_parser *);
static void cp_parser_explicit_specialization
(cp_parser *);
/* Exception handling [gram.exception] */
static tree cp_parser_try_block
(cp_parser *);
static bool cp_parser_function_try_block
(cp_parser *);
static void cp_parser_handler_seq
(cp_parser *);
static void cp_parser_handler
(cp_parser *);
static tree cp_parser_exception_declaration
(cp_parser *);
static tree cp_parser_throw_expression
(cp_parser *);
static tree cp_parser_exception_specification_opt
(cp_parser *);
static tree cp_parser_type_id_list
(cp_parser *);
/* GNU Extensions */
static tree cp_parser_asm_specification_opt
(cp_parser *);
static tree cp_parser_asm_operand_list
(cp_parser *);
static tree cp_parser_asm_clobber_list
(cp_parser *);
static tree cp_parser_asm_label_list
(cp_parser *);
static bool cp_next_tokens_can_be_attribute_p
(cp_parser *);
static bool cp_next_tokens_can_be_gnu_attribute_p
(cp_parser *);
static bool cp_next_tokens_can_be_std_attribute_p
(cp_parser *);
static bool cp_nth_tokens_can_be_std_attribute_p
(cp_parser *, size_t);
static bool cp_nth_tokens_can_be_gnu_attribute_p
(cp_parser *, size_t);
static bool cp_nth_tokens_can_be_attribute_p
(cp_parser *, size_t);
static tree cp_parser_attributes_opt
(cp_parser *);
static tree cp_parser_gnu_attributes_opt
(cp_parser *);
static tree cp_parser_gnu_attribute_list
(cp_parser *);
static tree cp_parser_std_attribute
(cp_parser *);
static tree cp_parser_std_attribute_spec
(cp_parser *);
static tree cp_parser_std_attribute_spec_seq
(cp_parser *);
static bool cp_parser_extension_opt
(cp_parser *, int *);
static void cp_parser_label_declaration
(cp_parser *);
/* Transactional Memory Extensions */
static tree cp_parser_transaction
(cp_parser *, enum rid);
static tree cp_parser_transaction_expression
(cp_parser *, enum rid);
static bool cp_parser_function_transaction
(cp_parser *, enum rid);
static tree cp_parser_transaction_cancel
(cp_parser *);
/* The syntactic context in which a #pragma was encountered; passed to
   cp_parser_pragma so it can decide which pragmas are permitted at the
   current parse position.  (Value meanings inferred from the names --
   confirm against cp_parser_pragma's users.)  */
enum pragma_context {
  pragma_external,   /* presumably namespace/file scope */
  pragma_member,     /* presumably inside a class member-specification */
  pragma_objc_icode, /* presumably Objective-C instance-variable code */
  pragma_stmt,       /* presumably a statement context */
  pragma_compound    /* presumably directly inside a compound-statement */
};
static bool cp_parser_pragma
(cp_parser *, enum pragma_context);
/* Objective-C++ Productions */
static tree cp_parser_objc_message_receiver
(cp_parser *);
static tree cp_parser_objc_message_args
(cp_parser *);
static tree cp_parser_objc_message_expression
(cp_parser *);
static tree cp_parser_objc_encode_expression
(cp_parser *);
static tree cp_parser_objc_defs_expression
(cp_parser *);
static tree cp_parser_objc_protocol_expression
(cp_parser *);
static tree cp_parser_objc_selector_expression
(cp_parser *);
static tree cp_parser_objc_expression
(cp_parser *);
static bool cp_parser_objc_selector_p
(enum cpp_ttype);
static tree cp_parser_objc_selector
(cp_parser *);
static tree cp_parser_objc_protocol_refs_opt
(cp_parser *);
static void cp_parser_objc_declaration
(cp_parser *, tree);
static tree cp_parser_objc_statement
(cp_parser *);
static bool cp_parser_objc_valid_prefix_attributes
(cp_parser *, tree *);
static void cp_parser_objc_at_property_declaration
(cp_parser *) ;
static void cp_parser_objc_at_synthesize_declaration
(cp_parser *) ;
static void cp_parser_objc_at_dynamic_declaration
(cp_parser *) ;
static tree cp_parser_objc_struct_declaration
(cp_parser *) ;
/* Utility Routines */
static tree cp_parser_lookup_name
(cp_parser *, tree, enum tag_types, bool, bool, bool, tree *, location_t);
static tree cp_parser_lookup_name_simple
(cp_parser *, tree, location_t);
static tree cp_parser_maybe_treat_template_as_class
(tree, bool);
static bool cp_parser_check_declarator_template_parameters
(cp_parser *, cp_declarator *, location_t);
static bool cp_parser_check_template_parameters
(cp_parser *, unsigned, location_t, cp_declarator *);
static tree cp_parser_simple_cast_expression
(cp_parser *);
static tree cp_parser_global_scope_opt
(cp_parser *, bool);
static bool cp_parser_constructor_declarator_p
(cp_parser *, bool);
static tree cp_parser_function_definition_from_specifiers_and_declarator
(cp_parser *, cp_decl_specifier_seq *, tree, const cp_declarator *);
static tree cp_parser_function_definition_after_declarator
(cp_parser *, bool);
static void cp_parser_template_declaration_after_export
(cp_parser *, bool);
static void cp_parser_perform_template_parameter_access_checks
(vec<deferred_access_check, va_gc> *);
static tree cp_parser_single_declaration
(cp_parser *, vec<deferred_access_check, va_gc> *, bool, bool, bool *);
static tree cp_parser_functional_cast
(cp_parser *, tree);
static tree cp_parser_save_member_function_body
(cp_parser *, cp_decl_specifier_seq *, cp_declarator *, tree);
static tree cp_parser_save_nsdmi
(cp_parser *);
static tree cp_parser_enclosed_template_argument_list
(cp_parser *);
static void cp_parser_save_default_args
(cp_parser *, tree);
static void cp_parser_late_parsing_for_member
(cp_parser *, tree);
static tree cp_parser_late_parse_one_default_arg
(cp_parser *, tree, tree, tree);
static void cp_parser_late_parsing_nsdmi
(cp_parser *, tree);
static void cp_parser_late_parsing_default_args
(cp_parser *, tree);
static tree cp_parser_sizeof_operand
(cp_parser *, enum rid);
static tree cp_parser_trait_expr
(cp_parser *, enum rid);
static bool cp_parser_declares_only_class_p
(cp_parser *);
static void cp_parser_set_storage_class
(cp_parser *, cp_decl_specifier_seq *, enum rid, cp_token *);
static void cp_parser_set_decl_spec_type
(cp_decl_specifier_seq *, tree, cp_token *, bool);
static void set_and_check_decl_spec_loc
(cp_decl_specifier_seq *decl_specs,
cp_decl_spec ds, cp_token *);
static bool cp_parser_friend_p
(const cp_decl_specifier_seq *);
static void cp_parser_required_error
(cp_parser *, required_token, bool);
static cp_token *cp_parser_require
(cp_parser *, enum cpp_ttype, required_token);
static cp_token *cp_parser_require_keyword
(cp_parser *, enum rid, required_token);
static bool cp_parser_token_starts_function_definition_p
(cp_token *);
static bool cp_parser_next_token_starts_class_definition_p
(cp_parser *);
static bool cp_parser_next_token_ends_template_argument_p
(cp_parser *);
static bool cp_parser_nth_token_starts_template_argument_list_p
(cp_parser *, size_t);
static enum tag_types cp_parser_token_is_class_key
(cp_token *);
static enum tag_types cp_parser_token_is_type_parameter_key
(cp_token *);
static void cp_parser_check_class_key
(enum tag_types, tree type);
static void cp_parser_check_access_in_redeclaration
(tree type, location_t location);
static bool cp_parser_optional_template_keyword
(cp_parser *);
static void cp_parser_pre_parsed_nested_name_specifier
(cp_parser *);
static bool cp_parser_cache_group
(cp_parser *, enum cpp_ttype, unsigned);
static tree cp_parser_cache_defarg
(cp_parser *parser, bool nsdmi);
static void cp_parser_parse_tentatively
(cp_parser *);
static void cp_parser_commit_to_tentative_parse
(cp_parser *);
static void cp_parser_commit_to_topmost_tentative_parse
(cp_parser *);
static void cp_parser_abort_tentative_parse
(cp_parser *);
static bool cp_parser_parse_definitely
(cp_parser *);
static inline bool cp_parser_parsing_tentatively
(cp_parser *);
static bool cp_parser_uncommitted_to_tentative_parse_p
(cp_parser *);
static void cp_parser_error
(cp_parser *, const char *);
static void cp_parser_name_lookup_error
(cp_parser *, tree, tree, name_lookup_error, location_t);
static bool cp_parser_simulate_error
(cp_parser *);
static bool cp_parser_check_type_definition
(cp_parser *);
static void cp_parser_check_for_definition_in_return_type
(cp_declarator *, tree, location_t type_location);
static void cp_parser_check_for_invalid_template_id
(cp_parser *, tree, enum tag_types, location_t location);
static bool cp_parser_non_integral_constant_expression
(cp_parser *, non_integral_constant);
static void cp_parser_diagnose_invalid_type_name
(cp_parser *, tree, location_t);
static bool cp_parser_parse_and_diagnose_invalid_type_name
(cp_parser *);
static int cp_parser_skip_to_closing_parenthesis
(cp_parser *, bool, bool, bool);
static void cp_parser_skip_to_end_of_statement
(cp_parser *);
static void cp_parser_consume_semicolon_at_end_of_statement
(cp_parser *);
static void cp_parser_skip_to_end_of_block_or_statement
(cp_parser *);
static bool cp_parser_skip_to_closing_brace
(cp_parser *);
static void cp_parser_skip_to_end_of_template_parameter_list
(cp_parser *);
static void cp_parser_skip_to_pragma_eol
(cp_parser*, cp_token *);
static bool cp_parser_error_occurred
(cp_parser *);
static bool cp_parser_allow_gnu_extensions_p
(cp_parser *);
static bool cp_parser_is_pure_string_literal
(cp_token *);
static bool cp_parser_is_string_literal
(cp_token *);
static bool cp_parser_is_keyword
(cp_token *, enum rid);
static tree cp_parser_make_typename_type
(cp_parser *, tree, location_t location);
static cp_declarator * cp_parser_make_indirect_declarator
(enum tree_code, tree, cp_cv_quals, cp_declarator *, tree);
static bool cp_parser_compound_literal_p
(cp_parser *);
static bool cp_parser_array_designator_p
(cp_parser *);
static bool cp_parser_skip_to_closing_square_bracket
(cp_parser *);
/* Return true iff the parser is currently parsing tentatively, i.e.
   there is at least one saved context beyond the current one.  */
static inline bool
cp_parser_parsing_tentatively (cp_parser* parser)
{
  bool have_enclosing_context = (parser->context->next != NULL);
  return have_enclosing_context;
}
/* Return true iff TOKEN is a plain (non-user-defined) string literal
   of any encoding: narrow, UTF-16, UTF-32, wide, or UTF-8.  */
static bool
cp_parser_is_pure_string_literal (cp_token* token)
{
  switch (token->type)
    {
    case CPP_STRING:
    case CPP_STRING16:
    case CPP_STRING32:
    case CPP_WSTRING:
    case CPP_UTF8STRING:
      return true;
    default:
      return false;
    }
}
/* Return true iff TOKEN is a string literal, either a plain one (any
   encoding) or a user-defined string literal.  */
static bool
cp_parser_is_string_literal (cp_token* token)
{
  if (cp_parser_is_pure_string_literal (token))
    return true;

  switch (token->type)
    {
    case CPP_STRING_USERDEF:
    case CPP_STRING16_USERDEF:
    case CPP_STRING32_USERDEF:
    case CPP_WSTRING_USERDEF:
    case CPP_UTF8STRING_USERDEF:
      return true;
    default:
      return false;
    }
}
/* Return true iff TOKEN is the keyword identified by KEYWORD.  */
static bool
cp_parser_is_keyword (cp_token* token, enum rid keyword)
{
  return keyword == token->keyword;
}
/* If not parsing tentatively, issue a diagnostic of the form
      FILE:LINE: MESSAGE before TOKEN
   where TOKEN is the next token in the input stream.  GMSGID
   (specified by the caller) is usually of the form "expected
   OTHER-TOKEN".  While parsing tentatively, only the error flag is
   recorded and no message is emitted.  */
static void
cp_parser_error (cp_parser* parser, const char* gmsgid)
{
  /* In a tentative parse the error is merely recorded.  */
  if (cp_parser_simulate_error (parser))
    return;

  cp_token *next_token = cp_lexer_peek_token (parser->lexer);
  /* This diagnostic makes more sense if it is tagged to the line of
     the token we just peeked at.  */
  cp_lexer_set_source_position_from_token (next_token);

  if (next_token->type == CPP_PRAGMA)
    {
      error_at (next_token->location,
                "%<#pragma%> is not allowed here");
      cp_parser_skip_to_pragma_eol (parser, next_token);
      return;
    }

  /* Because c_parse_error does not understand CPP_KEYWORD, keywords
     are treated like identifiers.  */
  enum cpp_ttype reported_type
    = (next_token->type == CPP_KEYWORD ? CPP_NAME : next_token->type);
  c_parse_error (gmsgid, reported_type,
                 next_token->u.value, next_token->flags);
}
/* Issue an error about name-lookup failing.  NAME is the
   IDENTIFIER_NODE that was looked up; DECL is the result of the
   lookup (as returned from cp_parser_lookup_name).  DESIRED is the
   kind of entity we hoped to find.  LOCATION is where the diagnostic
   should point.  The wording is chosen from the scope in which the
   lookup was performed: a named scope, the global namespace, an
   object scope, or no scope at all.  */
static void
cp_parser_name_lookup_error (cp_parser* parser,
                             tree name,
                             tree decl,
                             name_lookup_error desired,
                             location_t location)
{
  /* If name lookup completely failed, tell the user that NAME was not
     declared.  */
  if (decl == error_mark_node)
    {
      if (parser->scope && parser->scope != global_namespace)
        error_at (location, "%<%E::%E%> has not been declared",
                  parser->scope, name);
      else if (parser->scope == global_namespace)
        error_at (location, "%<::%E%> has not been declared", name);
      else if (parser->object_scope
               && !CLASS_TYPE_P (parser->object_scope))
        /* OBJ.NAME / OBJ->NAME where OBJ is not of class type, so a
           member request is meaningless.  */
        error_at (location, "request for member %qE in non-class type %qT",
                  name, parser->object_scope);
      else if (parser->object_scope)
        error_at (location, "%<%T::%E%> has not been declared",
                  parser->object_scope, name);
      else
        error_at (location, "%qE has not been declared", name);
    }
  /* Lookup found something, but not the kind of entity DESIRED asked
     for.  Qualified lookup in a non-global scope: */
  else if (parser->scope && parser->scope != global_namespace)
    {
      switch (desired)
        {
        case NLE_TYPE:
          error_at (location, "%<%E::%E%> is not a type",
                    parser->scope, name);
          break;
        case NLE_CXX98:
          error_at (location, "%<%E::%E%> is not a class or namespace",
                    parser->scope, name);
          break;
        case NLE_NOT_CXX98:
          error_at (location,
                    "%<%E::%E%> is not a class, namespace, or enumeration",
                    parser->scope, name);
          break;
        default:
          gcc_unreachable ();
        }
    }
  /* Qualified lookup in the global namespace (`::NAME'): */
  else if (parser->scope == global_namespace)
    {
      switch (desired)
        {
        case NLE_TYPE:
          error_at (location, "%<::%E%> is not a type", name);
          break;
        case NLE_CXX98:
          error_at (location, "%<::%E%> is not a class or namespace", name);
          break;
        case NLE_NOT_CXX98:
          error_at (location,
                    "%<::%E%> is not a class, namespace, or enumeration",
                    name);
          break;
        default:
          gcc_unreachable ();
        }
    }
  /* Unqualified lookup: */
  else
    {
      switch (desired)
        {
        case NLE_TYPE:
          error_at (location, "%qE is not a type", name);
          break;
        case NLE_CXX98:
          error_at (location, "%qE is not a class or namespace", name);
          break;
        case NLE_NOT_CXX98:
          error_at (location,
                    "%qE is not a class, namespace, or enumeration", name);
          break;
        default:
          gcc_unreachable ();
        }
    }
}
/* If we are parsing tentatively, remember that an error has occurred
   during this tentative parse.  Returns true if the error was
   simulated; false if a message should be issued by the caller.  */
static bool
cp_parser_simulate_error (cp_parser* parser)
{
  if (!cp_parser_uncommitted_to_tentative_parse_p (parser))
    return false;

  parser->context->status = CP_PARSER_STATUS_KIND_ERROR;
  return true;
}
/* Called when a type is defined.  If type definitions are forbidden
   at this point, an error message is issued and false is returned;
   otherwise returns true.  */
static bool
cp_parser_check_type_definition (cp_parser* parser)
{
  /* No forbidden-message set: type definitions are allowed here.  */
  if (parser->type_definition_forbidden_message == NULL)
    return true;

  /* Don't use `%s' to print the string, because quotations (`%<', `%>')
     in the message need to be interpreted.  */
  error (parser->type_definition_forbidden_message);
  return false;
}
/* Called when DECLARATOR is processed.  TYPE was a type defined in
   the decl-specifiers.  If it is invalid to define a type in the
   decl-specifiers for DECLARATOR (i.e. the declarator declares a
   function, so TYPE would be defined in its return type), an error is
   issued.  TYPE_LOCATION is the location of TYPE and is used for
   error reporting.  */
static void
cp_parser_check_for_definition_in_return_type (cp_declarator *declarator,
                                               tree type, location_t type_location)
{
  /* [dcl.fct] forbids type definitions in return types.
     Unfortunately, it's not easy to know whether or not we are
     processing a return type until after the fact.  Peel off any
     pointer/reference/pointer-to-member layers to reach the core
     declarator.  */
  cp_declarator *core = declarator;
  for (; core != NULL
         && (core->kind == cdk_pointer
             || core->kind == cdk_reference
             || core->kind == cdk_ptrmem);
       core = core->declarator)
    ;

  if (core == NULL || core->kind != cdk_function)
    return;

  error_at (type_location,
            "new types may not be defined in a return type");
  inform (type_location,
          "(perhaps a semicolon is missing after the definition of %qT)",
          type);
}
/* A type-specifier (TYPE) has been parsed which cannot be followed by
   "<" in any valid C++ program.  If the next token is indeed "<",
   issue a message warning the user about what appears to be an
   invalid attempt to form a template-id, then consume and discard the
   bogus template-argument-list.  TAG_TYPE refines the wording when
   TYPE is a bare identifier.  LOCATION is the location of the
   type-specifier (TYPE).  */
static void
cp_parser_check_for_invalid_template_id (cp_parser* parser,
                                         tree type,
                                         enum tag_types tag_type,
                                         location_t location)
{
  /* 0 means "no position recorded"; only set while uncommitted to a
     tentative parse (see below).  */
  cp_token_position start = 0;
  if (cp_lexer_next_token_is (parser->lexer, CPP_LESS))
    {
      /* Pick the most specific complaint available.  */
      if (TYPE_P (type))
        error_at (location, "%qT is not a template", type);
      else if (identifier_p (type))
        {
          if (tag_type != none_type)
            error_at (location, "%qE is not a class template", type);
          else
            error_at (location, "%qE is not a template", type);
        }
      else
        error_at (location, "invalid template-id");
      /* Remember the location of the invalid "<".  */
      if (cp_parser_uncommitted_to_tentative_parse_p (parser))
        start = cp_lexer_token_position (parser->lexer, true);
      /* Consume the "<".  */
      cp_lexer_consume_token (parser->lexer);
      /* Parse the template arguments.  */
      cp_parser_enclosed_template_argument_list (parser);
      /* Permanently remove the invalid template arguments so that
         this error message is not issued again.  */
      if (start)
        cp_lexer_purge_tokens_after (parser->lexer, start);
    }
}
/* If parsing an integral constant-expression, issue an error message
   about the fact that THING appeared and return true.  Otherwise,
   return false.  In either case, set
   PARSER->NON_INTEGRAL_CONSTANT_EXPRESSION_P.

   Most THING values have a dedicated full-sentence message; the rest
   share the generic "%qs cannot appear ..." wording via MSG.  */
static bool
cp_parser_non_integral_constant_expression (cp_parser *parser,
                                            non_integral_constant thing)
{
  parser->non_integral_constant_expression_p = true;
  if (parser->integral_constant_expression_p)
    {
      if (!parser->allow_non_integral_constant_expression_p)
        {
          const char *msg = NULL;
          switch (thing)
            {
            case NIC_FLOAT:
              error ("floating-point literal "
                     "cannot appear in a constant-expression");
              return true;
            case NIC_CAST:
              error ("a cast to a type other than an integral or "
                     "enumeration type cannot appear in a "
                     "constant-expression");
              return true;
            case NIC_TYPEID:
              error ("%<typeid%> operator "
                     "cannot appear in a constant-expression");
              return true;
            case NIC_NCC:
              error ("non-constant compound literals "
                     "cannot appear in a constant-expression");
              return true;
            case NIC_FUNC_CALL:
              error ("a function call "
                     "cannot appear in a constant-expression");
              return true;
            case NIC_INC:
              error ("an increment "
                     "cannot appear in a constant-expression");
              return true;
            case NIC_DEC:
              /* Fixed diagnostic grammar: was "an decrement".  */
              error ("a decrement "
                     "cannot appear in a constant-expression");
              return true;
            case NIC_ARRAY_REF:
              error ("an array reference "
                     "cannot appear in a constant-expression");
              return true;
            case NIC_ADDR_LABEL:
              error ("the address of a label "
                     "cannot appear in a constant-expression");
              return true;
            case NIC_OVERLOADED:
              error ("calls to overloaded operators "
                     "cannot appear in a constant-expression");
              return true;
            case NIC_ASSIGNMENT:
              error ("an assignment cannot appear in a constant-expression");
              return true;
            case NIC_COMMA:
              error ("a comma operator "
                     "cannot appear in a constant-expression");
              return true;
            case NIC_CONSTRUCTOR:
              error ("a call to a constructor "
                     "cannot appear in a constant-expression");
              return true;
            case NIC_TRANSACTION:
              error ("a transaction expression "
                     "cannot appear in a constant-expression");
              return true;

            /* The remaining cases share the generic wording below.  */
            case NIC_THIS:
              msg = "this";
              break;
            case NIC_FUNC_NAME:
              msg = "__FUNCTION__";
              break;
            case NIC_PRETTY_FUNC:
              msg = "__PRETTY_FUNCTION__";
              break;
            case NIC_C99_FUNC:
              msg = "__func__";
              break;
            case NIC_VA_ARG:
              msg = "va_arg";
              break;
            case NIC_ARROW:
              msg = "->";
              break;
            case NIC_POINT:
              msg = ".";
              break;
            case NIC_STAR:
              msg = "*";
              break;
            case NIC_ADDR:
              msg = "&";
              break;
            case NIC_PREINCREMENT:
              msg = "++";
              break;
            case NIC_PREDECREMENT:
              msg = "--";
              break;
            case NIC_NEW:
              msg = "new";
              break;
            case NIC_DEL:
              msg = "delete";
              break;
            default:
              gcc_unreachable ();
            }
          if (msg)
            error ("%qs cannot appear in a constant-expression", msg);
          return true;
        }
    }
  return false;
}
/* Emit a diagnostic for an invalid type name.  This function commits
   to the current active tentative parse, if any.  (Otherwise, the
   problematic construct might be encountered again later, resulting
   in duplicate error messages.)  LOCATION is the location of ID.
   After the primary error, a series of heuristics tries to add a
   helpful note (missing template argument list, keyword from a newer
   standard, a type hidden in a dependent base class, etc.).  */
static void
cp_parser_diagnose_invalid_type_name (cp_parser *parser, tree id,
                                      location_t location)
{
  tree decl, ambiguous_decls;
  cp_parser_commit_to_tentative_parse (parser);
  /* Try to lookup the identifier.  */
  decl = cp_parser_lookup_name (parser, id, none_type,
                                /*is_template=*/false,
                                /*is_namespace=*/false,
                                /*check_dependency=*/true,
                                &ambiguous_decls, location);
  if (ambiguous_decls)
    /* If the lookup was ambiguous, an error will already have
       been issued.  */
    return;
  /* If the lookup found a template-name, it means that the user forgot
     to specify an argument list.  Emit a useful error message.  */
  if (TREE_CODE (decl) == TEMPLATE_DECL)
    error_at (location,
              "invalid use of template-name %qE without an argument list",
              decl);
  else if (TREE_CODE (id) == BIT_NOT_EXPR)
    error_at (location, "invalid use of destructor %qD as a type", id);
  else if (TREE_CODE (decl) == TYPE_DECL)
    /* Something like 'unsigned A a;'  */
    error_at (location, "invalid combination of multiple type-specifiers");
  else if (!parser->scope)
    {
      /* Unqualified name that does not name a type.  Issue an error
         message, then look for a plausible explanation to attach.  */
      error_at (location, "%qE does not name a type", id);
      /* If we're in a template class, it's possible that the user was
         referring to a type from a base class.  For example:
           template <typename T> struct A { typedef T X; };
           template <typename T> struct B : public A<T> { X x; };
         The user should have said "typename A<T>::X".  */
      if (cxx_dialect < cxx11 && id == ridpointers[(int)RID_CONSTEXPR])
        inform (location, "C++11 %<constexpr%> only available with "
                "-std=c++11 or -std=gnu++11");
      else if (cxx_dialect < cxx11 && id == ridpointers[(int)RID_NOEXCEPT])
        inform (location, "C++11 %<noexcept%> only available with "
                "-std=c++11 or -std=gnu++11");
      else if (cxx_dialect < cxx11
               && TREE_CODE (id) == IDENTIFIER_NODE
               && !strcmp (IDENTIFIER_POINTER (id), "thread_local"))
        inform (location, "C++11 %<thread_local%> only available with "
                "-std=c++11 or -std=gnu++11");
      else if (processing_template_decl && current_class_type
               && TYPE_BINFO (current_class_type))
        {
          /* Walk the dependent base classes looking for a nested
             TYPE_DECL named ID; if found, suggest the `typename'
             spelling.  */
          tree b;
          for (b = TREE_CHAIN (TYPE_BINFO (current_class_type));
               b;
               b = TREE_CHAIN (b))
            {
              tree base_type = BINFO_TYPE (b);
              if (CLASS_TYPE_P (base_type)
                  && dependent_type_p (base_type))
                {
                  tree field;
                  /* Go from a particular instantiation of the
                     template (which will have an empty TYPE_FIELDs),
                     to the main version.  */
                  base_type = CLASSTYPE_PRIMARY_TEMPLATE_TYPE (base_type);
                  for (field = TYPE_FIELDS (base_type);
                       field;
                       field = DECL_CHAIN (field))
                    if (TREE_CODE (field) == TYPE_DECL
                        && DECL_NAME (field) == id)
                      {
                        inform (location,
                                "(perhaps %<typename %T::%E%> was intended)",
                                BINFO_TYPE (b), id);
                        break;
                      }
                  /* FIELD non-null here means we found a match and
                     informed; stop scanning further bases.  */
                  if (field)
                    break;
                }
            }
        }
    }
  /* Here we diagnose qualified-ids where the scope is actually correct,
     but the identifier does not resolve to a valid type name.  */
  else if (parser->scope != error_mark_node)
    {
      if (TREE_CODE (parser->scope) == NAMESPACE_DECL)
        {
          /* A following "<" suggests the user expected a template.  */
          if (cp_lexer_next_token_is (parser->lexer, CPP_LESS))
            error_at (location_of (id),
                      "%qE in namespace %qE does not name a template type",
                      id, parser->scope);
          else
            error_at (location_of (id),
                      "%qE in namespace %qE does not name a type",
                      id, parser->scope);
        }
      else if (CLASS_TYPE_P (parser->scope)
               && constructor_name_p (id, parser->scope))
        {
          /* A<T>::A<T>() */
          error_at (location, "%<%T::%E%> names the constructor, not"
                    " the type", parser->scope, id);
          if (cp_lexer_next_token_is (parser->lexer, CPP_LESS))
            error_at (location, "and %qT has no template constructors",
                      parser->scope);
        }
      else if (TYPE_P (parser->scope)
               && dependent_scope_p (parser->scope))
        error_at (location, "need %<typename%> before %<%T::%E%> because "
                  "%qT is a dependent scope",
                  parser->scope, id, parser->scope);
      else if (TYPE_P (parser->scope))
        {
          if (cp_lexer_next_token_is (parser->lexer, CPP_LESS))
            error_at (location_of (id),
                      "%qE in %q#T does not name a template type",
                      id, parser->scope);
          else
            error_at (location_of (id),
                      "%qE in %q#T does not name a type",
                      id, parser->scope);
        }
      else
        gcc_unreachable ();
    }
}
/* Check for a common situation where a type-name should be present,
   but is not, and issue a sensible error message.  Returns true if an
   invalid type-name was detected.

   The situation handled by this function are variable declarations of the
   form `ID a', where `ID' is an id-expression and `a' is a plain identifier.
   Usually, `ID' should name a type, but if we got here it means that it
   does not.  We try to emit the best possible error message depending on
   how exactly the id-expression looks like.  */
static bool
cp_parser_parse_and_diagnose_invalid_type_name (cp_parser *parser)
{
  tree id;
  cp_token *token = cp_lexer_peek_token (parser->lexer);
  /* Avoid duplicate error about ambiguous lookup.  */
  if (token->type == CPP_NESTED_NAME_SPECIFIER)
    {
      cp_token *next = cp_lexer_peek_nth_token (parser->lexer, 2);
      /* An already-reported name after the specifier: skip straight to
         recovery, no second diagnostic.  */
      if (next->type == CPP_NAME && next->error_reported)
        goto out;
    }
  /* Parse the id-expression tentatively so we can back out if this
     turns out not to be the `ID a' situation after all.  */
  cp_parser_parse_tentatively (parser);
  id = cp_parser_id_expression (parser,
                                /*template_keyword_p=*/false,
                                /*check_dependency_p=*/true,
                                /*template_p=*/NULL,
                                /*declarator_p=*/true,
                                /*optional_p=*/false);
  /* If the next token is a (, this is a function with no explicit return
     type, i.e. constructor, destructor or conversion op.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_PAREN)
      || TREE_CODE (id) == TYPE_DECL)
    {
      cp_parser_abort_tentative_parse (parser);
      return false;
    }
  if (!cp_parser_parse_definitely (parser))
    return false;
  /* Emit a diagnostic for the invalid type.  */
  cp_parser_diagnose_invalid_type_name (parser, id, token->location);
 out:
  /* If we aren't in the middle of a declarator (i.e. in a
     parameter-declaration-clause), skip to the end of the declaration;
     there's no point in trying to process it.  */
  if (!parser->in_declarator_p)
    cp_parser_skip_to_end_of_block_or_statement (parser);
  return true;
}
/* Consume tokens up to, and including, the next non-nested closing `)'.
   Returns 1 iff we found a closing `)'.  RECOVERING is true, if we
   are doing error recovery.  Returns -1 if OR_COMMA is true and we
   found an unnested comma.  If CONSUME_PAREN is true the closing `)'
   itself is consumed as well.

   Three independent depth counters track parentheses, braces and
   square brackets so that a `)' , `,' or `;' inside a nested construct
   is not mistaken for the terminator we are looking for.  */
static int
cp_parser_skip_to_closing_parenthesis (cp_parser *parser,
                                       bool recovering,
                                       bool or_comma,
                                       bool consume_paren)
{
  unsigned paren_depth = 0;
  unsigned brace_depth = 0;
  unsigned square_depth = 0;
  /* While uncommitted in a tentative parse (and not recovering past
     commas), don't skip anything: the caller will rewind anyway.  */
  if (recovering && !or_comma
      && cp_parser_uncommitted_to_tentative_parse_p (parser))
    return 0;
  while (true)
    {
      cp_token * token = cp_lexer_peek_token (parser->lexer);
      switch (token->type)
        {
        case CPP_EOF:
        case CPP_PRAGMA_EOL:
          /* If we've run out of tokens, then there is no closing `)'.  */
          return 0;
        /* This is good for lambda expression capture-lists.  */
        case CPP_OPEN_SQUARE:
          ++square_depth;
          break;
        case CPP_CLOSE_SQUARE:
          /* An unmatched `]' means the `)' we wanted isn't coming.
             (Post-decrement: tests the pre-decrement value.)  */
          if (!square_depth--)
            return 0;
          break;
        case CPP_SEMICOLON:
          /* This matches the processing in skip_to_end_of_statement.  */
          if (!brace_depth)
            return 0;
          break;
        case CPP_OPEN_BRACE:
          ++brace_depth;
          break;
        case CPP_CLOSE_BRACE:
          /* An unmatched `}' likewise terminates the search.  */
          if (!brace_depth--)
            return 0;
          break;
        case CPP_COMMA:
          /* A top-level comma, if the caller asked for one.  */
          if (recovering && or_comma && !brace_depth && !paren_depth
              && !square_depth)
            return -1;
          break;
        case CPP_OPEN_PAREN:
          /* Parens inside braces belong to nested statements; only
             count them at brace depth zero.  */
          if (!brace_depth)
            ++paren_depth;
          break;
        case CPP_CLOSE_PAREN:
          /* The non-nested `)' we were looking for.  */
          if (!brace_depth && !paren_depth--)
            {
              if (consume_paren)
                cp_lexer_consume_token (parser->lexer);
              return 1;
            }
          break;
        default:
          break;
        }
      /* Consume the token.  */
      cp_lexer_consume_token (parser->lexer);
    }
}
/* Consume tokens until we reach the end of the current statement.
   Normally, that will be just before consuming a `;'.  However, if a
   non-nested `}' comes first, then we stop before consuming that.  */
static void
cp_parser_skip_to_end_of_statement (cp_parser* parser)
{
  unsigned nesting_depth = 0;

  /* Unwind generic function template scope if necessary.  */
  if (parser->fully_implicit_function_template_p)
    finish_fully_implicit_template (parser, /*member_decl_opt=*/0);

  while (true)
    {
      cp_token *token = cp_lexer_peek_token (parser->lexer);

      switch (token->type)
        {
        case CPP_EOF:
        case CPP_PRAGMA_EOL:
          /* If we've run out of tokens, stop.  */
          return;

        case CPP_SEMICOLON:
          /* If the next token is a `;', we have reached the end of the
             statement.  */
          if (!nesting_depth)
            return;
          break;

        case CPP_CLOSE_BRACE:
          /* If this is a non-nested '}', stop before consuming it.
             That way, when confronted with something like:
               { 3 + }
             we stop before consuming the closing '}', even though we
             have not yet reached a `;'.  */
          if (nesting_depth == 0)
            return;
          /* If it is the closing '}' for a block that we have
             scanned, stop -- but only after consuming the token.
             That way given:
               void f g () { ... }
               typedef int I;
             we will stop after the body of the erroneously declared
             function, but before consuming the following `typedef'
             declaration.  */
          if (--nesting_depth == 0)
            {
              cp_lexer_consume_token (parser->lexer);
              return;
            }
          /* BUG FIX: this case previously fell through into
             CPP_OPEN_BRACE, re-incrementing NESTING_DEPTH and thereby
             cancelling the decrement above whenever a block nested
             more than one level deep was closed; the depth could then
             never drop back to zero and the skip ran away.  A close
             brace must never increase the nesting depth.  */
          break;

        case CPP_OPEN_BRACE:
          ++nesting_depth;
          break;

        default:
          break;
        }

      /* Consume the token.  */
      cp_lexer_consume_token (parser->lexer);
    }
}
/* Called at the end of a statement or declaration.  If the next token
   is a semicolon, it is consumed; otherwise, error recovery is
   attempted by skipping to the end of the statement and consuming a
   trailing `;' if one is then present.  */
static void
cp_parser_consume_semicolon_at_end_of_statement (cp_parser *parser)
{
  /* The common case: the `;' is right there.  */
  if (cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON))
    return;

  /* There is additional (erroneous) input; skip to the end of the
     statement, then pick up the `;' if we landed on one.  */
  cp_parser_skip_to_end_of_statement (parser);
  if (cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON))
    cp_lexer_consume_token (parser->lexer);
}
/* Skip tokens until we have consumed an entire block, or until we
   have consumed a non-nested `;'.

   NESTING_DEPTH is signed on purpose: setting it to -1 is the signal
   to consume one more token (at the bottom of the loop) and then let
   the `while' condition terminate.  */
static void
cp_parser_skip_to_end_of_block_or_statement (cp_parser* parser)
{
  int nesting_depth = 0;
  /* Unwind generic function template scope if necessary.  */
  if (parser->fully_implicit_function_template_p)
    finish_fully_implicit_template (parser, /*member_decl_opt=*/0);
  while (nesting_depth >= 0)
    {
      cp_token *token = cp_lexer_peek_token (parser->lexer);
      switch (token->type)
        {
        case CPP_EOF:
        case CPP_PRAGMA_EOL:
          /* If we've run out of tokens, stop.  */
          return;
        case CPP_SEMICOLON:
          /* Stop if this is an unnested ';'.  (Consume it first via
             the -1 sentinel.)  */
          if (!nesting_depth)
            nesting_depth = -1;
          break;
        case CPP_CLOSE_BRACE:
          /* Stop if this is an unnested '}', or closes the outermost
             nesting level.  */
          nesting_depth--;
          /* Unnested '}': stop *before* consuming it.  */
          if (nesting_depth < 0)
            return;
          /* Closed the outermost block: consume it, then stop.  */
          if (!nesting_depth)
            nesting_depth = -1;
          break;
        case CPP_OPEN_BRACE:
          /* Nest. */
          nesting_depth++;
          break;
        default:
          break;
        }
      /* Consume the token.  */
      cp_lexer_consume_token (parser->lexer);
    }
}
/* Skip tokens until a non-nested closing curly brace is the next
   token, or there are no more tokens.  Return true in the first case,
   false otherwise.  The closing brace itself is NOT consumed.  */
static bool
cp_parser_skip_to_closing_brace (cp_parser *parser)
{
  unsigned depth = 0;
  for (;;)
    {
      cp_token *tok = cp_lexer_peek_token (parser->lexer);

      /* Out of tokens: no closing brace exists.  */
      if (tok->type == CPP_EOF || tok->type == CPP_PRAGMA_EOL)
        return false;

      if (tok->type == CPP_CLOSE_BRACE)
        {
          /* A non-nested `}': leave it unconsumed and report success.  */
          if (depth == 0)
            return true;
          --depth;
        }
      else if (tok->type == CPP_OPEN_BRACE)
        /* Entering a nested block; consume it entirely.  */
        ++depth;

      /* Consume the token.  */
      cp_lexer_consume_token (parser->lexer);
    }
}
/* Consume tokens until we reach the end of the pragma.  The PRAGMA_TOK
   parameter is the PRAGMA token, allowing us to purge the entire pragma
   sequence so it is not parsed again.  */
static void
cp_parser_skip_to_pragma_eol (cp_parser* parser, cp_token *pragma_tok)
{
  parser->lexer->in_pragma = false;

  /* Eat tokens through the pragma's end-of-line (or EOF).  */
  cp_token *tok = cp_lexer_consume_token (parser->lexer);
  while (tok->type != CPP_PRAGMA_EOL && tok->type != CPP_EOF)
    tok = cp_lexer_consume_token (parser->lexer);

  /* Ensure that the pragma is not parsed again.  */
  cp_lexer_purge_tokens_after (parser->lexer, pragma_tok);
}
/* Require pragma end of line, resyncing with it as necessary.  The
   arguments are as for cp_parser_skip_to_pragma_eol.  */
static void
cp_parser_require_pragma_eol (cp_parser *parser, cp_token *pragma_tok)
{
  parser->lexer->in_pragma = false;

  cp_token *eol = cp_parser_require (parser, CPP_PRAGMA_EOL, RT_PRAGMA_EOL);
  if (eol == NULL)
    /* Not at the pragma's end: skip forward to it.  */
    cp_parser_skip_to_pragma_eol (parser, pragma_tok);
}
/* A simple wrapper around make_typename_type.  When ID is an
   unresolved identifier node, we can provide a superior diagnostic
   using cp_parser_diagnose_invalid_type_name; otherwise we let
   make_typename_type report errors itself.  */
static tree
cp_parser_make_typename_type (cp_parser *parser, tree id,
                              location_t id_location)
{
  /* Non-identifier: delegate entirely, with normal error reporting.  */
  if (!identifier_p (id))
    return make_typename_type (parser->scope, id, typename_type, tf_error);

  /* Identifier: try quietly first so a failure can get our better
     diagnostic.  */
  tree result = make_typename_type (parser->scope, id, typename_type,
                                    /*complain=*/tf_none);
  if (result == error_mark_node)
    cp_parser_diagnose_invalid_type_name (parser, id, id_location);
  return result;
}
/* This is a wrapper around the
   make_{pointer,ptrmem,reference}_declarator functions that decides
   which one to call based on the CODE and CLASS_TYPE arguments.  The
   CODE argument should be one of the values returned by
   cp_parser_ptr_operator.  ATTRIBUTES represent the attributes that
   appertain to the pointer or reference.  */
static cp_declarator *
cp_parser_make_indirect_declarator (enum tree_code code, tree class_type,
				    cp_cv_quals cv_qualifiers,
				    cp_declarator *target,
				    tree attributes)
{
  switch (code)
    {
    case ERROR_MARK:
      return cp_error_declarator;
    case INDIRECT_REF:
      /* `*': either a plain pointer or, with a class type, a
	 pointer-to-member.  */
      if (class_type == NULL_TREE)
	return make_pointer_declarator (cv_qualifiers, target, attributes);
      return make_ptrmem_declarator (cv_qualifiers, class_type,
				     target, attributes);
    case ADDR_EXPR:
      /* `&': an lvalue reference.  */
      if (class_type == NULL_TREE)
	return make_reference_declarator (cv_qualifiers, target,
					  false, attributes);
      break;
    case NON_LVALUE_EXPR:
      /* `&&': an rvalue reference.  */
      if (class_type == NULL_TREE)
	return make_reference_declarator (cv_qualifiers, target,
					  true, attributes);
      break;
    default:
      break;
    }
  /* References to members do not exist; no other codes are valid.  */
  gcc_unreachable ();
}
/* Create and return a new C++ parser, with its main lexer and default
   parsing state.  */
static cp_parser *
cp_parser_new (void)
{
  /* The main lexer must be created before any GC allocation, because
     cp_lexer_new_main might load a PCH file.  */
  cp_lexer *main_lexer = cp_lexer_new_main ();

  /* Populate binops_by_token so a token type maps straight to its
     tree code.  */
  for (unsigned ix = 0; ix < sizeof (binops) / sizeof (binops[0]); ix++)
    binops_by_token[binops[ix].token_type] = binops[ix];

  cp_parser *p = ggc_cleared_alloc<cp_parser> ();
  p->lexer = main_lexer;
  p->context = cp_parser_context_new (NULL);
  /* For now, we always accept GNU extensions.  */
  p->allow_gnu_extensions_p = 1;
  /* `>' is a greater-than operator, not the end of a template-id.  */
  p->greater_than_is_operator_p = true;
  p->default_arg_ok_p = true;
  /* Not parsing a constant-expression.  */
  p->integral_constant_expression_p = false;
  p->allow_non_integral_constant_expression_p = false;
  p->non_integral_constant_expression_p = false;
  /* Local variable names are not forbidden.  */
  p->local_variables_forbidden_p = false;
  /* Not inside an `extern "C"' declaration.  */
  p->in_unbraced_linkage_specification_p = false;
  /* Not processing a declarator.  */
  p->in_declarator_p = false;
  /* Not processing a template-argument-list.  */
  p->in_template_argument_list_p = false;
  /* Not inside an iteration statement.  */
  p->in_statement = 0;
  /* Not inside a switch statement.  */
  p->in_switch_statement_p = false;
  /* Not parsing a type-id inside an expression.  */
  p->in_type_id_in_expr_p = false;
  /* Declarations aren't implicitly extern "C".  */
  p->implicit_extern_c = false;
  /* String literals are translated to the execution character set.  */
  p->translate_strings_p = true;
  /* Not parsing a function body.  */
  p->in_function_body = false;
  /* Colon-to-scope correction is allowed until told otherwise.  */
  p->colon_corrects_to_scope_p = true;
  /* The unparsed function queue starts out empty.  */
  push_unparsed_function_queues (p);
  /* No classes being defined, no template parameter lists apply.  */
  p->num_classes_being_defined = 0;
  p->num_template_parameter_lists = 0;
  /* Not declaring an implicit function template.  */
  p->auto_is_implicit_function_template_parm_p = false;
  p->fully_implicit_function_template_p = false;
  p->implicit_template_parms = 0;
  p->implicit_template_scope = 0;
  return p;
}
/* Create a cp_lexer structure which will emit the tokens in CACHE
   and push it onto the parser's lexer stack.  This is used for delayed
   parsing of in-class method bodies and default arguments, and should
   not be confused with tentative parsing.  */
static void
cp_parser_push_lexer_for_tokens (cp_parser *parser, cp_token_cache *cache)
{
  /* Build a lexer over the cached tokens and make it the current one.  */
  cp_lexer *tok_lexer = cp_lexer_new_from_tokens (cache);
  tok_lexer->next = parser->lexer;
  parser->lexer = tok_lexer;
  /* From now on, report source positions starting at the first token of
     the new lexer.  */
  cp_lexer_set_source_position_from_token (tok_lexer->next_token);
}
/* Pop the top lexer off the parser stack.  This is never used for the
   "main" lexer, only for those pushed by cp_parser_push_lexer_for_tokens.  */
static void
cp_parser_pop_lexer (cp_parser *parser)
{
  /* Unlink the top lexer and dispose of it.  */
  cp_lexer *top = parser->lexer;
  parser->lexer = top->next;
  cp_lexer_destroy (top);
  /* Restore the source position of the lexer that is now on top.  */
  cp_lexer_set_source_position_from_token (parser->lexer->next_token);
}
/* Lexical conventions [gram.lex] */
/* Parse an identifier.  Returns an IDENTIFIER_NODE representing the
   identifier, or error_mark_node if no identifier is present.  */
static tree
cp_parser_identifier (cp_parser* parser)
{
  /* An identifier token is required here; cp_parser_require diagnoses
     its absence.  */
  cp_token *tok = cp_parser_require (parser, CPP_NAME, RT_NAME);
  if (!tok)
    return error_mark_node;
  return tok->u.value;
}
/* Parse a sequence of adjacent string constants.  Returns a
   TREE_STRING representing the combined, nul-terminated string
   constant.  If TRANSLATE is true, translate the string to the
   execution character set.  If WIDE_OK is false, a wide string is
   invalid in this context.  If LOOKUP_UDLIT is false, a user-defined
   suffix produces a USERDEF_LITERAL node rather than a call to the
   literal operator.
   C++98 [lex.string] says that if a narrow string literal token is
   adjacent to a wide string literal token, the behavior is undefined.
   However, C99 6.4.5p4 says that this results in a wide string literal.
   We follow C99 here, for consistency with the C front end.
   This code is largely lifted from lex_string() in c-lex.c.
   FUTURE: ObjC++ will need to handle @-strings here.  */
static tree
cp_parser_string_literal (cp_parser *parser, bool translate, bool wide_ok,
			  bool lookup_udlit = true)
{
  tree value;
  size_t count;			/* Number of adjacent literal tokens.  */
  struct obstack str_ob;	/* Collects cpp_strings when COUNT > 1.  */
  cpp_string str, istr, *strs;
  cp_token *tok;
  /* TYPE is the kind of the whole sequence; CURR_TYPE is the kind of
     the token currently being examined.  */
  enum cpp_ttype type, curr_type;
  /* 0 = no ud-suffix seen yet; 1 = one consistent suffix seen;
     -1 = inconsistent suffixes, already diagnosed.  */
  int have_suffix_p = 0;
  tree string_tree;
  tree suffix_id = NULL_TREE;
  bool curr_tok_is_userdef_p = false;
  tok = cp_lexer_peek_token (parser->lexer);
  if (!cp_parser_is_string_literal (tok))
    {
      cp_parser_error (parser, "expected string-literal");
      return error_mark_node;
    }
  /* For a user-defined literal, work on the underlying string value
     and strip the userdef-ness off the token type.  */
  if (cpp_userdef_string_p (tok->type))
    {
      string_tree = USERDEF_LITERAL_VALUE (tok->u.value);
      curr_type = cpp_userdef_string_remove_type (tok->type);
      curr_tok_is_userdef_p = true;
    }
  else
    {
      string_tree = tok->u.value;
      curr_type = tok->type;
    }
  type = curr_type;
  /* Try to avoid the overhead of creating and destroying an obstack
     for the common case of just one string.  */
  if (!cp_parser_is_string_literal
      (cp_lexer_peek_nth_token (parser->lexer, 2)))
    {
      cp_lexer_consume_token (parser->lexer);
      str.text = (const unsigned char *)TREE_STRING_POINTER (string_tree);
      str.len = TREE_STRING_LENGTH (string_tree);
      count = 1;
      if (curr_tok_is_userdef_p)
	{
	  suffix_id = USERDEF_LITERAL_SUFFIX_ID (tok->u.value);
	  have_suffix_p = 1;
	  curr_type = cpp_userdef_string_remove_type (tok->type);
	}
      else
	curr_type = tok->type;
      strs = &str;
    }
  else
    {
      /* Multiple adjacent literals: accumulate one cpp_string per
	 token on an obstack and let libcpp concatenate them below.  */
      gcc_obstack_init (&str_ob);
      count = 0;
      do
	{
	  cp_lexer_consume_token (parser->lexer);
	  count++;
	  str.text = (const unsigned char *)TREE_STRING_POINTER (string_tree);
	  str.len = TREE_STRING_LENGTH (string_tree);
	  if (curr_tok_is_userdef_p)
	    {
	      tree curr_suffix_id = USERDEF_LITERAL_SUFFIX_ID (tok->u.value);
	      if (have_suffix_p == 0)
		{
		  suffix_id = curr_suffix_id;
		  have_suffix_p = 1;
		}
	      else if (have_suffix_p == 1
		       && curr_suffix_id != suffix_id)
		{
		  error ("inconsistent user-defined literal suffixes"
			 " %qD and %qD in string literal",
			 suffix_id, curr_suffix_id);
		  have_suffix_p = -1;
		}
	      curr_type = cpp_userdef_string_remove_type (tok->type);
	    }
	  else
	    curr_type = tok->type;
	  /* Combine kinds: a narrow string adopts the kind of an
	     adjacent wide string; mixing two different wide kinds is
	     not supported.  */
	  if (type != curr_type)
	    {
	      if (type == CPP_STRING)
		type = curr_type;
	      else if (curr_type != CPP_STRING)
		error_at (tok->location,
			  "unsupported non-standard concatenation "
			  "of string literals");
	    }
	  obstack_grow (&str_ob, &str, sizeof (cpp_string));
	  /* Peek at the next token to see whether the sequence
	     continues.  */
	  tok = cp_lexer_peek_token (parser->lexer);
	  if (cpp_userdef_string_p (tok->type))
	    {
	      string_tree = USERDEF_LITERAL_VALUE (tok->u.value);
	      curr_type = cpp_userdef_string_remove_type (tok->type);
	      curr_tok_is_userdef_p = true;
	    }
	  else
	    {
	      string_tree = tok->u.value;
	      curr_type = tok->type;
	      curr_tok_is_userdef_p = false;
	    }
	}
      while (cp_parser_is_string_literal (tok));
      strs = (cpp_string *) obstack_finish (&str_ob);
    }
  if (type != CPP_STRING && !wide_ok)
    {
      cp_parser_error (parser, "a wide string is invalid in this context");
      type = CPP_STRING;
    }
  /* Let libcpp concatenate (and possibly translate) the strings.  */
  if ((translate ? cpp_interpret_string : cpp_interpret_string_notranslate)
      (parse_in, strs, count, &istr, type))
    {
      value = build_string (istr.len, (const char *)istr.text);
      free (CONST_CAST (unsigned char *, istr.text));
      /* Give the STRING_CST the array type matching its kind.  */
      switch (type)
	{
	default:
	case CPP_STRING:
	case CPP_UTF8STRING:
	  TREE_TYPE (value) = char_array_type_node;
	  break;
	case CPP_STRING16:
	  TREE_TYPE (value) = char16_array_type_node;
	  break;
	case CPP_STRING32:
	  TREE_TYPE (value) = char32_array_type_node;
	  break;
	case CPP_WSTRING:
	  TREE_TYPE (value) = wchar_array_type_node;
	  break;
	}
      value = fix_string_type (value);
      /* For a user-defined literal, wrap the string and — unless the
	 caller deferred it — look up the literal operator now.  */
      if (have_suffix_p)
	{
	  tree literal = build_userdef_literal (suffix_id, value,
						OT_NONE, NULL_TREE);
	  if (lookup_udlit)
	    value = cp_parser_userdef_string_literal (literal);
	  else
	    value = literal;
	}
    }
  else
    /* cpp_interpret_string has issued an error.  */
    value = error_mark_node;
  if (count > 1)
    obstack_free (&str_ob, 0);
  return value;
}
/* Look up a literal operator with the name and the exact arguments.
   NAME is the literal-operator identifier (from cp_literal_operator_id);
   ARGS are the candidate argument trees.  Returns the (possibly
   overloaded) declaration when some overload's parameter list matches
   the argument types exactly — with a special case allowing a pointer
   parameter to accept an array argument of matching element type — or
   error_mark_node otherwise.  */
static tree
lookup_literal_operator (tree name, vec<tree, va_gc> *args)
{
  tree decl, fns;
  decl = lookup_name (name);
  if (!decl || !is_overloaded_fn (decl))
    return error_mark_node;
  /* Walk every overload of the operator.  */
  for (fns = decl; fns; fns = OVL_NEXT (fns))
    {
      unsigned int ix;
      bool found = true;
      tree fn = OVL_CURRENT (fns);
      tree parmtypes = TYPE_ARG_TYPES (TREE_TYPE (fn));
      if (parmtypes != NULL_TREE)
	{
	  /* Check each parameter against the corresponding argument.  */
	  for (ix = 0; ix < vec_safe_length (args) && parmtypes != NULL_TREE;
	       ++ix, parmtypes = TREE_CHAIN (parmtypes))
	    {
	      tree tparm = TREE_VALUE (parmtypes);
	      tree targ = TREE_TYPE ((*args)[ix]);
	      bool ptr = TYPE_PTR_P (tparm);
	      bool arr = TREE_CODE (targ) == ARRAY_TYPE;
	      /* A match is either an exact type match, or a pointer
		 parameter paired with an array argument whose element
		 type matches the pointee type.  */
	      if ((ptr || arr || !same_type_p (tparm, targ))
		  && (!ptr || !arr
		      || !same_type_p (TREE_TYPE (tparm),
				       TREE_TYPE (targ))))
		found = false;
	    }
	  /* Accept only when every argument matched and the parameter
	     list ends exactly where the arguments do.  */
	  if (found
	      && ix == vec_safe_length (args)
	      /* May be this should be sufficient_parms_p instead,
		 depending on how exactly should user-defined literals
		 work in presence of default arguments on the literal
		 operator parameters.  */
	      && parmtypes == void_list_node)
	    return decl;
	}
    }
  return error_mark_node;
}
/* Parse a user-defined char constant.  Returns a call to a user-defined
   literal operator taking the character as an argument, or
   error_mark_node when no suitable operator is found.  */
static tree
cp_parser_userdef_char_literal (cp_parser *parser)
{
  cp_token *tok = cp_lexer_consume_token (parser->lexer);
  tree udlit = tok->u.value;
  tree suffix = USERDEF_LITERAL_SUFFIX_ID (udlit);
  tree charval = USERDEF_LITERAL_VALUE (udlit);
  tree opname = cp_literal_operator_id (IDENTIFIER_POINTER (suffix));
  /* Build up a call to the user-defined operator: look up the operator
     name and call it with the character value.  */
  vec<tree, va_gc> *args = make_tree_vector ();
  vec_safe_push (args, charval);
  tree decl = lookup_literal_operator (opname, args);
  tree result;
  if (decl && decl != error_mark_node)
    result = finish_call_expr (decl, &args, false, true, tf_warning_or_error);
  else
    {
      error ("unable to find character literal operator %qD with %qT argument",
	     opname, TREE_TYPE (charval));
      result = error_mark_node;
    }
  release_tree_vector (args);
  return result;
}
/* A subroutine of cp_parser_userdef_numeric_literal to
   create a char... template parameter pack from a string node.
   Returns a one-element TREE_VEC holding the argument pack.  */
static tree
make_char_string_pack (tree value)
{
  const char *str = TREE_STRING_POINTER (value);
  /* Exclude the terminating NUL.  */
  int len = TREE_STRING_LENGTH (value) - 1;
  /* One INTEGER_CST per character of the string.  */
  tree charvec = make_tree_vec (len);
  for (int i = 0; i < len; ++i)
    TREE_VEC_ELT (charvec, i) = build_int_cst (char_type_node, str[i]);
  /* Wrap the characters in a non-type argument pack.  */
  tree argpack = make_node (NONTYPE_ARGUMENT_PACK);
  SET_ARGUMENT_PACK_ARGS (argpack, charvec);
  TREE_TYPE (argpack) = char_type_node;
  /* The template argument vector contains just the pack.  */
  tree argvec = make_tree_vec (1);
  TREE_VEC_ELT (argvec, 0) = argpack;
  return argvec;
}
/* A subroutine of cp_parser_userdef_string_literal to create a
   CharT... template parameter pack from a string node.  Returns a
   two-element TREE_VEC: the character type, then the argument pack.  */
static tree
make_string_pack (tree value)
{
  const unsigned char *str
    = (const unsigned char *) TREE_STRING_POINTER (value);
  /* Width in bytes of one element of the string.  */
  int sz = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (value))));
  /* Character count, excluding the terminating NUL.  */
  int len = TREE_STRING_LENGTH (value) / sz - 1;
  /* The string's character type, stripped of qualifiers.  */
  tree char_type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (value)));
  tree argvec = make_tree_vec (2);
  /* First template argument is the character type itself.  */
  TREE_VEC_ELT (argvec, 0) = char_type;
  /* Build one constant per character, decoding SZ raw bytes each.  */
  tree charvec = make_tree_vec (len);
  for (int i = 0; i < len; ++i)
    TREE_VEC_ELT (charvec, i)
      = double_int_to_tree (char_type,
			    double_int::from_buffer (str + i * sz, sz));
  /* Wrap the characters in a non-type argument pack.  */
  tree argpack = make_node (NONTYPE_ARGUMENT_PACK);
  SET_ARGUMENT_PACK_ARGS (argpack, charvec);
  TREE_TYPE (argpack) = char_type;
  TREE_VEC_ELT (argvec, 1) = argpack;
  return argvec;
}
/* Parse a user-defined numeric constant.  Returns a call to a
   user-defined literal operator, trying in order: an operator taking
   the numeric value itself, a raw operator taking the spelling as a
   const char*, and a char... template operator.  Returns
   error_mark_node when none is found.  */
static tree
cp_parser_userdef_numeric_literal (cp_parser *parser)
{
  cp_token *token = cp_lexer_consume_token (parser->lexer);
  tree literal = token->u.value;
  tree suffix_id = USERDEF_LITERAL_SUFFIX_ID (literal);
  tree value = USERDEF_LITERAL_VALUE (literal);
  /* Overflow flag recorded on the literal; positive presumably means
     the value exceeded the widest available type, negative that a
     float was truncated to zero — TODO confirm against the lexer.  */
  int overflow = USERDEF_LITERAL_OVERFLOW (literal);
  /* The literal's original spelling, for the raw/template forms.  */
  tree num_string = USERDEF_LITERAL_NUM_STRING (literal);
  tree name = cp_literal_operator_id (IDENTIFIER_POINTER (suffix_id));
  tree decl, result;
  vec<tree, va_gc> *args;
  /* Look for a literal operator taking the exact type of numeric argument
     as the literal value.  */
  args = make_tree_vector ();
  vec_safe_push (args, value);
  decl = lookup_literal_operator (name, args);
  if (decl && decl != error_mark_node)
    {
      result = finish_call_expr (decl, &args, false, true,
				 tf_warning_or_error);
      /* Only now that an operator was found do we diagnose overflow in
	 the literal itself.  */
      if (TREE_CODE (TREE_TYPE (value)) == INTEGER_TYPE && overflow > 0)
	{
	  warning_at (token->location, OPT_Woverflow,
		      "integer literal exceeds range of %qT type",
		      long_long_unsigned_type_node);
	}
      else
	{
	  if (overflow > 0)
	    warning_at (token->location, OPT_Woverflow,
			"floating literal exceeds range of %qT type",
			long_double_type_node);
	  else if (overflow < 0)
	    warning_at (token->location, OPT_Woverflow,
			"floating literal truncated to zero");
	}
      release_tree_vector (args);
      return result;
    }
  release_tree_vector (args);
  /* If the numeric argument didn't work, look for a raw literal
     operator taking a const char* argument consisting of the number
     in string format.  */
  args = make_tree_vector ();
  vec_safe_push (args, num_string);
  decl = lookup_literal_operator (name, args);
  if (decl && decl != error_mark_node)
    {
      result = finish_call_expr (decl, &args, false, true,
				 tf_warning_or_error);
      release_tree_vector (args);
      return result;
    }
  release_tree_vector (args);
  /* If the raw literal didn't work, look for a non-type template
     function with parameter pack char....  Call the function with
     template parameter characters representing the number.  */
  args = make_tree_vector ();
  decl = lookup_literal_operator (name, args);
  if (decl && decl != error_mark_node)
    {
      tree tmpl_args = make_char_string_pack (num_string);
      decl = lookup_template_function (decl, tmpl_args);
      result = finish_call_expr (decl, &args, false, true,
				 tf_warning_or_error);
      release_tree_vector (args);
      return result;
    }
  release_tree_vector (args);
  /* No viable operator at all: diagnose, and mention the options that
     enable the GNU built-in suffixes when they are disabled.  */
  error ("unable to find numeric literal operator %qD", name);
  if (!cpp_get_options (parse_in)->ext_numeric_literals)
    inform (token->location, "use -std=gnu++11 or -fext-numeric-literals "
	    "to enable more built-in suffixes");
  return error_mark_node;
}
/* Parse a user-defined string constant.  Returns a call to a user-defined
   literal operator taking a character pointer and the length of the string
   as arguments, falling back to a <CharT, CharT...> template operator;
   error_mark_node when neither form is found.  */
static tree
cp_parser_userdef_string_literal (tree literal)
{
  tree suffix_id = USERDEF_LITERAL_SUFFIX_ID (literal);
  tree name = cp_literal_operator_id (IDENTIFIER_POINTER (suffix_id));
  tree value = USERDEF_LITERAL_VALUE (literal);
  /* Character count, excluding the terminating NUL: total byte length
     divided by the size of one element of the string's type.  */
  int len = TREE_STRING_LENGTH (value)
	/ TREE_INT_CST_LOW (TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (value)))) - 1;
  tree decl, result;
  vec<tree, va_gc> *args;
  /* Build up a call to the user-defined operator.  */
  /* Lookup the name we got back from the id-expression.  First try the
     (pointer, length) form.  */
  args = make_tree_vector ();
  vec_safe_push (args, value);
  vec_safe_push (args, build_int_cst (size_type_node, len));
  decl = lookup_literal_operator (name, args);
  if (decl && decl != error_mark_node)
    {
      result = finish_call_expr (decl, &args, false, true,
				 tf_warning_or_error);
      release_tree_vector (args);
      return result;
    }
  release_tree_vector (args);
  /* Look for a template function with typename parameter CharT
     and parameter pack CharT...  Call the function with
     template parameter characters representing the string.  */
  args = make_tree_vector ();
  decl = lookup_literal_operator (name, args);
  if (decl && decl != error_mark_node)
    {
      tree tmpl_args = make_string_pack (value);
      decl = lookup_template_function (decl, tmpl_args);
      result = finish_call_expr (decl, &args, false, true,
				 tf_warning_or_error);
      release_tree_vector (args);
      return result;
    }
  release_tree_vector (args);
  error ("unable to find string literal operator %qD with %qT, %qT arguments",
	 name, TREE_TYPE (value), size_type_node);
  return error_mark_node;
}
/* Basic concepts [gram.basic] */
/* Parse a translation-unit.
   translation-unit:
     declaration-seq [opt]
   Returns TRUE if all went well (all tokens were consumed).  */
static bool
cp_parser_translation_unit (cp_parser* parser)
{
  /* The address of the first non-permanent object on the declarator
     obstack; used below to assert that the obstack was fully
     unwound.  */
  static void *declarator_obstack_base;
  bool success;
  /* Create the declarator obstack, if necessary.  This runs once;
     CP_ERROR_DECLARATOR doubles as the initialized-p flag.  */
  if (!cp_error_declarator)
    {
      gcc_obstack_init (&declarator_obstack);
      /* Create the error declarator.  */
      cp_error_declarator = make_declarator (cdk_error);
      /* Create the empty parameter list.  */
      no_parameters = make_parameter_declarator (NULL, NULL, NULL_TREE);
      /* Remember where the base of the declarator obstack lies.  */
      declarator_obstack_base = obstack_next_free (&declarator_obstack);
    }
  /* Parse the sequence of top-level declarations.  */
  cp_parser_declaration_seq_opt (parser);
  /* If there are no tokens left then all went well.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_EOF))
    {
      /* Get rid of the token array; we don't need it any more.  */
      cp_lexer_destroy (parser->lexer);
      parser->lexer = NULL;
      /* This file might have been a context that's implicitly extern
	 "C".  If so, pop the lang context.  (Only relevant for PCH.)  */
      if (parser->implicit_extern_c)
	{
	  pop_lang_context ();
	  parser->implicit_extern_c = false;
	}
      /* Finish up.  */
      finish_translation_unit ();
      success = true;
    }
  else
    {
      cp_parser_error (parser, "expected declaration");
      success = false;
    }
  /* Make sure the declarator obstack was fully cleaned up.  */
  gcc_assert (obstack_next_free (&declarator_obstack)
	      == declarator_obstack_base);
  /* All went well.  */
  return success;
}
/* Return the appropriate tsubst flags for parsing, possibly in N3276
   decltype context.  */
static inline tsubst_flags_t
complain_flags (bool decltype_p)
{
  tsubst_flags_t flags = tf_warning_or_error;
  /* Inside a decltype operand (N3276), additionally pass tf_decltype.  */
  if (decltype_p)
    flags |= tf_decltype;
  return flags;
}
/* We're about to parse a collection of statements.  If we're currently
   parsing tentatively, set up a firewall so that any nested
   cp_parser_commit_to_tentative_parse won't affect the current context.
   Returns the position of the next token when a firewall was started,
   0 when none was needed.  */
static cp_token_position
cp_parser_start_tentative_firewall (cp_parser *parser)
{
  /* No firewall is needed unless we are inside an uncommitted
     tentative parse.  */
  if (!cp_parser_uncommitted_to_tentative_parse_p (parser))
    return 0;
  /* Open a fresh tentative level and commit to only that topmost
     level, so nested commits cannot leak into the enclosing
     context.  */
  cp_parser_parse_tentatively (parser);
  cp_parser_commit_to_topmost_tentative_parse (parser);
  return cp_lexer_token_position (parser->lexer, false);
}
/* We've finished parsing the collection of statements.  Wrap up the
   firewall started by cp_parser_start_tentative_firewall (a no-op when
   START is 0) and replace the relevant tokens with the parsed form.  */
static void
cp_parser_end_tentative_firewall (cp_parser *parser, cp_token_position start,
				  tree expr)
{
  if (!start)
    return;
  /* Close the firewall level.  */
  cp_parser_parse_definitely (parser);
  /* Stash EXPR in the token at START so a later parse of the same
     tokens picks up the pre-parsed result directly.  */
  cp_token *tok = cp_lexer_token_at (parser->lexer, start);
  tok->keyword = RID_MAX;
  tok->type = CPP_PREPARSED_EXPR;
  tok->u.value = expr;
  /* Drop the tokens that the pre-parsed expression replaces.  */
  cp_lexer_purge_tokens_after (parser->lexer, start);
}
/* Parse a GNU statement-expression, i.e. ({ stmts }), except for the
   enclosing parentheses.  Returns the expression's value.  */
static tree
cp_parser_statement_expr (cp_parser *parser)
{
  cp_token_position start = cp_parser_start_tentative_firewall (parser);
  /* Eat the opening `('.  */
  cp_lexer_consume_token (parser->lexer);
  /* Begin the statement-expression and parse its compound-statement
     body.  */
  tree stmt_expr = begin_stmt_expr ();
  cp_parser_compound_statement (parser, stmt_expr, false, false);
  stmt_expr = finish_stmt_expr (stmt_expr, false);
  /* A `)' must follow; on failure, skip ahead to resynchronize.  */
  if (!cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN))
    cp_parser_skip_to_end_of_statement (parser);
  cp_parser_end_tentative_firewall (parser, start, stmt_expr);
  return stmt_expr;
}
/* Expressions [gram.expr] */
/* Parse a primary-expression.
primary-expression:
literal
this
( expression )
id-expression
lambda-expression (C++11)
GNU Extensions:
primary-expression:
( compound-statement )
__builtin_va_arg ( assignment-expression , type-id )
__builtin_offsetof ( type-id , offsetof-expression )
C++ Extensions:
__has_nothrow_assign ( type-id )
__has_nothrow_constructor ( type-id )
__has_nothrow_copy ( type-id )
__has_trivial_assign ( type-id )
__has_trivial_constructor ( type-id )
__has_trivial_copy ( type-id )
__has_trivial_destructor ( type-id )
__has_virtual_destructor ( type-id )
__is_abstract ( type-id )
__is_base_of ( type-id , type-id )
__is_class ( type-id )
__is_empty ( type-id )
__is_enum ( type-id )
__is_final ( type-id )
__is_literal_type ( type-id )
__is_pod ( type-id )
__is_polymorphic ( type-id )
__is_std_layout ( type-id )
__is_trivial ( type-id )
__is_union ( type-id )
Objective-C++ Extension:
primary-expression:
objc-expression
literal:
__null
ADDRESS_P is true iff this expression was immediately preceded by
"&" and therefore might denote a pointer-to-member. CAST_P is true
iff this expression is the target of a cast. TEMPLATE_ARG_P is
true iff this expression is a template argument.
Returns a representation of the expression. Upon return, *IDK
indicates what kind of id-expression (if any) was present. */
static tree
cp_parser_primary_expression (cp_parser *parser,
bool address_p,
bool cast_p,
bool template_arg_p,
bool decltype_p,
cp_id_kind *idk)
{
cp_token *token = NULL;
/* Assume the primary expression is not an id-expression. */
*idk = CP_ID_KIND_NONE;
/* Peek at the next token. */
token = cp_lexer_peek_token (parser->lexer);
switch ((int) token->type)
{
/* literal:
integer-literal
character-literal
floating-literal
string-literal
boolean-literal
pointer-literal
user-defined-literal */
case CPP_CHAR:
case CPP_CHAR16:
case CPP_CHAR32:
case CPP_WCHAR:
case CPP_NUMBER:
case CPP_PREPARSED_EXPR:
if (TREE_CODE (token->u.value) == USERDEF_LITERAL)
return cp_parser_userdef_numeric_literal (parser);
token = cp_lexer_consume_token (parser->lexer);
if (TREE_CODE (token->u.value) == FIXED_CST)
{
error_at (token->location,
"fixed-point types not supported in C++");
return error_mark_node;
}
/* Floating-point literals are only allowed in an integral
constant expression if they are cast to an integral or
enumeration type. */
if (TREE_CODE (token->u.value) == REAL_CST
&& parser->integral_constant_expression_p
&& pedantic)
{
/* CAST_P will be set even in invalid code like "int(2.7 +
...)". Therefore, we have to check that the next token
is sure to end the cast. */
if (cast_p)
{
cp_token *next_token;
next_token = cp_lexer_peek_token (parser->lexer);
if (/* The comma at the end of an
enumerator-definition. */
next_token->type != CPP_COMMA
/* The curly brace at the end of an enum-specifier. */
&& next_token->type != CPP_CLOSE_BRACE
/* The end of a statement. */
&& next_token->type != CPP_SEMICOLON
/* The end of the cast-expression. */
&& next_token->type != CPP_CLOSE_PAREN
/* The end of an array bound. */
&& next_token->type != CPP_CLOSE_SQUARE
/* The closing ">" in a template-argument-list. */
&& (next_token->type != CPP_GREATER
|| parser->greater_than_is_operator_p)
/* C++0x only: A ">>" treated like two ">" tokens,
in a template-argument-list. */
&& (next_token->type != CPP_RSHIFT
|| (cxx_dialect == cxx98)
|| parser->greater_than_is_operator_p))
cast_p = false;
}
/* If we are within a cast, then the constraint that the
cast is to an integral or enumeration type will be
checked at that point. If we are not within a cast, then
this code is invalid. */
if (!cast_p)
cp_parser_non_integral_constant_expression (parser, NIC_FLOAT);
}
return token->u.value;
case CPP_CHAR_USERDEF:
case CPP_CHAR16_USERDEF:
case CPP_CHAR32_USERDEF:
case CPP_WCHAR_USERDEF:
return cp_parser_userdef_char_literal (parser);
case CPP_STRING:
case CPP_STRING16:
case CPP_STRING32:
case CPP_WSTRING:
case CPP_UTF8STRING:
case CPP_STRING_USERDEF:
case CPP_STRING16_USERDEF:
case CPP_STRING32_USERDEF:
case CPP_WSTRING_USERDEF:
case CPP_UTF8STRING_USERDEF:
/* ??? Should wide strings be allowed when parser->translate_strings_p
is false (i.e. in attributes)? If not, we can kill the third
argument to cp_parser_string_literal. */
return cp_parser_string_literal (parser,
parser->translate_strings_p,
true);
case CPP_OPEN_PAREN:
/* If we see `( { ' then we are looking at the beginning of
a GNU statement-expression. */
if (cp_parser_allow_gnu_extensions_p (parser)
&& cp_lexer_nth_token_is (parser->lexer, 2, CPP_OPEN_BRACE))
{
/* Statement-expressions are not allowed by the standard. */
pedwarn (token->location, OPT_Wpedantic,
"ISO C++ forbids braced-groups within expressions");
/* And they're not allowed outside of a function-body; you
cannot, for example, write:
int i = ({ int j = 3; j + 1; });
at class or namespace scope. */
if (!parser->in_function_body
|| parser->in_template_argument_list_p)
{
error_at (token->location,
"statement-expressions are not allowed outside "
"functions nor in template-argument lists");
cp_parser_skip_to_end_of_block_or_statement (parser);
if (cp_lexer_next_token_is (parser->lexer, CPP_CLOSE_PAREN))
cp_lexer_consume_token (parser->lexer);
return error_mark_node;
}
else
return cp_parser_statement_expr (parser);
}
/* Otherwise it's a normal parenthesized expression. */
{
tree expr;
bool saved_greater_than_is_operator_p;
/* Consume the `('. */
cp_lexer_consume_token (parser->lexer);
/* Within a parenthesized expression, a `>' token is always
the greater-than operator. */
saved_greater_than_is_operator_p
= parser->greater_than_is_operator_p;
parser->greater_than_is_operator_p = true;
/* Parse the parenthesized expression. */
expr = cp_parser_expression (parser, idk, cast_p, decltype_p);
/* Let the front end know that this expression was
enclosed in parentheses. This matters in case, for
example, the expression is of the form `A::B', since
`&A::B' might be a pointer-to-member, but `&(A::B)' is
not. */
expr = finish_parenthesized_expr (expr);
/* DR 705: Wrapping an unqualified name in parentheses
suppresses arg-dependent lookup. We want to pass back
CP_ID_KIND_QUALIFIED for suppressing vtable lookup
(c++/37862), but none of the others. */
if (*idk != CP_ID_KIND_QUALIFIED)
*idk = CP_ID_KIND_NONE;
/* The `>' token might be the end of a template-id or
template-parameter-list now. */
parser->greater_than_is_operator_p
= saved_greater_than_is_operator_p;
/* Consume the `)'. */
if (!cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN))
cp_parser_skip_to_end_of_statement (parser);
return expr;
}
case CPP_OPEN_SQUARE:
{
if (c_dialect_objc ())
{
/* We might have an Objective-C++ message. */
cp_parser_parse_tentatively (parser);
tree msg = cp_parser_objc_message_expression (parser);
/* If that works out, we're done ... */
if (cp_parser_parse_definitely (parser))
return msg;
/* ... else, fall though to see if it's a lambda. */
}
tree lam = cp_parser_lambda_expression (parser);
/* Don't warn about a failed tentative parse. */
if (cp_parser_error_occurred (parser))
return error_mark_node;
maybe_warn_cpp0x (CPP0X_LAMBDA_EXPR);
return lam;
}
case CPP_OBJC_STRING:
if (c_dialect_objc ())
/* We have an Objective-C++ string literal. */
return cp_parser_objc_expression (parser);
cp_parser_error (parser, "expected primary-expression");
return error_mark_node;
case CPP_KEYWORD:
switch (token->keyword)
{
/* These two are the boolean literals. */
case RID_TRUE:
cp_lexer_consume_token (parser->lexer);
return boolean_true_node;
case RID_FALSE:
cp_lexer_consume_token (parser->lexer);
return boolean_false_node;
/* The `__null' literal. */
case RID_NULL:
cp_lexer_consume_token (parser->lexer);
return null_node;
/* The `nullptr' literal. */
case RID_NULLPTR:
cp_lexer_consume_token (parser->lexer);
return nullptr_node;
/* Recognize the `this' keyword. */
case RID_THIS:
cp_lexer_consume_token (parser->lexer);
if (parser->local_variables_forbidden_p)
{
error_at (token->location,
"%<this%> may not be used in this context");
return error_mark_node;
}
/* Pointers cannot appear in constant-expressions. */
if (cp_parser_non_integral_constant_expression (parser, NIC_THIS))
return error_mark_node;
return finish_this_expr ();
/* The `operator' keyword can be the beginning of an
id-expression. */
case RID_OPERATOR:
goto id_expression;
case RID_FUNCTION_NAME:
case RID_PRETTY_FUNCTION_NAME:
case RID_C99_FUNCTION_NAME:
{
non_integral_constant name;
/* The symbols __FUNCTION__, __PRETTY_FUNCTION__, and
__func__ are the names of variables -- but they are
treated specially. Therefore, they are handled here,
rather than relying on the generic id-expression logic
below. Grammatically, these names are id-expressions.
Consume the token. */
token = cp_lexer_consume_token (parser->lexer);
switch (token->keyword)
{
case RID_FUNCTION_NAME:
name = NIC_FUNC_NAME;
break;
case RID_PRETTY_FUNCTION_NAME:
name = NIC_PRETTY_FUNC;
break;
case RID_C99_FUNCTION_NAME:
name = NIC_C99_FUNC;
break;
default:
gcc_unreachable ();
}
if (cp_parser_non_integral_constant_expression (parser, name))
return error_mark_node;
/* Look up the name. */
return finish_fname (token->u.value);
}
case RID_VA_ARG:
{
tree expression;
tree type;
source_location type_location;
/* The `__builtin_va_arg' construct is used to handle
`va_arg'. Consume the `__builtin_va_arg' token. */
cp_lexer_consume_token (parser->lexer);
/* Look for the opening `('. */
cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN);
/* Now, parse the assignment-expression. */
expression = cp_parser_assignment_expression (parser);
/* Look for the `,'. */
cp_parser_require (parser, CPP_COMMA, RT_COMMA);
type_location = cp_lexer_peek_token (parser->lexer)->location;
/* Parse the type-id. */
type = cp_parser_type_id (parser);
/* Look for the closing `)'. */
cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);
/* Using `va_arg' in a constant-expression is not
allowed. */
if (cp_parser_non_integral_constant_expression (parser,
NIC_VA_ARG))
return error_mark_node;
return build_x_va_arg (type_location, expression, type);
}
case RID_OFFSETOF:
return cp_parser_builtin_offsetof (parser);
case RID_HAS_NOTHROW_ASSIGN:
case RID_HAS_NOTHROW_CONSTRUCTOR:
case RID_HAS_NOTHROW_COPY:
case RID_HAS_TRIVIAL_ASSIGN:
case RID_HAS_TRIVIAL_CONSTRUCTOR:
case RID_HAS_TRIVIAL_COPY:
case RID_HAS_TRIVIAL_DESTRUCTOR:
case RID_HAS_VIRTUAL_DESTRUCTOR:
case RID_IS_ABSTRACT:
case RID_IS_BASE_OF:
case RID_IS_CLASS:
case RID_IS_EMPTY:
case RID_IS_ENUM:
case RID_IS_FINAL:
case RID_IS_LITERAL_TYPE:
case RID_IS_POD:
case RID_IS_POLYMORPHIC:
case RID_IS_STD_LAYOUT:
case RID_IS_TRIVIAL:
case RID_IS_TRIVIALLY_ASSIGNABLE:
case RID_IS_TRIVIALLY_CONSTRUCTIBLE:
case RID_IS_TRIVIALLY_COPYABLE:
case RID_IS_UNION:
return cp_parser_trait_expr (parser, token->keyword);
/* Objective-C++ expressions. */
case RID_AT_ENCODE:
case RID_AT_PROTOCOL:
case RID_AT_SELECTOR:
return cp_parser_objc_expression (parser);
case RID_TEMPLATE:
if (parser->in_function_body
&& (cp_lexer_peek_nth_token (parser->lexer, 2)->type
== CPP_LESS))
{
error_at (token->location,
"a template declaration cannot appear at block scope");
cp_parser_skip_to_end_of_block_or_statement (parser);
return error_mark_node;
}
default:
cp_parser_error (parser, "expected primary-expression");
return error_mark_node;
}
/* An id-expression can start with either an identifier, a
`::' as the beginning of a qualified-id, or the "operator"
keyword. */
case CPP_NAME:
case CPP_SCOPE:
case CPP_TEMPLATE_ID:
case CPP_NESTED_NAME_SPECIFIER:
{
tree id_expression;
tree decl;
const char *error_msg;
bool template_p;
bool done;
cp_token *id_expr_token;
id_expression:
/* Parse the id-expression. */
id_expression
= cp_parser_id_expression (parser,
/*template_keyword_p=*/false,
/*check_dependency_p=*/true,
&template_p,
/*declarator_p=*/false,
/*optional_p=*/false);
if (id_expression == error_mark_node)
return error_mark_node;
id_expr_token = token;
token = cp_lexer_peek_token (parser->lexer);
done = (token->type != CPP_OPEN_SQUARE
&& token->type != CPP_OPEN_PAREN
&& token->type != CPP_DOT
&& token->type != CPP_DEREF
&& token->type != CPP_PLUS_PLUS
&& token->type != CPP_MINUS_MINUS);
/* If we have a template-id, then no further lookup is
required. If the template-id was for a template-class, we
will sometimes have a TYPE_DECL at this point. */
if (TREE_CODE (id_expression) == TEMPLATE_ID_EXPR
|| TREE_CODE (id_expression) == TYPE_DECL)
decl = id_expression;
/* Look up the name. */
else
{
tree ambiguous_decls;
/* If we already know that this lookup is ambiguous, then
we've already issued an error message; there's no reason
to check again. */
if (id_expr_token->type == CPP_NAME
&& id_expr_token->error_reported)
{
cp_parser_simulate_error (parser);
return error_mark_node;
}
decl = cp_parser_lookup_name (parser, id_expression,
none_type,
template_p,
/*is_namespace=*/false,
/*check_dependency=*/true,
&ambiguous_decls,
id_expr_token->location);
/* If the lookup was ambiguous, an error will already have
been issued. */
if (ambiguous_decls)
return error_mark_node;
/* In Objective-C++, we may have an Objective-C 2.0
dot-syntax for classes here. */
if (c_dialect_objc ()
&& cp_lexer_peek_token (parser->lexer)->type == CPP_DOT
&& TREE_CODE (decl) == TYPE_DECL
&& objc_is_class_name (decl))
{
tree component;
cp_lexer_consume_token (parser->lexer);
component = cp_parser_identifier (parser);
if (component == error_mark_node)
return error_mark_node;
return objc_build_class_component_ref (id_expression, component);
}
/* In Objective-C++, an instance variable (ivar) may be preferred
to whatever cp_parser_lookup_name() found. */
decl = objc_lookup_ivar (decl, id_expression);
/* If name lookup gives us a SCOPE_REF, then the
qualifying scope was dependent. */
if (TREE_CODE (decl) == SCOPE_REF)
{
/* At this point, we do not know if DECL is a valid
integral constant expression. We assume that it is
in fact such an expression, so that code like:
template <int N> struct A {
int a[B<N>::i];
};
is accepted. At template-instantiation time, we
will check that B<N>::i is actually a constant. */
return decl;
}
/* Check to see if DECL is a local variable in a context
where that is forbidden. */
if (parser->local_variables_forbidden_p
&& local_variable_p (decl))
{
/* It might be that we only found DECL because we are
trying to be generous with pre-ISO scoping rules.
For example, consider:
int i;
void g() {
for (int i = 0; i < 10; ++i) {}
extern void f(int j = i);
}
Here, name look up will originally find the out
of scope `i'. We need to issue a warning message,
but then use the global `i'. */
decl = check_for_out_of_scope_variable (decl);
if (local_variable_p (decl))
{
error_at (id_expr_token->location,
"local variable %qD may not appear in this context",
decl);
return error_mark_node;
}
}
}
decl = (finish_id_expression
(id_expression, decl, parser->scope,
idk,
parser->integral_constant_expression_p,
parser->allow_non_integral_constant_expression_p,
&parser->non_integral_constant_expression_p,
template_p, done, address_p,
template_arg_p,
&error_msg,
id_expr_token->location));
if (error_msg)
cp_parser_error (parser, error_msg);
return decl;
}
/* Anything else is an error. */
default:
cp_parser_error (parser, "expected primary-expression");
return error_mark_node;
}
}
/* Convenience overload of cp_parser_primary_expression for callers
   that are not parsing the operand of `decltype': forwards to the
   full implementation with DECLTYPE_P fixed at false.  */
static inline tree
cp_parser_primary_expression (cp_parser *parser,
			      bool address_p,
			      bool cast_p,
			      bool template_arg_p,
			      cp_id_kind *idk)
{
  /* Name the forwarded flag so the call site reads clearly.  */
  const bool decltype_p = false;
  return cp_parser_primary_expression (parser, address_p, cast_p,
				       template_arg_p, decltype_p, idk);
}
/* Parse an id-expression.
id-expression:
unqualified-id
qualified-id
qualified-id:
:: [opt] nested-name-specifier template [opt] unqualified-id
:: identifier
:: operator-function-id
:: template-id
Return a representation of the unqualified portion of the
identifier. Sets PARSER->SCOPE to the qualifying scope if there is
a `::' or nested-name-specifier.
Often, if the id-expression was a qualified-id, the caller will
want to make a SCOPE_REF to represent the qualified-id. This
function does not do this in order to avoid wastefully creating
SCOPE_REFs when they are not required.
If TEMPLATE_KEYWORD_P is true, then we have just seen the
`template' keyword.
If CHECK_DEPENDENCY_P is false, then names are looked up inside
uninstantiated templates.
If *TEMPLATE_P is non-NULL, it is set to true iff the
`template' keyword is used to explicitly indicate that the entity
named is a template.
If DECLARATOR_P is true, the id-expression is appearing as part of
a declarator, rather than as part of an expression. */
/* Parse an id-expression: an optional `::', an optional
   nested-name-specifier, then an unqualified-id (see the grammar
   comment above).  Returns the unqualified portion; PARSER->SCOPE is
   set to any qualifying scope for the caller to consult.  */
static tree
cp_parser_id_expression (cp_parser *parser,
			 bool template_keyword_p,
			 bool check_dependency_p,
			 bool *template_p,
			 bool declarator_p,
			 bool optional_p)
{
  bool global_scope_p;
  bool nested_name_specifier_p;

  /* Assume the `template' keyword was not used.  */
  if (template_p)
    *template_p = template_keyword_p;

  /* Look for the optional `::' operator.  */
  global_scope_p
    = (cp_parser_global_scope_opt (parser, /*current_scope_valid_p=*/false)
       != NULL_TREE);
  /* Look for the optional nested-name-specifier.  */
  nested_name_specifier_p
    = (cp_parser_nested_name_specifier_opt (parser,
					    /*typename_keyword_p=*/false,
					    check_dependency_p,
					    /*type_p=*/false,
					    declarator_p)
       != NULL_TREE);

  /* If there is a nested-name-specifier, then we are looking at
     the first qualified-id production.  */
  if (nested_name_specifier_p)
    {
      tree saved_scope;
      tree saved_object_scope;
      tree saved_qualifying_scope;
      tree unqualified_id;
      bool is_template;

      /* See if the next token is the `template' keyword.  If the
	 caller did not ask for the answer, record it locally so the
	 code below can still use *TEMPLATE_P uniformly.  */
      if (!template_p)
	template_p = &is_template;
      *template_p = cp_parser_optional_template_keyword (parser);
      /* Name lookup we do during the processing of the
	 unqualified-id might obliterate SCOPE.  */
      saved_scope = parser->scope;
      saved_object_scope = parser->object_scope;
      saved_qualifying_scope = parser->qualifying_scope;
      /* Process the final unqualified-id.  */
      unqualified_id = cp_parser_unqualified_id (parser, *template_p,
						 check_dependency_p,
						 declarator_p,
						 /*optional_p=*/false);
      /* Restore the SAVED_SCOPE for our caller.  */
      parser->scope = saved_scope;
      parser->object_scope = saved_object_scope;
      parser->qualifying_scope = saved_qualifying_scope;

      return unqualified_id;
    }
  /* Otherwise, if we are in global scope, then we are looking at one
     of the other qualified-id productions.  */
  else if (global_scope_p)
    {
      cp_token *token;
      tree id;

      /* Peek at the next token.  */
      token = cp_lexer_peek_token (parser->lexer);

      /* If it's an identifier, and the next token is not a "<", then
	 we can avoid the template-id case.  This is an optimization
	 for this common case.  */
      if (token->type == CPP_NAME
	  && !cp_parser_nth_token_starts_template_argument_list_p
	       (parser, 2))
	return cp_parser_identifier (parser);

      /* The name might still be a template-id; commit only if that
	 parse succeeds.  */
      cp_parser_parse_tentatively (parser);
      /* Try a template-id.  */
      id = cp_parser_template_id (parser,
				  /*template_keyword_p=*/false,
				  /*check_dependency_p=*/true,
				  none_type,
				  declarator_p);
      /* If that worked, we're done.  */
      if (cp_parser_parse_definitely (parser))
	return id;

      /* Peek at the next token.  (Changes in the token buffer may
	 have invalidated the pointer obtained above.)  */
      token = cp_lexer_peek_token (parser->lexer);

      switch (token->type)
	{
	case CPP_NAME:
	  return cp_parser_identifier (parser);

	case CPP_KEYWORD:
	  if (token->keyword == RID_OPERATOR)
	    return cp_parser_operator_function_id (parser);
	  /* Fall through.  */

	default:
	  cp_parser_error (parser, "expected id-expression");
	  return error_mark_node;
	}
    }
  else
    /* Neither `::' nor a nested-name-specifier: a plain
       unqualified-id.  */
    return cp_parser_unqualified_id (parser, template_keyword_p,
				     /*check_dependency_p=*/true,
				     declarator_p,
				     optional_p);
}
/* Parse an unqualified-id.
unqualified-id:
identifier
operator-function-id
conversion-function-id
~ class-name
template-id
If TEMPLATE_KEYWORD_P is TRUE, we have just seen the `template'
keyword, in a construct like `A::template ...'.
Returns a representation of unqualified-id. For the `identifier'
production, an IDENTIFIER_NODE is returned. For the `~ class-name'
production a BIT_NOT_EXPR is returned; the operand of the
BIT_NOT_EXPR is an IDENTIFIER_NODE for the class-name. For the
other productions, see the documentation accompanying the
corresponding parsing functions. If CHECK_DEPENDENCY_P is false,
names are looked up in uninstantiated templates. If DECLARATOR_P
is true, the unqualified-id is appearing as part of a declarator,
rather than as part of an expression. */
/* Parse an unqualified-id (identifier, operator-function-id,
   conversion-function-id, `~ class-name', or template-id); see the
   grammar comment above.  Dispatches on the first token.  */
static tree
cp_parser_unqualified_id (cp_parser* parser,
			  bool template_keyword_p,
			  bool check_dependency_p,
			  bool declarator_p,
			  bool optional_p)
{
  cp_token *token;

  /* Peek at the next token.  */
  token = cp_lexer_peek_token (parser->lexer);

  switch ((int) token->type)
    {
    case CPP_NAME:
      {
	tree id;

	/* We don't know yet whether or not this will be a
	   template-id.  */
	cp_parser_parse_tentatively (parser);
	/* Try a template-id.  */
	id = cp_parser_template_id (parser, template_keyword_p,
				    check_dependency_p,
				    none_type,
				    declarator_p);
	/* If it worked, we're done.  */
	if (cp_parser_parse_definitely (parser))
	  return id;
	/* Otherwise, it's an ordinary identifier.  */
	return cp_parser_identifier (parser);
      }

    case CPP_TEMPLATE_ID:
      return cp_parser_template_id (parser, template_keyword_p,
				    check_dependency_p,
				    none_type,
				    declarator_p);

    case CPP_COMPL:
      /* A destructor name: `~' followed by a class-name.  */
      {
	tree type_decl;
	tree qualifying_scope;
	tree object_scope;
	tree scope;
	bool done;

	/* Consume the `~' token.  */
	cp_lexer_consume_token (parser->lexer);
	/* Parse the class-name.  The standard, as written, seems to
	   say that:

	     template <typename T> struct S { ~S (); };
	     template <typename T> S<T>::~S() {}

	   is invalid, since `~' must be followed by a class-name, but
	   `S<T>' is dependent, and so not known to be a class.
	   That's not right; we need to look in uninstantiated
	   templates.  A further complication arises from:

	     template <typename T> void f(T t) {
	       t.T::~T();
	     }

	   Here, it is not possible to look up `T' in the scope of `T'
	   itself.  We must look in both the current scope, and the
	   scope of the containing complete expression.

	   Yet another issue is:

	     struct S {
	       int S;
	       ~S();
	     };

	     S::~S() {}

	   The standard does not seem to say that the `S' in `~S'
	   should refer to the type `S' and not the data member
	   `S::S'.  */

	/* DR 244 says that we look up the name after the "~" in the
	   same scope as we looked up the qualifying name.  That idea
	   isn't fully worked out; it's more complicated than that.  */
	scope = parser->scope;
	object_scope = parser->object_scope;
	qualifying_scope = parser->qualifying_scope;

	/* Check for invalid scopes.  On error, consume a stray
	   identifier so that parsing can continue past the bad name.  */
	if (scope == error_mark_node)
	  {
	    if (cp_lexer_next_token_is (parser->lexer, CPP_NAME))
	      cp_lexer_consume_token (parser->lexer);
	    return error_mark_node;
	  }
	if (scope && TREE_CODE (scope) == NAMESPACE_DECL)
	  {
	    if (!cp_parser_uncommitted_to_tentative_parse_p (parser))
	      error_at (token->location,
			"scope %qT before %<~%> is not a class-name",
			scope);
	    cp_parser_simulate_error (parser);
	    if (cp_lexer_next_token_is (parser->lexer, CPP_NAME))
	      cp_lexer_consume_token (parser->lexer);
	    return error_mark_node;
	  }
	gcc_assert (!scope || TYPE_P (scope));

	/* If the name is of the form "X::~X" it's OK even if X is a
	   typedef.  */
	token = cp_lexer_peek_token (parser->lexer);
	if (scope
	    && token->type == CPP_NAME
	    && (cp_lexer_peek_nth_token (parser->lexer, 2)->type
		!= CPP_LESS)
	    && (token->u.value == TYPE_IDENTIFIER (scope)
		|| (CLASS_TYPE_P (scope)
		    && constructor_name_p (token->u.value, scope))))
	  {
	    cp_lexer_consume_token (parser->lexer);
	    return build_nt (BIT_NOT_EXPR, scope);
	  }

	/* ~auto means the destructor of whatever the object is.  */
	if (cp_parser_is_keyword (token, RID_AUTO))
	  {
	    if (cxx_dialect < cxx14)
	      pedwarn (input_location, 0,
		       "%<~auto%> only available with "
		       "-std=c++14 or -std=gnu++14");
	    cp_lexer_consume_token (parser->lexer);
	    return build_nt (BIT_NOT_EXPR, make_auto ());
	  }

	/* If there was an explicit qualification (S::~T), first look
	   in the scope given by the qualification (i.e., S).

	   Note: in the calls to cp_parser_class_name below we pass
	   typename_type so that lookup finds the injected-class-name
	   rather than the constructor.  */
	done = false;
	type_decl = NULL_TREE;
	if (scope)
	  {
	    cp_parser_parse_tentatively (parser);
	    type_decl = cp_parser_class_name (parser,
					      /*typename_keyword_p=*/false,
					      /*template_keyword_p=*/false,
					      typename_type,
					      /*check_dependency=*/false,
					      /*class_head_p=*/false,
					      declarator_p);
	    if (cp_parser_parse_definitely (parser))
	      done = true;
	  }
	/* In "N::S::~S", look in "N" as well.  */
	if (!done && scope && qualifying_scope)
	  {
	    cp_parser_parse_tentatively (parser);
	    parser->scope = qualifying_scope;
	    parser->object_scope = NULL_TREE;
	    parser->qualifying_scope = NULL_TREE;
	    type_decl
	      = cp_parser_class_name (parser,
				      /*typename_keyword_p=*/false,
				      /*template_keyword_p=*/false,
				      typename_type,
				      /*check_dependency=*/false,
				      /*class_head_p=*/false,
				      declarator_p);
	    if (cp_parser_parse_definitely (parser))
	      done = true;
	  }
	/* In "p->S::~T", look in the scope given by "*p" as well.
	   NOTE(review): this is an `else if' chained to the
	   qualifying-scope case above, so it is only reached when
	   SCOPE or QUALIFYING_SCOPE is null — confirm that is the
	   intended precedence.  */
	else if (!done && object_scope)
	  {
	    cp_parser_parse_tentatively (parser);
	    parser->scope = object_scope;
	    parser->object_scope = NULL_TREE;
	    parser->qualifying_scope = NULL_TREE;
	    type_decl
	      = cp_parser_class_name (parser,
				      /*typename_keyword_p=*/false,
				      /*template_keyword_p=*/false,
				      typename_type,
				      /*check_dependency=*/false,
				      /*class_head_p=*/false,
				      declarator_p);
	    if (cp_parser_parse_definitely (parser))
	      done = true;
	  }
	/* Look in the surrounding context.  */
	if (!done)
	  {
	    parser->scope = NULL_TREE;
	    parser->object_scope = NULL_TREE;
	    parser->qualifying_scope = NULL_TREE;
	    if (processing_template_decl)
	      cp_parser_parse_tentatively (parser);
	    type_decl
	      = cp_parser_class_name (parser,
				      /*typename_keyword_p=*/false,
				      /*template_keyword_p=*/false,
				      typename_type,
				      /*check_dependency=*/false,
				      /*class_head_p=*/false,
				      declarator_p);
	    if (processing_template_decl
		&& ! cp_parser_parse_definitely (parser))
	      {
		/* We couldn't find a type with this name, so just accept
		   it and check for a match at instantiation time.  */
		type_decl = cp_parser_identifier (parser);
		if (type_decl != error_mark_node)
		  type_decl = build_nt (BIT_NOT_EXPR, type_decl);
		return type_decl;
	      }
	  }
	/* If an error occurred, assume that the name of the
	   destructor is the same as the name of the qualifying
	   class.  That allows us to keep parsing after running
	   into ill-formed destructor names.  */
	if (type_decl == error_mark_node && scope)
	  return build_nt (BIT_NOT_EXPR, scope);
	else if (type_decl == error_mark_node)
	  return error_mark_node;

	/* Check that destructor name and scope match.  */
	if (declarator_p && scope && !check_dtor_name (scope, type_decl))
	  {
	    if (!cp_parser_uncommitted_to_tentative_parse_p (parser))
	      error_at (token->location,
			"declaration of %<~%T%> as member of %qT",
			type_decl, scope);
	    cp_parser_simulate_error (parser);
	    return error_mark_node;
	  }

	/* [class.dtor]

	   A typedef-name that names a class shall not be used as the
	   identifier in the declarator for a destructor declaration.  */
	if (declarator_p
	    && !DECL_IMPLICIT_TYPEDEF_P (type_decl)
	    && !DECL_SELF_REFERENCE_P (type_decl)
	    && !cp_parser_uncommitted_to_tentative_parse_p (parser))
	  error_at (token->location,
		    "typedef-name %qD used as destructor declarator",
		    type_decl);

	return build_nt (BIT_NOT_EXPR, TREE_TYPE (type_decl));
      }

    case CPP_KEYWORD:
      if (token->keyword == RID_OPERATOR)
	{
	  tree id;

	  /* This could be a template-id, so we try that first.  */
	  cp_parser_parse_tentatively (parser);
	  /* Try a template-id.  */
	  id = cp_parser_template_id (parser, template_keyword_p,
				      /*check_dependency_p=*/true,
				      none_type,
				      declarator_p);
	  /* If that worked, we're done.  */
	  if (cp_parser_parse_definitely (parser))
	    return id;
	  /* We still don't know whether we're looking at an
	     operator-function-id or a conversion-function-id.  */
	  cp_parser_parse_tentatively (parser);
	  /* Try an operator-function-id.  */
	  id = cp_parser_operator_function_id (parser);
	  /* If that didn't work, try a conversion-function-id.  */
	  if (!cp_parser_parse_definitely (parser))
	    id = cp_parser_conversion_function_id (parser);
	  else if (UDLIT_OPER_P (id))
	    {
	      /* 17.6.3.3.5: literal suffix identifiers not beginning
		 with an underscore are reserved.  */
	      const char *name = UDLIT_OP_SUFFIX (id);
	      if (name[0] != '_' && !in_system_header_at (input_location)
		  && declarator_p)
		warning (0, "literal operator suffixes not preceded by %<_%>"
			 " are reserved for future standardization");
	    }

	  return id;
	}
      /* Fall through.  */

    default:
      if (optional_p)
	return NULL_TREE;
      cp_parser_error (parser, "expected unqualified-id");
      return error_mark_node;
    }
}
/* Parse an (optional) nested-name-specifier.
nested-name-specifier: [C++98]
class-or-namespace-name :: nested-name-specifier [opt]
class-or-namespace-name :: template nested-name-specifier [opt]
nested-name-specifier: [C++0x]
type-name ::
namespace-name ::
nested-name-specifier identifier ::
nested-name-specifier template [opt] simple-template-id ::
PARSER->SCOPE should be set appropriately before this function is
called. TYPENAME_KEYWORD_P is TRUE if the `typename' keyword is in
effect. TYPE_P is TRUE if non-type bindings should be ignored
in name lookups.
Sets PARSER->SCOPE to the class (TYPE) or namespace
(NAMESPACE_DECL) specified by the nested-name-specifier, or leaves
it unchanged if there is no nested-name-specifier. Returns the new
scope iff there is a nested-name-specifier, or NULL_TREE otherwise.
If IS_DECLARATION is TRUE, the nested-name-specifier is known to be
part of a declaration and/or decl-specifier. */
/* Parse an (optional) nested-name-specifier; see the grammar comment
   above.  On success, also replaces the consumed tokens with a single
   CPP_NESTED_NAME_SPECIFIER token so re-parses are cheap.  */
static tree
cp_parser_nested_name_specifier_opt (cp_parser *parser,
				     bool typename_keyword_p,
				     bool check_dependency_p,
				     bool type_p,
				     bool is_declaration)
{
  bool success = false;
  /* Nonzero only when we may rewrite the token stream afterwards;
     doubles as the anchor position for that rewrite.  */
  cp_token_position start = 0;
  cp_token *token;

  /* Remember where the nested-name-specifier starts.  */
  if (cp_parser_uncommitted_to_tentative_parse_p (parser))
    {
      start = cp_lexer_token_position (parser->lexer, false);
      push_deferring_access_checks (dk_deferred);
    }

  /* Each iteration consumes one `name ::' component.  */
  while (true)
    {
      tree new_scope;
      tree old_scope;
      tree saved_qualifying_scope;
      bool template_keyword_p;

      /* Spot cases that cannot be the beginning of a
	 nested-name-specifier.  */
      token = cp_lexer_peek_token (parser->lexer);

      /* If the next token is CPP_NESTED_NAME_SPECIFIER, just process
	 the already parsed nested-name-specifier.  */
      if (token->type == CPP_NESTED_NAME_SPECIFIER)
	{
	  /* Grab the nested-name-specifier and continue the loop.  */
	  cp_parser_pre_parsed_nested_name_specifier (parser);
	  /* If we originally encountered this nested-name-specifier
	     with IS_DECLARATION set to false, we will not have
	     resolved TYPENAME_TYPEs, so we must do so here.  */
	  if (is_declaration
	      && TREE_CODE (parser->scope) == TYPENAME_TYPE)
	    {
	      new_scope = resolve_typename_type (parser->scope,
						 /*only_current_p=*/false);
	      if (TREE_CODE (new_scope) != TYPENAME_TYPE)
		parser->scope = new_scope;
	    }
	  success = true;
	  continue;
	}

      /* Spot cases that cannot be the beginning of a
	 nested-name-specifier.  On the second and subsequent times
	 through the loop, we look for the `template' keyword.  */
      if (success && token->keyword == RID_TEMPLATE)
	;
      /* A template-id can start a nested-name-specifier.  */
      else if (token->type == CPP_TEMPLATE_ID)
	;
      /* DR 743: decltype can be used in a nested-name-specifier.  */
      else if (token_is_decltype (token))
	;
      else
	{
	  /* If the next token is not an identifier, then it is
	     definitely not a type-name or namespace-name.  */
	  if (token->type != CPP_NAME)
	    break;
	  /* If the following token is neither a `<' (to begin a
	     template-id), nor a `::', then we are not looking at a
	     nested-name-specifier.  */
	  token = cp_lexer_peek_nth_token (parser->lexer, 2);

	  /* Fix-it: a single `:' where `::' was clearly intended.  */
	  if (token->type == CPP_COLON
	      && parser->colon_corrects_to_scope_p
	      && cp_lexer_peek_nth_token (parser->lexer, 3)->type == CPP_NAME)
	    {
	      error_at (token->location,
			"found %<:%> in nested-name-specifier, expected %<::%>");
	      token->type = CPP_SCOPE;
	    }

	  if (token->type != CPP_SCOPE
	      && !cp_parser_nth_token_starts_template_argument_list_p
		  (parser, 2))
	    break;
	}

      /* The nested-name-specifier is optional, so we parse
	 tentatively.  */
      cp_parser_parse_tentatively (parser);

      /* Look for the optional `template' keyword, if this isn't the
	 first time through the loop.  */
      if (success)
	template_keyword_p = cp_parser_optional_template_keyword (parser);
      else
	template_keyword_p = false;

      /* Save the old scope since the name lookup we are about to do
	 might destroy it.  */
      old_scope = parser->scope;
      saved_qualifying_scope = parser->qualifying_scope;
      /* In a declarator-id like "X<T>::I::Y<T>" we must be able to
	 look up names in "X<T>::I" in order to determine that "Y" is
	 a template.  So, if we have a typename at this point, we make
	 an effort to look through it.  */
      if (is_declaration
	  && !typename_keyword_p
	  && parser->scope
	  && TREE_CODE (parser->scope) == TYPENAME_TYPE)
	parser->scope = resolve_typename_type (parser->scope,
					       /*only_current_p=*/false);
      /* Parse the qualifying entity.  */
      new_scope
	= cp_parser_qualifying_entity (parser,
				       typename_keyword_p,
				       template_keyword_p,
				       check_dependency_p,
				       type_p,
				       is_declaration);
      /* Look for the `::' token.  */
      cp_parser_require (parser, CPP_SCOPE, RT_SCOPE);

      /* If we found what we wanted, we keep going; otherwise, we're
	 done.  */
      if (!cp_parser_parse_definitely (parser))
	{
	  bool error_p = false;

	  /* Restore the OLD_SCOPE since it was valid before the
	     failed attempt at finding the last
	     class-or-namespace-name.  */
	  parser->scope = old_scope;
	  parser->qualifying_scope = saved_qualifying_scope;

	  /* If the next token is a decltype, and the one after that is a
	     `::', then the decltype has failed to resolve to a class or
	     enumeration type.  Give this error even when parsing
	     tentatively since it can't possibly be valid--and we're going
	     to replace it with a CPP_NESTED_NAME_SPECIFIER below, so we
	     won't get another chance.*/
	  if (cp_lexer_next_token_is (parser->lexer, CPP_DECLTYPE)
	      && (cp_lexer_peek_nth_token (parser->lexer, 2)->type
		  == CPP_SCOPE))
	    {
	      token = cp_lexer_consume_token (parser->lexer);
	      error_at (token->location, "decltype evaluates to %qT, "
			"which is not a class or enumeration type",
			token->u.value);
	      parser->scope = error_mark_node;
	      error_p = true;
	      /* As below.  */
	      success = true;
	      cp_lexer_consume_token (parser->lexer);
	    }

	  if (cp_lexer_next_token_is (parser->lexer, CPP_TEMPLATE_ID)
	      && cp_lexer_nth_token_is (parser->lexer, 2, CPP_SCOPE))
	    {
	      /* If we have a non-type template-id followed by ::, it can't
		 possibly be valid.  */
	      token = cp_lexer_peek_token (parser->lexer);
	      tree tid = token->u.tree_check_value->value;
	      if (TREE_CODE (tid) == TEMPLATE_ID_EXPR
		  && TREE_CODE (TREE_OPERAND (tid, 0)) != IDENTIFIER_NODE)
		{
		  tree tmpl = NULL_TREE;
		  if (is_overloaded_fn (tid))
		    {
		      tree fns = get_fns (tid);
		      if (!OVL_CHAIN (fns))
			tmpl = OVL_CURRENT (fns);
		      error_at (token->location, "function template-id %qD "
				"in nested-name-specifier", tid);
		    }
		  else
		    {
		      /* Variable template.  */
		      tmpl = TREE_OPERAND (tid, 0);
		      gcc_assert (variable_template_p (tmpl));
		      error_at (token->location, "variable template-id %qD "
				"in nested-name-specifier", tid);
		    }
		  if (tmpl)
		    inform (DECL_SOURCE_LOCATION (tmpl),
			    "%qD declared here", tmpl);

		  parser->scope = error_mark_node;
		  error_p = true;
		  /* As below.  */
		  success = true;
		  /* Consume both the template-id and the `::'.  */
		  cp_lexer_consume_token (parser->lexer);
		  cp_lexer_consume_token (parser->lexer);
		}
	    }

	  if (cp_parser_uncommitted_to_tentative_parse_p (parser))
	    break;
	  /* If the next token is an identifier, and the one after
	     that is a `::', then any valid interpretation would have
	     found a class-or-namespace-name.  */
	  while (cp_lexer_next_token_is (parser->lexer, CPP_NAME)
		 && (cp_lexer_peek_nth_token (parser->lexer, 2)->type
		     == CPP_SCOPE)
		 && (cp_lexer_peek_nth_token (parser->lexer, 3)->type
		     != CPP_COMPL))
	    {
	      token = cp_lexer_consume_token (parser->lexer);
	      if (!error_p)
		{
		  if (!token->error_reported)
		    {
		      tree decl;
		      tree ambiguous_decls;
		      /* Re-do the lookup purely to produce a good
			 diagnostic for the bad component.  */
		      decl = cp_parser_lookup_name (parser, token->u.value,
						    none_type,
						    /*is_template=*/false,
						    /*is_namespace=*/false,
						    /*check_dependency=*/true,
						    &ambiguous_decls,
						    token->location);
		      if (TREE_CODE (decl) == TEMPLATE_DECL)
			error_at (token->location,
				  "%qD used without template parameters",
				  decl);
		      else if (ambiguous_decls)
			{
			  // cp_parser_lookup_name has the same diagnostic,
			  // thus make sure to emit it at most once.
			  if (cp_parser_uncommitted_to_tentative_parse_p
			      (parser))
			    {
			      error_at (token->location,
					"reference to %qD is ambiguous",
					token->u.value);
			      print_candidates (ambiguous_decls);
			    }
			  decl = error_mark_node;
			}
		      else
			{
			  if (cxx_dialect != cxx98)
			    cp_parser_name_lookup_error
			      (parser, token->u.value, decl, NLE_NOT_CXX98,
			       token->location);
			  else
			    cp_parser_name_lookup_error
			      (parser, token->u.value, decl, NLE_CXX98,
			       token->location);
			}
		    }
		  parser->scope = error_mark_node;
		  error_p = true;
		  /* Treat this as a successful nested-name-specifier
		     due to:

		     [basic.lookup.qual]

		     If the name found is not a class-name (clause
		     _class_) or namespace-name (_namespace.def_), the
		     program is ill-formed.  */
		  success = true;
		}
	      cp_lexer_consume_token (parser->lexer);
	    }
	  break;
	}
      /* We've found one valid nested-name-specifier.  */
      success = true;
      /* Name lookup always gives us a DECL.  */
      if (TREE_CODE (new_scope) == TYPE_DECL)
	new_scope = TREE_TYPE (new_scope);
      /* Uses of "template" must be followed by actual templates.  */
      if (template_keyword_p
	  && !(CLASS_TYPE_P (new_scope)
	       && ((CLASSTYPE_USE_TEMPLATE (new_scope)
		    && PRIMARY_TEMPLATE_P (CLASSTYPE_TI_TEMPLATE (new_scope)))
		   || CLASSTYPE_IS_TEMPLATE (new_scope)))
	  && !(TREE_CODE (new_scope) == TYPENAME_TYPE
	       && (TREE_CODE (TYPENAME_TYPE_FULLNAME (new_scope))
		   == TEMPLATE_ID_EXPR)))
	permerror (input_location, TYPE_P (new_scope)
		   ? G_("%qT is not a template")
		   : G_("%qD is not a template"),
		   new_scope);
      /* If it is a class scope, try to complete it; we are about to
	 be looking up names inside the class.  */
      if (TYPE_P (new_scope)
	  /* Since checking types for dependency can be expensive,
	     avoid doing it if the type is already complete.  */
	  && !COMPLETE_TYPE_P (new_scope)
	  /* Do not try to complete dependent types.  */
	  && !dependent_type_p (new_scope))
	{
	  new_scope = complete_type (new_scope);
	  /* If it is a typedef to current class, use the current
	     class instead, as the typedef won't have any names inside
	     it yet.  */
	  if (!COMPLETE_TYPE_P (new_scope)
	      && currently_open_class (new_scope))
	    new_scope = TYPE_MAIN_VARIANT (new_scope);
	}
      /* Make sure we look in the right scope the next time through
	 the loop.  */
      parser->scope = new_scope;
    }

  /* If parsing tentatively, replace the sequence of tokens that makes
     up the nested-name-specifier with a CPP_NESTED_NAME_SPECIFIER
     token.  That way, should we re-parse the token stream, we will
     not have to repeat the effort required to do the parse, nor will
     we issue duplicate error messages.  */
  if (success && start)
    {
      cp_token *token;

      token = cp_lexer_token_at (parser->lexer, start);

      /* Reset the contents of the START token.  */
      token->type = CPP_NESTED_NAME_SPECIFIER;
      /* Retrieve any deferred checks.  Do not pop this access checks yet
	 so the memory will not be reclaimed during token replacing below.  */
      token->u.tree_check_value = ggc_cleared_alloc<struct tree_check> ();
      token->u.tree_check_value->value = parser->scope;
      token->u.tree_check_value->checks = get_deferred_access_checks ();
      token->u.tree_check_value->qualifying_scope =
	parser->qualifying_scope;
      token->keyword = RID_MAX;

      /* Purge all subsequent tokens.  */
      cp_lexer_purge_tokens_after (parser->lexer, start);
    }

  if (start)
    pop_to_parent_deferring_access_checks ();

  return success ? parser->scope : NULL_TREE;
}
/* Parse a nested-name-specifier. See
cp_parser_nested_name_specifier_opt for details. This function
behaves identically, except that it will issue an error if no
nested-name-specifier is present. */
/* Parse a required nested-name-specifier.  Identical to
   cp_parser_nested_name_specifier_opt, except that the absence of a
   nested-name-specifier is diagnosed and PARSER->SCOPE is cleared.  */
static tree
cp_parser_nested_name_specifier (cp_parser *parser,
				 bool typename_keyword_p,
				 bool check_dependency_p,
				 bool type_p,
				 bool is_declaration)
{
  /* Delegate the actual parsing to the optional variant.  */
  tree scope = cp_parser_nested_name_specifier_opt (parser,
						    typename_keyword_p,
						    check_dependency_p,
						    type_p,
						    is_declaration);

  /* Here the specifier is mandatory, so its absence is an error.  */
  if (scope == NULL_TREE)
    {
      cp_parser_error (parser, "expected nested-name-specifier");
      parser->scope = NULL_TREE;
    }

  return scope;
}
/* Parse the qualifying entity in a nested-name-specifier. For C++98,
this is either a class-name or a namespace-name (which corresponds
to the class-or-namespace-name production in the grammar). For
C++0x, it can also be a type-name that refers to an enumeration
type or a simple-template-id.
TYPENAME_KEYWORD_P is TRUE iff the `typename' keyword is in effect.
TEMPLATE_KEYWORD_P is TRUE iff the `template' keyword is in effect.
CHECK_DEPENDENCY_P is FALSE iff dependent names should be looked up.
TYPE_P is TRUE iff the next name should be taken as a class-name,
even if the same name is declared to be another entity in the same
scope.
Returns the class (TYPE_DECL) or namespace (NAMESPACE_DECL)
specified by the class-or-namespace-name. If neither is found the
ERROR_MARK_NODE is returned. */
/* Parse one component of a nested-name-specifier: a class-name,
   (C++11) an enum type-name, or a namespace-name — tried tentatively
   in that order; see the comment above for the full contract.  */
static tree
cp_parser_qualifying_entity (cp_parser *parser,
			     bool typename_keyword_p,
			     bool template_keyword_p,
			     bool check_dependency_p,
			     bool type_p,
			     bool is_declaration)
{
  tree saved_scope;
  tree saved_qualifying_scope;
  tree saved_object_scope;
  tree scope;
  bool only_class_p;
  bool successful_parse_p;

  /* DR 743: decltype can appear in a nested-name-specifier.  */
  if (cp_lexer_next_token_is_decltype (parser->lexer))
    {
      scope = cp_parser_decltype (parser);
      if (TREE_CODE (scope) != ENUMERAL_TYPE
	  && !MAYBE_CLASS_TYPE_P (scope))
	{
	  cp_parser_simulate_error (parser);
	  return error_mark_node;
	}
      if (TYPE_NAME (scope))
	scope = TYPE_NAME (scope);
      return scope;
    }

  /* Before we try to parse the class-name, we must save away the
     current PARSER->SCOPE since cp_parser_class_name will destroy
     it.  */
  saved_scope = parser->scope;
  saved_qualifying_scope = parser->qualifying_scope;
  saved_object_scope = parser->object_scope;
  /* Try for a class-name first.  If the SAVED_SCOPE is a type, then
     there is no need to look for a namespace-name.  */
  only_class_p = template_keyword_p
    || (saved_scope && TYPE_P (saved_scope) && cxx_dialect == cxx98);
  if (!only_class_p)
    cp_parser_parse_tentatively (parser);
  scope = cp_parser_class_name (parser,
				typename_keyword_p,
				template_keyword_p,
				type_p ? class_type : none_type,
				check_dependency_p,
				/*class_head_p=*/false,
				is_declaration);
  successful_parse_p = only_class_p || cp_parser_parse_definitely (parser);
  /* If that didn't work and we're in C++0x mode, try for a type-name.  */
  if (!only_class_p
      && cxx_dialect != cxx98
      && !successful_parse_p)
    {
      /* Restore the saved scope.  */
      parser->scope = saved_scope;
      parser->qualifying_scope = saved_qualifying_scope;
      parser->object_scope = saved_object_scope;

      /* Parse tentatively.  */
      cp_parser_parse_tentatively (parser);

      /* Parse a type-name  */
      scope = cp_parser_type_name (parser);

      /* "If the name found does not designate a namespace or a class,
	 enumeration, or dependent type, the program is ill-formed."

	 We cover classes and dependent types above and namespaces below,
	 so this code is only looking for enums.  */
      if (!scope || TREE_CODE (scope) != TYPE_DECL
	  || TREE_CODE (TREE_TYPE (scope)) != ENUMERAL_TYPE)
	cp_parser_simulate_error (parser);

      successful_parse_p = cp_parser_parse_definitely (parser);
    }
  /* If that didn't work, try for a namespace-name.  */
  if (!only_class_p && !successful_parse_p)
    {
      /* Restore the saved scope.  */
      parser->scope = saved_scope;
      parser->qualifying_scope = saved_qualifying_scope;
      parser->object_scope = saved_object_scope;
      /* If we are not looking at an identifier followed by the scope
	 resolution operator, then this is not part of a
	 nested-name-specifier.  (Note that this function is only used
	 to parse the components of a nested-name-specifier.)  */
      if (cp_lexer_next_token_is_not (parser->lexer, CPP_NAME)
	  || cp_lexer_peek_nth_token (parser->lexer, 2)->type != CPP_SCOPE)
	return error_mark_node;
      scope = cp_parser_namespace_name (parser);
    }

  return scope;
}
/* Return true if we are looking at a compound-literal, false otherwise. */
static bool
cp_parser_compound_literal_p (cp_parser *parser)
{
  /* Eat the `(' so the skip starts inside the parenthesized group.  */
  cp_lexer_consume_token (parser->lexer);

  /* Remember where we are so the lookahead can be undone.  */
  cp_lexer_save_tokens (parser->lexer);

  /* Scan forward to the matching `)'.  A compound-literal is that
     closing parenthesis immediately followed by a `{'.  */
  bool found_close_paren
    = cp_parser_skip_to_closing_parenthesis (parser, false, false,
					     /*consume_paren=*/true);
  bool compound_literal_p
    = (found_close_paren
       && cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE));

  /* Undo the lookahead; the caller reparses from the saved position.  */
  cp_lexer_rollback_tokens (parser->lexer);

  return compound_literal_p;
}
/* Parse a postfix-expression.
postfix-expression:
primary-expression
postfix-expression [ expression ]
postfix-expression ( expression-list [opt] )
simple-type-specifier ( expression-list [opt] )
typename :: [opt] nested-name-specifier identifier
( expression-list [opt] )
typename :: [opt] nested-name-specifier template [opt] template-id
( expression-list [opt] )
postfix-expression . template [opt] id-expression
postfix-expression -> template [opt] id-expression
postfix-expression . pseudo-destructor-name
postfix-expression -> pseudo-destructor-name
postfix-expression ++
postfix-expression --
dynamic_cast < type-id > ( expression )
static_cast < type-id > ( expression )
reinterpret_cast < type-id > ( expression )
const_cast < type-id > ( expression )
typeid ( expression )
typeid ( type-id )
GNU Extension:
postfix-expression:
( type-id ) { initializer-list , [opt] }
This extension is a GNU version of the C99 compound-literal
construct. (The C99 grammar uses `type-name' instead of `type-id',
but they are essentially the same concept.)
If ADDRESS_P is true, the postfix expression is the operand of the
`&' operator. CAST_P is true if this expression is the target of a
cast.
If MEMBER_ACCESS_ONLY_P, we only allow postfix expressions that are
class member access expressions [expr.ref].
Returns a representation of the expression. */
static tree
cp_parser_postfix_expression (cp_parser *parser, bool address_p, bool cast_p,
			      bool member_access_only_p, bool decltype_p,
			      cp_id_kind * pidk_return)
{
  cp_token *token;
  location_t loc;
  enum rid keyword;
  cp_id_kind idk = CP_ID_KIND_NONE;
  tree postfix_expression = NULL_TREE;
  /* True while POSTFIX_EXPRESSION is a class member access; checked
     against MEMBER_ACCESS_ONLY_P on exit.  */
  bool is_member_access = false;
  /* -1 means "no _Cilk_spawn in-statement flags saved"; otherwise the
     previous parser->in_statement value.  */
  int saved_in_statement = -1;

  /* Peek at the next token.  */
  token = cp_lexer_peek_token (parser->lexer);
  loc = token->location;
  /* Some of the productions are determined by keywords.  */
  keyword = token->keyword;
  switch (keyword)
    {
    case RID_DYNCAST:
    case RID_STATCAST:
    case RID_REINTCAST:
    case RID_CONSTCAST:
      {
	tree type;
	tree expression;
	const char *saved_message;
	bool saved_in_type_id_in_expr_p;

	/* All of these can be handled in the same way from the point
	   of view of parsing.  Begin by consuming the token
	   identifying the cast.  */
	cp_lexer_consume_token (parser->lexer);

	/* New types cannot be defined in the cast.  */
	saved_message = parser->type_definition_forbidden_message;
	parser->type_definition_forbidden_message
	  = G_("types may not be defined in casts");

	/* Look for the opening `<'.  */
	cp_parser_require (parser, CPP_LESS, RT_LESS);
	/* Parse the type to which we are casting.  */
	saved_in_type_id_in_expr_p = parser->in_type_id_in_expr_p;
	parser->in_type_id_in_expr_p = true;
	type = cp_parser_type_id (parser);
	parser->in_type_id_in_expr_p = saved_in_type_id_in_expr_p;
	/* Look for the closing `>'.  */
	cp_parser_require (parser, CPP_GREATER, RT_GREATER);
	/* Restore the old message.  */
	parser->type_definition_forbidden_message = saved_message;

	/* Inside the parenthesized operand a `>' is an ordinary
	   operator again, not a template-argument terminator.  */
	bool saved_greater_than_is_operator_p
	  = parser->greater_than_is_operator_p;
	parser->greater_than_is_operator_p = true;

	/* And the expression which is being cast.  */
	cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN);
	expression = cp_parser_expression (parser, & idk, /*cast_p=*/true);
	cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);

	parser->greater_than_is_operator_p
	  = saved_greater_than_is_operator_p;

	/* Only type conversions to integral or enumeration types
	   can be used in constant-expressions.  */
	if (!cast_valid_in_integral_constant_expression_p (type)
	    && cp_parser_non_integral_constant_expression (parser, NIC_CAST))
	  return error_mark_node;

	/* Dispatch to the tree builder matching the cast keyword.  */
	switch (keyword)
	  {
	  case RID_DYNCAST:
	    postfix_expression
	      = build_dynamic_cast (type, expression, tf_warning_or_error);
	    break;
	  case RID_STATCAST:
	    postfix_expression
	      = build_static_cast (type, expression, tf_warning_or_error);
	    break;
	  case RID_REINTCAST:
	    postfix_expression
	      = build_reinterpret_cast (type, expression,
					tf_warning_or_error);
	    break;
	  case RID_CONSTCAST:
	    postfix_expression
	      = build_const_cast (type, expression, tf_warning_or_error);
	    break;
	  default:
	    gcc_unreachable ();
	  }
      }
      break;

    case RID_TYPEID:
      {
	tree type;
	const char *saved_message;
	bool saved_in_type_id_in_expr_p;

	/* Consume the `typeid' token.  */
	cp_lexer_consume_token (parser->lexer);
	/* Look for the `(' token.  */
	cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN);
	/* Types cannot be defined in a `typeid' expression.  */
	saved_message = parser->type_definition_forbidden_message;
	parser->type_definition_forbidden_message
	  = G_("types may not be defined in a %<typeid%> expression");
	/* We can't be sure yet whether we're looking at a type-id or an
	   expression.  */
	cp_parser_parse_tentatively (parser);
	/* Try a type-id first.  */
	saved_in_type_id_in_expr_p = parser->in_type_id_in_expr_p;
	parser->in_type_id_in_expr_p = true;
	type = cp_parser_type_id (parser);
	parser->in_type_id_in_expr_p = saved_in_type_id_in_expr_p;
	/* Look for the `)' token.  Otherwise, we can't be sure that
	   we're not looking at an expression: consider `typeid (int
	   (3))', for example.  */
	cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);
	/* If all went well, simply lookup the type-id.  */
	if (cp_parser_parse_definitely (parser))
	  postfix_expression = get_typeid (type, tf_warning_or_error);
	/* Otherwise, fall back to the expression variant.  */
	else
	  {
	    tree expression;

	    /* Look for an expression.  */
	    expression = cp_parser_expression (parser, & idk);
	    /* Compute its typeid.  */
	    postfix_expression = build_typeid (expression, tf_warning_or_error);
	    /* Look for the `)' token.  */
	    cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);
	  }
	/* Restore the saved message.  */
	parser->type_definition_forbidden_message = saved_message;
	/* `typeid' may not appear in an integral constant expression.  */
	if (cp_parser_non_integral_constant_expression (parser, NIC_TYPEID))
	  return error_mark_node;
      }
      break;

    case RID_TYPENAME:
      {
	tree type;
	/* The syntax permitted here is the same permitted for an
	   elaborated-type-specifier.  */
	type = cp_parser_elaborated_type_specifier (parser,
						    /*is_friend=*/false,
						    /*is_declaration=*/false);
	postfix_expression = cp_parser_functional_cast (parser, type);
      }
      break;

    case RID_CILK_SPAWN:
      {
	cp_lexer_consume_token (parser->lexer);
	token = cp_lexer_peek_token (parser->lexer);
	/* `_Cilk_spawn ;' has no spawned expression — diagnose.  */
	if (token->type == CPP_SEMICOLON)
	  {
	    error_at (token->location, "%<_Cilk_spawn%> must be followed by "
		      "an expression");
	    postfix_expression = error_mark_node;
	    break;
	  }
	else if (!current_function_decl)
	  {
	    error_at (token->location, "%<_Cilk_spawn%> may only be used "
		      "inside a function");
	    postfix_expression = error_mark_node;
	    break;
	  }
	else
	  {
	    /* Consecutive _Cilk_spawns are not allowed in a statement.  */
	    saved_in_statement = parser->in_statement;
	    parser->in_statement |= IN_CILK_SPAWN;
	  }
	cfun->calls_cilk_spawn = 1;
	/* Recursively parse the spawned postfix-expression.  */
	postfix_expression =
	  cp_parser_postfix_expression (parser, false, false,
					false, false, &idk);
	if (!flag_cilkplus)
	  {
	    error_at (token->location, "-fcilkplus must be enabled to use"
		      " %<_Cilk_spawn%>");
	    cfun->calls_cilk_spawn = 0;
	  }
	else if (saved_in_statement & IN_CILK_SPAWN)
	  {
	    error_at (token->location, "consecutive %<_Cilk_spawn%> keywords "
		      "are not permitted");
	    postfix_expression = error_mark_node;
	    cfun->calls_cilk_spawn = 0;
	  }
	else
	  {
	    postfix_expression = build_cilk_spawn (token->location,
						   postfix_expression);
	    if (postfix_expression != error_mark_node)
	      SET_EXPR_LOCATION (postfix_expression, input_location);
	    parser->in_statement = parser->in_statement & ~IN_CILK_SPAWN;
	  }
	break;
      }

    case RID_BUILTIN_SHUFFLE:
      {
	vec<tree, va_gc> *vec;
	unsigned int i;
	tree p;

	cp_lexer_consume_token (parser->lexer);
	vec = cp_parser_parenthesized_expression_list (parser, non_attr,
		    /*cast_p=*/false, /*allow_expansion_p=*/true,
		    /*non_constant_p=*/NULL);
	if (vec == NULL)
	  return error_mark_node;

	FOR_EACH_VEC_ELT (*vec, i, p)
	  mark_exp_read (p);

	/* __builtin_shuffle takes (vec, mask) or (vec0, vec1, mask).  */
	if (vec->length () == 2)
	  return build_x_vec_perm_expr (loc, (*vec)[0], NULL_TREE, (*vec)[1],
					tf_warning_or_error);
	else if (vec->length () == 3)
	  return build_x_vec_perm_expr (loc, (*vec)[0], (*vec)[1], (*vec)[2],
					tf_warning_or_error);
	else
	  {
	    error_at (loc, "wrong number of arguments to "
		      "%<__builtin_shuffle%>");
	    return error_mark_node;
	  }
	break;
      }

    default:
      {
	tree type;

	/* If the next thing is a simple-type-specifier, we may be
	   looking at a functional cast.  We could also be looking at
	   an id-expression.  So, we try the functional cast, and if
	   that doesn't work we fall back to the primary-expression.  */
	cp_parser_parse_tentatively (parser);
	/* Look for the simple-type-specifier.  */
	type = cp_parser_simple_type_specifier (parser,
						/*decl_specs=*/NULL,
						CP_PARSER_FLAGS_NONE);
	/* Parse the cast itself.  */
	if (!cp_parser_error_occurred (parser))
	  postfix_expression
	    = cp_parser_functional_cast (parser, type);
	/* If that worked, we're done.  */
	if (cp_parser_parse_definitely (parser))
	  break;

	/* If the functional-cast didn't work out, try a
	   compound-literal.  */
	if (cp_parser_allow_gnu_extensions_p (parser)
	    && cp_lexer_next_token_is (parser->lexer, CPP_OPEN_PAREN))
	  {
	    tree initializer = NULL_TREE;

	    cp_parser_parse_tentatively (parser);

	    /* Avoid calling cp_parser_type_id pointlessly, see comment
	       in cp_parser_cast_expression about c++/29234.  */
	    if (!cp_parser_compound_literal_p (parser))
	      cp_parser_simulate_error (parser);
	    else
	      {
		/* Parse the type.  */
		bool saved_in_type_id_in_expr_p = parser->in_type_id_in_expr_p;
		parser->in_type_id_in_expr_p = true;
		type = cp_parser_type_id (parser);
		parser->in_type_id_in_expr_p = saved_in_type_id_in_expr_p;
		/* Look for the `)'.  */
		cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);
	      }
	    /* If things aren't going well, there's no need to
	       keep going.  */
	    if (!cp_parser_error_occurred (parser))
	      {
		bool non_constant_p;
		/* Parse the brace-enclosed initializer list.  */
		initializer = cp_parser_braced_list (parser,
						     &non_constant_p);
	      }
	    /* If that worked, we're definitely looking at a
	       compound-literal expression.  */
	    if (cp_parser_parse_definitely (parser))
	      {
		/* Warn the user that a compound literal is not
		   allowed in standard C++.  */
		pedwarn (input_location, OPT_Wpedantic,
			 "ISO C++ forbids compound-literals");
		/* For simplicity, we disallow compound literals in
		   constant-expressions.  We could
		   allow compound literals of integer type, whose
		   initializer was a constant, in constant
		   expressions.  Permitting that usage, as a further
		   extension, would not change the meaning of any
		   currently accepted programs.  (Of course, as
		   compound literals are not part of ISO C++, the
		   standard has nothing to say.)  */
		if (cp_parser_non_integral_constant_expression (parser,
								NIC_NCC))
		  {
		    postfix_expression = error_mark_node;
		    break;
		  }
		/* Form the representation of the compound-literal.  */
		postfix_expression
		  = finish_compound_literal (type, initializer,
					     tf_warning_or_error);
		break;
	      }
	  }

	/* It must be a primary-expression.  */
	postfix_expression
	  = cp_parser_primary_expression (parser, address_p, cast_p,
					  /*template_arg_p=*/false,
					  decltype_p,
					  &idk);
      }
      break;
    }

  /* Note that we don't need to worry about calling build_cplus_new on a
     class-valued CALL_EXPR in decltype when it isn't the end of the
     postfix-expression; unary_complex_lvalue will take care of that for
     all these cases.  */

  /* Keep looping until the postfix-expression is complete.  */
  while (true)
    {
      if (idk == CP_ID_KIND_UNQUALIFIED
	  && identifier_p (postfix_expression)
	  && cp_lexer_next_token_is_not (parser->lexer, CPP_OPEN_PAREN))
	/* It is not a Koenig lookup function call.  */
	postfix_expression
	  = unqualified_name_lookup_error (postfix_expression);

      /* Peek at the next token.  */
      token = cp_lexer_peek_token (parser->lexer);

      switch (token->type)
	{
	case CPP_OPEN_SQUARE:
	  if (cp_next_tokens_can_be_std_attribute_p (parser))
	    {
	      cp_parser_error (parser,
			       "two consecutive %<[%> shall "
			       "only introduce an attribute");
	      return error_mark_node;
	    }
	  postfix_expression
	    = cp_parser_postfix_open_square_expression (parser,
							postfix_expression,
							false,
							decltype_p);
	  idk = CP_ID_KIND_NONE;
	  is_member_access = false;
	  break;

	case CPP_OPEN_PAREN:
	  /* postfix-expression ( expression-list [opt] ) */
	  {
	    bool koenig_p;
	    bool is_builtin_constant_p;
	    bool saved_integral_constant_expression_p = false;
	    bool saved_non_integral_constant_expression_p = false;
	    tsubst_flags_t complain = complain_flags (decltype_p);
	    vec<tree, va_gc> *args;

	    is_member_access = false;

	    is_builtin_constant_p
	      = DECL_IS_BUILTIN_CONSTANT_P (postfix_expression);
	    if (is_builtin_constant_p)
	      {
		/* The whole point of __builtin_constant_p is to allow
		   non-constant expressions to appear as arguments.  */
		saved_integral_constant_expression_p
		  = parser->integral_constant_expression_p;
		saved_non_integral_constant_expression_p
		  = parser->non_integral_constant_expression_p;
		parser->integral_constant_expression_p = false;
	      }
	    args = (cp_parser_parenthesized_expression_list
		    (parser, non_attr,
		     /*cast_p=*/false, /*allow_expansion_p=*/true,
		     /*non_constant_p=*/NULL,
		     /*want_literal_zero_p=*/warn_memset_transposed_args));
	    if (is_builtin_constant_p)
	      {
		parser->integral_constant_expression_p
		  = saved_integral_constant_expression_p;
		parser->non_integral_constant_expression_p
		  = saved_non_integral_constant_expression_p;
	      }

	    if (args == NULL)
	      {
		postfix_expression = error_mark_node;
		break;
	      }

	    /* Function calls are not permitted in
	       constant-expressions.  */
	    if (! builtin_valid_in_constant_expr_p (postfix_expression)
		&& cp_parser_non_integral_constant_expression (parser,
							       NIC_FUNC_CALL))
	      {
		postfix_expression = error_mark_node;
		release_tree_vector (args);
		break;
	      }

	    koenig_p = false;
	    if (idk == CP_ID_KIND_UNQUALIFIED
		|| idk == CP_ID_KIND_TEMPLATE_ID)
	      {
		if (identifier_p (postfix_expression))
		  {
		    if (!args->is_empty ())
		      {
			koenig_p = true;
			if (!any_type_dependent_arguments_p (args))
			  postfix_expression
			    = perform_koenig_lookup (postfix_expression, args,
						     complain);
		      }
		    else
		      postfix_expression
			= unqualified_fn_lookup_error (postfix_expression);
		  }
		/* We do not perform argument-dependent lookup if
		   normal lookup finds a non-function, in accordance
		   with the expected resolution of DR 218.  */
		else if (!args->is_empty ()
			 && is_overloaded_fn (postfix_expression))
		  {
		    tree fn = get_first_fn (postfix_expression);
		    fn = STRIP_TEMPLATE (fn);

		    /* Do not do argument dependent lookup if regular
		       lookup finds a member function or a block-scope
		       function declaration.  [basic.lookup.argdep]/3  */
		    if (!DECL_FUNCTION_MEMBER_P (fn)
			&& !DECL_LOCAL_FUNCTION_P (fn))
		      {
			koenig_p = true;
			if (!any_type_dependent_arguments_p (args))
			  postfix_expression
			    = perform_koenig_lookup (postfix_expression, args,
						     complain);
		      }
		  }
	      }

	    /* -Wmemset-transposed-args: warn about memset (p, n, 0)
	       where the zero length is likely a swapped argument.  */
	    if (warn_memset_transposed_args)
	      {
		if (TREE_CODE (postfix_expression) == FUNCTION_DECL
		    && DECL_BUILT_IN_CLASS (postfix_expression) == BUILT_IN_NORMAL
		    && DECL_FUNCTION_CODE (postfix_expression) == BUILT_IN_MEMSET
		    && vec_safe_length (args) == 3
		    && integer_zerop ((*args)[2])
		    && LITERAL_ZERO_P ((*args)[2])
		    && !(integer_zerop ((*args)[1])
			 && LITERAL_ZERO_P ((*args)[1])))
		  warning (OPT_Wmemset_transposed_args,
			   "%<memset%> used with constant zero length "
			   "parameter; this could be due to transposed "
			   "parameters");

		/* Replace LITERAL_ZERO_P INTEGER_CSTs with normal ones
		   to avoid leaking those into folder and middle-end.  */
		unsigned int i;
		tree arg;
		FOR_EACH_VEC_SAFE_ELT (args, i, arg)
		  if (TREE_CODE (arg) == INTEGER_CST && LITERAL_ZERO_P (arg))
		    (*args)[i] = build_int_cst (TREE_TYPE (arg), 0);
	      }

	    if (TREE_CODE (postfix_expression) == COMPONENT_REF)
	      {
		tree instance = TREE_OPERAND (postfix_expression, 0);
		tree fn = TREE_OPERAND (postfix_expression, 1);

		/* Defer type-dependent member calls to instantiation
		   time by building a naked CALL_EXPR.  */
		if (processing_template_decl
		    && (type_dependent_expression_p (instance)
			|| (!BASELINK_P (fn)
			    && TREE_CODE (fn) != FIELD_DECL)
			|| type_dependent_expression_p (fn)
			|| any_type_dependent_arguments_p (args)))
		  {
		    postfix_expression
		      = build_nt_call_vec (postfix_expression, args);
		    release_tree_vector (args);
		    break;
		  }

		if (BASELINK_P (fn))
		  {
		    postfix_expression
		      = (build_new_method_call
			 (instance, fn, &args, NULL_TREE,
			  (idk == CP_ID_KIND_QUALIFIED
			   ? LOOKUP_NORMAL|LOOKUP_NONVIRTUAL
			   : LOOKUP_NORMAL),
			  /*fn_p=*/NULL,
			  complain));
		  }
		else
		  postfix_expression
		    = finish_call_expr (postfix_expression, &args,
					/*disallow_virtual=*/false,
					/*koenig_p=*/false,
					complain);
	      }
	    else if (TREE_CODE (postfix_expression) == OFFSET_REF
		     || TREE_CODE (postfix_expression) == MEMBER_REF
		     || TREE_CODE (postfix_expression) == DOTSTAR_EXPR)
	      postfix_expression = (build_offset_ref_call_from_tree
				    (postfix_expression, &args,
				     complain));
	    else if (idk == CP_ID_KIND_QUALIFIED)
	      /* A call to a static class member, or a namespace-scope
		 function.  */
	      postfix_expression
		= finish_call_expr (postfix_expression, &args,
				    /*disallow_virtual=*/true,
				    koenig_p,
				    complain);
	    else
	      /* All other function calls.  */
	      postfix_expression
		= finish_call_expr (postfix_expression, &args,
				    /*disallow_virtual=*/false,
				    koenig_p,
				    complain);

	    protected_set_expr_location (postfix_expression, token->location);

	    /* The POSTFIX_EXPRESSION is certainly no longer an id.  */
	    idk = CP_ID_KIND_NONE;

	    release_tree_vector (args);
	  }
	  break;

	case CPP_DOT:
	case CPP_DEREF:
	  /* postfix-expression . template [opt] id-expression
	     postfix-expression . pseudo-destructor-name
	     postfix-expression -> template [opt] id-expression
	     postfix-expression -> pseudo-destructor-name */

	  /* Consume the `.' or `->' operator.  */
	  cp_lexer_consume_token (parser->lexer);

	  postfix_expression
	    = cp_parser_postfix_dot_deref_expression (parser, token->type,
						      postfix_expression,
						      false, &idk, loc);

	  is_member_access = true;
	  break;

	case CPP_PLUS_PLUS:
	  /* postfix-expression ++  */
	  /* Consume the `++' token.  */
	  cp_lexer_consume_token (parser->lexer);
	  /* Generate a representation for the complete expression.  */
	  postfix_expression
	    = finish_increment_expr (postfix_expression,
				     POSTINCREMENT_EXPR);
	  /* Increments may not appear in constant-expressions.  */
	  if (cp_parser_non_integral_constant_expression (parser, NIC_INC))
	    postfix_expression = error_mark_node;
	  idk = CP_ID_KIND_NONE;
	  is_member_access = false;
	  break;

	case CPP_MINUS_MINUS:
	  /* postfix-expression --  */
	  /* Consume the `--' token.  */
	  cp_lexer_consume_token (parser->lexer);
	  /* Generate a representation for the complete expression.  */
	  postfix_expression
	    = finish_increment_expr (postfix_expression,
				     POSTDECREMENT_EXPR);
	  /* Decrements may not appear in constant-expressions.  */
	  if (cp_parser_non_integral_constant_expression (parser, NIC_DEC))
	    postfix_expression = error_mark_node;
	  idk = CP_ID_KIND_NONE;
	  is_member_access = false;
	  break;

	default:
	  /* No more postfix operators; report the id-kind to the
	     caller if requested and return the finished expression.  */
	  if (pidk_return != NULL)
	    * pidk_return = idk;
	  if (member_access_only_p)
	    return is_member_access? postfix_expression : error_mark_node;
	  else
	    return postfix_expression;
	}
    }

  /* We should never get here.  */
  gcc_unreachable ();
  return error_mark_node;
}
/* This function parses Cilk Plus array notations. If a normal array expr. is
parsed then the array index is passed back to the caller through *INIT_INDEX
and the function returns a NULL_TREE. If array notation expr. is parsed,
then *INIT_INDEX is ignored by the caller and the function returns
a tree of type ARRAY_NOTATION_REF. If some error occurred it returns
error_mark_node. */
static tree
cp_parser_array_notation (location_t loc, cp_parser *parser, tree *init_index,
			  tree array_value)
{
  cp_token *token = NULL;
  tree length_index, stride = NULL_TREE, value_tree, array_type;

  /* A missing or erroneous array value means the subscript cannot be
     analyzed; skip the rest of the statement to recover.  */
  if (!array_value || array_value == error_mark_node)
    {
      cp_parser_skip_to_end_of_statement (parser);
      return error_mark_node;
    }

  array_type = TREE_TYPE (array_value);

  /* Inside a subscript a `:' is a section separator, not a
     scope-correction candidate; save the flag and restore it on
     every exit path.  */
  bool saved_colon_corrects = parser->colon_corrects_to_scope_p;
  parser->colon_corrects_to_scope_p = false;
  token = cp_lexer_peek_token (parser->lexer);

  if (!token)
    {
      cp_parser_error (parser, "expected %<:%> or numeral");
      /* Restore the flag here too; this early return previously
	 leaked the temporary `false' into the rest of the parse.  */
      parser->colon_corrects_to_scope_p = saved_colon_corrects;
      return error_mark_node;
    }
  else if (token->type == CPP_COLON)
    {
      /* Consume the ':'.  */
      cp_lexer_consume_token (parser->lexer);

      /* If we are here, then we have a case like this A[:].  */
      if (cp_lexer_peek_token (parser->lexer)->type != CPP_CLOSE_SQUARE)
	{
	  cp_parser_error (parser, "expected %<]%>");
	  cp_parser_skip_to_end_of_statement (parser);
	  /* Restore the flag on this error path as well.  */
	  parser->colon_corrects_to_scope_p = saved_colon_corrects;
	  return error_mark_node;
	}
      *init_index = NULL_TREE;
      stride = NULL_TREE;
      length_index = NULL_TREE;
    }
  else
    {
      /* If we are here, then there are three valid possibilities:
	 1. ARRAY [ EXP ]
	 2. ARRAY [ EXP : EXP ]
	 3. ARRAY [ EXP : EXP : EXP ]  */
      *init_index = cp_parser_expression (parser);
      if (cp_lexer_peek_token (parser->lexer)->type != CPP_COLON)
	{
	  /* This indicates that we have a normal array expression.  */
	  parser->colon_corrects_to_scope_p = saved_colon_corrects;
	  return NULL_TREE;
	}

      /* Consume the ':'.  */
      cp_lexer_consume_token (parser->lexer);
      length_index = cp_parser_expression (parser);
      if (cp_lexer_peek_token (parser->lexer)->type == CPP_COLON)
	{
	  cp_lexer_consume_token (parser->lexer);
	  stride = cp_parser_expression (parser);
	}
    }
  parser->colon_corrects_to_scope_p = saved_colon_corrects;

  if (*init_index == error_mark_node || length_index == error_mark_node
      || stride == error_mark_node || array_type == error_mark_node)
    {
      /* Eat the `]' so the caller resumes after the subscript.  */
      if (cp_lexer_peek_token (parser->lexer)->type == CPP_CLOSE_SQUARE)
	cp_lexer_consume_token (parser->lexer);
      return error_mark_node;
    }
  cp_parser_require (parser, CPP_CLOSE_SQUARE, RT_CLOSE_SQUARE);

  value_tree = build_array_notation_ref (loc, array_value, *init_index,
					 length_index, stride, array_type);
  return value_tree;
}
/* A subroutine of cp_parser_postfix_expression that also gets hijacked
by cp_parser_builtin_offsetof. We're looking for
postfix-expression [ expression ]
postfix-expression [ braced-init-list ] (C++11)
FOR_OFFSETOF is set if we're being called in that context, which
changes how we deal with integer constant expressions. */
static tree
cp_parser_postfix_open_square_expression (cp_parser *parser,
					  tree postfix_expression,
					  bool for_offsetof,
					  bool decltype_p)
{
  tree index = NULL_TREE;
  location_t loc = cp_lexer_peek_token (parser->lexer)->location;
  bool saved_greater_than_is_operator_p;

  /* Consume the `[' token.  */
  cp_lexer_consume_token (parser->lexer);

  /* Within the subscript a `>' is an ordinary operator, not a
     template-argument terminator; save the flag and restore it on
     every exit path.  */
  saved_greater_than_is_operator_p = parser->greater_than_is_operator_p;
  parser->greater_than_is_operator_p = true;

  /* Parse the index expression.  */
  /* ??? For offsetof, there is a question of what to allow here.  If
     offsetof is not being used in an integral constant expression context,
     then we *could* get the right answer by computing the value at runtime.
     If we are in an integral constant expression context, then we might
     could accept any constant expression; hard to say without analysis.
     Rather than open the barn door too wide right away, allow only integer
     constant expressions here.  */
  if (for_offsetof)
    index = cp_parser_constant_expression (parser);
  else
    {
      if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE))
	{
	  bool expr_nonconst_p;
	  cp_lexer_set_source_position (parser->lexer);
	  maybe_warn_cpp0x (CPP0X_INITIALIZER_LISTS);
	  index = cp_parser_braced_list (parser, &expr_nonconst_p);
	  if (flag_cilkplus
	      && cp_lexer_peek_token (parser->lexer)->type == CPP_COLON)
	    {
	      error_at (cp_lexer_peek_token (parser->lexer)->location,
			"braced list index is not allowed with array "
			"notation");
	      cp_parser_skip_to_end_of_statement (parser);
	      /* Restore the flag before bailing out; this early return
		 previously leaked the temporary setting.  */
	      parser->greater_than_is_operator_p
		= saved_greater_than_is_operator_p;
	      return error_mark_node;
	    }
	}
      else if (flag_cilkplus)
	{
	  /* Here we have these two options:
	     ARRAY[EXP : EXP] - Array notation expr with default
	     stride of 1.
	     ARRAY[EXP : EXP : EXP] - Array Notation with user-defined
	     stride.  */
	  tree an_exp = cp_parser_array_notation (loc, parser, &index,
						  postfix_expression);
	  if (an_exp)
	    {
	      /* cp_parser_array_notation has already consumed the
		 closing `]'; restore the flag before returning.  */
	      parser->greater_than_is_operator_p
		= saved_greater_than_is_operator_p;
	      return an_exp;
	    }
	}
      else
	index = cp_parser_expression (parser);
    }
  parser->greater_than_is_operator_p = saved_greater_than_is_operator_p;

  /* Look for the closing `]'.  */
  cp_parser_require (parser, CPP_CLOSE_SQUARE, RT_CLOSE_SQUARE);

  /* Build the ARRAY_REF.  */
  postfix_expression = grok_array_decl (loc, postfix_expression,
					index, decltype_p);

  /* When not doing offsetof, array references are not permitted in
     constant-expressions.  */
  if (!for_offsetof
      && (cp_parser_non_integral_constant_expression (parser, NIC_ARRAY_REF)))
    postfix_expression = error_mark_node;

  return postfix_expression;
}
/* A subroutine of cp_parser_postfix_expression that also gets hijacked
by cp_parser_builtin_offsetof. We're looking for
postfix-expression . template [opt] id-expression
postfix-expression . pseudo-destructor-name
postfix-expression -> template [opt] id-expression
postfix-expression -> pseudo-destructor-name
FOR_OFFSETOF is set if we're being called in that context. That sorta
limits what of the above we'll actually accept, but nevermind.
TOKEN_TYPE is the "." or "->" token, which will already have been
removed from the stream. */
static tree
cp_parser_postfix_dot_deref_expression (cp_parser *parser,
					enum cpp_ttype token_type,
					tree postfix_expression,
					bool for_offsetof, cp_id_kind *idk,
					location_t location)
{
  tree name;
  bool dependent_p;
  bool pseudo_destructor_p;
  tree scope = NULL_TREE;

  /* If this is a `->' operator, dereference the pointer.  */
  if (token_type == CPP_DEREF)
    postfix_expression = build_x_arrow (location, postfix_expression,
					tf_warning_or_error);
  /* Check to see whether or not the expression is type-dependent.  */
  dependent_p = type_dependent_expression_p (postfix_expression);

  /* The identifier following the `->' or `.' is not qualified.  */
  parser->scope = NULL_TREE;
  parser->qualifying_scope = NULL_TREE;
  parser->object_scope = NULL_TREE;
  *idk = CP_ID_KIND_NONE;

  /* Enter the scope corresponding to the type of the object
     given by the POSTFIX_EXPRESSION.  */
  if (!dependent_p && TREE_TYPE (postfix_expression) != NULL_TREE)
    {
      scope = TREE_TYPE (postfix_expression);
      /* According to the standard, no expression should ever have
	 reference type.  Unfortunately, we do not currently match
	 the standard in this respect in that our internal representation
	 of an expression may have reference type even when the standard
	 says it does not.  Therefore, we have to manually obtain the
	 underlying type here.  */
      scope = non_reference (scope);
      /* The type of the POSTFIX_EXPRESSION must be complete.  */
      if (scope == unknown_type_node)
	{
	  error_at (location, "%qE does not have class type",
		    postfix_expression);
	  scope = NULL_TREE;
	}
      /* Unlike the object expression in other contexts, *this is not
	 required to be of complete type for purposes of class member
	 access (5.2.5) outside the member function body.  */
      else if (postfix_expression != current_class_ref
	       && !(processing_template_decl && scope == current_class_type))
	scope = complete_type_or_else (scope, NULL_TREE);
      /* Let the name lookup machinery know that we are processing a
	 class member access expression.  */
      parser->context->object_type = scope;
      /* If something went wrong, we want to be able to discern that case,
	 as opposed to the case where there was no SCOPE due to the type
	 of expression being dependent.  */
      if (!scope)
	scope = error_mark_node;
      /* If the SCOPE was erroneous, make the various semantic analysis
	 functions exit quickly -- and without issuing additional error
	 messages.  */
      if (scope == error_mark_node)
	postfix_expression = error_mark_node;
    }

  /* Assume this expression is not a pseudo-destructor access.  */
  pseudo_destructor_p = false;

  /* If the SCOPE is a scalar type, then, if this is a valid program,
     we must be looking at a pseudo-destructor-name.  If POSTFIX_EXPRESSION
     is type dependent, it can be pseudo-destructor-name or something else.
     Try to parse it as pseudo-destructor-name first.  */
  if ((scope && SCALAR_TYPE_P (scope)) || dependent_p)
    {
      tree s;
      tree type;

      cp_parser_parse_tentatively (parser);
      /* Parse the pseudo-destructor-name.  */
      s = NULL_TREE;
      cp_parser_pseudo_destructor_name (parser, postfix_expression,
					&s, &type);
      /* For a dependent object, only commit to the pseudo-destructor
	 reading when the named type is in fact scalar.  */
      if (dependent_p
	  && (cp_parser_error_occurred (parser)
	      || !SCALAR_TYPE_P (type)))
	cp_parser_abort_tentative_parse (parser);
      else if (cp_parser_parse_definitely (parser))
	{
	  pseudo_destructor_p = true;
	  postfix_expression
	    = finish_pseudo_destructor_expr (postfix_expression,
					     s, type, location);
	}
    }

  if (!pseudo_destructor_p)
    {
      /* If the SCOPE is not a scalar type, we are looking at an
	 ordinary class member access expression, rather than a
	 pseudo-destructor-name.  */
      bool template_p;
      cp_token *token = cp_lexer_peek_token (parser->lexer);
      /* Parse the id-expression.  */
      name = (cp_parser_id_expression
	      (parser,
	       cp_parser_optional_template_keyword (parser),
	       /*check_dependency_p=*/true,
	       &template_p,
	       /*declarator_p=*/false,
	       /*optional_p=*/false));
      /* In general, build a SCOPE_REF if the member name is qualified.
	 However, if the name was not dependent and has already been
	 resolved; there is no need to build the SCOPE_REF.  For example;

	 struct X { void f(); };
	 template <typename T> void f(T* t) { t->X::f(); }

	 Even though "t" is dependent, "X::f" is not and has been resolved
	 to a BASELINK; there is no need to include scope information.  */

      /* But we do need to remember that there was an explicit scope for
	 virtual function calls.  */
      if (parser->scope)
	*idk = CP_ID_KIND_QUALIFIED;

      /* If the name is a template-id that names a type, we will get a
	 TYPE_DECL here.  That is invalid code.  */
      if (TREE_CODE (name) == TYPE_DECL)
	{
	  error_at (token->location, "invalid use of %qD", name);
	  postfix_expression = error_mark_node;
	}
      else
	{
	  if (name != error_mark_node && !BASELINK_P (name) && parser->scope)
	    {
	      if (TREE_CODE (parser->scope) == NAMESPACE_DECL)
		{
		  error_at (token->location, "%<%D::%D%> is not a class member",
			    parser->scope, name);
		  postfix_expression = error_mark_node;
		}
	      else
		name = build_qualified_name (/*type=*/NULL_TREE,
					     parser->scope,
					     name,
					     template_p);
	      parser->scope = NULL_TREE;
	      parser->qualifying_scope = NULL_TREE;
	      parser->object_scope = NULL_TREE;
	    }
	  if (parser->scope && name && BASELINK_P (name))
	    adjust_result_of_qualified_name_lookup
	      (name, parser->scope, scope);
	  postfix_expression
	    = finish_class_member_access_expr (postfix_expression, name,
					       template_p,
					       tf_warning_or_error);
	}
    }

  /* We no longer need to look up names in the scope of the object on
     the left-hand side of the `.' or `->' operator.  */
  parser->context->object_type = NULL_TREE;

  /* Outside of offsetof, these operators may not appear in
     constant-expressions.  */
  if (!for_offsetof
      && (cp_parser_non_integral_constant_expression
	  (parser, token_type == CPP_DEREF ? NIC_ARROW : NIC_POINT)))
    postfix_expression = error_mark_node;

  return postfix_expression;
}
/* Cache of LITERAL_ZERO_P INTEGER_CST constants, one slot per standard
   integer type kind (indexed like integer_types[], up to itk_none).
   Used by cp_parser_parenthesized_expression_list so that repeated
   literal-zero arguments of the same type share a single node.
   GTY(()) registers the array with the garbage collector.  */
static GTY(()) tree literal_zeros[itk_none];
/* Parse a parenthesized expression-list.

   expression-list:
     assignment-expression
     expression-list, assignment-expression

   attribute-list:
     expression-list
     identifier
     identifier, expression-list

   CAST_P is true if this expression is the target of a cast.

   ALLOW_EXPANSION_P is true if this expression allows expansion of an
   argument pack.

   Returns a vector of trees.  Each element is a representation of an
   assignment-expression.  NULL is returned if the `(' and/or `)' are
   missing.  An empty, but allocated, vector is returned on no
   expressions.  The parentheses are eaten.  IS_ATTRIBUTE_LIST is id_attr
   if we are parsing an attribute list for an attribute that wants a
   plain identifier argument, normal_attr for an attribute that wants
   an expression, or non_attr if we aren't parsing an attribute list.  If
   NON_CONSTANT_P is non-NULL, *NON_CONSTANT_P indicates whether or
   not all of the expressions in the list were constant.

   WANT_LITERAL_ZERO_P is true if the caller is interested in
   LITERAL_ZERO_P INTEGER_CSTs.  FIXME: once we don't fold everything
   immediately, this can be removed.  */

static vec<tree, va_gc> *
cp_parser_parenthesized_expression_list (cp_parser* parser,
					 int is_attribute_list,
					 bool cast_p,
					 bool allow_expansion_p,
					 bool *non_constant_p,
					 bool want_literal_zero_p)
{
  vec<tree, va_gc> *expression_list;
  /* Attribute arguments are left unfolded; other lists are folded.  */
  bool fold_expr_p = is_attribute_list != non_attr;
  tree identifier = NULL_TREE;
  bool saved_greater_than_is_operator_p;

  /* Assume all the expressions will be constant.  */
  if (non_constant_p)
    *non_constant_p = false;

  if (!cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN))
    return NULL;

  expression_list = make_tree_vector ();

  /* Within a parenthesized expression, a `>' token is always
     the greater-than operator.  */
  saved_greater_than_is_operator_p
    = parser->greater_than_is_operator_p;
  parser->greater_than_is_operator_p = true;

  /* Consume expressions until there are no more.  */
  if (cp_lexer_next_token_is_not (parser->lexer, CPP_CLOSE_PAREN))
    while (true)
      {
	tree expr;

	/* At the beginning of attribute lists, check to see if the
	   next token is an identifier.  */
	if (is_attribute_list == id_attr
	    && cp_lexer_peek_token (parser->lexer)->type == CPP_NAME)
	  {
	    cp_token *token;

	    /* Consume the identifier.  */
	    token = cp_lexer_consume_token (parser->lexer);
	    /* Save the identifier.  */
	    identifier = token->u.value;
	  }
	else
	  {
	    bool expr_non_constant_p;

	    /* Parse the next assignment-expression.  */
	    if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE))
	      {
		/* A braced-init-list.  */
		cp_lexer_set_source_position (parser->lexer);
		maybe_warn_cpp0x (CPP0X_INITIALIZER_LISTS);
		expr = cp_parser_braced_list (parser, &expr_non_constant_p);
		if (non_constant_p && expr_non_constant_p)
		  *non_constant_p = true;
	      }
	    else if (non_constant_p)
	      {
		expr = (cp_parser_constant_expression
			(parser, /*allow_non_constant_p=*/true,
			 &expr_non_constant_p));
		if (expr_non_constant_p)
		  *non_constant_p = true;
	      }
	    else
	      {
		expr = NULL_TREE;
		cp_token *tok = cp_lexer_peek_token (parser->lexer);
		switch (tok->type)
		  {
		  case CPP_NUMBER:
		  case CPP_CHAR:
		  case CPP_WCHAR:
		  case CPP_CHAR16:
		  case CPP_CHAR32:
		    /* If a parameter is literal zero alone, remember it
		       for -Wmemset-transposed-args warning.  Only do so
		       when the zero is immediately followed by `,' or
		       `)', i.e. is a whole argument by itself.  */
		    if (integer_zerop (tok->u.value)
			&& !TREE_OVERFLOW (tok->u.value)
			&& want_literal_zero_p
			&& (cp_lexer_peek_nth_token (parser->lexer, 2)->type
			    == CPP_COMMA
			    || cp_lexer_peek_nth_token (parser->lexer, 2)->type
			       == CPP_CLOSE_PAREN))
		      {
			unsigned int i;
			/* Find which standard integer type the zero has.  */
			for (i = 0; i < itk_none; ++i)
			  if (TREE_TYPE (tok->u.value) == integer_types[i])
			    break;
			/* Reuse the cached LITERAL_ZERO_P node for that
			   type if one exists; otherwise make and cache
			   one.  */
			if (i < itk_none && literal_zeros[i])
			  expr = literal_zeros[i];
			else
			  {
			    expr = copy_node (tok->u.value);
			    LITERAL_ZERO_P (expr) = 1;
			    if (i < itk_none)
			      literal_zeros[i] = expr;
			  }
			/* Consume the 0 token (or '\0', 0LL etc.).  */
			cp_lexer_consume_token (parser->lexer);
		      }
		    break;
		  default:
		    break;
		  }
		if (expr == NULL_TREE)
		  expr = cp_parser_assignment_expression (parser, /*pidk=*/NULL,
							  cast_p);
	      }

	    if (fold_expr_p)
	      expr = instantiate_non_dependent_expr (expr);

	    /* If we have an ellipsis, then this is an expression
	       expansion.  */
	    if (allow_expansion_p
		&& cp_lexer_next_token_is (parser->lexer, CPP_ELLIPSIS))
	      {
		/* Consume the `...'.  */
		cp_lexer_consume_token (parser->lexer);

		/* Build the argument pack.  */
		expr = make_pack_expansion (expr);
	      }

	    /* Add it to the list.  We add error_mark_node
	       expressions to the list, so that we can still tell if
	       the correct form for a parenthesized expression-list
	       is found.  That gives better errors.  */
	    vec_safe_push (expression_list, expr);

	    if (expr == error_mark_node)
	      goto skip_comma;
	  }

	/* After the first item, attribute lists look the same as
	   expression lists.  */
	is_attribute_list = non_attr;

      get_comma:;
	/* If the next token isn't a `,', then we are done.  */
	if (cp_lexer_next_token_is_not (parser->lexer, CPP_COMMA))
	  break;

	/* Otherwise, consume the `,' and keep going.  */
	cp_lexer_consume_token (parser->lexer);
      }

  if (!cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN))
    {
      int ending;

    skip_comma:;
      /* We try and resync to an unnested comma, as that will give the
	 user better diagnostics.  */
      ending = cp_parser_skip_to_closing_parenthesis (parser,
						      /*recovering=*/true,
						      /*or_comma=*/true,
						      /*consume_paren=*/true);
      if (ending < 0)
	goto get_comma;
      if (!ending)
	{
	  parser->greater_than_is_operator_p
	    = saved_greater_than_is_operator_p;
	  return NULL;
	}
    }

  parser->greater_than_is_operator_p
    = saved_greater_than_is_operator_p;

  /* In the id_attr case, the leading identifier goes first in the
     returned vector.  */
  if (identifier)
    vec_safe_insert (expression_list, 0, identifier);

  return expression_list;
}
/* Parse a pseudo-destructor-name.

   pseudo-destructor-name:
     :: [opt] nested-name-specifier [opt] type-name :: ~ type-name
     :: [opt] nested-name-specifier template template-id :: ~ type-name
     :: [opt] nested-name-specifier [opt] ~ type-name

   If either of the first two productions is used, sets *SCOPE to the
   TYPE specified before the final `::'.  Otherwise, *SCOPE is set to
   NULL_TREE.  *TYPE is set to the TYPE_DECL for the final type-name,
   or ERROR_MARK_NODE if the parse fails.  OBJECT is the expression the
   pseudo-destructor applies to (used only for the `~auto' case).  */

static void
cp_parser_pseudo_destructor_name (cp_parser* parser,
				  tree object,
				  tree* scope,
				  tree* type)
{
  bool nested_name_specifier_p;

  /* Handle ~auto: the destroyed type is deduced from OBJECT.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_COMPL)
      && cp_lexer_nth_token_is_keyword (parser->lexer, 2, RID_AUTO)
      && !type_dependent_expression_p (object))
    {
      if (cxx_dialect < cxx14)
	pedwarn (input_location, 0,
		 "%<~auto%> only available with "
		 "-std=c++14 or -std=gnu++14");
      /* Consume the `~' and the `auto'.  */
      cp_lexer_consume_token (parser->lexer);
      cp_lexer_consume_token (parser->lexer);
      *scope = NULL_TREE;
      *type = TREE_TYPE (object);
      return;
    }

  /* Assume that things will not work out.  */
  *type = error_mark_node;

  /* Look for the optional `::' operator.  */
  cp_parser_global_scope_opt (parser, /*current_scope_valid_p=*/true);
  /* Look for the optional nested-name-specifier.  */
  nested_name_specifier_p
    = (cp_parser_nested_name_specifier_opt (parser,
					    /*typename_keyword_p=*/false,
					    /*check_dependency_p=*/true,
					    /*type_p=*/false,
					    /*is_declaration=*/false)
       != NULL_TREE);
  /* Now, if we saw a nested-name-specifier, we might be doing the
     second production.  */
  if (nested_name_specifier_p
      && cp_lexer_next_token_is_keyword (parser->lexer, RID_TEMPLATE))
    {
      /* Consume the `template' keyword.  */
      cp_lexer_consume_token (parser->lexer);
      /* Parse the template-id.  */
      cp_parser_template_id (parser,
			     /*template_keyword_p=*/true,
			     /*check_dependency_p=*/false,
			     class_type,
			     /*is_declaration=*/true);
      /* Look for the `::' token.  */
      cp_parser_require (parser, CPP_SCOPE, RT_SCOPE);
    }
  /* If the next token is not a `~', then there might be some
     additional qualification.  */
  else if (cp_lexer_next_token_is_not (parser->lexer, CPP_COMPL))
    {
      /* At this point, we're looking for "type-name :: ~".  The type-name
	 must not be a class-name, since this is a pseudo-destructor.  So,
	 it must be either an enum-name, or a typedef-name -- both of which
	 are just identifiers.  So, we peek ahead to check that the "::"
	 and "~" tokens are present; if they are not, then we can avoid
	 calling type_name.  */
      if (cp_lexer_peek_token (parser->lexer)->type != CPP_NAME
	  || cp_lexer_peek_nth_token (parser->lexer, 2)->type != CPP_SCOPE
	  || cp_lexer_peek_nth_token (parser->lexer, 3)->type != CPP_COMPL)
	{
	  cp_parser_error (parser, "non-scalar type");
	  return;
	}

      /* Look for the type-name.  */
      *scope = TREE_TYPE (cp_parser_nonclass_name (parser));
      if (*scope == error_mark_node)
	return;

      /* Look for the `::' token.  */
      cp_parser_require (parser, CPP_SCOPE, RT_SCOPE);
    }
  else
    *scope = NULL_TREE;

  /* Look for the `~'.  */
  cp_parser_require (parser, CPP_COMPL, RT_COMPL);

  /* Once we see the ~, this has to be a pseudo-destructor.  */
  if (!processing_template_decl && !cp_parser_error_occurred (parser))
    cp_parser_commit_to_topmost_tentative_parse (parser);

  /* Look for the type-name again.  We are not responsible for
     checking that it matches the first type-name.  */
  *type = TREE_TYPE (cp_parser_nonclass_name (parser));
}
/* Parse a unary-expression.

   unary-expression:
     postfix-expression
     ++ cast-expression
     -- cast-expression
     unary-operator cast-expression
     sizeof unary-expression
     sizeof ( type-id )
     alignof ( type-id )  [C++0x]
     new-expression
     delete-expression

   GNU Extensions:

   unary-expression:
     __extension__ cast-expression
     __alignof__ unary-expression
     __alignof__ ( type-id )
     alignof unary-expression  [C++0x]
     __real__ cast-expression
     __imag__ cast-expression
     && identifier
     sizeof ( type-id ) { initializer-list , [opt] }
     alignof ( type-id ) { initializer-list , [opt] }  [C++0x]
     __alignof__ ( type-id ) { initializer-list , [opt] }

   ADDRESS_P is true iff the unary-expression is appearing as the
   operand of the `&' operator.  CAST_P is true if this expression is
   the target of a cast.  DECLTYPE_P is true if this expression is the
   immediate operand of decltype.

   Returns a representation of the expression.  */

static tree
cp_parser_unary_expression (cp_parser *parser, cp_id_kind * pidk,
			    bool address_p, bool cast_p, bool decltype_p)
{
  cp_token *token;
  enum tree_code unary_operator;

  /* Peek at the next token.  */
  token = cp_lexer_peek_token (parser->lexer);
  /* Some keywords give away the kind of expression.  */
  if (token->type == CPP_KEYWORD)
    {
      enum rid keyword = token->keyword;

      switch (keyword)
	{
	case RID_ALIGNOF:
	case RID_SIZEOF:
	  {
	    tree operand, ret;
	    enum tree_code op;
	    location_t first_loc;

	    op = keyword == RID_ALIGNOF ? ALIGNOF_EXPR : SIZEOF_EXPR;
	    /* Consume the token.  */
	    cp_lexer_consume_token (parser->lexer);
	    first_loc = cp_lexer_peek_token (parser->lexer)->location;
	    /* Parse the operand.  */
	    operand = cp_parser_sizeof_operand (parser, keyword);

	    if (TYPE_P (operand))
	      ret = cxx_sizeof_or_alignof_type (operand, op, true);
	    else
	      {
		/* ISO C++ defines alignof only with types, not with
		   expressions. So pedwarn if alignof is used with a non-
		   type expression. However, __alignof__ is ok.  */
		if (!strcmp (IDENTIFIER_POINTER (token->u.value), "alignof"))
		  pedwarn (token->location, OPT_Wpedantic,
			   "ISO C++ does not allow %<alignof%> "
			   "with a non-type");

		ret = cxx_sizeof_or_alignof_expr (operand, op, true);
	      }
	    /* For SIZEOF_EXPR, just issue diagnostics, but keep
	       SIZEOF_EXPR with the original operand.  */
	    if (op == SIZEOF_EXPR && ret != error_mark_node)
	      {
		if (TREE_CODE (ret) != SIZEOF_EXPR || TYPE_P (operand))
		  {
		    if (!processing_template_decl && TYPE_P (operand))
		      {
			/* Wrap a type operand in a NOP_EXPR so the
			   SIZEOF_EXPR records that it applied to a
			   type, not an expression.  */
			ret = build_min (SIZEOF_EXPR, size_type_node,
					 build1 (NOP_EXPR, operand,
						 error_mark_node));
			SIZEOF_EXPR_TYPE_P (ret) = 1;
		      }
		    else
		      ret = build_min (SIZEOF_EXPR, size_type_node, operand);
		    TREE_SIDE_EFFECTS (ret) = 0;
		    TREE_READONLY (ret) = 1;
		  }
		SET_EXPR_LOCATION (ret, first_loc);
	      }
	    return ret;
	  }

	case RID_NEW:
	  return cp_parser_new_expression (parser);

	case RID_DELETE:
	  return cp_parser_delete_expression (parser);

	case RID_EXTENSION:
	  {
	    /* The saved value of the PEDANTIC flag.  */
	    int saved_pedantic;
	    tree expr;

	    /* Save away the PEDANTIC flag.  */
	    cp_parser_extension_opt (parser, &saved_pedantic);
	    /* Parse the cast-expression.  */
	    expr = cp_parser_simple_cast_expression (parser);
	    /* Restore the PEDANTIC flag.  */
	    pedantic = saved_pedantic;

	    return expr;
	  }

	case RID_REALPART:
	case RID_IMAGPART:
	  {
	    tree expression;

	    /* Consume the `__real__' or `__imag__' token.  */
	    cp_lexer_consume_token (parser->lexer);
	    /* Parse the cast-expression.  */
	    expression = cp_parser_simple_cast_expression (parser);
	    /* Create the complete representation.  */
	    return build_x_unary_op (token->location,
				     (keyword == RID_REALPART
				      ? REALPART_EXPR : IMAGPART_EXPR),
				     expression,
				     tf_warning_or_error);
	  }
	  break;

	case RID_TRANSACTION_ATOMIC:
	case RID_TRANSACTION_RELAXED:
	  return cp_parser_transaction_expression (parser, keyword);

	case RID_NOEXCEPT:
	  {
	    tree expr;
	    const char *saved_message;
	    bool saved_integral_constant_expression_p;
	    bool saved_non_integral_constant_expression_p;
	    bool saved_greater_than_is_operator_p;

	    cp_lexer_consume_token (parser->lexer);
	    cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN);

	    /* Types may not be defined inside the noexcept operand.  */
	    saved_message = parser->type_definition_forbidden_message;
	    parser->type_definition_forbidden_message
	      = G_("types may not be defined in %<noexcept%> expressions");

	    saved_integral_constant_expression_p
	      = parser->integral_constant_expression_p;
	    saved_non_integral_constant_expression_p
	      = parser->non_integral_constant_expression_p;
	    parser->integral_constant_expression_p = false;

	    /* Inside the parentheses `>' is the greater-than operator.  */
	    saved_greater_than_is_operator_p
	      = parser->greater_than_is_operator_p;
	    parser->greater_than_is_operator_p = true;

	    /* The operand of noexcept is unevaluated.  */
	    ++cp_unevaluated_operand;
	    ++c_inhibit_evaluation_warnings;
	    ++cp_noexcept_operand;
	    expr = cp_parser_expression (parser);
	    --cp_noexcept_operand;
	    --c_inhibit_evaluation_warnings;
	    --cp_unevaluated_operand;

	    parser->greater_than_is_operator_p
	      = saved_greater_than_is_operator_p;

	    parser->integral_constant_expression_p
	      = saved_integral_constant_expression_p;
	    parser->non_integral_constant_expression_p
	      = saved_non_integral_constant_expression_p;

	    parser->type_definition_forbidden_message = saved_message;

	    cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);
	    return finish_noexcept_expr (expr, tf_warning_or_error);
	  }

	default:
	  break;
	}
    }

  /* Look for the `:: new' and `:: delete', which also signal the
     beginning of a new-expression, or delete-expression,
     respectively.  If the next token is `::', then it might be one of
     these.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_SCOPE))
    {
      enum rid keyword;

      /* See if the token after the `::' is one of the keywords in
	 which we're interested.  */
      keyword = cp_lexer_peek_nth_token (parser->lexer, 2)->keyword;
      /* If it's `new', we have a new-expression.  */
      if (keyword == RID_NEW)
	return cp_parser_new_expression (parser);
      /* Similarly, for `delete'.  */
      else if (keyword == RID_DELETE)
	return cp_parser_delete_expression (parser);
    }

  /* Look for a unary operator.  */
  unary_operator = cp_parser_unary_operator (token);
  /* The `++' and `--' operators can be handled similarly, even though
     they are not technically unary-operators in the grammar.  */
  if (unary_operator == ERROR_MARK)
    {
      if (token->type == CPP_PLUS_PLUS)
	unary_operator = PREINCREMENT_EXPR;
      else if (token->type == CPP_MINUS_MINUS)
	unary_operator = PREDECREMENT_EXPR;
      /* Handle the GNU address-of-label extension.  */
      else if (cp_parser_allow_gnu_extensions_p (parser)
	       && token->type == CPP_AND_AND)
	{
	  tree identifier;
	  tree expression;
	  location_t loc = token->location;

	  /* Consume the '&&' token.  */
	  cp_lexer_consume_token (parser->lexer);
	  /* Look for the identifier.  */
	  identifier = cp_parser_identifier (parser);
	  /* Create an expression representing the address.  */
	  expression = finish_label_address_expr (identifier, loc);
	  if (cp_parser_non_integral_constant_expression (parser,
							  NIC_ADDR_LABEL))
	    expression = error_mark_node;
	  return expression;
	}
    }
  if (unary_operator != ERROR_MARK)
    {
      tree cast_expression;
      tree expression = error_mark_node;
      non_integral_constant non_constant_p = NIC_NONE;
      location_t loc = token->location;
      tsubst_flags_t complain = complain_flags (decltype_p);

      /* Consume the operator token.  */
      token = cp_lexer_consume_token (parser->lexer);
      /* Parse the cast-expression.  */
      cast_expression
	= cp_parser_cast_expression (parser,
				     unary_operator == ADDR_EXPR,
				     /*cast_p=*/false,
				     /*decltype*/false,
				     pidk);
      /* Now, build an appropriate representation.  */
      switch (unary_operator)
	{
	case INDIRECT_REF:
	  non_constant_p = NIC_STAR;
	  expression = build_x_indirect_ref (loc, cast_expression,
					     RO_UNARY_STAR,
					     complain);
	  break;

	case ADDR_EXPR:
	  non_constant_p = NIC_ADDR;
	  /* Fall through.  */
	case BIT_NOT_EXPR:
	  expression = build_x_unary_op (loc, unary_operator,
					 cast_expression,
					 complain);
	  break;

	case PREINCREMENT_EXPR:
	case PREDECREMENT_EXPR:
	  non_constant_p = unary_operator == PREINCREMENT_EXPR
			   ? NIC_PREINCREMENT : NIC_PREDECREMENT;
	  /* Fall through.  */
	case UNARY_PLUS_EXPR:
	case NEGATE_EXPR:
	case TRUTH_NOT_EXPR:
	  expression = finish_unary_op_expr (loc, unary_operator,
					     cast_expression, complain);
	  break;

	default:
	  gcc_unreachable ();
	}

      /* Most of these operators may not appear in an integral
	 constant-expression; diagnose if so.  */
      if (non_constant_p != NIC_NONE
	  && cp_parser_non_integral_constant_expression (parser,
							 non_constant_p))
	expression = error_mark_node;

      return expression;
    }

  /* No unary operator: this must be a postfix-expression.  */
  return cp_parser_postfix_expression (parser, address_p, cast_p,
				       /*member_access_only_p=*/false,
				       decltype_p,
				       pidk);
}
/* Map TOKEN to the tree code of the unary operator it spells.
   Returns ERROR_MARK if TOKEN is not a unary-operator; otherwise the
   corresponding tree code (`*' -> INDIRECT_REF, `&' -> ADDR_EXPR,
   `+' -> UNARY_PLUS_EXPR, `-' -> NEGATE_EXPR, `!' -> TRUTH_NOT_EXPR,
   `~' -> BIT_NOT_EXPR).  */

static enum tree_code
cp_parser_unary_operator (cp_token* token)
{
  /* Cases are listed by the operator character, `&' `~' `-' `*' `!'
     `+'; anything else is not a unary-operator.  */
  switch (token->type)
    {
    case CPP_AND:
      return ADDR_EXPR;

    case CPP_COMPL:
      return BIT_NOT_EXPR;

    case CPP_MINUS:
      return NEGATE_EXPR;

    case CPP_MULT:
      return INDIRECT_REF;

    case CPP_NOT:
      return TRUTH_NOT_EXPR;

    case CPP_PLUS:
      return UNARY_PLUS_EXPR;

    default:
      return ERROR_MARK;
    }
}
/* Parse a new-expression.

   new-expression:
     :: [opt] new new-placement [opt] new-type-id new-initializer [opt]
     :: [opt] new new-placement [opt] ( type-id ) new-initializer [opt]

   Returns a representation of the expression.  */

static tree
cp_parser_new_expression (cp_parser* parser)
{
  bool global_scope_p;
  vec<tree, va_gc> *placement;
  tree type;
  vec<tree, va_gc> *initializer;
  tree nelts = NULL_TREE;
  tree ret;

  /* Look for the optional `::' operator.  */
  global_scope_p
    = (cp_parser_global_scope_opt (parser,
				   /*current_scope_valid_p=*/false)
       != NULL_TREE);
  /* Look for the `new' operator.  */
  cp_parser_require_keyword (parser, RID_NEW, RT_NEW);
  /* There's no easy way to tell a new-placement from the
     `( type-id )' construct.  */
  cp_parser_parse_tentatively (parser);
  /* Look for a new-placement.  */
  placement = cp_parser_new_placement (parser);
  /* If that didn't work out, there's no new-placement.  */
  if (!cp_parser_parse_definitely (parser))
    {
      if (placement != NULL)
	release_tree_vector (placement);
      placement = NULL;
    }

  /* If the next token is a `(', then we have a parenthesized
     type-id.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_PAREN))
    {
      cp_token *token;
      const char *saved_message = parser->type_definition_forbidden_message;

      /* Consume the `('.  */
      cp_lexer_consume_token (parser->lexer);

      /* Parse the type-id.  */
      parser->type_definition_forbidden_message
	= G_("types may not be defined in a new-expression");
      type = cp_parser_type_id (parser);
      parser->type_definition_forbidden_message = saved_message;

      /* Look for the closing `)'.  */
      cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);
      token = cp_lexer_peek_token (parser->lexer);
      /* There should not be a direct-new-declarator in this production,
	 but GCC used to allow this, so we check and emit a sensible error
	 message for this case.  */
      if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_SQUARE))
	{
	  error_at (token->location,
		    "array bound forbidden after parenthesized type-id");
	  inform (token->location,
		  "try removing the parentheses around the type-id");
	  cp_parser_direct_new_declarator (parser);
	}
    }
  /* Otherwise, there must be a new-type-id.  */
  else
    type = cp_parser_new_type_id (parser, &nelts);

  /* If the next token is a `(' or '{', then we have a new-initializer.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_PAREN)
      || cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE))
    initializer = cp_parser_new_initializer (parser);
  else
    initializer = NULL;

  /* A new-expression may not appear in an integral constant
     expression.  */
  if (cp_parser_non_integral_constant_expression (parser, NIC_NEW))
    ret = error_mark_node;
  else
    {
      /* Create a representation of the new-expression.  */
      ret = build_new (&placement, type, nelts, &initializer, global_scope_p,
		       tf_warning_or_error);
    }

  /* Return the borrowed vectors to the pool.  */
  if (placement != NULL)
    release_tree_vector (placement);
  if (initializer != NULL)
    release_tree_vector (initializer);

  return ret;
}
/* Parse a new-placement.

   new-placement:
     ( expression-list )

   Returns the same representation as for an expression-list.  */

static vec<tree, va_gc> *
cp_parser_new_placement (cp_parser* parser)
{
  /* A new-placement is syntactically just a parenthesized
     expression-list; argument-pack expansion is permitted among the
     placement arguments.  */
  return cp_parser_parenthesized_expression_list
	   (parser, non_attr, /*cast_p=*/false,
	    /*allow_expansion_p=*/true,
	    /*non_constant_p=*/NULL);
}
/* Parse a new-type-id.

   new-type-id:
     type-specifier-seq new-declarator [opt]

   Returns the TYPE allocated.  If the new-type-id indicates an array
   type, *NELTS is set to the number of elements in the last array
   bound; the TYPE will not include the last array bound.  */

static tree
cp_parser_new_type_id (cp_parser* parser, tree *nelts)
{
  cp_decl_specifier_seq type_specifier_seq;
  cp_declarator *new_declarator;
  cp_declarator *declarator;
  cp_declarator *outer_declarator;
  const char *saved_message;

  /* The type-specifier sequence must not contain type definitions.
     (It cannot contain declarations of new types either, but if they
     are not definitions we will catch that because they are not
     complete.)  */
  saved_message = parser->type_definition_forbidden_message;
  parser->type_definition_forbidden_message
    = G_("types may not be defined in a new-type-id");
  /* Parse the type-specifier-seq.  */
  cp_parser_type_specifier_seq (parser, /*is_declaration=*/false,
				/*is_trailing_return=*/false,
				&type_specifier_seq);
  /* Restore the old message.  */
  parser->type_definition_forbidden_message = saved_message;

  if (type_specifier_seq.type == error_mark_node)
    return error_mark_node;

  /* Parse the new-declarator.  */
  new_declarator = cp_parser_new_declarator_opt (parser);

  /* Determine the number of elements in the last array dimension, if
     any.  */
  *nelts = NULL_TREE;
  /* Skip down to the last array dimension.  First skip any leading
     pointer/pointer-to-member declarators...  */
  declarator = new_declarator;
  outer_declarator = NULL;
  while (declarator && (declarator->kind == cdk_pointer
			|| declarator->kind == cdk_ptrmem))
    {
      outer_declarator = declarator;
      declarator = declarator->declarator;
    }
  /* ...then walk to the outermost array dimension whose inner
     declarator is not itself an array.  */
  while (declarator
	 && declarator->kind == cdk_array
	 && declarator->declarator
	 && declarator->declarator->kind == cdk_array)
    {
      outer_declarator = declarator;
      declarator = declarator->declarator;
    }

  if (declarator && declarator->kind == cdk_array)
    {
      *nelts = declarator->u.array.bounds;
      if (*nelts == error_mark_node)
	*nelts = integer_one_node;

      /* Detach that last bound from the declarator chain: it is
	 reported via *NELTS rather than being part of the type.  */
      if (outer_declarator)
	outer_declarator->declarator = declarator->declarator;
      else
	new_declarator = NULL;
    }

  return groktypename (&type_specifier_seq, new_declarator, false);
}
/* Parse an (optional) new-declarator.

   new-declarator:
     ptr-operator new-declarator [opt]
     direct-new-declarator

   Returns the declarator, or NULL if no new-declarator is present.  */

static cp_declarator *
cp_parser_new_declarator_opt (cp_parser* parser)
{
  enum tree_code code;
  tree type, std_attributes = NULL_TREE;
  cp_cv_quals cv_quals;

  /* We don't know if there's a ptr-operator next, or not.  */
  cp_parser_parse_tentatively (parser);
  /* Look for a ptr-operator.  */
  code = cp_parser_ptr_operator (parser, &type, &cv_quals, &std_attributes);
  /* If that worked, look for more new-declarators.  */
  if (cp_parser_parse_definitely (parser))
    {
      cp_declarator *declarator;

      /* Parse another optional declarator (recursively), then wrap it
	 with the ptr-operator we just saw.  */
      declarator = cp_parser_new_declarator_opt (parser);

      declarator = cp_parser_make_indirect_declarator
	(code, type, cv_quals, declarator, std_attributes);

      return declarator;
    }

  /* If the next token is a `[', there is a direct-new-declarator.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_SQUARE))
    return cp_parser_direct_new_declarator (parser);

  return NULL;
}
/* Parse a direct-new-declarator.

   direct-new-declarator:
     [ expression ]
     direct-new-declarator [constant-expression]

   Returns the declarator: one cdk_array level per `[...]' pair.  */

static cp_declarator *
cp_parser_direct_new_declarator (cp_parser* parser)
{
  cp_declarator *declarator = NULL;

  while (true)
    {
      tree expression;
      cp_token *token;

      /* Look for the opening `['.  */
      cp_parser_require (parser, CPP_OPEN_SQUARE, RT_OPEN_SQUARE);

      token = cp_lexer_peek_token (parser->lexer);
      expression = cp_parser_expression (parser);
      /* The standard requires that the expression have integral
	 type.  DR 74 adds enumeration types.  We believe that the
	 real intent is that these expressions be handled like the
	 expression in a `switch' condition, which also allows
	 classes with a single conversion to integral or
	 enumeration type.  */
      if (!processing_template_decl)
	{
	  expression
	    = build_expr_type_conversion (WANT_INT | WANT_ENUM,
					  expression,
					  /*complain=*/true);
	  if (!expression)
	    {
	      error_at (token->location,
			"expression in new-declarator must have integral "
			"or enumeration type");
	      expression = error_mark_node;
	    }
	}

      /* Look for the closing `]'.  */
      cp_parser_require (parser, CPP_CLOSE_SQUARE, RT_CLOSE_SQUARE);

      /* Add this bound to the declarator.  */
      declarator = make_array_declarator (declarator, expression);

      /* If the next token is not a `[', then there are no more
	 bounds.  */
      if (cp_lexer_next_token_is_not (parser->lexer, CPP_OPEN_SQUARE))
	break;
    }

  return declarator;
}
/* Parse a new-initializer.

   new-initializer:
     ( expression-list [opt] )
     braced-init-list

   Returns a representation of the expression-list.  A braced-init-list
   is returned as a single-element vector holding the CONSTRUCTOR.  */

static vec<tree, va_gc> *
cp_parser_new_initializer (cp_parser* parser)
{
  vec<tree, va_gc> *expression_list;

  if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE))
    {
      tree t;
      bool expr_non_constant_p;
      cp_lexer_set_source_position (parser->lexer);
      maybe_warn_cpp0x (CPP0X_INITIALIZER_LISTS);
      t = cp_parser_braced_list (parser, &expr_non_constant_p);
      /* A new-initializer performs direct-initialization.  */
      CONSTRUCTOR_IS_DIRECT_INIT (t) = 1;
      expression_list = make_tree_vector_single (t);
    }
  else
    expression_list = (cp_parser_parenthesized_expression_list
		       (parser, non_attr, /*cast_p=*/false,
			/*allow_expansion_p=*/true,
			/*non_constant_p=*/NULL));

  return expression_list;
}
/* Parse a delete-expression.

   delete-expression:
     :: [opt] delete cast-expression
     :: [opt] delete [ ] cast-expression

   Returns a representation of the expression.  */

static tree
cp_parser_delete_expression (cp_parser* parser)
{
  bool global_scope_p;
  bool array_p;
  tree expression;

  /* Look for the optional `::' operator.  */
  global_scope_p
    = (cp_parser_global_scope_opt (parser,
				   /*current_scope_valid_p=*/false)
       != NULL_TREE);
  /* Look for the `delete' keyword.  */
  cp_parser_require_keyword (parser, RID_DELETE, RT_DELETE);
  /* See if the array syntax is in use.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_SQUARE))
    {
      /* Consume the `[' token.  */
      cp_lexer_consume_token (parser->lexer);
      /* Look for the `]' token.  */
      cp_parser_require (parser, CPP_CLOSE_SQUARE, RT_CLOSE_SQUARE);
      /* Remember that this is the `[]' construct.  */
      array_p = true;
    }
  else
    array_p = false;

  /* Parse the cast-expression.  */
  expression = cp_parser_simple_cast_expression (parser);

  /* A delete-expression may not appear in an integral constant
     expression.  */
  if (cp_parser_non_integral_constant_expression (parser, NIC_DEL))
    return error_mark_node;

  return delete_sanity (expression, NULL_TREE, array_p, global_scope_p,
			tf_warning_or_error);
}
/* Returns 1 if the next token may start a cast-expression and isn't
   '++', '--', nor '[' in C++11; -1 if it is '++', '--', or '[' in
   C++11 (i.e. "maybe" -- the caller should try the cast tentatively
   without committing); 0 otherwise (cannot start a cast-expression).
   Used by cp_parser_cast_expression to decide, after skipping a
   parenthesized group, whether `( ... )' could have been a cast.  */

static int
cp_parser_tokens_start_cast_expression (cp_parser *parser)
{
  cp_token *token = cp_lexer_peek_token (parser->lexer);
  switch (token->type)
    {
    /* Terminators and binary/assignment operators can never begin a
       cast-expression.  */
    case CPP_COMMA:
    case CPP_SEMICOLON:
    case CPP_QUERY:
    case CPP_COLON:
    case CPP_CLOSE_SQUARE:
    case CPP_CLOSE_PAREN:
    case CPP_CLOSE_BRACE:
    case CPP_OPEN_BRACE:
    case CPP_DOT:
    case CPP_DOT_STAR:
    case CPP_DEREF:
    case CPP_DEREF_STAR:
    case CPP_DIV:
    case CPP_MOD:
    case CPP_LSHIFT:
    case CPP_RSHIFT:
    case CPP_LESS:
    case CPP_GREATER:
    case CPP_LESS_EQ:
    case CPP_GREATER_EQ:
    case CPP_EQ_EQ:
    case CPP_NOT_EQ:
    case CPP_EQ:
    case CPP_MULT_EQ:
    case CPP_DIV_EQ:
    case CPP_MOD_EQ:
    case CPP_PLUS_EQ:
    case CPP_MINUS_EQ:
    case CPP_RSHIFT_EQ:
    case CPP_LSHIFT_EQ:
    case CPP_AND_EQ:
    case CPP_XOR_EQ:
    case CPP_OR_EQ:
    case CPP_XOR:
    case CPP_OR:
    case CPP_OR_OR:
    case CPP_EOF:
    case CPP_ELLIPSIS:
      return 0;

    case CPP_OPEN_PAREN:
      /* In ((type ()) () the last () isn't a valid cast-expression,
	 so the whole must be parsed as postfix-expression.  */
      return cp_lexer_peek_nth_token (parser->lexer, 2)->type
	     != CPP_CLOSE_PAREN;

    case CPP_OPEN_SQUARE:
      /* '[' may start a primary-expression in obj-c++ and in C++11,
	 as a lambda-expression, eg, '(void)[]{}'.  */
      if (cxx_dialect >= cxx11)
	return -1;
      return c_dialect_objc ();

    case CPP_PLUS_PLUS:
    case CPP_MINUS_MINUS:
      /* '++' and '--' may or may not start a cast-expression:

	 struct T { void operator++(int); };
	 void f() { (T())++; }

	 vs

	 int a;
	 (int)++a;  */
      return -1;

    default:
      /* Everything else (identifiers, literals, unary operators, ...)
	 may start a cast-expression.  */
      return 1;
    }
}
/* Parse a cast-expression.

   cast-expression:
     unary-expression
     ( type-id ) cast-expression

   ADDRESS_P is true iff the unary-expression is appearing as the
   operand of the `&' operator.  CAST_P is true if this expression is
   the target of a cast.  DECLTYPE_P is true if this expression is the
   immediate operand of decltype.

   Returns a representation of the expression.  */

static tree
cp_parser_cast_expression (cp_parser *parser, bool address_p, bool cast_p,
			   bool decltype_p, cp_id_kind * pidk)
{
  /* If it's a `(', then we might be looking at a cast.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_PAREN))
    {
      tree type = NULL_TREE;
      tree expr = NULL_TREE;
      /* 1 = definitely a cast, -1 = maybe (don't commit), 0 = not.  */
      int cast_expression = 0;
      const char *saved_message;

      /* There's no way to know yet whether or not this is a cast.
	 For example, `(int (3))' is a unary-expression, while `(int)
	 3' is a cast.  So, we resort to parsing tentatively.  */
      cp_parser_parse_tentatively (parser);
      /* Types may not be defined in a cast.  */
      saved_message = parser->type_definition_forbidden_message;
      parser->type_definition_forbidden_message
	= G_("types may not be defined in casts");
      /* Consume the `('.  */
      cp_lexer_consume_token (parser->lexer);
      /* A very tricky bit is that `(struct S) { 3 }' is a
	 compound-literal (which we permit in C++ as an extension).
	 But, that construct is not a cast-expression -- it is a
	 postfix-expression.  (The reason is that `(struct S) { 3 }.i'
	 is legal; if the compound-literal were a cast-expression,
	 you'd need an extra set of parentheses.)  But, if we parse
	 the type-id, and it happens to be a class-specifier, then we
	 will commit to the parse at that point, because we cannot
	 undo the action that is done when creating a new class.  So,
	 then we cannot back up and do a postfix-expression.

	 Another tricky case is the following (c++/29234):

	 struct S { void operator () (); };

	 void foo ()
	 {
	   ( S()() );
	 }

	 As a type-id we parse the parenthesized S()() as a function
	 returning a function, groktypename complains and we cannot
	 back up in this case either.

	 Therefore, we scan ahead to the closing `)', and check to see
	 if the tokens after the `)' can start a cast-expression.  Otherwise
	 we are dealing with an unary-expression, a postfix-expression
	 or something else.

	 Yet another tricky case, in C++11, is the following (c++/54891):

	 (void)[]{};

	 The issue is that usually, besides the case of lambda-expressions,
	 the parenthesized type-id cannot be followed by '[', and, eg, we
	 want to parse '(C ())[2];' in parse/pr26997.C as unary-expression.
	 Thus, if cp_parser_tokens_start_cast_expression returns -1, below
	 we don't commit, we try a cast-expression, then an unary-expression.

	 Save tokens so that we can put them back.  */
      cp_lexer_save_tokens (parser->lexer);

      /* We may be looking at a cast-expression.  */
      if (cp_parser_skip_to_closing_parenthesis (parser, false, false,
						 /*consume_paren=*/true))
	cast_expression
	  = cp_parser_tokens_start_cast_expression (parser);

      /* Roll back the tokens we skipped.  */
      cp_lexer_rollback_tokens (parser->lexer);
      /* If we aren't looking at a cast-expression, simulate an error so
	 that the call to cp_parser_error_occurred below returns true.  */
      if (!cast_expression)
	cp_parser_simulate_error (parser);
      else
	{
	  bool saved_in_type_id_in_expr_p = parser->in_type_id_in_expr_p;
	  parser->in_type_id_in_expr_p = true;
	  /* Look for the type-id.  */
	  type = cp_parser_type_id (parser);
	  /* Look for the closing `)'.  */
	  cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);
	  parser->in_type_id_in_expr_p = saved_in_type_id_in_expr_p;
	}

      /* Restore the saved message.  */
      parser->type_definition_forbidden_message = saved_message;

      /* At this point this can only be either a cast or a
	 parenthesized ctor such as `(T ())' that looks like a cast to
	 function returning T.  */
      if (!cp_parser_error_occurred (parser))
	{
	  /* Only commit if the cast-expression doesn't start with
	     '++', '--', or '[' in C++11.  */
	  if (cast_expression > 0)
	    cp_parser_commit_to_topmost_tentative_parse (parser);

	  expr = cp_parser_cast_expression (parser,
					    /*address_p=*/false,
					    /*cast_p=*/true,
					    /*decltype_p=*/false,
					    pidk);

	  if (cp_parser_parse_definitely (parser))
	    {
	      /* Warn about old-style casts, if so requested.  */
	      if (warn_old_style_cast
		  && !in_system_header_at (input_location)
		  && !VOID_TYPE_P (type)
		  && current_lang_name != lang_name_c)
		warning (OPT_Wold_style_cast, "use of old-style cast");

	      /* Only type conversions to integral or enumeration types
		 can be used in constant-expressions.  */
	      if (!cast_valid_in_integral_constant_expression_p (type)
		  && cp_parser_non_integral_constant_expression (parser,
								 NIC_CAST))
		return error_mark_node;

	      /* Perform the cast.  */
	      expr = build_c_cast (input_location, type, expr);
	      return expr;
	    }
	}
      else
	cp_parser_abort_tentative_parse (parser);
    }

  /* If we get here, then it's not a cast, so it must be a
     unary-expression.  */
  return cp_parser_unary_expression (parser, pidk, address_p,
				     cast_p, decltype_p);
}
/* Parse a binary expression of the general form:
pm-expression:
cast-expression
pm-expression .* cast-expression
pm-expression ->* cast-expression
multiplicative-expression:
pm-expression
multiplicative-expression * pm-expression
multiplicative-expression / pm-expression
multiplicative-expression % pm-expression
additive-expression:
multiplicative-expression
additive-expression + multiplicative-expression
additive-expression - multiplicative-expression
shift-expression:
additive-expression
shift-expression << additive-expression
shift-expression >> additive-expression
relational-expression:
shift-expression
relational-expression < shift-expression
relational-expression > shift-expression
relational-expression <= shift-expression
relational-expression >= shift-expression
GNU Extension:
relational-expression:
relational-expression <? shift-expression
relational-expression >? shift-expression
equality-expression:
relational-expression
equality-expression == relational-expression
equality-expression != relational-expression
and-expression:
equality-expression
and-expression & equality-expression
exclusive-or-expression:
and-expression
exclusive-or-expression ^ and-expression
inclusive-or-expression:
exclusive-or-expression
inclusive-or-expression | exclusive-or-expression
logical-and-expression:
inclusive-or-expression
logical-and-expression && inclusive-or-expression
logical-or-expression:
logical-and-expression
logical-or-expression || logical-and-expression
All these are implemented with a single function like:
binary-expression:
simple-cast-expression
binary-expression <token> binary-expression
CAST_P is true if this expression is the target of a cast.
The binops_by_token map is used to get the tree codes for each <token> type.
binary-expressions are associated according to a precedence table. */
/* Compute the infix-operator precedence of TOKEN.  Yields
   PREC_NOT_OPERATOR when `>' -- or, in C++11 and later, `>>' -- must be
   read as a closing angle bracket of a template-argument-list rather
   than as an operator (i.e. when greater_than_is_operator_p is clear).
   Relies on a variable `parser' being in scope at the expansion site.  */
#define TOKEN_PRECEDENCE(token)				     \
  (((token->type == CPP_GREATER				     \
     || ((cxx_dialect != cxx98) && token->type == CPP_RSHIFT)) \
    && !parser->greater_than_is_operator_p)		     \
   ? PREC_NOT_OPERATOR					     \
   : binops_by_token[token->type].prec)
static tree
cp_parser_binary_expression (cp_parser* parser, bool cast_p,
			     bool no_toplevel_fold_p,
			     bool decltype_p,
			     enum cp_parser_prec prec,
			     cp_id_kind * pidk)
{
  /* Explicit stack of suspended subexpressions; one entry per strictly
     increasing precedence level, so it replaces a chain of recursive
     calls and cannot grow past the number of precedence levels.  */
  cp_parser_expression_stack stack;
  cp_parser_expression_stack_entry *sp = &stack[0];
  cp_parser_expression_stack_entry current;
  tree rhs;
  cp_token *token;
  enum tree_code rhs_type;
  enum cp_parser_prec new_prec, lookahead_prec;
  tree overload;

  /* Parse the first expression.  */
  /* Note whether the operand starts with `!', for the
     -Wlogical-not-parentheses check further down.  */
  current.lhs_type = (cp_lexer_next_token_is (parser->lexer, CPP_NOT)
		      ? TRUTH_NOT_EXPR : ERROR_MARK);
  current.lhs = cp_parser_cast_expression (parser, /*address_p=*/false,
					   cast_p, decltype_p, pidk);
  current.prec = prec;

  if (cp_parser_error_occurred (parser))
    return error_mark_node;

  for (;;)
    {
      /* Get an operator token.  */
      token = cp_lexer_peek_token (parser->lexer);

      if (warn_cxx0x_compat
	  && token->type == CPP_RSHIFT
	  && !parser->greater_than_is_operator_p)
	{
	  if (warning_at (token->location, OPT_Wc__0x_compat,
			  "%<>>%> operator is treated"
			  " as two right angle brackets in C++11"))
	    inform (token->location,
		    "suggest parentheses around %<>>%> expression");
	}

      new_prec = TOKEN_PRECEDENCE (token);

      /* Popping an entry off the stack means we completed a subexpression:
	 - either we found a token which is not an operator (`>' where it is not
	   an operator, or prec == PREC_NOT_OPERATOR), in which case popping
	   will happen repeatedly;
	 - or, we found an operator which has lower priority.  This is the case
	   where the recursive descent *ascends*, as in `3 * 4 + 5' after
	   parsing `3 * 4'.  */
      if (new_prec <= current.prec)
	{
	  if (sp == stack)
	    break;
	  else
	    goto pop;
	}

     get_rhs:
      current.tree_type = binops_by_token[token->type].tree_type;
      current.loc = token->location;

      /* We used the operator token.  */
      cp_lexer_consume_token (parser->lexer);

      /* For "false && x" or "true || x", x will never be executed;
	 disable warnings while evaluating it.  The matching decrement
	 is performed after the pop: path rejoins, below.  */
      if (current.tree_type == TRUTH_ANDIF_EXPR)
	c_inhibit_evaluation_warnings += current.lhs == truthvalue_false_node;
      else if (current.tree_type == TRUTH_ORIF_EXPR)
	c_inhibit_evaluation_warnings += current.lhs == truthvalue_true_node;

      /* Extract another operand.  It may be the RHS of this expression
	 or the LHS of a new, higher priority expression.  */
      rhs_type = (cp_lexer_next_token_is (parser->lexer, CPP_NOT)
		  ? TRUTH_NOT_EXPR : ERROR_MARK);
      rhs = cp_parser_simple_cast_expression (parser);

      /* Get another operator token.  Look up its precedence to avoid
	 building a useless (immediately popped) stack entry for common
	 cases such as 3 + 4 + 5 or 3 * 4 + 5.  */
      token = cp_lexer_peek_token (parser->lexer);
      lookahead_prec = TOKEN_PRECEDENCE (token);
      if (lookahead_prec > new_prec)
	{
	  /* ... and prepare to parse the RHS of the new, higher priority
	     expression.  Since precedence levels on the stack are
	     monotonically increasing, we do not have to care about
	     stack overflows.  */
	  *sp = current;
	  ++sp;
	  current.lhs = rhs;
	  current.lhs_type = rhs_type;
	  current.prec = new_prec;
	  new_prec = lookahead_prec;
	  goto get_rhs;

	 pop:
	  lookahead_prec = new_prec;
	  /* If the stack is not empty, we have parsed into LHS the right side
	     (`4' in the example above) of an expression we had suspended.
	     We can use the information on the stack to recover the LHS (`3')
	     from the stack together with the tree code (`MULT_EXPR'), and
	     the precedence of the higher level subexpression
	     (`PREC_ADDITIVE_EXPRESSION').  TOKEN is the CPP_PLUS token,
	     which will be used to actually build the additive expression.  */
	  rhs = current.lhs;
	  rhs_type = current.lhs_type;
	  --sp;
	  current = *sp;
	}

      /* Undo the disabling of warnings done above.  */
      if (current.tree_type == TRUTH_ANDIF_EXPR)
	c_inhibit_evaluation_warnings -= current.lhs == truthvalue_false_node;
      else if (current.tree_type == TRUTH_ORIF_EXPR)
	c_inhibit_evaluation_warnings -= current.lhs == truthvalue_true_node;

      /* Warn about `!x == y' and friends, excluding the forms noted in
	 the embedded comments below that are idiomatic with booleans.  */
      if (warn_logical_not_paren
	  && TREE_CODE_CLASS (current.tree_type) == tcc_comparison
	  && current.lhs_type == TRUTH_NOT_EXPR
	  /* Avoid warning for !!x == y.  */
	  && (TREE_CODE (current.lhs) != NE_EXPR
	      || !integer_zerop (TREE_OPERAND (current.lhs, 1)))
	  && (TREE_CODE (current.lhs) != TRUTH_NOT_EXPR
	      || (TREE_CODE (TREE_OPERAND (current.lhs, 0)) != TRUTH_NOT_EXPR
		  /* Avoid warning for !b == y where b is boolean.  */
		  && (TREE_TYPE (TREE_OPERAND (current.lhs, 0)) == NULL_TREE
		      || (TREE_CODE (TREE_TYPE (TREE_OPERAND (current.lhs, 0)))
			  != BOOLEAN_TYPE))))
	  /* Avoid warning for !!b == y where b is boolean.  */
	  && (!DECL_P (current.lhs)
	      || TREE_TYPE (current.lhs) == NULL_TREE
	      || TREE_CODE (TREE_TYPE (current.lhs)) != BOOLEAN_TYPE))
	warn_logical_not_parentheses (current.loc, current.tree_type,
				      maybe_constant_value (rhs));

      overload = NULL;
      /* ??? Currently we pass lhs_type == ERROR_MARK and rhs_type ==
	 ERROR_MARK for everything that is not a binary expression.
	 This makes warn_about_parentheses miss some warnings that
	 involve unary operators.  For unary expressions we should
	 pass the correct tree_code unless the unary expression was
	 surrounded by parentheses.
      */
      /* When NO_TOPLEVEL_FOLD_P and this is the outermost operation,
	 build a raw tree node instead of going through
	 build_x_binary_op, so the caller sees the unfolded form.  */
      if (no_toplevel_fold_p
	  && lookahead_prec <= current.prec
	  && sp == stack)
	current.lhs = build2 (current.tree_type,
			      TREE_CODE_CLASS (current.tree_type)
			      == tcc_comparison
			      ? boolean_type_node : TREE_TYPE (current.lhs),
			      current.lhs, rhs);
      else
	current.lhs = build_x_binary_op (current.loc, current.tree_type,
					 current.lhs, current.lhs_type,
					 rhs, rhs_type, &overload,
					 complain_flags (decltype_p));
      current.lhs_type = current.tree_type;
      if (EXPR_P (current.lhs))
	SET_EXPR_LOCATION (current.lhs, current.loc);

      /* If the binary operator required the use of an overloaded operator,
	 then this expression cannot be an integral constant-expression.
	 An overloaded operator can be used even if both operands are
	 otherwise permissible in an integral constant-expression if at
	 least one of the operands is of enumeration type.  */
      if (overload
	  && cp_parser_non_integral_constant_expression (parser,
							 NIC_OVERLOADED))
	return error_mark_node;
    }
  return current.lhs;
}
/* Convenience overload: parse a binary expression with DECLTYPE_P
   defaulted to false; simply forwards to the full variant above.  */
static tree
cp_parser_binary_expression (cp_parser* parser, bool cast_p,
			     bool no_toplevel_fold_p,
			     enum cp_parser_prec prec,
			     cp_id_kind * pidk)
{
  return cp_parser_binary_expression (parser, cast_p, no_toplevel_fold_p,
				      /*decltype*/false, prec, pidk);
}
/* Parse the `? expression : assignment-expression' part of a
conditional-expression. The LOGICAL_OR_EXPR is the
logical-or-expression that started the conditional-expression.
Returns a representation of the entire conditional-expression.
This routine is used by cp_parser_assignment_expression.
? expression : assignment-expression
GNU Extensions:
? : assignment-expression */
static tree
cp_parser_question_colon_clause (cp_parser* parser, tree logical_or_expr)
{
  tree expr;
  tree assignment_expr;
  struct cp_token *token;
  location_t loc = cp_lexer_peek_token (parser->lexer)->location;

  /* Consume the `?' token.  */
  cp_lexer_consume_token (parser->lexer);
  token = cp_lexer_peek_token (parser->lexer);
  if (cp_parser_allow_gnu_extensions_p (parser)
      && token->type == CPP_COLON)
    {
      /* GNU `x ?: y' -- the middle operand is omitted.  */
      pedwarn (token->location, OPT_Wpedantic,
	       "ISO C++ does not allow ?: with omitted middle operand");
      /* Implicit true clause.  */
      expr = NULL_TREE;
      /* If the condition is known true, the third operand is dead code;
	 suppress warnings in it (balanced by the `-=' below).  */
      c_inhibit_evaluation_warnings += logical_or_expr == truthvalue_true_node;
      warn_for_omitted_condop (token->location, logical_or_expr);
    }
  else
    {
      /* A `:' inside the middle operand belongs to `?:', never to a
	 scope correction; disable that fixit while parsing it.  */
      bool saved_colon_corrects_to_scope_p = parser->colon_corrects_to_scope_p;
      parser->colon_corrects_to_scope_p = false;
      /* Parse the expression.  The middle operand is dead when the
	 condition is known false.  */
      c_inhibit_evaluation_warnings += logical_or_expr == truthvalue_false_node;
      expr = cp_parser_expression (parser);
      /* Switch the suppression from the middle operand (known-false
	 condition) to the third operand (known-true condition).  */
      c_inhibit_evaluation_warnings +=
	((logical_or_expr == truthvalue_true_node)
	 - (logical_or_expr == truthvalue_false_node));
      parser->colon_corrects_to_scope_p = saved_colon_corrects_to_scope_p;
    }

  /* The next token should be a `:'.  */
  cp_parser_require (parser, CPP_COLON, RT_COLON);
  /* Parse the assignment-expression.  */
  assignment_expr = cp_parser_assignment_expression (parser);
  /* Balance the increment applied while the third operand was dead.  */
  c_inhibit_evaluation_warnings -= logical_or_expr == truthvalue_true_node;

  /* Build the conditional-expression.  */
  return build_x_conditional_expr (loc, logical_or_expr,
				   expr,
				   assignment_expr,
				   tf_warning_or_error);
}
/* Parse an assignment-expression.
assignment-expression:
conditional-expression
logical-or-expression assignment-operator assignment_expression
throw-expression
CAST_P is true if this expression is the target of a cast.
DECLTYPE_P is true if this expression is the operand of decltype.
Returns a representation for the expression. */
static tree
cp_parser_assignment_expression (cp_parser* parser, cp_id_kind * pidk,
				 bool cast_p, bool decltype_p)
{
  tree expr;

  /* If the next token is the `throw' keyword, then we're looking at
     a throw-expression.  */
  if (cp_lexer_next_token_is_keyword (parser->lexer, RID_THROW))
    expr = cp_parser_throw_expression (parser);
  /* Otherwise, it must be that we are looking at a
     logical-or-expression.  */
  else
    {
      /* Parse the binary expressions (logical-or-expression).  */
      expr = cp_parser_binary_expression (parser, cast_p, false,
					  decltype_p,
					  PREC_NOT_OPERATOR, pidk);
      /* If the next token is a `?' then we're actually looking at a
	 conditional-expression.  */
      if (cp_lexer_next_token_is (parser->lexer, CPP_QUERY))
	return cp_parser_question_colon_clause (parser, expr);
      else
	{
	  location_t loc = cp_lexer_peek_token (parser->lexer)->location;

	  /* If it's an assignment-operator, we're using the second
	     production.  */
	  enum tree_code assignment_operator
	    = cp_parser_assignment_operator_opt (parser);
	  if (assignment_operator != ERROR_MARK)
	    {
	      bool non_constant_p;
	      location_t saved_input_location;

	      /* Parse the right-hand side of the assignment.  A
		 braced-init-list is also accepted here (C++11).  */
	      tree rhs = cp_parser_initializer_clause (parser, &non_constant_p);

	      if (BRACE_ENCLOSED_INITIALIZER_P (rhs))
		maybe_warn_cpp0x (CPP0X_INITIALIZER_LISTS);

	      /* An assignment may not appear in a
		 constant-expression.  */
	      if (cp_parser_non_integral_constant_expression (parser,
							      NIC_ASSIGNMENT))
		return error_mark_node;
	      /* Build the assignment expression.  Its default
		 location is the location of the '=' token.  */
	      saved_input_location = input_location;
	      input_location = loc;
	      expr = build_x_modify_expr (loc, expr,
					  assignment_operator,
					  rhs,
					  complain_flags (decltype_p));
	      input_location = saved_input_location;
	    }
	}
    }

  return expr;
}
/* Parse an (optional) assignment-operator.
assignment-operator: one of
= *= /= %= += -= >>= <<= &= ^= |=
GNU Extension:
assignment-operator: one of
<?= >?=
If the next token is an assignment operator, the corresponding tree
code is returned, and the token is consumed. For example, for
`+=', PLUS_EXPR is returned. For `=' itself, the code returned is
NOP_EXPR. For `/', TRUNC_DIV_EXPR is returned; for `%',
TRUNC_MOD_EXPR is returned. If TOKEN is not an assignment
operator, ERROR_MARK is returned. */
static enum tree_code
cp_parser_assignment_operator_opt (cp_parser* parser)
{
  /* Map the upcoming token directly onto the corresponding tree code;
     ERROR_MARK signals that it is not an assignment operator.  */
  cp_token *token = cp_lexer_peek_token (parser->lexer);
  enum tree_code op = ERROR_MARK;

  switch (token->type)
    {
    case CPP_EQ:        op = NOP_EXPR;        break;
    case CPP_MULT_EQ:   op = MULT_EXPR;       break;
    case CPP_DIV_EQ:    op = TRUNC_DIV_EXPR;  break;
    case CPP_MOD_EQ:    op = TRUNC_MOD_EXPR;  break;
    case CPP_PLUS_EQ:   op = PLUS_EXPR;       break;
    case CPP_MINUS_EQ:  op = MINUS_EXPR;      break;
    case CPP_RSHIFT_EQ: op = RSHIFT_EXPR;     break;
    case CPP_LSHIFT_EQ: op = LSHIFT_EXPR;     break;
    case CPP_AND_EQ:    op = BIT_AND_EXPR;    break;
    case CPP_XOR_EQ:    op = BIT_XOR_EXPR;    break;
    case CPP_OR_EQ:     op = BIT_IOR_EXPR;    break;
    default:
      /* Nothing else is an assignment operator.  */
      break;
    }

  /* Accept the token only when it really was an assignment operator;
     otherwise leave it for the caller to examine.  */
  if (op != ERROR_MARK)
    cp_lexer_consume_token (parser->lexer);

  return op;
}
/* Parse an expression.
expression:
assignment-expression
expression , assignment-expression
CAST_P is true if this expression is the target of a cast.
DECLTYPE_P is true if this expression is the immediate operand of decltype,
except possibly parenthesized or on the RHS of a comma (N3276).
Returns a representation of the expression. */
static tree
cp_parser_expression (cp_parser* parser, cp_id_kind * pidk,
		      bool cast_p, bool decltype_p)
{
  tree expression = NULL_TREE;
  location_t loc = UNKNOWN_LOCATION;

  /* Loop over comma-separated assignment-expressions, folding them
     into a left-associated compound-expression.  */
  while (true)
    {
      tree assignment_expression;

      /* Parse the next assignment-expression.  */
      assignment_expression
	= cp_parser_assignment_expression (parser, pidk, cast_p, decltype_p);

      /* We don't create a temporary for a call that is the immediate operand
	 of decltype or on the RHS of a comma.  But when we see a comma, we
	 need to create a temporary for a call on the LHS.  */
      if (decltype_p && !processing_template_decl
	  && TREE_CODE (assignment_expression) == CALL_EXPR
	  && CLASS_TYPE_P (TREE_TYPE (assignment_expression))
	  && cp_lexer_next_token_is (parser->lexer, CPP_COMMA))
	assignment_expression
	  = build_cplus_new (TREE_TYPE (assignment_expression),
			     assignment_expression, tf_warning_or_error);

      /* If this is the first assignment-expression, we can just
	 save it away.  */
      if (!expression)
	expression = assignment_expression;
      else
	expression = build_x_compound_expr (loc, expression,
					    assignment_expression,
					    complain_flags (decltype_p));
      /* If the next token is not a comma, then we are done with the
	 expression.  */
      if (cp_lexer_next_token_is_not (parser->lexer, CPP_COMMA))
	break;
      /* Consume the `,'.  */
      loc = cp_lexer_peek_token (parser->lexer)->location;
      cp_lexer_consume_token (parser->lexer);
      /* A comma operator cannot appear in a constant-expression.  */
      if (cp_parser_non_integral_constant_expression (parser, NIC_COMMA))
	expression = error_mark_node;
    }

  return expression;
}
/* Parse a constant-expression.
constant-expression:
conditional-expression
If ALLOW_NON_CONSTANT_P a non-constant expression is silently
accepted. If ALLOW_NON_CONSTANT_P is true and the expression is not
constant, *NON_CONSTANT_P is set to TRUE. If ALLOW_NON_CONSTANT_P
is false, NON_CONSTANT_P should be NULL. */
static tree
cp_parser_constant_expression (cp_parser* parser,
			       bool allow_non_constant_p,
			       bool *non_constant_p)
{
  bool saved_integral_constant_expression_p;
  bool saved_allow_non_integral_constant_expression_p;
  bool saved_non_integral_constant_expression_p;
  tree expression;

  /* It might seem that we could simply parse the
     conditional-expression, and then check to see if it were
     TREE_CONSTANT.  However, an expression that is TREE_CONSTANT is
     one that the compiler can figure out is constant, possibly after
     doing some simplifications or optimizations.  The standard has a
     precise definition of constant-expression, and we must honor
     that, even though it is somewhat more restrictive.

     For example:

       int i[(2, 3)];

     is not a legal declaration, because `(2, 3)' is not a
     constant-expression.  The `,' operator is forbidden in a
     constant-expression.  However, GCC's constant-folding machinery
     will fold this operation to an INTEGER_CST for `3'.  */

  /* Save the old settings.  */
  saved_integral_constant_expression_p = parser->integral_constant_expression_p;
  saved_allow_non_integral_constant_expression_p
    = parser->allow_non_integral_constant_expression_p;
  saved_non_integral_constant_expression_p = parser->non_integral_constant_expression_p;
  /* We are now parsing a constant-expression.  */
  parser->integral_constant_expression_p = true;
  /* In C++11 constexpr machinery handles constancy, so non-integral
     subexpressions are always tolerated during parsing there.  */
  parser->allow_non_integral_constant_expression_p
    = (allow_non_constant_p || cxx_dialect >= cxx11);
  parser->non_integral_constant_expression_p = false;
  /* Although the grammar says "conditional-expression", we parse an
     "assignment-expression", which also permits "throw-expression"
     and the use of assignment operators.  In the case that
     ALLOW_NON_CONSTANT_P is false, we get better errors than we would
     otherwise.  In the case that ALLOW_NON_CONSTANT_P is true, it is
     actually essential that we look for an assignment-expression.
     For example, cp_parser_initializer_clauses uses this function to
     determine whether a particular assignment-expression is in fact
     constant.  */
  expression = cp_parser_assignment_expression (parser);
  /* Restore the old settings.  */
  parser->integral_constant_expression_p
    = saved_integral_constant_expression_p;
  parser->allow_non_integral_constant_expression_p
    = saved_allow_non_integral_constant_expression_p;
  if (cxx_dialect >= cxx11)
    {
      /* Require an rvalue constant expression here; that's what our
	 callers expect.  Reference constant expressions are handled
	 separately in e.g. cp_parser_template_argument.  */
      bool is_const = potential_rvalue_constant_expression (expression);
      parser->non_integral_constant_expression_p = !is_const;
      if (!is_const && !allow_non_constant_p)
	require_potential_rvalue_constant_expression (expression);
    }
  if (allow_non_constant_p)
    *non_constant_p = parser->non_integral_constant_expression_p;
  parser->non_integral_constant_expression_p
    = saved_non_integral_constant_expression_p;

  return expression;
}
/* Parse __builtin_offsetof.
offsetof-expression:
"__builtin_offsetof" "(" type-id "," offsetof-member-designator ")"
offsetof-member-designator:
id-expression
| offsetof-member-designator "." id-expression
| offsetof-member-designator "[" expression "]"
| offsetof-member-designator "->" id-expression */
static tree
cp_parser_builtin_offsetof (cp_parser *parser)
{
  int save_ice_p, save_non_ice_p;
  tree type, expr;
  cp_id_kind dummy;
  cp_token *token;

  /* We're about to accept non-integral-constant things, but will
     definitely yield an integral constant expression.  Save and
     restore these values around our local parsing.  */
  save_ice_p = parser->integral_constant_expression_p;
  save_non_ice_p = parser->non_integral_constant_expression_p;

  /* Consume the "__builtin_offsetof" token.  */
  cp_lexer_consume_token (parser->lexer);
  /* Consume the opening `('.  */
  cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN);
  /* Parse the type-id.  */
  location_t loc = cp_lexer_peek_token (parser->lexer)->location;
  type = cp_parser_type_id (parser);
  /* Look for the `,'.  */
  cp_parser_require (parser, CPP_COMMA, RT_COMMA);
  token = cp_lexer_peek_token (parser->lexer);

  /* Build the (type *)null that begins the traditional offsetof macro.  */
  expr = build_static_cast (build_pointer_type (type), null_pointer_node,
			    tf_warning_or_error);

  /* Parse the offsetof-member-designator.  We begin as if we saw "expr->".  */
  expr = cp_parser_postfix_dot_deref_expression (parser, CPP_DEREF, expr,
						 true, &dummy, token->location);
  /* Extend the designator one component at a time until the closing `)'.  */
  while (true)
    {
      token = cp_lexer_peek_token (parser->lexer);
      switch (token->type)
	{
	case CPP_OPEN_SQUARE:
	  /* offsetof-member-designator "[" expression "]" */
	  expr = cp_parser_postfix_open_square_expression (parser, expr,
							   true, false);
	  break;

	case CPP_DEREF:
	  /* offsetof-member-designator "->" identifier */
	  /* Rewrite p->x as p[0].x, then share the `.' handling below.  */
	  expr = grok_array_decl (token->location, expr,
				  integer_zero_node, false);
	  /* FALLTHRU */

	case CPP_DOT:
	  /* offsetof-member-designator "." identifier */
	  cp_lexer_consume_token (parser->lexer);
	  expr = cp_parser_postfix_dot_deref_expression (parser, CPP_DOT,
							 expr, true, &dummy,
							 token->location);
	  break;

	case CPP_CLOSE_PAREN:
	  /* Consume the ")" token.  */
	  cp_lexer_consume_token (parser->lexer);
	  goto success;

	default:
	  /* Error.  We know the following require will fail, but
	     that gives the proper error message.  */
	  cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);
	  cp_parser_skip_to_closing_parenthesis (parser, true, false, true);
	  expr = error_mark_node;
	  goto failure;
	}
    }

 success:
  expr = finish_offsetof (expr, loc);

 failure:
  /* Restore the constant-expression context flags on both paths.  */
  parser->integral_constant_expression_p = save_ice_p;
  parser->non_integral_constant_expression_p = save_non_ice_p;

  return expr;
}
/* Parse a trait expression.
Returns a representation of the expression, the underlying type
of the type at issue when KEYWORD is RID_UNDERLYING_TYPE. */
static tree
cp_parser_trait_expr (cp_parser* parser, enum rid keyword)
{
  cp_trait_kind kind;
  tree type1, type2 = NULL_TREE;
  /* BINARY traits take exactly two type-ids; VARIADIC ones take one
     or more (the extras collected into a TREE_LIST in TYPE2).  */
  bool binary = false;
  bool variadic = false;

  /* Translate the keyword into the corresponding trait kind, noting
     its arity as we go.  */
  switch (keyword)
    {
    case RID_HAS_NOTHROW_ASSIGN:
      kind = CPTK_HAS_NOTHROW_ASSIGN;
      break;
    case RID_HAS_NOTHROW_CONSTRUCTOR:
      kind = CPTK_HAS_NOTHROW_CONSTRUCTOR;
      break;
    case RID_HAS_NOTHROW_COPY:
      kind = CPTK_HAS_NOTHROW_COPY;
      break;
    case RID_HAS_TRIVIAL_ASSIGN:
      kind = CPTK_HAS_TRIVIAL_ASSIGN;
      break;
    case RID_HAS_TRIVIAL_CONSTRUCTOR:
      kind = CPTK_HAS_TRIVIAL_CONSTRUCTOR;
      break;
    case RID_HAS_TRIVIAL_COPY:
      kind = CPTK_HAS_TRIVIAL_COPY;
      break;
    case RID_HAS_TRIVIAL_DESTRUCTOR:
      kind = CPTK_HAS_TRIVIAL_DESTRUCTOR;
      break;
    case RID_HAS_VIRTUAL_DESTRUCTOR:
      kind = CPTK_HAS_VIRTUAL_DESTRUCTOR;
      break;
    case RID_IS_ABSTRACT:
      kind = CPTK_IS_ABSTRACT;
      break;
    case RID_IS_BASE_OF:
      kind = CPTK_IS_BASE_OF;
      binary = true;
      break;
    case RID_IS_CLASS:
      kind = CPTK_IS_CLASS;
      break;
    case RID_IS_EMPTY:
      kind = CPTK_IS_EMPTY;
      break;
    case RID_IS_ENUM:
      kind = CPTK_IS_ENUM;
      break;
    case RID_IS_FINAL:
      kind = CPTK_IS_FINAL;
      break;
    case RID_IS_LITERAL_TYPE:
      kind = CPTK_IS_LITERAL_TYPE;
      break;
    case RID_IS_POD:
      kind = CPTK_IS_POD;
      break;
    case RID_IS_POLYMORPHIC:
      kind = CPTK_IS_POLYMORPHIC;
      break;
    case RID_IS_STD_LAYOUT:
      kind = CPTK_IS_STD_LAYOUT;
      break;
    case RID_IS_TRIVIAL:
      kind = CPTK_IS_TRIVIAL;
      break;
    case RID_IS_TRIVIALLY_ASSIGNABLE:
      kind = CPTK_IS_TRIVIALLY_ASSIGNABLE;
      binary = true;
      break;
    case RID_IS_TRIVIALLY_CONSTRUCTIBLE:
      kind = CPTK_IS_TRIVIALLY_CONSTRUCTIBLE;
      variadic = true;
      break;
    case RID_IS_TRIVIALLY_COPYABLE:
      kind = CPTK_IS_TRIVIALLY_COPYABLE;
      break;
    case RID_IS_UNION:
      kind = CPTK_IS_UNION;
      break;
    case RID_UNDERLYING_TYPE:
      kind = CPTK_UNDERLYING_TYPE;
      break;
    case RID_BASES:
      kind = CPTK_BASES;
      break;
    case RID_DIRECT_BASES:
      kind = CPTK_DIRECT_BASES;
      break;
    default:
      gcc_unreachable ();
    }

  /* Consume the token.  */
  cp_lexer_consume_token (parser->lexer);

  cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN);

  type1 = cp_parser_type_id (parser);

  if (type1 == error_mark_node)
    return error_mark_node;

  if (binary)
    {
      cp_parser_require (parser, CPP_COMMA, RT_COMMA);

      type2 = cp_parser_type_id (parser);

      if (type2 == error_mark_node)
	return error_mark_node;
    }
  else if (variadic)
    {
      /* Collect any additional type-ids (each optionally followed by
	 `...' for a pack expansion) into TYPE2.  */
      while (cp_lexer_next_token_is (parser->lexer, CPP_COMMA))
	{
	  cp_lexer_consume_token (parser->lexer);
	  tree elt = cp_parser_type_id (parser);
	  if (cp_lexer_next_token_is (parser->lexer, CPP_ELLIPSIS))
	    {
	      cp_lexer_consume_token (parser->lexer);
	      elt = make_pack_expansion (elt);
	    }
	  if (elt == error_mark_node)
	    return error_mark_node;
	  type2 = tree_cons (NULL_TREE, elt, type2);
	}
    }

  cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);

  /* Complete the trait expression, which may mean either processing
     the trait expr now or saving it for template instantiation.  */
  switch(kind)
    {
    case CPTK_UNDERLYING_TYPE:
      return finish_underlying_type (type1);

    case CPTK_BASES:
      return finish_bases (type1, false);

    case CPTK_DIRECT_BASES:
      return finish_bases (type1, true);

    default:
      return finish_trait_expr (kind, type1, type2);
    }
}
/* Lambdas that appear in variable initializer or default argument scope
   get that in their mangling, so we need to record it.  We might as well
   use the count for function and namespace scopes as well.  */
/* The declaration whose scope the next lambda will be mangled in.  */
static GTY(()) tree lambda_scope;
/* Discriminator for lambdas within LAMBDA_SCOPE; incremented by
   record_lambda_scope for each new lambda.  */
static GTY(()) int lambda_count;
/* A saved (scope, count) pair, one stack entry per nested scope.  */
typedef struct GTY(()) tree_int
{
  tree t;
  int i;
} tree_int;
/* Stack of outer scopes, pushed by start_lambda_scope and popped by
   finish_lambda_scope.  */
static GTY(()) vec<tree_int, va_gc> *lambda_scope_stack;
/* Enter DECL as the scope in which subsequent lambdas are counted for
   mangling, saving the previous scope and count on the stack.  */
static void
start_lambda_scope (tree decl)
{
  gcc_assert (decl);

  /* Once we're inside a function, we ignore other scopes and just push
     the function again so that popping works properly.  */
  if (current_function_decl && TREE_CODE (decl) != FUNCTION_DECL)
    decl = current_function_decl;

  /* Remember the outer scope and its count before switching.  */
  tree_int saved;
  saved.t = lambda_scope;
  saved.i = lambda_count;
  vec_safe_push (lambda_scope_stack, saved);

  /* Don't reset the count if we're still in the same function.  */
  if (lambda_scope != decl)
    {
      lambda_scope = decl;
      lambda_count = 0;
    }
}
/* Stamp LAMBDA with the current mangling scope and the next
   discriminator within that scope.  */
static void
record_lambda_scope (tree lambda)
{
  LAMBDA_EXPR_EXTRA_SCOPE (lambda) = lambda_scope;
  LAMBDA_EXPR_DISCRIMINATOR (lambda) = lambda_count++;
}
/* Leave the current lambda scope, restoring the saved scope and count
   from the stack.  The count is only restored when the scope actually
   changes, so re-pushing the same function does not reset it.  */
static void
finish_lambda_scope (void)
{
  tree_int *p = &lambda_scope_stack->last ();
  if (lambda_scope != p->t)
    {
      lambda_scope = p->t;
      lambda_count = p->i;
    }
  lambda_scope_stack->pop ();
}
/* Parse a lambda expression.
lambda-expression:
lambda-introducer lambda-declarator [opt] compound-statement
Returns a representation of the expression. */
static tree
cp_parser_lambda_expression (cp_parser* parser)
{
  tree lambda_expr = build_lambda_expr ();
  tree type;
  bool ok = true;
  cp_token *token = cp_lexer_peek_token (parser->lexer);
  cp_token_position start = 0;

  LAMBDA_EXPR_LOCATION (lambda_expr) = token->location;

  /* Diagnose lambdas in contexts where they are not allowed; parsing
     continues with OK false so the body is still skipped/consumed.  */
  if (cp_unevaluated_operand)
    {
      if (!token->error_reported)
	{
	  error_at (LAMBDA_EXPR_LOCATION (lambda_expr),
		    "lambda-expression in unevaluated context");
	  token->error_reported = true;
	}
      ok = false;
    }
  else if (parser->in_template_argument_list_p)
    {
      if (!token->error_reported)
	{
	  error_at (token->location, "lambda-expression in template-argument");
	  token->error_reported = true;
	}
      ok = false;
    }

  /* We may be in the middle of deferred access check.  Disable
     it now.  */
  push_deferring_access_checks (dk_no_deferred);

  cp_parser_lambda_introducer (parser, lambda_expr);

  type = begin_lambda_type (lambda_expr);
  if (type == error_mark_node)
    return error_mark_node;

  record_lambda_scope (lambda_expr);

  /* Do this again now that LAMBDA_EXPR_EXTRA_SCOPE is set.  */
  determine_visibility (TYPE_NAME (type));

  /* Now that we've started the type, add the capture fields for any
     explicit captures.  */
  register_capture_members (LAMBDA_EXPR_CAPTURE_LIST (lambda_expr));

  {
    /* Inside the class, surrounding template-parameter-lists do not apply.
       Save the parser state that must not leak into the lambda body,
       then clear it; everything is restored at the end of this block.  */
    unsigned int saved_num_template_parameter_lists
        = parser->num_template_parameter_lists;
    unsigned char in_statement = parser->in_statement;
    bool in_switch_statement_p = parser->in_switch_statement_p;
    bool fully_implicit_function_template_p
        = parser->fully_implicit_function_template_p;
    tree implicit_template_parms = parser->implicit_template_parms;
    cp_binding_level* implicit_template_scope = parser->implicit_template_scope;
    bool auto_is_implicit_function_template_parm_p
        = parser->auto_is_implicit_function_template_parm_p;

    parser->num_template_parameter_lists = 0;
    parser->in_statement = 0;
    parser->in_switch_statement_p = false;
    parser->fully_implicit_function_template_p = false;
    parser->implicit_template_parms = 0;
    parser->implicit_template_scope = 0;
    parser->auto_is_implicit_function_template_parm_p = false;

    /* By virtue of defining a local class, a lambda expression has access to
       the private variables of enclosing classes.  */

    ok &= cp_parser_lambda_declarator_opt (parser, lambda_expr);

    if (ok)
      {
	if (!cp_parser_error_occurred (parser)
	    && cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE)
	    && cp_parser_start_tentative_firewall (parser))
	  start = token;
	cp_parser_lambda_body (parser, lambda_expr);
      }
    else if (cp_parser_require (parser, CPP_OPEN_BRACE, RT_OPEN_BRACE))
      {
	/* On error, still consume the braced body so parsing can
	   resume after the lambda.  */
	if (cp_parser_skip_to_closing_brace (parser))
	  cp_lexer_consume_token (parser->lexer);
      }

    /* The capture list was built up in reverse order; fix that now.  */
    LAMBDA_EXPR_CAPTURE_LIST (lambda_expr)
      = nreverse (LAMBDA_EXPR_CAPTURE_LIST (lambda_expr));

    if (ok)
      maybe_add_lambda_conv_op (type);

    type = finish_struct (type, /*attributes=*/NULL_TREE);

    parser->num_template_parameter_lists = saved_num_template_parameter_lists;
    parser->in_statement = in_statement;
    parser->in_switch_statement_p = in_switch_statement_p;
    parser->fully_implicit_function_template_p
      = fully_implicit_function_template_p;
    parser->implicit_template_parms = implicit_template_parms;
    parser->implicit_template_scope = implicit_template_scope;
    parser->auto_is_implicit_function_template_parm_p
      = auto_is_implicit_function_template_parm_p;
  }

  pop_deferring_access_checks ();

  /* This field is only used during parsing of the lambda.  */
  LAMBDA_EXPR_THIS_CAPTURE (lambda_expr) = NULL_TREE;

  /* This lambda shouldn't have any proxies left at this point.  */
  gcc_assert (LAMBDA_EXPR_PENDING_PROXIES (lambda_expr) == NULL);
  /* And now that we're done, push proxies for an enclosing lambda.  */
  insert_pending_capture_proxies ();

  if (ok)
    lambda_expr = build_lambda_object (lambda_expr);
  else
    lambda_expr = error_mark_node;

  cp_parser_end_tentative_firewall (parser, start, lambda_expr);

  return lambda_expr;
}
/* Parse the beginning of a lambda expression.

   lambda-introducer:
     [ lambda-capture [opt] ]

   Parses the capture-default (if any) and each capture, recording them
   on LAMBDA_EXPR via add_capture.  Emits pedwarns for captures that are
   redundant with the capture-default.

   LAMBDA_EXPR is the current representation of the lambda expression.  */

static void
cp_parser_lambda_introducer (cp_parser* parser, tree lambda_expr)
{
  /* Need commas after the first capture.  */
  bool first = true;

  /* Eat the leading `['.  */
  cp_parser_require (parser, CPP_OPEN_SQUARE, RT_OPEN_SQUARE);

  /* Record default capture mode.  "[&" "[=" "[&," "[=,"
     A lone `&' is only a capture-default when it is not followed by an
     identifier (otherwise it introduces a by-reference capture).  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_AND)
      && cp_lexer_peek_nth_token (parser->lexer, 2)->type != CPP_NAME)
    LAMBDA_EXPR_DEFAULT_CAPTURE_MODE (lambda_expr) = CPLD_REFERENCE;
  else if (cp_lexer_next_token_is (parser->lexer, CPP_EQ))
    LAMBDA_EXPR_DEFAULT_CAPTURE_MODE (lambda_expr) = CPLD_COPY;

  if (LAMBDA_EXPR_DEFAULT_CAPTURE_MODE (lambda_expr) != CPLD_NONE)
    {
      /* Consume the `&' or `=' of the capture-default.  The first
	 explicit capture (if any) will then need a leading comma.  */
      cp_lexer_consume_token (parser->lexer);
      first = false;
    }

  /* Parse captures until the closing `]'.  */
  while (cp_lexer_next_token_is_not (parser->lexer, CPP_CLOSE_SQUARE))
    {
      cp_token* capture_token;
      tree capture_id;
      tree capture_init_expr;
      cp_id_kind idk = CP_ID_KIND_NONE;
      bool explicit_init_p = false;

      enum capture_kind_type
      {
	BY_COPY,
	BY_REFERENCE
      };
      enum capture_kind_type capture_kind = BY_COPY;

      if (cp_lexer_next_token_is (parser->lexer, CPP_EOF))
	{
	  error ("expected end of capture-list");
	  return;
	}

      if (first)
	first = false;
      else
	cp_parser_require (parser, CPP_COMMA, RT_COMMA);

      /* Possibly capture `this'.  */
      if (cp_lexer_next_token_is_keyword (parser->lexer, RID_THIS))
	{
	  location_t loc = cp_lexer_peek_token (parser->lexer)->location;
	  if (LAMBDA_EXPR_DEFAULT_CAPTURE_MODE (lambda_expr) == CPLD_COPY)
	    pedwarn (loc, 0, "explicit by-copy capture of %<this%> redundant "
		     "with by-copy capture default");
	  cp_lexer_consume_token (parser->lexer);
	  add_capture (lambda_expr,
		       /*id=*/this_identifier,
		       /*initializer=*/finish_this_expr(),
		       /*by_reference_p=*/false,
		       explicit_init_p);
	  continue;
	}

      /* Remember whether we want to capture as a reference or not.  */
      if (cp_lexer_next_token_is (parser->lexer, CPP_AND))
	{
	  capture_kind = BY_REFERENCE;
	  cp_lexer_consume_token (parser->lexer);
	}

      /* Get the identifier.  */
      capture_token = cp_lexer_peek_token (parser->lexer);
      capture_id = cp_parser_identifier (parser);

      if (capture_id == error_mark_node)
	/* Would be nice to have a cp_parser_skip_to_closing_x for general
	   delimiters, but I modified this to stop on unnested ']' as well.  It
	   was already changed to stop on unnested '}', so the
	   "closing_parenthesis" name is no more misleading with my change.  */
	{
	  cp_parser_skip_to_closing_parenthesis (parser,
						 /*recovering=*/true,
						 /*or_comma=*/true,
						 /*consume_paren=*/true);
	  break;
	}

      /* Find the initializer for this capture.  An `=', `(' or `{' here
	 means a C++14 init-capture with an explicit initializer.  */
      if (cp_lexer_next_token_is (parser->lexer, CPP_EQ)
	  || cp_lexer_next_token_is (parser->lexer, CPP_OPEN_PAREN)
	  || cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE))
	{
	  bool direct, non_constant;
	  /* An explicit initializer exists.  */
	  if (cxx_dialect < cxx14)
	    pedwarn (input_location, 0,
		     "lambda capture initializers "
		     "only available with -std=c++14 or -std=gnu++14");
	  capture_init_expr = cp_parser_initializer (parser, &direct,
						     &non_constant);
	  explicit_init_p = true;
	  if (capture_init_expr == NULL_TREE)
	    {
	      error ("empty initializer for lambda init-capture");
	      capture_init_expr = error_mark_node;
	    }
	}
      else
	{
	  const char* error_msg;

	  /* Turn the identifier into an id-expression.  */
	  capture_init_expr
	    = cp_parser_lookup_name_simple (parser, capture_id,
					    capture_token->location);

	  if (capture_init_expr == error_mark_node)
	    {
	      unqualified_name_lookup_error (capture_id);
	      continue;
	    }
	  else if (DECL_P (capture_init_expr)
		   && (!VAR_P (capture_init_expr)
		       && TREE_CODE (capture_init_expr) != PARM_DECL))
	    {
	      /* Only variables and parameters can be captured.  */
	      error_at (capture_token->location,
			"capture of non-variable %qD ",
			capture_init_expr);
	      inform (0, "%q+#D declared here", capture_init_expr);
	      continue;
	    }
	  if (VAR_P (capture_init_expr)
	      && decl_storage_duration (capture_init_expr) != dk_auto)
	    {
	      /* Capturing a static/thread-local is only a pedwarn; if the
		 user asked for an error we skip the capture entirely.  */
	      if (pedwarn (capture_token->location, 0, "capture of variable "
			   "%qD with non-automatic storage duration",
			   capture_init_expr))
		inform (0, "%q+#D declared here", capture_init_expr);
	      continue;
	    }

	  capture_init_expr
            = finish_id_expression
                (capture_id,
		 capture_init_expr,
                 parser->scope,
                 &idk,
                 /*integral_constant_expression_p=*/false,
                 /*allow_non_integral_constant_expression_p=*/false,
                 /*non_integral_constant_expression_p=*/NULL,
                 /*template_p=*/false,
                 /*done=*/true,
                 /*address_p=*/false,
                 /*template_arg_p=*/false,
                 &error_msg,
                 capture_token->location);

	  /* A trailing `...' makes this a pack-expansion capture.  */
	  if (cp_lexer_next_token_is (parser->lexer, CPP_ELLIPSIS))
	    {
	      cp_lexer_consume_token (parser->lexer);
	      capture_init_expr = make_pack_expansion (capture_init_expr);
	    }
	  else
	    check_for_bare_parameter_packs (capture_init_expr);
	}

      /* Diagnose a capture that merely repeats the capture-default.  An
	 init-capture is never redundant, hence the !explicit_init_p.  */
      if (LAMBDA_EXPR_DEFAULT_CAPTURE_MODE (lambda_expr) != CPLD_NONE
	  && !explicit_init_p)
	{
	  if (LAMBDA_EXPR_DEFAULT_CAPTURE_MODE (lambda_expr) == CPLD_COPY
	      && capture_kind == BY_COPY)
	    pedwarn (capture_token->location, 0, "explicit by-copy capture "
		     "of %qD redundant with by-copy capture default",
		     capture_id);
	  if (LAMBDA_EXPR_DEFAULT_CAPTURE_MODE (lambda_expr) == CPLD_REFERENCE
	      && capture_kind == BY_REFERENCE)
	    pedwarn (capture_token->location, 0, "explicit by-reference "
		     "capture of %qD redundant with by-reference capture "
		     "default", capture_id);
	}

      add_capture (lambda_expr,
		   capture_id,
		   capture_init_expr,
		   /*by_reference_p=*/capture_kind == BY_REFERENCE,
		   explicit_init_p);
    }

  cp_parser_require (parser, CPP_CLOSE_SQUARE, RT_CLOSE_SQUARE);
}
/* Parse the (optional) middle of a lambda expression.

   lambda-declarator:
     < template-parameter-list [opt] >
     ( parameter-declaration-clause [opt] )
       attribute-specifier [opt]
       mutable [opt]
       exception-specification [opt]
       lambda-return-type-clause [opt]

   Builds and declares the function call operator (operator()) for the
   closure type, possibly as a member template.  Returns true unless
   creating the call operator failed.

   LAMBDA_EXPR is the current representation of the lambda expression.  */

static bool
cp_parser_lambda_declarator_opt (cp_parser* parser, tree lambda_expr)
{
  /* 5.1.1.4 of the standard says:
       If a lambda-expression does not include a lambda-declarator, it is as if
       the lambda-declarator were ().
     This means an empty parameter list, no attributes, and no exception
     specification.  */
  tree param_list = void_list_node;
  tree attributes = NULL_TREE;
  tree exception_spec = NULL_TREE;
  tree template_param_list = NULL_TREE;

  /* The template-parameter-list is optional, but must begin with
     an opening angle if present.  (GNU extension before C++14,
     hence the pedwarn.)  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_LESS))
    {
      if (cxx_dialect < cxx14)
	pedwarn (parser->lexer->next_token->location, 0,
		 "lambda templates are only available with "
		 "-std=c++14 or -std=gnu++14");

      cp_lexer_consume_token (parser->lexer);

      template_param_list = cp_parser_template_parameter_list (parser);

      cp_parser_skip_to_end_of_template_parameter_list (parser);

      /* We just processed one more parameter list.  Restored by the
	 caller after the lambda body is done.  */
      ++parser->num_template_parameter_lists;
    }

  /* The parameter-declaration-clause is optional (unless
     template-parameter-list was given), but must begin with an
     opening parenthesis if present.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_PAREN))
    {
      cp_lexer_consume_token (parser->lexer);

      begin_scope (sk_function_parms, /*entity=*/NULL_TREE);

      /* Parse parameters.  */
      param_list = cp_parser_parameter_declaration_clause (parser);

      /* Default arguments shall not be specified in the
	 parameter-declaration-clause of a lambda-declarator.
	 (Allowed as an extension from C++14 on.)  */
      for (tree t = param_list; t; t = TREE_CHAIN (t))
	if (TREE_PURPOSE (t) && cxx_dialect < cxx14)
	  pedwarn (DECL_SOURCE_LOCATION (TREE_VALUE (t)), OPT_Wpedantic,
		   "default argument specified for lambda parameter");

      cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);

      attributes = cp_parser_attributes_opt (parser);

      /* Parse optional `mutable' keyword.  */
      if (cp_lexer_next_token_is_keyword (parser->lexer, RID_MUTABLE))
	{
	  cp_lexer_consume_token (parser->lexer);
	  LAMBDA_EXPR_MUTABLE_P (lambda_expr) = 1;
	}

      /* Parse optional exception specification.  */
      exception_spec = cp_parser_exception_specification_opt (parser);

      /* Parse optional trailing return type.  */
      if (cp_lexer_next_token_is (parser->lexer, CPP_DEREF))
	{
	  cp_lexer_consume_token (parser->lexer);
	  LAMBDA_EXPR_RETURN_TYPE (lambda_expr)
	    = cp_parser_trailing_type_id (parser);
	}

      /* The function parameters must be in scope all the way until after the
         trailing-return-type in case of decltype.  */
      pop_bindings_and_leave_scope ();
    }
  else if (template_param_list != NULL_TREE) // generate diagnostic
    cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN);

  /* Create the function call operator.

     Messing with declarators like this is no uglier than building up the
     FUNCTION_DECL by hand, and this is less likely to get out of sync with
     other code.  */
  {
    cp_decl_specifier_seq return_type_specs;
    cp_declarator* declarator;
    tree fco;
    int quals;
    void *p;

    clear_decl_specs (&return_type_specs);
    if (LAMBDA_EXPR_RETURN_TYPE (lambda_expr))
      return_type_specs.type = LAMBDA_EXPR_RETURN_TYPE (lambda_expr);
    else
      /* Maybe we will deduce the return type later.  */
      return_type_specs.type = make_auto ();

    p = obstack_alloc (&declarator_obstack, 0);

    declarator = make_id_declarator (NULL_TREE, ansi_opname (CALL_EXPR),
				     sfk_none);

    /* A non-mutable lambda has a const operator().  */
    quals = (LAMBDA_EXPR_MUTABLE_P (lambda_expr)
	     ? TYPE_UNQUALIFIED : TYPE_QUAL_CONST);
    declarator = make_call_declarator (declarator, param_list, quals,
				       VIRT_SPEC_UNSPECIFIED,
                                       REF_QUAL_NONE,
				       exception_spec,
                                       /*late_return_type=*/NULL_TREE);
    declarator->id_loc = LAMBDA_EXPR_LOCATION (lambda_expr);

    fco = grokmethod (&return_type_specs,
		      declarator,
		      attributes);
    if (fco != error_mark_node)
      {
	DECL_INITIALIZED_IN_CLASS_P (fco) = 1;
	DECL_ARTIFICIAL (fco) = 1;
	/* Give the object parameter a different name.  */
	DECL_NAME (DECL_ARGUMENTS (fco)) = get_identifier ("__closure");
	if (LAMBDA_EXPR_RETURN_TYPE (lambda_expr))
	  TYPE_HAS_LATE_RETURN_TYPE (TREE_TYPE (fco)) = 1;
      }
    if (template_param_list)
      {
	fco = finish_member_template_decl (fco);
	finish_template_decl (template_param_list);
	--parser->num_template_parameter_lists;
      }
    else if (parser->fully_implicit_function_template_p)
      /* Generic lambda: auto parameters became an implicit template.  */
      fco = finish_fully_implicit_template (parser, fco);

    finish_member_declaration (fco);

    obstack_free (&declarator_obstack, p);

    return (fco != error_mark_node);
  }
}
/* Parse the body of a lambda expression, which is simply

   compound-statement

   but which requires special handling.  The body is parsed as the
   definition of the closure type's function call operator; a body of
   the single form `{ return expr; }' is parsed tentatively first so
   the return type can be deduced even in C++11 mode.

   LAMBDA_EXPR is the current representation of the lambda expression.  */

static void
cp_parser_lambda_body (cp_parser* parser, tree lambda_expr)
{
  bool nested = (current_function_decl != NULL_TREE);
  bool local_variables_forbidden_p = parser->local_variables_forbidden_p;
  if (nested)
    push_function_context ();
  else
    /* Still increment function_depth so that we don't GC in the
       middle of an expression.  */
    ++function_depth;
  /* Clear this in case we're in the middle of a default argument.  */
  parser->local_variables_forbidden_p = false;

  /* Finish the function call operator
     - class_specifier
     + late_parsing_for_member
     + function_definition_after_declarator
     + ctor_initializer_opt_and_function_body  */
  {
    tree fco = lambda_function (lambda_expr);
    tree body;
    bool done = false;
    tree compound_stmt;
    tree cap;

    /* Let the front end know that we are going to be defining this
       function.  */
    start_preparsed_function (fco,
			      NULL_TREE,
			      SF_PRE_PARSED | SF_INCLASS_INLINE);

    start_lambda_scope (fco);
    body = begin_function_body ();

    /* If the `{' is missing, bail out but still run the teardown at
       `out' so contexts stay balanced.  */
    if (!cp_parser_require (parser, CPP_OPEN_BRACE, RT_OPEN_BRACE))
       goto out;

    /* Push the proxies for any explicit captures.
       There are no leftover proxies to pop from an earlier use of the
       capture list; any such use would be in an enclosing lambda whose
       proxies were popped when its body finished.  */
    for (cap = LAMBDA_EXPR_CAPTURE_LIST (lambda_expr); cap;
	 cap = TREE_CHAIN (cap))
      build_capture_proxy (TREE_PURPOSE (cap));

    compound_stmt = begin_compound_stmt (0);

    /* 5.1.1.4 of the standard says:
         If a lambda-expression does not include a trailing-return-type, it
         is as if the trailing-return-type denotes the following type:
          * if the compound-statement is of the form
               { return attribute-specifier [opt] expression ; }
             the type of the returned expression after lvalue-to-rvalue
             conversion (_conv.lval_ 4.1), array-to-pointer conversion
             (_conv.array_ 4.2), and function-to-pointer conversion
             (_conv.func_ 4.3);
          * otherwise, void.  */

    /* In a lambda that has neither a lambda-return-type-clause
       nor a deducible form, errors should be reported for return statements
       in the body.  Since we used void as the placeholder return type, parsing
       the body as usual will give such desired behavior.  */
    if (!LAMBDA_EXPR_RETURN_TYPE (lambda_expr)
	&& cp_lexer_peek_nth_token (parser->lexer, 1)->keyword == RID_RETURN
	&& cp_lexer_peek_nth_token (parser->lexer, 2)->type != CPP_SEMICOLON)
      {
	tree expr = NULL_TREE;
	cp_id_kind idk = CP_ID_KIND_NONE;

	/* Parse tentatively in case there's more after the initial return
	   statement.  */
	cp_parser_parse_tentatively (parser);

	cp_parser_require_keyword (parser, RID_RETURN, RT_RETURN);

	expr = cp_parser_expression (parser, &idk);

	cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON);
	cp_parser_require (parser, CPP_CLOSE_BRACE, RT_CLOSE_BRACE);

	if (cp_parser_parse_definitely (parser))
	  {
	    if (!processing_template_decl)
	      apply_deduced_return_type (fco, lambda_return_type (expr));

	    /* Will get error here if type not deduced yet.  */
	    finish_return_stmt (expr);

	    done = true;
	  }
      }

    /* The tentative single-return parse failed (or did not apply);
       parse the body as an ordinary statement sequence.  */
    if (!done)
      {
	while (cp_lexer_next_token_is_keyword (parser->lexer, RID_LABEL))
	  cp_parser_label_declaration (parser);
	cp_parser_statement_seq_opt (parser, NULL_TREE);
	cp_parser_require (parser, CPP_CLOSE_BRACE, RT_CLOSE_BRACE);
      }

    finish_compound_stmt (compound_stmt);

  out:
    finish_function_body (body);
    finish_lambda_scope ();

    /* Finish the function and generate code for it if necessary.  */
    tree fn = finish_function (/*inline*/2);

    /* Only expand if the call op is not a template.  */
    if (!DECL_TEMPLATE_INFO (fco))
      expand_or_defer_fn (fn);
  }

  parser->local_variables_forbidden_p = local_variables_forbidden_p;
  if (nested)
    pop_function_context();
  else
    --function_depth;
}
/* Statements [gram.stmt.stmt]  */

/* Parse a statement.

   statement:
     labeled-statement
     expression-statement
     compound-statement
     selection-statement
     iteration-statement
     jump-statement
     declaration-statement
     try-block

  C++11:

  statement:
    labeled-statement
    attribute-specifier-seq (opt) expression-statement
    attribute-specifier-seq (opt) compound-statement
    attribute-specifier-seq (opt) selection-statement
    attribute-specifier-seq (opt) iteration-statement
    attribute-specifier-seq (opt) jump-statement
    declaration-statement
    attribute-specifier-seq (opt) try-block

  TM Extension:

   statement:
     atomic-statement

  IN_COMPOUND is true when the statement is nested inside a
  cp_parser_compound_statement; this matters for certain pragmas.

  If IF_P is not NULL, *IF_P is set to indicate whether the statement
  is a (possibly labeled) if statement which is not enclosed in braces
  and has an else clause.  This is used to implement -Wparentheses.  */

static void
cp_parser_statement (cp_parser* parser, tree in_statement_expr,
		     bool in_compound, bool *if_p)
{
  tree statement, std_attrs = NULL_TREE;
  cp_token *token;
  location_t statement_location, attrs_location;

  /* Labels are handled by stripping the label and looping back here
     (tail recursion via goto) for the labeled statement proper.  */
 restart:
  if (if_p != NULL)
    *if_p = false;
  /* There is no statement yet.  */
  statement = NULL_TREE;

  /* Remember the token position so attributes can be un-parsed if they
     turn out to belong to a declaration (see below).  */
  saved_token_sentinel saved_tokens (parser->lexer);

  attrs_location = cp_lexer_peek_token (parser->lexer)->location;
  if (c_dialect_objc ())
    /* In obj-c++, seeing '[[' might be the either the beginning of
       c++11 attributes, or a nested objc-message-expression.  So
       let's parse the c++11 attributes tentatively.  */
    cp_parser_parse_tentatively (parser);
  std_attrs = cp_parser_std_attribute_spec_seq (parser);
  if (c_dialect_objc ())
    {
      if (!cp_parser_parse_definitely (parser))
	std_attrs = NULL_TREE;
    }

  /* Peek at the next token.  */
  token = cp_lexer_peek_token (parser->lexer);
  /* Remember the location of the first token in the statement.  */
  statement_location = token->location;
  /* If this is a keyword, then that will often determine what kind of
     statement we have.  */
  if (token->type == CPP_KEYWORD)
    {
      enum rid keyword = token->keyword;

      switch (keyword)
	{
	case RID_CASE:
	case RID_DEFAULT:
	  /* Looks like a labeled-statement with a case label.
	     Parse the label, and then use tail recursion to parse
	     the statement.  */
	  cp_parser_label_for_labeled_statement (parser, std_attrs);
	  goto restart;

	case RID_IF:
	case RID_SWITCH:
	  statement = cp_parser_selection_statement (parser, if_p);
	  break;

	case RID_WHILE:
	case RID_DO:
	case RID_FOR:
	  statement = cp_parser_iteration_statement (parser, false);
	  break;

	case RID_CILK_FOR:
	  if (!flag_cilkplus)
	    {
	      error_at (cp_lexer_peek_token (parser->lexer)->location,
			"-fcilkplus must be enabled to use %<_Cilk_for%>");
	      cp_lexer_consume_token (parser->lexer);
	      statement = error_mark_node;
	    }
	  else
	    statement = cp_parser_cilk_for (parser, integer_zero_node);
	  break;

	case RID_BREAK:
	case RID_CONTINUE:
	case RID_RETURN:
	case RID_GOTO:
	  statement = cp_parser_jump_statement (parser);
	  break;

	case RID_CILK_SYNC:
	  cp_lexer_consume_token (parser->lexer);
	  if (flag_cilkplus)
	    {
	      tree sync_expr = build_cilk_sync ();
	      SET_EXPR_LOCATION (sync_expr,
				 token->location);
	      statement = finish_expr_stmt (sync_expr);
	    }
	  else
	    {
	      error_at (token->location, "-fcilkplus must be enabled to use"
			" %<_Cilk_sync%>");
	      statement = error_mark_node;
	    }
	  cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON);
	  break;

	  /* Objective-C++ exception-handling constructs.  */
	case RID_AT_TRY:
	case RID_AT_CATCH:
	case RID_AT_FINALLY:
	case RID_AT_SYNCHRONIZED:
	case RID_AT_THROW:
	  statement = cp_parser_objc_statement (parser);
	  break;

	case RID_TRY:
	  statement = cp_parser_try_block (parser);
	  break;

	case RID_NAMESPACE:
	  /* This must be a namespace alias definition.  */
	  cp_parser_declaration_statement (parser);
	  return;
	
	case RID_TRANSACTION_ATOMIC:
	case RID_TRANSACTION_RELAXED:
	  statement = cp_parser_transaction (parser, keyword);
	  break;
	case RID_TRANSACTION_CANCEL:
	  statement = cp_parser_transaction_cancel (parser);
	  break;

	default:
	  /* It might be a keyword like `int' that can start a
	     declaration-statement.  */
	  break;
	}
    }
  else if (token->type == CPP_NAME)
    {
      /* If the next token is a `:', then we are looking at a
	 labeled-statement.  */
      token = cp_lexer_peek_nth_token (parser->lexer, 2);
      if (token->type == CPP_COLON)
	{
	  /* Looks like a labeled-statement with an ordinary label.
	     Parse the label, and then use tail recursion to parse
	     the statement.  */

	  cp_parser_label_for_labeled_statement (parser, std_attrs);
	  goto restart;
	}
    }
  /* Anything that starts with a `{' must be a compound-statement.  */
  else if (token->type == CPP_OPEN_BRACE)
    statement = cp_parser_compound_statement (parser, NULL, false, false);
  /* CPP_PRAGMA is a #pragma inside a function body, which constitutes
     a statement all its own.  */
  else if (token->type == CPP_PRAGMA)
    {
      /* Only certain OpenMP pragmas are attached to statements, and thus
	 are considered statements themselves.  All others are not.  In
	 the context of a compound, accept the pragma as a "statement" and
	 return so that we can check for a close brace.  Otherwise we
	 require a real statement and must go back and read one.  */
      if (in_compound)
	cp_parser_pragma (parser, pragma_compound);
      else if (!cp_parser_pragma (parser, pragma_stmt))
	goto restart;
      return;
    }
  else if (token->type == CPP_EOF)
    {
      cp_parser_error (parser, "expected statement");
      return;
    }

  /* Everything else must be a declaration-statement or an
     expression-statement.  Try for the declaration-statement
     first, unless we are looking at a `;', in which case we know that
     we have an expression-statement.  */
  if (!statement)
    {
      if (cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON))
	{
	  if (std_attrs != NULL_TREE)
	    {
	      /*  Attributes should be parsed as part of the
		  declaration, so let's un-parse them.  */
	      saved_tokens.rollback();
	      std_attrs = NULL_TREE;
	    }

	  cp_parser_parse_tentatively (parser);
	  /* Try to parse the declaration-statement.  */
	  cp_parser_declaration_statement (parser);
	  /* If that worked, we're done.  */
	  if (cp_parser_parse_definitely (parser))
	    return;
	}
      /* Look for an expression-statement instead.  */
      statement = cp_parser_expression_statement (parser, in_statement_expr);
    }

  /* Set the line number for the statement.  */
  if (statement && STATEMENT_CODE_P (TREE_CODE (statement)))
    SET_EXPR_LOCATION (statement, statement_location);

  /* Note that for now, we don't do anything with c++11 statements
     parsed at this level.  */
  if (std_attrs != NULL_TREE)
    warning_at (attrs_location,
		OPT_Wattributes,
		"attributes at the beginning of statement are ignored");
}
/* Parse the label for a labeled-statement, i.e.

   identifier :
   case constant-expression :
   default :

   GNU Extension:
   case constant-expression ... constant-expression : statement

   ATTRIBUTES are the (C++11) attributes that preceded the label; GNU
   attributes following an ordinary label may be chained onto them.

   When a label is parsed without errors, the label is added to the
   parse tree by the finish_* functions, so this function doesn't
   have to return the label.  */

static void
cp_parser_label_for_labeled_statement (cp_parser* parser, tree attributes)
{
  cp_token *token;
  tree label = NULL_TREE;
  bool saved_colon_corrects_to_scope_p = parser->colon_corrects_to_scope_p;

  /* The next token should be an identifier.  */
  token = cp_lexer_peek_token (parser->lexer);
  if (token->type != CPP_NAME
      && token->type != CPP_KEYWORD)
    {
      cp_parser_error (parser, "expected labeled-statement");
      return;
    }

  /* A label is followed by `:', which must not be "corrected" into a
     scope operator; restore the flag on exit.  */
  parser->colon_corrects_to_scope_p = false;
  switch (token->keyword)
    {
    case RID_CASE:
      {
	tree expr, expr_hi;
	cp_token *ellipsis;

	/* Consume the `case' token.  */
	cp_lexer_consume_token (parser->lexer);
	/* Parse the constant-expression.  */
	expr = cp_parser_constant_expression (parser);
	if (check_for_bare_parameter_packs (expr))
	  expr = error_mark_node;

	ellipsis = cp_lexer_peek_token (parser->lexer);
	if (ellipsis->type == CPP_ELLIPSIS)
	  {
	    /* Consume the `...' token.  */
	    cp_lexer_consume_token (parser->lexer);
	    expr_hi = cp_parser_constant_expression (parser);
	    if (check_for_bare_parameter_packs (expr_hi))
	      expr_hi = error_mark_node;

	    /* We don't need to emit warnings here, as the common code
	       will do this for us.  */
	  }
	else
	  expr_hi = NULL_TREE;

	if (parser->in_switch_statement_p)
	  finish_case_label (token->location, expr, expr_hi);
	else
	  error_at (token->location,
		    "case label %qE not within a switch statement",
		    expr);
      }
      break;

    case RID_DEFAULT:
      /* Consume the `default' token.  */
      cp_lexer_consume_token (parser->lexer);

      if (parser->in_switch_statement_p)
	finish_case_label (token->location, NULL_TREE, NULL_TREE);
      else
	error_at (token->location, "case label not within a switch statement");
      break;

    default:
      /* Anything else must be an ordinary label.  */
      label = finish_label_stmt (cp_parser_identifier (parser));
      break;
    }

  /* Require the `:' token.  */
  cp_parser_require (parser, CPP_COLON, RT_COLON);

  /* An ordinary label may optionally be followed by attributes.
     However, this is only permitted if the attributes are then
     followed by a semicolon.  This is because, for backward
     compatibility, when parsing
       lab: __attribute__ ((unused)) int i;
     we want the attribute to attach to "i", not "lab".  */
  if (label != NULL_TREE
      && cp_next_tokens_can_be_gnu_attribute_p (parser))
    {
      tree attrs;
      cp_parser_parse_tentatively (parser);
      attrs = cp_parser_gnu_attributes_opt (parser);
      if (attrs == NULL_TREE
	  || cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON))
	cp_parser_abort_tentative_parse (parser);
      else if (!cp_parser_parse_definitely (parser))
	;
      else
	attributes = chainon (attributes, attrs);
    }

  if (attributes != NULL_TREE)
    cplus_decl_attributes (&label, attributes, 0);

  parser->colon_corrects_to_scope_p = saved_colon_corrects_to_scope_p;
}
/* Parse an expression-statement.

   expression-statement:
     expression [opt] ;

   Returns the new EXPR_STMT -- or NULL_TREE if the expression
   statement consists of nothing more than an `;'.  IN_STATEMENT_EXPR
   is non-NULL when this expression-statement appears inside a GNU
   statement-expression (`({...})'); the final expression of such a
   statement-expression is finished specially.  */

static tree
cp_parser_expression_statement (cp_parser* parser, tree in_statement_expr)
{
  tree statement = NULL_TREE;
  cp_token *token = cp_lexer_peek_token (parser->lexer);

  /* If the next token is a ';', then there is no expression
     statement.  */
  if (cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON))
    {
      statement = cp_parser_expression (parser);
      if (statement == error_mark_node
	  && !cp_parser_uncommitted_to_tentative_parse_p (parser))
	{
	  /* Committed parse failed: skip to a recovery point.  */
	  cp_parser_skip_to_end_of_block_or_statement (parser);
	  return error_mark_node;
	}
    }

  /* Give a helpful message for "A<T>::type t;" and the like.  */
  if (cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON)
      && !cp_parser_uncommitted_to_tentative_parse_p (parser))
    {
      if (TREE_CODE (statement) == SCOPE_REF)
	error_at (token->location, "need %<typename%> before %qE because "
		  "%qT is a dependent scope",
		  statement, TREE_OPERAND (statement, 0));
      else if (is_overloaded_fn (statement)
	       && DECL_CONSTRUCTOR_P (get_first_fn (statement)))
	{
	  /* A::A a; */
	  tree fn = get_first_fn (statement);
	  error_at (token->location,
		    "%<%T::%D%> names the constructor, not the type",
		    DECL_CONTEXT (fn), DECL_NAME (fn));
	}
    }

  /* Consume the final `;'.  */
  cp_parser_consume_semicolon_at_end_of_statement (parser);

  if (in_statement_expr
      && cp_lexer_next_token_is (parser->lexer, CPP_CLOSE_BRACE))
    /* This is the final expression statement of a statement
       expression.  */
    statement = finish_stmt_expr_expr (statement, in_statement_expr);
  else if (statement)
    statement = finish_expr_stmt (statement);

  return statement;
}
/* Parse a compound-statement.

   compound-statement:
     { statement-seq [opt] }

   GNU extension:

   compound-statement:
     { label-declaration-seq [opt] statement-seq [opt] }

   label-declaration-seq:
     label-declaration
     label-declaration-seq label-declaration

   IN_TRY is true when this is the block of a try-statement;
   FUNCTION_BODY is true when this is the outermost block of a
   function body (which suppresses the constexpr pedwarn below).

   Returns a tree representing the statement.  */

static tree
cp_parser_compound_statement (cp_parser *parser, tree in_statement_expr,
			      bool in_try, bool function_body)
{
  tree compound_stmt;

  /* Consume the `{'.  */
  if (!cp_parser_require (parser, CPP_OPEN_BRACE, RT_OPEN_BRACE))
    return error_mark_node;
  /* Before C++14, a constexpr function body may not contain a nested
     compound-statement.  */
  if (DECL_DECLARED_CONSTEXPR_P (current_function_decl)
      && !function_body && cxx_dialect < cxx14)
    pedwarn (input_location, OPT_Wpedantic,
	     "compound-statement in constexpr function");
  /* Begin the compound-statement.  */
  compound_stmt = begin_compound_stmt (in_try ? BCS_TRY_BLOCK : 0);
  /* If the next keyword is `__label__' we have a label declaration.  */
  while (cp_lexer_next_token_is_keyword (parser->lexer, RID_LABEL))
    cp_parser_label_declaration (parser);
  /* Parse an (optional) statement-seq.  */
  cp_parser_statement_seq_opt (parser, in_statement_expr);
  /* Finish the compound-statement.  */
  finish_compound_stmt (compound_stmt);
  /* Consume the `}'.  */
  cp_parser_require (parser, CPP_CLOSE_BRACE, RT_CLOSE_BRACE);

  return compound_stmt;
}
/* Parse an (optional) statement-seq.

   statement-seq:
     statement
     statement-seq [opt] statement

   IN_STATEMENT_EXPR is passed through to each statement (non-NULL
   inside a GNU statement-expression).  */

static void
cp_parser_statement_seq_opt (cp_parser* parser, tree in_statement_expr)
{
  /* Scan statements until there aren't any more.  */
  while (true)
    {
      cp_token *token = cp_lexer_peek_token (parser->lexer);

      /* If we are looking at a `}', then we have run out of
	 statements; the same is true if we have reached the end
	 of file, or have stumbled upon a stray '@end'.  */
      if (token->type == CPP_CLOSE_BRACE
	  || token->type == CPP_EOF
	  || token->type == CPP_PRAGMA_EOL
	  || (token->type == CPP_KEYWORD && token->keyword == RID_AT_END))
	break;
      
      /* If we are in a compound statement and find 'else' then
	 something went wrong.  If we are directly inside an if's
	 then-clause, stop so the caller can pick up the else;
	 otherwise consume it and diagnose the stray `else'.  */
      else if (token->type == CPP_KEYWORD && token->keyword == RID_ELSE)
	{
	  if (parser->in_statement & IN_IF_STMT) 
	    break;
	  else
	    {
	      token = cp_lexer_consume_token (parser->lexer);
	      error_at (token->location, "%<else%> without a previous %<if%>");
	    }
	}

      /* Parse the statement.  */
      cp_parser_statement (parser, in_statement_expr, true, NULL);
    }
}
/* Parse a selection-statement.

   selection-statement:
     if ( condition ) statement
     if ( condition ) statement else statement
     switch ( condition ) statement

   Returns the new IF_STMT or SWITCH_STMT.

   If IF_P is not NULL, *IF_P is set to indicate whether the statement
   is a (possibly labeled) if statement which is not enclosed in
   braces and has an else clause.  This is used to implement
   -Wparentheses.  */

static tree
cp_parser_selection_statement (cp_parser* parser, bool *if_p)
{
  cp_token *token;
  enum rid keyword;

  if (if_p != NULL)
    *if_p = false;

  /* Peek at the next token.  NOTE(review): the result is dereferenced
     without a NULL check; this appears to rely on the caller only
     dispatching here when the next token is `if' or `switch', so the
     cp_parser_require cannot fail -- confirm against callers.  */
  token = cp_parser_require (parser, CPP_KEYWORD, RT_SELECT);

  /* See what kind of keyword it is.  */
  keyword = token->keyword;
  switch (keyword)
    {
    case RID_IF:
    case RID_SWITCH:
      {
	tree statement;
	tree condition;

	/* Look for the `('.  */
	if (!cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN))
	  {
	    cp_parser_skip_to_end_of_statement (parser);
	    return error_mark_node;
	  }

	/* Begin the selection-statement.  */
	if (keyword == RID_IF)
	  statement = begin_if_stmt ();
	else
	  statement = begin_switch_stmt ();

	/* Parse the condition.  */
	condition = cp_parser_condition (parser);
	/* Look for the `)'.  */
	if (!cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN))
	  cp_parser_skip_to_closing_parenthesis (parser, true, false,
						 /*consume_paren=*/true);

	if (keyword == RID_IF)
	  {
	    bool nested_if;
	    unsigned char in_statement;

	    /* Add the condition.  */
	    finish_if_stmt_cond (condition, statement);

	    /* Parse the then-clause.  IN_IF_STMT lets a nested
	       statement-seq stop at a dangling `else'.  */
	    in_statement = parser->in_statement;
	    parser->in_statement |= IN_IF_STMT;
	    if (cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON))
	      {
		location_t loc = cp_lexer_peek_token (parser->lexer)->location;
		add_stmt (build_empty_stmt (loc));
		cp_lexer_consume_token (parser->lexer);
		if (!cp_lexer_next_token_is_keyword (parser->lexer, RID_ELSE))
		  warning_at (loc, OPT_Wempty_body, "suggest braces around "
			      "empty body in an %<if%> statement");
		nested_if = false;
	      }
	    else
	      cp_parser_implicitly_scoped_statement (parser, &nested_if);
	    parser->in_statement = in_statement;

	    finish_then_clause (statement);

	    /* If the next token is `else', parse the else-clause.  */
	    if (cp_lexer_next_token_is_keyword (parser->lexer,
						RID_ELSE))
	      {
		/* Consume the `else' keyword.  */
		cp_lexer_consume_token (parser->lexer);
		begin_else_clause (statement);
		/* Parse the else-clause.  */
		if (cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON))
		  {
		    location_t loc;
		    loc = cp_lexer_peek_token (parser->lexer)->location;
		    warning_at (loc,
				OPT_Wempty_body, "suggest braces around "
			        "empty body in an %<else%> statement");
		    add_stmt (build_empty_stmt (loc));
		    cp_lexer_consume_token (parser->lexer);
		  }
		else
		  cp_parser_implicitly_scoped_statement (parser, NULL);

		finish_else_clause (statement);

		/* If we are currently parsing a then-clause, then
		   IF_P will not be NULL.  We set it to true to
		   indicate that this if statement has an else clause.
		   This may trigger the Wparentheses warning below
		   when we get back up to the parent if statement.  */
		if (if_p != NULL)
		  *if_p = true;
	      }
	    else
	      {
		/* This if statement does not have an else clause.  If
		   NESTED_IF is true, then the then-clause is an if
		   statement which does have an else clause.  We warn
		   about the potential ambiguity.  */
		if (nested_if)
		  warning_at (EXPR_LOCATION (statement), OPT_Wparentheses,
			      "suggest explicit braces to avoid ambiguous"
			      " %<else%>");
	      }

	    /* Now we're all done with the if-statement.  */
	    finish_if_stmt (statement);
	  }
	else
	  {
	    bool in_switch_statement_p;
	    unsigned char in_statement;

	    /* Add the condition.  */
	    finish_switch_cond (condition, statement);

	    /* Parse the body of the switch-statement.  Save and set
	       the flags that let `case'/`default' labels and `break'
	       know they are inside a switch.  */
	    in_switch_statement_p = parser->in_switch_statement_p;
	    in_statement = parser->in_statement;
	    parser->in_switch_statement_p = true;
	    parser->in_statement |= IN_SWITCH_STMT;
	    cp_parser_implicitly_scoped_statement (parser, NULL);
	    parser->in_switch_statement_p = in_switch_statement_p;
	    parser->in_statement = in_statement;

	    /* Now we're all done with the switch-statement.  */
	    finish_switch_stmt (statement);
	  }

	return statement;
      }
      break;

    default:
      cp_parser_error (parser, "expected selection-statement");
      return error_mark_node;
    }
}
/* Parse a condition.

   condition:
     expression
     type-specifier-seq declarator = initializer-clause
     type-specifier-seq declarator braced-init-list

   GNU Extension:

   condition:
     type-specifier-seq declarator asm-specification [opt]
       attributes [opt] = assignment-expression

   Returns the expression that should be tested.  */

static tree
cp_parser_condition (cp_parser* parser)
{
  cp_decl_specifier_seq type_specifiers;
  const char *saved_message;
  int declares_class_or_enum;

  /* Try the declaration first.  */
  cp_parser_parse_tentatively (parser);
  /* New types are not allowed in the type-specifier-seq for a
     condition.  */
  saved_message = parser->type_definition_forbidden_message;
  parser->type_definition_forbidden_message
    = G_("types may not be defined in conditions");
  /* Parse the type-specifier-seq.  */
  cp_parser_decl_specifier_seq (parser,
                                CP_PARSER_FLAGS_ONLY_TYPE_OR_CONSTEXPR,
                                &type_specifiers,
                                &declares_class_or_enum);
  /* Restore the saved message.  */
  parser->type_definition_forbidden_message = saved_message;
  /* If all is well, we might be looking at a declaration.  */
  if (!cp_parser_error_occurred (parser))
    {
      tree decl;
      tree asm_specification;
      tree attributes;
      cp_declarator *declarator;
      tree initializer = NULL_TREE;

      /* Parse the declarator.  */
      declarator = cp_parser_declarator (parser, CP_PARSER_DECLARATOR_NAMED,
                                         /*ctor_dtor_or_conv_p=*/NULL,
                                         /*parenthesized_p=*/NULL,
                                         /*member_p=*/false,
                                         /*friend_p=*/false);
      /* Parse the attributes.  */
      attributes = cp_parser_attributes_opt (parser);
      /* Parse the asm-specification.  */
      asm_specification = cp_parser_asm_specification_opt (parser);
      /* If the next token is not an `=' or '{', then we might still be
         looking at an expression.  For example:

           if (A(a).x)

         looks like a decl-specifier-seq and a declarator -- but then
         there is no `=', so this is an expression.  */
      if (cp_lexer_next_token_is_not (parser->lexer, CPP_EQ)
          && cp_lexer_next_token_is_not (parser->lexer, CPP_OPEN_BRACE))
        cp_parser_simulate_error (parser);

      /* If we did see an `=' or '{', then we are looking at a declaration
         for sure.  */
      if (cp_parser_parse_definitely (parser))
        {
          tree pushed_scope;
          bool non_constant_p;
          /* LOOKUP_ONLYCONVERTING is a multi-bit lookup flag; storing it
             in a bool would collapse it to 1, so that cp_finish_decl
             below would receive the wrong flag value.  Use int.  */
          int flags = LOOKUP_ONLYCONVERTING;

          /* Create the declaration.  */
          decl = start_decl (declarator, &type_specifiers,
                             /*initialized_p=*/true,
                             attributes, /*prefix_attributes=*/NULL_TREE,
                             &pushed_scope);

          /* Parse the initializer.  */
          if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE))
            {
              initializer = cp_parser_braced_list (parser, &non_constant_p);
              CONSTRUCTOR_IS_DIRECT_INIT (initializer) = 1;
              /* Braced-init-lists perform direct-initialization, so drop
                 the copy-initialization flag.  */
              flags = 0;
            }
          else
            {
              /* Consume the `='.  */
              cp_parser_require (parser, CPP_EQ, RT_EQ);
              initializer = cp_parser_initializer_clause (parser, &non_constant_p);
            }
          if (BRACE_ENCLOSED_INITIALIZER_P (initializer))
            maybe_warn_cpp0x (CPP0X_INITIALIZER_LISTS);

          /* Process the initializer.  */
          cp_finish_decl (decl,
                          initializer, !non_constant_p,
                          asm_specification,
                          flags);

          if (pushed_scope)
            pop_scope (pushed_scope);

          return convert_from_reference (decl);
        }
    }
  /* If we didn't even get past the declarator successfully, we are
     definitely not looking at a declaration.  */
  else
    cp_parser_abort_tentative_parse (parser);

  /* Otherwise, we are looking at an expression.  */
  return cp_parser_expression (parser);
}
/* Parse a for-statement or range-for-statement up to, but not
   including, the closing ')'.  The init-statement parse decides which
   of the two flavors we have, and we dispatch accordingly.  */

static tree
cp_parser_for (cp_parser *parser, bool ivdep)
{
  tree init = NULL_TREE;
  tree decl = NULL_TREE;

  /* Open the scope covering the whole for-statement; INIT receives any
     initialization statement list.  */
  tree scope = begin_for_scope (&init);

  /* Parsing the init-statement also detects the `decl : expr' form.  */
  if (cp_parser_for_init_statement (parser, &decl))
    return cp_parser_range_for (parser, scope, init, decl, ivdep);

  return cp_parser_c_for (parser, scope, init, ivdep);
}
/* Parse the remainder of a classic (non-range) for-statement after the
   for-init-statement has been consumed: the optional condition, the
   `;', and the optional increment expression.  Stops before the
   closing `)'.  Returns the new FOR_STMT.  */

static tree
cp_parser_c_for (cp_parser *parser, tree scope, tree init, bool ivdep)
{
  /* Normal for loop */
  tree condition = NULL_TREE;
  tree expression = NULL_TREE;
  tree stmt;

  stmt = begin_for_stmt (scope, init);
  /* The for-init-statement has already been parsed in
     cp_parser_for_init_statement, so no work is needed here.  */
  finish_for_init_stmt (stmt);

  /* If there's a condition, process it.  */
  if (cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON))
    condition = cp_parser_condition (parser);
  else if (ivdep)
    {
      /* `#pragma GCC ivdep' asserts no loop-carried dependencies,
         which is meaningless for a loop without a condition.  */
      cp_parser_error (parser, "missing loop condition in loop with "
                       "%<GCC ivdep%> pragma");
      condition = error_mark_node;
    }
  finish_for_cond (condition, stmt, ivdep);
  /* Look for the `;'.  */
  cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON);

  /* If there's an expression, process it.  */
  if (cp_lexer_next_token_is_not (parser->lexer, CPP_CLOSE_PAREN))
    expression = cp_parser_expression (parser);
  finish_for_expr (expression, stmt);

  return stmt;
}
/* Tries to parse a range-based for-statement:

   range-based-for:
     decl-specifier-seq declarator : expression

   The decl-specifier-seq declarator and the `:' are already parsed by
   cp_parser_for_init_statement.  If processing_template_decl it returns a
   newly created RANGE_FOR_STMT; if not, it is converted to a
   regular FOR_STMT.  */

static tree
cp_parser_range_for (cp_parser *parser, tree scope, tree init, tree range_decl,
                     bool ivdep)
{
  tree stmt, range_expr;

  /* The range may be a braced-init-list as well as an expression.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE))
    {
      bool expr_non_constant_p;
      range_expr = cp_parser_braced_list (parser, &expr_non_constant_p);
    }
  else
    range_expr = cp_parser_expression (parser);

  /* If in template, STMT is converted to a normal for-statement
     at instantiation.  If not, it is done just ahead.  */
  if (processing_template_decl)
    {
      if (check_for_bare_parameter_packs (range_expr))
        range_expr = error_mark_node;
      stmt = begin_range_for_stmt (scope, init);
      if (ivdep)
        RANGE_FOR_IVDEP (stmt) = 1;
      finish_range_for_decl (stmt, range_decl, range_expr);
      if (!type_dependent_expression_p (range_expr)
          /* do_auto_deduction doesn't mess with template init-lists.  */
          && !BRACE_ENCLOSED_INITIALIZER_P (range_expr))
        do_range_for_auto_deduction (range_decl, range_expr);
    }
  else
    {
      stmt = begin_for_stmt (scope, init);
      stmt = cp_convert_range_for (stmt, range_decl, range_expr, ivdep);
    }
  return stmt;
}
/* Subroutine of cp_convert_range_for: build the artificial __for_range
   variable, whose type is deduced as if it had been declared
   `auto &&__range = RANGE_EXPR'.  */

static tree
build_range_temp (tree range_expr)
{
  /* Deduce the type `auto &&' would get when bound to RANGE_EXPR.  */
  tree deduced_type = cp_build_reference_type (make_auto (), true);
  deduced_type = do_auto_deduction (deduced_type, range_expr,
                                    type_uses_auto (deduced_type));

  /* Declare the __for_range temporary with the deduced type.  */
  tree temp = build_decl (input_location, VAR_DECL,
                          get_identifier ("__for_range"), deduced_type);
  TREE_USED (temp) = 1;
  DECL_ARTIFICIAL (temp) = 1;

  return temp;
}
/* Used by cp_parser_range_for in template context: we aren't going to
   do a full conversion yet, but we still need to resolve auto in the
   type of the for-range-declaration if present.  This is basically
   a shortcut version of cp_convert_range_for.  */

static void
do_range_for_auto_deduction (tree decl, tree range_expr)
{
  tree auto_node = type_uses_auto (TREE_TYPE (decl));
  if (auto_node)
    {
      tree begin_dummy, end_dummy, range_temp, iter_type, iter_decl;
      range_temp = convert_from_reference (build_range_temp (range_expr));
      iter_type = (cp_parser_perform_range_for_lookup
                   (range_temp, &begin_dummy, &end_dummy));
      if (iter_type)
        {
          /* Build a throw-away `*__begin' expression solely so the
             declared variable's auto can be deduced from its type.  */
          iter_decl = build_decl (input_location, VAR_DECL, NULL_TREE,
                                  iter_type);
          iter_decl = build_x_indirect_ref (input_location, iter_decl, RO_NULL,
                                            tf_warning_or_error);
          TREE_TYPE (decl) = do_auto_deduction (TREE_TYPE (decl),
                                                iter_decl, auto_node);
        }
    }
}
/* Converts a range-based for-statement into a normal
   for-statement, as per the definition.

      for (RANGE_DECL : RANGE_EXPR)
        BLOCK

   should be equivalent to:

      {
        auto &&__range = RANGE_EXPR;
        for (auto __begin = BEGIN_EXPR, __end = END_EXPR;
             __begin != __end;
             ++__begin)
          {
            RANGE_DECL = *__begin;
            BLOCK
          }
      }

   If RANGE_EXPR is an array:
        BEGIN_EXPR = __range
        END_EXPR = __range + ARRAY_SIZE(__range)
   Else if RANGE_EXPR has a member 'begin' or 'end':
        BEGIN_EXPR = __range.begin()
        END_EXPR = __range.end()
   Else:
        BEGIN_EXPR = begin(__range)
        END_EXPR = end(__range);

   If __range has a member 'begin' but not 'end', or vice versa, we must
   still use the second alternative (it will surely fail, however).
   When calling begin()/end() in the third alternative we must use
   argument dependent lookup, but always considering 'std' as an associated
   namespace.  */

tree
cp_convert_range_for (tree statement, tree range_decl, tree range_expr,
                      bool ivdep)
{
  tree begin, end;
  tree iter_type, begin_expr, end_expr;
  tree condition, expression;

  if (range_decl == error_mark_node || range_expr == error_mark_node)
    /* If an error happened previously do nothing or else a lot of
       unhelpful errors would be issued.  */
    begin_expr = end_expr = iter_type = error_mark_node;
  else
    {
      tree range_temp;

      if (TREE_CODE (range_expr) == VAR_DECL
          && array_of_runtime_bound_p (TREE_TYPE (range_expr)))
        /* Can't bind a reference to an array of runtime bound.  */
        range_temp = range_expr;
      else
        {
          /* Materialize `auto &&__range = RANGE_EXPR'.  */
          range_temp = build_range_temp (range_expr);
          pushdecl (range_temp);
          cp_finish_decl (range_temp, range_expr,
                          /*is_constant_init*/false, NULL_TREE,
                          LOOKUP_ONLYCONVERTING);
          range_temp = convert_from_reference (range_temp);
        }
      iter_type = cp_parser_perform_range_for_lookup (range_temp,
                                                      &begin_expr, &end_expr);
    }

  /* The new for initialization statement.  */
  begin = build_decl (input_location, VAR_DECL,
                      get_identifier ("__for_begin"), iter_type);
  TREE_USED (begin) = 1;
  DECL_ARTIFICIAL (begin) = 1;
  pushdecl (begin);
  cp_finish_decl (begin, begin_expr,
                  /*is_constant_init*/false, NULL_TREE,
                  LOOKUP_ONLYCONVERTING);

  end = build_decl (input_location, VAR_DECL,
                    get_identifier ("__for_end"), iter_type);
  TREE_USED (end) = 1;
  DECL_ARTIFICIAL (end) = 1;
  pushdecl (end);
  cp_finish_decl (end, end_expr,
                  /*is_constant_init*/false, NULL_TREE,
                  LOOKUP_ONLYCONVERTING);

  finish_for_init_stmt (statement);

  /* The new for condition: __begin != __end.  */
  condition = build_x_binary_op (input_location, NE_EXPR,
                                 begin, ERROR_MARK,
                                 end, ERROR_MARK,
                                 NULL, tf_warning_or_error);
  finish_for_cond (condition, statement, ivdep);

  /* The new increment expression: ++__begin.  */
  expression = finish_unary_op_expr (input_location,
                                     PREINCREMENT_EXPR, begin,
                                     tf_warning_or_error);
  finish_for_expr (expression, statement);

  /* The declaration is initialized with *__begin inside the loop body.  */
  cp_finish_decl (range_decl,
                  build_x_indirect_ref (input_location, begin, RO_NULL,
                                        tf_warning_or_error),
                  /*is_constant_init*/false, NULL_TREE,
                  LOOKUP_ONLYCONVERTING);

  return statement;
}
/* Solves BEGIN_EXPR and END_EXPR as described in cp_convert_range_for.
   We need to solve both at the same time because the method used
   depends on the existence of members begin or end.
   Returns the type deduced for the iterator expression.  */

static tree
cp_parser_perform_range_for_lookup (tree range, tree *begin, tree *end)
{
  if (error_operand_p (range))
    {
      *begin = *end = error_mark_node;
      return error_mark_node;
    }

  if (!COMPLETE_TYPE_P (complete_type (TREE_TYPE (range))))
    {
      error ("range-based %<for%> expression of type %qT "
             "has incomplete type", TREE_TYPE (range));
      *begin = *end = error_mark_node;
      return error_mark_node;
    }
  if (TREE_CODE (TREE_TYPE (range)) == ARRAY_TYPE)
    {
      /* If RANGE is an array, we will use pointer arithmetic.  */
      *begin = range;
      *end = build_binary_op (input_location, PLUS_EXPR,
                              range,
                              array_type_nelts_top (TREE_TYPE (range)),
                              0);
      return build_pointer_type (TREE_TYPE (TREE_TYPE (range)));
    }
  else
    {
      /* If it is not an array, we must do a bit of magic.  */
      tree id_begin, id_end;
      tree member_begin, member_end;

      *begin = *end = error_mark_node;

      id_begin = get_identifier ("begin");
      id_end = get_identifier ("end");
      member_begin = lookup_member (TREE_TYPE (range), id_begin,
                                    /*protect=*/2, /*want_type=*/false,
                                    tf_warning_or_error);
      member_end = lookup_member (TREE_TYPE (range), id_end,
                                  /*protect=*/2, /*want_type=*/false,
                                  tf_warning_or_error);

      /* If either member exists, members are used; having only one of
         the two is an error, diagnosed below.  */
      if (member_begin != NULL_TREE || member_end != NULL_TREE)
        {
          /* Use the member functions.  */
          if (member_begin != NULL_TREE)
            *begin = cp_parser_range_for_member_function (range, id_begin);
          else
            error ("range-based %<for%> expression of type %qT has an "
                   "%<end%> member but not a %<begin%>", TREE_TYPE (range));

          if (member_end != NULL_TREE)
            *end = cp_parser_range_for_member_function (range, id_end);
          else
            error ("range-based %<for%> expression of type %qT has a "
                   "%<begin%> member but not an %<end%>", TREE_TYPE (range));
        }
      else
        {
          /* Use global functions with ADL.  */
          vec<tree, va_gc> *vec;
          vec = make_tree_vector ();

          vec_safe_push (vec, range);

          member_begin = perform_koenig_lookup (id_begin, vec,
                                                tf_warning_or_error);
          *begin = finish_call_expr (member_begin, &vec, false, true,
                                     tf_warning_or_error);
          member_end = perform_koenig_lookup (id_end, vec,
                                              tf_warning_or_error);
          *end = finish_call_expr (member_end, &vec, false, true,
                                   tf_warning_or_error);

          release_tree_vector (vec);
        }

      /* Last common checks.  */
      if (*begin == error_mark_node || *end == error_mark_node)
        {
          /* If one of the expressions is an error do no more checks.  */
          *begin = *end = error_mark_node;
          return error_mark_node;
        }
      else if (type_dependent_expression_p (*begin)
               || type_dependent_expression_p (*end))
        /* Can happen, when, eg, in a template context, Koenig lookup
           can't resolve begin/end (c++/58503).  */
        return NULL_TREE;
      else
        {
          tree iter_type = cv_unqualified (TREE_TYPE (*begin));
          /* The unqualified type of the __begin and __end temporaries should
             be the same, as required by the multiple auto declaration.  */
          if (!same_type_p (iter_type, cv_unqualified (TREE_TYPE (*end))))
            error ("inconsistent begin/end types in range-based %<for%> "
                   "statement: %qT and %qT",
                   TREE_TYPE (*begin), TREE_TYPE (*end));
          return iter_type;
        }
    }
}
/* Helper function for cp_parser_perform_range_for_lookup.
   Builds a tree for RANGE.IDENTIFIER().  */

static tree
cp_parser_range_for_member_function (tree range, tree identifier)
{
  /* Look up RANGE.IDENTIFIER.  */
  tree fn = finish_class_member_access_expr (range, identifier,
                                             false, tf_warning_or_error);
  if (fn == error_mark_node)
    return error_mark_node;

  /* Build a call with an empty argument list.  */
  vec<tree, va_gc> *args = make_tree_vector ();
  tree call = finish_call_expr (fn, &args,
                                /*disallow_virtual=*/false,
                                /*koenig_p=*/false,
                                tf_warning_or_error);
  release_tree_vector (args);
  return call;
}
/* Parse an iteration-statement.

   iteration-statement:
     while ( condition ) statement
     do statement while ( expression ) ;
     for ( for-init-statement condition [opt] ; expression [opt] )
       statement

   Returns the new WHILE_STMT, DO_STMT, FOR_STMT or RANGE_FOR_STMT.  */

static tree
cp_parser_iteration_statement (cp_parser* parser, bool ivdep)
{
  cp_token *token;
  enum rid keyword;
  tree statement;
  unsigned char in_statement;

  /* Peek at the next token.  */
  /* NOTE(review): RT_INTERATION looks like a misspelling of
     "ITERATION", but it must match the enumerator's declaration
     elsewhere — confirm before renaming.  */
  token = cp_parser_require (parser, CPP_KEYWORD, RT_INTERATION);
  if (!token)
    return error_mark_node;

  /* Remember whether or not we are already within an iteration
     statement.  */
  in_statement = parser->in_statement;

  /* See what kind of keyword it is.  */
  keyword = token->keyword;
  switch (keyword)
    {
    case RID_WHILE:
      {
        tree condition;

        /* Begin the while-statement.  */
        statement = begin_while_stmt ();
        /* Look for the `('.  */
        cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN);
        /* Parse the condition.  */
        condition = cp_parser_condition (parser);
        finish_while_stmt_cond (condition, statement, ivdep);
        /* Look for the `)'.  */
        cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);
        /* Parse the dependent statement.  */
        parser->in_statement = IN_ITERATION_STMT;
        cp_parser_already_scoped_statement (parser);
        parser->in_statement = in_statement;
        /* We're done with the while-statement.  */
        finish_while_stmt (statement);
      }
      break;

    case RID_DO:
      {
        tree expression;

        /* Begin the do-statement.  */
        statement = begin_do_stmt ();
        /* Parse the body of the do-statement.  */
        parser->in_statement = IN_ITERATION_STMT;
        cp_parser_implicitly_scoped_statement (parser, NULL);
        parser->in_statement = in_statement;
        finish_do_body (statement);
        /* Look for the `while' keyword.  */
        cp_parser_require_keyword (parser, RID_WHILE, RT_WHILE);
        /* Look for the `('.  */
        cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN);
        /* Parse the expression.  */
        expression = cp_parser_expression (parser);
        /* We're done with the do-statement.  */
        finish_do_stmt (expression, statement, ivdep);
        /* Look for the `)'.  */
        cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);
        /* Look for the `;'.  */
        cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON);
      }
      break;

    case RID_FOR:
      {
        /* Look for the `('.  */
        cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN);

        /* Handles both classic and range-based for.  */
        statement = cp_parser_for (parser, ivdep);

        /* Look for the `)'.  */
        cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);

        /* Parse the body of the for-statement.  */
        parser->in_statement = IN_ITERATION_STMT;
        cp_parser_already_scoped_statement (parser);
        parser->in_statement = in_statement;

        /* We're done with the for-statement.  */
        finish_for_stmt (statement);
      }
      break;

    default:
      cp_parser_error (parser, "expected iteration-statement");
      statement = error_mark_node;
      break;
    }

  return statement;
}
/* Parse a for-init-statement or the declarator of a range-based-for.
   Returns true if a range-based-for declaration is seen.

   for-init-statement:
     expression-statement
     simple-declaration  */

static bool
cp_parser_for_init_statement (cp_parser* parser, tree *decl)
{
  /* If the next token is a `;', then we have an empty
     expression-statement.  Grammatically, this is also a
     simple-declaration, but an invalid one, because it does not
     declare anything.  Therefore, if we did not handle this case
     specially, we would issue an error message about an invalid
     declaration.  */
  if (cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON))
    {
      bool is_range_for = false;
      bool saved_colon_corrects_to_scope_p = parser->colon_corrects_to_scope_p;

      /* A bare `identifier :' introduces the terse range-for form
         proposed in N3994 (`for (id : init)').  */
      if (cp_lexer_next_token_is (parser->lexer, CPP_NAME)
          && cp_lexer_nth_token_is (parser->lexer, 2, CPP_COLON))
        {
          /* N3994 -- for (id : init) ... */
          if (cxx_dialect < cxx1z)
            pedwarn (input_location, 0, "range-based for loop without a "
                     "type-specifier only available with "
                     "-std=c++1z or -std=gnu++1z");
          tree name = cp_parser_identifier (parser);
          tree type = cp_build_reference_type (make_auto (), /*rval*/true);
          *decl = build_decl (input_location, VAR_DECL, name, type);
          pushdecl (*decl);
          /* Consume the `:'.  */
          cp_lexer_consume_token (parser->lexer);
          return true;
        }

      /* A colon is used in range-based for.  */
      parser->colon_corrects_to_scope_p = false;

      /* We're going to speculatively look for a declaration, falling back
         to an expression, if necessary.  */
      cp_parser_parse_tentatively (parser);
      /* Parse the declaration.  */
      cp_parser_simple_declaration (parser,
                                    /*function_definition_allowed_p=*/false,
                                    decl);
      parser->colon_corrects_to_scope_p = saved_colon_corrects_to_scope_p;
      if (cp_lexer_next_token_is (parser->lexer, CPP_COLON))
        {
          /* It is a range-for, consume the ':' */
          cp_lexer_consume_token (parser->lexer);
          is_range_for = true;
          if (cxx_dialect < cxx11)
            {
              pedwarn (cp_lexer_peek_token (parser->lexer)->location, 0,
                       "range-based %<for%> loops only available with "
                       "-std=c++11 or -std=gnu++11");
              *decl = error_mark_node;
            }
        }
      else
        /* The ';' is not consumed yet because we told
           cp_parser_simple_declaration not to.  */
        cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON);

      if (cp_parser_parse_definitely (parser))
        return is_range_for;
      /* If the tentative parse failed, then we shall need to look for an
         expression-statement.  */
    }
  /* If we are here, it is an expression-statement.  */
  cp_parser_expression_statement (parser, NULL_TREE);
  return false;
}
/* Parse a jump-statement.

   jump-statement:
     break ;
     continue ;
     return expression [opt] ;
     return braced-init-list ;
     goto identifier ;

   GNU extension:

   jump-statement:
     goto * expression ;

   Returns the new BREAK_STMT, CONTINUE_STMT, RETURN_EXPR, or GOTO_EXPR.  */

static tree
cp_parser_jump_statement (cp_parser* parser)
{
  tree statement = error_mark_node;
  cp_token *token;
  enum rid keyword;
  unsigned char in_statement;

  /* Peek at the next token.  */
  token = cp_parser_require (parser, CPP_KEYWORD, RT_JUMP);
  if (!token)
    return error_mark_node;

  /* See what kind of keyword it is.  */
  keyword = token->keyword;
  switch (keyword)
    {
    case RID_BREAK:
      in_statement = parser->in_statement & ~IN_IF_STMT;
      switch (in_statement)
        {
        case 0:
          error_at (token->location, "break statement not within loop or switch");
          break;
        /* The `default' label sits before the specific OMP/Cilk cases
           on purpose: IN_SWITCH_STMT is a flag bit that may be
           combined with other context bits, so any combined value
           falls through to here rather than matching a single case.  */
        default:
          gcc_assert ((in_statement & IN_SWITCH_STMT)
                      || in_statement == IN_ITERATION_STMT);
          statement = finish_break_stmt ();
          if (in_statement == IN_ITERATION_STMT)
            break_maybe_infinite_loop ();
          break;
        case IN_OMP_BLOCK:
          error_at (token->location, "invalid exit from OpenMP structured block");
          break;
        case IN_OMP_FOR:
          error_at (token->location, "break statement used with OpenMP for loop");
          break;
        case IN_CILK_SIMD_FOR:
          error_at (token->location, "break statement used with Cilk Plus for loop");
          break;
        }
      cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON);
      break;

    case RID_CONTINUE:
      /* `continue' is valid inside a switch, so mask IN_SWITCH_STMT
         off along with IN_IF_STMT before classifying the context.  */
      switch (parser->in_statement & ~(IN_SWITCH_STMT | IN_IF_STMT))
        {
        case 0:
          error_at (token->location, "continue statement not within a loop");
          break;
        case IN_CILK_SIMD_FOR:
          error_at (token->location,
                    "continue statement within %<#pragma simd%> loop body");
          /* Fall through.  */
        case IN_ITERATION_STMT:
        case IN_OMP_FOR:
          statement = finish_continue_stmt ();
          break;
        case IN_OMP_BLOCK:
          error_at (token->location, "invalid exit from OpenMP structured block");
          break;
        default:
          gcc_unreachable ();
        }
      cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON);
      break;

    case RID_RETURN:
      {
        tree expr;
        bool expr_non_constant_p;

        if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE))
          {
            cp_lexer_set_source_position (parser->lexer);
            maybe_warn_cpp0x (CPP0X_INITIALIZER_LISTS);
            expr = cp_parser_braced_list (parser, &expr_non_constant_p);
          }
        else if (cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON))
          expr = cp_parser_expression (parser);
        else
          /* If the next token is a `;', then there is no
             expression.  */
          expr = NULL_TREE;
        /* Build the return-statement.  */
        statement = finish_return_stmt (expr);
        /* Look for the final `;'.  */
        cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON);
      }
      break;

    case RID_GOTO:
      if (parser->in_function_body
          && DECL_DECLARED_CONSTEXPR_P (current_function_decl))
        {
          error ("%<goto%> in %<constexpr%> function");
          cp_function_chain->invalid_constexpr = true;
        }

      /* Create the goto-statement.  */
      if (cp_lexer_next_token_is (parser->lexer, CPP_MULT))
        {
          /* Issue a warning about this use of a GNU extension.  */
          pedwarn (token->location, OPT_Wpedantic, "ISO C++ forbids computed gotos");
          /* Consume the '*' token.  */
          cp_lexer_consume_token (parser->lexer);
          /* Parse the dependent expression.  */
          finish_goto_stmt (cp_parser_expression (parser));
        }
      else
        finish_goto_stmt (cp_parser_identifier (parser));
      /* Look for the final `;'.  */
      cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON);
      break;

    default:
      cp_parser_error (parser, "expected jump-statement");
      break;
    }

  return statement;
}
/* Parse a declaration-statement.

   declaration-statement:
     block-declaration  */

static void
cp_parser_declaration_statement (cp_parser* parser)
{
  /* Remember the current top of the declarator obstack so that every
     declarator built during the parse can be released afterwards.  */
  void *obstack_mark = obstack_alloc (&declarator_obstack, 0);

  /* Parse the block-declaration.  */
  cp_parser_block_declaration (parser, /*statement_p=*/true);

  /* Release all declarators allocated since the mark.  */
  obstack_free (&declarator_obstack, obstack_mark);
}
/* Some dependent statements (like `if (cond) statement'), are
   implicitly in their own scope.  In other words, if the statement is
   a single statement (as opposed to a compound-statement), it is
   none-the-less treated as if it were enclosed in braces.  Any
   declarations appearing in the dependent statement are out of scope
   after control passes that point.  This function parses a statement,
   but ensures that is in its own scope, even if it is not a
   compound-statement.

   If IF_P is not NULL, *IF_P is set to indicate whether the statement
   is a (possibly labeled) if statement which is not enclosed in
   braces and has an else clause.  This is used to implement
   -Wparentheses.

   Returns the new statement.  */

static tree
cp_parser_implicitly_scoped_statement (cp_parser* parser, bool *if_p)
{
  tree statement;

  if (if_p != NULL)
    *if_p = false;

  /* Mark if () ; with a special NOP_EXPR.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON))
    {
      location_t loc = cp_lexer_peek_token (parser->lexer)->location;
      cp_lexer_consume_token (parser->lexer);
      statement = add_stmt (build_empty_stmt (loc));
    }
  /* if a compound is opened, we simply parse the statement directly.  */
  else if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE))
    statement = cp_parser_compound_statement (parser, NULL, false, false);
  /* If the token is not a `{', then we must take special action.  */
  else
    {
      /* Create a compound-statement.  */
      statement = begin_compound_stmt (0);
      /* Parse the dependent-statement.  */
      cp_parser_statement (parser, NULL_TREE, false, if_p);
      /* Finish the dummy compound-statement.  */
      finish_compound_stmt (statement);
    }

  /* Return the statement.  */
  return statement;
}
/* For some dependent statements (like `while (cond) statement'), we
   have already created a scope.  Therefore, even if the dependent
   statement is a compound-statement, we do not want to create another
   scope.  */

static void
cp_parser_already_scoped_statement (cp_parser* parser)
{
  /* If the token is a `{', then we must take special action.  */
  if (cp_lexer_next_token_is_not (parser->lexer, CPP_OPEN_BRACE))
    cp_parser_statement (parser, NULL_TREE, false, NULL);
  else
    {
      /* Avoid calling cp_parser_compound_statement, so that we
         don't create a new scope.  Do everything else by hand.  */
      cp_parser_require (parser, CPP_OPEN_BRACE, RT_OPEN_BRACE);
      /* If the next keyword is `__label__' we have a label declaration.  */
      while (cp_lexer_next_token_is_keyword (parser->lexer, RID_LABEL))
        cp_parser_label_declaration (parser);
      /* Parse an (optional) statement-seq.  */
      cp_parser_statement_seq_opt (parser, NULL_TREE);
      cp_parser_require (parser, CPP_CLOSE_BRACE, RT_CLOSE_BRACE);
    }
}
/* Declarations [gram.dcl.dcl] */

/* Parse an optional declaration-sequence.

   declaration-seq:
     declaration
     declaration-seq declaration  */

static void
cp_parser_declaration_seq_opt (cp_parser* parser)
{
  while (true)
    {
      cp_token *token;

      token = cp_lexer_peek_token (parser->lexer);

      /* Stop at anything that ends the enclosing region.  */
      if (token->type == CPP_CLOSE_BRACE
          || token->type == CPP_EOF
          || token->type == CPP_PRAGMA_EOL)
        break;

      if (token->type == CPP_SEMICOLON)
        {
          /* A declaration consisting of a single semicolon is
             invalid.  Allow it unless we're being pedantic.  */
          cp_lexer_consume_token (parser->lexer);
          if (!in_system_header_at (input_location))
            pedwarn (input_location, OPT_Wpedantic, "extra %<;%>");
          continue;
        }

      /* If we're entering or exiting a region that's implicitly
         extern "C", modify the lang context appropriately.  */
      if (!parser->implicit_extern_c && token->implicit_extern_c)
        {
          push_lang_context (lang_name_c);
          parser->implicit_extern_c = true;
        }
      else if (parser->implicit_extern_c && !token->implicit_extern_c)
        {
          pop_lang_context ();
          parser->implicit_extern_c = false;
        }

      if (token->type == CPP_PRAGMA)
        {
          /* A top-level declaration can consist solely of a #pragma.
             A nested declaration cannot, so this is done here and not
             in cp_parser_declaration.  (A #pragma at block scope is
             handled in cp_parser_statement.)  */
          cp_parser_pragma (parser, pragma_external);
          continue;
        }

      /* Parse the declaration itself.  */
      cp_parser_declaration (parser);
    }
}
/* Parse a declaration.

   declaration:
     block-declaration
     function-definition
     template-declaration
     explicit-instantiation
     explicit-specialization
     linkage-specification
     namespace-definition

   GNU extension:

   declaration:
      __extension__ declaration  */

static void
cp_parser_declaration (cp_parser* parser)
{
  cp_token token1;
  cp_token token2;
  int saved_pedantic;
  void *p;
  tree attributes = NULL_TREE;

  /* Check for the `__extension__' keyword.  */
  if (cp_parser_extension_opt (parser, &saved_pedantic))
    {
      /* Parse the qualified declaration.  */
      cp_parser_declaration (parser);
      /* Restore the PEDANTIC flag.  */
      pedantic = saved_pedantic;

      return;
    }

  /* Try to figure out what kind of declaration is present by peeking
     at the first two tokens.  */
  token1 = *cp_lexer_peek_token (parser->lexer);

  if (token1.type != CPP_EOF)
    token2 = *cp_lexer_peek_nth_token (parser->lexer, 2);
  else
    {
      token2.type = CPP_EOF;
      token2.keyword = RID_MAX;
    }

  /* Get the high-water mark for the DECLARATOR_OBSTACK.  */
  p = obstack_alloc (&declarator_obstack, 0);

  /* If the next token is `extern' and the following token is a string
     literal, then we have a linkage specification.  */
  if (token1.keyword == RID_EXTERN
      && cp_parser_is_pure_string_literal (&token2))
    cp_parser_linkage_specification (parser);
  /* If the next token is `template', then we have either a template
     declaration, an explicit instantiation, or an explicit
     specialization.  */
  else if (token1.keyword == RID_TEMPLATE)
    {
      /* `template <>' indicates a template specialization.  */
      if (token2.type == CPP_LESS
          && cp_lexer_peek_nth_token (parser->lexer, 3)->type == CPP_GREATER)
        cp_parser_explicit_specialization (parser);
      /* `template <' indicates a template declaration.  */
      else if (token2.type == CPP_LESS)
        cp_parser_template_declaration (parser, /*member_p=*/false);
      /* Anything else must be an explicit instantiation.  */
      else
        cp_parser_explicit_instantiation (parser);
    }
  /* If the next token is `export', then we have a template
     declaration.  */
  else if (token1.keyword == RID_EXPORT)
    cp_parser_template_declaration (parser, /*member_p=*/false);
  /* If the next token is `extern', 'static' or 'inline' and the one
     after that is `template', we have a GNU extended explicit
     instantiation directive.  */
  else if (cp_parser_allow_gnu_extensions_p (parser)
           && (token1.keyword == RID_EXTERN
               || token1.keyword == RID_STATIC
               || token1.keyword == RID_INLINE)
           && token2.keyword == RID_TEMPLATE)
    cp_parser_explicit_instantiation (parser);
  /* If the next token is `namespace', check for a named or unnamed
     namespace definition.  */
  else if (token1.keyword == RID_NAMESPACE
           && (/* A named namespace definition.  */
               (token2.type == CPP_NAME
                && (cp_lexer_peek_nth_token (parser->lexer, 3)->type
                    != CPP_EQ))
               /* An unnamed namespace definition.  */
               || token2.type == CPP_OPEN_BRACE
               || token2.keyword == RID_ATTRIBUTE))
    cp_parser_namespace_definition (parser);
  /* An inline (associated) namespace definition.  */
  else if (token1.keyword == RID_INLINE
           && token2.keyword == RID_NAMESPACE)
    cp_parser_namespace_definition (parser);
  /* Objective-C++ declaration/definition.  */
  else if (c_dialect_objc () && OBJC_IS_AT_KEYWORD (token1.keyword))
    cp_parser_objc_declaration (parser, NULL_TREE);
  else if (c_dialect_objc ()
           && token1.keyword == RID_ATTRIBUTE
           && cp_parser_objc_valid_prefix_attributes (parser, &attributes))
    cp_parser_objc_declaration (parser, attributes);
  /* We must have either a block declaration or a function
     definition.  */
  else
    /* Try to parse a block-declaration, or a function-definition.  */
    cp_parser_block_declaration (parser, /*statement_p=*/false);

  /* Free any declarators allocated.  */
  obstack_free (&declarator_obstack, p);
}
/* Parse a block-declaration.
block-declaration:
simple-declaration
asm-definition
namespace-alias-definition
using-declaration
using-directive
GNU Extension:
block-declaration:
__extension__ block-declaration
C++0x Extension:
block-declaration:
static_assert-declaration
If STATEMENT_P is TRUE, then this block-declaration is occurring as
part of a declaration-statement. */
static void
cp_parser_block_declaration (cp_parser *parser,
                             bool statement_p)
{
  int saved_pedantic;

  /* A `__extension__' prefix qualifies the block-declaration that
     follows it.  */
  if (cp_parser_extension_opt (parser, &saved_pedantic))
    {
      /* Parse the qualified declaration, then restore the PEDANTIC
         flag that cp_parser_extension_opt cleared.  */
      cp_parser_block_declaration (parser, statement_p);
      pedantic = saved_pedantic;
      return;
    }

  /* Dispatch on the keyword that introduces the declaration.  */
  cp_token *token1 = cp_lexer_peek_token (parser->lexer);
  switch (token1->keyword)
    {
    case RID_ASM:
      /* An asm-definition.  */
      if (statement_p)
        cp_parser_commit_to_tentative_parse (parser);
      cp_parser_asm_definition (parser);
      break;

    case RID_NAMESPACE:
      /* A namespace-alias-definition.  */
      cp_parser_namespace_alias_definition (parser);
      break;

    case RID_USING:
      {
        /* A using-declaration, a using-directive, or (C++11) an
           alias-declaration.  */
        if (statement_p)
          cp_parser_commit_to_tentative_parse (parser);
        cp_token *token2 = cp_lexer_peek_nth_token (parser->lexer, 2);
        if (token2->keyword == RID_NAMESPACE)
          /* `using namespace ...' is a using-directive.  */
          cp_parser_using_directive (parser);
        else if (cxx_dialect >= cxx11
                 && token2->type == CPP_NAME
                 && ((cp_lexer_peek_nth_token (parser->lexer, 3)->type
                      == CPP_EQ)
                     || cp_nth_tokens_can_be_attribute_p (parser, 3)))
          /* `using identifier =' (possibly with attributes before the
             `=') is an alias-declaration.  */
          cp_parser_alias_declaration (parser);
        else
          cp_parser_using_declaration (parser,
                                       /*access_declaration_p=*/false);
      }
      break;

    case RID_LABEL:
      /* `__label__' is only valid at the beginning of a block.  */
      cp_lexer_consume_token (parser->lexer);
      error_at (token1->location, "%<__label__%> not at the beginning of a block");
      cp_parser_skip_to_end_of_statement (parser);
      /* If the next token is now a `;', consume it.  */
      if (cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON))
        cp_lexer_consume_token (parser->lexer);
      break;

    case RID_STATIC_ASSERT:
      /* A static_assert-declaration.  */
      cp_parser_static_assert (parser, /*member_p=*/false);
      break;

    default:
      /* Anything else must be a simple-declaration (which may be a
         function-definition when not in a statement context).  */
      cp_parser_simple_declaration (parser, !statement_p,
                                    /*maybe_range_for_decl*/NULL);
      break;
    }
}
/* Parse a simple-declaration.
simple-declaration:
decl-specifier-seq [opt] init-declarator-list [opt] ;
init-declarator-list:
init-declarator
init-declarator-list , init-declarator
If FUNCTION_DEFINITION_ALLOWED_P is TRUE, then we also recognize a
function-definition as a simple-declaration.
If MAYBE_RANGE_FOR_DECL is not NULL, the pointed tree will be set to the
parsed declaration if it is an uninitialized single declarator not followed
by a `;', or to error_mark_node otherwise. Either way, the trailing `;',
if present, will not be consumed. */
static void
cp_parser_simple_declaration (cp_parser* parser,
bool function_definition_allowed_p,
tree *maybe_range_for_decl)
{
cp_decl_specifier_seq decl_specifiers;
int declares_class_or_enum;
bool saw_declarator;
/* Locations of the first `,' between declarators and of the first
initializer; used only for the range-based `for' diagnostics near
the end of this function.  */
location_t comma_loc = UNKNOWN_LOCATION;
location_t init_loc = UNKNOWN_LOCATION;
if (maybe_range_for_decl)
*maybe_range_for_decl = NULL_TREE;
/* Defer access checks until we know what is being declared; the
checks for names appearing in the decl-specifier-seq should be
done as if we were in the scope of the thing being declared. */
push_deferring_access_checks (dk_deferred);
/* Parse the decl-specifier-seq. We have to keep track of whether
or not the decl-specifier-seq declares a named class or
enumeration type, since that is the only case in which the
init-declarator-list is allowed to be empty.
[dcl.dcl]
In a simple-declaration, the optional init-declarator-list can be
omitted only when declaring a class or enumeration, that is when
the decl-specifier-seq contains either a class-specifier, an
elaborated-type-specifier, or an enum-specifier. */
cp_parser_decl_specifier_seq (parser,
CP_PARSER_FLAGS_OPTIONAL,
&decl_specifiers,
&declares_class_or_enum);
/* We no longer need to defer access checks. */
stop_deferring_access_checks ();
/* In a block scope, a valid declaration must always have a
decl-specifier-seq. By not trying to parse declarators, we can
resolve the declaration/expression ambiguity more quickly. */
if (!function_definition_allowed_p
&& !decl_specifiers.any_specifiers_p)
{
cp_parser_error (parser, "expected declaration");
goto done;
}
/* If the next two tokens are both identifiers, the code is
erroneous. The usual cause of this situation is code like:
T t;
where "T" should name a type -- but does not. */
if (!decl_specifiers.any_type_specifiers_p
&& cp_parser_parse_and_diagnose_invalid_type_name (parser))
{
/* If parsing tentatively, we should commit; we really are
looking at a declaration. */
cp_parser_commit_to_tentative_parse (parser);
/* Give up. */
goto done;
}
/* If we have seen at least one decl-specifier, and the next token
is not a parenthesis, then we must be looking at a declaration.
(After "int (" we might be looking at a functional cast.) */
if (decl_specifiers.any_specifiers_p
&& cp_lexer_next_token_is_not (parser->lexer, CPP_OPEN_PAREN)
&& cp_lexer_next_token_is_not (parser->lexer, CPP_OPEN_BRACE)
&& !cp_parser_error_occurred (parser))
cp_parser_commit_to_tentative_parse (parser);
/* Keep going until we hit the `;' at the end of the simple
declaration. */
saw_declarator = false;
while (cp_lexer_next_token_is_not (parser->lexer,
CPP_SEMICOLON))
{
cp_token *token;
bool function_definition_p;
tree decl;
if (saw_declarator)
{
/* If we are processing next declarator, comma is expected */
token = cp_lexer_peek_token (parser->lexer);
gcc_assert (token->type == CPP_COMMA);
cp_lexer_consume_token (parser->lexer);
if (maybe_range_for_decl)
{
/* A comma rules out a range-based `for' declaration; record
the failure and remember where the comma was for the
diagnostic issued after the loop.  */
*maybe_range_for_decl = error_mark_node;
if (comma_loc == UNKNOWN_LOCATION)
comma_loc = token->location;
}
}
else
saw_declarator = true;
/* Parse the init-declarator. */
decl = cp_parser_init_declarator (parser, &decl_specifiers,
/*checks=*/NULL,
function_definition_allowed_p,
/*member_p=*/false,
declares_class_or_enum,
&function_definition_p,
maybe_range_for_decl,
&init_loc);
/* If an error occurred while parsing tentatively, exit quickly.
(That usually happens when in the body of a function; each
statement is treated as a declaration-statement until proven
otherwise.) */
if (cp_parser_error_occurred (parser))
goto done;
/* Handle function definitions specially. */
if (function_definition_p)
{
/* If the next token is a `,', then we are probably
processing something like:
void f() {}, *p;
which is erroneous. */
if (cp_lexer_next_token_is (parser->lexer, CPP_COMMA))
{
cp_token *token = cp_lexer_peek_token (parser->lexer);
error_at (token->location,
"mixing"
" declarations and function-definitions is forbidden");
}
/* Otherwise, we're done with the list of declarators. */
else
{
pop_deferring_access_checks ();
return;
}
}
if (maybe_range_for_decl && *maybe_range_for_decl == NULL_TREE)
*maybe_range_for_decl = decl;
/* The next token should be either a `,' or a `;'. */
token = cp_lexer_peek_token (parser->lexer);
/* If it's a `,', there are more declarators to come. */
if (token->type == CPP_COMMA)
/* will be consumed next time around */;
/* If it's a `;', we are done. */
else if (token->type == CPP_SEMICOLON || maybe_range_for_decl)
break;
/* Anything else is an error. */
else
{
/* If we have already issued an error message we don't need
to issue another one. */
if (decl != error_mark_node
|| cp_parser_uncommitted_to_tentative_parse_p (parser))
cp_parser_error (parser, "expected %<,%> or %<;%>");
/* Skip tokens until we reach the end of the statement. */
cp_parser_skip_to_end_of_statement (parser);
/* If the next token is now a `;', consume it. */
if (cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON))
cp_lexer_consume_token (parser->lexer);
goto done;
}
/* After the first time around, a function-definition is not
allowed -- even if it was OK at first. For example:
int i, f() {}
is not valid. */
function_definition_allowed_p = false;
}
/* Issue an error message if no declarators are present, and the
decl-specifier-seq does not itself declare a class or
enumeration: [dcl.dcl]/3. */
if (!saw_declarator)
{
if (cp_parser_declares_only_class_p (parser))
{
if (!declares_class_or_enum
&& decl_specifiers.type
&& OVERLOAD_TYPE_P (decl_specifiers.type))
/* Ensure an error is issued anyway when finish_decltype_type,
called via cp_parser_decl_specifier_seq, returns a class or
an enumeration (c++/51786). */
decl_specifiers.type = NULL_TREE;
shadow_tag (&decl_specifiers);
}
/* Perform any deferred access checks. */
perform_deferred_access_checks (tf_warning_or_error);
}
/* Consume the `;'. */
if (!maybe_range_for_decl)
cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON);
else if (cp_lexer_next_token_is (parser->lexer, CPP_COLON))
{
/* The declaration is followed by `:', so this really is a
range-based `for'; diagnose anything recorded above that is
not permitted there.  */
if (init_loc != UNKNOWN_LOCATION)
error_at (init_loc, "initializer in range-based %<for%> loop");
if (comma_loc != UNKNOWN_LOCATION)
error_at (comma_loc,
"multiple declarations in range-based %<for%> loop");
}
/* Common exit: undo the deferred access-check context pushed on
entry.  */
done:
pop_deferring_access_checks ();
}
/* Parse a decl-specifier-seq.
decl-specifier-seq:
decl-specifier-seq [opt] decl-specifier
decl-specifier attribute-specifier-seq [opt] (C++11)
decl-specifier:
storage-class-specifier
type-specifier
function-specifier
friend
typedef
GNU Extension:
decl-specifier:
attributes
Set *DECL_SPECS to a representation of the decl-specifier-seq.
The parser flags FLAGS is used to control type-specifier parsing.
*DECLARES_CLASS_OR_ENUM is set to the bitwise or of the following
flags:
1: one of the decl-specifiers is an elaborated-type-specifier
(i.e., a type declaration)
2: one of the decl-specifiers is an enum-specifier or a
class-specifier (i.e., a type definition)
*/
static void
cp_parser_decl_specifier_seq (cp_parser* parser,
cp_parser_flags flags,
cp_decl_specifier_seq *decl_specs,
int* declares_class_or_enum)
{
bool constructor_possible_p = !parser->in_declarator_p;
bool found_decl_spec = false;
cp_token *start_token = NULL;
/* The simple decl-specifier recognized in the current loop iteration,
or ds_last when none was.  */
cp_decl_spec ds;
/* Clear DECL_SPECS. */
clear_decl_specs (decl_specs);
/* Assume no class or enumeration type is declared. */
*declares_class_or_enum = 0;
/* Keep reading specifiers until there are no more to read. */
while (true)
{
bool constructor_p;
cp_token *token;
/* ds_last acts as a "nothing recognized yet" sentinel; the
set_and_check_decl_spec_loc call below only runs when a case
in the switch assigns a real specifier to DS.  */
ds = ds_last;
/* Peek at the next token. */
token = cp_lexer_peek_token (parser->lexer);
/* Save the first token of the decl spec list for error
reporting. */
if (!start_token)
start_token = token;
/* Handle attributes. */
if (cp_next_tokens_can_be_attribute_p (parser))
{
/* Parse the attributes. */
tree attrs = cp_parser_attributes_opt (parser);
/* In a sequence of declaration specifiers, c++11 attributes
appertain to the type that precede them. In that case
[dcl.spec]/1 says:
The attribute-specifier-seq affects the type only for
the declaration it appears in, not other declarations
involving the same type.
But for now let's force the user to position the
attribute either at the beginning of the declaration or
after the declarator-id, which would clearly mean that it
applies to the declarator. */
if (cxx11_attribute_p (attrs))
{
if (!found_decl_spec)
/* The c++11 attribute is at the beginning of the
declaration. It appertains to the entity being
declared. */;
else
{
if (decl_specs->type && CLASS_TYPE_P (decl_specs->type))
{
/* This is an attribute following a
class-specifier. */
if (decl_specs->type_definition_p)
warn_misplaced_attr_for_class_type (token->location,
decl_specs->type);
attrs = NULL_TREE;
}
else
{
decl_specs->std_attributes
= chainon (decl_specs->std_attributes,
attrs);
if (decl_specs->locations[ds_std_attribute] == 0)
decl_specs->locations[ds_std_attribute] = token->location;
}
continue;
}
}
decl_specs->attributes
= chainon (decl_specs->attributes,
attrs);
if (decl_specs->locations[ds_attribute] == 0)
decl_specs->locations[ds_attribute] = token->location;
continue;
}
/* Assume we will find a decl-specifier keyword. */
found_decl_spec = true;
/* If the next token is an appropriate keyword, we can simply
add it to the list. */
switch (token->keyword)
{
/* decl-specifier:
friend
constexpr */
case RID_FRIEND:
if (!at_class_scope_p ())
{
error_at (token->location, "%<friend%> used outside of class");
/* Remove the stray `friend' so later parsing does not
trip over it.  */
cp_lexer_purge_token (parser->lexer);
}
else
{
ds = ds_friend;
/* Consume the token. */
cp_lexer_consume_token (parser->lexer);
}
break;
case RID_CONSTEXPR:
ds = ds_constexpr;
cp_lexer_consume_token (parser->lexer);
break;
/* function-specifier:
inline
virtual
explicit */
case RID_INLINE:
case RID_VIRTUAL:
case RID_EXPLICIT:
cp_parser_function_specifier_opt (parser, decl_specs);
break;
/* decl-specifier:
typedef */
case RID_TYPEDEF:
ds = ds_typedef;
/* Consume the token. */
cp_lexer_consume_token (parser->lexer);
/* A constructor declarator cannot appear in a typedef. */
constructor_possible_p = false;
/* The "typedef" keyword can only occur in a declaration; we
may as well commit at this point. */
cp_parser_commit_to_tentative_parse (parser);
if (decl_specs->storage_class != sc_none)
decl_specs->conflicting_specifiers_p = true;
break;
/* storage-class-specifier:
auto
register
static
extern
mutable
GNU Extension:
thread */
case RID_AUTO:
if (cxx_dialect == cxx98)
{
/* Consume the token. */
cp_lexer_consume_token (parser->lexer);
/* Complain about `auto' as a storage specifier, if
we're complaining about C++0x compatibility. */
warning_at (token->location, OPT_Wc__0x_compat, "%<auto%>"
" changes meaning in C++11; please remove it");
/* Set the storage class anyway. */
cp_parser_set_storage_class (parser, decl_specs, RID_AUTO,
token);
}
else
/* C++0x auto type-specifier.  Not a decl-specifier here;
fall out to the type-specifier handling below.  */
found_decl_spec = false;
break;
case RID_REGISTER:
case RID_STATIC:
case RID_EXTERN:
case RID_MUTABLE:
/* Consume the token. */
cp_lexer_consume_token (parser->lexer);
cp_parser_set_storage_class (parser, decl_specs, token->keyword,
token);
break;
case RID_THREAD:
/* Consume the token. */
ds = ds_thread;
cp_lexer_consume_token (parser->lexer);
break;
default:
/* We did not yet find a decl-specifier yet. */
found_decl_spec = false;
break;
}
if (found_decl_spec
&& (flags & CP_PARSER_FLAGS_ONLY_TYPE_OR_CONSTEXPR)
&& token->keyword != RID_CONSTEXPR)
error ("decl-specifier invalid in condition");
if (ds != ds_last)
set_and_check_decl_spec_loc (decl_specs, ds, token);
/* Constructors are a special case. The `S' in `S()' is not a
decl-specifier; it is the beginning of the declarator. */
constructor_p
= (!found_decl_spec
&& constructor_possible_p
&& (cp_parser_constructor_declarator_p
(parser, decl_spec_seq_has_spec_p (decl_specs, ds_friend))));
/* If we don't have a DECL_SPEC yet, then we must be looking at
a type-specifier. */
if (!found_decl_spec && !constructor_p)
{
int decl_spec_declares_class_or_enum;
bool is_cv_qualifier;
tree type_spec;
type_spec
= cp_parser_type_specifier (parser, flags,
decl_specs,
/*is_declaration=*/true,
&decl_spec_declares_class_or_enum,
&is_cv_qualifier);
*declares_class_or_enum |= decl_spec_declares_class_or_enum;
/* If this type-specifier referenced a user-defined type
(a typedef, class-name, etc.), then we can't allow any
more such type-specifiers henceforth.
[dcl.spec]
The longest sequence of decl-specifiers that could
possibly be a type name is taken as the
decl-specifier-seq of a declaration. The sequence shall
be self-consistent as described below.
[dcl.type]
As a general rule, at most one type-specifier is allowed
in the complete decl-specifier-seq of a declaration. The
only exceptions are the following:
-- const or volatile can be combined with any other
type-specifier.
-- signed or unsigned can be combined with char, long,
short, or int.
-- ..
Example:
typedef char* Pc;
void g (const int Pc);
Here, Pc is *not* part of the decl-specifier seq; it's
the declarator. Therefore, once we see a type-specifier
(other than a cv-qualifier), we forbid any additional
user-defined types. We *do* still allow things like `int
int' to be considered a decl-specifier-seq, and issue the
error message later. */
if (type_spec && !is_cv_qualifier)
flags |= CP_PARSER_FLAGS_NO_USER_DEFINED_TYPES;
/* A constructor declarator cannot follow a type-specifier. */
if (type_spec)
{
constructor_possible_p = false;
found_decl_spec = true;
if (!is_cv_qualifier)
decl_specs->any_type_specifiers_p = true;
}
}
/* If we still do not have a DECL_SPEC, then there are no more
decl-specifiers. */
if (!found_decl_spec)
break;
decl_specs->any_specifiers_p = true;
/* After we see one decl-specifier, further decl-specifiers are
always optional. */
flags |= CP_PARSER_FLAGS_OPTIONAL;
}
/* Don't allow a friend specifier with a class definition. */
if (decl_spec_seq_has_spec_p (decl_specs, ds_friend)
&& (*declares_class_or_enum & 2))
error_at (decl_specs->locations[ds_friend],
"class definition may not be declared a friend");
}
/* Parse an (optional) storage-class-specifier.
storage-class-specifier:
auto
register
static
extern
mutable
GNU Extension:
storage-class-specifier:
thread
Returns an IDENTIFIER_NODE corresponding to the keyword used. */
static tree
cp_parser_storage_class_specifier_opt (cp_parser* parser)
{
  enum rid kw = cp_lexer_peek_token (parser->lexer)->keyword;

  /* In C++11 and later, `auto' is a type-specifier rather than a
     storage class, so leave it for the caller.  */
  if (kw == RID_AUTO && cxx_dialect != cxx98)
    return NULL_TREE;

  if (kw == RID_AUTO       /* only reached in C++98 */
      || kw == RID_REGISTER
      || kw == RID_STATIC
      || kw == RID_EXTERN
      || kw == RID_MUTABLE
      || kw == RID_THREAD)
    /* Consume the keyword and return its IDENTIFIER_NODE.  */
    return cp_lexer_consume_token (parser->lexer)->u.value;

  /* No storage-class-specifier present.  */
  return NULL_TREE;
}
/* Parse an (optional) function-specifier.
function-specifier:
inline
virtual
explicit
Returns an IDENTIFIER_NODE corresponding to the keyword used.
Updates DECL_SPECS, if it is non-NULL. */
static tree
cp_parser_function_specifier_opt (cp_parser* parser,
                                  cp_decl_specifier_seq *decl_specs)
{
  cp_token *tok = cp_lexer_peek_token (parser->lexer);

  if (tok->keyword == RID_INLINE)
    set_and_check_decl_spec_loc (decl_specs, ds_inline, tok);
  else if (tok->keyword == RID_VIRTUAL)
    {
      /* 14.5.2.3 [temp.mem]
         A member function template shall not be virtual.  */
      if (PROCESSING_REAL_TEMPLATE_DECL_P ())
        error_at (tok->location, "templates may not be %<virtual%>");
      else
        set_and_check_decl_spec_loc (decl_specs, ds_virtual, tok);
    }
  else if (tok->keyword == RID_EXPLICIT)
    set_and_check_decl_spec_loc (decl_specs, ds_explicit, tok);
  else
    /* Not a function-specifier; leave the token for the caller.  */
    return NULL_TREE;

  /* Consume the specifier and hand back its identifier.  */
  return cp_lexer_consume_token (parser->lexer)->u.value;
}
/* Parse a linkage-specification.
linkage-specification:
extern string-literal { declaration-seq [opt] }
extern string-literal declaration */
static void
cp_parser_linkage_specification (cp_parser* parser)
{
  /* `extern' must come first.  */
  cp_parser_require_keyword (parser, RID_EXTERN, RT_EXTERN);

  /* Then the language name as a string-literal.  */
  tree linkage = cp_parser_string_literal (parser, false, false);

  /* A wide-character string, or one with embedded NULs, cannot name a
     linkage: strlen would stop early and disagree with the length
     recorded in the tree node.  */
  if (strlen (TREE_STRING_POINTER (linkage))
      != (size_t) (TREE_STRING_LENGTH (linkage) - 1))
    {
      cp_parser_error (parser, "invalid linkage-specification");
      /* Assume C++ linkage.  */
      linkage = lang_name_cplusplus;
    }
  else
    linkage = get_identifier (TREE_STRING_POINTER (linkage));

  /* Switch to the specified linkage.  */
  push_lang_context (linkage);

  if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE))
    {
      /* Braced form: extern "..." { declaration-seq [opt] }.  */
      cp_ensure_no_omp_declare_simd (parser);
      /* Consume the `{' token.  */
      cp_lexer_consume_token (parser->lexer);
      /* Parse the declarations.  */
      cp_parser_declaration_seq_opt (parser);
      /* Look for the closing `}'.  */
      cp_parser_require (parser, CPP_CLOSE_BRACE, RT_CLOSE_BRACE);
    }
  else
    {
      /* Unbraced form: a single declaration.  */
      bool saved_flag = parser->in_unbraced_linkage_specification_p;
      parser->in_unbraced_linkage_specification_p = true;
      cp_parser_declaration (parser);
      parser->in_unbraced_linkage_specification_p = saved_flag;
    }

  /* Restore the previous linkage.  */
  pop_lang_context ();
}
/* Parse a static_assert-declaration.
static_assert-declaration:
static_assert ( constant-expression , string-literal ) ;
If MEMBER_P, this static_assert is a class member. */
static void
cp_parser_static_assert (cp_parser *parser, bool member_p)
{
  tree cond;
  tree msg;
  bool non_constant_dummy;

  /* Remember where the static assertion starts, for diagnostics.  */
  cp_token *first = cp_lexer_peek_token (parser->lexer);
  location_t assert_loc = first->location;

  /* The `static_assert' keyword is required.  */
  if (!cp_parser_require_keyword (parser, RID_STATIC_ASSERT,
                                  RT_STATIC_ASSERT))
    return;

  /* Nothing but a static assertion can start this way; commit any
     tentative parse.  */
  if (cp_parser_parsing_tentatively (parser))
    cp_parser_commit_to_tentative_parse (parser);

  /* `(' constant-expression `,' string-literal `)' `;'.  */
  cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN);
  /* Allow a non-constant expression here so finish_static_assert can
     give better diagnostics.  */
  cond = cp_parser_constant_expression (parser,
                                        /*allow_non_constant_p=*/true,
                                        /*non_constant_p=*/&non_constant_dummy);
  cp_parser_require (parser, CPP_COMMA, RT_COMMA);
  msg = cp_parser_string_literal (parser,
                                  /*translate=*/false,
                                  /*wide_ok=*/true);
  /* On a missing `)', recover by skipping to the matching paren.  */
  if (!cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN))
    cp_parser_skip_to_closing_parenthesis (parser,
                                           /*recovering=*/true,
                                           /*or_comma=*/false,
                                           /*consume_paren=*/true);
  /* A semicolon terminates the declaration.  */
  cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON);

  /* Either check the assertion now, or queue it for template
     instantiation time.  */
  finish_static_assert (cond, msg, assert_loc, member_p);
}
/* Parse the expression in decltype ( expression ).

We attempt three parses in turn: a bare id-expression, a class
member access, and finally an arbitrary expression.  On return,
ID_EXPRESSION_OR_MEMBER_ACCESS_P records whether one of the first
two cases matched, since decltype treats those operands specially
(yielding the declared type of the entity rather than the type of
the expression).  */
static tree
cp_parser_decltype_expr (cp_parser *parser,
bool &id_expression_or_member_access_p)
{
cp_token *id_expr_start_token;
tree expr;
/* First, try parsing an id-expression. */
id_expr_start_token = cp_lexer_peek_token (parser->lexer);
cp_parser_parse_tentatively (parser);
expr = cp_parser_id_expression (parser,
/*template_keyword_p=*/false,
/*check_dependency_p=*/true,
/*template_p=*/NULL,
/*declarator_p=*/false,
/*optional_p=*/false);
if (!cp_parser_error_occurred (parser) && expr != error_mark_node)
{
bool non_integral_constant_expression_p = false;
tree id_expression = expr;
cp_id_kind idk;
const char *error_msg;
if (identifier_p (expr))
/* Lookup the name we got back from the id-expression. */
expr = cp_parser_lookup_name_simple (parser, expr,
id_expr_start_token->location);
/* The id-expression only counts if it is the entire operand,
i.e. the next token is the closing `)'.  */
if (expr
&& expr != error_mark_node
&& TREE_CODE (expr) != TYPE_DECL
&& (TREE_CODE (expr) != BIT_NOT_EXPR
|| !TYPE_P (TREE_OPERAND (expr, 0)))
&& cp_lexer_peek_token (parser->lexer)->type == CPP_CLOSE_PAREN)
{
/* Complete lookup of the id-expression. */
expr = (finish_id_expression
(id_expression, expr, parser->scope, &idk,
/*integral_constant_expression_p=*/false,
/*allow_non_integral_constant_expression_p=*/true,
&non_integral_constant_expression_p,
/*template_p=*/false,
/*done=*/true,
/*address_p=*/false,
/*template_arg_p=*/false,
&error_msg,
id_expr_start_token->location));
if (expr == error_mark_node)
/* We found an id-expression, but it was something that we
should not have found. This is an error, not something
we can recover from, so note that we found an
id-expression and we'll recover as gracefully as
possible. */
id_expression_or_member_access_p = true;
}
if (expr
&& expr != error_mark_node
&& cp_lexer_peek_token (parser->lexer)->type == CPP_CLOSE_PAREN)
/* We have an id-expression. */
id_expression_or_member_access_p = true;
}
if (!id_expression_or_member_access_p)
{
/* Abort the id-expression parse. */
cp_parser_abort_tentative_parse (parser);
/* Parsing tentatively, again. */
cp_parser_parse_tentatively (parser);
/* Parse a class member access. */
expr = cp_parser_postfix_expression (parser, /*address_p=*/false,
/*cast_p=*/false, /*decltype*/true,
/*member_access_only_p=*/true, NULL);
/* As above, the member access must be the entire operand for it
to get the special decltype treatment.  */
if (expr
&& expr != error_mark_node
&& cp_lexer_peek_token (parser->lexer)->type == CPP_CLOSE_PAREN)
/* We have an id-expression. */
id_expression_or_member_access_p = true;
}
if (id_expression_or_member_access_p)
/* We have parsed the complete id-expression or member access. */
cp_parser_parse_definitely (parser);
else
{
/* Abort our attempt to parse an id-expression or member access
expression. */
cp_parser_abort_tentative_parse (parser);
/* Parse a full expression. */
expr = cp_parser_expression (parser, /*pidk=*/NULL, /*cast_p=*/false,
/*decltype_p=*/true);
}
return expr;
}
/* Parse a `decltype' type. Returns the type.
simple-type-specifier:
decltype ( expression )
C++14 proposal:
decltype ( auto )
On success, the consumed tokens are collapsed into a single
CPP_DECLTYPE token carrying the resulting type, so that re-parsing
the same tokens (e.g. after a tentative parse is rolled back) does
not repeat the work.  */
static tree
cp_parser_decltype (cp_parser *parser)
{
tree expr;
bool id_expression_or_member_access_p = false;
const char *saved_message;
bool saved_integral_constant_expression_p;
bool saved_non_integral_constant_expression_p;
bool saved_greater_than_is_operator_p;
cp_token *start_token = cp_lexer_peek_token (parser->lexer);
if (start_token->type == CPP_DECLTYPE)
{
/* Already parsed; the cached type is stored in the token.  */
cp_lexer_consume_token (parser->lexer);
return start_token->u.value;
}
/* Look for the `decltype' token. */
if (!cp_parser_require_keyword (parser, RID_DECLTYPE, RT_DECLTYPE))
return error_mark_node;
/* Parse the opening `('. */
if (!cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN))
return error_mark_node;
/* decltype (auto) */
if (cxx_dialect >= cxx14
&& cp_lexer_next_token_is_keyword (parser->lexer, RID_AUTO))
{
cp_lexer_consume_token (parser->lexer);
if (!cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN))
return error_mark_node;
expr = make_decltype_auto ();
AUTO_IS_DECLTYPE (expr) = true;
goto rewrite;
}
/* Types cannot be defined in a `decltype' expression. Save away the
old message. */
saved_message = parser->type_definition_forbidden_message;
/* And create the new one. */
parser->type_definition_forbidden_message
= G_("types may not be defined in %<decltype%> expressions");
/* The restrictions on constant-expressions do not apply inside
decltype expressions. */
saved_integral_constant_expression_p
= parser->integral_constant_expression_p;
saved_non_integral_constant_expression_p
= parser->non_integral_constant_expression_p;
parser->integral_constant_expression_p = false;
/* Within a parenthesized expression, a `>' token is always
the greater-than operator. */
saved_greater_than_is_operator_p
= parser->greater_than_is_operator_p;
parser->greater_than_is_operator_p = true;
/* Do not actually evaluate the expression. */
++cp_unevaluated_operand;
/* Do not warn about problems with the expression. */
++c_inhibit_evaluation_warnings;
expr = cp_parser_decltype_expr (parser, id_expression_or_member_access_p);
/* Go back to evaluating expressions. */
--cp_unevaluated_operand;
--c_inhibit_evaluation_warnings;
/* The `>' token might be the end of a template-id or
template-parameter-list now. */
parser->greater_than_is_operator_p
= saved_greater_than_is_operator_p;
/* Restore the old message and the integral constant expression
flags. */
parser->type_definition_forbidden_message = saved_message;
parser->integral_constant_expression_p
= saved_integral_constant_expression_p;
parser->non_integral_constant_expression_p
= saved_non_integral_constant_expression_p;
/* Parse to the closing `)'. */
if (!cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN))
{
cp_parser_skip_to_closing_parenthesis (parser, true, false,
/*consume_paren=*/true);
return error_mark_node;
}
expr = finish_decltype_type (expr, id_expression_or_member_access_p,
tf_warning_or_error);
rewrite:
/* Replace the decltype with a CPP_DECLTYPE so we don't need to parse
it again. */
start_token->type = CPP_DECLTYPE;
start_token->u.value = expr;
start_token->keyword = RID_MAX;
cp_lexer_purge_tokens_after (parser->lexer, start_token);
return expr;
}
/* Special member functions [gram.special] */
/* Parse a conversion-function-id.
conversion-function-id:
operator conversion-type-id
Returns an IDENTIFIER_NODE representing the operator. */
static tree
cp_parser_conversion_function_id (cp_parser* parser)
{
  tree conv_type;
  tree old_scope;
  tree old_qualifying_scope;
  tree old_object_scope;
  tree entered_scope = NULL_TREE;

  /* The `operator' keyword must come first.  */
  if (!cp_parser_require_keyword (parser, RID_OPERATOR, RT_OPERATOR))
    return error_mark_node;

  /* Parsing the conversion-type-id resets the parser's scope state,
     but we still need it afterwards to look up the conversion
     function, so stash it now.  */
  old_scope = parser->scope;
  old_qualifying_scope = parser->qualifying_scope;
  old_object_scope = parser->object_scope;

  /* Enter the scope of the class so that names declared inside it are
     visible while parsing the conversion-type-id.  For example:

       struct S {
         typedef int I;
         operator I();
       };
       S::operator I() { ... }

     Seeing that `I' is a type-name in the definition requires being
     in the scope of `S'.  */
  if (old_scope)
    entered_scope = push_scope (old_scope);

  /* Parse the conversion-type-id.  */
  conv_type = cp_parser_conversion_type_id (parser);

  /* Leave the class scope, if we entered one.  */
  if (entered_scope)
    pop_scope (entered_scope);

  /* Put the saved scope state back.  */
  parser->scope = old_scope;
  parser->qualifying_scope = old_qualifying_scope;
  parser->object_scope = old_object_scope;

  /* Bail out if the type was ill-formed.  */
  if (conv_type == error_mark_node)
    return error_mark_node;

  return mangle_conv_op_name_for_type (conv_type);
}
/* Parse a conversion-type-id:
conversion-type-id:
type-specifier-seq conversion-declarator [opt]
Returns the TYPE specified. */
static tree
cp_parser_conversion_type_id (cp_parser* parser)
{
  tree attrs;
  cp_decl_specifier_seq specs;
  cp_declarator *conv_declarator;
  tree result;
  const char *old_message;

  /* Parse any leading attributes.  */
  attrs = cp_parser_attributes_opt (parser);

  /* Types may not be defined inside a conversion-type-id.  */
  old_message = parser->type_definition_forbidden_message;
  parser->type_definition_forbidden_message
    = G_("types may not be defined in a conversion-type-id");

  /* Parse the type-specifier-seq.  */
  cp_parser_type_specifier_seq (parser, /*is_declaration=*/false,
                                /*is_trailing_return=*/false,
                                &specs);
  parser->type_definition_forbidden_message = old_message;

  /* Give up if the type-specifiers were invalid.  */
  if (specs.type == error_mark_node)
    return error_mark_node;

  /* Parse the (optional) conversion-declarator.  */
  conv_declarator = cp_parser_conversion_declarator_opt (parser);

  result = grokdeclarator (conv_declarator, &specs, TYPENAME,
                           /*initialized=*/0, &attrs);
  if (attrs)
    cplus_decl_attributes (&result, attrs, /*flags=*/0);

  /* Don't give this error when parsing tentatively.  This happens to
     work because we always parse this definitively once.  */
  if (! cp_parser_uncommitted_to_tentative_parse_p (parser)
      && type_uses_auto (result))
    {
      if (cxx_dialect < cxx14)
        {
          error ("invalid use of %<auto%> in conversion operator");
          return error_mark_node;
        }
      else if (template_parm_scope_p ())
        warning (0, "use of %<auto%> in member template "
                 "conversion operator can never be deduced");
    }

  return result;
}
/* Parse an (optional) conversion-declarator.
conversion-declarator:
ptr-operator conversion-declarator [opt]
*/
static cp_declarator *
cp_parser_conversion_declarator_opt (cp_parser* parser)
{
  enum tree_code op_code;
  tree class_type;
  tree std_attributes = NULL_TREE;
  cp_cv_quals quals;

  /* A conversion-declarator is a sequence of ptr-operators; we do not
     know whether one is present, so try tentatively.  */
  cp_parser_parse_tentatively (parser);
  op_code = cp_parser_ptr_operator (parser, &class_type, &quals,
                                    &std_attributes);

  /* No ptr-operator: the (optional) conversion-declarator is absent.  */
  if (!cp_parser_parse_definitely (parser))
    return NULL;

  /* Recurse for any remaining ptr-operators, then wrap the result in
     this level's indirection.  */
  cp_declarator *inner = cp_parser_conversion_declarator_opt (parser);
  return cp_parser_make_indirect_declarator
    (op_code, class_type, quals, inner, std_attributes);
}
/* Parse an (optional) ctor-initializer.

   ctor-initializer:
     : mem-initializer-list

   Returns TRUE iff the ctor-initializer was actually present.  */

static bool
cp_parser_ctor_initializer_opt (cp_parser* parser)
{
  if (cp_lexer_next_token_is (parser->lexer, CPP_COLON))
    {
      /* Consume the `:' and then parse the mem-initializer-list.  */
      cp_lexer_consume_token (parser->lexer);
      cp_parser_mem_initializer_list (parser);
      return true;
    }

  /* No ctor-initializer; perform default initialization of any bases
     and members.  */
  if (DECL_CONSTRUCTOR_P (current_function_decl))
    finish_mem_initializers (NULL_TREE);
  return false;
}
/* Parse a mem-initializer-list.

   mem-initializer-list:
     mem-initializer ... [opt]
     mem-initializer ... [opt] , mem-initializer-list

   Collects the initializers into a TREE_LIST (built in reverse source
   order via TREE_CHAIN) and hands them to finish_mem_initializers.
   Also diagnoses C++11 constructor delegation mixed with any other
   mem-initializer.  */

static void
cp_parser_mem_initializer_list (cp_parser* parser)
{
  tree mem_initializer_list = NULL_TREE;
  /* Set to the delegating initializer once one is seen;
     error_mark_node means "no delegation seen yet".  */
  tree target_ctor = error_mark_node;
  cp_token *token = cp_lexer_peek_token (parser->lexer);

  /* Let the semantic analysis code know that we are starting the
     mem-initializer-list.  */
  if (!DECL_CONSTRUCTOR_P (current_function_decl))
    error_at (token->location,
	      "only constructors take member initializers");

  /* Loop through the list.  */
  while (true)
    {
      tree mem_initializer;

      token = cp_lexer_peek_token (parser->lexer);
      /* Parse the mem-initializer.  */
      mem_initializer = cp_parser_mem_initializer (parser);
      /* If the next token is a `...', we're expanding member initializers. */
      if (cp_lexer_next_token_is (parser->lexer, CPP_ELLIPSIS))
	{
	  /* Consume the `...'. */
	  cp_lexer_consume_token (parser->lexer);

	  /* The TREE_PURPOSE must be a _TYPE, because base-specifiers
	     can be expanded but members cannot. */
	  if (mem_initializer != error_mark_node
	      && !TYPE_P (TREE_PURPOSE (mem_initializer)))
	    {
	      error_at (token->location,
			"cannot expand initializer for member %<%D%>",
			TREE_PURPOSE (mem_initializer));
	      mem_initializer = error_mark_node;
	    }

	  /* Construct the pack expansion type. */
	  if (mem_initializer != error_mark_node)
	    mem_initializer = make_pack_expansion (mem_initializer);
	}

      /* A delegating constructor must have no other mem-initializers;
	 reject anything that follows the delegation.  */
      if (target_ctor != error_mark_node
	  && mem_initializer != error_mark_node)
	{
	  error ("mem-initializer for %qD follows constructor delegation",
		 TREE_PURPOSE (mem_initializer))
	    ;
	  mem_initializer = error_mark_node;
	}

      /* Look for a target constructor. */
      if (mem_initializer != error_mark_node
	  && CLASS_TYPE_P (TREE_PURPOSE (mem_initializer))
	  && same_type_p (TREE_PURPOSE (mem_initializer), current_class_type))
	{
	  maybe_warn_cpp0x (CPP0X_DELEGATING_CTORS);
	  /* Conversely, reject a delegation that follows other
	     mem-initializers.  */
	  if (mem_initializer_list)
	    {
	      error ("constructor delegation follows mem-initializer for %qD",
		     TREE_PURPOSE (mem_initializer_list));
	      mem_initializer = error_mark_node;
	    }
	  target_ctor = mem_initializer;
	}

      /* Add it to the list, unless it was erroneous. */
      if (mem_initializer != error_mark_node)
	{
	  TREE_CHAIN (mem_initializer) = mem_initializer_list;
	  mem_initializer_list = mem_initializer;
	}

      /* If the next token is not a `,', we're done. */
      if (cp_lexer_next_token_is_not (parser->lexer, CPP_COMMA))
	break;
      /* Consume the `,' token. */
      cp_lexer_consume_token (parser->lexer);
    }

  /* Perform semantic analysis.  */
  if (DECL_CONSTRUCTOR_P (current_function_decl))
    finish_mem_initializers (mem_initializer_list);
}
/* Parse a mem-initializer.

   mem-initializer:
     mem-initializer-id ( expression-list [opt] )
     mem-initializer-id braced-init-list

   GNU extension:

   mem-initializer:
     ( expression-list [opt] )

   Returns a TREE_LIST.  The TREE_PURPOSE is the TYPE (for a base
   class) or FIELD_DECL (for a non-static data member) to initialize;
   the TREE_VALUE is the expression-list.  An empty initialization
   list is represented by void_list_node.  */

static tree
cp_parser_mem_initializer (cp_parser* parser)
{
  tree mem_initializer_id;
  tree expression_list;
  tree member;
  cp_token *token = cp_lexer_peek_token (parser->lexer);

  /* Find out what is being initialized.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_PAREN))
    {
      /* GNU extension: a bare parenthesized list; diagnosed as a
	 permerror but accepted.  */
      permerror (token->location,
		 "anachronistic old-style base class initializer");
      mem_initializer_id = NULL_TREE;
    }
  else
    {
      mem_initializer_id = cp_parser_mem_initializer_id (parser);
      if (mem_initializer_id == error_mark_node)
	return mem_initializer_id;
    }
  member = expand_member_init (mem_initializer_id);
  /* Flag to the rest of the front end that we are inside a base
     initializer (MEMBER non-NULL but not a DECL).  */
  if (member && !DECL_P (member))
    in_base_initializer = 1;

  if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE))
    {
      bool expr_non_constant_p;
      cp_lexer_set_source_position (parser->lexer);
      maybe_warn_cpp0x (CPP0X_INITIALIZER_LISTS);
      expression_list = cp_parser_braced_list (parser, &expr_non_constant_p);
      CONSTRUCTOR_IS_DIRECT_INIT (expression_list) = 1;
      expression_list = build_tree_list (NULL_TREE, expression_list);
    }
  else
    {
      vec<tree, va_gc> *vec;
      vec = cp_parser_parenthesized_expression_list (parser, non_attr,
						     /*cast_p=*/false,
						     /*allow_expansion_p=*/true,
						     /*non_constant_p=*/NULL);
      if (vec == NULL)
	return error_mark_node;
      expression_list = build_tree_list_vec (vec);
      release_tree_vector (vec);
    }

  if (expression_list == error_mark_node)
    return error_mark_node;
  if (!expression_list)
    /* NOTE(review): the function comment above says an empty list is
       represented by void_list_node, but the code uses void_type_node;
       confirm which representation downstream consumers expect.  */
    expression_list = void_type_node;

  in_base_initializer = 0;

  return member ? build_tree_list (member, expression_list) : error_mark_node;
}
/* Parse a mem-initializer-id.

   mem-initializer-id:
     :: [opt] nested-name-specifier [opt] class-name
     identifier

   Returns a TYPE indicating the class to be initialized for the first
   production.  Returns an IDENTIFIER_NODE indicating the data member
   to be initialized for the second production.  */

static tree
cp_parser_mem_initializer_id (cp_parser* parser)
{
  bool global_scope_p;
  bool nested_name_specifier_p;
  bool template_p = false;
  tree id;
  cp_token *token = cp_lexer_peek_token (parser->lexer);

  /* `typename' is not allowed in this context ([temp.res]); consume
     the stray keyword after diagnosing it, for error recovery.  */
  if (cp_lexer_next_token_is_keyword (parser->lexer, RID_TYPENAME))
    {
      error_at (token->location,
		"keyword %<typename%> not allowed in this context (a qualified "
		"member initializer is implicitly a type)");
      cp_lexer_consume_token (parser->lexer);
    }
  /* Look for the optional `::' operator.  */
  global_scope_p
    = (cp_parser_global_scope_opt (parser,
				   /*current_scope_valid_p=*/false)
       != NULL_TREE);
  /* Look for the optional nested-name-specifier.  The simplest way to
     implement:

       [temp.res]

       The keyword `typename' is not permitted in a base-specifier or
       mem-initializer; in these contexts a qualified name that
       depends on a template-parameter is implicitly assumed to be a
       type name.

     is to assume that we have seen the `typename' keyword at this
     point.  */
  nested_name_specifier_p
    = (cp_parser_nested_name_specifier_opt (parser,
					    /*typename_keyword_p=*/true,
					    /*check_dependency_p=*/true,
					    /*type_p=*/true,
					    /*is_declaration=*/true)
       != NULL_TREE);
  if (nested_name_specifier_p)
    template_p = cp_parser_optional_template_keyword (parser);
  /* If there is a `::' operator or a nested-name-specifier, then we
     are definitely looking for a class-name.  */
  if (global_scope_p || nested_name_specifier_p)
    return cp_parser_class_name (parser,
				 /*typename_keyword_p=*/true,
				 /*template_keyword_p=*/template_p,
				 typename_type,
				 /*check_dependency_p=*/true,
				 /*class_head_p=*/false,
				 /*is_declaration=*/true);
  /* Otherwise, we could also be looking for an ordinary identifier.  */
  cp_parser_parse_tentatively (parser);
  /* Try a class-name.  */
  id = cp_parser_class_name (parser,
			     /*typename_keyword_p=*/true,
			     /*template_keyword_p=*/false,
			     none_type,
			     /*check_dependency_p=*/true,
			     /*class_head_p=*/false,
			     /*is_declaration=*/true);
  /* If we found one, we're done.  */
  if (cp_parser_parse_definitely (parser))
    return id;
  /* Otherwise, look for an ordinary identifier (a non-static data
     member name).  */
  return cp_parser_identifier (parser);
}
/* Overloading [gram.over] */

/* Parse an operator-function-id.

   operator-function-id:
     operator operator

   Returns an IDENTIFIER_NODE for the operator which is a
   human-readable spelling of the identifier, e.g., `operator +'.  */

static tree
cp_parser_operator_function_id (cp_parser* parser)
{
  /* The `operator' keyword must come first; then the operator name
     itself.  */
  return (cp_parser_require_keyword (parser, RID_OPERATOR, RT_OPERATOR)
	  ? cp_parser_operator (parser)
	  : error_mark_node);
}
/* Return an identifier node for a user-defined literal operator.
   The suffix identifier is chained to the operator name identifier.
   NAME is the suffix identifier's spelling; the resulting name is
   built with UDLIT_OP_ANSI_FORMAT (prefix + suffix).  */

static tree
cp_literal_operator_id (const char* name)
{
  tree identifier;
  char *buffer = XNEWVEC (char, strlen (UDLIT_OP_ANSI_PREFIX)
			  + strlen (name) + 10);
  sprintf (buffer, UDLIT_OP_ANSI_FORMAT, name);
  identifier = get_identifier (buffer);
  /* get_identifier copies the string into the identifier table, so
     the scratch buffer must be released here (it previously leaked).  */
  XDELETEVEC (buffer);
  return identifier;
}
/* Parse an operator.

   operator:
     new delete new[] delete[] + - * / % ^ & | ~ ! = < >
     += -= *= /= %= ^= &= |= << >> >>= <<= == != <= >= &&
     || ++ -- , ->* -> () []

   GNU Extensions:

   operator:
     <? >? <?= >?=

   NOTE(review): the <? >? extensions listed above have no cases in
   the switch below -- presumably removed from the implementation;
   confirm and update the comment.

   Returns an IDENTIFIER_NODE for the operator which is a
   human-readable spelling of the identifier, e.g., `operator +'.  */

static tree
cp_parser_operator (cp_parser* parser)
{
  tree id = NULL_TREE;
  cp_token *token;
  bool utf8 = false;

  /* Peek at the next token.  */
  token = cp_lexer_peek_token (parser->lexer);
  /* Figure out which operator we have.  */
  switch (token->type)
    {
    case CPP_KEYWORD:
      {
	enum tree_code op;

	/* The keyword should be either `new' or `delete'.  */
	if (token->keyword == RID_NEW)
	  op = NEW_EXPR;
	else if (token->keyword == RID_DELETE)
	  op = DELETE_EXPR;
	else
	  break;

	/* Consume the `new' or `delete' token.  */
	cp_lexer_consume_token (parser->lexer);

	/* Peek at the next token.  */
	token = cp_lexer_peek_token (parser->lexer);
	/* If it's a `[' token then this is the array variant of the
	   operator.  */
	if (token->type == CPP_OPEN_SQUARE)
	  {
	    /* Consume the `[' token.  */
	    cp_lexer_consume_token (parser->lexer);
	    /* Look for the `]' token.  */
	    cp_parser_require (parser, CPP_CLOSE_SQUARE, RT_CLOSE_SQUARE);
	    id = ansi_opname (op == NEW_EXPR
			      ? VEC_NEW_EXPR : VEC_DELETE_EXPR);
	  }
	/* Otherwise, we have the non-array variant.  */
	else
	  id = ansi_opname (op);

	/* The tokens were consumed above, so return directly rather
	   than falling through to the common consume at the end.  */
	return id;
      }

    case CPP_PLUS:
      id = ansi_opname (PLUS_EXPR);
      break;

    case CPP_MINUS:
      id = ansi_opname (MINUS_EXPR);
      break;

    case CPP_MULT:
      id = ansi_opname (MULT_EXPR);
      break;

    case CPP_DIV:
      id = ansi_opname (TRUNC_DIV_EXPR);
      break;

    case CPP_MOD:
      id = ansi_opname (TRUNC_MOD_EXPR);
      break;

    case CPP_XOR:
      id = ansi_opname (BIT_XOR_EXPR);
      break;

    case CPP_AND:
      id = ansi_opname (BIT_AND_EXPR);
      break;

    case CPP_OR:
      id = ansi_opname (BIT_IOR_EXPR);
      break;

    case CPP_COMPL:
      id = ansi_opname (BIT_NOT_EXPR);
      break;

    case CPP_NOT:
      id = ansi_opname (TRUTH_NOT_EXPR);
      break;

    case CPP_EQ:
      id = ansi_assopname (NOP_EXPR);
      break;

    case CPP_LESS:
      id = ansi_opname (LT_EXPR);
      break;

    case CPP_GREATER:
      id = ansi_opname (GT_EXPR);
      break;

    case CPP_PLUS_EQ:
      id = ansi_assopname (PLUS_EXPR);
      break;

    case CPP_MINUS_EQ:
      id = ansi_assopname (MINUS_EXPR);
      break;

    case CPP_MULT_EQ:
      id = ansi_assopname (MULT_EXPR);
      break;

    case CPP_DIV_EQ:
      id = ansi_assopname (TRUNC_DIV_EXPR);
      break;

    case CPP_MOD_EQ:
      id = ansi_assopname (TRUNC_MOD_EXPR);
      break;

    case CPP_XOR_EQ:
      id = ansi_assopname (BIT_XOR_EXPR);
      break;

    case CPP_AND_EQ:
      id = ansi_assopname (BIT_AND_EXPR);
      break;

    case CPP_OR_EQ:
      id = ansi_assopname (BIT_IOR_EXPR);
      break;

    case CPP_LSHIFT:
      id = ansi_opname (LSHIFT_EXPR);
      break;

    case CPP_RSHIFT:
      id = ansi_opname (RSHIFT_EXPR);
      break;

    case CPP_LSHIFT_EQ:
      id = ansi_assopname (LSHIFT_EXPR);
      break;

    case CPP_RSHIFT_EQ:
      id = ansi_assopname (RSHIFT_EXPR);
      break;

    case CPP_EQ_EQ:
      id = ansi_opname (EQ_EXPR);
      break;

    case CPP_NOT_EQ:
      id = ansi_opname (NE_EXPR);
      break;

    case CPP_LESS_EQ:
      id = ansi_opname (LE_EXPR);
      break;

    case CPP_GREATER_EQ:
      id = ansi_opname (GE_EXPR);
      break;

    case CPP_AND_AND:
      id = ansi_opname (TRUTH_ANDIF_EXPR);
      break;

    case CPP_OR_OR:
      id = ansi_opname (TRUTH_ORIF_EXPR);
      break;

    /* Prefix and postfix forms share one operator name; these codes
       are just the canonical spellings used for `++' and `--'.  */
    case CPP_PLUS_PLUS:
      id = ansi_opname (POSTINCREMENT_EXPR);
      break;

    case CPP_MINUS_MINUS:
      id = ansi_opname (PREDECREMENT_EXPR);
      break;

    case CPP_COMMA:
      id = ansi_opname (COMPOUND_EXPR);
      break;

    case CPP_DEREF_STAR:
      id = ansi_opname (MEMBER_REF);
      break;

    case CPP_DEREF:
      id = ansi_opname (COMPONENT_REF);
      break;

    case CPP_OPEN_PAREN:
      /* Consume the `('.  */
      cp_lexer_consume_token (parser->lexer);
      /* Look for the matching `)'.  */
      cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);
      return ansi_opname (CALL_EXPR);

    case CPP_OPEN_SQUARE:
      /* Consume the `['.  */
      cp_lexer_consume_token (parser->lexer);
      /* Look for the matching `]'.  */
      cp_parser_require (parser, CPP_CLOSE_SQUARE, RT_CLOSE_SQUARE);
      return ansi_opname (ARRAY_REF);

    case CPP_UTF8STRING:
    case CPP_UTF8STRING_USERDEF:
      utf8 = true;
      /* Fall through: UTF-8 strings are handled like the other string
	 kinds, except that UTF8 marks the encoding prefix invalid
	 for a literal operator (diagnosed below).  */
    case CPP_STRING:
    case CPP_WSTRING:
    case CPP_STRING16:
    case CPP_STRING32:
    case CPP_STRING_USERDEF:
    case CPP_WSTRING_USERDEF:
    case CPP_STRING16_USERDEF:
    case CPP_STRING32_USERDEF:
      {
	/* A user-defined literal operator: operator "" suffix.  */
	tree str, string_tree;
	int sz, len;

	if (cxx_dialect == cxx98)
	  maybe_warn_cpp0x (CPP0X_USER_DEFINED_LITERALS);
	/* Consume the string.  */
	str = cp_parser_string_literal (parser, /*translate=*/true,
				      /*wide_ok=*/true, /*lookup_udlit=*/false);
	if (str == error_mark_node)
	  return error_mark_node;
	else if (TREE_CODE (str) == USERDEF_LITERAL)
	  {
	    /* The suffix was attached to the string token itself,
	       e.g. `operator ""_km'.  */
	    string_tree = USERDEF_LITERAL_VALUE (str);
	    id = USERDEF_LITERAL_SUFFIX_ID (str);
	  }
	else
	  {
	    string_tree = str;
	    /* Look for the suffix identifier.  */
	    token = cp_lexer_peek_token (parser->lexer);
	    if (token->type == CPP_NAME)
	      id = cp_parser_identifier (parser);
	    else if (token->type == CPP_KEYWORD)
	      {
		error ("unexpected keyword;"
		       " remove space between quotes and suffix identifier");
		return error_mark_node;
	      }
	    else
	      {
		error ("expected suffix identifier");
		return error_mark_node;
	      }
	  }
	/* Compute the string's character count to verify it is empty:
	   total bytes divided by element size, minus the terminator.  */
	sz = TREE_INT_CST_LOW (TYPE_SIZE_UNIT
			       (TREE_TYPE (TREE_TYPE (string_tree))));
	len = TREE_STRING_LENGTH (string_tree) / sz - 1;
	if (len != 0)
	  {
	    error ("expected empty string after %<operator%> keyword");
	    return error_mark_node;
	  }
	if (utf8 || TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (string_tree)))
	    != char_type_node)
	  {
	    error ("invalid encoding prefix in literal operator");
	    return error_mark_node;
	  }
	if (id != error_mark_node)
	  {
	    const char *name = IDENTIFIER_POINTER (id);
	    id = cp_literal_operator_id (name);
	  }
	return id;
      }

    default:
      /* Anything else is an error.  */
      break;
    }

  /* If we have selected an identifier, we need to consume the
     operator token.  */
  if (id)
    cp_lexer_consume_token (parser->lexer);
  /* Otherwise, no valid operator name was present.  */
  else
    {
      cp_parser_error (parser, "expected operator");
      id = error_mark_node;
    }

  return id;
}
/* Parse a template-declaration.

   template-declaration:
     export [opt] template < template-parameter-list > declaration

   If MEMBER_P is TRUE, this template-declaration occurs within a
   class-specifier.

   The grammar rule given by the standard isn't correct.  What
   is really meant is:

   template-declaration:
     export [opt] template-parameter-list-seq
       decl-specifier-seq [opt] init-declarator [opt] ;
     export [opt] template-parameter-list-seq
       function-definition

   template-parameter-list-seq:
     template-parameter-list-seq [opt]
     template < template-parameter-list >  */

static void
cp_parser_template_declaration (cp_parser* parser, bool member_p)
{
  /* An `export' keyword may precede the template; it is accepted,
     consumed, and ignored with a warning.  */
  if (cp_lexer_next_token_is_keyword (parser->lexer, RID_EXPORT))
    {
      cp_lexer_consume_token (parser->lexer);
      warning (0, "keyword %<export%> not implemented, and will be ignored");
    }

  cp_parser_template_declaration_after_export (parser, member_p);
}
/* Parse a template-parameter-list.

   template-parameter-list:
     template-parameter
     template-parameter-list , template-parameter

   Returns a TREE_LIST.  Each node represents a template parameter.
   The nodes are connected via their TREE_CHAINs.  */

static tree
cp_parser_template_parameter_list (cp_parser* parser)
{
  tree parameter_list = NULL_TREE;

  begin_template_parm_list ();

  /* The loop below parses the template parms.  We first need to know
     the total number of template parms to be able to compute proper
     canonical types of each dependent type.  So after the loop, when
     we know the total number of template parms,
     end_template_parm_list computes the proper canonical types and
     fixes up the dependent types accordingly.  */
  while (true)
    {
      tree parameter;
      bool is_non_type;
      bool is_parameter_pack;
      location_t parm_loc;

      /* Parse the template-parameter.  */
      parm_loc = cp_lexer_peek_token (parser->lexer)->location;
      parameter = cp_parser_template_parameter (parser,
						&is_non_type,
						&is_parameter_pack);
      /* Add it to the list.  */
      if (parameter != error_mark_node)
	parameter_list = process_template_parm (parameter_list,
						parm_loc,
						parameter,
						is_non_type,
						is_parameter_pack);
      else
	{
	  /* Keep an error placeholder on the chain so parameter
	     positions stay consistent for later parameters.  */
	  tree err_parm = build_tree_list (parameter, parameter);
	  parameter_list = chainon (parameter_list, err_parm);
	}

      /* If the next token is not a `,', we're done.  */
      if (cp_lexer_next_token_is_not (parser->lexer, CPP_COMMA))
	break;
      /* Otherwise, consume the `,' token.  */
      cp_lexer_consume_token (parser->lexer);
    }

  return end_template_parm_list (parameter_list);
}
/* Parse a template-parameter.

   template-parameter:
     type-parameter
     parameter-declaration

   If all goes well, returns a TREE_LIST.  The TREE_VALUE represents
   the parameter.  The TREE_PURPOSE is the default value, if any.
   Returns ERROR_MARK_NODE on failure.  *IS_NON_TYPE is set to true
   iff this parameter is a non-type parameter.  *IS_PARAMETER_PACK is
   set to true iff this parameter is a parameter pack.  */

static tree
cp_parser_template_parameter (cp_parser* parser, bool *is_non_type,
			      bool *is_parameter_pack)
{
  cp_token *token;
  cp_parameter_declarator *parameter_declarator;
  cp_declarator *id_declarator;
  tree parm;

  /* Assume it is a type parameter or a template parameter.  */
  *is_non_type = false;
  /* Assume it not a parameter pack.  */
  *is_parameter_pack = false;
  /* Peek at the next token.  */
  token = cp_lexer_peek_token (parser->lexer);
  /* If it is `template', we have a type-parameter.  */
  if (token->keyword == RID_TEMPLATE)
    return cp_parser_type_parameter (parser, is_parameter_pack);
  /* If it is `class' or `typename' we do not know yet whether it is a
     type parameter or a non-type parameter.  Consider:

       template <typename T, typename T::X X> ...

     or:

       template <class C, class D*> ...

     Here, the first parameter is a type parameter, and the second is
     a non-type parameter.  We can tell by looking at the token after
     the identifier -- if it is a `,', `=', or `>' then we have a type
     parameter.  */
  if (token->keyword == RID_TYPENAME || token->keyword == RID_CLASS)
    {
      /* Peek at the token after `class' or `typename'.  */
      token = cp_lexer_peek_nth_token (parser->lexer, 2);
      /* If it's an ellipsis, we have a template type parameter
         pack.  */
      if (token->type == CPP_ELLIPSIS)
	return cp_parser_type_parameter (parser, is_parameter_pack);
      /* If it's an identifier, skip it.  */
      if (token->type == CPP_NAME)
	token = cp_lexer_peek_nth_token (parser->lexer, 3);
      /* Now, see if the token looks like the end of a template
	 parameter.  */
      if (token->type == CPP_COMMA
	  || token->type == CPP_EQ
	  || token->type == CPP_GREATER)
	return cp_parser_type_parameter (parser, is_parameter_pack);
    }

  /* Otherwise, it is a non-type parameter.

     [temp.param]

     When parsing a default template-argument for a non-type
     template-parameter, the first non-nested `>' is taken as the end
     of the template parameter-list rather than a greater-than
     operator.  */
  *is_non_type = true;
  parameter_declarator
     = cp_parser_parameter_declaration (parser, /*template_parm_p=*/true,
					/*parenthesized_p=*/NULL);

  if (!parameter_declarator)
    return error_mark_node;

  /* If the parameter declaration is marked as a parameter pack, set
     *IS_PARAMETER_PACK to notify the caller.  Also, unmark the
     declarator's PACK_EXPANSION_P, otherwise we'll get errors from
     grokdeclarator.  */
  if (parameter_declarator->declarator
      && parameter_declarator->declarator->parameter_pack_p)
    {
      *is_parameter_pack = true;
      parameter_declarator->declarator->parameter_pack_p = false;
    }

  if (parameter_declarator->default_argument)
    {
      /* Can happen in some cases of erroneous input (c++/34892).  */
      if (cp_lexer_next_token_is (parser->lexer, CPP_ELLIPSIS))
	/* Consume the `...' for better error recovery.  */
	cp_lexer_consume_token (parser->lexer);
    }
  /* If the next token is an ellipsis, and we don't already have it
     marked as a parameter pack, then we have a parameter pack (that
     has no declarator).  */
  else if (!*is_parameter_pack
	   && cp_lexer_next_token_is (parser->lexer, CPP_ELLIPSIS)
	   && (declarator_can_be_parameter_pack
	       (parameter_declarator->declarator)))
    {
      /* Consume the `...'.  */
      cp_lexer_consume_token (parser->lexer);
      maybe_warn_variadic_templates ();

      *is_parameter_pack = true;
    }
  /* We might end up with a pack expansion as the type of the non-type
     template parameter, in which case this is a non-type template
     parameter pack.  */
  else if (parameter_declarator->decl_specifiers.type
	   && PACK_EXPANSION_P (parameter_declarator->decl_specifiers.type))
    {
      *is_parameter_pack = true;
      parameter_declarator->decl_specifiers.type =
	PACK_EXPANSION_PATTERN (parameter_declarator->decl_specifiers.type);
    }

  if (*is_parameter_pack && cp_lexer_next_token_is (parser->lexer, CPP_EQ))
    {
      /* Parameter packs cannot have default arguments.  However, a
	 user may try to do so, so we'll parse them and give an
	 appropriate diagnostic here.  */

      cp_token *start_token = cp_lexer_peek_token (parser->lexer);

      /* Find the name of the parameter pack.  */
      id_declarator = parameter_declarator->declarator;
      while (id_declarator && id_declarator->kind != cdk_id)
	id_declarator = id_declarator->declarator;

      if (id_declarator && id_declarator->kind == cdk_id)
	error_at (start_token->location,
		  "template parameter pack %qD cannot have a default argument",
		  id_declarator->u.id.unqualified_name);
      else
	error_at (start_token->location,
		  "template parameter pack cannot have a default argument");

      /* Parse the default argument, but throw away the result.  */
      cp_parser_default_argument (parser, /*template_parm_p=*/true);
    }

  /* Build the parameter's DECL.  (The text previously read
     "&parameter_declarator" garbled into a pilcrow by an encoding
     error; restored here.)  */
  parm = grokdeclarator (parameter_declarator->declarator,
			 &parameter_declarator->decl_specifiers,
			 TPARM, /*initialized=*/0,
			 /*attrlist=*/NULL);
  if (parm == error_mark_node)
    return error_mark_node;

  return build_tree_list (parameter_declarator->default_argument, parm);
}
/* Parse a type-parameter.

   type-parameter:
     class identifier [opt]
     class identifier [opt] = type-id
     typename identifier [opt]
     typename identifier [opt] = type-id
     template < template-parameter-list > class identifier [opt]
     template < template-parameter-list > class identifier [opt]
       = id-expression

   GNU Extension (variadic templates):

   type-parameter:
     class ... identifier [opt]
     typename ... identifier [opt]

   Returns a TREE_LIST.  The TREE_VALUE is itself a TREE_LIST.  The
   TREE_PURPOSE is the default-argument, if any.  The TREE_VALUE is
   the declaration of the parameter.

   Sets *IS_PARAMETER_PACK if this is a template parameter pack.  */

static tree
cp_parser_type_parameter (cp_parser* parser, bool *is_parameter_pack)
{
  cp_token *token;
  tree parameter;

  /* Look for a keyword to tell us what kind of parameter this is.  */
  token = cp_parser_require (parser, CPP_KEYWORD, RT_CLASS_TYPENAME_TEMPLATE);
  if (!token)
    return error_mark_node;

  switch (token->keyword)
    {
    case RID_CLASS:
    case RID_TYPENAME:
      {
	tree identifier;
	tree default_argument;

	/* If the next token is an ellipsis, we have a template
	   argument pack.  */
	if (cp_lexer_next_token_is (parser->lexer, CPP_ELLIPSIS))
	  {
	    /* Consume the `...' token.  */
	    cp_lexer_consume_token (parser->lexer);
	    maybe_warn_variadic_templates ();

	    *is_parameter_pack = true;
	  }

	/* If the next token is an identifier, then it names the
	   parameter.  */
	if (cp_lexer_next_token_is (parser->lexer, CPP_NAME))
	  identifier = cp_parser_identifier (parser);
	else
	  identifier = NULL_TREE;

	/* Create the parameter.  */
	parameter = finish_template_type_parm (class_type_node, identifier);

	/* If the next token is an `=', we have a default argument.  */
	if (cp_lexer_next_token_is (parser->lexer, CPP_EQ))
	  {
	    /* Consume the `=' token.  */
	    cp_lexer_consume_token (parser->lexer);
	    /* Parse the default-argument.  Access checks are deferred
	       off while the default argument is parsed.  */
	    push_deferring_access_checks (dk_no_deferred);
	    default_argument = cp_parser_type_id (parser);

	    /* Template parameter packs cannot have default
	       arguments.  */
	    if (*is_parameter_pack)
	      {
		if (identifier)
		  error_at (token->location,
			    "template parameter pack %qD cannot have a "
			    "default argument", identifier);
		else
		  error_at (token->location,
			    "template parameter packs cannot have "
			    "default arguments");
		default_argument = NULL_TREE;
	      }
	    else if (check_for_bare_parameter_packs (default_argument))
	      default_argument = error_mark_node;
	    pop_deferring_access_checks ();
	  }
	else
	  default_argument = NULL_TREE;

	/* Create the combined representation of the parameter and the
	   default argument.  */
	parameter = build_tree_list (default_argument, parameter);
      }
      break;

    case RID_TEMPLATE:
      {
	tree identifier;
	tree default_argument;

	/* Look for the `<'.  */
	cp_parser_require (parser, CPP_LESS, RT_LESS);
	/* Parse the template-parameter-list.  */
	cp_parser_template_parameter_list (parser);
	/* Look for the `>'.  */
	cp_parser_require (parser, CPP_GREATER, RT_GREATER);
	/* Look for the `class' or 'typename' keywords.  */
	cp_parser_type_parameter_key (parser);
	/* If the next token is an ellipsis, we have a template
	   argument pack.  */
	if (cp_lexer_next_token_is (parser->lexer, CPP_ELLIPSIS))
	  {
	    /* Consume the `...' token.  */
	    cp_lexer_consume_token (parser->lexer);
	    maybe_warn_variadic_templates ();

	    *is_parameter_pack = true;
	  }
	/* If the next token is an `=', then there is a
	   default-argument.  If the next token is a `>', we are at
	   the end of the parameter-list.  If the next token is a `,',
	   then we are at the end of this parameter.  */
	if (cp_lexer_next_token_is_not (parser->lexer, CPP_EQ)
	    && cp_lexer_next_token_is_not (parser->lexer, CPP_GREATER)
	    && cp_lexer_next_token_is_not (parser->lexer, CPP_COMMA))
	  {
	    identifier = cp_parser_identifier (parser);
	    /* Treat invalid names as if the parameter were nameless.  */
	    if (identifier == error_mark_node)
	      identifier = NULL_TREE;
	  }
	else
	  identifier = NULL_TREE;

	/* Create the template parameter.  */
	parameter = finish_template_template_parm (class_type_node,
						   identifier);

	/* If the next token is an `=', then there is a
	   default-argument.  */
	if (cp_lexer_next_token_is (parser->lexer, CPP_EQ))
	  {
	    bool is_template;

	    /* Consume the `='.  */
	    cp_lexer_consume_token (parser->lexer);
	    /* Parse the id-expression.  */
	    push_deferring_access_checks (dk_no_deferred);
	    /* save token before parsing the id-expression, for error
	       reporting */
	    token = cp_lexer_peek_token (parser->lexer);
	    default_argument
	      = cp_parser_id_expression (parser,
					 /*template_keyword_p=*/false,
					 /*check_dependency_p=*/true,
					 /*template_p=*/&is_template,
					 /*declarator_p=*/false,
					 /*optional_p=*/false);
	    if (TREE_CODE (default_argument) == TYPE_DECL)
	      /* If the id-expression was a template-id that refers to
		 a template-class, we already have the declaration here,
		 so no further lookup is needed.  */
	      ;
	    else
	      /* Look up the name.  */
	      default_argument
		= cp_parser_lookup_name (parser, default_argument,
					 none_type,
					 /*is_template=*/is_template,
					 /*is_namespace=*/false,
					 /*check_dependency=*/true,
					 /*ambiguous_decls=*/NULL,
					 token->location);
	    /* See if the default argument is valid.  */
	    default_argument
	      = check_template_template_default_arg (default_argument);

	    /* Template parameter packs cannot have default
	       arguments.  */
	    if (*is_parameter_pack)
	      {
		if (identifier)
		  error_at (token->location,
			    "template parameter pack %qD cannot "
			    "have a default argument",
			    identifier);
		else
		  error_at (token->location, "template parameter packs cannot "
			    "have default arguments");
		default_argument = NULL_TREE;
	      }
	    pop_deferring_access_checks ();
	  }
	else
	  default_argument = NULL_TREE;

	/* Create the combined representation of the parameter and the
	   default argument.  */
	parameter = build_tree_list (default_argument, parameter);
      }
      break;

    default:
      gcc_unreachable ();
      break;
    }

  return parameter;
}
/* Parse a template-id.
template-id:
template-name < template-argument-list [opt] >
If TEMPLATE_KEYWORD_P is TRUE, then we have just seen the
`template' keyword. In this case, a TEMPLATE_ID_EXPR will be
returned. Otherwise, if the template-name names a function, or set
of functions, returns a TEMPLATE_ID_EXPR. If the template-name
names a class, returns a TYPE_DECL for the specialization.
If CHECK_DEPENDENCY_P is FALSE, names are looked up in
uninstantiated templates. */
static tree
cp_parser_template_id (cp_parser *parser,
bool template_keyword_p,
bool check_dependency_p,
enum tag_types tag_type,
bool is_declaration)
{
int i;
tree templ;
tree arguments;
tree template_id;
cp_token_position start_of_id = 0;
deferred_access_check *chk;
vec<deferred_access_check, va_gc> *access_check;
cp_token *next_token = NULL, *next_token_2 = NULL;
bool is_identifier;
/* If the next token corresponds to a template-id, there is no need
to reparse it. */
next_token = cp_lexer_peek_token (parser->lexer);
if (next_token->type == CPP_TEMPLATE_ID)
{
struct tree_check *check_value;
/* Get the stored value. */
check_value = cp_lexer_consume_token (parser->lexer)->u.tree_check_value;
/* Perform any access checks that were deferred. */
access_check = check_value->checks;
if (access_check)
{
FOR_EACH_VEC_ELT (*access_check, i, chk)
perform_or_defer_access_check (chk->binfo,
chk->decl,
chk->diag_decl,
tf_warning_or_error);
}
/* Return the stored value. */
return check_value->value;
}
/* Avoid performing name lookup if there is no possibility of
finding a template-id. */
if ((next_token->type != CPP_NAME && next_token->keyword != RID_OPERATOR)
|| (next_token->type == CPP_NAME
&& !cp_parser_nth_token_starts_template_argument_list_p
(parser, 2)))
{
cp_parser_error (parser, "expected template-id");
return error_mark_node;
}
/* Remember where the template-id starts. */
if (cp_parser_uncommitted_to_tentative_parse_p (parser))
start_of_id = cp_lexer_token_position (parser->lexer, false);
push_deferring_access_checks (dk_deferred);
/* Parse the template-name. */
is_identifier = false;
templ = cp_parser_template_name (parser, template_keyword_p,
check_dependency_p,
is_declaration,
tag_type,
&is_identifier);
if (templ == error_mark_node || is_identifier)
{
pop_deferring_access_checks ();
return templ;
}
/* If we find the sequence `[:' after a template-name, it's probably
a digraph-typo for `< ::'. Substitute the tokens and check if we can
parse correctly the argument list. */
next_token = cp_lexer_peek_token (parser->lexer);
next_token_2 = cp_lexer_peek_nth_token (parser->lexer, 2);
if (next_token->type == CPP_OPEN_SQUARE
&& next_token->flags & DIGRAPH
&& next_token_2->type == CPP_COLON
&& !(next_token_2->flags & PREV_WHITE))
{
cp_parser_parse_tentatively (parser);
/* Change `:' into `::'. */
next_token_2->type = CPP_SCOPE;
/* Consume the first token (CPP_OPEN_SQUARE - which we pretend it is
CPP_LESS. */
cp_lexer_consume_token (parser->lexer);
/* Parse the arguments. */
arguments = cp_parser_enclosed_template_argument_list (parser);
if (!cp_parser_parse_definitely (parser))
{
/* If we couldn't parse an argument list, then we revert our changes
and return simply an error. Maybe this is not a template-id
after all. */
next_token_2->type = CPP_COLON;
cp_parser_error (parser, "expected %<<%>");
pop_deferring_access_checks ();
return error_mark_node;
}
/* Otherwise, emit an error about the invalid digraph, but continue
parsing because we got our argument list. */
if (permerror (next_token->location,
"%<<::%> cannot begin a template-argument list"))
{
static bool hint = false;
inform (next_token->location,
"%<<:%> is an alternate spelling for %<[%>."
" Insert whitespace between %<<%> and %<::%>");
if (!hint && !flag_permissive)
{
inform (next_token->location, "(if you use %<-fpermissive%> "
"or %<-std=c++11%>, or %<-std=gnu++11%> G++ will "
"accept your code)");
hint = true;
}
}
}
else
{
/* Look for the `<' that starts the template-argument-list. */
if (!cp_parser_require (parser, CPP_LESS, RT_LESS))
{
pop_deferring_access_checks ();
return error_mark_node;
}
/* Parse the arguments. */
arguments = cp_parser_enclosed_template_argument_list (parser);
}
/* Build a representation of the specialization. */
if (identifier_p (templ))
template_id = build_min_nt_loc (next_token->location,
TEMPLATE_ID_EXPR,
templ, arguments);
else if (DECL_TYPE_TEMPLATE_P (templ)
|| DECL_TEMPLATE_TEMPLATE_PARM_P (templ))
{
bool entering_scope;
/* In "template <typename T> ... A<T>::", A<T> is the abstract A
template (rather than some instantiation thereof) only if
is not nested within some other construct. For example, in
"template <typename T> void f(T) { A<T>::", A<T> is just an
instantiation of A. */
entering_scope = (template_parm_scope_p ()
&& cp_lexer_next_token_is (parser->lexer,
CPP_SCOPE));
template_id
= finish_template_type (templ, arguments, entering_scope);
}
else if (variable_template_p (templ))
{
template_id = lookup_template_variable (templ, arguments);
}
else
{
/* If it's not a class-template or a template-template, it should be
a function-template. */
gcc_assert ((DECL_FUNCTION_TEMPLATE_P (templ)
|| TREE_CODE (templ) == OVERLOAD
|| BASELINK_P (templ)));
template_id = lookup_template_function (templ, arguments);
}
/* If parsing tentatively, replace the sequence of tokens that makes
up the template-id with a CPP_TEMPLATE_ID token. That way,
should we re-parse the token stream, we will not have to repeat
the effort required to do the parse, nor will we issue duplicate
error messages about problems during instantiation of the
template. */
if (start_of_id
/* Don't do this if we had a parse error in a declarator; re-parsing
might succeed if a name changes meaning (60361). */
&& !(cp_parser_error_occurred (parser)
&& cp_parser_parsing_tentatively (parser)
&& parser->in_declarator_p))
{
cp_token *token = cp_lexer_token_at (parser->lexer, start_of_id);
/* Reset the contents of the START_OF_ID token. */
token->type = CPP_TEMPLATE_ID;
/* Retrieve any deferred checks. Do not pop this access checks yet
so the memory will not be reclaimed during token replacing below. */
token->u.tree_check_value = ggc_cleared_alloc<struct tree_check> ();
token->u.tree_check_value->value = template_id;
token->u.tree_check_value->checks = get_deferred_access_checks ();
token->keyword = RID_MAX;
/* Purge all subsequent tokens. */
cp_lexer_purge_tokens_after (parser->lexer, start_of_id);
/* ??? Can we actually assume that, if template_id ==
error_mark_node, we will have issued a diagnostic to the
user, as opposed to simply marking the tentative parse as
failed? */
if (cp_parser_error_occurred (parser) && template_id != error_mark_node)
error_at (token->location, "parse error in template argument list");
}
pop_to_parent_deferring_access_checks ();
return template_id;
}
/* Parse a template-name.
template-name:
identifier
The standard should actually say:
template-name:
identifier
operator-function-id
A defect report has been filed about this issue.
A conversion-function-id cannot be a template name because they cannot
be part of a template-id. In fact, looking at this code:
a.operator K<int>()
the conversion-function-id is "operator K<int>", and K<int> is a type-id.
It is impossible to call a templated conversion-function-id with an
explicit argument list, since the only allowed template parameter is
the type to which it is converting.
If TEMPLATE_KEYWORD_P is true, then we have just seen the
`template' keyword, in a construction like:
T::template f<3>()
In that case `f' is taken to be a template-name, even though there
is no way of knowing for sure.
Returns the TEMPLATE_DECL for the template, or an OVERLOAD if the
name refers to a set of overloaded functions, at least one of which
is a template, or an IDENTIFIER_NODE with the name of the template,
if TEMPLATE_KEYWORD_P is true. If CHECK_DEPENDENCY_P is FALSE,
names are looked up inside uninstantiated templates. */
static tree
cp_parser_template_name (cp_parser* parser,
			 bool template_keyword_p,
			 bool check_dependency_p,
			 bool is_declaration,
			 enum tag_types tag_type,
			 bool *is_identifier)
{
  tree identifier;
  tree decl;
  tree fns;
  /* Remember the first token; its location anchors all diagnostics
     issued by this function.  */
  cp_token *token = cp_lexer_peek_token (parser->lexer);
  /* If the next token is `operator', then we have either an
     operator-function-id or a conversion-function-id.  */
  if (cp_lexer_next_token_is_keyword (parser->lexer, RID_OPERATOR))
    {
      /* We don't know whether we're looking at an
	 operator-function-id or a conversion-function-id.  */
      cp_parser_parse_tentatively (parser);
      /* Try an operator-function-id.  */
      identifier = cp_parser_operator_function_id (parser);
      /* A conversion-function-id cannot be a template-name (see the
	 comment before this function), so if the operator-function-id
	 didn't parse, the whole thing is an error.  */
      if (!cp_parser_parse_definitely (parser))
	{
	  cp_parser_error (parser, "expected template-name");
	  return error_mark_node;
	}
    }
  /* Look for the identifier.  */
  else
    identifier = cp_parser_identifier (parser);
  /* If we didn't find an identifier, we don't have a template-id.  */
  if (identifier == error_mark_node)
    return error_mark_node;
  /* If the name immediately followed the `template' keyword, then it
     is a template-name.  However, if the next token is not `<', then
     we do not treat it as a template-name, since it is not being used
     as part of a template-id.  This enables us to handle constructs
     like:
       template <typename T> struct S { S(); };
       template <typename T> S<T>::S();
     correctly.  We would treat `S' as a template -- if it were `S<T>'
     -- but we do not if there is no `<'.  */
  if (processing_template_decl
      && cp_parser_nth_token_starts_template_argument_list_p (parser, 1))
    {
      /* In a declaration, in a dependent context, we pretend that the
	 "template" keyword was present in order to improve error
	 recovery.  For example, given:
	   template <typename T> void f(T::X<int>);
	 we want to treat "X<int>" as a template-id.  */
      if (is_declaration
	  && !template_keyword_p
	  && parser->scope && TYPE_P (parser->scope)
	  && check_dependency_p
	  && dependent_scope_p (parser->scope)
	  /* Do not do this for dtors (or ctors), since they never
	     need the template keyword before their name.  */
	  && !constructor_name_p (identifier, parser->scope))
	{
	  cp_token_position start = 0;
	  /* Explain what went wrong.  */
	  error_at (token->location, "non-template %qD used as template",
		    identifier);
	  inform (token->location, "use %<%T::template %D%> to indicate that it is a template",
		  parser->scope, identifier);
	  /* If parsing tentatively, find the location of the "<" token.
	     (cp_parser_simulate_error returns true only when the error
	     was simulated, i.e. we are in a tentative parse.)  */
	  if (cp_parser_simulate_error (parser))
	    start = cp_lexer_token_position (parser->lexer, true);
	  /* Parse the template arguments so that we can issue error
	     messages about them.  */
	  cp_lexer_consume_token (parser->lexer);
	  cp_parser_enclosed_template_argument_list (parser);
	  /* Skip tokens until we find a good place from which to
	     continue parsing.  */
	  cp_parser_skip_to_closing_parenthesis (parser,
						 /*recovering=*/true,
						 /*or_comma=*/true,
						 /*consume_paren=*/false);
	  /* If parsing tentatively, permanently remove the
	     template argument list.  That will prevent duplicate
	     error messages from being issued about the missing
	     "template" keyword.  */
	  if (start)
	    cp_lexer_purge_tokens_after (parser->lexer, start);
	  if (is_identifier)
	    *is_identifier = true;
	  return identifier;
	}
      /* If the "template" keyword is present, then there is generally
	 no point in doing name-lookup, so we just return IDENTIFIER.
	 But, if the qualifying scope is non-dependent then we can
	 (and must) do name-lookup normally.  */
      if (template_keyword_p
	  && (!parser->scope
	      || (TYPE_P (parser->scope)
		  && dependent_type_p (parser->scope))))
	return identifier;
    }
  /* Look up the name.  */
  decl = cp_parser_lookup_name (parser, identifier,
				tag_type,
				/*is_template=*/true,
				/*is_namespace=*/false,
				check_dependency_p,
				/*ambiguous_decls=*/NULL,
				token->location);
  /* A using-declaration may hide the underlying template; strip it so
     the TREE_CODE checks below see the real declaration.  */
  decl = strip_using_decl (decl);
  /* If DECL is a template, then the name was a template-name.  */
  if (TREE_CODE (decl) == TEMPLATE_DECL)
    {
      if (TREE_DEPRECATED (decl)
	  && deprecated_state != DEPRECATED_SUPPRESS)
	warn_deprecated_use (decl, NULL_TREE);
    }
  else
    {
      tree fn = NULL_TREE;
      /* The standard does not explicitly indicate whether a name that
	 names a set of overloaded declarations, some of which are
	 templates, is a template-name.  However, such a name should
	 be a template-name; otherwise, there is no way to form a
	 template-id for the overloaded templates.  */
      fns = BASELINK_P (decl) ? BASELINK_FUNCTIONS (decl) : decl;
      /* Scan the overload set for at least one template.  */
      if (TREE_CODE (fns) == OVERLOAD)
	for (fn = fns; fn; fn = OVL_NEXT (fn))
	  if (TREE_CODE (OVL_CURRENT (fn)) == TEMPLATE_DECL)
	    break;
      if (!fn)
	{
	  /* The name does not name a template.  */
	  cp_parser_error (parser, "expected template-name");
	  return error_mark_node;
	}
    }
  /* If DECL is dependent, and refers to a function, then just return
     its name; we will look it up again during template instantiation.  */
  if (DECL_FUNCTION_TEMPLATE_P (decl) || !DECL_P (decl))
    {
      tree scope = ovl_scope (decl);
      if (TYPE_P (scope) && dependent_type_p (scope))
	return identifier;
    }
  return decl;
}
/* Parse a template-argument-list.
template-argument-list:
template-argument ... [opt]
template-argument-list , template-argument ... [opt]
Returns a TREE_VEC containing the arguments. */
/* Parse the comma-separated arguments of a template-argument-list and
   return them as a TREE_VEC.  The first ten arguments are collected in
   a stack buffer; a heap buffer is used only when more are seen.  */
static tree
cp_parser_template_argument_list (cp_parser* parser)
{
  tree stack_args[10];
  tree *args = stack_args;
  unsigned capacity = 10;
  unsigned count = 0;
  tree vec;
  /* Save the parser state that is overridden below.  */
  bool saved_in_list_p = parser->in_template_argument_list_p;
  bool saved_ice_p = parser->integral_constant_expression_p;
  bool saved_non_ice_p = parser->non_integral_constant_expression_p;
  parser->in_template_argument_list_p = true;
  /* Even if the template-id appears in an integral
     constant-expression, the contents of the argument list do
     not.  */
  parser->integral_constant_expression_p = false;
  parser->non_integral_constant_expression_p = false;
  /* Accumulate the arguments.  */
  for (;;)
    {
      tree argument = cp_parser_template_argument (parser);
      /* A following ellipsis makes this argument a pack expansion.  */
      if (cp_lexer_next_token_is (parser->lexer, CPP_ELLIPSIS))
	{
	  if (argument == error_mark_node)
	    {
	      cp_token *token = cp_lexer_peek_token (parser->lexer);
	      error_at (token->location,
			"expected parameter pack before %<...%>");
	    }
	  /* Consume the `...' token.  */
	  cp_lexer_consume_token (parser->lexer);
	  /* Make the argument into a TYPE_PACK_EXPANSION or
	     EXPR_PACK_EXPANSION.  */
	  argument = make_pack_expansion (argument);
	}
      /* Grow the buffer when full, moving to the heap the first time
	 the stack buffer overflows.  */
      if (count == capacity)
	{
	  capacity *= 2;
	  if (args == stack_args)
	    {
	      args = XNEWVEC (tree, capacity);
	      memcpy (args, stack_args, sizeof (tree) * count);
	    }
	  else
	    args = XRESIZEVEC (tree, args, capacity);
	}
      args[count++] = argument;
      /* Another argument follows only after a `,'.  */
      if (!cp_lexer_next_token_is (parser->lexer, CPP_COMMA))
	break;
      /* Consume the comma.  */
      cp_lexer_consume_token (parser->lexer);
    }
  /* Transfer the collected arguments into a TREE_VEC.  */
  vec = make_tree_vec (count);
  for (unsigned ix = 0; ix < count; ix++)
    TREE_VEC_ELT (vec, ix) = args[ix];
  if (args != stack_args)
    free (args);
  /* Restore the saved parser state.  */
  parser->non_integral_constant_expression_p = saved_non_ice_p;
  parser->integral_constant_expression_p = saved_ice_p;
  parser->in_template_argument_list_p = saved_in_list_p;
#ifdef ENABLE_CHECKING
  SET_NON_DEFAULT_TEMPLATE_ARGS_COUNT (vec, TREE_VEC_LENGTH (vec));
#endif
  return vec;
}
/* Parse a template-argument.
template-argument:
assignment-expression
type-id
id-expression
The representation is that of an assignment-expression, type-id, or
id-expression -- except that the qualified id-expression is
evaluated, so that the value returned is either a DECL or an
OVERLOAD.
Although the standard says "assignment-expression", it forbids
throw-expressions or assignments in the template argument.
Therefore, we use "conditional-expression" instead. */
static tree
cp_parser_template_argument (cp_parser* parser)
{
  tree argument;
  bool template_p;
  bool address_p;
  /* Set when a type-id parsed cleanly but was followed by `>>' in
     C++98 mode; we then also try the expression alternatives.  */
  bool maybe_type_id = false;
  cp_token *token = NULL, *argument_start_token = NULL;
  location_t loc = 0;
  cp_id_kind idk;
  /* There's really no way to know what we're looking at, so we just
     try each alternative in order.
       [temp.arg]
       In a template-argument, an ambiguity between a type-id and an
       expression is resolved to a type-id, regardless of the form of
       the corresponding template-parameter.
     Therefore, we try a type-id first.  */
  cp_parser_parse_tentatively (parser);
  argument = cp_parser_template_type_arg (parser);
  /* If there was no error parsing the type-id but the next token is a
     '>>', our behavior depends on which dialect of C++ we're
     parsing. In C++98, we probably found a typo for '> >'. But there
     are type-id which are also valid expressions. For instance:
       struct X { int operator >> (int); };
       template <int V> struct Foo {};
       Foo<X () >> 5> r;
     Here 'X()' is a valid type-id of a function type, but the user just
     wanted to write the expression "X() >> 5". Thus, we remember that we
     found a valid type-id, but we still try to parse the argument as an
     expression to see what happens.
     In C++0x, the '>>' will be considered two separate '>'
     tokens.  */
  if (!cp_parser_error_occurred (parser)
      && cxx_dialect == cxx98
      && cp_lexer_next_token_is (parser->lexer, CPP_RSHIFT))
    {
      maybe_type_id = true;
      cp_parser_abort_tentative_parse (parser);
    }
  else
    {
      /* If the next token isn't a `,' or a `>', then this argument wasn't
	 really finished.  This means that the argument is not a valid
	 type-id.  */
      if (!cp_parser_next_token_ends_template_argument_p (parser))
	cp_parser_error (parser, "expected template-argument");
      /* If that worked, we're done.  */
      if (cp_parser_parse_definitely (parser))
	return argument;
    }
  /* We're still not sure what the argument will be.  */
  cp_parser_parse_tentatively (parser);
  /* Try a template.  */
  argument_start_token = cp_lexer_peek_token (parser->lexer);
  argument = cp_parser_id_expression (parser,
				      /*template_keyword_p=*/false,
				      /*check_dependency_p=*/true,
				      &template_p,
				      /*declarator_p=*/false,
				      /*optional_p=*/false);
  /* If the next token isn't a `,' or a `>', then this argument wasn't
     really finished.  */
  if (!cp_parser_next_token_ends_template_argument_p (parser))
    cp_parser_error (parser, "expected template-argument");
  if (!cp_parser_error_occurred (parser))
    {
      /* Figure out what is being referred to.  If the id-expression
	 was for a class template specialization, then we will have a
	 TYPE_DECL at this point.  There is no need to do name lookup
	 at this point in that case.  */
      if (TREE_CODE (argument) != TYPE_DECL)
	argument = cp_parser_lookup_name (parser, argument,
					  none_type,
					  /*is_template=*/template_p,
					  /*is_namespace=*/false,
					  /*check_dependency=*/true,
					  /*ambiguous_decls=*/NULL,
					  argument_start_token->location);
      /* Only a template or an unbound class template qualifies as a
	 template template-argument; anything else fails this
	 tentative parse.  */
      if (TREE_CODE (argument) != TEMPLATE_DECL
	  && TREE_CODE (argument) != UNBOUND_CLASS_TEMPLATE)
	cp_parser_error (parser, "expected template-name");
    }
  if (cp_parser_parse_definitely (parser))
    {
      if (TREE_DEPRECATED (argument))
	warn_deprecated_use (argument, NULL_TREE);
      return argument;
    }
  /* It must be a non-type argument.  There permitted cases are given
     in [temp.arg.nontype]:
     -- an integral constant-expression of integral or enumeration
	type; or
     -- the name of a non-type template-parameter; or
     -- the name of an object or function with external linkage...
     -- the address of an object or function with external linkage...
     -- a pointer to member...  */
  /* Look for a non-type template parameter.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_NAME))
    {
      cp_parser_parse_tentatively (parser);
      argument = cp_parser_primary_expression (parser,
					       /*address_p=*/false,
					       /*cast_p=*/false,
					       /*template_arg_p=*/true,
					       &idk);
      if (TREE_CODE (argument) != TEMPLATE_PARM_INDEX
	  || !cp_parser_next_token_ends_template_argument_p (parser))
	cp_parser_simulate_error (parser);
      if (cp_parser_parse_definitely (parser))
	return argument;
    }
  /* If the next token is "&", the argument must be the address of an
     object or function with external linkage.  */
  address_p = cp_lexer_next_token_is (parser->lexer, CPP_AND);
  if (address_p)
    {
      /* Remember the location of the `&' for the ADDR_EXPR built
	 below, then consume it.  */
      loc = cp_lexer_peek_token (parser->lexer)->location;
      cp_lexer_consume_token (parser->lexer);
    }
  /* See if we might have an id-expression.  */
  token = cp_lexer_peek_token (parser->lexer);
  if (token->type == CPP_NAME
      || token->keyword == RID_OPERATOR
      || token->type == CPP_SCOPE
      || token->type == CPP_TEMPLATE_ID
      || token->type == CPP_NESTED_NAME_SPECIFIER)
    {
      cp_parser_parse_tentatively (parser);
      argument = cp_parser_primary_expression (parser,
					       address_p,
					       /*cast_p=*/false,
					       /*template_arg_p=*/true,
					       &idk);
      if (cp_parser_error_occurred (parser)
	  || !cp_parser_next_token_ends_template_argument_p (parser))
	cp_parser_abort_tentative_parse (parser);
      else
	{
	  tree probe;
	  if (INDIRECT_REF_P (argument))
	    {
	      /* Strip the dereference temporarily.  */
	      gcc_assert (REFERENCE_REF_P (argument));
	      argument = TREE_OPERAND (argument, 0);
	    }
	  /* If we're in a template, we represent a qualified-id referring
	     to a static data member as a SCOPE_REF even if the scope isn't
	     dependent so that we can check access control later.  */
	  probe = argument;
	  if (TREE_CODE (probe) == SCOPE_REF)
	    probe = TREE_OPERAND (probe, 1);
	  if (VAR_P (probe))
	    {
	      /* A variable without external linkage might still be a
		 valid constant-expression, so no error is issued here
		 if the external-linkage check fails.  */
	      if (!address_p && !DECL_EXTERNAL_LINKAGE_P (probe))
		cp_parser_simulate_error (parser);
	    }
	  else if (is_overloaded_fn (argument))
	    /* All overloaded functions are allowed; if the external
	       linkage test does not pass, an error will be issued
	       later.  */
	    ;
	  else if (address_p
		   && (TREE_CODE (argument) == OFFSET_REF
		       || TREE_CODE (argument) == SCOPE_REF))
	    /* A pointer-to-member.  */
	    ;
	  else if (TREE_CODE (argument) == TEMPLATE_PARM_INDEX)
	    ;
	  else
	    cp_parser_simulate_error (parser);
	  if (cp_parser_parse_definitely (parser))
	    {
	      /* Re-apply the `&' that was stripped above, or undo the
		 reference dereference, as appropriate.  */
	      if (address_p)
		argument = build_x_unary_op (loc, ADDR_EXPR, argument,
					     tf_warning_or_error);
	      else
		argument = convert_from_reference (argument);
	      return argument;
	    }
	}
    }
  /* If the argument started with "&", there are no other valid
     alternatives at this point.  */
  if (address_p)
    {
      cp_parser_error (parser, "invalid non-type template argument");
      return error_mark_node;
    }
  /* If the argument wasn't successfully parsed as a type-id followed
     by '>>', the argument can only be a constant expression now.
     Otherwise, we try parsing the constant-expression tentatively,
     because the argument could really be a type-id.  */
  if (maybe_type_id)
    cp_parser_parse_tentatively (parser);
  argument = cp_parser_constant_expression (parser);
  if (!maybe_type_id)
    return argument;
  if (!cp_parser_next_token_ends_template_argument_p (parser))
    cp_parser_error (parser, "expected template-argument");
  if (cp_parser_parse_definitely (parser))
    return argument;
  /* We did our best to parse the argument as a non type-id, but that
     was the only alternative that matched (albeit with a '>' after
     it).  We can assume it's just a typo from the user, and a
     diagnostic will then be issued.  */
  return cp_parser_template_type_arg (parser);
}
/* Parse an explicit-instantiation.
explicit-instantiation:
template declaration
Although the standard says `declaration', what it really means is:
explicit-instantiation:
template decl-specifier-seq [opt] declarator [opt] ;
Things like `template int S<int>::i = 5, int S<double>::j;' are not
supposed to be allowed. A defect report has been filed about this
issue.
GNU Extension:
explicit-instantiation:
storage-class-specifier template
decl-specifier-seq [opt] declarator [opt] ;
function-specifier template
decl-specifier-seq [opt] declarator [opt] ; */
static void
cp_parser_explicit_instantiation (cp_parser* parser)
{
  int declares_class_or_enum;
  cp_decl_specifier_seq decl_specifiers;
  tree extension_specifier = NULL_TREE;
  /* All time spent here is accounted to template instantiation;
     matched by the timevar_pop at the end of the function.  */
  timevar_push (TV_TEMPLATE_INST);
  /* Look for an (optional) storage-class-specifier or
     function-specifier (GNU extension).  */
  if (cp_parser_allow_gnu_extensions_p (parser))
    {
      extension_specifier
	= cp_parser_storage_class_specifier_opt (parser);
      if (!extension_specifier)
	extension_specifier
	  = cp_parser_function_specifier_opt (parser,
					      /*decl_specs=*/NULL);
    }
  /* Look for the `template' keyword.  */
  cp_parser_require_keyword (parser, RID_TEMPLATE, RT_TEMPLATE);
  /* Let the front end know that we are processing an explicit
     instantiation.  */
  begin_explicit_instantiation ();
  /* [temp.explicit] says that we are supposed to ignore access
     control while processing explicit instantiation directives.
     (Every branch below pops this again before names from the
     instantiation itself are used.)  */
  push_deferring_access_checks (dk_no_check);
  /* Parse a decl-specifier-seq.  */
  cp_parser_decl_specifier_seq (parser,
				CP_PARSER_FLAGS_OPTIONAL,
				&decl_specifiers,
				&declares_class_or_enum);
  /* If there was exactly one decl-specifier, and it declared a class,
     and there's no declarator, then we have an explicit type
     instantiation.  */
  if (declares_class_or_enum && cp_parser_declares_only_class_p (parser))
    {
      tree type;
      type = check_tag_decl (&decl_specifiers,
			     /*explicit_type_instantiation_p=*/true);
      /* Turn access control back on for names used during
	 template instantiation.  */
      pop_deferring_access_checks ();
      if (type)
	do_type_instantiation (type, extension_specifier,
			       /*complain=*/tf_error);
    }
  else
    {
      cp_declarator *declarator;
      tree decl;
      /* Parse the declarator.  */
      declarator
	= cp_parser_declarator (parser, CP_PARSER_DECLARATOR_NAMED,
				/*ctor_dtor_or_conv_p=*/NULL,
				/*parenthesized_p=*/NULL,
				/*member_p=*/false,
				/*friend_p=*/false);
      /* Bit 2 of DECLARES_CLASS_OR_ENUM means a type was *defined*
	 in the decl-specifier-seq; that is invalid in a return type.  */
      if (declares_class_or_enum & 2)
	cp_parser_check_for_definition_in_return_type (declarator,
						       decl_specifiers.type,
						       decl_specifiers.locations[ds_type_spec]);
      if (declarator != cp_error_declarator)
	{
	  /* `inline' and `constexpr' are forbidden on explicit
	     instantiations; permerror allows them with -fpermissive.  */
	  if (decl_spec_seq_has_spec_p (&decl_specifiers, ds_inline))
	    permerror (decl_specifiers.locations[ds_inline],
		       "explicit instantiation shall not use"
		       " %<inline%> specifier");
	  if (decl_spec_seq_has_spec_p (&decl_specifiers, ds_constexpr))
	    permerror (decl_specifiers.locations[ds_constexpr],
		       "explicit instantiation shall not use"
		       " %<constexpr%> specifier");
	  decl = grokdeclarator (declarator, &decl_specifiers,
				 NORMAL, 0, &decl_specifiers.attributes);
	  /* Turn access control back on for names used during
	     template instantiation.  */
	  pop_deferring_access_checks ();
	  /* Do the explicit instantiation.  */
	  do_decl_instantiation (decl, extension_specifier);
	}
      else
	{
	  pop_deferring_access_checks ();
	  /* Skip the body of the explicit instantiation.  */
	  cp_parser_skip_to_end_of_statement (parser);
	}
    }
  /* We're done with the instantiation.  */
  end_explicit_instantiation ();
  cp_parser_consume_semicolon_at_end_of_statement (parser);
  timevar_pop (TV_TEMPLATE_INST);
}
/* Parse an explicit-specialization.
explicit-specialization:
template < > declaration
Although the standard says `declaration', what it really means is:
explicit-specialization:
template <> decl-specifier [opt] init-declarator [opt] ;
template <> function-definition
template <> explicit-specialization
template <> template-declaration */
static void
cp_parser_explicit_specialization (cp_parser* parser)
{
  bool need_lang_pop;
  /* Remember the first token for the C-linkage diagnostic below.  */
  cp_token *token = cp_lexer_peek_token (parser->lexer);
  /* Look for the `template' keyword.  */
  cp_parser_require_keyword (parser, RID_TEMPLATE, RT_TEMPLATE);
  /* Look for the `<'.  */
  cp_parser_require (parser, CPP_LESS, RT_LESS);
  /* Look for the `>'.  */
  cp_parser_require (parser, CPP_GREATER, RT_GREATER);
  /* We have processed another parameter list.  */
  ++parser->num_template_parameter_lists;
  /* [temp]
     A template ... explicit specialization ... shall not have C
     linkage.  */
  if (current_lang_name == lang_name_c)
    {
      error_at (token->location, "template specialization with C linkage");
      /* Give it C++ linkage to avoid confusing other parts of the
	 front end.  */
      push_lang_context (lang_name_cplusplus);
      need_lang_pop = true;
    }
  else
    need_lang_pop = false;
  /* Let the front end know that we are beginning a specialization.  */
  if (!begin_specialization ())
    {
      end_specialization ();
      return;
    }
  /* If the next keyword is `template', we need to figure out whether
     or not we're looking a template-declaration.  */
  if (cp_lexer_next_token_is_keyword (parser->lexer, RID_TEMPLATE))
    {
      /* `template <' followed by something other than `>' starts a
	 template-declaration; `template <>' is a nested explicit
	 specialization, handled by recursing.  */
      if (cp_lexer_peek_nth_token (parser->lexer, 2)->type == CPP_LESS
	  && cp_lexer_peek_nth_token (parser->lexer, 3)->type != CPP_GREATER)
	cp_parser_template_declaration_after_export (parser,
						     /*member_p=*/false);
      else
	cp_parser_explicit_specialization (parser);
    }
  else
    /* Parse the dependent declaration.  */
    cp_parser_single_declaration (parser,
				  /*checks=*/NULL,
				  /*member_p=*/false,
				  /*explicit_specialization_p=*/true,
				  /*friend_p=*/NULL);
  /* We're done with the specialization.  */
  end_specialization ();
  /* For the erroneous case of a template with C linkage, we pushed an
     implicit C++ linkage scope; exit that scope now.  */
  if (need_lang_pop)
    pop_lang_context ();
  /* We're done with this parameter list.  */
  --parser->num_template_parameter_lists;
}
/* Parse a type-specifier.
type-specifier:
simple-type-specifier
class-specifier
enum-specifier
elaborated-type-specifier
cv-qualifier
GNU Extension:
type-specifier:
__complex__
Returns a representation of the type-specifier. For a
class-specifier, enum-specifier, or elaborated-type-specifier, a
TREE_TYPE is returned; otherwise, a TYPE_DECL is returned.
The parser flags FLAGS is used to control type-specifier parsing.
If IS_DECLARATION is TRUE, then this type-specifier is appearing
in a decl-specifier-seq.
If DECLARES_CLASS_OR_ENUM is non-NULL, and the type-specifier is a
class-specifier, enum-specifier, or elaborated-type-specifier, then
*DECLARES_CLASS_OR_ENUM is set to a nonzero value. The value is 1
if a type is declared; 2 if it is defined. Otherwise, it is set to
zero.
If IS_CV_QUALIFIER is non-NULL, and the type-specifier is a
cv-qualifier, then IS_CV_QUALIFIER is set to TRUE. Otherwise, it
is set to FALSE. */
static tree
cp_parser_type_specifier (cp_parser* parser,
			  cp_parser_flags flags,
			  cp_decl_specifier_seq *decl_specs,
			  bool is_declaration,
			  int* declares_class_or_enum,
			  bool* is_cv_qualifier)
{
  tree type_spec = NULL_TREE;
  cp_token *token;
  enum rid keyword;
  /* DS stays ds_last unless the keyword names a simple specifier
     (const, volatile, restrict, __complex__) handled after the switch.  */
  cp_decl_spec ds = ds_last;
  /* Assume this type-specifier does not declare a new type.  */
  if (declares_class_or_enum)
    *declares_class_or_enum = 0;
  /* And that it does not specify a cv-qualifier.  */
  if (is_cv_qualifier)
    *is_cv_qualifier = false;
  /* Peek at the next token.  */
  token = cp_lexer_peek_token (parser->lexer);
  /* If we're looking at a keyword, we can use that to guide the
     production we choose.  */
  keyword = token->keyword;
  switch (keyword)
    {
    case RID_ENUM:
      if ((flags & CP_PARSER_FLAGS_NO_TYPE_DEFINITIONS))
	goto elaborated_type_specifier;
      /* Look for the enum-specifier.  */
      type_spec = cp_parser_enum_specifier (parser);
      /* If that worked, we're done.  */
      if (type_spec)
	{
	  if (declares_class_or_enum)
	    *declares_class_or_enum = 2;
	  if (decl_specs)
	    cp_parser_set_decl_spec_type (decl_specs,
					  type_spec,
					  token,
					  /*type_definition_p=*/true);
	  return type_spec;
	}
      else
	goto elaborated_type_specifier;
      /* Any of these indicate either a class-specifier, or an
	 elaborated-type-specifier.  */
    case RID_CLASS:
    case RID_STRUCT:
    case RID_UNION:
      if ((flags & CP_PARSER_FLAGS_NO_TYPE_DEFINITIONS))
	goto elaborated_type_specifier;
      /* Parse tentatively so that we can back up if we don't find a
	 class-specifier.  */
      cp_parser_parse_tentatively (parser);
      /* Look for the class-specifier.  */
      type_spec = cp_parser_class_specifier (parser);
      invoke_plugin_callbacks (PLUGIN_FINISH_TYPE, type_spec);
      /* If that worked, we're done.  */
      if (cp_parser_parse_definitely (parser))
	{
	  if (declares_class_or_enum)
	    *declares_class_or_enum = 2;
	  if (decl_specs)
	    cp_parser_set_decl_spec_type (decl_specs,
					  type_spec,
					  token,
					  /*type_definition_p=*/true);
	  return type_spec;
	}
      /* Fall through.  */
    elaborated_type_specifier:
      /* We're declaring (not defining) a class or enum.  */
      if (declares_class_or_enum)
	*declares_class_or_enum = 1;
      /* Fall through.  */
    case RID_TYPENAME:
      /* Look for an elaborated-type-specifier.  */
      type_spec
	= (cp_parser_elaborated_type_specifier
	   (parser,
	    decl_spec_seq_has_spec_p (decl_specs, ds_friend),
	    is_declaration));
      if (decl_specs)
	cp_parser_set_decl_spec_type (decl_specs,
				      type_spec,
				      token,
				      /*type_definition_p=*/false);
      return type_spec;
    case RID_CONST:
      ds = ds_const;
      if (is_cv_qualifier)
	*is_cv_qualifier = true;
      break;
    case RID_VOLATILE:
      ds = ds_volatile;
      if (is_cv_qualifier)
	*is_cv_qualifier = true;
      break;
    case RID_RESTRICT:
      ds = ds_restrict;
      if (is_cv_qualifier)
	*is_cv_qualifier = true;
      break;
    case RID_COMPLEX:
      /* The `__complex__' keyword is a GNU extension.  */
      ds = ds_complex;
      break;
    default:
      break;
    }
  /* Handle simple keywords.  */
  if (ds != ds_last)
    {
      if (decl_specs)
	{
	  set_and_check_decl_spec_loc (decl_specs, ds, token);
	  decl_specs->any_specifiers_p = true;
	}
      /* Consume the keyword and return its tree value.  */
      return cp_lexer_consume_token (parser->lexer)->u.value;
    }
  /* If we do not already have a type-specifier, assume we are looking
     at a simple-type-specifier.  */
  type_spec = cp_parser_simple_type_specifier (parser,
					       decl_specs,
					       flags);
  /* If we didn't find a type-specifier, and a type-specifier was not
     optional in this context, issue an error message.  */
  if (!type_spec && !(flags & CP_PARSER_FLAGS_OPTIONAL))
    {
      cp_parser_error (parser, "expected type specifier");
      return error_mark_node;
    }
  return type_spec;
}
/* Parse a simple-type-specifier.
simple-type-specifier:
:: [opt] nested-name-specifier [opt] type-name
:: [opt] nested-name-specifier template template-id
char
wchar_t
bool
short
int
long
signed
unsigned
float
double
void
C++0x Extension:
simple-type-specifier:
auto
decltype ( expression )
char16_t
char32_t
__underlying_type ( type-id )
GNU Extension:
simple-type-specifier:
__int128
__typeof__ unary-expression
__typeof__ ( type-id )
__typeof__ ( type-id ) { initializer-list , [opt] }
Returns the indicated TYPE_DECL. If DECL_SPECS is not NULL, it is
appropriately updated. */
static tree
cp_parser_simple_type_specifier (cp_parser* parser,
cp_decl_specifier_seq *decl_specs,
cp_parser_flags flags)
{
tree type = NULL_TREE;
cp_token *token;
int idx;
/* Peek at the next token. */
token = cp_lexer_peek_token (parser->lexer);
/* If we're looking at a keyword, things are easy. */
switch (token->keyword)
{
case RID_CHAR:
if (decl_specs)
decl_specs->explicit_char_p = true;
type = char_type_node;
break;
case RID_CHAR16:
type = char16_type_node;
break;
case RID_CHAR32:
type = char32_type_node;
break;
case RID_WCHAR:
type = wchar_type_node;
break;
case RID_BOOL:
type = boolean_type_node;
break;
case RID_SHORT:
set_and_check_decl_spec_loc (decl_specs, ds_short, token);
type = short_integer_type_node;
break;
case RID_INT:
if (decl_specs)
decl_specs->explicit_int_p = true;
type = integer_type_node;
break;
case RID_INT_N_0:
case RID_INT_N_1:
case RID_INT_N_2:
case RID_INT_N_3:
idx = token->keyword - RID_INT_N_0;
if (! int_n_enabled_p [idx])
break;
if (decl_specs)
{
decl_specs->explicit_intN_p = true;
decl_specs->int_n_idx = idx;
}
type = int_n_trees [idx].signed_type;
break;
case RID_LONG:
if (decl_specs)
set_and_check_decl_spec_loc (decl_specs, ds_long, token);
type = long_integer_type_node;
break;
case RID_SIGNED:
set_and_check_decl_spec_loc (decl_specs, ds_signed, token);
type = integer_type_node;
break;
case RID_UNSIGNED:
set_and_check_decl_spec_loc (decl_specs, ds_unsigned, token);
type = unsigned_type_node;
break;
case RID_FLOAT:
type = float_type_node;
break;
case RID_DOUBLE:
type = double_type_node;
break;
case RID_VOID:
type = void_type_node;
break;
case RID_AUTO:
maybe_warn_cpp0x (CPP0X_AUTO);
if (parser->auto_is_implicit_function_template_parm_p)
{
if (cxx_dialect >= cxx14)
type = synthesize_implicit_template_parm (parser);
else
type = error_mark_node;
if (current_class_type && LAMBDA_TYPE_P (current_class_type))
{
if (cxx_dialect < cxx14)
error_at (token->location,
"use of %<auto%> in lambda parameter declaration "
"only available with "
"-std=c++14 or -std=gnu++14");
}
else if (cxx_dialect < cxx14)
error_at (token->location,
"use of %<auto%> in parameter declaration "
"only available with "
"-std=c++14 or -std=gnu++14");
else
pedwarn (token->location, OPT_Wpedantic,
"ISO C++ forbids use of %<auto%> in parameter "
"declaration");
}
else
type = make_auto ();
break;
case RID_DECLTYPE:
/* Since DR 743, decltype can either be a simple-type-specifier by
itself or begin a nested-name-specifier. Parsing it will replace
it with a CPP_DECLTYPE, so just rewind and let the CPP_DECLTYPE
handling below decide what to do. */
cp_parser_decltype (parser);
cp_lexer_set_token_position (parser->lexer, token);
break;
case RID_TYPEOF:
/* Consume the `typeof' token. */
cp_lexer_consume_token (parser->lexer);
/* Parse the operand to `typeof'. */
type = cp_parser_sizeof_operand (parser, RID_TYPEOF);
/* If it is not already a TYPE, take its type. */
if (!TYPE_P (type))
type = finish_typeof (type);
if (decl_specs)
cp_parser_set_decl_spec_type (decl_specs, type,
token,
/*type_definition_p=*/false);
return type;
case RID_UNDERLYING_TYPE:
type = cp_parser_trait_expr (parser, RID_UNDERLYING_TYPE);
if (decl_specs)
cp_parser_set_decl_spec_type (decl_specs, type,
token,
/*type_definition_p=*/false);
return type;
case RID_BASES:
case RID_DIRECT_BASES:
type = cp_parser_trait_expr (parser, token->keyword);
if (decl_specs)
cp_parser_set_decl_spec_type (decl_specs, type,
token,
/*type_definition_p=*/false);
return type;
default:
break;
}
/* If token is an already-parsed decltype not followed by ::,
it's a simple-type-specifier. */
if (token->type == CPP_DECLTYPE
&& cp_lexer_peek_nth_token (parser->lexer, 2)->type != CPP_SCOPE)
{
type = token->u.value;
if (decl_specs)
{
cp_parser_set_decl_spec_type (decl_specs, type,
token,
/*type_definition_p=*/false);
/* Remember that we are handling a decltype in order to
implement the resolution of DR 1510 when the argument
isn't instantiation dependent. */
decl_specs->decltype_p = true;
}
cp_lexer_consume_token (parser->lexer);
return type;
}
/* If the type-specifier was for a built-in type, we're done. */
if (type)
{
/* Record the type. */
if (decl_specs
&& (token->keyword != RID_SIGNED
&& token->keyword != RID_UNSIGNED
&& token->keyword != RID_SHORT
&& token->keyword != RID_LONG))
cp_parser_set_decl_spec_type (decl_specs,
type,
token,
/*type_definition_p=*/false);
if (decl_specs)
decl_specs->any_specifiers_p = true;
/* Consume the token. */
cp_lexer_consume_token (parser->lexer);
if (type == error_mark_node)
return error_mark_node;
/* There is no valid C++ program where a non-template type is
followed by a "<". That usually indicates that the user thought
that the type was a template. */
cp_parser_check_for_invalid_template_id (parser, type, none_type,
token->location);
return TYPE_NAME (type);
}
/* The type-specifier must be a user-defined type. */
if (!(flags & CP_PARSER_FLAGS_NO_USER_DEFINED_TYPES))
{
bool qualified_p;
bool global_p;
/* Don't gobble tokens or issue error messages if this is an
optional type-specifier. */
if (flags & CP_PARSER_FLAGS_OPTIONAL)
cp_parser_parse_tentatively (parser);
/* Look for the optional `::' operator. */
global_p
= (cp_parser_global_scope_opt (parser,
/*current_scope_valid_p=*/false)
!= NULL_TREE);
/* Look for the nested-name specifier. */
qualified_p
= (cp_parser_nested_name_specifier_opt (parser,
/*typename_keyword_p=*/false,
/*check_dependency_p=*/true,
/*type_p=*/false,
/*is_declaration=*/false)
!= NULL_TREE);
token = cp_lexer_peek_token (parser->lexer);
/* If we have seen a nested-name-specifier, and the next token
is `template', then we are using the template-id production. */
if (parser->scope
&& cp_parser_optional_template_keyword (parser))
{
/* Look for the template-id. */
type = cp_parser_template_id (parser,
/*template_keyword_p=*/true,
/*check_dependency_p=*/true,
none_type,
/*is_declaration=*/false);
/* If the template-id did not name a type, we are out of
luck. */
if (TREE_CODE (type) != TYPE_DECL)
{
cp_parser_error (parser, "expected template-id for type");
type = NULL_TREE;
}
}
/* Otherwise, look for a type-name. */
else
type = cp_parser_type_name (parser);
/* Keep track of all name-lookups performed in class scopes. */
if (type
&& !global_p
&& !qualified_p
&& TREE_CODE (type) == TYPE_DECL
&& identifier_p (DECL_NAME (type)))
maybe_note_name_used_in_class (DECL_NAME (type), type);
/* If it didn't work out, we don't have a TYPE. */
if ((flags & CP_PARSER_FLAGS_OPTIONAL)
&& !cp_parser_parse_definitely (parser))
type = NULL_TREE;
if (type && decl_specs)
cp_parser_set_decl_spec_type (decl_specs, type,
token,
/*type_definition_p=*/false);
}
/* If we didn't get a type-name, issue an error message. */
if (!type && !(flags & CP_PARSER_FLAGS_OPTIONAL))
{
cp_parser_error (parser, "expected type-name");
return error_mark_node;
}
if (type && type != error_mark_node)
{
/* See if TYPE is an Objective-C type, and if so, parse and
accept any protocol references following it. Do this before
the cp_parser_check_for_invalid_template_id() call, because
Objective-C types can be followed by '<...>' which would
enclose protocol names rather than template arguments, and so
everything is fine. */
if (c_dialect_objc () && !parser->scope
&& (objc_is_id (type) || objc_is_class_name (type)))
{
tree protos = cp_parser_objc_protocol_refs_opt (parser);
tree qual_type = objc_get_protocol_qualified_type (type, protos);
/* Clobber the "unqualified" type previously entered into
DECL_SPECS with the new, improved protocol-qualified version. */
if (decl_specs)
decl_specs->type = qual_type;
return qual_type;
}
/* There is no valid C++ program where a non-template type is
followed by a "<". That usually indicates that the user
thought that the type was a template. */
cp_parser_check_for_invalid_template_id (parser, TREE_TYPE (type),
none_type,
token->location);
}
return type;
}
/* Parse a type-name.

   type-name:
     class-name
     enum-name
     typedef-name
     simple-template-id [in c++0x]

   enum-name:
     identifier

   typedef-name:
     identifier

   Returns a TYPE_DECL for the type.  */

static tree
cp_parser_type_name (cp_parser* parser)
{
  /* We can't know in advance which alternative this is, so try a
     class-name first, tentatively.  */
  cp_parser_parse_tentatively (parser);
  tree decl = cp_parser_class_name (parser,
				    /*typename_keyword_p=*/false,
				    /*template_keyword_p=*/false,
				    none_type,
				    /*check_dependency_p=*/true,
				    /*class_head_p=*/false,
				    /*is_declaration=*/false);
  if (cp_parser_parse_definitely (parser))
    return decl;

  /* Before C++11 the only remaining alternatives are a typedef-name
     or an enum-name.  */
  if (cxx_dialect < cxx11)
    return cp_parser_nonclass_name (parser);

  /* In C++11 it may also be a simple-template-id representing an
     instantiation of an alias template...  */
  cp_parser_parse_tentatively (parser);
  decl = cp_parser_template_id (parser,
				/*template_keyword_p=*/false,
				/*check_dependency_p=*/true,
				none_type,
				/*is_declaration=*/false);
  /* Note that this must be an instantiation of an alias template
     because [temp.names]/6 says:

	 A template-id that names an alias template specialization
	 is a type-name.

     Whereas [temp.names]/7 says:

	 A simple-template-id that names a class template
	 specialization is a class-name.  */
  if (decl != NULL_TREE
      && TREE_CODE (decl) == TYPE_DECL
      && TYPE_DECL_ALIAS_P (decl))
    gcc_assert (DECL_TEMPLATE_INSTANTIATION (decl));
  else
    cp_parser_simulate_error (parser);

  if (cp_parser_parse_definitely (parser))
    return decl;

  /* ... or it is a typedef-name or an enum-name after all.  */
  return cp_parser_nonclass_name (parser);
}
/* Parse a non-class type-name, that is, either an enum-name or a
   typedef-name.

   enum-name:
     identifier

   typedef-name:
     identifier

   Returns a TYPE_DECL for the type.  */

static tree
cp_parser_nonclass_name (cp_parser* parser)
{
  cp_token *token = cp_lexer_peek_token (parser->lexer);

  tree name = cp_parser_identifier (parser);
  if (name == error_mark_node)
    return error_mark_node;

  /* Look up the type-name.  */
  tree tdecl = cp_parser_lookup_name_simple (parser, name, token->location);
  tdecl = strip_using_decl (tdecl);

  /* If the lookup did not yield a type, the name may still denote an
     Objective-C type, possibly followed by protocol references.  */
  if (TREE_CODE (tdecl) != TYPE_DECL
      && (objc_is_id (name) || objc_is_class_name (name)))
    {
      tree protos = cp_parser_objc_protocol_refs_opt (parser);
      tree type = objc_get_protocol_qualified_type (name, protos);
      if (type)
	tdecl = TYPE_NAME (type);
    }

  /* Issue an error if we did not find a type-name.  */
  if (TREE_CODE (tdecl) != TYPE_DECL
      /* In Objective-C, we have the complication that class names are
	 normally type names and start declarations (eg, the
	 "NSObject" in "NSObject *object;"), but can be used in an
	 Objective-C 2.0 dot-syntax (as in "NSObject.version") which
	 is an expression.  So, a classname followed by a dot is not a
	 valid type-name.  */
      || (objc_is_class_name (TREE_TYPE (tdecl))
	  && cp_lexer_peek_token (parser->lexer)->type == CPP_DOT))
    {
      if (!cp_parser_simulate_error (parser))
	cp_parser_name_lookup_error (parser, name, tdecl,
				     NLE_TYPE, token->location);
      return error_mark_node;
    }

  /* Remember that the name was used in the definition of the
     current class so that we can check later to see if the
     meaning would have been different after the class was
     entirely defined.  */
  if (tdecl != error_mark_node && !parser->scope)
    maybe_note_name_used_in_class (name, tdecl);

  return tdecl;
}
/* Parse an elaborated-type-specifier.  Note that the grammar given
   here incorporates the resolution to DR68.

   elaborated-type-specifier:
     class-key :: [opt] nested-name-specifier [opt] identifier
     class-key :: [opt] nested-name-specifier [opt] template [opt] template-id
     enum-key :: [opt] nested-name-specifier [opt] identifier
     typename :: [opt] nested-name-specifier identifier
     typename :: [opt] nested-name-specifier template [opt]
       template-id

   GNU extension:

   elaborated-type-specifier:
     class-key attributes :: [opt] nested-name-specifier [opt] identifier
     class-key attributes :: [opt] nested-name-specifier [opt]
       template [opt] template-id
     enum attributes :: [opt] nested-name-specifier [opt] identifier

   If IS_FRIEND is TRUE, then this elaborated-type-specifier is being
   declared `friend'.  If IS_DECLARATION is TRUE, then this
   elaborated-type-specifier appears in a decl-specifiers-seq, i.e.,
   something is being declared.

   Returns the TYPE specified.  */

static tree
cp_parser_elaborated_type_specifier (cp_parser* parser,
				     bool is_friend,
				     bool is_declaration)
{
  /* Which kind of elaborated-type-specifier this is: `enum',
     `typename', or one of the class-keys.  */
  enum tag_types tag_type;
  tree identifier;
  /* The type denoted, once determined; NULL_TREE until then.  */
  tree type = NULL_TREE;
  tree attributes = NULL_TREE;
  tree globalscope;
  /* The token at which the identifier or template-id begins; used for
     diagnostic locations.  */
  cp_token *token = NULL;

  /* See if we're looking at the `enum' keyword.  */
  if (cp_lexer_next_token_is_keyword (parser->lexer, RID_ENUM))
    {
      /* Consume the `enum' token.  */
      cp_lexer_consume_token (parser->lexer);
      /* Remember that it's an enumeration type.  */
      tag_type = enum_type;
      /* Issue a warning if the `struct' or `class' key (for C++0x scoped
	 enums) is used here.  */
      if (cp_lexer_next_token_is_keyword (parser->lexer, RID_CLASS)
	  || cp_lexer_next_token_is_keyword (parser->lexer, RID_STRUCT))
	{
	  pedwarn (input_location, 0, "elaborated-type-specifier "
		   "for a scoped enum must not use the %<%D%> keyword",
		   cp_lexer_peek_token (parser->lexer)->u.value);
	  /* Consume the `struct' or `class' and parse it anyway.  */
	  cp_lexer_consume_token (parser->lexer);
	}
      /* Parse the attributes.  */
      attributes = cp_parser_attributes_opt (parser);
    }
  /* Or, it might be `typename'.  */
  else if (cp_lexer_next_token_is_keyword (parser->lexer,
					   RID_TYPENAME))
    {
      /* Consume the `typename' token.  */
      cp_lexer_consume_token (parser->lexer);
      /* Remember that it's a `typename' type.  */
      tag_type = typename_type;
    }
  /* Otherwise it must be a class-key.  */
  else
    {
      tag_type = cp_parser_class_key (parser);
      if (tag_type == none_type)
	return error_mark_node;
      /* Parse the attributes.  */
      attributes = cp_parser_attributes_opt (parser);
    }

  /* Look for the `::' operator.  */
  globalscope = cp_parser_global_scope_opt (parser,
					    /*current_scope_valid_p=*/false);
  /* Look for the nested-name-specifier.  For `typename' (without a
     leading `::') the nested-name-specifier is required by the
     grammar; otherwise it is optional.  */
  if (tag_type == typename_type && !globalscope)
    {
      if (!cp_parser_nested_name_specifier (parser,
					    /*typename_keyword_p=*/true,
					    /*check_dependency_p=*/true,
					    /*type_p=*/true,
					    is_declaration))
	return error_mark_node;
    }
  else
    /* Even though `typename' is not present, the proposed resolution
       to Core Issue 180 says that in `class A<T>::B', `B' should be
       considered a type-name, even if `A<T>' is dependent.  */
    cp_parser_nested_name_specifier_opt (parser,
					 /*typename_keyword_p=*/true,
					 /*check_dependency_p=*/true,
					 /*type_p=*/true,
					 is_declaration);

  /* For everything but enumeration types, consider a template-id.
     For an enumeration type, consider only a plain identifier.  */
  if (tag_type != enum_type)
    {
      bool template_p = false;
      tree decl;

      /* Allow the `template' keyword.  */
      template_p = cp_parser_optional_template_keyword (parser);
      /* If we didn't see `template', we don't know if there's a
	 template-id or not.  */
      if (!template_p)
	cp_parser_parse_tentatively (parser);
      /* Parse the template-id.  */
      token = cp_lexer_peek_token (parser->lexer);
      decl = cp_parser_template_id (parser, template_p,
				    /*check_dependency_p=*/true,
				    tag_type,
				    is_declaration);
      /* If we didn't find a template-id, look for an ordinary
	 identifier.  */
      if (!template_p && !cp_parser_parse_definitely (parser))
	;
      /* We can get here when cp_parser_template_id, called by
	 cp_parser_class_name with tag_type == none_type, succeeds
	 and caches a BASELINK.  Then, when called again here,
	 instead of failing and returning an error_mark_node
	 returns it (see template/typename17.C in C++11).
	 ??? Could we diagnose this earlier?  */
      else if (tag_type == typename_type && BASELINK_P (decl))
	{
	  cp_parser_diagnose_invalid_type_name (parser, decl, token->location);
	  type = error_mark_node;
	}
      /* If DECL is a TEMPLATE_ID_EXPR, and the `typename' keyword is
	 in effect, then we must assume that, upon instantiation, the
	 template will correspond to a class.  */
      else if (TREE_CODE (decl) == TEMPLATE_ID_EXPR
	       && tag_type == typename_type)
	type = make_typename_type (parser->scope, decl,
				   typename_type,
				   /*complain=*/tf_error);
      /* If the `typename' keyword is in effect and DECL is not a type
	 decl, then type is non existent.  */
      else if (tag_type == typename_type && TREE_CODE (decl) != TYPE_DECL)
	;
      else if (TREE_CODE (decl) == TYPE_DECL)
	type = check_elaborated_type_specifier (tag_type, decl,
						/*allow_template_p=*/true);
      else if (decl == error_mark_node)
	type = error_mark_node;
    }

  /* If no template-id yielded a type, the specifier must use a plain
     identifier.  */
  if (!type)
    {
      token = cp_lexer_peek_token (parser->lexer);
      identifier = cp_parser_identifier (parser);

      if (identifier == error_mark_node)
	{
	  parser->scope = NULL_TREE;
	  return error_mark_node;
	}

      /* For a `typename', we needn't call xref_tag.  */
      if (tag_type == typename_type
	  && TREE_CODE (parser->scope) != NAMESPACE_DECL)
	return cp_parser_make_typename_type (parser, identifier,
					     token->location);

      /* Template parameter lists apply only if we are not within a
	 function parameter list.  */
      bool template_parm_lists_apply
	  = parser->num_template_parameter_lists;
      if (template_parm_lists_apply)
	for (cp_binding_level *s = current_binding_level;
	     s && s->kind != sk_template_parms;
	     s = s->level_chain)
	  if (s->kind == sk_function_parms)
	    template_parm_lists_apply = false;

      /* Look up a qualified name in the usual way.  */
      if (parser->scope)
	{
	  tree decl;
	  tree ambiguous_decls;

	  decl = cp_parser_lookup_name (parser, identifier,
					tag_type,
					/*is_template=*/false,
					/*is_namespace=*/false,
					/*check_dependency=*/true,
					&ambiguous_decls,
					token->location);

	  /* If the lookup was ambiguous, an error will already have been
	     issued.  */
	  if (ambiguous_decls)
	    return error_mark_node;

	  /* If we are parsing friend declaration, DECL may be a
	     TEMPLATE_DECL tree node here.  However, we need to check
	     whether this TEMPLATE_DECL results in valid code.  Consider
	     the following example:

	       namespace N {
		 template <class T> class C {};
	       }
	       class X {
		 template <class T> friend class N::C; // #1, valid code
	       };
	       template <class T> class Y {
		 friend class N::C; // #2, invalid code
	       };

	     For both case #1 and #2, we arrive at a TEMPLATE_DECL after
	     name lookup of `N::C'.  We see that friend declaration must
	     be template for the code to be valid.  Note that
	     processing_template_decl does not work here since it is
	     always 1 for the above two cases.  */

	  decl = (cp_parser_maybe_treat_template_as_class
		  (decl, /*tag_name_p=*/is_friend
			 && template_parm_lists_apply));

	  if (TREE_CODE (decl) != TYPE_DECL)
	    {
	      cp_parser_diagnose_invalid_type_name (parser,
						    identifier,
						    token->location);
	      return error_mark_node;
	    }

	  if (TREE_CODE (TREE_TYPE (decl)) != TYPENAME_TYPE)
	    {
	      bool allow_template = (template_parm_lists_apply
				     || DECL_SELF_REFERENCE_P (decl));
	      type = check_elaborated_type_specifier (tag_type, decl,
						      allow_template);

	      if (type == error_mark_node)
		return error_mark_node;
	    }

	  /* Forward declarations of nested types, such as

	       class C1::C2;
	       class C1::C2::C3;

	     are invalid unless all components preceding the final '::'
	     are complete.  If all enclosing types are complete, these
	     declarations become merely pointless.

	     Invalid forward declarations of nested types are errors
	     caught elsewhere in parsing.  Those that are pointless arrive
	     here.  */

	  if (cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON)
	      && !is_friend && !processing_explicit_instantiation)
	    warning (0, "declaration %qD does not declare anything", decl);

	  type = TREE_TYPE (decl);
	}
      else
	{
	  /* An elaborated-type-specifier sometimes introduces a new type and
	     sometimes names an existing type.  Normally, the rule is that it
	     introduces a new type only if there is not an existing type of
	     the same name already in scope.  For example, given:

	       struct S {};
	       void f() { struct S s; }

	     the `struct S' in the body of `f' is the same `struct S' as in
	     the global scope; the existing definition is used.  However, if
	     there were no global declaration, this would introduce a new
	     local class named `S'.

	     An exception to this rule applies to the following code:

	       namespace N { struct S; }

	     Here, the elaborated-type-specifier names a new type
	     unconditionally; even if there is already an `S' in the
	     containing scope this declaration names a new type.
	     This exception only applies if the elaborated-type-specifier
	     forms the complete declaration:

	       [class.name]

	       A declaration consisting solely of `class-key identifier ;' is
	       either a redeclaration of the name in the current scope or a
	       forward declaration of the identifier as a class name.  It
	       introduces the name into the current scope.

	     We are in this situation precisely when the next token is a `;'.

	     An exception to the exception is that a `friend' declaration does
	     *not* name a new type; i.e., given:

	       struct S { friend struct T; };

	     `T' is not a new type in the scope of `S'.

	     Also, `new struct S' or `sizeof (struct S)' never results in the
	     definition of a new type; a new type can only be declared in a
	     declaration context.  */

	  tag_scope ts;
	  bool template_p;

	  if (is_friend)
	    /* Friends have special name lookup rules.  */
	    ts = ts_within_enclosing_non_class;
	  else if (is_declaration
		   && cp_lexer_next_token_is (parser->lexer,
					      CPP_SEMICOLON))
	    /* This is a `class-key identifier ;' */
	    ts = ts_current;
	  else
	    ts = ts_global;

	  template_p =
	    (template_parm_lists_apply
	     && (cp_parser_next_token_starts_class_definition_p (parser)
		 || cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON)));
	  /* An unqualified name was used to reference this type, so
	     there were no qualifying templates.  */
	  if (template_parm_lists_apply
	      && !cp_parser_check_template_parameters (parser,
						       /*num_templates=*/0,
						       token->location,
						       /*declarator=*/NULL))
	    return error_mark_node;

	  type = xref_tag (tag_type, identifier, ts, template_p);
	}
    }

  if (type == error_mark_node)
    return error_mark_node;

  /* Allow attributes on forward declarations of classes.  */
  if (attributes)
    {
      if (TREE_CODE (type) == TYPENAME_TYPE)
	warning (OPT_Wattributes,
		 "attributes ignored on uninstantiated type");
      else if (tag_type != enum_type && CLASSTYPE_TEMPLATE_INSTANTIATION (type)
	       && ! processing_explicit_instantiation)
	warning (OPT_Wattributes,
		 "attributes ignored on template instantiation");
      else if (is_declaration && cp_parser_declares_only_class_p (parser))
	cplus_decl_attributes (&type, attributes, (int) ATTR_FLAG_TYPE_IN_PLACE);
      else
	warning (OPT_Wattributes,
		 "attributes ignored on elaborated-type-specifier that is not a forward declaration");
    }

  if (tag_type != enum_type)
    {
      /* Indicate whether this class was declared as a `class' or as a
	 `struct'.  */
      if (TREE_CODE (type) == RECORD_TYPE)
	CLASSTYPE_DECLARED_CLASS (type) = (tag_type == class_type);
      cp_parser_check_class_key (tag_type, type);
    }

  /* A "<" cannot follow an elaborated type specifier.  If that
     happens, the user was probably trying to form a template-id.  */
  cp_parser_check_for_invalid_template_id (parser, type, tag_type,
					   token->location);

  return type;
}
/* Parse an enum-specifier.

   enum-specifier:
     enum-head { enumerator-list [opt] }
     enum-head { enumerator-list , } [C++0x]

   enum-head:
     enum-key identifier [opt] enum-base [opt]
     enum-key nested-name-specifier identifier enum-base [opt]

   enum-key:
     enum
     enum class [C++0x]
     enum struct [C++0x]

   enum-base: [C++0x]
     : type-specifier-seq

   opaque-enum-specifier:
     enum-key identifier enum-base [opt] ;

   GNU Extensions:
     enum-key attributes[opt] identifier [opt] enum-base [opt]
       { enumerator-list [opt] }attributes[opt]
     enum-key attributes[opt] identifier [opt] enum-base [opt]
       { enumerator-list, }attributes[opt] [C++0x]

   Returns an ENUM_TYPE representing the enumeration, or NULL_TREE
   if the token stream isn't an enum-specifier after all.  */

static tree
cp_parser_enum_specifier (cp_parser* parser)
{
  tree identifier;
  tree type = NULL_TREE;
  tree prev_scope;
  tree nested_name_specifier = NULL_TREE;
  tree attributes;
  bool scoped_enum_p = false;
  bool has_underlying_type = false;
  /* Saved TYPE_BEING_DEFINED flag of the enclosing class (when the
     enum-head has a nested-name-specifier); restored before return.  */
  bool nested_being_defined = false;
  /* True when an enumerator list may legitimately be attached to TYPE
     (a previously opaque enum, or a specialization).  */
  bool new_value_list = false;
  /* Set by start_enum when it creates a brand-new type.  */
  bool is_new_type = false;
  bool is_anonymous = false;
  tree underlying_type = NULL_TREE;
  cp_token *type_start_token = NULL;
  bool saved_colon_corrects_to_scope_p = parser->colon_corrects_to_scope_p;

  /* A `:' here introduces the enum-base, not a scope; disable the
     typo-correction that rewrites `:' to `::'.  */
  parser->colon_corrects_to_scope_p = false;

  /* Parse tentatively so that we can back up if we don't find a
     enum-specifier.  */
  cp_parser_parse_tentatively (parser);

  /* Caller guarantees that the current token is 'enum', an identifier
     possibly follows, and the token after that is an opening brace.
     If we don't have an identifier, fabricate an anonymous name for
     the enumeration being defined.  */
  cp_lexer_consume_token (parser->lexer);

  /* Parse the "class" or "struct", which indicates a scoped
     enumeration type in C++0x.  */
  if (cp_lexer_next_token_is_keyword (parser->lexer, RID_CLASS)
      || cp_lexer_next_token_is_keyword (parser->lexer, RID_STRUCT))
    {
      if (cxx_dialect < cxx11)
	maybe_warn_cpp0x (CPP0X_SCOPED_ENUMS);

      /* Consume the `struct' or `class' token.  */
      cp_lexer_consume_token (parser->lexer);

      scoped_enum_p = true;
    }

  attributes = cp_parser_attributes_opt (parser);

  /* Clear the qualification.  */
  parser->scope = NULL_TREE;
  parser->qualifying_scope = NULL_TREE;
  parser->object_scope = NULL_TREE;

  /* Figure out in what scope the declaration is being placed.  */
  prev_scope = current_scope ();

  type_start_token = cp_lexer_peek_token (parser->lexer);

  /* Access checks are deferred (and discarded) while deciding whether
     this really is an enum-specifier.  */
  push_deferring_access_checks (dk_no_check);
  nested_name_specifier
      = cp_parser_nested_name_specifier_opt (parser,
					     /*typename_keyword_p=*/true,
					     /*check_dependency_p=*/false,
					     /*type_p=*/false,
					     /*is_declaration=*/false);

  if (nested_name_specifier)
    {
      tree name;

      identifier = cp_parser_identifier (parser);
      name = cp_parser_lookup_name (parser, identifier,
				    enum_type,
				    /*is_template=*/false,
				    /*is_namespace=*/false,
				    /*check_dependency=*/true,
				    /*ambiguous_decls=*/NULL,
				    input_location);
      if (name && name != error_mark_node)
	{
	  type = TREE_TYPE (name);
	  if (TREE_CODE (type) == TYPENAME_TYPE)
	    {
	      /* Are template enums allowed in ISO? */
	      if (template_parm_scope_p ())
		pedwarn (type_start_token->location, OPT_Wpedantic,
			 "%qD is an enumeration template", name);
	      /* ignore a typename reference, for it will be solved by name
		 in start_enum.  */
	      type = NULL_TREE;
	    }
	}
      else if (nested_name_specifier == error_mark_node)
	/* We already issued an error.  */;
      else
	error_at (type_start_token->location,
		  "%qD is not an enumerator-name", identifier);
    }
  else
    {
      if (cp_lexer_next_token_is (parser->lexer, CPP_NAME))
	identifier = cp_parser_identifier (parser);
      else
	{
	  identifier = make_anon_name ();
	  is_anonymous = true;
	  if (scoped_enum_p)
	    error_at (type_start_token->location,
		      "anonymous scoped enum is not allowed");
	}
    }
  pop_deferring_access_checks ();

  /* Check for the `:' that denotes a specified underlying type in C++0x.
     Note that a ':' could also indicate a bitfield width, however.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_COLON))
    {
      cp_decl_specifier_seq type_specifiers;

      /* Consume the `:'.  */
      cp_lexer_consume_token (parser->lexer);

      /* Parse the type-specifier-seq.  */
      cp_parser_type_specifier_seq (parser, /*is_declaration=*/false,
				    /*is_trailing_return=*/false,
				    &type_specifiers);

      /* At this point this is surely not elaborated type specifier.  */
      if (!cp_parser_parse_definitely (parser))
	return NULL_TREE;

      if (cxx_dialect < cxx11)
	maybe_warn_cpp0x (CPP0X_SCOPED_ENUMS);

      has_underlying_type = true;

      /* If that didn't work, stop.  */
      if (type_specifiers.type != error_mark_node)
	{
	  underlying_type = grokdeclarator (NULL, &type_specifiers, TYPENAME,
					    /*initialized=*/0, NULL);
	  if (underlying_type == error_mark_node
	      || check_for_bare_parameter_packs (underlying_type))
	    underlying_type = NULL_TREE;
	}
    }

  /* Look for the `{' but don't consume it yet.  */
  if (!cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE))
    {
      if (cxx_dialect < cxx11 || (!scoped_enum_p && !underlying_type))
	{
	  cp_parser_error (parser, "expected %<{%>");
	  if (has_underlying_type)
	    {
	      /* We have already committed to the parse; bail out and
		 restore the saved parser flag.  */
	      type = NULL_TREE;
	      goto out;
	    }
	}
      /* An opaque-enum-specifier must have a ';' here.  */
      if ((scoped_enum_p || underlying_type)
	  && cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON))
	{
	  cp_parser_error (parser, "expected %<;%> or %<{%>");
	  if (has_underlying_type)
	    {
	      type = NULL_TREE;
	      goto out;
	    }
	}
    }

  /* If we saw an enum-base, the `:' already forced a commit above.  */
  if (!has_underlying_type && !cp_parser_parse_definitely (parser))
    return NULL_TREE;

  if (nested_name_specifier)
    {
      if (CLASS_TYPE_P (nested_name_specifier))
	{
	  nested_being_defined = TYPE_BEING_DEFINED (nested_name_specifier);
	  TYPE_BEING_DEFINED (nested_name_specifier) = 1;
	  push_scope (nested_name_specifier);
	}
      else if (TREE_CODE (nested_name_specifier) == NAMESPACE_DECL)
	{
	  push_nested_namespace (nested_name_specifier);
	}
    }

  /* Issue an error message if type-definitions are forbidden here.  */
  if (!cp_parser_check_type_definition (parser))
    type = error_mark_node;
  else
    /* Create the new type.  We do this before consuming the opening
       brace so the enum will be recorded as being on the line of its
       tag (or the 'enum' keyword, if there is no tag).  */
    type = start_enum (identifier, type, underlying_type,
		       scoped_enum_p, &is_new_type);

  /* If the next token is not '{' it is an opaque-enum-specifier or an
     elaborated-type-specifier.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE))
    {
      timevar_push (TV_PARSE_ENUM);
      if (nested_name_specifier
	  && nested_name_specifier != error_mark_node)
	{
	  /* The following catches invalid code such as:
	     enum class S<int>::E { A, B, C }; */
	  if (!processing_specialization
	      && CLASS_TYPE_P (nested_name_specifier)
	      && CLASSTYPE_USE_TEMPLATE (nested_name_specifier))
	    error_at (type_start_token->location, "cannot add an enumerator "
		      "list to a template instantiation");

	  if (TREE_CODE (nested_name_specifier) == TYPENAME_TYPE)
	    {
	      error_at (type_start_token->location,
			"%<%T::%E%> has not been declared",
			TYPE_CONTEXT (nested_name_specifier),
			nested_name_specifier);
	      type = error_mark_node;
	    }
	  /* If that scope does not contain the scope in which the
	     class was originally declared, the program is invalid.  */
	  else if (prev_scope && !is_ancestor (prev_scope,
					       nested_name_specifier))
	    {
	      if (at_namespace_scope_p ())
		error_at (type_start_token->location,
			  "declaration of %qD in namespace %qD which does not "
			  "enclose %qD",
			  type, prev_scope, nested_name_specifier);
	      else
		error_at (type_start_token->location,
			  "declaration of %qD in %qD which does not "
			  "enclose %qD",
			  type, prev_scope, nested_name_specifier);
	      type = error_mark_node;
	    }
	}

      if (scoped_enum_p)
	begin_scope (sk_scoped_enum, type);

      /* Consume the opening brace.  */
      cp_lexer_consume_token (parser->lexer);

      if (type == error_mark_node)
	; /* Nothing to add */
      else if (OPAQUE_ENUM_P (type)
	       || (cxx_dialect > cxx98 && processing_specialization))
	{
	  new_value_list = true;
	  SET_OPAQUE_ENUM_P (type, false);
	  DECL_SOURCE_LOCATION (TYPE_NAME (type)) = type_start_token->location;
	}
      else
	{
	  error_at (type_start_token->location,
		    "multiple definition of %q#T", type);
	  inform (DECL_SOURCE_LOCATION (TYPE_MAIN_DECL (type)),
		  "previous definition here");
	  type = error_mark_node;
	}

      if (type == error_mark_node)
	cp_parser_skip_to_end_of_block_or_statement (parser);
      /* If the next token is not '}', then there are some enumerators.  */
      else if (cp_lexer_next_token_is (parser->lexer, CPP_CLOSE_BRACE))
	{
	  if (is_anonymous && !scoped_enum_p)
	    pedwarn (type_start_token->location, OPT_Wpedantic,
		     "ISO C++ forbids empty anonymous enum");
	}
      else
	cp_parser_enumerator_list (parser, type);

      /* Consume the final '}'.  */
      cp_parser_require (parser, CPP_CLOSE_BRACE, RT_CLOSE_BRACE);

      if (scoped_enum_p)
	finish_scope ();
      timevar_pop (TV_PARSE_ENUM);
    }
  else
    {
      /* If a ';' follows, then it is an opaque-enum-specifier
	 and additional restrictions apply.  */
      if (cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON))
	{
	  if (is_anonymous)
	    error_at (type_start_token->location,
		      "opaque-enum-specifier without name");
	  else if (nested_name_specifier)
	    error_at (type_start_token->location,
		      "opaque-enum-specifier must use a simple identifier");
	}
    }

  /* Look for trailing attributes to apply to this enumeration, and
     apply them if appropriate.  */
  if (cp_parser_allow_gnu_extensions_p (parser))
    {
      tree trailing_attr = cp_parser_gnu_attributes_opt (parser);
      trailing_attr = chainon (trailing_attr, attributes);
      cplus_decl_attributes (&type,
			     trailing_attr,
			     (int) ATTR_FLAG_TYPE_IN_PLACE);
    }

  /* Finish up the enumeration.  */
  if (type != error_mark_node)
    {
      if (new_value_list)
	finish_enum_value_list (type);
      if (is_new_type)
	finish_enum (type);
    }

  /* Undo the scope changes made above for a nested-name-specifier.  */
  if (nested_name_specifier)
    {
      if (CLASS_TYPE_P (nested_name_specifier))
	{
	  TYPE_BEING_DEFINED (nested_name_specifier) = nested_being_defined;
	  pop_scope (nested_name_specifier);
	}
      else if (TREE_CODE (nested_name_specifier) == NAMESPACE_DECL)
	{
	  pop_nested_namespace (nested_name_specifier);
	}
    }
 out:
  parser->colon_corrects_to_scope_p = saved_colon_corrects_to_scope_p;
  return type;
}
/* Parse an enumerator-list.  The enumerators all have the indicated
   TYPE.

   enumerator-list:
     enumerator-definition
     enumerator-list , enumerator-definition  */

static void
cp_parser_enumerator_list (cp_parser* parser, tree type)
{
  do
    {
      /* Parse an enumerator-definition.  */
      cp_parser_enumerator_definition (parser, type);

      /* No `,' means we have reached the end of the list.  */
      if (cp_lexer_next_token_is_not (parser->lexer, CPP_COMMA))
	return;

      /* Otherwise, consume the `,' and keep going.  */
      cp_lexer_consume_token (parser->lexer);
    }
  /* A `}' right after the `,' means the list ended with a trailing
     comma.  */
  while (cp_lexer_next_token_is_not (parser->lexer, CPP_CLOSE_BRACE));

  /* Trailing commas are valid C++11 but only a pedantic extension
     before that.  */
  if (cxx_dialect < cxx11 && !in_system_header_at (input_location))
    pedwarn (input_location, OPT_Wpedantic,
	     "comma at end of enumerator list");
}
/* Parse an enumerator-definition.  The enumerator has the indicated
   TYPE.

   enumerator-definition:
     enumerator
     enumerator = constant-expression

   enumerator:
     identifier  */

static void
cp_parser_enumerator_definition (cp_parser* parser, tree type)
{
  /* Save the input location because we are interested in the location
     of the identifier and not the location of the explicit value.  */
  location_t loc = cp_lexer_peek_token (parser->lexer)->location;

  /* Look for the identifier.  */
  tree identifier = cp_parser_identifier (parser);
  if (identifier == error_mark_node)
    return;

  /* An `=' introduces an explicit value; without one the enumerator
     gets no explicit value.  */
  tree value = NULL_TREE;
  if (cp_lexer_next_token_is (parser->lexer, CPP_EQ))
    {
      /* Consume the `=' token.  */
      cp_lexer_consume_token (parser->lexer);
      /* Parse the value.  */
      value = cp_parser_constant_expression (parser);
    }

  /* If we are processing a template, make sure the initializer of the
     enumerator doesn't contain any bare template parameter pack.  */
  if (check_for_bare_parameter_packs (value))
    value = error_mark_node;

  /* Create the enumerator.  */
  build_enumerator (identifier, value, type, loc);
}
/* Parse a namespace-name.

   namespace-name:
     original-namespace-name
     namespace-alias

   Returns the NAMESPACE_DECL for the namespace.  */

static tree
cp_parser_namespace_name (cp_parser* parser)
{
  cp_token *token = cp_lexer_peek_token (parser->lexer);

  /* Get the name of the namespace.  */
  tree identifier = cp_parser_identifier (parser);
  if (identifier == error_mark_node)
    return error_mark_node;

  /* Look up the identifier in the currently active scope.  Look only
     for namespaces, due to:

       [basic.lookup.udir]

       When looking up a namespace-name in a using-directive or alias
       definition, only namespace names are considered.

     And:

       [basic.lookup.qual]

       During the lookup of a name preceding the :: scope resolution
       operator, object, function, and enumerator names are ignored.

     (Note that cp_parser_qualifying_entity only calls this
     function if the token after the name is the scope resolution
     operator.)  */
  tree ns = cp_parser_lookup_name (parser, identifier,
				   none_type,
				   /*is_template=*/false,
				   /*is_namespace=*/true,
				   /*check_dependency=*/true,
				   /*ambiguous_decls=*/NULL,
				   token->location);

  /* If it's not a namespace, issue an error.  */
  if (ns == error_mark_node || TREE_CODE (ns) != NAMESPACE_DECL)
    {
      if (!cp_parser_uncommitted_to_tentative_parse_p (parser))
	error_at (token->location, "%qD is not a namespace-name", identifier);
      cp_parser_error (parser, "expected namespace-name");
      return error_mark_node;
    }

  return ns;
}
/* Parse a namespace-definition.
namespace-definition:
named-namespace-definition
unnamed-namespace-definition
named-namespace-definition:
original-namespace-definition
extension-namespace-definition
original-namespace-definition:
namespace identifier { namespace-body }
extension-namespace-definition:
namespace original-namespace-name { namespace-body }
unnamed-namespace-definition:
namespace { namespace-body } */
static void
cp_parser_namespace_definition (cp_parser* parser)
{
  tree identifier, attribs;
  bool has_visibility;
  bool is_inline;

  /* A pending OpenMP "declare simd" must not carry over into the
     namespace body.  */
  cp_ensure_no_omp_declare_simd (parser);

  /* An optional `inline' keyword precedes `namespace' for an inline
     namespace (C++11; warned about in earlier dialects).  */
  if (cp_lexer_next_token_is_keyword (parser->lexer, RID_INLINE))
    {
      maybe_warn_cpp0x (CPP0X_INLINE_NAMESPACES);
      is_inline = true;
      cp_lexer_consume_token (parser->lexer);
    }
  else
    is_inline = false;

  /* Look for the `namespace' keyword.  */
  cp_parser_require_keyword (parser, RID_NAMESPACE, RT_NAMESPACE);

  /* Get the name of the namespace.  We do not attempt to distinguish
     between an original-namespace-definition and an
     extension-namespace-definition at this point.  The semantic
     analysis routines are responsible for that.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_NAME))
    identifier = cp_parser_identifier (parser);
  else
    /* An unnamed-namespace-definition.  */
    identifier = NULL_TREE;

  /* Parse any specified attributes.  */
  attribs = cp_parser_attributes_opt (parser);

  /* Look for the `{' to start the namespace.  */
  cp_parser_require (parser, CPP_OPEN_BRACE, RT_OPEN_BRACE);
  /* Start the namespace.  */
  push_namespace (identifier);

  /* "inline namespace" is equivalent to a stub namespace definition
     followed by a strong using directive.  */
  if (is_inline)
    {
      tree name_space = current_namespace;
      /* Set up namespace association.  */
      DECL_NAMESPACE_ASSOCIATIONS (name_space)
	= tree_cons (CP_DECL_CONTEXT (name_space), NULL_TREE,
		     DECL_NAMESPACE_ASSOCIATIONS (name_space));
      /* Import the contents of the inline namespace.  The pop/push
	 pair is needed so the using-directive is issued from the
	 enclosing namespace.  */
      pop_namespace ();
      do_using_directive (name_space);
      push_namespace (identifier);
    }

  /* Attributes may impose a visibility that must be popped after the
     body is parsed.  */
  has_visibility = handle_namespace_attrs (current_namespace, attribs);

  /* Parse the body of the namespace.  */
  cp_parser_namespace_body (parser);

  if (has_visibility)
    pop_visibility (1);

  /* Finish the namespace.  */
  pop_namespace ();
  /* Look for the final `}'.  */
  cp_parser_require (parser, CPP_CLOSE_BRACE, RT_CLOSE_BRACE);
}
/* Parse a namespace-body.

   namespace-body:
     declaration-seq [opt]  */

static void
cp_parser_namespace_body (cp_parser* parser)
{
  /* The body is just an optional sequence of declarations; the
     enclosing `{' and `}' are handled by the caller.  */
  cp_parser_declaration_seq_opt (parser);
}
/* Parse a namespace-alias-definition.
namespace-alias-definition:
namespace identifier = qualified-namespace-specifier ; */
static void
cp_parser_namespace_alias_definition (cp_parser* parser)
{
  tree identifier;
  tree namespace_specifier;

  /* Remember the start so diagnostics point at the `namespace'
     keyword.  */
  cp_token *token = cp_lexer_peek_token (parser->lexer);

  /* Look for the `namespace' keyword.  */
  cp_parser_require_keyword (parser, RID_NAMESPACE, RT_NAMESPACE);
  /* Look for the identifier.  */
  identifier = cp_parser_identifier (parser);
  if (identifier == error_mark_node)
    return;
  /* Look for the `=' token.  */
  if (!cp_parser_uncommitted_to_tentative_parse_p (parser)
      && cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE))
    {
      /* A `{' instead of `=' means the user wrote a
	 namespace-definition in a context where only an
	 alias-definition is allowed; diagnose and skip the whole
	 braced body to recover.  */
      error_at (token->location, "%<namespace%> definition is not allowed here");
      /* Skip the definition.  */
      cp_lexer_consume_token (parser->lexer);
      if (cp_parser_skip_to_closing_brace (parser))
	cp_lexer_consume_token (parser->lexer);
      return;
    }
  cp_parser_require (parser, CPP_EQ, RT_EQ);
  /* Look for the qualified-namespace-specifier.  */
  namespace_specifier
    = cp_parser_qualified_namespace_specifier (parser);
  /* Look for the `;' token.  */
  cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON);
  /* Register the alias in the symbol table.  */
  do_namespace_alias (identifier, namespace_specifier);
}
/* Parse a qualified-namespace-specifier.

   qualified-namespace-specifier:
     :: [opt] nested-name-specifier [opt] namespace-name

   Returns a NAMESPACE_DECL corresponding to the specified
   namespace.  */

static tree
cp_parser_qualified_namespace_specifier (cp_parser* parser)
{
  /* The optional global-scope `::' qualifier.  */
  cp_parser_global_scope_opt (parser,
			      /*current_scope_valid_p=*/false);

  /* The optional nested-name-specifier.  */
  cp_parser_nested_name_specifier_opt (parser,
				       /*typename_keyword_p=*/false,
				       /*check_dependency_p=*/true,
				       /*type_p=*/false,
				       /*is_declaration=*/true);

  /* Finally, the namespace-name itself.  */
  tree ns = cp_parser_namespace_name (parser);
  return ns;
}
/* Parse a using-declaration, or, if ACCESS_DECLARATION_P is true, an
access declaration.
using-declaration:
using typename [opt] :: [opt] nested-name-specifier unqualified-id ;
using :: unqualified-id ;
access-declaration:
qualified-id ;
*/
static bool
cp_parser_using_declaration (cp_parser* parser,
			     bool access_declaration_p)
{
  cp_token *token;
  bool typename_p = false;
  bool global_scope_p;
  tree decl;
  tree identifier;
  tree qscope;
  /* Snapshot the error count so we only emit the deprecation warning
     below when the access-declaration parsed cleanly.  */
  int oldcount = errorcount;
  cp_token *diag_token = NULL;

  if (access_declaration_p)
    {
      /* An access-declaration has no `using' keyword; parse
	 tentatively so we can back out if it is not one.  */
      diag_token = cp_lexer_peek_token (parser->lexer);
      cp_parser_parse_tentatively (parser);
    }
  else
    {
      /* Look for the `using' keyword.  */
      cp_parser_require_keyword (parser, RID_USING, RT_USING);
      /* Peek at the next token.  */
      token = cp_lexer_peek_token (parser->lexer);
      /* See if it's `typename'.  */
      if (token->keyword == RID_TYPENAME)
	{
	  /* Remember that we've seen it.  */
	  typename_p = true;
	  /* Consume the `typename' token.  */
	  cp_lexer_consume_token (parser->lexer);
	}
    }

  /* Look for the optional global scope qualification.  */
  global_scope_p
    = (cp_parser_global_scope_opt (parser,
				   /*current_scope_valid_p=*/false)
       != NULL_TREE);

  /* If we saw `typename', or didn't see `::', then there must be a
     nested-name-specifier present.  */
  if (typename_p || !global_scope_p)
    {
      qscope = cp_parser_nested_name_specifier (parser, typename_p,
						/*check_dependency_p=*/true,
						/*type_p=*/false,
						/*is_declaration=*/true);
      if (!qscope && !cp_parser_uncommitted_to_tentative_parse_p (parser))
	{
	  /* Committed parse with no qualifying scope: skip to a
	     recovery point and give up on this declaration.  */
	  cp_parser_skip_to_end_of_block_or_statement (parser);
	  return false;
	}
    }
  /* Otherwise, we could be in either of the two productions.  In that
     case, treat the nested-name-specifier as optional.  */
  else
    qscope = cp_parser_nested_name_specifier_opt (parser,
						  /*typename_keyword_p=*/false,
						  /*check_dependency_p=*/true,
						  /*type_p=*/false,
						  /*is_declaration=*/true);
  if (!qscope)
    qscope = global_namespace;
  else if (UNSCOPED_ENUM_P (qscope))
    /* An unscoped enum's members live in its enclosing scope.  */
    qscope = CP_TYPE_CONTEXT (qscope);

  if (access_declaration_p && cp_parser_error_occurred (parser))
    /* Something has already gone wrong; there's no need to parse
       further.  Since an error has occurred, the return value of
       cp_parser_parse_definitely will be false, as required.  */
    return cp_parser_parse_definitely (parser);

  token = cp_lexer_peek_token (parser->lexer);
  /* Parse the unqualified-id.  */
  identifier = cp_parser_unqualified_id (parser,
					 /*template_keyword_p=*/false,
					 /*check_dependency_p=*/true,
					 /*declarator_p=*/true,
					 /*optional_p=*/false);

  if (access_declaration_p)
    {
      /* An access-declaration must end right here with a `;'.  */
      if (cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON))
	cp_parser_simulate_error (parser);
      if (!cp_parser_parse_definitely (parser))
	return false;
    }

  /* The function we call to handle a using-declaration is different
     depending on what scope we are in.  */
  if (qscope == error_mark_node || identifier == error_mark_node)
    ;
  else if (!identifier_p (identifier)
	   && TREE_CODE (identifier) != BIT_NOT_EXPR)
    /* [namespace.udecl]

       A using declaration shall not name a template-id.  */
    error_at (token->location,
	      "a template-id may not appear in a using-declaration");
  else
    {
      if (at_class_scope_p ())
	{
	  /* Create the USING_DECL.  */
	  decl = do_class_using_decl (parser->scope, identifier);

	  if (decl && typename_p)
	    USING_DECL_TYPENAME_P (decl) = 1;

	  if (check_for_bare_parameter_packs (decl))
	    {
	      cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON);
	      return false;
	    }
	  else
	    /* Add it to the list of members in this class.  */
	    finish_member_declaration (decl);
	}
      else
	{
	  decl = cp_parser_lookup_name_simple (parser,
					       identifier,
					       token->location);
	  if (decl == error_mark_node)
	    cp_parser_name_lookup_error (parser, identifier,
					 decl, NLE_NULL,
					 token->location);
	  else if (check_for_bare_parameter_packs (decl))
	    {
	      cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON);
	      return false;
	    }
	  else if (!at_namespace_scope_p ())
	    do_local_using_decl (decl, qscope, identifier);
	  else
	    do_toplevel_using_decl (decl, qscope, identifier);
	}
    }

  /* Look for the final `;'.  */
  cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON);

  /* Warn about deprecated access-declarations, but only when the
     parse produced no new errors.  */
  if (access_declaration_p && errorcount == oldcount)
    warning_at (diag_token->location, OPT_Wdeprecated,
		"access declarations are deprecated "
		"in favour of using-declarations; "
		"suggestion: add the %<using%> keyword");

  return true;
}
/* Parse an alias-declaration.
alias-declaration:
using identifier attribute-specifier-seq [opt] = type-id */
static tree
cp_parser_alias_declaration (cp_parser* parser)
{
  tree id, type, decl, pushed_scope = NULL_TREE, attributes;
  location_t id_location;
  cp_declarator *declarator;
  cp_decl_specifier_seq decl_specs;
  bool member_p;
  const char *saved_message = NULL;

  /* Look for the `using' keyword.  */
  cp_token *using_token
    = cp_parser_require_keyword (parser, RID_USING, RT_USING);
  if (using_token == NULL)
    return error_mark_node;

  id_location = cp_lexer_peek_token (parser->lexer)->location;
  id = cp_parser_identifier (parser);
  if (id == error_mark_node)
    return error_mark_node;

  /* Attributes may appear between the identifier and the `='.  */
  cp_token *attrs_token = cp_lexer_peek_token (parser->lexer);
  attributes = cp_parser_attributes_opt (parser);
  if (attributes == error_mark_node)
    return error_mark_node;

  cp_parser_require (parser, CPP_EQ, RT_EQ);

  if (cp_parser_error_occurred (parser))
    return error_mark_node;

  /* Once we have seen `using identifier =', this can only be an
     alias-declaration; commit so later errors are reported.  */
  cp_parser_commit_to_tentative_parse (parser);

  /* Now we are going to parse the type-id of the declaration.  */

  /*
    [dcl.type]/3 says:

	"A type-specifier-seq shall not define a class or enumeration
	 unless it appears in the type-id of an alias-declaration (7.1.3) that
	 is not the declaration of a template-declaration."

    In other words, if we currently are in an alias template, the
    type-id should not define a type.

    So let's set parser->type_definition_forbidden_message in that
    case; cp_parser_check_type_definition (called by
    cp_parser_class_specifier) will then emit an error if a type is
    defined in the type-id.  */
  if (parser->num_template_parameter_lists)
    {
      saved_message = parser->type_definition_forbidden_message;
      parser->type_definition_forbidden_message =
	G_("types may not be defined in alias template declarations");
    }

  type = cp_parser_type_id (parser);

  /* Restore the error message if need be.  */
  if (parser->num_template_parameter_lists)
    parser->type_definition_forbidden_message = saved_message;

  if (type == error_mark_node
      || !cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON))
    {
      cp_parser_skip_to_end_of_block_or_statement (parser);
      return error_mark_node;
    }

  /* A typedef-name can also be introduced by an alias-declaration.  The
     identifier following the using keyword becomes a typedef-name.  It has
     the same semantics as if it were introduced by the typedef
     specifier.  In particular, it does not define a new type and it shall
     not appear in the type-id.  */

  /* Build a decl-specifier-seq marked `typedef' and `alias' so the
     declaration machinery treats this exactly like a typedef.  */
  clear_decl_specs (&decl_specs);
  decl_specs.type = type;
  if (attributes != NULL_TREE)
    {
      decl_specs.attributes = attributes;
      set_and_check_decl_spec_loc (&decl_specs,
				   ds_attribute,
				   attrs_token);
    }
  set_and_check_decl_spec_loc (&decl_specs,
			       ds_typedef,
			       using_token);
  set_and_check_decl_spec_loc (&decl_specs,
			       ds_alias,
			       using_token);

  declarator = make_id_declarator (NULL_TREE, id, sfk_none);
  declarator->id_loc = id_location;

  member_p = at_class_scope_p ();
  if (member_p)
    decl = grokfield (declarator, &decl_specs, NULL_TREE, false,
		      NULL_TREE, attributes);
  else
    decl = start_decl (declarator, &decl_specs, 0,
		       attributes, NULL_TREE, &pushed_scope);
  if (decl == error_mark_node)
    return decl;

  cp_finish_decl (decl, NULL_TREE, 0, NULL_TREE, 0);

  if (pushed_scope)
    pop_scope (pushed_scope);

  /* If decl is a template, return its TEMPLATE_DECL so that it gets
     added into the symbol table; otherwise, return the TYPE_DECL.  */
  if (DECL_LANG_SPECIFIC (decl)
      && DECL_TEMPLATE_INFO (decl)
      && PRIMARY_TEMPLATE_P (DECL_TI_TEMPLATE (decl)))
    {
      decl = DECL_TI_TEMPLATE (decl);
      if (member_p)
	check_member_template (decl);
    }

  return decl;
}
/* Parse a using-directive.

   using-directive:
     using namespace :: [opt] nested-name-specifier [opt]
       namespace-name ;  */

static void
cp_parser_using_directive (cp_parser* parser)
{
  /* The `using' and `namespace' keywords come first.  */
  cp_parser_require_keyword (parser, RID_USING, RT_USING);
  cp_parser_require_keyword (parser, RID_NAMESPACE, RT_NAMESPACE);

  /* An optional `::' and an optional nested-name-specifier may
     qualify the namespace-name.  */
  cp_parser_global_scope_opt (parser, /*current_scope_valid_p=*/false);
  cp_parser_nested_name_specifier_opt (parser,
				       /*typename_keyword_p=*/false,
				       /*check_dependency_p=*/true,
				       /*type_p=*/false,
				       /*is_declaration=*/true);

  /* The namespace whose names are being imported.  */
  tree used_namespace = cp_parser_namespace_name (parser);

  /* Any attributes following the namespace-name.  */
  tree directive_attribs = cp_parser_attributes_opt (parser);

  /* Record the directive in the symbol table.  */
  parse_using_directive (used_namespace, directive_attribs);

  /* The directive ends with a `;'.  */
  cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON);
}
/* Parse an asm-definition.
asm-definition:
asm ( string-literal ) ;
GNU Extension:
asm-definition:
asm volatile [opt] ( string-literal ) ;
asm volatile [opt] ( string-literal : asm-operand-list [opt] ) ;
asm volatile [opt] ( string-literal : asm-operand-list [opt]
: asm-operand-list [opt] ) ;
asm volatile [opt] ( string-literal : asm-operand-list [opt]
: asm-operand-list [opt]
: asm-clobber-list [opt] ) ;
asm volatile [opt] goto ( string-literal : : asm-operand-list [opt]
: asm-clobber-list [opt]
: asm-goto-list ) ; */
static void
cp_parser_asm_definition (cp_parser* parser)
{
  tree string;
  tree outputs = NULL_TREE;
  tree inputs = NULL_TREE;
  tree clobbers = NULL_TREE;
  tree labels = NULL_TREE;
  tree asm_stmt;
  bool volatile_p = false;
  bool extended_p = false;
  bool invalid_inputs_p = false;
  bool invalid_outputs_p = false;
  bool goto_p = false;
  required_token missing = RT_NONE;

  /* Look for the `asm' keyword.  */
  cp_parser_require_keyword (parser, RID_ASM, RT_ASM);

  /* asm is not permitted inside a constexpr function body.  */
  if (parser->in_function_body
      && DECL_DECLARED_CONSTEXPR_P (current_function_decl))
    {
      error ("%<asm%> in %<constexpr%> function");
      cp_function_chain->invalid_constexpr = true;
    }

  /* See if the next token is `volatile'.  */
  if (cp_parser_allow_gnu_extensions_p (parser)
      && cp_lexer_next_token_is_keyword (parser->lexer, RID_VOLATILE))
    {
      /* Remember that we saw the `volatile' keyword.  */
      volatile_p = true;
      /* Consume the token.  */
      cp_lexer_consume_token (parser->lexer);
    }
  /* `asm goto' (GNU extension) is only valid inside a function.  */
  if (cp_parser_allow_gnu_extensions_p (parser)
      && parser->in_function_body
      && cp_lexer_next_token_is_keyword (parser->lexer, RID_GOTO))
    {
      /* Remember that we saw the `goto' keyword.  */
      goto_p = true;
      /* Consume the token.  */
      cp_lexer_consume_token (parser->lexer);
    }
  /* Look for the opening `('.  */
  if (!cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN))
    return;
  /* Look for the string.  */
  string = cp_parser_string_literal (parser, false, false);
  if (string == error_mark_node)
    {
      cp_parser_skip_to_closing_parenthesis (parser, true, false,
					     /*consume_paren=*/true);
      return;
    }

  /* If we're allowing GNU extensions, check for the extended assembly
     syntax.  Unfortunately, the `:' tokens need not be separated by
     a space in C, and so, for compatibility, we tolerate that here
     too.  Doing that means that we have to treat the `::' operator as
     two `:' tokens.  */
  if (cp_parser_allow_gnu_extensions_p (parser)
      && parser->in_function_body
      && (cp_lexer_next_token_is (parser->lexer, CPP_COLON)
	  || cp_lexer_next_token_is (parser->lexer, CPP_SCOPE)))
    {
      bool inputs_p = false;
      bool clobbers_p = false;
      bool labels_p = false;

      /* The extended syntax was used.  */
      extended_p = true;

      /* Look for outputs.  */
      if (cp_lexer_next_token_is (parser->lexer, CPP_COLON))
	{
	  /* Consume the `:'.  */
	  cp_lexer_consume_token (parser->lexer);
	  /* Parse the output-operands.  */
	  if (cp_lexer_next_token_is_not (parser->lexer,
					  CPP_COLON)
	      && cp_lexer_next_token_is_not (parser->lexer,
					     CPP_SCOPE)
	      && cp_lexer_next_token_is_not (parser->lexer,
					     CPP_CLOSE_PAREN)
	      && !goto_p)
	    outputs = cp_parser_asm_operand_list (parser);

	  if (outputs == error_mark_node)
	    invalid_outputs_p = true;
	}
      /* If the next token is `::', there are no outputs, and the
	 next token is the beginning of the inputs.  */
      else if (cp_lexer_next_token_is (parser->lexer, CPP_SCOPE))
	/* The inputs are coming next.  */
	inputs_p = true;

      /* Look for inputs.  */
      if (inputs_p
	  || cp_lexer_next_token_is (parser->lexer, CPP_COLON))
	{
	  /* Consume the `:' or `::'.  */
	  cp_lexer_consume_token (parser->lexer);
	  /* Parse the output-operands.  */
	  if (cp_lexer_next_token_is_not (parser->lexer,
					  CPP_COLON)
	      && cp_lexer_next_token_is_not (parser->lexer,
					     CPP_SCOPE)
	      && cp_lexer_next_token_is_not (parser->lexer,
					     CPP_CLOSE_PAREN))
	    inputs = cp_parser_asm_operand_list (parser);

	  if (inputs == error_mark_node)
	    invalid_inputs_p = true;
	}
      else if (cp_lexer_next_token_is (parser->lexer, CPP_SCOPE))
	/* The clobbers are coming next.  */
	clobbers_p = true;

      /* Look for clobbers.  */
      if (clobbers_p
	  || cp_lexer_next_token_is (parser->lexer, CPP_COLON))
	{
	  clobbers_p = true;
	  /* Consume the `:' or `::'.  */
	  cp_lexer_consume_token (parser->lexer);
	  /* Parse the clobbers.  */
	  if (cp_lexer_next_token_is_not (parser->lexer,
					  CPP_COLON)
	      && cp_lexer_next_token_is_not (parser->lexer,
					     CPP_CLOSE_PAREN))
	    clobbers = cp_parser_asm_clobber_list (parser);
	}
      else if (goto_p
	       && cp_lexer_next_token_is (parser->lexer, CPP_SCOPE))
	/* The labels are coming next.  */
	labels_p = true;

      /* Look for labels.  */
      if (labels_p
	  || (goto_p && cp_lexer_next_token_is (parser->lexer, CPP_COLON)))
	{
	  labels_p = true;
	  /* Consume the `:' or `::'.  */
	  cp_lexer_consume_token (parser->lexer);
	  /* Parse the labels.  */
	  labels = cp_parser_asm_label_list (parser);
	}

      /* An `asm goto' requires a label section; note which token
	 (`:' or `::') was expected for the diagnostic below.  */
      if (goto_p && !labels_p)
	missing = clobbers_p ? RT_COLON : RT_COLON_SCOPE;
    }
  else if (goto_p)
    missing = RT_COLON_SCOPE;

  /* Look for the closing `)'.  */
  if (!cp_parser_require (parser, missing ? CPP_COLON : CPP_CLOSE_PAREN,
			  missing ? missing : RT_CLOSE_PAREN))
    cp_parser_skip_to_closing_parenthesis (parser, true, false,
					   /*consume_paren=*/true);
  cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON);

  if (!invalid_inputs_p && !invalid_outputs_p)
    {
      /* Create the ASM_EXPR.  */
      if (parser->in_function_body)
	{
	  asm_stmt = finish_asm_stmt (volatile_p, string, outputs,
				      inputs, clobbers, labels);
	  /* If the extended syntax was not used, mark the ASM_EXPR.  */
	  if (!extended_p)
	    {
	      tree temp = asm_stmt;
	      if (TREE_CODE (temp) == CLEANUP_POINT_EXPR)
		temp = TREE_OPERAND (temp, 0);

	      ASM_INPUT_P (temp) = 1;
	    }
	}
      else
	/* A file-scope asm is simply emitted to the output.  */
	symtab->finalize_toplevel_asm (string);
    }
}
/* Declarators [gram.dcl.decl] */
/* Parse an init-declarator.
init-declarator:
declarator initializer [opt]
GNU Extension:
init-declarator:
declarator asm-specification [opt] attributes [opt] initializer [opt]
function-definition:
decl-specifier-seq [opt] declarator ctor-initializer [opt]
function-body
decl-specifier-seq [opt] declarator function-try-block
GNU Extension:
function-definition:
__extension__ function-definition
TM Extension:
function-definition:
decl-specifier-seq [opt] declarator function-transaction-block
The DECL_SPECIFIERS apply to this declarator. Returns a
representation of the entity declared. If MEMBER_P is TRUE, then
this declarator appears in a class scope. The new DECL created by
this declarator is returned.
The CHECKS are access checks that should be performed once we know
what entity is being declared (and, therefore, what classes have
befriended it).
If FUNCTION_DEFINITION_ALLOWED_P then we handle the declarator and
for a function-definition here as well. If the declarator is a
declarator for a function-definition, *FUNCTION_DEFINITION_P will
be TRUE upon return. By that point, the function-definition will
have been completely parsed.
FUNCTION_DEFINITION_P may be NULL if FUNCTION_DEFINITION_ALLOWED_P
is FALSE.
If MAYBE_RANGE_FOR_DECL is not NULL, the pointed tree will be set to the
parsed declaration if it is an uninitialized single declarator not followed
by a `;', or to error_mark_node otherwise. Either way, the trailing `;',
if present, will not be consumed. If returned, this declarator will be
created with SD_INITIALIZED but will not call cp_finish_decl.
If INIT_LOC is not NULL, and *INIT_LOC is equal to UNKNOWN_LOCATION,
and there is an initializer, the pointed location_t is set to the
location of the '=' or `(', or '{' in C++11 token introducing the
initializer. */
static tree
cp_parser_init_declarator (cp_parser* parser,
			   cp_decl_specifier_seq *decl_specifiers,
			   vec<deferred_access_check, va_gc> *checks,
			   bool function_definition_allowed_p,
			   bool member_p,
			   int declares_class_or_enum,
			   bool* function_definition_p,
			   tree* maybe_range_for_decl,
			   location_t* init_loc)
{
  cp_token *token = NULL, *asm_spec_start_token = NULL,
           *attributes_start_token = NULL;
  cp_declarator *declarator;
  tree prefix_attributes;
  tree attributes = NULL;
  tree asm_specification;
  tree initializer;
  tree decl = NULL_TREE;
  tree scope;
  int is_initialized;
  /* Only valid if IS_INITIALIZED is true.  In that case, CPP_EQ if
     initialized with "= ..", CPP_OPEN_PAREN if initialized with
     "(...)".  */
  enum cpp_ttype initialization_kind;
  bool is_direct_init = false;
  bool is_non_constant_init;
  int ctor_dtor_or_conv_p;
  bool friend_p = cp_parser_friend_p (decl_specifiers);
  tree pushed_scope = NULL_TREE;
  bool range_for_decl_p = false;
  bool saved_default_arg_ok_p = parser->default_arg_ok_p;
  location_t tmp_init_loc = UNKNOWN_LOCATION;

  /* Gather the attributes that were provided with the
     decl-specifiers.  */
  prefix_attributes = decl_specifiers->attributes;

  /* Assume that this is not the declarator for a function
     definition.  */
  if (function_definition_p)
    *function_definition_p = false;

  /* Default arguments are only permitted for function parameters.  */
  if (decl_spec_seq_has_spec_p (decl_specifiers, ds_typedef))
    parser->default_arg_ok_p = false;

  /* Defer access checks while parsing the declarator; we cannot know
     what names are accessible until we know what is being
     declared.  */
  resume_deferring_access_checks ();

  /* Parse the declarator.  */
  token = cp_lexer_peek_token (parser->lexer);
  declarator
    = cp_parser_declarator (parser, CP_PARSER_DECLARATOR_NAMED,
			    &ctor_dtor_or_conv_p,
			    /*parenthesized_p=*/NULL,
			    member_p, friend_p);
  /* Gather up the deferred checks.  */
  stop_deferring_access_checks ();

  parser->default_arg_ok_p = saved_default_arg_ok_p;

  /* If the DECLARATOR was erroneous, there's no need to go
     further.  */
  if (declarator == cp_error_declarator)
    return error_mark_node;

  /* Check that the number of template-parameter-lists is OK.  */
  if (!cp_parser_check_declarator_template_parameters (parser, declarator,
						       token->location))
    return error_mark_node;

  if (declares_class_or_enum & 2)
    cp_parser_check_for_definition_in_return_type (declarator,
						   decl_specifiers->type,
						   decl_specifiers->locations[ds_type_spec]);

  /* Figure out what scope the entity declared by the DECLARATOR is
     located in.  `grokdeclarator' sometimes changes the scope, so
     we compute it now.  */
  scope = get_scope_of_declarator (declarator);

  /* Perform any lookups in the declared type which were thought to be
     dependent, but are not in the scope of the declarator.  */
  decl_specifiers->type
    = maybe_update_decl_type (decl_specifiers->type, scope);

  /* If we're allowing GNU extensions, look for an
     asm-specification.  */
  if (cp_parser_allow_gnu_extensions_p (parser))
    {
      /* Look for an asm-specification.  */
      asm_spec_start_token = cp_lexer_peek_token (parser->lexer);
      asm_specification = cp_parser_asm_specification_opt (parser);
    }
  else
    asm_specification = NULL_TREE;

  /* Look for attributes.  */
  attributes_start_token = cp_lexer_peek_token (parser->lexer);
  attributes = cp_parser_attributes_opt (parser);

  /* Peek at the next token.  */
  token = cp_lexer_peek_token (parser->lexer);

  bool bogus_implicit_tmpl = false;

  if (function_declarator_p (declarator))
    {
      /* Check to see if the token indicates the start of a
	 function-definition.  */
      if (cp_parser_token_starts_function_definition_p (token))
	{
	  if (!function_definition_allowed_p)
	    {
	      /* If a function-definition should not appear here, issue an
		 error message.  */
	      cp_parser_error (parser,
			       "a function-definition is not allowed here");
	      return error_mark_node;
	    }

	  location_t func_brace_location
	    = cp_lexer_peek_token (parser->lexer)->location;

	  /* Neither attributes nor an asm-specification are allowed
	     on a function-definition.  */
	  if (asm_specification)
	    error_at (asm_spec_start_token->location,
		      "an asm-specification is not allowed "
		      "on a function-definition");
	  if (attributes)
	    error_at (attributes_start_token->location,
		      "attributes are not allowed "
		      "on a function-definition");
	  /* This is a function-definition.  */
	  *function_definition_p = true;

	  /* Parse the function definition.  */
	  if (member_p)
	    decl = cp_parser_save_member_function_body (parser,
							decl_specifiers,
							declarator,
							prefix_attributes);
	  else
	    decl =
	      (cp_parser_function_definition_from_specifiers_and_declarator
	       (parser, decl_specifiers, prefix_attributes, declarator));

	  if (decl != error_mark_node && DECL_STRUCT_FUNCTION (decl))
	    {
	      /* This is where the prologue starts...  */
	      DECL_STRUCT_FUNCTION (decl)->function_start_locus
		= func_brace_location;
	    }

	  return decl;
	}
    }
  else if (parser->fully_implicit_function_template_p)
    {
      /* A non-template declaration involving a function parameter list
	 containing an implicit template parameter will be made into a
	 template.  If the resulting declaration is not going to be an
	 actual function then finish the template scope here to prevent it.
	 An error message will be issued once we have a decl to talk about.

	 FIXME probably we should do type deduction rather than create an
	 implicit template, but the standard currently doesn't allow it.  */
      bogus_implicit_tmpl = true;
      finish_fully_implicit_template (parser, NULL_TREE);
    }

  /* [dcl.dcl]

     Only in function declarations for constructors, destructors, and
     type conversions can the decl-specifier-seq be omitted.

     We explicitly postpone this check past the point where we handle
     function-definitions because we tolerate function-definitions
     that are missing their return types in some modes.  */
  if (!decl_specifiers->any_specifiers_p && ctor_dtor_or_conv_p <= 0)
    {
      cp_parser_error (parser,
		       "expected constructor, destructor, or type conversion");
      return error_mark_node;
    }

  /* An `=' or an `(', or an '{' in C++0x, indicates an initializer.  */
  if (token->type == CPP_EQ
      || token->type == CPP_OPEN_PAREN
      || token->type == CPP_OPEN_BRACE)
    {
      is_initialized = SD_INITIALIZED;
      initialization_kind = token->type;
      if (maybe_range_for_decl)
	*maybe_range_for_decl = error_mark_node;
      tmp_init_loc = token->location;
      if (init_loc && *init_loc == UNKNOWN_LOCATION)
	*init_loc = tmp_init_loc;

      /* `= default' and `= delete' are handled as special
	 initialization kinds for function declarators.  */
      if (token->type == CPP_EQ
	  && function_declarator_p (declarator))
	{
	  cp_token *t2 = cp_lexer_peek_nth_token (parser->lexer, 2);
	  if (t2->keyword == RID_DEFAULT)
	    is_initialized = SD_DEFAULTED;
	  else if (t2->keyword == RID_DELETE)
	    is_initialized = SD_DELETED;
	}
    }
  else
    {
      /* If the init-declarator isn't initialized and isn't followed by a
	 `,' or `;', it's not a valid init-declarator.  */
      if (token->type != CPP_COMMA
	  && token->type != CPP_SEMICOLON)
	{
	  if (maybe_range_for_decl && *maybe_range_for_decl != error_mark_node)
	    range_for_decl_p = true;
	  else
	    {
	      if (!maybe_range_for_decl)
		cp_parser_error (parser, "expected initializer");
	      return error_mark_node;
	    }
	}
      is_initialized = SD_UNINITIALIZED;
      initialization_kind = CPP_EOF;
    }

  /* Because start_decl has side-effects, we should only call it if we
     know we're going ahead.  By this point, we know that we cannot
     possibly be looking at any other construct.  */
  cp_parser_commit_to_tentative_parse (parser);

  /* Enter the newly declared entry in the symbol table.  If we're
     processing a declaration in a class-specifier, we wait until
     after processing the initializer.  */
  if (!member_p)
    {
      if (parser->in_unbraced_linkage_specification_p)
	decl_specifiers->storage_class = sc_extern;
      decl = start_decl (declarator, decl_specifiers,
			 range_for_decl_p? SD_INITIALIZED : is_initialized,
			 attributes, prefix_attributes, &pushed_scope);
      cp_finalize_omp_declare_simd (parser, decl);
      /* Adjust location of decl if declarator->id_loc is more appropriate:
	 set, and decl wasn't merged with another decl, in which case its
	 location would be different from input_location, and more accurate.  */
      if (DECL_P (decl)
	  && declarator->id_loc != UNKNOWN_LOCATION
	  && DECL_SOURCE_LOCATION (decl) == input_location)
	DECL_SOURCE_LOCATION (decl) = declarator->id_loc;
    }
  else if (scope)
    /* Enter the SCOPE.  That way unqualified names appearing in the
       initializer will be looked up in SCOPE.  */
    pushed_scope = push_scope (scope);

  /* Perform deferred access control checks, now that we know in which
     SCOPE the declared entity resides.  */
  if (!member_p && decl)
    {
      tree saved_current_function_decl = NULL_TREE;

      /* If the entity being declared is a function, pretend that we
	 are in its scope.  If it is a `friend', it may have access to
	 things that would not otherwise be accessible.  */
      if (TREE_CODE (decl) == FUNCTION_DECL)
	{
	  saved_current_function_decl = current_function_decl;
	  current_function_decl = decl;
	}

      /* Perform access checks for template parameters.  */
      cp_parser_perform_template_parameter_access_checks (checks);

      /* Perform the access control checks for the declarator and the
	 decl-specifiers.  */
      perform_deferred_access_checks (tf_warning_or_error);

      /* Restore the saved value.  */
      if (TREE_CODE (decl) == FUNCTION_DECL)
	current_function_decl = saved_current_function_decl;
    }

  /* Parse the initializer.  */
  initializer = NULL_TREE;
  is_direct_init = false;
  is_non_constant_init = true;
  if (is_initialized)
    {
      if (function_declarator_p (declarator))
	{
	  /* For a function declarator, `= 0' is a pure-specifier;
	     anything else in parentheses is an error.  */
	  if (initialization_kind == CPP_EQ)
	    initializer = cp_parser_pure_specifier (parser);
	  else
	    {
	      /* If the declaration was erroneous, we don't really
		 know what the user intended, so just silently
		 consume the initializer.  */
	      if (decl != error_mark_node)
		error_at (tmp_init_loc, "initializer provided for function");
	      cp_parser_skip_to_closing_parenthesis (parser,
						     /*recovering=*/true,
						     /*or_comma=*/false,
						     /*consume_paren=*/true);
	    }
	}
      else
	{
	  /* We want to record the extra mangling scope for in-class
	     initializers of class members and initializers of static data
	     member templates.  The former involves deferring
	     parsing of the initializer until end of class as with default
	     arguments.  So right here we only handle the latter.  */
	  if (!member_p && processing_template_decl)
	    start_lambda_scope (decl);
	  initializer = cp_parser_initializer (parser,
					       &is_direct_init,
					       &is_non_constant_init);
	  if (!member_p && processing_template_decl)
	    finish_lambda_scope ();
	  if (initializer == error_mark_node)
	    cp_parser_skip_to_end_of_statement (parser);
	}
    }

  /* The old parser allows attributes to appear after a parenthesized
     initializer.  Mark Mitchell proposed removing this functionality
     on the GCC mailing lists on 2002-08-13.  This parser accepts the
     attributes -- but ignores them.  */
  if (cp_parser_allow_gnu_extensions_p (parser)
      && initialization_kind == CPP_OPEN_PAREN)
    if (cp_parser_attributes_opt (parser))
      warning (OPT_Wattributes,
	       "attributes after parenthesized initializer ignored");

  /* And now complain about a non-function implicit template.  */
  if (bogus_implicit_tmpl)
    error_at (DECL_SOURCE_LOCATION (decl),
	      "non-function %qD declared as implicit template", decl);

  /* For an in-class declaration, use `grokfield' to create the
     declaration.  */
  if (member_p)
    {
      if (pushed_scope)
	{
	  pop_scope (pushed_scope);
	  pushed_scope = NULL_TREE;
	}
      decl = grokfield (declarator, decl_specifiers,
			initializer, !is_non_constant_init,
			/*asmspec=*/NULL_TREE,
			chainon (attributes, prefix_attributes));
      if (decl && TREE_CODE (decl) == FUNCTION_DECL)
	cp_parser_save_default_args (parser, decl);
      cp_finalize_omp_declare_simd (parser, decl);
    }

  /* Finish processing the declaration.  But, skip member
     declarations.  */
  if (!member_p && decl && decl != error_mark_node && !range_for_decl_p)
    {
      cp_finish_decl (decl,
		      initializer, !is_non_constant_init,
		      asm_specification,
		      /* If the initializer is in parentheses, then this is
			 a direct-initialization, which means that an
			 `explicit' constructor is OK.  Otherwise, an
			 `explicit' constructor cannot be used.  */
		      ((is_direct_init || !is_initialized)
		       ? LOOKUP_NORMAL : LOOKUP_IMPLICIT));
    }
  else if ((cxx_dialect != cxx98) && friend_p
	   && decl && TREE_CODE (decl) == FUNCTION_DECL)
    /* Core issue #226 (C++0x only): A default template-argument
       shall not be specified in a friend class template
       declaration.  */
    check_default_tmpl_args (decl, current_template_parms, /*is_primary=*/true,
			     /*is_partial=*/false, /*is_friend_decl=*/1);

  if (!friend_p && pushed_scope)
    pop_scope (pushed_scope);

  if (function_declarator_p (declarator)
      && parser->fully_implicit_function_template_p)
    {
      if (member_p)
	decl = finish_fully_implicit_template (parser, decl);
      else
	finish_fully_implicit_template (parser, /*member_decl_opt=*/0);
    }

  return decl;
}
/* Parse a declarator.
declarator:
direct-declarator
ptr-operator declarator
abstract-declarator:
ptr-operator abstract-declarator [opt]
direct-abstract-declarator
GNU Extensions:
declarator:
attributes [opt] direct-declarator
attributes [opt] ptr-operator declarator
abstract-declarator:
attributes [opt] ptr-operator abstract-declarator [opt]
attributes [opt] direct-abstract-declarator
If CTOR_DTOR_OR_CONV_P is not NULL, *CTOR_DTOR_OR_CONV_P is used to
detect constructor, destructor or conversion operators. It is set
to -1 if the declarator is a name, and +1 if it is a
function. Otherwise it is set to zero. Usually you just want to
test for >0, but internally the negative value is used.
(The reason for CTOR_DTOR_OR_CONV_P is that a declaration must have
a decl-specifier-seq unless it declares a constructor, destructor,
or conversion. It might seem that we could check this condition in
semantic analysis, rather than parsing, but that makes it difficult
to handle something like `f()'. We want to notice that there are
no decl-specifiers, and therefore realize that this is an
expression, not a declaration.)
If PARENTHESIZED_P is non-NULL, *PARENTHESIZED_P is set to true iff
the declarator is a direct-declarator of the form "(...)".
MEMBER_P is true iff this declarator is a member-declarator.
FRIEND_P is true iff this declarator is a friend.
Returns a cp_declarator representation, or cp_error_declarator on
hard errors (see cp_parser_direct_declarator).  */
static cp_declarator *
cp_parser_declarator (cp_parser* parser,
cp_parser_declarator_kind dcl_kind,
int* ctor_dtor_or_conv_p,
bool* parenthesized_p,
bool member_p, bool friend_p)
{
cp_declarator *declarator;
enum tree_code code;
cp_cv_quals cv_quals;
tree class_type;
tree gnu_attributes = NULL_TREE, std_attributes = NULL_TREE;
/* Assume this is not a constructor, destructor, or type-conversion
operator. */
if (ctor_dtor_or_conv_p)
*ctor_dtor_or_conv_p = 0;
if (cp_parser_allow_gnu_extensions_p (parser))
gnu_attributes = cp_parser_gnu_attributes_opt (parser);
/* Check for the ptr-operator production. */
cp_parser_parse_tentatively (parser);
/* Parse the ptr-operator. */
code = cp_parser_ptr_operator (parser,
&class_type,
&cv_quals,
&std_attributes);
/* If that worked, then we have a ptr-operator. */
if (cp_parser_parse_definitely (parser))
{
/* If a ptr-operator was found, then this declarator was not
parenthesized. */
if (parenthesized_p)
*parenthesized_p = true;
/* The dependent declarator is optional if we are parsing an
abstract-declarator. */
if (dcl_kind != CP_PARSER_DECLARATOR_NAMED)
cp_parser_parse_tentatively (parser);
/* Parse the dependent declarator. */
declarator = cp_parser_declarator (parser, dcl_kind,
/*ctor_dtor_or_conv_p=*/NULL,
/*parenthesized_p=*/NULL,
/*member_p=*/false,
friend_p);
/* If we are parsing an abstract-declarator, we must handle the
case where the dependent declarator is absent. */
if (dcl_kind != CP_PARSER_DECLARATOR_NAMED
&& !cp_parser_parse_definitely (parser))
declarator = NULL;
/* Wrap whatever we parsed (possibly nothing) in the
pointer/reference/pointer-to-member node.  */
declarator = cp_parser_make_indirect_declarator
(code, class_type, cv_quals, declarator, std_attributes);
}
/* Everything else is a direct-declarator. */
else
{
if (parenthesized_p)
*parenthesized_p = cp_lexer_next_token_is (parser->lexer,
CPP_OPEN_PAREN);
declarator = cp_parser_direct_declarator (parser, dcl_kind,
ctor_dtor_or_conv_p,
member_p, friend_p);
}
/* Attach any leading GNU attributes to the outermost declarator.  */
if (gnu_attributes && declarator && declarator != cp_error_declarator)
declarator->attributes = gnu_attributes;
return declarator;
}
/* Parse a direct-declarator or direct-abstract-declarator.
direct-declarator:
declarator-id
direct-declarator ( parameter-declaration-clause )
cv-qualifier-seq [opt]
ref-qualifier [opt]
exception-specification [opt]
direct-declarator [ constant-expression [opt] ]
( declarator )
direct-abstract-declarator:
direct-abstract-declarator [opt]
( parameter-declaration-clause )
cv-qualifier-seq [opt]
ref-qualifier [opt]
exception-specification [opt]
direct-abstract-declarator [opt] [ constant-expression [opt] ]
( abstract-declarator )
Returns a representation of the declarator. DCL_KIND is
CP_PARSER_DECLARATOR_ABSTRACT, if we are parsing a
direct-abstract-declarator. It is CP_PARSER_DECLARATOR_NAMED, if
we are parsing a direct-declarator. It is
CP_PARSER_DECLARATOR_EITHER, if we can accept either - in the case
of ambiguity we prefer an abstract declarator, as per
[dcl.ambig.res]. CTOR_DTOR_OR_CONV_P, MEMBER_P, and FRIEND_P are
as for cp_parser_declarator. */
static cp_declarator *
cp_parser_direct_declarator (cp_parser* parser,
cp_parser_declarator_kind dcl_kind,
int* ctor_dtor_or_conv_p,
bool member_p, bool friend_p)
{
cp_token *token;
/* Built innermost-first; each "(...)" or "[...]" suffix wraps it.  */
cp_declarator *declarator = NULL;
tree scope = NULL_TREE;
bool saved_default_arg_ok_p = parser->default_arg_ok_p;
bool saved_in_declarator_p = parser->in_declarator_p;
bool first = true;
tree pushed_scope = NULL_TREE;
/* Loop, consuming one declarator component per iteration: on the
first iteration a declarator-id or parenthesized declarator is
also possible; thereafter only function/array suffixes.  */
while (true)
{
/* Peek at the next token. */
token = cp_lexer_peek_token (parser->lexer);
if (token->type == CPP_OPEN_PAREN)
{
/* This is either a parameter-declaration-clause, or a
parenthesized declarator. When we know we are parsing a
named declarator, it must be a parenthesized declarator
if FIRST is true. For instance, `(int)' is a
parameter-declaration-clause, with an omitted
direct-abstract-declarator. But `((*))', is a
parenthesized abstract declarator. Finally, when T is a
template parameter `(T)' is a
parameter-declaration-clause, and not a parenthesized
named declarator.
We first try and parse a parameter-declaration-clause,
and then try a nested declarator (if FIRST is true).
It is not an error for it not to be a
parameter-declaration-clause, even when FIRST is
false. Consider,
int i (int);
int i (3);
The first is the declaration of a function while the
second is the definition of a variable, including its
initializer.
Having seen only the parenthesis, we cannot know which of
these two alternatives should be selected. Even more
complex are examples like:
int i (int (a));
int i (int (3));
The former is a function-declaration; the latter is a
variable initialization.
Thus again, we try a parameter-declaration-clause, and if
that fails, we back out and return. */
if (!first || dcl_kind != CP_PARSER_DECLARATOR_NAMED)
{
tree params;
bool is_declarator = false;
/* In a member-declarator, the only valid interpretation
of a parenthesis is the start of a
parameter-declaration-clause. (It is invalid to
initialize a static data member with a parenthesized
initializer; only the "=" form of initialization is
permitted.) */
if (!member_p)
cp_parser_parse_tentatively (parser);
/* Consume the `('. */
cp_lexer_consume_token (parser->lexer);
if (first)
{
/* If this is going to be an abstract declarator, we're
in a declarator and we can't have default args. */
parser->default_arg_ok_p = false;
parser->in_declarator_p = true;
}
begin_scope (sk_function_parms, NULL_TREE);
/* Parse the parameter-declaration-clause. */
params = cp_parser_parameter_declaration_clause (parser);
/* Consume the `)'. */
cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);
/* If all went well, parse the cv-qualifier-seq,
ref-qualifier and the exception-specification. */
if (member_p || cp_parser_parse_definitely (parser))
{
cp_cv_quals cv_quals;
cp_virt_specifiers virt_specifiers;
cp_ref_qualifier ref_qual;
tree exception_specification;
tree late_return;
tree attrs;
bool memfn = (member_p || (pushed_scope
&& CLASS_TYPE_P (pushed_scope)));
is_declarator = true;
/* A parameter list right after a bare declarator-id (-1)
makes this a function (+1); any other prior state
resets to 0.  */
if (ctor_dtor_or_conv_p)
*ctor_dtor_or_conv_p = *ctor_dtor_or_conv_p < 0;
first = false;
/* Parse the cv-qualifier-seq. */
cv_quals = cp_parser_cv_qualifier_seq_opt (parser);
/* Parse the ref-qualifier. */
ref_qual = cp_parser_ref_qualifier_opt (parser);
/* And the exception-specification. */
exception_specification
= cp_parser_exception_specification_opt (parser);
attrs = cp_parser_std_attribute_spec_seq (parser);
/* In here, we handle cases where attribute is used after
the function declaration. For example:
void func (int x) __attribute__((vector(..))); */
if (flag_cilkplus
&& cp_next_tokens_can_be_gnu_attribute_p (parser))
{
cp_parser_parse_tentatively (parser);
tree attr = cp_parser_gnu_attributes_opt (parser);
if (cp_lexer_next_token_is_not (parser->lexer,
CPP_SEMICOLON)
&& cp_lexer_next_token_is_not (parser->lexer,
CPP_OPEN_BRACE))
cp_parser_abort_tentative_parse (parser);
else if (!cp_parser_parse_definitely (parser))
;
else
attrs = chainon (attr, attrs);
}
late_return = (cp_parser_late_return_type_opt
(parser, declarator,
memfn ? cv_quals : -1));
/* Parse the virt-specifier-seq. */
virt_specifiers = cp_parser_virt_specifier_seq_opt (parser);
/* Create the function-declarator. */
declarator = make_call_declarator (declarator,
params,
cv_quals,
virt_specifiers,
ref_qual,
exception_specification,
late_return);
declarator->std_attributes = attrs;
/* Any subsequent parameter lists are to do with
return type, so are not those of the declared
function. */
parser->default_arg_ok_p = false;
}
/* Remove the function parms from scope. */
pop_bindings_and_leave_scope ();
if (is_declarator)
/* Repeat the main loop. */
continue;
}
/* If this is the first, we can try a parenthesized
declarator. */
if (first)
{
bool saved_in_type_id_in_expr_p;
parser->default_arg_ok_p = saved_default_arg_ok_p;
parser->in_declarator_p = saved_in_declarator_p;
/* Consume the `('. */
cp_lexer_consume_token (parser->lexer);
/* Parse the nested declarator. */
saved_in_type_id_in_expr_p = parser->in_type_id_in_expr_p;
parser->in_type_id_in_expr_p = true;
declarator
= cp_parser_declarator (parser, dcl_kind, ctor_dtor_or_conv_p,
/*parenthesized_p=*/NULL,
member_p, friend_p);
parser->in_type_id_in_expr_p = saved_in_type_id_in_expr_p;
first = false;
/* Expect a `)'. */
if (!cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN))
declarator = cp_error_declarator;
if (declarator == cp_error_declarator)
break;
goto handle_declarator;
}
/* Otherwise, we must be done. */
else
break;
}
else if ((!first || dcl_kind != CP_PARSER_DECLARATOR_NAMED)
&& token->type == CPP_OPEN_SQUARE
&& !cp_next_tokens_can_be_attribute_p (parser))
{
/* Parse an array-declarator. */
tree bounds, attrs;
if (ctor_dtor_or_conv_p)
*ctor_dtor_or_conv_p = 0;
first = false;
parser->default_arg_ok_p = false;
parser->in_declarator_p = true;
/* Consume the `['. */
cp_lexer_consume_token (parser->lexer);
/* Peek at the next token. */
token = cp_lexer_peek_token (parser->lexer);
/* If the next token is `]', then there is no
constant-expression. */
if (token->type != CPP_CLOSE_SQUARE)
{
bool non_constant_p;
bounds
= cp_parser_constant_expression (parser,
/*allow_non_constant=*/true,
&non_constant_p);
if (!non_constant_p)
/* OK */;
else if (error_operand_p (bounds))
/* Already gave an error. */;
else if (!parser->in_function_body
|| current_binding_level->kind == sk_function_parms)
{
/* Normally, the array bound must be an integral constant
expression. However, as an extension, we allow VLAs
in function scopes as long as they aren't part of a
parameter declaration. */
cp_parser_error (parser,
"array bound is not an integer constant");
bounds = error_mark_node;
}
else if (processing_template_decl
&& !type_dependent_expression_p (bounds))
{
/* Remember this wasn't a constant-expression. */
bounds = build_nop (TREE_TYPE (bounds), bounds);
TREE_SIDE_EFFECTS (bounds) = 1;
}
}
else
bounds = NULL_TREE;
/* Look for the closing `]'. */
if (!cp_parser_require (parser, CPP_CLOSE_SQUARE, RT_CLOSE_SQUARE))
{
declarator = cp_error_declarator;
break;
}
attrs = cp_parser_std_attribute_spec_seq (parser);
declarator = make_array_declarator (declarator, bounds);
declarator->std_attributes = attrs;
}
else if (first && dcl_kind != CP_PARSER_DECLARATOR_ABSTRACT)
{
{
tree qualifying_scope;
tree unqualified_name;
tree attrs;
special_function_kind sfk;
bool abstract_ok;
bool pack_expansion_p = false;
cp_token *declarator_id_start_token;
/* Parse a declarator-id */
abstract_ok = (dcl_kind == CP_PARSER_DECLARATOR_EITHER);
if (abstract_ok)
{
cp_parser_parse_tentatively (parser);
/* If we see an ellipsis, we should be looking at a
parameter pack. */
if (token->type == CPP_ELLIPSIS)
{
/* Consume the `...' */
cp_lexer_consume_token (parser->lexer);
pack_expansion_p = true;
}
}
declarator_id_start_token = cp_lexer_peek_token (parser->lexer);
unqualified_name
= cp_parser_declarator_id (parser, /*optional_p=*/abstract_ok);
qualifying_scope = parser->scope;
if (abstract_ok)
{
bool okay = false;
if (!unqualified_name && pack_expansion_p)
{
/* Check whether an error occurred. */
okay = !cp_parser_error_occurred (parser);
/* We already consumed the ellipsis to mark a
parameter pack, but we have no way to report it,
so abort the tentative parse. We will be exiting
immediately anyway. */
cp_parser_abort_tentative_parse (parser);
}
else
okay = cp_parser_parse_definitely (parser);
if (!okay)
unqualified_name = error_mark_node;
else if (unqualified_name
&& (qualifying_scope
|| (!identifier_p (unqualified_name))))
{
cp_parser_error (parser, "expected unqualified-id");
unqualified_name = error_mark_node;
}
}
if (!unqualified_name)
return NULL;
if (unqualified_name == error_mark_node)
{
declarator = cp_error_declarator;
pack_expansion_p = false;
declarator->parameter_pack_p = false;
break;
}
attrs = cp_parser_std_attribute_spec_seq (parser);
if (qualifying_scope && at_namespace_scope_p ()
&& TREE_CODE (qualifying_scope) == TYPENAME_TYPE)
{
/* In the declaration of a member of a template class
outside of the class itself, the SCOPE will sometimes
be a TYPENAME_TYPE. For example, given:
template <typename T>
int S<T>::R::i = 3;
the SCOPE will be a TYPENAME_TYPE for `S<T>::R'. In
this context, we must resolve S<T>::R to an ordinary
type, rather than a typename type.
The reason we normally avoid resolving TYPENAME_TYPEs
is that a specialization of `S' might render
`S<T>::R' not a type. However, if `S' is
specialized, then this `i' will not be used, so there
is no harm in resolving the types here. */
tree type;
/* Resolve the TYPENAME_TYPE. */
type = resolve_typename_type (qualifying_scope,
/*only_current_p=*/false);
/* If that failed, the declarator is invalid. */
if (TREE_CODE (type) == TYPENAME_TYPE)
{
if (typedef_variant_p (type))
error_at (declarator_id_start_token->location,
"cannot define member of dependent typedef "
"%qT", type);
else
error_at (declarator_id_start_token->location,
"%<%T::%E%> is not a type",
TYPE_CONTEXT (qualifying_scope),
TYPE_IDENTIFIER (qualifying_scope));
}
qualifying_scope = type;
}
sfk = sfk_none;
if (unqualified_name)
{
tree class_type;
if (qualifying_scope
&& CLASS_TYPE_P (qualifying_scope))
class_type = qualifying_scope;
else
class_type = current_class_type;
if (TREE_CODE (unqualified_name) == TYPE_DECL)
{
tree name_type = TREE_TYPE (unqualified_name);
if (class_type && same_type_p (name_type, class_type))
{
if (qualifying_scope
&& CLASSTYPE_USE_TEMPLATE (name_type))
{
error_at (declarator_id_start_token->location,
"invalid use of constructor as a template");
inform (declarator_id_start_token->location,
"use %<%T::%D%> instead of %<%T::%D%> to "
"name the constructor in a qualified name",
class_type,
DECL_NAME (TYPE_TI_TEMPLATE (class_type)),
class_type, name_type);
declarator = cp_error_declarator;
break;
}
else
unqualified_name = constructor_name (class_type);
}
else
{
/* We do not attempt to print the declarator
here because we do not have enough
information about its original syntactic
form. */
cp_parser_error (parser, "invalid declarator");
declarator = cp_error_declarator;
break;
}
}
/* Classify the declarator-id as constructor, destructor
or conversion operator, if applicable.  */
if (class_type)
{
if (TREE_CODE (unqualified_name) == BIT_NOT_EXPR)
sfk = sfk_destructor;
else if (IDENTIFIER_TYPENAME_P (unqualified_name))
sfk = sfk_conversion;
else if (/* There's no way to declare a constructor
for an anonymous type, even if the type
got a name for linkage purposes. */
!TYPE_WAS_ANONYMOUS (class_type)
/* Handle correctly (c++/19200):
struct S {
struct T{};
friend void S(T);
};
and also:
namespace N {
void S();
}
struct S {
friend void N::S();
}; */
&& !(friend_p
&& class_type != qualifying_scope)
&& constructor_name_p (unqualified_name,
class_type))
{
unqualified_name = constructor_name (class_type);
sfk = sfk_constructor;
}
else if (is_overloaded_fn (unqualified_name)
&& DECL_CONSTRUCTOR_P (get_first_fn
(unqualified_name)))
sfk = sfk_constructor;
if (ctor_dtor_or_conv_p && sfk != sfk_none)
*ctor_dtor_or_conv_p = -1;
}
}
declarator = make_id_declarator (qualifying_scope,
unqualified_name,
sfk);
declarator->std_attributes = attrs;
declarator->id_loc = token->location;
declarator->parameter_pack_p = pack_expansion_p;
if (pack_expansion_p)
maybe_warn_variadic_templates ();
}
handle_declarator:;
scope = get_scope_of_declarator (declarator);
if (scope)
{
/* Any names that appear after the declarator-id for a
member are looked up in the containing scope. */
if (at_function_scope_p ())
{
/* But declarations with qualified-ids can't appear in a
function. */
cp_parser_error (parser, "qualified-id in declaration");
declarator = cp_error_declarator;
break;
}
pushed_scope = push_scope (scope);
}
parser->in_declarator_p = true;
if ((ctor_dtor_or_conv_p && *ctor_dtor_or_conv_p)
|| (declarator && declarator->kind == cdk_id))
/* Default args are only allowed on function
declarations. */
parser->default_arg_ok_p = saved_default_arg_ok_p;
else
parser->default_arg_ok_p = false;
first = false;
}
/* We're done. */
else
break;
}
/* For an abstract declarator, we might wind up with nothing at this
point. That's an error; the declarator is not optional. */
if (!declarator)
cp_parser_error (parser, "expected declarator");
/* If we entered a scope, we must exit it now. */
if (pushed_scope)
pop_scope (pushed_scope);
parser->default_arg_ok_p = saved_default_arg_ok_p;
parser->in_declarator_p = saved_in_declarator_p;
return declarator;
}
/* Parse a ptr-operator.
ptr-operator:
* attribute-specifier-seq [opt] cv-qualifier-seq [opt] (C++11)
* cv-qualifier-seq [opt]
&
:: [opt] nested-name-specifier * cv-qualifier-seq [opt]
nested-name-specifier * attribute-specifier-seq [opt] cv-qualifier-seq [opt] (C++11)
GNU Extension:
ptr-operator:
& cv-qualifier-seq [opt]
Returns INDIRECT_REF if a pointer, or pointer-to-member, was used.
Returns ADDR_EXPR if a reference was used, or NON_LVALUE_EXPR for
an rvalue reference. In the case of a pointer-to-member, *TYPE is
filled in with the TYPE containing the member. *CV_QUALS is
filled in with the cv-qualifier-seq, or TYPE_UNQUALIFIED, if there
are no cv-qualifiers. *ATTRIBUTES, if non-NULL, receives any C++11
attribute-specifier-seq. Returns ERROR_MARK if an error occurred.
Note that the tree codes returned by this function have nothing
to do with the types of trees that will be eventually be created
to represent the pointer or reference type being parsed. They are
just constants with suggestive names. */
static enum tree_code
cp_parser_ptr_operator (cp_parser* parser,
tree* type,
cp_cv_quals *cv_quals,
tree *attributes)
{
enum tree_code code = ERROR_MARK;
cp_token *token;
tree attrs = NULL_TREE;
/* Assume that it's not a pointer-to-member. */
*type = NULL_TREE;
/* And that there are no cv-qualifiers. */
*cv_quals = TYPE_UNQUALIFIED;
/* Peek at the next token. */
token = cp_lexer_peek_token (parser->lexer);
/* If it's a `*', `&' or `&&' we have a pointer or reference. */
if (token->type == CPP_MULT)
code = INDIRECT_REF;
else if (token->type == CPP_AND)
code = ADDR_EXPR;
else if ((cxx_dialect != cxx98) &&
token->type == CPP_AND_AND) /* C++0x only */
code = NON_LVALUE_EXPR;
if (code != ERROR_MARK)
{
/* Consume the `*', `&' or `&&'. */
cp_lexer_consume_token (parser->lexer);
/* A `*' can be followed by a cv-qualifier-seq, and so can a
`&', if we are allowing GNU extensions. (The only qualifier
that can legally appear after `&' is `restrict', but that is
enforced during semantic analysis.) */
if (code == INDIRECT_REF
|| cp_parser_allow_gnu_extensions_p (parser))
*cv_quals = cp_parser_cv_qualifier_seq_opt (parser);
attrs = cp_parser_std_attribute_spec_seq (parser);
if (attributes != NULL)
*attributes = attrs;
}
else
{
/* Try the pointer-to-member case. */
cp_parser_parse_tentatively (parser);
/* Look for the optional `::' operator. */
cp_parser_global_scope_opt (parser,
/*current_scope_valid_p=*/false);
/* Look for the nested-name specifier. */
token = cp_lexer_peek_token (parser->lexer);
cp_parser_nested_name_specifier (parser,
/*typename_keyword_p=*/false,
/*check_dependency_p=*/true,
/*type_p=*/false,
/*is_declaration=*/false);
/* If we found it, and the next token is a `*', then we are
indeed looking at a pointer-to-member operator. */
if (!cp_parser_error_occurred (parser)
&& cp_parser_require (parser, CPP_MULT, RT_MULT))
{
/* Indicate that the `*' operator was used. */
code = INDIRECT_REF;
if (TREE_CODE (parser->scope) == NAMESPACE_DECL)
error_at (token->location, "%qD is a namespace", parser->scope);
else if (TREE_CODE (parser->scope) == ENUMERAL_TYPE)
error_at (token->location, "cannot form pointer to member of "
"non-class %q#T", parser->scope);
else
{
/* The type of which the member is a member is given by the
current SCOPE. */
*type = parser->scope;
/* The next name will not be qualified. */
parser->scope = NULL_TREE;
parser->qualifying_scope = NULL_TREE;
parser->object_scope = NULL_TREE;
/* Look for optional c++11 attributes. */
attrs = cp_parser_std_attribute_spec_seq (parser);
if (attributes != NULL)
*attributes = attrs;
/* Look for the optional cv-qualifier-seq. */
*cv_quals = cp_parser_cv_qualifier_seq_opt (parser);
}
}
/* If that didn't work we don't have a ptr-operator. */
if (!cp_parser_parse_definitely (parser))
cp_parser_error (parser, "expected ptr-operator");
}
return code;
}
/* Parse an (optional) cv-qualifier-seq.
   cv-qualifier-seq:
     cv-qualifier cv-qualifier-seq [opt]
   cv-qualifier:
     const
     volatile
   GNU Extension:
   cv-qualifier:
     __restrict__
   A duplicated qualifier is diagnosed and the offending token is
   purged from the token stream.
   Returns a bitmask representing the cv-qualifiers.  */
static cp_cv_quals
cp_parser_cv_qualifier_seq_opt (cp_parser* parser)
{
  cp_cv_quals quals = TYPE_UNQUALIFIED;
  for (;;)
    {
      cp_token *tok = cp_lexer_peek_token (parser->lexer);
      cp_cv_quals qual;
      /* Map the next keyword, if any, to a qualifier bit.  */
      switch (tok->keyword)
	{
	case RID_CONST:
	  qual = TYPE_QUAL_CONST;
	  break;
	case RID_VOLATILE:
	  qual = TYPE_QUAL_VOLATILE;
	  break;
	case RID_RESTRICT:
	  qual = TYPE_QUAL_RESTRICT;
	  break;
	default:
	  qual = TYPE_UNQUALIFIED;
	  break;
	}
      /* Anything else ends the (possibly empty) sequence.  */
      if (qual == TYPE_UNQUALIFIED)
	return quals;
      if ((quals & qual) != 0)
	{
	  /* Complain about the repeat, then drop the token.  */
	  error_at (tok->location, "duplicate cv-qualifier");
	  cp_lexer_purge_token (parser->lexer);
	}
      else
	{
	  cp_lexer_consume_token (parser->lexer);
	  quals |= qual;
	}
    }
}
/* Parse an (optional) ref-qualifier
   ref-qualifier:
     &
     &&
   A second ref-qualifier is diagnosed and its token purged.
   Returns the cp_ref_qualifier representing the ref-qualifier, or
   REF_QUAL_NONE if none was present.  */
static cp_ref_qualifier
cp_parser_ref_qualifier_opt (cp_parser* parser)
{
  cp_ref_qualifier result = REF_QUAL_NONE;
  /* Don't try to parse bitwise '&' as a ref-qualifier (c++/57532). */
  if (cxx_dialect < cxx11 && cp_parser_parsing_tentatively (parser))
    return result;
  for (;;)
    {
      cp_token *tok = cp_lexer_peek_token (parser->lexer);
      cp_ref_qualifier found;
      if (tok->type == CPP_AND)
	found = REF_QUAL_LVALUE;
      else if (tok->type == CPP_AND_AND)
	found = REF_QUAL_RVALUE;
      else
	found = REF_QUAL_NONE;
      /* Any other token terminates the sequence.  */
      if (found == REF_QUAL_NONE)
	break;
      if (result != REF_QUAL_NONE)
	{
	  /* Only one ref-qualifier is permitted; drop the extra.  */
	  error_at (tok->location, "multiple ref-qualifiers");
	  cp_lexer_purge_token (parser->lexer);
	}
      else
	{
	  result = found;
	  cp_lexer_consume_token (parser->lexer);
	}
    }
  return result;
}
/* Parse an (optional) virt-specifier-seq.
   virt-specifier-seq:
     virt-specifier virt-specifier-seq [opt]
   virt-specifier:
     override
     final
   "override" and "final" are context-sensitive identifiers, so they
   are matched by spelling rather than by keyword; "__final" is also
   accepted as FINAL without a C++0x warning.  Duplicates are
   diagnosed and purged.
   Returns a bitmask representing the virt-specifiers.  */
static cp_virt_specifiers
cp_parser_virt_specifier_seq_opt (cp_parser* parser)
{
  cp_virt_specifiers seen = VIRT_SPEC_UNSPECIFIED;
  for (;;)
    {
      cp_token *tok = cp_lexer_peek_token (parser->lexer);
      /* Only plain identifiers can be virt-specifiers.  */
      if (tok->type != CPP_NAME)
	break;
      const char *spelling = IDENTIFIER_POINTER (tok->u.value);
      cp_virt_specifiers spec;
      if (strcmp (spelling, "override") == 0)
	{
	  maybe_warn_cpp0x (CPP0X_OVERRIDE_CONTROLS);
	  spec = VIRT_SPEC_OVERRIDE;
	}
      else if (strcmp (spelling, "final") == 0)
	{
	  maybe_warn_cpp0x (CPP0X_OVERRIDE_CONTROLS);
	  spec = VIRT_SPEC_FINAL;
	}
      else if (strcmp (spelling, "__final") == 0)
	spec = VIRT_SPEC_FINAL;
      else
	break;
      if (seen & spec)
	{
	  error_at (tok->location, "duplicate virt-specifier");
	  cp_lexer_purge_token (parser->lexer);
	}
      else
	{
	  cp_lexer_consume_token (parser->lexer);
	  seen |= spec;
	}
    }
  return seen;
}
/* Used by handling of trailing-return-types and NSDMI, in which 'this'
is in scope even though it isn't real.
CTYPE is the class whose 'this' is being injected; QUALS are the
cv-qualifiers to apply to the 'this' parameter.  Sets up
current_class_ptr/current_class_ref as globals; callers save and
restore them (see cp_parser_late_return_type_opt).  */
void
inject_this_parameter (tree ctype, cp_cv_quals quals)
{
tree this_parm;
if (current_class_ptr)
{
/* We don't clear this between NSDMIs. Is it already what we want? */
tree type = TREE_TYPE (TREE_TYPE (current_class_ptr));
if (same_type_ignoring_top_level_qualifiers_p (ctype, type)
&& cp_type_quals (type) == quals)
return;
}
this_parm = build_this_parm (ctype, quals);
/* Clear this first to avoid shortcut in cp_build_indirect_ref. */
current_class_ptr = NULL_TREE;
current_class_ref
= cp_build_indirect_ref (this_parm, RO_NULL, tf_warning_or_error);
current_class_ptr = this_parm;
}
/* Return true iff our current scope is a non-static data member
   initializer.  NSDMI context is recognized by the context-less
   'this' PARM_DECL installed by inject_this_parameter above.  */
bool
parsing_nsdmi (void)
{
  return (current_class_ptr != NULL_TREE
	  && TREE_CODE (current_class_ptr) == PARM_DECL
	  && DECL_CONTEXT (current_class_ptr) == NULL_TREE);
}
/* Parse a late-specified return type, if any. This is not a separate
non-terminal, but part of a function declarator, which looks like
-> trailing-type-specifier-seq abstract-declarator(opt)
Returns the type indicated by the type-id, or NULL_TREE when no
trailing return type (and no queued simd attributes) is present.
In addition to this this parses any queued up omp declare simd
clauses and Cilk Plus SIMD-enabled function's vector attributes.
QUALS is either a bitmask of cv_qualifiers or -1 for a non-member
function. */
static tree
cp_parser_late_return_type_opt (cp_parser* parser, cp_declarator *declarator,
cp_cv_quals quals)
{
cp_token *token;
tree type = NULL_TREE;
bool declare_simd_p = (parser->omp_declare_simd
&& declarator
&& declarator->kind == cdk_id);
bool cilk_simd_fn_vector_p = (parser->cilk_simd_fn_info
&& declarator && declarator->kind == cdk_id);
/* Peek at the next token. */
token = cp_lexer_peek_token (parser->lexer);
/* A late-specified return type is indicated by an initial '->'.
Even without one, we still proceed if simd attributes are queued. */
if (token->type != CPP_DEREF && !(declare_simd_p || cilk_simd_fn_vector_p))
return NULL_TREE;
/* Save the 'this' globals; inject_this_parameter clobbers them. */
tree save_ccp = current_class_ptr;
tree save_ccr = current_class_ref;
if (quals >= 0)
{
/* DR 1207: 'this' is in scope in the trailing return type. */
inject_this_parameter (current_class_type, quals);
}
if (token->type == CPP_DEREF)
{
/* Consume the ->. */
cp_lexer_consume_token (parser->lexer);
type = cp_parser_trailing_type_id (parser);
}
if (cilk_simd_fn_vector_p)
declarator->std_attributes
= cp_parser_late_parsing_cilk_simd_fn_info (parser,
declarator->std_attributes);
if (declare_simd_p)
declarator->std_attributes
= cp_parser_late_parsing_omp_declare_simd (parser,
declarator->std_attributes);
if (quals >= 0)
{
/* Restore the saved 'this' state. */
current_class_ptr = save_ccp;
current_class_ref = save_ccr;
}
return type;
}
/* Parse a declarator-id.
   declarator-id:
     id-expression
     :: [opt] nested-name-specifier [opt] type-name
   In the `id-expression' case, the value returned is as for
   cp_parser_id_expression if the id-expression was an unqualified-id.
   If the id-expression was a qualified-id, then a SCOPE_REF is
   returned. The first operand is the scope (either a NAMESPACE_DECL
   or TREE_TYPE), but the second is still just a representation of an
   unqualified-id.  */
static tree
cp_parser_declarator_id (cp_parser* parser, bool optional_p)
{
  /* The expression must be an id-expression. Assume that qualified
     names are the names of types so that:
       template <class T>
       int S<T>::R::i = 3;
     will work; we must treat `S<T>::R' as the name of a type.
     Similarly, assume that qualified names are templates, where
     required, so that:
       template <class T>
       int S<T>::R<T>::i = 3;
     will work, too.  */
  tree id = cp_parser_id_expression (parser,
				     /*template_keyword_p=*/false,
				     /*check_dependency_p=*/false,
				     /*template_p=*/NULL,
				     /*declarator_p=*/true,
				     optional_p);
  /* Strip a BASELINK down to the underlying functions.  */
  if (id != NULL_TREE && BASELINK_P (id))
    return BASELINK_FUNCTIONS (id);
  return id;
}
/* Parse a type-id.
type-id:
type-specifier-seq abstract-declarator [opt]
IS_TEMPLATE_ARG is true when parsing a template argument;
IS_TRAILING_RETURN is true when parsing a trailing-return-type.
Returns the TYPE specified, or error_mark_node on failure. */
static tree
cp_parser_type_id_1 (cp_parser* parser, bool is_template_arg,
bool is_trailing_return)
{
cp_decl_specifier_seq type_specifier_seq;
cp_declarator *abstract_declarator;
/* Parse the type-specifier-seq. */
cp_parser_type_specifier_seq (parser, /*is_declaration=*/false,
is_trailing_return,
&type_specifier_seq);
if (type_specifier_seq.type == error_mark_node)
return error_mark_node;
/* There might or might not be an abstract declarator. */
cp_parser_parse_tentatively (parser);
/* Look for the declarator. */
abstract_declarator
= cp_parser_declarator (parser, CP_PARSER_DECLARATOR_ABSTRACT, NULL,
/*parenthesized_p=*/NULL,
/*member_p=*/false,
/*friend_p=*/false);
/* Check to see if there really was a declarator. */
if (!cp_parser_parse_definitely (parser))
abstract_declarator = NULL;
if (type_specifier_seq.type
/* None of the valid uses of 'auto' in C++14 involve the type-id
nonterminal, but it is valid in a trailing-return-type. */
&& !(cxx_dialect >= cxx14 && is_trailing_return)
&& type_uses_auto (type_specifier_seq.type))
{
/* A type-id with type 'auto' is only ok if the abstract declarator
is a function declarator with a late-specified return type. */
if (abstract_declarator
&& abstract_declarator->kind == cdk_function
&& abstract_declarator->u.function.late_return_type)
/* OK */;
else
{
error ("invalid use of %<auto%>");
return error_mark_node;
}
}
/* Combine the specifiers and declarator into a type. */
return groktypename (&type_specifier_seq, abstract_declarator,
is_template_arg);
}
/* Parse an ordinary type-id: neither a template argument nor a
   trailing return type.  */
static tree cp_parser_type_id (cp_parser *parser)
{
  return cp_parser_type_id_1 (parser, /*is_template_arg=*/false,
			      /*is_trailing_return=*/false);
}
/* Parse a type-id used as a template type argument.  Type definitions
   are forbidden in this context, and (in C++14) so is 'auto'.  */
static tree cp_parser_template_type_arg (cp_parser *parser)
{
  /* Forbid type definitions for the duration of the parse.  */
  const char *saved_message = parser->type_definition_forbidden_message;
  parser->type_definition_forbidden_message
    = G_("types may not be defined in template arguments");
  tree result = cp_parser_type_id_1 (parser, /*is_template_arg=*/true,
				     /*is_trailing_return=*/false);
  parser->type_definition_forbidden_message = saved_message;
  /* 'auto' cannot appear in a template argument (C++14 rules).  */
  if (cxx_dialect >= cxx14 && type_uses_auto (result))
    {
      error ("invalid use of %<auto%> in template argument");
      result = error_mark_node;
    }
  return result;
}
/* Parse a type-id appearing in a trailing-return-type (after "->").  */
static tree cp_parser_trailing_type_id (cp_parser *parser)
{
  return cp_parser_type_id_1 (parser, /*is_template_arg=*/false,
			      /*is_trailing_return=*/true);
}
/* Parse a type-specifier-seq.
type-specifier-seq:
type-specifier type-specifier-seq [opt]
GNU extension:
type-specifier-seq:
attributes type-specifier-seq [opt]
If IS_DECLARATION is true, we are at the start of a "condition" or
exception-declaration, so we might be followed by a declarator-id.
If IS_TRAILING_RETURN is true, we are in a trailing-return-type,
i.e. we've just seen "->".
Sets *TYPE_SPECIFIER_SEQ to represent the sequence; on failure its
type field is set to error_mark_node. */
static void
cp_parser_type_specifier_seq (cp_parser* parser,
bool is_declaration,
bool is_trailing_return,
cp_decl_specifier_seq *type_specifier_seq)
{
bool seen_type_specifier = false;
cp_parser_flags flags = CP_PARSER_FLAGS_OPTIONAL;
cp_token *start_token = NULL;
/* Clear the TYPE_SPECIFIER_SEQ. */
clear_decl_specs (type_specifier_seq);
/* In the context of a trailing return type, enum E { } is an
elaborated-type-specifier followed by a function-body, not an
enum-specifier. */
if (is_trailing_return)
flags |= CP_PARSER_FLAGS_NO_TYPE_DEFINITIONS;
/* Parse the type-specifiers and attributes. */
while (true)
{
tree type_specifier;
bool is_cv_qualifier;
/* Check for attributes first. */
if (cp_next_tokens_can_be_attribute_p (parser))
{
type_specifier_seq->attributes =
chainon (type_specifier_seq->attributes,
cp_parser_attributes_opt (parser));
continue;
}
/* record the token of the beginning of the type specifier seq,
for error reporting purposes*/
if (!start_token)
start_token = cp_lexer_peek_token (parser->lexer);
/* Look for the type-specifier. */
type_specifier = cp_parser_type_specifier (parser,
flags,
type_specifier_seq,
/*is_declaration=*/false,
NULL,
&is_cv_qualifier);
if (!type_specifier)
{
/* If the first type-specifier could not be found, this is not a
type-specifier-seq at all. */
if (!seen_type_specifier)
{
/* Set in_declarator_p to avoid skipping to the semicolon. */
int in_decl = parser->in_declarator_p;
parser->in_declarator_p = true;
if (cp_parser_uncommitted_to_tentative_parse_p (parser)
|| !cp_parser_parse_and_diagnose_invalid_type_name (parser))
cp_parser_error (parser, "expected type-specifier");
parser->in_declarator_p = in_decl;
type_specifier_seq->type = error_mark_node;
return;
}
/* If subsequent type-specifiers could not be found, the
type-specifier-seq is complete. */
break;
}
seen_type_specifier = true;
/* The standard says that a condition can be:
type-specifier-seq declarator = assignment-expression
However, given:
struct S {};
if (int S = ...)
we should treat the "S" as a declarator, not as a
type-specifier. The standard doesn't say that explicitly for
type-specifier-seq, but it does say that for
decl-specifier-seq in an ordinary declaration. Perhaps it
would be clearer just to allow a decl-specifier-seq here, and
then add a semantic restriction that if any decl-specifiers
that are not type-specifiers appear, the program is invalid. */
if (is_declaration && !is_cv_qualifier)
flags |= CP_PARSER_FLAGS_NO_USER_DEFINED_TYPES;
}
}
/* Return whether the function currently being declared has an associated
template parameter list. */
static bool
function_being_declared_is_template_p (cp_parser* parser)
{
  bool is_template;

  if (!current_template_parms || processing_template_parmlist)
    /* No template parameters in scope, or we are still parsing the
       template-parameter-list itself: not a template.  */
    is_template = false;
  else if (parser->implicit_template_scope)
    /* An implicit template scope has been opened (e.g. for `auto'
       parameters), so this is a template.  */
    is_template = true;
  else if (at_class_scope_p () && TYPE_BEING_DEFINED (current_class_type))
    /* Inside a class definition, the function is a template exactly
       when an explicit template-parameter-list was supplied.  */
    is_template = (parser->num_template_parameter_lists != 0);
  else
    /* Otherwise, the function is a template when it has more
       parameter lists than its enclosing class nesting accounts for.  */
    is_template = ((int) parser->num_template_parameter_lists
		   > template_class_depth (current_class_type));

  return is_template;
}
/* Parse a parameter-declaration-clause.
parameter-declaration-clause:
parameter-declaration-list [opt] ... [opt]
parameter-declaration-list , ...
Returns a representation for the parameter declarations. A return
value of NULL indicates a parameter-declaration-clause consisting
only of an ellipsis. */
static tree
cp_parser_parameter_declaration_clause (cp_parser* parser)
{
  tree parameters;
  cp_token *token;
  bool ellipsis_p;
  bool is_error;

  /* RAII guard: whichever path leaves this function, restore
     auto_is_implicit_function_template_parm_p to its value on entry.  */
  struct cleanup {
    cp_parser* parser;
    int auto_is_implicit_function_template_parm_p;
    ~cleanup() {
      parser->auto_is_implicit_function_template_parm_p
	= auto_is_implicit_function_template_parm_p;
    }
  } cleanup = { parser, parser->auto_is_implicit_function_template_parm_p };
  (void) cleanup;

  if (!processing_specialization
      && !processing_template_parmlist
      && !processing_explicit_instantiation)
    /* Outside any function, or inside a lambda, `auto' in a parameter
       type introduces an implicit function template parameter.  */
    if (!current_function_decl
	|| (current_class_type && LAMBDA_TYPE_P (current_class_type)))
      parser->auto_is_implicit_function_template_parm_p = true;

  /* Peek at the next token.  */
  token = cp_lexer_peek_token (parser->lexer);
  /* Check for trivial parameter-declaration-clauses.  */
  if (token->type == CPP_ELLIPSIS)
    {
      /* Consume the `...' token.  */
      cp_lexer_consume_token (parser->lexer);
      return NULL_TREE;
    }
  else if (token->type == CPP_CLOSE_PAREN)
    /* There are no parameters.  */
    {
#ifndef NO_IMPLICIT_EXTERN_C
      /* In implicit extern "C" (system headers), `()' declares an
	 unprototyped function, not one taking no arguments.  */
      if (in_system_header_at (input_location)
	  && current_class_type == NULL
	  && current_lang_name == lang_name_c)
	return NULL_TREE;
      else
#endif
	return void_list_node;
    }
  /* Check for `(void)', too, which is a special case.  */
  else if (token->keyword == RID_VOID
	   && (cp_lexer_peek_nth_token (parser->lexer, 2)->type
	       == CPP_CLOSE_PAREN))
    {
      /* Consume the `void' token.  */
      cp_lexer_consume_token (parser->lexer);
      /* There are no parameters.  */
      return void_list_node;
    }

  /* Parse the parameter-declaration-list.  */
  parameters = cp_parser_parameter_declaration_list (parser, &is_error);
  /* If a parse error occurred while parsing the
     parameter-declaration-list, then the entire
     parameter-declaration-clause is erroneous.  */
  if (is_error)
    return NULL;

  /* Peek at the next token.  */
  token = cp_lexer_peek_token (parser->lexer);
  /* If it's a `,', the clause should terminate with an ellipsis.  */
  if (token->type == CPP_COMMA)
    {
      /* Consume the `,'.  */
      cp_lexer_consume_token (parser->lexer);
      /* Expect an ellipsis.  */
      ellipsis_p
	= (cp_parser_require (parser, CPP_ELLIPSIS, RT_ELLIPSIS) != NULL);
    }
  /* It might also be `...' if the optional trailing `,' was
     omitted.  */
  else if (token->type == CPP_ELLIPSIS)
    {
      /* Consume the `...' token.  */
      cp_lexer_consume_token (parser->lexer);
      /* And remember that we saw it.  */
      ellipsis_p = true;
    }
  else
    ellipsis_p = false;

  /* Finish the parameter list.  A non-variadic list is terminated
     with void_list_node.  */
  if (!ellipsis_p)
    parameters = chainon (parameters, void_list_node);

  return parameters;
}
/* Parse a parameter-declaration-list.
parameter-declaration-list:
parameter-declaration
parameter-declaration-list , parameter-declaration
Returns a representation of the parameter-declaration-list, as for
cp_parser_parameter_declaration_clause. However, the
`void_list_node' is never appended to the list. Upon return,
*IS_ERROR will be true iff an error occurred. */
static tree
cp_parser_parameter_declaration_list (cp_parser* parser, bool *is_error)
{
  tree parameters = NULL_TREE;
  /* Tail pointer for appending to PARAMETERS in O(1).
     (The `&' operators in this function had been corrupted into
     `&para;' HTML entities; restored.)  */
  tree *tail = &parameters;
  bool saved_in_unbraced_linkage_specification_p;
  int index = 0;

  /* Assume all will go well.  */
  *is_error = false;

  /* The special considerations that apply to a function within an
     unbraced linkage specifications do not apply to the parameters
     to the function.  */
  saved_in_unbraced_linkage_specification_p
    = parser->in_unbraced_linkage_specification_p;
  parser->in_unbraced_linkage_specification_p = false;

  /* Look for more parameters.  */
  while (true)
    {
      cp_parameter_declarator *parameter;
      tree decl = error_mark_node;
      bool parenthesized_p = false;
      /* Number of template parameters before parsing this parameter;
	 used to detect implicit parameters introduced by it.  */
      int template_parm_idx = (function_being_declared_is_template_p (parser)?
			       TREE_VEC_LENGTH (INNERMOST_TEMPLATE_PARMS
						(current_template_parms)) : 0);

      /* Parse the parameter.  */
      parameter
	= cp_parser_parameter_declaration (parser,
					   /*template_parm_p=*/false,
					   &parenthesized_p);

      /* We don't know yet if the enclosing context is deprecated, so wait
	 and warn in grokparms if appropriate.  */
      deprecated_state = DEPRECATED_SUPPRESS;

      if (parameter)
	{
	  /* If a function parameter pack was specified and an implicit template
	     parameter was introduced during cp_parser_parameter_declaration,
	     change any implicit parameters introduced into packs.  */
	  if (parser->implicit_template_parms
	      && parameter->declarator
	      && parameter->declarator->parameter_pack_p)
	    {
	      int latest_template_parm_idx = TREE_VEC_LENGTH
		(INNERMOST_TEMPLATE_PARMS (current_template_parms));

	      if (latest_template_parm_idx != template_parm_idx)
		parameter->decl_specifiers.type = convert_generic_types_to_packs
		  (parameter->decl_specifiers.type,
		   template_parm_idx, latest_template_parm_idx);
	    }

	  decl = grokdeclarator (parameter->declarator,
				 &parameter->decl_specifiers,
				 PARM,
				 parameter->default_argument != NULL_TREE,
				 &parameter->decl_specifiers.attributes);
	}

      deprecated_state = DEPRECATED_NORMAL;

      /* If a parse error occurred parsing the parameter declaration,
	 then the entire parameter-declaration-list is erroneous.  */
      if (decl == error_mark_node)
	{
	  *is_error = true;
	  parameters = error_mark_node;
	  break;
	}

      if (parameter->decl_specifiers.attributes)
	cplus_decl_attributes (&decl,
			       parameter->decl_specifiers.attributes,
			       0);
      if (DECL_NAME (decl))
	decl = pushdecl (decl);

      if (decl != error_mark_node)
	{
	  retrofit_lang_decl (decl);
	  DECL_PARM_INDEX (decl) = ++index;
	  DECL_PARM_LEVEL (decl) = function_parm_depth ();
	}

      /* Add the new parameter to the list.  */
      *tail = build_tree_list (parameter->default_argument, decl);
      tail = &TREE_CHAIN (*tail);

      /* Peek at the next token.  */
      if (cp_lexer_next_token_is (parser->lexer, CPP_CLOSE_PAREN)
	  || cp_lexer_next_token_is (parser->lexer, CPP_ELLIPSIS)
	  /* These are for Objective-C++ */
	  || cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON)
	  || cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE))
	/* The parameter-declaration-list is complete.  */
	break;
      else if (cp_lexer_next_token_is (parser->lexer, CPP_COMMA))
	{
	  cp_token *token;

	  /* Peek at the next token.  */
	  token = cp_lexer_peek_nth_token (parser->lexer, 2);
	  /* If it's an ellipsis, then the list is complete.  */
	  if (token->type == CPP_ELLIPSIS)
	    break;
	  /* Otherwise, there must be more parameters.  Consume the
	     `,'.  */
	  cp_lexer_consume_token (parser->lexer);
	  /* When parsing something like:

		int i(float f, double d)

	     we can tell after seeing the declaration for "f" that we
	     are not looking at an initialization of a variable "i",
	     but rather at the declaration of a function "i".

	     Due to the fact that the parsing of template arguments
	     (as specified to a template-id) requires backtracking we
	     cannot use this technique when inside a template argument
	     list.  */
	  if (!parser->in_template_argument_list_p
	      && !parser->in_type_id_in_expr_p
	      && cp_parser_uncommitted_to_tentative_parse_p (parser)
	      /* However, a parameter-declaration of the form
		 "float(f)" (which is a valid declaration of a
		 parameter "f") can also be interpreted as an
		 expression (the conversion of "f" to "float").  */
	      && !parenthesized_p)
	    cp_parser_commit_to_tentative_parse (parser);
	}
      else
	{
	  cp_parser_error (parser, "expected %<,%> or %<...%>");
	  if (!cp_parser_uncommitted_to_tentative_parse_p (parser))
	    cp_parser_skip_to_closing_parenthesis (parser,
						   /*recovering=*/true,
						   /*or_comma=*/false,
						   /*consume_paren=*/false);
	  break;
	}
    }

  parser->in_unbraced_linkage_specification_p
    = saved_in_unbraced_linkage_specification_p;

  /* Reset implicit_template_scope if we are about to leave the function
     parameter list that introduced it.  Note that for out-of-line member
     definitions, there will be one or more class scopes before we get to
     the template parameter scope.  */
  if (cp_binding_level *its = parser->implicit_template_scope)
    if (cp_binding_level *maybe_its = current_binding_level->level_chain)
      {
	while (maybe_its->kind == sk_class)
	  maybe_its = maybe_its->level_chain;
	if (maybe_its == its)
	  {
	    parser->implicit_template_parms = 0;
	    parser->implicit_template_scope = 0;
	  }
      }

  return parameters;
}
/* Parse a parameter declaration.
parameter-declaration:
decl-specifier-seq ... [opt] declarator
decl-specifier-seq declarator = assignment-expression
decl-specifier-seq ... [opt] abstract-declarator [opt]
decl-specifier-seq abstract-declarator [opt] = assignment-expression
If TEMPLATE_PARM_P is TRUE, then this parameter-declaration
declares a template parameter. (In that case, a non-nested `>'
token encountered during the parsing of the assignment-expression
is not interpreted as a greater-than operator.)
Returns a representation of the parameter, or NULL if an error
occurs. If PARENTHESIZED_P is non-NULL, *PARENTHESIZED_P is set to
true iff the declarator is of the form "(p)". */
static cp_parameter_declarator *
cp_parser_parameter_declaration (cp_parser *parser,
				 bool template_parm_p,
				 bool *parenthesized_p)
{
  int declares_class_or_enum;
  cp_decl_specifier_seq decl_specifiers;
  cp_declarator *declarator;
  tree default_argument;
  cp_token *token = NULL, *declarator_token_start = NULL;
  const char *saved_message;

  /* In a template parameter, `>' is not an operator.

     [temp.param]

     When parsing a default template-argument for a non-type
     template-parameter, the first non-nested `>' is taken as the end
     of the template parameter-list rather than a greater-than
     operator.  */

  /* Type definitions may not appear in parameter types.  */
  saved_message = parser->type_definition_forbidden_message;
  parser->type_definition_forbidden_message
    = G_("types may not be defined in parameter types");

  /* Parse the declaration-specifiers.  */
  cp_parser_decl_specifier_seq (parser,
				CP_PARSER_FLAGS_NONE,
				&decl_specifiers,
				&declares_class_or_enum);

  /* Complain about missing 'typename' or other invalid type names.  */
  if (!decl_specifiers.any_type_specifiers_p
      && cp_parser_parse_and_diagnose_invalid_type_name (parser))
    decl_specifiers.type = error_mark_node;

  /* If an error occurred, there's no reason to attempt to parse the
     rest of the declaration.  */
  if (cp_parser_error_occurred (parser))
    {
      parser->type_definition_forbidden_message = saved_message;
      return NULL;
    }

  /* Peek at the next token.  */
  token = cp_lexer_peek_token (parser->lexer);

  /* If the next token is a `)', `,', `=', `>', or `...', then there
     is no declarator.  However, when variadic templates are enabled,
     there may be a declarator following `...'.  */
  if (token->type == CPP_CLOSE_PAREN
      || token->type == CPP_COMMA
      || token->type == CPP_EQ
      || token->type == CPP_GREATER)
    {
      declarator = NULL;
      if (parenthesized_p)
	*parenthesized_p = false;
    }
  /* Otherwise, there should be a declarator.  */
  else
    {
      /* Default arguments are not allowed inside the declarator itself
	 (only after it); forbid them while it is parsed.  */
      bool saved_default_arg_ok_p = parser->default_arg_ok_p;
      parser->default_arg_ok_p = false;

      /* After seeing a decl-specifier-seq, if the next token is not a
	 "(", there is no possibility that the code is a valid
	 expression.  Therefore, if parsing tentatively, we commit at
	 this point.  */
      if (!parser->in_template_argument_list_p
	  /* In an expression context, having seen:

	       (int((char ...

	     we cannot be sure whether we are looking at a
	     function-type (taking a "char" as a parameter) or a cast
	     of some object of type "char" to "int".  */
	  && !parser->in_type_id_in_expr_p
	  && cp_parser_uncommitted_to_tentative_parse_p (parser)
	  && cp_lexer_next_token_is_not (parser->lexer, CPP_OPEN_BRACE)
	  && cp_lexer_next_token_is_not (parser->lexer, CPP_OPEN_PAREN))
	cp_parser_commit_to_tentative_parse (parser);
      /* Parse the declarator.  */
      declarator_token_start = token;
      declarator = cp_parser_declarator (parser,
					 CP_PARSER_DECLARATOR_EITHER,
					 /*ctor_dtor_or_conv_p=*/NULL,
					 parenthesized_p,
					 /*member_p=*/false,
					 /*friend_p=*/false);
      parser->default_arg_ok_p = saved_default_arg_ok_p;
      /* After the declarator, allow more attributes.  */
      decl_specifiers.attributes
	= chainon (decl_specifiers.attributes,
		   cp_parser_attributes_opt (parser));
    }

  /* If the next token is an ellipsis, and we have not seen a
     declarator name, and the type of the declarator contains parameter
     packs but it is not a TYPE_PACK_EXPANSION, then we actually have
     a parameter pack expansion expression.  Otherwise, leave the
     ellipsis for a C-style variadic function.  */
  token = cp_lexer_peek_token (parser->lexer);
  if (cp_lexer_next_token_is (parser->lexer, CPP_ELLIPSIS))
    {
      tree type = decl_specifiers.type;

      if (type && DECL_P (type))
	type = TREE_TYPE (type);

      if (type
	  && TREE_CODE (type) != TYPE_PACK_EXPANSION
	  && declarator_can_be_parameter_pack (declarator)
	  && (!declarator || !declarator->parameter_pack_p)
	  && uses_parameter_packs (type))
	{
	  /* Consume the `...'. */
	  cp_lexer_consume_token (parser->lexer);
	  maybe_warn_variadic_templates ();

	  /* Build a pack expansion type */
	  if (declarator)
	    declarator->parameter_pack_p = true;
	  else
	    decl_specifiers.type = make_pack_expansion (type);
	}
    }

  /* The restriction on defining new types applies only to the type
     of the parameter, not to the default argument.  */
  parser->type_definition_forbidden_message = saved_message;

  /* If the next token is `=', then process a default argument.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_EQ))
    {
      /* Remember the `=' token for error reporting below.  */
      token = cp_lexer_peek_token (parser->lexer);
      /* If we are defining a class, then the tokens that make up the
	 default argument must be saved and processed later.  */
      if (!template_parm_p && at_class_scope_p ()
	  && TYPE_BEING_DEFINED (current_class_type)
	  && !LAMBDA_TYPE_P (current_class_type))
	default_argument = cp_parser_cache_defarg (parser, /*nsdmi=*/false);
      /* Outside of a class definition, we can just parse the
	 assignment-expression.  */
      else
	default_argument
	  = cp_parser_default_argument (parser, template_parm_p);

      if (!parser->default_arg_ok_p)
	{
	  if (flag_permissive)
	    warning (0, "deprecated use of default argument for parameter of non-function");
	  else
	    {
	      error_at (token->location,
			"default arguments are only "
			"permitted for function parameters");
	      default_argument = NULL_TREE;
	    }
	}
      else if ((declarator && declarator->parameter_pack_p)
	       || (decl_specifiers.type
		   && PACK_EXPANSION_P (decl_specifiers.type)))
	{
	  /* Find the name of the parameter pack.  */
	  cp_declarator *id_declarator = declarator;
	  while (id_declarator && id_declarator->kind != cdk_id)
	    id_declarator = id_declarator->declarator;

	  if (id_declarator && id_declarator->kind == cdk_id)
	    error_at (declarator_token_start->location,
		      template_parm_p
		      ? G_("template parameter pack %qD "
			   "cannot have a default argument")
		      : G_("parameter pack %qD cannot have "
			   "a default argument"),
		      id_declarator->u.id.unqualified_name);
	  else
	    error_at (declarator_token_start->location,
		      template_parm_p
		      ? G_("template parameter pack cannot have "
			   "a default argument")
		      : G_("parameter pack cannot have a "
			   "default argument"));

	  default_argument = NULL_TREE;
	}
    }
  else
    default_argument = NULL_TREE;

  return make_parameter_declarator (&decl_specifiers,
				    declarator,
				    default_argument);
}
/* Parse a default argument and return it.
TEMPLATE_PARM_P is true if this is a default argument for a
non-type template parameter. */
static tree
cp_parser_default_argument (cp_parser *parser, bool template_parm_p)
{
  tree default_argument = NULL_TREE;
  bool saved_greater_than_is_operator_p;
  bool saved_local_variables_forbidden_p;
  bool non_constant_p, is_direct_init;

  /* Make sure that PARSER->GREATER_THAN_IS_OPERATOR_P is
     set correctly.  In a template parameter the first non-nested `>'
     terminates the parameter list instead.  */
  saved_greater_than_is_operator_p = parser->greater_than_is_operator_p;
  parser->greater_than_is_operator_p = !template_parm_p;
  /* Local variable names (and the `this' keyword) may not
     appear in a default argument.  */
  saved_local_variables_forbidden_p = parser->local_variables_forbidden_p;
  parser->local_variables_forbidden_p = true;
  /* Parse the assignment-expression.  */
  if (template_parm_p)
    push_deferring_access_checks (dk_no_deferred);
  tree saved_class_ptr = NULL_TREE;
  tree saved_class_ref = NULL_TREE;
  /* The "this" pointer is not valid in a default argument.  Clear it
     for the duration of the parse if we are inside a function.  */
  if (cfun)
    {
      saved_class_ptr = current_class_ptr;
      cp_function_chain->x_current_class_ptr = NULL_TREE;
      saved_class_ref = current_class_ref;
      cp_function_chain->x_current_class_ref = NULL_TREE;
    }
  default_argument
    = cp_parser_initializer (parser, &is_direct_init, &non_constant_p);
  /* Restore the "this" pointer.  */
  if (cfun)
    {
      cp_function_chain->x_current_class_ptr = saved_class_ptr;
      cp_function_chain->x_current_class_ref = saved_class_ref;
    }
  /* A braced-init-list default argument is a C++11 feature; warn in
     earlier dialects.  */
  if (BRACE_ENCLOSED_INITIALIZER_P (default_argument))
    maybe_warn_cpp0x (CPP0X_INITIALIZER_LISTS);
  if (template_parm_p)
    pop_deferring_access_checks ();
  parser->greater_than_is_operator_p = saved_greater_than_is_operator_p;
  parser->local_variables_forbidden_p = saved_local_variables_forbidden_p;
  return default_argument;
}
/* Parse a function-body.
function-body:
compound_statement */
static void
cp_parser_function_body (cp_parser *parser, bool in_function_try_block)
{
  /* A function-body is just a compound-statement; the trailing `true'
     presumably marks it as a function body for the statement
     machinery — confirm against cp_parser_compound_statement.  */
  cp_parser_compound_statement (parser, NULL, in_function_try_block, true);
}
/* Parse a ctor-initializer-opt followed by a function-body. Return
true if a ctor-initializer was present. When IN_FUNCTION_TRY_BLOCK
is true we are parsing a function-try-block. */
static bool
cp_parser_ctor_initializer_opt_and_function_body (cp_parser *parser,
						  bool in_function_try_block)
{
  tree body, list;
  bool ctor_initializer_p;
  /* constexpr constructors have body restrictions we must verify.  */
  const bool check_body_p =
     DECL_CONSTRUCTOR_P (current_function_decl)
     && DECL_DECLARED_CONSTEXPR_P (current_function_decl);
  tree last = NULL;

  /* Begin the function body.  */
  body = begin_function_body ();
  /* Parse the optional ctor-initializer.  */
  ctor_initializer_p = cp_parser_ctor_initializer_opt (parser);

  /* If we're parsing a constexpr constructor definition, we need
     to check that the constructor body is indeed empty.  However,
     before we get to cp_parser_function_body lot of junk has been
     generated, so we can't just check that we have an empty block.
     Rather we take a snapshot of the outermost block, and check whether
     cp_parser_function_body changed its state.  */
  if (check_body_p)
    {
      list = cur_stmt_list;
      if (STATEMENT_LIST_TAIL (list))
	last = STATEMENT_LIST_TAIL (list)->stmt;
    }
  /* Parse the function-body.  */
  cp_parser_function_body (parser, in_function_try_block);
  if (check_body_p)
    check_constexpr_ctor_body (last, list, /*complain=*/true);
  /* Finish the function body.  */
  finish_function_body (body);

  return ctor_initializer_p;
}
/* Parse an initializer.
initializer:
= initializer-clause
( expression-list )
Returns an expression representing the initializer. If no
initializer is present, NULL_TREE is returned.
*IS_DIRECT_INIT is set to FALSE if the `= initializer-clause'
production is used, and TRUE otherwise. *IS_DIRECT_INIT is
set to TRUE if there is no initializer present. If there is an
initializer, and it is not a constant-expression, *NON_CONSTANT_P
is set to true; otherwise it is set to false. */
static tree
cp_parser_initializer (cp_parser* parser, bool* is_direct_init,
		       bool* non_constant_p)
{
  cp_token *next = cp_lexer_peek_token (parser->lexer);
  tree result;

  /* Only the `= initializer-clause' form is copy-initialization; every
     other form is direct-initialization.  */
  *is_direct_init = (next->type != CPP_EQ);
  /* Assume that the initializer is constant.  */
  *non_constant_p = false;

  switch (next->type)
    {
    case CPP_EQ:
      /* Consume the `='.  */
      cp_lexer_consume_token (parser->lexer);
      /* Parse the initializer-clause.  */
      result = cp_parser_initializer_clause (parser, non_constant_p);
      break;

    case CPP_OPEN_PAREN:
      {
	/* A parenthesized expression-list.  */
	vec<tree, va_gc> *exprs
	  = cp_parser_parenthesized_expression_list (parser, non_attr,
						     /*cast_p=*/false,
						     /*allow_expansion_p=*/true,
						     non_constant_p);
	if (exprs == NULL)
	  return error_mark_node;
	result = build_tree_list_vec (exprs);
	release_tree_vector (exprs);
      }
      break;

    case CPP_OPEN_BRACE:
      /* A braced-init-list.  */
      cp_lexer_set_source_position (parser->lexer);
      maybe_warn_cpp0x (CPP0X_INITIALIZER_LISTS);
      result = cp_parser_braced_list (parser, non_constant_p);
      CONSTRUCTOR_IS_DIRECT_INIT (result) = 1;
      break;

    default:
      /* Anything else is an error.  */
      cp_parser_error (parser, "expected initializer");
      result = error_mark_node;
      break;
    }

  return result;
}
/* Parse an initializer-clause.
initializer-clause:
assignment-expression
braced-init-list
Returns an expression representing the initializer.
If the `assignment-expression' production is used the value
returned is simply a representation for the expression.
Otherwise, calls cp_parser_braced_list. */
static tree
cp_parser_initializer_clause (cp_parser* parser, bool* non_constant_p)
{
  /* Until we learn otherwise, the expression is constant.  */
  *non_constant_p = false;

  /* A braced-init-list starts with `{'; hand it off directly.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE))
    return cp_parser_braced_list (parser, non_constant_p);

  /* Otherwise we are looking at an assignment-expression, parsed as a
     constant-expression that is permitted to be non-constant.  */
  return cp_parser_constant_expression (parser,
					/*allow_non_constant_p=*/true,
					non_constant_p);
}
/* Parse a brace-enclosed initializer list.
braced-init-list:
{ initializer-list , [opt] }
{ }
Returns a CONSTRUCTOR. The CONSTRUCTOR_ELTS will be
the elements of the initializer-list (or NULL, if the last
production is used). The TREE_TYPE for the CONSTRUCTOR will be
NULL_TREE. There is no way to detect whether or not the optional
trailing `,' was provided. NON_CONSTANT_P is as for
cp_parser_initializer. */
static tree
cp_parser_braced_list (cp_parser* parser, bool* non_constant_p)
{
  /* Eat the opening `{'.  */
  cp_lexer_consume_token (parser->lexer);

  /* The braced-initializer is represented as a CONSTRUCTOR.  */
  tree result = make_node (CONSTRUCTOR);

  if (cp_lexer_next_token_is (parser->lexer, CPP_CLOSE_BRACE))
    /* An empty `{ }' is trivially constant.  */
    *non_constant_p = false;
  else
    {
      /* Parse the initializer list.  */
      CONSTRUCTOR_ELTS (result)
	= cp_parser_initializer_list (parser, non_constant_p);
      /* A trailing `,' token is allowed.  */
      if (cp_lexer_next_token_is (parser->lexer, CPP_COMMA))
	cp_lexer_consume_token (parser->lexer);
    }

  /* Now, there should be a trailing `}'.  */
  cp_parser_require (parser, CPP_CLOSE_BRACE, RT_CLOSE_BRACE);
  TREE_TYPE (result) = init_list_type_node;
  return result;
}
/* Consume tokens up to, and including, the next non-nested closing `]'.
Returns true iff we found a closing `]'. */
static bool
cp_parser_skip_to_closing_square_bracket (cp_parser *parser)
{
  /* Number of unmatched `[' tokens seen so far.  */
  unsigned depth = 0;

  for (;;)
    {
      cp_token *tok = cp_lexer_peek_token (parser->lexer);

      if (tok->type == CPP_EOF || tok->type == CPP_PRAGMA_EOL)
	/* If we've run out of tokens, then there is no closing `]'.  */
	return false;

      if (tok->type == CPP_OPEN_SQUARE)
	++depth;
      else if (tok->type == CPP_CLOSE_SQUARE)
	{
	  if (depth == 0)
	    {
	      /* This is the non-nested closing `]'; consume it.  */
	      cp_lexer_consume_token (parser->lexer);
	      return true;
	    }
	  --depth;
	}

      /* Consume the token and keep scanning.  */
      cp_lexer_consume_token (parser->lexer);
    }
}
/* Return true if we are looking at an array-designator, false otherwise. */
static bool
cp_parser_array_designator_p (cp_parser *parser)
{
  /* Consume the `['.  */
  cp_lexer_consume_token (parser->lexer);

  /* Checkpoint the token stream so the scan below can be undone.  */
  cp_lexer_save_tokens (parser->lexer);

  /* An array designator has the shape `[ ... ] ='; skip ahead to the
     matching `]' and check whether `=' follows it.  */
  bool found_close = cp_parser_skip_to_closing_square_bracket (parser);
  bool is_designator
    = found_close && cp_lexer_next_token_is (parser->lexer, CPP_EQ);

  /* Roll back the tokens we skipped.  */
  cp_lexer_rollback_tokens (parser->lexer);

  return is_designator;
}
/* Parse an initializer-list.
initializer-list:
initializer-clause ... [opt]
initializer-list , initializer-clause ... [opt]
GNU Extension:
initializer-list:
designation initializer-clause ...[opt]
initializer-list , designation initializer-clause ...[opt]
designation:
. identifier =
identifier :
[ constant-expression ] =
Returns a vec of constructor_elt. The VALUE of each elt is an expression
for the initializer. If the INDEX of the elt is non-NULL, it is the
IDENTIFIER_NODE naming the field to initialize. NON_CONSTANT_P is
as for cp_parser_initializer. */
static vec<constructor_elt, va_gc> *
cp_parser_initializer_list (cp_parser* parser, bool* non_constant_p)
{
  vec<constructor_elt, va_gc> *v = NULL;

  /* Assume all of the expressions are constant.  */
  *non_constant_p = false;

  /* Parse the rest of the list.  */
  while (true)
    {
      cp_token *token;
      tree designator;
      tree initializer;
      bool clause_non_constant_p;

      /* If the next token is an identifier and the following one is a
	 colon, we are looking at the GNU designated-initializer
	 syntax (`id : value').  */
      if (cp_parser_allow_gnu_extensions_p (parser)
	  && cp_lexer_next_token_is (parser->lexer, CPP_NAME)
	  && cp_lexer_peek_nth_token (parser->lexer, 2)->type == CPP_COLON)
	{
	  /* Warn the user that they are using an extension.  */
	  pedwarn (input_location, OPT_Wpedantic,
		   "ISO C++ does not allow designated initializers");
	  /* Consume the identifier.  */
	  designator = cp_lexer_consume_token (parser->lexer)->u.value;
	  /* Consume the `:'.  */
	  cp_lexer_consume_token (parser->lexer);
	}
      /* Also handle the C99 syntax, '. id ='.  */
      else if (cp_parser_allow_gnu_extensions_p (parser)
	       && cp_lexer_next_token_is (parser->lexer, CPP_DOT)
	       && cp_lexer_peek_nth_token (parser->lexer, 2)->type == CPP_NAME
	       && cp_lexer_peek_nth_token (parser->lexer, 3)->type == CPP_EQ)
	{
	  /* Warn the user that they are using an extension.  */
	  pedwarn (input_location, OPT_Wpedantic,
		   "ISO C++ does not allow C99 designated initializers");
	  /* Consume the `.'.  */
	  cp_lexer_consume_token (parser->lexer);
	  /* Consume the identifier.  */
	  designator = cp_lexer_consume_token (parser->lexer)->u.value;
	  /* Consume the `='.  */
	  cp_lexer_consume_token (parser->lexer);
	}
      /* Also handle C99 array designators, '[ const ] ='.  */
      else if (cp_parser_allow_gnu_extensions_p (parser)
	       && !c_dialect_objc ()
	       && cp_lexer_next_token_is (parser->lexer, CPP_OPEN_SQUARE))
	{
	  /* In C++11, [ could start a lambda-introducer, so this must
	     be parsed tentatively.  */
	  bool non_const = false;

	  cp_parser_parse_tentatively (parser);

	  if (!cp_parser_array_designator_p (parser))
	    {
	      cp_parser_simulate_error (parser);
	      designator = NULL_TREE;
	    }
	  else
	    {
	      designator = cp_parser_constant_expression (parser, true,
							  &non_const);
	      cp_parser_require (parser, CPP_CLOSE_SQUARE, RT_CLOSE_SQUARE);
	      cp_parser_require (parser, CPP_EQ, RT_EQ);
	    }

	  if (!cp_parser_parse_definitely (parser))
	    designator = NULL_TREE;
	  else if (non_const)
	    require_potential_rvalue_constant_expression (designator);
	}
      else
	designator = NULL_TREE;

      /* Parse the initializer.  */
      initializer = cp_parser_initializer_clause (parser,
						  &clause_non_constant_p);
      /* If any clause is non-constant, so is the entire initializer.  */
      if (clause_non_constant_p)
	*non_constant_p = true;

      /* If we have an ellipsis, this is an initializer pack
	 expansion.  */
      if (cp_lexer_next_token_is (parser->lexer, CPP_ELLIPSIS))
	{
	  /* Consume the `...'.  */
	  cp_lexer_consume_token (parser->lexer);

	  /* Turn the initializer into an initializer expansion.  */
	  initializer = make_pack_expansion (initializer);
	}

      /* Add it to the vector.  */
      CONSTRUCTOR_APPEND_ELT (v, designator, initializer);

      /* If the next token is not a comma, we have reached the end of
	 the list.  */
      if (cp_lexer_next_token_is_not (parser->lexer, CPP_COMMA))
	break;

      /* Peek at the next token.  */
      token = cp_lexer_peek_nth_token (parser->lexer, 2);
      /* If the next token is a `}', then we're still done.  An
	 initializer-clause can have a trailing `,' after the
	 initializer-list and before the closing `}'.  */
      if (token->type == CPP_CLOSE_BRACE)
	break;

      /* Consume the `,' token.  */
      cp_lexer_consume_token (parser->lexer);
    }

  return v;
}
/* Classes [gram.class] */
/* Parse a class-name.
class-name:
identifier
template-id
TYPENAME_KEYWORD_P is true iff the `typename' keyword has been used
to indicate that names looked up in dependent types should be
assumed to be types. TEMPLATE_KEYWORD_P is true iff the `template'
keyword has been used to indicate that the name that appears next
is a template. TAG_TYPE indicates the explicit tag given before
the type name, if any. If CHECK_DEPENDENCY_P is FALSE, names are
looked up in dependent scopes. If CLASS_HEAD_P is TRUE, this class
is the class being defined in a class-head.
Returns the TYPE_DECL representing the class. */
static tree
cp_parser_class_name (cp_parser *parser,
		      bool typename_keyword_p,
		      bool template_keyword_p,
		      enum tag_types tag_type,
		      bool check_dependency_p,
		      bool class_head_p,
		      bool is_declaration)
{
  tree decl;
  tree scope;
  bool typename_p;
  cp_token *token;
  tree identifier = NULL_TREE;

  /* All class-names start with an identifier.  */
  token = cp_lexer_peek_token (parser->lexer);
  if (token->type != CPP_NAME && token->type != CPP_TEMPLATE_ID)
    {
      cp_parser_error (parser, "expected class-name");
      return error_mark_node;
    }

  /* PARSER->SCOPE can be cleared when parsing the template-arguments
     to a template-id, so we save it here.  */
  scope = parser->scope;
  if (scope == error_mark_node)
    return error_mark_node;

  /* Any name names a type if we're following the `typename' keyword
     in a qualified name where the enclosing scope is type-dependent.  */
  typename_p = (typename_keyword_p && scope && TYPE_P (scope)
		&& dependent_type_p (scope));
  /* Handle the common case (an identifier, but not a template-id)
     efficiently.  */
  if (token->type == CPP_NAME
      && !cp_parser_nth_token_starts_template_argument_list_p (parser, 2))
    {
      cp_token *identifier_token;
      bool ambiguous_p;

      /* Look for the identifier.  */
      identifier_token = cp_lexer_peek_token (parser->lexer);
      ambiguous_p = identifier_token->error_reported;
      identifier = cp_parser_identifier (parser);
      /* If the next token isn't an identifier, we are certainly not
	 looking at a class-name.  */
      if (identifier == error_mark_node)
	decl = error_mark_node;
      /* If we know this is a type-name, there's no need to look it
	 up.  */
      else if (typename_p)
	decl = identifier;
      else
	{
	  tree ambiguous_decls;
	  /* If we already know that this lookup is ambiguous, then
	     we've already issued an error message; there's no reason
	     to check again.  */
	  if (ambiguous_p)
	    {
	      cp_parser_simulate_error (parser);
	      return error_mark_node;
	    }
	  /* If the next token is a `::', then the name must be a type
	     name.

	     [basic.lookup.qual]

	     During the lookup for a name preceding the :: scope
	     resolution operator, object, function, and enumerator
	     names are ignored.  */
	  if (cp_lexer_next_token_is (parser->lexer, CPP_SCOPE))
	    tag_type = typename_type;
	  /* Look up the name.  */
	  decl = cp_parser_lookup_name (parser, identifier,
					tag_type,
					/*is_template=*/false,
					/*is_namespace=*/false,
					check_dependency_p,
					&ambiguous_decls,
					identifier_token->location);
	  if (ambiguous_decls)
	    {
	      if (cp_parser_parsing_tentatively (parser))
		cp_parser_simulate_error (parser);
	      return error_mark_node;
	    }
	}
    }
  else
    {
      /* Try a template-id.  */
      decl = cp_parser_template_id (parser, template_keyword_p,
				    check_dependency_p,
				    tag_type,
				    is_declaration);
      if (decl == error_mark_node)
	return error_mark_node;
    }

  decl = cp_parser_maybe_treat_template_as_class (decl, class_head_p);

  /* If this is a typename, create a TYPENAME_TYPE.  */
  if (typename_p && decl != error_mark_node)
    {
      decl = make_typename_type (scope, decl, typename_type,
				 /*complain=*/tf_error);
      if (decl != error_mark_node)
	decl = TYPE_NAME (decl);
    }

  decl = strip_using_decl (decl);

  /* Check to see that it is really the name of a class.  */
  if (TREE_CODE (decl) == TEMPLATE_ID_EXPR
      && identifier_p (TREE_OPERAND (decl, 0))
      && cp_lexer_next_token_is (parser->lexer, CPP_SCOPE))
    /* Situations like this:

	 template <typename T> struct A {
	   typename T::template X<int>::I i;
	 };

       are problematic.  Is `T::template X<int>' a class-name?  The
       standard does not seem to be definitive, but there is no other
       valid interpretation of the following `::'.  Therefore, those
       names are considered class-names.  */
    {
      decl = make_typename_type (scope, decl, tag_type, tf_error);
      if (decl != error_mark_node)
	decl = TYPE_NAME (decl);
    }
  else if (TREE_CODE (decl) != TYPE_DECL
	   || TREE_TYPE (decl) == error_mark_node
	   || !MAYBE_CLASS_TYPE_P (TREE_TYPE (decl))
	   /* In Objective-C 2.0, a classname followed by '.' starts a
	      dot-syntax expression, and it's not a type-name.  */
	   || (c_dialect_objc ()
	       && cp_lexer_peek_token (parser->lexer)->type == CPP_DOT
	       && objc_is_class_name (decl)))
    decl = error_mark_node;

  if (decl == error_mark_node)
    cp_parser_error (parser, "expected class-name");
  else if (identifier && !parser->scope)
    maybe_note_name_used_in_class (identifier, decl);

  return decl;
}
/* Parse a class-specifier.
class-specifier:
class-head { member-specification [opt] }
Returns the TREE_TYPE representing the class. */
static tree
cp_parser_class_specifier_1 (cp_parser* parser)
{
tree type;
tree attributes = NULL_TREE;
bool nested_name_specifier_p;
/* Parser state saved here and restored near the end of the function:
template-parameter-lists, function-body/loop/switch context, and
unbraced linkage-specification context do not apply inside a
class body.  */
unsigned saved_num_template_parameter_lists;
bool saved_in_function_body;
unsigned char in_statement;
bool in_switch_statement_p;
bool saved_in_unbraced_linkage_specification_p;
tree old_scope = NULL_TREE;
tree scope = NULL_TREE;
cp_token *closing_brace;
/* Perform access checks immediately rather than deferring them; the
matching pop happens on every exit path from this function.  */
push_deferring_access_checks (dk_no_deferred);
/* Parse the class-head. */
type = cp_parser_class_head (parser,
&nested_name_specifier_p);
/* If the class-head was a semantic disaster, skip the entire body
of the class. */
if (!type)
{
cp_parser_skip_to_end_of_block_or_statement (parser);
pop_deferring_access_checks ();
return error_mark_node;
}
/* Look for the `{'. */
if (!cp_parser_require (parser, CPP_OPEN_BRACE, RT_OPEN_BRACE))
{
pop_deferring_access_checks ();
return error_mark_node;
}
/* Diagnose a pending #pragma omp declare simd that was never
attached to a function.  */
cp_ensure_no_omp_declare_simd (parser);
/* Issue an error message if type-definitions are forbidden here. */
cp_parser_check_type_definition (parser);
/* Remember that we are defining one more class. */
++parser->num_classes_being_defined;
/* Inside the class, surrounding template-parameter-lists do not
apply. */
saved_num_template_parameter_lists
= parser->num_template_parameter_lists;
parser->num_template_parameter_lists = 0;
/* We are not in a function body. */
saved_in_function_body = parser->in_function_body;
parser->in_function_body = false;
/* Or in a loop. */
in_statement = parser->in_statement;
parser->in_statement = 0;
/* Or in a switch. */
in_switch_statement_p = parser->in_switch_statement_p;
parser->in_switch_statement_p = false;
/* We are not immediately inside an extern "lang" block. */
saved_in_unbraced_linkage_specification_p
= parser->in_unbraced_linkage_specification_p;
parser->in_unbraced_linkage_specification_p = false;
/* Start the class. */
if (nested_name_specifier_p)
{
scope = CP_DECL_CONTEXT (TYPE_MAIN_DECL (type));
old_scope = push_inner_scope (scope);
}
type = begin_class_definition (type);
if (type == error_mark_node)
/* If the type is erroneous, skip the entire body of the class. */
cp_parser_skip_to_closing_brace (parser);
else
/* Parse the member-specification. */
cp_parser_member_specification_opt (parser);
/* Look for the trailing `}'. */
closing_brace = cp_parser_require (parser, CPP_CLOSE_BRACE, RT_CLOSE_BRACE);
/* Look for trailing attributes to apply to this class. */
if (cp_parser_allow_gnu_extensions_p (parser))
attributes = cp_parser_gnu_attributes_opt (parser);
if (type != error_mark_node)
type = finish_struct (type, attributes);
if (nested_name_specifier_p)
pop_inner_scope (old_scope, scope);
/* We've finished a type definition.  Check for the common syntax
error of forgetting a semicolon after the definition.  We need to
be careful, as we can't just check for not-a-semicolon and be done
with it; the user might have typed:
class X { } c = ...;
class X { } *p = ...;
and so forth.  Instead, enumerate all the possible tokens that
might follow this production; if we don't see one of them, then
complain and silently insert the semicolon. */
{
cp_token *token = cp_lexer_peek_token (parser->lexer);
bool want_semicolon = true;
if (cp_next_tokens_can_be_std_attribute_p (parser))
/* Don't try to parse c++11 attributes here.  As per the
grammar, that should be a task for
cp_parser_decl_specifier_seq.  */
want_semicolon = false;
switch (token->type)
{
case CPP_NAME:
case CPP_SEMICOLON:
case CPP_MULT:
case CPP_AND:
case CPP_OPEN_PAREN:
case CPP_CLOSE_PAREN:
case CPP_COMMA:
want_semicolon = false;
break;
/* While it's legal for type qualifiers and storage class
specifiers to follow type definitions in the grammar, only
compiler testsuites contain code like that.  Assume that if
we see such code, then what we're really seeing is a case
like:
class X { }
const <type> var = ...;
or
class Y { }
static <type> func (...) ...
i.e. the qualifier or specifier applies to the next
declaration.  To do so, however, we need to look ahead one
more token to see if *that* token is a type specifier.
This code could be improved to handle:
class Z { }
static const <type> var = ...; */
case CPP_KEYWORD:
if (keyword_is_decl_specifier (token->keyword))
{
cp_token *lookahead = cp_lexer_peek_nth_token (parser->lexer, 2);
/* Handling user-defined types here would be nice, but very
tricky.  */
want_semicolon
= (lookahead->type == CPP_KEYWORD
&& keyword_begins_type_specifier (lookahead->keyword));
}
break;
default:
break;
}
/* If we don't have a type, then something is very wrong and we
shouldn't try to do anything clever.  Likewise for not seeing the
closing brace.  */
if (closing_brace && TYPE_P (type) && want_semicolon)
{
cp_token_position prev
= cp_lexer_previous_token_position (parser->lexer);
cp_token *prev_token = cp_lexer_token_at (parser->lexer, prev);
location_t loc = prev_token->location;
if (CLASSTYPE_DECLARED_CLASS (type))
error_at (loc, "expected %<;%> after class definition");
else if (TREE_CODE (type) == RECORD_TYPE)
error_at (loc, "expected %<;%> after struct definition");
else if (TREE_CODE (type) == UNION_TYPE)
error_at (loc, "expected %<;%> after union definition");
else
gcc_unreachable ();
/* Unget one token and smash it to look as though we encountered
a semicolon in the input stream.  */
cp_lexer_set_token_position (parser->lexer, prev);
token = cp_lexer_peek_token (parser->lexer);
token->type = CPP_SEMICOLON;
token->keyword = RID_MAX;
}
}
/* If this class is not itself within the scope of another class,
then we need to parse the bodies of all of the queued function
definitions.  Note that the queued functions defined in a class
are not always processed immediately following the
class-specifier for that class.  Consider:
struct A {
struct B { void f() { sizeof (A); } };
};
If `f' were processed before the processing of `A' were
completed, there would be no way to compute the size of `A'.
Note that the nesting we are interested in here is lexical --
not the semantic nesting given by TYPE_CONTEXT.  In particular,
for:
struct A { struct B; };
struct A::B { void f() { } };
there is no need to delay the parsing of `A::B::f'.  */
if (--parser->num_classes_being_defined == 0)
{
tree decl;
tree class_type = NULL_TREE;
tree pushed_scope = NULL_TREE;
unsigned ix;
cp_default_arg_entry *e;
tree save_ccp, save_ccr;
/* In a first pass, parse default arguments to the functions.
Then, in a second pass, parse the bodies of the functions.
This two-phased approach handles cases like:
struct S {
void f() { g(); }
void g(int i = 3);
};
*/
FOR_EACH_VEC_SAFE_ELT (unparsed_funs_with_default_args, ix, e)
{
decl = e->decl;
/* If there are default arguments that have not yet been processed,
take care of them now.  */
if (class_type != e->class_type)
{
if (pushed_scope)
pop_scope (pushed_scope);
class_type = e->class_type;
pushed_scope = push_scope (class_type);
}
/* Make sure that any template parameters are in scope.  */
maybe_begin_member_template_processing (decl);
/* Parse the default argument expressions.  */
cp_parser_late_parsing_default_args (parser, decl);
/* Remove any template parameters from the symbol table.  */
maybe_end_member_template_processing ();
}
vec_safe_truncate (unparsed_funs_with_default_args, 0);
/* Now parse any NSDMIs.  */
save_ccp = current_class_ptr;
save_ccr = current_class_ref;
FOR_EACH_VEC_SAFE_ELT (unparsed_nsdmis, ix, decl)
{
if (class_type != DECL_CONTEXT (decl))
{
if (pushed_scope)
pop_scope (pushed_scope);
class_type = DECL_CONTEXT (decl);
pushed_scope = push_scope (class_type);
}
inject_this_parameter (class_type, TYPE_UNQUALIFIED);
cp_parser_late_parsing_nsdmi (parser, decl);
}
vec_safe_truncate (unparsed_nsdmis, 0);
current_class_ptr = save_ccp;
current_class_ref = save_ccr;
if (pushed_scope)
pop_scope (pushed_scope);
/* Now do some post-NSDMI bookkeeping.  */
FOR_EACH_VEC_SAFE_ELT (unparsed_classes, ix, class_type)
after_nsdmi_defaulted_late_checks (class_type);
vec_safe_truncate (unparsed_classes, 0);
/* ... and run the same checks for the class just completed.  */
after_nsdmi_defaulted_late_checks (type);
/* Now parse the body of the functions.  */
if (flag_openmp)
{
/* OpenMP UDRs need to be parsed before all other functions.  */
FOR_EACH_VEC_SAFE_ELT (unparsed_funs_with_definitions, ix, decl)
if (DECL_OMP_DECLARE_REDUCTION_P (decl))
cp_parser_late_parsing_for_member (parser, decl);
FOR_EACH_VEC_SAFE_ELT (unparsed_funs_with_definitions, ix, decl)
if (!DECL_OMP_DECLARE_REDUCTION_P (decl))
cp_parser_late_parsing_for_member (parser, decl);
}
else
FOR_EACH_VEC_SAFE_ELT (unparsed_funs_with_definitions, ix, decl)
cp_parser_late_parsing_for_member (parser, decl);
vec_safe_truncate (unparsed_funs_with_definitions, 0);
}
else
/* Lexically nested class: defer its post-NSDMI checks until the
outermost class is complete.  */
vec_safe_push (unparsed_classes, type);
/* Put back any saved access checks.  */
pop_deferring_access_checks ();
/* Restore saved state.  */
parser->in_switch_statement_p = in_switch_statement_p;
parser->in_statement = in_statement;
parser->in_function_body = saved_in_function_body;
parser->num_template_parameter_lists
= saved_num_template_parameter_lists;
parser->in_unbraced_linkage_specification_p
= saved_in_unbraced_linkage_specification_p;
return type;
}
/* Timing wrapper around cp_parser_class_specifier_1: accounts the
   time spent parsing a class-specifier to TV_PARSE_STRUCT.  */
static tree
cp_parser_class_specifier (cp_parser* parser)
{
  timevar_push (TV_PARSE_STRUCT);
  tree result = cp_parser_class_specifier_1 (parser);
  timevar_pop (TV_PARSE_STRUCT);
  return result;
}
/* Parse a class-head.
class-head:
class-key identifier [opt] base-clause [opt]
class-key nested-name-specifier identifier class-virt-specifier [opt] base-clause [opt]
class-key nested-name-specifier [opt] template-id
base-clause [opt]
class-virt-specifier:
final
GNU Extensions:
class-key attributes identifier [opt] base-clause [opt]
class-key attributes nested-name-specifier identifier base-clause [opt]
class-key attributes nested-name-specifier [opt] template-id
base-clause [opt]
Upon return BASES is initialized to the list of base classes (or
NULL, if there are none) in the same form returned by
cp_parser_base_clause.
Returns the TYPE of the indicated class. Sets
*NESTED_NAME_SPECIFIER_P to TRUE iff one of the productions
involving a nested-name-specifier was used, and FALSE otherwise.
Returns error_mark_node if this is not a class-head.
Returns NULL_TREE if the class-head is syntactically valid, but
semantically invalid in a way that means we should skip the entire
body of the class. */
static tree
cp_parser_class_head (cp_parser* parser,
bool* nested_name_specifier_p)
{
tree nested_name_specifier;
enum tag_types class_key;
tree id = NULL_TREE;
tree type = NULL_TREE;
tree attributes;
tree bases;
cp_virt_specifiers virt_specifiers = VIRT_SPEC_UNSPECIFIED;
bool template_id_p = false;
bool qualified_p = false;
bool invalid_nested_name_p = false;
bool invalid_explicit_specialization_p = false;
bool saved_colon_corrects_to_scope_p = parser->colon_corrects_to_scope_p;
tree pushed_scope = NULL_TREE;
unsigned num_templates;
cp_token *type_start_token = NULL, *nested_name_specifier_token_start = NULL;
/* Assume no nested-name-specifier will be present.  */
*nested_name_specifier_p = false;
/* Assume no template parameter lists will be used in defining the
type.  */
num_templates = 0;
parser->colon_corrects_to_scope_p = false;
/* Look for the class-key.  */
class_key = cp_parser_class_key (parser);
if (class_key == none_type)
return error_mark_node;
/* Parse the attributes.  */
attributes = cp_parser_attributes_opt (parser);
/* If the next token is `::', that is invalid -- but sometimes
people do try to write:
struct ::S {};
Handle this gracefully by accepting the extra qualifier, and then
issuing an error about it later if this really is a
class-head.  If it turns out just to be an elaborated type
specifier, remain silent.  */
if (cp_parser_global_scope_opt (parser, /*current_scope_valid_p=*/false))
qualified_p = true;
/* Access checks are suppressed while working out the name of the
class; re-enabled by the matching pop below.  */
push_deferring_access_checks (dk_no_check);
/* Determine the name of the class.  Begin by looking for an
optional nested-name-specifier.  */
nested_name_specifier_token_start = cp_lexer_peek_token (parser->lexer);
nested_name_specifier
= cp_parser_nested_name_specifier_opt (parser,
/*typename_keyword_p=*/false,
/*check_dependency_p=*/false,
/*type_p=*/true,
/*is_declaration=*/false);
/* If there was a nested-name-specifier, then there *must* be an
identifier.  */
if (nested_name_specifier)
{
type_start_token = cp_lexer_peek_token (parser->lexer);
/* Although the grammar says `identifier', it really means
`class-name' or `template-name'.  You are only allowed to
define a class that has already been declared with this
syntax.
The proposed resolution for Core Issue 180 says that wherever
you see `class T::X' you should treat `X' as a type-name.
It is OK to define an inaccessible class; for example:
class A { class B; };
class A::B {};
We do not know if we will see a class-name, or a
template-name.  We look for a class-name first, in case the
class-name is a template-id; if we looked for the
template-name first we would stop after the template-name.  */
cp_parser_parse_tentatively (parser);
type = cp_parser_class_name (parser,
/*typename_keyword_p=*/false,
/*template_keyword_p=*/false,
class_type,
/*check_dependency_p=*/false,
/*class_head_p=*/true,
/*is_declaration=*/false);
/* If that didn't work, ignore the nested-name-specifier.  */
if (!cp_parser_parse_definitely (parser))
{
invalid_nested_name_p = true;
type_start_token = cp_lexer_peek_token (parser->lexer);
id = cp_parser_identifier (parser);
if (id == error_mark_node)
id = NULL_TREE;
}
/* If we could not find a corresponding TYPE, treat this
declaration like an unqualified declaration.  */
if (type == error_mark_node)
nested_name_specifier = NULL_TREE;
/* Otherwise, count the number of templates used in TYPE and its
containing scopes.  */
else
{
tree scope;
for (scope = TREE_TYPE (type);
scope && TREE_CODE (scope) != NAMESPACE_DECL;
scope = get_containing_scope (scope))
if (TYPE_P (scope)
&& CLASS_TYPE_P (scope)
&& CLASSTYPE_TEMPLATE_INFO (scope)
&& PRIMARY_TEMPLATE_P (CLASSTYPE_TI_TEMPLATE (scope))
&& (!CLASSTYPE_TEMPLATE_SPECIALIZATION (scope)
|| uses_template_parms (CLASSTYPE_TI_ARGS (scope))))
++num_templates;
}
}
/* Otherwise, the identifier is optional.  */
else
{
/* We don't know whether what comes next is a template-id,
an identifier, or nothing at all.  */
cp_parser_parse_tentatively (parser);
/* Check for a template-id.  */
type_start_token = cp_lexer_peek_token (parser->lexer);
id = cp_parser_template_id (parser,
/*template_keyword_p=*/false,
/*check_dependency_p=*/true,
class_key,
/*is_declaration=*/true);
/* If that didn't work, it could still be an identifier.  */
if (!cp_parser_parse_definitely (parser))
{
if (cp_lexer_next_token_is (parser->lexer, CPP_NAME))
{
type_start_token = cp_lexer_peek_token (parser->lexer);
id = cp_parser_identifier (parser);
}
else
id = NULL_TREE;
}
else
{
template_id_p = true;
++num_templates;
}
}
/* Re-enable access checking.  */
pop_deferring_access_checks ();
if (id)
{
cp_parser_check_for_invalid_template_id (parser, id,
class_key,
type_start_token->location);
}
/* Parse an optional virt-specifier-seq (e.g. `final') after the
name.  */
virt_specifiers = cp_parser_virt_specifier_seq_opt (parser);
/* If it's not a `:' or a `{' then we can't really be looking at a
class-head, since a class-head only appears as part of a
class-specifier.  We have to detect this situation before calling
xref_tag, since that has irreversible side-effects.  */
if (!cp_parser_next_token_starts_class_definition_p (parser))
{
cp_parser_error (parser, "expected %<{%> or %<:%>");
type = error_mark_node;
goto out;
}
/* At this point, we're going ahead with the class-specifier, even
if some other problem occurs.  */
cp_parser_commit_to_tentative_parse (parser);
if (virt_specifiers & VIRT_SPEC_OVERRIDE)
{
cp_parser_error (parser,
"cannot specify %<override%> for a class");
type = error_mark_node;
goto out;
}
/* Issue the error about the overly-qualified name now.  */
if (qualified_p)
{
cp_parser_error (parser,
"global qualification of class name is invalid");
type = error_mark_node;
goto out;
}
else if (invalid_nested_name_p)
{
cp_parser_error (parser,
"qualified name does not name a class");
type = error_mark_node;
goto out;
}
else if (nested_name_specifier)
{
tree scope;
/* Reject typedef-names in class heads.  */
if (!DECL_IMPLICIT_TYPEDEF_P (type))
{
error_at (type_start_token->location,
"invalid class name in declaration of %qD",
type);
type = NULL_TREE;
goto done;
}
/* Figure out in what scope the declaration is being placed.  */
scope = current_scope ();
/* If that scope does not contain the scope in which the
class was originally declared, the program is invalid.  */
if (scope && !is_ancestor (scope, nested_name_specifier))
{
if (at_namespace_scope_p ())
error_at (type_start_token->location,
"declaration of %qD in namespace %qD which does not "
"enclose %qD",
type, scope, nested_name_specifier);
else
error_at (type_start_token->location,
"declaration of %qD in %qD which does not enclose %qD",
type, scope, nested_name_specifier);
type = NULL_TREE;
goto done;
}
/* [dcl.meaning]
A declarator-id shall not be qualified except for the
definition of a ... nested class outside of its class
... [or] the definition or explicit instantiation of a
class member of a namespace outside of its namespace.  */
if (scope == nested_name_specifier)
{
permerror (nested_name_specifier_token_start->location,
"extra qualification not allowed");
nested_name_specifier = NULL_TREE;
num_templates = 0;
}
}
/* An explicit-specialization must be preceded by "template <>".  If
it is not, try to recover gracefully.  */
if (at_namespace_scope_p ()
&& parser->num_template_parameter_lists == 0
&& template_id_p)
{
error_at (type_start_token->location,
"an explicit specialization must be preceded by %<template <>%>");
invalid_explicit_specialization_p = true;
/* Take the same action that would have been taken by
cp_parser_explicit_specialization.  */
++parser->num_template_parameter_lists;
begin_specialization ();
}
/* There must be no "return" statements between this point and the
end of this function; set "type" to the correct return value and
use "goto done;" to return.  */
/* Make sure that the right number of template parameters were
present.  */
if (!cp_parser_check_template_parameters (parser, num_templates,
type_start_token->location,
/*declarator=*/NULL))
{
/* If something went wrong, there is no point in even trying to
process the class-definition.  */
type = NULL_TREE;
goto done;
}
/* Look up the type.  */
if (template_id_p)
{
if (TREE_CODE (id) == TEMPLATE_ID_EXPR
&& (DECL_FUNCTION_TEMPLATE_P (TREE_OPERAND (id, 0))
|| TREE_CODE (TREE_OPERAND (id, 0)) == OVERLOAD))
{
error_at (type_start_token->location,
"function template %qD redeclared as a class template", id);
type = error_mark_node;
}
else
{
type = TREE_TYPE (id);
type = maybe_process_partial_specialization (type);
}
if (nested_name_specifier)
pushed_scope = push_scope (nested_name_specifier);
}
else if (nested_name_specifier)
{
tree class_type;
/* Given:
template <typename T> struct S { struct T };
template <typename T> struct S<T>::T { };
we will get a TYPENAME_TYPE when processing the definition of
`S::T'.  We need to resolve it to the actual type before we
try to define it.  */
if (TREE_CODE (TREE_TYPE (type)) == TYPENAME_TYPE)
{
class_type = resolve_typename_type (TREE_TYPE (type),
/*only_current_p=*/false);
if (TREE_CODE (class_type) != TYPENAME_TYPE)
type = TYPE_NAME (class_type);
else
{
cp_parser_error (parser, "could not resolve typename type");
type = error_mark_node;
}
}
if (maybe_process_partial_specialization (TREE_TYPE (type))
== error_mark_node)
{
type = NULL_TREE;
goto done;
}
class_type = current_class_type;
/* Enter the scope indicated by the nested-name-specifier.  */
pushed_scope = push_scope (nested_name_specifier);
/* Get the canonical version of this type.  */
type = TYPE_MAIN_DECL (TREE_TYPE (type));
/* Call push_template_decl if it seems like we should be defining a
template either from the template headers or the type we're
defining, so that we diagnose both extra and missing headers.  */
if ((PROCESSING_REAL_TEMPLATE_DECL_P ()
|| CLASSTYPE_TEMPLATE_INFO (TREE_TYPE (type)))
&& !CLASSTYPE_TEMPLATE_SPECIALIZATION (TREE_TYPE (type)))
{
type = push_template_decl (type);
if (type == error_mark_node)
{
type = NULL_TREE;
goto done;
}
}
type = TREE_TYPE (type);
*nested_name_specifier_p = true;
}
else /* The name is not a nested name.  */
{
/* If the class was unnamed, create a dummy name.  */
if (!id)
id = make_anon_name ();
type = xref_tag (class_key, id, /*tag_scope=*/ts_current,
parser->num_template_parameter_lists);
}
/* Indicate whether this class was declared as a `class' or as a
`struct'.  */
if (TREE_CODE (type) == RECORD_TYPE)
CLASSTYPE_DECLARED_CLASS (type) = (class_key == class_type);
cp_parser_check_class_key (class_key, type);
/* If this type was already complete, and we see another definition,
that's an error.  */
if (type != error_mark_node && COMPLETE_TYPE_P (type))
{
error_at (type_start_token->location, "redefinition of %q#T",
type);
error_at (type_start_token->location, "previous definition of %q+#T",
type);
type = NULL_TREE;
goto done;
}
else if (type == error_mark_node)
type = NULL_TREE;
if (type)
{
/* Apply attributes now, before any use of the class as a template
argument in its base list.  */
cplus_decl_attributes (&type, attributes, (int)ATTR_FLAG_TYPE_IN_PLACE);
fixup_attribute_variants (type);
}
/* We will have entered the scope containing the class; the names of
base classes should be looked up in that context.  For example:
struct A { struct B {}; struct C; };
struct A::C : B {};
is valid.  */
/* Get the list of base-classes, if there is one.  */
if (cp_lexer_next_token_is (parser->lexer, CPP_COLON))
{
/* PR59482: enter the class scope so that base-specifiers are looked
up correctly.  */
if (type)
pushclass (type);
bases = cp_parser_base_clause (parser);
/* PR59482: get out of the previously pushed class scope so that the
subsequent pops pop the right thing.  */
if (type)
popclass ();
}
else
bases = NULL_TREE;
/* If we're really defining a class, process the base classes.
If they're invalid, fail.  */
if (type && cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE)
&& !xref_basetypes (type, bases))
type = NULL_TREE;
done:
/* Leave the scope given by the nested-name-specifier.  We will
enter the class scope itself while processing the members.  */
if (pushed_scope)
pop_scope (pushed_scope);
if (invalid_explicit_specialization_p)
{
end_specialization ();
--parser->num_template_parameter_lists;
}
if (type)
DECL_SOURCE_LOCATION (TYPE_NAME (type)) = type_start_token->location;
if (type && (virt_specifiers & VIRT_SPEC_FINAL))
CLASSTYPE_FINAL (type) = 1;
out:
parser->colon_corrects_to_scope_p = saved_colon_corrects_to_scope_p;
return type;
}
/* Parse a class-key.
class-key:
class
struct
union
Returns the kind of class-key specified, or none_type to indicate
error. */
/* Parse a class-key (`class', `struct', or `union') and return the
   corresponding tag type, or none_type on error.  */
static enum tag_types
cp_parser_class_key (cp_parser* parser)
{
  /* A class-key must begin with a keyword token; cp_parser_require
     has already diagnosed its absence.  */
  cp_token *key_token = cp_parser_require (parser, CPP_KEYWORD, RT_CLASS_KEY);
  if (!key_token)
    return none_type;

  /* Map the keyword onto a tag type; a zero result means the keyword
     was not actually a class-key.  */
  enum tag_types kind = cp_parser_token_is_class_key (key_token);
  if (!kind)
    cp_parser_error (parser, "expected class-key");
  return kind;
}
/* Parse a type-parameter-key.
type-parameter-key:
class
typename
*/
/* Parse a type-parameter-key (`class' or `typename'), consuming the
   token on success and diagnosing its absence otherwise.  */
static void
cp_parser_type_parameter_key (cp_parser* parser)
{
  cp_token *token = cp_lexer_peek_token (parser->lexer);
  enum tag_types tag_type = cp_parser_token_is_type_parameter_key (token);

  if (tag_type == none_type)
    {
      /* Neither `class' nor `typename' was found.  */
      cp_parser_error (parser, "expected %<class%> or %<typename%>");
      return;
    }

  cp_lexer_consume_token (parser->lexer);
  /* typename is not allowed in a template template parameter
     by the standard until C++1Z.  */
  if (pedantic && tag_type == typename_type && cxx_dialect < cxx1z)
    pedwarn (token->location, OPT_Wpedantic,
	     "ISO C++ forbids typename key in template template parameter;"
	     " use -std=c++1z or -std=gnu++1z");
}
/* Parse an (optional) member-specification.
member-specification:
member-declaration member-specification [opt]
access-specifier : member-specification [opt] */
/* Parse an (optional) member-specification: a sequence of
   access-specifiers, member-scope pragmas, and member-declarations,
   terminated by `}', EOF, or a pragma end-of-line.  */
static void
cp_parser_member_specification_opt (cp_parser* parser)
{
  for (;;)
    {
      cp_token *tok = cp_lexer_peek_token (parser->lexer);

      /* A `}' or EOF means we have seen all the members.  */
      if (tok->type == CPP_CLOSE_BRACE
	  || tok->type == CPP_EOF
	  || tok->type == CPP_PRAGMA_EOL)
	return;

      enum rid kw = tok->keyword;
      if (kw == RID_PUBLIC || kw == RID_PROTECTED || kw == RID_PRIVATE)
	{
	  /* An access-specifier: consume it, record which one is now
	     active, and require the following `:'.  */
	  cp_lexer_consume_token (parser->lexer);
	  current_access_specifier = tok->u.value;
	  cp_parser_require (parser, CPP_COLON, RT_COLON);
	}
      else if (tok->type == CPP_PRAGMA)
	/* Accept #pragmas at class scope.  */
	cp_parser_pragma (parser, pragma_member);
      else
	/* Otherwise, the next construction must be a
	   member-declaration.  */
	cp_parser_member_declaration (parser);
    }
}
/* Parse a member-declaration.
member-declaration:
decl-specifier-seq [opt] member-declarator-list [opt] ;
function-definition ; [opt]
:: [opt] nested-name-specifier template [opt] unqualified-id ;
using-declaration
template-declaration
alias-declaration
member-declarator-list:
member-declarator
member-declarator-list , member-declarator
member-declarator:
declarator pure-specifier [opt]
declarator constant-initializer [opt]
identifier [opt] : constant-expression
GNU Extensions:
member-declaration:
__extension__ member-declaration
member-declarator:
declarator attributes [opt] pure-specifier [opt]
declarator attributes [opt] constant-initializer [opt]
identifier [opt] attributes [opt] : constant-expression
C++0x Extensions:
member-declaration:
static_assert-declaration */
static void
cp_parser_member_declaration (cp_parser* parser)
{
cp_decl_specifier_seq decl_specifiers;
tree prefix_attributes;
tree decl;
int declares_class_or_enum;
bool friend_p;
cp_token *token = NULL;
cp_token *decl_spec_token_start = NULL;
cp_token *initializer_token_start = NULL;
int saved_pedantic;
bool saved_colon_corrects_to_scope_p = parser->colon_corrects_to_scope_p;
/* Check for the `__extension__' keyword. */
if (cp_parser_extension_opt (parser, &saved_pedantic))
{
/* Recurse. */
cp_parser_member_declaration (parser);
/* Restore the old value of the PEDANTIC flag. */
pedantic = saved_pedantic;
return;
}
/* Check for a template-declaration. */
if (cp_lexer_next_token_is_keyword (parser->lexer, RID_TEMPLATE))
{
/* An explicit specialization here is an error condition, and we
expect the specialization handler to detect and report this. */
if (cp_lexer_peek_nth_token (parser->lexer, 2)->type == CPP_LESS
&& cp_lexer_peek_nth_token (parser->lexer, 3)->type == CPP_GREATER)
cp_parser_explicit_specialization (parser);
else
cp_parser_template_declaration (parser, /*member_p=*/true);
return;
}
/* Check for a using-declaration. */
if (cp_lexer_next_token_is_keyword (parser->lexer, RID_USING))
{
if (cxx_dialect < cxx11)
{
/* Parse the using-declaration. */
cp_parser_using_declaration (parser,
/*access_declaration_p=*/false);
return;
}
else
{
tree decl;
bool alias_decl_expected;
cp_parser_parse_tentatively (parser);
decl = cp_parser_alias_declaration (parser);
/* Note that if we actually see the '=' token after the
identifier, cp_parser_alias_declaration commits the
tentative parse. In that case, we really expect an
alias-declaration. Otherwise, we expect a using
declaration. */
alias_decl_expected =
!cp_parser_uncommitted_to_tentative_parse_p (parser);
cp_parser_parse_definitely (parser);
if (alias_decl_expected)
finish_member_declaration (decl);
else
cp_parser_using_declaration (parser,
/*access_declaration_p=*/false);
return;
}
}
/* Check for @defs. */
if (cp_lexer_next_token_is_keyword (parser->lexer, RID_AT_DEFS))
{
tree ivar, member;
tree ivar_chains = cp_parser_objc_defs_expression (parser);
ivar = ivar_chains;
while (ivar)
{
member = ivar;
ivar = TREE_CHAIN (member);
TREE_CHAIN (member) = NULL_TREE;
finish_member_declaration (member);
}
return;
}
/* If the next token is `static_assert' we have a static assertion. */
if (cp_lexer_next_token_is_keyword (parser->lexer, RID_STATIC_ASSERT))
{
cp_parser_static_assert (parser, /*member_p=*/true);
return;
}
parser->colon_corrects_to_scope_p = false;
if (cp_parser_using_declaration (parser, /*access_declaration=*/true))
goto out;
/* Parse the decl-specifier-seq. */
decl_spec_token_start = cp_lexer_peek_token (parser->lexer);
cp_parser_decl_specifier_seq (parser,
CP_PARSER_FLAGS_OPTIONAL,
&decl_specifiers,
&declares_class_or_enum);
/* Check for an invalid type-name. */
if (!decl_specifiers.any_type_specifiers_p
&& cp_parser_parse_and_diagnose_invalid_type_name (parser))
goto out;
/* If there is no declarator, then the decl-specifier-seq should
specify a type. */
if (cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON))
{
/* If there was no decl-specifier-seq, and the next token is a
`;', then we have something like:
struct S { ; };
[class.mem]
Each member-declaration shall declare at least one member
name of the class. */
if (!decl_specifiers.any_specifiers_p)
{
cp_token *token = cp_lexer_peek_token (parser->lexer);
if (!in_system_header_at (token->location))
pedwarn (token->location, OPT_Wpedantic, "extra %<;%>");
}
else
{
tree type;
/* See if this declaration is a friend. */
friend_p = cp_parser_friend_p (&decl_specifiers);
/* If there were decl-specifiers, check to see if there was
a class-declaration. */
type = check_tag_decl (&decl_specifiers,
/*explicit_type_instantiation_p=*/false);
/* Nested classes have already been added to the class, but
a `friend' needs to be explicitly registered. */
if (friend_p)
{
/* If the `friend' keyword was present, the friend must
be introduced with a class-key. */
if (!declares_class_or_enum && cxx_dialect < cxx11)
pedwarn (decl_spec_token_start->location, OPT_Wpedantic,
"in C++03 a class-key must be used "
"when declaring a friend");
/* In this case:
template <typename T> struct A {
friend struct A<T>::B;
};
A<T>::B will be represented by a TYPENAME_TYPE, and
therefore not recognized by check_tag_decl. */
if (!type)
{
type = decl_specifiers.type;
if (type && TREE_CODE (type) == TYPE_DECL)
type = TREE_TYPE (type);
}
if (!type || !TYPE_P (type))
error_at (decl_spec_token_start->location,
"friend declaration does not name a class or "
"function");
else
make_friend_class (current_class_type, type,
/*complain=*/true);
}
/* If there is no TYPE, an error message will already have
been issued. */
else if (!type || type == error_mark_node)
;
/* An anonymous aggregate has to be handled specially; such
a declaration really declares a data member (with a
particular type), as opposed to a nested class. */
else if (ANON_AGGR_TYPE_P (type))
{
/* C++11 9.5/6. */
if (decl_specifiers.storage_class != sc_none)
error_at (decl_spec_token_start->location,
"a storage class on an anonymous aggregate "
"in class scope is not allowed");
/* Remove constructors and such from TYPE, now that we
know it is an anonymous aggregate. */
fixup_anonymous_aggr (type);
/* And make the corresponding data member. */
decl = build_decl (decl_spec_token_start->location,
FIELD_DECL, NULL_TREE, type);
/* Add it to the class. */
finish_member_declaration (decl);
}
else
cp_parser_check_access_in_redeclaration
(TYPE_NAME (type),
decl_spec_token_start->location);
}
}
else
{
bool assume_semicolon = false;
/* Clear attributes from the decl_specifiers but keep them
around as prefix attributes that apply them to the entity
being declared. */
prefix_attributes = decl_specifiers.attributes;
decl_specifiers.attributes = NULL_TREE;
/* See if these declarations will be friends. */
friend_p = cp_parser_friend_p (&decl_specifiers);
/* Keep going until we hit the `;' at the end of the
declaration. */
while (cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON))
{
tree attributes = NULL_TREE;
tree first_attribute;
/* Peek at the next token. */
token = cp_lexer_peek_token (parser->lexer);
/* Check for a bitfield declaration. */
if (token->type == CPP_COLON
|| (token->type == CPP_NAME
&& cp_lexer_peek_nth_token (parser->lexer, 2)->type
== CPP_COLON))
{
tree identifier;
tree width;
/* Get the name of the bitfield. Note that we cannot just
check TOKEN here because it may have been invalidated by
the call to cp_lexer_peek_nth_token above. */
if (cp_lexer_peek_token (parser->lexer)->type != CPP_COLON)
identifier = cp_parser_identifier (parser);
else
identifier = NULL_TREE;
/* Consume the `:' token. */
cp_lexer_consume_token (parser->lexer);
/* Get the width of the bitfield. */
width
= cp_parser_constant_expression (parser);
/* Look for attributes that apply to the bitfield. */
attributes = cp_parser_attributes_opt (parser);
/* Remember which attributes are prefix attributes and
which are not. */
first_attribute = attributes;
/* Combine the attributes. */
attributes = chainon (prefix_attributes, attributes);
/* Create the bitfield declaration. */
decl = grokbitfield (identifier
? make_id_declarator (NULL_TREE,
identifier,
sfk_none)
: NULL,
&decl_specifiers,
width,
attributes);
}
else
{
cp_declarator *declarator;
tree initializer;
tree asm_specification;
int ctor_dtor_or_conv_p;
/* Parse the declarator. */
declarator
= cp_parser_declarator (parser, CP_PARSER_DECLARATOR_NAMED,
&ctor_dtor_or_conv_p,
/*parenthesized_p=*/NULL,
/*member_p=*/true,
friend_p);
/* If something went wrong parsing the declarator, make sure
that we at least consume some tokens. */
if (declarator == cp_error_declarator)
{
/* Skip to the end of the statement. */
cp_parser_skip_to_end_of_statement (parser);
/* If the next token is not a semicolon, that is
probably because we just skipped over the body of
a function. So, we consume a semicolon if
present, but do not issue an error message if it
is not present. */
if (cp_lexer_next_token_is (parser->lexer,
CPP_SEMICOLON))
cp_lexer_consume_token (parser->lexer);
goto out;
}
if (declares_class_or_enum & 2)
cp_parser_check_for_definition_in_return_type
(declarator, decl_specifiers.type,
decl_specifiers.locations[ds_type_spec]);
/* Look for an asm-specification. */
asm_specification = cp_parser_asm_specification_opt (parser);
/* Look for attributes that apply to the declaration. */
attributes = cp_parser_attributes_opt (parser);
/* Remember which attributes are prefix attributes and
which are not. */
first_attribute = attributes;
/* Combine the attributes. */
attributes = chainon (prefix_attributes, attributes);
/* If it's an `=', then we have a constant-initializer or a
pure-specifier. It is not correct to parse the
initializer before registering the member declaration
since the member declaration should be in scope while
its initializer is processed. However, the rest of the
front end does not yet provide an interface that allows
us to handle this correctly. */
if (cp_lexer_next_token_is (parser->lexer, CPP_EQ))
{
/* In [class.mem]:
A pure-specifier shall be used only in the declaration of
a virtual function.
A member-declarator can contain a constant-initializer
only if it declares a static member of integral or
enumeration type.
Therefore, if the DECLARATOR is for a function, we look
for a pure-specifier; otherwise, we look for a
constant-initializer. When we call `grokfield', it will
perform more stringent semantics checks. */
initializer_token_start = cp_lexer_peek_token (parser->lexer);
if (function_declarator_p (declarator)
|| (decl_specifiers.type
&& TREE_CODE (decl_specifiers.type) == TYPE_DECL
&& declarator->kind == cdk_id
&& (TREE_CODE (TREE_TYPE (decl_specifiers.type))
== FUNCTION_TYPE)))
initializer = cp_parser_pure_specifier (parser);
else if (decl_specifiers.storage_class != sc_static)
initializer = cp_parser_save_nsdmi (parser);
else if (cxx_dialect >= cxx11)
{
bool nonconst;
/* Don't require a constant rvalue in C++11, since we
might want a reference constant. We'll enforce
constancy later. */
cp_lexer_consume_token (parser->lexer);
/* Parse the initializer. */
initializer = cp_parser_initializer_clause (parser,
&nonconst);
}
else
/* Parse the initializer. */
initializer = cp_parser_constant_initializer (parser);
}
else if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE)
&& !function_declarator_p (declarator))
{
bool x;
if (decl_specifiers.storage_class != sc_static)
initializer = cp_parser_save_nsdmi (parser);
else
initializer = cp_parser_initializer (parser, &x, &x);
}
/* Otherwise, there is no initializer. */
else
initializer = NULL_TREE;
/* See if we are probably looking at a function
definition. We are certainly not looking at a
member-declarator. Calling `grokfield' has
side-effects, so we must not do it unless we are sure
that we are looking at a member-declarator. */
if (cp_parser_token_starts_function_definition_p
(cp_lexer_peek_token (parser->lexer)))
{
/* The grammar does not allow a pure-specifier to be
used when a member function is defined. (It is
possible that this fact is an oversight in the
standard, since a pure function may be defined
outside of the class-specifier. */
if (initializer && initializer_token_start)
error_at (initializer_token_start->location,
"pure-specifier on function-definition");
decl = cp_parser_save_member_function_body (parser,
&decl_specifiers,
declarator,
attributes);
if (parser->fully_implicit_function_template_p)
decl = finish_fully_implicit_template (parser, decl);
/* If the member was not a friend, declare it here. */
if (!friend_p)
finish_member_declaration (decl);
/* Peek at the next token. */
token = cp_lexer_peek_token (parser->lexer);
/* If the next token is a semicolon, consume it. */
if (token->type == CPP_SEMICOLON)
cp_lexer_consume_token (parser->lexer);
goto out;
}
else
if (declarator->kind == cdk_function)
declarator->id_loc = token->location;
/* Create the declaration. */
decl = grokfield (declarator, &decl_specifiers,
initializer, /*init_const_expr_p=*/true,
asm_specification, attributes);
if (parser->fully_implicit_function_template_p)
{
if (friend_p)
finish_fully_implicit_template (parser, 0);
else
decl = finish_fully_implicit_template (parser, decl);
}
}
cp_finalize_omp_declare_simd (parser, decl);
/* Reset PREFIX_ATTRIBUTES. */
while (attributes && TREE_CHAIN (attributes) != first_attribute)
attributes = TREE_CHAIN (attributes);
if (attributes)
TREE_CHAIN (attributes) = NULL_TREE;
/* If there is any qualification still in effect, clear it
now; we will be starting fresh with the next declarator. */
parser->scope = NULL_TREE;
parser->qualifying_scope = NULL_TREE;
parser->object_scope = NULL_TREE;
/* If it's a `,', then there are more declarators. */
if (cp_lexer_next_token_is (parser->lexer, CPP_COMMA))
{
cp_lexer_consume_token (parser->lexer);
if (cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON))
{
cp_token *token = cp_lexer_previous_token (parser->lexer);
error_at (token->location,
"stray %<,%> at end of member declaration");
}
}
/* If the next token isn't a `;', then we have a parse error. */
else if (cp_lexer_next_token_is_not (parser->lexer,
CPP_SEMICOLON))
{
/* The next token might be a ways away from where the
actual semicolon is missing. Find the previous token
and use that for our error position. */
cp_token *token = cp_lexer_previous_token (parser->lexer);
error_at (token->location,
"expected %<;%> at end of member declaration");
/* Assume that the user meant to provide a semicolon. If
we were to cp_parser_skip_to_end_of_statement, we might
skip to a semicolon inside a member function definition
and issue nonsensical error messages. */
assume_semicolon = true;
}
if (decl)
{
/* Add DECL to the list of members. */
if (!friend_p
/* Explicitly include, eg, NSDMIs, for better error
recovery (c++/58650). */
|| !DECL_DECLARES_FUNCTION_P (decl))
finish_member_declaration (decl);
if (TREE_CODE (decl) == FUNCTION_DECL)
cp_parser_save_default_args (parser, decl);
else if (TREE_CODE (decl) == FIELD_DECL
&& !DECL_C_BIT_FIELD (decl)
&& DECL_INITIAL (decl))
/* Add DECL to the queue of NSDMI to be parsed later. */
vec_safe_push (unparsed_nsdmis, decl);
}
if (assume_semicolon)
goto out;
}
}
cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON);
out:
parser->colon_corrects_to_scope_p = saved_colon_corrects_to_scope_p;
}
/* Parse a pure-specifier.
   pure-specifier:
     = 0
   Returns INTEGER_ZERO_NODE if a pure specifier is found.
   Otherwise, ERROR_MARK_NODE is returned.
   As an extension of this grammar, `= default' and `= delete' are
   also accepted here (C++11 defaulted/deleted functions); in that
   case the `default'/`delete' identifier node is returned.  */
static tree
cp_parser_pure_specifier (cp_parser* parser)
{
  cp_token *token;
  /* Look for the `=' token.  */
  if (!cp_parser_require (parser, CPP_EQ, RT_EQ))
    return error_mark_node;
  /* Look for the `0' token.  */
  token = cp_lexer_peek_token (parser->lexer);
  /* Never consume an end-of-file or end-of-pragma marker; doing so
     would desynchronize the token stream.  */
  if (token->type == CPP_EOF
      || token->type == CPP_PRAGMA_EOL)
    return error_mark_node;
  cp_lexer_consume_token (parser->lexer);
  /* Accept = default or = delete in c++0x mode.  */
  if (token->keyword == RID_DEFAULT
      || token->keyword == RID_DELETE)
    {
      maybe_warn_cpp0x (CPP0X_DEFAULTED_DELETED);
      return token->u.value;
    }
  /* c_lex_with_flags marks a single digit '0' with PURE_ZERO, so we
     can reject look-alikes such as `= 0x0' or `= 00' here.  */
  if (token->type != CPP_NUMBER || !(token->flags & PURE_ZERO))
    {
      cp_parser_error (parser,
		       "invalid pure specifier (only %<= 0%> is allowed)");
      cp_parser_skip_to_end_of_statement (parser);
      return error_mark_node;
    }
  /* A member function template cannot be virtual.  */
  if (PROCESSING_REAL_TEMPLATE_DECL_P ())
    {
      error_at (token->location, "templates may not be %<virtual%>");
      return error_mark_node;
    }
  return integer_zero_node;
}
/* Parse a constant-initializer.
   constant-initializer:
     = constant-expression
   Returns a representation of the constant-expression, or
   ERROR_MARK_NODE if the `=' is missing or a brace-enclosed
   initializer is used where it is not permitted.  */
static tree
cp_parser_constant_initializer (cp_parser* parser)
{
  /* Look for the `=' token.  */
  if (!cp_parser_require (parser, CPP_EQ, RT_EQ))
    return error_mark_node;
  /* It is invalid to write:
       struct S { static const int i = { 7 }; };
   */
  if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE))
    {
      cp_parser_error (parser,
		       "a brace-enclosed initializer is not allowed here");
      /* Consume the opening brace.  */
      cp_lexer_consume_token (parser->lexer);
      /* Skip the initializer so parsing can resume after it.  */
      cp_parser_skip_to_closing_brace (parser);
      /* Look for the trailing `}'.  */
      cp_parser_require (parser, CPP_CLOSE_BRACE, RT_CLOSE_BRACE);
      return error_mark_node;
    }
  return cp_parser_constant_expression (parser);
}
/* Derived classes [gram.class.derived] */
/* Parse a base-clause.
   base-clause:
     : base-specifier-list
   base-specifier-list:
     base-specifier ... [opt]
     base-specifier-list , base-specifier ... [opt]
   Returns a TREE_LIST representing the base-classes, in the order in
   which they were declared.  The representation of each node is as
   described by cp_parser_base_specifier.
   In the case that no bases are specified, this function will return
   NULL_TREE, not ERROR_MARK_NODE.  */
static tree
cp_parser_base_clause (cp_parser* parser)
{
  tree bases = NULL_TREE;
  /* Look for the `:' that begins the list.  */
  cp_parser_require (parser, CPP_COLON, RT_COLON);
  /* Scan the base-specifier-list.  */
  while (true)
    {
      cp_token *token;
      tree base;
      bool pack_expansion_p = false;
      /* Look for the base-specifier.  */
      base = cp_parser_base_specifier (parser);
      /* Look for the (optional) ellipsis.  */
      if (cp_lexer_next_token_is (parser->lexer, CPP_ELLIPSIS))
	{
	  /* Consume the `...'.  */
	  cp_lexer_consume_token (parser->lexer);
	  pack_expansion_p = true;
	}
      /* Add BASE to the front of the list; erroneous bases are
	 silently dropped so that error recovery can continue.  */
      if (base && base != error_mark_node)
	{
	  if (pack_expansion_p)
	    /* Make this a pack expansion type.  */
	    TREE_VALUE (base) = make_pack_expansion (TREE_VALUE (base));
	  /* A parameter pack used as a base without `...' is an
	     error; only keep the base when the check passes.  */
	  if (!check_for_bare_parameter_packs (TREE_VALUE (base)))
	    {
	      TREE_CHAIN (base) = bases;
	      bases = base;
	    }
	}
      /* Peek at the next token.  */
      token = cp_lexer_peek_token (parser->lexer);
      /* If it's not a comma, then the list is complete.  */
      if (token->type != CPP_COMMA)
	break;
      /* Consume the `,'.  */
      cp_lexer_consume_token (parser->lexer);
    }
  /* PARSER->SCOPE may still be non-NULL at this point, if the last
     base class had a qualified name.  However, the next name that
     appears is certainly not qualified.  */
  parser->scope = NULL_TREE;
  parser->qualifying_scope = NULL_TREE;
  parser->object_scope = NULL_TREE;
  /* The list was built front-first; restore declaration order.  */
  return nreverse (bases);
}
/* Parse a base-specifier.
   base-specifier:
     :: [opt] nested-name-specifier [opt] class-name
     virtual access-specifier [opt] :: [opt] nested-name-specifier
       [opt] class-name
     access-specifier virtual [opt] :: [opt] nested-name-specifier
       [opt] class-name
   Returns a TREE_LIST.  The TREE_PURPOSE will be one of
   ACCESS_{DEFAULT,PUBLIC,PROTECTED,PRIVATE}_[VIRTUAL]_NODE to
   indicate the specifiers provided.  The TREE_VALUE will be a TYPE
   (or the ERROR_MARK_NODE) indicating the type that was specified.  */
static tree
cp_parser_base_specifier (cp_parser* parser)
{
  cp_token *token;
  bool done = false;
  bool virtual_p = false;
  bool duplicate_virtual_error_issued_p = false;
  bool duplicate_access_error_issued_p = false;
  bool class_scope_p, template_p;
  tree access = access_default_node;
  tree type;
  /* Process the optional `virtual' and `access-specifier'.  These
     may appear in either order, but each duplicate is diagnosed only
     once.  */
  while (!done)
    {
      /* Peek at the next token.  */
      token = cp_lexer_peek_token (parser->lexer);
      /* Process `virtual'.  */
      switch (token->keyword)
	{
	case RID_VIRTUAL:
	  /* If `virtual' appears more than once, issue an error.  */
	  if (virtual_p && !duplicate_virtual_error_issued_p)
	    {
	      cp_parser_error (parser,
			       "%<virtual%> specified more than once in base-specifier");
	      duplicate_virtual_error_issued_p = true;
	    }
	  virtual_p = true;
	  /* Consume the `virtual' token.  */
	  cp_lexer_consume_token (parser->lexer);
	  break;
	case RID_PUBLIC:
	case RID_PROTECTED:
	case RID_PRIVATE:
	  /* If more than one access specifier appears, issue an
	     error.  */
	  if (access != access_default_node
	      && !duplicate_access_error_issued_p)
	    {
	      cp_parser_error (parser,
			       "more than one access specifier in base-specifier");
	      duplicate_access_error_issued_p = true;
	    }
	  access = ridpointers[(int) token->keyword];
	  /* Consume the access-specifier.  */
	  cp_lexer_consume_token (parser->lexer);
	  break;
	default:
	  done = true;
	  break;
	}
    }
  /* It is not uncommon to see programs mechanically, erroneously, use
     the 'typename' keyword to denote (dependent) qualified types
     as base classes.  */
  if (cp_lexer_next_token_is_keyword (parser->lexer, RID_TYPENAME))
    {
      token = cp_lexer_peek_token (parser->lexer);
      if (!processing_template_decl)
	error_at (token->location,
		  "keyword %<typename%> not allowed outside of templates");
      else
	error_at (token->location,
		  "keyword %<typename%> not allowed in this context "
		  "(the base class is implicitly a type)");
      cp_lexer_consume_token (parser->lexer);
    }
  /* Look for the optional `::' operator.  */
  cp_parser_global_scope_opt (parser, /*current_scope_valid_p=*/false);
  /* Look for the nested-name-specifier.  The simplest way to
     implement:
       [temp.res]
       The keyword `typename' is not permitted in a base-specifier or
       mem-initializer; in these contexts a qualified name that
       depends on a template-parameter is implicitly assumed to be a
       type name.
     is to pretend that we have seen the `typename' keyword at this
     point.  */
  cp_parser_nested_name_specifier_opt (parser,
				       /*typename_keyword_p=*/true,
				       /*check_dependency_p=*/true,
				       typename_type,
				       /*is_declaration=*/true);
  /* If the base class is given by a qualified name, assume that names
     we see are type names or templates, as appropriate.  */
  class_scope_p = (parser->scope && TYPE_P (parser->scope));
  template_p = class_scope_p && cp_parser_optional_template_keyword (parser);
  if (!parser->scope
      && cp_lexer_next_token_is_decltype (parser->lexer))
    /* DR 950 allows decltype as a base-specifier.  */
    type = cp_parser_decltype (parser);
  else
    {
      /* Otherwise, look for the class-name.  */
      type = cp_parser_class_name (parser,
				   class_scope_p,
				   template_p,
				   typename_type,
				   /*check_dependency_p=*/true,
				   /*class_head_p=*/false,
				   /*is_declaration=*/true);
      type = TREE_TYPE (type);
    }
  if (type == error_mark_node)
    return error_mark_node;
  return finish_base_specifier (type, access, virtual_p);
}
/* Exception handling [gram.exception] */
/* Parse an (optional) noexcept-specification.
   noexcept-specification:
     noexcept ( constant-expression ) [opt]
   If no noexcept-specification is present, returns NULL_TREE.
   Otherwise, if REQUIRE_CONSTEXPR is false, then either parse and return any
   expression if parentheses follow noexcept, or return BOOLEAN_TRUE_NODE if
   there are no parentheses.  CONSUMED_EXPR will be set accordingly.
   Otherwise, returns a noexcept specification unless RETURN_COND is true,
   in which case a boolean condition is returned instead.
   Note: CONSUMED_EXPR is only written to when REQUIRE_CONSTEXPR is
   false; callers passing REQUIRE_CONSTEXPR may pass NULL for it.  */
static tree
cp_parser_noexcept_specification_opt (cp_parser* parser,
				      bool require_constexpr,
				      bool* consumed_expr,
				      bool return_cond)
{
  cp_token *token;
  const char *saved_message;
  /* Peek at the next token.  */
  token = cp_lexer_peek_token (parser->lexer);
  /* Is it a noexcept-specification?  */
  if (cp_parser_is_keyword (token, RID_NOEXCEPT))
    {
      tree expr;
      cp_lexer_consume_token (parser->lexer);
      if (cp_lexer_peek_token (parser->lexer)->type == CPP_OPEN_PAREN)
	{
	  cp_lexer_consume_token (parser->lexer);
	  if (require_constexpr)
	    {
	      /* Types may not be defined in an exception-specification.  */
	      saved_message = parser->type_definition_forbidden_message;
	      parser->type_definition_forbidden_message
		= G_("types may not be defined in an exception-specification");
	      expr = cp_parser_constant_expression (parser);
	      /* Restore the saved message.  */
	      parser->type_definition_forbidden_message = saved_message;
	    }
	  else
	    {
	      /* Parse an arbitrary expression and let the caller
		 decide what to do with it.  */
	      expr = cp_parser_expression (parser);
	      *consumed_expr = true;
	    }
	  cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);
	}
      else
	{
	  /* Plain `noexcept' is equivalent to `noexcept(true)'.  */
	  expr = boolean_true_node;
	  if (!require_constexpr)
	    *consumed_expr = false;
	}
      /* We cannot build a noexcept-spec right away because this will check
	 that expr is a constexpr.  */
      if (!return_cond)
	return build_noexcept_spec (expr, tf_warning_or_error);
      else
	return expr;
    }
  else
    return NULL_TREE;
}
/* Parse an (optional) exception-specification.
   exception-specification:
     throw ( type-id-list [opt] )
   Returns a TREE_LIST representing the exception-specification.  The
   TREE_VALUE of each node is a type.  A C++11 noexcept-specification
   is also accepted here and returned directly.  If neither form is
   present, returns NULL_TREE.  */
static tree
cp_parser_exception_specification_opt (cp_parser* parser)
{
  cp_token *token;
  tree type_id_list;
  const char *saved_message;
  /* Peek at the next token.  */
  token = cp_lexer_peek_token (parser->lexer);
  /* Is it a noexcept-specification?  */
  type_id_list = cp_parser_noexcept_specification_opt(parser, true, NULL,
						      false);
  if (type_id_list != NULL_TREE)
    return type_id_list;
  /* If it's not `throw', then there's no exception-specification.  */
  if (!cp_parser_is_keyword (token, RID_THROW))
    return NULL_TREE;
#if 0
  /* Enable this once a lot of code has transitioned to noexcept?  */
  if (cxx_dialect >= cxx11 && !in_system_header_at (input_location))
    warning (OPT_Wdeprecated, "dynamic exception specifications are "
	     "deprecated in C++0x; use %<noexcept%> instead");
#endif
  /* Consume the `throw'.  */
  cp_lexer_consume_token (parser->lexer);
  /* Look for the `('.  */
  cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN);
  /* Peek at the next token.  */
  token = cp_lexer_peek_token (parser->lexer);
  /* If it's not a `)', then there is a type-id-list.  */
  if (token->type != CPP_CLOSE_PAREN)
    {
      /* Types may not be defined in an exception-specification.  */
      saved_message = parser->type_definition_forbidden_message;
      parser->type_definition_forbidden_message
	= G_("types may not be defined in an exception-specification");
      /* Parse the type-id-list.  */
      type_id_list = cp_parser_type_id_list (parser);
      /* Restore the saved message.  */
      parser->type_definition_forbidden_message = saved_message;
    }
  else
    /* `throw ()' means "throws nothing".  */
    type_id_list = empty_except_spec;
  /* Look for the `)'.  */
  cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);
  return type_id_list;
}
/* Parse an (optional) type-id-list.
   type-id-list:
     type-id ... [opt]
     type-id-list , type-id ... [opt]
   Returns a TREE_LIST.  The TREE_VALUE of each node is a TYPE,
   in the order that the types were presented.  */
static tree
cp_parser_type_id_list (cp_parser* parser)
{
  tree types = NULL_TREE;
  while (true)
    {
      cp_token *token;
      tree type;
      /* Get the next type-id.  */
      type = cp_parser_type_id (parser);
      /* Parse the optional ellipsis.  */
      if (cp_lexer_next_token_is (parser->lexer, CPP_ELLIPSIS))
	{
	  /* Consume the `...'.  */
	  cp_lexer_consume_token (parser->lexer);
	  /* Turn the type into a pack expansion expression.  */
	  type = make_pack_expansion (type);
	}
      /* Add it to the list; add_exception_specifier also diagnoses
	 invalid exception types (complain=1).  */
      types = add_exception_specifier (types, type, /*complain=*/1);
      /* Peek at the next token.  */
      token = cp_lexer_peek_token (parser->lexer);
      /* If it is not a `,', we are done.  */
      if (token->type != CPP_COMMA)
	break;
      /* Consume the `,'.  */
      cp_lexer_consume_token (parser->lexer);
    }
  /* The list was built in reverse; restore source order.  */
  return nreverse (types);
}
/* Parse a try-block.
   try-block:
     try compound-statement handler-seq
   Returns the TRY_BLOCK tree.  */
static tree
cp_parser_try_block (cp_parser* parser)
{
  tree try_block;
  cp_parser_require_keyword (parser, RID_TRY, RT_TRY);
  /* C++11 [dcl.constexpr]: a constexpr function may not contain a
     try-block.  */
  if (parser->in_function_body
      && DECL_DECLARED_CONSTEXPR_P (current_function_decl))
    error ("%<try%> in %<constexpr%> function");
  try_block = begin_try_block ();
  /* Parse the protected compound-statement.  */
  cp_parser_compound_statement (parser, NULL, true, false);
  finish_try_block (try_block);
  /* Parse the catch handlers.  */
  cp_parser_handler_seq (parser);
  finish_handler_sequence (try_block);
  return try_block;
}
/* Parse a function-try-block.
   function-try-block:
     try ctor-initializer [opt] function-body handler-seq
   Returns true iff a ctor-initializer was present.  */
static bool
cp_parser_function_try_block (cp_parser* parser)
{
  tree compound_stmt;
  tree try_block;
  bool ctor_initializer_p;
  /* Look for the `try' keyword.  */
  if (!cp_parser_require_keyword (parser, RID_TRY, RT_TRY))
    return false;
  /* Let the rest of the front end know where we are.  */
  try_block = begin_function_try_block (&compound_stmt);
  /* Parse the function-body.  */
  ctor_initializer_p = cp_parser_ctor_initializer_opt_and_function_body
    (parser, /*in_function_try_block=*/true);
  /* We're done with the `try' part.  */
  finish_function_try_block (try_block);
  /* Parse the handlers.  */
  cp_parser_handler_seq (parser);
  /* We're done with the handlers.  */
  finish_function_handler_sequence (try_block, compound_stmt);
  return ctor_initializer_p;
}
/* Parse a handler-seq.
   handler-seq:
     handler handler-seq [opt]
   At least one handler is always parsed; further handlers follow for
   as long as the next token is the `catch' keyword.  */
static void
cp_parser_handler_seq (cp_parser* parser)
{
  bool more_handlers_p;
  do
    {
      /* Parse one handler.  */
      cp_parser_handler (parser);
      /* Another handler follows iff the next token is `catch'.  */
      cp_token *next = cp_lexer_peek_token (parser->lexer);
      more_handlers_p = cp_parser_is_keyword (next, RID_CATCH);
    }
  while (more_handlers_p);
}
/* Parse a handler.
   handler:
     catch ( exception-declaration ) compound-statement
   Registers the handler and its caught declaration with the rest of
   the front end as a side effect.  */
static void
cp_parser_handler (cp_parser* parser)
{
  tree handler;
  tree declaration;
  cp_parser_require_keyword (parser, RID_CATCH, RT_CATCH);
  handler = begin_handler ();
  cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN);
  /* Parse the exception-declaration (or `...').  */
  declaration = cp_parser_exception_declaration (parser);
  /* Register the declaration before parsing the body so that the
     caught variable is in scope inside the compound-statement.  */
  finish_handler_parms (declaration, handler);
  cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);
  cp_parser_compound_statement (parser, NULL, false, false);
  finish_handler (handler);
}
/* Parse an exception-declaration.
   exception-declaration:
     type-specifier-seq declarator
     type-specifier-seq abstract-declarator
     type-specifier-seq
     ...
   Returns a VAR_DECL for the declaration, or NULL_TREE if the
   ellipsis variant is used.  Returns ERROR_MARK_NODE when no
   type-specifiers were present.  */
static tree
cp_parser_exception_declaration (cp_parser* parser)
{
  cp_decl_specifier_seq type_specifiers;
  cp_declarator *declarator;
  const char *saved_message;
  /* If it's an ellipsis, it's easy to handle.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_ELLIPSIS))
    {
      /* Consume the `...' token.  */
      cp_lexer_consume_token (parser->lexer);
      return NULL_TREE;
    }
  /* Types may not be defined in exception-declarations.  */
  saved_message = parser->type_definition_forbidden_message;
  parser->type_definition_forbidden_message
    = G_("types may not be defined in exception-declarations");
  /* Parse the type-specifier-seq.  */
  cp_parser_type_specifier_seq (parser, /*is_declaration=*/true,
				/*is_trailing_return=*/false,
				&type_specifiers);
  /* If it's a `)', then there is no declarator.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_CLOSE_PAREN))
    declarator = NULL;
  else
    declarator = cp_parser_declarator (parser, CP_PARSER_DECLARATOR_EITHER,
				       /*ctor_dtor_or_conv_p=*/NULL,
				       /*parenthesized_p=*/NULL,
				       /*member_p=*/false,
				       /*friend_p=*/false);
  /* Restore the saved message.  */
  parser->type_definition_forbidden_message = saved_message;
  if (!type_specifiers.any_specifiers_p)
    return error_mark_node;
  /* Build the VAR_DECL for the caught object.  */
  return grokdeclarator (declarator, &type_specifiers, CATCHPARM, 1, NULL);
}
/* Parse a throw-expression.
   throw-expression:
     throw assignment-expression [opt]
   Returns a THROW_EXPR representing the throw-expression.  */
static tree
cp_parser_throw_expression (cp_parser* parser)
{
  tree expression;
  cp_token* token;
  cp_parser_require_keyword (parser, RID_THROW, RT_THROW);
  token = cp_lexer_peek_token (parser->lexer);
  /* Figure out whether or not there is an assignment-expression
     following the "throw" keyword.  A token that can only terminate
     or continue an enclosing construct means the operand is absent
     (a re-throw).  */
  switch (token->type)
    {
    case CPP_COMMA:
    case CPP_SEMICOLON:
    case CPP_CLOSE_PAREN:
    case CPP_CLOSE_SQUARE:
    case CPP_CLOSE_BRACE:
    case CPP_COLON:
      expression = NULL_TREE;
      break;
    default:
      expression = cp_parser_assignment_expression (parser);
      break;
    }
  return build_throw (expression);
}
/* GNU Extensions */
/* Parse an (optional) asm-specification.
   asm-specification:
     asm ( string-literal )
   If the asm-specification is present, returns a STRING_CST
   corresponding to the string-literal.  Otherwise, returns
   NULL_TREE.  */
static tree
cp_parser_asm_specification_opt (cp_parser* parser)
{
  cp_token *token;
  tree asm_specification;
  /* Peek at the next token.  */
  token = cp_lexer_peek_token (parser->lexer);
  /* If the next token isn't the `asm' keyword, then there's no
     asm-specification.  */
  if (!cp_parser_is_keyword (token, RID_ASM))
    return NULL_TREE;
  /* Consume the `asm' token.  */
  cp_lexer_consume_token (parser->lexer);
  /* Look for the `('.  */
  cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN);
  /* Look for the string-literal; no translation, no wide strings.  */
  asm_specification = cp_parser_string_literal (parser, false, false);
  /* Look for the `)'.  */
  cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);
  return asm_specification;
}
/* Parse an asm-operand-list.
   asm-operand-list:
     asm-operand
     asm-operand-list , asm-operand
   asm-operand:
     string-literal ( expression )
     [ string-literal ] string-literal ( expression )
   Returns a TREE_LIST representing the operands.  The TREE_VALUE of
   each node is the expression.  The TREE_PURPOSE is itself a
   TREE_LIST whose TREE_PURPOSE is a STRING_CST for the bracketed
   string-literal (or NULL_TREE if not present) and whose TREE_VALUE
   is a STRING_CST for the string literal before the parenthesis.  Returns
   ERROR_MARK_NODE if any of the operands are invalid.  */
static tree
cp_parser_asm_operand_list (cp_parser* parser)
{
  tree asm_operands = NULL_TREE;
  bool invalid_operands = false;
  while (true)
    {
      tree string_literal;
      tree expression;
      tree name;
      /* An operand may begin with a bracketed symbolic name.  */
      if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_SQUARE))
	{
	  /* Consume the `[' token.  */
	  cp_lexer_consume_token (parser->lexer);
	  /* Read the operand name.  */
	  name = cp_parser_identifier (parser);
	  if (name != error_mark_node)
	    name = build_string (IDENTIFIER_LENGTH (name),
				 IDENTIFIER_POINTER (name));
	  /* Look for the closing `]'.  */
	  cp_parser_require (parser, CPP_CLOSE_SQUARE, RT_CLOSE_SQUARE);
	}
      else
	name = NULL_TREE;
      /* Look for the string-literal (the constraint).  */
      string_literal = cp_parser_string_literal (parser, false, false);
      /* Look for the `('.  */
      cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN);
      /* Parse the expression.  */
      expression = cp_parser_expression (parser);
      /* Look for the `)'.  */
      cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);
      /* Remember the failure but keep parsing so that further errors
	 in the list can still be diagnosed.  */
      if (name == error_mark_node
	  || string_literal == error_mark_node
	  || expression == error_mark_node)
	invalid_operands = true;
      /* Add this operand to the list.  */
      asm_operands = tree_cons (build_tree_list (name, string_literal),
				expression,
				asm_operands);
      /* If the next token is not a `,', there are no more
	 operands.  */
      if (cp_lexer_next_token_is_not (parser->lexer, CPP_COMMA))
	break;
      /* Consume the `,'.  */
      cp_lexer_consume_token (parser->lexer);
    }
  /* The list was built in reverse; restore source order.  */
  return invalid_operands ? error_mark_node : nreverse (asm_operands);
}
/* Parse an asm-clobber-list.
   asm-clobber-list:
     string-literal
     asm-clobber-list , string-literal
   Returns a TREE_LIST, indicating the clobbers in the order that they
   appeared.  The TREE_VALUE of each node is a STRING_CST.  */
static tree
cp_parser_asm_clobber_list (cp_parser* parser)
{
  tree clobbers = NULL_TREE;
  /* Accumulate one string literal per iteration; commas separate the
     entries.  */
  for (;;)
    {
      /* Look for the string literal.  */
      tree clobber = cp_parser_string_literal (parser, false, false);
      /* Prepend it to the list.  */
      clobbers = tree_cons (NULL_TREE, clobber, clobbers);
      /* Without a following `,' the list is complete.  */
      if (cp_lexer_next_token_is_not (parser->lexer, CPP_COMMA))
	return clobbers;
      /* Consume the `,' token.  */
      cp_lexer_consume_token (parser->lexer);
    }
}
/* Parse an asm-label-list.
   asm-label-list:
     identifier
     asm-label-list , identifier
   Returns a TREE_LIST, indicating the labels in the order that they
   appeared.  The TREE_VALUE of each node is a label; the TREE_PURPOSE
   is a STRING_CST holding the label's name.  Erroneous identifiers
   are silently dropped from the result.  */
static tree
cp_parser_asm_label_list (cp_parser* parser)
{
  tree labels = NULL_TREE;
  while (true)
    {
      tree identifier, label, name;
      /* Look for the identifier.  */
      identifier = cp_parser_identifier (parser);
      if (!error_operand_p (identifier))
	{
	  label = lookup_label (identifier);
	  if (TREE_CODE (label) == LABEL_DECL)
	    {
	      /* Mark the label used and validate the implied goto.  */
	      TREE_USED (label) = 1;
	      check_goto (label);
	      name = build_string (IDENTIFIER_LENGTH (identifier),
				   IDENTIFIER_POINTER (identifier));
	      labels = tree_cons (name, label, labels);
	    }
	}
      /* If the next token is not a `,', then the list is
	 complete.  */
      if (cp_lexer_next_token_is_not (parser->lexer, CPP_COMMA))
	break;
      /* Consume the `,' token.  */
      cp_lexer_consume_token (parser->lexer);
    }
  /* The list was built in reverse; restore source order.  */
  return nreverse (labels);
}
/* Return TRUE iff the next tokens in the stream are possibly the
   beginning of a GNU extension attribute.  */
static bool
cp_next_tokens_can_be_gnu_attribute_p (cp_parser *parser)
{
  /* "Next" means the first lookahead token.  */
  bool result = cp_nth_tokens_can_be_gnu_attribute_p (parser, 1);
  return result;
}
/* Return TRUE iff the next tokens in the stream are possibly the
   beginning of a standard C++-11 attribute specifier.  */
static bool
cp_next_tokens_can_be_std_attribute_p (cp_parser *parser)
{
  /* "Next" means the first lookahead token.  */
  bool result = cp_nth_tokens_can_be_std_attribute_p (parser, 1);
  return result;
}
/* Return TRUE iff the next Nth tokens in the stream are possibly the
   beginning of a standard C++-11 attribute specifier.  */
static bool
cp_nth_tokens_can_be_std_attribute_p (cp_parser *parser, size_t n)
{
  cp_token *token = cp_lexer_peek_nth_token (parser->lexer, n);
  /* Standard attributes do not exist before C++11.  */
  if (cxx_dialect < cxx11)
    return false;
  /* alignas(...) counts as an attribute-specifier.  */
  if (token->type == CPP_KEYWORD && token->keyword == RID_ALIGNAS)
    return true;
  /* Otherwise look for the `[[' introducer: two consecutive `['.  */
  if (token->type != CPP_OPEN_SQUARE)
    return false;
  token = cp_lexer_peek_nth_token (parser->lexer, n + 1);
  return token && token->type == CPP_OPEN_SQUARE;
}
/* Return TRUE iff the next Nth tokens in the stream are possibly the
   beginning of a GNU extension attribute.  */
static bool
cp_nth_tokens_can_be_gnu_attribute_p (cp_parser *parser, size_t n)
{
  cp_token *token = cp_lexer_peek_nth_token (parser->lexer, n);
  /* A GNU attribute always starts with the `__attribute__' keyword.  */
  if (token->type != CPP_KEYWORD)
    return false;
  return token->keyword == RID_ATTRIBUTE;
}
/* Return true iff the next tokens can be the beginning of either a
   GNU attribute list, or a standard C++11 attribute sequence.  */
static bool
cp_next_tokens_can_be_attribute_p (cp_parser *parser)
{
  if (cp_next_tokens_can_be_gnu_attribute_p (parser))
    return true;
  return cp_next_tokens_can_be_std_attribute_p (parser);
}
/* Return true iff the next Nth tokens can be the beginning of either
   a GNU attribute list, or a standard C++11 attribute sequence.  */
static bool
cp_nth_tokens_can_be_attribute_p (cp_parser *parser, size_t n)
{
  if (cp_nth_tokens_can_be_gnu_attribute_p (parser, n))
    return true;
  return cp_nth_tokens_can_be_std_attribute_p (parser, n);
}
/* Parse either a standard C++11 attribute-specifier-seq, or a series
   of GNU attributes, whichever the upcoming tokens indicate; return
   NULL if neither is present.  */

static tree
cp_parser_attributes_opt (cp_parser *parser)
{
  return (cp_next_tokens_can_be_gnu_attribute_p (parser)
	  ? cp_parser_gnu_attributes_opt (parser)
	  : cp_parser_std_attribute_spec_seq (parser));
}
/* Mask of the clauses that are permitted on a Cilk Plus SIMD-enabled
   function ("vector") attribute.  */
#define CILK_SIMD_FN_CLAUSE_MASK				\
	((OMP_CLAUSE_MASK_1 << PRAGMA_CILK_CLAUSE_VECTORLENGTH)	\
	 | (OMP_CLAUSE_MASK_1 << PRAGMA_CILK_CLAUSE_LINEAR)	\
	 | (OMP_CLAUSE_MASK_1 << PRAGMA_CILK_CLAUSE_UNIFORM)	\
	 | (OMP_CLAUSE_MASK_1 << PRAGMA_CILK_CLAUSE_MASK)	\
	 | (OMP_CLAUSE_MASK_1 << PRAGMA_CILK_CLAUSE_NOMASK))
/* Parses the Cilk Plus SIMD-enabled function's attribute.  Syntax:
   vector [(<clauses>)]

   The clause tokens, if present, are not parsed here; they are
   captured verbatim into PARSER->CILK_SIMD_FN_INFO->TOKENS so they
   can be re-lexed later, OpenMP declare-simd style.  V_TOKEN is the
   already-consumed `vector' identifier token.  */

static void
cp_parser_cilk_simd_fn_vector_attrs (cp_parser *parser, cp_token *v_token)
{
  bool first_p = parser->cilk_simd_fn_info == NULL;
  cp_token *token = v_token;
  if (first_p)
    {
      /* First `vector' attribute seen on this declaration: allocate
	 the structure that accumulates the saved clause tokens.  */
      parser->cilk_simd_fn_info = XNEW (cp_omp_declare_simd_data);
      parser->cilk_simd_fn_info->error_seen = false;
      parser->cilk_simd_fn_info->fndecl_seen = false;
      parser->cilk_simd_fn_info->tokens = vNULL;
    }
  int paren_scope = 0;
  if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_PAREN))
    {
      cp_lexer_consume_token (parser->lexer);
      /* Start the saved token run just after the opening `('.  */
      v_token = cp_lexer_peek_token (parser->lexer);
      paren_scope++;
    }
  /* Skip over the (possibly nested) parenthesized clause list,
     tracking paren nesting depth.  */
  while (paren_scope > 0)
    {
      token = cp_lexer_peek_token (parser->lexer);
      if (token->type == CPP_OPEN_PAREN)
	paren_scope++;
      else if (token->type == CPP_CLOSE_PAREN)
	paren_scope--;
      /* Do not push the last ')' */
      if (!(token->type == CPP_CLOSE_PAREN && paren_scope == 0))
	cp_lexer_consume_token (parser->lexer);
    }
  /* Overwrite the final `)' with a pragma-end marker so the saved
     token run terminates the way the clause parser expects.  */
  token->type = CPP_PRAGMA_EOL;
  parser->lexer->next_token = token;
  cp_lexer_consume_token (parser->lexer);
  /* Cache everything from just after the `(' up to (and including)
     the synthesized CPP_PRAGMA_EOL.  */
  struct cp_token_cache *cp
    = cp_token_cache_new (v_token, cp_lexer_peek_token (parser->lexer));
  parser->cilk_simd_fn_info->tokens.safe_push (cp);
}
/* Parse an (optional) series of attributes.

   attributes:
     attributes attribute

   attribute:
     __attribute__ (( attribute-list [opt] ))

   The return value is as for cp_parser_gnu_attribute_list.  */

static tree
cp_parser_gnu_attributes_opt (cp_parser* parser)
{
  tree attributes = NULL_TREE;

  while (true)
    {
      cp_token *token;
      tree attribute_list;
      bool ok = true;

      /* Peek at the next token.  */
      token = cp_lexer_peek_token (parser->lexer);
      /* If it's not `__attribute__', then we're done.  */
      if (token->keyword != RID_ATTRIBUTE)
	break;

      /* Consume the `__attribute__' keyword.  */
      cp_lexer_consume_token (parser->lexer);
      /* Look for the two `(' tokens.  */
      cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN);
      cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN);

      /* Peek at the next token.  */
      token = cp_lexer_peek_token (parser->lexer);
      if (token->type != CPP_CLOSE_PAREN)
	/* Parse the attribute-list.  */
	attribute_list = cp_parser_gnu_attribute_list (parser);
      else
	/* If the next token is a `)', then there is no attribute
	   list.  */
	attribute_list = NULL;

      /* Look for the two `)' tokens.  */
      if (!cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN))
	ok = false;
      if (!cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN))
	ok = false;
      if (!ok)
	/* Error recovery: skip ahead so the caller can make progress;
	   whatever was parsed before the error is still chained.  */
	cp_parser_skip_to_end_of_statement (parser);

      /* Add these new attributes to the list.  */
      attributes = chainon (attributes, attribute_list);
    }

  return attributes;
}
/* Return true iff Cilk Plus is enabled and NAME is an
   IDENTIFIER_NODE spelled "vector", "__vector" or "__vector__".  */

static inline bool
is_cilkplus_vector_p (tree name)
{
  return flag_cilkplus && is_attribute_p ("vector", name);
}
/* Parse a GNU attribute-list.

   attribute-list:
     attribute
     attribute-list , attribute

   attribute:
     identifier
     identifier ( identifier )
     identifier ( identifier , expression-list )
     identifier ( expression-list )

   Returns a TREE_LIST, or NULL_TREE on error.  Each node corresponds
   to an attribute.  The TREE_PURPOSE of each node is the identifier
   indicating which attribute is in use.  The TREE_VALUE represents
   the arguments, if any.  */

static tree
cp_parser_gnu_attribute_list (cp_parser* parser)
{
  tree attribute_list = NULL_TREE;
  bool save_translate_strings_p = parser->translate_strings_p;

  /* String literals inside attribute arguments (e.g. section names)
     must not be translated to the execution character set.  */
  parser->translate_strings_p = false;
  while (true)
    {
      cp_token *token;
      tree identifier;
      tree attribute;

      /* Look for the identifier.  We also allow keywords here; for
	 example `__attribute__ ((const))' is legal.  */
      token = cp_lexer_peek_token (parser->lexer);
      if (token->type == CPP_NAME
	  || token->type == CPP_KEYWORD)
	{
	  tree arguments = NULL_TREE;

	  /* Consume the token, but save it since we need it for the
	     SIMD enabled function parsing.  */
	  cp_token *id_token = cp_lexer_consume_token (parser->lexer);

	  /* Save away the identifier that indicates which attribute
	     this is.  */
	  identifier = (token->type == CPP_KEYWORD)
	    /* For keywords, use the canonical spelling, not the
	       parsed identifier.  */
	    ? ridpointers[(int) token->keyword]
	    : id_token->u.value;

	  attribute = build_tree_list (identifier, NULL_TREE);

	  /* Peek at the next token.  */
	  token = cp_lexer_peek_token (parser->lexer);
	  /* If it's an `(', then parse the attribute arguments.  */
	  if (token->type == CPP_OPEN_PAREN)
	    {
	      vec<tree, va_gc> *vec;
	      int attr_flag = (attribute_takes_identifier_p (identifier)
			       ? id_attr : normal_attr);
	      if (is_cilkplus_vector_p (identifier))
		{
		  /* A Cilk Plus `vector' attribute: its clauses are
		     captured separately, not chained onto this
		     list.  */
		  cp_parser_cilk_simd_fn_vector_attrs (parser, id_token);
		  continue;
		}
	      else
		vec = cp_parser_parenthesized_expression_list
		      (parser, attr_flag, /*cast_p=*/false,
		       /*allow_expansion_p=*/false,
		       /*non_constant_p=*/NULL);
	      if (vec == NULL)
		arguments = error_mark_node;
	      else
		{
		  arguments = build_tree_list_vec (vec);
		  release_tree_vector (vec);
		}
	      /* Save the arguments away.  */
	      TREE_VALUE (attribute) = arguments;
	    }
	  else if (is_cilkplus_vector_p (identifier))
	    {
	      /* Clause-less Cilk Plus `vector' attribute.  */
	      cp_parser_cilk_simd_fn_vector_attrs (parser, id_token);
	      continue;
	    }

	  if (arguments != error_mark_node)
	    {
	      /* Add this attribute to the list.  */
	      TREE_CHAIN (attribute) = attribute_list;
	      attribute_list = attribute;
	    }

	  token = cp_lexer_peek_token (parser->lexer);
	}
      /* Now, look for more attributes.  If the next token isn't a
	 `,', we're done.  */
      if (token->type != CPP_COMMA)
	break;

      /* Consume the comma and keep going.  */
      cp_lexer_consume_token (parser->lexer);
    }
  parser->translate_strings_p = save_translate_strings_p;

  /* We built up the list in reverse order.  */
  return nreverse (attribute_list);
}
/* Parse a standard C++11 attribute.

   The returned representation is a TREE_LIST which TREE_PURPOSE is
   the scoped name of the attribute, and the TREE_VALUE is its
   arguments list.

   Note that the scoped name of the attribute is itself a TREE_LIST
   which TREE_PURPOSE is the namespace of the attribute, and
   TREE_VALUE its name.  This is unlike a GNU attribute -- as parsed
   by cp_parser_gnu_attribute_list -- that doesn't have any namespace
   and which TREE_PURPOSE is directly the attribute name.

   Clients of the attribute code should use get_attribute_namespace
   and get_attribute_name to get the actual namespace and name of
   attributes, regardless of their being GNU or C++11 attributes.

   attribute:
     attribute-token attribute-argument-clause [opt]

   attribute-token:
     identifier
     attribute-scoped-token

   attribute-scoped-token:
     attribute-namespace :: identifier

   attribute-namespace:
     identifier

   attribute-argument-clause:
     ( balanced-token-seq )

   balanced-token-seq:
     balanced-token [opt]
     balanced-token-seq balanced-token

   balanced-token:
     ( balanced-token-seq )
     [ balanced-token-seq ]
     { balanced-token-seq }.  */

static tree
cp_parser_std_attribute (cp_parser *parser)
{
  tree attribute, attr_ns = NULL_TREE, attr_id = NULL_TREE, arguments;
  cp_token *token;

  /* First, parse name of the attribute, a.k.a
     attribute-token.  */

  token = cp_lexer_peek_token (parser->lexer);
  if (token->type == CPP_NAME)
    attr_id = token->u.value;
  else if (token->type == CPP_KEYWORD)
    attr_id = ridpointers[(int) token->keyword];
  else if (token->flags & NAMED_OP)
    /* Alternative operator spellings such as `and' or `not' are
       operator tokens at the lexer level but valid attribute
       names.  */
    attr_id = get_identifier (cpp_type2name (token->type, token->flags));

  if (attr_id == NULL_TREE)
    return NULL_TREE;
  cp_lexer_consume_token (parser->lexer);

  token = cp_lexer_peek_token (parser->lexer);
  if (token->type == CPP_SCOPE)
    {
      /* We are seeing a scoped attribute token.  */
      cp_lexer_consume_token (parser->lexer);
      attr_ns = attr_id;
      token = cp_lexer_consume_token (parser->lexer);
      if (token->type == CPP_NAME)
	attr_id = token->u.value;
      else if (token->type == CPP_KEYWORD)
	attr_id = ridpointers[(int) token->keyword];
      else
	{
	  error_at (token->location,
		    "expected an identifier for the attribute name");
	  return error_mark_node;
	}
      attribute = build_tree_list (build_tree_list (attr_ns, attr_id),
				   NULL_TREE);
      token = cp_lexer_peek_token (parser->lexer);
    }
  else
    {
      attribute = build_tree_list (build_tree_list (NULL_TREE, attr_id),
				   NULL_TREE);
      /* C++11 noreturn attribute is equivalent to GNU's.  */
      if (is_attribute_p ("noreturn", attr_id))
	TREE_PURPOSE (TREE_PURPOSE (attribute)) = get_identifier ("gnu");
      /* C++14 deprecated attribute is equivalent to GNU's.  */
      else if (cxx_dialect >= cxx11 && is_attribute_p ("deprecated", attr_id))
	{
	  if (cxx_dialect == cxx11)
	    pedwarn (token->location, OPT_Wpedantic,
		     "%<deprecated%> is a C++14 feature;"
		     " use %<gnu::deprecated%>");
	  TREE_PURPOSE (TREE_PURPOSE (attribute)) = get_identifier ("gnu");
	}
    }

  /* Now parse the optional argument clause of the attribute.  */

  if (token->type != CPP_OPEN_PAREN)
    return attribute;

  {
    vec<tree, va_gc> *vec;
    int attr_flag = normal_attr;

    if (attr_ns == get_identifier ("gnu")
	&& attribute_takes_identifier_p (attr_id))
      /* A GNU attribute that takes an identifier in parameter.  */
      attr_flag = id_attr;

    vec = cp_parser_parenthesized_expression_list
      (parser, attr_flag, /*cast_p=*/false,
       /*allow_expansion_p=*/true,
       /*non_constant_p=*/NULL);
    if (vec == NULL)
      arguments = error_mark_node;
    else
      {
	arguments = build_tree_list_vec (vec);
	release_tree_vector (vec);
      }

    if (arguments == error_mark_node)
      attribute = error_mark_node;
    else
      TREE_VALUE (attribute) = arguments;
  }

  return attribute;
}
/* Parse a list of standard C++11 attributes.

   attribute-list:
     attribute [opt]
     attribute-list , attribute[opt]
     attribute ...
     attribute-list , attribute ...

   Returns the attributes in reverse-of-parse order fixed up by
   nreverse, i.e. in source order.  */

static tree
cp_parser_std_attribute_list (cp_parser *parser)
{
  tree attrs = NULL_TREE;

  while (true)
    {
      tree attr = cp_parser_std_attribute (parser);
      if (attr == error_mark_node)
	break;
      if (attr != NULL_TREE)
	{
	  /* Prepend; the list is reversed before returning.  */
	  TREE_CHAIN (attr) = attrs;
	  attrs = attr;
	}
      /* A `,' means another attribute may follow; anything else ends
	 the list.  */
      if (cp_lexer_peek_token (parser->lexer)->type != CPP_COMMA)
	break;
      cp_lexer_consume_token (parser->lexer);
    }

  return nreverse (attrs);
}
/* Parse a standard C++-11 attribute specifier.

   attribute-specifier:
     [ [ attribute-list ] ]
     alignment-specifier

   alignment-specifier:
     alignas ( type-id ... [opt] )
     alignas ( alignment-expression ... [opt] ).  */

static tree
cp_parser_std_attribute_spec (cp_parser *parser)
{
  tree attributes = NULL_TREE;
  cp_token *token = cp_lexer_peek_token (parser->lexer);

  if (token->type == CPP_OPEN_SQUARE
      && cp_lexer_peek_nth_token (parser->lexer, 2)->type == CPP_OPEN_SQUARE)
    {
      /* Consume the `[['.  */
      cp_lexer_consume_token (parser->lexer);
      cp_lexer_consume_token (parser->lexer);

      attributes = cp_parser_std_attribute_list (parser);

      if (!cp_parser_require (parser, CPP_CLOSE_SQUARE, RT_CLOSE_SQUARE)
	  || !cp_parser_require (parser, CPP_CLOSE_SQUARE, RT_CLOSE_SQUARE))
	cp_parser_skip_to_end_of_statement (parser);
      else
	/* Warn about parsing c++11 attribute in non-c++11 mode, only
	   when we are sure that we have actually parsed them.  */
	maybe_warn_cpp0x (CPP0X_ATTRIBUTES);
    }
  else
    {
      tree alignas_expr;

      /* Look for an alignment-specifier.  */

      token = cp_lexer_peek_token (parser->lexer);

      if (token->type != CPP_KEYWORD
	  || token->keyword != RID_ALIGNAS)
	return NULL_TREE;

      cp_lexer_consume_token (parser->lexer);
      maybe_warn_cpp0x (CPP0X_ATTRIBUTES);

      if (cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN) == NULL)
	{
	  cp_parser_error (parser, "expected %<(%>");
	  return error_mark_node;
	}

      /* Try `alignas ( type-id )' first; if that tentative parse
	 fails, fall back to an alignment-expression.  */
      cp_parser_parse_tentatively (parser);
      alignas_expr = cp_parser_type_id (parser);

      if (!cp_parser_parse_definitely (parser))
	{
	  gcc_assert (alignas_expr == error_mark_node
		      || alignas_expr == NULL_TREE);

	  alignas_expr =
	    cp_parser_assignment_expression (parser);
	  if (alignas_expr == error_mark_node)
	    cp_parser_skip_to_end_of_statement (parser);
	  if (alignas_expr == NULL_TREE
	      || alignas_expr == error_mark_node)
	    return alignas_expr;
	}

      if (cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN) == NULL)
	{
	  cp_parser_error (parser, "expected %<)%>");
	  return error_mark_node;
	}

      alignas_expr = cxx_alignas_expr (alignas_expr);

      /* Build the C++-11 representation of an 'aligned'
	 attribute.  */
      attributes =
	build_tree_list (build_tree_list (get_identifier ("gnu"),
					  get_identifier ("aligned")),
			 build_tree_list (NULL_TREE, alignas_expr));
    }

  return attributes;
}
/* Parse a standard C++-11 attribute-specifier-seq.

   attribute-specifier-seq:
     attribute-specifier-seq [opt] attribute-specifier

   Returns the specifiers in source order, or error_mark_node if any
   specifier was malformed.  */

static tree
cp_parser_std_attribute_spec_seq (cp_parser *parser)
{
  tree specs = NULL;

  for (;;)
    {
      tree spec = cp_parser_std_attribute_spec (parser);
      if (spec == NULL_TREE)
	break;
      if (spec == error_mark_node)
	return error_mark_node;
      /* Prepend; reversed into source order below.  */
      TREE_CHAIN (spec) = specs;
      specs = spec;
    }

  return nreverse (specs);
}
/* Parse an optional `__extension__' keyword.  Returns TRUE if it is
   present, and FALSE otherwise.  *SAVED_PEDANTIC is set to the
   current value of the PEDANTIC flag, regardless of whether or not
   the `__extension__' keyword is present.  The caller is responsible
   for restoring the value of the PEDANTIC flag.  */

static bool
cp_parser_extension_opt (cp_parser* parser, int* saved_pedantic)
{
  /* Record the current PEDANTIC setting for the caller to restore.  */
  *saved_pedantic = pedantic;

  if (!cp_lexer_next_token_is_keyword (parser->lexer, RID_EXTENSION))
    return false;

  /* Eat the `__extension__' token, and suppress pedantic diagnostics
     while it is in effect.  */
  cp_lexer_consume_token (parser->lexer);
  pedantic = 0;
  return true;
}
/* Parse a label declaration.

   label-declaration:
     __label__ label-declarator-seq ;

   label-declarator-seq:
     identifier , label-declarator-seq
     identifier  */

static void
cp_parser_label_declaration (cp_parser* parser)
{
  /* The declaration must begin with `__label__'.  */
  cp_parser_require_keyword (parser, RID_LABEL, RT_LABEL);

  while (true)
    {
      /* Each declarator is a plain identifier; give up on a bad
	 one.  */
      tree name = cp_parser_identifier (parser);
      if (name == error_mark_node)
	break;
      /* Declare it as a label.  */
      finish_label_decl (name);
      /* A `;' terminates the declarator-seq.  */
      if (cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON))
	break;
      /* Otherwise a `,' must separate the declarators.  */
      cp_parser_require (parser, CPP_COMMA, RT_COMMA);
    }

  /* Look for the final `;'.  */
  cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON);
}
/* Support Functions */
/* Looks up NAME in the current scope, as given by PARSER->SCOPE.
NAME should have one of the representations used for an
id-expression. If NAME is the ERROR_MARK_NODE, the ERROR_MARK_NODE
is returned. If PARSER->SCOPE is a dependent type, then a
SCOPE_REF is returned.
If NAME is a TEMPLATE_ID_EXPR, then it will be immediately
returned; the name was already resolved when the TEMPLATE_ID_EXPR
was formed. Abstractly, such entities should not be passed to this
function, because they do not need to be looked up, but it is
simpler to check for this special case here, rather than at the
call-sites.
In cases not explicitly covered above, this function returns a
DECL, OVERLOAD, or baselink representing the result of the lookup.
If there was no entity with the indicated NAME, the ERROR_MARK_NODE
is returned.
If TAG_TYPE is not NONE_TYPE, it indicates an explicit type keyword
(e.g., "struct") that was used. In that case bindings that do not
refer to types are ignored.
If IS_TEMPLATE is TRUE, bindings that do not refer to templates are
ignored.
If IS_NAMESPACE is TRUE, bindings that do not refer to namespaces
are ignored.
If CHECK_DEPENDENCY is TRUE, names are not looked up in dependent
types.
If AMBIGUOUS_DECLS is non-NULL, *AMBIGUOUS_DECLS is set to a
TREE_LIST of candidates if name-lookup results in an ambiguity, and
NULL_TREE otherwise. */
static tree
cp_parser_lookup_name (cp_parser *parser, tree name,
		       enum tag_types tag_type,
		       bool is_template,
		       bool is_namespace,
		       bool check_dependency,
		       tree *ambiguous_decls,
		       location_t name_location)
{
  tree decl;
  tree object_type = parser->context->object_type;

  /* Assume that the lookup will be unambiguous.  */
  if (ambiguous_decls)
    *ambiguous_decls = NULL_TREE;

  /* Now that we have looked up the name, the OBJECT_TYPE (if any) is
     no longer valid.  Note that if we are parsing tentatively, and
     the parse fails, OBJECT_TYPE will be automatically restored.  */
  parser->context->object_type = NULL_TREE;

  if (name == error_mark_node)
    return error_mark_node;

  /* A template-id has already been resolved; there is no lookup to
     do.  */
  if (TREE_CODE (name) == TEMPLATE_ID_EXPR)
    return name;
  if (BASELINK_P (name))
    {
      gcc_assert (TREE_CODE (BASELINK_FUNCTIONS (name))
		  == TEMPLATE_ID_EXPR);
      return name;
    }

  /* A BIT_NOT_EXPR is used to represent a destructor.  By this point,
     it should already have been checked to make sure that the name
     used matches the type being destroyed.  */
  if (TREE_CODE (name) == BIT_NOT_EXPR)
    {
      tree type;

      /* Figure out to which type this destructor applies.  */
      if (parser->scope)
	type = parser->scope;
      else if (object_type)
	type = object_type;
      else
	type = current_class_type;
      /* If that's not a class type, there is no destructor.  */
      if (!type || !CLASS_TYPE_P (type))
	return error_mark_node;

      /* Force lazy declaration of the destructor if needed.  */
      if (CLASSTYPE_LAZY_DESTRUCTOR (type))
	lazily_declare_fn (sfk_destructor, type);
      if (!CLASSTYPE_DESTRUCTORS (type))
	return error_mark_node;
      /* If it was a class type, return the destructor.  */
      return CLASSTYPE_DESTRUCTORS (type);
    }

  /* By this point, the NAME should be an ordinary identifier.  If
     the id-expression was a qualified name, the qualifying scope is
     stored in PARSER->SCOPE at this point.  */
  gcc_assert (identifier_p (name));

  /* Perform the lookup.  */
  if (parser->scope)
    {
      bool dependent_p;

      if (parser->scope == error_mark_node)
	return error_mark_node;

      /* If the SCOPE is dependent, the lookup must be deferred until
	 the template is instantiated -- unless we are explicitly
	 looking up names in uninstantiated templates.  Even then, we
	 cannot look up the name if the scope is not a class type; it
	 might, for example, be a template type parameter.  */
      dependent_p = (TYPE_P (parser->scope)
		     && dependent_scope_p (parser->scope));
      if ((check_dependency || !CLASS_TYPE_P (parser->scope))
	  && dependent_p)
	/* Defer lookup.  */
	decl = error_mark_node;
      else
	{
	  tree pushed_scope = NULL_TREE;

	  /* If PARSER->SCOPE is a dependent type, then it must be a
	     class type, and we must not be checking dependencies;
	     otherwise, we would have processed this lookup above.  So
	     that PARSER->SCOPE is not considered a dependent base by
	     lookup_member, we must enter the scope here.  */
	  if (dependent_p)
	    pushed_scope = push_scope (parser->scope);

	  /* If the PARSER->SCOPE is a template specialization, it
	     may be instantiated during name lookup.  In that case,
	     errors may be issued.  Even if we rollback the current
	     tentative parse, those errors are valid.  */
	  decl = lookup_qualified_name (parser->scope, name,
					tag_type != none_type,
					/*complain=*/true);

	  /* 3.4.3.1: In a lookup in which the constructor is an acceptable
	     lookup result and the nested-name-specifier nominates a class C:
	       * if the name specified after the nested-name-specifier, when
	       looked up in C, is the injected-class-name of C (Clause 9), or
	       * if the name specified after the nested-name-specifier is the
	       same as the identifier or the simple-template-id's template-
	       name in the last component of the nested-name-specifier,
	     the name is instead considered to name the constructor of
	     class C.  [ Note: for example, the constructor is not an
	     acceptable lookup result in an elaborated-type-specifier so
	     the constructor would not be used in place of the
	     injected-class-name.  --end note ] Such a constructor name
	     shall be used only in the declarator-id of a declaration that
	     names a constructor or in a using-declaration.  */
	  if (tag_type == none_type
	      && DECL_SELF_REFERENCE_P (decl)
	      && same_type_p (DECL_CONTEXT (decl), parser->scope))
	    decl = lookup_qualified_name (parser->scope, ctor_identifier,
					  tag_type != none_type,
					  /*complain=*/true);

	  /* If we have a single function from a using decl, pull it out.  */
	  if (TREE_CODE (decl) == OVERLOAD
	      && !really_overloaded_fn (decl))
	    decl = OVL_FUNCTION (decl);

	  if (pushed_scope)
	    pop_scope (pushed_scope);
	}

      /* If the scope is a dependent type and either we deferred lookup or
	 we did lookup but didn't find the name, remember the name.  */
      if (decl == error_mark_node && TYPE_P (parser->scope)
	  && dependent_type_p (parser->scope))
	{
	  if (tag_type)
	    {
	      tree type;

	      /* The resolution to Core Issue 180 says that `struct
		 A::B' should be considered a type-name, even if `A'
		 is dependent.  */
	      type = make_typename_type (parser->scope, name, tag_type,
					 /*complain=*/tf_error);
	      if (type != error_mark_node)
		decl = TYPE_NAME (type);
	    }
	  else if (is_template
		   && (cp_parser_next_token_ends_template_argument_p (parser)
		       || cp_lexer_next_token_is (parser->lexer,
						  CPP_CLOSE_PAREN)))
	    decl = make_unbound_class_template (parser->scope,
						name, NULL_TREE,
						/*complain=*/tf_error);
	  else
	    decl = build_qualified_name (/*type=*/NULL_TREE,
					 parser->scope, name,
					 is_template);
	}
      parser->qualifying_scope = parser->scope;
      parser->object_scope = NULL_TREE;
    }
  else if (object_type)
    {
      /* Look up the name in the scope of the OBJECT_TYPE, unless the
	 OBJECT_TYPE is not a class.  */
      if (CLASS_TYPE_P (object_type))
	/* If the OBJECT_TYPE is a template specialization, it may
	   be instantiated during name lookup.  In that case, errors
	   may be issued.  Even if we rollback the current tentative
	   parse, those errors are valid.  */
	decl = lookup_member (object_type,
			      name,
			      /*protect=*/0,
			      tag_type != none_type,
			      tf_warning_or_error);
      else
	decl = NULL_TREE;

      if (!decl)
	/* Look it up in the enclosing context.  */
	decl = lookup_name_real (name, tag_type != none_type,
				 /*nonclass=*/0,
				 /*block_p=*/true, is_namespace, 0);
      parser->object_scope = object_type;
      parser->qualifying_scope = NULL_TREE;
    }
  else
    {
      /* Unqualified lookup with no object expression.  */
      decl = lookup_name_real (name, tag_type != none_type,
			       /*nonclass=*/0,
			       /*block_p=*/true, is_namespace, 0);
      parser->qualifying_scope = NULL_TREE;
      parser->object_scope = NULL_TREE;
    }

  /* If the lookup failed, let our caller know.  */
  if (!decl || decl == error_mark_node)
    return error_mark_node;

  /* Pull out the template from an injected-class-name (or multiple).  */
  if (is_template)
    decl = maybe_get_template_decl_from_type_decl (decl);

  /* If it's a TREE_LIST, the result of the lookup was ambiguous.  */
  if (TREE_CODE (decl) == TREE_LIST)
    {
      if (ambiguous_decls)
	*ambiguous_decls = decl;
      /* The error message we have to print is too complicated for
	 cp_parser_error, so we incorporate its actions directly.  */
      if (!cp_parser_simulate_error (parser))
	{
	  error_at (name_location, "reference to %qD is ambiguous",
		    name);
	  print_candidates (decl);
	}
      return error_mark_node;
    }

  gcc_assert (DECL_P (decl)
	      || TREE_CODE (decl) == OVERLOAD
	      || TREE_CODE (decl) == SCOPE_REF
	      || TREE_CODE (decl) == UNBOUND_CLASS_TEMPLATE
	      || BASELINK_P (decl));

  /* If we have resolved the name of a member declaration, check to
     see if the declaration is accessible.  When the name resolves to
     set of overloaded functions, accessibility is checked when
     overload resolution is done.

     During an explicit instantiation, access is not checked at all,
     as per [temp.explicit].  */
  if (DECL_P (decl))
    check_accessibility_of_qualified_id (decl, object_type, parser->scope);

  maybe_record_typedef_use (decl);

  return decl;
}
/* Convenience wrapper around cp_parser_lookup_name for the typical
   case: no tag keyword, not a template, not a namespace, dependency
   checking enabled, and ambiguous results discarded.  */

static tree
cp_parser_lookup_name_simple (cp_parser* parser, tree name, location_t location)
{
  return cp_parser_lookup_name (parser, name, none_type,
				/*is_template=*/false,
				/*is_namespace=*/false,
				/*check_dependency=*/true,
				/*ambiguous_decls=*/NULL,
				location);
}
/* If DECL is a TEMPLATE_DECL that can be treated like a TYPE_DECL in
   the current context, return the TYPE_DECL.  If TAG_NAME_P is
   true, the DECL indicates the class being defined in a class-head,
   or declared in an elaborated-type-specifier.

   Otherwise, return DECL.  */

static tree
cp_parser_maybe_treat_template_as_class (tree decl, bool tag_name_p)
{
  /* If the TEMPLATE_DECL is being declared as part of a class-head,
     the translation from TEMPLATE_DECL to TYPE_DECL occurs:

       struct A {
	 template <typename T> struct B;
       };

       template <typename T> struct A::B {};

     Similarly, in an elaborated-type-specifier:

       namespace N { struct X{}; }

       struct A {
	 template <typename T> friend struct N::X;
       };

     However, if the DECL refers to a class type, and we are in
     the scope of the class, then the name lookup automatically
     finds the TYPE_DECL created by build_self_reference rather
     than a TEMPLATE_DECL.  For example, in:

       template <class T> struct S {
	 S s;
       };

     there is no need to handle such case.  */
  if (tag_name_p && DECL_CLASS_TEMPLATE_P (decl))
    return DECL_TEMPLATE_RESULT (decl);

  return decl;
}
/* If too many, or too few, template-parameter lists apply to the
   declarator, issue an error message.  Returns TRUE if all went well,
   and FALSE otherwise.  */

static bool
cp_parser_check_declarator_template_parameters (cp_parser* parser,
						cp_declarator *declarator,
						location_t declarator_location)
{
  switch (declarator->kind)
    {
    case cdk_id:
      {
	/* Count how many template headers the declarator-id itself
	   accounts for.  */
	unsigned n_templates = 0;
	tree qual_scope = declarator->u.id.qualifying_scope;

	if (qual_scope)
	  n_templates = num_template_headers_for_class (qual_scope);
	else if (TREE_CODE (declarator->u.id.unqualified_name)
		 == TEMPLATE_ID_EXPR)
	  /* A declarator of the form `X<y>' uses one additional level
	     of template parameters.  */
	  ++n_templates;

	return cp_parser_check_template_parameters
	  (parser, n_templates, declarator_location, declarator);
      }

    case cdk_function:
    case cdk_array:
    case cdk_pointer:
    case cdk_reference:
    case cdk_ptrmem:
      /* These merely wrap another declarator; recurse on it.  */
      return (cp_parser_check_declarator_template_parameters
	      (parser, declarator->declarator, declarator_location));

    case cdk_error:
      return true;

    default:
      gcc_unreachable ();
    }
  return false;
}
/* NUM_TEMPLATES were used in the current declaration.  If that is
   invalid, return FALSE and issue an error messages.  Otherwise,
   return TRUE.  If DECLARATOR is non-NULL, then we are checking a
   declarator and we can print more accurate diagnostics.  */

static bool
cp_parser_check_template_parameters (cp_parser* parser,
				     unsigned num_templates,
				     location_t location,
				     cp_declarator *declarator)
{
  unsigned n_lists = parser->num_template_parameter_lists;

  /* An exact match is fine; so is exactly one extra list, which
     indicates a member template.  */
  if (n_lists == num_templates || n_lists == num_templates + 1)
    return true;

  if (n_lists < num_templates)
    {
      /* More template classes than parameter lists; something like:
	   template <class T> void S<T>::R<T>::f ();  */
      if (declarator && !current_function_decl)
	error_at (location, "specializing member %<%T::%E%> "
		  "requires %<template<>%> syntax",
		  declarator->u.id.qualifying_scope,
		  declarator->u.id.unqualified_name);
      else if (declarator)
	error_at (location, "invalid declaration of %<%T::%E%>",
		  declarator->u.id.qualifying_scope,
		  declarator->u.id.unqualified_name);
      else
	error_at (location, "too few template-parameter-lists");
      return false;
    }

  /* Otherwise there are too many lists; something like:
       template <class T> template <class U> void S::f();  */
  error_at (location, "too many template-parameter-lists");
  return false;
}
/* Parse an optional `::' token indicating that the following name is
   from the global namespace.  If so, PARSER->SCOPE is set to the
   GLOBAL_NAMESPACE.  Otherwise, PARSER->SCOPE is set to NULL_TREE,
   unless CURRENT_SCOPE_VALID_P is TRUE, in which case it is left alone.
   Returns the new value of PARSER->SCOPE, if the `::' token is
   present, and NULL_TREE otherwise.  */

static tree
cp_parser_global_scope_opt (cp_parser* parser, bool current_scope_valid_p)
{
  cp_token *next = cp_lexer_peek_token (parser->lexer);

  if (next->type == CPP_SCOPE)
    {
      /* A leading `::' anchors the upcoming lookup at the global
	 namespace rather than the current location.  */
      cp_lexer_consume_token (parser->lexer);
      parser->scope = global_namespace;
      parser->qualifying_scope = global_namespace;
      parser->object_scope = NULL_TREE;
      return parser->scope;
    }

  if (!current_scope_valid_p)
    {
      /* Reset the lookup scopes so stale state does not leak into
	 the next lookup.  */
      parser->scope = NULL_TREE;
      parser->qualifying_scope = NULL_TREE;
      parser->object_scope = NULL_TREE;
    }

  return NULL_TREE;
}
/* Returns TRUE if the upcoming token sequence is the start of a
constructor declarator. If FRIEND_P is true, the declarator is
preceded by the `friend' specifier. */
static bool
cp_parser_constructor_declarator_p (cp_parser *parser, bool friend_p)
{
bool constructor_p;
bool outside_class_specifier_p;
tree nested_name_specifier;
cp_token *next_token;
/* The common case is that this is not a constructor declarator, so
try to avoid doing lots of work if at all possible. It's not
valid declare a constructor at function scope. */
if (parser->in_function_body)
return false;
/* And only certain tokens can begin a constructor declarator. */
next_token = cp_lexer_peek_token (parser->lexer);
if (next_token->type != CPP_NAME
&& next_token->type != CPP_SCOPE
&& next_token->type != CPP_NESTED_NAME_SPECIFIER
&& next_token->type != CPP_TEMPLATE_ID)
return false;
/* Parse tentatively; we are going to roll back all of the tokens
consumed here. */
cp_parser_parse_tentatively (parser);
/* Assume that we are looking at a constructor declarator. */
constructor_p = true;
/* Look for the optional `::' operator. */
cp_parser_global_scope_opt (parser,
/*current_scope_valid_p=*/false);
/* Look for the nested-name-specifier. */
nested_name_specifier
= (cp_parser_nested_name_specifier_opt (parser,
/*typename_keyword_p=*/false,
/*check_dependency_p=*/false,
/*type_p=*/false,
/*is_declaration=*/false));
outside_class_specifier_p = (!at_class_scope_p ()
|| !TYPE_BEING_DEFINED (current_class_type)
|| friend_p);
/* Outside of a class-specifier, there must be a
nested-name-specifier. */
if (!nested_name_specifier && outside_class_specifier_p)
constructor_p = false;
else if (nested_name_specifier == error_mark_node)
constructor_p = false;
/* If we have a class scope, this is easy; DR 147 says that S::S always
names the constructor, and no other qualified name could. */
if (constructor_p && nested_name_specifier
&& CLASS_TYPE_P (nested_name_specifier))
{
tree id = cp_parser_unqualified_id (parser,
/*template_keyword_p=*/false,
/*check_dependency_p=*/false,
/*declarator_p=*/true,
/*optional_p=*/false);
if (is_overloaded_fn (id))
id = DECL_NAME (get_first_fn (id));
if (!constructor_name_p (id, nested_name_specifier))
constructor_p = false;
}
/* If we still think that this might be a constructor-declarator,
look for a class-name. */
else if (constructor_p)
{
/* If we have:
template <typename T> struct S {
S();
};
we must recognize that the nested `S' names a class. */
tree type_decl;
type_decl = cp_parser_class_name (parser,
/*typename_keyword_p=*/false,
/*template_keyword_p=*/false,
none_type,
/*check_dependency_p=*/false,
/*class_head_p=*/false,
/*is_declaration=*/false);
/* If there was no class-name, then this is not a constructor.
Otherwise, if we are in a class-specifier and we aren't
handling a friend declaration, check that its type matches
current_class_type (c++/38313). Note: error_mark_node
is left alone for error recovery purposes. */
constructor_p = (!cp_parser_error_occurred (parser)
&& (outside_class_specifier_p
|| type_decl == error_mark_node
|| same_type_p (current_class_type,
TREE_TYPE (type_decl))));
/* If we're still considering a constructor, we have to see a `(',
to begin the parameter-declaration-clause, followed by either a
`)', an `...', or a decl-specifier. We need to check for a
type-specifier to avoid being fooled into thinking that:
S (f) (int);
is a constructor. (It is actually a function named `f' that
takes one parameter (of type `int') and returns a value of type
`S'. */
if (constructor_p
&& !cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN))
constructor_p = false;
if (constructor_p
&& cp_lexer_next_token_is_not (parser->lexer, CPP_CLOSE_PAREN)
&& cp_lexer_next_token_is_not (parser->lexer, CPP_ELLIPSIS)
/* A parameter declaration begins with a decl-specifier,
which is either the "attribute" keyword, a storage class
specifier, or (usually) a type-specifier. */
&& !cp_lexer_next_token_is_decl_specifier_keyword (parser->lexer))
{
tree type;
tree pushed_scope = NULL_TREE;
unsigned saved_num_template_parameter_lists;
/* Names appearing in the type-specifier should be looked up
in the scope of the class. */
if (current_class_type)
type = NULL_TREE;
else
{
type = TREE_TYPE (type_decl);
if (TREE_CODE (type) == TYPENAME_TYPE)
{
type = resolve_typename_type (type,
/*only_current_p=*/false);
if (TREE_CODE (type) == TYPENAME_TYPE)
{
cp_parser_abort_tentative_parse (parser);
return false;
}
}
pushed_scope = push_scope (type);
}
/* Inside the constructor parameter list, surrounding
template-parameter-lists do not apply. */
saved_num_template_parameter_lists
= parser->num_template_parameter_lists;
parser->num_template_parameter_lists = 0;
/* Look for the type-specifier. */
cp_parser_type_specifier (parser,
CP_PARSER_FLAGS_NONE,
/*decl_specs=*/NULL,
/*is_declarator=*/true,
/*declares_class_or_enum=*/NULL,
/*is_cv_qualifier=*/NULL);
parser->num_template_parameter_lists
= saved_num_template_parameter_lists;
/* Leave the scope of the class. */
if (pushed_scope)
pop_scope (pushed_scope);
constructor_p = !cp_parser_error_occurred (parser);
}
}
/* We did not really want to consume any tokens. */
cp_parser_abort_tentative_parse (parser);
return constructor_p;
}
/* Parse the definition of the function given by the DECL_SPECIFIERS,
   ATTRIBUTES, and DECLARATOR.  The access checks have been deferred;
   they must be performed once we are in the scope of the function.

   Returns the function defined, or error_mark_node if the definition
   could not be started or is a redefinition.  */

static tree
cp_parser_function_definition_from_specifiers_and_declarator
  (cp_parser* parser,
   cp_decl_specifier_seq *decl_specifiers,
   tree attributes,
   const cp_declarator *declarator)
{
  /* Begin the function-definition.  */
  bool started_p = start_function (decl_specifiers, declarator, attributes);

  /* The things we're about to see are not directly qualified by any
     template headers we've seen thus far.  */
  reset_specialization ();

  /* If there were names looked up in the decl-specifier-seq that we
     did not check, check them now.  We must wait until we are in the
     scope of the function to perform the checks, since the function
     might be a friend.  */
  perform_deferred_access_checks (tf_warning_or_error);

  if (!started_p)
    {
      /* start_function failed; skip the entire function.  */
      cp_parser_skip_to_end_of_block_or_statement (parser);
      return error_mark_node;
    }

  cp_finalize_omp_declare_simd (parser, current_function_decl);
  parser->omp_declare_simd = NULL;

  if (DECL_INITIAL (current_function_decl) != error_mark_node)
    {
      /* Seen already, skip it.  An error message has already been
         output.  */
      cp_parser_skip_to_end_of_block_or_statement (parser);
      tree result = current_function_decl;
      current_function_decl = NULL_TREE;
      /* If this is a function from a class, pop the nested class.  */
      if (current_class_name)
        pop_nested_class ();
      return result;
    }

  /* Time the parse, distinguishing inline member functions from
     ordinary definitions.  */
  timevar_id_t tv = (DECL_DECLARED_INLINE_P (current_function_decl)
                     ? TV_PARSE_INLINE : TV_PARSE_FUNC);
  timevar_push (tv);
  tree fn = cp_parser_function_definition_after_declarator (parser,
                                                            /*inline_p=*/false);
  timevar_pop (tv);
  return fn;
}
/* Parse the part of a function-definition that follows the
   declarator.  INLINE_P is TRUE iff this function is an inline
   function defined within a class-specifier.

   Returns the function defined.  */

static tree
cp_parser_function_definition_after_declarator (cp_parser* parser,
                                                bool inline_p)
{
  tree fn;
  bool ctor_initializer_p = false;
  bool saved_in_unbraced_linkage_specification_p;
  bool saved_in_function_body;
  unsigned saved_num_template_parameter_lists;
  cp_token *token;
  /* Save and clear the parser's implicit-function-template state;
     the parameters of an enclosing implicit template do not apply
     inside this function body.  Restored before returning.  */
  bool fully_implicit_function_template_p
    = parser->fully_implicit_function_template_p;
  parser->fully_implicit_function_template_p = false;
  tree implicit_template_parms
    = parser->implicit_template_parms;
  parser->implicit_template_parms = 0;
  cp_binding_level* implicit_template_scope
    = parser->implicit_template_scope;
  parser->implicit_template_scope = 0;

  saved_in_function_body = parser->in_function_body;
  parser->in_function_body = true;
  /* If the next token is `return', then the code may be trying to
     make use of the "named return value" extension that G++ used to
     support.  */
  token = cp_lexer_peek_token (parser->lexer);
  if (cp_lexer_next_token_is_keyword (parser->lexer, RID_RETURN))
    {
      /* Consume the `return' keyword.  */
      cp_lexer_consume_token (parser->lexer);
      /* Look for the identifier that indicates what value is to be
         returned.  */
      cp_parser_identifier (parser);
      /* Issue an error message.  */
      error_at (token->location,
                "named return values are no longer supported");
      /* Skip tokens until we reach the start of the function body.
         Stop at EOF or the end of a pragma so we never read past the
         enclosing construct.  */
      while (true)
        {
          cp_token *token = cp_lexer_peek_token (parser->lexer);
          if (token->type == CPP_OPEN_BRACE
              || token->type == CPP_EOF
              || token->type == CPP_PRAGMA_EOL)
            break;
          cp_lexer_consume_token (parser->lexer);
        }
    }
  /* The `extern' in `extern "C" void f () { ... }' does not apply to
     anything declared inside `f'.  */
  saved_in_unbraced_linkage_specification_p
    = parser->in_unbraced_linkage_specification_p;
  parser->in_unbraced_linkage_specification_p = false;
  /* Inside the function, surrounding template-parameter-lists do not
     apply.  */
  saved_num_template_parameter_lists
    = parser->num_template_parameter_lists;
  parser->num_template_parameter_lists = 0;

  start_lambda_scope (current_function_decl);

  /* If the next token is `try', `__transaction_atomic', or
     `__transaction_relaxed`, then we are looking at either function-try-block
     or function-transaction-block.  Note that all of these include the
     function-body.  */
  if (cp_lexer_next_token_is_keyword (parser->lexer, RID_TRANSACTION_ATOMIC))
    ctor_initializer_p = cp_parser_function_transaction (parser,
        RID_TRANSACTION_ATOMIC);
  else if (cp_lexer_next_token_is_keyword (parser->lexer,
      RID_TRANSACTION_RELAXED))
    ctor_initializer_p = cp_parser_function_transaction (parser,
        RID_TRANSACTION_RELAXED);
  else if (cp_lexer_next_token_is_keyword (parser->lexer, RID_TRY))
    ctor_initializer_p = cp_parser_function_try_block (parser);
  else
    ctor_initializer_p = cp_parser_ctor_initializer_opt_and_function_body
      (parser, /*in_function_try_block=*/false);

  finish_lambda_scope ();

  /* Finish the function.  Bit 0 of the flag word records whether a
     ctor-initializer was seen; bit 1 whether this was an in-class
     inline definition.  */
  fn = finish_function ((ctor_initializer_p ? 1 : 0) |
                        (inline_p ? 2 : 0));
  /* Generate code for it, if necessary.  */
  expand_or_defer_fn (fn);
  /* Restore the saved values.  */
  parser->in_unbraced_linkage_specification_p
    = saved_in_unbraced_linkage_specification_p;
  parser->num_template_parameter_lists
    = saved_num_template_parameter_lists;
  parser->in_function_body = saved_in_function_body;
  parser->fully_implicit_function_template_p
    = fully_implicit_function_template_p;
  parser->implicit_template_parms
    = implicit_template_parms;
  parser->implicit_template_scope
    = implicit_template_scope;

  /* If we were inside an implicit function template, finish it now
     that the body has been parsed.  */
  if (parser->fully_implicit_function_template_p)
    finish_fully_implicit_template (parser, /*member_decl_opt=*/0);

  return fn;
}
/* Parse a template-declaration, assuming that the `export' (and
   `extern') keywords, if present, has already been scanned.  MEMBER_P
   is as for cp_parser_template_declaration.  */

static void
cp_parser_template_declaration_after_export (cp_parser* parser, bool member_p)
{
  tree decl = NULL_TREE;
  vec<deferred_access_check, va_gc> *checks;
  tree parameter_list;
  bool friend_p = false;
  bool need_lang_pop;
  cp_token *token;

  /* Look for the `template' keyword.  */
  token = cp_lexer_peek_token (parser->lexer);
  if (!cp_parser_require_keyword (parser, RID_TEMPLATE, RT_TEMPLATE))
    return;

  /* And the `<'.  */
  if (!cp_parser_require (parser, CPP_LESS, RT_LESS))
    return;
  if (at_class_scope_p () && current_function_decl)
    {
      /* 14.5.2.2 [temp.mem]

         A local class shall not have member templates.  */
      error_at (token->location,
                "invalid declaration of member template in local class");
      cp_parser_skip_to_end_of_block_or_statement (parser);
      return;
    }
  /* [temp]

     A template ... shall not have C linkage.  */
  if (current_lang_name == lang_name_c)
    {
      error_at (token->location, "template with C linkage");
      /* Give it C++ linkage to avoid confusing other parts of the
         front end.  */
      push_lang_context (lang_name_cplusplus);
      need_lang_pop = true;
    }
  else
    need_lang_pop = false;

  /* We cannot perform access checks on the template parameter
     declarations until we know what is being declared, just as we
     cannot check the decl-specifier list.  */
  push_deferring_access_checks (dk_deferred);

  /* If the next token is `>', then we have an invalid
     specialization.  Rather than complain about an invalid template
     parameter, issue an error message here.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_GREATER))
    {
      cp_parser_error (parser, "invalid explicit specialization");
      begin_specialization ();
      parameter_list = NULL_TREE;
    }
  else
    {
      /* Parse the template parameters.  */
      parameter_list = cp_parser_template_parameter_list (parser);
    }

  /* Get the deferred access checks from the parameter list.  These
     will be checked once we know what is being declared, as for a
     member template the checks must be performed in the scope of the
     class containing the member.  */
  checks = get_deferred_access_checks ();

  /* Look for the `>'.  */
  cp_parser_skip_to_end_of_template_parameter_list (parser);
  /* We just processed one more parameter list.  */
  ++parser->num_template_parameter_lists;
  /* If the next token is `template', there are more template
     parameters (e.g. `template <class T> template <class U> ...');
     recurse to handle the inner header.  */
  if (cp_lexer_next_token_is_keyword (parser->lexer,
                                      RID_TEMPLATE))
    cp_parser_template_declaration_after_export (parser, member_p);
  else if (cxx_dialect >= cxx11
           && cp_lexer_next_token_is_keyword (parser->lexer, RID_USING))
    decl = cp_parser_alias_declaration (parser);
  else
    {
      /* There are no access checks when parsing a template, as we do not
         know if a specialization will be a friend.  */
      push_deferring_access_checks (dk_no_check);
      token = cp_lexer_peek_token (parser->lexer);
      decl = cp_parser_single_declaration (parser,
                                           checks,
                                           member_p,
                                           /*explicit_specialization_p=*/false,
                                           &friend_p);
      pop_deferring_access_checks ();

      /* If this is a member template declaration, let the front
         end know.  */
      if (member_p && !friend_p && decl)
        {
          if (TREE_CODE (decl) == TYPE_DECL)
            cp_parser_check_access_in_redeclaration (decl, token->location);

          decl = finish_member_template_decl (decl);
        }
      else if (friend_p && decl
               && DECL_DECLARES_TYPE_P (decl))
        make_friend_class (current_class_type, TREE_TYPE (decl),
                           /*complain=*/true);
    }
  /* We are done with the current parameter list.  */
  --parser->num_template_parameter_lists;

  pop_deferring_access_checks ();

  /* Finish up.  */
  finish_template_decl (parameter_list);

  /* Check the template arguments for a literal operator template:
     valid forms are <char...> and, from C++14, <typename CharT,
     CharT...>.  */
  if (decl
      && DECL_DECLARES_FUNCTION_P (decl)
      && UDLIT_OPER_P (DECL_NAME (decl)))
    {
      bool ok = true;
      if (parameter_list == NULL_TREE)
        ok = false;
      else
        {
          int num_parms = TREE_VEC_LENGTH (parameter_list);
          if (num_parms == 1)
            {
              tree parm_list = TREE_VEC_ELT (parameter_list, 0);
              tree parm = INNERMOST_TEMPLATE_PARMS (parm_list);
              /* The single parameter must be a pack of char.  */
              if (TREE_TYPE (parm) != char_type_node
                  || !TEMPLATE_PARM_PARAMETER_PACK (DECL_INITIAL (parm)))
                ok = false;
            }
          else if (num_parms == 2 && cxx_dialect >= cxx14)
            {
              tree parm_type = TREE_VEC_ELT (parameter_list, 0);
              tree type = INNERMOST_TEMPLATE_PARMS (parm_type);
              tree parm_list = TREE_VEC_ELT (parameter_list, 1);
              tree parm = INNERMOST_TEMPLATE_PARMS (parm_list);
              /* The second parameter must be a pack of the first.  */
              if (TREE_TYPE (parm) != TREE_TYPE (type)
                  || !TEMPLATE_PARM_PARAMETER_PACK (DECL_INITIAL (parm)))
                ok = false;
            }
          else
            ok = false;
        }
      if (!ok)
        {
          if (cxx_dialect >= cxx14)
            error ("literal operator template %qD has invalid parameter list."
                   " Expected non-type template argument pack <char...>"
                   " or <typename CharT, CharT...>",
                   decl);
          else
            error ("literal operator template %qD has invalid parameter list."
                   " Expected non-type template argument pack <char...>",
                   decl);
        }
    }
  /* Register member declarations.  */
  if (member_p && !friend_p && decl && !DECL_CLASS_TEMPLATE_P (decl))
    finish_member_declaration (decl);
  /* For the erroneous case of a template with C linkage, we pushed an
     implicit C++ linkage scope; exit that scope now.  */
  if (need_lang_pop)
    pop_lang_context ();
  /* If DECL is a function template, we must return to parse it later.
     (Even though there is no definition, there might be default
     arguments that need handling.)  */
  if (member_p && decl
      && DECL_DECLARES_FUNCTION_P (decl))
    vec_safe_push (unparsed_funs_with_definitions, decl);
}
/* Perform the deferred access checks from a template-parameter-list.
   CHECKS is a TREE_LIST of access checks, as returned by
   get_deferred_access_checks.  */

static void
cp_parser_perform_template_parameter_access_checks (vec<deferred_access_check, va_gc> *checks)
{
  /* The checks arose while parsing a template-parameter-list, so
     perform them in that context.  */
  processing_template_parmlist++;
  perform_access_checks (checks, tf_warning_or_error);
  processing_template_parmlist--;
}
/* Parse a `decl-specifier-seq [opt] init-declarator [opt] ;' or
   `function-definition' sequence that follows a template header.
   If MEMBER_P is true, this declaration appears in a class scope.

   Returns the DECL for the declared entity.  If FRIEND_P is non-NULL,
   *FRIEND_P is set to TRUE iff the declaration is a friend.  */

static tree
cp_parser_single_declaration (cp_parser* parser,
                              vec<deferred_access_check, va_gc> *checks,
                              bool member_p,
                              bool explicit_specialization_p,
                              bool* friend_p)
{
  int declares_class_or_enum;
  tree decl = NULL_TREE;
  cp_decl_specifier_seq decl_specifiers;
  bool function_definition_p = false;
  cp_token *decl_spec_token_start;

  /* This function is only used when processing a template
     declaration.  */
  gcc_assert (innermost_scope_kind () == sk_template_parms
              || innermost_scope_kind () == sk_template_spec);

  /* Defer access checks until we know what is being declared.  */
  push_deferring_access_checks (dk_deferred);

  /* Try the `decl-specifier-seq [opt] init-declarator [opt]'
     alternative.  */
  decl_spec_token_start = cp_lexer_peek_token (parser->lexer);
  cp_parser_decl_specifier_seq (parser,
                                CP_PARSER_FLAGS_OPTIONAL,
                                &decl_specifiers,
                                &declares_class_or_enum);
  if (friend_p)
    *friend_p = cp_parser_friend_p (&decl_specifiers);

  /* There are no template typedefs.  */
  if (decl_spec_seq_has_spec_p (&decl_specifiers, ds_typedef))
    {
      error_at (decl_spec_token_start->location,
                "template declaration of %<typedef%>");
      decl = error_mark_node;
    }

  /* Gather up the access checks that occurred during the
     decl-specifier-seq.  */
  stop_deferring_access_checks ();

  /* Check for the declaration of a template class.  */
  if (declares_class_or_enum)
    {
      if (cp_parser_declares_only_class_p (parser))
        {
          decl = shadow_tag (&decl_specifiers);

          /* In this case:

               struct C {
                 friend template <typename T> struct A<T>::B;
               };

             A<T>::B will be represented by a TYPENAME_TYPE, and
             therefore not recognized by shadow_tag.  */
          if (friend_p && *friend_p
              && !decl
              && decl_specifiers.type
              && TYPE_P (decl_specifiers.type))
            decl = decl_specifiers.type;

          if (decl && decl != error_mark_node)
            decl = TYPE_NAME (decl);
          else
            decl = error_mark_node;

          /* Perform access checks for template parameters.  */
          cp_parser_perform_template_parameter_access_checks (checks);
        }
    }

  /* Complain about missing 'typename' or other invalid type names.  */
  if (!decl_specifiers.any_type_specifiers_p
      && cp_parser_parse_and_diagnose_invalid_type_name (parser))
    {
      /* cp_parser_parse_and_diagnose_invalid_type_name calls
         cp_parser_skip_to_end_of_block_or_statement, so don't try to parse
         the rest of this declaration.  */
      decl = error_mark_node;
      goto out;
    }

  /* If it's not a template class, try for a template function.  If
     the next token is a `;', then this declaration does not declare
     anything.  But, if there were errors in the decl-specifiers, then
     the error might well have come from an attempted class-specifier.
     In that case, there's no need to warn about a missing declarator.  */
  if (!decl
      && (cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON)
          || decl_specifiers.type != error_mark_node))
    {
      decl = cp_parser_init_declarator (parser,
                                        &decl_specifiers,
                                        checks,
                                        /*function_definition_allowed_p=*/true,
                                        member_p,
                                        declares_class_or_enum,
                                        &function_definition_p,
                                        NULL, NULL);

      /* 7.1.1-1 [dcl.stc]

         A storage-class-specifier shall not be specified in an explicit
         specialization...  */
      if (decl
          && explicit_specialization_p
          && decl_specifiers.storage_class != sc_none)
        {
          error_at (decl_spec_token_start->location,
                    "explicit template specialization cannot have a storage class");
          decl = error_mark_node;
        }

      if (decl && VAR_P (decl))
        check_template_variable (decl);
    }

  /* Look for a trailing `;' after the declaration.  */
  if (!function_definition_p
      && (decl == error_mark_node
          || !cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON)))
    cp_parser_skip_to_end_of_block_or_statement (parser);

 out:
  pop_deferring_access_checks ();

  /* Clear any current qualification; whatever comes next is the start
     of something new.  */
  parser->scope = NULL_TREE;
  parser->qualifying_scope = NULL_TREE;
  parser->object_scope = NULL_TREE;

  return decl;
}
/* Parse a cast-expression that is not the operand of a unary "&".  */

static tree
cp_parser_simple_cast_expression (cp_parser *parser)
{
  /* Forward with neither address-of context, cast context, nor
     decltype context.  */
  return cp_parser_cast_expression (parser,
                                    /*address_p=*/false,
                                    /*cast_p=*/false,
                                    /*decltype*/false,
                                    NULL);
}
/* Parse a functional cast to TYPE.  Returns an expression
   representing the cast.  */

static tree
cp_parser_functional_cast (cp_parser* parser, tree type)
{
  vec<tree, va_gc> *vec;
  tree expression_list;
  tree cast;
  bool nonconst_p;

  if (!type)
    type = error_mark_node;

  /* A braced-init-list here means list-initialization, e.g. T{...};
     handle it as a compound literal.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE))
    {
      cp_lexer_set_source_position (parser->lexer);
      maybe_warn_cpp0x (CPP0X_INITIALIZER_LISTS);
      expression_list = cp_parser_braced_list (parser, &nonconst_p);
      CONSTRUCTOR_IS_DIRECT_INIT (expression_list) = 1;
      if (TREE_CODE (type) == TYPE_DECL)
        type = TREE_TYPE (type);
      return finish_compound_literal (type, expression_list,
                                      tf_warning_or_error);
    }

  /* Otherwise, parse the parenthesized expression-list, T(...).  */
  vec = cp_parser_parenthesized_expression_list (parser, non_attr,
                                                 /*cast_p=*/true,
                                                 /*allow_expansion_p=*/true,
                                                 /*non_constant_p=*/NULL);
  if (vec == NULL)
    expression_list = error_mark_node;
  else
    {
      expression_list = build_tree_list_vec (vec);
      release_tree_vector (vec);
    }

  cast = build_functional_cast (type, expression_list,
                                tf_warning_or_error);
  /* [expr.const]/1: In an integral constant expression "only type
     conversions to integral or enumeration type can be used".  */
  if (TREE_CODE (type) == TYPE_DECL)
    type = TREE_TYPE (type);
  if (cast != error_mark_node
      && !cast_valid_in_integral_constant_expression_p (type)
      && cp_parser_non_integral_constant_expression (parser,
                                                     NIC_CONSTRUCTOR))
    return error_mark_node;
  return cast;
}
/* Save the tokens that make up the body of a member function defined
   in a class-specifier.  The DECL_SPECIFIERS and DECLARATOR have
   already been parsed.  The ATTRIBUTES are any GNU "__attribute__"
   specifiers applied to the declaration.  Returns the FUNCTION_DECL
   for the member function.  The body itself is not parsed here; it is
   cached and queued on unparsed_funs_with_definitions for parsing
   once the class is complete.  */

static tree
cp_parser_save_member_function_body (cp_parser* parser,
                                     cp_decl_specifier_seq *decl_specifiers,
                                     cp_declarator *declarator,
                                     tree attributes)
{
  cp_token *first;
  cp_token *last;
  tree fn;

  /* Create the FUNCTION_DECL.  */
  fn = grokmethod (decl_specifiers, declarator, attributes);
  cp_finalize_omp_declare_simd (parser, fn);
  /* If something went badly wrong, bail out now.  */
  if (fn == error_mark_node)
    {
      /* If there's a function-body, skip it.  */
      if (cp_parser_token_starts_function_definition_p
          (cp_lexer_peek_token (parser->lexer)))
        cp_parser_skip_to_end_of_block_or_statement (parser);
      return error_mark_node;
    }

  /* Remember it, if there are default args to post process.  */
  cp_parser_save_default_args (parser, fn);

  /* Save away the tokens that make up the body of the
     function.  */
  first = parser->lexer->next_token;
  /* Handle function try blocks.  */
  if (cp_lexer_next_token_is_keyword (parser->lexer, RID_TRY))
    cp_lexer_consume_token (parser->lexer);
  /* We can have braced-init-list mem-initializers before the fn body.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_COLON))
    {
      cp_lexer_consume_token (parser->lexer);
      while (cp_lexer_next_token_is_not (parser->lexer, CPP_OPEN_BRACE))
        {
          /* cache_group will stop after an un-nested { } pair, too.  */
          if (cp_parser_cache_group (parser, CPP_CLOSE_PAREN, /*depth=*/0))
            break;
          /* variadic mem-inits have ... after the ')'.  */
          if (cp_lexer_next_token_is (parser->lexer, CPP_ELLIPSIS))
            cp_lexer_consume_token (parser->lexer);
        }
    }
  /* Cache the function body proper.  */
  cp_parser_cache_group (parser, CPP_CLOSE_BRACE, /*depth=*/0);
  /* Handle function try blocks: cache each catch handler as well.  */
  while (cp_lexer_next_token_is_keyword (parser->lexer, RID_CATCH))
    cp_parser_cache_group (parser, CPP_CLOSE_BRACE, /*depth=*/0);
  last = parser->lexer->next_token;

  /* Save away the inline definition; we will process it when the
     class is complete.  */
  DECL_PENDING_INLINE_INFO (fn) = cp_token_cache_new (first, last);
  DECL_PENDING_INLINE_P (fn) = 1;

  /* We need to know that this was defined in the class, so that
     friend templates are handled correctly.  */
  DECL_INITIALIZED_IN_CLASS_P (fn) = 1;

  /* Add FN to the queue of functions to be parsed later.  */
  vec_safe_push (unparsed_funs_with_definitions, fn);

  return fn;
}
/* Save the tokens that make up the in-class initializer for a non-static
   data member.  Returns a DEFAULT_ARG.  */

static tree
cp_parser_save_nsdmi (cp_parser* parser)
{
  /* An NSDMI is cached exactly like a default argument, with the
     nsdmi flag set.  */
  return cp_parser_cache_defarg (parser, /*nsdmi=*/true);
}
/* Parse a template-argument-list, as well as the trailing ">" (but
   not the opening "<").  See cp_parser_template_argument_list for the
   return value.  */

static tree
cp_parser_enclosed_template_argument_list (cp_parser* parser)
{
  tree arguments;
  tree saved_scope;
  tree saved_qualifying_scope;
  tree saved_object_scope;
  bool saved_greater_than_is_operator_p;
  int saved_unevaluated_operand;
  int saved_inhibit_evaluation_warnings;

  /* [temp.names]

     When parsing a template-id, the first non-nested `>' is taken as
     the end of the template-argument-list rather than a greater-than
     operator.  */
  saved_greater_than_is_operator_p
    = parser->greater_than_is_operator_p;
  parser->greater_than_is_operator_p = false;
  /* Parsing the argument list may modify SCOPE, so we save it
     here.  */
  saved_scope = parser->scope;
  saved_qualifying_scope = parser->qualifying_scope;
  saved_object_scope = parser->object_scope;
  /* We need to evaluate the template arguments, even though this
     template-id may be nested within a "sizeof".  */
  saved_unevaluated_operand = cp_unevaluated_operand;
  cp_unevaluated_operand = 0;
  saved_inhibit_evaluation_warnings = c_inhibit_evaluation_warnings;
  c_inhibit_evaluation_warnings = 0;
  /* Parse the template-argument-list itself.  An immediate `>' (or
     `>>', i.e. two closing angle brackets) means an empty list.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_GREATER)
      || cp_lexer_next_token_is (parser->lexer, CPP_RSHIFT))
    arguments = NULL_TREE;
  else
    arguments = cp_parser_template_argument_list (parser);
  /* Look for the `>' that ends the template-argument-list.  If we find
     a '>>' instead, it's probably just a typo.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_RSHIFT))
    {
      if (cxx_dialect != cxx98)
        {
          /* In C++0x, a `>>' in a template argument list or cast
             expression is considered to be two separate `>'
             tokens.  So, change the current token to a `>', but don't
             consume it: it will be consumed later when the outer
             template argument list (or cast expression) is parsed.
             Note that this replacement of `>' for `>>' is necessary
             even if we are parsing tentatively: in the tentative
             case, after calling
             cp_parser_enclosed_template_argument_list we will always
             throw away all of the template arguments and the first
             closing `>', either because the template argument list
             was erroneous or because we are replacing those tokens
             with a CPP_TEMPLATE_ID token.  The second `>' (which will
             not have been thrown away) is needed either to close an
             outer template argument list or to complete a new-style
             cast.  */
          cp_token *token = cp_lexer_peek_token (parser->lexer);
          token->type = CPP_GREATER;
        }
      else if (!saved_greater_than_is_operator_p)
        {
          /* If we're in a nested template argument list, the '>>' has
             to be a typo for '> >'.  We emit the error message, but we
             continue parsing and we push a '>' as next token, so that
             the argument list will be parsed correctly.  Note that the
             global source location is still on the token before the
             '>>', so we need to say explicitly where we want it.  */
          cp_token *token = cp_lexer_peek_token (parser->lexer);
          error_at (token->location, "%<>>%> should be %<> >%> "
                    "within a nested template argument list");

          token->type = CPP_GREATER;
        }
      else
        {
          /* If this is not a nested template argument list, the '>>'
             is a typo for '>'.  Emit an error message and continue.
             Same deal about the token location, but here we can get it
             right by consuming the '>>' before issuing the diagnostic.  */
          cp_token *token = cp_lexer_consume_token (parser->lexer);
          error_at (token->location,
                    "spurious %<>>%>, use %<>%> to terminate "
                    "a template argument list");
        }
    }
  else
    cp_parser_skip_to_end_of_template_parameter_list (parser);
  /* The `>' token might be a greater-than operator again now.  */
  parser->greater_than_is_operator_p
    = saved_greater_than_is_operator_p;
  /* Restore the SAVED_SCOPE.  */
  parser->scope = saved_scope;
  parser->qualifying_scope = saved_qualifying_scope;
  parser->object_scope = saved_object_scope;
  cp_unevaluated_operand = saved_unevaluated_operand;
  c_inhibit_evaluation_warnings = saved_inhibit_evaluation_warnings;

  return arguments;
}
/* MEMBER_FUNCTION is a member function, or a friend.  If default
   arguments, or the body of the function have not yet been parsed,
   parse them now.  */

static void
cp_parser_late_parsing_for_member (cp_parser* parser, tree member_function)
{
  timevar_push (TV_PARSE_INMETH);
  /* If this member is a template, get the underlying
     FUNCTION_DECL.  */
  if (DECL_FUNCTION_TEMPLATE_P (member_function))
    member_function = DECL_TEMPLATE_RESULT (member_function);

  /* There should not be any class definitions in progress at this
     point; the bodies of members are only parsed outside of all class
     definitions.  */
  gcc_assert (parser->num_classes_being_defined == 0);
  /* While we're parsing the member functions we might encounter more
     classes.  We want to handle them right away, but we don't want
     them getting mixed up with functions that are currently in the
     queue.  */
  push_unparsed_function_queues (parser);

  /* Make sure that any template parameters are in scope.  */
  maybe_begin_member_template_processing (member_function);

  /* If the body of the function has not yet been parsed, parse it
     now.  */
  if (DECL_PENDING_INLINE_P (member_function))
    {
      tree function_scope;
      cp_token_cache *tokens;

      /* The function is no longer pending; we are processing it.  */
      tokens = DECL_PENDING_INLINE_INFO (member_function);
      DECL_PENDING_INLINE_INFO (member_function) = NULL;
      DECL_PENDING_INLINE_P (member_function) = 0;

      /* If this is a local class, enter the scope of the containing
         function.  */
      function_scope = current_function_decl;
      if (function_scope)
        push_function_context ();

      /* Push the body of the function onto the lexer stack, so the
         cached tokens are what the parser reads next.  */
      cp_parser_push_lexer_for_tokens (parser, tokens);

      /* Let the front end know that we are going to be defining this
         function.  */
      start_preparsed_function (member_function, NULL_TREE,
                                SF_PRE_PARSED | SF_INCLASS_INLINE);

      /* Don't do access checking if it is a templated function.  */
      if (processing_template_decl)
        push_deferring_access_checks (dk_no_check);

      /* #pragma omp declare reduction needs special parsing.  */
      if (DECL_OMP_DECLARE_REDUCTION_P (member_function))
        {
          parser->lexer->in_pragma = true;
          cp_parser_omp_declare_reduction_exprs (member_function, parser);
          finish_function (/*inline*/2);
          cp_check_omp_declare_reduction (member_function);
        }
      else
        /* Now, parse the body of the function.  */
        cp_parser_function_definition_after_declarator (parser,
                                                        /*inline_p=*/true);

      if (processing_template_decl)
        pop_deferring_access_checks ();

      /* Leave the scope of the containing function.  */
      if (function_scope)
        pop_function_context ();
      cp_parser_pop_lexer (parser);
    }

  /* Remove any template parameters from the symbol table.  */
  maybe_end_member_template_processing ();

  /* Restore the queue.  */
  pop_unparsed_function_queues (parser);
  timevar_pop (TV_PARSE_INMETH);
}
/* If DECL contains any default args, remember it on the unparsed
   functions queue.  */

static void
cp_parser_save_default_args (cp_parser* parser, tree decl)
{
  /* Walk the parameter-type-list; a non-NULL TREE_PURPOSE marks a
     parameter carrying a default argument.  */
  for (tree probe = TYPE_ARG_TYPES (TREE_TYPE (decl));
       probe != NULL_TREE;
       probe = TREE_CHAIN (probe))
    {
      if (!TREE_PURPOSE (probe))
        continue;
      /* Queue DECL (together with its class context) so the default
         args can be parsed once the enclosing class is complete; one
         entry per function is enough.  */
      cp_default_arg_entry entry = {current_class_type, decl};
      vec_safe_push (unparsed_funs_with_default_args, entry);
      break;
    }
}
/* DEFAULT_ARG contains the saved tokens for the initializer of DECL,
   which is either a FIELD_DECL or PARM_DECL.  Parse it and return
   the result.  For a PARM_DECL, PARMTYPE is the corresponding type
   from the parameter-type-list.  */

static tree
cp_parser_late_parse_one_default_arg (cp_parser *parser, tree decl,
                                      tree default_arg, tree parmtype)
{
  cp_token_cache *tokens;
  tree parsed_arg;
  bool dummy;

  if (default_arg == error_mark_node)
    return error_mark_node;

  /* Push the saved tokens for the default argument onto the parser's
     lexer stack.  */
  tokens = DEFARG_TOKENS (default_arg);
  cp_parser_push_lexer_for_tokens (parser, tokens);

  start_lambda_scope (decl);

  /* Parse the default argument.  */
  parsed_arg = cp_parser_initializer (parser, &dummy, &dummy);
  if (BRACE_ENCLOSED_INITIALIZER_P (parsed_arg))
    maybe_warn_cpp0x (CPP0X_INITIALIZER_LISTS);

  finish_lambda_scope ();

  if (parsed_arg == error_mark_node)
    cp_parser_skip_to_end_of_statement (parser);

  if (!processing_template_decl)
    {
      /* In a non-template class, check conversions now.  In a template,
         we'll wait and instantiate these as needed.  */
      if (TREE_CODE (decl) == PARM_DECL)
        parsed_arg = check_default_argument (parmtype, parsed_arg,
                                             tf_warning_or_error);
      else
        /* DECL is a FIELD_DECL; digest its NSDMI.  */
        parsed_arg = digest_nsdmi_init (decl, parsed_arg);
    }

  /* If the token stream has not been completely used up, then
     there was extra junk after the end of the default
     argument.  */
  if (!cp_lexer_next_token_is (parser->lexer, CPP_EOF))
    {
      if (TREE_CODE (decl) == PARM_DECL)
        cp_parser_error (parser, "expected %<,%>");
      else
        cp_parser_error (parser, "expected %<;%>");
    }

  /* Revert to the main lexer.  */
  cp_parser_pop_lexer (parser);

  return parsed_arg;
}
/* FIELD is a non-static data member with an initializer which we saved for
   later; parse it now.  */

static void
cp_parser_late_parsing_nsdmi (cp_parser *parser, tree field)
{
  /* Bring any template parameters of the enclosing class back into
     scope, and keep newly-encountered functions out of the current
     unparsed-function queue while we parse.  */
  maybe_begin_member_template_processing (field);
  push_unparsed_function_queues (parser);

  tree init = cp_parser_late_parse_one_default_arg (parser, field,
                                                    DECL_INITIAL (field),
                                                    NULL_TREE);

  pop_unparsed_function_queues (parser);
  maybe_end_member_template_processing ();

  /* Replace the cached token run with the parsed initializer.  */
  DECL_INITIAL (field) = init;
}
/* FN is a FUNCTION_DECL which may contain a parameter with an
   unparsed DEFAULT_ARG.  Parse the default args now.  This function
   assumes that the current scope is the scope in which the default
   argument should be processed.  */

static void
cp_parser_late_parsing_default_args (cp_parser *parser, tree fn)
{
  bool saved_local_variables_forbidden_p;
  tree parm, parmdecl;

  /* While we're parsing the default args, we might (due to the
     statement expression extension) encounter more classes.  We want
     to handle them right away, but we don't want them getting mixed
     up with default args that are currently in the queue.  */
  push_unparsed_function_queues (parser);

  /* Local variable names (and the `this' keyword) may not appear
     in a default argument.  */
  saved_local_variables_forbidden_p = parser->local_variables_forbidden_p;
  parser->local_variables_forbidden_p = true;

  push_defarg_context (fn);

  /* Walk the parameter-type-list and the PARM_DECL chain in parallel;
     TREE_PURPOSE of a parameter-type-list node holds the (possibly
     still unparsed) default argument.  */
  for (parm = TYPE_ARG_TYPES (TREE_TYPE (fn)),
         parmdecl = DECL_ARGUMENTS (fn);
       parm && parm != void_list_node;
       parm = TREE_CHAIN (parm),
         parmdecl = DECL_CHAIN (parmdecl))
    {
      tree default_arg = TREE_PURPOSE (parm);
      tree parsed_arg;
      vec<tree, va_gc> *insts;
      tree copy;
      unsigned ix;

      if (!default_arg)
        continue;

      if (TREE_CODE (default_arg) != DEFAULT_ARG)
        /* This can happen for a friend declaration for a function
           already declared with default arguments.  */
        continue;

      parsed_arg
        = cp_parser_late_parse_one_default_arg (parser, parmdecl,
                                                default_arg,
                                                TREE_VALUE (parm));
      if (parsed_arg == error_mark_node)
        continue;

      TREE_PURPOSE (parm) = parsed_arg;

      /* Update any instantiations we've already created.
         (The original text here had a mis-encoded "&copy" argument:
         the HTML entity had collapsed to the character U+00A9.)  */
      for (insts = DEFARG_INSTANTIATIONS (default_arg), ix = 0;
           vec_safe_iterate (insts, ix, &copy); ix++)
        TREE_PURPOSE (copy) = parsed_arg;
    }

  pop_defarg_context ();

  /* Make sure no default arg is missing.  */
  check_default_args (fn);

  /* Restore the state of local_variables_forbidden_p.  */
  parser->local_variables_forbidden_p = saved_local_variables_forbidden_p;

  /* Restore the queue.  */
  pop_unparsed_function_queues (parser);
}
/* Subroutine of cp_parser_sizeof_operand, for handling C++11

     sizeof ... ( identifier )

   where the 'sizeof' token has already been consumed.  */

static tree
cp_parser_sizeof_pack (cp_parser *parser)
{
  /* Consume the `...'.  */
  cp_lexer_consume_token (parser->lexer);
  maybe_warn_variadic_templates ();

  /* The parentheses are required by the grammar, but we accept their
     absence with a permerror rather than a hard failure.  */
  bool paren = cp_lexer_next_token_is (parser->lexer, CPP_OPEN_PAREN);
  if (paren)
    cp_lexer_consume_token (parser->lexer);
  else
    permerror (cp_lexer_peek_token (parser->lexer)->location,
	       "%<sizeof...%> argument must be surrounded by parentheses");

  cp_token *token = cp_lexer_peek_token (parser->lexer);
  tree name = cp_parser_identifier (parser);
  if (name == error_mark_node)
    return error_mark_node;
  /* The name is not qualified.  */
  parser->scope = NULL_TREE;
  parser->qualifying_scope = NULL_TREE;
  parser->object_scope = NULL_TREE;
  tree expr = cp_parser_lookup_name_simple (parser, name, token->location);
  if (expr == error_mark_node)
    cp_parser_name_lookup_error (parser, name, expr, NLE_NULL,
				 token->location);
  /* For a type parameter pack use the type itself; for a non-type
     template parameter use its value.  */
  if (TREE_CODE (expr) == TYPE_DECL)
    expr = TREE_TYPE (expr);
  else if (TREE_CODE (expr) == CONST_DECL)
    expr = DECL_INITIAL (expr);
  expr = make_pack_expansion (expr);

  if (paren)
    cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);

  return expr;
}
/* Parse the operand of `sizeof' (or a similar operator).  Returns
   either a TYPE or an expression, depending on the form of the
   input.  The KEYWORD indicates which kind of expression we have
   encountered.  */

static tree
cp_parser_sizeof_operand (cp_parser* parser, enum rid keyword)
{
  tree expr = NULL_TREE;
  const char *saved_message;
  char *tmp;
  bool saved_integral_constant_expression_p;
  bool saved_non_integral_constant_expression_p;

  /* If it's a `...', then we are computing the length of a parameter
     pack.  */
  if (keyword == RID_SIZEOF
      && cp_lexer_next_token_is (parser->lexer, CPP_ELLIPSIS))
    return cp_parser_sizeof_pack (parser);

  /* Types cannot be defined in a `sizeof' expression.  Save away the
     old message.  */
  saved_message = parser->type_definition_forbidden_message;
  /* And create the new one.  */
  tmp = concat ("types may not be defined in %<",
		IDENTIFIER_POINTER (ridpointers[keyword]),
		"%> expressions", NULL);
  parser->type_definition_forbidden_message = tmp;

  /* The restrictions on constant-expressions do not apply inside
     sizeof expressions.  */
  saved_integral_constant_expression_p
    = parser->integral_constant_expression_p;
  saved_non_integral_constant_expression_p
    = parser->non_integral_constant_expression_p;
  parser->integral_constant_expression_p = false;

  /* Do not actually evaluate the expression.  */
  ++cp_unevaluated_operand;
  ++c_inhibit_evaluation_warnings;
  /* If it's a `(', then we might be looking at the type-id
     construction.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_PAREN))
    {
      tree type = NULL_TREE;

      /* We can't be sure yet whether we're looking at a type-id or an
	 expression.  */
      cp_parser_parse_tentatively (parser);
      /* Note: as a GNU Extension, compound literals are considered
	 postfix-expressions as they are in C99, so they are valid
	 arguments to sizeof.  See comment in cp_parser_cast_expression
	 for details.  */
      if (cp_parser_compound_literal_p (parser))
	cp_parser_simulate_error (parser);
      else
	{
	  bool saved_in_type_id_in_expr_p = parser->in_type_id_in_expr_p;
	  parser->in_type_id_in_expr_p = true;
	  /* Look for the type-id.  */
	  type = cp_parser_type_id (parser);
	  /* Look for the closing `)'.  */
	  cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);
	  parser->in_type_id_in_expr_p = saved_in_type_id_in_expr_p;
	}

      /* If all went well, then we're done.  */
      if (cp_parser_parse_definitely (parser))
	{
	  cp_decl_specifier_seq decl_specs;

	  /* Build a trivial decl-specifier-seq.  */
	  clear_decl_specs (&decl_specs);
	  decl_specs.type = type;

	  /* Call grokdeclarator to figure out what type this is.  */
	  expr = grokdeclarator (NULL,
				 &decl_specs,
				 TYPENAME,
				 /*initialized=*/0,
				 /*attrlist=*/NULL);
	}
    }

  /* If the type-id production did not work out, then we must be
     looking at the unary-expression production.  */
  if (!expr)
    expr = cp_parser_unary_expression (parser);

  /* Go back to evaluating expressions.  */
  --cp_unevaluated_operand;
  --c_inhibit_evaluation_warnings;

  /* Free the message we created.  */
  free (tmp);
  /* And restore the old one.  */
  parser->type_definition_forbidden_message = saved_message;
  parser->integral_constant_expression_p
    = saved_integral_constant_expression_p;
  parser->non_integral_constant_expression_p
    = saved_non_integral_constant_expression_p;

  return expr;
}
/* Return true when the current declaration consists of a
   decl-specifier-seq alone (e.g. `class C;'), i.e. no declarator
   follows.  */

static bool
cp_parser_declares_only_class_p (cp_parser *parser)
{
  /* Without a declarator, the next token after the specifiers can
     only be a `;' or a `,'.  */
  cp_token *next = cp_lexer_peek_token (parser->lexer);
  return next->type == CPP_SEMICOLON || next->type == CPP_COMMA;
}
/* Update the DECL_SPECS to reflect the storage class indicated by
   KEYWORD.  */

static void
cp_parser_set_storage_class (cp_parser *parser,
			     cp_decl_specifier_seq *decl_specs,
			     enum rid keyword,
			     cp_token *token)
{
  cp_storage_class storage_class;

  /* A storage class is never valid directly inside an unbraced
     `extern "C"'-style linkage specification.  */
  if (parser->in_unbraced_linkage_specification_p)
    {
      error_at (token->location, "invalid use of %qD in linkage specification",
		ridpointers[keyword]);
      return;
    }
  else if (decl_specs->storage_class != sc_none)
    {
      /* Two storage classes in one declaration; record the conflict
	 and let grokdeclarator diagnose it later.  */
      decl_specs->conflicting_specifiers_p = true;
      return;
    }

  /* GNU `__thread' must precede `extern' or `static'; the reverse
     order only earns a pedwarn.  */
  if ((keyword == RID_EXTERN || keyword == RID_STATIC)
      && decl_spec_seq_has_spec_p (decl_specs, ds_thread)
      && decl_specs->gnu_thread_keyword_p)
    {
      pedwarn (decl_specs->locations[ds_thread], 0,
	       "%<__thread%> before %qD", ridpointers[keyword]);
    }

  switch (keyword)
    {
    case RID_AUTO:
      storage_class = sc_auto;
      break;
    case RID_REGISTER:
      storage_class = sc_register;
      break;
    case RID_STATIC:
      storage_class = sc_static;
      break;
    case RID_EXTERN:
      storage_class = sc_extern;
      break;
    case RID_MUTABLE:
      storage_class = sc_mutable;
      break;
    default:
      gcc_unreachable ();
    }
  decl_specs->storage_class = storage_class;
  set_and_check_decl_spec_loc (decl_specs, ds_storage_class, token);

  /* A storage class specifier cannot be applied alongside a typedef
     specifier.  If there is a typedef specifier present then set
     conflicting_specifiers_p which will trigger an error later
     on in grokdeclarator.  */
  if (decl_spec_seq_has_spec_p (decl_specs, ds_typedef))
    decl_specs->conflicting_specifiers_p = true;
}
/* Update the DECL_SPECS to reflect the TYPE_SPEC.  If TYPE_DEFINITION_P
   is true, the type is a class or enum definition.  */

static void
cp_parser_set_decl_spec_type (cp_decl_specifier_seq *decl_specs,
			      tree type_spec,
			      cp_token *token,
			      bool type_definition_p)
{
  decl_specs->any_specifiers_p = true;

  /* If the user tries to redeclare bool, char16_t, char32_t, or wchar_t
     (with, for example, in "typedef int wchar_t;") we remember that
     this is what happened.  In system headers, we ignore these
     declarations so that G++ can work with system headers that are not
     C++-safe.  */
  if (decl_spec_seq_has_spec_p (decl_specs, ds_typedef)
      && !type_definition_p
      && (type_spec == boolean_type_node
	  || type_spec == char16_type_node
	  || type_spec == char32_type_node
	  || type_spec == wchar_type_node)
      && (decl_specs->type
	  || decl_spec_seq_has_spec_p (decl_specs, ds_long)
	  || decl_spec_seq_has_spec_p (decl_specs, ds_short)
	  || decl_spec_seq_has_spec_p (decl_specs, ds_unsigned)
	  || decl_spec_seq_has_spec_p (decl_specs, ds_signed)))
    {
      decl_specs->redefined_builtin_type = type_spec;
      set_and_check_decl_spec_loc (decl_specs,
				   ds_redefined_builtin_type_spec,
				   token);
      if (!decl_specs->type)
	{
	  decl_specs->type = type_spec;
	  decl_specs->type_definition_p = false;
	  set_and_check_decl_spec_loc (decl_specs,ds_type_spec, token);
	}
    }
  else if (decl_specs->type)
    /* A second type specifier; grokdeclarator will report the
       error.  */
    decl_specs->multiple_types_p = true;
  else
    {
      /* First (and normal) type specifier seen.  */
      decl_specs->type = type_spec;
      decl_specs->type_definition_p = type_definition_p;
      decl_specs->redefined_builtin_type = NULL_TREE;
      set_and_check_decl_spec_loc (decl_specs, ds_type_spec, token);
    }
}
/* True iff TOKEN is the GNU keyword __thread (as opposed to the
   standard spelling `thread_local', which shares RID_THREAD).  */

static bool
token_is__thread (cp_token *token)
{
  gcc_assert (token->keyword == RID_THREAD);
  const char *spelling = IDENTIFIER_POINTER (token->u.value);
  return strcmp (spelling, "__thread") == 0;
}
/* Set the location for a declarator specifier and check if it is
   duplicated.

   DECL_SPECS is the sequence of declarator specifiers onto which to
   set the location.

   DS is the single declarator specifier to set which location is to
   be set onto the existing sequence of declarators.

   LOCATION is the location for the declarator specifier to
   consider.  */

static void
set_and_check_decl_spec_loc (cp_decl_specifier_seq *decl_specs,
			     cp_decl_spec ds, cp_token *token)
{
  gcc_assert (ds < ds_last);

  if (decl_specs == NULL)
    return;

  source_location location = token->location;

  /* A zero location means the specifier has not been seen yet.  */
  if (decl_specs->locations[ds] == 0)
    {
      decl_specs->locations[ds] = location;
      if (ds == ds_thread)
	decl_specs->gnu_thread_keyword_p = token_is__thread (token);
    }
  else
    {
      /* The specifier has been seen before: diagnose the repeat.  */
      if (ds == ds_long)
	{
	  /* `long long' is valid; track its own slot so that a third
	     `long' can be rejected.  */
	  if (decl_specs->locations[ds_long_long] != 0)
	    error_at (location,
		      "%<long long long%> is too long for GCC");
	  else
	    {
	      decl_specs->locations[ds_long_long] = location;
	      pedwarn_cxx98 (location,
			     OPT_Wlong_long,
			     "ISO C++ 1998 does not support %<long long%>");
	    }
	}
      else if (ds == ds_thread)
	{
	  /* `__thread' and `thread_local' share RID_THREAD, so mixing
	     the two spellings gets its own message.  */
	  bool gnu = token_is__thread (token);
	  if (gnu != decl_specs->gnu_thread_keyword_p)
	    error_at (location,
		      "both %<__thread%> and %<thread_local%> specified");
	  else
	    error_at (location, "duplicate %qD", token->u.value);
	}
      else
	{
	  /* Names indexed by cp_decl_spec; the order must match the
	     enum declaration.  */
	  static const char *const decl_spec_names[] = {
	    "signed",
	    "unsigned",
	    "short",
	    "long",
	    "const",
	    "volatile",
	    "restrict",
	    "inline",
	    "virtual",
	    "explicit",
	    "friend",
	    "typedef",
	    "using",
	    "constexpr",
	    "__complex"
	  };
	  error_at (location,
		    "duplicate %qs", decl_spec_names[ds]);
	}
    }
}
/* Return true iff the declarator specifier DS is present in the
   sequence of declarator specifiers DECL_SPECS.  */

bool
decl_spec_seq_has_spec_p (const cp_decl_specifier_seq * decl_specs,
			  cp_decl_spec ds)
{
  gcc_assert (ds < ds_last);
  /* A null sequence trivially contains no specifier; otherwise a
     specifier is present exactly when its location has been set.  */
  return decl_specs != NULL && decl_specs->locations[ds] != 0;
}
/* DECL_SPECIFIERS is the representation of a decl-specifier-seq.
   Returns TRUE iff `friend' appears among the DECL_SPECIFIERS.  */

static bool
cp_parser_friend_p (const cp_decl_specifier_seq *decl_specifiers)
{
  /* Thin convenience wrapper over the generic specifier query.  */
  return decl_spec_seq_has_spec_p (decl_specifiers, ds_friend);
}
/* Issue an error message indicating that TOKEN_DESC was expected.
   If KEYWORD is true, it indicates this function is called by
   cp_parser_require_keyword and the required token can only be
   an indicated keyword.  */

static void
cp_parser_required_error (cp_parser *parser,
			  required_token token_desc,
			  bool keyword)
{
  /* First, handle the token descriptions that are keywords; these are
     valid whether or not KEYWORD is set.  */
  switch (token_desc)
    {
    case RT_NEW:
      cp_parser_error (parser, "expected %<new%>");
      return;
    case RT_DELETE:
      cp_parser_error (parser, "expected %<delete%>");
      return;
    case RT_RETURN:
      cp_parser_error (parser, "expected %<return%>");
      return;
    case RT_WHILE:
      cp_parser_error (parser, "expected %<while%>");
      return;
    case RT_EXTERN:
      cp_parser_error (parser, "expected %<extern%>");
      return;
    case RT_STATIC_ASSERT:
      cp_parser_error (parser, "expected %<static_assert%>");
      return;
    case RT_DECLTYPE:
      cp_parser_error (parser, "expected %<decltype%>");
      return;
    case RT_OPERATOR:
      cp_parser_error (parser, "expected %<operator%>");
      return;
    case RT_CLASS:
      cp_parser_error (parser, "expected %<class%>");
      return;
    case RT_TEMPLATE:
      cp_parser_error (parser, "expected %<template%>");
      return;
    case RT_NAMESPACE:
      cp_parser_error (parser, "expected %<namespace%>");
      return;
    case RT_USING:
      cp_parser_error (parser, "expected %<using%>");
      return;
    case RT_ASM:
      cp_parser_error (parser, "expected %<asm%>");
      return;
    case RT_TRY:
      cp_parser_error (parser, "expected %<try%>");
      return;
    case RT_CATCH:
      cp_parser_error (parser, "expected %<catch%>");
      return;
    case RT_THROW:
      cp_parser_error (parser, "expected %<throw%>");
      return;
    case RT_LABEL:
      cp_parser_error (parser, "expected %<__label__%>");
      return;
    case RT_AT_TRY:
      cp_parser_error (parser, "expected %<@try%>");
      return;
    case RT_AT_SYNCHRONIZED:
      cp_parser_error (parser, "expected %<@synchronized%>");
      return;
    case RT_AT_THROW:
      cp_parser_error (parser, "expected %<@throw%>");
      return;
    case RT_TRANSACTION_ATOMIC:
      cp_parser_error (parser, "expected %<__transaction_atomic%>");
      return;
    case RT_TRANSACTION_RELAXED:
      cp_parser_error (parser, "expected %<__transaction_relaxed%>");
      return;
    default:
      break;
    }

  /* The remaining descriptions are punctuation and grammar
     productions; these are only valid for a plain cp_parser_require,
     so reaching them with KEYWORD set is a caller bug.  */
  if (!keyword)
    {
      switch (token_desc)
	{
	case RT_SEMICOLON:
	  cp_parser_error (parser, "expected %<;%>");
	  return;
	case RT_OPEN_PAREN:
	  cp_parser_error (parser, "expected %<(%>");
	  return;
	case RT_CLOSE_BRACE:
	  cp_parser_error (parser, "expected %<}%>");
	  return;
	case RT_OPEN_BRACE:
	  cp_parser_error (parser, "expected %<{%>");
	  return;
	case RT_CLOSE_SQUARE:
	  cp_parser_error (parser, "expected %<]%>");
	  return;
	case RT_OPEN_SQUARE:
	  cp_parser_error (parser, "expected %<[%>");
	  return;
	case RT_COMMA:
	  cp_parser_error (parser, "expected %<,%>");
	  return;
	case RT_SCOPE:
	  cp_parser_error (parser, "expected %<::%>");
	  return;
	case RT_LESS:
	  cp_parser_error (parser, "expected %<<%>");
	  return;
	case RT_GREATER:
	  cp_parser_error (parser, "expected %<>%>");
	  return;
	case RT_EQ:
	  cp_parser_error (parser, "expected %<=%>");
	  return;
	case RT_ELLIPSIS:
	  cp_parser_error (parser, "expected %<...%>");
	  return;
	case RT_MULT:
	  cp_parser_error (parser, "expected %<*%>");
	  return;
	case RT_COMPL:
	  cp_parser_error (parser, "expected %<~%>");
	  return;
	case RT_COLON:
	  cp_parser_error (parser, "expected %<:%>");
	  return;
	case RT_COLON_SCOPE:
	  cp_parser_error (parser, "expected %<:%> or %<::%>");
	  return;
	case RT_CLOSE_PAREN:
	  cp_parser_error (parser, "expected %<)%>");
	  return;
	case RT_COMMA_CLOSE_PAREN:
	  cp_parser_error (parser, "expected %<,%> or %<)%>");
	  return;
	case RT_PRAGMA_EOL:
	  cp_parser_error (parser, "expected end of line");
	  return;
	case RT_NAME:
	  cp_parser_error (parser, "expected identifier");
	  return;
	case RT_SELECT:
	  cp_parser_error (parser, "expected selection-statement");
	  return;
	case RT_INTERATION:
	  cp_parser_error (parser, "expected iteration-statement");
	  return;
	case RT_JUMP:
	  cp_parser_error (parser, "expected jump-statement");
	  return;
	case RT_CLASS_KEY:
	  cp_parser_error (parser, "expected class-key");
	  return;
	case RT_CLASS_TYPENAME_TEMPLATE:
	  cp_parser_error (parser,
			   "expected %<class%>, %<typename%>, or %<template%>");
	  return;
	default:
	  gcc_unreachable ();
	}
    }
  else
    gcc_unreachable ();
}
/* If the next token is of the indicated TYPE, consume it.  Otherwise,
   issue an error message indicating that TOKEN_DESC was expected.

   Returns the token consumed, if the token had the appropriate type.
   Otherwise, returns NULL.  */

static cp_token *
cp_parser_require (cp_parser* parser,
		   enum cpp_ttype type,
		   required_token token_desc)
{
  if (!cp_lexer_next_token_is (parser->lexer, type))
    {
      /* Report what was expected -- unless we're parsing tentatively,
	 in which case cp_parser_simulate_error records the failure
	 silently.  */
      if (!cp_parser_simulate_error (parser))
	cp_parser_required_error (parser, token_desc, /*keyword=*/false);
      return NULL;
    }

  return cp_lexer_consume_token (parser->lexer);
}
/* An error message is produced if the next token is not '>'.
   All further tokens are skipped until the desired token is
   found or '{', '}', ';' or an unbalanced ')' or ']'.  */

static void
cp_parser_skip_to_end_of_template_parameter_list (cp_parser* parser)
{
  /* Current level of '< ... >'.  */
  unsigned level = 0;
  /* Ignore '<' and '>' nested inside '( ... )' or '[ ... ]'.  */
  unsigned nesting_depth = 0;

  /* Are we ready, yet?  If not, issue error message.  If the `>' was
     present, cp_parser_require has already consumed it and we are
     done.  */
  if (cp_parser_require (parser, CPP_GREATER, RT_GREATER))
    return;

  /* Skip tokens until the desired token is found.  */
  while (true)
    {
      /* Peek at the next token.  */
      switch (cp_lexer_peek_token (parser->lexer)->type)
	{
	case CPP_LESS:
	  if (!nesting_depth)
	    ++level;
	  break;

	case CPP_RSHIFT:
	  if (cxx_dialect == cxx98)
	    /* C++0x views the `>>' operator as two `>' tokens, but
	       C++98 does not.  */
	    break;
	  else if (!nesting_depth && level-- == 0)
	    {
	      /* We've hit a `>>' where the first `>' closes the
		 template argument list, and the second `>' is
		 spurious.  Just consume the `>>' and stop; we've
		 already produced at least one error.  */
	      cp_lexer_consume_token (parser->lexer);
	      return;
	    }
	  /* Fall through for C++0x, so we handle the second `>' in
	     the `>>'.  */

	case CPP_GREATER:
	  if (!nesting_depth && level-- == 0)
	    {
	      /* We've reached the token we want, consume it and stop.  */
	      cp_lexer_consume_token (parser->lexer);
	      return;
	    }
	  break;

	case CPP_OPEN_PAREN:
	case CPP_OPEN_SQUARE:
	  ++nesting_depth;
	  break;

	case CPP_CLOSE_PAREN:
	case CPP_CLOSE_SQUARE:
	  /* An unbalanced closer means the `>' was forgotten; leave
	     the token for the caller.  */
	  if (nesting_depth-- == 0)
	    return;
	  break;

	case CPP_EOF:
	case CPP_PRAGMA_EOL:
	case CPP_SEMICOLON:
	case CPP_OPEN_BRACE:
	case CPP_CLOSE_BRACE:
	  /* The '>' was probably forgotten, don't look further.  */
	  return;

	default:
	  break;
	}

      /* Consume this token.  */
      cp_lexer_consume_token (parser->lexer);
    }
}
/* If the next token is the indicated keyword, consume it.  Otherwise,
   issue an error message indicating that TOKEN_DESC was expected.

   Returns the token consumed, if the token had the appropriate type.
   Otherwise, returns NULL.  */

static cp_token *
cp_parser_require_keyword (cp_parser* parser,
			   enum rid keyword,
			   required_token token_desc)
{
  /* Any keyword at all?  cp_parser_require has already reported the
     error (or simulated it) when this returns NULL.  */
  cp_token *token = cp_parser_require (parser, CPP_KEYWORD, token_desc);
  if (token == NULL)
    return NULL;

  /* It is a keyword, but is it the right one?  */
  if (token->keyword == keyword)
    return token;

  cp_parser_required_error (parser, token_desc, /*keyword=*/true);
  return NULL;
}
/* Returns TRUE iff TOKEN is a token that can begin the body of a
   function-definition.  */

static bool
cp_parser_token_starts_function_definition_p (cp_token* token)
{
  /* An ordinary function-body begins with an `{'; a ctor-initializer
     begins with a `:'.  */
  if (token->type == CPP_OPEN_BRACE || token->type == CPP_COLON)
    return true;

  switch (token->keyword)
    {
    /* A function-try-block begins with `try'.  */
    case RID_TRY:
    /* A function-transaction-block begins with `__transaction_atomic'
       or `__transaction_relaxed'.  */
    case RID_TRANSACTION_ATOMIC:
    case RID_TRANSACTION_RELAXED:
    /* The named return value extension begins with `return'.  */
    case RID_RETURN:
      return true;
    default:
      return false;
    }
}
/* Returns TRUE iff the next token is the ":" or "{" beginning a class
   definition.  */

static bool
cp_parser_next_token_starts_class_definition_p (cp_parser *parser)
{
  cp_token *token = cp_lexer_peek_token (parser->lexer);

  /* The class-body proper always starts with `{'.  */
  if (token->type == CPP_OPEN_BRACE)
    return true;

  /* A `:' introduces a base-clause, unless the caller has told us
     that a colon means something else here (e.g. a bit-field).  */
  return (token->type == CPP_COLON
	  && !parser->colon_doesnt_start_class_def_p);
}
/* Returns TRUE iff the next token is the "," or ">" (or `>>', in
   C++0x) ending a template-argument.  */

static bool
cp_parser_next_token_ends_template_argument_p (cp_parser *parser)
{
  cp_token *token = cp_lexer_peek_token (parser->lexer);

  switch (token->type)
    {
    case CPP_COMMA:
    case CPP_GREATER:
    case CPP_ELLIPSIS:
      return true;
    case CPP_RSHIFT:
      /* `>>' only closes a template-argument-list in C++0x and
	 later, where it is treated as two `>' tokens.  */
      return cxx_dialect != cxx98;
    default:
      return false;
    }
}
/* Returns TRUE iff the n-th token is a "<", or the n-th is a "[" and the
   (n+1)-th is a ":" (which is a possible digraph typo for "< ::").  */

static bool
cp_parser_nth_token_starts_template_argument_list_p (cp_parser * parser,
						     size_t n)
{
  cp_token *token = cp_lexer_peek_nth_token (parser->lexer, n);

  if (token->type == CPP_LESS)
    return true;

  /* Check for the sequence `<::' in the original code.  It would be lexed as
     `[:', where `[' is a digraph, and there is no whitespace before
     `:'.  */
  if (token->type == CPP_OPEN_SQUARE && (token->flags & DIGRAPH))
    {
      cp_token *next = cp_lexer_peek_nth_token (parser->lexer, n + 1);
      return next->type == CPP_COLON && !(next->flags & PREV_WHITE);
    }

  return false;
}
/* Returns the kind of tag indicated by TOKEN, if it is a class-key,
   or none_type otherwise.  */

static enum tag_types
cp_parser_token_is_class_key (cp_token* token)
{
  if (token->keyword == RID_CLASS)
    return class_type;
  if (token->keyword == RID_STRUCT)
    return record_type;
  if (token->keyword == RID_UNION)
    return union_type;
  /* Not a class-key at all.  */
  return none_type;
}
/* Returns the kind of tag indicated by TOKEN, if it is a type-parameter-key,
   or none_type otherwise or if the token is null.  */

static enum tag_types
cp_parser_token_is_type_parameter_key (cp_token* token)
{
  if (token == NULL)
    return none_type;

  if (token->keyword == RID_CLASS)
    return class_type;
  if (token->keyword == RID_TYPENAME)
    return typename_type;

  /* Anything else cannot introduce a type-parameter.  */
  return none_type;
}
/* Issue an error message if the CLASS_KEY does not match the TYPE.  */

static void
cp_parser_check_class_key (enum tag_types class_key, tree type)
{
  if (type == error_mark_node)
    return;
  /* Only a mismatch in union-ness is diagnosed; `class' vs `struct'
     on a non-union type is accepted here.  */
  if ((TREE_CODE (type) == UNION_TYPE) != (class_key == union_type))
    {
      if (permerror (input_location, "%qs tag used in naming %q#T",
		     class_key == union_type ? "union"
		     : class_key == record_type ? "struct" : "class",
		     type))
	/* Point at the earlier declaration only if the permerror was
	   actually emitted.  */
	inform (DECL_SOURCE_LOCATION (TYPE_NAME (type)),
		"%q#T was previously declared here", type);
    }
}
/* Issue an error message if DECL is redeclared with different
   access than its original declaration [class.access.spec/3].
   This applies to nested classes and nested class templates.
   [class.mem/1].  */

static void
cp_parser_check_access_in_redeclaration (tree decl, location_t location)
{
  /* Only class-type members are subject to this check.  */
  if (!decl || !CLASS_TYPE_P (TREE_TYPE (decl)))
    return;

  /* Compare the access recorded on DECL with the access region we
     are currently parsing in.  */
  if ((TREE_PRIVATE (decl)
       != (current_access_specifier == access_private_node))
      || (TREE_PROTECTED (decl)
	  != (current_access_specifier == access_protected_node)))
    error_at (location, "%qD redeclared with different access", decl);
}
/* Look for the `template' keyword, as a syntactic disambiguator.
   Return TRUE iff it is present, in which case it will be
   consumed.  */

static bool
cp_parser_optional_template_keyword (cp_parser *parser)
{
  if (cp_lexer_next_token_is_keyword (parser->lexer, RID_TEMPLATE))
    {
      /* In C++98 the `template' keyword can only be used within templates;
	 outside templates the parser can always figure out what is a
	 template and what is not.  In C++11,  per the resolution of DR 468,
	 `template' is allowed in cases where it is not strictly necessary.  */
      if (!processing_template_decl
	  && pedantic && cxx_dialect == cxx98)
	{
	  cp_token *token = cp_lexer_peek_token (parser->lexer);
	  pedwarn (token->location, OPT_Wpedantic,
		   "in C++98 %<template%> (as a disambiguator) is only "
		   "allowed within templates");
	  /* If this part of the token stream is rescanned, the same
	     error message would be generated.  So, we purge the token
	     from the stream.  */
	  cp_lexer_purge_token (parser->lexer);
	  /* Behave as though the keyword were absent.  */
	  return false;
	}
      else
	{
	  /* Consume the `template' keyword.  */
	  cp_lexer_consume_token (parser->lexer);
	  return true;
	}
    }

  return false;
}
/* The next token is a CPP_NESTED_NAME_SPECIFIER.  Consume the token,
   set PARSER->SCOPE, and perform other related actions.  */

static void
cp_parser_pre_parsed_nested_name_specifier (cp_parser *parser)
{
  int i;
  struct tree_check *check_value;
  deferred_access_check *chk;
  vec<deferred_access_check, va_gc> *checks;

  /* Get the stored value.  The token carries the result of an earlier
     parse of this nested-name-specifier in u.tree_check_value.  */
  check_value = cp_lexer_consume_token (parser->lexer)->u.tree_check_value;
  /* Perform any access checks that were deferred.  */
  checks = check_value->checks;
  if (checks)
    {
      FOR_EACH_VEC_SAFE_ELT (checks, i, chk)
	perform_or_defer_access_check (chk->binfo,
				       chk->decl,
				       chk->diag_decl, tf_warning_or_error);
    }
  /* Set the scope from the stored value.  */
  parser->scope = check_value->value;
  parser->qualifying_scope = check_value->qualifying_scope;
  parser->object_scope = NULL_TREE;
}
/* Consume tokens up through a non-nested END token.  Returns TRUE if we
   encounter the end of a block before what we were looking for.  */

static bool
cp_parser_cache_group (cp_parser *parser,
		       enum cpp_ttype end,
		       unsigned depth)
{
  while (true)
    {
      cp_token *token = cp_lexer_peek_token (parser->lexer);

      /* Abort a parenthesized expression if we encounter a semicolon.  */
      if ((end == CPP_CLOSE_PAREN || depth == 0)
	  && token->type == CPP_SEMICOLON)
	return true;
      /* If we've reached the end of the file, stop.  */
      if (token->type == CPP_EOF
	  || (end != CPP_PRAGMA_EOL
	      && token->type == CPP_PRAGMA_EOL))
	return true;
      if (token->type == CPP_CLOSE_BRACE && depth == 0)
	/* We've hit the end of an enclosing block, so there's been some
	   kind of syntax error.  */
	return true;

      /* Consume the token.  */
      cp_lexer_consume_token (parser->lexer);
      /* See if it starts a new group; if so, recurse to consume the
	 whole nested group before continuing at this level.  */
      if (token->type == CPP_OPEN_BRACE)
	{
	  cp_parser_cache_group (parser, CPP_CLOSE_BRACE, depth + 1);
	  /* In theory this should probably check end == '}', but
	     cp_parser_save_member_function_body needs it to exit
	     after either '}' or ')' when called with ')'.  */
	  if (depth == 0)
	    return false;
	}
      else if (token->type == CPP_OPEN_PAREN)
	{
	  cp_parser_cache_group (parser, CPP_CLOSE_PAREN, depth + 1);
	  if (depth == 0 && end == CPP_CLOSE_PAREN)
	    return false;
	}
      else if (token->type == CPP_PRAGMA)
	cp_parser_cache_group (parser, CPP_PRAGMA_EOL, depth + 1);
      else if (token->type == end)
	return false;
    }
}
/* Like above, for caching a default argument or NSDMI.  Both of these are
   terminated by a non-nested comma, but it can be unclear whether or not a
   comma is nested in a template argument list unless we do more parsing.
   In order to handle this ambiguity, when we encounter a ',' after a '<'
   we try to parse what follows as a parameter-declaration-list (in the
   case of a default argument) or a member-declarator (in the case of an
   NSDMI).  If that succeeds, then we stop caching.  */

static tree
cp_parser_cache_defarg (cp_parser *parser, bool nsdmi)
{
  unsigned depth = 0;
  /* Count of '<' tokens at depth 0 that might have opened a
     template-argument-list.  */
  int maybe_template_id = 0;
  cp_token *first_token;
  cp_token *token;
  tree default_argument;

  /* Add tokens until we have processed the entire default
     argument.  We add the range [first_token, token).  */
  first_token = cp_lexer_peek_token (parser->lexer);
  if (first_token->type == CPP_OPEN_BRACE)
    {
      /* For list-initialization, this is straightforward.  */
      cp_parser_cache_group (parser, CPP_CLOSE_BRACE, /*depth=*/0);
      token = cp_lexer_peek_token (parser->lexer);
    }
  else while (true)
    {
      bool done = false;

      /* Peek at the next token.  */
      token = cp_lexer_peek_token (parser->lexer);
      /* What we do depends on what token we have.  */
      switch (token->type)
	{
	  /* In valid code, a default argument must be
	     immediately followed by a `,' `)', or `...'.  */
	case CPP_COMMA:
	  if (depth == 0 && maybe_template_id)
	    {
	      /* If we've seen a '<', we might be in a
		 template-argument-list.  Until Core issue 325 is
		 resolved, we don't know how this situation ought
		 to be handled, so try to DTRT.  We check whether
		 what comes after the comma is a valid parameter
		 declaration list.  If it is, then the comma ends
		 the default argument; otherwise the default
		 argument continues.  */
	      bool error = false;

	      /* Set ITALP so cp_parser_parameter_declaration_list
		 doesn't decide to commit to this parse.  */
	      bool saved_italp = parser->in_template_argument_list_p;
	      parser->in_template_argument_list_p = true;

	      cp_parser_parse_tentatively (parser);
	      cp_lexer_consume_token (parser->lexer);

	      if (nsdmi)
		{
		  int ctor_dtor_or_conv_p;
		  cp_parser_declarator (parser, CP_PARSER_DECLARATOR_NAMED,
					&ctor_dtor_or_conv_p,
					/*parenthesized_p=*/NULL,
					/*member_p=*/true,
					/*friend_p=*/false);
		}
	      else
		{
		  begin_scope (sk_function_parms, NULL_TREE);
		  cp_parser_parameter_declaration_list (parser, &error);
		  pop_bindings_and_leave_scope ();
		}
	      if (!cp_parser_error_occurred (parser) && !error)
		done = true;
	      /* The trial parse was only a look-ahead; always roll it
		 back.  */
	      cp_parser_abort_tentative_parse (parser);

	      parser->in_template_argument_list_p = saved_italp;
	      break;
	    }
	  /* Fall through: a `,' with no pending '<' is handled like
	     the other potential terminators below.  */
	case CPP_CLOSE_PAREN:
	case CPP_ELLIPSIS:
	  /* If we run into a non-nested `;', `}', or `]',
	     then the code is invalid -- but the default
	     argument is certainly over.  */
	case CPP_SEMICOLON:
	case CPP_CLOSE_BRACE:
	case CPP_CLOSE_SQUARE:
	  if (depth == 0
	      /* Handle correctly int n = sizeof ... ( p );  */
	      && token->type != CPP_ELLIPSIS)
	    done = true;
	  /* Update DEPTH, if necessary.  */
	  else if (token->type == CPP_CLOSE_PAREN
		   || token->type == CPP_CLOSE_BRACE
		   || token->type == CPP_CLOSE_SQUARE)
	    --depth;
	  break;

	case CPP_OPEN_PAREN:
	case CPP_OPEN_SQUARE:
	case CPP_OPEN_BRACE:
	  ++depth;
	  break;

	case CPP_LESS:
	  if (depth == 0)
	    /* This might be the comparison operator, or it might
	       start a template argument list.  */
	    ++maybe_template_id;
	  break;

	case CPP_RSHIFT:
	  if (cxx_dialect == cxx98)
	    break;
	  /* Fall through for C++0x, which treats the `>>'
	     operator like two `>' tokens in certain
	     cases.  */

	case CPP_GREATER:
	  if (depth == 0)
	    {
	      /* This might be an operator, or it might close a
		 template argument list.  But if a previous '<'
		 started a template argument list, this will have
		 closed it, so we can't be in one anymore.  */
	      maybe_template_id -= 1 + (token->type == CPP_RSHIFT);
	      if (maybe_template_id < 0)
		maybe_template_id = 0;
	    }
	  break;

	  /* If we run out of tokens, issue an error message.  */
	case CPP_EOF:
	case CPP_PRAGMA_EOL:
	  error_at (token->location, "file ends in default argument");
	  done = true;
	  break;

	case CPP_NAME:
	case CPP_SCOPE:
	  /* In these cases, we should look for template-ids.
	     For example, if the default argument is
	     `X<int, double>()', we need to do name lookup to
	     figure out whether or not `X' is a template; if
	     so, the `,' does not end the default argument.
	     That is not yet done.  */
	  break;

	default:
	  break;
	}

      /* If we've reached the end, stop.  */
      if (done)
	break;

      /* Add the token to the token block.  */
      token = cp_lexer_consume_token (parser->lexer);
    }

  /* Create a DEFAULT_ARG to represent the unparsed default
     argument.  */
  default_argument = make_node (DEFAULT_ARG);
  DEFARG_TOKENS (default_argument)
    = cp_token_cache_new (first_token, token);
  DEFARG_INSTANTIATIONS (default_argument) = NULL;

  return default_argument;
}
/* Begin parsing tentatively.  We always save tokens while parsing
   tentatively so that if the tentative parsing fails we can restore the
   tokens.  */

static void
cp_parser_parse_tentatively (cp_parser* parser)
{
  /* Enter a new parsing context; contexts form a stack linked through
     their `next' pointers.  */
  parser->context = cp_parser_context_new (parser->context);
  /* Begin saving tokens.  */
  cp_lexer_save_tokens (parser->lexer);
  /* In order to avoid repetitive access control error messages,
     access checks are queued up until we are no longer parsing
     tentatively.  */
  push_deferring_access_checks (dk_deferred);
}
/* Commit to the currently active tentative parse.  */

static void
cp_parser_commit_to_tentative_parse (cp_parser* parser)
{
  cp_parser_context *context;
  cp_lexer *lexer;

  /* Mark all of the levels as committed.  Stop at the first level
     that is already committed -- everything below it was committed
     earlier.  */
  lexer = parser->lexer;
  for (context = parser->context; context->next; context = context->next)
    {
      if (context->status == CP_PARSER_STATUS_KIND_COMMITTED)
	break;
      context->status = CP_PARSER_STATUS_KIND_COMMITTED;
      /* Find the lexer that is actually saving tokens for this level
	 before committing them.  */
      while (!cp_lexer_saving_tokens (lexer))
	lexer = lexer->next;
      cp_lexer_commit_tokens (lexer);
    }
}
/* Commit to the topmost currently active tentative parse.

   Note that this function shouldn't be called when there are
   irreversible side-effects while in a tentative state.  For
   example, we shouldn't create a permanent entry in the symbol
   table, or issue an error message that might not apply if the
   tentative parse is aborted.  */

static void
cp_parser_commit_to_topmost_tentative_parse (cp_parser* parser)
{
  cp_parser_context *context = parser->context;
  cp_lexer *lexer = parser->lexer;

  if (context)
    {
      /* Already committed; nothing to do.  */
      if (context->status == CP_PARSER_STATUS_KIND_COMMITTED)
	return;
      context->status = CP_PARSER_STATUS_KIND_COMMITTED;
      /* Find the lexer that is actually saving tokens for this level
	 before committing them.  */
      while (!cp_lexer_saving_tokens (lexer))
	lexer = lexer->next;
      cp_lexer_commit_tokens (lexer);
    }
}
/* Abort the currently active tentative parse.  All consumed tokens
   will be rolled back, and no diagnostics will be issued.  Implemented
   by simulating a parse error and then finishing the tentative parse
   "normally", which takes the error path of
   cp_parser_parse_definitely.  */

static void
cp_parser_abort_tentative_parse (cp_parser* parser)
{
  /* Aborting a committed parse is only legitimate during error
     recovery (errorcount > 0).  */
  gcc_assert (parser->context->status != CP_PARSER_STATUS_KIND_COMMITTED
	      || errorcount > 0);
  cp_parser_simulate_error (parser);
  /* Now, pretend that we want to see if the construct was
     successfully parsed.  */
  cp_parser_parse_definitely (parser);
}
/* Stop parsing tentatively.  If a parse error has occurred, restore the
   token stream.  Otherwise, commit to the tokens we have consumed.
   Returns true if no error occurred; false otherwise.  Pairs with
   cp_parser_parse_tentatively, which pushed the context popped here.  */

static bool
cp_parser_parse_definitely (cp_parser* parser)
{
  bool error_occurred;
  cp_parser_context *context;
  /* Remember whether or not an error occurred, since we are about to
     destroy that information.  */
  error_occurred = cp_parser_error_occurred (parser);
  /* Remove the topmost context from the stack.  */
  context = parser->context;
  parser->context = context->next;
  /* If no parse errors occurred, commit to the tentative parse.  */
  if (!error_occurred)
    {
      /* Commit to the tokens read tentatively, unless that was
	 already done.  */
      if (context->status != CP_PARSER_STATUS_KIND_COMMITTED)
	cp_lexer_commit_tokens (parser->lexer);
      /* Hand any deferred access checks to the parent context.  */
      pop_to_parent_deferring_access_checks ();
    }
  /* Otherwise, if errors occurred, roll back our state so that things
     are just as they were before we began the tentative parse.  */
  else
    {
      cp_lexer_rollback_tokens (parser->lexer);
      pop_deferring_access_checks ();
    }
  /* Add the context to the front of the free list; contexts are
     recycled rather than freed.  */
  context->next = cp_parser_context_free_list;
  cp_parser_context_free_list = context;
  return !error_occurred;
}
/* Returns true if we are parsing tentatively and have not yet
   committed to this tentative parse.  */

static bool
cp_parser_uncommitted_to_tentative_parse_p (cp_parser* parser)
{
  if (!cp_parser_parsing_tentatively (parser))
    return false;
  return parser->context->status != CP_PARSER_STATUS_KIND_COMMITTED;
}
/* Returns nonzero iff an error has occurred during the most recent
   tentative parse.  */

static bool
cp_parser_error_occurred (cp_parser* parser)
{
  if (!cp_parser_parsing_tentatively (parser))
    return false;
  return parser->context->status == CP_PARSER_STATUS_KIND_ERROR;
}
/* Returns nonzero if GNU extensions are allowed.  Simple accessor for
   the parser-wide flag set when the parser was created.  */

static bool
cp_parser_allow_gnu_extensions_p (cp_parser* parser)
{
  return parser->allow_gnu_extensions_p;
}
/* Objective-C++ Productions */

/* Parse an Objective-C expression, which feeds into a primary-expression
   above.

   objc-expression:
     objc-message-expression
     objc-string-literal
     objc-encode-expression
     objc-protocol-expression
     objc-selector-expression

   Returns a tree representation of the expression, or error_mark_node
   on a misplaced '@' construct (after skipping to the end of the
   enclosing block or statement for recovery).  */

static tree
cp_parser_objc_expression (cp_parser* parser)
{
  /* Try to figure out what kind of declaration is present.  */
  cp_token *kwd = cp_lexer_peek_token (parser->lexer);

  switch (kwd->type)
    {
    case CPP_OPEN_SQUARE:
      return cp_parser_objc_message_expression (parser);

    case CPP_OBJC_STRING:
      kwd = cp_lexer_consume_token (parser->lexer);
      return objc_build_string_object (kwd->u.value);

    case CPP_KEYWORD:
      switch (kwd->keyword)
	{
	case RID_AT_ENCODE:
	  return cp_parser_objc_encode_expression (parser);

	case RID_AT_PROTOCOL:
	  return cp_parser_objc_protocol_expression (parser);

	case RID_AT_SELECTOR:
	  return cp_parser_objc_selector_expression (parser);

	default:
	  break;
	}
      /* FALLTHRU: an unrecognized '@' keyword falls into the outer
	 default and is diagnosed as misplaced.  */
    default:
      error_at (kwd->location,
		"misplaced %<@%D%> Objective-C++ construct",
		kwd->u.value);
      cp_parser_skip_to_end_of_block_or_statement (parser);
    }

  return error_mark_node;
}
/* Parse an Objective-C message expression.

   objc-message-expression:
     [ objc-message-receiver objc-message-args ]

   Assumes the '[' is the next token.  Returns a representation of an
   Objective-C message as built by objc_build_message_expr.  */

static tree
cp_parser_objc_message_expression (cp_parser* parser)
{
  tree receiver, messageargs;

  cp_lexer_consume_token (parser->lexer);  /* Eat '['.  */
  receiver = cp_parser_objc_message_receiver (parser);
  messageargs = cp_parser_objc_message_args (parser);
  cp_parser_require (parser, CPP_CLOSE_SQUARE, RT_CLOSE_SQUARE);

  return objc_build_message_expr (receiver, messageargs);
}
/* Parse an objc-message-receiver.

   objc-message-receiver:
     expression
     simple-type-specifier

   Tries each alternative tentatively, in order: first a full
   expression, then a simple type specifier (converted to a class
   reference).  Returns a representation of the type or expression, or
   error_mark_node if neither alternative parses.  */

static tree
cp_parser_objc_message_receiver (cp_parser* parser)
{
  tree rcv;

  /* An Objective-C message receiver may be either (1) a type
     or (2) an expression.  */
  cp_parser_parse_tentatively (parser);
  rcv = cp_parser_expression (parser);

  /* If that worked out, fine.  */
  if (cp_parser_parse_definitely (parser))
    return rcv;

  cp_parser_parse_tentatively (parser);
  rcv = cp_parser_simple_type_specifier (parser,
					 /*decl_specs=*/NULL,
					 CP_PARSER_FLAGS_NONE);

  if (cp_parser_parse_definitely (parser))
    return objc_get_class_reference (rcv);

  cp_parser_error (parser, "objective-c++ message receiver expected");
  return error_mark_node;
}
/* Parse the arguments and selectors comprising an Objective-C message.

   objc-message-args:
     objc-selector
     objc-selector-args
     objc-selector-args , objc-comma-args

   objc-selector-args:
     objc-selector [opt] : assignment-expression
     objc-selector-args objc-selector [opt] : assignment-expression

   objc-comma-args:
     assignment-expression
     objc-comma-args , assignment-expression

   Returns a TREE_LIST, with TREE_PURPOSE containing a list of
   selector arguments and TREE_VALUE containing a list of comma
   arguments.  A unary selector (no colon following the first
   selector) is returned immediately as a one-element list.  */

static tree
cp_parser_objc_message_args (cp_parser* parser)
{
  tree sel_args = NULL_TREE, addl_args = NULL_TREE;
  bool maybe_unary_selector_p = true;
  cp_token *token = cp_lexer_peek_token (parser->lexer);

  while (cp_parser_objc_selector_p (token->type) || token->type == CPP_COLON)
    {
      tree selector = NULL_TREE, arg;

      /* A bare ':' starts a selector component with no name.  */
      if (token->type != CPP_COLON)
	selector = cp_parser_objc_selector (parser);

      /* Detect if we have a unary selector.  */
      if (maybe_unary_selector_p
	  && cp_lexer_next_token_is_not (parser->lexer, CPP_COLON))
	return build_tree_list (selector, NULL_TREE);

      maybe_unary_selector_p = false;
      cp_parser_require (parser, CPP_COLON, RT_COLON);
      arg = cp_parser_assignment_expression (parser);

      sel_args
	= chainon (sel_args,
		   build_tree_list (selector, arg));

      token = cp_lexer_peek_token (parser->lexer);
    }

  /* Handle non-selector arguments, if any.  */
  while (token->type == CPP_COMMA)
    {
      tree arg;

      cp_lexer_consume_token (parser->lexer);
      arg = cp_parser_assignment_expression (parser);

      addl_args
	= chainon (addl_args,
		   build_tree_list (NULL_TREE, arg));

      token = cp_lexer_peek_token (parser->lexer);
    }

  if (sel_args == NULL_TREE && addl_args == NULL_TREE)
    {
      cp_parser_error (parser, "objective-c++ message argument(s) are expected");
      return build_tree_list (error_mark_node, error_mark_node);
    }

  return build_tree_list (sel_args, addl_args);
}
/* Parse an Objective-C encode expression.

   objc-encode-expression:
     @encode objc-typename

   Assumes '@encode' is the next token.  Returns an encoded
   representation of the type argument, or an AT_ENCODE_EXPR when the
   type is template-dependent, or error_mark_node if no type was
   given.  */

static tree
cp_parser_objc_encode_expression (cp_parser* parser)
{
  tree type;
  cp_token *token;

  cp_lexer_consume_token (parser->lexer);  /* Eat '@encode'.  */
  cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN);
  /* Peek before parsing the type so the diagnostic below can point at
     the argument location.  */
  token = cp_lexer_peek_token (parser->lexer);
  type = complete_type (cp_parser_type_id (parser));
  cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);

  if (!type)
    {
      error_at (token->location,
		"%<@encode%> must specify a type as an argument");
      return error_mark_node;
    }

  /* This happens if we find @encode(T) (where T is a template
     typename or something dependent on a template typename) when
     parsing a template.  In that case, we can't compile it
     immediately, but we rather create an AT_ENCODE_EXPR which will
     need to be instantiated when the template is used.
  */
  if (dependent_type_p (type))
    {
      tree value = build_min (AT_ENCODE_EXPR, size_type_node, type);
      TREE_READONLY (value) = 1;
      return value;
    }

  return objc_build_encode_expr (type);
}
/* Parse an Objective-C @defs expression:

     @defs ( identifier )

   Assumes '@defs' is the next token.  Returns the instance variables
   of the named class, as provided by objc_get_class_ivars.  */

static tree
cp_parser_objc_defs_expression (cp_parser *parser)
{
  tree name;

  cp_lexer_consume_token (parser->lexer);  /* Eat '@defs'.  */
  cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN);
  name = cp_parser_identifier (parser);
  cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);

  return objc_get_class_ivars (name);
}
/* Parse an Objective-C protocol expression.

   objc-protocol-expression:
     @protocol ( identifier )

   Assumes '@protocol' is the next token.  Returns a representation of
   the protocol expression.  */

static tree
cp_parser_objc_protocol_expression (cp_parser* parser)
{
  tree proto;

  cp_lexer_consume_token (parser->lexer);  /* Eat '@protocol'.  */
  cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN);
  proto = cp_parser_identifier (parser);
  cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);

  return objc_build_protocol_expr (proto);
}
/* Parse an Objective-C selector expression.

   objc-selector-expression:
     @selector ( objc-method-signature )

   objc-method-signature:
     objc-selector
     objc-selector-seq

   objc-selector-seq:
     objc-selector :
     objc-selector-seq objc-selector :

   Assumes '@selector' is the next token.  Returns a representation of
   the method selector: either a bare identifier for a unary selector,
   or a TREE_LIST chain with one entry per selector component (a '::'
   token contributes two components, the second unnamed).  */

static tree
cp_parser_objc_selector_expression (cp_parser* parser)
{
  tree sel_seq = NULL_TREE;
  bool maybe_unary_selector_p = true;
  cp_token *token;
  location_t loc = cp_lexer_peek_token (parser->lexer)->location;

  cp_lexer_consume_token (parser->lexer);  /* Eat '@selector'.  */
  cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN);
  token = cp_lexer_peek_token (parser->lexer);

  while (cp_parser_objc_selector_p (token->type) || token->type == CPP_COLON
	 || token->type == CPP_SCOPE)
    {
      tree selector = NULL_TREE;

      /* The original condition here was
	   token->type != CPP_COLON || token->type == CPP_SCOPE
	 whose second disjunct is logically redundant (CPP_SCOPE already
	 satisfies the first), so it is dropped.  NOTE(review): the
	 redundant disjunct may have been intended as '&&' to skip
	 selector parsing for a leading '::' — confirm against the
	 Objective-C++ grammar before changing behavior.  */
      if (token->type != CPP_COLON)
	selector = cp_parser_objc_selector (parser);

      if (cp_lexer_next_token_is_not (parser->lexer, CPP_COLON)
	  && cp_lexer_next_token_is_not (parser->lexer, CPP_SCOPE))
	{
	  /* Detect if we have a unary selector.  */
	  if (maybe_unary_selector_p)
	    {
	      sel_seq = selector;
	      goto finish_selector;
	    }
	  else
	    {
	      cp_parser_error (parser, "expected %<:%>");
	    }
	}
      maybe_unary_selector_p = false;
      token = cp_lexer_consume_token (parser->lexer);

      if (token->type == CPP_SCOPE)
	{
	  /* '::' counts as two consecutive colons: record the named
	     component and an extra empty one.  */
	  sel_seq
	    = chainon (sel_seq,
		       build_tree_list (selector, NULL_TREE));
	  sel_seq
	    = chainon (sel_seq,
		       build_tree_list (NULL_TREE, NULL_TREE));
	}
      else
	sel_seq
	  = chainon (sel_seq,
		     build_tree_list (selector, NULL_TREE));

      token = cp_lexer_peek_token (parser->lexer);
    }

 finish_selector:
  cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);

  return objc_build_selector_expr (loc, sel_seq);
}
/* Parse a list of identifiers.

   objc-identifier-list:
     identifier
     objc-identifier-list , identifier

   Returns a TREE_LIST of identifier nodes.  Returns error_mark_node if
   the very first identifier is invalid; if a later identifier fails to
   parse, the list built so far is returned.  */

static tree
cp_parser_objc_identifier_list (cp_parser* parser)
{
  tree identifier;
  tree list;
  cp_token *sep;

  identifier = cp_parser_identifier (parser);
  if (identifier == error_mark_node)
    return error_mark_node;

  list = build_tree_list (NULL_TREE, identifier);
  sep = cp_lexer_peek_token (parser->lexer);

  while (sep->type == CPP_COMMA)
    {
      cp_lexer_consume_token (parser->lexer);  /* Eat ','.  */
      identifier = cp_parser_identifier (parser);
      if (identifier == error_mark_node)
	return list;

      list = chainon (list, build_tree_list (NULL_TREE,
					     identifier));
      sep = cp_lexer_peek_token (parser->lexer);
    }

  return list;
}
/* Parse an Objective-C alias declaration.

   objc-alias-declaration:
     @compatibility_alias identifier identifier ;

   Assumes '@compatibility_alias' is the next token.  This function
   registers the alias mapping with the Objective-C front end.
   It returns nothing.  */

static void
cp_parser_objc_alias_declaration (cp_parser* parser)
{
  tree alias, orig;

  cp_lexer_consume_token (parser->lexer);  /* Eat '@compatibility_alias'.  */
  alias = cp_parser_identifier (parser);
  orig = cp_parser_identifier (parser);
  objc_declare_alias (alias, orig);
  cp_parser_consume_semicolon_at_end_of_statement (parser);
}
/* Parse an Objective-C class forward-declaration.

   objc-class-declaration:
     @class objc-identifier-list ;

   Assumes '@class' is the next token.  The function registers each
   forward declaration with the Objective-C front end.  It returns
   nothing.  */

static void
cp_parser_objc_class_declaration (cp_parser* parser)
{
  /* Eat '@class'.  */
  cp_lexer_consume_token (parser->lexer);

  for (;;)
    {
      tree id = cp_parser_identifier (parser);

      if (id == error_mark_node)
	break;

      objc_declare_class (id);

      /* Another identifier follows only after a comma.  */
      if (!cp_lexer_next_token_is (parser->lexer, CPP_COMMA))
	break;
      cp_lexer_consume_token (parser->lexer);
    }

  cp_parser_consume_semicolon_at_end_of_statement (parser);
}
/* Parse a list of Objective-C protocol references.

   objc-protocol-refs-opt:
     objc-protocol-refs [opt]

   objc-protocol-refs:
     < objc-identifier-list >

   Returns a TREE_LIST of identifiers, or NULL_TREE when no '<' list
   is present.  */

static tree
cp_parser_objc_protocol_refs_opt (cp_parser* parser)
{
  tree refs = NULL_TREE;

  if (cp_lexer_next_token_is (parser->lexer, CPP_LESS))
    {
      /* Eat '<'.  */
      cp_lexer_consume_token (parser->lexer);
      refs = cp_parser_objc_identifier_list (parser);
      cp_parser_require (parser, CPP_GREATER, RT_GREATER);
    }

  return refs;
}
/* Parse an Objective-C visibility specification
   ('@private'/'@protected'/'@public'/'@package'), if one is present.
   Sets the current ivar visibility in the Objective-C front end and
   consumes the keyword; does nothing (and consumes nothing) if the
   next token is not a visibility keyword.  */

static void
cp_parser_objc_visibility_spec (cp_parser* parser)
{
  cp_token *vis = cp_lexer_peek_token (parser->lexer);

  switch (vis->keyword)
    {
    case RID_AT_PRIVATE:
      objc_set_visibility (OBJC_IVAR_VIS_PRIVATE);
      break;
    case RID_AT_PROTECTED:
      objc_set_visibility (OBJC_IVAR_VIS_PROTECTED);
      break;
    case RID_AT_PUBLIC:
      objc_set_visibility (OBJC_IVAR_VIS_PUBLIC);
      break;
    case RID_AT_PACKAGE:
      objc_set_visibility (OBJC_IVAR_VIS_PACKAGE);
      break;
    default:
      /* Not a visibility keyword; leave the token unconsumed.  */
      return;
    }

  /* Eat '@private'/'@protected'/'@public'.  */
  cp_lexer_consume_token (parser->lexer);
}
/* Parse an Objective-C method type.  Return 'true' if it is a class
   (+) method, and 'false' if it is an instance (-) method.  Consumes
   the '+' or '-' token.  */

static inline bool
cp_parser_objc_method_type (cp_parser* parser)
{
  return cp_lexer_consume_token (parser->lexer)->type == CPP_PLUS;
}
/* Parse an Objective-C protocol qualifier sequence ('in', 'out',
   'inout', 'bycopy', 'byref', 'oneway').  Consumes as many qualifier
   keywords as are present and returns them as a TREE_LIST (most recent
   first), or NULL_TREE if none were found.  */

static tree
cp_parser_objc_protocol_qualifiers (cp_parser* parser)
{
  tree quals = NULL_TREE, node;
  cp_token *token = cp_lexer_peek_token (parser->lexer);

  node = token->u.value;

  /* The qualifiers are plain identifiers here, so compare against the
     interned spellings in ridpointers.  */
  while (node && identifier_p (node)
	 && (node == ridpointers [(int) RID_IN]
	     || node == ridpointers [(int) RID_OUT]
	     || node == ridpointers [(int) RID_INOUT]
	     || node == ridpointers [(int) RID_BYCOPY]
	     || node == ridpointers [(int) RID_BYREF]
	     || node == ridpointers [(int) RID_ONEWAY]))
    {
      quals = tree_cons (NULL_TREE, node, quals);
      cp_lexer_consume_token (parser->lexer);
      token = cp_lexer_peek_token (parser->lexer);
      node = token->u.value;
    }

  return quals;
}
/* Parse an Objective-C typename:

     ( objc-protocol-qualifiers [opt] type-id [opt] )

   Returns a TREE_LIST whose TREE_PURPOSE is the protocol-qualifier
   list and whose TREE_VALUE is the type (NULL_TREE meaning the default
   type 'id'), or NULL_TREE when no parenthesized typename is
   present.  */

static tree
cp_parser_objc_typename (cp_parser* parser)
{
  tree type_name = NULL_TREE;

  if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_PAREN))
    {
      tree proto_quals, cp_type = NULL_TREE;

      cp_lexer_consume_token (parser->lexer);  /* Eat '('.  */
      proto_quals = cp_parser_objc_protocol_qualifiers (parser);

      /* An ObjC type name may consist of just protocol qualifiers, in which
	 case the type shall default to 'id'.  */
      if (cp_lexer_next_token_is_not (parser->lexer, CPP_CLOSE_PAREN))
	{
	  cp_type = cp_parser_type_id (parser);

	  /* If the type could not be parsed, an error has already
	     been produced.  For error recovery, behave as if it had
	     not been specified, which will use the default type
	     'id'.  */
	  if (cp_type == error_mark_node)
	    {
	      cp_type = NULL_TREE;
	      /* We need to skip to the closing parenthesis as
		 cp_parser_type_id() does not seem to do it for
		 us.  */
	      cp_parser_skip_to_closing_parenthesis (parser,
						     /*recovering=*/true,
						     /*or_comma=*/false,
						     /*consume_paren=*/false);
	    }
	}

      cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);
      type_name = build_tree_list (proto_quals, cp_type);
    }

  return type_name;
}
/* Check to see if TYPE refers to an Objective-C selector name. */
static bool
cp_parser_objc_selector_p (enum cpp_ttype type)
{
return (type == CPP_NAME || type == CPP_KEYWORD
|| type == CPP_AND_AND || type == CPP_AND_EQ || type == CPP_AND
|| type == CPP_OR || type == CPP_COMPL || type == CPP_NOT
|| type == CPP_NOT_EQ || type == CPP_OR_OR || type == CPP_OR_EQ
|| type == CPP_XOR || type == CPP_XOR_EQ);
}
/* Parse an Objective-C selector.  Consumes one token; returns the
   selector identifier (mapping C++ alternative operator tokens back to
   their spelled names), or error_mark_node if the token cannot be a
   selector.  */

static tree
cp_parser_objc_selector (cp_parser* parser)
{
  cp_token *token = cp_lexer_consume_token (parser->lexer);

  if (!cp_parser_objc_selector_p (token->type))
    {
      error_at (token->location, "invalid Objective-C++ selector name");
      return error_mark_node;
    }

  /* C++ operator names are allowed to appear in ObjC selectors.  */
  switch (token->type)
    {
    case CPP_AND_AND: return get_identifier ("and");
    case CPP_AND_EQ: return get_identifier ("and_eq");
    case CPP_AND: return get_identifier ("bitand");
    case CPP_OR: return get_identifier ("bitor");
    case CPP_COMPL: return get_identifier ("compl");
    case CPP_NOT: return get_identifier ("not");
    case CPP_NOT_EQ: return get_identifier ("not_eq");
    case CPP_OR_OR: return get_identifier ("or");
    case CPP_OR_EQ: return get_identifier ("or_eq");
    case CPP_XOR: return get_identifier ("xor");
    case CPP_XOR_EQ: return get_identifier ("xor_eq");
    default: return token->u.value;
    }
}
/* Parse an Objective-C params list: the keyword (selector) parameters
   of a method declaration, i.e. the 'name:(type)arg' components.
   Returns either a chain of keyword declarations, or a bare identifier
   for a unary (argument-less) selector; a trailing __attribute__ list,
   if present and well-placed, is stored in *ATTRIBUTES.  Returns
   error_mark_node if no parameters could be parsed or the attributes
   are misplaced.  */

static tree
cp_parser_objc_method_keyword_params (cp_parser* parser, tree* attributes)
{
  tree params = NULL_TREE;
  bool maybe_unary_selector_p = true;
  cp_token *token = cp_lexer_peek_token (parser->lexer);

  while (cp_parser_objc_selector_p (token->type) || token->type == CPP_COLON)
    {
      tree selector = NULL_TREE, type_name, identifier;
      tree parm_attr = NULL_TREE;

      /* __attribute__ is also a CPP_KEYWORD, so stop before treating
	 it as a selector; it is handled as a tail attribute below.  */
      if (token->keyword == RID_ATTRIBUTE)
	break;

      if (token->type != CPP_COLON)
	selector = cp_parser_objc_selector (parser);

      /* Detect if we have a unary selector.  */
      if (maybe_unary_selector_p
	  && cp_lexer_next_token_is_not (parser->lexer, CPP_COLON))
	{
	  params = selector; /* Might be followed by attributes.  */
	  break;
	}

      maybe_unary_selector_p = false;
      if (!cp_parser_require (parser, CPP_COLON, RT_COLON))
	{
	  /* Something went quite wrong.  There should be a colon
	     here, but there is not.  Stop parsing parameters.  */
	  break;
	}
      type_name = cp_parser_objc_typename (parser);
      /* New ObjC allows attributes on parameters too.  */
      if (cp_lexer_next_token_is_keyword (parser->lexer, RID_ATTRIBUTE))
	parm_attr = cp_parser_attributes_opt (parser);
      identifier = cp_parser_identifier (parser);

      params
	= chainon (params,
		   objc_build_keyword_decl (selector,
					    type_name,
					    identifier,
					    parm_attr));

      token = cp_lexer_peek_token (parser->lexer);
    }

  if (params == NULL_TREE)
    {
      cp_parser_error (parser, "objective-c++ method declaration is expected");
      return error_mark_node;
    }

  /* We allow tail attributes for the method.  */
  if (token->keyword == RID_ATTRIBUTE)
    {
      *attributes = cp_parser_attributes_opt (parser);
      if (cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON)
	  || cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE))
	return params;
      cp_parser_error (parser,
		       "method attributes must be specified at the end");
      return error_mark_node;
    }

  /* A second 'params == NULL_TREE' check used to appear here; it was
     unreachable (PARAMS is known non-null after the check above) and
     has been removed.  */
  return params;
}
/* Parse the non-keyword Objective-C params: the optional ', parm' and
   ', ...' components that may follow the keyword parameters.  Sets
   *ELLIPSISP to true when '...' is seen.  A trailing __attribute__
   list is stored in *ATTRIBUTES if that slot is still empty; finding
   attributes both here and earlier is diagnosed.  Returns a TREE_LIST
   (whose head node is a dummy that the parsed parameters are chained
   onto), or error_mark_node on misplaced attributes.  */

static tree
cp_parser_objc_method_tail_params_opt (cp_parser* parser, bool *ellipsisp,
				       tree* attributes)
{
  /* Dummy head node; real parameters are chained onto it below.  */
  tree params = make_node (TREE_LIST);
  cp_token *token = cp_lexer_peek_token (parser->lexer);
  *ellipsisp = false; /* Initially, assume no ellipsis.  */

  while (token->type == CPP_COMMA)
    {
      cp_parameter_declarator *parmdecl;
      tree parm;

      cp_lexer_consume_token (parser->lexer);  /* Eat ','.  */
      token = cp_lexer_peek_token (parser->lexer);

      if (token->type == CPP_ELLIPSIS)
	{
	  cp_lexer_consume_token (parser->lexer);  /* Eat '...'.  */
	  *ellipsisp = true;
	  token = cp_lexer_peek_token (parser->lexer);
	  break;
	}

      /* TODO: parse attributes for tail parameters.  */
      parmdecl = cp_parser_parameter_declaration (parser, false, NULL);
      parm = grokdeclarator (parmdecl->declarator,
			     &parmdecl->decl_specifiers,
			     PARM, /*initialized=*/0,
			     /*attrlist=*/NULL);

      chainon (params, build_tree_list (NULL_TREE, parm));
      token = cp_lexer_peek_token (parser->lexer);
    }

  /* We allow tail attributes for the method.  */
  if (token->keyword == RID_ATTRIBUTE)
    {
      if (*attributes == NULL_TREE)
	{
	  *attributes = cp_parser_attributes_opt (parser);
	  if (cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON)
	      || cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE))
	    return params;
	}
      else
	/* We have an error, but parse the attributes, so that we can
	   carry on.  */
	*attributes = cp_parser_attributes_opt (parser);

      cp_parser_error (parser,
		       "method attributes must be specified at the end");
      return error_mark_node;
    }

  return params;
}
/* Parse a linkage specification, a pragma, an extra semicolon or a
   block — the non-Objective-C++ code that may appear between method
   declarations/definitions inside an @interface or @implementation.  */

static void
cp_parser_objc_interstitial_code (cp_parser* parser)
{
  cp_token *token = cp_lexer_peek_token (parser->lexer);

  /* If the next token is `extern' and the following token is a string
     literal, then we have a linkage specification.  */
  if (token->keyword == RID_EXTERN
      && cp_parser_is_pure_string_literal
	 (cp_lexer_peek_nth_token (parser->lexer, 2)))
    cp_parser_linkage_specification (parser);
  /* Handle #pragma, if any.  */
  else if (token->type == CPP_PRAGMA)
    cp_parser_pragma (parser, pragma_objc_icode);
  /* Allow stray semicolons.  */
  else if (token->type == CPP_SEMICOLON)
    cp_lexer_consume_token (parser->lexer);
  /* Mark methods as optional or required, when building protocols.  */
  else if (token->keyword == RID_AT_OPTIONAL)
    {
      cp_lexer_consume_token (parser->lexer);
      objc_set_method_opt (true);
    }
  else if (token->keyword == RID_AT_REQUIRED)
    {
      cp_lexer_consume_token (parser->lexer);
      objc_set_method_opt (false);
    }
  else if (token->keyword == RID_NAMESPACE)
    cp_parser_namespace_definition (parser);
  /* Other stray characters must generate errors.  */
  else if (token->type == CPP_OPEN_BRACE || token->type == CPP_CLOSE_BRACE)
    {
      cp_lexer_consume_token (parser->lexer);
      error ("stray %qs between Objective-C++ methods",
	     token->type == CPP_OPEN_BRACE ? "{" : "}");
    }
  /* Finally, try to parse a block-declaration, or a function-definition.  */
  else
    cp_parser_block_declaration (parser, /*statement_p=*/false);
}
/* Parse a method signature: the '+'/'-' method type, optional return
   typename, keyword parameters, and optional tail parameters.  Any
   method attributes are stored in *ATTRIBUTES.  Returns the signature
   built by objc_build_method_signature, or error_mark_node if either
   parameter list failed to parse.  */

static tree
cp_parser_objc_method_signature (cp_parser* parser, tree* attributes)
{
  tree rettype, kwdparms, optparms;
  bool ellipsis = false;
  bool is_class_method;

  is_class_method = cp_parser_objc_method_type (parser);
  rettype = cp_parser_objc_typename (parser);
  *attributes = NULL_TREE;
  kwdparms = cp_parser_objc_method_keyword_params (parser, attributes);
  if (kwdparms == error_mark_node)
    return error_mark_node;
  optparms = cp_parser_objc_method_tail_params_opt (parser, &ellipsis, attributes);
  if (optparms == error_mark_node)
    return error_mark_node;

  return objc_build_method_signature (is_class_method, rettype, kwdparms, optparms, ellipsis);
}
/* Look ahead past an __attribute__ list that precedes a possible
   method declaration.  Returns true if the attributes are (illegally)
   followed by a '+' or '-' method introducer — in that case the
   attribute tokens have been consumed and dropped, and the caller
   should warn.  Otherwise the lexer is rewound so the tokens can be
   reparsed as interstitial code, and false is returned.  */

static bool
cp_parser_objc_method_maybe_bad_prefix_attributes (cp_parser* parser)
{
  tree tattr;
  cp_lexer_save_tokens (parser->lexer);
  tattr = cp_parser_attributes_opt (parser);
  /* Callers invoke us only when RID_ATTRIBUTE is pending, so the parse
     must produce something.  */
  gcc_assert (tattr);

  /* If the attributes are followed by a method introducer, this is not
     allowed.  Dump the attributes and flag the situation.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_PLUS)
      || cp_lexer_next_token_is (parser->lexer, CPP_MINUS))
    return true;

  /* Otherwise, the attributes introduce some interstitial code; rewind
     so that it can be parsed from scratch.  */
  cp_lexer_rollback_tokens (parser->lexer);
  return false;
}
/* Parse an Objective-C method prototype list (the body of an
   @interface or @protocol), stopping at '@end' or end of file.
   Registers each method declaration with the Objective-C front end and
   finishes the interface.  */

static void
cp_parser_objc_method_prototype_list (cp_parser* parser)
{
  cp_token *token = cp_lexer_peek_token (parser->lexer);

  while (token->keyword != RID_AT_END && token->type != CPP_EOF)
    {
      if (token->type == CPP_PLUS || token->type == CPP_MINUS)
	{
	  tree attributes, sig;
	  bool is_class_method;
	  if (token->type == CPP_PLUS)
	    is_class_method = true;
	  else
	    is_class_method = false;
	  sig = cp_parser_objc_method_signature (parser, &attributes);
	  if (sig == error_mark_node)
	    {
	      /* Skip the broken declaration and keep going.  */
	      cp_parser_skip_to_end_of_block_or_statement (parser);
	      token = cp_lexer_peek_token (parser->lexer);
	      continue;
	    }
	  objc_add_method_declaration (is_class_method, sig, attributes);
	  cp_parser_consume_semicolon_at_end_of_statement (parser);
	}
      else if (token->keyword == RID_AT_PROPERTY)
	cp_parser_objc_at_property_declaration (parser);
      else if (token->keyword == RID_ATTRIBUTE
	       && cp_parser_objc_method_maybe_bad_prefix_attributes(parser))
	warning_at (cp_lexer_peek_token (parser->lexer)->location,
		    OPT_Wattributes,
		    "prefix attributes are ignored for methods");
      else
	/* Allow for interspersed non-ObjC++ code.  */
	cp_parser_objc_interstitial_code (parser);

      token = cp_lexer_peek_token (parser->lexer);
    }

  if (token->type != CPP_EOF)
    cp_lexer_consume_token (parser->lexer);  /* Eat '@end'.  */
  else
    cp_parser_error (parser, "expected %<@end%>");

  objc_finish_interface ();
}
/* Parse an Objective-C method definition list (the body of an
   @implementation), stopping at '@end' or end of file.  For each
   method, parses the signature, starts the method definition in the
   Objective-C front end, and — unless the body is empty and another
   method (or '@end') immediately follows — parses the function body.
   Finishes the implementation before returning.  */

static void
cp_parser_objc_method_definition_list (cp_parser* parser)
{
  cp_token *token = cp_lexer_peek_token (parser->lexer);

  while (token->keyword != RID_AT_END && token->type != CPP_EOF)
    {
      tree meth;

      if (token->type == CPP_PLUS || token->type == CPP_MINUS)
	{
	  cp_token *ptk;
	  tree sig, attribute;
	  bool is_class_method;
	  if (token->type == CPP_PLUS)
	    is_class_method = true;
	  else
	    is_class_method = false;
	  /* Defer access checks until we know whether a body follows;
	     they are performed just before parsing it.  */
	  push_deferring_access_checks (dk_deferred);
	  sig = cp_parser_objc_method_signature (parser, &attribute);
	  if (sig == error_mark_node)
	    {
	      cp_parser_skip_to_end_of_block_or_statement (parser);
	      token = cp_lexer_peek_token (parser->lexer);
	      continue;
	    }
	  objc_start_method_definition (is_class_method, sig, attribute,
					NULL_TREE);

	  /* For historical reasons, we accept an optional semicolon.  */
	  if (cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON))
	    cp_lexer_consume_token (parser->lexer);

	  ptk = cp_lexer_peek_token (parser->lexer);
	  /* Only parse a body if another method introducer, '@end' or
	     EOF does not immediately follow.  */
	  if (!(ptk->type == CPP_PLUS || ptk->type == CPP_MINUS
		|| ptk->type == CPP_EOF || ptk->keyword == RID_AT_END))
	    {
	      perform_deferred_access_checks (tf_warning_or_error);
	      stop_deferring_access_checks ();
	      meth = cp_parser_function_definition_after_declarator (parser,
								     false);
	      pop_deferring_access_checks ();
	      objc_finish_method_definition (meth);
	    }
	}
      /* The following case will be removed once @synthesize is
	 completely implemented.  */
      else if (token->keyword == RID_AT_PROPERTY)
	cp_parser_objc_at_property_declaration (parser);
      else if (token->keyword == RID_AT_SYNTHESIZE)
	cp_parser_objc_at_synthesize_declaration (parser);
      else if (token->keyword == RID_AT_DYNAMIC)
	cp_parser_objc_at_dynamic_declaration (parser);
      else if (token->keyword == RID_ATTRIBUTE
	       && cp_parser_objc_method_maybe_bad_prefix_attributes(parser))
	warning_at (token->location, OPT_Wattributes,
		    "prefix attributes are ignored for methods");
      else
	/* Allow for interspersed non-ObjC++ code.  */
	cp_parser_objc_interstitial_code (parser);

      token = cp_lexer_peek_token (parser->lexer);
    }

  if (token->type != CPP_EOF)
    cp_lexer_consume_token (parser->lexer);  /* Eat '@end'.  */
  else
    cp_parser_error (parser, "expected %<@end%>");

  objc_finish_implementation ();
}
/* Parse Objective-C ivars: the optional brace-enclosed instance
   variable declarations that may follow an @interface line.  Handles
   visibility specifications, (possibly unnamed) bitfields, and
   comma-separated declarator lists; registers each variable with the
   Objective-C front end.  Returns without consuming anything if no '{'
   is present.  */

static void
cp_parser_objc_class_ivars (cp_parser* parser)
{
  cp_token *token = cp_lexer_peek_token (parser->lexer);

  if (token->type != CPP_OPEN_BRACE)
    return;	/* No ivars specified.  */

  cp_lexer_consume_token (parser->lexer);  /* Eat '{'.  */
  token = cp_lexer_peek_token (parser->lexer);

  while (token->type != CPP_CLOSE_BRACE
	 && token->keyword != RID_AT_END && token->type != CPP_EOF)
    {
      cp_decl_specifier_seq declspecs;
      int decl_class_or_enum_p;
      tree prefix_attributes;

      cp_parser_objc_visibility_spec (parser);

      if (cp_lexer_next_token_is (parser->lexer, CPP_CLOSE_BRACE))
	break;

      cp_parser_decl_specifier_seq (parser,
				    CP_PARSER_FLAGS_OPTIONAL,
				    &declspecs,
				    &decl_class_or_enum_p);

      /* Storage classes, thread-local and typedef are all invalid on
	 ivars; diagnose and clear them so processing can continue.  */

      /* auto, register, static, extern, mutable.  */
      if (declspecs.storage_class != sc_none)
	{
	  cp_parser_error (parser, "invalid type for instance variable");
	  declspecs.storage_class = sc_none;
	}

      /* thread_local.  */
      if (decl_spec_seq_has_spec_p (&declspecs, ds_thread))
	{
	  cp_parser_error (parser, "invalid type for instance variable");
	  declspecs.locations[ds_thread] = 0;
	}

      /* typedef.  */
      if (decl_spec_seq_has_spec_p (&declspecs, ds_typedef))
	{
	  cp_parser_error (parser, "invalid type for instance variable");
	  declspecs.locations[ds_typedef] = 0;
	}

      prefix_attributes = declspecs.attributes;
      declspecs.attributes = NULL_TREE;

      /* Keep going until we hit the `;' at the end of the
	 declaration.  */
      while (cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON))
	{
	  tree width = NULL_TREE, attributes, first_attribute, decl;
	  cp_declarator *declarator = NULL;
	  int ctor_dtor_or_conv_p;

	  /* Check for a (possibly unnamed) bitfield declaration.  */
	  token = cp_lexer_peek_token (parser->lexer);
	  if (token->type == CPP_COLON)
	    goto eat_colon;

	  if (token->type == CPP_NAME
	      && (cp_lexer_peek_nth_token (parser->lexer, 2)->type
		  == CPP_COLON))
	    {
	      /* Get the name of the bitfield.  */
	      declarator = make_id_declarator (NULL_TREE,
					       cp_parser_identifier (parser),
					       sfk_none);

	     eat_colon:
	      cp_lexer_consume_token (parser->lexer);  /* Eat ':'.  */
	      /* Get the width of the bitfield.  */
	      width
		= cp_parser_constant_expression (parser);
	    }
	  else
	    {
	      /* Parse the declarator.  */
	      declarator
		= cp_parser_declarator (parser, CP_PARSER_DECLARATOR_NAMED,
					&ctor_dtor_or_conv_p,
					/*parenthesized_p=*/NULL,
					/*member_p=*/false,
					/*friend_p=*/false);
	    }

	  /* Look for attributes that apply to the ivar.  */
	  attributes = cp_parser_attributes_opt (parser);
	  /* Remember which attributes are prefix attributes and
	     which are not.  */
	  first_attribute = attributes;
	  /* Combine the attributes.  */
	  attributes = chainon (prefix_attributes, attributes);

	  if (width)
	    /* Create the bitfield declaration.  */
	    decl = grokbitfield (declarator, &declspecs,
				 width,
				 attributes);
	  else
	    decl = grokfield (declarator, &declspecs,
			      NULL_TREE, /*init_const_expr_p=*/false,
			      NULL_TREE, attributes);

	  /* Add the instance variable.  */
	  if (decl != error_mark_node && decl != NULL_TREE)
	    objc_add_instance_variable (decl);

	  /* Reset PREFIX_ATTRIBUTES: detach the per-declarator
	     attributes from the shared prefix chain so the prefix can
	     be reused for the next declarator.  */
	  while (attributes && TREE_CHAIN (attributes) != first_attribute)
	    attributes = TREE_CHAIN (attributes);
	  if (attributes)
	    TREE_CHAIN (attributes) = NULL_TREE;

	  token = cp_lexer_peek_token (parser->lexer);

	  if (token->type == CPP_COMMA)
	    {
	      cp_lexer_consume_token (parser->lexer);  /* Eat ','.  */
	      continue;
	    }
	  break;
	}

      cp_parser_consume_semicolon_at_end_of_statement (parser);
      token = cp_lexer_peek_token (parser->lexer);
    }

  if (token->keyword == RID_AT_END)
    cp_parser_error (parser, "expected %<}%>");

  /* Do not consume the RID_AT_END, so it will be read again as terminating
     the @interface of @implementation.  */
  if (token->keyword != RID_AT_END && token->type != CPP_EOF)
    cp_lexer_consume_token (parser->lexer);  /* Eat '}'.  */

  /* For historical reasons, we accept an optional semicolon.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON))
    cp_lexer_consume_token (parser->lexer);
}
/* Parse an Objective-C protocol declaration: either a forward
   declaration ('@protocol A, B;') or a full definition ('@protocol A
   <refs> ... @end').  ATTRIBUTES are any prefix attributes already
   parsed by the caller.  Registers the result with the Objective-C
   front end.  */

static void
cp_parser_objc_protocol_declaration (cp_parser* parser, tree attributes)
{
  tree proto, protorefs;
  cp_token *tok;

  cp_lexer_consume_token (parser->lexer);  /* Eat '@protocol'.  */
  if (cp_lexer_next_token_is_not (parser->lexer, CPP_NAME))
    {
      tok = cp_lexer_peek_token (parser->lexer);
      error_at (tok->location, "identifier expected after %<@protocol%>");
      cp_parser_consume_semicolon_at_end_of_statement (parser);
      return;
    }

  /* See if we have a forward declaration or a definition.  */
  tok = cp_lexer_peek_nth_token (parser->lexer, 2);

  /* Try a forward declaration first: a ',' or ';' after the first
     identifier cannot begin a definition.  */
  if (tok->type == CPP_COMMA || tok->type == CPP_SEMICOLON)
    {
      while (true)
	{
	  tree id;

	  id = cp_parser_identifier (parser);
	  if (id == error_mark_node)
	    break;

	  objc_declare_protocol (id, attributes);

	  if(cp_lexer_next_token_is (parser->lexer, CPP_COMMA))
	    cp_lexer_consume_token (parser->lexer);
	  else
	    break;
	}
      cp_parser_consume_semicolon_at_end_of_statement (parser);
    }

  /* Ok, we got a full-fledged definition (or at least should).  */
  else
    {
      proto = cp_parser_identifier (parser);
      protorefs = cp_parser_objc_protocol_refs_opt (parser);
      objc_start_protocol (proto, protorefs, attributes);
      cp_parser_objc_method_prototype_list (parser);
    }
}
/* Parse an Objective-C superclass or category. */
/* Parse the optional superclass (": Super") or category ("( Categ )")
   part of an @interface/@implementation.  *SUPER and *CATEG receive
   the parsed identifiers (or NULL_TREE).  IFACE_P is true when parsing
   an @interface; in that case an empty "()" denotes a class extension
   and sets *IS_CLASS_EXTENSION.  */
static void
cp_parser_objc_superclass_or_category (cp_parser *parser,
				       bool iface_p,
				       tree *super,
				       tree *categ, bool *is_class_extension)
{
  cp_token *tok = cp_lexer_peek_token (parser->lexer);

  *super = NULL_TREE;
  *categ = NULL_TREE;
  *is_class_extension = false;

  switch (tok->type)
    {
    case CPP_COLON:
      /* ": SuperClass" names the superclass.  */
      cp_lexer_consume_token (parser->lexer);
      *super = cp_parser_identifier (parser);
      break;

    case CPP_OPEN_PAREN:
      cp_lexer_consume_token (parser->lexer);
      /* If there is no category name, and this is an @interface, we
	 have a class extension.  */
      if (iface_p && cp_lexer_next_token_is (parser->lexer, CPP_CLOSE_PAREN))
	*is_class_extension = true;
      else
	*categ = cp_parser_identifier (parser);
      cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);
      break;

    default:
      /* Neither part is present; leave the defaults in place.  */
      break;
    }
}
/* Parse an Objective-C class interface. */
static void
cp_parser_objc_class_interface (cp_parser* parser, tree attributes)
{
  tree name, super, categ, protos;
  bool is_class_extension;
  cp_lexer_consume_token (parser->lexer); /* Eat '@interface'. */
  name = cp_parser_identifier (parser);
  if (name == error_mark_node)
    {
      /* It's hard to recover because even if valid @interface stuff
	 is to follow, we can't compile it (or validate it) if we
	 don't even know which class it refers to.  Let's assume this
	 was a stray '@interface' token in the stream and skip it.
      */
      return;
    }
  /* Parse the optional ": super" or "( category )" part.  */
  cp_parser_objc_superclass_or_category (parser, true, &super, &categ,
					 &is_class_extension);
  /* Parse any protocol references.  */
  protos = cp_parser_objc_protocol_refs_opt (parser);
  /* We have either a class or a category on our hands. */
  if (categ || is_class_extension)
    objc_start_category_interface (name, categ, protos, attributes);
  else
    {
      objc_start_class_interface (name, super, protos, attributes);
      /* Handle instance variable declarations, if any. */
      cp_parser_objc_class_ivars (parser);
      objc_continue_interface ();
    }
  cp_parser_objc_method_prototype_list (parser);
}
/* Parse an Objective-C class implementation. */
static void
cp_parser_objc_class_implementation (cp_parser* parser)
{
  tree name, super, categ;
  bool is_class_extension;
  cp_lexer_consume_token (parser->lexer); /* Eat '@implementation'. */
  name = cp_parser_identifier (parser);
  if (name == error_mark_node)
    {
      /* It's hard to recover because even if valid @implementation
	 stuff is to follow, we can't compile it (or validate it) if
	 we don't even know which class it refers to.  Let's assume
	 this was a stray '@implementation' token in the stream and
	 skip it.
      */
      return;
    }
  /* Parse the optional ": super" or "( category )" part.  Class
     extensions ("()") are only valid on @interface, hence
     iface_p == false here.  */
  cp_parser_objc_superclass_or_category (parser, false, &super, &categ,
					 &is_class_extension);
  /* We have either a class or a category on our hands. */
  if (categ)
    objc_start_category_implementation (name, categ);
  else
    {
      objc_start_class_implementation (name, super);
      /* Handle instance variable declarations, if any. */
      cp_parser_objc_class_ivars (parser);
      objc_continue_implementation ();
    }
  cp_parser_objc_method_definition_list (parser);
}
/* Consume the @end token and finish off the implementation. */
static void
cp_parser_objc_end_implementation (cp_parser* parser)
{
  /* The caller (cp_parser_objc_declaration) has already verified that
     the next token is '@end'.  */
  cp_lexer_consume_token (parser->lexer); /* Eat '@end'. */
  objc_finish_implementation ();
}
/* Parse an Objective-C declaration. */
/* Parse an Objective-C declaration, dispatching on the '@' keyword at
   the head of the token stream.  ATTRIBUTES are prefix attributes;
   they are only meaningful for @protocol and @interface, are warned
   about (and dropped) before @implementation, and are an error before
   @alias, @class and @end.  */
static void
cp_parser_objc_declaration (cp_parser* parser, tree attributes)
{
  /* Try to figure out what kind of declaration is present.  */
  cp_token *kwd = cp_lexer_peek_token (parser->lexer);
  if (attributes)
    switch (kwd->keyword)
      {
      case RID_AT_ALIAS:
      case RID_AT_CLASS:
      case RID_AT_END:
	error_at (kwd->location, "attributes may not be specified before"
		  " the %<@%D%> Objective-C++ keyword",
		  kwd->u.value);
	attributes = NULL;
	break;
      case RID_AT_IMPLEMENTATION:
	warning_at (kwd->location, OPT_Wattributes,
		    "prefix attributes are ignored before %<@%D%>",
		    kwd->u.value);
	attributes = NULL;
	/* This case used to fall through to the (empty) default; make
	   the break explicit so the behavior cannot silently change
	   if another case is ever added below.  */
	break;
      default:
	break;
      }
  switch (kwd->keyword)
    {
    case RID_AT_ALIAS:
      cp_parser_objc_alias_declaration (parser);
      break;
    case RID_AT_CLASS:
      cp_parser_objc_class_declaration (parser);
      break;
    case RID_AT_PROTOCOL:
      cp_parser_objc_protocol_declaration (parser, attributes);
      break;
    case RID_AT_INTERFACE:
      cp_parser_objc_class_interface (parser, attributes);
      break;
    case RID_AT_IMPLEMENTATION:
      cp_parser_objc_class_implementation (parser);
      break;
    case RID_AT_END:
      cp_parser_objc_end_implementation (parser);
      break;
    default:
      error_at (kwd->location, "misplaced %<@%D%> Objective-C++ construct",
		kwd->u.value);
      cp_parser_skip_to_end_of_block_or_statement (parser);
    }
}
/* Parse an Objective-C try-catch-finally statement.
objc-try-catch-finally-stmt:
@try compound-statement objc-catch-clause-seq [opt]
objc-finally-clause [opt]
objc-catch-clause-seq:
objc-catch-clause objc-catch-clause-seq [opt]
objc-catch-clause:
@catch ( objc-exception-declaration ) compound-statement
objc-finally-clause:
@finally compound-statement
objc-exception-declaration:
parameter-declaration
'...'
where '...' is to be interpreted literally, that is, it means CPP_ELLIPSIS.
Returns NULL_TREE.
PS: This function is identical to c_parser_objc_try_catch_finally_statement
for C. Keep them in sync. */
static tree
cp_parser_objc_try_catch_finally_statement (cp_parser *parser)
{
  location_t location;
  tree stmt;
  cp_parser_require_keyword (parser, RID_AT_TRY, RT_AT_TRY);
  location = cp_lexer_peek_token (parser->lexer)->location;
  objc_maybe_warn_exceptions (location);
  /* NB: The @try block needs to be wrapped in its own STATEMENT_LIST
     node, lest it get absorbed into the surrounding block. */
  stmt = push_stmt_list ();
  cp_parser_compound_statement (parser, NULL, false, false);
  objc_begin_try_stmt (location, pop_stmt_list (stmt));
  /* Parse zero or more "@catch ( decl ) { ... }" clauses.  */
  while (cp_lexer_next_token_is_keyword (parser->lexer, RID_AT_CATCH))
    {
      cp_parameter_declarator *parm;
      tree parameter_declaration = error_mark_node;
      bool seen_open_paren = false;
      cp_lexer_consume_token (parser->lexer);
      if (cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN))
	seen_open_paren = true;
      if (cp_lexer_next_token_is (parser->lexer, CPP_ELLIPSIS))
	{
	  /* We have "@catch (...)" (where the '...' are literally
	     what is in the code).  Skip the '...'.
	     parameter_declaration is set to NULL_TREE, and
	     objc_begin_catch_clause() knows that that means
	     '...'. */
	  cp_lexer_consume_token (parser->lexer);
	  parameter_declaration = NULL_TREE;
	}
      else
	{
	  /* We have "@catch (NSException *exception)" or something
	     like that.  Parse the parameter declaration. */
	  parm = cp_parser_parameter_declaration (parser, false, NULL);
	  if (parm == NULL)
	    parameter_declaration = error_mark_node;
	  else
	    parameter_declaration = grokdeclarator (parm->declarator,
						    &parm->decl_specifiers,
						    PARM, /*initialized=*/0,
						    /*attrlist=*/NULL);
	}
      if (seen_open_paren)
	cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);
      else
	{
	  /* If there was no open parenthesis, we are recovering from
	     an error, and we are trying to figure out what mistake
	     the user has made. */
	  /* If there is an immediate closing parenthesis, the user
	     probably forgot the opening one (ie, they typed "@catch
	     NSException *e)".  Parse the closing parenthesis and keep
	     going. */
	  if (cp_lexer_next_token_is (parser->lexer, CPP_CLOSE_PAREN))
	    cp_lexer_consume_token (parser->lexer);
	  /* If there is no immediate closing parenthesis, the user
	     probably doesn't know that parenthesis are required at
	     all (ie, they typed "@catch NSException *e").  So, just
	     forget about the closing parenthesis and keep going. */
	}
      objc_begin_catch_clause (parameter_declaration);
      cp_parser_compound_statement (parser, NULL, false, false);
      objc_finish_catch_clause ();
    }
  /* Parse the optional "@finally { ... }" clause.  */
  if (cp_lexer_next_token_is_keyword (parser->lexer, RID_AT_FINALLY))
    {
      cp_lexer_consume_token (parser->lexer);
      location = cp_lexer_peek_token (parser->lexer)->location;
      /* NB: The @finally block needs to be wrapped in its own STATEMENT_LIST
	 node, lest it get absorbed into the surrounding block. */
      stmt = push_stmt_list ();
      cp_parser_compound_statement (parser, NULL, false, false);
      objc_build_finally_clause (location, pop_stmt_list (stmt));
    }
  return objc_finish_try_stmt ();
}
/* Parse an Objective-C synchronized statement.
objc-synchronized-stmt:
@synchronized ( expression ) compound-statement
Returns NULL_TREE. */
/* Parse "@synchronized ( expr ) compound-statement" and build the
   corresponding statement.  Returns the built statement.  */
static tree
cp_parser_objc_synchronized_statement (cp_parser *parser)
{
  location_t loc;
  tree lock_expr, body;
  cp_parser_require_keyword (parser, RID_AT_SYNCHRONIZED, RT_AT_SYNCHRONIZED);
  loc = cp_lexer_peek_token (parser->lexer)->location;
  objc_maybe_warn_exceptions (loc);
  /* The parenthesized lock expression.  */
  cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN);
  lock_expr = cp_parser_expression (parser);
  cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);
  /* Wrap the body in its own STATEMENT_LIST node so it is not
     absorbed into the surrounding block.  */
  body = push_stmt_list ();
  cp_parser_compound_statement (parser, NULL, false, false);
  return objc_build_synchronized (loc, lock_expr, pop_stmt_list (body));
}
/* Parse an Objective-C throw statement.
objc-throw-stmt:
@throw assignment-expression [opt] ;
Returns a constructed '@throw' statement. */
/* Parse "@throw assignment-expression[opt] ;" and return the
   constructed '@throw' statement.  A bare "@throw;" passes NULL_TREE
   as the expression.  */
static tree
cp_parser_objc_throw_statement (cp_parser *parser)
{
  location_t loc = cp_lexer_peek_token (parser->lexer)->location;
  tree exc = NULL_TREE;
  cp_parser_require_keyword (parser, RID_AT_THROW, RT_AT_THROW);
  if (!cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON))
    exc = cp_parser_expression (parser);
  cp_parser_consume_semicolon_at_end_of_statement (parser);
  return objc_build_throw_stmt (loc, exc);
}
/* Parse an Objective-C statement. */
/* Parse an Objective-C statement, dispatching on the '@' keyword at
   the head of the token stream.  Returns the parsed statement, or
   error_mark_node for an unrecognized construct.  */
static tree
cp_parser_objc_statement (cp_parser * parser)
{
  cp_token *kwd = cp_lexer_peek_token (parser->lexer);

  if (kwd->keyword == RID_AT_TRY)
    return cp_parser_objc_try_catch_finally_statement (parser);
  if (kwd->keyword == RID_AT_SYNCHRONIZED)
    return cp_parser_objc_synchronized_statement (parser);
  if (kwd->keyword == RID_AT_THROW)
    return cp_parser_objc_throw_statement (parser);

  error_at (kwd->location, "misplaced %<@%D%> Objective-C++ construct",
	    kwd->u.value);
  cp_parser_skip_to_end_of_block_or_statement (parser);
  return error_mark_node;
}
/* If we are compiling ObjC++ and we see an __attribute__ we need to
   look ahead to see if an objc keyword follows the attributes. This
   is to detect the use of prefix attributes on ObjC @interface and
   @protocol. */
static bool
cp_parser_objc_valid_prefix_attributes (cp_parser* parser, tree *attrib)
{
  bool at_keyword_follows;

  /* Save the token stream so the attribute tokens can be re-read if
     they turn out not to be ObjC prefix attributes.  */
  cp_lexer_save_tokens (parser->lexer);
  *attrib = cp_parser_attributes_opt (parser);
  gcc_assert (*attrib);

  at_keyword_follows
    = OBJC_IS_AT_KEYWORD (cp_lexer_peek_token (parser->lexer)->keyword);
  if (at_keyword_follows)
    cp_lexer_commit_tokens (parser->lexer);
  else
    cp_lexer_rollback_tokens (parser->lexer);
  return at_keyword_follows;
}
/* This routine is a minimal replacement for
c_parser_struct_declaration () used when parsing the list of
types/names or ObjC++ properties. For example, when parsing the
code
@property (readonly) int a, b, c;
this function is responsible for parsing "int a, int b, int c" and
returning the declarations as CHAIN of DECLs.
TODO: Share this code with cp_parser_objc_class_ivars. It's very
similar parsing. */
static tree
cp_parser_objc_struct_declaration (cp_parser *parser)
{
  tree decls = NULL_TREE;
  cp_decl_specifier_seq declspecs;
  int decl_class_or_enum_p;
  tree prefix_attributes;
  /* Parse the decl-specifiers shared by all declarators in the
     declaration ("int" in "int a, b, c").  */
  cp_parser_decl_specifier_seq (parser,
				CP_PARSER_FLAGS_NONE,
				&declspecs,
				&decl_class_or_enum_p);
  if (declspecs.type == error_mark_node)
    return error_mark_node;
  /* Reject specifiers that make no sense on a property, but clear
     them and keep parsing so further errors can be found.  */
  /* auto, register, static, extern, mutable. */
  if (declspecs.storage_class != sc_none)
    {
      cp_parser_error (parser, "invalid type for property");
      declspecs.storage_class = sc_none;
    }
  /* thread_local. */
  if (decl_spec_seq_has_spec_p (&declspecs, ds_thread))
    {
      cp_parser_error (parser, "invalid type for property");
      declspecs.locations[ds_thread] = 0;
    }
  /* typedef. */
  if (decl_spec_seq_has_spec_p (&declspecs, ds_typedef))
    {
      cp_parser_error (parser, "invalid type for property");
      declspecs.locations[ds_typedef] = 0;
    }
  /* Detach the specifier attributes; they are re-combined with each
     declarator's own attributes below.  */
  prefix_attributes = declspecs.attributes;
  declspecs.attributes = NULL_TREE;
  /* Keep going until we hit the `;' at the end of the declaration. */
  while (cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON))
    {
      tree attributes, first_attribute, decl;
      cp_declarator *declarator;
      cp_token *token;
      /* Parse the declarator. */
      declarator = cp_parser_declarator (parser, CP_PARSER_DECLARATOR_NAMED,
					 NULL, NULL, false, false);
      /* Look for attributes that apply to the ivar. */
      attributes = cp_parser_attributes_opt (parser);
      /* Remember which attributes are prefix attributes and
	 which are not. */
      first_attribute = attributes;
      /* Combine the attributes. */
      attributes = chainon (prefix_attributes, attributes);
      decl = grokfield (declarator, &declspecs,
			NULL_TREE, /*init_const_expr_p=*/false,
			NULL_TREE, attributes);
      if (decl == error_mark_node || decl == NULL_TREE)
	return error_mark_node;
      /* Reset PREFIX_ATTRIBUTES.  Chainon above destructively linked
	 the prefix attributes onto this declarator's own attributes;
	 cut the chain so the prefix list can be reused for the next
	 declarator.  */
      while (attributes && TREE_CHAIN (attributes) != first_attribute)
	attributes = TREE_CHAIN (attributes);
      if (attributes)
	TREE_CHAIN (attributes) = NULL_TREE;
      /* Chain the new decl at the head of the result list; the caller
	 receives the decls in reverse declaration order.  */
      DECL_CHAIN (decl) = decls;
      decls = decl;
      token = cp_lexer_peek_token (parser->lexer);
      if (token->type == CPP_COMMA)
	{
	  cp_lexer_consume_token (parser->lexer); /* Eat ','. */
	  continue;
	}
      else
	break;
    }
  return decls;
}
/* Parse an Objective-C @property declaration. The syntax is:
objc-property-declaration:
'@property' objc-property-attributes[opt] struct-declaration ;
objc-property-attributes:
'(' objc-property-attribute-list ')'
objc-property-attribute-list:
objc-property-attribute
objc-property-attribute-list, objc-property-attribute
objc-property-attribute
'getter' = identifier
'setter' = identifier
'readonly'
'readwrite'
'assign'
'retain'
'copy'
'nonatomic'
For example:
@property NSString *name;
@property (readonly) id object;
@property (retain, nonatomic, getter=getTheName) id name;
@property int a, b, c;
PS: This function is identical to
c_parser_objc_at_property_declaration for C. Keep them in sync. */
static void
cp_parser_objc_at_property_declaration (cp_parser *parser)
{
  /* The following variables hold the attributes of the properties as
     parsed.  They are 'false' or 'NULL_TREE' if the attribute was not
     seen.  When we see an attribute, we set them to 'true' (if they
     are boolean properties) or to the identifier (if they have an
     argument, ie, for getter and setter).  Note that here we only
     parse the list of attributes, check the syntax and accumulate the
     attributes that we find.  objc_add_property_declaration() will
     then process the information. */
  bool property_assign = false;
  bool property_copy = false;
  tree property_getter_ident = NULL_TREE;
  bool property_nonatomic = false;
  bool property_readonly = false;
  bool property_readwrite = false;
  bool property_retain = false;
  tree property_setter_ident = NULL_TREE;
  /* 'properties' is the list of properties that we read.  Usually a
     single one, but maybe more (eg, in "@property int a, b, c;" there
     are three). */
  tree properties;
  location_t loc;
  loc = cp_lexer_peek_token (parser->lexer)->location;
  cp_lexer_consume_token (parser->lexer); /* Eat '@property'. */
  /* Parse the optional attribute list... */
  if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_PAREN))
    {
      /* Eat the '('. */
      cp_lexer_consume_token (parser->lexer);
      /* Loop over the comma-separated attributes.  */
      while (true)
	{
	  bool syntax_error = false;
	  cp_token *token = cp_lexer_peek_token (parser->lexer);
	  enum rid keyword;
	  if (token->type != CPP_NAME)
	    {
	      cp_parser_error (parser, "expected identifier");
	      break;
	    }
	  keyword = C_RID_CODE (token->u.value);
	  cp_lexer_consume_token (parser->lexer);
	  switch (keyword)
	    {
	    case RID_ASSIGN: property_assign = true; break;
	    case RID_COPY: property_copy = true; break;
	    case RID_NONATOMIC: property_nonatomic = true; break;
	    case RID_READONLY: property_readonly = true; break;
	    case RID_READWRITE: property_readwrite = true; break;
	    case RID_RETAIN: property_retain = true; break;
	    case RID_GETTER:
	    case RID_SETTER:
	      /* getter/setter take an "= selector" argument.  */
	      if (cp_lexer_next_token_is_not (parser->lexer, CPP_EQ))
		{
		  if (keyword == RID_GETTER)
		    cp_parser_error (parser,
				     "missing %<=%> (after %<getter%> attribute)");
		  else
		    cp_parser_error (parser,
				     "missing %<=%> (after %<setter%> attribute)");
		  syntax_error = true;
		  break;
		}
	      cp_lexer_consume_token (parser->lexer); /* eat the = */
	      if (!cp_parser_objc_selector_p (cp_lexer_peek_token (parser->lexer)->type))
		{
		  cp_parser_error (parser, "expected identifier");
		  syntax_error = true;
		  break;
		}
	      if (keyword == RID_SETTER)
		{
		  /* A duplicate setter is diagnosed; the selector
		     token is consumed but discarded.  */
		  if (property_setter_ident != NULL_TREE)
		    {
		      cp_parser_error (parser, "the %<setter%> attribute may only be specified once");
		      cp_lexer_consume_token (parser->lexer);
		    }
		  else
		    property_setter_ident = cp_parser_objc_selector (parser);
		  /* A setter selector always takes one argument, so it
		     must end in ':'.  */
		  if (cp_lexer_next_token_is_not (parser->lexer, CPP_COLON))
		    cp_parser_error (parser, "setter name must terminate with %<:%>");
		  else
		    cp_lexer_consume_token (parser->lexer);
		}
	      else
		{
		  /* Likewise for a duplicate getter.  */
		  if (property_getter_ident != NULL_TREE)
		    {
		      cp_parser_error (parser, "the %<getter%> attribute may only be specified once");
		      cp_lexer_consume_token (parser->lexer);
		    }
		  else
		    property_getter_ident = cp_parser_objc_selector (parser);
		}
	      break;
	    default:
	      cp_parser_error (parser, "unknown property attribute");
	      syntax_error = true;
	      break;
	    }
	  if (syntax_error)
	    break;
	  if (cp_lexer_next_token_is (parser->lexer, CPP_COMMA))
	    cp_lexer_consume_token (parser->lexer);
	  else
	    break;
	}
      /* FIXME: "@property (setter, assign);" will generate a spurious
	 "error: expected ‘)’ before ‘,’ token".  This is because
	 cp_parser_require, unlike the C counterpart, will produce an
	 error even if we are in error recovery. */
      if (!cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN))
	{
	  cp_parser_skip_to_closing_parenthesis (parser,
						 /*recovering=*/true,
						 /*or_comma=*/false,
						 /*consume_paren=*/true);
	}
    }
  /* ... and the property declaration(s). */
  properties = cp_parser_objc_struct_declaration (parser);
  if (properties == error_mark_node)
    {
      cp_parser_skip_to_end_of_statement (parser);
      /* If the next token is now a `;', consume it. */
      if (cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON))
	cp_lexer_consume_token (parser->lexer);
      return;
    }
  if (properties == NULL_TREE)
    cp_parser_error (parser, "expected identifier");
  else
    {
      /* Comma-separated properties are chained together in
	 reverse order; add them one by one. */
      properties = nreverse (properties);
      for (; properties; properties = TREE_CHAIN (properties))
	objc_add_property_declaration (loc, copy_node (properties),
				       property_readonly, property_readwrite,
				       property_assign, property_retain,
				       property_copy, property_nonatomic,
				       property_getter_ident, property_setter_ident);
    }
  cp_parser_consume_semicolon_at_end_of_statement (parser);
}
/* Parse an Objective-C++ @synthesize declaration. The syntax is:
objc-synthesize-declaration:
@synthesize objc-synthesize-identifier-list ;
objc-synthesize-identifier-list:
objc-synthesize-identifier
objc-synthesize-identifier-list, objc-synthesize-identifier
objc-synthesize-identifier
identifier
identifier = identifier
For example:
@synthesize MyProperty;
@synthesize OneProperty, AnotherProperty=MyIvar, YetAnotherProperty;
PS: This function is identical to c_parser_objc_at_synthesize_declaration
for C. Keep them in sync.
*/
/* Parse "@synthesize prop[, prop=ivar]... ;" and hand the accumulated
   (ivar, property) pairs to objc_add_synthesize_declaration.  On a
   malformed identifier, skip to the ';' and give up.  */
static void
cp_parser_objc_at_synthesize_declaration (cp_parser *parser)
{
  tree pairs = NULL_TREE;
  location_t loc = cp_lexer_peek_token (parser->lexer)->location;

  cp_lexer_consume_token (parser->lexer); /* Eat '@synthesize'. */
  for (;;)
    {
      tree prop = cp_parser_identifier (parser);
      if (prop == error_mark_node)
	{
	  cp_parser_consume_semicolon_at_end_of_statement (parser);
	  return;
	}
      /* An optional "= ivar" names the backing instance variable.  */
      tree ivar = NULL_TREE;
      if (cp_lexer_next_token_is (parser->lexer, CPP_EQ))
	{
	  cp_lexer_consume_token (parser->lexer);
	  ivar = cp_parser_identifier (parser);
	  if (ivar == error_mark_node)
	    {
	      cp_parser_consume_semicolon_at_end_of_statement (parser);
	      return;
	    }
	}
      pairs = chainon (pairs, build_tree_list (ivar, prop));
      if (cp_lexer_next_token_is_not (parser->lexer, CPP_COMMA))
	break;
      cp_lexer_consume_token (parser->lexer);
    }
  cp_parser_consume_semicolon_at_end_of_statement (parser);
  objc_add_synthesize_declaration (loc, pairs);
}
/* Parse an Objective-C++ @dynamic declaration. The syntax is:
objc-dynamic-declaration:
@dynamic identifier-list ;
For example:
@dynamic MyProperty;
@dynamic MyProperty, AnotherProperty;
PS: This function is identical to c_parser_objc_at_dynamic_declaration
for C. Keep them in sync.
*/
/* Parse "@dynamic prop[, prop]... ;" and hand the accumulated list of
   property names to objc_add_dynamic_declaration.  On a malformed
   identifier, skip to the ';' and give up.  */
static void
cp_parser_objc_at_dynamic_declaration (cp_parser *parser)
{
  tree props = NULL_TREE;
  location_t loc = cp_lexer_peek_token (parser->lexer)->location;

  cp_lexer_consume_token (parser->lexer); /* Eat '@dynamic'. */
  for (;;)
    {
      tree prop = cp_parser_identifier (parser);
      if (prop == error_mark_node)
	{
	  cp_parser_consume_semicolon_at_end_of_statement (parser);
	  return;
	}
      props = chainon (props, build_tree_list (NULL, prop));
      if (cp_lexer_next_token_is_not (parser->lexer, CPP_COMMA))
	break;
      cp_lexer_consume_token (parser->lexer);
    }
  cp_parser_consume_semicolon_at_end_of_statement (parser);
  objc_add_dynamic_declaration (loc, props);
}
/* OpenMP 2.5 / 3.0 / 3.1 / 4.0 parsing routines. */
/* Returns name of the next clause.
If the clause is not recognized PRAGMA_OMP_CLAUSE_NONE is returned and
the token is not consumed. Otherwise appropriate pragma_omp_clause is
returned and the token is consumed. */
static pragma_omp_clause
cp_parser_omp_clause_name (cp_parser *parser)
{
  pragma_omp_clause result = PRAGMA_OMP_CLAUSE_NONE;
  /* Some clause names collide with C++ keywords ("if", "default",
     "delete", "private", "for") and so arrive as keyword tokens, not
     CPP_NAMEs; check those first.  */
  if (cp_lexer_next_token_is_keyword (parser->lexer, RID_IF))
    result = PRAGMA_OMP_CLAUSE_IF;
  else if (cp_lexer_next_token_is_keyword (parser->lexer, RID_DEFAULT))
    result = PRAGMA_OMP_CLAUSE_DEFAULT;
  else if (cp_lexer_next_token_is_keyword (parser->lexer, RID_DELETE))
    result = PRAGMA_OACC_CLAUSE_DELETE;
  else if (cp_lexer_next_token_is_keyword (parser->lexer, RID_PRIVATE))
    result = PRAGMA_OMP_CLAUSE_PRIVATE;
  else if (cp_lexer_next_token_is_keyword (parser->lexer, RID_FOR))
    result = PRAGMA_OMP_CLAUSE_FOR;
  else if (cp_lexer_next_token_is (parser->lexer, CPP_NAME))
    {
      /* All remaining clause names are ordinary identifiers; switch
	 on the first character to keep the strcmp chains short.  */
      tree id = cp_lexer_peek_token (parser->lexer)->u.value;
      const char *p = IDENTIFIER_POINTER (id);
      switch (p[0])
	{
	case 'a':
	  if (!strcmp ("aligned", p))
	    result = PRAGMA_OMP_CLAUSE_ALIGNED;
	  else if (!strcmp ("async", p))
	    result = PRAGMA_OACC_CLAUSE_ASYNC;
	  break;
	case 'c':
	  if (!strcmp ("collapse", p))
	    result = PRAGMA_OMP_CLAUSE_COLLAPSE;
	  else if (!strcmp ("copy", p))
	    result = PRAGMA_OACC_CLAUSE_COPY;
	  else if (!strcmp ("copyin", p))
	    result = PRAGMA_OMP_CLAUSE_COPYIN;
	  else if (!strcmp ("copyout", p))
	    result = PRAGMA_OACC_CLAUSE_COPYOUT;
	  else if (!strcmp ("copyprivate", p))
	    result = PRAGMA_OMP_CLAUSE_COPYPRIVATE;
	  else if (!strcmp ("create", p))
	    result = PRAGMA_OACC_CLAUSE_CREATE;
	  break;
	case 'd':
	  if (!strcmp ("depend", p))
	    result = PRAGMA_OMP_CLAUSE_DEPEND;
	  else if (!strcmp ("device", p))
	    result = PRAGMA_OMP_CLAUSE_DEVICE;
	  else if (!strcmp ("deviceptr", p))
	    result = PRAGMA_OACC_CLAUSE_DEVICEPTR;
	  else if (!strcmp ("dist_schedule", p))
	    result = PRAGMA_OMP_CLAUSE_DIST_SCHEDULE;
	  break;
	case 'f':
	  if (!strcmp ("final", p))
	    result = PRAGMA_OMP_CLAUSE_FINAL;
	  else if (!strcmp ("firstprivate", p))
	    result = PRAGMA_OMP_CLAUSE_FIRSTPRIVATE;
	  else if (!strcmp ("from", p))
	    result = PRAGMA_OMP_CLAUSE_FROM;
	  break;
	case 'h':
	  if (!strcmp ("host", p))
	    result = PRAGMA_OACC_CLAUSE_HOST;
	  break;
	case 'i':
	  if (!strcmp ("inbranch", p))
	    result = PRAGMA_OMP_CLAUSE_INBRANCH;
	  break;
	case 'l':
	  if (!strcmp ("lastprivate", p))
	    result = PRAGMA_OMP_CLAUSE_LASTPRIVATE;
	  else if (!strcmp ("linear", p))
	    result = PRAGMA_OMP_CLAUSE_LINEAR;
	  break;
	case 'm':
	  if (!strcmp ("map", p))
	    result = PRAGMA_OMP_CLAUSE_MAP;
	  else if (!strcmp ("mergeable", p))
	    result = PRAGMA_OMP_CLAUSE_MERGEABLE;
	  /* "mask" and "nomask"/"vectorlength" below are Cilk Plus
	     clause names and are only recognized under -fcilkplus.  */
	  else if (flag_cilkplus && !strcmp ("mask", p))
	    result = PRAGMA_CILK_CLAUSE_MASK;
	  break;
	case 'n':
	  if (!strcmp ("notinbranch", p))
	    result = PRAGMA_OMP_CLAUSE_NOTINBRANCH;
	  else if (!strcmp ("nowait", p))
	    result = PRAGMA_OMP_CLAUSE_NOWAIT;
	  else if (flag_cilkplus && !strcmp ("nomask", p))
	    result = PRAGMA_CILK_CLAUSE_NOMASK;
	  else if (!strcmp ("num_gangs", p))
	    result = PRAGMA_OACC_CLAUSE_NUM_GANGS;
	  else if (!strcmp ("num_teams", p))
	    result = PRAGMA_OMP_CLAUSE_NUM_TEAMS;
	  else if (!strcmp ("num_threads", p))
	    result = PRAGMA_OMP_CLAUSE_NUM_THREADS;
	  else if (!strcmp ("num_workers", p))
	    result = PRAGMA_OACC_CLAUSE_NUM_WORKERS;
	  break;
	case 'o':
	  if (!strcmp ("ordered", p))
	    result = PRAGMA_OMP_CLAUSE_ORDERED;
	  break;
	case 'p':
	  if (!strcmp ("parallel", p))
	    result = PRAGMA_OMP_CLAUSE_PARALLEL;
	  else if (!strcmp ("present", p))
	    result = PRAGMA_OACC_CLAUSE_PRESENT;
	  /* The "pcopy"-style names are OpenACC shorthands for the
	     corresponding "present_or_..." clauses.  */
	  else if (!strcmp ("present_or_copy", p)
		   || !strcmp ("pcopy", p))
	    result = PRAGMA_OACC_CLAUSE_PRESENT_OR_COPY;
	  else if (!strcmp ("present_or_copyin", p)
		   || !strcmp ("pcopyin", p))
	    result = PRAGMA_OACC_CLAUSE_PRESENT_OR_COPYIN;
	  else if (!strcmp ("present_or_copyout", p)
		   || !strcmp ("pcopyout", p))
	    result = PRAGMA_OACC_CLAUSE_PRESENT_OR_COPYOUT;
	  else if (!strcmp ("present_or_create", p)
		   || !strcmp ("pcreate", p))
	    result = PRAGMA_OACC_CLAUSE_PRESENT_OR_CREATE;
	  else if (!strcmp ("proc_bind", p))
	    result = PRAGMA_OMP_CLAUSE_PROC_BIND;
	  break;
	case 'r':
	  if (!strcmp ("reduction", p))
	    result = PRAGMA_OMP_CLAUSE_REDUCTION;
	  break;
	case 's':
	  if (!strcmp ("safelen", p))
	    result = PRAGMA_OMP_CLAUSE_SAFELEN;
	  else if (!strcmp ("schedule", p))
	    result = PRAGMA_OMP_CLAUSE_SCHEDULE;
	  else if (!strcmp ("sections", p))
	    result = PRAGMA_OMP_CLAUSE_SECTIONS;
	  else if (!strcmp ("self", p))
	    result = PRAGMA_OACC_CLAUSE_SELF;
	  else if (!strcmp ("shared", p))
	    result = PRAGMA_OMP_CLAUSE_SHARED;
	  else if (!strcmp ("simdlen", p))
	    result = PRAGMA_OMP_CLAUSE_SIMDLEN;
	  break;
	case 't':
	  if (!strcmp ("taskgroup", p))
	    result = PRAGMA_OMP_CLAUSE_TASKGROUP;
	  else if (!strcmp ("thread_limit", p))
	    result = PRAGMA_OMP_CLAUSE_THREAD_LIMIT;
	  else if (!strcmp ("to", p))
	    result = PRAGMA_OMP_CLAUSE_TO;
	  break;
	case 'u':
	  if (!strcmp ("uniform", p))
	    result = PRAGMA_OMP_CLAUSE_UNIFORM;
	  else if (!strcmp ("untied", p))
	    result = PRAGMA_OMP_CLAUSE_UNTIED;
	  break;
	case 'v':
	  if (!strcmp ("vector_length", p))
	    result = PRAGMA_OACC_CLAUSE_VECTOR_LENGTH;
	  else if (flag_cilkplus && !strcmp ("vectorlength", p))
	    result = PRAGMA_CILK_CLAUSE_VECTORLENGTH;
	  break;
	case 'w':
	  if (!strcmp ("wait", p))
	    result = PRAGMA_OACC_CLAUSE_WAIT;
	  break;
	}
    }
  /* Per the contract above: consume the token only on a match.  */
  if (result != PRAGMA_OMP_CLAUSE_NONE)
    cp_lexer_consume_token (parser->lexer);
  return result;
}
/* Validate that a clause of the given type does not already exist. */
/* Diagnose (at LOCATION, using NAME) a clause of kind CODE that
   appears more than once in CLAUSES.  At most one error is emitted.  */
static void
check_no_duplicate_clause (tree clauses, enum omp_clause_code code,
			   const char *name, location_t location)
{
  for (tree c = clauses; c != NULL_TREE; c = OMP_CLAUSE_CHAIN (c))
    if (OMP_CLAUSE_CODE (c) == code)
      {
	error_at (location, "too many %qs clauses", name);
	return;
      }
}
/* OpenMP 2.5:
variable-list:
identifier
variable-list , identifier
In addition, we match a closing parenthesis (or, if COLON is non-NULL,
colon). An opening parenthesis will have been consumed by the caller.
If KIND is nonzero, create the appropriate node and install the decl
in OMP_CLAUSE_DECL and add the node to the head of the list.
If KIND is zero, create a TREE_LIST with the decl in TREE_PURPOSE;
return the list created.
COLON can be NULL if only closing parenthesis should end the list,
or pointer to bool which will receive false if the list is terminated
by closing parenthesis or true if the list is terminated by colon. */
/* Worker for parsing an OpenMP/OpenACC variable list (see the grammar
   comment above).  KIND selects the clause node to build for each
   variable (0 means build a TREE_LIST with the decl in TREE_PURPOSE
   instead); LIST is the existing list to chain onto; COLON, when
   non-NULL, permits a ':' terminator and receives true if one ended
   the list.

   Fix: for OMP_CLAUSE__CACHE_ the subarray bound checks previously
   passed LOW_BOUND/LENGTH to TREE_CODE unguarded, but either may be
   NULL_TREE ("[:len]" leaves LOW_BOUND unset, "[lb:]" leaves LENGTH
   unset), which dereferenced a null pointer.  Both checks are now
   guarded.  */
static tree
cp_parser_omp_var_list_no_open (cp_parser *parser, enum omp_clause_code kind,
				tree list, bool *colon)
{
  cp_token *token;
  bool saved_colon_corrects_to_scope_p = parser->colon_corrects_to_scope_p;
  if (colon)
    {
      /* A ':' may legitimately terminate the list, so suppress the
	 "did you mean '::'" fix-it machinery while parsing it.  */
      parser->colon_corrects_to_scope_p = false;
      *colon = false;
    }
  while (1)
    {
      tree name, decl;
      token = cp_lexer_peek_token (parser->lexer);
      name = cp_parser_id_expression (parser, /*template_p=*/false,
				      /*check_dependency_p=*/true,
				      /*template_p=*/NULL,
				      /*declarator_p=*/false,
				      /*optional_p=*/false);
      if (name == error_mark_node)
	goto skip_comma;
      decl = cp_parser_lookup_name_simple (parser, name, token->location);
      if (decl == error_mark_node)
	cp_parser_name_lookup_error (parser, name, decl, NLE_NULL,
				     token->location);
      else if (kind != 0)
	{
	  switch (kind)
	    {
	    case OMP_CLAUSE__CACHE_:
	      /* The cache directive requires a subarray.  */
	      if (cp_lexer_peek_token (parser->lexer)->type != CPP_OPEN_SQUARE)
		{
		  error_at (token->location, "expected %<[%>");
		  decl = error_mark_node;
		  break;
		}
	      /* FALL THROUGH.  */
	    case OMP_CLAUSE_MAP:
	    case OMP_CLAUSE_FROM:
	    case OMP_CLAUSE_TO:
	    case OMP_CLAUSE_DEPEND:
	      /* Parse any number of array sections: "[lb]", "[lb:len]",
		 "[:len]", "[lb:]" or "[:]".  */
	      while (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_SQUARE))
		{
		  tree low_bound = NULL_TREE, length = NULL_TREE;
		  parser->colon_corrects_to_scope_p = false;
		  cp_lexer_consume_token (parser->lexer);
		  if (!cp_lexer_next_token_is (parser->lexer, CPP_COLON))
		    low_bound = cp_parser_expression (parser);
		  if (!colon)
		    parser->colon_corrects_to_scope_p
		      = saved_colon_corrects_to_scope_p;
		  if (cp_lexer_next_token_is (parser->lexer, CPP_CLOSE_SQUARE))
		    /* "[lb]" is shorthand for a length of one.  */
		    length = integer_one_node;
		  else
		    {
		      /* Look for `:'.  */
		      if (!cp_parser_require (parser, CPP_COLON, RT_COLON))
			goto skip_comma;
		      if (!cp_lexer_next_token_is (parser->lexer,
						   CPP_CLOSE_SQUARE))
			length = cp_parser_expression (parser);
		    }
		  /* Look for the closing `]'.  */
		  if (!cp_parser_require (parser, CPP_CLOSE_SQUARE,
					  RT_CLOSE_SQUARE))
		    goto skip_comma;
		  if (kind == OMP_CLAUSE__CACHE_)
		    {
		      /* Cache directive bounds must be constant.
			 Either bound may be NULL_TREE when omitted;
			 guard before dereferencing with TREE_CODE.  */
		      if (low_bound != NULL_TREE
			  && TREE_CODE (low_bound) != INTEGER_CST
			  && !TREE_READONLY (low_bound))
			{
			  error_at (token->location,
				    "%qD is not a constant", low_bound);
			  decl = error_mark_node;
			}
		      if (length != NULL_TREE
			  && TREE_CODE (length) != INTEGER_CST
			  && !TREE_READONLY (length))
			{
			  error_at (token->location,
				    "%qD is not a constant", length);
			  decl = error_mark_node;
			}
		    }
		  decl = tree_cons (low_bound, length, decl);
		}
	      break;
	    default:
	      break;
	    }
	  tree u = build_omp_clause (token->location, kind);
	  OMP_CLAUSE_DECL (u) = decl;
	  OMP_CLAUSE_CHAIN (u) = list;
	  list = u;
	}
      else
	list = tree_cons (decl, NULL_TREE, list);
    get_comma:
      if (cp_lexer_next_token_is_not (parser->lexer, CPP_COMMA))
	break;
      cp_lexer_consume_token (parser->lexer);
    }
  if (colon)
    parser->colon_corrects_to_scope_p = saved_colon_corrects_to_scope_p;
  if (colon != NULL && cp_lexer_next_token_is (parser->lexer, CPP_COLON))
    {
      *colon = true;
      cp_parser_require (parser, CPP_COLON, RT_COLON);
      return list;
    }
  if (!cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN))
    {
      int ending;
      /* Try to resync to an unnested comma.  Copied from
	 cp_parser_parenthesized_expression_list.  */
    skip_comma:
      if (colon)
	parser->colon_corrects_to_scope_p = saved_colon_corrects_to_scope_p;
      ending = cp_parser_skip_to_closing_parenthesis (parser,
						      /*recovering=*/true,
						      /*or_comma=*/true,
						      /*consume_paren=*/true);
      if (ending < 0)
	goto get_comma;
    }
  return list;
}
/* Similarly, but expect leading and trailing parenthesis. This is a very
common case for omp clauses. */
/* As cp_parser_omp_var_list_no_open, but also require the leading and
   trailing parentheses -- the common shape for OMP clauses.  */
static tree
cp_parser_omp_var_list (cp_parser *parser, enum omp_clause_code kind, tree list)
{
  if (!cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN))
    /* No '(' -- return the list unchanged.  */
    return list;
  return cp_parser_omp_var_list_no_open (parser, kind, list, NULL);
}
/* OpenACC 2.0:
copy ( variable-list )
copyin ( variable-list )
copyout ( variable-list )
create ( variable-list )
delete ( variable-list )
present ( variable-list )
present_or_copy ( variable-list )
pcopy ( variable-list )
present_or_copyin ( variable-list )
pcopyin ( variable-list )
present_or_copyout ( variable-list )
pcopyout ( variable-list )
present_or_create ( variable-list )
pcreate ( variable-list ) */
/* Parse the variable list of an OpenACC data clause C_KIND, build an
   OMP_CLAUSE_MAP node for each variable and set its map kind
   according to the clause.  Returns the extended clause list.  */
static tree
cp_parser_oacc_data_clause (cp_parser *parser, pragma_omp_clause c_kind,
			    tree list)
{
  enum gomp_map_kind map_kind;

  /* Translate the data clause into the map kind applied below.  */
  switch (c_kind)
    {
    case PRAGMA_OACC_CLAUSE_COPY:
      map_kind = GOMP_MAP_FORCE_TOFROM;
      break;
    case PRAGMA_OACC_CLAUSE_COPYIN:
    case PRAGMA_OACC_CLAUSE_DEVICE:
      map_kind = GOMP_MAP_FORCE_TO;
      break;
    case PRAGMA_OACC_CLAUSE_COPYOUT:
    case PRAGMA_OACC_CLAUSE_HOST:
    case PRAGMA_OACC_CLAUSE_SELF:
      map_kind = GOMP_MAP_FORCE_FROM;
      break;
    case PRAGMA_OACC_CLAUSE_CREATE:
      map_kind = GOMP_MAP_FORCE_ALLOC;
      break;
    case PRAGMA_OACC_CLAUSE_DELETE:
      map_kind = GOMP_MAP_FORCE_DEALLOC;
      break;
    case PRAGMA_OACC_CLAUSE_PRESENT:
      map_kind = GOMP_MAP_FORCE_PRESENT;
      break;
    case PRAGMA_OACC_CLAUSE_PRESENT_OR_COPY:
      map_kind = GOMP_MAP_TOFROM;
      break;
    case PRAGMA_OACC_CLAUSE_PRESENT_OR_COPYIN:
      map_kind = GOMP_MAP_TO;
      break;
    case PRAGMA_OACC_CLAUSE_PRESENT_OR_COPYOUT:
      map_kind = GOMP_MAP_FROM;
      break;
    case PRAGMA_OACC_CLAUSE_PRESENT_OR_CREATE:
      map_kind = GOMP_MAP_ALLOC;
      break;
    default:
      gcc_unreachable ();
    }

  tree clauses = cp_parser_omp_var_list (parser, OMP_CLAUSE_MAP, list);
  /* The new nodes are at the head of CLAUSES, before the old LIST;
     stamp the map kind on each of them.  */
  for (tree c = clauses; c != list; c = OMP_CLAUSE_CHAIN (c))
    OMP_CLAUSE_SET_MAP_KIND (c, map_kind);
  return clauses;
}
/* OpenACC 2.0:
deviceptr ( variable-list ) */
static tree
cp_parser_oacc_data_clause_deviceptr (cp_parser *parser, tree list)
{
  location_t loc = cp_lexer_peek_token (parser->lexer)->location;

  /* Can't use OMP_CLAUSE_MAP here (that is, can't use the generic
     cp_parser_oacc_data_clause), as for PRAGMA_OACC_CLAUSE_DEVICEPTR,
     variable-list must only allow for pointer variables.  */
  tree varlist = cp_parser_omp_var_list (parser, OMP_CLAUSE_ERROR, NULL);
  for (tree it = varlist; it; it = TREE_CHAIN (it))
    {
      tree v = TREE_PURPOSE (it);

      /* FIXME diagnostics: ideally individual locations for every
	 variable in the list would make these errors more precise;
	 for now all of them point at the clause.  */
      if (TREE_CODE (v) != VAR_DECL)
	error_at (loc, "%qD is not a variable", v);
      else if (TREE_TYPE (v) == error_mark_node)
	;  /* Already diagnosed elsewhere; stay quiet.  */
      else if (!POINTER_TYPE_P (TREE_TYPE (v)))
	error_at (loc, "%qD is not a pointer variable", v);

      /* A map clause is chained even for erroneous declarations so the
	 decl survives into later phases.  */
      tree u = build_omp_clause (loc, OMP_CLAUSE_MAP);
      OMP_CLAUSE_SET_MAP_KIND (u, GOMP_MAP_FORCE_DEVICEPTR);
      OMP_CLAUSE_DECL (u) = v;
      OMP_CLAUSE_CHAIN (u) = list;
      list = u;
    }
  return list;
}
/* OpenACC:
vector_length ( expression ) */
static tree
cp_parser_oacc_clause_vector_length (cp_parser *parser, tree list)
{
  location_t location = cp_lexer_peek_token (parser->lexer)->location;

  if (!cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN))
    return list;

  tree t = cp_parser_condition (parser);
  /* The expression must parse and have integral type.  */
  bool bad = (t == error_mark_node || !INTEGRAL_TYPE_P (TREE_TYPE (t)));
  if (bad)
    error_at (location, "expected positive integer expression");

  if (bad || !cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN))
    {
      cp_parser_skip_to_closing_parenthesis (parser, /*recovering=*/true,
					     /*or_comma=*/false,
					     /*consume_paren=*/true);
      return list;
    }

  check_no_duplicate_clause (list, OMP_CLAUSE_VECTOR_LENGTH, "vector_length",
			     location);
  tree c = build_omp_clause (location, OMP_CLAUSE_VECTOR_LENGTH);
  OMP_CLAUSE_VECTOR_LENGTH_EXPR (c) = t;
  OMP_CLAUSE_CHAIN (c) = list;
  return c;
}
/* OpenACC 2.0
Parse wait clause or directive parameters. */
static tree
cp_parser_oacc_wait_list (cp_parser *parser, location_t clause_loc, tree list)
{
  /* Collect the parenthesized expressions; expansion is permitted.  */
  vec<tree, va_gc> *args
    = cp_parser_parenthesized_expression_list (parser, non_attr,
					       /*cast_p=*/false,
					       /*allow_expansion_p=*/true,
					       /*non_constant_p=*/NULL);
  if (args == NULL || args->length () == 0)
    {
      cp_parser_error (parser, "expected integer expression before ')'");
      if (args != NULL)
	release_tree_vector (args);
      return list;
    }

  tree args_tree = build_tree_list_vec (args);
  release_tree_vector (args);

  /* Chain one OMP_CLAUSE_WAIT per well-formed integral argument.  */
  for (tree it = args_tree; it; it = TREE_CHAIN (it))
    {
      tree targ = TREE_VALUE (it);
      if (targ == error_mark_node)
	continue;
      if (!INTEGRAL_TYPE_P (TREE_TYPE (targ)))
	{
	  error ("%<wait%> expression must be integral");
	  continue;
	}
      tree c = build_omp_clause (clause_loc, OMP_CLAUSE_WAIT);
      mark_rvalue_use (targ);
      OMP_CLAUSE_DECL (c) = targ;
      OMP_CLAUSE_CHAIN (c) = list;
      list = c;
    }
  return list;
}
/* OpenACC:
wait ( int-expr-list ) */
static tree
cp_parser_oacc_clause_wait (cp_parser *parser, tree list)
{
  location_t location = cp_lexer_peek_token (parser->lexer)->location;
  /* The argument list is optional; a bare "wait" leaves LIST unchanged.  */
  if (cp_lexer_peek_token (parser->lexer)->type == CPP_OPEN_PAREN)
    list = cp_parser_oacc_wait_list (parser, location, list);
  return list;
}
/* OpenMP 3.0:
collapse ( constant-expression ) */
static tree
cp_parser_omp_clause_collapse (cp_parser *parser, tree list, location_t location)
{
  location_t loc = cp_lexer_peek_token (parser->lexer)->location;
  if (!cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN))
    return list;

  tree num = cp_parser_constant_expression (parser);
  if (!cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN))
    cp_parser_skip_to_closing_parenthesis (parser, /*recovering=*/true,
					   /*or_comma=*/false,
					   /*consume_paren=*/true);
  if (num == error_mark_node)
    return list;

  /* The argument must fold to a positive constant that fits in int.  */
  num = fold_non_dependent_expr (num);
  HOST_WIDE_INT n;
  if (!INTEGRAL_TYPE_P (TREE_TYPE (num))
      || !tree_fits_shwi_p (num)
      || (n = tree_to_shwi (num)) <= 0
      || (int) n != n)
    {
      error_at (loc, "collapse argument needs positive constant integer expression");
      return list;
    }

  check_no_duplicate_clause (list, OMP_CLAUSE_COLLAPSE, "collapse", location);
  tree c = build_omp_clause (loc, OMP_CLAUSE_COLLAPSE);
  OMP_CLAUSE_COLLAPSE_EXPR (c) = num;
  OMP_CLAUSE_CHAIN (c) = list;
  return c;
}
/* OpenMP 2.5:
default ( shared | none ) */
/* Parse "default ( shared | none )".  Returns a new OMP_CLAUSE_DEFAULT
   chained onto LIST, or LIST unchanged when the kind is unrecognized.  */
static tree
cp_parser_omp_clause_default (cp_parser *parser, tree list, location_t location)
{
  enum omp_clause_default_kind kind = OMP_CLAUSE_DEFAULT_UNSPECIFIED;
  tree c;
  if (!cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN))
    return list;
  if (cp_lexer_next_token_is (parser->lexer, CPP_NAME))
    {
      tree id = cp_lexer_peek_token (parser->lexer)->u.value;
      const char *p = IDENTIFIER_POINTER (id);
      /* Dispatch on the first character before doing the full strcmp.  */
      switch (p[0])
	{
	case 'n':
	  if (strcmp ("none", p) != 0)
	    goto invalid_kind;
	  kind = OMP_CLAUSE_DEFAULT_NONE;
	  break;
	case 's':
	  if (strcmp ("shared", p) != 0)
	    goto invalid_kind;
	  kind = OMP_CLAUSE_DEFAULT_SHARED;
	  break;
	default:
	  goto invalid_kind;
	}
      cp_lexer_consume_token (parser->lexer);
    }
  else
    {
    invalid_kind:
      /* Reached both when the next token is not a name and, via goto,
	 when the name is not a recognized kind.  KIND stays
	 UNSPECIFIED, so after recovery we return LIST unchanged.  */
      cp_parser_error (parser, "expected %<none%> or %<shared%>");
    }
  if (!cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN))
    cp_parser_skip_to_closing_parenthesis (parser, /*recovering=*/true,
					   /*or_comma=*/false,
					   /*consume_paren=*/true);
  if (kind == OMP_CLAUSE_DEFAULT_UNSPECIFIED)
    return list;
  check_no_duplicate_clause (list, OMP_CLAUSE_DEFAULT, "default", location);
  c = build_omp_clause (location, OMP_CLAUSE_DEFAULT);
  OMP_CLAUSE_CHAIN (c) = list;
  OMP_CLAUSE_DEFAULT_KIND (c) = kind;
  return c;
}
/* OpenMP 3.1:
final ( expression ) */
static tree
cp_parser_omp_clause_final (cp_parser *parser, tree list, location_t location)
{
  if (!cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN))
    return list;
  tree t = cp_parser_condition (parser);
  if (t == error_mark_node
      || !cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN))
    cp_parser_skip_to_closing_parenthesis (parser, /*recovering=*/true,
					   /*or_comma=*/false,
					   /*consume_paren=*/true);
  /* Even after recovery a clause is built, matching existing behavior.  */
  check_no_duplicate_clause (list, OMP_CLAUSE_FINAL, "final", location);
  tree c = build_omp_clause (location, OMP_CLAUSE_FINAL);
  OMP_CLAUSE_CHAIN (c) = list;
  OMP_CLAUSE_FINAL_EXPR (c) = t;
  return c;
}
/* OpenMP 2.5:
if ( expression ) */
static tree
cp_parser_omp_clause_if (cp_parser *parser, tree list, location_t location)
{
  if (!cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN))
    return list;
  tree t = cp_parser_condition (parser);
  if (t == error_mark_node
      || !cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN))
    cp_parser_skip_to_closing_parenthesis (parser, /*recovering=*/true,
					   /*or_comma=*/false,
					   /*consume_paren=*/true);
  /* Even after recovery a clause is built, matching existing behavior.  */
  check_no_duplicate_clause (list, OMP_CLAUSE_IF, "if", location);
  tree c = build_omp_clause (location, OMP_CLAUSE_IF);
  OMP_CLAUSE_CHAIN (c) = list;
  OMP_CLAUSE_IF_EXPR (c) = t;
  return c;
}
/* OpenMP 3.1:
mergeable */
static tree
cp_parser_omp_clause_mergeable (cp_parser * /*parser*/,
				tree list, location_t location)
{
  /* "mergeable" takes no argument: reject duplicates and chain it.  */
  check_no_duplicate_clause (list, OMP_CLAUSE_MERGEABLE, "mergeable",
			     location);
  tree c = build_omp_clause (location, OMP_CLAUSE_MERGEABLE);
  OMP_CLAUSE_CHAIN (c) = list;
  return c;
}
/* OpenMP 2.5:
nowait */
static tree
cp_parser_omp_clause_nowait (cp_parser * /*parser*/,
			     tree list, location_t location)
{
  /* "nowait" takes no argument: reject duplicates and chain it.  */
  check_no_duplicate_clause (list, OMP_CLAUSE_NOWAIT, "nowait", location);
  tree c = build_omp_clause (location, OMP_CLAUSE_NOWAIT);
  OMP_CLAUSE_CHAIN (c) = list;
  return c;
}
/* OpenACC:
num_gangs ( expression ) */
static tree
cp_parser_omp_clause_num_gangs (cp_parser *parser, tree list)
{
  location_t location = cp_lexer_peek_token (parser->lexer)->location;
  if (!cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN))
    return list;
  tree t = cp_parser_condition (parser);
  if (t == error_mark_node
      || !cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN))
    cp_parser_skip_to_closing_parenthesis (parser, /*recovering=*/true,
					   /*or_comma=*/false,
					   /*consume_paren=*/true);
  /* An error_mark_node also fails this type test and is diagnosed here.  */
  if (!INTEGRAL_TYPE_P (TREE_TYPE (t)))
    {
      error_at (location, "expected positive integer expression");
      return list;
    }
  check_no_duplicate_clause (list, OMP_CLAUSE_NUM_GANGS, "num_gangs", location);
  tree c = build_omp_clause (location, OMP_CLAUSE_NUM_GANGS);
  OMP_CLAUSE_NUM_GANGS_EXPR (c) = t;
  OMP_CLAUSE_CHAIN (c) = list;
  return c;
}
/* OpenMP 2.5:
num_threads ( expression ) */
static tree
cp_parser_omp_clause_num_threads (cp_parser *parser, tree list,
				  location_t location)
{
  if (!cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN))
    return list;
  tree t = cp_parser_expression (parser);
  if (t == error_mark_node
      || !cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN))
    cp_parser_skip_to_closing_parenthesis (parser, /*recovering=*/true,
					   /*or_comma=*/false,
					   /*consume_paren=*/true);
  /* Even after recovery a clause is built, matching existing behavior.  */
  check_no_duplicate_clause (list, OMP_CLAUSE_NUM_THREADS,
			     "num_threads", location);
  tree c = build_omp_clause (location, OMP_CLAUSE_NUM_THREADS);
  OMP_CLAUSE_CHAIN (c) = list;
  OMP_CLAUSE_NUM_THREADS_EXPR (c) = t;
  return c;
}
/* OpenACC:
num_workers ( expression ) */
/* Parse the OpenACC "num_workers ( expression )" clause.  The expression
   must be integral; a new OMP_CLAUSE_NUM_WORKERS is chained onto LIST
   and returned, or LIST is returned unchanged on error.  */
static tree
cp_parser_omp_clause_num_workers (cp_parser *parser, tree list)
{
  tree t, c;
  location_t location = cp_lexer_peek_token (parser->lexer)->location;
  if (!cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN))
    return list;
  t = cp_parser_condition (parser);
  if (t == error_mark_node
      || !cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN))
    cp_parser_skip_to_closing_parenthesis (parser, /*recovering=*/true,
					   /*or_comma=*/false,
					   /*consume_paren=*/true);
  /* An error_mark_node also fails this type test and is diagnosed here.  */
  if (!INTEGRAL_TYPE_P (TREE_TYPE (t)))
    {
      error_at (location, "expected positive integer expression");
      return list;
    }
  /* Fixed: the duplicate-clause diagnostic previously named "num_gangs"
     (copy-paste from cp_parser_omp_clause_num_gangs); it must name
     "num_workers" so the error message matches the offending clause.  */
  check_no_duplicate_clause (list, OMP_CLAUSE_NUM_WORKERS, "num_workers",
			     location);
  c = build_omp_clause (location, OMP_CLAUSE_NUM_WORKERS);
  OMP_CLAUSE_NUM_WORKERS_EXPR (c) = t;
  OMP_CLAUSE_CHAIN (c) = list;
  list = c;
  return list;
}
/* OpenMP 2.5:
ordered */
static tree
cp_parser_omp_clause_ordered (cp_parser * /*parser*/,
			      tree list, location_t location)
{
  /* "ordered" takes no argument: reject duplicates and chain it.  */
  check_no_duplicate_clause (list, OMP_CLAUSE_ORDERED,
			     "ordered", location);
  tree c = build_omp_clause (location, OMP_CLAUSE_ORDERED);
  OMP_CLAUSE_CHAIN (c) = list;
  return c;
}
/* OpenMP 2.5:
reduction ( reduction-operator : variable-list )
reduction-operator:
One of: + * - & ^ | && ||
OpenMP 3.1:
reduction-operator:
One of: + * - & ^ | && || min max
OpenMP 4.0:
reduction-operator:
One of: + * - & ^ | && ||
id-expression */
/* Parse "reduction ( reduction-operator : variable-list )".  The operator
   may be a punctuator, "min"/"max", an operator-function-id, or (OpenMP
   4.0) a possibly-qualified id-expression naming a user-defined
   reduction.  Each new OMP_CLAUSE_REDUCTION gets the resolved CODE and
   placeholder ID.  */
static tree
cp_parser_omp_clause_reduction (cp_parser *parser, tree list)
{
  enum tree_code code = ERROR_MARK;
  tree nlist, c, id = NULL_TREE;
  if (!cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN))
    return list;
  /* First try the single-token built-in operators.  */
  switch (cp_lexer_peek_token (parser->lexer)->type)
    {
    case CPP_PLUS: code = PLUS_EXPR; break;
    case CPP_MULT: code = MULT_EXPR; break;
    case CPP_MINUS: code = MINUS_EXPR; break;
    case CPP_AND: code = BIT_AND_EXPR; break;
    case CPP_XOR: code = BIT_XOR_EXPR; break;
    case CPP_OR: code = BIT_IOR_EXPR; break;
    case CPP_AND_AND: code = TRUTH_ANDIF_EXPR; break;
    case CPP_OR_OR: code = TRUTH_ORIF_EXPR; break;
    default: break;
    }
  if (code != ERROR_MARK)
    cp_lexer_consume_token (parser->lexer);
  else
    {
      /* Not a punctuator: parse an id-expression.  The ':' separator must
	 not be treated as a scope-resolution typo while doing so.  */
      bool saved_colon_corrects_to_scope_p;
      saved_colon_corrects_to_scope_p = parser->colon_corrects_to_scope_p;
      parser->colon_corrects_to_scope_p = false;
      id = cp_parser_id_expression (parser, /*template_p=*/false,
				    /*check_dependency_p=*/true,
				    /*template_p=*/NULL,
				    /*declarator_p=*/false,
				    /*optional_p=*/false);
      parser->colon_corrects_to_scope_p = saved_colon_corrects_to_scope_p;
      if (identifier_p (id))
	{
	  const char *p = IDENTIFIER_POINTER (id);
	  /* "min"/"max" (OpenMP 3.1) and operator-ids map back onto
	     built-in codes; anything else stays ERROR_MARK and names a
	     user-defined reduction.  */
	  if (strcmp (p, "min") == 0)
	    code = MIN_EXPR;
	  else if (strcmp (p, "max") == 0)
	    code = MAX_EXPR;
	  else if (id == ansi_opname (PLUS_EXPR))
	    code = PLUS_EXPR;
	  else if (id == ansi_opname (MULT_EXPR))
	    code = MULT_EXPR;
	  else if (id == ansi_opname (MINUS_EXPR))
	    code = MINUS_EXPR;
	  else if (id == ansi_opname (BIT_AND_EXPR))
	    code = BIT_AND_EXPR;
	  else if (id == ansi_opname (BIT_IOR_EXPR))
	    code = BIT_IOR_EXPR;
	  else if (id == ansi_opname (BIT_XOR_EXPR))
	    code = BIT_XOR_EXPR;
	  else if (id == ansi_opname (TRUTH_ANDIF_EXPR))
	    code = TRUTH_ANDIF_EXPR;
	  else if (id == ansi_opname (TRUTH_ORIF_EXPR))
	    code = TRUTH_ORIF_EXPR;
	  id = omp_reduction_id (code, id, NULL_TREE);
	  /* Re-apply any qualification the id-expression carried, then
	     clear the parser's scope state so it does not leak into the
	     variable list.  */
	  tree scope = parser->scope;
	  if (scope)
	    id = build_qualified_name (NULL_TREE, scope, id, false);
	  parser->scope = NULL_TREE;
	  parser->qualifying_scope = NULL_TREE;
	  parser->object_scope = NULL_TREE;
	}
      else
	{
	  error ("invalid reduction-identifier");
	  /* resync_fail is also the target of the missing-colon goto
	     below; it skips to the closing ')' and abandons the clause.  */
	 resync_fail:
	  cp_parser_skip_to_closing_parenthesis (parser, /*recovering=*/true,
						 /*or_comma=*/false,
						 /*consume_paren=*/true);
	  return list;
	}
    }
  if (!cp_parser_require (parser, CPP_COLON, RT_COLON))
    goto resync_fail;
  nlist = cp_parser_omp_var_list_no_open (parser, OMP_CLAUSE_REDUCTION, list,
					  NULL);
  /* Stamp every clause just prepended with the operator and placeholder.  */
  for (c = nlist; c != list; c = OMP_CLAUSE_CHAIN (c))
    {
      OMP_CLAUSE_REDUCTION_CODE (c) = code;
      OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = id;
    }
  return nlist;
}
/* OpenMP 2.5:
schedule ( schedule-kind )
schedule ( schedule-kind , expression )
schedule-kind:
static | dynamic | guided | runtime | auto */
/* Parse "schedule ( schedule-kind [, expression] )".  "static" and
   "auto" arrive as keywords, the other kinds as identifiers.  The chunk
   expression is rejected (with an error) for "runtime" and "auto".  */
static tree
cp_parser_omp_clause_schedule (cp_parser *parser, tree list, location_t location)
{
  tree c, t;
  if (!cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN))
    return list;
  /* Built up-front; discarded (garbage-collected) on the failure paths.  */
  c = build_omp_clause (location, OMP_CLAUSE_SCHEDULE);
  if (cp_lexer_next_token_is (parser->lexer, CPP_NAME))
    {
      tree id = cp_lexer_peek_token (parser->lexer)->u.value;
      const char *p = IDENTIFIER_POINTER (id);
      /* Dispatch on the first character before doing the full strcmp.  */
      switch (p[0])
	{
	case 'd':
	  if (strcmp ("dynamic", p) != 0)
	    goto invalid_kind;
	  OMP_CLAUSE_SCHEDULE_KIND (c) = OMP_CLAUSE_SCHEDULE_DYNAMIC;
	  break;
	case 'g':
	  if (strcmp ("guided", p) != 0)
	    goto invalid_kind;
	  OMP_CLAUSE_SCHEDULE_KIND (c) = OMP_CLAUSE_SCHEDULE_GUIDED;
	  break;
	case 'r':
	  if (strcmp ("runtime", p) != 0)
	    goto invalid_kind;
	  OMP_CLAUSE_SCHEDULE_KIND (c) = OMP_CLAUSE_SCHEDULE_RUNTIME;
	  break;
	default:
	  goto invalid_kind;
	}
    }
  else if (cp_lexer_next_token_is_keyword (parser->lexer, RID_STATIC))
    OMP_CLAUSE_SCHEDULE_KIND (c) = OMP_CLAUSE_SCHEDULE_STATIC;
  else if (cp_lexer_next_token_is_keyword (parser->lexer, RID_AUTO))
    OMP_CLAUSE_SCHEDULE_KIND (c) = OMP_CLAUSE_SCHEDULE_AUTO;
  else
    goto invalid_kind;
  /* Consume the schedule-kind token itself.  */
  cp_lexer_consume_token (parser->lexer);
  if (cp_lexer_next_token_is (parser->lexer, CPP_COMMA))
    {
      cp_token *token;
      cp_lexer_consume_token (parser->lexer);
      token = cp_lexer_peek_token (parser->lexer);
      t = cp_parser_assignment_expression (parser);
      if (t == error_mark_node)
	goto resync_fail;
      else if (OMP_CLAUSE_SCHEDULE_KIND (c) == OMP_CLAUSE_SCHEDULE_RUNTIME)
	error_at (token->location, "schedule %<runtime%> does not take "
		  "a %<chunk_size%> parameter");
      else if (OMP_CLAUSE_SCHEDULE_KIND (c) == OMP_CLAUSE_SCHEDULE_AUTO)
	error_at (token->location, "schedule %<auto%> does not take "
		  "a %<chunk_size%> parameter");
      else
	OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (c) = t;
      if (!cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN))
	goto resync_fail;
    }
  else if (!cp_parser_require (parser, CPP_CLOSE_PAREN, RT_COMMA_CLOSE_PAREN))
    goto resync_fail;
  check_no_duplicate_clause (list, OMP_CLAUSE_SCHEDULE, "schedule", location);
  OMP_CLAUSE_CHAIN (c) = list;
  return c;
 invalid_kind:
  cp_parser_error (parser, "invalid schedule kind");
  /* invalid_kind falls through to resync_fail: report, then skip to the
     closing ')' and abandon the clause.  */
 resync_fail:
  cp_parser_skip_to_closing_parenthesis (parser, /*recovering=*/true,
					 /*or_comma=*/false,
					 /*consume_paren=*/true);
  return list;
}
/* OpenMP 3.0:
untied */
static tree
cp_parser_omp_clause_untied (cp_parser * /*parser*/,
			     tree list, location_t location)
{
  /* "untied" takes no argument: reject duplicates and chain it.  */
  check_no_duplicate_clause (list, OMP_CLAUSE_UNTIED, "untied", location);
  tree c = build_omp_clause (location, OMP_CLAUSE_UNTIED);
  OMP_CLAUSE_CHAIN (c) = list;
  return c;
}
/* OpenMP 4.0:
inbranch
notinbranch */
static tree
cp_parser_omp_clause_branch (cp_parser * /*parser*/, enum omp_clause_code code,
			     tree list, location_t location)
{
  /* Shared by "inbranch" and "notinbranch"; CODE selects which.  The
     duplicate check uses the clause's canonical printed name.  */
  check_no_duplicate_clause (list, code, omp_clause_code_name[code], location);
  tree c = build_omp_clause (location, code);
  OMP_CLAUSE_CHAIN (c) = list;
  return c;
}
/* OpenMP 4.0:
parallel
for
sections
taskgroup */
static tree
cp_parser_omp_clause_cancelkind (cp_parser * /*parser*/,
				 enum omp_clause_code code,
				 tree list, location_t location)
{
  /* parallel/for/sections/taskgroup may legitimately repeat, so unlike
     the other argumentless clauses there is no duplicate check here.  */
  tree c = build_omp_clause (location, code);
  OMP_CLAUSE_CHAIN (c) = list;
  return c;
}
/* OpenMP 4.0:
num_teams ( expression ) */
static tree
cp_parser_omp_clause_num_teams (cp_parser *parser, tree list,
				location_t location)
{
  if (!cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN))
    return list;
  tree t = cp_parser_expression (parser);
  if (t == error_mark_node
      || !cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN))
    cp_parser_skip_to_closing_parenthesis (parser, /*recovering=*/true,
					   /*or_comma=*/false,
					   /*consume_paren=*/true);
  /* Even after recovery a clause is built, matching existing behavior.  */
  check_no_duplicate_clause (list, OMP_CLAUSE_NUM_TEAMS,
			     "num_teams", location);
  tree c = build_omp_clause (location, OMP_CLAUSE_NUM_TEAMS);
  OMP_CLAUSE_CHAIN (c) = list;
  OMP_CLAUSE_NUM_TEAMS_EXPR (c) = t;
  return c;
}
/* OpenMP 4.0:
thread_limit ( expression ) */
static tree
cp_parser_omp_clause_thread_limit (cp_parser *parser, tree list,
				   location_t location)
{
  if (!cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN))
    return list;
  tree t = cp_parser_expression (parser);
  if (t == error_mark_node
      || !cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN))
    cp_parser_skip_to_closing_parenthesis (parser, /*recovering=*/true,
					   /*or_comma=*/false,
					   /*consume_paren=*/true);
  /* Even after recovery a clause is built, matching existing behavior.  */
  check_no_duplicate_clause (list, OMP_CLAUSE_THREAD_LIMIT,
			     "thread_limit", location);
  tree c = build_omp_clause (location, OMP_CLAUSE_THREAD_LIMIT);
  OMP_CLAUSE_CHAIN (c) = list;
  OMP_CLAUSE_THREAD_LIMIT_EXPR (c) = t;
  return c;
}
/* OpenMP 4.0:
aligned ( variable-list )
aligned ( variable-list : constant-expression ) */
static tree
cp_parser_omp_clause_aligned (cp_parser *parser, tree list)
{
  if (!cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN))
    return list;

  bool colon;
  tree nlist = cp_parser_omp_var_list_no_open (parser, OMP_CLAUSE_ALIGNED,
					       list, &colon);

  /* An optional ": constant-expression" gives the alignment; when absent
     (or erroneous) it stays NULL_TREE and a default is used later.  */
  tree alignment = NULL_TREE;
  if (colon)
    {
      alignment = cp_parser_constant_expression (parser);
      if (!cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN))
	cp_parser_skip_to_closing_parenthesis (parser, /*recovering=*/true,
					       /*or_comma=*/false,
					       /*consume_paren=*/true);
      if (alignment == error_mark_node)
	alignment = NULL_TREE;
    }

  for (tree c = nlist; c != list; c = OMP_CLAUSE_CHAIN (c))
    OMP_CLAUSE_ALIGNED_ALIGNMENT (c) = alignment;
  return nlist;
}
/* OpenMP 4.0:
linear ( variable-list )
linear ( variable-list : expression ) */
static tree
cp_parser_omp_clause_linear (cp_parser *parser, tree list,
			     bool is_cilk_simd_fn)
{
  if (!cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN))
    return list;

  bool colon;
  tree nlist = cp_parser_omp_var_list_no_open (parser, OMP_CLAUSE_LINEAR,
					       list, &colon);

  /* The step defaults to 1 when no ": expression" follows.  */
  tree step = integer_one_node;
  if (colon)
    {
      step = cp_parser_expression (parser);
      if (is_cilk_simd_fn && TREE_CODE (step) == PARM_DECL)
	{
	  sorry ("using parameters for %<linear%> step is not supported yet");
	  step = integer_one_node;
	}
      if (!cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN))
	cp_parser_skip_to_closing_parenthesis (parser, /*recovering=*/true,
					       /*or_comma=*/false,
					       /*consume_paren=*/true);
      /* On an erroneous step, drop the freshly-parsed clauses entirely.  */
      if (step == error_mark_node)
	return list;
    }

  for (tree c = nlist; c != list; c = OMP_CLAUSE_CHAIN (c))
    OMP_CLAUSE_LINEAR_STEP (c) = step;
  return nlist;
}
/* OpenMP 4.0:
safelen ( constant-expression ) */
static tree
cp_parser_omp_clause_safelen (cp_parser *parser, tree list,
			      location_t location)
{
  if (!cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN))
    return list;
  tree t = cp_parser_constant_expression (parser);
  if (t == error_mark_node
      || !cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN))
    cp_parser_skip_to_closing_parenthesis (parser, /*recovering=*/true,
					   /*or_comma=*/false,
					   /*consume_paren=*/true);
  /* Even after recovery a clause is built, matching existing behavior.  */
  check_no_duplicate_clause (list, OMP_CLAUSE_SAFELEN, "safelen", location);
  tree c = build_omp_clause (location, OMP_CLAUSE_SAFELEN);
  OMP_CLAUSE_CHAIN (c) = list;
  OMP_CLAUSE_SAFELEN_EXPR (c) = t;
  return c;
}
/* OpenMP 4.0:
simdlen ( constant-expression ) */
static tree
cp_parser_omp_clause_simdlen (cp_parser *parser, tree list,
			      location_t location)
{
  if (!cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN))
    return list;
  tree t = cp_parser_constant_expression (parser);
  if (t == error_mark_node
      || !cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN))
    cp_parser_skip_to_closing_parenthesis (parser, /*recovering=*/true,
					   /*or_comma=*/false,
					   /*consume_paren=*/true);
  /* Even after recovery a clause is built, matching existing behavior.  */
  check_no_duplicate_clause (list, OMP_CLAUSE_SIMDLEN, "simdlen", location);
  tree c = build_omp_clause (location, OMP_CLAUSE_SIMDLEN);
  OMP_CLAUSE_CHAIN (c) = list;
  OMP_CLAUSE_SIMDLEN_EXPR (c) = t;
  return c;
}
/* OpenMP 4.0:
depend ( depend-kind : variable-list )
depend-kind:
in | out | inout */
/* Parse "depend ( depend-kind : variable-list )" with depend-kind one of
   in, out, inout.  Each new OMP_CLAUSE_DEPEND is stamped with the kind.  */
static tree
cp_parser_omp_clause_depend (cp_parser *parser, tree list)
{
  tree nlist, c;
  enum omp_clause_depend_kind kind = OMP_CLAUSE_DEPEND_INOUT;
  if (!cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN))
    return list;
  if (cp_lexer_next_token_is (parser->lexer, CPP_NAME))
    {
      tree id = cp_lexer_peek_token (parser->lexer)->u.value;
      const char *p = IDENTIFIER_POINTER (id);
      if (strcmp ("in", p) == 0)
	kind = OMP_CLAUSE_DEPEND_IN;
      else if (strcmp ("inout", p) == 0)
	kind = OMP_CLAUSE_DEPEND_INOUT;
      else if (strcmp ("out", p) == 0)
	kind = OMP_CLAUSE_DEPEND_OUT;
      else
	goto invalid_kind;
    }
  else
    goto invalid_kind;
  /* Consume the depend-kind identifier.  */
  cp_lexer_consume_token (parser->lexer);
  if (!cp_parser_require (parser, CPP_COLON, RT_COLON))
    goto resync_fail;
  nlist = cp_parser_omp_var_list_no_open (parser, OMP_CLAUSE_DEPEND, list,
					  NULL);
  for (c = nlist; c != list; c = OMP_CLAUSE_CHAIN (c))
    OMP_CLAUSE_DEPEND_KIND (c) = kind;
  return nlist;
 invalid_kind:
  cp_parser_error (parser, "invalid depend kind");
  /* invalid_kind falls through to resync_fail: report, then skip to the
     closing ')' and abandon the clause.  */
 resync_fail:
  cp_parser_skip_to_closing_parenthesis (parser, /*recovering=*/true,
					 /*or_comma=*/false,
					 /*consume_paren=*/true);
  return list;
}
/* OpenMP 4.0:
map ( map-kind : variable-list )
map ( variable-list )
map-kind:
alloc | to | from | tofrom */
/* Parse "map ( [map-kind :] variable-list )" with map-kind one of alloc,
   to, from, tofrom; tofrom is the default when no kind is written.  */
static tree
cp_parser_omp_clause_map (cp_parser *parser, tree list)
{
  tree nlist, c;
  enum gomp_map_kind kind = GOMP_MAP_TOFROM;
  if (!cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN))
    return list;
  /* A map-kind is present only when an identifier is immediately
     followed by ':'; otherwise the name belongs to the variable list.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_NAME)
      && cp_lexer_peek_nth_token (parser->lexer, 2)->type == CPP_COLON)
    {
      tree id = cp_lexer_peek_token (parser->lexer)->u.value;
      const char *p = IDENTIFIER_POINTER (id);
      if (strcmp ("alloc", p) == 0)
	kind = GOMP_MAP_ALLOC;
      else if (strcmp ("to", p) == 0)
	kind = GOMP_MAP_TO;
      else if (strcmp ("from", p) == 0)
	kind = GOMP_MAP_FROM;
      else if (strcmp ("tofrom", p) == 0)
	kind = GOMP_MAP_TOFROM;
      else
	{
	  cp_parser_error (parser, "invalid map kind");
	  cp_parser_skip_to_closing_parenthesis (parser, /*recovering=*/true,
						 /*or_comma=*/false,
						 /*consume_paren=*/true);
	  return list;
	}
      /* Consume both the map-kind identifier and the ':'.  */
      cp_lexer_consume_token (parser->lexer);
      cp_lexer_consume_token (parser->lexer);
    }
  nlist = cp_parser_omp_var_list_no_open (parser, OMP_CLAUSE_MAP, list,
					  NULL);
  for (c = nlist; c != list; c = OMP_CLAUSE_CHAIN (c))
    OMP_CLAUSE_SET_MAP_KIND (c, kind);
  return nlist;
}
/* OpenMP 4.0:
device ( expression ) */
static tree
cp_parser_omp_clause_device (cp_parser *parser, tree list,
			     location_t location)
{
  if (!cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN))
    return list;
  tree t = cp_parser_expression (parser);
  if (t == error_mark_node
      || !cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN))
    cp_parser_skip_to_closing_parenthesis (parser, /*recovering=*/true,
					   /*or_comma=*/false,
					   /*consume_paren=*/true);
  /* Even after recovery a clause is built, matching existing behavior.  */
  check_no_duplicate_clause (list, OMP_CLAUSE_DEVICE,
			     "device", location);
  tree c = build_omp_clause (location, OMP_CLAUSE_DEVICE);
  OMP_CLAUSE_CHAIN (c) = list;
  OMP_CLAUSE_DEVICE_ID (c) = t;
  return c;
}
/* OpenMP 4.0:
dist_schedule ( static )
dist_schedule ( static , expression ) */
/* Parse "dist_schedule ( static [, expression] )".  Only the "static"
   kind exists; the optional expression becomes the chunk size.  */
static tree
cp_parser_omp_clause_dist_schedule (cp_parser *parser, tree list,
				    location_t location)
{
  tree c, t;
  if (!cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN))
    return list;
  /* Built up-front; discarded (garbage-collected) on the failure paths.  */
  c = build_omp_clause (location, OMP_CLAUSE_DIST_SCHEDULE);
  if (!cp_lexer_next_token_is_keyword (parser->lexer, RID_STATIC))
    goto invalid_kind;
  cp_lexer_consume_token (parser->lexer);
  if (cp_lexer_next_token_is (parser->lexer, CPP_COMMA))
    {
      cp_lexer_consume_token (parser->lexer);
      t = cp_parser_assignment_expression (parser);
      if (t == error_mark_node)
	goto resync_fail;
      OMP_CLAUSE_DIST_SCHEDULE_CHUNK_EXPR (c) = t;
      if (!cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN))
	goto resync_fail;
    }
  else if (!cp_parser_require (parser, CPP_CLOSE_PAREN, RT_COMMA_CLOSE_PAREN))
    goto resync_fail;
  check_no_duplicate_clause (list, OMP_CLAUSE_DIST_SCHEDULE, "dist_schedule",
			     location);
  OMP_CLAUSE_CHAIN (c) = list;
  return c;
 invalid_kind:
  cp_parser_error (parser, "invalid dist_schedule kind");
  /* invalid_kind falls through to resync_fail: report, then skip to the
     closing ')' and abandon the clause.  */
 resync_fail:
  cp_parser_skip_to_closing_parenthesis (parser, /*recovering=*/true,
					 /*or_comma=*/false,
					 /*consume_paren=*/true);
  return list;
}
/* OpenMP 4.0:
proc_bind ( proc-bind-kind )
proc-bind-kind:
master | close | spread */
/* Parse "proc_bind ( proc-bind-kind )" with proc-bind-kind one of
   master, close, spread.  Returns a new OMP_CLAUSE_PROC_BIND chained
   onto LIST, or LIST unchanged on error.  */
static tree
cp_parser_omp_clause_proc_bind (cp_parser *parser, tree list,
				location_t location)
{
  tree c;
  enum omp_clause_proc_bind_kind kind;
  if (!cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN))
    return list;
  if (cp_lexer_next_token_is (parser->lexer, CPP_NAME))
    {
      tree id = cp_lexer_peek_token (parser->lexer)->u.value;
      const char *p = IDENTIFIER_POINTER (id);
      if (strcmp ("master", p) == 0)
	kind = OMP_CLAUSE_PROC_BIND_MASTER;
      else if (strcmp ("close", p) == 0)
	kind = OMP_CLAUSE_PROC_BIND_CLOSE;
      else if (strcmp ("spread", p) == 0)
	kind = OMP_CLAUSE_PROC_BIND_SPREAD;
      else
	goto invalid_kind;
    }
  else
    goto invalid_kind;
  /* Consume the proc-bind-kind identifier.  */
  cp_lexer_consume_token (parser->lexer);
  if (!cp_parser_require (parser, CPP_CLOSE_PAREN, RT_COMMA_CLOSE_PAREN))
    goto resync_fail;
  c = build_omp_clause (location, OMP_CLAUSE_PROC_BIND);
  check_no_duplicate_clause (list, OMP_CLAUSE_PROC_BIND, "proc_bind",
			     location);
  OMP_CLAUSE_PROC_BIND_KIND (c) = kind;
  OMP_CLAUSE_CHAIN (c) = list;
  return c;
 invalid_kind:
  /* Fixed: the diagnostic previously said "invalid depend kind"
     (copy-paste from cp_parser_omp_clause_depend); this parser handles
     proc_bind, so the message must name proc_bind.  */
  cp_parser_error (parser, "invalid proc_bind kind");
 resync_fail:
  cp_parser_skip_to_closing_parenthesis (parser, /*recovering=*/true,
					 /*or_comma=*/false,
					 /*consume_paren=*/true);
  return list;
}
/* OpenACC:
async [( int-expr )] */
static tree
cp_parser_oacc_clause_async (cp_parser *parser, tree list)
{
  location_t loc = cp_lexer_peek_token (parser->lexer)->location;

  /* Without an argument parenthesis the async queue defaults to
     GOMP_ASYNC_NOVAL.  */
  tree t = build_int_cst (integer_type_node, GOMP_ASYNC_NOVAL);
  if (cp_lexer_peek_token (parser->lexer)->type == CPP_OPEN_PAREN)
    {
      cp_lexer_consume_token (parser->lexer);
      t = cp_parser_expression (parser);
      if (t == error_mark_node
	  || !cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN))
	cp_parser_skip_to_closing_parenthesis (parser, /*recovering=*/true,
					       /*or_comma=*/false,
					       /*consume_paren=*/true);
    }

  check_no_duplicate_clause (list, OMP_CLAUSE_ASYNC, "async", loc);
  tree c = build_omp_clause (loc, OMP_CLAUSE_ASYNC);
  OMP_CLAUSE_ASYNC_EXPR (c) = t;
  OMP_CLAUSE_CHAIN (c) = list;
  return c;
}
/* Parse all OpenACC clauses. The set clauses allowed by the directive
is a bitmask in MASK. Return the list of clauses found. */
/* Parse all OpenACC clauses up to the pragma end-of-line.  MASK is a
   bitmask (indexed by pragma_omp_clause) of the clauses the directive
   accepts; clauses outside the mask are parsed, diagnosed and dropped.
   When FINISH_P, the resulting list is run through finish_omp_clauses.  */
static tree
cp_parser_oacc_all_clauses (cp_parser *parser, omp_clause_mask mask,
			    const char *where, cp_token *pragma_tok,
			    bool finish_p = true)
{
  tree clauses = NULL;
  bool first = true;
  while (cp_lexer_next_token_is_not (parser->lexer, CPP_PRAGMA_EOL))
    {
      location_t here;
      pragma_omp_clause c_kind;
      const char *c_name;
      /* Remember the list head so an invalid clause can be unchained.  */
      tree prev = clauses;
      /* Clauses after the first may be comma-separated.  */
      if (!first && cp_lexer_next_token_is (parser->lexer, CPP_COMMA))
	cp_lexer_consume_token (parser->lexer);
      here = cp_lexer_peek_token (parser->lexer)->location;
      c_kind = cp_parser_omp_clause_name (parser);
      /* Dispatch to the per-clause parser; each prepends to CLAUSES.  */
      switch (c_kind)
	{
	case PRAGMA_OACC_CLAUSE_ASYNC:
	  clauses = cp_parser_oacc_clause_async (parser, clauses);
	  c_name = "async";
	  break;
	case PRAGMA_OACC_CLAUSE_COLLAPSE:
	  clauses = cp_parser_omp_clause_collapse (parser, clauses, here);
	  c_name = "collapse";
	  break;
	case PRAGMA_OACC_CLAUSE_COPY:
	  clauses = cp_parser_oacc_data_clause (parser, c_kind, clauses);
	  c_name = "copy";
	  break;
	case PRAGMA_OACC_CLAUSE_COPYIN:
	  clauses = cp_parser_oacc_data_clause (parser, c_kind, clauses);
	  c_name = "copyin";
	  break;
	case PRAGMA_OACC_CLAUSE_COPYOUT:
	  clauses = cp_parser_oacc_data_clause (parser, c_kind, clauses);
	  c_name = "copyout";
	  break;
	case PRAGMA_OACC_CLAUSE_CREATE:
	  clauses = cp_parser_oacc_data_clause (parser, c_kind, clauses);
	  c_name = "create";
	  break;
	case PRAGMA_OACC_CLAUSE_DELETE:
	  clauses = cp_parser_oacc_data_clause (parser, c_kind, clauses);
	  c_name = "delete";
	  break;
	case PRAGMA_OACC_CLAUSE_DEVICE:
	  clauses = cp_parser_oacc_data_clause (parser, c_kind, clauses);
	  c_name = "device";
	  break;
	case PRAGMA_OACC_CLAUSE_DEVICEPTR:
	  clauses = cp_parser_oacc_data_clause_deviceptr (parser, clauses);
	  c_name = "deviceptr";
	  break;
	case PRAGMA_OACC_CLAUSE_HOST:
	  clauses = cp_parser_oacc_data_clause (parser, c_kind, clauses);
	  c_name = "host";
	  break;
	case PRAGMA_OACC_CLAUSE_IF:
	  clauses = cp_parser_omp_clause_if (parser, clauses, here);
	  c_name = "if";
	  break;
	case PRAGMA_OACC_CLAUSE_NUM_GANGS:
	  clauses = cp_parser_omp_clause_num_gangs (parser, clauses);
	  c_name = "num_gangs";
	  break;
	case PRAGMA_OACC_CLAUSE_NUM_WORKERS:
	  clauses = cp_parser_omp_clause_num_workers (parser, clauses);
	  c_name = "num_workers";
	  break;
	case PRAGMA_OACC_CLAUSE_PRESENT:
	  clauses = cp_parser_oacc_data_clause (parser, c_kind, clauses);
	  c_name = "present";
	  break;
	case PRAGMA_OACC_CLAUSE_PRESENT_OR_COPY:
	  clauses = cp_parser_oacc_data_clause (parser, c_kind, clauses);
	  c_name = "present_or_copy";
	  break;
	case PRAGMA_OACC_CLAUSE_PRESENT_OR_COPYIN:
	  clauses = cp_parser_oacc_data_clause (parser, c_kind, clauses);
	  c_name = "present_or_copyin";
	  break;
	case PRAGMA_OACC_CLAUSE_PRESENT_OR_COPYOUT:
	  clauses = cp_parser_oacc_data_clause (parser, c_kind, clauses);
	  c_name = "present_or_copyout";
	  break;
	case PRAGMA_OACC_CLAUSE_PRESENT_OR_CREATE:
	  clauses = cp_parser_oacc_data_clause (parser, c_kind, clauses);
	  c_name = "present_or_create";
	  break;
	case PRAGMA_OACC_CLAUSE_REDUCTION:
	  clauses = cp_parser_omp_clause_reduction (parser, clauses);
	  c_name = "reduction";
	  break;
	case PRAGMA_OACC_CLAUSE_SELF:
	  clauses = cp_parser_oacc_data_clause (parser, c_kind, clauses);
	  c_name = "self";
	  break;
	case PRAGMA_OACC_CLAUSE_VECTOR_LENGTH:
	  clauses = cp_parser_oacc_clause_vector_length (parser, clauses);
	  c_name = "vector_length";
	  break;
	case PRAGMA_OACC_CLAUSE_WAIT:
	  clauses = cp_parser_oacc_clause_wait (parser, clauses);
	  c_name = "wait";
	  break;
	default:
	  cp_parser_error (parser, "expected %<#pragma acc%> clause");
	  goto saw_error;
	}
      first = false;
      /* Clauses not permitted by this directive's MASK are unchained.  */
      if (((mask >> c_kind) & 1) == 0)
	{
	  /* Remove the invalid clause(s) from the list to avoid
	     confusing the rest of the compiler.  */
	  clauses = prev;
	  error_at (here, "%qs is not valid for %qs", c_name, where);
	}
    }
 saw_error:
  cp_parser_skip_to_pragma_eol (parser, pragma_tok);
  if (finish_p)
    return finish_omp_clauses (clauses);
  return clauses;
}
/* Parse all OpenMP clauses.  The set of clauses allowed by the directive
   is a bitmask in MASK.  WHERE is the directive name, used for diagnostics.
   PRAGMA_TOK is the pragma token (may be NULL for Cilk Plus SIMD-enabled
   functions, which have no pragma).  Return the list of clauses found; if
   FINISH_P, run finish_omp_clauses on the result first.  */
static tree
cp_parser_omp_all_clauses (cp_parser *parser, omp_clause_mask mask,
                           const char *where, cp_token *pragma_tok,
                           bool finish_p = true)
{
  tree clauses = NULL;
  bool first = true;
  cp_token *token = NULL;
  /* True when parsing the clause list of a Cilk Plus SIMD-enabled
     function (detected via the vectorlength bit in MASK below).  */
  bool cilk_simd_fn = false;
  while (cp_lexer_next_token_is_not (parser->lexer, CPP_PRAGMA_EOL))
    {
      pragma_omp_clause c_kind;
      const char *c_name;
      /* Remember the list head so an invalid clause can be dropped
         wholesale after it has been parsed.  */
      tree prev = clauses;
      if (!first && cp_lexer_next_token_is (parser->lexer, CPP_COMMA))
        cp_lexer_consume_token (parser->lexer);
      token = cp_lexer_peek_token (parser->lexer);
      c_kind = cp_parser_omp_clause_name (parser);
      switch (c_kind)
        {
        case PRAGMA_OMP_CLAUSE_COLLAPSE:
          clauses = cp_parser_omp_clause_collapse (parser, clauses,
                                                   token->location);
          c_name = "collapse";
          break;
        case PRAGMA_OMP_CLAUSE_COPYIN:
          clauses = cp_parser_omp_var_list (parser, OMP_CLAUSE_COPYIN, clauses);
          c_name = "copyin";
          break;
        case PRAGMA_OMP_CLAUSE_COPYPRIVATE:
          clauses = cp_parser_omp_var_list (parser, OMP_CLAUSE_COPYPRIVATE,
                                            clauses);
          c_name = "copyprivate";
          break;
        case PRAGMA_OMP_CLAUSE_DEFAULT:
          clauses = cp_parser_omp_clause_default (parser, clauses,
                                                  token->location);
          c_name = "default";
          break;
        case PRAGMA_OMP_CLAUSE_FINAL:
          clauses = cp_parser_omp_clause_final (parser, clauses, token->location);
          c_name = "final";
          break;
        case PRAGMA_OMP_CLAUSE_FIRSTPRIVATE:
          clauses = cp_parser_omp_var_list (parser, OMP_CLAUSE_FIRSTPRIVATE,
                                            clauses);
          c_name = "firstprivate";
          break;
        case PRAGMA_OMP_CLAUSE_IF:
          clauses = cp_parser_omp_clause_if (parser, clauses, token->location);
          c_name = "if";
          break;
        case PRAGMA_OMP_CLAUSE_LASTPRIVATE:
          clauses = cp_parser_omp_var_list (parser, OMP_CLAUSE_LASTPRIVATE,
                                            clauses);
          c_name = "lastprivate";
          break;
        case PRAGMA_OMP_CLAUSE_MERGEABLE:
          clauses = cp_parser_omp_clause_mergeable (parser, clauses,
                                                    token->location);
          c_name = "mergeable";
          break;
        case PRAGMA_OMP_CLAUSE_NOWAIT:
          clauses = cp_parser_omp_clause_nowait (parser, clauses, token->location);
          c_name = "nowait";
          break;
        case PRAGMA_OMP_CLAUSE_NUM_THREADS:
          clauses = cp_parser_omp_clause_num_threads (parser, clauses,
                                                      token->location);
          c_name = "num_threads";
          break;
        case PRAGMA_OMP_CLAUSE_ORDERED:
          clauses = cp_parser_omp_clause_ordered (parser, clauses,
                                                  token->location);
          c_name = "ordered";
          break;
        case PRAGMA_OMP_CLAUSE_PRIVATE:
          clauses = cp_parser_omp_var_list (parser, OMP_CLAUSE_PRIVATE,
                                            clauses);
          c_name = "private";
          break;
        case PRAGMA_OMP_CLAUSE_REDUCTION:
          clauses = cp_parser_omp_clause_reduction (parser, clauses);
          c_name = "reduction";
          break;
        case PRAGMA_OMP_CLAUSE_SCHEDULE:
          clauses = cp_parser_omp_clause_schedule (parser, clauses,
                                                   token->location);
          c_name = "schedule";
          break;
        case PRAGMA_OMP_CLAUSE_SHARED:
          clauses = cp_parser_omp_var_list (parser, OMP_CLAUSE_SHARED,
                                            clauses);
          c_name = "shared";
          break;
        case PRAGMA_OMP_CLAUSE_UNTIED:
          clauses = cp_parser_omp_clause_untied (parser, clauses,
                                                 token->location);
          c_name = "untied";
          break;
        /* The Cilk Plus mask/nomask clauses share parsing with the OpenMP
           inbranch/notinbranch clauses.  */
        case PRAGMA_OMP_CLAUSE_INBRANCH:
        case PRAGMA_CILK_CLAUSE_MASK:
          clauses = cp_parser_omp_clause_branch (parser, OMP_CLAUSE_INBRANCH,
                                                 clauses, token->location);
          c_name = "inbranch";
          break;
        case PRAGMA_OMP_CLAUSE_NOTINBRANCH:
        case PRAGMA_CILK_CLAUSE_NOMASK:
          clauses = cp_parser_omp_clause_branch (parser,
                                                 OMP_CLAUSE_NOTINBRANCH,
                                                 clauses, token->location);
          c_name = "notinbranch";
          break;
        /* The cancel kind clauses (parallel/for/sections/taskgroup) must
           be the first clause of a #pragma omp cancel/cancellation point.  */
        case PRAGMA_OMP_CLAUSE_PARALLEL:
          clauses = cp_parser_omp_clause_cancelkind (parser, OMP_CLAUSE_PARALLEL,
                                                     clauses, token->location);
          c_name = "parallel";
          if (!first)
            {
            clause_not_first:
              error_at (token->location, "%qs must be the first clause of %qs",
                        c_name, where);
              clauses = prev;
            }
          break;
        case PRAGMA_OMP_CLAUSE_FOR:
          clauses = cp_parser_omp_clause_cancelkind (parser, OMP_CLAUSE_FOR,
                                                     clauses, token->location);
          c_name = "for";
          if (!first)
            goto clause_not_first;
          break;
        case PRAGMA_OMP_CLAUSE_SECTIONS:
          clauses = cp_parser_omp_clause_cancelkind (parser, OMP_CLAUSE_SECTIONS,
                                                     clauses, token->location);
          c_name = "sections";
          if (!first)
            goto clause_not_first;
          break;
        case PRAGMA_OMP_CLAUSE_TASKGROUP:
          clauses = cp_parser_omp_clause_cancelkind (parser, OMP_CLAUSE_TASKGROUP,
                                                     clauses, token->location);
          c_name = "taskgroup";
          if (!first)
            goto clause_not_first;
          break;
        case PRAGMA_OMP_CLAUSE_TO:
          clauses = cp_parser_omp_var_list (parser, OMP_CLAUSE_TO,
                                            clauses);
          c_name = "to";
          break;
        case PRAGMA_OMP_CLAUSE_FROM:
          clauses = cp_parser_omp_var_list (parser, OMP_CLAUSE_FROM,
                                            clauses);
          c_name = "from";
          break;
        case PRAGMA_OMP_CLAUSE_UNIFORM:
          clauses = cp_parser_omp_var_list (parser, OMP_CLAUSE_UNIFORM,
                                            clauses);
          c_name = "uniform";
          break;
        case PRAGMA_OMP_CLAUSE_NUM_TEAMS:
          clauses = cp_parser_omp_clause_num_teams (parser, clauses,
                                                    token->location);
          c_name = "num_teams";
          break;
        case PRAGMA_OMP_CLAUSE_THREAD_LIMIT:
          clauses = cp_parser_omp_clause_thread_limit (parser, clauses,
                                                       token->location);
          c_name = "thread_limit";
          break;
        case PRAGMA_OMP_CLAUSE_ALIGNED:
          clauses = cp_parser_omp_clause_aligned (parser, clauses);
          c_name = "aligned";
          break;
        case PRAGMA_OMP_CLAUSE_LINEAR:
          /* A vectorlength bit in MASK means this is a Cilk Plus
             SIMD-enabled function; linear parses slightly differently.  */
          if (((mask >> PRAGMA_CILK_CLAUSE_VECTORLENGTH) & 1) != 0)
            cilk_simd_fn = true;
          clauses = cp_parser_omp_clause_linear (parser, clauses, cilk_simd_fn);
          c_name = "linear";
          break;
        case PRAGMA_OMP_CLAUSE_DEPEND:
          clauses = cp_parser_omp_clause_depend (parser, clauses);
          c_name = "depend";
          break;
        case PRAGMA_OMP_CLAUSE_MAP:
          clauses = cp_parser_omp_clause_map (parser, clauses);
          c_name = "map";
          break;
        case PRAGMA_OMP_CLAUSE_DEVICE:
          clauses = cp_parser_omp_clause_device (parser, clauses,
                                                 token->location);
          c_name = "device";
          break;
        case PRAGMA_OMP_CLAUSE_DIST_SCHEDULE:
          clauses = cp_parser_omp_clause_dist_schedule (parser, clauses,
                                                        token->location);
          c_name = "dist_schedule";
          break;
        case PRAGMA_OMP_CLAUSE_PROC_BIND:
          clauses = cp_parser_omp_clause_proc_bind (parser, clauses,
                                                    token->location);
          c_name = "proc_bind";
          break;
        case PRAGMA_OMP_CLAUSE_SAFELEN:
          clauses = cp_parser_omp_clause_safelen (parser, clauses,
                                                  token->location);
          c_name = "safelen";
          break;
        case PRAGMA_OMP_CLAUSE_SIMDLEN:
          clauses = cp_parser_omp_clause_simdlen (parser, clauses,
                                                  token->location);
          c_name = "simdlen";
          break;
        /* Cilk Plus vectorlength is represented as simdlen.  */
        case PRAGMA_CILK_CLAUSE_VECTORLENGTH:
          clauses = cp_parser_cilk_simd_vectorlength (parser, clauses, true);
          c_name = "simdlen";
          break;
        default:
          cp_parser_error (parser, "expected %<#pragma omp%> clause");
          goto saw_error;
        }
      first = false;
      if (((mask >> c_kind) & 1) == 0)
        {
          /* Remove the invalid clause(s) from the list to avoid
             confusing the rest of the compiler.  */
          clauses = prev;
          error_at (token->location, "%qs is not valid for %qs", c_name, where);
        }
    }
 saw_error:
  /* In Cilk Plus SIMD enabled functions there is no pragma_token, so
     no reason to skip to the end.  */
  if (!(flag_cilkplus && pragma_tok == NULL))
    cp_parser_skip_to_pragma_eol (parser, pragma_tok);
  if (finish_p)
    return finish_omp_clauses (clauses);
  return clauses;
}
/* OpenMP 2.5:
   structured-block:
     statement
   In practice, we're also interested in adding the statement to an
   outer node.  So it is convenient if we work around the fact that
   cp_parser_statement calls add_stmt.

   Record the parser's current statement context and switch it to
   IN_OMP_BLOCK; the caller restores the saved value afterwards via
   cp_parser_end_omp_structured_block.  */
static unsigned
cp_parser_begin_omp_structured_block (cp_parser *parser)
{
  unsigned saved_state = parser->in_statement;
  /* Only move the values to IN_OMP_BLOCK if they weren't false.
     This preserves the "not within loop or switch" style error messages
     for nonsense cases like
       void foo() {
       #pragma omp single
         break;
       }
  */
  if (parser->in_statement != 0)
    parser->in_statement = IN_OMP_BLOCK;
  return saved_state;
}
/* Undo cp_parser_begin_omp_structured_block: restore the statement
   context SAVE that it returned.  */
static void
cp_parser_end_omp_structured_block (cp_parser *parser, unsigned save)
{
  parser->in_statement = save;
}
/* Parse a single statement as an OpenMP structured block and return
   the resulting statement tree.  */
static tree
cp_parser_omp_structured_block (cp_parser *parser)
{
  tree block = begin_omp_structured_block ();
  unsigned int saved_in_statement = cp_parser_begin_omp_structured_block (parser);

  /* The statement adds itself via add_stmt; see the comment above
     cp_parser_begin_omp_structured_block.  */
  cp_parser_statement (parser, NULL_TREE, false, NULL);

  cp_parser_end_omp_structured_block (parser, saved_in_statement);
  return finish_omp_structured_block (block);
}
/* OpenMP 2.5:
   # pragma omp atomic new-line
     expression-stmt
   expression-stmt:
     x binop= expr | x++ | ++x | x-- | --x
   binop:
     +, *, -, /, &, ^, |, <<, >>
   where x is an lvalue expression with scalar type.
   OpenMP 3.1:
   # pragma omp atomic new-line
     update-stmt
   # pragma omp atomic read new-line
     read-stmt
   # pragma omp atomic write new-line
     write-stmt
   # pragma omp atomic update new-line
     update-stmt
   # pragma omp atomic capture new-line
     capture-stmt
   # pragma omp atomic capture new-line
     capture-block
   read-stmt:
     v = x
   write-stmt:
     x = expr
   update-stmt:
     expression-stmt | x = x binop expr
   capture-stmt:
     v = expression-stmt
   capture-block:
     { v = x; update-stmt; } | { update-stmt; v = x; }
   OpenMP 4.0:
   update-stmt:
     expression-stmt | x = x binop expr | x = expr binop x
   capture-stmt:
     v = update-stmt
   capture-block:
     { v = x; update-stmt; } | { update-stmt; v = x; } | { v = x; x = expr; }
   where x and v are lvalue expressions with scalar type.

   Parse the statement(s) following the pragma, classify the atomic form,
   and hand the pieces (x, v, operator, operands) to finish_omp_atomic.  */
static void
cp_parser_omp_atomic (cp_parser *parser, cp_token *pragma_tok)
{
  /* LHS is x, V is v, RHS the operand expression; LHS1/RHS1 carry the
     second statement of a capture-block / the repeated-x operand.  */
  tree lhs = NULL_TREE, rhs = NULL_TREE, v = NULL_TREE, lhs1 = NULL_TREE;
  tree rhs1 = NULL_TREE, orig_lhs;
  /* CODE selects the atomic kind; OPCODE the binary operation applied.  */
  enum tree_code code = OMP_ATOMIC, opcode = NOP_EXPR;
  bool structured_block = false;
  bool seq_cst = false;
  /* "seq_cst" may precede the atomic kind...  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_NAME))
    {
      tree id = cp_lexer_peek_token (parser->lexer)->u.value;
      const char *p = IDENTIFIER_POINTER (id);
      if (!strcmp (p, "seq_cst"))
        {
          seq_cst = true;
          cp_lexer_consume_token (parser->lexer);
          /* Optionally eat a comma separating "seq_cst" from the kind.  */
          if (cp_lexer_next_token_is (parser->lexer, CPP_COMMA)
              && cp_lexer_peek_nth_token (parser->lexer, 2)->type == CPP_NAME)
            cp_lexer_consume_token (parser->lexer);
        }
    }
  /* Parse the optional atomic kind: read / write / update / capture.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_NAME))
    {
      tree id = cp_lexer_peek_token (parser->lexer)->u.value;
      const char *p = IDENTIFIER_POINTER (id);
      if (!strcmp (p, "read"))
        code = OMP_ATOMIC_READ;
      else if (!strcmp (p, "write"))
        /* Atomic write is represented below as OMP_ATOMIC + NOP_EXPR.  */
        code = NOP_EXPR;
      else if (!strcmp (p, "update"))
        code = OMP_ATOMIC;
      else if (!strcmp (p, "capture"))
        code = OMP_ATOMIC_CAPTURE_NEW;
      else
        p = NULL;
      if (p)
        cp_lexer_consume_token (parser->lexer);
    }
  /* ...or "seq_cst" may follow the kind instead.  */
  if (!seq_cst)
    {
      if (cp_lexer_next_token_is (parser->lexer, CPP_COMMA)
          && cp_lexer_peek_nth_token (parser->lexer, 2)->type == CPP_NAME)
        cp_lexer_consume_token (parser->lexer);
      if (cp_lexer_next_token_is (parser->lexer, CPP_NAME))
        {
          tree id = cp_lexer_peek_token (parser->lexer)->u.value;
          const char *p = IDENTIFIER_POINTER (id);
          if (!strcmp (p, "seq_cst"))
            {
              seq_cst = true;
              cp_lexer_consume_token (parser->lexer);
            }
        }
    }
  cp_parser_require_pragma_eol (parser, pragma_tok);
  switch (code)
    {
    case OMP_ATOMIC_READ:
    case NOP_EXPR: /* atomic write */
      /* Both forms start "v = ..."; for write the target is then
         swapped into LHS below.  */
      v = cp_parser_unary_expression (parser);
      if (v == error_mark_node)
        goto saw_error;
      if (!cp_parser_require (parser, CPP_EQ, RT_EQ))
        goto saw_error;
      if (code == NOP_EXPR)
        lhs = cp_parser_expression (parser);
      else
        lhs = cp_parser_unary_expression (parser);
      if (lhs == error_mark_node)
        goto saw_error;
      if (code == NOP_EXPR)
        {
          /* atomic write is represented by OMP_ATOMIC with NOP_EXPR
             opcode.  */
          code = OMP_ATOMIC;
          rhs = lhs;
          lhs = v;
          v = NULL_TREE;
        }
      goto done;
    case OMP_ATOMIC_CAPTURE_NEW:
      /* capture-block starts with '{'; capture-stmt with "v = ...".  */
      if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE))
        {
          cp_lexer_consume_token (parser->lexer);
          structured_block = true;
        }
      else
        {
          v = cp_parser_unary_expression (parser);
          if (v == error_mark_node)
            goto saw_error;
          if (!cp_parser_require (parser, CPP_EQ, RT_EQ))
            goto saw_error;
        }
    default:
      break;
    }
  /* Parse one update statement; for capture-blocks we may come back
     here once more for the second statement.  */
 restart:
  lhs = cp_parser_unary_expression (parser);
  orig_lhs = lhs;
  switch (TREE_CODE (lhs))
    {
    case ERROR_MARK:
      goto saw_error;
    case POSTINCREMENT_EXPR:
      /* "v = x++" captures the old value.  */
      if (code == OMP_ATOMIC_CAPTURE_NEW && !structured_block)
        code = OMP_ATOMIC_CAPTURE_OLD;
      /* FALLTHROUGH */
    case PREINCREMENT_EXPR:
      lhs = TREE_OPERAND (lhs, 0);
      opcode = PLUS_EXPR;
      rhs = integer_one_node;
      break;
    case POSTDECREMENT_EXPR:
      if (code == OMP_ATOMIC_CAPTURE_NEW && !structured_block)
        code = OMP_ATOMIC_CAPTURE_OLD;
      /* FALLTHROUGH */
    case PREDECREMENT_EXPR:
      lhs = TREE_OPERAND (lhs, 0);
      opcode = MINUS_EXPR;
      rhs = integer_one_node;
      break;
    case COMPOUND_EXPR:
      if (TREE_CODE (TREE_OPERAND (lhs, 0)) == SAVE_EXPR
          && TREE_CODE (TREE_OPERAND (lhs, 1)) == COMPOUND_EXPR
          && TREE_CODE (TREE_OPERAND (TREE_OPERAND (lhs, 1), 0)) == MODIFY_EXPR
          && TREE_OPERAND (TREE_OPERAND (lhs, 1), 1) == TREE_OPERAND (lhs, 0)
          && TREE_CODE (TREE_TYPE (TREE_OPERAND (TREE_OPERAND
                                                 (TREE_OPERAND (lhs, 1), 0), 0)))
             == BOOLEAN_TYPE)
        /* Undo effects of boolean_increment for post {in,de}crement.  */
        lhs = TREE_OPERAND (TREE_OPERAND (lhs, 1), 0);
      /* FALLTHRU */
    case MODIFY_EXPR:
      if (TREE_CODE (lhs) == MODIFY_EXPR
          && TREE_CODE (TREE_TYPE (TREE_OPERAND (lhs, 0))) == BOOLEAN_TYPE)
        {
          /* Undo effects of boolean_increment.  */
          if (integer_onep (TREE_OPERAND (lhs, 1)))
            {
              /* This is pre or post increment.  */
              rhs = TREE_OPERAND (lhs, 1);
              lhs = TREE_OPERAND (lhs, 0);
              opcode = NOP_EXPR;
              if (code == OMP_ATOMIC_CAPTURE_NEW
                  && !structured_block
                  && TREE_CODE (orig_lhs) == COMPOUND_EXPR)
                code = OMP_ATOMIC_CAPTURE_OLD;
              break;
            }
        }
      /* FALLTHRU */
    default:
      /* LHS was a plain lvalue; the next token decides the form.  */
      switch (cp_lexer_peek_token (parser->lexer)->type)
        {
        case CPP_MULT_EQ:
          opcode = MULT_EXPR;
          break;
        case CPP_DIV_EQ:
          opcode = TRUNC_DIV_EXPR;
          break;
        case CPP_PLUS_EQ:
          opcode = PLUS_EXPR;
          break;
        case CPP_MINUS_EQ:
          opcode = MINUS_EXPR;
          break;
        case CPP_LSHIFT_EQ:
          opcode = LSHIFT_EXPR;
          break;
        case CPP_RSHIFT_EQ:
          opcode = RSHIFT_EXPR;
          break;
        case CPP_AND_EQ:
          opcode = BIT_AND_EXPR;
          break;
        case CPP_OR_EQ:
          opcode = BIT_IOR_EXPR;
          break;
        case CPP_XOR_EQ:
          opcode = BIT_XOR_EXPR;
          break;
        case CPP_EQ:
          /* "x = ..." — could be x = x binop expr, x = expr binop x,
             a capture-block write, or the first half of a capture.
             Use tentative parsing to disambiguate.  */
          enum cp_parser_prec oprec;
          cp_token *token;
          cp_lexer_consume_token (parser->lexer);
          cp_parser_parse_tentatively (parser);
          rhs1 = cp_parser_simple_cast_expression (parser);
          if (rhs1 == error_mark_node)
            {
              /* Re-parse non-tentatively to emit the diagnostics.  */
              cp_parser_abort_tentative_parse (parser);
              cp_parser_simple_cast_expression (parser);
              goto saw_error;
            }
          token = cp_lexer_peek_token (parser->lexer);
          if (token->type != CPP_SEMICOLON && !cp_tree_equal (lhs, rhs1))
            {
              /* Not "x = x ..."; try "x = expr binop x" (OpenMP 4.0).  */
              cp_parser_abort_tentative_parse (parser);
              cp_parser_parse_tentatively (parser);
              rhs = cp_parser_binary_expression (parser, false, true,
                                                 PREC_NOT_OPERATOR, NULL);
              if (rhs == error_mark_node)
                {
                  cp_parser_abort_tentative_parse (parser);
                  cp_parser_binary_expression (parser, false, true,
                                               PREC_NOT_OPERATOR, NULL);
                  goto saw_error;
                }
              switch (TREE_CODE (rhs))
                {
                case MULT_EXPR:
                case TRUNC_DIV_EXPR:
                case RDIV_EXPR:
                case PLUS_EXPR:
                case MINUS_EXPR:
                case LSHIFT_EXPR:
                case RSHIFT_EXPR:
                case BIT_AND_EXPR:
                case BIT_IOR_EXPR:
                case BIT_XOR_EXPR:
                  /* x must reappear as the second operand.  */
                  if (cp_tree_equal (lhs, TREE_OPERAND (rhs, 1)))
                    {
                      if (cp_parser_parse_definitely (parser))
                        {
                          opcode = TREE_CODE (rhs);
                          rhs1 = TREE_OPERAND (rhs, 0);
                          rhs = TREE_OPERAND (rhs, 1);
                          goto stmt_done;
                        }
                      else
                        goto saw_error;
                    }
                  break;
                default:
                  break;
                }
              cp_parser_abort_tentative_parse (parser);
              if (structured_block && code == OMP_ATOMIC_CAPTURE_OLD)
                {
                  /* "{ v = x; x = expr; }" — a plain write.  */
                  rhs = cp_parser_expression (parser);
                  if (rhs == error_mark_node)
                    goto saw_error;
                  opcode = NOP_EXPR;
                  rhs1 = NULL_TREE;
                  goto stmt_done;
                }
              cp_parser_error (parser,
                               "invalid form of %<#pragma omp atomic%>");
              goto saw_error;
            }
          if (!cp_parser_parse_definitely (parser))
            goto saw_error;
          switch (token->type)
            {
            case CPP_SEMICOLON:
              if (structured_block && code == OMP_ATOMIC_CAPTURE_NEW)
                {
                  /* "{ v = x; ...": first half of an old-value capture;
                     restart to parse the update statement.  */
                  code = OMP_ATOMIC_CAPTURE_OLD;
                  v = lhs;
                  lhs = NULL_TREE;
                  lhs1 = rhs1;
                  rhs1 = NULL_TREE;
                  cp_lexer_consume_token (parser->lexer);
                  goto restart;
                }
              else if (structured_block)
                {
                  /* Plain write inside a capture-block.  */
                  opcode = NOP_EXPR;
                  rhs = rhs1;
                  rhs1 = NULL_TREE;
                  goto stmt_done;
                }
              cp_parser_error (parser,
                               "invalid form of %<#pragma omp atomic%>");
              goto saw_error;
            case CPP_MULT:
              opcode = MULT_EXPR;
              break;
            case CPP_DIV:
              opcode = TRUNC_DIV_EXPR;
              break;
            case CPP_PLUS:
              opcode = PLUS_EXPR;
              break;
            case CPP_MINUS:
              opcode = MINUS_EXPR;
              break;
            case CPP_LSHIFT:
              opcode = LSHIFT_EXPR;
              break;
            case CPP_RSHIFT:
              opcode = RSHIFT_EXPR;
              break;
            case CPP_AND:
              opcode = BIT_AND_EXPR;
              break;
            case CPP_OR:
              opcode = BIT_IOR_EXPR;
              break;
            case CPP_XOR:
              opcode = BIT_XOR_EXPR;
              break;
            default:
              cp_parser_error (parser,
                               "invalid operator for %<#pragma omp atomic%>");
              goto saw_error;
            }
          /* "x = x binop expr": parse expr at the operator's precedence
             (one lower for commutative ops, so further operands of the
             same operator fold into RHS).  */
          oprec = TOKEN_PRECEDENCE (token);
          gcc_assert (oprec != PREC_NOT_OPERATOR);
          if (commutative_tree_code (opcode))
            oprec = (enum cp_parser_prec) (oprec - 1);
          cp_lexer_consume_token (parser->lexer);
          rhs = cp_parser_binary_expression (parser, false, false,
                                             oprec, NULL);
          if (rhs == error_mark_node)
            goto saw_error;
          goto stmt_done;
          /* FALLTHROUGH */
        default:
          cp_parser_error (parser,
                           "invalid operator for %<#pragma omp atomic%>");
          goto saw_error;
        }
      /* "x op= expr": consume the operator and parse the operand.  */
      cp_lexer_consume_token (parser->lexer);
      rhs = cp_parser_expression (parser);
      if (rhs == error_mark_node)
        goto saw_error;
      break;
    }
 stmt_done:
  /* For "{ update-stmt; v = x; }" parse the trailing capture.  */
  if (structured_block && code == OMP_ATOMIC_CAPTURE_NEW)
    {
      if (!cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON))
        goto saw_error;
      v = cp_parser_unary_expression (parser);
      if (v == error_mark_node)
        goto saw_error;
      if (!cp_parser_require (parser, CPP_EQ, RT_EQ))
        goto saw_error;
      lhs1 = cp_parser_unary_expression (parser);
      if (lhs1 == error_mark_node)
        goto saw_error;
    }
  if (structured_block)
    {
      cp_parser_consume_semicolon_at_end_of_statement (parser);
      cp_parser_require (parser, CPP_CLOSE_BRACE, RT_CLOSE_BRACE);
    }
 done:
  finish_omp_atomic (code, opcode, lhs, rhs, v, lhs1, rhs1, seq_cst);
  if (!structured_block)
    cp_parser_consume_semicolon_at_end_of_statement (parser);
  return;
 saw_error:
  /* Error recovery: skip the broken statement, and the rest of the
     capture-block if we were inside one.  */
  cp_parser_skip_to_end_of_block_or_statement (parser);
  if (structured_block)
    {
      if (cp_lexer_next_token_is (parser->lexer, CPP_CLOSE_BRACE))
        cp_lexer_consume_token (parser->lexer);
      else if (code == OMP_ATOMIC_CAPTURE_NEW)
        {
          cp_parser_skip_to_end_of_block_or_statement (parser);
          if (cp_lexer_next_token_is (parser->lexer, CPP_CLOSE_BRACE))
            cp_lexer_consume_token (parser->lexer);
        }
    }
}
/* OpenMP 2.5:
   # pragma omp barrier new-line

   The pragma takes no clauses; just require its end and emit the
   barrier.  */
static void
cp_parser_omp_barrier (cp_parser *parser, cp_token *pragma_tok)
{
  cp_parser_require_pragma_eol (parser, pragma_tok);
  finish_omp_barrier ();
}
/* OpenMP 2.5:
   # pragma omp critical [(name)] new-line
     structured-block

   Returns the OMP_CRITICAL statement.  */
static tree
cp_parser_omp_critical (cp_parser *parser, cp_token *pragma_tok)
{
  tree name = NULL;

  /* An optional parenthesized region name may follow "critical".  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_PAREN))
    {
      cp_lexer_consume_token (parser->lexer);
      name = cp_parser_identifier (parser);

      /* On a bad identifier or a missing ')', resynchronize past the
         closing parenthesis.  */
      bool ok = (name != error_mark_node
                 && cp_parser_require (parser, CPP_CLOSE_PAREN,
                                       RT_CLOSE_PAREN));
      if (!ok)
        cp_parser_skip_to_closing_parenthesis (parser, /*recovering=*/true,
                                               /*or_comma=*/false,
                                               /*consume_paren=*/true);
      if (name == error_mark_node)
        name = NULL;
    }
  cp_parser_require_pragma_eol (parser, pragma_tok);

  tree stmt = cp_parser_omp_structured_block (parser);
  return c_finish_omp_critical (input_location, stmt, name);
}
/* OpenMP 2.5:
   # pragma omp flush flush-vars[opt] new-line
   flush-vars:
     ( variable-list )

   The variable list is parsed for validity but not otherwise used.  */
static void
cp_parser_omp_flush (cp_parser *parser, cp_token *pragma_tok)
{
  if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_PAREN))
    (void) cp_parser_omp_var_list (parser, OMP_CLAUSE_ERROR, NULL);
  cp_parser_require_pragma_eol (parser, pragma_tok);
  finish_omp_flush ();
}
/* Helper function, to parse the controlling condition of an omp (or
   Cilk Plus) for loop.  DECL is the iteration variable, CODE the loop
   kind.  Returns the condition tree, or error_mark_node on a malformed
   or disallowed comparison.  (The previous comment said "increment
   expression", a copy-paste from cp_parser_omp_for_incr below.)  */
static tree
cp_parser_omp_for_cond (cp_parser *parser, tree decl, enum tree_code code)
{
  tree cond = cp_parser_binary_expression (parser, false, true,
                                           PREC_NOT_OPERATOR, NULL);
  if (cond == error_mark_node
      || cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON))
    {
      cp_parser_skip_to_end_of_statement (parser);
      return error_mark_node;
    }
  switch (TREE_CODE (cond))
    {
    case GT_EXPR:
    case GE_EXPR:
    case LT_EXPR:
    case LE_EXPR:
      break;
    case NE_EXPR:
      if (code == CILK_SIMD || code == CILK_FOR)
        break;
      /* Fall through: OpenMP disallows NE_EXPR.  */
    default:
      return error_mark_node;
    }
  /* If decl is an iterator, preserve LHS and RHS of the relational
     expr until finish_omp_for.  */
  if (decl
      && (type_dependent_expression_p (decl)
          || CLASS_TYPE_P (TREE_TYPE (decl))))
    return cond;
  return build_x_binary_op (input_location, TREE_CODE (cond),
                            TREE_OPERAND (cond, 0), ERROR_MARK,
                            TREE_OPERAND (cond, 1), ERROR_MARK,
                            /*overload=*/NULL, tf_warning_or_error);
}
/* Helper function, to parse omp for increment expression.  DECL is the
   iteration variable.  Accepts ++decl / --decl / decl++ / decl-- /
   decl op= expr / decl = decl + expr / decl = expr + decl (and the
   minus variants), returning a MODIFY_EXPR or {PRE,POST}{IN,DE}CREMENT
   tree, or error_mark_node if the increment does not fit one of those
   canonical OpenMP forms.  */
static tree
cp_parser_omp_for_incr (cp_parser *parser, tree decl)
{
  cp_token *token = cp_lexer_peek_token (parser->lexer);
  enum tree_code op;
  tree lhs, rhs;
  cp_id_kind idk;
  /* True when the form is "decl = decl + ..." (decl leads the RHS).  */
  bool decl_first;
  if (token->type == CPP_PLUS_PLUS || token->type == CPP_MINUS_MINUS)
    {
      /* Prefix increment/decrement of the iteration variable.  */
      op = (token->type == CPP_PLUS_PLUS
            ? PREINCREMENT_EXPR : PREDECREMENT_EXPR);
      cp_lexer_consume_token (parser->lexer);
      lhs = cp_parser_simple_cast_expression (parser);
      if (lhs != decl)
        return error_mark_node;
      return build2 (op, TREE_TYPE (decl), decl, NULL_TREE);
    }
  lhs = cp_parser_primary_expression (parser, false, false, false, &idk);
  if (lhs != decl)
    return error_mark_node;
  token = cp_lexer_peek_token (parser->lexer);
  if (token->type == CPP_PLUS_PLUS || token->type == CPP_MINUS_MINUS)
    {
      /* Postfix increment/decrement.  */
      op = (token->type == CPP_PLUS_PLUS
            ? POSTINCREMENT_EXPR : POSTDECREMENT_EXPR);
      cp_lexer_consume_token (parser->lexer);
      return build2 (op, TREE_TYPE (decl), decl, NULL_TREE);
    }
  op = cp_parser_assignment_operator_opt (parser);
  if (op == ERROR_MARK)
    return error_mark_node;
  if (op != NOP_EXPR)
    {
      /* Compound assignment: decl op= rhs.  */
      rhs = cp_parser_assignment_expression (parser);
      rhs = build2 (op, TREE_TYPE (decl), decl, rhs);
      return build2 (MODIFY_EXPR, TREE_TYPE (decl), decl, rhs);
    }
  /* Plain assignment: "decl = a +/- b +/- ...".  Parse additive terms
     one at a time so the occurrence of DECL itself can be spotted and
     factored out.  */
  lhs = cp_parser_binary_expression (parser, false, false,
                                     PREC_ADDITIVE_EXPRESSION, NULL);
  token = cp_lexer_peek_token (parser->lexer);
  decl_first = lhs == decl;
  if (decl_first)
    lhs = NULL_TREE;
  if (token->type != CPP_PLUS
      && token->type != CPP_MINUS)
    return error_mark_node;
  do
    {
      op = token->type == CPP_PLUS ? PLUS_EXPR : MINUS_EXPR;
      cp_lexer_consume_token (parser->lexer);
      rhs = cp_parser_binary_expression (parser, false, false,
                                         PREC_ADDITIVE_EXPRESSION, NULL);
      token = cp_lexer_peek_token (parser->lexer);
      /* Fold this term into LHS — except for the final term when DECL
         trails, which must be DECL itself (checked below).  */
      if (token->type == CPP_PLUS || token->type == CPP_MINUS || decl_first)
        {
          if (lhs == NULL_TREE)
            {
              if (op == PLUS_EXPR)
                lhs = rhs;
              else
                lhs = build_x_unary_op (input_location, NEGATE_EXPR, rhs,
                                        tf_warning_or_error);
            }
          else
            lhs = build_x_binary_op (input_location, op, lhs, ERROR_MARK, rhs,
                                     ERROR_MARK, NULL, tf_warning_or_error);
        }
    }
  while (token->type == CPP_PLUS || token->type == CPP_MINUS);
  if (!decl_first)
    {
      /* "decl = expr + decl": the trailing term must be DECL, and
         "expr - decl" is not a valid canonical form.  */
      if (rhs != decl || op == MINUS_EXPR)
        return error_mark_node;
      rhs = build2 (op, TREE_TYPE (decl), lhs, decl);
    }
  else
    rhs = build2 (PLUS_EXPR, TREE_TYPE (decl), decl, lhs);
  return build2 (MODIFY_EXPR, TREE_TYPE (decl), decl, rhs);
}
/* Parse the initialization statement of either an OpenMP for loop or
   a Cilk Plus for loop.
   CODE is the loop kind; THIS_PRE_BODY, INIT, DECL and REAL_DECL are
   in/out parameters filled from the parsed init statement; class-typed
   pre-bodies are pushed onto FOR_BLOCK.
   Return true if the resulting construct should have an
   OMP_CLAUSE_PRIVATE added to it.  */
static bool
cp_parser_omp_for_loop_init (cp_parser *parser,
                             enum tree_code code,
                             tree &this_pre_body,
                             vec<tree, va_gc> *for_block,
                             tree &init,
                             tree &decl,
                             tree &real_decl)
{
  /* An empty init statement is allowed; nothing to do.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON))
    return false;
  bool add_private_clause = false;
  /* See 2.5.1 (in OpenMP 3.0, similar wording is in 2.5 standard too):
     init-expr:
       var = lb
       integer-type var = lb
       random-access-iterator-type var = lb
       pointer-type var = lb
  */
  cp_decl_specifier_seq type_specifiers;
  /* First, try to parse as an initialized declaration.  See
     cp_parser_condition, from whence the bulk of this is copied.  */
  cp_parser_parse_tentatively (parser);
  cp_parser_type_specifier_seq (parser, /*is_declaration=*/true,
                                /*is_trailing_return=*/false,
                                &type_specifiers);
  if (cp_parser_parse_definitely (parser))
    {
      /* If parsing a type specifier seq succeeded, then this
         MUST be a initialized declaration.  */
      tree asm_specification, attributes;
      cp_declarator *declarator;
      declarator = cp_parser_declarator (parser,
                                         CP_PARSER_DECLARATOR_NAMED,
                                         /*ctor_dtor_or_conv_p=*/NULL,
                                         /*parenthesized_p=*/NULL,
                                         /*member_p=*/false,
                                         /*friend_p=*/false);
      attributes = cp_parser_attributes_opt (parser);
      asm_specification = cp_parser_asm_specification_opt (parser);
      if (declarator == cp_error_declarator)
        cp_parser_skip_to_end_of_statement (parser);
      else
        {
          tree pushed_scope, auto_node;
          decl = start_decl (declarator, &type_specifiers,
                             SD_INITIALIZED, attributes,
                             /*prefix_attributes=*/NULL_TREE,
                             &pushed_scope);
          auto_node = type_uses_auto (TREE_TYPE (decl));
          if (cp_lexer_next_token_is_not (parser->lexer, CPP_EQ))
            {
              /* Only "var = init" is a legal init-expr; diagnose
                 "var(init)" specially, anything else generically.  */
              if (cp_lexer_next_token_is (parser->lexer,
                                          CPP_OPEN_PAREN))
                {
                  if (code != CILK_SIMD && code != CILK_FOR)
                    error ("parenthesized initialization is not allowed in "
                           "OpenMP %<for%> loop");
                  else
                    error ("parenthesized initialization is "
                           "not allowed in for-loop");
                }
              else
                /* Trigger an error.  */
                cp_parser_require (parser, CPP_EQ, RT_EQ);
              init = error_mark_node;
              cp_parser_skip_to_end_of_statement (parser);
            }
          else if (CLASS_TYPE_P (TREE_TYPE (decl))
                   || type_dependent_expression_p (decl)
                   || auto_node)
            {
              /* Class/dependent/auto iterator: keep the initializer
                 around until instantiation time.  */
              bool is_direct_init, is_non_constant_init;
              init = cp_parser_initializer (parser,
                                            &is_direct_init,
                                            &is_non_constant_init);
              if (auto_node)
                {
                  TREE_TYPE (decl)
                    = do_auto_deduction (TREE_TYPE (decl), init,
                                         auto_node);
                  /* If auto deduced a scalar type, treat it like the
                     plain-scalar branch below.  */
                  if (!CLASS_TYPE_P (TREE_TYPE (decl))
                      && !type_dependent_expression_p (decl))
                    goto non_class;
                }
              cp_finish_decl (decl, init, !is_non_constant_init,
                              asm_specification,
                              LOOKUP_ONLYCONVERTING);
              if (CLASS_TYPE_P (TREE_TYPE (decl)))
                {
                  vec_safe_push (for_block, this_pre_body);
                  init = NULL_TREE;
                }
              else
                init = pop_stmt_list (this_pre_body);
              this_pre_body = NULL_TREE;
            }
          else
            {
              /* Consume '='.  */
              cp_lexer_consume_token (parser->lexer);
              init = cp_parser_assignment_expression (parser);
            non_class:
              if (TREE_CODE (TREE_TYPE (decl)) == REFERENCE_TYPE)
                init = error_mark_node;
              else
                cp_finish_decl (decl, NULL_TREE,
                                /*init_const_expr_p=*/false,
                                asm_specification,
                                LOOKUP_ONLYCONVERTING);
            }
          if (pushed_scope)
            pop_scope (pushed_scope);
        }
    }
  else
    {
      cp_id_kind idk;
      /* If parsing a type specifier sequence failed, then
         this MUST be a simple expression.  */
      if (code == CILK_FOR)
        error ("%<_Cilk_for%> allows expression instead of declaration only "
               "in C, not in C++");
      cp_parser_parse_tentatively (parser);
      decl = cp_parser_primary_expression (parser, false, false,
                                           false, &idk);
      if (!cp_parser_error_occurred (parser)
          && decl
          && DECL_P (decl)
          && CLASS_TYPE_P (TREE_TYPE (decl)))
        {
          /* "iterator = expr" where the iterator was declared outside
             the loop; emit the assignment as a statement and request a
             private clause for it.  */
          tree rhs;
          cp_parser_parse_definitely (parser);
          cp_parser_require (parser, CPP_EQ, RT_EQ);
          rhs = cp_parser_assignment_expression (parser);
          finish_expr_stmt (build_x_modify_expr (EXPR_LOCATION (rhs),
                                                 decl, NOP_EXPR,
                                                 rhs,
                                                 tf_warning_or_error));
          add_private_clause = true;
        }
      else
        {
          decl = NULL;
          cp_parser_abort_tentative_parse (parser);
          init = cp_parser_expression (parser);
          if (init)
            {
              /* Recover the assigned variable from the assignment.  */
              if (TREE_CODE (init) == MODIFY_EXPR
                  || TREE_CODE (init) == MODOP_EXPR)
                real_decl = TREE_OPERAND (init, 0);
            }
        }
    }
  return add_private_clause;
}
/* Parse the restricted form of the for statement allowed by OpenMP
   (including collapsed loop nests).  CODE is the loop kind (OMP_FOR,
   OMP_SIMD, CILK_SIMD, CILK_FOR); CLAUSES the already-parsed clauses;
   CCLAUSES, when non-NULL, the split clause sets of a combined
   construct.  Returns the OMP_FOR tree, or NULL/NULL_TREE on error.  */
static tree
cp_parser_omp_for_loop (cp_parser *parser, enum tree_code code, tree clauses,
                        tree *cclauses)
{
  tree init, cond, incr, body, decl, pre_body = NULL_TREE, ret;
  tree real_decl, initv, condv, incrv, declv;
  tree this_pre_body, cl;
  location_t loc_first;
  bool collapse_err = false;
  /* NBRACES counts '{'s opened between collapsed loops, to be matched
     after the body.  */
  int i, collapse = 1, nbraces = 0;
  vec<tree, va_gc> *for_block = make_tree_vector ();
  /* Pick up the collapse depth from the clause list.  */
  for (cl = clauses; cl; cl = OMP_CLAUSE_CHAIN (cl))
    if (OMP_CLAUSE_CODE (cl) == OMP_CLAUSE_COLLAPSE)
      collapse = tree_to_shwi (OMP_CLAUSE_COLLAPSE_EXPR (cl));
  gcc_assert (collapse >= 1);
  /* One slot per collapsed loop for decl/init/cond/incr.  */
  declv = make_tree_vec (collapse);
  initv = make_tree_vec (collapse);
  condv = make_tree_vec (collapse);
  incrv = make_tree_vec (collapse);
  loc_first = cp_lexer_peek_token (parser->lexer)->location;
  for (i = 0; i < collapse; i++)
    {
      int bracecount = 0;
      bool add_private_clause = false;
      location_t loc;
      if (code != CILK_FOR
          && !cp_lexer_next_token_is_keyword (parser->lexer, RID_FOR))
        {
          cp_parser_error (parser, "for statement expected");
          return NULL;
        }
      if (code == CILK_FOR
          && !cp_lexer_next_token_is_keyword (parser->lexer, RID_CILK_FOR))
        {
          cp_parser_error (parser, "_Cilk_for statement expected");
          return NULL;
        }
      loc = cp_lexer_consume_token (parser->lexer)->location;
      if (!cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN))
        return NULL;
      init = decl = real_decl = NULL;
      this_pre_body = push_stmt_list ();
      add_private_clause
        |= cp_parser_omp_for_loop_init (parser, code,
                                        this_pre_body, for_block,
                                        init, decl, real_decl);
      cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON);
      /* Append this loop's pre-body statements to the accumulated
         PRE_BODY of the whole nest.  */
      if (this_pre_body)
        {
          this_pre_body = pop_stmt_list (this_pre_body);
          if (pre_body)
            {
              tree t = pre_body;
              pre_body = push_stmt_list ();
              add_stmt (t);
              add_stmt (this_pre_body);
              pre_body = pop_stmt_list (pre_body);
            }
          else
            pre_body = this_pre_body;
        }
      if (decl)
        real_decl = decl;
      /* For combined constructs, fix up data-sharing of the iteration
         variable across the split clause sets.  */
      if (cclauses != NULL
          && cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL] != NULL
          && real_decl != NULL_TREE)
        {
          tree *c;
          for (c = &cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL]; *c ; )
            if (OMP_CLAUSE_CODE (*c) == OMP_CLAUSE_FIRSTPRIVATE
                && OMP_CLAUSE_DECL (*c) == real_decl)
              {
                error_at (loc, "iteration variable %qD"
                          " should not be firstprivate", real_decl);
                *c = OMP_CLAUSE_CHAIN (*c);
              }
            else if (OMP_CLAUSE_CODE (*c) == OMP_CLAUSE_LASTPRIVATE
                     && OMP_CLAUSE_DECL (*c) == real_decl)
              {
                /* Add lastprivate (decl) clause to OMP_FOR_CLAUSES,
                   change it to shared (decl) in OMP_PARALLEL_CLAUSES.  */
                tree l = build_omp_clause (loc, OMP_CLAUSE_LASTPRIVATE);
                OMP_CLAUSE_DECL (l) = real_decl;
                CP_OMP_CLAUSE_INFO (l) = CP_OMP_CLAUSE_INFO (*c);
                if (code == OMP_SIMD)
                  {
                    OMP_CLAUSE_CHAIN (l) = cclauses[C_OMP_CLAUSE_SPLIT_FOR];
                    cclauses[C_OMP_CLAUSE_SPLIT_FOR] = l;
                  }
                else
                  {
                    OMP_CLAUSE_CHAIN (l) = clauses;
                    clauses = l;
                  }
                OMP_CLAUSE_SET_CODE (*c, OMP_CLAUSE_SHARED);
                CP_OMP_CLAUSE_INFO (*c) = NULL;
                add_private_clause = false;
              }
            else
              {
                if (OMP_CLAUSE_CODE (*c) == OMP_CLAUSE_PRIVATE
                    && OMP_CLAUSE_DECL (*c) == real_decl)
                  add_private_clause = false;
                c = &OMP_CLAUSE_CHAIN (*c);
              }
        }
      if (add_private_clause)
        {
          /* Add private (decl) unless an explicit data-sharing clause
             already names it; firstprivate/reduction are errors.  */
          tree c;
          for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
            {
              if ((OMP_CLAUSE_CODE (c) == OMP_CLAUSE_PRIVATE
                   || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE)
                  && OMP_CLAUSE_DECL (c) == decl)
                break;
              else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE
                       && OMP_CLAUSE_DECL (c) == decl)
                error_at (loc, "iteration variable %qD "
                          "should not be firstprivate",
                          decl);
              else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
                       && OMP_CLAUSE_DECL (c) == decl)
                error_at (loc, "iteration variable %qD should not be reduction",
                          decl);
            }
          if (c == NULL)
            {
              c = build_omp_clause (loc, OMP_CLAUSE_PRIVATE);
              OMP_CLAUSE_DECL (c) = decl;
              c = finish_omp_clauses (c);
              if (c)
                {
                  OMP_CLAUSE_CHAIN (c) = clauses;
                  clauses = c;
                }
            }
        }
      cond = NULL;
      if (cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON))
        cond = cp_parser_omp_for_cond (parser, decl, code);
      cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON);
      incr = NULL;
      if (cp_lexer_next_token_is_not (parser->lexer, CPP_CLOSE_PAREN))
        {
          /* If decl is an iterator, preserve the operator on decl
             until finish_omp_for.  */
          if (real_decl
              && ((processing_template_decl
                   && !POINTER_TYPE_P (TREE_TYPE (real_decl)))
                  || CLASS_TYPE_P (TREE_TYPE (real_decl))))
            incr = cp_parser_omp_for_incr (parser, real_decl);
          else
            incr = cp_parser_expression (parser);
          if (CAN_HAVE_LOCATION_P (incr) && !EXPR_HAS_LOCATION (incr))
            SET_EXPR_LOCATION (incr, input_location);
        }
      if (!cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN))
        cp_parser_skip_to_closing_parenthesis (parser, /*recovering=*/true,
                                               /*or_comma=*/false,
                                               /*consume_paren=*/true);
      TREE_VEC_ELT (declv, i) = decl;
      TREE_VEC_ELT (initv, i) = init;
      TREE_VEC_ELT (condv, i) = cond;
      TREE_VEC_ELT (incrv, i) = incr;
      if (i == collapse - 1)
        break;
      /* FIXME: OpenMP 3.0 draft isn't very clear on what exactly is allowed
         in between the collapsed for loops to be still considered perfectly
         nested.  Hopefully the final version clarifies this.
         For now handle (multiple) {'s and empty statements.  */
      cp_parser_parse_tentatively (parser);
      do
        {
          if (cp_lexer_next_token_is_keyword (parser->lexer, RID_FOR))
            break;
          else if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE))
            {
              cp_lexer_consume_token (parser->lexer);
              bracecount++;
            }
          else if (bracecount
                   && cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON))
            cp_lexer_consume_token (parser->lexer);
          else
            {
              loc = cp_lexer_peek_token (parser->lexer)->location;
              error_at (loc, "not enough collapsed for loops");
              collapse_err = true;
              cp_parser_abort_tentative_parse (parser);
              /* NULL declv signals the error to the code below.  */
              declv = NULL_TREE;
              break;
            }
        }
      while (1);
      if (declv)
        {
          cp_parser_parse_definitely (parser);
          nbraces += bracecount;
        }
    }
  /* Note that we saved the original contents of this flag when we entered
     the structured block, and so we don't need to re-save it here.  */
  if (code == CILK_SIMD || code == CILK_FOR)
    parser->in_statement = IN_CILK_SIMD_FOR;
  else
    parser->in_statement = IN_OMP_FOR;
  /* Note that the grammar doesn't call for a structured block here,
     though the loop as a whole is a structured block.  */
  body = push_stmt_list ();
  cp_parser_statement (parser, NULL_TREE, false, NULL);
  body = pop_stmt_list (body);
  if (declv == NULL_TREE)
    ret = NULL_TREE;
  else
    ret = finish_omp_for (loc_first, code, declv, initv, condv, incrv, body,
                          pre_body, clauses);
  /* Consume the '}'s (and empty statements) that close the braces
     opened between the collapsed loops.  */
  while (nbraces)
    {
      if (cp_lexer_next_token_is (parser->lexer, CPP_CLOSE_BRACE))
        {
          cp_lexer_consume_token (parser->lexer);
          nbraces--;
        }
      else if (cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON))
        cp_lexer_consume_token (parser->lexer);
      else
        {
          if (!collapse_err)
            {
              error_at (cp_lexer_peek_token (parser->lexer)->location,
                        "collapsed loops not perfectly nested");
            }
          collapse_err = true;
          cp_parser_statement_seq_opt (parser, NULL);
          if (cp_lexer_next_token_is (parser->lexer, CPP_EOF))
            break;
        }
    }
  /* Emit the pre-body statement lists saved for class iterators.  */
  while (!for_block->is_empty ())
    add_stmt (pop_stmt_list (for_block->pop ()));
  release_tree_vector (for_block);
  return ret;
}
/* Helper function for OpenMP parsing: split CLAUSES among the
   constituent constructs of a combined construct (into CCLAUSES) and
   run finish_omp_clauses on each non-empty resulting set.  */
static void
cp_omp_split_clauses (location_t loc, enum tree_code code,
                      omp_clause_mask mask, tree clauses, tree *cclauses)
{
  c_omp_split_clauses (loc, code, mask, clauses, cclauses);
  for (int i = 0; i < C_OMP_CLAUSE_SPLIT_COUNT; i++)
    if (cclauses[i] != NULL_TREE)
      cclauses[i] = finish_omp_clauses (cclauses[i]);
}
/* OpenMP 4.0:
   #pragma omp simd simd-clause[optseq] new-line
     for-loop

   P_NAME accumulates the textual name of the (possibly combined) construct
   for diagnostics; MASK is the set of clauses allowed so far.  CCLAUSES is
   non-NULL when this is the innermost part of a combined construct, in
   which case the parsed clauses are split among the constituent
   constructs.  */

#define OMP_SIMD_CLAUSE_MASK \
( (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SAFELEN) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_LINEAR) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_ALIGNED) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_PRIVATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_LASTPRIVATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_REDUCTION) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_COLLAPSE))

static tree
cp_parser_omp_simd (cp_parser *parser, cp_token *pragma_tok,
		    char *p_name, omp_clause_mask mask, tree *cclauses)
{
  tree clauses, sb, ret;
  unsigned int save;
  location_t loc = cp_lexer_peek_token (parser->lexer)->location;

  strcat (p_name, " simd");
  mask |= OMP_SIMD_CLAUSE_MASK;
  /* "ordered" may have been allowed by an enclosing construct but is not
     valid on simd.  */
  mask &= ~(OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_ORDERED);

  /* The final argument is presumably whether the clause parser should
     finalize (and consume the pragma EOL) itself — only when this is not
     part of a combined construct; TODO confirm against
     cp_parser_omp_all_clauses.  */
  clauses = cp_parser_omp_all_clauses (parser, mask, p_name, pragma_tok,
				       cclauses == NULL);
  if (cclauses)
    {
      /* Combined construct: split the clauses and keep only the
	 simd-specific ones here.  */
      cp_omp_split_clauses (loc, OMP_SIMD, mask, clauses, cclauses);
      clauses = cclauses[C_OMP_CLAUSE_SPLIT_SIMD];
    }

  sb = begin_omp_structured_block ();
  save = cp_parser_begin_omp_structured_block (parser);

  ret = cp_parser_omp_for_loop (parser, OMP_SIMD, clauses, cclauses);

  cp_parser_end_omp_structured_block (parser, save);
  add_stmt (finish_omp_structured_block (sb));

  return ret;
}
/* OpenMP 2.5:
   #pragma omp for for-clause[optseq] new-line
     for-loop

   OpenMP 4.0:
   #pragma omp for simd for-simd-clause[optseq] new-line
     for-loop

   P_NAME/MASK/CCLAUSES as for cp_parser_omp_simd.  Handles the combined
   "for simd" form by delegating to cp_parser_omp_simd and wrapping the
   result in an OMP_FOR node.  */

#define OMP_FOR_CLAUSE_MASK \
( (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_PRIVATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_FIRSTPRIVATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_LASTPRIVATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_REDUCTION) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_ORDERED) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOWAIT) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_COLLAPSE))

static tree
cp_parser_omp_for (cp_parser *parser, cp_token *pragma_tok,
		   char *p_name, omp_clause_mask mask, tree *cclauses)
{
  tree clauses, sb, ret;
  unsigned int save;
  location_t loc = cp_lexer_peek_token (parser->lexer)->location;

  strcat (p_name, " for");
  mask |= OMP_FOR_CLAUSE_MASK;
  /* nowait is not permitted on the worksharing part of a combined
     construct.  */
  if (cclauses)
    mask &= ~(OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOWAIT);

  if (cp_lexer_next_token_is (parser->lexer, CPP_NAME))
    {
      tree id = cp_lexer_peek_token (parser->lexer)->u.value;
      const char *p = IDENTIFIER_POINTER (id);

      if (strcmp (p, "simd") == 0)
	{
	  tree cclauses_buf[C_OMP_CLAUSE_SPLIT_COUNT];
	  if (cclauses == NULL)
	    cclauses = cclauses_buf;

	  cp_lexer_consume_token (parser->lexer);
	  if (!flag_openmp)  /* flag_openmp_simd */
	    /* -fopenmp-simd: only the simd part is honored.  */
	    return cp_parser_omp_simd (parser, pragma_tok, p_name, mask,
				       cclauses);
	  sb = begin_omp_structured_block ();
	  save = cp_parser_begin_omp_structured_block (parser);
	  ret = cp_parser_omp_simd (parser, pragma_tok, p_name, mask,
				    cclauses);
	  cp_parser_end_omp_structured_block (parser, save);
	  tree body = finish_omp_structured_block (sb);
	  if (ret == NULL)
	    return ret;
	  /* Wrap the inner simd loop in an OMP_FOR carrying the
	     for-specific clauses from the split.  */
	  ret = make_node (OMP_FOR);
	  TREE_TYPE (ret) = void_type_node;
	  OMP_FOR_BODY (ret) = body;
	  OMP_FOR_CLAUSES (ret) = cclauses[C_OMP_CLAUSE_SPLIT_FOR];
	  SET_EXPR_LOCATION (ret, loc);
	  add_stmt (ret);
	  return ret;
	}
    }
  if (!flag_openmp)  /* flag_openmp_simd */
    {
      /* Plain "omp for" is ignored under -fopenmp-simd.  */
      cp_parser_skip_to_pragma_eol (parser, pragma_tok);
      return NULL_TREE;
    }

  clauses = cp_parser_omp_all_clauses (parser, mask, p_name, pragma_tok,
				       cclauses == NULL);
  if (cclauses)
    {
      cp_omp_split_clauses (loc, OMP_FOR, mask, clauses, cclauses);
      clauses = cclauses[C_OMP_CLAUSE_SPLIT_FOR];
    }

  sb = begin_omp_structured_block ();
  save = cp_parser_begin_omp_structured_block (parser);

  ret = cp_parser_omp_for_loop (parser, OMP_FOR, clauses, cclauses);

  cp_parser_end_omp_structured_block (parser, save);
  add_stmt (finish_omp_structured_block (sb));

  return ret;
}
/* OpenMP 2.5:
   # pragma omp master new-line
     structured-block

   Parse the pragma end-of-line, then the structured block, and build the
   OMP_MASTER statement.  */

static tree
cp_parser_omp_master (cp_parser *parser, cp_token *pragma_tok)
{
  cp_parser_require_pragma_eol (parser, pragma_tok);
  tree body = cp_parser_omp_structured_block (parser);
  return c_finish_omp_master (input_location, body);
}
/* OpenMP 2.5:
   # pragma omp ordered new-line
     structured-block

   Record the pragma location first, since parsing the body advances the
   lexer.  */

static tree
cp_parser_omp_ordered (cp_parser *parser, cp_token *pragma_tok)
{
  location_t loc = cp_lexer_peek_token (parser->lexer)->location;
  cp_parser_require_pragma_eol (parser, pragma_tok);
  tree body = cp_parser_omp_structured_block (parser);
  return c_finish_omp_ordered (loc, body);
}
/* OpenMP 2.5:
   section-scope:
     { section-sequence }

   section-sequence:
     section-directive[opt] structured-block
     section-sequence section-directive structured-block

   Parse the braced body of an OMP sections construct, wrapping each
   section's statements in an OMP_SECTION node and the whole body in an
   OMP_SECTIONS node (whose clauses the caller fills in).  Returns the
   OMP_SECTIONS node, or NULL_TREE if the opening brace is missing.  */

static tree
cp_parser_omp_sections_scope (cp_parser *parser)
{
  tree stmt, substmt;
  bool error_suppress = false;
  cp_token *tok;

  if (!cp_parser_require (parser, CPP_OPEN_BRACE, RT_OPEN_BRACE))
    return NULL_TREE;

  stmt = push_stmt_list ();

  /* The first section directive is optional: statements before the first
     "#pragma omp section" implicitly form the first section.  */
  if (cp_lexer_peek_token (parser->lexer)->pragma_kind != PRAGMA_OMP_SECTION)
    {
      substmt = cp_parser_omp_structured_block (parser);
      substmt = build1 (OMP_SECTION, void_type_node, substmt);
      add_stmt (substmt);
    }

  while (1)
    {
      tok = cp_lexer_peek_token (parser->lexer);
      if (tok->type == CPP_CLOSE_BRACE)
	break;
      if (tok->type == CPP_EOF)
	break;

      if (tok->pragma_kind == PRAGMA_OMP_SECTION)
	{
	  cp_lexer_consume_token (parser->lexer);
	  cp_parser_require_pragma_eol (parser, tok);
	  error_suppress = false;
	}
      else if (!error_suppress)
	{
	  /* Only report the missing directive once per run of bad
	     sections, to avoid cascading diagnostics.  */
	  cp_parser_error (parser, "expected %<#pragma omp section%> or %<}%>");
	  error_suppress = true;
	}

      substmt = cp_parser_omp_structured_block (parser);
      substmt = build1 (OMP_SECTION, void_type_node, substmt);
      add_stmt (substmt);
    }
  cp_parser_require (parser, CPP_CLOSE_BRACE, RT_CLOSE_BRACE);

  substmt = pop_stmt_list (stmt);

  stmt = make_node (OMP_SECTIONS);
  TREE_TYPE (stmt) = void_type_node;
  OMP_SECTIONS_BODY (stmt) = substmt;

  add_stmt (stmt);
  return stmt;
}
/* OpenMP 2.5:
   # pragma omp sections sections-clause[optseq] newline
     sections-scope

   P_NAME/MASK/CCLAUSES as for cp_parser_omp_simd; CCLAUSES is non-NULL
   when this is part of a combined "parallel sections" construct.  */

#define OMP_SECTIONS_CLAUSE_MASK \
( (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_PRIVATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_FIRSTPRIVATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_LASTPRIVATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_REDUCTION) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOWAIT))

static tree
cp_parser_omp_sections (cp_parser *parser, cp_token *pragma_tok,
			char *p_name, omp_clause_mask mask, tree *cclauses)
{
  tree clauses, ret;
  location_t loc = cp_lexer_peek_token (parser->lexer)->location;

  strcat (p_name, " sections");
  mask |= OMP_SECTIONS_CLAUSE_MASK;
  /* nowait is not permitted on the sections part of a combined
     construct.  */
  if (cclauses)
    mask &= ~(OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOWAIT);

  clauses = cp_parser_omp_all_clauses (parser, mask, p_name, pragma_tok,
				       cclauses == NULL);
  if (cclauses)
    {
      cp_omp_split_clauses (loc, OMP_SECTIONS, mask, clauses, cclauses);
      clauses = cclauses[C_OMP_CLAUSE_SPLIT_SECTIONS];
    }

  ret = cp_parser_omp_sections_scope (parser);
  if (ret)
    OMP_SECTIONS_CLAUSES (ret) = clauses;

  return ret;
}
/* OpenMP 2.5:
   # pragma omp parallel parallel-clause[optseq] new-line
     structured-block
   # pragma omp parallel for parallel-for-clause[optseq] new-line
     structured-block
   # pragma omp parallel sections parallel-sections-clause[optseq] new-line
     structured-block

   OpenMP 4.0:
   # pragma omp parallel for simd parallel-for-simd-clause[optseq] new-line
     structured-block

   CCLAUSES is non-NULL when "parallel" itself appears inside a larger
   combined construct (e.g. "target teams distribute parallel ..."), in
   which case a "for" must follow.  */

#define OMP_PARALLEL_CLAUSE_MASK \
( (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_IF) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_PRIVATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_FIRSTPRIVATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DEFAULT) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SHARED) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_COPYIN) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_REDUCTION) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_PROC_BIND))

static tree
cp_parser_omp_parallel (cp_parser *parser, cp_token *pragma_tok,
			char *p_name, omp_clause_mask mask, tree *cclauses)
{
  tree stmt, clauses, block;
  unsigned int save;
  location_t loc = cp_lexer_peek_token (parser->lexer)->location;

  strcat (p_name, " parallel");
  mask |= OMP_PARALLEL_CLAUSE_MASK;

  /* Combined "parallel for [simd]".  */
  if (cp_lexer_next_token_is_keyword (parser->lexer, RID_FOR))
    {
      tree cclauses_buf[C_OMP_CLAUSE_SPLIT_COUNT];
      if (cclauses == NULL)
	cclauses = cclauses_buf;

      cp_lexer_consume_token (parser->lexer);
      if (!flag_openmp)  /* flag_openmp_simd */
	/* -fopenmp-simd: only a nested simd part (if any) is honored.  */
	return cp_parser_omp_for (parser, pragma_tok, p_name, mask, cclauses);
      block = begin_omp_parallel ();
      save = cp_parser_begin_omp_structured_block (parser);
      tree ret = cp_parser_omp_for (parser, pragma_tok, p_name, mask, cclauses);
      cp_parser_end_omp_structured_block (parser, save);
      /* finish_omp_parallel is called even on parse failure so the
	 structured-block bookkeeping stays balanced.  */
      stmt = finish_omp_parallel (cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL],
				  block);
      if (ret == NULL_TREE)
	return ret;
      OMP_PARALLEL_COMBINED (stmt) = 1;
      return stmt;
    }
  else if (cclauses)
    {
      /* Inside a larger combined construct, only "parallel for" is
	 valid here.  */
      error_at (loc, "expected %<for%> after %qs", p_name);
      cp_parser_skip_to_pragma_eol (parser, pragma_tok);
      return NULL_TREE;
    }
  else if (!flag_openmp)  /* flag_openmp_simd */
    {
      cp_parser_skip_to_pragma_eol (parser, pragma_tok);
      return NULL_TREE;
    }
  else if (cp_lexer_next_token_is (parser->lexer, CPP_NAME))
    {
      tree id = cp_lexer_peek_token (parser->lexer)->u.value;
      const char *p = IDENTIFIER_POINTER (id);
      if (strcmp (p, "sections") == 0)
	{
	  /* Combined "parallel sections".  */
	  tree cclauses_buf[C_OMP_CLAUSE_SPLIT_COUNT];
	  cclauses = cclauses_buf;

	  cp_lexer_consume_token (parser->lexer);
	  block = begin_omp_parallel ();
	  save = cp_parser_begin_omp_structured_block (parser);
	  cp_parser_omp_sections (parser, pragma_tok, p_name, mask, cclauses);
	  cp_parser_end_omp_structured_block (parser, save);
	  stmt = finish_omp_parallel (cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL],
				      block);
	  OMP_PARALLEL_COMBINED (stmt) = 1;
	  return stmt;
	}
    }

  /* Plain "#pragma omp parallel" with a structured block.  */
  clauses = cp_parser_omp_all_clauses (parser, mask, p_name, pragma_tok);

  block = begin_omp_parallel ();
  save = cp_parser_begin_omp_structured_block (parser);
  cp_parser_statement (parser, NULL_TREE, false, NULL);
  cp_parser_end_omp_structured_block (parser, save);
  stmt = finish_omp_parallel (clauses, block);
  return stmt;
}
/* OpenMP 2.5:
   # pragma omp single single-clause[optseq] new-line
     structured-block

   Build an OMP_SINGLE node: parse the clauses, then the structured
   block, and add the statement to the current statement list.  */

#define OMP_SINGLE_CLAUSE_MASK \
( (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_PRIVATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_FIRSTPRIVATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_COPYPRIVATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOWAIT))

static tree
cp_parser_omp_single (cp_parser *parser, cp_token *pragma_tok)
{
  tree stmt = make_node (OMP_SINGLE);
  TREE_TYPE (stmt) = void_type_node;

  OMP_SINGLE_CLAUSES (stmt)
    = cp_parser_omp_all_clauses (parser, OMP_SINGLE_CLAUSE_MASK,
				 "#pragma omp single", pragma_tok);
  OMP_SINGLE_BODY (stmt) = cp_parser_omp_structured_block (parser);

  return add_stmt (stmt);
}
/* OpenMP 3.0:
   # pragma omp task task-clause[optseq] new-line
     structured-block

   Parse the clauses, then the task body inside an OMP structured-block
   scope, and hand both to finish_omp_task.  */

#define OMP_TASK_CLAUSE_MASK \
( (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_IF) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_UNTIED) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DEFAULT) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_PRIVATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_FIRSTPRIVATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SHARED) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_FINAL) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MERGEABLE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DEPEND))

static tree
cp_parser_omp_task (cp_parser *parser, cp_token *pragma_tok)
{
  tree clauses, block;
  unsigned int save;

  clauses = cp_parser_omp_all_clauses (parser, OMP_TASK_CLAUSE_MASK,
				       "#pragma omp task", pragma_tok);
  block = begin_omp_task ();
  save = cp_parser_begin_omp_structured_block (parser);
  cp_parser_statement (parser, NULL_TREE, false, NULL);
  cp_parser_end_omp_structured_block (parser, save);
  return finish_omp_task (clauses, block);
}
/* OpenMP 3.0:
   # pragma omp taskwait new-line

   Stand-alone directive: nothing to parse beyond the pragma EOL.  */

static void
cp_parser_omp_taskwait (cp_parser *parser, cp_token *pragma_tok)
{
  cp_parser_require_pragma_eol (parser, pragma_tok);
  finish_omp_taskwait ();
}
/* OpenMP 3.1:
   # pragma omp taskyield new-line

   Stand-alone directive: nothing to parse beyond the pragma EOL.  */

static void
cp_parser_omp_taskyield (cp_parser *parser, cp_token *pragma_tok)
{
  cp_parser_require_pragma_eol (parser, pragma_tok);
  finish_omp_taskyield ();
}
/* OpenMP 4.0:
   # pragma omp taskgroup new-line
     structured-block

   Parse the pragma end-of-line, then the structured block, and build the
   OMP_TASKGROUP statement.  */

static tree
cp_parser_omp_taskgroup (cp_parser *parser, cp_token *pragma_tok)
{
  cp_parser_require_pragma_eol (parser, pragma_tok);
  tree body = cp_parser_omp_structured_block (parser);
  return c_finish_omp_taskgroup (input_location, body);
}
/* OpenMP 2.5:
   # pragma omp threadprivate (variable-list)

   Parse the parenthesized variable list, then register each variable as
   threadprivate.  */

static void
cp_parser_omp_threadprivate (cp_parser *parser, cp_token *pragma_tok)
{
  tree vars;

  /* OMP_CLAUSE_ERROR means the list is not attached to any clause.  */
  vars = cp_parser_omp_var_list (parser, OMP_CLAUSE_ERROR, NULL);
  cp_parser_require_pragma_eol (parser, pragma_tok);

  finish_omp_threadprivate (vars);
}
/* OpenMP 4.0:
   # pragma omp cancel cancel-clause[optseq] new-line

   Stand-alone directive; the clauses name the construct kind being
   cancelled plus an optional "if" clause.  */

#define OMP_CANCEL_CLAUSE_MASK \
( (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_PARALLEL) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_FOR) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SECTIONS) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_TASKGROUP) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_IF))

static void
cp_parser_omp_cancel (cp_parser *parser, cp_token *pragma_tok)
{
  tree clauses = cp_parser_omp_all_clauses (parser, OMP_CANCEL_CLAUSE_MASK,
					    "#pragma omp cancel", pragma_tok);

  finish_omp_cancel (clauses);
}
/* OpenMP 4.0:
   # pragma omp cancellation point cancelpt-clause[optseq] new-line

   The "cancellation" keyword has already been consumed; require the
   "point" identifier next, then parse the construct-kind clauses.  */

#define OMP_CANCELLATION_POINT_CLAUSE_MASK \
( (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_PARALLEL) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_FOR) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SECTIONS) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_TASKGROUP))

static void
cp_parser_omp_cancellation_point (cp_parser *parser, cp_token *pragma_tok)
{
  tree clauses;
  bool point_seen = false;

  if (cp_lexer_next_token_is (parser->lexer, CPP_NAME))
    {
      tree id = cp_lexer_peek_token (parser->lexer)->u.value;
      const char *p = IDENTIFIER_POINTER (id);

      if (strcmp (p, "point") == 0)
	{
	  cp_lexer_consume_token (parser->lexer);
	  point_seen = true;
	}
    }
  if (!point_seen)
    {
      cp_parser_error (parser, "expected %<point%>");
      cp_parser_require_pragma_eol (parser, pragma_tok);
      return;
    }

  clauses = cp_parser_omp_all_clauses (parser,
				       OMP_CANCELLATION_POINT_CLAUSE_MASK,
				       "#pragma omp cancellation point",
				       pragma_tok);

  finish_omp_cancellation_point (clauses);
}
/* OpenMP 4.0:
   #pragma omp distribute distribute-clause[optseq] new-line
     for-loop

   Also handles the combined forms "distribute simd" and
   "distribute parallel ..." by delegating to the respective parsers and
   wrapping the result in an OMP_DISTRIBUTE node.  */

#define OMP_DISTRIBUTE_CLAUSE_MASK \
( (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_PRIVATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_FIRSTPRIVATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)\
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_COLLAPSE))

static tree
cp_parser_omp_distribute (cp_parser *parser, cp_token *pragma_tok,
			  char *p_name, omp_clause_mask mask, tree *cclauses)
{
  tree clauses, sb, ret;
  unsigned int save;
  location_t loc = cp_lexer_peek_token (parser->lexer)->location;

  strcat (p_name, " distribute");
  mask |= OMP_DISTRIBUTE_CLAUSE_MASK;

  if (cp_lexer_next_token_is (parser->lexer, CPP_NAME))
    {
      tree id = cp_lexer_peek_token (parser->lexer)->u.value;
      const char *p = IDENTIFIER_POINTER (id);
      bool simd = false;
      bool parallel = false;

      if (strcmp (p, "simd") == 0)
	simd = true;
      else
	parallel = strcmp (p, "parallel") == 0;
      if (parallel || simd)
	{
	  tree cclauses_buf[C_OMP_CLAUSE_SPLIT_COUNT];
	  if (cclauses == NULL)
	    cclauses = cclauses_buf;
	  cp_lexer_consume_token (parser->lexer);
	  if (!flag_openmp)  /* flag_openmp_simd */
	    {
	      /* -fopenmp-simd: only the simd part (if any) is honored.  */
	      if (simd)
		return cp_parser_omp_simd (parser, pragma_tok, p_name, mask,
					   cclauses);
	      else
		return cp_parser_omp_parallel (parser, pragma_tok, p_name, mask,
					       cclauses);
	    }
	  sb = begin_omp_structured_block ();
	  save = cp_parser_begin_omp_structured_block (parser);
	  if (simd)
	    ret = cp_parser_omp_simd (parser, pragma_tok, p_name, mask,
				      cclauses);
	  else
	    ret = cp_parser_omp_parallel (parser, pragma_tok, p_name, mask,
					  cclauses);
	  cp_parser_end_omp_structured_block (parser, save);
	  tree body = finish_omp_structured_block (sb);
	  if (ret == NULL)
	    return ret;
	  /* Wrap the inner construct in an OMP_DISTRIBUTE node carrying
	     the distribute-specific clauses from the split.  */
	  ret = make_node (OMP_DISTRIBUTE);
	  TREE_TYPE (ret) = void_type_node;
	  OMP_FOR_BODY (ret) = body;
	  OMP_FOR_CLAUSES (ret) = cclauses[C_OMP_CLAUSE_SPLIT_DISTRIBUTE];
	  SET_EXPR_LOCATION (ret, loc);
	  add_stmt (ret);
	  return ret;
	}
    }
  if (!flag_openmp)  /* flag_openmp_simd */
    {
      cp_parser_skip_to_pragma_eol (parser, pragma_tok);
      return NULL_TREE;
    }

  clauses = cp_parser_omp_all_clauses (parser, mask, p_name, pragma_tok,
				       cclauses == NULL);
  if (cclauses)
    {
      cp_omp_split_clauses (loc, OMP_DISTRIBUTE, mask, clauses, cclauses);
      clauses = cclauses[C_OMP_CLAUSE_SPLIT_DISTRIBUTE];
    }

  sb = begin_omp_structured_block ();
  save = cp_parser_begin_omp_structured_block (parser);

  /* NOTE(review): unlike the OMP_FOR/OMP_SIMD paths, cclauses is not
     forwarded to the loop parser here — presumably intentional for a
     stand-alone distribute loop; confirm against cp_parser_omp_for_loop.  */
  ret = cp_parser_omp_for_loop (parser, OMP_DISTRIBUTE, clauses, NULL);

  cp_parser_end_omp_structured_block (parser, save);
  add_stmt (finish_omp_structured_block (sb));

  return ret;
}
/* OpenMP 4.0:
   # pragma omp teams teams-clause[optseq] new-line
     structured-block

   Also handles the combined "teams distribute ..." forms by delegating
   to cp_parser_omp_distribute and wrapping the result in an OMP_TEAMS
   node.  */

#define OMP_TEAMS_CLAUSE_MASK \
( (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_PRIVATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_FIRSTPRIVATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SHARED) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_REDUCTION) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_THREAD_LIMIT) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DEFAULT))

static tree
cp_parser_omp_teams (cp_parser *parser, cp_token *pragma_tok,
		     char *p_name, omp_clause_mask mask, tree *cclauses)
{
  tree clauses, sb, ret;
  unsigned int save;
  location_t loc = cp_lexer_peek_token (parser->lexer)->location;

  strcat (p_name, " teams");
  mask |= OMP_TEAMS_CLAUSE_MASK;

  if (cp_lexer_next_token_is (parser->lexer, CPP_NAME))
    {
      tree id = cp_lexer_peek_token (parser->lexer)->u.value;
      const char *p = IDENTIFIER_POINTER (id);
      if (strcmp (p, "distribute") == 0)
	{
	  tree cclauses_buf[C_OMP_CLAUSE_SPLIT_COUNT];
	  if (cclauses == NULL)
	    cclauses = cclauses_buf;

	  cp_lexer_consume_token (parser->lexer);
	  if (!flag_openmp)  /* flag_openmp_simd */
	    /* -fopenmp-simd: only a nested simd part (if any) is
	       honored.  */
	    return cp_parser_omp_distribute (parser, pragma_tok, p_name, mask,
					     cclauses);
	  sb = begin_omp_structured_block ();
	  save = cp_parser_begin_omp_structured_block (parser);
	  ret = cp_parser_omp_distribute (parser, pragma_tok, p_name, mask,
					  cclauses);
	  cp_parser_end_omp_structured_block (parser, save);
	  tree body = finish_omp_structured_block (sb);
	  if (ret == NULL)
	    return ret;
	  /* Wrap the inner distribute in an OMP_TEAMS node carrying the
	     teams-specific clauses from the split.  */
	  clauses = cclauses[C_OMP_CLAUSE_SPLIT_TEAMS];
	  ret = make_node (OMP_TEAMS);
	  TREE_TYPE (ret) = void_type_node;
	  OMP_TEAMS_CLAUSES (ret) = clauses;
	  OMP_TEAMS_BODY (ret) = body;
	  return add_stmt (ret);
	}
    }
  if (!flag_openmp)  /* flag_openmp_simd */
    {
      cp_parser_skip_to_pragma_eol (parser, pragma_tok);
      return NULL_TREE;
    }

  clauses = cp_parser_omp_all_clauses (parser, mask, p_name, pragma_tok,
				       cclauses == NULL);
  if (cclauses)
    {
      cp_omp_split_clauses (loc, OMP_TEAMS, mask, clauses, cclauses);
      clauses = cclauses[C_OMP_CLAUSE_SPLIT_TEAMS];
    }

  tree stmt = make_node (OMP_TEAMS);
  TREE_TYPE (stmt) = void_type_node;
  OMP_TEAMS_CLAUSES (stmt) = clauses;
  OMP_TEAMS_BODY (stmt) = cp_parser_omp_structured_block (parser);

  return add_stmt (stmt);
}
/* OpenMP 4.0:
   # pragma omp target data target-data-clause[optseq] new-line
     structured-block

   Build an OMP_TARGET_DATA node from the parsed clauses and body.  */

#define OMP_TARGET_DATA_CLAUSE_MASK \
( (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DEVICE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_IF))

static tree
cp_parser_omp_target_data (cp_parser *parser, cp_token *pragma_tok)
{
  tree stmt = make_node (OMP_TARGET_DATA);
  TREE_TYPE (stmt) = void_type_node;

  OMP_TARGET_DATA_CLAUSES (stmt)
    = cp_parser_omp_all_clauses (parser, OMP_TARGET_DATA_CLAUSE_MASK,
				 "#pragma omp target data", pragma_tok);
  /* Keep the body's binding level even if it turns out to be empty.  */
  keep_next_level (true);
  OMP_TARGET_DATA_BODY (stmt) = cp_parser_omp_structured_block (parser);

  SET_EXPR_LOCATION (stmt, pragma_tok->location);
  return add_stmt (stmt);
}
/* OpenMP 4.0:
   # pragma omp target update target-update-clause[optseq] new-line

   Stand-alone directive; only valid in compound-statement context.  At
   least one "to" or "from" clause is required.  Returns false (no
   function definition was seen).  */

#define OMP_TARGET_UPDATE_CLAUSE_MASK \
( (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_FROM) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_TO) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DEVICE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_IF))

static bool
cp_parser_omp_target_update (cp_parser *parser, cp_token *pragma_tok,
			     enum pragma_context context)
{
  if (context == pragma_stmt)
    {
      error_at (pragma_tok->location,
		"%<#pragma omp target update%> may only be "
		"used in compound statements");
      cp_parser_skip_to_pragma_eol (parser, pragma_tok);
      return false;
    }

  tree clauses
    = cp_parser_omp_all_clauses (parser, OMP_TARGET_UPDATE_CLAUSE_MASK,
				 "#pragma omp target update", pragma_tok);
  if (find_omp_clause (clauses, OMP_CLAUSE_TO) == NULL_TREE
      && find_omp_clause (clauses, OMP_CLAUSE_FROM) == NULL_TREE)
    {
      /* Fix: the %< quote was previously unterminated, producing a
	 malformed diagnostic.  */
      error_at (pragma_tok->location,
		"%<#pragma omp target update%> must contain at least one "
		"%<from%> or %<to%> clauses");
      return false;
    }

  tree stmt = make_node (OMP_TARGET_UPDATE);
  TREE_TYPE (stmt) = void_type_node;
  OMP_TARGET_UPDATE_CLAUSES (stmt) = clauses;
  SET_EXPR_LOCATION (stmt, pragma_tok->location);
  add_stmt (stmt);
  return false;
}
/* OpenMP 4.0:
   # pragma omp target target-clause[optseq] new-line
     structured-block

   Dispatches the combined "target teams ..." form and the "target data"
   and "target update" directives; otherwise parses a plain target region.
   Returns true if a statement was added.  */

#define OMP_TARGET_CLAUSE_MASK \
( (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DEVICE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_IF))

static bool
cp_parser_omp_target (cp_parser *parser, cp_token *pragma_tok,
		      enum pragma_context context)
{
  if (context != pragma_stmt && context != pragma_compound)
    {
      cp_parser_error (parser, "expected declaration specifiers");
      cp_parser_skip_to_pragma_eol (parser, pragma_tok);
      return false;
    }

  if (cp_lexer_next_token_is (parser->lexer, CPP_NAME))
    {
      tree id = cp_lexer_peek_token (parser->lexer)->u.value;
      const char *p = IDENTIFIER_POINTER (id);

      if (strcmp (p, "teams") == 0)
	{
	  /* Combined "target teams ..." — p_name is sized for the
	     longest possible combined construct name.  */
	  tree cclauses[C_OMP_CLAUSE_SPLIT_COUNT];
	  char p_name[sizeof ("#pragma omp target teams distribute "
			      "parallel for simd")];

	  cp_lexer_consume_token (parser->lexer);
	  strcpy (p_name, "#pragma omp target");
	  if (!flag_openmp)  /* flag_openmp_simd */
	    {
	      tree stmt = cp_parser_omp_teams (parser, pragma_tok, p_name,
					       OMP_TARGET_CLAUSE_MASK,
					       cclauses);
	      return stmt != NULL_TREE;
	    }
	  keep_next_level (true);
	  tree sb = begin_omp_structured_block ();
	  unsigned save = cp_parser_begin_omp_structured_block (parser);
	  tree ret = cp_parser_omp_teams (parser, pragma_tok, p_name,
					  OMP_TARGET_CLAUSE_MASK, cclauses);
	  cp_parser_end_omp_structured_block (parser, save);
	  tree body = finish_omp_structured_block (sb);
	  if (ret == NULL_TREE)
	    return false;
	  /* Wrap the teams construct in an OMP_TARGET node carrying the
	     target-specific clauses from the split.  */
	  tree stmt = make_node (OMP_TARGET);
	  TREE_TYPE (stmt) = void_type_node;
	  OMP_TARGET_CLAUSES (stmt) = cclauses[C_OMP_CLAUSE_SPLIT_TARGET];
	  OMP_TARGET_BODY (stmt) = body;
	  add_stmt (stmt);
	  return true;
	}
      else if (!flag_openmp)  /* flag_openmp_simd */
	{
	  cp_parser_skip_to_pragma_eol (parser, pragma_tok);
	  return false;
	}
      else if (strcmp (p, "data") == 0)
	{
	  cp_lexer_consume_token (parser->lexer);
	  cp_parser_omp_target_data (parser, pragma_tok);
	  return true;
	}
      else if (strcmp (p, "update") == 0)
	{
	  cp_lexer_consume_token (parser->lexer);
	  return cp_parser_omp_target_update (parser, pragma_tok, context);
	}
    }

  /* Plain "#pragma omp target" with a structured block.  */
  tree stmt = make_node (OMP_TARGET);
  TREE_TYPE (stmt) = void_type_node;

  OMP_TARGET_CLAUSES (stmt)
    = cp_parser_omp_all_clauses (parser, OMP_TARGET_CLAUSE_MASK,
				 "#pragma omp target", pragma_tok);
  keep_next_level (true);
  OMP_TARGET_BODY (stmt) = cp_parser_omp_structured_block (parser);

  SET_EXPR_LOCATION (stmt, pragma_tok->location);
  add_stmt (stmt);
  return true;
}
/* OpenACC 2.0:
   # pragma acc cache (variable-list) new-line
*/

static tree
cp_parser_oacc_cache (cp_parser *parser, cp_token *pragma_tok)
{
  tree stmt, clauses;

  /* The variable list is represented as an internal _CACHE_ clause.  */
  clauses = cp_parser_omp_var_list (parser, OMP_CLAUSE__CACHE_, NULL_TREE);
  clauses = finish_omp_clauses (clauses);

  /* NOTE(review): this passes the current token rather than PRAGMA_TOK to
     the pragma-EOL check, unlike the sibling parsers — confirm this is
     intentional.  */
  cp_parser_require_pragma_eol (parser, cp_lexer_peek_token (parser->lexer));

  stmt = make_node (OACC_CACHE);
  TREE_TYPE (stmt) = void_type_node;
  OACC_CACHE_CLAUSES (stmt) = clauses;
  SET_EXPR_LOCATION (stmt, pragma_tok->location);
  add_stmt (stmt);

  return stmt;
}
/* OpenACC 2.0:
   # pragma acc data oacc-data-clause[optseq] new-line
     structured-block

   Parse the clauses and the structured block inside an OMP
   structured-block scope, then build the OACC_DATA statement.  */

#define OACC_DATA_CLAUSE_MASK \
( (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_COPY) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_COPYIN) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_COPYOUT) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_CREATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_DEVICEPTR) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_IF) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_PRESENT) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_PRESENT_OR_COPY) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_PRESENT_OR_COPYIN) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_PRESENT_OR_COPYOUT) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_PRESENT_OR_CREATE))

static tree
cp_parser_oacc_data (cp_parser *parser, cp_token *pragma_tok)
{
  tree stmt, clauses, block;
  unsigned int save;

  clauses = cp_parser_oacc_all_clauses (parser, OACC_DATA_CLAUSE_MASK,
					"#pragma acc data", pragma_tok);

  block = begin_omp_parallel ();
  save = cp_parser_begin_omp_structured_block (parser);
  cp_parser_statement (parser, NULL_TREE, false, NULL);
  cp_parser_end_omp_structured_block (parser, save);
  stmt = finish_oacc_data (clauses, block);
  return stmt;
}
/* OpenACC 2.0:
   # pragma acc enter data oacc-enter-data-clause[optseq] new-line

   or

   # pragma acc exit data oacc-exit-data-clause[optseq] new-line

   ENTER selects between the two forms; the "enter"/"exit" keyword has
   already been consumed, so the next token must be "data".  At least one
   data movement clause is required.  */

#define OACC_ENTER_DATA_CLAUSE_MASK \
( (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_IF) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_ASYNC) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_COPYIN) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_CREATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_PRESENT_OR_COPYIN) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_PRESENT_OR_CREATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_WAIT) )

#define OACC_EXIT_DATA_CLAUSE_MASK \
( (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_IF) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_ASYNC) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_COPYOUT) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_DELETE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_WAIT) )

static tree
cp_parser_oacc_enter_exit_data (cp_parser *parser, cp_token *pragma_tok,
				bool enter)
{
  tree stmt, clauses;

  if (cp_lexer_next_token_is (parser->lexer, CPP_PRAGMA_EOL)
      || cp_lexer_next_token_is_not (parser->lexer, CPP_NAME))
    {
      cp_parser_error (parser, enter
		       ? "expected %<data%> in %<#pragma acc enter data%>"
		       : "expected %<data%> in %<#pragma acc exit data%>");
      cp_parser_skip_to_pragma_eol (parser, pragma_tok);
      return NULL_TREE;
    }

  const char *p =
    IDENTIFIER_POINTER (cp_lexer_peek_token (parser->lexer)->u.value);
  if (strcmp (p, "data") != 0)
    {
      cp_parser_error (parser, "invalid pragma");
      cp_parser_skip_to_pragma_eol (parser, pragma_tok);
      return NULL_TREE;
    }

  cp_lexer_consume_token (parser->lexer);

  if (enter)
    clauses = cp_parser_oacc_all_clauses (parser, OACC_ENTER_DATA_CLAUSE_MASK,
					  "#pragma acc enter data", pragma_tok);
  else
    clauses = cp_parser_oacc_all_clauses (parser, OACC_EXIT_DATA_CLAUSE_MASK,
					  "#pragma acc exit data", pragma_tok);

  if (find_omp_clause (clauses, OMP_CLAUSE_MAP) == NULL_TREE)
    {
      /* Fix: name the construct that was actually parsed — this
	 diagnostic previously said "enter data" even for "exit data".  */
      error_at (pragma_tok->location,
		enter
		? "%<#pragma acc enter data%> has no data movement clause"
		: "%<#pragma acc exit data%> has no data movement clause");
      return NULL_TREE;
    }

  stmt = enter ? make_node (OACC_ENTER_DATA) : make_node (OACC_EXIT_DATA);
  TREE_TYPE (stmt) = void_type_node;
  if (enter)
    OACC_ENTER_DATA_CLAUSES (stmt) = clauses;
  else
    OACC_EXIT_DATA_CLAUSES (stmt) = clauses;
  SET_EXPR_LOCATION (stmt, pragma_tok->location);
  add_stmt (stmt);
  return stmt;
}
/* OpenACC 2.0:
   # pragma acc kernels oacc-kernels-clause[optseq] new-line
     structured-block

   Parse the clauses and the structured block inside an OMP
   structured-block scope, then build the OACC_KERNELS statement.  */

#define OACC_KERNELS_CLAUSE_MASK \
( (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_ASYNC) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_COPY) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_COPYIN) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_COPYOUT) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_CREATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_DEVICEPTR) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_IF) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_PRESENT) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_PRESENT_OR_COPY) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_PRESENT_OR_COPYIN) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_PRESENT_OR_COPYOUT) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_PRESENT_OR_CREATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_WAIT))

static tree
cp_parser_oacc_kernels (cp_parser *parser, cp_token *pragma_tok)
{
  tree stmt, clauses, block;
  unsigned int save;

  clauses = cp_parser_oacc_all_clauses (parser, OACC_KERNELS_CLAUSE_MASK,
					"#pragma acc kernels", pragma_tok);

  block = begin_omp_parallel ();
  save = cp_parser_begin_omp_structured_block (parser);
  cp_parser_statement (parser, NULL_TREE, false, NULL);
  cp_parser_end_omp_structured_block (parser, save);
  stmt = finish_oacc_kernels (clauses, block);
  return stmt;
}
/* OpenACC 2.0:
   # pragma acc loop oacc-loop-clause[optseq] new-line
     structured-block

   Parse the clauses, then the loop nest via cp_parser_omp_for_loop
   inside an OMP structured-block scope.  */

#define OACC_LOOP_CLAUSE_MASK \
( (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_COLLAPSE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_REDUCTION))

static tree
cp_parser_oacc_loop (cp_parser *parser, cp_token *pragma_tok)
{
  tree stmt, clauses, block;
  /* Fix: was plain "int"; cp_parser_begin_omp_structured_block's saved
     state is handled as "unsigned int" by every sibling parser — use the
     same type here for consistency.  */
  unsigned int save;

  clauses = cp_parser_oacc_all_clauses (parser, OACC_LOOP_CLAUSE_MASK,
					"#pragma acc loop", pragma_tok);

  block = begin_omp_structured_block ();
  save = cp_parser_begin_omp_structured_block (parser);
  stmt = cp_parser_omp_for_loop (parser, OACC_LOOP, clauses, NULL);
  cp_parser_end_omp_structured_block (parser, save);
  add_stmt (finish_omp_structured_block (block));
  return stmt;
}
/* OpenACC 2.0:
   # pragma acc parallel oacc-parallel-clause[optseq] new-line
     structured-block

   Parse the clauses and the structured block inside an OMP
   structured-block scope, then build the OACC_PARALLEL statement.  */

#define OACC_PARALLEL_CLAUSE_MASK \
( (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_ASYNC) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_COPY) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_COPYIN) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_COPYOUT) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_CREATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_DEVICEPTR) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_IF) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_NUM_GANGS) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_NUM_WORKERS) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_PRESENT) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_PRESENT_OR_COPY) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_PRESENT_OR_COPYIN) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_PRESENT_OR_COPYOUT) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_PRESENT_OR_CREATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_REDUCTION) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_VECTOR_LENGTH) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_WAIT))

static tree
cp_parser_oacc_parallel (cp_parser *parser, cp_token *pragma_tok)
{
  tree stmt, clauses, block;
  unsigned int save;

  clauses = cp_parser_oacc_all_clauses (parser, OACC_PARALLEL_CLAUSE_MASK,
					"#pragma acc parallel", pragma_tok);

  block = begin_omp_parallel ();
  save = cp_parser_begin_omp_structured_block (parser);
  cp_parser_statement (parser, NULL_TREE, false, NULL);
  cp_parser_end_omp_structured_block (parser, save);
  stmt = finish_oacc_parallel (clauses, block);
  return stmt;
}
/* OpenACC 2.0:
   # pragma acc update oacc-update-clause[optseq] new-line
*/

#define OACC_UPDATE_CLAUSE_MASK \
( (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_ASYNC) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_DEVICE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_HOST) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_IF) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_SELF) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_WAIT))

static tree
cp_parser_oacc_update (cp_parser *parser, cp_token *pragma_tok)
{
  tree stmt, clauses;

  clauses = cp_parser_oacc_all_clauses (parser, OACC_UPDATE_CLAUSE_MASK,
					"#pragma acc update", pragma_tok);

  /* The device/host/self clauses are apparently represented internally as
     OMP_CLAUSE_MAP, so a missing MAP clause means no data movement was
     requested — TODO confirm against cp_parser_oacc_all_clauses.  */
  if (find_omp_clause (clauses, OMP_CLAUSE_MAP) == NULL_TREE)
    {
      error_at (pragma_tok->location,
		"%<#pragma acc update%> must contain at least one "
		"%<device%> or %<host/self%> clause");
      return NULL_TREE;
    }

  stmt = make_node (OACC_UPDATE);
  TREE_TYPE (stmt) = void_type_node;
  OACC_UPDATE_CLAUSES (stmt) = clauses;
  SET_EXPR_LOCATION (stmt, pragma_tok->location);
  add_stmt (stmt);
  return stmt;
}
/* OpenACC 2.0:
# pragma acc wait [(intseq)] oacc-wait-clause[optseq] new-line
LOC is the location of the #pragma token.
*/
#define OACC_WAIT_CLAUSE_MASK \
( (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_ASYNC))
/* Parse a #pragma acc wait directive: an optional parenthesized list of
   wait arguments followed by the clauses in OACC_WAIT_CLAUSE_MASK (only
   ASYNC).  Returns the statement built by c_finish_oacc_wait.  */
static tree
cp_parser_oacc_wait (cp_parser *parser, cp_token *pragma_tok)
{
tree clauses, list = NULL_TREE, stmt = NULL_TREE;
location_t loc = cp_lexer_peek_token (parser->lexer)->location;
/* The "(intseq)" wait-argument list is optional.  */
if (cp_lexer_peek_token (parser->lexer)->type == CPP_OPEN_PAREN)
list = cp_parser_oacc_wait_list (parser, loc, list);
clauses = cp_parser_oacc_all_clauses (parser, OACC_WAIT_CLAUSE_MASK,
"#pragma acc wait", pragma_tok);
stmt = c_finish_oacc_wait (loc, list, clauses);
return stmt;
}
/* OpenMP 4.0:
# pragma omp declare simd declare-simd-clauses[optseq] new-line */
#define OMP_DECLARE_SIMD_CLAUSE_MASK \
( (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SIMDLEN) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_LINEAR) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_ALIGNED) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_UNIFORM) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_INBRANCH) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOTINBRANCH))
/* Handle #pragma omp declare simd.  The clauses are not parsed here;
   instead the pragma's tokens are cached so that they can be re-parsed
   later (see cp_parser_late_parsing_omp_declare_simd), once the declarator
   of the following function declaration is known.  */
static void
cp_parser_omp_declare_simd (cp_parser *parser, cp_token *pragma_tok,
enum pragma_context context)
{
/* first_p is true for the outermost of a run of consecutive
   declare simd pragmas attached to the same declaration.  */
bool first_p = parser->omp_declare_simd == NULL;
/* NOTE: DATA lives on this stack frame; parser->omp_declare_simd points
   at it for the duration of the outermost call and is cleared below.  */
cp_omp_declare_simd_data data;
if (first_p)
{
data.error_seen = false;
data.fndecl_seen = false;
data.tokens = vNULL;
parser->omp_declare_simd = &data;
}
/* Skip (without parsing) everything up to the end of the pragma line.  */
while (cp_lexer_next_token_is_not (parser->lexer, CPP_PRAGMA_EOL)
&& cp_lexer_next_token_is_not (parser->lexer, CPP_EOF))
cp_lexer_consume_token (parser->lexer);
if (cp_lexer_next_token_is_not (parser->lexer, CPP_PRAGMA_EOL))
parser->omp_declare_simd->error_seen = true;
cp_parser_require_pragma_eol (parser, pragma_tok);
/* Cache the skipped tokens for the late re-parse.  */
struct cp_token_cache *cp
= cp_token_cache_new (pragma_tok, cp_lexer_peek_token (parser->lexer));
parser->omp_declare_simd->tokens.safe_push (cp);
if (first_p)
{
/* Consume any further declare simd pragmas stacked on the same
   declaration, then parse the declaration itself in a manner
   appropriate to the surrounding context.  */
while (cp_lexer_next_token_is (parser->lexer, CPP_PRAGMA))
cp_parser_pragma (parser, context);
switch (context)
{
case pragma_external:
cp_parser_declaration (parser);
break;
case pragma_member:
cp_parser_member_declaration (parser);
break;
case pragma_objc_icode:
cp_parser_block_declaration (parser, /*statement_p=*/false);
break;
default:
cp_parser_declaration_statement (parser);
break;
}
/* If the late parse never saw a function declarator, the pragma
   was misplaced.  */
if (parser->omp_declare_simd
&& !parser->omp_declare_simd->error_seen
&& !parser->omp_declare_simd->fndecl_seen)
error_at (pragma_tok->location,
"%<#pragma omp declare simd%> not immediately followed by "
"function declaration or definition");
data.tokens.release ();
parser->omp_declare_simd = NULL;
}
}
/* Handles the delayed parsing of the Cilk Plus SIMD-enabled function.
This function is modelled similar to the late parsing of omp declare
simd.  Re-parses the cached clause tokens stashed in
parser->cilk_simd_fn_info and chains the resulting "cilk simd function"
and "omp declare simd" attributes onto ATTRS, which is returned.  */
static tree
cp_parser_late_parsing_cilk_simd_fn_info (cp_parser *parser, tree attrs)
{
struct cp_token_cache *ce;
cp_omp_declare_simd_data *info = parser->cilk_simd_fn_info;
int ii = 0;
/* The two mechanisms share parser state, so mixing them on one
   function is rejected.  */
if (parser->omp_declare_simd != NULL)
{
error ("%<#pragma omp declare simd%> cannot be used in the same function"
" marked as a Cilk Plus SIMD-enabled function");
XDELETE (parser->cilk_simd_fn_info);
parser->cilk_simd_fn_info = NULL;
return attrs;
}
if (!info->error_seen && info->fndecl_seen)
{
error ("vector attribute not immediately followed by a single function"
" declaration or definition");
info->error_seen = true;
}
if (info->error_seen)
return attrs;
/* Re-parse each cached token run as an OMP clause list.  */
FOR_EACH_VEC_ELT (info->tokens, ii, ce)
{
tree c, cl;
cp_parser_push_lexer_for_tokens (parser, ce);
parser->lexer->in_pragma = true;
cl = cp_parser_omp_all_clauses (parser, CILK_SIMD_FN_CLAUSE_MASK,
"SIMD-enabled functions attribute",
NULL);
cp_parser_pop_lexer (parser);
if (cl)
cl = tree_cons (NULL_TREE, cl, NULL_TREE);
/* Prepend both attributes onto the chain.  */
c = build_tree_list (get_identifier ("cilk simd function"), NULL_TREE);
TREE_CHAIN (c) = attrs;
attrs = c;
c = build_tree_list (get_identifier ("omp declare simd"), cl);
TREE_CHAIN (c) = attrs;
if (processing_template_decl)
ATTR_IS_DEPENDENT (c) = 1;
attrs = c;
}
info->fndecl_seen = true;
XDELETE (parser->cilk_simd_fn_info);
parser->cilk_simd_fn_info = NULL;
return attrs;
}
/* Finalize #pragma omp declare simd clauses after direct declarator has
been parsed, and put that into "omp declare simd" attribute.
Re-parses the token caches stashed by cp_parser_omp_declare_simd and
returns ATTRS with one "omp declare simd" attribute prepended per
cached pragma.  */
static tree
cp_parser_late_parsing_omp_declare_simd (cp_parser *parser, tree attrs)
{
struct cp_token_cache *ce;
cp_omp_declare_simd_data *data = parser->omp_declare_simd;
int i;
/* A second function declarator after the same pragma run is an error.  */
if (!data->error_seen && data->fndecl_seen)
{
error ("%<#pragma omp declare simd%> not immediately followed by "
"a single function declaration or definition");
data->error_seen = true;
return attrs;
}
if (data->error_seen)
return attrs;
FOR_EACH_VEC_ELT (data->tokens, i, ce)
{
tree c, cl;
/* Switch the lexer to the cached tokens; each cache begins with the
   CPP_PRAGMA token itself.  */
cp_parser_push_lexer_for_tokens (parser, ce);
parser->lexer->in_pragma = true;
gcc_assert (cp_lexer_peek_token (parser->lexer)->type == CPP_PRAGMA);
cp_token *pragma_tok = cp_lexer_consume_token (parser->lexer);
/* Skip the token following the pragma (presumably the %<simd%>
   name already checked earlier) -- confirm against the caching
   site in cp_parser_omp_declare_simd.  */
cp_lexer_consume_token (parser->lexer);
cl = cp_parser_omp_all_clauses (parser, OMP_DECLARE_SIMD_CLAUSE_MASK,
"#pragma omp declare simd", pragma_tok);
cp_parser_pop_lexer (parser);
if (cl)
cl = tree_cons (NULL_TREE, cl, NULL_TREE);
c = build_tree_list (get_identifier ("omp declare simd"), cl);
TREE_CHAIN (c) = attrs;
if (processing_template_decl)
ATTR_IS_DEPENDENT (c) = 1;
attrs = c;
}
data->fndecl_seen = true;
return attrs;
}
/* OpenMP 4.0:
# pragma omp declare target new-line
declarations and definitions
# pragma omp end declare target new-line
Opens a declare target region by bumping the nesting counter on
scope_chain; the matching "end declare target" decrements it.  */
static void
cp_parser_omp_declare_target (cp_parser *parser, cp_token *pragma_tok)
{
cp_parser_skip_to_pragma_eol (parser, pragma_tok);
scope_chain->omp_declare_target_attribute++;
}
/* Parse the "declare target" tokens of #pragma omp end declare target
   (the "end" has already been consumed by the caller) and close the
   region opened by cp_parser_omp_declare_target, diagnosing an
   unmatched end.  */
static void
cp_parser_omp_end_declare_target (cp_parser *parser, cp_token *pragma_tok)
{
const char *p = "";
if (cp_lexer_next_token_is (parser->lexer, CPP_NAME))
{
tree id = cp_lexer_peek_token (parser->lexer)->u.value;
p = IDENTIFIER_POINTER (id);
}
if (strcmp (p, "declare") == 0)
{
cp_lexer_consume_token (parser->lexer);
p = "";
if (cp_lexer_next_token_is (parser->lexer, CPP_NAME))
{
tree id = cp_lexer_peek_token (parser->lexer)->u.value;
p = IDENTIFIER_POINTER (id);
}
if (strcmp (p, "target") == 0)
cp_lexer_consume_token (parser->lexer);
else
{
cp_parser_error (parser, "expected %<target%>");
cp_parser_skip_to_pragma_eol (parser, pragma_tok);
return;
}
}
else
{
cp_parser_error (parser, "expected %<declare%>");
cp_parser_skip_to_pragma_eol (parser, pragma_tok);
return;
}
cp_parser_skip_to_pragma_eol (parser, pragma_tok);
/* The counter pairs "declare target" with "end declare target".  */
if (!scope_chain->omp_declare_target_attribute)
error_at (pragma_tok->location,
"%<#pragma omp end declare target%> without corresponding "
"%<#pragma omp declare target%>");
else
scope_chain->omp_declare_target_attribute--;
}
/* Helper function of cp_parser_omp_declare_reduction.  Parse the combiner
expression and optional initializer clause of
#pragma omp declare reduction.  We store the expression(s) as
either 3, 6 or 7 special statements inside of the artificial function's
body.  The first two statements are DECL_EXPRs for the artificial
OMP_OUT resp. OMP_IN variables, followed by a statement with the combiner
expression that uses those variables.
If there was any INITIALIZER clause, this is followed by further statements,
the fourth and fifth statements are DECL_EXPRs for the artificial
OMP_PRIV resp. OMP_ORIG variables.  If the INITIALIZER clause wasn't the
constructor variant (first token after open paren is not omp_priv),
then the sixth statement is a statement with the function call expression
that uses the OMP_PRIV and optionally OMP_ORIG variable.
Otherwise, the sixth statement is whatever statement cp_finish_decl emits
to initialize the OMP_PRIV artificial variable and there is seventh
statement, a DECL_EXPR of the OMP_PRIV statement again.
Returns true on success, false on a parse error (the caller then skips
to the end of the pragma).  */
static bool
cp_parser_omp_declare_reduction_exprs (tree fndecl, cp_parser *parser)
{
/* FNDECL takes a single reference parameter; the reduction element
   type is the referenced type.  */
tree type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
gcc_assert (TREE_CODE (type) == REFERENCE_TYPE);
type = TREE_TYPE (type);
/* Statements 1 and 2: DECL_EXPRs for omp_out and omp_in.  */
tree omp_out = build_lang_decl (VAR_DECL, get_identifier ("omp_out"), type);
DECL_ARTIFICIAL (omp_out) = 1;
pushdecl (omp_out);
add_decl_expr (omp_out);
tree omp_in = build_lang_decl (VAR_DECL, get_identifier ("omp_in"), type);
DECL_ARTIFICIAL (omp_in) = 1;
pushdecl (omp_in);
add_decl_expr (omp_in);
tree combiner;
tree omp_priv = NULL_TREE, omp_orig = NULL_TREE, initializer = NULL_TREE;
/* Statement 3: the combiner expression, wrapped in its own
   structured block.  */
keep_next_level (true);
tree block = begin_omp_structured_block ();
combiner = cp_parser_expression (parser);
finish_expr_stmt (combiner);
block = finish_omp_structured_block (block);
add_stmt (block);
if (!cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN))
return false;
const char *p = "";
if (cp_lexer_next_token_is (parser->lexer, CPP_NAME))
{
tree id = cp_lexer_peek_token (parser->lexer)->u.value;
p = IDENTIFIER_POINTER (id);
}
/* Optional initializer-clause.  */
if (strcmp (p, "initializer") == 0)
{
cp_lexer_consume_token (parser->lexer);
if (!cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN))
return false;
p = "";
if (cp_lexer_next_token_is (parser->lexer, CPP_NAME))
{
tree id = cp_lexer_peek_token (parser->lexer)->u.value;
p = IDENTIFIER_POINTER (id);
}
/* Statements 4 and 5: DECL_EXPRs for omp_priv and omp_orig.  */
omp_priv = build_lang_decl (VAR_DECL, get_identifier ("omp_priv"), type);
DECL_ARTIFICIAL (omp_priv) = 1;
pushdecl (omp_priv);
add_decl_expr (omp_priv);
omp_orig = build_lang_decl (VAR_DECL, get_identifier ("omp_orig"), type);
DECL_ARTIFICIAL (omp_orig) = 1;
pushdecl (omp_orig);
add_decl_expr (omp_orig);
keep_next_level (true);
block = begin_omp_structured_block ();
bool ctor = false;
/* Constructor variant: "initializer (omp_priv <init>)".  */
if (strcmp (p, "omp_priv") == 0)
{
bool is_direct_init, is_non_constant_init;
ctor = true;
cp_lexer_consume_token (parser->lexer);
/* Reject initializer (omp_priv) and initializer (omp_priv ()).  */
if (cp_lexer_next_token_is (parser->lexer, CPP_CLOSE_PAREN)
|| (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_PAREN)
&& cp_lexer_peek_nth_token (parser->lexer, 2)->type
== CPP_CLOSE_PAREN
&& cp_lexer_peek_nth_token (parser->lexer, 3)->type
== CPP_CLOSE_PAREN))
{
finish_omp_structured_block (block);
error ("invalid initializer clause");
return false;
}
initializer = cp_parser_initializer (parser, &is_direct_init,
&is_non_constant_init);
cp_finish_decl (omp_priv, initializer, !is_non_constant_init,
NULL_TREE, LOOKUP_ONLYCONVERTING);
}
else
{
/* Function-call variant: "initializer (fn (args))".  A tentative
   parse first validates the shape and checks that omp_priv or
   &omp_priv appears among the arguments, then the call is parsed
   for real as a postfix expression.  */
cp_parser_parse_tentatively (parser);
tree fn_name = cp_parser_id_expression (parser, /*template_p=*/false,
/*check_dependency_p=*/true,
/*template_p=*/NULL,
/*declarator_p=*/false,
/*optional_p=*/false);
vec<tree, va_gc> *args;
if (fn_name == error_mark_node
|| cp_parser_error_occurred (parser)
|| !cp_lexer_next_token_is (parser->lexer, CPP_OPEN_PAREN)
|| ((args = cp_parser_parenthesized_expression_list
(parser, non_attr, /*cast_p=*/false,
/*allow_expansion_p=*/true,
/*non_constant_p=*/NULL)),
cp_parser_error_occurred (parser)))
{
finish_omp_structured_block (block);
cp_parser_abort_tentative_parse (parser);
cp_parser_error (parser, "expected id-expression (arguments)");
return false;
}
unsigned int i;
tree arg;
FOR_EACH_VEC_SAFE_ELT (args, i, arg)
if (arg == omp_priv
|| (TREE_CODE (arg) == ADDR_EXPR
&& TREE_OPERAND (arg, 0) == omp_priv))
break;
/* The tentative parse is aborted in all cases; the real parse
   below re-reads the same tokens.  */
cp_parser_abort_tentative_parse (parser);
if (arg == NULL_TREE)
error ("one of the initializer call arguments should be %<omp_priv%>"
" or %<&omp_priv%>");
initializer = cp_parser_postfix_expression (parser, false, false, false,
false, NULL);
finish_expr_stmt (initializer);
}
block = finish_omp_structured_block (block);
cp_walk_tree (&block, cp_remove_omp_priv_cleanup_stmt, omp_priv, NULL);
add_stmt (block);
/* Statement 7 (constructor variant only): another DECL_EXPR.  */
if (ctor)
add_decl_expr (omp_orig);
if (!cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN))
return false;
}
if (!cp_lexer_next_token_is (parser->lexer, CPP_PRAGMA_EOL))
cp_parser_required_error (parser, RT_PRAGMA_EOL, /*keyword=*/false);
return true;
}
/* OpenMP 4.0
#pragma omp declare reduction (reduction-id : typename-list : expression) \
initializer-clause[opt] new-line
initializer-clause:
initializer (omp_priv initializer)
initializer (function-name (argument-list))
Parses the reduction-id and type list, then for each listed type builds
an artificial FUNCTION_DECL whose body holds the combiner/initializer
statements (see cp_parser_omp_declare_reduction_exprs).  At class scope
the body is cached for late parsing instead.  */
static void
cp_parser_omp_declare_reduction (cp_parser *parser, cp_token *pragma_tok,
enum pragma_context)
{
auto_vec<tree> types;
enum tree_code reduc_code = ERROR_MARK;
tree reduc_id = NULL_TREE, orig_reduc_id = NULL_TREE, type;
unsigned int i;
cp_token *first_token;
cp_token_cache *cp;
int errs;
void *p;
/* Get the high-water mark for the DECLARATOR_OBSTACK.  */
p = obstack_alloc (&declarator_obstack, 0);
if (!cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN))
goto fail;
/* The reduction-id is either one of the predefined operators or an
   identifier.  */
switch (cp_lexer_peek_token (parser->lexer)->type)
{
case CPP_PLUS:
reduc_code = PLUS_EXPR;
break;
case CPP_MULT:
reduc_code = MULT_EXPR;
break;
case CPP_MINUS:
reduc_code = MINUS_EXPR;
break;
case CPP_AND:
reduc_code = BIT_AND_EXPR;
break;
case CPP_XOR:
reduc_code = BIT_XOR_EXPR;
break;
case CPP_OR:
reduc_code = BIT_IOR_EXPR;
break;
case CPP_AND_AND:
reduc_code = TRUTH_ANDIF_EXPR;
break;
case CPP_OR_OR:
reduc_code = TRUTH_ORIF_EXPR;
break;
case CPP_NAME:
reduc_id = orig_reduc_id = cp_parser_identifier (parser);
break;
default:
cp_parser_error (parser, "expected %<+%>, %<*%>, %<-%>, %<&%>, %<^%>, "
"%<|%>, %<&&%>, %<||%> or identifier");
goto fail;
}
/* Operator tokens have only been peeked; consume them now.  */
if (reduc_code != ERROR_MARK)
cp_lexer_consume_token (parser->lexer);
reduc_id = omp_reduction_id (reduc_code, reduc_id, NULL_TREE);
if (reduc_id == error_mark_node)
goto fail;
if (!cp_parser_require (parser, CPP_COLON, RT_COLON))
goto fail;
/* Types may not be defined in declare reduction type list.  */
const char *saved_message;
saved_message = parser->type_definition_forbidden_message;
parser->type_definition_forbidden_message
= G_("types may not be defined in declare reduction type list");
/* The colons in this construct must not be "corrected" to scopes or
   treated as class-def introducers; save and override those flags.  */
bool saved_colon_corrects_to_scope_p;
saved_colon_corrects_to_scope_p = parser->colon_corrects_to_scope_p;
parser->colon_corrects_to_scope_p = false;
bool saved_colon_doesnt_start_class_def_p;
saved_colon_doesnt_start_class_def_p
= parser->colon_doesnt_start_class_def_p;
parser->colon_doesnt_start_class_def_p = true;
/* Parse the comma-separated typename-list, rejecting the type
   categories the standard forbids.  */
while (true)
{
location_t loc = cp_lexer_peek_token (parser->lexer)->location;
type = cp_parser_type_id (parser);
if (type == error_mark_node)
;
else if (ARITHMETIC_TYPE_P (type)
&& (orig_reduc_id == NULL_TREE
|| (TREE_CODE (type) != COMPLEX_TYPE
&& (strcmp (IDENTIFIER_POINTER (orig_reduc_id),
"min") == 0
|| strcmp (IDENTIFIER_POINTER (orig_reduc_id),
"max") == 0))))
error_at (loc, "predeclared arithmetic type %qT in "
"%<#pragma omp declare reduction%>", type);
else if (TREE_CODE (type) == FUNCTION_TYPE
|| TREE_CODE (type) == METHOD_TYPE
|| TREE_CODE (type) == ARRAY_TYPE)
error_at (loc, "function or array type %qT in "
"%<#pragma omp declare reduction%>", type);
else if (TREE_CODE (type) == REFERENCE_TYPE)
error_at (loc, "reference type %qT in "
"%<#pragma omp declare reduction%>", type);
else if (TYPE_QUALS_NO_ADDR_SPACE (type))
error_at (loc, "const, volatile or __restrict qualified type %qT in "
"%<#pragma omp declare reduction%>", type);
else
types.safe_push (type);
if (cp_lexer_next_token_is (parser->lexer, CPP_COMMA))
cp_lexer_consume_token (parser->lexer);
else
break;
}
/* Restore the saved message.  */
parser->type_definition_forbidden_message = saved_message;
parser->colon_corrects_to_scope_p = saved_colon_corrects_to_scope_p;
parser->colon_doesnt_start_class_def_p
= saved_colon_doesnt_start_class_def_p;
if (!cp_parser_require (parser, CPP_COLON, RT_COLON)
|| types.is_empty ())
{
/* fail is also the target of gotos from before and after this point;
   it skips the rest of the pragma and releases the obstack.  */
fail:
cp_parser_skip_to_pragma_eol (parser, pragma_tok);
goto done;
}
first_token = cp_lexer_peek_token (parser->lexer);
cp = NULL;
errs = errorcount;
/* Build one artificial reduction function per listed type.  The same
   combiner tokens are re-used for each type via the token cache CP.  */
FOR_EACH_VEC_ELT (types, i, type)
{
tree fntype
= build_function_type_list (void_type_node,
cp_build_reference_type (type, false),
NULL_TREE);
tree this_reduc_id = reduc_id;
if (!dependent_type_p (type))
this_reduc_id = omp_reduction_id (ERROR_MARK, reduc_id, type);
tree fndecl = build_lang_decl (FUNCTION_DECL, this_reduc_id, fntype);
DECL_SOURCE_LOCATION (fndecl) = pragma_tok->location;
DECL_ARTIFICIAL (fndecl) = 1;
DECL_EXTERNAL (fndecl) = 1;
DECL_DECLARED_INLINE_P (fndecl) = 1;
DECL_IGNORED_P (fndecl) = 1;
DECL_OMP_DECLARE_REDUCTION_P (fndecl) = 1;
DECL_ATTRIBUTES (fndecl)
= tree_cons (get_identifier ("gnu_inline"), NULL_TREE,
DECL_ATTRIBUTES (fndecl));
if (processing_template_decl)
fndecl = push_template_decl (fndecl);
bool block_scope = false;
tree block = NULL_TREE;
if (current_function_decl)
{
block_scope = true;
DECL_CONTEXT (fndecl) = global_namespace;
if (!processing_template_decl)
pushdecl (fndecl);
}
else if (current_class_type)
{
/* At class scope the body cannot be parsed yet; cache the tokens
   and queue the member function for late parsing.  */
if (cp == NULL)
{
while (cp_lexer_next_token_is_not (parser->lexer, CPP_PRAGMA_EOL)
&& cp_lexer_next_token_is_not (parser->lexer, CPP_EOF))
cp_lexer_consume_token (parser->lexer);
if (cp_lexer_next_token_is_not (parser->lexer, CPP_PRAGMA_EOL))
goto fail;
cp = cp_token_cache_new (first_token,
cp_lexer_peek_nth_token (parser->lexer,
2));
}
DECL_STATIC_FUNCTION_P (fndecl) = 1;
finish_member_declaration (fndecl);
DECL_PENDING_INLINE_INFO (fndecl) = cp;
DECL_PENDING_INLINE_P (fndecl) = 1;
vec_safe_push (unparsed_funs_with_definitions, fndecl);
continue;
}
else
{
DECL_CONTEXT (fndecl) = current_namespace;
pushdecl (fndecl);
}
/* Parse the combiner/initializer either inside a freshly started
   function or, at block scope, inside a structured block whose
   contents become the function's saved tree.  */
if (!block_scope)
start_preparsed_function (fndecl, NULL_TREE, SF_PRE_PARSED);
else
block = begin_omp_structured_block ();
if (cp)
{
cp_parser_push_lexer_for_tokens (parser, cp);
parser->lexer->in_pragma = true;
}
if (!cp_parser_omp_declare_reduction_exprs (fndecl, parser))
{
if (!block_scope)
finish_function (0);
else
DECL_CONTEXT (fndecl) = current_function_decl;
if (cp)
cp_parser_pop_lexer (parser);
goto fail;
}
if (cp)
cp_parser_pop_lexer (parser);
if (!block_scope)
finish_function (0);
else
{
DECL_CONTEXT (fndecl) = current_function_decl;
block = finish_omp_structured_block (block);
if (TREE_CODE (block) == BIND_EXPR)
DECL_SAVED_TREE (fndecl) = BIND_EXPR_BODY (block);
else if (TREE_CODE (block) == STATEMENT_LIST)
DECL_SAVED_TREE (fndecl) = block;
if (processing_template_decl)
add_decl_expr (fndecl);
}
cp_check_omp_declare_reduction (fndecl);
/* With multiple types, cache the tokens after the first real parse
   so later iterations can re-read them.  */
if (cp == NULL && types.length () > 1)
cp = cp_token_cache_new (first_token,
cp_lexer_peek_nth_token (parser->lexer, 2));
/* Stop at the first iteration that produced new errors.  */
if (errs != errorcount)
break;
}
cp_parser_require_pragma_eol (parser, pragma_tok);
done:
/* Free any declarators allocated.  */
obstack_free (&declarator_obstack, p);
}
/* OpenMP 4.0
#pragma omp declare simd declare-simd-clauses[optseq] new-line
#pragma omp declare reduction (reduction-id : typename-list : expression) \
initializer-clause[opt] new-line
#pragma omp declare target new-line
Dispatch the "declare" pragma to the simd, reduction or target handler,
depending on the identifier that follows.  */
static void
cp_parser_omp_declare (cp_parser *parser, cp_token *pragma_tok,
enum pragma_context context)
{
if (cp_lexer_next_token_is (parser->lexer, CPP_NAME))
{
tree ident = cp_lexer_peek_token (parser->lexer)->u.value;
const char *sel = IDENTIFIER_POINTER (ident);
if (strcmp (sel, "simd") == 0)
{
cp_lexer_consume_token (parser->lexer);
cp_parser_omp_declare_simd (parser, pragma_tok, context);
return;
}
/* Anything other than "declare simd" terminates a pending
   declare simd region.  */
cp_ensure_no_omp_declare_simd (parser);
if (strcmp (sel, "reduction") == 0)
{
cp_lexer_consume_token (parser->lexer);
cp_parser_omp_declare_reduction (parser, pragma_tok, context);
return;
}
if (!flag_openmp) /* flag_openmp_simd */
{
/* Only "declare simd"/"declare reduction" matter without full
   OpenMP; silently skip the rest of the pragma.  */
cp_parser_skip_to_pragma_eol (parser, pragma_tok);
return;
}
if (strcmp (sel, "target") == 0)
{
cp_lexer_consume_token (parser->lexer);
cp_parser_omp_declare_target (parser, pragma_tok);
return;
}
}
cp_parser_error (parser, "expected %<simd%> or %<reduction%> "
"or %<target%>");
cp_parser_require_pragma_eol (parser, pragma_tok);
}
/* Main entry point to OpenMP statement pragmas.  Dispatches on the
   pragma kind to the individual OpenACC/OpenMP construct parsers; the
   combined-construct parsers receive a name buffer seeded with
   "#pragma omp" plus an initially empty clause mask that they extend.  */
static void
cp_parser_omp_construct (cp_parser *parser, cp_token *pragma_tok)
{
tree stmt;
/* Sized for the longest combined-construct name.  */
char p_name[sizeof "#pragma omp teams distribute parallel for simd"];
omp_clause_mask mask (0);
switch (pragma_tok->pragma_kind)
{
case PRAGMA_OACC_CACHE:
stmt = cp_parser_oacc_cache (parser, pragma_tok);
break;
case PRAGMA_OACC_DATA:
stmt = cp_parser_oacc_data (parser, pragma_tok);
break;
case PRAGMA_OACC_ENTER_DATA:
stmt = cp_parser_oacc_enter_exit_data (parser, pragma_tok, true);
break;
case PRAGMA_OACC_EXIT_DATA:
stmt = cp_parser_oacc_enter_exit_data (parser, pragma_tok, false);
break;
case PRAGMA_OACC_KERNELS:
stmt = cp_parser_oacc_kernels (parser, pragma_tok);
break;
case PRAGMA_OACC_LOOP:
stmt = cp_parser_oacc_loop (parser, pragma_tok);
break;
case PRAGMA_OACC_PARALLEL:
stmt = cp_parser_oacc_parallel (parser, pragma_tok);
break;
case PRAGMA_OACC_UPDATE:
stmt = cp_parser_oacc_update (parser, pragma_tok);
break;
case PRAGMA_OACC_WAIT:
stmt = cp_parser_oacc_wait (parser, pragma_tok);
break;
case PRAGMA_OMP_ATOMIC:
/* Atomic handles its own location; nothing to set below.  */
cp_parser_omp_atomic (parser, pragma_tok);
return;
case PRAGMA_OMP_CRITICAL:
stmt = cp_parser_omp_critical (parser, pragma_tok);
break;
case PRAGMA_OMP_DISTRIBUTE:
strcpy (p_name, "#pragma omp");
stmt = cp_parser_omp_distribute (parser, pragma_tok, p_name, mask, NULL);
break;
case PRAGMA_OMP_FOR:
strcpy (p_name, "#pragma omp");
stmt = cp_parser_omp_for (parser, pragma_tok, p_name, mask, NULL);
break;
case PRAGMA_OMP_MASTER:
stmt = cp_parser_omp_master (parser, pragma_tok);
break;
case PRAGMA_OMP_ORDERED:
stmt = cp_parser_omp_ordered (parser, pragma_tok);
break;
case PRAGMA_OMP_PARALLEL:
strcpy (p_name, "#pragma omp");
stmt = cp_parser_omp_parallel (parser, pragma_tok, p_name, mask, NULL);
break;
case PRAGMA_OMP_SECTIONS:
strcpy (p_name, "#pragma omp");
stmt = cp_parser_omp_sections (parser, pragma_tok, p_name, mask, NULL);
break;
case PRAGMA_OMP_SIMD:
strcpy (p_name, "#pragma omp");
stmt = cp_parser_omp_simd (parser, pragma_tok, p_name, mask, NULL);
break;
case PRAGMA_OMP_SINGLE:
stmt = cp_parser_omp_single (parser, pragma_tok);
break;
case PRAGMA_OMP_TASK:
stmt = cp_parser_omp_task (parser, pragma_tok);
break;
case PRAGMA_OMP_TASKGROUP:
stmt = cp_parser_omp_taskgroup (parser, pragma_tok);
break;
case PRAGMA_OMP_TEAMS:
strcpy (p_name, "#pragma omp");
stmt = cp_parser_omp_teams (parser, pragma_tok, p_name, mask, NULL);
break;
default:
gcc_unreachable ();
}
if (stmt)
SET_EXPR_LOCATION (stmt, pragma_tok->location);
}
/* Transactional Memory parsing routines.  */
/* Parse a transaction attribute.
txn-attribute:
attribute
[ [ identifier ] ]
??? Simplify this when C++0x bracket attributes are
implemented properly.
Returns the attribute as a TREE_LIST, or NULL/NULL_TREE when no
attribute (or no identifier) is present.  */
static tree
cp_parser_txn_attribute_opt (cp_parser *parser)
{
cp_token *token;
tree attr_name, attr = NULL;
/* GNU __attribute__ syntax is handled by the generic path.  */
if (cp_lexer_next_token_is_keyword (parser->lexer, RID_ATTRIBUTE))
return cp_parser_attributes_opt (parser);
if (cp_lexer_next_token_is_not (parser->lexer, CPP_OPEN_SQUARE))
return NULL_TREE;
cp_lexer_consume_token (parser->lexer);
/* Second '[' of the "[[ identifier ]]" form; on failure we still try
   to consume a ']' at error1 below.  */
if (!cp_parser_require (parser, CPP_OPEN_SQUARE, RT_OPEN_SQUARE))
goto error1;
token = cp_lexer_peek_token (parser->lexer);
if (token->type == CPP_NAME || token->type == CPP_KEYWORD)
{
token = cp_lexer_consume_token (parser->lexer);
attr_name = (token->type == CPP_KEYWORD
/* For keywords, use the canonical spelling,
not the parsed identifier.  */
? ridpointers[(int) token->keyword]
: token->u.value);
attr = build_tree_list (attr_name, NULL_TREE);
}
else
cp_parser_error (parser, "expected identifier");
/* Inner ']' of the normal path; the one at error1 is the outer ']'.  */
cp_parser_require (parser, CPP_CLOSE_SQUARE, RT_CLOSE_SQUARE);
error1:
cp_parser_require (parser, CPP_CLOSE_SQUARE, RT_CLOSE_SQUARE);
return attr;
}
/* Parse a __transaction_atomic or __transaction_relaxed statement.
transaction-statement:
__transaction_atomic txn-attribute[opt] txn-noexcept-spec[opt]
compound-statement
__transaction_relaxed txn-noexcept-spec[opt] compound-statement
Returns the transaction statement built by begin/finish_transaction_stmt.  */
static tree
cp_parser_transaction (cp_parser *parser, enum rid keyword)
{
/* parser->in_transaction is saved/restored around the body so nested
   scopes see the right context.  */
unsigned char old_in = parser->in_transaction;
unsigned char this_in = 1, new_in;
cp_token *token;
tree stmt, attrs, noex;
gcc_assert (keyword == RID_TRANSACTION_ATOMIC
|| keyword == RID_TRANSACTION_RELAXED);
token = cp_parser_require_keyword (parser, keyword,
(keyword == RID_TRANSACTION_ATOMIC ? RT_TRANSACTION_ATOMIC
: RT_TRANSACTION_RELAXED));
gcc_assert (token != NULL);
if (keyword == RID_TRANSACTION_RELAXED)
this_in |= TM_STMT_ATTR_RELAXED;
else
{
/* Only the atomic form accepts txn attributes (e.g. [[outer]]).  */
attrs = cp_parser_txn_attribute_opt (parser);
if (attrs)
this_in |= parse_tm_stmt_attr (attrs, TM_STMT_ATTR_OUTER);
}
/* Parse a noexcept specification.  */
noex = cp_parser_noexcept_specification_opt (parser, true, NULL, true);
/* Keep track if we're in the lexical scope of an outer transaction.  */
new_in = this_in | (old_in & TM_STMT_ATTR_OUTER);
stmt = begin_transaction_stmt (token->location, NULL, this_in);
parser->in_transaction = new_in;
cp_parser_compound_statement (parser, NULL, false, false);
parser->in_transaction = old_in;
finish_transaction_stmt (stmt, NULL, this_in, noex);
return stmt;
}
/* Parse a __transaction_atomic or __transaction_relaxed expression.
transaction-expression:
__transaction_atomic txn-noexcept-spec[opt] ( expression )
__transaction_relaxed txn-noexcept-spec[opt] ( expression )
Returns the TRANSACTION_EXPR, or error_mark_node when TM is disabled
or the expression is used where an integral constant is required.  */
static tree
cp_parser_transaction_expression (cp_parser *parser, enum rid keyword)
{
unsigned char old_in = parser->in_transaction;
unsigned char this_in = 1;
cp_token *token;
tree expr, noex;
bool noex_expr;
gcc_assert (keyword == RID_TRANSACTION_ATOMIC
|| keyword == RID_TRANSACTION_RELAXED);
/* Diagnose immediately, but keep parsing for error recovery.  */
if (!flag_tm)
error (keyword == RID_TRANSACTION_RELAXED
? G_("%<__transaction_relaxed%> without transactional memory "
"support enabled")
: G_("%<__transaction_atomic%> without transactional memory "
"support enabled"));
token = cp_parser_require_keyword (parser, keyword,
(keyword == RID_TRANSACTION_ATOMIC ? RT_TRANSACTION_ATOMIC
: RT_TRANSACTION_RELAXED));
gcc_assert (token != NULL);
if (keyword == RID_TRANSACTION_RELAXED)
this_in |= TM_STMT_ATTR_RELAXED;
/* Set this early.  This might mean that we allow transaction_cancel in
an expression that we find out later actually has to be a constexpr.
However, we expect that cxx_constant_value will be able to deal with
this; also, if the noexcept has no constexpr, then what we parse next
really is a transaction's body.  */
parser->in_transaction = this_in;
/* Parse a noexcept specification.  */
noex = cp_parser_noexcept_specification_opt (parser, false, &noex_expr,
true);
if (!noex || !noex_expr
|| cp_lexer_peek_token (parser->lexer)->type == CPP_OPEN_PAREN)
{
cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN);
expr = cp_parser_expression (parser);
expr = finish_parenthesized_expr (expr);
cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);
}
else
{
/* The only expression that is available got parsed for the noexcept
already.  noexcept is true then.  */
expr = noex;
noex = boolean_true_node;
}
expr = build_transaction_expr (token->location, expr, this_in, noex);
parser->in_transaction = old_in;
/* Transaction expressions are not integral constant expressions.  */
if (cp_parser_non_integral_constant_expression (parser, NIC_TRANSACTION))
return error_mark_node;
return (flag_tm ? expr : error_mark_node);
}
/* Parse a function-transaction-block.
function-transaction-block:
__transaction_atomic txn-attribute[opt] ctor-initializer[opt]
function-body
__transaction_atomic txn-attribute[opt] function-try-block
__transaction_relaxed ctor-initializer[opt] function-body
__transaction_relaxed function-try-block
Returns whether a ctor-initializer was present, as reported by the
function-body parser.  */
static bool
cp_parser_function_transaction (cp_parser *parser, enum rid keyword)
{
unsigned char old_in = parser->in_transaction;
unsigned char new_in = 1;
tree compound_stmt, stmt, attrs;
bool ctor_initializer_p;
cp_token *token;
gcc_assert (keyword == RID_TRANSACTION_ATOMIC
|| keyword == RID_TRANSACTION_RELAXED);
token = cp_parser_require_keyword (parser, keyword,
(keyword == RID_TRANSACTION_ATOMIC ? RT_TRANSACTION_ATOMIC
: RT_TRANSACTION_RELAXED));
gcc_assert (token != NULL);
if (keyword == RID_TRANSACTION_RELAXED)
new_in |= TM_STMT_ATTR_RELAXED;
else
{
/* Only the atomic form accepts txn attributes.  */
attrs = cp_parser_txn_attribute_opt (parser);
if (attrs)
new_in |= parse_tm_stmt_attr (attrs, TM_STMT_ATTR_OUTER);
}
stmt = begin_transaction_stmt (token->location, &compound_stmt, new_in);
/* The whole function body is inside the transaction.  */
parser->in_transaction = new_in;
if (cp_lexer_next_token_is_keyword (parser->lexer, RID_TRY))
ctor_initializer_p = cp_parser_function_try_block (parser);
else
ctor_initializer_p = cp_parser_ctor_initializer_opt_and_function_body
(parser, /*in_function_try_block=*/false);
parser->in_transaction = old_in;
finish_transaction_stmt (stmt, compound_stmt, new_in, NULL_TREE);
return ctor_initializer_p;
}
/* Parse a __transaction_cancel statement.
cancel-statement:
__transaction_cancel txn-attribute[opt] ;
__transaction_cancel txn-attribute[opt] throw-expression ;
??? Cancel and throw is not yet implemented.
Returns the TM abort call statement, or error_mark_node for the
various context errors diagnosed below.  */
static tree
cp_parser_transaction_cancel (cp_parser *parser)
{
cp_token *token;
bool is_outer = false;
tree stmt, attrs;
token = cp_parser_require_keyword (parser, RID_TRANSACTION_CANCEL,
RT_TRANSACTION_CANCEL);
gcc_assert (token != NULL);
attrs = cp_parser_txn_attribute_opt (parser);
if (attrs)
is_outer = (parse_tm_stmt_attr (attrs, TM_STMT_ATTR_OUTER) != 0);
/* ??? Parse cancel-and-throw here.  */
cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON);
if (!flag_tm)
{
error_at (token->location, "%<__transaction_cancel%> without "
"transactional memory support enabled");
return error_mark_node;
}
else if (parser->in_transaction & TM_STMT_ATTR_RELAXED)
{
/* Cancel is only meaningful inside an atomic transaction.  */
error_at (token->location, "%<__transaction_cancel%> within a "
"%<__transaction_relaxed%>");
return error_mark_node;
}
else if (is_outer)
{
/* [[outer]] cancel must appear inside an outer transaction or a
   transaction_may_cancel_outer function.  */
if ((parser->in_transaction & TM_STMT_ATTR_OUTER) == 0
&& !is_tm_may_cancel_outer (current_function_decl))
{
error_at (token->location, "outer %<__transaction_cancel%> not "
"within outer %<__transaction_atomic%>");
error_at (token->location,
"  or a %<transaction_may_cancel_outer%> function");
return error_mark_node;
}
}
else if (parser->in_transaction == 0)
{
error_at (token->location, "%<__transaction_cancel%> not within "
"%<__transaction_atomic%>");
return error_mark_node;
}
stmt = build_tm_abort_call (token->location, is_outer);
add_stmt (stmt);
return stmt;
}
/* The parser.  A single translation-unit-wide instance; GTY-marked so
   the garbage collector treats it as a root.  */
static GTY (()) cp_parser *the_parser;
/* Special handling for the first token or line in the file.  The first
thing in the file might be #pragma GCC pch_preprocess, which loads a
PCH file, which is a GC collection point.  So we need to handle this
first pragma without benefit of an existing lexer structure.
Always returns one token to the caller in *FIRST_TOKEN.  This is
either the true first token of the file, or the first token after
the initial pragma.  */
static void
cp_parser_initial_pragma (cp_token *first_token)
{
tree name = NULL;
cp_lexer_get_preprocessor_token (NULL, first_token);
/* If it is not the PCH pragma, hand the token back untouched.  */
if (first_token->pragma_kind != PRAGMA_GCC_PCH_PREPROCESS)
return;
cp_lexer_get_preprocessor_token (NULL, first_token);
/* The pragma takes a single string literal: the PCH file name.  */
if (first_token->type == CPP_STRING)
{
name = first_token->u.value;
cp_lexer_get_preprocessor_token (NULL, first_token);
if (first_token->type != CPP_PRAGMA_EOL)
error_at (first_token->location,
"junk at end of %<#pragma GCC pch_preprocess%>");
}
else
error_at (first_token->location, "expected string literal");
/* Skip to the end of the pragma.  */
while (first_token->type != CPP_PRAGMA_EOL && first_token->type != CPP_EOF)
cp_lexer_get_preprocessor_token (NULL, first_token);
/* Now actually load the PCH file.  */
if (name)
c_common_pch_pragma (parse_in, TREE_STRING_POINTER (name));
/* Read one more token to return to our caller.  We have to do this
after reading the PCH file in, since its pointers have to be
live.  */
cp_lexer_get_preprocessor_token (NULL, first_token);
}
/* Parses the grainsize pragma for the _Cilk_for statement.
Syntax:
#pragma cilk grainsize = <VALUE>.
The grainsize expression is handed to cp_parser_cilk_for, which must
parse a _Cilk_for immediately following the pragma.  */
static void
cp_parser_cilk_grainsize (cp_parser *parser, cp_token *pragma_tok)
{
if (cp_parser_require (parser, CPP_EQ, RT_EQ))
{
/* Parse <VALUE> as a binary expression below the NOT-operator
   precedence level.  */
tree exp = cp_parser_binary_expression (parser, false, false,
PREC_NOT_OPERATOR, NULL);
cp_parser_skip_to_pragma_eol (parser, pragma_tok);
if (!exp || exp == error_mark_node)
{
error_at (pragma_tok->location, "invalid grainsize for _Cilk_for");
return;
}
/* Make sure the next token is _Cilk_for, it is invalid otherwise.  */
if (cp_lexer_next_token_is_keyword (parser->lexer, RID_CILK_FOR))
cp_parser_cilk_for (parser, exp);
else
warning_at (cp_lexer_peek_token (parser->lexer)->location, 0,
"%<#pragma cilk grainsize%> is not followed by "
"%<_Cilk_for%>");
return;
}
cp_parser_skip_to_pragma_eol (parser, pragma_tok);
}
/* Normal parsing of a pragma token.  Here we can (and must) use the
   regular lexer.  Returns true when the pragma expanded into a
   statement the caller must account for, false otherwise.  */
static bool
cp_parser_pragma (cp_parser *parser, enum pragma_context context)
{
  cp_token *pragma_tok;
  unsigned int id;

  pragma_tok = cp_lexer_consume_token (parser->lexer);
  gcc_assert (pragma_tok->type == CPP_PRAGMA);
  parser->lexer->in_pragma = true;

  id = pragma_tok->pragma_kind;
  /* A pending #pragma omp declare simd must be diagnosed before any
     unrelated pragma is processed.  */
  if (id != PRAGMA_OMP_DECLARE_REDUCTION)
    cp_ensure_no_omp_declare_simd (parser);
  switch (id)
    {
    case PRAGMA_GCC_PCH_PREPROCESS:
      /* Only valid as the very first thing in the file; see
	 cp_parser_initial_pragma.  */
      error_at (pragma_tok->location,
		"%<#pragma GCC pch_preprocess%> must be first");
      break;

    /* The stand-alone OpenMP directives below are only allowed inside
       compound statements; pragma_stmt gets a dedicated diagnostic,
       everything else falls to bad_stmt.  */
    case PRAGMA_OMP_BARRIER:
      switch (context)
	{
	case pragma_compound:
	  cp_parser_omp_barrier (parser, pragma_tok);
	  return false;
	case pragma_stmt:
	  error_at (pragma_tok->location, "%<#pragma omp barrier%> may only be "
		    "used in compound statements");
	  break;
	default:
	  goto bad_stmt;
	}
      break;

    case PRAGMA_OMP_FLUSH:
      switch (context)
	{
	case pragma_compound:
	  cp_parser_omp_flush (parser, pragma_tok);
	  return false;
	case pragma_stmt:
	  error_at (pragma_tok->location, "%<#pragma omp flush%> may only be "
		    "used in compound statements");
	  break;
	default:
	  goto bad_stmt;
	}
      break;

    case PRAGMA_OMP_TASKWAIT:
      switch (context)
	{
	case pragma_compound:
	  cp_parser_omp_taskwait (parser, pragma_tok);
	  return false;
	case pragma_stmt:
	  error_at (pragma_tok->location,
		    "%<#pragma omp taskwait%> may only be "
		    "used in compound statements");
	  break;
	default:
	  goto bad_stmt;
	}
      break;

    case PRAGMA_OMP_TASKYIELD:
      switch (context)
	{
	case pragma_compound:
	  cp_parser_omp_taskyield (parser, pragma_tok);
	  return false;
	case pragma_stmt:
	  error_at (pragma_tok->location,
		    "%<#pragma omp taskyield%> may only be "
		    "used in compound statements");
	  break;
	default:
	  goto bad_stmt;
	}
      break;

    case PRAGMA_OMP_CANCEL:
      switch (context)
	{
	case pragma_compound:
	  cp_parser_omp_cancel (parser, pragma_tok);
	  return false;
	case pragma_stmt:
	  error_at (pragma_tok->location,
		    "%<#pragma omp cancel%> may only be "
		    "used in compound statements");
	  break;
	default:
	  goto bad_stmt;
	}
      break;

    case PRAGMA_OMP_CANCELLATION_POINT:
      switch (context)
	{
	case pragma_compound:
	  cp_parser_omp_cancellation_point (parser, pragma_tok);
	  return false;
	case pragma_stmt:
	  error_at (pragma_tok->location,
		    "%<#pragma omp cancellation point%> may only be "
		    "used in compound statements");
	  break;
	default:
	  goto bad_stmt;
	}
      break;

    case PRAGMA_OMP_THREADPRIVATE:
      cp_parser_omp_threadprivate (parser, pragma_tok);
      return false;

    case PRAGMA_OMP_DECLARE_REDUCTION:
      cp_parser_omp_declare (parser, pragma_tok, context);
      return false;

    /* Constructs that introduce a structured block: hand off to the
       shared OpenMP/OpenACC construct parser.  */
    case PRAGMA_OACC_CACHE:
    case PRAGMA_OACC_DATA:
    case PRAGMA_OACC_ENTER_DATA:
    case PRAGMA_OACC_EXIT_DATA:
    case PRAGMA_OACC_KERNELS:
    case PRAGMA_OACC_PARALLEL:
    case PRAGMA_OACC_LOOP:
    case PRAGMA_OACC_UPDATE:
    case PRAGMA_OACC_WAIT:
    case PRAGMA_OMP_ATOMIC:
    case PRAGMA_OMP_CRITICAL:
    case PRAGMA_OMP_DISTRIBUTE:
    case PRAGMA_OMP_FOR:
    case PRAGMA_OMP_MASTER:
    case PRAGMA_OMP_ORDERED:
    case PRAGMA_OMP_PARALLEL:
    case PRAGMA_OMP_SECTIONS:
    case PRAGMA_OMP_SIMD:
    case PRAGMA_OMP_SINGLE:
    case PRAGMA_OMP_TASK:
    case PRAGMA_OMP_TASKGROUP:
    case PRAGMA_OMP_TEAMS:
      if (context != pragma_stmt && context != pragma_compound)
	goto bad_stmt;
      cp_parser_omp_construct (parser, pragma_tok);
      return true;

    case PRAGMA_OMP_TARGET:
      return cp_parser_omp_target (parser, pragma_tok, context);

    case PRAGMA_OMP_END_DECLARE_TARGET:
      cp_parser_omp_end_declare_target (parser, pragma_tok);
      return false;

    case PRAGMA_OMP_SECTION:
      /* Handled inside cp_parser_omp_construct when valid; seeing it
	 here means it is outside a sections construct.  */
      error_at (pragma_tok->location,
		"%<#pragma omp section%> may only be used in "
		"%<#pragma omp sections%> construct");
      break;

    case PRAGMA_IVDEP:
      {
	if (context == pragma_external)
	  {
	    error_at (pragma_tok->location,
		      "%<#pragma GCC ivdep%> must be inside a function");
	    break;
	  }
	cp_parser_skip_to_pragma_eol (parser, pragma_tok);
	cp_token *tok;
	tok = cp_lexer_peek_token (the_parser->lexer);
	/* ivdep only makes sense immediately before a loop.  */
	if (tok->type != CPP_KEYWORD
	    || (tok->keyword != RID_FOR && tok->keyword != RID_WHILE
		&& tok->keyword != RID_DO))
	  {
	    cp_parser_error (parser, "for, while or do statement expected");
	    return false;
	  }
	cp_parser_iteration_statement (parser, true);
	return true;
      }

    case PRAGMA_CILK_SIMD:
      if (context == pragma_external)
	{
	  error_at (pragma_tok->location,
		    "%<#pragma simd%> must be inside a function");
	  break;
	}
      cp_parser_cilk_simd (parser, pragma_tok);
      return true;

    case PRAGMA_CILK_GRAINSIZE:
      if (context == pragma_external)
	{
	  error_at (pragma_tok->location,
		    "%<#pragma cilk grainsize%> must be inside a function");
	  break;
	}

      /* Ignore the pragma if Cilk Plus is not enabled.  */
      if (flag_cilkplus)
	{
	  cp_parser_cilk_grainsize (parser, pragma_tok);
	  return true;
	}
      else
	{
	  error_at (pragma_tok->location, "-fcilkplus must be enabled to use "
		    "%<#pragma cilk grainsize%>");
	  break;
	}

    default:
      /* Anything else must be an externally registered pragma handler.  */
      gcc_assert (id >= PRAGMA_FIRST_EXTERNAL);
      c_invoke_pragma_handler (id);
      break;

    bad_stmt:
      cp_parser_error (parser, "expected declaration specifiers");
      break;
    }

  /* Error paths fall through to here: discard the rest of the pragma.  */
  cp_parser_skip_to_pragma_eol (parser, pragma_tok);
  return false;
}
/* The interface the pragma parsers have to the lexer.  Peeks at the
   next token, stores its value in *VALUE, and returns a normalized
   token type.  */
enum cpp_ttype
pragma_lex (tree *value)
{
  cp_token *tok;
  enum cpp_ttype ret;

  tok = cp_lexer_peek_token (the_parser->lexer);

  ret = tok->type;
  *value = tok->u.value;

  if (ret == CPP_PRAGMA_EOL || ret == CPP_EOF)
    /* Normalize end-of-pragma to EOF for the pragma handlers; the
       token is deliberately not consumed.  */
    ret = CPP_EOF;
  else if (ret == CPP_STRING)
    /* Let the full string-literal parser handle the token (and any
       concatenation).  */
    *value = cp_parser_string_literal (the_parser, false, false);
  else
    {
      cp_lexer_consume_token (the_parser->lexer);
      if (ret == CPP_KEYWORD)
	/* Pragma handlers see keywords as plain identifiers.  */
	ret = CPP_NAME;
    }

  return ret;
}
/* External interface.  */

/* Parse one entire translation unit.  A second invocation in the same
   process is a fatal error: the C++ front end cannot be re-entered.  */
void
c_parse_file (void)
{
  static bool parsed_once = false;

  if (parsed_once)
    fatal_error (input_location,
		 "inter-module optimizations not implemented for C++");
  parsed_once = true;

  the_parser = cp_parser_new ();

  /* Defer access checks only when access control is actually on.  */
  push_deferring_access_checks (flag_access_control
				? dk_no_deferred : dk_no_check);
  cp_parser_translation_unit (the_parser);
  the_parser = NULL;
}
/* Parses the Cilk Plus #pragma simd and SIMD-enabled function attribute's
   vectorlength clause:
   Syntax:
   vectorlength ( constant-expression )
   Returns the (possibly extended) clause chain, or error_mark_node on a
   malformed parenthesized expression.  */
static tree
cp_parser_cilk_simd_vectorlength (cp_parser *parser, tree clauses,
				  bool is_simd_fn)
{
  location_t loc = cp_lexer_peek_token (parser->lexer)->location;
  tree expr;
  /* The vectorlength clause in #pragma simd behaves exactly like OpenMP's
     safelen clause.  Thus, vectorlength is represented as OMP 4.0
     safelen.  For SIMD-enabled function it is represented by OMP 4.0
     simdlen.  */
  if (!is_simd_fn)
    check_no_duplicate_clause (clauses, OMP_CLAUSE_SAFELEN, "vectorlength",
			       loc);
  else
    check_no_duplicate_clause (clauses, OMP_CLAUSE_SIMDLEN, "vectorlength",
			       loc);

  if (!cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN))
    return error_mark_node;

  expr = cp_parser_constant_expression (parser);
  expr = maybe_constant_value (expr);

  /* If expr == error_mark_node, then don't emit any errors nor
     create a clause.  if any of the above functions returns
     error mark node then they would have emitted an error message.  */
  if (expr == error_mark_node)
    ;
  else if (!TREE_TYPE (expr)
	   || !TREE_CONSTANT (expr)
	   || !INTEGRAL_TYPE_P (TREE_TYPE (expr)))
    error_at (loc, "vectorlength must be an integer constant");
  /* TREE_CONSTANT is necessarily true here (the previous arm rejected
     non-constants), so this re-check is redundant but harmless;
     exact_log2 returning -1 flags a non-power-of-2.  */
  else if (TREE_CONSTANT (expr)
	   && exact_log2 (TREE_INT_CST_LOW (expr)) == -1)
    error_at (loc, "vectorlength must be a power of 2");
  else
    {
      /* Chain the new safelen/simdlen clause onto the front of the
	 clause list.  */
      tree c;
      if (!is_simd_fn)
	{
	  c = build_omp_clause (loc, OMP_CLAUSE_SAFELEN);
	  OMP_CLAUSE_SAFELEN_EXPR (c) = expr;
	  OMP_CLAUSE_CHAIN (c) = clauses;
	  clauses = c;
	}
      else
	{
	  c = build_omp_clause (loc, OMP_CLAUSE_SIMDLEN);
	  OMP_CLAUSE_SIMDLEN_EXPR (c) = expr;
	  OMP_CLAUSE_CHAIN (c) = clauses;
	  clauses = c;
	}
    }

  if (!cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN))
    return error_mark_node;

  return clauses;
}
/* Handles the Cilk Plus #pragma simd linear clause.
   Syntax:
   linear ( simd-linear-variable-list )
   simd-linear-variable-list:
   simd-linear-variable
   simd-linear-variable-list , simd-linear-variable
   simd-linear-variable:
   id-expression
   id-expression : simd-linear-step
   simd-linear-step:
   conditional-expression
   Returns the extended clause chain, or error_mark_node if any part of
   the clause is ill-formed.  */
static tree
cp_parser_cilk_simd_linear (cp_parser *parser, tree clauses)
{
  location_t loc = cp_lexer_peek_token (parser->lexer)->location;
  if (!cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN))
    return clauses;
  if (cp_lexer_next_token_is_not (parser->lexer, CPP_NAME))
    {
      cp_parser_error (parser, "expected identifier");
      cp_parser_skip_to_closing_parenthesis (parser, false, false, true);
      return error_mark_node;
    }

  /* The ':' before a linear step must not be corrected to '::'.  */
  bool saved_colon_corrects_to_scope_p = parser->colon_corrects_to_scope_p;
  parser->colon_corrects_to_scope_p = false;
  while (1)
    {
      cp_token *token = cp_lexer_peek_token (parser->lexer);
      if (cp_lexer_next_token_is_not (parser->lexer, CPP_NAME))
	{
	  cp_parser_error (parser, "expected variable-name");
	  clauses = error_mark_node;
	  break;
	}

      tree var_name = cp_parser_id_expression (parser, false, true, NULL,
					       false, false);
      tree decl = cp_parser_lookup_name_simple (parser, var_name,
						token->location);
      if (decl == error_mark_node)
	{
	  /* Note: parsing continues to the separator check below so the
	     rest of the list is still consumed.  */
	  cp_parser_name_lookup_error (parser, var_name, decl, NLE_NULL,
				       token->location);
	  clauses = error_mark_node;
	}
      else
	{
	  tree e = NULL_TREE;
	  tree step_size = integer_one_node;

	  /* If present, parse the linear step.  Otherwise, assume the default
	     value of 1.  */
	  if (cp_lexer_peek_token (parser->lexer)->type == CPP_COLON)
	    {
	      cp_lexer_consume_token (parser->lexer);

	      e = cp_parser_assignment_expression (parser);
	      e = maybe_constant_value (e);

	      if (e == error_mark_node)
		{
		  /* If an error has occurred, then the whole pragma is
		     considered ill-formed.  Thus, no reason to keep
		     parsing.  */
		  clauses = error_mark_node;
		  break;
		}
	      else if (type_dependent_expression_p (e)
		       || value_dependent_expression_p (e)
		       || (TREE_TYPE (e)
			   && INTEGRAL_TYPE_P (TREE_TYPE (e))
			   && (TREE_CONSTANT (e)
			       || DECL_P (e))))
		step_size = e;
	      else
		cp_parser_error (parser,
				 "step size must be an integer constant "
				 "expression or an integer variable");
	    }

	  /* Use the OMP_CLAUSE_LINEAR,  which has the same semantics.  */
	  tree l = build_omp_clause (loc, OMP_CLAUSE_LINEAR);
	  OMP_CLAUSE_DECL (l) = decl;
	  OMP_CLAUSE_LINEAR_STEP (l) = step_size;
	  OMP_CLAUSE_CHAIN (l) = clauses;
	  clauses = l;
	}

      /* List items are comma-separated; ')' ends the clause.  */
      if (cp_lexer_next_token_is (parser->lexer, CPP_COMMA))
	cp_lexer_consume_token (parser->lexer);
      else if (cp_lexer_next_token_is (parser->lexer, CPP_CLOSE_PAREN))
	break;
      else
	{
	  error_at (cp_lexer_peek_token (parser->lexer)->location,
		    "expected %<,%> or %<)%> after %qE", decl);
	  clauses = error_mark_node;
	  break;
	}
    }
  parser->colon_corrects_to_scope_p = saved_colon_corrects_to_scope_p;
  cp_parser_skip_to_closing_parenthesis (parser, false, false, true);
  return clauses;
}
/* Returns the name of the next clause.  If the clause is not
   recognized, then PRAGMA_CILK_CLAUSE_NONE is returned and the next
   token is not consumed.  Otherwise, the appropriate enum from the
   pragma_simd_clause is returned and the token is consumed.  */
static pragma_omp_clause
cp_parser_cilk_simd_clause_name (cp_parser *parser)
{
  /* All identifier-spelled clauses; "private" is a keyword and is
     handled separately below.  */
  static const struct
  {
    const char *name;
    pragma_omp_clause code;
  } cilk_simd_clauses[] =
    {
      { "vectorlength", PRAGMA_CILK_CLAUSE_VECTORLENGTH },
      { "linear", PRAGMA_CILK_CLAUSE_LINEAR },
      { "firstprivate", PRAGMA_CILK_CLAUSE_FIRSTPRIVATE },
      { "lastprivate", PRAGMA_CILK_CLAUSE_LASTPRIVATE },
      { "reduction", PRAGMA_CILK_CLAUSE_REDUCTION }
    };

  cp_token *token = cp_lexer_peek_token (parser->lexer);
  pragma_omp_clause result = PRAGMA_CILK_CLAUSE_NONE;

  if (token->keyword == RID_PRIVATE)
    result = PRAGMA_CILK_CLAUSE_PRIVATE;
  else if (token->u.value && token->type == CPP_NAME)
    {
      const char *id = IDENTIFIER_POINTER (token->u.value);
      for (size_t i = 0;
	   i < sizeof (cilk_simd_clauses) / sizeof (cilk_simd_clauses[0]);
	   i++)
	if (strcmp (id, cilk_simd_clauses[i].name) == 0)
	  {
	    result = cilk_simd_clauses[i].code;
	    break;
	  }
    }

  /* Only a recognized clause name consumes the token.  */
  if (result != PRAGMA_CILK_CLAUSE_NONE)
    cp_lexer_consume_token (parser->lexer);
  return result;
}
/* Parses all the #pragma simd clauses.  Returns a list of clauses found.  */
static tree
cp_parser_cilk_simd_all_clauses (cp_parser *parser, cp_token *pragma_token)
{
  tree clauses = NULL_TREE;

  /* Keep collecting clauses until the pragma line ends or an error has
     poisoned the clause chain.  */
  while (cp_lexer_next_token_is_not (parser->lexer, CPP_PRAGMA_EOL)
	 && clauses != error_mark_node)
    {
      pragma_omp_clause c_kind = cp_parser_cilk_simd_clause_name (parser);

      switch (c_kind)
	{
	case PRAGMA_CILK_CLAUSE_VECTORLENGTH:
	  clauses = cp_parser_cilk_simd_vectorlength (parser, clauses, false);
	  break;

	case PRAGMA_CILK_CLAUSE_LINEAR:
	  clauses = cp_parser_cilk_simd_linear (parser, clauses);
	  break;

	/* The remaining clauses share semantics with their OpenMP 4.0
	   counterparts, so reuse the OMP var-list/reduction parsers.  */
	case PRAGMA_CILK_CLAUSE_PRIVATE:
	  clauses = cp_parser_omp_var_list (parser, OMP_CLAUSE_PRIVATE,
					    clauses);
	  break;

	case PRAGMA_CILK_CLAUSE_FIRSTPRIVATE:
	  clauses = cp_parser_omp_var_list (parser, OMP_CLAUSE_FIRSTPRIVATE,
					    clauses);
	  break;

	case PRAGMA_CILK_CLAUSE_LASTPRIVATE:
	  clauses = cp_parser_omp_var_list (parser, OMP_CLAUSE_LASTPRIVATE,
					    clauses);
	  break;

	case PRAGMA_CILK_CLAUSE_REDUCTION:
	  clauses = cp_parser_omp_clause_reduction (parser, clauses);
	  break;

	default:
	  /* Unknown clause: poison the chain so the loop terminates.  */
	  clauses = error_mark_node;
	  cp_parser_error (parser, "expected %<#pragma simd%> clause");
	  break;
	}
    }

  cp_parser_skip_to_pragma_eol (parser, pragma_token);

  if (clauses == error_mark_node)
    return error_mark_node;
  return c_finish_cilk_clauses (clauses);
}
/* Main entry-point for parsing Cilk Plus <#pragma simd> for loops.
   Parses the clause list, requires the following statement to be a
   `for', and builds the loop as a CILK_SIMD construct inside an OMP
   structured block.  */
static void
cp_parser_cilk_simd (cp_parser *parser, cp_token *pragma_token)
{
  tree clauses = cp_parser_cilk_simd_all_clauses (parser, pragma_token);

  /* Clause errors were already diagnosed; nothing more to do.  */
  if (clauses == error_mark_node)
    return;

  if (cp_lexer_next_token_is_not_keyword (parser->lexer, RID_FOR))
    {
      error_at (cp_lexer_peek_token (parser->lexer)->location,
		"for statement expected");
      return;
    }

  /* Parse the loop body within its own structured block.  */
  tree sb = begin_omp_structured_block ();
  int save = cp_parser_begin_omp_structured_block (parser);
  tree ret = cp_parser_omp_for_loop (parser, CILK_SIMD, clauses, NULL);
  if (ret)
    /* Validate the loop body against Cilk Plus restrictions.  */
    cpp_validate_cilk_plus_loop (OMP_FOR_BODY (ret));
  cp_parser_end_omp_structured_block (parser, save);
  add_stmt (finish_omp_structured_block (sb));
}
/* Main entry-point for parsing Cilk Plus _Cilk_for
   loops.  The return value is error_mark_node
   when errors happen and CILK_FOR tree on success.
   GRAIN is the grainsize expression (from #pragma cilk grainsize) and
   is carried on an OMP_CLAUSE_SCHEDULE clause of kind CILKFOR.  */
static tree
cp_parser_cilk_for (cp_parser *parser, tree grain)
{
  /* Callers must only invoke this with _Cilk_for as the next token.  */
  if (cp_lexer_next_token_is_not_keyword (parser->lexer, RID_CILK_FOR))
    gcc_unreachable ();

  tree sb = begin_omp_structured_block ();
  int save = cp_parser_begin_omp_structured_block (parser);

  /* Encode the grainsize as a schedule(cilkfor, grain) clause.  */
  tree clauses = build_omp_clause (EXPR_LOCATION (grain), OMP_CLAUSE_SCHEDULE);
  OMP_CLAUSE_SCHEDULE_KIND (clauses) = OMP_CLAUSE_SCHEDULE_CILKFOR;
  OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (clauses) = grain;
  clauses = finish_omp_clauses (clauses);

  tree ret = cp_parser_omp_for_loop (parser, CILK_FOR, clauses, NULL);
  if (ret)
    cpp_validate_cilk_plus_loop (ret);
  else
    ret = error_mark_node;

  cp_parser_end_omp_structured_block (parser, save);
  add_stmt (finish_omp_structured_block (sb));
  return ret;
}
/* Create an identifier for a generic parameter type (a synthesized
   template parameter implied by `auto' or a concept identifier).  */

/* Monotonic counter so each synthesized parameter gets a distinct
   "auto:N" name; GTY(()) keeps it rooted for GC/PCH.  */
static GTY(()) int generic_parm_count;
static tree
make_generic_type_name ()
{
  /* 32 bytes comfortably holds "auto:" plus any int value.  */
  char buf[32];
  sprintf (buf, "auto:%d", ++generic_parm_count);
  return get_identifier (buf);
}
/* Predicate that behaves as is_auto_or_concept but matches the parent
   node of the generic type rather than the generic type itself.  This
   allows for type transformation in add_implicit_template_parms.  */
static inline bool
tree_type_is_auto_or_concept (const_tree t)
{
  /* A node with no type cannot carry an auto/concept placeholder.  */
  if (!TREE_TYPE (t))
    return false;
  return is_auto_or_concept (TREE_TYPE (t));
}
/* Add an implicit template type parameter to the CURRENT_TEMPLATE_PARMS
   (creating a new template parameter list if necessary).  Returns the newly
   created template type parm.  */
tree
synthesize_implicit_template_parm  (cp_parser *parser)
{
  gcc_assert (current_binding_level->kind == sk_function_parms);

  /* We are either continuing a function template that already contains implicit
     template parameters, creating a new fully-implicit function template, or
     extending an existing explicit function template with implicit template
     parameters.  */

  cp_binding_level *const entry_scope = current_binding_level;

  bool become_template = false;
  cp_binding_level *parent_scope = 0;

  if (parser->implicit_template_scope)
    {
      /* Continuing: reuse the scope already set up for earlier implicit
	 parameters of this declarator.  */
      gcc_assert (parser->implicit_template_parms);

      current_binding_level = parser->implicit_template_scope;
    }
  else
    {
      /* Roll back to the existing template parameter scope (in the case of
	 extending an explicit function template) or introduce a new template
	 parameter scope ahead of the function parameter scope (or class scope
	 in the case of out-of-line member definitions).  The function scope is
	 added back after template parameter synthesis below.  */

      cp_binding_level *scope = entry_scope;

      while (scope->kind == sk_function_parms)
	{
	  parent_scope = scope;
	  scope = scope->level_chain;
	}
      if (current_class_type && !LAMBDA_TYPE_P (current_class_type))
	{
	  /* If not defining a class, then any class scope is a scope level in
	     an out-of-line member definition.  In this case simply wind back
	     beyond the first such scope to inject the template parameter list.
	     Otherwise wind back to the class being defined.  The latter can
	     occur in class member friend declarations such as:

	       class A {
		 void foo (auto);
	       };
	       class B {
		 friend void A::foo (auto);
	       };

	    The template parameter list synthesized for the friend declaration
	    must be injected in the scope of 'B'.  This can also occur in
	    erroneous cases such as:

	       struct A {
		 struct B {
		   void foo (auto);
		 };
		 void B::foo (auto) {}
	       };

	    Here the attempted definition of 'B::foo' within 'A' is ill-formed
	    but, nevertheless, the template parameter list synthesized for the
	    declarator should be injected into the scope of 'A' as if the
	    ill-formed template was specified explicitly.  */

	  while (scope->kind == sk_class && !scope->defining_class_p)
	    {
	      parent_scope = scope;
	      scope = scope->level_chain;
	    }
	}

      current_binding_level = scope;

      if (scope->kind != sk_template_parms
	  || !function_being_declared_is_template_p (parser))
	{
	  /* Introduce a new template parameter list for implicit template
	     parameters.  */

	  become_template = true;

	  parser->implicit_template_scope
	      = begin_scope (sk_template_parms, NULL);

	  ++processing_template_decl;

	  parser->fully_implicit_function_template_p = true;
	  ++parser->num_template_parameter_lists;
	}
      else
	{
	  /* Synthesize implicit template parameters at the end of the explicit
	     template parameter list.  */

	  gcc_assert (current_template_parms);

	  parser->implicit_template_scope = scope;

	  tree v = INNERMOST_TEMPLATE_PARMS (current_template_parms);
	  parser->implicit_template_parms
	    = TREE_VEC_ELT (v, TREE_VEC_LENGTH (v) - 1);
	}
    }

  /* Synthesize a new template parameter and track the current template
     parameter chain with implicit_template_parms.  */

  tree synth_id = make_generic_type_name ();
  tree synth_tmpl_parm = finish_template_type_parm (class_type_node,
						    synth_id);
  tree new_parm
    = process_template_parm (parser->implicit_template_parms,
			     input_location,
			     build_tree_list (NULL_TREE, synth_tmpl_parm),
			     /*non_type=*/false,
			     /*param_pack=*/false);

  /* Advance implicit_template_parms to always point at the last
     synthesized parameter.  */
  if (parser->implicit_template_parms)
    parser->implicit_template_parms
      = TREE_CHAIN (parser->implicit_template_parms);
  else
    parser->implicit_template_parms = new_parm;

  tree new_type = TREE_TYPE (getdecls ());

  /* If creating a fully implicit function template, start the new implicit
     template parameter list with this synthesized type, otherwise grow the
     current template parameter list.  */

  if (become_template)
    {
      /* Re-link the function parameter scope removed above behind the
	 new template parameter scope.  */
      parent_scope->level_chain = current_binding_level;

      tree new_parms = make_tree_vec (1);
      TREE_VEC_ELT (new_parms, 0) = parser->implicit_template_parms;
      current_template_parms = tree_cons (size_int (processing_template_decl),
					  new_parms, current_template_parms);
    }
  else
    {
      tree& new_parms = INNERMOST_TEMPLATE_PARMS (current_template_parms);
      int new_parm_idx = TREE_VEC_LENGTH (new_parms);
      new_parms = grow_tree_vec (new_parms, new_parm_idx + 1);
      TREE_VEC_ELT (new_parms, new_parm_idx) = parser->implicit_template_parms;
    }

  current_binding_level = entry_scope;

  return new_type;
}
/* Finish the declaration of a fully implicit function template.  Such a
   template has no explicit template parameter list so has not been through the
   normal template head and tail processing.  synthesize_implicit_template_parm
   tries to do the head; this tries to do the tail.  MEMBER_DECL_OPT should be
   provided if the declaration is a class member such that its template
   declaration can be completed.  If MEMBER_DECL_OPT is provided the finished
   form is returned.  Otherwise NULL_TREE is returned.  */
tree
finish_fully_implicit_template (cp_parser *parser, tree member_decl_opt)
{
  gcc_assert (parser->fully_implicit_function_template_p);

  /* Implicit templates cannot be virtual: diagnose and clear the flag
     so downstream processing sees a consistent declaration.  */
  if (member_decl_opt && member_decl_opt != error_mark_node)
    {
      if (DECL_VIRTUAL_P (member_decl_opt))
	{
	  error_at (DECL_SOURCE_LOCATION (member_decl_opt),
		    "implicit templates may not be %<virtual%>");
	  DECL_VIRTUAL_P (member_decl_opt) = false;
	}
    }

  if (member_decl_opt)
    member_decl_opt = finish_member_template_decl (member_decl_opt);
  end_template_decl ();

  /* Reset the parser state established by
     synthesize_implicit_template_parm.  */
  parser->fully_implicit_function_template_p = false;
  --parser->num_template_parameter_lists;

  return member_decl_opt;
}
#include "gt-cp-parser.h"
|
pmm-OpenMP.c | #include <stdlib.h> // biblioteca con funciones atoi(), malloc() y free()
#include <stdio.h>  // biblioteca donde se encuentra la función printf()
#include <string.h> // strcmp() -- was used below without being included
#include <time.h>   // time(), used to seed rand() -- was missing
#ifdef _OPENMP
#include <omp.h>
#else
#define omp_set_dynamic(0);
#define omp_set_num_threads(4);
#endif
/* Parallel dense matrix product A = B * C with OpenMP.
   Usage: prog <N_rows> <N_threads|max> <chunk|default> <static|dynamic|guided>
   All four arguments are required.  */
int main(int argc, char ** argv){
    int **A, **B, **C;
    int i, k, j, N;
    double cgt1, cgt2, ncgt; // execution time
    time_t t;

    // Seed rand()
    srand((unsigned) time(&t));

    /* argv[1]..argv[4] are all read below, so five argv entries are
       required.  (The original check was `argc < 4`, which let a
       4-argument invocation read argv[4] out of bounds.)  */
    if(argc < 5){
        fprintf(stderr,"Error: %s <N_filas> <N_hebras/max> <Chunk default/(0...i)> <Sched (static, dynamic, guided)>\n", argv[0]);
        exit(-1);
    }
    N = atoi(argv[1]);

    // == OpenMP directives
    // ====================================================>
    int chunk = 0;
    omp_sched_t kind;
    if(strcmp(argv[3], "default") == 0)
        omp_get_schedule(&kind, &chunk);    // keep the runtime's default chunk
    else
        chunk = atoi(argv[3]);

    /* Select OMP_SCHEDULE.  Use the named omp_sched_t enumerators
       instead of the raw integers 1/2/3 the original passed.  */
    if(strcmp(argv[4], "static") == 0) omp_set_schedule(omp_sched_static, chunk);
    else if(strcmp(argv[4], "dynamic") == 0) omp_set_schedule(omp_sched_dynamic, chunk);
    else if(strcmp(argv[4], "guided") == 0) omp_set_schedule(omp_sched_guided, chunk);
    else {
        fprintf(stderr, "Error en el metodo de asignacion de trabajo a las hebras (static, dynamic, guided)\n");
        exit(-1);
    }

    int nhebras;
    if(strcmp(argv[2], "max") == 0) omp_set_num_threads(omp_get_num_procs());
    else {
        nhebras = atoi(argv[2]);
        omp_set_num_threads(nhebras);
    }

    // == Memory allocation
    // ====================================================>
    A = (int**) malloc (N*sizeof(int*));
    B = (int**) malloc (N*sizeof(int*));
    C = (int**) malloc (N*sizeof(int*));
    if(A == NULL || B == NULL || C == NULL){
        fprintf(stderr, "Error en la reserva de espacio para las matrices\n");
        exit(-2);
    }
    /* Row allocation is serial on purpose: calling exit() from inside a
       parallel region is not conforming OpenMP, and the original
       parallel-for around malloc gained nothing.  */
    for(i = 0; i<N; i++){
        A[i] = (int*) malloc (N*sizeof(int));
        B[i] = (int*) malloc (N*sizeof(int));
        C[i] = (int*) malloc (N*sizeof(int));
        if( A[i] == NULL || B[i] == NULL || C[i] == NULL){
            fprintf(stderr, "Error en la reserva de espacio para las matrices\n");
            exit(-2);
        }
    }

    // == Initialization: A zeroed, B and C pseudo-random in [0,8)
    // ====================================================>
    #pragma omp parallel for shared(A,B,C,N) private(i,k) default(none) schedule(runtime)
    for(i = 0; i<N; i++){
        for(k = 0; k<N; k++){
            A[i][k] = 0;
            B[i][k] = rand() % 8;
            C[i][k] = rand() % 8;
        }
    }

    // == Computation (classic i-j-k triple loop)
    // ====================================================>
    cgt1 = omp_get_wtime();
    #pragma omp parallel for shared(A,B,C,N) private(i,j,k) default(none) schedule(runtime)
    for(i = 0; i<N; i++){
        for(j = 0; j<N; j++)
            for(k = 0; k<N; k++)
                A[i][j] += B[i][k] * C[k][j];
    }
    cgt2 = omp_get_wtime();
    ncgt = (double)(cgt2 - cgt1);

    // == Report
    // ====================================================>
    /* %zu for size_t byte counts and %d for int -- the original mixed
       %u/%lu with int arguments, which is undefined behavior.  */
    printf("Tiempo(seg.):%11.9f\n", ncgt);
    printf("Tamaño total reservado por las matrices: %zu bytes\n", (size_t)3*N*N*sizeof(int));
    printf("Tamaño de las matrices: %dx%d -> %zu bytes\n", N, N, (size_t)N*N*sizeof(int));
    // Printing the first and last result elements keeps the compiler
    // from optimizing the multiplication away.
    printf("A[0][0] = %d ... A[N-1][N-1] = %d \n", A[0][0], A[N-1][N-1]);
    if(N < 4){
        printf("\n----------- Matriz B ----------- \n");
        for(i = 0; i<N; i++){
            for(k = 0; k<N; k++)
                printf("%d\t", B[i][k]);
            printf("\n");
        }
        printf("\n----------- Matriz C ----------- \n");
        for(i = 0; i<N; i++){
            for(k = 0; k<N; k++)
                printf("%d\t", C[i][k]);
            printf("\n");
        }
        printf("\n----------- Matriz A (Resultado) ----------- \n");
        for(i = 0; i<N; i++){
            for(k = 0; k<N; k++)
                printf("%d\t", A[i][k]);
            printf("\n");
        }
    }

    // == Cleanup
    // ====================================================>
    #pragma omp parallel for private(i) shared(A,B,C,N) default(none) schedule(runtime)
    for(i = 0; i<N; i++){
        free(A[i]);
        free(B[i]);
        free(C[i]);
    }
    free(A);
    free(B);
    free(C);
    return 0;
}
tool_available.c | // The OpenMP standard defines 3 ways of providing ompt_start_tool:
// 1. "statically-linking the tool’s definition of ompt_start_tool into an OpenMP application"
// RUN: %libomp-compile -DCODE -DTOOL && %libomp-run | FileCheck %s
// Note: We should compile the tool without -fopenmp as other tools developer
// would do. Otherwise this test may pass for the wrong reasons on Darwin.
// RUN: %clang %flags -DTOOL -shared -fPIC %s -o %T/tool.so
// 2. "introducing a dynamically-linked library that includes the tool’s definition of ompt_start_tool into the application’s address space"
// 2.1 Link with tool during compilation
// RUN: %libomp-compile -DCODE %no-as-needed-flag %T/tool.so && %libomp-run | FileCheck %s
// 2.2 Link with tool during compilation, but AFTER the runtime
// RUN: %libomp-compile -DCODE -lomp %no-as-needed-flag %T/tool.so && %libomp-run | FileCheck %s
// 2.3 Inject tool via the dynamic loader
// RUN: %libomp-compile -DCODE && %preload-tool %libomp-run | FileCheck %s
// 3. "providing the name of a dynamically-linked library appropriate for the architecture and operating system used by the application in the tool-libraries-var ICV"
// RUN: %libomp-compile -DCODE && env OMP_TOOL_LIBRARIES=%T/tool.so %libomp-run | FileCheck %s
// REQUIRES: ompt
/*
* This file contains code for an OMPT shared library tool to be
* loaded and the code for the OpenMP executable.
* -DTOOL enables the code for the tool during compilation
* -DCODE enables the code for the executable during compilation
*/
#ifdef CODE
#include "omp.h"
/* Test executable: spin up a tiny parallel region so the OpenMP runtime
   initializes, then exit so it shuts down (firing the tool's finalizer).  */
int main()
{
  #pragma omp parallel num_threads(2)
  {
  }

  // Check if libomp supports the callbacks for this test.
  // CHECK-NOT: {{^}}0: Could not register callback
  // CHECK: {{^}}0: NULL_POINTER=[[NULL:.*$]]
  // CHECK: {{^}}0: ompt_event_runtime_shutdown

  return 0;
}
#endif /* CODE */
#ifdef TOOL
#include <stdio.h>
#include <ompt.h>
/* Tool initializer registered via ompt_start_tool below; prints the
   marker line the CHECK patterns in the CODE half look for.  */
int ompt_initialize(
  ompt_function_lookup_t lookup,
  ompt_data_t* tool_data)
{
  printf("0: NULL_POINTER=%p\n", (void*)NULL);
  return 1; // non-zero = success: keep the tool active
}
/* Tool finalizer; the printed line is matched by the last CHECK above.  */
void ompt_finalize(ompt_data_t* tool_data)
{
  printf("0: ompt_event_runtime_shutdown\n");
}
/* Entry point the OpenMP runtime looks up to detect an OMPT tool.
   Returning non-NULL activates the initialize/finalize callbacks.  */
ompt_start_tool_result_t* ompt_start_tool(
  unsigned int omp_version,
  const char *runtime_version)
{
  /* Static storage: the runtime keeps this pointer past the call.  */
  static ompt_start_tool_result_t ompt_start_tool_result = {&ompt_initialize,&ompt_finalize, 0};
  return &ompt_start_tool_result;
}
#endif /* TOOL */
|
bc_random.h | /*
* bc_random.h
* LLAMA Graph Analytics
*
* Copyright 2014
* The President and Fellows of Harvard College.
*
* Copyright 2014
* Oracle Labs.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#ifndef LL_GENERATED_CPP_BC_RANDOM_H
#define LL_GENERATED_CPP_BC_RANDOM_H
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <float.h>
#include <limits.h>
#include <cmath>
#include <algorithm>
#include <omp.h>
#include "llama/ll_bfs_template.h"
#include "llama/ll_writable_graph.h"
#include "benchmarks/benchmark.h"
// BFS/DFS definitions for the procedure
// Forward/backward BFS used by the randomized betweenness-centrality
// benchmark (Brandes-style accumulation from a single source).
template <class Graph>
class bc_random_bfs : public ll_bfs_template
    <Graph, short, true, false, false, true>
{
public:
    bc_random_bfs(Graph& _G, float*& _G_BC, node_t& _s,
                  float*& _G_sigma, float*& _G_delta)
        : ll_bfs_template<Graph, short, true, false, false, true>(_G),
          G(_G), G_BC(_G_BC), s(_s), G_sigma(_G_sigma), G_delta(_G_delta){}

private:  // list of variables
    Graph& G;
    float*& G_BC;     // accumulated centrality scores (across sources)
    node_t& s;        // current BFS source
    float*& G_sigma;  // shortest-path counts per node
    float*& G_delta;  // dependency values (written in reverse phase)

protected:
    // Forward phase: for each BFS "down" edge v->w, add sigma[v] into
    // sigma[w].  Atomics are used because several frontier vertices may
    // share the same successor w.
    virtual void visit_fw(node_t v)
    {
        {
            ll_edge_iterator iter;
            G.out_iter_begin(iter, v);
            for (edge_t w_idx = G.out_iter_next(iter);
                 w_idx != LL_NIL_EDGE;
                 w_idx = G.out_iter_next(iter)) {
                if (!this->is_down_edge(w_idx)) continue;
                node_t w = LL_ITER_OUT_NEXT_NODE(G, iter, w_idx);
                float sigma_w_prv = 0.0 ;
                sigma_w_prv = ((float)(0.000000)) ;
                sigma_w_prv = sigma_w_prv + G_sigma[v] ;
                ATOMIC_ADD(&G_sigma[w], sigma_w_prv);
            }
        }
    }

    // Reverse phase: accumulate the dependency of v over its BFS
    // children w (delta[v] = sum sigma[v]/sigma[w] * (1 + delta[w]))
    // and fold it into the running BC score.  The source itself is
    // excluded, as in the standard Brandes formulation.
    virtual void visit_rv(node_t v)
    {
        if (v != s)
        {
            float __S3 = 0.0 ;
            __S3 = ((float)(0.000000)) ;
            ll_edge_iterator iter;
            G.out_iter_begin(iter, v);
            for (edge_t w_idx = G.out_iter_next(iter);
                 w_idx != LL_NIL_EDGE;
                 w_idx = G.out_iter_next(iter)) {
                if (!this->is_down_edge(w_idx)) continue;
                node_t w = LL_ITER_OUT_NEXT_NODE(G, iter, w_idx);
                // NOTE(review): relies on the reverse sweep visiting w
                // (deeper level) before v so G_delta[w] is already set
                // -- confirm against ll_bfs_template's ordering.
                __S3 = __S3 + G_sigma[v] / G_sigma[w] * (1 + G_delta[w]) ;
            }
            G.set_node_prop(G_delta, v, __S3);
            G.set_node_prop(G_BC, v, G_BC[v] + G_delta[v]);
        }
    }

    // All edges are admissible for this traversal.
    virtual bool check_navigator(node_t v, edge_t v_idx) {return true;}
};
/**
 * Betweenness Centrality - Randomized Algorithm
 *
 * Approximates BC by running the Brandes accumulation from K randomly
 * chosen source nodes instead of from every node.
 */
template <class Graph>
class ll_b_bc_random : public ll_benchmark<Graph> {

    int K;          // number of random sources to sample
    float* G_BC;    // per-node centrality scores (benchmark output)

public:

    /**
     * Create the benchmark
     *
     * @param k the number of seeds
     */
    ll_b_bc_random(int k)
        : ll_benchmark<Graph>("Betweenness Centrality - Randomized") {
        K = k;
        this->create_auto_array_for_nodes(G_BC);
    }

    /**
     * Destroy the benchmark
     */
    virtual ~ll_b_bc_random(void) {
    }

    /**
     * Run the benchmark
     *
     * @return the numerical result, if applicable
     */
    virtual double run(void) {

        Graph& G = *this->_graph;
        int32_t k = 0 ;
        ll_memory_helper m;

        float* G_sigma = m.allocate<float>(G.max_nodes());
        // G_delta is not zeroed here; each entry is written by the
        // reverse sweep before it is read (see bc_random_bfs::visit_rv).
        float* G_delta = m.allocate<float>(G.max_nodes());

        k = 0 ;

        // Reset the output scores once, up front.
        #pragma omp parallel for
        for (node_t t0 = 0; t0 < G.max_nodes(); t0 ++)
            G.set_node_prop(G_BC, t0, (float)0);

        while (k < K)
        {
            node_t s;
            s = G.pick_random_node() ;

            // Per-source reset: sigma is 0 everywhere except sigma[s] = 1.
            #pragma omp parallel for
            for (node_t t1 = 0; t1 < G.max_nodes(); t1 ++)
                G.set_node_prop(G_sigma, t1, (float)0);
            G.set_node_prop(G_sigma, s, (float)1);

            // Forward pass counts shortest paths; reverse pass
            // accumulates dependencies into G_BC.
            bc_random_bfs<Graph> _BFS(G, G_BC, s, G_sigma, G_delta);
            _BFS.prepare(s);
            _BFS.do_bfs_forward();
            _BFS.do_bfs_reverse();
            k = k + 1 ;
        }

        return 0;
    }


    /**
     * Finalize the benchmark
     *
     * @return the updated numerical result, if applicable
     */
    virtual double finalize(void) {
        // Report the maximum centrality score as the scalar result.
        float max = 0;
        for (node_t n = 0; n < this->_graph->max_nodes(); n++) {
            if (G_BC[n] > max) max = G_BC[n];
        }
        return max;
    }


    /**
     * Print the results
     *
     * @param f the output file
     */
    virtual void print_results(FILE* f) {
        print_results_part(f, this->_graph, G_BC);
    }
};
#endif
// | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ `
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Riccardo Rossi
//
//
#if !defined(KRATOS_RESIDUAL_BASED_ELIMINATION_BUILDER_AND_SOLVER )
#define KRATOS_RESIDUAL_BASED_ELIMINATION_BUILDER_AND_SOLVER
/* System includes */
#include <set>
#include <unordered_set>
#ifdef _OPENMP
#include <omp.h>
#endif
/* External includes */
/* Project includes */
#include "utilities/timer.h"
#include "includes/define.h"
#include "includes/key_hash.h"
#include "solving_strategies/builder_and_solvers/builder_and_solver.h"
#include "includes/model_part.h"
namespace Kratos
{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
/**
* @class ResidualBasedEliminationBuilderAndSolver
* @ingroup KratosCore
* @brief Current class provides an implementation for standard elimination builder and solving operations.
* @details The RHS is constituted by the unbalanced loads (residual)
* Degrees of freedom are reordered putting the restrained degrees of freedom at
* the end of the system ordered in reverse order with respect to the DofSet.
* Imposition of the dirichlet conditions is naturally dealt with as the residual already contains this information.
* Calculation of the reactions involves a cost very similiar to the calculation of the total residual
* @author Riccardo Rossi
*/
template<class TSparseSpace,
class TDenseSpace, //= DenseSpace<double>,
class TLinearSolver //= LinearSolver<TSparseSpace,TDenseSpace>
>
class ResidualBasedEliminationBuilderAndSolver
: public BuilderAndSolver< TSparseSpace, TDenseSpace, TLinearSolver >
{
public:
///@name Type Definitions
///@{
/// Pointer definition of ResidualBasedEliminationBuilderAndSolverWithConstraints
/// NOTE(review): the comment above mentions the "WithConstraints" variant;
/// this class has no constraints handling -- likely a copy-paste leftover.
KRATOS_CLASS_POINTER_DEFINITION(ResidualBasedEliminationBuilderAndSolver);
/// Definition of the base class
typedef BuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver> BaseType;
/// The definition of the current class
typedef ResidualBasedEliminationBuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver> ClassType;
/// Definition of the classes from the base class (re-exported for local use)
typedef typename BaseType::SizeType SizeType;
typedef typename BaseType::IndexType IndexType;
typedef typename BaseType::TSchemeType TSchemeType;
typedef typename BaseType::TDataType TDataType;
typedef typename BaseType::DofsArrayType DofsArrayType;
typedef typename BaseType::TSystemMatrixType TSystemMatrixType;
typedef typename BaseType::TSystemVectorType TSystemVectorType;
typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType;
typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType;
typedef typename BaseType::TSystemMatrixPointerType TSystemMatrixPointerType;
typedef typename BaseType::TSystemVectorPointerType TSystemVectorPointerType;
/// Definition of the equation id vector
typedef Element::EquationIdVectorType EquationIdVectorType;
typedef Element::DofsVectorType DofsVectorType;
/// Node definition
typedef Node<3> NodeType;
/// Containers definition
typedef typename BaseType::NodesArrayType NodesArrayType;
typedef typename BaseType::ElementsArrayType ElementsArrayType;
typedef typename BaseType::ConditionsArrayType ConditionsArrayType;
typedef typename BaseType::ElementsContainerType ElementsContainerType;
///@}
///@name Life Cycle
///@{
/**
* @brief Default constructor
*/
explicit ResidualBasedEliminationBuilderAndSolver() : BaseType()
{
}
/**
* @brief Default constructor. (with parameters)
* @param pNewLinearSystemSolver The linear solver for the system of equations
* @param ThisParameters The configuration parameters (validated against GetDefaultParameters)
*/
explicit ResidualBasedEliminationBuilderAndSolver(
typename TLinearSolver::Pointer pNewLinearSystemSolver,
Parameters ThisParameters
) : BaseType(pNewLinearSystemSolver)
{
// Validate and assign defaults
ThisParameters = this->ValidateAndAssignParameters(ThisParameters, this->GetDefaultParameters());
this->AssignSettings(ThisParameters);
}
/**
* @brief Constructor.
* @param pNewLinearSystemSolver The linear solver for the system of equations
*/
explicit ResidualBasedEliminationBuilderAndSolver(
typename TLinearSolver::Pointer pNewLinearSystemSolver)
: BaseType(pNewLinearSystemSolver)
{
}
/** Destructor.
*/
~ResidualBasedEliminationBuilderAndSolver() override
{
}
/**
 * @brief Factory method: builds a fresh instance of this builder-and-solver.
 * @param pNewLinearSystemSolver The linear solver for the system of equations
 * @param ThisParameters The configuration parameters
 * @return A shared pointer to the newly created builder-and-solver
 */
typename BaseType::Pointer Create(
    typename TLinearSolver::Pointer pNewLinearSystemSolver,
    Parameters ThisParameters
    ) const override
{
    auto p_new_builder_and_solver = Kratos::make_shared<ClassType>(pNewLinearSystemSolver, ThisParameters);
    return p_new_builder_and_solver;
}
///@}
///@name Operators
///@{
///@}
///@name Operations
///@{
/**
* @brief Function to perform the build of the RHS. The vector could be sized as the total number
* of dofs or as the number of unrestrained ones
* @param pScheme The integration scheme considered
* @param rModelPart The model part of the problem to solve
* @param rA The LHS matrix
* @param rb The RHS vector
*/
void Build(
typename TSchemeType::Pointer pScheme,
ModelPart& rModelPart,
TSystemMatrixType& rA,
TSystemVectorType& rb
) override
{
KRATOS_TRY
KRATOS_ERROR_IF(!pScheme) << "No scheme provided!" << std::endl;
// Getting the elements from the model
ElementsArrayType& r_elements_array = rModelPart.Elements();
// Getting the array of the conditions
ConditionsArrayType& r_conditions_array = rModelPart.Conditions();
// Getting the elements from the model
const int nelements = static_cast<int>(r_elements_array.size());
// Getting the array of the conditions
const int nconditions = static_cast<int>(r_conditions_array.size());
const ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo();
const auto it_elem_begin = r_elements_array.begin();
const auto it_cond_begin = r_conditions_array.begin();
// Contributions to the system; "firstprivate" below gives every thread its
// own scratch copies, so local systems are assembled without data sharing
LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0, 0);
LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0);
// Vector containing the localization in the system of the different terms
EquationIdVectorType equation_id;
// Assemble all elements
double start_build = OpenMPUtils::GetCurrentTime();
#pragma omp parallel firstprivate(LHS_Contribution, RHS_Contribution, equation_id )
{
// "nowait": a thread done with elements may start on conditions at once;
// the barriers at the end of the second loop / parallel region still
// guarantee completion before the timing below
#pragma omp for schedule(guided, 512) nowait
for (int k = 0; k < nelements; ++k) {
auto it_elem = it_elem_begin + k;
// Detect if the element is active or not. If the user did not make any choice the element is active by default
bool element_is_active = true;
if (it_elem->IsDefined(ACTIVE))
element_is_active = it_elem->Is(ACTIVE);
if (element_is_active) {
// Calculate elemental contribution
pScheme->CalculateSystemContributions(*it_elem, LHS_Contribution, RHS_Contribution, equation_id, r_current_process_info);
// Assemble the elemental contribution; with USE_LOCKS_IN_ASSEMBLY the
// per-DoF lock array serializes concurrent writes to the same row
#ifdef USE_LOCKS_IN_ASSEMBLY
Assemble(rA, rb, LHS_Contribution, RHS_Contribution, equation_id, mLockArray);
#else
Assemble(rA, rb, LHS_Contribution, RHS_Contribution, equation_id);
#endif
// Clean local elemental memory
pScheme->CleanMemory(*it_elem);
}
}
#pragma omp for schedule(guided, 512)
for (int k = 0; k < nconditions; ++k) {
auto it_cond = it_cond_begin + k;
// Detect if the condition is active or not. If the user did not make any choice the condition is active by default
bool condition_is_active = true;
if (it_cond->IsDefined(ACTIVE))
condition_is_active = it_cond->Is(ACTIVE);
if (condition_is_active) {
// Calculate conditional contribution
pScheme->CalculateSystemContributions(*it_cond, LHS_Contribution, RHS_Contribution, equation_id, r_current_process_info);
#ifdef USE_LOCKS_IN_ASSEMBLY
Assemble(rA, rb, LHS_Contribution, RHS_Contribution, equation_id, mLockArray);
#else
Assemble(rA, rb, LHS_Contribution, RHS_Contribution, equation_id);
#endif
// Clean local conditional memory
pScheme->CleanMemory(*it_cond);
}
}
}
const double stop_build = OpenMPUtils::GetCurrentTime();
KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolver", (this->GetEchoLevel() >=1 && rModelPart.GetCommunicator().MyPID() == 0)) << "System build time: " << stop_build - start_build << std::endl;
KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolver", this->GetEchoLevel() > 2 && rModelPart.GetCommunicator().MyPID() == 0) << "Finished building" << std::endl;
KRATOS_CATCH("")
}
/**
* @brief Function to perform the building of the LHS
* @details Depending on the implementation choosen the size of the matrix could be equal to the total number of Dofs or to the number of unrestrained dofs
* @param pScheme The integration scheme considered
* @param rModelPart The model part of the problem to solve
* @param rA The LHS matrix
*/
void BuildLHS(
typename TSchemeType::Pointer pScheme,
ModelPart& rModelPart,
TSystemMatrixType& rA
) override
{
KRATOS_TRY
KRATOS_ERROR_IF(!pScheme) << "No scheme provided!" << std::endl;
// Getting the elements from the model
ElementsArrayType& r_elements_array = rModelPart.Elements();
// Getting the array of the conditions
ConditionsArrayType& r_conditions_array = rModelPart.Conditions();
// Getting the elements from the model
const int nelements = static_cast<int>(r_elements_array.size());
// Getting the array of the conditions
const int nconditions = static_cast<int>(r_conditions_array.size());
const ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo();
const auto it_elem_begin = r_elements_array.begin();
const auto it_cond_begin = r_conditions_array.begin();
// Resetting to zero the vector of reactions
TSparseSpace::SetToZero(*(BaseType::mpReactionsVector));
// Contributions to the system; firstprivate gives each thread its own scratch
LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0, 0);
// Vector containing the localization in the system of the different terms
EquationIdVectorType equation_id;
#pragma omp parallel firstprivate(LHS_Contribution, equation_id )
{
// nowait: threads move straight from elements to conditions
#pragma omp for schedule(guided, 512) nowait
for (int k = 0; k < nelements; ++k) {
auto it_elem = it_elem_begin + k;
// Detect if the element is active or not. If the user did not make any choice the element is active by default
bool element_is_active = true;
if (it_elem->IsDefined(ACTIVE))
element_is_active = it_elem->Is(ACTIVE);
if (element_is_active) {
// Calculate elemental contribution
pScheme->CalculateLHSContribution(*it_elem, LHS_Contribution, equation_id, r_current_process_info);
// Assemble the elemental contribution
AssembleLHS(rA, LHS_Contribution, equation_id);
// Clean local elemental memory
pScheme->CleanMemory(*it_elem);
}
}
#pragma omp for schedule(guided, 512)
for (int k = 0; k < nconditions; ++k) {
auto it_cond = it_cond_begin + k;
// Detect if the condition is active or not. If the user did not make any choice the condition is active by default
bool condition_is_active = true;
if (it_cond->IsDefined(ACTIVE))
condition_is_active = it_cond->Is(ACTIVE);
if (condition_is_active) {
// Calculate conditional contribution
pScheme->CalculateLHSContribution(*it_cond, LHS_Contribution, equation_id, r_current_process_info);
// Assemble the conditional contribution
AssembleLHS(rA, LHS_Contribution, equation_id);
// Clean local conditional memory
pScheme->CleanMemory(*it_cond);
}
}
}
KRATOS_CATCH("")
}
/**
* @brief Build a rectangular matrix of size n*N where "n" is the number of unrestrained degrees of freedom
* and "N" is the total number of degrees of freedom involved.
* @details This matrix is obtained by building the total matrix without the lines corresponding to the fixed
* degrees of freedom (but keeping the columns!!)
* @param pScheme The integration scheme considered
* @param rModelPart The model part of the problem to solve
* @param rA The LHS matrix
*/
void BuildLHS_CompleteOnFreeRows(
typename TSchemeType::Pointer pScheme,
ModelPart& rModelPart,
TSystemMatrixType& rA
) override
{
KRATOS_TRY
KRATOS_ERROR_IF(!pScheme) << "No scheme provided!" << std::endl;
// Getting the elements from the model
ElementsArrayType& r_elements_array = rModelPart.Elements();
// Getting the array of the conditions
ConditionsArrayType& r_conditions_array = rModelPart.Conditions();
// Getting the elements from the model
const int nelements = static_cast<int>(r_elements_array.size());
// Getting the array of the conditions
const int nconditions = static_cast<int>(r_conditions_array.size());
const ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo();
const auto it_elem_begin = r_elements_array.begin();
const auto it_cond_begin = r_conditions_array.begin();
// Resetting to zero the vector of reactions
TSparseSpace::SetToZero(*(BaseType::mpReactionsVector));
// Contributions to the system; firstprivate gives each thread its own scratch
LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0, 0);
// Vector containing the localization in the system of the different terms
EquationIdVectorType equation_id;
#pragma omp parallel firstprivate(LHS_Contribution, equation_id )
{
#pragma omp for schedule(guided, 512) nowait
for (int k = 0; k < nelements; ++k) {
auto it_elem = it_elem_begin + k;
// Detect if the element is active or not. If the user did not make any choice the element is active by default
bool element_is_active = true;
if (it_elem->IsDefined(ACTIVE))
element_is_active = it_elem->Is(ACTIVE);
if (element_is_active) {
// Calculate elemental contribution
pScheme->CalculateLHSContribution(*it_elem, LHS_Contribution, equation_id, r_current_process_info);
// Assemble the elemental contribution (free rows only, all columns kept)
AssembleLHSCompleteOnFreeRows(rA, LHS_Contribution, equation_id);
// Clean local elemental memory
pScheme->CleanMemory(*it_elem);
}
}
#pragma omp for schedule(guided, 512)
for (int k = 0; k < nconditions; ++k) {
auto it_cond = it_cond_begin + k;
// Detect if the condition is active or not. If the user did not make any choice the condition is active by default
bool condition_is_active = true;
if (it_cond->IsDefined(ACTIVE))
condition_is_active = it_cond->Is(ACTIVE);
if (condition_is_active) {
// Calculate conditional contribution
pScheme->CalculateLHSContribution(*it_cond, LHS_Contribution, equation_id, r_current_process_info);
// Assemble the conditional contribution (free rows only, all columns kept)
AssembleLHSCompleteOnFreeRows(rA, LHS_Contribution, equation_id);
// Clean local conditional memory
pScheme->CleanMemory(*it_cond);
}
}
}
KRATOS_CATCH("")
}
/**
 * @brief Direct call to the linear system solver.
 * @details Skips the solver entirely when the RHS is empty or all zero,
 * in which case the solution update is trivially the zero vector.
 * @param rA The LHS matrix
 * @param rDx The Unknowns vector
 * @param rb The RHS vector
 */
void SystemSolve(
    TSystemMatrixType& rA,
    TSystemVectorType& rDx,
    TSystemVectorType& rb
    ) override
{
    KRATOS_TRY

    // Norm of the RHS; an empty vector counts as zero
    const double norm_b = (TSparseSpace::Size(rb) != 0) ? TSparseSpace::TwoNorm(rb) : 0.0;

    if (norm_b != 0.0) {
        // Do solve
        BaseType::mpLinearSystemSolver->Solve(rA, rDx, rb);
    } else {
        // Nothing to solve: the update is zero
        TSparseSpace::SetToZero(rDx);
    }

    // Prints informations about the current time
    KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolver", this->GetEchoLevel() > 1) << *(BaseType::mpLinearSystemSolver) << std::endl;

    KRATOS_CATCH("")
}
/**
 * @brief Call to the linear system solver, providing additional physical data when the solver requests it.
 * @param rA The LHS matrix
 * @param rDx The Unknowns vector
 * @param rb The RHS vector
 * @param rModelPart The model part of the problem to solve
 */
void SystemSolveWithPhysics(
    TSystemMatrixType& rA,
    TSystemVectorType& rDx,
    TSystemVectorType& rb,
    ModelPart& rModelPart
    )
{
    KRATOS_TRY

    // Norm of the RHS; an empty vector counts as zero
    const double norm_b = (TSparseSpace::Size(rb) != 0) ? TSparseSpace::TwoNorm(rb) : 0.0;

    if (norm_b != 0.0) {
        // Some solvers (e.g. smoothed aggregation AMG) need physical data such as the DofSet
        if (BaseType::mpLinearSystemSolver->AdditionalPhysicalDataIsNeeded())
            BaseType::mpLinearSystemSolver->ProvideAdditionalData(rA, rDx, rb, BaseType::mDofSet, rModelPart);

        // Do solve
        BaseType::mpLinearSystemSolver->Solve(rA, rDx, rb);
    } else {
        TSparseSpace::SetToZero(rDx);
        KRATOS_WARNING_IF("ResidualBasedEliminationBuilderAndSolver", rModelPart.GetCommunicator().MyPID() == 0) << "ATTENTION! setting the RHS to zero!" << std::endl;
    }

    // Prints informations about the current time
    KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolver", this->GetEchoLevel() > 1 && rModelPart.GetCommunicator().MyPID() == 0) << *(BaseType::mpLinearSystemSolver) << std::endl;

    KRATOS_CATCH("")
}
/**
* @brief Function to perform the building and solving phase at the same time.
* @details It is ideally the fastest and safer function to use when it is possible to solve
* just after building
* @param pScheme The integration scheme considered
* @param rModelPart The model part of the problem to solve
* @param rA The LHS matrix
* @param rDx The Unknowns vector
* @param rb The RHS vector
*/
void BuildAndSolve(
typename TSchemeType::Pointer pScheme,
ModelPart& rModelPart,
TSystemMatrixType& rA,
TSystemVectorType& rDx,
TSystemVectorType& rb
) override
{
KRATOS_TRY
Timer::Start("Build");
Build(pScheme, rModelPart, rA, rb);
Timer::Stop("Build");
// Does nothing...dirichlet conditions are naturally dealt with in defining the residual
ApplyDirichletConditions(pScheme, rModelPart, rA, rDx, rb);
// Echo level 3 dumps the full system before and after the solve
KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolver", ( this->GetEchoLevel() == 3)) << "Before the solution of the system" << "\nSystem Matrix = " << rA << "\nUnknowns vector = " << rDx << "\nRHS vector = " << rb << std::endl;
const double start_solve = OpenMPUtils::GetCurrentTime();
Timer::Start("Solve");
SystemSolveWithPhysics(rA, rDx, rb, rModelPart);
Timer::Stop("Solve");
const double stop_solve = OpenMPUtils::GetCurrentTime();
KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolver", (this->GetEchoLevel() >=1 && rModelPart.GetCommunicator().MyPID() == 0)) << "System solve time: " << stop_solve - start_solve << std::endl;
KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolver", ( this->GetEchoLevel() == 3)) << "After the solution of the system" << "\nSystem Matrix = " << rA << "\nUnknowns vector = " << rDx << "\nRHS vector = " << rb << std::endl;
KRATOS_CATCH("")
}
/**
* @brief Corresponds to the previews, but the System's matrix is considered already built and only the RHS is built again
* @param pScheme The integration scheme considered
* @param rModelPart The model part of the problem to solve
* @param rA The LHS matrix
* @param rDx The Unknowns vector
* @param rb The RHS vector
*/
void BuildRHSAndSolve(
typename TSchemeType::Pointer pScheme,
ModelPart& rModelPart,
TSystemMatrixType& rA,
TSystemVectorType& rDx,
TSystemVectorType& rb
) override
{
KRATOS_TRY
// Only the residual is reassembled; rA is reused as-is
BuildRHS(pScheme, rModelPart, rb);
SystemSolve(rA, rDx, rb);
KRATOS_CATCH("")
}
/**
* @brief Function to perform the build of the RHS.
* @details The vector could be sized as the total number of dofs or as the number of unrestrained ones
* @param pScheme The integration scheme considered
* @param rModelPart The model part of the problem to solve
* @param rb The RHS vector
*/
void BuildRHS(
typename TSchemeType::Pointer pScheme,
ModelPart& rModelPart,
TSystemVectorType& rb
) override
{
KRATOS_TRY
// Resetting to zero the vector of reactions (only when reactions are requested)
if(BaseType::mCalculateReactionsFlag) {
TSparseSpace::SetToZero(*(BaseType::mpReactionsVector));
}
// Getting the Elements
ElementsArrayType& r_elements_array = rModelPart.Elements();
// Getting the array of the conditions
ConditionsArrayType& r_conditions_array = rModelPart.Conditions();
const ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo();
// Contributions to the system; firstprivate gives each thread its own scratch
LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0);
// Vector containing the localization in the system of the different terms
EquationIdVectorType equation_id;
// Assemble all elements
#pragma omp parallel firstprivate( RHS_Contribution, equation_id)
{
const auto it_elem_begin = r_elements_array.begin();
const int nelements = static_cast<int>(r_elements_array.size());
#pragma omp for schedule(guided, 512) nowait
for (int i = 0; i < nelements; ++i) {
auto it_elem = it_elem_begin + i;
// Detect if the element is active or not. If the user did not make any choice the element is active by default
bool element_is_active = true;
if (it_elem->IsDefined(ACTIVE))
element_is_active = it_elem->Is(ACTIVE);
if (element_is_active) {
// Calculate elemental Right Hand Side Contribution
pScheme->CalculateRHSContribution(*it_elem, RHS_Contribution, equation_id, r_current_process_info);
// Assemble the elemental contribution
AssembleRHS(rb, RHS_Contribution, equation_id);
}
}
// Assemble all conditions
const auto it_cond_begin = r_conditions_array.begin();
const int nconditions = static_cast<int>(r_conditions_array.size());
#pragma omp for schedule(guided, 512)
for (int i = 0; i < nconditions; ++i) {
auto it_cond = it_cond_begin + i;
// Detect if the condition is active or not. If the user did not make any choice the condition is active by default
bool condition_is_active = true;
if (it_cond->IsDefined(ACTIVE))
condition_is_active = it_cond->Is(ACTIVE);
if (condition_is_active) {
// Calculate conditional contribution
pScheme->CalculateRHSContribution(*it_cond, RHS_Contribution, equation_id, r_current_process_info);
// Assemble the conditional contribution
AssembleRHS(rb, RHS_Contribution, equation_id);
}
}
}
KRATOS_CATCH("")
}
/**
* @brief Builds the list of the DofSets involved in the problem by "asking" to each element
* and condition its Dofs.
* @details The list of dofs is stores insde the BuilderAndSolver as it is closely connected to the
* way the matrix and RHS are built
* @param pScheme The integration scheme considered
* @param rModelPart The model part of the problem to solve
*/
void SetUpDofSet(
typename TSchemeType::Pointer pScheme,
ModelPart& rModelPart
) override
{
KRATOS_TRY;
KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolver", this->GetEchoLevel() > 1 && rModelPart.GetCommunicator().MyPID() == 0) << "Setting up the dofs" << std::endl;
// Gets the array of elements from the modeler
ElementsArrayType& r_elements_array = rModelPart.Elements();
const int nelements = static_cast<int>(r_elements_array.size());
DofsVectorType elemental_dof_list;
const ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo();
SizeType nthreads = OpenMPUtils::GetNumThreads();
// One unordered set of DoF pointers per thread, so each thread collects its
// DoFs without locking; the sets are merged afterwards
typedef std::unordered_set < NodeType::DofType::Pointer, DofPointerHasher> set_type;
std::vector<set_type> dofs_aux_list(nthreads);
for (int i = 0; i < static_cast<int>(nthreads); ++i) {
dofs_aux_list[i].reserve(nelements);
}
#pragma omp parallel for firstprivate(nelements, elemental_dof_list)
for (int i = 0; i < static_cast<int>(nelements); ++i) {
auto it_elem = r_elements_array.begin() + i;
const IndexType this_thread_id = OpenMPUtils::ThisThread();
// Gets list of Dof involved on every element
pScheme->GetDofList(*it_elem, elemental_dof_list, r_current_process_info);
dofs_aux_list[this_thread_id].insert(elemental_dof_list.begin(), elemental_dof_list.end());
}
ConditionsArrayType& r_conditions_array = rModelPart.Conditions();
const int nconditions = static_cast<int>(r_conditions_array.size());
#pragma omp parallel for firstprivate(nconditions, elemental_dof_list)
for (int i = 0; i < nconditions; ++i) {
auto it_cond = r_conditions_array.begin() + i;
const IndexType this_thread_id = OpenMPUtils::ThisThread();
// Gets list of Dof involved on every condition
pScheme->GetDofList(*it_cond, elemental_dof_list, r_current_process_info);
dofs_aux_list[this_thread_id].insert(elemental_dof_list.begin(), elemental_dof_list.end());
}
// Here we do a reduction in a tree so to have everything on thread 0:
// at each step the upper half of the sets is merged into the lower half
SizeType old_max = nthreads;
SizeType new_max = ceil(0.5*static_cast<double>(old_max));
while (new_max >= 1 && new_max != old_max) {
#pragma omp parallel for
for (int i = 0; i < static_cast<int>(new_max); ++i) {
if (i + new_max < old_max) {
dofs_aux_list[i].insert(dofs_aux_list[i + new_max].begin(), dofs_aux_list[i + new_max].end());
dofs_aux_list[i + new_max].clear();
}
}
old_max = new_max;
new_max = ceil(0.5*static_cast<double>(old_max));
}
// Copy the merged, de-duplicated DoF pointers into the sorted DofSet
DofsArrayType dof_temp;
BaseType::mDofSet = DofsArrayType();
dof_temp.reserve(dofs_aux_list[0].size());
for (auto it = dofs_aux_list[0].begin(); it != dofs_aux_list[0].end(); ++it) {
dof_temp.push_back(*it);
}
dof_temp.Sort();
BaseType::mDofSet = dof_temp;
// Throws an execption if there are no Degrees of freedom involved in the analysis
KRATOS_ERROR_IF(BaseType::mDofSet.size() == 0) << "No degrees of freedom!" << std::endl;
BaseType::mDofSetIsInitialized = true;
KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolver", this->GetEchoLevel() > 2 && rModelPart.GetCommunicator().MyPID() == 0) << "Finished setting up the dofs" << std::endl;
// Rebuild the per-DoF lock array: destroy old locks, then create one per DoF
#ifdef USE_LOCKS_IN_ASSEMBLY
if (mLockArray.size() != 0) {
for (int i = 0; i < static_cast<int>(mLockArray.size()); i++)
omp_destroy_lock(&mLockArray[i]);
}
mLockArray.resize(BaseType::mDofSet.size());
for (int i = 0; i < static_cast<int>(mLockArray.size()); i++)
omp_init_lock(&mLockArray[i]);
#endif
// If reactions are to be calculated, we check if all the dofs have reactions defined
// This is tobe done only in debug mode
#ifdef KRATOS_DEBUG
if(BaseType::GetCalculateReactionsFlag()) {
for(auto dof_iterator = BaseType::mDofSet.begin(); dof_iterator != BaseType::mDofSet.end(); ++dof_iterator) {
KRATOS_ERROR_IF_NOT(dof_iterator->HasReaction()) << "Reaction variable not set for the following : " << std::endl
<< "Node : " << dof_iterator->Id() << std::endl
<< "Dof : " << (*dof_iterator) << std::endl << "Not possible to calculate reactions." << std::endl;
}
}
#endif
KRATOS_CATCH("");
}
/**
 * @brief Organises the dofset in order to speed up the building phase
 * @details Free DoFs receive ascending equation ids starting at 0, while
 * fixed (restrained) DoFs receive descending ids starting from the back of
 * the DofSet. Consequently, any EquationId greater than or equal to
 * mEquationSystemSize identifies a restrained degree of freedom.
 * @param rModelPart The model part of the problem to solve
 */
void SetUpSystem(ModelPart& rModelPart) override
{
    int free_id = 0;
    int fix_id = BaseType::mDofSet.size();

    for (auto it_dof = BaseType::mDofSet.begin(); it_dof != BaseType::mDofSet.end(); ++it_dof) {
        if (it_dof->IsFixed()) {
            it_dof->SetEquationId(--fix_id);
        } else {
            it_dof->SetEquationId(free_id++);
        }
    }

    // After the loop, fix_id has been decremented once per fixed DoF and thus
    // equals the number of free DoFs, i.e. the size of the reduced system
    BaseType::mEquationSystemSize = fix_id;
}
/**
 * @brief This method resizes and initializes the system of equations
 * @details Lazily creates the matrix/vector pointers if needed, (re)builds the
 * matrix graph when required, zeroes rDx and rb, and sizes the reactions
 * vector to the number of restrained DoFs.
 * @param pScheme The integration scheme considered
 * @param pA The pointer to the LHS matrix
 * @param pDx The pointer to the vector of Unknowns
 * @param pb The pointer to the RHS vector
 * @param rModelPart The model part to be computed
 */
void ResizeAndInitializeVectors(
    typename TSchemeType::Pointer pScheme,
    TSystemMatrixPointerType& pA,
    TSystemVectorPointerType& pDx,
    TSystemVectorPointerType& pb,
    ModelPart& rModelPart
    ) override
{
    KRATOS_TRY

    // If any pointer is not initialized, initialize it to an empty matrix/vector
    if (pA == nullptr) {
        TSystemMatrixPointerType pNewA = TSystemMatrixPointerType(new TSystemMatrixType(0, 0));
        pA.swap(pNewA);
    }
    if (pDx == nullptr) {
        TSystemVectorPointerType pNewDx = TSystemVectorPointerType(new TSystemVectorType(0));
        pDx.swap(pNewDx);
    }
    if (pb == nullptr) {
        TSystemVectorPointerType pNewb = TSystemVectorPointerType(new TSystemVectorType(0));
        pb.swap(pNewb);
    }
    if (BaseType::mpReactionsVector == nullptr) {
        TSystemVectorPointerType pNewReactionsVector = TSystemVectorPointerType(new TSystemVectorType(0));
        BaseType::mpReactionsVector.swap(pNewReactionsVector);
    }

    TSystemMatrixType& rA = *pA;
    TSystemVectorType& rDx = *pDx;
    TSystemVectorType& rb = *pb;

    // Resizing the system vectors and matrix
    if (rA.size1() == 0 || BaseType::GetReshapeMatrixFlag()) { // If the matrix is not initialized
        rA.resize(BaseType::mEquationSystemSize, BaseType::mEquationSystemSize, false);
        ConstructMatrixStructure(pScheme, rA, rModelPart);
    } else if (rA.size1() != BaseType::mEquationSystemSize || rA.size2() != BaseType::mEquationSystemSize) {
        // KRATOS_ERROR throws, so this branch never returns. The resize and
        // ConstructMatrixStructure calls that used to follow it were
        // unreachable dead code and have been removed.
        KRATOS_ERROR <<"The equation system size has changed during the simulation. This is not permited."<<std::endl;
    }

    if (rDx.size() != BaseType::mEquationSystemSize) {
        rDx.resize(BaseType::mEquationSystemSize, false);
    }
    TSparseSpace::SetToZero(rDx);

    if (rb.size() != BaseType::mEquationSystemSize) {
        rb.resize(BaseType::mEquationSystemSize, false);
    }
    TSparseSpace::SetToZero(rb);

    // If needed, resize the vector for the calculation of reactions
    // (one entry per restrained DoF)
    if (BaseType::mCalculateReactionsFlag) {
        const std::size_t reactions_vector_size = BaseType::mDofSet.size() - BaseType::mEquationSystemSize;
        if (BaseType::mpReactionsVector->size() != reactions_vector_size)
            BaseType::mpReactionsVector->resize(reactions_vector_size, false);
    }

    KRATOS_CATCH("")
}
/**
 * @brief This method computes the reactions
 * @details Rebuilds the RHS (the residual) and copies, with a sign flip, the
 * entries belonging to restrained DoFs into their reaction variables.
 * @param pScheme The integration scheme considered
 * @param rModelPart The model part considered
 * @param rA The LHS of the system
 * @param rDx The vector of Unknowns
 * @param rb The RHS vector
 */
void CalculateReactions(
    typename TSchemeType::Pointer pScheme,
    ModelPart& rModelPart,
    TSystemMatrixType& rA,
    TSystemVectorType& rDx,
    TSystemVectorType& rb
    ) override
{
    // Refresh the RHS so it holds the current residual (i.e. the reactions)
    BuildRHS(pScheme, rModelPart, rb);

    TSystemVectorType& r_reactions_vector = *BaseType::mpReactionsVector;

    // DoFs with EquationId >= mEquationSystemSize are the restrained ones
    // (see SetUpSystem); their reaction is minus the stored residual entry
    for (auto it_dof = BaseType::mDofSet.ptr_begin(); it_dof != BaseType::mDofSet.ptr_end(); ++it_dof) {
        const std::size_t eq_id = (*it_dof)->EquationId();
        if (eq_id >= BaseType::mEquationSystemSize) {
            (*it_dof)->GetSolutionStepReactionValue() = -r_reactions_vector[eq_id - BaseType::mEquationSystemSize];
        }
    }
}
/**
* @brief Applies the dirichlet conditions. This operation may be very heavy or completely
* unexpensive depending on the implementation choosen and on how the System Matrix is built.
* @details For explanation of how it works for a particular implementation the user
* should refer to the particular Builder And Solver choosen
* @param pScheme The integration scheme considered
* @param rModelPart The model part of the problem to solve
* @param rA The LHS matrix
* @param rDx The Unknowns vector
* @param rb The RHS vector
*/
void ApplyDirichletConditions(
typename TSchemeType::Pointer pScheme,
ModelPart& rModelPart,
TSystemMatrixType& rA,
TSystemVectorType& rDx,
TSystemVectorType& rb
) override
{
// Intentionally a no-op: with the elimination approach the fixed DoFs are
// numbered past mEquationSystemSize (see SetUpSystem) and excluded from the
// assembled system, so the Dirichlet data is already contained in the residual.
}
/**
* @brief This function is intended to be called at the end of the solution step to clean up memory storage not needed
*/
void Clear() override
{
// Release the DoF set and the reactions vector
this->mDofSet = DofsArrayType();
this->mpReactionsVector.reset();
// this->mReactionsVector = TSystemVectorType();
// Let the linear solver release its internal data
this->mpLinearSystemSolver->Clear();
KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolver", this->GetEchoLevel() > 1) << "Clear Function called" << std::endl;
}
/**
* @brief This function is designed to be called once to perform all the checks needed
* on the input provided. Checks can be "expensive" as the function is designed
* to catch user's errors.
* @param rModelPart The model part of the problem to solve
* @return 0 all ok
*/
int Check(ModelPart& rModelPart) override
{
KRATOS_TRY
// No elimination-specific validation is performed; always reports success (0).
return 0;
KRATOS_CATCH("");
}
/**
* @brief This method provides the defaults parameters to avoid conflicts between the different constructors
* @return The default parameters
*/
Parameters GetDefaultParameters() const override
{
// Settings specific to this builder; anything missing is filled in from
// the base class defaults afterwards.
Parameters this_defaults = Parameters(R"(
{
"name" : "elimination_builder_and_solver"
})");
// Merge in the base class defaults without overwriting local entries
const Parameters base_defaults = BaseType::GetDefaultParameters();
this_defaults.RecursivelyAddMissingParameters(base_defaults);
return this_defaults;
}
/**
* @brief Returns the name of the class as used in the settings (snake_case format)
* @return The name of the class
*/
static std::string Name()
{
// Snake-case identifier used to select this builder from settings files.
return "elimination_builder_and_solver";
}
///@}
///@name Access
///@{
///@}
///@name Inquiry
///@{
///@}
///@name Input and output
///@{
/// Turn back information as a string.
std::string Info() const override
{
// Human-readable class label used by the Print* helpers below.
return "ResidualBasedEliminationBuilderAndSolver";
}
/// Print information about this object.
void PrintInfo(std::ostream& rOStream) const override
{
// Writes the class label returned by Info() to the given stream.
rOStream << Info();
}
/// Print object's data.
void PrintData(std::ostream& rOStream) const override
{
// Same output as PrintInfo(); no additional internal state is exposed.
rOStream << Info();
}
///@}
///@name Friends
///@{
///@}
protected:
///@name Protected static Member Variables
///@{
///@}
///@name Protected member Variables
///@{
#ifdef USE_LOCKS_IN_ASSEMBLY
std::vector<omp_lock_t> mLockArray;
#endif
///@}
///@name Protected Operators
///@{
///@}
///@name Protected Operations
///@{
/**
* @brief This method assembles the system
* @param rA The LHS of the system
* @param rb The RHS of the system
* @param rLHSContribution The LHS local contribution
* @param rRHSContribution The RHS local contribution
* @param rEquationId The equation id
* @param rLockArray The lock of the dof
* @note The main difference respect the block builder and solver is the fact that the fixed DoFs are not considered on the assembling
*/
void Assemble(
TSystemMatrixType& rA,
TSystemVectorType& rb,
const LocalSystemMatrixType& rLHSContribution,
const LocalSystemVectorType& rRHSContribution,
const Element::EquationIdVectorType& rEquationId
#ifdef USE_LOCKS_IN_ASSEMBLY
,std::vector< omp_lock_t >& rLockArray
#endif
)
{
// Adds one element/condition contribution into the global system.
// Rows whose equation id is >= mEquationSystemSize belong to fixed DoFs
// and are skipped entirely (elimination approach).
const SizeType local_size = rLHSContribution.size1();
for (IndexType i_local = 0; i_local < local_size; ++i_local) {
const IndexType i_global = rEquationId[i_local];
if (i_global < BaseType::mEquationSystemSize) {
#ifdef USE_LOCKS_IN_ASSEMBLY
// Per-row OpenMP lock protects both the RHS entry and the matrix row.
omp_set_lock(&rLockArray[i_global]);
rb[i_global] += rRHSContribution(i_local);
#else
// Lock-free path: the RHS entry is updated atomically; the matrix row
// update below relies on atomics inside AssembleRowContributionFreeDofs.
double& r_a = rb[i_global];
const double& v_a = rRHSContribution(i_local);
#pragma omp atomic
r_a += v_a;
#endif
AssembleRowContributionFreeDofs(rA, rLHSContribution, i_global, i_local, rEquationId);
#ifdef USE_LOCKS_IN_ASSEMBLY
omp_unset_lock(&rLockArray[i_global]);
#endif
}
//note that computation of reactions is not performed here!
}
}
/**
* @brief This method construcs the relationship between the DoF
* @param pScheme The integration scheme
* @param rA The LHS of the system
* @param rModelPart The model part which defines the problem
*/
virtual void ConstructMatrixStructure(
typename TSchemeType::Pointer pScheme,
TSystemMatrixType& rA,
ModelPart& rModelPart
)
{
// Builds the CSR sparsity pattern of rA from the element/condition
// connectivity: first a per-row set of column indices is collected in
// parallel (fixed DoFs excluded), then the CSR arrays are filled and
// each row is sorted.
// Filling with zero the matrix (creating the structure)
Timer::Start("MatrixStructure");
const SizeType equation_size = BaseType::mEquationSystemSize;
std::vector<std::unordered_set<IndexType> > indices(equation_size);
#pragma omp parallel for firstprivate(equation_size)
for (int iii = 0; iii < static_cast<int>(equation_size); iii++) {
indices[iii].reserve(40);
}
Element::EquationIdVectorType ids(3, 0);
#pragma omp parallel firstprivate(ids)
{
// The process info
const ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo();
// We repeat the same declaration for each thead
// (thread-local sets avoid contention during the connectivity sweep)
std::vector<std::unordered_set<IndexType> > temp_indexes(equation_size);
#pragma omp for
for (int index = 0; index < static_cast<int>(equation_size); ++index)
temp_indexes[index].reserve(30);
// Getting the size of the array of elements from the model
const int number_of_elements = static_cast<int>(rModelPart.Elements().size());
// Element initial iterator
const auto it_elem_begin = rModelPart.ElementsBegin();
// We iterate over the elements
#pragma omp for schedule(guided, 512) nowait
for (int i_elem = 0; i_elem<number_of_elements; ++i_elem) {
auto it_elem = it_elem_begin + i_elem;
pScheme->EquationId( *it_elem, ids, r_current_process_info);
for (auto& id_i : ids) {
if (id_i < BaseType::mEquationSystemSize) {
auto& row_indices = temp_indexes[id_i];
for (auto& id_j : ids)
if (id_j < BaseType::mEquationSystemSize)
row_indices.insert(id_j);
}
}
}
// Getting the size of the array of the conditions
const int number_of_conditions = static_cast<int>(rModelPart.Conditions().size());
// Condition initial iterator
const auto it_cond_begin = rModelPart.ConditionsBegin();
// We iterate over the conditions
#pragma omp for schedule(guided, 512) nowait
for (int i_cond = 0; i_cond<number_of_conditions; ++i_cond) {
auto it_cond = it_cond_begin + i_cond;
pScheme->EquationId( *it_cond, ids, r_current_process_info);
for (auto& id_i : ids) {
if (id_i < BaseType::mEquationSystemSize) {
auto& row_indices = temp_indexes[id_i];
for (auto& id_j : ids)
if (id_j < BaseType::mEquationSystemSize)
row_indices.insert(id_j);
}
}
}
// Merging all the temporal indexes
// (serialised merge of every thread-local graph into the shared one)
#pragma omp critical
{
for (int i = 0; i < static_cast<int>(temp_indexes.size()); ++i) {
indices[i].insert(temp_indexes[i].begin(), temp_indexes[i].end());
}
}
}
// Count the row sizes
SizeType nnz = 0;
for (IndexType i = 0; i < indices.size(); ++i)
nnz += indices[i].size();
rA = TSystemMatrixType(indices.size(), indices.size(), nnz);
double* Avalues = rA.value_data().begin();
std::size_t* Arow_indices = rA.index1_data().begin();
std::size_t* Acol_indices = rA.index2_data().begin();
// Filling the index1 vector - DO NOT MAKE PARALLEL THE FOLLOWING LOOP!
// (each entry is a prefix sum of the previous one)
Arow_indices[0] = 0;
for (IndexType i = 0; i < rA.size1(); ++i)
Arow_indices[i + 1] = Arow_indices[i] + indices[i].size();
#pragma omp parallel for
for (int i = 0; i < static_cast<int>(rA.size1()); ++i) {
const IndexType row_begin = Arow_indices[i];
const IndexType row_end = Arow_indices[i + 1];
IndexType k = row_begin;
for (auto it = indices[i].begin(); it != indices[i].end(); ++it) {
Acol_indices[k] = *it;
Avalues[k] = 0.0;
++k;
}
// Sorted column indices are required by the CSR assembly scans
std::sort(&Acol_indices[row_begin], &Acol_indices[row_end]);
}
rA.set_filled(indices.size() + 1, nnz);
Timer::Stop("MatrixStructure");
}
/**
* @brief This method assembles the LHS of the system
* @param rA The LHS to assemble
* @param rLHSContribution The local LHS contribution
* @param rEquationId The equation id
*/
void AssembleLHS(
TSystemMatrixType& rA,
LocalSystemMatrixType& rLHSContribution,
EquationIdVectorType& rEquationId
)
{
// Adds the local LHS contribution into the global matrix, skipping any
// row or column that maps to a fixed DoF (id >= mEquationSystemSize).
const SizeType n = rLHSContribution.size1();
for (IndexType row = 0; row < n; ++row) {
const IndexType row_global = rEquationId[row];
if (row_global >= BaseType::mEquationSystemSize)
continue; // fixed-DoF row: nothing to assemble
for (IndexType col = 0; col < n; ++col) {
const IndexType col_global = rEquationId[col];
if (col_global < BaseType::mEquationSystemSize)
rA(row_global, col_global) += rLHSContribution(row, col);
}
}
}
/**
* @brief This function is equivalent to the AssembleRowContribution of the block builder and solver
* @note The main difference respect the block builder and solver is the fact that the fixed DoFs are skipped
*/
inline void AssembleRowContributionFreeDofs(
TSystemMatrixType& rA,
const Matrix& rALocal,
const IndexType i,
const IndexType i_local,
const Element::EquationIdVectorType& EquationId
)
{
// Scatters row i_local of the local matrix into global CSR row i,
// skipping fixed-DoF columns. Relies on sorted column indices within
// each row (see ConstructMatrixStructure) so entries can be located by
// short forward/backward scans from the previously found position.
double* values_vector = rA.value_data().begin();
IndexType* index1_vector = rA.index1_data().begin();
IndexType* index2_vector = rA.index2_data().begin();
const IndexType left_limit = index1_vector[i];
// Find the first entry
// We iterate over the equation ids until we find the first equation id to be considered
// We count in which component we find an ID
IndexType last_pos = 0;
IndexType last_found = 0;
IndexType counter = 0;
for(IndexType j=0; j < EquationId.size(); ++j) {
++counter;
const IndexType j_global = EquationId[j];
if (j_global < BaseType::mEquationSystemSize) {
last_pos = ForwardFind(j_global,left_limit,index2_vector);
last_found = j_global;
break;
}
}
// If the counter is equal to the size of the EquationID vector that means that only one dof will be considered, if the number is greater means that all the dofs are fixed. If the number is below means that at we have several dofs free to be considered
// NOTE(review): callers only invoke this for free rows (i < mEquationSystemSize,
// see Assemble), so EquationId contains at least one free id and the loop above
// always breaks with last_pos initialised.
if (counter <= EquationId.size()) {
#ifndef USE_LOCKS_IN_ASSEMBLY
// Lock-free build: each scalar accumulation is done atomically
double& r_a = values_vector[last_pos];
const double& v_a = rALocal(i_local,counter - 1);
#pragma omp atomic
r_a += v_a;
#else
// Caller already holds the row lock, plain accumulation is safe
values_vector[last_pos] += rALocal(i_local,counter - 1);
#endif
// Now find all of the other entries
IndexType pos = 0;
for(IndexType j = counter; j < EquationId.size(); ++j) {
IndexType id_to_find = EquationId[j];
if (id_to_find < BaseType::mEquationSystemSize) {
// Scan from the last hit in the direction implied by the id ordering
if(id_to_find > last_found)
pos = ForwardFind(id_to_find,last_pos+1,index2_vector);
else if(id_to_find < last_found)
pos = BackwardFind(id_to_find,last_pos-1,index2_vector);
else
pos = last_pos;
#ifndef USE_LOCKS_IN_ASSEMBLY
double& r = values_vector[pos];
const double& v = rALocal(i_local,j);
#pragma omp atomic
r += v;
#else
values_vector[pos] += rALocal(i_local,j);
#endif
last_found = id_to_find;
last_pos = pos;
}
}
}
}
inline IndexType ForwardFind(const IndexType id_to_find,
const IndexType start,
const IndexType* index_vector)
{
// Linear scan towards higher positions; assumes id_to_find is present
// in index_vector at or after 'start' (guaranteed by the sparsity graph).
IndexType pos = start;
for (; index_vector[pos] != id_to_find; ++pos)
;
return pos;
}
inline IndexType BackwardFind(const IndexType id_to_find,
const IndexType start,
const IndexType* index_vector)
{
// Linear scan towards lower positions; assumes id_to_find is present
// in index_vector at or before 'start' (guaranteed by the sparsity graph).
IndexType pos = start;
for (; index_vector[pos] != id_to_find; --pos)
;
return pos;
}
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
///@}
private:
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{
///@}
///@name Private Operators
///@{
///@}
///@name Private Operations
///@{
/**
* @brief This method ensures that the contribution is unique
*/
/**
 * @brief Appends @p candidate to @p v only if it is not already present.
 * @param v The vector acting as a set of equation ids
 * @param candidate The value to insert if absent
 * @note Linear-time membership test; uses std::find instead of a manual
 * iterator loop (<algorithm> is already in use in this file via std::sort).
 */
inline void AddUnique(std::vector<std::size_t>& v, const std::size_t& candidate)
{
if (std::find(v.begin(), v.end(), candidate) == v.end()) {
v.push_back(candidate);
}
}
/**
* @brief This method assembles the RHS of the system
* @param rb The RHS to assemble
* @param rRHSContribution The local RHS contribution
* @param rEquationId The equation id
*/
void AssembleRHS(
TSystemVectorType& rb,
const LocalSystemVectorType& rRHSContribution,
const EquationIdVectorType& rEquationId
)
{
// Adds the local RHS contribution into the global vector. Free DoFs go
// into rb; when reactions are requested, fixed DoFs accumulate into the
// reactions vector instead (indexed relative to mEquationSystemSize).
// All accumulations are atomic so this is safe under concurrent assembly.
SizeType local_size = rRHSContribution.size();
if (BaseType::mCalculateReactionsFlag == false) {
for (IndexType i_local = 0; i_local < local_size; ++i_local) {
const IndexType i_global = rEquationId[i_local];
if (i_global < BaseType::mEquationSystemSize) { // Free dof
// ASSEMBLING THE SYSTEM VECTOR
double& b_value = rb[i_global];
const double& rhs_value = rRHSContribution[i_local];
#pragma omp atomic
b_value += rhs_value;
}
}
} else {
TSystemVectorType& r_reactions_vector = *BaseType::mpReactionsVector;
for (IndexType i_local = 0; i_local < local_size; ++i_local) {
const IndexType i_global = rEquationId[i_local];
if (i_global < BaseType::mEquationSystemSize) { //free dof
// ASSEMBLING THE SYSTEM VECTOR
double& b_value = rb[i_global];
const double& rhs_value = rRHSContribution[i_local];
#pragma omp atomic
b_value += rhs_value;
} else { // Fixed dof
double& b_value = r_reactions_vector[i_global - BaseType::mEquationSystemSize];
const double& rhs_value = rRHSContribution[i_local];
#pragma omp atomic
b_value += rhs_value;
}
}
}
}
/**
* @brief This method assembles the LHS of the system (on free rows)
* @param rA The LHS to assemble
* @param rLHSContribution The local LHS contribution
* @param rEquationId The equation id
*/
void AssembleLHSCompleteOnFreeRows(
TSystemMatrixType& rA,
LocalSystemMatrixType& rLHSContribution,
EquationIdVectorType& rEquationId
)
{
// Assembles complete rows (all columns, fixed or free) but only for rows
// associated with free DoFs (id < mEquationSystemSize).
const SizeType n = rLHSContribution.size1();
for (IndexType row = 0; row < n; ++row) {
const IndexType row_global = rEquationId[row];
if (row_global >= BaseType::mEquationSystemSize)
continue; // skip fixed-DoF rows
for (IndexType col = 0; col < n; ++col)
rA(row_global, rEquationId[col]) += rLHSContribution(row, col);
}
}
///@}
///@name Private Operations
///@{
///@}
///@name Private Access
///@{
///@}
///@name Private Inquiry
///@{
///@}
///@name Un accessible methods
///@{
///@}
}; /* Class ResidualBasedEliminationBuilderAndSolver */
///@}
///@name Type Definitions
///@{
///@}
} /* namespace Kratos.*/
#endif /* KRATOS_RESIDUAL_BASED_ELIMINATION_BUILDER_AND_SOLVER defined */
|
ch_ompss.c | #include "ch_common.h"
#include "../extrae.h"
#include "../timing.h"
//#ifdef _OMPSS
//#warning "Compiling for OMPSS"
//#endif
//TODO: adjust wait() for timing
static int depth;
#pragma omp threadprivate(depth)
static int comm_round_sentinel; // <-- used to limit parallel communication tasks
/*
 * Distributed, task-parallel blocked (right-looking) Cholesky factorization.
 *   ts         - tile size (each block is ts x ts doubles)
 *   nt         - number of tiles per matrix dimension
 *   A[i][j]    - locally-owned tiles of the matrix (block-column storage)
 *   B          - scratch buffer for a received diagonal/panel tile
 *   C[i]       - scratch buffers for received panel tiles
 *   block_rank - owner rank of each tile, indexed as block_rank[row*nt+col]
 * All tasks are created by a single thread (#pragma omp single) and ordered
 * via depend clauses; communication tasks are throttled through the
 * comm_round_sentinel dependence when HAVE_COMM_SENTINEL is defined.
 * Uses globals mype (this rank) and np (number of ranks).
 */
void cholesky_mpi(const int ts, const int nt, double *A[nt][nt], double *B, double *C[nt], int *block_rank)
{
REGISTER_EXTRAE();
#pragma omp parallel
{
depth = 0;
#pragma omp single
{
INIT_TIMING(omp_get_num_threads());
char *send_flags = malloc(sizeof(char) * np);
char recv_flag = 0;
int num_send_tasks = 0;
int num_recv_tasks = 0;
int max_send_tasks = 0;
int max_recv_tasks = 0;
int num_comp_tasks = 0;
reset_send_flags(send_flags);
START_TIMING(TIME_TOTAL);
{
START_TIMING(TIME_CREATE);
// One iteration per diagonal tile k: factor, broadcast, update trailing matrix
for (int k = 0; k < nt; k++) {
int send_tasks = 0, recv_tasks = 0;
// sentinel task to limit communication task parallelism
#ifdef HAVE_COMM_SENTINEL
#pragma omp task depend(out: comm_round_sentinel)
{ if (comm_round_sentinel < 0) comm_round_sentinel = 0; }
#endif // HAVE_COMM_SENTINEL
// Factor the diagonal tile A[k][k] if this rank owns it
if (block_rank[k*nt+k] == mype) {
num_comp_tasks++;
#pragma omp task depend(out: A[k][k]) firstprivate(k)
{
EXTRAE_ENTER(EVENT_POTRF);
START_TIMING(TIME_POTRF);
omp_potrf(A[k][k], ts, ts);
END_TIMING(TIME_POTRF);
EXTRAE_EXIT(EVENT_POTRF);
}
}
// Owner sends the factored diagonal tile to every rank that owns a
// tile in panel row k (single task issuing all Isends, then waiting)
if (block_rank[k*nt+k] == mype && np != 1) {
#pragma omp task depend(in: A[k][k]) firstprivate(k) depend(in: comm_round_sentinel) untied
{
START_TIMING(TIME_COMM);
MPI_Request reqs[np];
int nreqs = 0;
//printf("[%d:%d:%d] Sending k=%d block (tag %d)\n", mype, omp_get_thread_num(), depth, k, k*nt+k);
for (int dst = 0; dst < np; dst++) {
int send_flag = 0;
for (int kk = k+1; kk < nt; kk++) {
if (dst == block_rank[k*nt+kk]) { send_flag = 1; break; }
}
if (send_flag && dst != mype) {
depth++;
MPI_Request send_req;
MPI_Isend(A[k][k], ts*ts, MPI_DOUBLE, dst, k*nt+k, MPI_COMM_WORLD, &send_req);
//wait(&send_req);
reqs[nreqs++] = send_req;
depth--;
}
}
for (int i = 0; i < nreqs; ++i) {
wait(&reqs[i]);
}
END_TIMING(TIME_COMM);
//printf("[%d:%d:%d] Done Sending k=%d block (tag %d)\n", mype, omp_get_thread_num(), depth, k, k*nt+k);
}
reset_send_flags(send_flags);
}
// Non-owners that need the diagonal tile receive it into scratch B
if (block_rank[k*nt+k] != mype) {
for (int i = k + 1; i < nt; i++) {
if (block_rank[k*nt+i] == mype) recv_flag = 1;
}
if (recv_flag) {
#pragma omp task depend(out: B) firstprivate(k) depend(in: comm_round_sentinel) untied
{
//printf("[%d:%d:%d] Receiving k=%d block from %d (tag %d)\n", mype, omp_get_thread_num(), depth, k, block_rank[k*nt+k], k*nt+k);
START_TIMING(TIME_COMM);
depth++;
MPI_Request recv_req;
MPI_Irecv(B, ts*ts, MPI_DOUBLE, block_rank[k*nt+k], k*nt+k, MPI_COMM_WORLD, &recv_req);
wait(&recv_req);
depth--;
END_TIMING(TIME_COMM);
//printf("[%d:%d:%d] Done Receiving k=%d block from %d (tag %d)\n", mype, omp_get_thread_num(), depth, k, block_rank[k*nt+k], k*nt+k);
}
recv_flag = 0;
}
}
#ifdef HAVE_INTERMEDIATE_COMM_SENTINEL
// sentinel task to limit communication task parallelism
#pragma omp task depend(out: comm_round_sentinel)
{ if (comm_round_sentinel < 0) comm_round_sentinel = 0; }
#endif
// Panel phase: triangular solves on row k, plus the exchange of the
// resulting panel tiles needed for the trailing update
for (int i = k + 1; i < nt; i++) {
if (block_rank[k*nt+i] == mype) {
num_comp_tasks++;
// TRSM reads A[k][k] if local, otherwise the received copy in B
if (block_rank[k*nt+k] == mype) {
#pragma omp task depend(in: A[k][k]) depend(out: A[k][i]) firstprivate(k, i)
{
EXTRAE_ENTER(EVENT_TRSM);
START_TIMING(TIME_TRSM);
omp_trsm(A[k][k], A[k][i], ts, ts);
END_TIMING(TIME_TRSM);
EXTRAE_EXIT(EVENT_TRSM);
}
} else {
#pragma omp task depend(in: B) depend(out: A[k][i]) firstprivate(k, i)
{
EXTRAE_ENTER(EVENT_TRSM);
START_TIMING(TIME_TRSM);
omp_trsm(B, A[k][i], ts, ts);
END_TIMING(TIME_TRSM);
EXTRAE_EXIT(EVENT_TRSM);
}
}
}
// Send the updated panel tile A[k][i] to every rank that will use it
// in a GEMM/SYRK of the trailing submatrix
if (block_rank[k*nt+i] == mype && np != 1) {
for (int ii = k + 1; ii < i; ii++) {
if (!send_flags[block_rank[ii*nt+i]]) send_flags[block_rank[ii*nt+i]] = 1;
}
for (int ii = i + 1; ii < nt; ii++) {
if (!send_flags[block_rank[i*nt+ii]]) send_flags[block_rank[i*nt+ii]] = 1;
}
if (!send_flags[block_rank[i*nt+i]]) send_flags[block_rank[i*nt+i]] = 1;
for (int dst = 0; dst < np; dst++) {
if (send_flags[dst] && dst != mype) {
send_tasks++;
num_send_tasks++;
#pragma omp task depend(in: A[k][i]) firstprivate(k, i, dst) depend(in: comm_round_sentinel) untied
{
//printf("[%d:%d:%d] Sending k=%d i=%d block to %d (tag %d)\n", mype, omp_get_thread_num(), depth, k, i, dst, k*nt+i);
START_TIMING(TIME_COMM);
depth++;
MPI_Request send_req;
MPI_Isend(A[k][i], ts*ts, MPI_DOUBLE, dst, k*nt+i, MPI_COMM_WORLD, &send_req);
wait(&send_req);
depth--;
END_TIMING(TIME_COMM);
//printf("[%d:%d:%d] Done Sending k=%d i=%d block to %d (tag %d)\n", mype, omp_get_thread_num(), depth, k, i, dst, k*nt+i);
}
}
}
reset_send_flags(send_flags);
}
// Receive remote panel tiles into scratch C[i] if any local trailing
// update depends on them
if (block_rank[k*nt+i] != mype) {
for (int ii = k + 1; ii < i; ii++) {
if (block_rank[ii*nt+i] == mype) recv_flag = 1;
}
for (int ii = i + 1; ii < nt; ii++) {
if (block_rank[i*nt+ii] == mype) recv_flag = 1;
}
if (block_rank[i*nt+i] == mype) recv_flag = 1;
if (recv_flag) {
recv_tasks++;
num_recv_tasks++;
#pragma omp task depend(out: C[i]) firstprivate(k, i) depend(in: comm_round_sentinel) untied
{
//printf("[%d:%d:%d] Receiving k=%d i=%d block from %d (tag %d)\n", mype, omp_get_thread_num(), depth, k, i, block_rank[k*nt+i], k*nt+i);
START_TIMING(TIME_COMM);
depth++;
MPI_Request recv_req;
MPI_Irecv(C[i], ts*ts, MPI_DOUBLE, block_rank[k*nt+i], k*nt+i, MPI_COMM_WORLD, &recv_req);
wait(&recv_req);
depth--;
END_TIMING(TIME_COMM);
//printf("[%d:%d:%d] Done Receiving k=%d i=%d block from %d (tag %d)\n", mype, omp_get_thread_num(), depth, k, i, block_rank[k*nt+i], k*nt+i);
}
recv_flag = 0;
}
}
}
// Track the busiest round for the statistics printed at the end
if ((max_send_tasks + max_recv_tasks) < (send_tasks + recv_tasks)) {
max_send_tasks = send_tasks;
max_recv_tasks = recv_tasks;
}
// Trailing-matrix update: GEMM for off-diagonal tiles, SYRK for diagonal
// ones. Each variant picks local tiles (A[k][*]) or received copies
// (C[*]) depending on ownership.
for (int i = k + 1; i < nt; i++) {
for (int j = k + 1; j < i; j++) {
if (block_rank[j*nt+i] == mype) {
num_comp_tasks++;
if (block_rank[k*nt+i] == mype && block_rank[k*nt+j] == mype) {
#pragma omp task depend(in: A[k][i], A[k][j]) depend(out: A[j][i]) firstprivate(k, j, i)
{
EXTRAE_ENTER(EVENT_GEMM);
START_TIMING(TIME_GEMM);
omp_gemm(A[k][i], A[k][j], A[j][i], ts, ts);
END_TIMING(TIME_GEMM);
EXTRAE_EXIT(EVENT_GEMM);
}
} else if (block_rank[k*nt+i] != mype && block_rank[k*nt+j] == mype) {
#pragma omp task depend(in: C[i], A[k][j]) depend(out: A[j][i]) firstprivate(k, j, i)
{
EXTRAE_ENTER(EVENT_GEMM);
START_TIMING(TIME_GEMM);
omp_gemm(C[i], A[k][j], A[j][i], ts, ts);
END_TIMING(TIME_GEMM);
EXTRAE_EXIT(EVENT_GEMM);
}
} else if (block_rank[k*nt+i] == mype && block_rank[k*nt+j] != mype) {
#pragma omp task depend(in: A[k][i], C[j]) depend(out: A[j][i]) firstprivate(k, j, i)
{
EXTRAE_ENTER(EVENT_GEMM);
START_TIMING(TIME_GEMM);
omp_gemm(A[k][i], C[j], A[j][i], ts, ts);
END_TIMING(TIME_GEMM);
EXTRAE_EXIT(EVENT_GEMM);
}
} else {
#pragma omp task depend(in: C[i], C[j]) depend(out: A[j][i]) firstprivate(k, j, i)
{
EXTRAE_ENTER(EVENT_GEMM);
START_TIMING(TIME_GEMM);
omp_gemm(C[i], C[j], A[j][i], ts, ts);
END_TIMING(TIME_GEMM);
EXTRAE_EXIT(EVENT_GEMM);
}
}
}
}
if (block_rank[i*nt+i] == mype) {
num_comp_tasks++;
if (block_rank[k*nt+i] == mype) {
#pragma omp task depend(in: A[k][i]) depend(out: A[i][i]) firstprivate(k, i)
{
EXTRAE_ENTER(EVENT_SYRK);
START_TIMING(TIME_SYRK);
omp_syrk(A[k][i], A[i][i], ts, ts);
END_TIMING(TIME_SYRK);
EXTRAE_EXIT(EVENT_SYRK);
}
} else {
#pragma omp task depend(in: C[i]) depend(out: A[i][i]) firstprivate(k, i)
{
EXTRAE_ENTER(EVENT_SYRK);
START_TIMING(TIME_SYRK);
omp_syrk(C[i], A[i][i], ts, ts);
END_TIMING(TIME_SYRK);
EXTRAE_EXIT(EVENT_SYRK);
}
}
}
}
}
END_TIMING(TIME_CREATE);
}
// Wait for every created task before timing/statistics and the barrier
#pragma omp taskwait
END_TIMING(TIME_TOTAL);
MPI_Barrier(MPI_COMM_WORLD);
#ifdef USE_TIMING
PRINT_TIMINGS();
FREE_TIMING();
#endif
printf("[%d] max_send_tasks %d, max_recv_tasks %d, num_send_tasks %d, num_recv_tasks %d, num_comp_tasks %d\n",
mype, max_send_tasks, max_recv_tasks, num_send_tasks, num_recv_tasks, num_comp_tasks);
free(send_flags);
}// pragma omp single
}// pragma omp parallel
}
|
wshfl.c | /* Copyright 2018-2019. Massachusetts Institute of Technology.
* All rights reserved. Use of this source code is governed by
* a BSD-style license which can be found in the LICENSE file.
*
* Authors:
* 2018-2019 Siddharth Iyer <ssi@mit.edu>
*
* Tamir J, Uecker M, Chen W, Lai P, Alley MT, Vasanawala SS, Lustig M.
* T2 shuffling: Sharp, multicontrast, volumetric fast spin‐echo imaging.
* Magnetic resonance in medicine. 2017 Jan 1;77(1):180-95.
*
* B Bilgic, BA Gagoski, SF Cauley, AP Fan, JR Polimeni, PE Grant,
* LL Wald, and K Setsompop, Wave-CAIPI for highly accelerated 3D
* imaging. Magn Reson Med (2014) doi: 10.1002/mrm.25347
*
* Iyer S, Bilgic B, Setsompop K.
* Faster T2 shuffling with Wave.
* Presented in the session: "Signal Encoding and Decoding" at ISMRM 2018.
* https://www.ismrm.org/18/program_files/O67.htm
*/
#include <stdbool.h>
#include <complex.h>
#include <math.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "num/multind.h"
#include "num/flpmath.h"
#include "num/fft.h"
#include "num/init.h"
#include "num/iovec.h"
#include "num/ops.h"
#include "num/ops_p.h"
#ifdef USE_CUDA
#include "num/gpuops.h"
#endif
#include "iter/iter.h"
#include "iter/lsqr.h"
#include "iter/misc.h"
#include "linops/linop.h"
#include "linops/fmac.h"
#include "linops/someops.h"
#include "linops/decompose_complex.h"
#include "misc/debug.h"
#include "misc/mri.h"
#include "misc/utils.h"
#include "misc/mmio.h"
#include "misc/misc.h"
#include "misc/opts.h"
#include "wavelet/wavthresh.h"
#include "lowrank/lrthresh.h"
#include "grecon/optreg.h"
#include "grecon/italgo.h"
/* Command-line usage line and the detailed help text printed by the tool. */
static const char usage_str[] = "<maps> <wave> <phi> <reorder> <table> <output>";
static const char help_str[] =
"Perform a wave-shuffling reconstruction.\n\n"
"Conventions:\n"
" * (sx, sy, sz) - Spatial dimensions.\n"
" * wx - Extended FOV in READ_DIM due to\n"
" wave's voxel spreading.\n"
" * (nc, md) - Number of channels and ESPIRiT's \n"
" extended-SENSE model operator\n"
" dimensions (or # of maps).\n"
" * (tf, tk) - Turbo-factor and the rank\n"
" of the temporal basis used in\n"
" shuffling.\n"
" * ntr - Number of TRs, or the number of\n"
" (ky, kz) points acquired of one\n"
" echo image.\n"
" * n - Total number of (ky, kz) points\n"
" acquired. This is equal to the\n"
" product of ntr and tf.\n\n"
"Descriptions:\n"
" * reorder is an (n by 3) index matrix such that\n"
" [ky, kz, t] = reorder(i, :) represents the\n"
" (ky, kz) kspace position of the readout line\n"
" acquired at echo number (t), and 0 <= ky < sy,\n"
" 0 <= kz < sz, 0 <= t < tf).\n"
" * table is a (wx by nc by n) matrix such that\n"
" table(:, :, k) represents the kth multichannel\n"
" kspace line.\n\n"
"Expected dimensions:\n"
" * maps - ( sx, sy, sz, nc, md, 1, 1)\n"
" * wave - ( wx, sy, sz, 1, 1, 1, 1)\n"
" * phi - ( 1, 1, 1, 1, 1, tf, tk)\n"
" * output - ( sx, sy, sz, 1, md, 1, tk)\n"
" * reorder - ( n, 3, 1, 1, 1, 1, 1)\n"
" * table - ( wx, nc, n, 1, 1, 1, 1)";
/* Helper function to print out operator dimensions. */
static void print_opdims(const struct linop_s* op)
{
const struct iovec_s* dom = linop_domain(op);
const struct iovec_s* cod = linop_codomain(op);
debug_printf(DP_INFO, "\tDomain: [");
for (long i = 0; i < dom->N; i++)
debug_printf(DP_INFO, "%6ld", dom->dims[i]);
debug_printf(DP_INFO, "]\n");
debug_printf(DP_INFO, "\tCodomain: [");
for (long i = 0; i < cod->N; i++)
debug_printf(DP_INFO, "%6ld", cod->dims[i]);
debug_printf(DP_INFO, "]\n");
}
/* Construct sampling mask array from reorder tables. */
/* Builds the (sy x sz x tf) binary sampling mask from the reorder table:
 * row i of reorder holds the (ky, kz, echo) triple of acquired line i. */
static void construct_mask(
long reorder_dims[DIMS], complex float* reorder,
long mask_dims[DIMS], complex float* mask)
{
long n = reorder_dims[0];
long sy = mask_dims[1];
long sz = mask_dims[2];
for (int i = 0; i < n; i++) {
long y = lround(creal(reorder[i]));
long z = lround(creal(reorder[i + n]));
long t = lround(creal(reorder[i + 2 * n]));
mask[(y + z * sy) + t * sy * sz] = 1;
}
}
/* Operator data for the shuffling kernel linop. The reorder, phi and kernel
 * arrays are borrowed from the caller (not freed in kern_free); gpu_kernel
 * is allocated by linop_kern_create and owned by this struct. */
struct kern_s {
INTERFACE(linop_data_t);
unsigned int N;
long* reorder_dims; // Dimension of the index table: ( n, 3, 1, 1, 1, 1, 1, 1)
long* phi_dims; // Dimension of the temporal basis: ( 1, 1, 1, 1, 1, tf, tk, 1)
long* table_dims; // Dimension of the data table: (wx, nc, n, 1, 1, 1, 1, 1)
long* kernel_dims; // Dimension of the kernel: ( 1, sy, sz, 1, 1, 1, tk, tk)
complex float* reorder; // (ky, kz, echo) index table
complex float* phi; // temporal basis
complex float* kernel; // normal-equation kernel
complex float* gpu_kernel; // channel-replicated kernel copy on GPU (NULL if unused)
};
static DEF_TYPEID(kern_s);
/* Go to table from coefficient-kspace with memory efficiency. */
static void kern_apply(const linop_data_t* _data, complex float* dst, const complex float* src)
{
// Forward operator: maps coefficient k-space (wx, sy, sz, nc, tk) to the
// acquired data table (wx, nc, n) by expanding each sampled (ky, kz)
// location through the temporal basis phi and extracting the echo that
// was actually acquired there.
const struct kern_s* data = CAST_DOWN(kern_s, _data);
long wx = data->table_dims[0];
long sy = data->kernel_dims[1];
long sz = data->kernel_dims[2];
long nc = data->table_dims[1];
long n = data->reorder_dims[0];
long tf = data->phi_dims[5];
long tk = data->phi_dims[6];
long input_dims[] = { [0 ... DIMS - 1] = 1 };
input_dims[0] = wx;
input_dims[1] = sy;
input_dims[2] = sz;
input_dims[3] = nc;
input_dims[6] = tk;
// Permute so each (ky, kz) position is a contiguous (wx, nc, tk) chunk
long perm_dims[] = { [0 ... DIMS - 1] = 1 };
perm_dims[0] = wx;
perm_dims[1] = nc;
perm_dims[3] = tk;
perm_dims[4] = sy;
perm_dims[5] = sz;
complex float* perm = md_alloc_sameplace(DIMS, perm_dims, CFL_SIZE, src);
unsigned int permute_order[DIMS] = {0, 3, 5, 6, 1, 2, 4, 7};
for (unsigned int i = 8; i < DIMS; i++)
permute_order[i] = i;
md_permute(DIMS, permute_order, perm_dims, perm, input_dims, src, CFL_SIZE);
// 4D working dims: (wx, nc, echo, coefficient)
long vec_dims[] = {wx, nc, tf, 1};
long phi_mat_dims[] = { 1, 1, tf, tk};
long phi_in_dims[] = {wx, nc, 1, tk};
long fmac_dims[] = {wx, nc, tf, tk};
long line_dims[] = {wx, nc, 1, 1};
complex float* vec = md_alloc_sameplace(4, vec_dims, CFL_SIZE, src);
long vec_str[4];
md_calc_strides(4, vec_str, vec_dims, CFL_SIZE);
long phi_mat_str[4];
md_calc_strides(4, phi_mat_str, phi_mat_dims, CFL_SIZE);
long phi_in_str[4];
md_calc_strides(4, phi_in_str, phi_in_dims, CFL_SIZE);
long fmac_str[4];
md_calc_strides(4, fmac_str, fmac_dims, CFL_SIZE);
int y = -1;
int z = -1;
int t = -1;
// For each acquired line: expand coefficients -> all echoes, copy out
// only the acquired echo t into the output table.
for (int i = 0; i < n; i ++) {
y = lround(creal(data->reorder[i]));
z = lround(creal(data->reorder[i + n]));
t = lround(creal(data->reorder[i + 2 * n]));
md_clear(4, vec_dims, vec, CFL_SIZE);
md_zfmac2(4, fmac_dims, vec_str, vec, phi_in_str, (perm + ((wx * nc * tk) * (y + z * sy))), phi_mat_str, data->phi);
md_copy(4, line_dims, dst + (i * wx * nc), vec + (t * wx * nc), CFL_SIZE);
}
md_free(perm);
md_free(vec);
}
/* Collapse data table into the temporal basis for memory efficiency. */
static void kern_adjoint(const linop_data_t* _data, complex float* dst, const complex float* src)
{
// Adjoint operator: collapses the data table (wx, nc, n) back into
// coefficient k-space (wx, sy, sz, nc, tk). Lines sharing the same
// (ky, kz) position are gathered into a per-thread echo vector and
// projected onto the temporal basis (conjugate multiply-accumulate).
struct kern_s* data = CAST_DOWN(kern_s, _data);
long wx = data->table_dims[0];
long sy = data->kernel_dims[1];
long sz = data->kernel_dims[2];
long nc = data->table_dims[1];
long n = data->reorder_dims[0];
long tf = data->phi_dims[5];
long tk = data->phi_dims[6];
long perm_dims[] = { [0 ... DIMS - 1] = 1 };
perm_dims[0] = wx;
perm_dims[1] = nc;
perm_dims[3] = tk;
perm_dims[4] = sy;
perm_dims[5] = sz;
complex float* perm = md_alloc_sameplace(DIMS, perm_dims, CFL_SIZE, dst);
md_clear(DIMS, perm_dims, perm, CFL_SIZE);
#ifdef _OPENMP
long num_threads = omp_get_max_threads();
#else
long num_threads = 1;
#endif
long vec_dims[] = {wx, nc, tf, 1};
long phi_mat_dims[] = { 1, 1, tf, tk};
long phi_out_dims[] = {wx, nc, 1, tk};
long fmac_dims[] = {wx, nc, tf, tk};
long line_dims[] = {wx, nc, 1, 1};
// One (wx, nc, tf) scratch slab per thread
long vthrd_dims[] = {wx, nc, tf, 1, num_threads};
complex float* vec = md_alloc_sameplace(5, vthrd_dims, CFL_SIZE, dst);
md_clear(5, vthrd_dims, vec, CFL_SIZE);
long vec_str[4];
md_calc_strides(4, vec_str, vec_dims, CFL_SIZE);
long phi_mat_str[4];
md_calc_strides(4, phi_mat_str, phi_mat_dims, CFL_SIZE);
long phi_out_str[4];
md_calc_strides(4, phi_out_str, phi_out_dims, CFL_SIZE);
long fmac_str[4];
md_calc_strides(4, fmac_str, fmac_dims, CFL_SIZE);
// flags[i] marks line i as already folded into its (ky, kz) group
long flag_dims[1] = { n };
complex float* flags = md_calloc(1, flag_dims, CFL_SIZE);
// NOTE(review): flags[] is read (flags[k]) and written (flags[i]) by
// different loop iterations without synchronization; iterations sharing
// a (ky, kz) group appear to rely on iteration order. Potential data
// race / duplicate accumulation under OpenMP — verify.
#pragma omp parallel for
for (int k = 0; k < n; k ++) {
#ifdef _OPENMP
int tid = omp_get_thread_num();
#else
int tid = 0;
#endif
int y = lround(creal(data->reorder[k]));
int z = lround(creal(data->reorder[k + n]));
int t = -1;
if (0 == flags[k]) {
md_clear(4, vec_dims, vec + (wx * nc * tf * tid), CFL_SIZE);
// Gather all acquired echoes at this (y, z) into the echo vector
for (int i = k; i < n; i ++) {
if ((y == lround(creal(data->reorder[i]))) && (z == lround(creal(data->reorder[i + n])))) {
flags[i] = 1;
t = lround(creal(data->reorder[i + 2 * n]));
md_copy(4, line_dims, (vec + (wx * nc * tf * tid) + t * wx * nc), (src + i * wx * nc), CFL_SIZE);
}
}
// Project echoes onto the basis: perm(y, z) += vec * conj(phi)
md_zfmacc2(4, fmac_dims, phi_out_str, perm + (y + z * sy) * (wx * nc * tk), vec_str, vec + (wx * nc * tf * tid), phi_mat_str, data->phi);
}
}
// Permute (wx, nc, tk, sy, sz) back to (wx, sy, sz, nc, tk)
long out_dims[] = { [0 ... DIMS - 1] = 1 };
out_dims[0] = wx;
out_dims[1] = sy;
out_dims[2] = sz;
out_dims[3] = nc;
out_dims[6] = tk;
unsigned int permute_order[DIMS] = {0, 4, 5, 1, 6, 2, 3, 7};
for (unsigned int i = 8; i < DIMS; i++)
permute_order[i] = i;
md_permute(DIMS, permute_order, out_dims, dst, perm_dims, perm, CFL_SIZE);
md_free(vec);
md_free(perm);
md_free(flags);
}
static void kern_normal(const linop_data_t* _data, complex float* dst, const complex float* src)
{
// Normal operator: applies the precomputed (tk x tk) kernel pointwise
// over (sy, sz) via a single strided multiply-accumulate, avoiding the
// explicit forward/adjoint round trip through the data table.
const struct kern_s* data = CAST_DOWN(kern_s, _data);
long wx = data->table_dims[0];
long sy = data->kernel_dims[1];
long sz = data->kernel_dims[2];
long nc = data->table_dims[1];
long tk = data->phi_dims[6];
long input_dims[DIMS] = { [0 ... DIMS - 1] = 1 };
input_dims[0] = wx;
input_dims[1] = sy;
input_dims[2] = sz;
input_dims[3] = nc;
input_dims[6] = tk;
long input_str[DIMS];
md_calc_strides(DIMS, input_str, input_dims, CFL_SIZE);
// Output contracts over dim 6 (input coefficients) into dim 7
long output_dims[DIMS];
md_copy_dims(DIMS, output_dims, input_dims);
output_dims[6] = 1;
output_dims[7] = tk;
long output_str[DIMS];
md_calc_strides(DIMS, output_str, output_dims, CFL_SIZE);
// GPU kernel was replicated over wx and nc at creation time
long gpu_kernel_dims[DIMS] = { [0 ... DIMS - 1] = 1};
md_copy_dims(DIMS, gpu_kernel_dims, data->kernel_dims);
gpu_kernel_dims[0] = wx;
gpu_kernel_dims[3] = nc;
long kernel_str[DIMS];
md_calc_strides(DIMS, kernel_str, data->kernel_dims, CFL_SIZE);
long gpu_kernel_str[DIMS];
md_calc_strides(DIMS, gpu_kernel_str, gpu_kernel_dims, CFL_SIZE);
long fmac_dims[DIMS];
md_merge_dims(DIMS, fmac_dims, input_dims, data->kernel_dims);
md_clear(DIMS, output_dims, dst, CFL_SIZE);
#ifdef USE_CUDA
if(cuda_ondevice(src))
md_zfmac2(DIMS, fmac_dims, output_str, dst, input_str, src, gpu_kernel_str, data->gpu_kernel);
else
#endif
md_zfmac2(DIMS, fmac_dims, output_str, dst, input_str, src, kernel_str, data->kernel);
}
/* Releases all memory owned by the kernel operator data. The reorder, phi
 * and kernel arrays were passed in by the caller and are not freed here. */
static void kern_free(const linop_data_t* _data)
{
const struct kern_s* data = CAST_DOWN(kern_s, _data);
xfree(data->reorder_dims);
xfree(data->phi_dims);
xfree(data->kernel_dims);
xfree(data->table_dims);
#ifdef USE_CUDA
if (NULL != data->gpu_kernel)
md_free(data->gpu_kernel);
#endif
xfree(data);
}
/* Creates the shuffling-kernel linop. Dimension arrays are deep-copied;
 * reorder, phi and kernel pointers are stored without copying (caller
 * keeps ownership). On GPU, the kernel is replicated over the wx and nc
 * dimensions and uploaded once so kern_normal can use unit strides. */
static const struct linop_s* linop_kern_create(bool gpu_flag,
const long _reorder_dims[DIMS], complex float* reorder,
const long _phi_dims[DIMS], complex float* phi,
const long _kernel_dims[DIMS], complex float* kernel,
const long _table_dims[DIMS])
{
PTR_ALLOC(struct kern_s, data);
SET_TYPEID(kern_s, data);
PTR_ALLOC(long[DIMS], reorder_dims);
PTR_ALLOC(long[DIMS], phi_dims);
PTR_ALLOC(long[DIMS], table_dims);
PTR_ALLOC(long[DIMS], kernel_dims);
md_copy_dims(DIMS, *reorder_dims, _reorder_dims);
md_copy_dims(DIMS, *phi_dims, _phi_dims);
md_copy_dims(DIMS, *table_dims, _table_dims);
md_copy_dims(DIMS, *kernel_dims, _kernel_dims);
data->reorder_dims = *PTR_PASS(reorder_dims);
data->phi_dims = *PTR_PASS(phi_dims);
data->table_dims = *PTR_PASS(table_dims);
data->kernel_dims = *PTR_PASS(kernel_dims);
data->reorder = reorder;
data->phi = phi;
data->kernel = kernel;
data->gpu_kernel = NULL;
#ifdef USE_CUDA
if(gpu_flag) {
// Broadcast the (1, sy, sz, 1, ..., tk, tk) kernel across wx and nc,
// then move the replicated copy to the GPU.
long repmat_kernel_dims[DIMS] = { [0 ... DIMS - 1] = 1};
md_copy_dims(DIMS, repmat_kernel_dims, _kernel_dims);
repmat_kernel_dims[0] = _table_dims[0];
repmat_kernel_dims[3] = _table_dims[1];
long kernel_strs[DIMS];
long repmat_kernel_strs[DIMS];
md_calc_strides(DIMS, kernel_strs, _kernel_dims, CFL_SIZE);
md_calc_strides(DIMS, repmat_kernel_strs, repmat_kernel_dims, CFL_SIZE);
complex float* repmat_kernel = md_calloc(DIMS, repmat_kernel_dims, CFL_SIZE);
md_copy2(DIMS, repmat_kernel_dims, repmat_kernel_strs, repmat_kernel, kernel_strs, kernel, CFL_SIZE);
data->gpu_kernel = md_gpu_move(DIMS, repmat_kernel_dims, repmat_kernel, CFL_SIZE);
md_free(repmat_kernel);
}
#else
UNUSED(gpu_flag);
#endif
// Domain: coefficient k-space; codomain: acquired data table
long input_dims[DIMS] = { [0 ... DIMS - 1] = 1 };
input_dims[0] = _table_dims[0];
input_dims[1] = _kernel_dims[1];
input_dims[2] = _kernel_dims[2];
input_dims[3] = _table_dims[1];
input_dims[6] = _phi_dims[6];
long output_dims[DIMS] = { [0 ... DIMS - 1] = 1 };
output_dims[0] = _table_dims[0];
output_dims[1] = _table_dims[1];
output_dims[2] = _reorder_dims[0];
const struct linop_s* K = linop_create(DIMS, output_dims, DIMS, input_dims, CAST_UP(PTR_PASS(data)), kern_apply, kern_adjoint, kern_normal, NULL, kern_free);
return K;
}
// Data for the multi-channel wrapper operator: applies the single-channel
// operator sc_op once per coil, weighting by the sensitivity maps.
struct multc_s {
INTERFACE(linop_data_t);
unsigned int nc; // number of coils (COIL_DIM extent of maps)
unsigned int md; // number of maps (MAPS_DIM extent of maps)
const complex float* maps; // sensitivity maps, (sx, sy, sz, nc, md); borrowed, not owned
const struct linop_s* sc_op; // Single channel operator.
};
static DEF_TYPEID(multc_s);
// Forward operator: for each coil k, multiply the coefficient image src by
// that coil's sensitivity map, push the result through the single-channel
// forward operator into a per-coil table slot, then permute the stacked
// table from (wx, n, nc) to the output layout (wx, nc, n).
static void multc_apply(const linop_data_t* _data, complex float* dst, const complex float* src)
{
const struct multc_s* data = CAST_DOWN(multc_s, _data);
// Loading single channel operator.
const struct operator_s* fwd = data->sc_op->forward;
const long* sc_inp_dims = linop_domain(data->sc_op)->dims;
const long* sc_out_dims = linop_codomain(data->sc_op)->dims;
long sx = sc_inp_dims[0];
long sy = sc_inp_dims[1];
long sz = sc_inp_dims[2];
long wx = sc_out_dims[0];
long n = sc_out_dims[2];
long nc = data->nc;
long md = data->md;
// src carries md maps stacked along MAPS_DIM.
long src_dims[] = { [0 ... DIMS - 1] = 1};
md_copy_dims(DIMS, src_dims, sc_inp_dims);
src_dims[MAPS_DIM] = md;
// dst carries nc coils along dim 1.
long dst_dims[] = { [0 ... DIMS - 1] = 1};
md_copy_dims(DIMS, dst_dims, sc_out_dims);
dst_dims[1] = nc;
long map_dims[] = { [0 ... DIMS - 1] = 1};
map_dims[0] = sx;
map_dims[1] = sy;
map_dims[2] = sz;
map_dims[3] = nc;
map_dims[4] = md;
// One coil's worth of maps (COIL_DIM collapsed to 1).
long single_map_dims[] = { [0 ... DIMS - 1] = 1 };
md_copy_dims(DIMS, single_map_dims, map_dims);
single_map_dims[COIL_DIM] = 1;
complex float* single_map = md_alloc_sameplace(DIMS, single_map_dims, CFL_SIZE, src);
complex float* buffer = md_alloc_sameplace(DIMS, sc_inp_dims, CFL_SIZE, src);
// Intermediate table with coils as the slowest dim: (wx, n, nc).
long tbl_dims[] = { [0 ... DIMS - 1] = 1};
tbl_dims[0] = wx;
tbl_dims[1] = n;
tbl_dims[2] = nc;
complex float* tbl = md_alloc_sameplace(DIMS, tbl_dims, CFL_SIZE, src);
md_clear(DIMS, tbl_dims, tbl, CFL_SIZE);
long pos[] = { [0 ... DIMS - 1] = 0 };
long zfmac_dims[] = { [0 ... DIMS - 1] = 1 };
md_copy_dims(DIMS, zfmac_dims, src_dims);
long strides_single_map[DIMS];
md_calc_strides(DIMS, strides_single_map, single_map_dims, CFL_SIZE);
long strides_src[DIMS];
md_calc_strides(DIMS, strides_src, src_dims, CFL_SIZE);
long strides_sc_inp[DIMS];
md_calc_strides(DIMS, strides_sc_inp, sc_inp_dims, CFL_SIZE);
for (long k = 0; k < data->nc; k++) {
md_clear(DIMS, single_map_dims, single_map, CFL_SIZE);
md_clear(DIMS, sc_inp_dims, buffer, CFL_SIZE);
// Extract coil k's maps.
pos[COIL_DIM] = k;
md_slice(DIMS, COIL_FLAG, pos, map_dims, single_map, data->maps, CFL_SIZE);
pos[COIL_DIM] = 0;
// buffer = sum over maps of src * map (accumulates along MAPS_DIM).
md_zfmac2(DIMS, zfmac_dims, strides_sc_inp, buffer, strides_src, src, strides_single_map, single_map);
// Write coil k's single-channel output into its table slot.
operator_apply(fwd, DIMS, sc_out_dims, tbl + (wx * n * k), DIMS, sc_inp_dims, buffer);
}
md_clear(DIMS, dst_dims, dst, CFL_SIZE);
// Swap dims 1 and 2: (wx, n, nc) -> (wx, nc, n).
unsigned int permute_order[DIMS] = {0, 2, 1};
for (unsigned int i = 3; i < DIMS; i++)
permute_order[i] = i;
md_permute(DIMS, permute_order, dst_dims, dst, tbl_dims, tbl, CFL_SIZE);
md_free(single_map);
md_free(buffer);
md_free(tbl);
}
// Adjoint operator: for each coil k, slice that coil's table out of src,
// apply the single-channel adjoint, multiply by the conjugated sensitivity
// map (md_zfmacc2) and accumulate all coils into dst.
static void multc_adjoint(const linop_data_t* _data, complex float* dst, const complex float* src)
{
const struct multc_s* data = CAST_DOWN(multc_s, _data);
// Loading single channel operator.
const struct operator_s* adj = data->sc_op->adjoint;
// Note: input of the adjoint is the codomain of sc_op and vice versa.
const long* sc_inp_dims = linop_codomain(data->sc_op)->dims;
const long* sc_out_dims = linop_domain(data->sc_op)->dims;
long sx = sc_out_dims[0];
long sy = sc_out_dims[1];
long sz = sc_out_dims[2];
long wx = sc_inp_dims[0];
long n = sc_inp_dims[2];
long nc = data->nc;
long md = data->md;
// src: per-coil data table, coils along dim 1.
long src_dims[] = { [0 ... DIMS - 1] = 1};
md_copy_dims(DIMS, src_dims, sc_inp_dims);
src_dims[1] = nc;
// dst: coefficient image with md maps along MAPS_DIM.
long dst_dims[] = { [0 ... DIMS - 1] = 1};
md_copy_dims(DIMS, dst_dims, sc_out_dims);
dst_dims[MAPS_DIM] = md;
long map_dims[] = { [0 ... DIMS - 1] = 1};
map_dims[0] = sx;
map_dims[1] = sy;
map_dims[2] = sz;
map_dims[3] = nc;
map_dims[4] = md;
long single_map_dims[] = { [0 ... DIMS - 1] = 1 };
md_copy_dims(DIMS, single_map_dims, map_dims);
single_map_dims[COIL_DIM] = 1;
complex float* single_map = md_alloc_sameplace(DIMS, single_map_dims, CFL_SIZE, src);
complex float* buffer1 = md_alloc_sameplace(DIMS, sc_out_dims, CFL_SIZE, src);
complex float* buffer2 = md_alloc_sameplace(DIMS, dst_dims, CFL_SIZE, src);
// Single-coil table slice: (wx, 1, n).
long tbl_dims[] = { [0 ... DIMS - 1] = 1};
tbl_dims[0] = wx;
tbl_dims[2] = n;
complex float* tbl = md_alloc_sameplace(DIMS, tbl_dims, CFL_SIZE, src);
long pos[] = { [0 ... DIMS - 1] = 0 };
long strides_single_map[DIMS];
md_calc_strides(DIMS, strides_single_map, single_map_dims, CFL_SIZE);
long strides_sc_out[DIMS];
md_calc_strides(DIMS, strides_sc_out, sc_out_dims, CFL_SIZE);
long strides_dst[DIMS];
md_calc_strides(DIMS, strides_dst, dst_dims, CFL_SIZE);
md_clear(DIMS, dst_dims, dst, CFL_SIZE);
for (long k = 0; k < data->nc; k++) {
md_clear(DIMS, single_map_dims, single_map, CFL_SIZE);
md_clear(DIMS, sc_out_dims, buffer1, CFL_SIZE);
md_clear(DIMS, dst_dims, buffer2, CFL_SIZE);
md_clear(DIMS, tbl_dims, tbl, CFL_SIZE);
// Slice coil k out of the table (flag 2 = bit for dim 1).
pos[1] = k;
md_slice(DIMS, 2, pos, src_dims, tbl, src, CFL_SIZE);
pos[1] = 0;
operator_apply(adj, DIMS, sc_out_dims, buffer1, DIMS, tbl_dims, tbl);
// Multiply by conj(map_k) and accumulate into dst.
pos[COIL_DIM] = k;
md_slice(DIMS, COIL_FLAG, pos, map_dims, single_map, data->maps, CFL_SIZE);
pos[COIL_DIM] = 0;
md_zfmacc2(DIMS, dst_dims, strides_dst, buffer2, strides_sc_out, buffer1, strides_single_map, single_map);
md_zadd(DIMS, dst_dims, dst, dst, buffer2);
}
md_free(single_map);
md_free(buffer1);
md_free(buffer2);
md_free(tbl);
}
// Normal operator (A^H A): for each coil, project src through that coil's
// map, apply the single-channel normal operator, back-project with the
// conjugated map, and accumulate the per-coil contributions into dst.
static void multc_normal(const linop_data_t* _data, complex float* dst, const complex float* src)
{
const struct multc_s* data = CAST_DOWN(multc_s, _data);
// Loading single channel operator.
const struct operator_s* nrm = data->sc_op->normal;
const long* sc_dims = linop_domain(data->sc_op)->dims;
long sx = sc_dims[0];
long sy = sc_dims[1];
long sz = sc_dims[2];
long nc = data->nc;
long md = data->md;
// src and dst share these dims: single-channel domain + md maps.
long dims[] = { [0 ... DIMS - 1] = 1};
md_copy_dims(DIMS, dims, sc_dims);
dims[MAPS_DIM] = md;
long map_dims[] = { [0 ... DIMS - 1] = 1};
map_dims[0] = sx;
map_dims[1] = sy;
map_dims[2] = sz;
map_dims[3] = nc;
map_dims[4] = md;
long single_map_dims[] = { [0 ... DIMS - 1] = 1 };
md_copy_dims(DIMS, single_map_dims, map_dims);
single_map_dims[COIL_DIM] = 1;
complex float* single_map = md_alloc_sameplace(DIMS, single_map_dims, CFL_SIZE, src);
complex float* buffer1 = md_alloc_sameplace(DIMS, sc_dims, CFL_SIZE, src);
complex float* buffer2 = md_alloc_sameplace(DIMS, sc_dims, CFL_SIZE, src);
complex float* buffer3 = md_alloc_sameplace(DIMS, dims, CFL_SIZE, src);
long pos[] = { [0 ... DIMS - 1] = 0 };
long strides_single_map[DIMS];
md_calc_strides(DIMS, strides_single_map, single_map_dims, CFL_SIZE);
long strides_sc[DIMS];
md_calc_strides(DIMS, strides_sc, sc_dims, CFL_SIZE);
long strides[DIMS];
md_calc_strides(DIMS, strides, dims, CFL_SIZE);
md_clear(DIMS, dims, dst, CFL_SIZE);
for (long k = 0; k < data->nc; k++) {
md_clear(DIMS, single_map_dims, single_map, CFL_SIZE);
md_clear(DIMS, sc_dims, buffer1, CFL_SIZE);
md_clear(DIMS, sc_dims, buffer2, CFL_SIZE);
md_clear(DIMS, dims, buffer3, CFL_SIZE);
// Extract coil k's maps.
pos[COIL_DIM] = k;
md_slice(DIMS, COIL_FLAG, pos, map_dims, single_map, data->maps, CFL_SIZE);
pos[COIL_DIM] = 0;
// buffer1 = sum over maps of src * map_k.
md_zfmac2(DIMS, dims, strides_sc, buffer1, strides, src, strides_single_map, single_map);
operator_apply(nrm, DIMS, sc_dims, buffer2, DIMS, sc_dims, buffer1);
// buffer3 = buffer2 * conj(map_k), broadcast back over MAPS_DIM.
md_zfmacc2(DIMS, dims, strides, buffer3, strides_sc, buffer2, strides_single_map, single_map);
md_zadd(DIMS, dims, dst, dst, buffer3);
}
md_free(single_map);
md_free(buffer1);
md_free(buffer2);
md_free(buffer3);
}
static void multc_free(const linop_data_t* _data)
{
const struct multc_s* data = CAST_DOWN(multc_s, _data);
xfree(data);
}
// Wrap a single-channel operator into a multi-channel operator that
// multiplies by coil sensitivity maps coil-by-coil.
//
// nc    - number of coils; md - number of maps; maps - borrowed pointer.
// sc_op - single-channel operator (borrowed; caller keeps ownership).
static struct linop_s* linop_multc_create(long nc, long md, const complex float* maps, const struct linop_s* sc_op)
{
	PTR_ALLOC(struct multc_s, data);
	SET_TYPEID(multc_s, data);

	data->nc = nc;
	data->md = md;
	data->maps = maps;
	data->sc_op = sc_op;

	const long* dom_dims = linop_domain(sc_op)->dims;
	const long* cod_dims = linop_codomain(sc_op)->dims;

	// Domain: single-channel input extended by md along MAPS_DIM.
	long inp_dims[] = { [0 ... DIMS - 1] = 1 };
	md_copy_dims(DIMS, inp_dims, dom_dims);
	inp_dims[MAPS_DIM] = md;

	// Codomain: single-channel output extended by nc along dim 1.
	long out_dims[] = { [0 ... DIMS - 1] = 1 };
	md_copy_dims(DIMS, out_dims, cod_dims);
	out_dims[1] = nc;

	return linop_create(DIMS, out_dims, DIMS, inp_dims, CAST_UP(PTR_PASS(data)),
		multc_apply, multc_adjoint, multc_normal, NULL, multc_free);
}
/* Resize operator. */
/* Resize operator: changes the readout extent from sx to wx via
 * linop_resize_create, leaving all other dims untouched. */
static const struct linop_s* linop_wavereshape_create(long wx, long sx, long sy, long sz, long nc, long tk)
{
	long in_dims[] = { [0 ... DIMS - 1] = 1 };
	in_dims[0] = sx;
	in_dims[1] = sy;
	in_dims[2] = sz;
	in_dims[3] = nc;
	in_dims[6] = tk;

	long out_dims[DIMS];
	md_copy_dims(DIMS, out_dims, in_dims);
	out_dims[0] = wx;

	return linop_resize_create(DIMS, out_dims, in_dims);
}
/* Fx operator. */
/* Fx operator: FFT along the readout dimension (READ_FLAG).
 * Uses the centered transform (fftc) when requested. */
static const struct linop_s* linop_fx_create(long wx, long sy, long sz, long nc, long tk, bool centered)
{
	long dims[] = { [0 ... DIMS - 1] = 1 };
	dims[0] = wx;
	dims[1] = sy;
	dims[2] = sz;
	dims[3] = nc;
	dims[6] = tk;

	return centered ? linop_fftc_create(DIMS, dims, READ_FLAG)
			: linop_fft_create(DIMS, dims, READ_FLAG);
}
/* Wave operator. */
/* Wave operator: point-wise multiplication with the wave PSF.
 * When the PSF varies over coefficients (psf_tk > 1), the COEFF_DIM
 * is included in the diagonal flags so each coefficient gets its own PSF. */
static const struct linop_s* linop_wave_create(long wx, long sy, long sz, long nc, long tk, long psf_tk, complex float* psf)
{
	long dims[] = { [0 ... DIMS - 1] = 1 };
	dims[0] = wx;
	dims[1] = sy;
	dims[2] = sz;
	dims[3] = nc;
	dims[6] = tk;

	if (psf_tk > 1)
		return linop_cdiag_create(DIMS, dims, FFT_FLAGS | COEFF_FLAG, psf);

	return linop_cdiag_create(DIMS, dims, FFT_FLAGS, psf);
}
/* Fyz operator. */
/* Fyz operator: FFT along both phase-encode dimensions
 * (PHS1_FLAG | PHS2_FLAG); centered variant when requested. */
static const struct linop_s* linop_fyz_create(long wx, long sy, long sz, long nc, long tk, bool centered)
{
	long dims[] = { [0 ... DIMS - 1] = 1 };
	dims[0] = wx;
	dims[1] = sy;
	dims[2] = sz;
	dims[3] = nc;
	dims[6] = tk;

	return centered ? linop_fftc_create(DIMS, dims, PHS1_FLAG|PHS2_FLAG)
			: linop_fft_create(DIMS, dims, PHS1_FLAG|PHS2_FLAG);
}
/* Construction sampling temporal kernel.*/
static void construct_kernel(
long mask_dims[DIMS], complex float* mask,
long phi_dims[DIMS], complex float* phi,
long kern_dims[DIMS], complex float* kern)
{
long sy = mask_dims[1];
long sz = mask_dims[2];
long tf = phi_dims[5];
long tk = phi_dims[6];
long cvec_dims[] = { [0 ... DIMS - 1] = 1 };
cvec_dims[6] = tk;
long cvec_str[DIMS];
md_calc_strides(DIMS, cvec_str, cvec_dims, CFL_SIZE);
complex float cvec[tk];
long tvec_dims[] = { [0 ... DIMS - 1] = 1 };
tvec_dims[5] = tf;
long tvec_str[DIMS];
md_calc_strides(DIMS, tvec_str, tvec_dims, CFL_SIZE);
complex float mvec[tf];
complex float tvec1[tf];
complex float tvec2[tf];
long phi_str[DIMS];
md_calc_strides(DIMS, phi_str, phi_dims, CFL_SIZE);
long out_dims[] = { [0 ... DIMS - 1] = 1 };
out_dims[0] = tk;
out_dims[1] = sy;
out_dims[2] = sz;
out_dims[3] = tk;
complex float* out = md_calloc(DIMS, out_dims, CFL_SIZE);
for (int y = 0; y < sy; y ++) {
for (int z = 0; z < sz; z ++) {
for (int t = 0; t < tf; t ++)
mvec[t] = mask[(y + sy * z) + (sy * sz) * t];
for (int t = 0; t < tk; t ++) {
cvec[t] = 1;
md_clear(DIMS, tvec_dims, tvec1, CFL_SIZE);
md_zfmac2(DIMS, phi_dims, tvec_str, tvec1, cvec_str, cvec, phi_str, phi);
md_clear(DIMS, tvec_dims, tvec2, CFL_SIZE);
md_zfmac2(DIMS, tvec_dims, tvec_str, tvec2, tvec_str, tvec1, tvec_str, mvec);
md_clear(DIMS, cvec_dims, out + y * tk + z * sy * tk + t * sy * sz * tk, CFL_SIZE);
md_zfmacc2(DIMS, phi_dims, cvec_str, out + y * tk + z * sy * tk + t * sy * sz * tk,
tvec_str, tvec2, phi_str, phi);
cvec[t] = 0;
}
}
}
unsigned int permute_order[DIMS] = {4, 1, 2, 5, 6, 7, 3, 0};
for (unsigned int i = 8; i < DIMS; i++)
permute_order[i] = i;
md_permute(DIMS, permute_order, kern_dims, kern, out_dims, out, CFL_SIZE);
md_free(out);
}
/* Apply FFT-modulation phase corrections.
 *
 * Modulates the data table along readout and the maps along all spatial
 * dims via fftmod, then applies per-readout-line phase factors derived
 * from each line's (y, z) position in the reorder table, which compensate
 * for the half-FOV shift of the uncentered phase-encode FFTs.
 *
 * sy, sz       - phase-encode matrix sizes.
 * reorder      - reorder table; column 0 holds y, column 1 holds z indices.
 * table        - data table, modified in place; [0] = readout, [1] = coils.
 * maps         - sensitivity maps, modified in place.
 */
static void fftmod_apply(long sy, long sz,
long reorder_dims[DIMS], complex float* reorder,
long table_dims[DIMS], complex float* table,
long maps_dims[DIMS], complex float* maps)
{
	long wx = table_dims[0];
	long nc = table_dims[1];

	fftmod(DIMS, table_dims, READ_FLAG, table, table);
	fftmod(DIMS, maps_dims, FFT_FLAGS, maps, maps);

	double dy = ((double) sy/2)/((double) sy);
	double dz = ((double) sz/2)/((double) sz);

	// Fix: was "[0 ... DIMS]", which declares DIMS + 1 elements — one more
	// than every md_* call below reads. Use DIMS - 1 like the rest of the file.
	long dims[] = { [0 ... DIMS - 1] = 1 };
	dims[0] = wx;
	dims[1] = nc;

	long n = reorder_dims[0];

	for (long k = 0; k < n; k++) {

		// (y, z) phase-encode position of readout line k.
		long y = lround(creal(reorder[k]));
		long z = lround(creal(reorder[k + n]));

		complex float py = cexp(2.i * M_PI * dy * y);
		complex float pz = cexp(2.i * M_PI * dz * z);

		// Scale line k (all readout samples, all coils) by the phase factor.
		md_zsmul(DIMS, dims, table + k * wx * nc, table + k * wx * nc, py * pz);
	}
}
// Entry point for the wave-shuffling reconstruction tool.
// argv[1..5]: maps, wave PSF, temporal basis phi, reorder table, data table.
// argv[6]: output (coefficients, or table/k-space in -F / -K modes).
int main_wshfl(int argc, char* argv[argc])
{
double start_time = timestamp();
// ---- Option parsing ------------------------------------------------
struct opt_reg_s ropts;
opt_reg_init(&ropts);
int maxiter = 30;
int cgiter = 10;
int blksize = 8;
float rho = 1;
bool hgwld = false;
bool ksp = false;
const char* fwd = NULL;
const char* x0 = NULL;
bool use_gpu = false;
bool dcx = false;
const struct opt_s opts[] = {
{ 'R', NULL, true, opt_reg, &ropts, "<T>:A:B:C\tGeneralized regularization options. (-Rh for help)" },
OPT_INT( 'b', &blksize, "blkdim", "Block size for locally low rank."),
OPT_INT( 'i', &maxiter, "mxiter", "Maximum number of iterations."),
OPT_INT( 'j', &cgiter, "cgiter", "Maximum number of CG iterations in ADMM."),
OPT_FLOAT( 's', &rho, "admrho", "ADMM Rho value."),
OPT_STRING( 'F', &fwd, "frwrd", "Go from shfl-coeffs to data-table. Pass in coeffs path."),
OPT_STRING( 'O', &x0, "initl", "Initialize reconstruction with guess."),
OPT_SET( 'g', &use_gpu, "use GPU."),
OPT_SET( 'K', &ksp, "Go from data-table to shuffling basis k-space."),
OPT_SET( 'H', &hgwld, "Use hogwild."),
OPT_SET( 'v', &dcx, "Split coefficients to real and imaginary components."),
};
cmdline(&argc, argv, 6, 6, usage_str, help_str, ARRAY_SIZE(opts), opts);
struct admm_conf admm = { false, false, false, rho, cgiter };
// ---- Load input data -----------------------------------------------
debug_printf(DP_INFO, "Loading data... ");
long maps_dims[DIMS];
complex float* maps = load_cfl(argv[1], DIMS, maps_dims);
long wave_dims[DIMS];
complex float* wave = load_cfl(argv[2], DIMS, wave_dims);
long phi_dims[DIMS];
complex float* phi = load_cfl(argv[3], DIMS, phi_dims);
long reorder_dims[DIMS];
complex float* reorder = load_cfl(argv[4], DIMS, reorder_dims);
long table_dims[DIMS];
complex float* table = load_cfl(argv[5], DIMS, table_dims);
debug_printf(DP_INFO, "Done.\n");
(use_gpu ? num_init_gpu : num_init)();
// Problem sizes extracted from the inputs.
int wx = wave_dims[0];
int sx = maps_dims[0];
int sy = maps_dims[1];
int sz = maps_dims[2];
int nc = maps_dims[3];
int md = maps_dims[4];
int tf = phi_dims[5];
int tk = phi_dims[6];
// ---- Build sampling mask and temporal kernel -----------------------
debug_printf(DP_INFO, "Constructing sampling mask from reorder table... ");
long mask_dims[] = { [0 ... DIMS - 1] = 1 };
mask_dims[1] = sy;
mask_dims[2] = sz;
mask_dims[5] = tf;
complex float* mask = md_calloc(DIMS, mask_dims, CFL_SIZE);
construct_mask(reorder_dims, reorder, mask_dims, mask);
debug_printf(DP_INFO, "Done.\n");
debug_printf(DP_INFO, "Constructing sampling-temporal kernel... ");
long kernel_dims[] = { [0 ... DIMS - 1] = 1 };
kernel_dims[1] = sy;
kernel_dims[2] = sz;
kernel_dims[6] = tk;
kernel_dims[7] = tk;
complex float* kernel = md_calloc(DIMS, kernel_dims, CFL_SIZE);
construct_kernel(mask_dims, mask, phi_dims, phi, kernel_dims, kernel);
md_free(mask);
debug_printf(DP_INFO, "Done.\n");
// Coefficient image dims; dim 8 = 2 when splitting real/imag (-v).
long coeff_dims[] = { [0 ... DIMS - 1] = 1 };
coeff_dims[0] = sx;
coeff_dims[1] = sy;
coeff_dims[2] = sz;
coeff_dims[4] = md;
coeff_dims[6] = tk;
coeff_dims[8] = dcx ? 2 : 1;
// ---- -K mode: table -> shuffling-basis k-space via K^H --------------
if (ksp == true) {
const struct linop_s* Knc = linop_kern_create(use_gpu, reorder_dims, reorder, phi_dims, phi, kernel_dims, kernel, table_dims);
long ksp_dims[] = { [0 ... DIMS - 1] = 1 };
ksp_dims[0] = wx;
ksp_dims[1] = sy;
ksp_dims[2] = sz;
ksp_dims[3] = nc;
ksp_dims[6] = tk;
complex float* res = create_cfl(argv[6], DIMS, ksp_dims);
operator_apply(Knc->adjoint, DIMS, ksp_dims, res, DIMS, table_dims, table);
linop_free(Knc);
md_free(kernel);
unmap_cfl(DIMS, maps_dims, maps);
unmap_cfl(DIMS, wave_dims, wave);
unmap_cfl(DIMS, phi_dims, phi);
unmap_cfl(DIMS, reorder_dims, reorder);
unmap_cfl(DIMS, table_dims, table);
unmap_cfl(DIMS, ksp_dims, res);
return 0;
}
// ---- Build forward model A = multc(K . Fyz . W . Fx . R) -----------
debug_printf(DP_INFO, "Creating single channel linear operators:\n");
double t1;
double t2;
t1 = timestamp();
const struct linop_s* R = linop_wavereshape_create(wx, sx, sy, sz, 1, tk);
t2 = timestamp();
debug_printf(DP_INFO, "\tR: %f seconds.\n", t2 - t1);
t1 = timestamp();
const struct linop_s* Fx = linop_fx_create(wx, sy, sz, 1, tk, false);
t2 = timestamp();
debug_printf(DP_INFO, "\tFx: %f seconds.\n", t2 - t1);
t1 = timestamp();
const struct linop_s* W = linop_wave_create(wx, sy, sz, 1, tk, wave_dims[COEFF_DIM], wave);
t2 = timestamp();
debug_printf(DP_INFO, "\tW: %f seconds.\n", t2 - t1);
t1 = timestamp();
const struct linop_s* Fyz = linop_fyz_create(wx, sy, sz, 1, tk, false);
t2 = timestamp();
debug_printf(DP_INFO, "\tFyz: %f seconds.\n", t2 - t1);
t1 = timestamp();
long single_channel_table_dims[] = { [0 ... DIMS - 1] = 1 };
md_copy_dims(DIMS, single_channel_table_dims, table_dims);
single_channel_table_dims[1] = 1;
const struct linop_s* K = linop_kern_create(use_gpu, reorder_dims, reorder, phi_dims, phi, kernel_dims, kernel, single_channel_table_dims);
t2 = timestamp();
debug_printf(DP_INFO, "\tK: %f seconds.\n", t2 - t1);
struct linop_s* A_sc = linop_chain_FF(linop_chain_FF(linop_chain_FF(linop_chain_FF(
R, Fx), W), Fyz), K);
debug_printf(DP_INFO, "Single channel forward operator information:\n");
print_opdims(A_sc);
struct linop_s* A = linop_multc_create(nc, md, maps, A_sc);
debug_printf(DP_INFO, "Overall forward linear operator information:\n");
print_opdims(A);
// ---- -F mode: coefficients -> data table (centered FFTs) -----------
if (fwd != NULL) {
debug_printf(DP_INFO, "Going from coefficients to data table... ");
complex float* coeffs_to_fwd = load_cfl(fwd, DIMS, coeff_dims);
complex float* table_forward = create_cfl(argv[6], DIMS, table_dims);
const struct linop_s* R = linop_wavereshape_create(wx, sx, sy, sz, 1, tk);
const struct linop_s* CFx = linop_fx_create( wx, sy, sz, 1, tk, true);
const struct linop_s* W = linop_wave_create(wx, sy, sz, 1, tk, wave_dims[COEFF_DIM], wave);
const struct linop_s* CFyz = linop_fyz_create(wx, sy, sz, 1, tk, true);
const struct linop_s* K = linop_kern_create(use_gpu, reorder_dims, reorder, phi_dims, phi, kernel_dims, kernel, single_channel_table_dims);
struct linop_s* AC_sc = linop_chain_FF(linop_chain_FF(linop_chain_FF(linop_chain_FF(
R, CFx), W), CFyz), K);
struct linop_s* AC = linop_multc_create(nc, md, maps, AC_sc);
operator_apply(AC->forward, DIMS, table_dims, table_forward, DIMS, coeff_dims, coeffs_to_fwd);
debug_printf(DP_INFO, "Done.\n");
debug_printf(DP_INFO, "Cleaning up... ");
linop_free(AC);
linop_free(AC_sc);
md_free(kernel);
unmap_cfl(DIMS, maps_dims, maps);
unmap_cfl(DIMS, wave_dims, wave);
unmap_cfl(DIMS, phi_dims, phi);
unmap_cfl(DIMS, reorder_dims, reorder);
unmap_cfl(DIMS, table_dims, table);
unmap_cfl(DIMS, table_dims, table_forward);
debug_printf(DP_INFO, "Done.\n");
return 0;
}
// ---- Optional real/imag decomposition (-v) -------------------------
if (dcx) {
debug_printf(DP_INFO, "\tSplitting result into real and imaginary components.\n");
struct linop_s* tmp = A;
struct linop_s* dcxop = linop_decompose_complex_create(DIMS, ITER_DIM, linop_domain(A)->dims);
A = linop_chain(dcxop, tmp);
debug_printf(DP_INFO, "New operator information:\n");
print_opdims(A);
linop_free(dcxop);
linop_free(tmp);
}
// ---- Preprocess data: normalize table and apply fftmod phases ------
debug_printf(DP_INFO, "Normalizing data table and applying fftmod to table and maps... ");
float norm = md_znorm(DIMS, table_dims, table);
md_zsmul(DIMS, table_dims, table, table, 1. / norm);
fftmod_apply(sy, sz, reorder_dims, reorder, table_dims, table, maps_dims, maps);
debug_printf(DP_INFO, "Done.\n");
// ---- Configure regularizers and iterative algorithm (ADMM) ---------
debug_printf(DP_INFO, "Preparing reconstruction operator... ");
const struct operator_p_s* thresh_ops[NUM_REGS] = { NULL };
const struct linop_s* trafos[NUM_REGS] = { NULL };
opt_reg_configure(DIMS, coeff_dims, &ropts, thresh_ops, trafos, blksize, 1, use_gpu);
int nr_penalties = ropts.r;
struct reg_s* regs = ropts.regs;
enum algo_t algo = ALGO_ADMM;
struct iter it = italgo_config(algo, nr_penalties, regs, maxiter, -1, hgwld, false, admm, 1, false);
debug_printf(DP_INFO, "Done.\n");
complex float* init = NULL;
if (x0 != NULL) {
debug_printf(DP_INFO, "Loading in initial guess... ");
init = load_cfl(x0, DIMS, coeff_dims);
debug_printf(DP_INFO, "Done.\n");
}
// ---- Solve the regularized least-squares problem -------------------
debug_printf(DP_INFO, "Reconstruction... ");
complex float* recon = create_cfl(argv[6], DIMS, coeff_dims);
struct lsqr_conf lsqr_conf = lsqr_defaults;
lsqr_conf.lambda = 0.;
lsqr_conf.it_gpu = use_gpu;
double recon_start = timestamp();
const struct operator_p_s* J = lsqr2_create(&lsqr_conf, it.italgo, it.iconf, (const float*) init, A, NULL, nr_penalties, thresh_ops, trafos, NULL);
operator_p_apply(J, 1., DIMS, coeff_dims, recon, DIMS, table_dims, table);
// Undo the earlier normalization of the table.
md_zsmul(DIMS, coeff_dims, recon, recon, norm);
double recon_end = timestamp();
debug_printf(DP_INFO, "Done.\nReconstruction time: %f seconds.\n", recon_end - recon_start);
// ---- Cleanup -------------------------------------------------------
debug_printf(DP_INFO, "Cleaning up and saving result... ");
operator_p_free(J);
linop_free(A);
linop_free(A_sc);
md_free(kernel);
unmap_cfl(DIMS, maps_dims, maps);
unmap_cfl(DIMS, wave_dims, wave);
unmap_cfl(DIMS, phi_dims, phi);
unmap_cfl(DIMS, reorder_dims, reorder);
unmap_cfl(DIMS, table_dims, table);
unmap_cfl(DIMS, coeff_dims, recon);
if (x0 != NULL)
unmap_cfl(DIMS, coeff_dims, init);
debug_printf(DP_INFO, "Done.\n");
double end_time = timestamp();
debug_printf(DP_INFO, "Total time: %f seconds.\n", end_time - start_time);
return 0;
}
|
fgm_interface.omp.c |
#include "fgm.h"
#include "omp.h"
/* Launch fine-grain matching over a batch of coarse-grain results.
 *
 * list / listLen - reference list handed to each fgm call.
 * cgmData        - array of cgmDLen coarse-grain results.
 *
 * Runs fgm over the batch in parallel (OpenMP), then serially prints every
 * mutation found and frees the per-read results.
 *
 * Returns 0 on success, -1 on allocation failure.
 */
int fgmLaunch(uint32_t * list, uint32_t listLen, struct cgmResult * cgmData, int cgmDLen)
{
	// One result pointer per batch element, filled in by fgm.
	mData ** mutation = malloc(sizeof(mData*) * cgmDLen);
	if (mutation == NULL)
		return -1;

	omp_set_num_threads(8);

	#pragma omp parallel for
	for (int x = 0; x < cgmDLen; x++) {
		// Fix: cgmData is an array of structs, so members are accessed with
		// '.'; the previous '->' did not match the declared parameter type.
		fgm(&(mutation[x]), list, listLen, 48,
			cgmData[x].matches, cgmData[x].length, cgmData[x].read);
	}

	// Serial printout (the former "#pragma omp single" was outside any
	// parallel region, so this was already effectively serial).
	for (int x = 0; x < cgmDLen; x++) {

		if (mutation[x]->len != -1) {

			printf("%d\n", mutation[x]->len);

			for (int i = 0; i < mutation[x]->len; i++) {
				// Fix: each printf previously passed a spurious leading
				// mutation[x]->ins[i] argument, shifting every value: DEL
				// printed the type code as the location, and SNP/INS printed
				// the type code as the mutation character.
				switch (mutation[x]->ins[i]) {
				case DEL:
					printf("Type: DEL Location: %u\n", mutation[x]->locs[i]);
					break;
				case SNP:
					printf("Type: SNP Mutation: %c Location: %u\n", mutation[x]->mods[i], mutation[x]->locs[i]);
					break;
				case INS:
					printf("Type: INS Mutation: %c Location: %u\n", mutation[x]->mods[i], mutation[x]->locs[i]);
					break;
				default:
					break; // unknown type: silently skip
				}
			}

			// Fix the documented leak: release the arrays fgm allocated for
			// this read. Only freed on the success path — when len == -1 the
			// arrays may not have been allocated (TODO: confirm against fgm).
			free(mutation[x]->mods);
			free(mutation[x]->locs);
			free(mutation[x]->ins);
		}

		// fgm always allocates the mData itself (its len field is read above).
		free(mutation[x]);
	}

	free(mutation); // free the pointer array
	return 0;
}
|
GB_msort_1.c | //------------------------------------------------------------------------------
// GB_msort_1: sort a 1-by-n list of integers
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// A parallel mergesort of an array of 1-by-n integers.
#include "GB_msort_1.h"
//------------------------------------------------------------------------------
// GB_msort_1_binary_search: binary search for the pivot
//------------------------------------------------------------------------------
// The Pivot value is Y [pivot], and a binary search for the Pivot is made in
// the array X [p_pstart...p_end-1], which is sorted in non-decreasing order on
// input. The return value is pleft, where
//
// X [p_start ... pleft-1] <= Pivot and
// X [pleft ... p_end-1] >= Pivot holds.
//
// pleft is returned in the range p_start to p_end. If pleft is p_start, then
// the Pivot is smaller than all entries in X [p_start...p_end-1], and the left
// list X [p_start...pleft-1] is empty. If pleft is p_end, then the Pivot is
// larger than all entries in X [p_start...p_end-1], and the right list X
// [pleft...p_end-1] is empty.
// Binary search of X [p_start...p_end-1] for the pivot Y [pivot].
// Returns pleft in [p_start, p_end] such that
//      X [p_start ... pleft-1] <= Pivot   and
//      X [pleft   ... p_end-1] >= Pivot.
// pleft == p_start means the pivot is below every entry; pleft == p_end
// means it is above every entry.
static int64_t GB_msort_1_binary_search // return pleft
(
    const int64_t *restrict Y_0, // Pivot is Y [pivot]
    const int64_t pivot,
    const int64_t *restrict X_0, // search in X [p_start..p_end_-1]
    const int64_t p_start,
    const int64_t p_end
)
{

    //--------------------------------------------------------------------------
    // narrow the search window to a single candidate position
    //--------------------------------------------------------------------------

    int64_t lo = p_start ;
    int64_t hi = p_end - 1 ;
    while (lo < hi)
    {
        int64_t mid = (lo + hi) >> 1 ;
        if (GB_lt_1 (X_0, mid, Y_0, pivot))
        {
            // X [mid] < Pivot: the answer lies strictly to the right of mid
            lo = mid + 1 ;
        }
        else
        {
            // X [mid] >= Pivot: mid remains a candidate
            hi = mid ;
        }
    }

    // the window is now one item (lo == hi) or empty (lo == hi + 1)
    ASSERT (lo == hi || lo == hi + 1) ;

    //--------------------------------------------------------------------------
    // adjust the final position
    //--------------------------------------------------------------------------

    // If the single remaining entry is strictly below the pivot, the split
    // point lies just past it. If it equals the pivot (duplicates may match
    // any equal entry), or is above it, lo already is the split point.
    if (lo == hi)
    {
        bool found = GB_eq_1 (X_0, lo, Y_0, pivot) ;
        if (!found && GB_lt_1 (X_0, lo, Y_0, pivot))
        {
            lo++ ;
        }
    }

    //--------------------------------------------------------------------------
    // return result
    //--------------------------------------------------------------------------

    // Whether or not the pivot was found:
    //      X [p_start ... lo-1] <= Pivot and X [lo ... p_end-1] >= Pivot.
    return (lo) ;
}
//------------------------------------------------------------------------------
// GB_msort_1_create_merge_tasks
//------------------------------------------------------------------------------
// Recursively constructs ntasks tasks to merge two arrays, Left and Right,
// into Sresult, where Left is L [pL_start...pL_end-1], Right is R
// [pR_start...pR_end-1], and Sresult is S [pS_start...pS_start+total_work-1],
// and where total_work is the total size of Left and Right.
//
// Task tid will merge L [L_task [tid] ... L_task [tid] + L_len [tid] - 1] and
// R [R_task [tid] ... R_task [tid] + R_len [tid] -1] into the merged output
// array S [S_task [tid] ... ]. The task tids created are t0 to
// t0+ntasks-1.
// Recursively construct ntasks merge tasks covering the merge of
// L [pL_start..pL_end-1] and R [pR_start..pR_end-1] into
// S [pS_start..pS_start+total_work-1]. Task tid merges
// L [L_task [tid] .. +L_len [tid]-1] with R [R_task [tid] .. +R_len [tid]-1]
// into S [S_task [tid] ..]; tids assigned are t0 .. t0+ntasks-1.
void GB_msort_1_create_merge_tasks
(
    // output:
    int64_t *restrict L_task, // L_task [t0...t0+ntasks-1] computed
    int64_t *restrict L_len, // L_len [t0...t0+ntasks-1] computed
    int64_t *restrict R_task, // R_task [t0...t0+ntasks-1] computed
    int64_t *restrict R_len, // R_len [t0...t0+ntasks-1] computed
    int64_t *restrict S_task, // S_task [t0...t0+ntasks-1] computed
    // input:
    const int t0, // first task tid to create
    const int ntasks, // # of tasks to create
    const int64_t pS_start, // merge into S [pS_start...]
    const int64_t *restrict L_0, // Left = L [pL_start...pL_end-1]
    const int64_t pL_start,
    const int64_t pL_end,
    const int64_t *restrict R_0, // Right = R [pR_start...pR_end-1]
    const int64_t pR_start,
    const int64_t pR_end
)
{

    //--------------------------------------------------------------------------
    // get problem size
    //--------------------------------------------------------------------------

    int64_t nleft = pL_end - pL_start ; // size of Left array
    int64_t nright = pR_end - pR_start ; // size of Right array
    int64_t total_work = nleft + nright ; // total work to do
    ASSERT (ntasks >= 1) ;
    ASSERT (total_work > 0) ;

    //--------------------------------------------------------------------------
    // create the tasks
    //--------------------------------------------------------------------------

    if (ntasks == 1)
    {

        //----------------------------------------------------------------------
        // a single task will merge all of Left and Right into Sresult
        //----------------------------------------------------------------------

        L_task [t0] = pL_start ; L_len [t0] = nleft ;
        R_task [t0] = pR_start ; R_len [t0] = nright ;
        S_task [t0] = pS_start ;

    }
    else
    {

        //----------------------------------------------------------------------
        // partition the Left and Right arrays for multiple merge tasks
        //----------------------------------------------------------------------

        // Split the larger list at its midpoint, then binary-search the
        // smaller list for that pivot, so both halves of the merge preserve
        // sorted order.
        int64_t pleft, pright ;
        if (nleft >= nright)
        {
            // split Left in half, and search for its pivot in Right
            pleft = (pL_end + pL_start) >> 1 ;
            pright = GB_msort_1_binary_search (
                        L_0, pleft,
                        R_0, pR_start, pR_end) ;
        }
        else
        {
            // split Right in half, and search for its pivot in Left
            pright = (pR_end + pR_start) >> 1 ;
            pleft = GB_msort_1_binary_search (
                        R_0, pright,
                        L_0, pL_start, pL_end) ;
        }

        //----------------------------------------------------------------------
        // partition the tasks according to the work of each partition
        //----------------------------------------------------------------------

        // work0 is the total work in the first partition
        int64_t work0 = (pleft - pL_start) + (pright - pR_start) ;
        // assign tasks proportionally to work, clamped so each partition
        // receives at least one task
        int ntasks0 = (int) round ((double) ntasks *
                      (((double) work0) / ((double) total_work))) ;

        // ensure at least one task is assigned to each partition
        ntasks0 = GB_IMAX (ntasks0, 1) ;
        ntasks0 = GB_IMIN (ntasks0, ntasks-1) ;
        int ntasks1 = ntasks - ntasks0 ;

        //----------------------------------------------------------------------
        // assign ntasks0 to the first half
        //----------------------------------------------------------------------

        // ntasks0 tasks merge L [pL_start...pleft-1] and R [pR_start..pright-1]
        // into the result S [pS_start...work0-1].

        GB_msort_1_create_merge_tasks (
            L_task, L_len, R_task, R_len, S_task, t0, ntasks0, pS_start,
            L_0, pL_start, pleft,
            R_0, pR_start, pright) ;

        //----------------------------------------------------------------------
        // assign ntasks1 to the second half
        //----------------------------------------------------------------------

        // ntasks1 tasks merge L [pleft...pL_end-1] and R [pright...pR_end-1]
        // into the result S [pS_start+work0...pS_start+total_work].

        int t1 = t0 + ntasks0 ; // first task id of the second set of tasks
        int64_t pS_start1 = pS_start + work0 ; // 2nd set starts here in S
        GB_msort_1_create_merge_tasks (
            L_task, L_len, R_task, R_len, S_task, t1, ntasks1, pS_start1,
            L_0, pleft, pL_end,
            R_0, pright, pR_end) ;
    }
}
//------------------------------------------------------------------------------
// GB_msort_1_merge: merge two sorted lists via a single thread
//------------------------------------------------------------------------------
// merge Left [0..nleft-1] and Right [0..nright-1] into S [0..nleft+nright-1] */
static void GB_msort_1_merge
(
    int64_t *restrict S_0,              // output of length nleft + nright
    const int64_t *restrict Left_0,     // left input of length nleft
    const int64_t nleft,
    const int64_t *restrict Right_0,    // right input of length nright
    const int64_t nright
)
{
    // standard two-finger merge: take the smaller head element of the two
    // sorted inputs until one input is exhausted
    int64_t iL = 0, iR = 0, iS = 0 ;
    while (iL < nleft && iR < nright)
    {
        if (GB_lt_1 (Left_0, iL,
            Right_0, iR))
        {
            // S [iS] = Left [iL++]
            S_0 [iS++] = Left_0 [iL++] ;
        }
        else
        {
            // S [iS] = Right [iR++]
            S_0 [iS++] = Right_0 [iR++] ;
        }
    }
    // at most one of the two inputs still has entries; append them to S
    if (iL < nleft)
    {
        memcpy (S_0 + iS, Left_0 + iL, (nleft - iL) * sizeof (int64_t)) ;
    }
    else if (iR < nright)
    {
        memcpy (S_0 + iS, Right_0 + iR, (nright - iR) * sizeof (int64_t)) ;
    }
}
//------------------------------------------------------------------------------
// GB_msort_1: parallel mergesort
//------------------------------------------------------------------------------
GB_PUBLIC
GrB_Info GB_msort_1 // sort array A of size 1-by-n
(
int64_t *restrict A_0, // size n array
const int64_t n,
int nthreads // # of threads to use
)
{
//--------------------------------------------------------------------------
// handle small problems with a single thread
//--------------------------------------------------------------------------
if (nthreads <= 1 || n <= GB_BASECASE)
{
// sequential quicksort
GB_qsort_1 (A_0, n) ;
return (GrB_SUCCESS) ;
}
//--------------------------------------------------------------------------
// determine # of tasks
//--------------------------------------------------------------------------
// determine the number of levels to create, which must always be an
// even number. The # of levels is chosen to ensure that the # of leaves
// of the task tree is between 4*nthreads and 16*nthreads.
// 2 to 4 threads: 4 levels, 16 qsort leaves
// 5 to 16 threads: 6 levels, 64 qsort leaves
// 17 to 64 threads: 8 levels, 256 qsort leaves
// 65 to 256 threads: 10 levels, 1024 qsort leaves
// 257 to 1024 threads: 12 levels, 4096 qsort leaves
// ...
int k = (int) (2 + 2 * ceil (log2 ((double) nthreads) / 2)) ;
int ntasks = 1 << k ;
//--------------------------------------------------------------------------
// allocate workspace
//--------------------------------------------------------------------------
// one allocation, carved below into: W_0 [n], five task arrays of
// size ntasks each, and Slice [ntasks+1] — hence n + 6*ntasks + 1 total
int64_t *restrict W = NULL ; size_t W_size = 0 ;
W = GB_MALLOC_WERK (n + 6*ntasks + 1, int64_t, &W_size) ;
if (W == NULL)
{
// out of memory
return (GrB_OUT_OF_MEMORY) ;
}
int64_t *T = W ;
int64_t *restrict W_0 = T ; T += n ;
int64_t *restrict L_task = T ; T += ntasks ;
int64_t *restrict L_len = T ; T += ntasks ;
int64_t *restrict R_task = T ; T += ntasks ;
int64_t *restrict R_len = T ; T += ntasks ;
int64_t *restrict S_task = T ; T += ntasks ;
int64_t *restrict Slice = T ; T += (ntasks+1) ;
//--------------------------------------------------------------------------
// partition and sort the leaves
//--------------------------------------------------------------------------
// Slice [tid] .. Slice [tid+1]-1 is the range of A handled by leaf tid
GB_eslice (Slice, n, ntasks) ;
int tid ;
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
for (tid = 0 ; tid < ntasks ; tid++)
{
int64_t leaf = Slice [tid] ;
int64_t leafsize = Slice [tid+1] - leaf ;
GB_qsort_1 (A_0 + leaf, leafsize) ;
}
//--------------------------------------------------------------------------
// merge each level
//--------------------------------------------------------------------------
// each pass of this loop performs two merge levels (A->W then W->A),
// so the sorted result always ends up back in A when k reaches 0
int nt = 1 ;
for ( ; k >= 2 ; k -= 2)
{
//----------------------------------------------------------------------
// merge level k into level k-1, from A into W
//----------------------------------------------------------------------
// TODO: skip k and k-1 for each group of 4 sublists of A if they are
// already sorted with respect to each other.
// this could be done in parallel if ntasks was large
for (int tid = 0 ; tid < ntasks ; tid += 2*nt)
{
// create 2*nt tasks to merge two A sublists into one W sublist
GB_msort_1_create_merge_tasks (
L_task, L_len, R_task, R_len, S_task, tid, 2*nt, Slice [tid],
A_0, Slice [tid], Slice [tid+nt],
A_0, Slice [tid+nt], Slice [tid+2*nt]) ;
}
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
for (tid = 0 ; tid < ntasks ; tid++)
{
// merge A [pL...pL+nL-1] and A [pR...pR+nR-1] into W [pS..]
int64_t pL = L_task [tid], nL = L_len [tid] ;
int64_t pR = R_task [tid], nR = R_len [tid] ;
int64_t pS = S_task [tid] ;
GB_msort_1_merge (
W_0 + pS,
A_0 + pL, nL,
A_0 + pR, nR) ;
}
nt = 2*nt ;
//----------------------------------------------------------------------
// merge level k-1 into level k-2, from W into A
//----------------------------------------------------------------------
// this could be done in parallel if ntasks was large
for (int tid = 0 ; tid < ntasks ; tid += 2*nt)
{
// create 2*nt tasks to merge two W sublists into one A sublist
GB_msort_1_create_merge_tasks (
L_task, L_len, R_task, R_len, S_task, tid, 2*nt, Slice [tid],
W_0, Slice [tid], Slice [tid+nt],
W_0, Slice [tid+nt], Slice [tid+2*nt]) ;
}
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
for (tid = 0 ; tid < ntasks ; tid++)
{
// merge W [pL...pL+nL-1] and W [pR...pR+nR-1] into A [pS..]
int64_t pL = L_task [tid], nL = L_len [tid] ;
int64_t pR = R_task [tid], nR = R_len [tid] ;
int64_t pS = S_task [tid] ;
GB_msort_1_merge (
A_0 + pS,
W_0 + pL, nL,
W_0 + pR, nR) ;
}
nt = 2*nt ;
}
//--------------------------------------------------------------------------
// free workspace and return result
//--------------------------------------------------------------------------
GB_FREE_WERK (&W, W_size) ;
return (GrB_SUCCESS) ;
}
|
GB_unop__identity_uint8_bool.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_uint8_bool)
// op(A') function: GB (_unop_tran__identity_uint8_bool)
// C type: uint8_t
// A type: bool
// cast: uint8_t cij = (uint8_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
bool
#define GB_CTYPE \
uint8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
bool aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
uint8_t z = (uint8_t) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
bool aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
uint8_t z = (uint8_t) aij ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_UINT8 || GxB_NO_BOOL)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__identity_uint8_bool)
(
    uint8_t *Cx, // Cx and Ax may be aliased
    const bool *Ax,
    const int8_t *restrict Ab, // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab != NULL)
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b.
        // Only entries present in the bitmap are cast and copied.
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (Ab [p])
            {
                // Cx [p] = (uint8_t) Ax [p]
                Cx [p] = (uint8_t) (Ax [p]) ;
            }
        }
    }
    else
    {
        // all entries present: cast every value of A into C
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            Cx [p] = (uint8_t) (Ax [p]) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): the transpose/typecast/apply loop itself lives in the
// shared template GB_unop_transpose.c, specialized by the macros above.
GrB_Info GB (_unop_tran__identity_uint8_bool)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
taskloop_misc_messages.c | // RUN: %clang_cc1 -fsyntax-only -fopenmp -triple x86_64-unknown-unknown -verify %s
// RUN: %clang_cc1 -fsyntax-only -fopenmp-simd -triple x86_64-unknown-unknown -verify %s
// expected-error@+1 {{unexpected OpenMP directive '#pragma omp taskloop'}}
#pragma omp taskloop
// expected-error@+1 {{unexpected OpenMP directive '#pragma omp taskloop'}}
#pragma omp taskloop foo
void test_no_clause() {
// A bare taskloop must be followed by a canonical for loop; any other
// associated statement is diagnosed.
int i;
#pragma omp taskloop
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{statement after '#pragma omp taskloop' must be a for loop}}
#pragma omp taskloop
++i;
}
void test_branch_protected_scope() {
// Jumps may not cross the taskloop region boundary in either direction:
// labels outside the region are invisible inside it, and vice versa;
// 'return' from inside the region is also rejected.
int i = 0;
L1:
++i;
int x[24];
#pragma omp parallel
#pragma omp taskloop
for (i = 0; i < 16; ++i) {
if (i == 5)
goto L1; // expected-error {{use of undeclared label 'L1'}}
else if (i == 6)
return; // expected-error {{cannot return from OpenMP region}}
else if (i == 7)
goto L2;
else if (i == 8) {
L2:
x[i]++;
}
}
if (x[0] == 0)
goto L2; // expected-error {{use of undeclared label 'L2'}}
else if (x[1] == 1)
goto L1;
}
void test_invalid_clause() {
// Unknown tokens after the directive are ignored with a warning; a
// duplicate 'nogroup' clause is a hard error.
int i;
#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp taskloop' are ignored}}
#pragma omp taskloop foo bar
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{directive '#pragma omp taskloop' cannot contain more than one 'nogroup' clause}}
#pragma omp taskloop nogroup nogroup
for (i = 0; i < 16; ++i)
;
}
void test_non_identifiers() {
// Stray punctuation after the directive or after a clause is ignored with
// a warning; 'linear' is additionally rejected as invalid on taskloop.
int i, x;
#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp taskloop' are ignored}}
#pragma omp taskloop;
for (i = 0; i < 16; ++i)
;
// expected-warning@+3 {{extra tokens at the end of '#pragma omp taskloop' are ignored}}
// expected-error@+2 {{unexpected OpenMP clause 'linear' in directive '#pragma omp taskloop'}}
#pragma omp parallel
#pragma omp taskloop linear(x);
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp taskloop' are ignored}}
#pragma omp taskloop private(x);
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp taskloop' are ignored}}
#pragma omp taskloop, private(x);
for (i = 0; i < 16; ++i)
;
}
extern int foo();
void test_collapse() {
// Exercises 'collapse' argument parsing and validation: malformed
// parentheses, non-constant or non-positive arguments, and the requirement
// that the collapse depth match the number of nested loops.
int i;
#pragma omp parallel
// expected-error@+1 {{expected '('}}
#pragma omp taskloop collapse
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp taskloop collapse(
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp taskloop collapse()
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp taskloop collapse(,
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp taskloop collapse(, )
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-warning@+2 {{extra tokens at the end of '#pragma omp taskloop' are ignored}}
// expected-error@+1 {{expected '('}}
#pragma omp taskloop collapse 4)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp taskloop collapse(4
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp taskloop', but found only 1}}
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp taskloop collapse(4,
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp taskloop', but found only 1}}
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp taskloop collapse(4, )
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp taskloop', but found only 1}}
#pragma omp parallel
// expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp taskloop collapse(4)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp taskloop', but found only 1}}
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp taskloop collapse(4 4)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp taskloop', but found only 1}}
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp taskloop collapse(4, , 4)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp taskloop', but found only 1}}
#pragma omp parallel
#pragma omp taskloop collapse(4)
for (int i1 = 0; i1 < 16; ++i1)
for (int i2 = 0; i2 < 16; ++i2)
for (int i3 = 0; i3 < 16; ++i3)
for (int i4 = 0; i4 < 16; ++i4)
foo();
#pragma omp parallel
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp taskloop collapse(4, 8)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp taskloop', but found only 1}}
#pragma omp parallel
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp taskloop collapse(2.5)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp taskloop collapse(foo())
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp taskloop collapse(-5)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp taskloop collapse(0)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp taskloop collapse(5 - 5)
for (i = 0; i < 16; ++i)
;
}
void test_private() {
// 'private' list parsing: malformed lists and non-variable arguments are
// rejected; well-formed variable lists are accepted.
int i;
#pragma omp parallel
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp taskloop private(
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp taskloop private(,
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 2 {{expected expression}}
#pragma omp taskloop private(, )
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp taskloop private()
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp taskloop private(int)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected variable name}}
#pragma omp taskloop private(0)
for (i = 0; i < 16; ++i)
;
int x, y, z;
#pragma omp parallel
#pragma omp taskloop private(x)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
#pragma omp taskloop private(x, y)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
#pragma omp taskloop private(x, y, z)
for (i = 0; i < 16; ++i) {
x = y * i + z;
}
}
void test_lastprivate() {
// 'lastprivate' list parsing: same malformed-list and non-variable
// diagnostics as 'private'; valid variable lists are accepted.
int i;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 {{expected expression}}
#pragma omp taskloop lastprivate(
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp taskloop lastprivate(,
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 2 {{expected expression}}
#pragma omp taskloop lastprivate(, )
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp taskloop lastprivate()
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp taskloop lastprivate(int)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected variable name}}
#pragma omp taskloop lastprivate(0)
for (i = 0; i < 16; ++i)
;
int x, y, z;
#pragma omp parallel
#pragma omp taskloop lastprivate(x)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
#pragma omp taskloop lastprivate(x, y)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
#pragma omp taskloop lastprivate(x, y, z)
for (i = 0; i < 16; ++i)
;
}
void test_firstprivate() {
// 'firstprivate' list parsing, plus the valid combination of a variable
// appearing in both 'lastprivate' and 'firstprivate'.
int i;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 {{expected expression}}
#pragma omp taskloop firstprivate(
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp taskloop firstprivate(,
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 2 {{expected expression}}
#pragma omp taskloop firstprivate(, )
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp taskloop firstprivate()
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp taskloop firstprivate(int)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
// expected-error@+1 {{expected variable name}}
#pragma omp taskloop firstprivate(0)
for (i = 0; i < 16; ++i)
;
int x, y, z;
#pragma omp parallel
#pragma omp taskloop lastprivate(x) firstprivate(x)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
#pragma omp taskloop lastprivate(x, y) firstprivate(x, y)
for (i = 0; i < 16; ++i)
;
#pragma omp parallel
#pragma omp taskloop lastprivate(x, y, z) firstprivate(x, y, z)
for (i = 0; i < 16; ++i)
;
}
void test_loop_messages() {
// Loop iteration variable type checks: floating-point induction variables
// are rejected; >64-bit integers are accepted with a narrowing warning.
float a[100], b[100], c[100];
#pragma omp parallel
// expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp taskloop
for (float fi = 0; fi < 10.0; fi++) {
c[(int)fi] = a[(int)fi] + b[(int)fi];
}
#pragma omp parallel
// expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp taskloop
for (double fi = 0; fi < 10.0; fi++) {
c[(int)fi] = a[(int)fi] + b[(int)fi];
}
// expected-warning@+2 {{OpenMP loop iteration variable cannot have more than 64 bits size and will be narrowed}}
#pragma omp taskloop
for (__int128 ii = 0; ii < 10; ii++) {
c[ii] = a[ii] + b[ii];
}
}
|
gather_avx2.c | // gather-style reduction: sum entries of numbers[] at the random indices stored in mask[], serially and with an OpenMP SIMD reduction
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#define N 32000
/* Gather-style reduction demo: sum numbers[] at random indices taken from
 * mask[], once serially and once with an 8-wide OpenMP SIMD reduction. */
int main() {
    // time() requires <time.h>; without it the call was an implicit
    // declaration (invalid since C99)
    srand((unsigned) time(NULL));
    float *numbers = malloc(sizeof(float) * N);
    int *mask = malloc(sizeof(int) * N);
    // check allocations before touching the buffers
    if (numbers == NULL || mask == NULL) {
        fprintf(stderr, "allocation failed\n");
        free(numbers);
        free(mask);
        return EXIT_FAILURE;
    }
    // Init the numbers and the gather indices
    for (int i = 0; i < N; i++) numbers[i] = rand() % 10;
    for (int i = 0; i < N; i++) mask[i] = rand() % N;
    /*for (int i = 0; i<8; i++) printf("%.1f ", numbers[i]);
    puts("\n---");
    for (int i = 0; i<8; i++) printf("%d ", mask[i]);
    puts("\n---");*/
    // Serial gather-sum
    float result1 = 0;
    for (int i = 0; i < N; i++) {
        result1 += numbers[mask[i]];
    }
    // SIMD gather-sum (simdlen(8) maps to AVX2-width gathers)
    float result2 = 0;
    #pragma omp simd reduction(+:result2) simdlen(8)
    for (int i = 0; i < N; i++) {
        result2 += numbers[mask[i]];
    }
    // print
    printf("Result1: %f | Result2: %f\n", result1, result2);
    // release the heap buffers (previously leaked)
    free(numbers);
    free(mask);
    return 0;
}
|
DataDependency.c | #include <math.h>
#include <omp.h>
/* Scale y into x with a geometrically decaying factor: x[i] = y[i] / 2^i
 * (truncated to int). The running scale is a loop-carried dependence, which
 * is what makes this loop hard to parallelize directly. */
void a(int *x, int *y, int n) {
  double scale = 1.0;
  int i = 0;
  while (i < n) {
    x[i] = scale * y[i];
    scale *= 0.5;
    i++;
  }
}
// Parallel rewrite of a(): the loop-carried `factor` dependence is removed
// by computing the decayed scale in closed form, factor / 2^i == 1 / 2^i.
void a_sol(int *x, int *y, int n) {
double factor = 1;
// `factor` is only read inside the loop, so sharing it across threads is safe
#pragma omp parallel for schedule(guided)
for (int i = 0; i < n; i++) {
x[i] = (factor / pow(2, i)) * y[i];
}
}
/* In-place update with a loop-carried dependence: each x[i] averages in the
 * y[i-1] value produced by the previous iteration's y update. */
void b(int *x, int *y, int *z, int n) {
  int i;
  for (i = 1; i < n; i++) {
    int prev_y = y[i - 1];
    x[i] = (x[i] + prev_y) / 2;
    y[i] += 3 * z[i];
  }
}
/* Parallel version of b(): the x[i] <- y[i-1] dependence is broken by
 * splitting into two loops; the implicit barrier at the end of the first
 * '#pragma omp for' guarantees every y update completes before any x reads
 * it, matching b()'s sequential result.
 * Fix: removed the stray ';' that followed the parallel region (a useless
 * null statement). */
void b_sol(int *x, int *y, int *z, int n) {
#pragma omp parallel
  {
#pragma omp for
    for (int i = 1; i < n; i++) {
      y[i] = y[i] + z[i] * 3;
    }
#pragma omp for
    for (int i = 1; i < n; i++) {
      x[i] = (x[i] + y[i - 1]) / 2;
    }
  }
}
/* Accumulate 5*y into x; when `twice` is set, each iteration additionally
 * doubles the previous element, so every x[0..n-2] ends up doubled after
 * its own 5*y update. */
void c(int *x, int *y, int n, int twice) {
  x[0] += 5 * y[0];
  for (int i = 1; i < n; ++i) {
    x[i] += 5 * y[i];
    if (twice)
      x[i - 1] *= 2;
  }
}
/* Parallel version of c(): the x[i] updates are independent, and the
 * conditional doubling of x[0..n-2] is hoisted into a second loop that runs
 * after the implicit barrier of the first '#pragma omp for', so it sees the
 * already-updated values — the net effect matches c().
 * Fix: the function's closing brace was missing, leaving the definition
 * syntactically unterminated. */
void c_sol(int *x, int *y, int n, int twice) {
  x[0] = x[0] + 5 * y[0];
#pragma omp parallel
  {
#pragma omp for
    for (int i = 1; i < n; i++) {
      x[i] = x[i] + 5 * y[i];
    }
    if (twice) {
#pragma omp for
      for (int i = 1; i < n; i++) {
        x[i - 1] = 2 * x[i - 1];
      }
    }
  }
}
|
GB_binop__times_fp32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__times_fp32)
// A.*B function (eWiseMult): GB (_AemultB_01__times_fp32)
// A.*B function (eWiseMult): GB (_AemultB_02__times_fp32)
// A.*B function (eWiseMult): GB (_AemultB_03__times_fp32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__times_fp32)
// A*D function (colscale): GB (_AxD__times_fp32)
// D*A function (rowscale): GB (_DxB__times_fp32)
// C+=B function (dense accum): GB (_Cdense_accumB__times_fp32)
// C+=b function (dense accum): GB (_Cdense_accumb__times_fp32)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__times_fp32)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__times_fp32)
// C=scalar+B GB (_bind1st__times_fp32)
// C=scalar+B' GB (_bind1st_tran__times_fp32)
// C=A+scalar GB (_bind2nd__times_fp32)
// C=A'+scalar GB (_bind2nd_tran__times_fp32)
// C type: float
// A type: float
// B,b type: float
// BinaryOp: cij = (aij * bij)
#define GB_ATYPE \
float
#define GB_BTYPE \
float
#define GB_CTYPE \
float
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
float bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
float t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = (x * y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_TIMES || GxB_NO_FP32 || GxB_NO_TIMES_FP32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B where all three matrices are dense; the loop body comes from the
// shared template, specialized by the GB_* macros defined above.
void GB (_Cdense_ewise3_accum__times_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense (no accumulation); returns
// GrB_NO_VALUE when this specialization is compiled out via GB_DISABLE.
GrB_Info GB (_Cdense_ewise3_noaccum__times_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix into a dense matrix, using the
// pre-sliced tasks in B_ek_slicing.
GrB_Info GB (_Cdense_accumB__times_fp32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// The scalar arrives as an opaque GB_void pointer and is unpacked into the
// operator's type before the template runs.
// Fix: removed the unreachable duplicated "return (GrB_SUCCESS)" that
// followed the block's own return statement (dead code).
GrB_Info GB (_Cdense_accumb__times_fp32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type float
        float bwork = (*((float *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D.
GrB_Info GB (_AxD__times_fp32)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// expose C's values with the concrete type expected by the template
float *restrict Cx = (float *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D.
GrB_Info GB (_DxB__times_fp32)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// expose C's values with the concrete type expected by the template
float *restrict Cx = (float *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B, with this file's operator applied to
// entries present in both A and B.
GrB_Info GB (_AaddB__times_fp32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// slicing workspaces used by the template; released by GB_FREE_WORK below
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult (method 01): C = A.*B or C<M> = A.*B, general case.
GrB_Info GB (_AemultB_01__times_fp32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 02): C<#> = A.*B when A is sparse/hyper and B is
// bitmap/full. GB_BINOP_FLIP selects at compile time whether a runtime
// flipxy needs the operands swapped (non-commutative ops only).
GrB_Info GB (_AemultB_02__times_fp32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 03): C<M> = A.*B with M sparse/hyper and both A and B
// bitmap/full.
GrB_Info GB (_AemultB_03__times_fp32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult kernel for z = x*y on fp32 where the result C is bitmap:
// handles C=A.*B, C<M>=A.*B, and C<!M>=A.*B, selected via ewise_method.
// The loops come from GB_bitmap_emult_template.c.
GrB_Info GB (_AemultB_bitmap__times_fp32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
// this kernel has been compiled out (see GB_DISABLE conditions)
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = x * Bx [p]: apply the fp32 TIMES operator with the scalar bound
// as the first operand.  Entries absent from a bitmap B (Bb [p] == 0) are
// skipped; for full matrices Bb is NULL and GBB reports every entry present.
GrB_Info GB (_bind1st__times_fp32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *out = (float *) Cx_output ;
    const float *in = (const float *) Bx_input ;
    const float scalar = (*((const float *) x_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // only operate on entries present in B
        if (GBB (Bb, p))
        {
            out [p] = scalar * in [p] ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = Ax [p] * y: apply the fp32 TIMES operator with the scalar bound
// as the second operand.  Entries absent from a bitmap A (Ab [p] == 0) are
// skipped; for full matrices Ab is NULL and GBB reports every entry present.
GrB_Info GB (_bind2nd__times_fp32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *out = (float *) Cx_output ;
    const float *in = (const float *) Ax_input ;
    const float scalar = (*((const float *) y_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // only operate on entries present in A
        if (GBB (Ab, p))
        {
            out [p] = in [p] * scalar ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// GB_CAST_OP: cij = x * aij, with the scalar x bound as the first operand;
// used by the GB_unop_transpose.c template below.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = Ax [pA] ; \
Cx [pC] = (x * aij) ; \
}
// C = op (x, A'): transpose A and apply the bound fp32 TIMES operator.
GrB_Info GB (_bind1st_tran__times_fp32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
float
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float x = (*((const float *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// NOTE(review): this redefinition sits after both return paths, so it only
// affects later uses of GB_ATYPE in this translation unit (auto-generated
// code; intentionally left as-is).
#undef GB_ATYPE
#define GB_ATYPE \
float
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// GB_CAST_OP: cij = aij * y, with the scalar y bound as the second operand;
// used by the GB_unop_transpose.c template below.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = Ax [pA] ; \
Cx [pC] = (aij * y) ; \
}
// C = op (A', y): transpose A and apply the bound fp32 TIMES operator.
GrB_Info GB (_bind2nd_tran__times_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float y = (*((const float *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unop__log_fc32_fc32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__log_fc32_fc32)
// op(A') function: GB (_unop_tran__log_fc32_fc32)
// C type: GxB_FC32_t
// A type: GxB_FC32_t
// cast: GxB_FC32_t cij = aij
// unaryop: cij = clogf (aij)
// GB_ATYPE: the type of the entries of A
#define GB_ATYPE \
GxB_FC32_t
// GB_CTYPE: the type of the entries of C
#define GB_CTYPE \
GxB_FC32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC32_t aij = Ax [pA]
// GB_CX: access the value of C at position p
#define GB_CX(p) Cx [p]
// unary operator: z = clogf (x), the single-precision complex natural log
#define GB_OP(z, x) \
z = clogf (x) ;
// casting (identity here: A and C have the same type, GxB_FC32_t)
#define GB_CAST(z, aij) \
GxB_FC32_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC32_t z = aij ; \
Cx [pC] = clogf (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LOG || GxB_NO_FC32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = clogf (Ax [p]): apply the complex natural log entrywise.
// When Ab is NULL every position 0..anz-1 holds an entry (sparse/hyper/full
// value arrays); when Ab is non-NULL (bitmap), positions with Ab [p] == 0
// are skipped.  The cast from A to C is the identity (both GxB_FC32_t).
GrB_Info GB (_unop_apply__log_fc32_fc32)
(
    GxB_FC32_t *Cx,             // Cx and Ax may be aliased
    const GxB_FC32_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // every position holds an entry
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            Cx [p] = clogf (Ax [p]) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (Ab [p])
            {
                Cx [p] = clogf (Ax [p]) ;
            }
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = clogf (A'): transpose A and apply the operator, using the macros
// defined above; the loops live in GB_unop_transpose.c.
GrB_Info GB (_unop_tran__log_fc32_fc32)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
// this kernel has been compiled out (see GB_DISABLE conditions)
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
shallow_water_utilities.h | // | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ `
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Miguel Maso Sotomayor
//
#ifndef KRATOS_SHALLOW_WATER_UTILITIES_H_INCLUDED
#define KRATOS_SHALLOW_WATER_UTILITIES_H_INCLUDED
// System includes
// External includes
// Project includes
#include "includes/model_part.h"
namespace Kratos
{
///@addtogroup ShallowWaterApplication
///@{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
/// Short class definition.
/** Detail class definition.
*/
/// Collection of node/element utilities for the shallow water application.
/// Non-template members are implemented in the corresponding .cpp file, so
/// only their declared intent is documented here.
class KRATOS_API(SHALLOW_WATER_APPLICATION) ShallowWaterUtilities
{
public:
///@name Type Definitions
///@{
/// Pointer definition of ShallowWaterUtilities
KRATOS_CLASS_POINTER_DEFINITION(ShallowWaterUtilities);
///@}
///@name Life Cycle
///@{
/// Default constructor.
/// Destructor.
///@}
///@name Operators
///@{
///@}
///@name Operations
///@{
/// Compute the free surface elevation field on the model part.
void ComputeFreeSurfaceElevation(ModelPart& rModelPart);
/// Compute the water height field from the free surface elevation.
void ComputeHeightFromFreeSurface(ModelPart& rModelPart);
/// Compute the velocity field on the model part.
void ComputeVelocity(ModelPart& rModelPart);
/// Compute the momentum field on the model part.
void ComputeMomentum(ModelPart& rModelPart);
/// Compute the nodal accelerations on the model part.
void ComputeAccelerations(ModelPart& rModelPart);
/// Transfer a scalar variable into another (see .cpp for the exact rule).
void FlipScalarVariable(Variable<double>& rOriginVariable, Variable<double>& rDestinationVariable, ModelPart& rModelPart);
/// Flag the solid boundary entities given the still-water level.
void IdentifySolidBoundary(ModelPart& rModelPart, double SeaWaterLevel, Flags SolidBoundaryFlag);
/// Flag the wet part of the domain; Thickness is a wet/dry tolerance.
void IdentifyWetDomain(ModelPart& rModelPart, Flags WetFlag, double Thickness = 0.0);
/// Reset values over the dry part of the domain (see .cpp for specifics).
void ResetDryDomain(ModelPart& rModelPart, double Thickness = 0.0);
/// Set ACTIVE on each entity to whether it carries WetFlag, in parallel.
/// Works for any Kratos container of flag-carrying entities.
template<class TContainerType>
void DeactivateDryEntities(TContainerType& rContainer, Flags WetFlag)
{
#pragma omp parallel for
for (int i = 0; i < static_cast<int>(rContainer.size()); ++i)
{
auto it = rContainer.begin() + i;
// an entity is active exactly when it is marked wet
it->Set(ACTIVE, it->Is(WetFlag));
}
}
/// Compute a water height suitable for visualization over wet nodes.
void ComputeVisualizationWaterHeight(ModelPart& rModelPart, Flags WetFlag, double SeaWaterLevel = 0.0);
/// Compute the water surface for visualization purposes.
void ComputeVisualizationWaterSurface(ModelPart& rModelPart);
/// Normalize the given nodal vector variable over the model part.
void NormalizeVector(ModelPart& rModelPart, Variable<array_1d<double,3>>& rVariable);
/// Copy each node's current value of rVariable (buffer step 0) into the
/// previous time step (buffer step 1), in parallel.
template<class TVarType>
void CopyVariableToPreviousTimeStep(ModelPart& rModelPart, TVarType& rVariable)
{
#pragma omp parallel for
for (int i = 0; i < static_cast<int>(rModelPart.NumberOfNodes()); ++i)
{
auto const it_node = rModelPart.NodesBegin() + i;
it_node->FastGetSolutionStepValue(rVariable,1) = it_node->FastGetSolutionStepValue(rVariable);
}
}
/// Clamp the nodal variable from below by MinValue over the model part.
void SetMinimumValue(ModelPart& rModelPart, const Variable<double>& rVariable, double MinValue);
///@}
///@name Access
///@{
///@}
///@name Inquiry
///@{
///@}
///@name Input and output
///@{
///@}
///@name Friends
///@{
///@}
}; // Class ShallowWaterUtilities
///@}
///@name Type Definitions
///@{
///@}
///@name Input and output
///@{
///@}
///@} addtogroup block
} // namespace Kratos.
#endif // KRATOS_SHALLOW_WATER_UTILITIES_H_INCLUDED defined
|
Sema.h | //===--- Sema.h - Semantic Analysis & AST Building --------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the Sema class, which performs semantic analysis and
// builds ASTs.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_SEMA_SEMA_H
#define LLVM_CLANG_SEMA_SEMA_H
#include "clang/AST/ASTConcept.h"
#include "clang/AST/ASTFwd.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Availability.h"
#include "clang/AST/ComparisonCategories.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/DeclarationName.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprConcepts.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ExprOpenMP.h"
#include "clang/AST/ExternalASTSource.h"
#include "clang/AST/LocInfoType.h"
#include "clang/AST/MangleNumberingContext.h"
#include "clang/AST/NSAPI.h"
#include "clang/AST/PrettyPrinter.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/TypeLoc.h"
#include "clang/APINotes/APINotesManager.h"
#include "clang/AST/TypeOrdering.h"
#include "clang/Basic/BitmaskEnum.h"
#include "clang/Basic/ExpressionTraits.h"
#include "clang/Basic/Module.h"
#include "clang/Basic/OpenCLOptions.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/PragmaKinds.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Basic/TemplateKinds.h"
#include "clang/Basic/TypeTraits.h"
#include "clang/Sema/AnalysisBasedWarnings.h"
#include "clang/Sema/CleanupInfo.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/ExternalSemaSource.h"
#include "clang/Sema/IdentifierResolver.h"
#include "clang/Sema/ObjCMethodList.h"
#include "clang/Sema/Ownership.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/SemaConcept.h"
#include "clang/Sema/TypoCorrection.h"
#include "clang/Sema/Weak.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/TinyPtrVector.h"
#include "llvm/Frontend/OpenMP/OMPConstants.h"
#include <deque>
#include <functional>
#include <memory>
#include <string>
#include <tuple>
#include <vector>
namespace llvm {
class APSInt;
template <typename ValueT> struct DenseMapInfo;
template <typename ValueT, typename ValueInfoT> class DenseSet;
class SmallBitVector;
struct InlineAsmIdentifierInfo;
}
namespace clang {
class ADLResult;
class ASTConsumer;
class ASTContext;
class ASTMutationListener;
class ASTReader;
class ASTWriter;
class ArrayType;
class ParsedAttr;
class BindingDecl;
class BlockDecl;
class CapturedDecl;
class CXXBasePath;
class CXXBasePaths;
class CXXBindTemporaryExpr;
typedef SmallVector<CXXBaseSpecifier*, 4> CXXCastPath;
class CXXConstructorDecl;
class CXXConversionDecl;
class CXXDeleteExpr;
class CXXDestructorDecl;
class CXXFieldCollector;
class CXXMemberCallExpr;
class CXXMethodDecl;
class CXXScopeSpec;
class CXXTemporary;
class CXXTryStmt;
class CallExpr;
class ClassTemplateDecl;
class ClassTemplatePartialSpecializationDecl;
class ClassTemplateSpecializationDecl;
class VarTemplatePartialSpecializationDecl;
class CodeCompleteConsumer;
class CodeCompletionAllocator;
class CodeCompletionTUInfo;
class CodeCompletionResult;
class CoroutineBodyStmt;
class Decl;
class DeclAccessPair;
class DeclContext;
class DeclRefExpr;
class DeclaratorDecl;
class DeducedTemplateArgument;
class DependentDiagnostic;
class DesignatedInitExpr;
class Designation;
class EnableIfAttr;
class EnumConstantDecl;
class Expr;
class ExtVectorType;
class FormatAttr;
class FriendDecl;
class FunctionDecl;
class FunctionProtoType;
class FunctionTemplateDecl;
class ImplicitConversionSequence;
typedef MutableArrayRef<ImplicitConversionSequence> ConversionSequenceList;
class InitListExpr;
class InitializationKind;
class InitializationSequence;
class InitializedEntity;
class IntegerLiteral;
class LabelStmt;
class LambdaExpr;
class LangOptions;
class LocalInstantiationScope;
class LookupResult;
class MacroInfo;
typedef ArrayRef<std::pair<IdentifierInfo *, SourceLocation>> ModuleIdPath;
class ModuleLoader;
class MultiLevelTemplateArgumentList;
class NamedDecl;
class ObjCCategoryDecl;
class ObjCCategoryImplDecl;
class ObjCCompatibleAliasDecl;
class ObjCContainerDecl;
class ObjCImplDecl;
class ObjCImplementationDecl;
class ObjCInterfaceDecl;
class ObjCIvarDecl;
template <class T> class ObjCList;
class ObjCMessageExpr;
class ObjCMethodDecl;
class ObjCPropertyDecl;
class ObjCProtocolDecl;
class OMPThreadPrivateDecl;
class OMPRequiresDecl;
class OMPDeclareReductionDecl;
class OMPDeclareSimdDecl;
class OMPClause;
struct OMPVarListLocTy;
struct OverloadCandidate;
enum class OverloadCandidateParamOrder : char;
enum OverloadCandidateRewriteKind : unsigned;
class OverloadCandidateSet;
class OverloadExpr;
class ParenListExpr;
class ParmVarDecl;
class Preprocessor;
class PseudoDestructorTypeStorage;
class PseudoObjectExpr;
class QualType;
class StandardConversionSequence;
class Stmt;
class StringLiteral;
class SwitchStmt;
class TemplateArgument;
class TemplateArgumentList;
class TemplateArgumentLoc;
class TemplateDecl;
class TemplateInstantiationCallback;
class TemplateParameterList;
class TemplatePartialOrderingContext;
class TemplateTemplateParmDecl;
class Token;
class TypeAliasDecl;
class TypedefDecl;
class TypedefNameDecl;
class TypeLoc;
class TypoCorrectionConsumer;
class UnqualifiedId;
class UnresolvedLookupExpr;
class UnresolvedMemberExpr;
class UnresolvedSetImpl;
class UnresolvedSetIterator;
class UsingDecl;
class UsingShadowDecl;
class ValueDecl;
class VarDecl;
class VarTemplateSpecializationDecl;
class VisibilityAttr;
class VisibleDeclConsumer;
class IndirectFieldDecl;
struct DeductionFailureInfo;
class TemplateSpecCandidateSet;
namespace sema {
class AccessedEntity;
class BlockScopeInfo;
class Capture;
class CapturedRegionScopeInfo;
class CapturingScopeInfo;
class CompoundScopeInfo;
class DelayedDiagnostic;
class DelayedDiagnosticPool;
class FunctionScopeInfo;
class LambdaScopeInfo;
class PossiblyUnreachableDiag;
class SemaPPCallbacks;
class TemplateDeductionInfo;
}
namespace threadSafety {
class BeforeSet;
void threadSafetyCleanup(BeforeSet* Cache);
}
// FIXME: No way to easily map from TemplateTypeParmTypes to
// TemplateTypeParmDecls, so we have this horrible PointerUnion.
typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType*, NamedDecl*>,
SourceLocation> UnexpandedParameterPack;
/// Describes whether we've seen any nullability information for the given
/// file.
struct FileNullability {
/// The first pointer declarator (of any pointer kind) in the file that does
/// not have a corresponding nullability annotation.
SourceLocation PointerLoc;
/// The end location for the first pointer declarator in the file. Used for
/// placing fix-its.
SourceLocation PointerEndLoc;
/// Which kind of pointer declarator we saw.
/// (Encoding is defined at the use sites, not in this header.)
uint8_t PointerKind;
/// Whether we saw any type nullability annotations in the given file.
bool SawTypeNullability = false;
};
/// A mapping from file IDs to a record of whether we've seen nullability
/// information in that file.
/// A mapping from file IDs to a record of whether we've seen nullability
/// information in that file, with a single-entry cache in front of the map.
class FileNullabilityMap {
  /// Backing store: the nullability record for every file seen so far.
  llvm::DenseMap<FileID, FileNullability> Map;

  /// Most-recently-accessed entry, kept out of the map until evicted.
  struct {
    FileID File;
    FileNullability Nullability;
  } Cache;

public:
  FileNullability &operator[](FileID file) {
    if (file != Cache.File) {
      // Cache miss: write the cached record back (unless the cache has
      // never been filled), then pull the requested record into the cache.
      if (!Cache.File.isInvalid())
        Map[Cache.File] = Cache.Nullability;
      Cache.File = file;
      Cache.Nullability = Map[file];
    }
    return Cache.Nullability;
  }
};
/// Keeps track of expected type during expression parsing. The type is tied to
/// a particular token, all functions that update or consume the type take a
/// start location of the token they are looking at as a parameter. This allows
/// to avoid updating the type on hot paths in the parser.
/// Keeps track of expected type during expression parsing. The type is tied to
/// a particular token, all functions that update or consume the type take a
/// start location of the token they are looking at as a parameter. This allows
/// to avoid updating the type on hot paths in the parser.
class PreferredTypeBuilder {
public:
PreferredTypeBuilder() = default;
explicit PreferredTypeBuilder(QualType Type) : Type(Type) {}
void enterCondition(Sema &S, SourceLocation Tok);
void enterReturn(Sema &S, SourceLocation Tok);
void enterVariableInit(SourceLocation Tok, Decl *D);
/// Computing a type for the function argument may require running
/// overloading, so we postpone its computation until it is actually needed.
///
/// Clients should be very careful when using this function, as it stores a
/// function_ref, clients should make sure all calls to get() with the same
/// location happen while function_ref is alive.
void enterFunctionArgument(SourceLocation Tok,
llvm::function_ref<QualType()> ComputeType);
void enterParenExpr(SourceLocation Tok, SourceLocation LParLoc);
void enterUnary(Sema &S, SourceLocation Tok, tok::TokenKind OpKind,
SourceLocation OpLoc);
void enterBinary(Sema &S, SourceLocation Tok, Expr *LHS, tok::TokenKind Op);
void enterMemAccess(Sema &S, SourceLocation Tok, Expr *Base);
void enterSubscript(Sema &S, SourceLocation Tok, Expr *LHS);
/// Handles all type casts, including C-style cast, C++ casts, etc.
void enterTypeCast(SourceLocation Tok, QualType CastType);
/// Return the expected type for the token at \p Tok, or a null QualType if
/// \p Tok is not the tracked token or no type (eager or lazy) is recorded.
QualType get(SourceLocation Tok) const {
if (Tok != ExpectedLoc)
return QualType();
// Prefer the eagerly-stored type; fall back to the lazy computation.
if (!Type.isNull())
return Type;
if (ComputeType)
return ComputeType();
return QualType();
}
private:
/// Start position of a token for which we store expected type.
SourceLocation ExpectedLoc;
/// Expected type for a token starting at ExpectedLoc.
QualType Type;
/// A function to compute expected type at ExpectedLoc. It is only considered
/// if Type is null.
llvm::function_ref<QualType()> ComputeType;
};
/// Sema - This implements semantic analysis and AST building for C.
class Sema final {
Sema(const Sema &) = delete;
void operator=(const Sema &) = delete;
/// A key method to reduce duplicate debug info from Sema.
virtual void anchor();
///Source of additional semantic information.
ExternalSemaSource *ExternalSource;
///Whether Sema has generated a multiplexer and has to delete it.
bool isMultiplexExternalSource;
static bool mightHaveNonExternalLinkage(const DeclaratorDecl *FD);
bool isVisibleSlow(const NamedDecl *D);
/// Determine whether two declarations should be linked together, given that
/// the old declaration might not be visible and the new declaration might
/// not have external linkage.
bool shouldLinkPossiblyHiddenDecl(const NamedDecl *Old,
                                  const NamedDecl *New) {
  // A visible previous declaration always links with the new one.
  if (isVisible(Old))
    return true;
  // See comment in below overload for why it's safe to compute the linkage
  // of the new declaration here.
  if (!New->isExternallyDeclarable())
    return false;
  assert(Old->isExternallyDeclarable() &&
         "should not have found a non-externally-declarable previous decl");
  return true;
}
bool shouldLinkPossiblyHiddenDecl(LookupResult &Old, const NamedDecl *New);
void setupImplicitSpecialMemberType(CXXMethodDecl *SpecialMem,
QualType ResultTy,
ArrayRef<QualType> Args);
public:
/// The maximum alignment, same as in llvm::Value. We duplicate them here
/// because that allows us not to duplicate the constants in clang code,
/// which we must to since we can't directly use the llvm constants.
/// The value is verified against llvm here: lib/CodeGen/CGDecl.cpp
///
/// This is the greatest alignment value supported by load, store, and alloca
/// instructions, and global values.
static const unsigned MaxAlignmentExponent = 29;
static const unsigned MaximumAlignment = 1u << MaxAlignmentExponent;
typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy;
typedef OpaquePtr<TemplateName> TemplateTy;
typedef OpaquePtr<QualType> TypeTy;
OpenCLOptions OpenCLFeatures;
FPOptions CurFPFeatures;
const LangOptions &LangOpts;
Preprocessor &PP;
ASTContext &Context;
ASTConsumer &Consumer;
DiagnosticsEngine &Diags;
SourceManager &SourceMgr;
api_notes::APINotesManager APINotes;
/// Flag indicating whether or not to collect detailed statistics.
bool CollectStats;
/// Code-completion consumer.
CodeCompleteConsumer *CodeCompleter;
/// CurContext - This is the current declaration context of parsing.
DeclContext *CurContext;
/// Generally null except when we temporarily switch decl contexts,
/// like in \see ActOnObjCTemporaryExitContainerContext.
DeclContext *OriginalLexicalContext;
/// VAListTagName - The declaration name corresponding to __va_list_tag.
/// This is used as part of a hack to omit that class from ADL results.
DeclarationName VAListTagName;
bool MSStructPragmaOn; // True when \#pragma ms_struct on
/// Controls member pointer representation format under the MS ABI.
LangOptions::PragmaMSPointersToMembersKind
MSPointerToMemberRepresentationMethod;
/// Stack of active SEH __finally scopes. Can be empty.
SmallVector<Scope*, 2> CurrentSEHFinally;
/// Source location for newly created implicit MSInheritanceAttrs
SourceLocation ImplicitMSInheritanceAttrLoc;
/// Holds TypoExprs that are created from `createDelayedTypo`. This is used by
/// `TransformTypos` in order to keep track of any TypoExprs that are created
/// recursively during typo correction and wipe them away if the correction
/// fails.
llvm::SmallVector<TypoExpr *, 2> TypoExprs;
/// pragma clang section kind
enum PragmaClangSectionKind {
PCSK_Invalid = 0,
PCSK_BSS = 1,
PCSK_Data = 2,
PCSK_Rodata = 3,
PCSK_Text = 4,
PCSK_Relro = 5
};
enum PragmaClangSectionAction {
PCSA_Set = 0,
PCSA_Clear = 1
};
struct PragmaClangSection {
std::string SectionName;
bool Valid = false;
SourceLocation PragmaLocation;
void Act(SourceLocation PragmaLocation,
PragmaClangSectionAction Action,
StringLiteral* Name);
};
PragmaClangSection PragmaClangBSSSection;
PragmaClangSection PragmaClangDataSection;
PragmaClangSection PragmaClangRodataSection;
PragmaClangSection PragmaClangRelroSection;
PragmaClangSection PragmaClangTextSection;
enum PragmaMsStackAction {
PSK_Reset = 0x0, // #pragma ()
PSK_Set = 0x1, // #pragma (value)
PSK_Push = 0x2, // #pragma (push[, id])
PSK_Pop = 0x4, // #pragma (pop[, id])
PSK_Show = 0x8, // #pragma (show) -- only for "pack"!
PSK_Push_Set = PSK_Push | PSK_Set, // #pragma (push[, id], value)
PSK_Pop_Set = PSK_Pop | PSK_Set, // #pragma (pop[, id], value)
};
/// A stack modeling MSVC-style #pragma push/pop/set semantics for one pragma,
/// parameterized over the value type the pragma controls.
template<typename ValueType>
struct PragmaStack {
/// One saved stack entry: the value in effect before a push, plus the
/// optional label and the source locations of the pragma directives.
struct Slot {
llvm::StringRef StackSlotLabel;
ValueType Value;
SourceLocation PragmaLocation;
SourceLocation PragmaPushLocation;
Slot(llvm::StringRef StackSlotLabel, ValueType Value,
SourceLocation PragmaLocation, SourceLocation PragmaPushLocation)
: StackSlotLabel(StackSlotLabel), Value(Value),
PragmaLocation(PragmaLocation),
PragmaPushLocation(PragmaPushLocation) {}
};
/// Apply a pragma action.  Note the ordering: push/pop happen first, then
/// a PSK_Set bit (if present) overwrites the current value, which is how
/// the combined PSK_Push_Set / PSK_Pop_Set actions work.
void Act(SourceLocation PragmaLocation, PragmaMsStackAction Action,
llvm::StringRef StackSlotLabel, ValueType Value) {
if (Action == PSK_Reset) {
CurrentValue = DefaultValue;
CurrentPragmaLocation = PragmaLocation;
return;
}
if (Action & PSK_Push)
Stack.emplace_back(StackSlotLabel, CurrentValue, CurrentPragmaLocation,
PragmaLocation);
else if (Action & PSK_Pop) {
if (!StackSlotLabel.empty()) {
// If we've got a label, try to find it and jump there.
auto I = llvm::find_if(llvm::reverse(Stack), [&](const Slot &x) {
return x.StackSlotLabel == StackSlotLabel;
});
// Found the label: restore its saved state and pop everything above
// it, the labeled slot included.  A missing label is a no-op.
if (I != Stack.rend()) {
CurrentValue = I->Value;
CurrentPragmaLocation = I->PragmaLocation;
Stack.erase(std::prev(I.base()), Stack.end());
}
} else if (!Stack.empty()) {
// We do not have a label, just pop the last entry.
CurrentValue = Stack.back().Value;
CurrentPragmaLocation = Stack.back().PragmaLocation;
Stack.pop_back();
}
}
if (Action & PSK_Set) {
CurrentValue = Value;
CurrentPragmaLocation = PragmaLocation;
}
}
// MSVC seems to add artificial slots to #pragma stacks on entering a C++
// method body to restore the stacks on exit, so it works like this:
//
// struct S {
// #pragma <name>(push, InternalPragmaSlot, <current_pragma_value>)
// void Method {}
// #pragma <name>(pop, InternalPragmaSlot)
// };
//
// It works even with #pragma vtordisp, although MSVC doesn't support
// #pragma vtordisp(push [, id], n)
// syntax.
//
// Push / pop a named sentinel slot.
void SentinelAction(PragmaMsStackAction Action, StringRef Label) {
assert((Action == PSK_Push || Action == PSK_Pop) &&
"Can only push / pop #pragma stack sentinels!");
Act(CurrentPragmaLocation, Action, Label, CurrentValue);
}
// Constructors.
explicit PragmaStack(const ValueType &Default)
: DefaultValue(Default), CurrentValue(Default) {}
/// True when some pragma has moved the value away from its default.
bool hasValue() const { return CurrentValue != DefaultValue; }
SmallVector<Slot, 2> Stack;
ValueType DefaultValue; // Value used for PSK_Reset action.
ValueType CurrentValue;
SourceLocation CurrentPragmaLocation;
};
// FIXME: We should serialize / deserialize these if they occur in a PCH (but
// we shouldn't do so if they're in a module).
/// Whether to insert vtordisps prior to virtual bases in the Microsoft
/// C++ ABI. Possible values are 0, 1, and 2, which mean:
///
/// 0: Suppress all vtordisps
/// 1: Insert vtordisps in the presence of vbase overrides and non-trivial
/// structors
/// 2: Always insert vtordisps to support RTTI on partially constructed
/// objects
PragmaStack<MSVtorDispMode> VtorDispStack;
// #pragma pack.
// Sentinel to represent when the stack is set to mac68k alignment.
static const unsigned kMac68kAlignmentSentinel = ~0U;
PragmaStack<unsigned> PackStack;
// The current #pragma pack values and locations at each #include.
struct PackIncludeState {
unsigned CurrentValue;
SourceLocation CurrentPragmaLocation;
bool HasNonDefaultValue, ShouldWarnOnInclude;
};
SmallVector<PackIncludeState, 8> PackIncludeStack;
// Segment #pragmas.
PragmaStack<StringLiteral *> DataSegStack;
PragmaStack<StringLiteral *> BSSSegStack;
PragmaStack<StringLiteral *> ConstSegStack;
PragmaStack<StringLiteral *> CodeSegStack;
// This stack tracks the current state of Sema.CurFPFeatures.
PragmaStack<FPOptionsOverride> FpPragmaStack;
/// The FP-option overrides currently in effect: the top of the FP pragma
/// stack when any pragma is active, otherwise an empty override set.
FPOptionsOverride CurFPFeatureOverrides() {
  return FpPragmaStack.hasValue() ? FpPragmaStack.CurrentValue
                                  : FPOptionsOverride();
}
// RAII object to push / pop sentinel slots for all MS #pragma stacks.
// Actions should be performed only if we enter / exit a C++ method body.
/// RAII helper that (when ShouldAct is set) pushes a sentinel slot labeled
/// SlotLabel onto all MS #pragma stacks on construction and pops it on
/// destruction; implementation lives out of line.
class PragmaStackSentinelRAII {
public:
PragmaStackSentinelRAII(Sema &S, StringRef SlotLabel, bool ShouldAct);
~PragmaStackSentinelRAII();
private:
Sema &S;
StringRef SlotLabel;
bool ShouldAct;
};
/// A mapping that describes the nullability we've seen in each header file.
FileNullabilityMap NullabilityMap;
/// Last section used with #pragma init_seg.
StringLiteral *CurInitSeg;
SourceLocation CurInitSegLoc;
/// VisContext - Manages the stack for \#pragma GCC visibility.
void *VisContext; // Really a "PragmaVisStack*"
/// This is an attribute introduced by \#pragma clang attribute.
struct PragmaAttributeEntry {
SourceLocation Loc;
ParsedAttr *Attribute;
SmallVector<attr::SubjectMatchRule, 4> MatchRules;
bool IsUsed;
};
/// A push'd group of PragmaAttributeEntries.
struct PragmaAttributeGroup {
/// The location of the push attribute.
SourceLocation Loc;
/// The namespace of this push group.
const IdentifierInfo *Namespace;
SmallVector<PragmaAttributeEntry, 2> Entries;
};
SmallVector<PragmaAttributeGroup, 2> PragmaAttributeStack;
/// The declaration that is currently receiving an attribute from the
/// #pragma attribute stack.
const Decl *PragmaAttributeCurrentTargetDecl;
/// This represents the last location of a "#pragma clang optimize off"
/// directive if such a directive has not been closed by an "on" yet. If
/// optimizations are currently "on", this is set to an invalid location.
SourceLocation OptimizeOffPragmaLocation;
/// Flag indicating if Sema is building a recovery call expression.
///
/// This flag is used to avoid building recovery call expressions
/// if Sema is already doing so, which would cause infinite recursions.
bool IsBuildingRecoveryCallExpr;
/// Used to control the generation of ExprWithCleanups.
CleanupInfo Cleanup;
/// ExprCleanupObjects - This is the stack of objects requiring
/// cleanup that are created by the current full expression.
SmallVector<ExprWithCleanups::CleanupObject, 8> ExprCleanupObjects;
/// Store a set of either DeclRefExprs or MemberExprs that contain a reference
/// to a variable (constant) that may or may not be odr-used in this Expr, and
/// we won't know until all lvalue-to-rvalue and discarded value conversions
/// have been applied to all subexpressions of the enclosing full expression.
/// This is cleared at the end of each full expression.
using MaybeODRUseExprSet = llvm::SetVector<Expr *, SmallVector<Expr *, 4>,
llvm::SmallPtrSet<Expr *, 4>>;
MaybeODRUseExprSet MaybeODRUseExprs;
std::unique_ptr<sema::FunctionScopeInfo> CachedFunctionScope;
/// Stack containing information about each of the nested
/// function, block, and method scopes that are currently active.
SmallVector<sema::FunctionScopeInfo *, 4> FunctionScopes;
/// The index of the first FunctionScope that corresponds to the current
/// context.
unsigned FunctionScopesStart = 0;
/// The function scopes belonging to the current context: the tail of
/// FunctionScopes starting at FunctionScopesStart.
ArrayRef<sema::FunctionScopeInfo*> getFunctionScopes() const {
return llvm::makeArrayRef(FunctionScopes.begin() + FunctionScopesStart,
FunctionScopes.end());
}
/// Stack containing information needed when in C++2a an 'auto' is encountered
/// in a function declaration parameter type specifier in order to invent a
/// corresponding template parameter in the enclosing abbreviated function
/// template. This information is also present in LambdaScopeInfo, stored in
/// the FunctionScopes stack.
SmallVector<InventedTemplateParameterInfo, 4> InventedParameterInfos;
/// The index of the first InventedParameterInfo that refers to the current
/// context. Mirrors FunctionScopesStart for the InventedParameterInfos
/// stack; both are saved/restored together by ContextRAII.
unsigned InventedParameterInfosStart = 0;
/// Return the invented template-parameter records that belong to the
/// current context, i.e. everything at or after InventedParameterInfosStart.
ArrayRef<InventedTemplateParameterInfo> getInventedParameterInfos() const {
  return llvm::makeArrayRef(InventedParameterInfos)
      .slice(InventedParameterInfosStart);
}
typedef LazyVector<TypedefNameDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadExtVectorDecls, 2, 2>
ExtVectorDeclsType;
/// ExtVectorDecls - This is a list all the extended vector types. This allows
/// us to associate a raw vector type with one of the ext_vector type names.
/// This is only necessary for issuing pretty diagnostics.
ExtVectorDeclsType ExtVectorDecls;
/// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes.
std::unique_ptr<CXXFieldCollector> FieldCollector;
/// An insertion-ordered set of NamedDecls.
typedef llvm::SmallSetVector<NamedDecl *, 16> NamedDeclSetType;
/// Set containing all declared private fields that are not used.
NamedDeclSetType UnusedPrivateFields;
/// Set containing all typedefs that are likely unused.
llvm::SmallSetVector<const TypedefNameDecl *, 4>
UnusedLocalTypedefNameCandidates;
/// Delete-expressions to be analyzed at the end of translation unit
///
/// This list contains class members, and locations of delete-expressions
/// that could not be proven as to whether they mismatch with new-expression
/// used in initializer of the field.
/// NOTE(review): the bool's meaning is not visible in this chunk --
/// presumably "is array form"; confirm at the use site.
typedef std::pair<SourceLocation, bool> DeleteExprLoc;
typedef llvm::SmallVector<DeleteExprLoc, 4> DeleteLocs;
llvm::MapVector<FieldDecl *, DeleteLocs> DeleteExprs;
typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy;
/// PureVirtualClassDiagSet - a set of class declarations which we have
/// emitted a list of pure virtual functions. Used to prevent emitting the
/// same list more than once.
std::unique_ptr<RecordDeclSetTy> PureVirtualClassDiagSet;
/// ParsingInitForAutoVars - a set of declarations with auto types for which
/// we are currently parsing the initializer.
llvm::SmallPtrSet<const Decl*, 4> ParsingInitForAutoVars;
/// Look for a locally scoped extern "C" declaration by the given name.
NamedDecl *findLocallyScopedExternCDecl(DeclarationName Name);
typedef LazyVector<VarDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadTentativeDefinitions, 2, 2>
TentativeDefinitionsType;
/// All the tentative definitions encountered in the TU.
TentativeDefinitionsType TentativeDefinitions;
/// All the external declarations encountered and used in the TU.
SmallVector<VarDecl *, 4> ExternalDeclarations;
typedef LazyVector<const DeclaratorDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadUnusedFileScopedDecls, 2, 2>
UnusedFileScopedDeclsType;
/// The set of file scoped decls seen so far that have not been used
/// and must warn if not used. Only contains the first declaration.
UnusedFileScopedDeclsType UnusedFileScopedDecls;
typedef LazyVector<CXXConstructorDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadDelegatingConstructors, 2, 2>
DelegatingCtorDeclsType;
/// All the delegating constructors seen so far in the file, used for
/// cycle detection at the end of the TU.
DelegatingCtorDeclsType DelegatingCtorDecls;
/// All the overriding functions seen during a class definition
/// that had their exception spec checks delayed, plus the overridden
/// function.
SmallVector<std::pair<const CXXMethodDecl*, const CXXMethodDecl*>, 2>
DelayedOverridingExceptionSpecChecks;
/// All the function redeclarations seen during a class definition that had
/// their exception spec checks delayed, plus the prior declaration they
/// should be checked against. Except during error recovery, the new decl
/// should always be a friend declaration, as that's the only valid way to
/// redeclare a special member before its class is complete.
SmallVector<std::pair<FunctionDecl*, FunctionDecl*>, 2>
DelayedEquivalentExceptionSpecChecks;
typedef llvm::MapVector<const FunctionDecl *,
std::unique_ptr<LateParsedTemplate>>
LateParsedTemplateMapT;
/// Templated functions whose bodies are parsed late, keyed by declaration.
LateParsedTemplateMapT LateParsedTemplateMap;
/// Callback to the parser to parse templated functions when needed.
typedef void LateTemplateParserCB(void *P, LateParsedTemplate &LPT);
typedef void LateTemplateParserCleanupCB(void *P);
LateTemplateParserCB *LateTemplateParser;
LateTemplateParserCleanupCB *LateTemplateParserCleanup;
/// Opaque parser pointer passed back as 'P' to the two callbacks above.
void *OpaqueParser;
/// Install the parser callbacks used to late-parse templated function
/// bodies, together with the opaque parser pointer handed back to them.
void SetLateTemplateParser(LateTemplateParserCB *LTP,
                           LateTemplateParserCleanupCB *LTPCleanup,
                           void *P) {
  // The three assignments are independent; order is irrelevant.
  OpaqueParser = P;
  LateTemplateParserCleanup = LTPCleanup;
  LateTemplateParser = LTP;
}
/// \brief Callback to the parser to parse a type expressed as a string.
std::function<TypeResult(StringRef, StringRef, SourceLocation)>
ParseTypeFromStringCallback;
class DelayedDiagnostics;
/// Opaque saved state for DelayedDiagnostics; only that class (via the
/// friend declaration) may read the saved pool pointer.
class DelayedDiagnosticsState {
sema::DelayedDiagnosticPool *SavedPool;
friend class Sema::DelayedDiagnostics;
};
typedef DelayedDiagnosticsState ParsingDeclState;
typedef DelayedDiagnosticsState ProcessingContextState;
class DelayedDiagnostics {
/// The current pool of diagnostics into which delayed
/// diagnostics should go.
sema::DelayedDiagnosticPool *CurPool;
public:
DelayedDiagnostics() : CurPool(nullptr) {}
/// Adds a delayed diagnostic.
void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h
/// Determines whether diagnostics should be delayed.
bool shouldDelayDiagnostics() { return CurPool != nullptr; }
/// Returns the current delayed-diagnostics pool.
sema::DelayedDiagnosticPool *getCurrentPool() const {
return CurPool;
}
/// Enter a new scope. Access and deprecation diagnostics will be
/// collected in this pool.
DelayedDiagnosticsState push(sema::DelayedDiagnosticPool &pool) {
DelayedDiagnosticsState state;
state.SavedPool = CurPool;
CurPool = &pool;
return state;
}
/// Leave a delayed-diagnostic state that was previously pushed.
/// Do not emit any of the diagnostics. This is performed as part
/// of the bookkeeping of popping a pool "properly".
void popWithoutEmitting(DelayedDiagnosticsState state) {
CurPool = state.SavedPool;
}
/// Enter a new scope where access and deprecation diagnostics are
/// not delayed.
DelayedDiagnosticsState pushUndelayed() {
DelayedDiagnosticsState state;
state.SavedPool = CurPool;
CurPool = nullptr;
return state;
}
/// Undo a previous pushUndelayed().
void popUndelayed(DelayedDiagnosticsState state) {
assert(CurPool == nullptr);
CurPool = state.SavedPool;
}
} DelayedDiagnostics;
/// A RAII object to temporarily push a declaration context.
///
/// On construction this saves CurContext, the delayed-diagnostics state,
/// the CXXThisTypeOverride, and both scope-stack start indices, then makes
/// \p ContextToPush current. pop() (or the destructor) restores all of it.
class ContextRAII {
private:
Sema &S;
DeclContext *SavedContext;
ProcessingContextState SavedContextState;
QualType SavedCXXThisTypeOverride;
unsigned SavedFunctionScopesStart;
unsigned SavedInventedParameterInfosStart;
public:
ContextRAII(Sema &S, DeclContext *ContextToPush, bool NewThisContext = true)
: S(S), SavedContext(S.CurContext),
SavedContextState(S.DelayedDiagnostics.pushUndelayed()),
SavedCXXThisTypeOverride(S.CXXThisTypeOverride),
SavedFunctionScopesStart(S.FunctionScopesStart),
SavedInventedParameterInfosStart(S.InventedParameterInfosStart)
{
assert(ContextToPush && "pushing null context");
S.CurContext = ContextToPush;
if (NewThisContext)
S.CXXThisTypeOverride = QualType();
// Any saved FunctionScopes do not refer to this context.
S.FunctionScopesStart = S.FunctionScopes.size();
S.InventedParameterInfosStart = S.InventedParameterInfos.size();
}
// Restore the saved state. Idempotent: SavedContext is nulled so a second
// call (an explicit pop() followed by the destructor) is a no-op.
void pop() {
if (!SavedContext) return;
S.CurContext = SavedContext;
S.DelayedDiagnostics.popUndelayed(SavedContextState);
S.CXXThisTypeOverride = SavedCXXThisTypeOverride;
S.FunctionScopesStart = SavedFunctionScopesStart;
S.InventedParameterInfosStart = SavedInventedParameterInfosStart;
SavedContext = nullptr;
}
~ContextRAII() {
pop();
}
};
/// Whether the AST is currently being rebuilt to correct immediate
/// invocations. Immediate invocation candidates and references to consteval
/// functions aren't tracked when this is set.
bool RebuildingImmediateInvocation = false;

/// Used to change context to isConstantEvaluated without pushing a heavy
/// ExpressionEvaluationContextRecord object.
/// In-class initializer added for consistency with
/// RebuildingImmediateInvocation above and so isConstantEvaluated() never
/// reads an indeterminate value. (NOTE(review): if Sema's constructor also
/// initializes this, the initializer is harmlessly redundant.)
bool isConstantEvaluatedOverride = false;

/// True if the current expression is constant-evaluated: either the
/// innermost evaluation context says so, or the override flag is set.
bool isConstantEvaluated() {
  return ExprEvalContexts.back().isConstantEvaluated() ||
         isConstantEvaluatedOverride;
}
/// RAII object to handle the state changes required to synthesize
/// a function body.
class SynthesizedFunctionScope {
Sema &S;
Sema::ContextRAII SavedContext;
bool PushedCodeSynthesisContext = false;
public:
// Pushes DC as the current context (via SavedContext), a fresh function
// scope, and a PotentiallyEvaluated expression context; marks a
// FunctionDecl as about to receive a body. DC must be a FunctionDecl or
// an ObjCMethodDecl (asserted).
SynthesizedFunctionScope(Sema &S, DeclContext *DC)
: S(S), SavedContext(S, DC) {
S.PushFunctionScope();
S.PushExpressionEvaluationContext(
Sema::ExpressionEvaluationContext::PotentiallyEvaluated);
if (auto *FD = dyn_cast<FunctionDecl>(DC))
FD->setWillHaveBody(true);
else
assert(isa<ObjCMethodDecl>(DC));
}
// Record a "while defining the synthesized function" note for diagnostics
// issued during synthesis. May be called at most once (asserted).
void addContextNote(SourceLocation UseLoc) {
assert(!PushedCodeSynthesisContext);
Sema::CodeSynthesisContext Ctx;
Ctx.Kind = Sema::CodeSynthesisContext::DefiningSynthesizedFunction;
Ctx.PointOfInstantiation = UseLoc;
Ctx.Entity = cast<Decl>(S.CurContext);
S.pushCodeSynthesisContext(Ctx);
PushedCodeSynthesisContext = true;
}
// Tears everything down in reverse order of construction.
~SynthesizedFunctionScope() {
if (PushedCodeSynthesisContext)
S.popCodeSynthesisContext();
if (auto *FD = dyn_cast<FunctionDecl>(S.CurContext))
FD->setWillHaveBody(false);
S.PopExpressionEvaluationContext();
S.PopFunctionScopeInfo();
}
};
/// WeakUndeclaredIdentifiers - Identifiers contained in
/// \#pragma weak before being declared. Rare; may alias another
/// identifier, declared or undeclared.
llvm::MapVector<IdentifierInfo *, WeakInfo> WeakUndeclaredIdentifiers;
/// ExtnameUndeclaredIdentifiers - Identifiers contained in
/// \#pragma redefine_extname before declared. Used in Solaris system headers
/// to define functions that occur in multiple standards to call the version
/// in the currently selected standard.
llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*> ExtnameUndeclaredIdentifiers;
/// Load weak undeclared identifiers from the external source.
void LoadExternalWeakUndeclaredIdentifiers();
/// WeakTopLevelDecl - Translation-unit scoped declarations generated by
/// \#pragma weak during processing of other Decls.
/// I couldn't figure out a clean way to generate these in-line, so
/// we store them here and handle separately -- which is a hack.
/// It would be best to refactor this.
SmallVector<Decl*,2> WeakTopLevelDecl;
/// Per-identifier tracking of the declarations each identifier currently
/// resolves to (see IdentifierResolver).
IdentifierResolver IdResolver;
/// Translation Unit Scope - useful to Objective-C actions that need
/// to lookup file scope declarations in the "ordinary" C decl namespace.
/// For example, user-defined classes, built-in "id" type, etc.
Scope *TUScope;
/// The C++ "std" namespace, where the standard library resides.
LazyDeclPtr StdNamespace;
/// The C++ "std::bad_alloc" class, which is defined by the C++
/// standard library.
LazyDeclPtr StdBadAlloc;
/// The C++ "std::align_val_t" enum class, which is defined by the C++
/// standard library.
LazyDeclPtr StdAlignValT;
/// The C++ "std::experimental" namespace, where the experimental parts
/// of the standard library resides.
NamespaceDecl *StdExperimentalNamespaceCache;
/// The C++ "std::initializer_list" template, which is defined in
/// \<initializer_list>.
ClassTemplateDecl *StdInitializerList;
/// The C++ "std::coroutine_traits" template, which is defined in
/// \<coroutine_traits>
ClassTemplateDecl *StdCoroutineTraitsCache;
/// The C++ "type_info" declaration, which is defined in \<typeinfo>.
RecordDecl *CXXTypeInfoDecl;
/// The MSVC "_GUID" struct, which is defined in MSVC header files.
RecordDecl *MSVCGuidDecl;
/// Caches identifiers/selectors for NSFoundation APIs.
std::unique_ptr<NSAPI> NSAPIObj;
/// The declaration of the Objective-C NSNumber class.
ObjCInterfaceDecl *NSNumberDecl;
/// The declaration of the Objective-C NSValue class.
ObjCInterfaceDecl *NSValueDecl;
/// Pointer to NSNumber type (NSNumber *).
QualType NSNumberPointer;
/// Pointer to NSValue type (NSValue *).
QualType NSValuePointer;
/// The Objective-C NSNumber methods used to create NSNumber literals.
ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods];
/// The declaration of the Objective-C NSString class.
ObjCInterfaceDecl *NSStringDecl;
/// Pointer to NSString type (NSString *).
QualType NSStringPointer;
/// The declaration of the stringWithUTF8String: method.
ObjCMethodDecl *StringWithUTF8StringMethod;
/// The declaration of the valueWithBytes:objCType: method.
ObjCMethodDecl *ValueWithBytesObjCTypeMethod;
/// The declaration of the Objective-C NSArray class.
ObjCInterfaceDecl *NSArrayDecl;
/// The declaration of the arrayWithObjects:count: method.
ObjCMethodDecl *ArrayWithObjectsMethod;
/// The declaration of the Objective-C NSDictionary class.
ObjCInterfaceDecl *NSDictionaryDecl;
/// The declaration of the dictionaryWithObjects:forKeys:count: method.
ObjCMethodDecl *DictionaryWithObjectsMethod;
/// id<NSCopying> type.
QualType QIDNSCopying;
/// will hold 'respondsToSelector:'
Selector RespondsToSelectorSel;
/// A flag to remember whether the implicit forms of operator new and delete
/// have been declared.
bool GlobalNewDeleteDeclared;
/// A flag to indicate that we're in a context that permits abstract
/// references to fields, such as the operand of an MS-style inline-assembly
/// SIZE operator (see ExpressionEvaluationContext::UnevaluatedAbstract
/// below). NOTE(review): the original comment here was truncated
/// mid-sentence ("This is really a") -- confirm intent upstream.
bool AllowAbstractFieldReference;
/// Describes how the expressions currently being parsed are
/// evaluated at run-time, if at all.
enum class ExpressionEvaluationContext {
/// The current expression and its subexpressions occur within an
/// unevaluated operand (C++11 [expr]p7), such as the subexpression of
/// \c sizeof, where the type of the expression may be significant but
/// no code will be generated to evaluate the value of the expression at
/// run time.
Unevaluated,
/// The current expression occurs within a braced-init-list within
/// an unevaluated operand. This is mostly like a regular unevaluated
/// context, except that we still instantiate constexpr functions that are
/// referenced here so that we can perform narrowing checks correctly.
UnevaluatedList,
/// The current expression occurs within a discarded statement.
/// This behaves largely similarly to an unevaluated operand in preventing
/// definitions from being required, but not in other ways.
DiscardedStatement,
/// The current expression occurs within an unevaluated
/// operand that unconditionally permits abstract references to
/// fields, such as a SIZE operator in MS-style inline assembly.
UnevaluatedAbstract,
/// The current context is "potentially evaluated" in C++11 terms,
/// but the expression is evaluated at compile-time (like the values of
/// cases in a switch statement).
ConstantEvaluated,
/// The current expression is potentially evaluated at run time,
/// which means that code may be generated to evaluate the value of the
/// expression at run time.
PotentiallyEvaluated,
/// The current expression is potentially evaluated, but any
/// declarations referenced inside that expression are only used if
/// in fact the current expression is used.
///
/// This value is used when parsing default function arguments, for which
/// we would like to provide diagnostics (e.g., passing non-POD arguments
/// through varargs) but do not want to mark declarations as "referenced"
/// until the default argument is used.
PotentiallyEvaluatedIfUsed
};
using ImmediateInvocationCandidate = llvm::PointerIntPair<ConstantExpr *, 1>;

/// Data structure used to record current or nested
/// expression evaluation contexts.
struct ExpressionEvaluationContextRecord {
  /// The expression evaluation context.
  ExpressionEvaluationContext Context;

  /// Whether the enclosing context needed a cleanup.
  CleanupInfo ParentCleanup;

  /// Whether we are in a decltype expression.
  /// Default-initialized here: the constructor below does not set this
  /// member, so without the initializer it would hold an indeterminate
  /// value until assigned after the record is pushed.
  bool IsDecltype = false;

  /// The number of active cleanup objects when we entered
  /// this expression evaluation context.
  unsigned NumCleanupObjects;

  /// The number of typos encountered during this expression evaluation
  /// context (i.e. the number of TypoExprs created).
  unsigned NumTypos;

  /// ODR-use candidates saved from the enclosing context.
  MaybeODRUseExprSet SavedMaybeODRUseExprs;

  /// The lambdas that are present within this context, if it
  /// is indeed an unevaluated context.
  SmallVector<LambdaExpr *, 2> Lambdas;

  /// The declaration that provides context for lambda expressions
  /// and block literals if the normal declaration context does not
  /// suffice, e.g., in a default function argument.
  Decl *ManglingContextDecl;

  /// If we are processing a decltype type, a set of call expressions
  /// for which we have deferred checking the completeness of the return type.
  SmallVector<CallExpr *, 8> DelayedDecltypeCalls;

  /// If we are processing a decltype type, a set of temporary binding
  /// expressions for which we have deferred checking the destructor.
  SmallVector<CXXBindTemporaryExpr *, 8> DelayedDecltypeBinds;

  /// NOTE(review): presumably the pending noderef expressions consumed by
  /// WarnOnPendingNoDerefs -- confirm at the use site.
  llvm::SmallPtrSet<const Expr *, 8> PossibleDerefs;

  /// Expressions appearing as the LHS of a volatile assignment in this
  /// context. We produce a warning for these when popping the context if
  /// they are not discarded-value expressions nor unevaluated operands.
  SmallVector<Expr*, 2> VolatileAssignmentLHSs;

  /// Set of candidates for starting an immediate invocation.
  llvm::SmallVector<ImmediateInvocationCandidate, 4> ImmediateInvocationCandidates;

  /// Set of DeclRefExprs referencing a consteval function when used in a
  /// context not already known to be immediately invoked.
  llvm::SmallPtrSet<DeclRefExpr *, 4> ReferenceToConsteval;

  /// \brief Describes whether we are in an expression context which we have
  /// to handle differently.
  enum ExpressionKind {
    EK_Decltype, EK_TemplateArgument, EK_Other
  } ExprContext;

  ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context,
                                    unsigned NumCleanupObjects,
                                    CleanupInfo ParentCleanup,
                                    Decl *ManglingContextDecl,
                                    ExpressionKind ExprContext)
      : Context(Context), ParentCleanup(ParentCleanup),
        NumCleanupObjects(NumCleanupObjects), NumTypos(0),
        ManglingContextDecl(ManglingContextDecl), ExprContext(ExprContext) {}

  /// True for any of the unevaluated-operand contexts (C++11 [expr]p7).
  bool isUnevaluated() const {
    return Context == ExpressionEvaluationContext::Unevaluated ||
           Context == ExpressionEvaluationContext::UnevaluatedAbstract ||
           Context == ExpressionEvaluationContext::UnevaluatedList;
  }

  /// True only for the ConstantEvaluated context.
  bool isConstantEvaluated() const {
    return Context == ExpressionEvaluationContext::ConstantEvaluated;
  }
};
/// A stack of expression evaluation contexts.
/// NOTE(review): isConstantEvaluated() above calls .back() without an
/// emptiness check, so this stack presumably always holds at least one
/// record after Sema initialization -- confirm in the constructor.
SmallVector<ExpressionEvaluationContextRecord, 8> ExprEvalContexts;
/// Emit a warning for all pending noderef expressions that we recorded.
void WarnOnPendingNoDerefs(ExpressionEvaluationContextRecord &Rec);
/// Compute the mangling number context for a lambda expression or
/// block literal. Also return the extra mangling decl if any.
///
/// \param DC - The DeclContext containing the lambda expression or
/// block literal.
std::tuple<MangleNumberingContext *, Decl *>
getCurrentMangleNumberContext(const DeclContext *DC);
/// SpecialMemberOverloadResult - The result of overload resolution for a
/// special member function.
///
/// A thin wrapper around a PointerIntPair: the pointer holds the chosen
/// method (if any) and the low integer bits hold the resolution Kind.
class SpecialMemberOverloadResult {
public:
  enum Kind {
    NoMemberOrDeleted,
    Ambiguous,
    Success
  };

private:
  llvm::PointerIntPair<CXXMethodDecl*, 2> Pair;

public:
  SpecialMemberOverloadResult() : Pair() {}
  SpecialMemberOverloadResult(CXXMethodDecl *MD) {
    Pair.setPointer(MD);
    // A deleted member is recorded as NoMemberOrDeleted, otherwise Success.
    Pair.setInt(MD->isDeleted() ? NoMemberOrDeleted : Success);
  }

  Kind getKind() const { return static_cast<Kind>(Pair.getInt()); }
  void setKind(Kind K) { Pair.setInt(K); }
  CXXMethodDecl *getMethod() const { return Pair.getPointer(); }
  void setMethod(CXXMethodDecl *MD) { Pair.setPointer(MD); }
};
/// A SpecialMemberOverloadResult that can live in a FoldingSet, keyed by
/// the FoldingSetNodeID it was created with.
class SpecialMemberOverloadResultEntry
: public llvm::FastFoldingSetNode,
public SpecialMemberOverloadResult {
public:
SpecialMemberOverloadResultEntry(const llvm::FoldingSetNodeID &ID)
: FastFoldingSetNode(ID)
{}
};
/// A cache of special member function overload resolution results
/// for C++ records.
llvm::FoldingSet<SpecialMemberOverloadResultEntry> SpecialMemberCache;
/// A cache of the flags available in enumerations with the flag_bits
/// attribute.
mutable llvm::DenseMap<const EnumDecl*, llvm::APInt> FlagBitsCache;
/// The kind of translation unit we are processing.
///
/// When we're processing a complete translation unit, Sema will perform
/// end-of-translation-unit semantic tasks (such as creating
/// initializers for tentative definitions in C) once parsing has
/// completed. Modules and precompiled headers perform different kinds of
/// checks.
TranslationUnitKind TUKind;
/// Bump-pointer arena allocator. NOTE(review): its clients are not visible
/// in this chunk.
llvm::BumpPtrAllocator BumpAlloc;
/// The number of SFINAE diagnostics that have been trapped.
unsigned NumSFINAEErrors;
typedef llvm::DenseMap<ParmVarDecl *, llvm::TinyPtrVector<ParmVarDecl *>>
UnparsedDefaultArgInstantiationsMap;
/// A mapping from parameters with unparsed default arguments to the
/// set of instantiations of each parameter.
///
/// This mapping is a temporary data structure used when parsing
/// nested class templates or nested classes of class templates,
/// where we might end up instantiating an inner class before the
/// default arguments of its methods have been parsed.
UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations;
// Contains the locations of the beginning of unparsed default
// argument locations.
llvm::DenseMap<ParmVarDecl *, SourceLocation> UnparsedDefaultArgLocs;
/// UndefinedInternals - all the used, undefined objects which require a
/// definition in this translation unit.
llvm::MapVector<NamedDecl *, SourceLocation> UndefinedButUsed;
/// Determine if VD, which must be a variable or function, is an external
/// symbol that nonetheless can't be referenced from outside this translation
/// unit because its type has no linkage and it's not extern "C".
bool isExternalWithNoLinkageType(ValueDecl *VD);
/// Obtain a sorted list of functions that are undefined but ODR-used.
void getUndefinedButUsed(
SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined);
/// Retrieves list of suspicious delete-expressions that will be checked at
/// the end of translation unit.
const llvm::MapVector<FieldDecl *, DeleteLocs> &
getMismatchingDeleteExpressions() const;
/// A pair of method lists; see MethodPool below for what each half holds.
typedef std::pair<ObjCMethodList, ObjCMethodList> GlobalMethods;
typedef llvm::DenseMap<Selector, GlobalMethods> GlobalMethodPool;
/// Method Pool - allows efficient lookup when typechecking messages to "id".
/// We need to maintain a list, since selectors can have differing signatures
/// across classes. In Cocoa, this happens to be extremely uncommon (only 1%
/// of selectors are "overloaded").
/// At the head of the list it is recorded whether there were 0, 1, or >= 2
/// methods inside categories with a particular selector.
GlobalMethodPool MethodPool;
/// Method selectors used in a \@selector expression. Used for implementation
/// of -Wselector.
llvm::MapVector<Selector, SourceLocation> ReferencedSelectors;
/// List of SourceLocations where 'self' is implicitly retained inside a
/// block.
llvm::SmallVector<std::pair<SourceLocation, const BlockDecl *>, 1>
ImplicitlyRetainedSelfLocs;
/// Kinds of C++ special members.
enum CXXSpecialMember {
CXXDefaultConstructor,
CXXCopyConstructor,
CXXMoveConstructor,
CXXCopyAssignment,
CXXMoveAssignment,
CXXDestructor,
CXXInvalid
};
typedef llvm::PointerIntPair<CXXRecordDecl *, 3, CXXSpecialMember>
SpecialMemberDecl;
/// The C++ special members which we are currently in the process of
/// declaring. If this process recursively triggers the declaration of the
/// same special member, we should act as if it is not yet declared.
llvm::SmallPtrSet<SpecialMemberDecl, 4> SpecialMembersBeingDeclared;
/// Kinds of defaulted comparison operator functions.
enum class DefaultedComparisonKind : unsigned char {
/// This is not a defaultable comparison operator.
None,
/// This is an operator== that should be implemented as a series of
/// subobject comparisons.
Equal,
/// This is an operator<=> that should be implemented as a series of
/// subobject comparisons.
ThreeWay,
/// This is an operator!= that should be implemented as a rewrite in terms
/// of a == comparison.
NotEqual,
/// This is an <, <=, >, or >= that should be implemented as a rewrite in
/// terms of a <=> comparison.
Relational,
};
/// The function definitions which were renamed as part of typo-correction
/// to match their respective declarations. We want to keep track of them
/// to ensure that we don't emit a "redefinition" error if we encounter a
/// correctly named definition after the renamed definition.
llvm::SmallPtrSet<const NamedDecl *, 4> TypoCorrectedFunctionDefinitions;
/// Stack of types that correspond to the parameter entities that are
/// currently being copy-initialized. Can be empty.
llvm::SmallVector<QualType, 4> CurrentParameterCopyTypes;
/// NOTE(review): presumably loads the methods for \p Sel from the external
/// source into MethodPool -- confirm in the implementation.
void ReadMethodPool(Selector Sel);
void updateOutOfDateSelector(Selector Sel);
/// Private Helper predicate to check for 'self'.
bool isSelfExpr(Expr *RExpr);
bool isSelfExpr(Expr *RExpr, const ObjCMethodDecl *Method);
/// Cause the active diagnostic on the DiagnosticsEngine to be
/// emitted. This is closely coupled to the SemaDiagnosticBuilder class and
/// should not be used elsewhere.
void EmitCurrentDiagnostic(unsigned DiagID);
/// Records and restores the CurFPFeatures state on entry/exit of compound
/// statements.
class FPFeaturesStateRAII {
  Sema& S;
  FPOptions OldFPFeaturesState;
  FPOptionsOverride OldOverrides;

public:
  /// Snapshot both the current FP features and the pragma-stack overrides.
  FPFeaturesStateRAII(Sema &S)
      : S(S), OldFPFeaturesState(S.CurFPFeatures),
        OldOverrides(S.FpPragmaStack.CurrentValue) {}

  /// Restore the snapshot taken at construction.
  ~FPFeaturesStateRAII() {
    S.CurFPFeatures = OldFPFeaturesState;
    S.FpPragmaStack.CurrentValue = OldOverrides;
  }

  FPOptionsOverride getOverrides() { return OldOverrides; }
};
/// Declare an implicit typedef named \p Name for the type \p T.
void addImplicitTypedef(StringRef Name, QualType T);
/// Set once a stack-exhaustion warning has been emitted (see
/// warnStackExhausted); presumably prevents repeated warnings -- confirm.
bool WarnedStackExhausted = false;
public:
Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
TranslationUnitKind TUKind = TU_Complete,
CodeCompleteConsumer *CompletionConsumer = nullptr);
~Sema();
/// Perform initialization that occurs after the parser has been
/// initialized but before it parses anything.
void Initialize();
// Simple accessors for Sema's collaborators and options.
const LangOptions &getLangOpts() const { return LangOpts; }
OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; }
FPOptions &getCurFPFeatures() { return CurFPFeatures; }
DiagnosticsEngine &getDiagnostics() const { return Diags; }
SourceManager &getSourceManager() const { return SourceMgr; }
Preprocessor &getPreprocessor() const { return PP; }
ASTContext &getASTContext() const { return Context; }
ASTConsumer &getASTConsumer() const { return Consumer; }
ASTMutationListener *getASTMutationListener() const;
ExternalSemaSource* getExternalSource() const { return ExternalSource; }
///Registers an external source. If an external source already exists,
/// creates a multiplex external source and appends to it.
///
///\param[in] E - A non-null external sema source.
///
void addExternalSource(ExternalSemaSource *E);
void PrintStats() const;
/// Warn that the stack is nearly exhausted.
void warnStackExhausted(SourceLocation Loc);
/// Run some code with "sufficient" stack space. (Currently, at least 256K is
/// guaranteed). Produces a warning if we're low on stack space and allocates
/// more in that case. Use this in code that may recurse deeply (for example,
/// in template instantiation) to avoid stack overflow.
void runWithSufficientStackSpace(SourceLocation Loc,
llvm::function_ref<void()> Fn);
/// Helper class that creates diagnostics with optional
/// template instantiation stacks.
///
/// This class provides a wrapper around the basic DiagnosticBuilder
/// class that emits diagnostics. SemaDiagnosticBuilder is
/// responsible for emitting the diagnostic (as DiagnosticBuilder
/// does) and, if the diagnostic comes from inside a template
/// instantiation, printing the template instantiation stack as
/// well.
class SemaDiagnosticBuilder : public DiagnosticBuilder {
Sema &SemaRef;
unsigned DiagID;
public:
SemaDiagnosticBuilder(DiagnosticBuilder &DB, Sema &SemaRef, unsigned DiagID)
: DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) { }
// This is a cunning lie. DiagnosticBuilder actually performs move
// construction in its copy constructor (but due to varied uses, it's not
// possible to conveniently express this as actual move construction). So
// the default copy ctor here is fine, because the base class disables the
// source anyway, so the user-defined ~SemaDiagnosticBuilder is a safe no-op
// in that case anyway.
SemaDiagnosticBuilder(const SemaDiagnosticBuilder&) = default;
~SemaDiagnosticBuilder() {
// If we aren't active, there is nothing to do.
if (!isActive()) return;
// Otherwise, we need to emit the diagnostic. First flush the underlying
// DiagnosticBuilder data, and clear the diagnostic builder itself so it
// won't emit the diagnostic in its own destructor.
//
// This seems wasteful, in that as written the DiagnosticBuilder dtor will
// do its own needless checks to see if the diagnostic needs to be
// emitted. However, because we take care to ensure that the builder
// objects never escape, a sufficiently smart compiler will be able to
// eliminate that code.
FlushCounts();
Clear();
// Dispatch to Sema to emit the diagnostic.
SemaRef.EmitCurrentDiagnostic(DiagID);
}
/// Teach operator<< to produce an object of the correct type.
template<typename T>
friend const SemaDiagnosticBuilder &operator<<(
const SemaDiagnosticBuilder &Diag, const T &Value) {
const DiagnosticBuilder &BaseDiag = Diag;
BaseDiag << Value;
return Diag;
}
};
/// Emit a diagnostic with the given ID at \p Loc, wrapped so that template
/// instantiation notes are attached on destruction.
SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID) {
  DiagnosticBuilder Builder = Diags.Report(Loc, DiagID);
  return SemaDiagnosticBuilder(Builder, *this, DiagID);
}
/// Emit a partial diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic& PD);
/// Build a partial diagnostic.
PartialDiagnostic PDiag(unsigned DiagID = 0); // in SemaInternal.h
bool findMacroSpelling(SourceLocation &loc, StringRef name);
/// Get a string to suggest for zero-initialization of a type.
std::string
getFixItZeroInitializerForType(QualType T, SourceLocation Loc) const;
std::string getFixItZeroLiteralForType(QualType T, SourceLocation Loc) const;
/// Calls \c Lexer::getLocForEndOfToken()
SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset = 0);
/// Retrieve the module loader associated with the preprocessor.
ModuleLoader &getModuleLoader() const;
/// Invent a new identifier for parameters of abbreviated templates.
IdentifierInfo *
InventAbbreviatedTemplateParameterTypeName(IdentifierInfo *ParamName,
unsigned Index);
void emitAndClearUnusedLocalTypedefWarnings();
private:
/// Function or variable declarations to be checked for whether the deferred
/// diagnostics should be emitted.
SmallVector<Decl *, 4> DeclsToCheckForDeferredDiags;
public:
// Emit all deferred diagnostics.
void emitDeferredDiags();
enum TUFragmentKind {
/// The global module fragment, between 'module;' and a module-declaration.
Global,
/// A normal translation unit fragment. For a non-module unit, this is the
/// entire translation unit. Otherwise, it runs from the module-declaration
/// to the private-module-fragment (if any) or the end of the TU (if not).
Normal,
/// The private module fragment, between 'module :private;' and the end of
/// the translation unit.
Private
};
void ActOnStartOfTranslationUnit();
void ActOnEndOfTranslationUnit();
void ActOnEndOfTranslationUnitFragment(TUFragmentKind Kind);
void CheckDelegatingCtorCycles();
Scope *getScopeForContext(DeclContext *Ctx);
void PushFunctionScope();
void PushBlockScope(Scope *BlockScope, BlockDecl *Block);
sema::LambdaScopeInfo *PushLambdaScope();
/// This is used to inform Sema what the current TemplateParameterDepth
/// is during Parsing. Currently it is used to pass on the depth
/// when parsing generic lambda 'auto' parameters.
void RecordParsingTemplateParameterDepth(unsigned Depth);
void PushCapturedRegionScope(Scope *RegionScope, CapturedDecl *CD,
RecordDecl *RD, CapturedRegionKind K,
unsigned OpenMPCaptureLevel = 0);
/// Custom deleter to allow FunctionScopeInfos to be kept alive for a short
/// time after they've been popped.
class PoppedFunctionScopeDeleter {
  Sema *Self; // Captured at construction; used by operator().

public:
  explicit PoppedFunctionScopeDeleter(Sema *Self) : Self(Self) {}

  /// Invoked when a PoppedFunctionScopePtr releases its scope.
  /// Defined out of line.
  void operator()(sema::FunctionScopeInfo *Scope) const;
};
using PoppedFunctionScopePtr =
std::unique_ptr<sema::FunctionScopeInfo, PoppedFunctionScopeDeleter>;
PoppedFunctionScopePtr
PopFunctionScopeInfo(const sema::AnalysisBasedWarnings::Policy *WP = nullptr,
const Decl *D = nullptr,
QualType BlockType = QualType());
/// Return the innermost function scope being analyzed, or null when no
/// function scope is currently open.
sema::FunctionScopeInfo *getCurFunction() const {
  if (FunctionScopes.empty())
    return nullptr;
  return FunctionScopes.back();
}
sema::FunctionScopeInfo *getEnclosingFunction() const;
void setFunctionHasBranchIntoScope();
void setFunctionHasBranchProtectedScope();
void setFunctionHasIndirectGoto();
void PushCompoundScope(bool IsStmtExpr);
void PopCompoundScope();
sema::CompoundScopeInfo &getCurCompoundScope() const;
bool hasAnyUnrecoverableErrorsInThisFunction() const;
/// Retrieve the current block, if any.
sema::BlockScopeInfo *getCurBlock();
/// Get the innermost lambda enclosing the current location, if any. This
/// looks through intervening non-lambda scopes such as local functions and
/// blocks.
sema::LambdaScopeInfo *getEnclosingLambda() const;
/// Retrieve the current lambda scope info, if any.
/// \param IgnoreNonLambdaCapturingScope true if should find the top-most
/// lambda scope info ignoring all inner capturing scopes that are not
/// lambda scopes.
sema::LambdaScopeInfo *
getCurLambda(bool IgnoreNonLambdaCapturingScope = false);
/// Retrieve the current generic lambda info, if any.
sema::LambdaScopeInfo *getCurGenericLambda();
/// Retrieve the current captured region, if any.
sema::CapturedRegionScopeInfo *getCurCapturedRegion();
/// WeakTopLevelDeclDecls - access to \#pragma weak-generated Decls
SmallVectorImpl<Decl *> &WeakTopLevelDecls() { return WeakTopLevelDecl; }
/// Called before parsing a function declarator belonging to a function
/// declaration.
void ActOnStartFunctionDeclarationDeclarator(Declarator &D,
unsigned TemplateParameterDepth);
/// Called after parsing a function declarator belonging to a function
/// declaration.
void ActOnFinishFunctionDeclarationDeclarator(Declarator &D);
void ActOnComment(SourceRange Comment);
//===--------------------------------------------------------------------===//
// Type Analysis / Processing: SemaType.cpp.
//
QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs,
const DeclSpec *DS = nullptr);
QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVRA,
const DeclSpec *DS = nullptr);
QualType BuildPointerType(QualType T,
SourceLocation Loc, DeclarationName Entity);
QualType BuildReferenceType(QualType T, bool LValueRef,
SourceLocation Loc, DeclarationName Entity);
QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
Expr *ArraySize, unsigned Quals,
SourceRange Brackets, DeclarationName Entity);
QualType BuildVectorType(QualType T, Expr *VecSize, SourceLocation AttrLoc);
QualType BuildExtVectorType(QualType T, Expr *ArraySize,
SourceLocation AttrLoc);
QualType BuildMatrixType(QualType T, Expr *NumRows, Expr *NumColumns,
SourceLocation AttrLoc);
QualType BuildAddressSpaceAttr(QualType &T, LangAS ASIdx, Expr *AddrSpace,
SourceLocation AttrLoc);
/// Same as above, but constructs the AddressSpace index if not provided.
QualType BuildAddressSpaceAttr(QualType &T, Expr *AddrSpace,
SourceLocation AttrLoc);
bool CheckQualifiedFunctionForTypeId(QualType T, SourceLocation Loc);
bool CheckFunctionReturnType(QualType T, SourceLocation Loc);
/// Build a function type.
///
/// This routine checks the function type according to C++ rules and
/// under the assumption that the result type and parameter types have
/// just been instantiated from a template. It therefore duplicates
/// some of the behavior of GetTypeForDeclarator, but in a much
/// simpler form that is only suitable for this narrow use case.
///
/// \param T The return type of the function.
///
/// \param ParamTypes The parameter types of the function. This array
/// will be modified to account for adjustments to the types of the
/// function parameters.
///
/// \param Loc The location of the entity whose type involves this
/// function type or, if there is no such entity, the location of the
/// type that will have function type.
///
/// \param Entity The name of the entity that involves the function
/// type, if known.
///
/// \param EPI Extra information about the function type. Usually this will
/// be taken from an existing function with the same prototype.
///
/// \returns A suitable function type, if there are no errors. The
/// unqualified type will always be a FunctionProtoType.
/// Otherwise, returns a NULL type.
QualType BuildFunctionType(QualType T,
MutableArrayRef<QualType> ParamTypes,
SourceLocation Loc, DeclarationName Entity,
const FunctionProtoType::ExtProtoInfo &EPI);
QualType BuildMemberPointerType(QualType T, QualType Class,
SourceLocation Loc,
DeclarationName Entity);
QualType BuildBlockPointerType(QualType T,
SourceLocation Loc, DeclarationName Entity);
QualType BuildParenType(QualType T);
QualType BuildAtomicType(QualType T, SourceLocation Loc);
QualType BuildReadPipeType(QualType T,
SourceLocation Loc);
QualType BuildWritePipeType(QualType T,
SourceLocation Loc);
QualType BuildExtIntType(bool IsUnsigned, Expr *BitWidth, SourceLocation Loc);
TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S);
TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy);
/// Package the given type and TSI into a ParsedType.
ParsedType CreateParsedType(QualType T, TypeSourceInfo *TInfo);
DeclarationNameInfo GetNameForDeclarator(Declarator &D);
DeclarationNameInfo GetNameFromUnqualifiedId(const UnqualifiedId &Name);
static QualType GetTypeFromParser(ParsedType Ty,
TypeSourceInfo **TInfo = nullptr);
CanThrowResult canThrow(const Stmt *E);
/// Determine whether the callee of a particular function call can throw.
/// E, D and Loc are all optional.
static CanThrowResult canCalleeThrow(Sema &S, const Expr *E, const Decl *D,
SourceLocation Loc = SourceLocation());
const FunctionProtoType *ResolveExceptionSpec(SourceLocation Loc,
const FunctionProtoType *FPT);
void UpdateExceptionSpec(FunctionDecl *FD,
const FunctionProtoType::ExceptionSpecInfo &ESI);
bool CheckSpecifiedExceptionType(QualType &T, SourceRange Range);
bool CheckDistantExceptionSpec(QualType T);
bool CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New);
bool CheckEquivalentExceptionSpec(
const FunctionProtoType *Old, SourceLocation OldLoc,
const FunctionProtoType *New, SourceLocation NewLoc);
bool CheckEquivalentExceptionSpec(
const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID,
const FunctionProtoType *Old, SourceLocation OldLoc,
const FunctionProtoType *New, SourceLocation NewLoc);
bool handlerCanCatch(QualType HandlerType, QualType ExceptionType);
bool CheckExceptionSpecSubset(const PartialDiagnostic &DiagID,
const PartialDiagnostic &NestedDiagID,
const PartialDiagnostic &NoteID,
const PartialDiagnostic &NoThrowDiagID,
const FunctionProtoType *Superset,
SourceLocation SuperLoc,
const FunctionProtoType *Subset,
SourceLocation SubLoc);
bool CheckParamExceptionSpec(const PartialDiagnostic &NestedDiagID,
const PartialDiagnostic &NoteID,
const FunctionProtoType *Target,
SourceLocation TargetLoc,
const FunctionProtoType *Source,
SourceLocation SourceLoc);
TypeResult ActOnTypeName(Scope *S, Declarator &D);
/// The parser has parsed the context-sensitive type 'instancetype'
/// in an Objective-C message declaration. Return the appropriate type.
ParsedType ActOnObjCInstanceType(SourceLocation Loc);
/// Abstract class used to diagnose incomplete types.
struct TypeDiagnoser {
  TypeDiagnoser() {}

  /// Emit this diagnoser's diagnostic for type \p T at location \p Loc.
  virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) = 0;
  virtual ~TypeDiagnoser() {}
};
// The getPrintable overload set normalizes heterogeneous diagnostic
// arguments into types that can be streamed into a diagnostic builder
// (see BoundTypeDiagnoser::emit below). Most overloads are identity
// functions; the SourceLocation and Expr overloads convert to SourceRange.
static int getPrintable(int I) { return I; }
static unsigned getPrintable(unsigned I) { return I; }
static bool getPrintable(bool B) { return B; }
static const char * getPrintable(const char *S) { return S; }
static StringRef getPrintable(StringRef S) { return S; }
static const std::string &getPrintable(const std::string &S) { return S; }
static const IdentifierInfo *getPrintable(const IdentifierInfo *II) {
  return II;
}
static DeclarationName getPrintable(DeclarationName N) { return N; }
static QualType getPrintable(QualType T) { return T; }
static SourceRange getPrintable(SourceRange R) { return R; }
// A single location is converted (implicitly) to a SourceRange.
static SourceRange getPrintable(SourceLocation L) { return L; }
static SourceRange getPrintable(const Expr *E) { return E->getSourceRange(); }
static SourceRange getPrintable(TypeLoc TL) { return TL.getSourceRange();}
/// A TypeDiagnoser that binds a diagnostic ID together with an arbitrary
/// list of extra arguments (held by reference) and replays them into the
/// diagnostic when diagnose() runs. The offending type is appended last.
template <typename... Ts> class BoundTypeDiagnoser : public TypeDiagnoser {
protected:
  unsigned DiagID;                // Diagnostic to emit; must be non-zero.
  std::tuple<const Ts &...> Args; // Bound arguments, streamed in order.

  /// Stream every bound argument into \p DB, in declaration order.
  template <std::size_t... Is>
  void emit(const SemaDiagnosticBuilder &DB,
            std::index_sequence<Is...>) const {
    // Apply all tuple elements to the builder in order. The braced array
    // initializer guarantees left-to-right evaluation of the pack.
    bool Dummy[] = {false, (DB << getPrintable(std::get<Is>(Args)))...};
    (void)Dummy;
  }

public:
  BoundTypeDiagnoser(unsigned DiagID, const Ts &...Args)
      : TypeDiagnoser(), DiagID(DiagID), Args(Args...) {
    assert(DiagID != 0 && "no diagnostic for type diagnoser");
  }

  void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
    const SemaDiagnosticBuilder &DB = S.Diag(Loc, DiagID);
    emit(DB, std::index_sequence_for<Ts...>());
    // The type itself is always the final diagnostic argument.
    DB << T;
  }
};
/// Do a check to make sure \p Name looks like a legal swift_name
/// attribute for the decl \p D. Raise a diagnostic if the name is invalid
/// for the given declaration.
///
/// For a function, this will validate a compound Swift name,
/// e.g. <code>init(foo:bar:baz:)</code> or <code>controllerForName(_:)</code>,
/// and the function will output the number of parameter names, and whether
/// this is a single-arg initializer.
///
/// For a type, enum constant, property, or variable declaration, this will
/// validate either a simple identifier, or a qualified
/// <code>context.identifier</code> name.
///
/// \returns true if the name is a valid swift name for \p D, false otherwise.
bool DiagnoseSwiftName(Decl *D, StringRef Name,
SourceLocation ArgLoc,
const IdentifierInfo *AttrName);
/// A derivative of BoundTypeDiagnoser for which the diagnostic's type
/// parameter is preceded by a 0/1 enum that is 1 if the type is sizeless.
/// For example, a diagnostic with no other parameters would generally have
/// the form "...%select{incomplete|sizeless}0 type %1...".
template <typename... Ts>
class SizelessTypeDiagnoser : public BoundTypeDiagnoser<Ts...> {
public:
  SizelessTypeDiagnoser(unsigned DiagID, const Ts &... Args)
      : BoundTypeDiagnoser<Ts...>(DiagID, Args...) {}

  void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
    const SemaDiagnosticBuilder &DB = S.Diag(Loc, this->DiagID);
    this->emit(DB, std::index_sequence_for<Ts...>());
    // Stream the sizeless flag before the type, matching the
    // "%select{incomplete|sizeless}N type %M" format described above.
    DB << T->isSizelessType() << T;
  }
};
/// How strictly sizeless built-in types are treated when requiring a
/// complete type.
enum class CompleteTypeKind {
  /// Apply the normal rules for complete types. In particular,
  /// treat all sizeless types as incomplete.
  Normal,

  /// Relax the normal rules for complete types so that they include
  /// sizeless built-in types.
  AcceptSizeless,

  // FIXME: Eventually we should flip the default to Normal and opt in
  // to AcceptSizeless rather than opt out of it.
  Default = AcceptSizeless
};
private:
/// Methods for marking which expressions involve dereferencing a pointer
/// marked with the 'noderef' attribute. Expressions are checked bottom up as
/// they are parsed, meaning that a noderef pointer may not be accessed. For
/// example, in `&*p` where `p` is a noderef pointer, we will first parse the
/// `*p`, but need to check that `address of` is called on it. This requires
/// keeping a container of all pending expressions and checking if the address
/// of them are eventually taken.
void CheckSubscriptAccessOfNoDeref(const ArraySubscriptExpr *E);
void CheckAddressOfNoDeref(const Expr *E);
void CheckMemberAccessOfNoDeref(const MemberExpr *E);
bool RequireCompleteTypeImpl(SourceLocation Loc, QualType T,
CompleteTypeKind Kind, TypeDiagnoser *Diagnoser);
/// Per-module parsing state, stacked in ModuleScopes below.
struct ModuleScope {
  SourceLocation BeginLoc;         // Where this module scope began.
  clang::Module *Module = nullptr; // Module being parsed, if resolved.
  bool ModuleInterface = false;    // True for a module interface unit.
  // True when the global module fragment was introduced implicitly.
  bool ImplicitGlobalModuleFragment = false;
  // NOTE(review): presumably the visibility set of the enclosing scope,
  // saved so it can be restored on scope exit — confirm at push/pop sites.
  VisibleModuleSet OuterVisibleModules;
};
/// The modules we're currently parsing.
llvm::SmallVector<ModuleScope, 16> ModuleScopes;
/// Namespace definitions that we will export when they finish.
llvm::SmallPtrSet<const NamespaceDecl*, 8> DeferredExportedNamespaces;
/// Get the module whose scope we are currently within, or null when no
/// module scope is open.
Module *getCurrentModule() const {
  if (ModuleScopes.empty())
    return nullptr;
  return ModuleScopes.back().Module;
}
VisibleModuleSet VisibleModules;
public:
/// Get the module owning an entity.
/// Thin forwarder to Decl::getOwningModule.
Module *getOwningModule(const Decl *Entity) {
  return Entity->getOwningModule();
}
/// Make a merged definition of an existing hidden definition \p ND
/// visible at the specified location.
void makeMergedDefinitionVisible(NamedDecl *ND);
bool isModuleVisible(const Module *M, bool ModulePrivate = false);
/// Determine whether a declaration is visible to name lookup.
bool isVisible(const NamedDecl *D) {
  // Fast path: most declarations are unconditionally visible. Only fall
  // back to the module-aware slow path when that flag is not set.
  if (D->isUnconditionallyVisible())
    return true;
  return isVisibleSlow(D);
}
/// Determine whether any declaration of an entity is visible.
bool
hasVisibleDeclaration(const NamedDecl *D,
                      llvm::SmallVectorImpl<Module *> *Modules = nullptr) {
  // Cheap visibility check first; the slow path can also report the set of
  // modules that would make the declaration visible.
  if (isVisible(D))
    return true;
  return hasVisibleDeclarationSlow(D, Modules);
}
bool hasVisibleDeclarationSlow(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules);
bool hasVisibleMergedDefinition(NamedDecl *Def);
bool hasMergedDefinitionInCurrentModule(NamedDecl *Def);
/// Determine if \p D and \p Suggested have a structurally compatible
/// layout as described in C11 6.2.7/1.
bool hasStructuralCompatLayout(Decl *D, Decl *Suggested);
/// Determine if \p D has a visible definition. If not, suggest a declaration
/// that should be made visible to expose the definition.
bool hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested,
bool OnlyNeedComplete = false);
bool hasVisibleDefinition(const NamedDecl *D) {
NamedDecl *Hidden;
return hasVisibleDefinition(const_cast<NamedDecl*>(D), &Hidden);
}
/// Determine if the template parameter \p D has a visible default argument.
bool
hasVisibleDefaultArgument(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if there is a visible declaration of \p D that is an explicit
/// specialization declaration for a specialization of a template. (For a
/// member specialization, use hasVisibleMemberSpecialization.)
bool hasVisibleExplicitSpecialization(
const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if there is a visible declaration of \p D that is a member
/// specialization declaration (as opposed to an instantiated declaration).
bool hasVisibleMemberSpecialization(
const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if \p A and \p B are equivalent internal linkage declarations
/// from different modules, and thus an ambiguity error can be downgraded to
/// an extension warning.
bool isEquivalentInternalLinkageDeclaration(const NamedDecl *A,
const NamedDecl *B);
void diagnoseEquivalentInternalLinkageDeclarations(
SourceLocation Loc, const NamedDecl *D,
ArrayRef<const NamedDecl *> Equiv);
bool isUsualDeallocationFunction(const CXXMethodDecl *FD);
/// Query whether \p T is a complete type at \p Loc without emitting any
/// diagnostics (a null diagnoser suppresses them).
bool isCompleteType(SourceLocation Loc, QualType T,
                    CompleteTypeKind Kind = CompleteTypeKind::Default) {
  bool Incomplete = RequireCompleteTypeImpl(Loc, T, Kind, nullptr);
  return !Incomplete;
}
bool RequireCompleteType(SourceLocation Loc, QualType T,
CompleteTypeKind Kind, TypeDiagnoser &Diagnoser);
bool RequireCompleteType(SourceLocation Loc, QualType T,
CompleteTypeKind Kind, unsigned DiagID);
/// Require a complete type using the default completeness kind.
bool RequireCompleteType(SourceLocation Loc, QualType T,
                         TypeDiagnoser &Diagnoser) {
  return RequireCompleteType(Loc, T, CompleteTypeKind::Default, Diagnoser);
}
/// As above, but identifies the diagnostic to emit by ID.
bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID) {
  return RequireCompleteType(Loc, T, CompleteTypeKind::Default, DiagID);
}
/// Require a complete type, binding extra diagnostic arguments via a
/// BoundTypeDiagnoser.
template <typename... Ts>
bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID,
                         const Ts &...Args) {
  BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
  return RequireCompleteType(Loc, T, Diagnoser);
}
/// Require \p T to be a complete *sized* type, using a diagnostic that
/// distinguishes incomplete from sizeless types (SizelessTypeDiagnoser).
template <typename... Ts>
bool RequireCompleteSizedType(SourceLocation Loc, QualType T, unsigned DiagID,
                              const Ts &... Args) {
  SizelessTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
  // Variable-length sizeless types are acceptable here; everything else
  // must satisfy the normal completeness rules.
  CompleteTypeKind Kind = T->isVLST() ? CompleteTypeKind::AcceptSizeless
                                      : CompleteTypeKind::Normal;
  return RequireCompleteType(Loc, T, Kind, Diagnoser);
}
void completeExprArrayBound(Expr *E);
bool RequireCompleteExprType(Expr *E, CompleteTypeKind Kind,
TypeDiagnoser &Diagnoser);
bool RequireCompleteExprType(Expr *E, unsigned DiagID);
/// Require the type of expression \p E to be complete, binding extra
/// diagnostic arguments via a BoundTypeDiagnoser.
template <typename... Ts>
bool RequireCompleteExprType(Expr *E, unsigned DiagID, const Ts &...Args) {
  BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
  return RequireCompleteExprType(E, CompleteTypeKind::Default, Diagnoser);
}
/// Require the type of expression \p E to be a complete *sized* type,
/// using a diagnostic that distinguishes incomplete from sizeless types.
template <typename... Ts>
bool RequireCompleteSizedExprType(Expr *E, unsigned DiagID,
                                  const Ts &... Args) {
  SizelessTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
  // Variable-length sizeless types are acceptable; everything else must
  // satisfy the normal completeness rules.
  CompleteTypeKind Kind = E->getType()->isVLST()
                              ? CompleteTypeKind::AcceptSizeless
                              : CompleteTypeKind::Normal;
  return RequireCompleteExprType(E, Kind, Diagnoser);
}
bool RequireLiteralType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID);
/// Require \p T to be a literal type, binding extra diagnostic arguments
/// via a BoundTypeDiagnoser.
template <typename... Ts>
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID,
                        const Ts &...Args) {
  BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
  return RequireLiteralType(Loc, T, Diagnoser);
}
QualType getElaboratedType(ElaboratedTypeKeyword Keyword,
const CXXScopeSpec &SS, QualType T,
TagDecl *OwnedTagDecl = nullptr);
QualType BuildTypeofExprType(Expr *E, SourceLocation Loc);
/// If AsUnevaluated is false, E is treated as though it were an evaluated
/// context, such as when building a type for decltype(auto).
QualType BuildDecltypeType(Expr *E, SourceLocation Loc,
bool AsUnevaluated = true);
QualType BuildUnaryTransformType(QualType BaseType,
UnaryTransformType::UTTKind UKind,
SourceLocation Loc);
//===--------------------------------------------------------------------===//
// Symbol table / Decl tracking callbacks: SemaDecl.cpp.
//
/// Result of a "should this body be skipped?" query during parsing.
struct SkipBodyInfo {
  SkipBodyInfo()
      : ShouldSkip(false), CheckSameAsPrevious(false), Previous(nullptr),
        New(nullptr) {}
  bool ShouldSkip;          // True if the body should be skipped.
  // True if the new definition should be checked against Previous.
  bool CheckSameAsPrevious;
  NamedDecl *Previous;      // The prior declaration, if any.
  NamedDecl *New;           // The new declaration being processed.
};
DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = nullptr);
void DiagnoseUseOfUnimplementedSelectors();
bool isSimpleTypeSpecifier(tok::TokenKind Kind) const;
ParsedType getTypeName(const IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec *SS = nullptr,
bool isClassName = false, bool HasTrailingDot = false,
ParsedType ObjectType = nullptr,
bool IsCtorOrDtorName = false,
bool WantNontrivialTypeSourceInfo = false,
bool IsClassTemplateDeductionContext = true,
IdentifierInfo **CorrectedII = nullptr);
TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S);
bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S);
void DiagnoseUnknownTypeName(IdentifierInfo *&II,
SourceLocation IILoc,
Scope *S,
CXXScopeSpec *SS,
ParsedType &SuggestedType,
bool IsTemplateName = false);
/// Attempt to behave like MSVC in situations where lookup of an unqualified
/// type name has failed in a dependent context. In these situations, we
/// automatically form a DependentTypeName that will retry lookup in a related
/// scope during instantiation.
ParsedType ActOnMSVCUnknownTypeName(const IdentifierInfo &II,
SourceLocation NameLoc,
bool IsTemplateTypeArg);
/// Describes the result of the name lookup and resolution performed
/// by \c ClassifyName().
/// The possible outcomes of name classification; see NameClassification
/// for the payload each kind carries.
enum NameClassificationKind {
  /// This name is not a type or template in this context, but might be
  /// something else.
  NC_Unknown,
  /// Classification failed; an error has been produced.
  NC_Error,
  /// The name has been typo-corrected to a keyword.
  NC_Keyword,
  /// The name was classified as a type.
  NC_Type,
  /// The name was classified as a specific non-type, non-template
  /// declaration. ActOnNameClassifiedAsNonType should be called to
  /// convert the declaration to an expression.
  NC_NonType,
  /// The name was classified as an ADL-only function name.
  /// ActOnNameClassifiedAsUndeclaredNonType should be called to convert the
  /// result to an expression.
  NC_UndeclaredNonType,
  /// The name denotes a member of a dependent type that could not be
  /// resolved. ActOnNameClassifiedAsDependentNonType should be called to
  /// convert the result to an expression.
  NC_DependentNonType,
  /// The name was classified as an overload set, and an expression
  /// representing that overload set has been formed.
  /// ActOnNameClassifiedAsOverloadSet should be called to form a suitable
  /// expression referencing the overload set.
  NC_OverloadSet,
  /// The name was classified as a template whose specializations are types.
  NC_TypeTemplate,
  /// The name was classified as a variable template name.
  NC_VarTemplate,
  /// The name was classified as a function template name.
  NC_FunctionTemplate,
  /// The name was classified as an ADL-only function template name.
  NC_UndeclaredTemplate,
  /// The name was classified as a concept name.
  NC_Concept,
};
/// The result of classifying a name: a kind plus whichever payload that
/// kind requires. Instances are built through the static factory functions;
/// the active union member is determined entirely by Kind, and the typed
/// accessors assert that the matching kind is set.
class NameClassification {
  NameClassificationKind Kind;
  union {
    ExprResult Expr;        // Active for NC_OverloadSet.
    NamedDecl *NonTypeDecl; // Active for NC_NonType.
    TemplateName Template;  // Active for the *Template and Concept kinds.
    ParsedType Type;        // Active for NC_Type.
  };

  explicit NameClassification(NameClassificationKind Kind) : Kind(Kind) {}

public:
  NameClassification(ParsedType Type) : Kind(NC_Type), Type(Type) {}

  // The keyword itself is not stored; the kind alone records this case.
  NameClassification(const IdentifierInfo *Keyword) : Kind(NC_Keyword) {}

  static NameClassification Error() {
    return NameClassification(NC_Error);
  }

  static NameClassification Unknown() {
    return NameClassification(NC_Unknown);
  }

  static NameClassification OverloadSet(ExprResult E) {
    NameClassification Result(NC_OverloadSet);
    Result.Expr = E;
    return Result;
  }

  static NameClassification NonType(NamedDecl *D) {
    NameClassification Result(NC_NonType);
    Result.NonTypeDecl = D;
    return Result;
  }

  static NameClassification UndeclaredNonType() {
    return NameClassification(NC_UndeclaredNonType);
  }

  static NameClassification DependentNonType() {
    return NameClassification(NC_DependentNonType);
  }

  static NameClassification TypeTemplate(TemplateName Name) {
    NameClassification Result(NC_TypeTemplate);
    Result.Template = Name;
    return Result;
  }

  static NameClassification VarTemplate(TemplateName Name) {
    NameClassification Result(NC_VarTemplate);
    Result.Template = Name;
    return Result;
  }

  static NameClassification FunctionTemplate(TemplateName Name) {
    NameClassification Result(NC_FunctionTemplate);
    Result.Template = Name;
    return Result;
  }

  static NameClassification Concept(TemplateName Name) {
    NameClassification Result(NC_Concept);
    Result.Template = Name;
    return Result;
  }

  static NameClassification UndeclaredTemplate(TemplateName Name) {
    NameClassification Result(NC_UndeclaredTemplate);
    Result.Template = Name;
    return Result;
  }

  NameClassificationKind getKind() const { return Kind; }

  ExprResult getExpression() const {
    assert(Kind == NC_OverloadSet);
    return Expr;
  }

  ParsedType getType() const {
    assert(Kind == NC_Type);
    return Type;
  }

  NamedDecl *getNonTypeDecl() const {
    assert(Kind == NC_NonType);
    return NonTypeDecl;
  }

  TemplateName getTemplateName() const {
    assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate ||
           Kind == NC_VarTemplate || Kind == NC_Concept ||
           Kind == NC_UndeclaredTemplate);
    return Template;
  }

  /// Map the classification kind to the corresponding TemplateNameKind.
  /// Only valid for the template/concept kinds accepted by getTemplateName.
  TemplateNameKind getTemplateNameKind() const {
    switch (Kind) {
    case NC_TypeTemplate:
      return TNK_Type_template;
    case NC_FunctionTemplate:
      return TNK_Function_template;
    case NC_VarTemplate:
      return TNK_Var_template;
    case NC_Concept:
      return TNK_Concept_template;
    case NC_UndeclaredTemplate:
      return TNK_Undeclared_template;
    default:
      llvm_unreachable("unsupported name classification.");
    }
  }
};
/// Perform name lookup on the given name, classifying it based on
/// the results of name lookup and the following token.
///
/// This routine is used by the parser to resolve identifiers and help direct
/// parsing. When the identifier cannot be found, this routine will attempt
/// to correct the typo and classify based on the resulting name.
///
/// \param S The scope in which we're performing name lookup.
///
/// \param SS The nested-name-specifier that precedes the name.
///
/// \param Name The identifier. If typo correction finds an alternative name,
/// this pointer parameter will be updated accordingly.
///
/// \param NameLoc The location of the identifier.
///
/// \param NextToken The token following the identifier. Used to help
/// disambiguate the name.
///
/// \param CCC The correction callback, if typo correction is desired.
NameClassification ClassifyName(Scope *S, CXXScopeSpec &SS,
IdentifierInfo *&Name, SourceLocation NameLoc,
const Token &NextToken,
CorrectionCandidateCallback *CCC = nullptr);
/// Act on the result of classifying a name as an undeclared (ADL-only)
/// non-type declaration.
ExprResult ActOnNameClassifiedAsUndeclaredNonType(IdentifierInfo *Name,
SourceLocation NameLoc);
/// Act on the result of classifying a name as an undeclared member of a
/// dependent base class.
ExprResult ActOnNameClassifiedAsDependentNonType(const CXXScopeSpec &SS,
IdentifierInfo *Name,
SourceLocation NameLoc,
bool IsAddressOfOperand);
/// Act on the result of classifying a name as a specific non-type
/// declaration.
ExprResult ActOnNameClassifiedAsNonType(Scope *S, const CXXScopeSpec &SS,
NamedDecl *Found,
SourceLocation NameLoc,
const Token &NextToken);
/// Act on the result of classifying a name as an overload set.
ExprResult ActOnNameClassifiedAsOverloadSet(Scope *S, Expr *OverloadSet);
/// Describes the detailed kind of a template name. Used in diagnostics.
enum class TemplateNameKindForDiagnostics {
  ClassTemplate,
  FunctionTemplate,
  VarTemplate,
  AliasTemplate,
  TemplateTemplateParam,
  Concept,
  /// A template name that cannot be resolved until instantiation.
  DependentTemplate
};
TemplateNameKindForDiagnostics
getTemplateNameKindForDiagnostics(TemplateName Name);
/// Determine whether it's plausible that E was intended to be a
/// template-name. \p Dependent is set to true when the answer could change
/// after instantiation (dependent reference forms).
bool mightBeIntendedToBeTemplateName(ExprResult E, bool &Dependent) {
  if (!getLangOpts().CPlusPlus || E.isInvalid())
    return false;
  const Expr *Ex = E.get();

  // Non-dependent reference forms: plausible only when no explicit
  // template arguments were already written.
  Dependent = false;
  if (const auto *Ref = dyn_cast<DeclRefExpr>(Ex))
    return !Ref->hasExplicitTemplateArgs();
  if (const auto *Member = dyn_cast<MemberExpr>(Ex))
    return !Member->hasExplicitTemplateArgs();

  // Dependent reference forms: same check, but flag the dependence.
  Dependent = true;
  if (const auto *DepRef = dyn_cast<DependentScopeDeclRefExpr>(Ex))
    return !DepRef->hasExplicitTemplateArgs();
  if (const auto *DepMember = dyn_cast<CXXDependentScopeMemberExpr>(Ex))
    return !DepMember->hasExplicitTemplateArgs();

  // Any additional cases recognized here should also be handled by
  // diagnoseExprIntendedAsTemplateName.
  return false;
}
void diagnoseExprIntendedAsTemplateName(Scope *S, ExprResult TemplateName,
SourceLocation Less,
SourceLocation Greater);
Decl *ActOnDeclarator(Scope *S, Declarator &D);
NamedDecl *HandleDeclarator(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParameterLists);
void RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S);
bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info);
bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC,
DeclarationName Name, SourceLocation Loc,
bool IsTemplateId);
void
diagnoseIgnoredQualifiers(unsigned DiagID, unsigned Quals,
SourceLocation FallbackLoc,
SourceLocation ConstQualLoc = SourceLocation(),
SourceLocation VolatileQualLoc = SourceLocation(),
SourceLocation RestrictQualLoc = SourceLocation(),
SourceLocation AtomicQualLoc = SourceLocation(),
SourceLocation UnalignedQualLoc = SourceLocation());
void diagnosePointerAuthDisabled(SourceLocation loc, SourceRange range);
bool checkConstantPointerAuthKey(Expr *keyExpr, unsigned &key);
static bool adjustContextForLocalExternDecl(DeclContext *&DC);
void DiagnoseFunctionSpecifiers(const DeclSpec &DS);
NamedDecl *getShadowedDeclaration(const TypedefNameDecl *D,
const LookupResult &R);
NamedDecl *getShadowedDeclaration(const VarDecl *D, const LookupResult &R);
void CheckShadow(NamedDecl *D, NamedDecl *ShadowedDecl,
const LookupResult &R);
void CheckShadow(Scope *S, VarDecl *D);
/// Warn if 'E', which is an expression that is about to be modified, refers
/// to a shadowing declaration.
void CheckShadowingDeclModification(Expr *E, SourceLocation Loc);
void DiagnoseShadowingLambdaDecls(const sema::LambdaScopeInfo *LSI);
private:
/// Map of current shadowing declarations to shadowed declarations. Warn if
/// it looks like the user is trying to modify the shadowing declaration.
llvm::DenseMap<const NamedDecl *, const NamedDecl *> ShadowingDecls;
public:
void CheckCastAlign(Expr *Op, QualType T, SourceRange TRange);
void handleTagNumbering(const TagDecl *Tag, Scope *TagScope);
void setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec,
TypedefNameDecl *NewTD);
void CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *D);
NamedDecl* ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC,
TypeSourceInfo *TInfo,
LookupResult &Previous);
NamedDecl* ActOnTypedefNameDecl(Scope* S, DeclContext* DC, TypedefNameDecl *D,
LookupResult &Previous, bool &Redeclaration);
NamedDecl *ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC,
TypeSourceInfo *TInfo,
LookupResult &Previous,
MultiTemplateParamsArg TemplateParamLists,
bool &AddToScope,
ArrayRef<BindingDecl *> Bindings = None);
NamedDecl *
ActOnDecompositionDeclarator(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParamLists);
// Returns true if the variable declaration is a redeclaration
bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous);
void CheckVariableDeclarationType(VarDecl *NewVD);
bool DeduceVariableDeclarationType(VarDecl *VDecl, bool DirectInit,
Expr *Init);
void CheckCompleteVariableDeclaration(VarDecl *VD);
void CheckCompleteDecompositionDeclaration(DecompositionDecl *DD);
void MaybeSuggestAddingStaticToDecl(const FunctionDecl *D);
NamedDecl* ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC,
TypeSourceInfo *TInfo,
LookupResult &Previous,
MultiTemplateParamsArg TemplateParamLists,
bool &AddToScope);
bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD);
/// How CheckConstexprFunctionDefinition should react to problems.
enum class CheckConstexprKind {
  /// Diagnose issues that are non-constant or that are extensions.
  Diagnose,
  /// Identify whether this function satisfies the formal rules for constexpr
  /// functions in the current language mode (with no extensions).
  CheckValid
};
bool CheckConstexprFunctionDefinition(const FunctionDecl *FD,
CheckConstexprKind Kind);
void DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD);
void FindHiddenVirtualMethods(CXXMethodDecl *MD,
SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
void NoteHiddenVirtualMethods(CXXMethodDecl *MD,
SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
// Returns true if the function declaration is a redeclaration
bool CheckFunctionDeclaration(Scope *S,
FunctionDecl *NewFD, LookupResult &Previous,
bool IsMemberSpecialization);
bool shouldLinkDependentDeclWithPrevious(Decl *D, Decl *OldDecl);
bool canFullyTypeCheckRedeclaration(ValueDecl *NewD, ValueDecl *OldD,
QualType NewT, QualType OldT);
void CheckMain(FunctionDecl *FD, const DeclSpec &D);
void CheckMSVCRTEntryPoint(FunctionDecl *FD);
Attr *getImplicitCodeSegOrSectionAttrForFunction(const FunctionDecl *FD,
bool IsDefinition);
void CheckFunctionOrTemplateParamDeclarator(Scope *S, Declarator &D);
Decl *ActOnParamDeclarator(Scope *S, Declarator &D);
ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC,
SourceLocation Loc,
QualType T);
QualType adjustParameterTypeForObjCAutoRefCount(QualType T,
SourceLocation NameLoc,
TypeSourceInfo *TSInfo);
ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc,
SourceLocation NameLoc, IdentifierInfo *Name,
QualType T, TypeSourceInfo *TSInfo,
StorageClass SC);
void ActOnParamDefaultArgument(Decl *param,
SourceLocation EqualLoc,
Expr *defarg);
void ActOnParamUnparsedDefaultArgument(Decl *param, SourceLocation EqualLoc,
SourceLocation ArgLoc);
void ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc);
ExprResult ConvertParamDefaultArgument(const ParmVarDecl *Param,
Expr *DefaultArg,
SourceLocation EqualLoc);
void SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg,
SourceLocation EqualLoc);
// Contexts where using non-trivial C union types can be disallowed. This is
// passed to err_non_trivial_c_union_in_invalid_context.
enum NonTrivialCUnionContext {
// Function parameter.
NTCUC_FunctionParam,
// Function return.
NTCUC_FunctionReturn,
// Default-initialized object.
NTCUC_DefaultInitializedObject,
// Variable with automatic storage duration.
NTCUC_AutoVar,
// Initializer expression that might copy from another object.
NTCUC_CopyInit,
// Assignment.
NTCUC_Assignment,
// Compound literal.
NTCUC_CompoundLiteral,
// Block capture.
NTCUC_BlockCapture,
// lvalue-to-rvalue conversion of volatile type.
NTCUC_LValueToRValueVolatile,
};
/// Emit diagnostics if the initializer or any of its explicit or
/// implicitly-generated subexpressions require copying or
/// default-initializing a type that is or contains a C union type that is
/// non-trivial to copy or default-initialize.
void checkNonTrivialCUnionInInitializer(const Expr *Init, SourceLocation Loc);
// These flags are passed to checkNonTrivialCUnion.
// Bitmask values (power-of-two encoding): callers may OR several together
// into the 'unsigned NonTrivialKind' argument of checkNonTrivialCUnion.
enum NonTrivialCUnionKind {
NTCUK_Init = 0x1,
NTCUK_Destruct = 0x2,
NTCUK_Copy = 0x4,
};
/// Emit diagnostics if a non-trivial C union type or a struct that contains
/// a non-trivial C union is used in an invalid context.
void checkNonTrivialCUnion(QualType QT, SourceLocation Loc,
NonTrivialCUnionContext UseContext,
unsigned NonTrivialKind);
void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit);
void ActOnUninitializedDecl(Decl *dcl);
void ActOnInitializerError(Decl *Dcl);
void ActOnPureSpecifier(Decl *D, SourceLocation PureSpecLoc);
void ActOnCXXForRangeDecl(Decl *D);
StmtResult ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc,
IdentifierInfo *Ident,
ParsedAttributes &Attrs,
SourceLocation AttrEnd);
void SetDeclDeleted(Decl *dcl, SourceLocation DelLoc);
void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc);
void CheckStaticLocalForDllExport(VarDecl *VD);
void FinalizeDeclaration(Decl *D);
DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS,
ArrayRef<Decl *> Group);
DeclGroupPtrTy BuildDeclaratorGroup(MutableArrayRef<Decl *> Group);
/// Should be called on all declarations that might have attached
/// documentation comments.
void ActOnDocumentableDecl(Decl *D);
void ActOnDocumentableDecls(ArrayRef<Decl *> Group);
void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D,
SourceLocation LocAfterDecls);
void CheckForFunctionRedefinition(
FunctionDecl *FD, const FunctionDecl *EffectiveDefinition = nullptr,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParamLists,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D,
SkipBodyInfo *SkipBody = nullptr);
void ActOnStartTrailingRequiresClause(Scope *S, Declarator &D);
ExprResult ActOnFinishTrailingRequiresClause(ExprResult ConstraintExpr);
void ActOnStartOfObjCMethodDef(Scope *S, Decl *D);
/// Returns true when \p D is a (non-null) Objective-C method declaration.
bool isObjCMethodDecl(Decl *D) {
if (!D)
return false;
return isa<ObjCMethodDecl>(D);
}
/// Determine whether we can delay parsing the body of a function or
/// function template until it is used, assuming we don't care about emitting
/// code for that function.
///
/// This will be \c false if we may need the body of the function in the
/// middle of parsing an expression (where it's impractical to switch to
/// parsing a different function), for instance, if it's constexpr in C++11
/// or has an 'auto' return type in C++14. These cases are essentially bugs.
bool canDelayFunctionBody(const Declarator &D);
/// Determine whether we can skip parsing the body of a function
/// definition, assuming we don't care about analyzing its body or emitting
/// code for that function.
///
/// This will be \c false only if we may need the body of the function in
/// order to parse the rest of the program (for instance, if it is
/// \c constexpr in C++11 or has an 'auto' return type in C++14).
bool canSkipFunctionBody(Decl *D);
void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation);
Decl *ActOnSkippedFunctionBody(Decl *Decl);
void ActOnFinishInlineFunctionDef(FunctionDecl *D);
/// ActOnFinishDelayedAttribute - Invoked when we have finished parsing an
/// attribute for which parsing is delayed.
void ActOnFinishDelayedAttribute(Scope *S, Decl *D, ParsedAttributes &Attrs);
/// Diagnose any unused parameters in the given sequence of
/// ParmVarDecl pointers.
void DiagnoseUnusedParameters(ArrayRef<ParmVarDecl *> Parameters);
/// Diagnose whether the size of parameters or return value of a
/// function or obj-c method definition is pass-by-value and larger than a
/// specified threshold.
void
DiagnoseSizeOfParametersAndReturnValue(ArrayRef<ParmVarDecl *> Parameters,
QualType ReturnTy, NamedDecl *D);
void DiagnoseInvalidJumps(Stmt *Body);
Decl *ActOnFileScopeAsmDecl(Expr *expr,
SourceLocation AsmLoc,
SourceLocation RParenLoc);
/// Handle a C++11 empty-declaration and attribute-declaration.
Decl *ActOnEmptyDeclaration(Scope *S, const ParsedAttributesView &AttrList,
SourceLocation SemiLoc);
enum class ModuleDeclKind {
Interface, ///< 'export module X;'
Implementation, ///< 'module X;'
};
/// The parser has processed a module-declaration that begins the definition
/// of a module interface or implementation.
DeclGroupPtrTy ActOnModuleDecl(SourceLocation StartLoc,
SourceLocation ModuleLoc, ModuleDeclKind MDK,
ModuleIdPath Path, bool IsFirstDecl);
/// The parser has processed a global-module-fragment declaration that begins
/// the definition of the global module fragment of the current module unit.
/// \param ModuleLoc The location of the 'module' keyword.
DeclGroupPtrTy ActOnGlobalModuleFragmentDecl(SourceLocation ModuleLoc);
/// The parser has processed a private-module-fragment declaration that begins
/// the definition of the private module fragment of the current module unit.
/// \param ModuleLoc The location of the 'module' keyword.
/// \param PrivateLoc The location of the 'private' keyword.
DeclGroupPtrTy ActOnPrivateModuleFragmentDecl(SourceLocation ModuleLoc,
SourceLocation PrivateLoc);
/// The parser has processed a module import declaration.
///
/// \param StartLoc The location of the first token in the declaration. This
/// could be the location of an '@', 'export', or 'import'.
/// \param ExportLoc The location of the 'export' keyword, if any.
/// \param ImportLoc The location of the 'import' keyword.
/// \param Path The module access path.
DeclResult ActOnModuleImport(SourceLocation StartLoc,
SourceLocation ExportLoc,
SourceLocation ImportLoc, ModuleIdPath Path);
DeclResult ActOnModuleImport(SourceLocation StartLoc,
SourceLocation ExportLoc,
SourceLocation ImportLoc, Module *M,
ModuleIdPath Path = {});
/// The parser has processed a module import translated from a
/// #include or similar preprocessing directive.
void ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod);
void BuildModuleInclude(SourceLocation DirectiveLoc, Module *Mod);
/// The parser has entered a submodule.
void ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod);
/// The parser has left a submodule.
void ActOnModuleEnd(SourceLocation DirectiveLoc, Module *Mod);
/// Create an implicit import of the given module at the given
/// source location, for error recovery, if possible.
///
/// This routine is typically used when an entity found by name lookup
/// is actually hidden within a module that we know about but the user
/// has forgotten to import.
void createImplicitModuleImportForErrorRecovery(SourceLocation Loc,
Module *Mod);
/// Kinds of missing import. Note, the values of these enumerators correspond
/// to %select values in diagnostics.
enum class MissingImportKind {
Declaration,
Definition,
DefaultArgument,
ExplicitSpecialization,
PartialSpecialization
};
/// Diagnose that the specified declaration needs to be visible but
/// isn't, and suggest a module import that would resolve the problem.
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
MissingImportKind MIK, bool Recover = true);
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
SourceLocation DeclLoc, ArrayRef<Module *> Modules,
MissingImportKind MIK, bool Recover);
Decl *ActOnStartExportDecl(Scope *S, SourceLocation ExportLoc,
SourceLocation LBraceLoc);
Decl *ActOnFinishExportDecl(Scope *S, Decl *ExportDecl,
SourceLocation RBraceLoc);
/// We've found a use of a templated declaration that would trigger an
/// implicit instantiation. Check that any relevant explicit specializations
/// and partial specializations are visible, and diagnose if not.
void checkSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec);
/// We've found a use of a template specialization that would select a
/// partial specialization. Check that the partial specialization is visible,
/// and diagnose if not.
void checkPartialSpecializationVisibility(SourceLocation Loc,
NamedDecl *Spec);
/// Retrieve a suitable printing policy for diagnostics.
// Non-static convenience overload: forwards this object's Context and PP
// members to the static getPrintingPolicy(Ctx, PP) declared below.
PrintingPolicy getPrintingPolicy() const {
return getPrintingPolicy(Context, PP);
}
/// Retrieve a suitable printing policy for diagnostics.
static PrintingPolicy getPrintingPolicy(const ASTContext &Ctx,
const Preprocessor &PP);
/// Scope actions.
void ActOnPopScope(SourceLocation Loc, Scope *S);
void ActOnTranslationUnitScope(Scope *S);
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
RecordDecl *&AnonRecord);
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
MultiTemplateParamsArg TemplateParams,
bool IsExplicitInstantiation,
RecordDecl *&AnonRecord);
Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS,
AccessSpecifier AS,
RecordDecl *Record,
const PrintingPolicy &Policy);
Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS,
RecordDecl *Record);
/// Common ways to introduce type names without a tag for use in diagnostics.
/// Keep in sync with err_tag_reference_non_tag.
enum NonTagKind {
NTK_NonStruct,
NTK_NonClass,
NTK_NonUnion,
NTK_NonEnum,
NTK_Typedef,
NTK_TypeAlias,
NTK_Template,
NTK_TypeAliasTemplate,
NTK_TemplateTemplateArgument,
};
/// Given a non-tag type declaration, returns an enum useful for indicating
/// what kind of non-tag type this is.
NonTagKind getNonTagTypeDeclKind(const Decl *D, TagTypeKind TTK);
bool isAcceptableTagRedeclaration(const TagDecl *Previous,
TagTypeKind NewTag, bool isDefinition,
SourceLocation NewTagLoc,
const IdentifierInfo *Name);
enum TagUseKind {
TUK_Reference, // Reference to a tag: 'struct foo *X;'
TUK_Declaration, // Fwd decl of a tag: 'struct foo;'
TUK_Definition, // Definition of a tag: 'struct foo { int X; } Y;'
TUK_Friend // Friend declaration: 'friend struct foo;'
};
Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc, const ParsedAttributesView &Attr,
AccessSpecifier AS, SourceLocation ModulePrivateLoc,
MultiTemplateParamsArg TemplateParameterLists, bool &OwnedDecl,
bool &IsDependent, SourceLocation ScopedEnumKWLoc,
bool ScopedEnumUsesClassTag, TypeResult UnderlyingType,
bool IsTypeSpecifier, bool IsTemplateParamOrArg,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc,
unsigned TagSpec, SourceLocation TagLoc,
CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc,
const ParsedAttributesView &Attr,
MultiTemplateParamsArg TempParamLists);
TypeResult ActOnDependentTag(Scope *S,
unsigned TagSpec,
TagUseKind TUK,
const CXXScopeSpec &SS,
IdentifierInfo *Name,
SourceLocation TagLoc,
SourceLocation NameLoc);
void ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart,
IdentifierInfo *ClassName,
SmallVectorImpl<Decl *> &Decls);
Decl *ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth);
FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
InClassInitStyle InitStyle,
AccessSpecifier AS);
MSPropertyDecl *HandleMSProperty(Scope *S, RecordDecl *TagD,
SourceLocation DeclStart, Declarator &D,
Expr *BitfieldWidth,
InClassInitStyle InitStyle,
AccessSpecifier AS,
const ParsedAttr &MSPropertyAttr);
FieldDecl *CheckFieldDecl(DeclarationName Name, QualType T,
TypeSourceInfo *TInfo,
RecordDecl *Record, SourceLocation Loc,
bool Mutable, Expr *BitfieldWidth,
InClassInitStyle InitStyle,
SourceLocation TSSL,
AccessSpecifier AS, NamedDecl *PrevDecl,
Declarator *D = nullptr);
bool CheckNontrivialField(FieldDecl *FD);
void DiagnoseNontrivial(const CXXRecordDecl *Record, CXXSpecialMember CSM);
enum TrivialABIHandling {
/// The triviality of a method unaffected by "trivial_abi".
TAH_IgnoreTrivialABI,
/// The triviality of a method affected by "trivial_abi".
TAH_ConsiderTrivialABI
};
bool SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM,
TrivialABIHandling TAH = TAH_IgnoreTrivialABI,
bool Diagnose = false);
/// For a defaulted function, the kind of defaulted function that it is.
class DefaultedFunctionKind {
// At most one of these two fields is "active": every constructor sets the
// other one to its sentinel (CXXInvalid / DefaultedComparisonKind::None).
CXXSpecialMember SpecialMember : 8;
DefaultedComparisonKind Comparison : 8;
public:
/// Construct the "not a defaulted function" value (both sentinels).
DefaultedFunctionKind()
: SpecialMember(CXXInvalid), Comparison(DefaultedComparisonKind::None) {
}
/// Construct from a special-member kind; comparison is None.
DefaultedFunctionKind(CXXSpecialMember CSM)
: SpecialMember(CSM), Comparison(DefaultedComparisonKind::None) {}
/// Construct from a defaulted-comparison kind; special member is invalid.
DefaultedFunctionKind(DefaultedComparisonKind Comp)
: SpecialMember(CXXInvalid), Comparison(Comp) {}
bool isSpecialMember() const { return SpecialMember != CXXInvalid; }
bool isComparison() const {
return Comparison != DefaultedComparisonKind::None;
}
/// True when this describes either kind of defaulted function.
explicit operator bool() const {
return isSpecialMember() || isComparison();
}
CXXSpecialMember asSpecialMember() const { return SpecialMember; }
DefaultedComparisonKind asComparison() const { return Comparison; }
/// Get the index of this function kind for use in diagnostics.
unsigned getDiagnosticIndex() const {
static_assert(CXXInvalid > CXXDestructor,
"invalid should have highest index");
static_assert((unsigned)DefaultedComparisonKind::None == 0,
"none should be equal to zero");
// Summing is safe because at most one field is nonzero (see the field
// comment above); the static_asserts pin the sentinel values relied on.
return SpecialMember + (unsigned)Comparison;
}
};
DefaultedFunctionKind getDefaultedFunctionKind(const FunctionDecl *FD);
/// Convenience accessor: the special-member kind of \p MD as determined by
/// getDefaultedFunctionKind (CXXInvalid when MD is not a special member).
CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD) {
return getDefaultedFunctionKind(MD).asSpecialMember();
}
/// Convenience accessor: the defaulted-comparison kind of \p FD as determined
/// by getDefaultedFunctionKind (None when FD is not a defaulted comparison).
DefaultedComparisonKind getDefaultedComparisonKind(const FunctionDecl *FD) {
return getDefaultedFunctionKind(FD).asComparison();
}
void ActOnLastBitfield(SourceLocation DeclStart,
SmallVectorImpl<Decl *> &AllIvarDecls);
Decl *ActOnIvar(Scope *S, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
tok::ObjCKeywordKind visibility);
// This is used for both record definitions and ObjC interface declarations.
void ActOnFields(Scope *S, SourceLocation RecLoc, Decl *TagDecl,
ArrayRef<Decl *> Fields, SourceLocation LBrac,
SourceLocation RBrac, const ParsedAttributesView &AttrList);
/// ActOnTagStartDefinition - Invoked when we have entered the
/// scope of a tag's definition (e.g., for an enumeration, class,
/// struct, or union).
void ActOnTagStartDefinition(Scope *S, Decl *TagDecl);
/// Perform ODR-like check for C/ObjC when merging tag types from modules.
/// Differently from C++, actually parse the body and reject / error out
/// in case of a structural mismatch.
bool ActOnDuplicateDefinition(DeclSpec &DS, Decl *Prev,
SkipBodyInfo &SkipBody);
typedef void *SkippedDefinitionContext;
/// Invoked when we enter a tag definition that we're skipping.
SkippedDefinitionContext ActOnTagStartSkippedDefinition(Scope *S, Decl *TD);
Decl *ActOnObjCContainerStartDefinition(Decl *IDecl);
/// ActOnStartCXXMemberDeclarations - Invoked when we have parsed a
/// C++ record definition's base-specifiers clause and are starting its
/// member declarations.
void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl,
SourceLocation FinalLoc,
bool IsFinalSpelledSealed,
SourceLocation LBraceLoc);
/// ActOnTagFinishDefinition - Invoked once we have finished parsing
/// the definition of a tag (enumeration, class, struct, or union).
void ActOnTagFinishDefinition(Scope *S, Decl *TagDecl,
SourceRange BraceRange);
void ActOnTagFinishSkippedDefinition(SkippedDefinitionContext Context);
void ActOnObjCContainerFinishDefinition();
/// Invoked when we must temporarily exit the objective-c container
/// scope for parsing/looking-up C constructs.
///
/// Must be followed by a call to \see ActOnObjCReenterContainerContext
void ActOnObjCTemporaryExitContainerContext(DeclContext *DC);
void ActOnObjCReenterContainerContext(DeclContext *DC);
/// ActOnTagDefinitionError - Invoked when there was an unrecoverable
/// error parsing the definition of a tag.
void ActOnTagDefinitionError(Scope *S, Decl *TagDecl);
EnumConstantDecl *CheckEnumConstant(EnumDecl *Enum,
EnumConstantDecl *LastEnumConst,
SourceLocation IdLoc,
IdentifierInfo *Id,
Expr *val);
bool CheckEnumUnderlyingType(TypeSourceInfo *TI);
bool CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped,
QualType EnumUnderlyingTy, bool IsFixed,
const EnumDecl *Prev);
/// Determine whether the body of an anonymous enumeration should be skipped.
/// \param II The name of the first enumerator.
SkipBodyInfo shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II,
SourceLocation IILoc);
Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant,
SourceLocation IdLoc, IdentifierInfo *Id,
const ParsedAttributesView &Attrs,
SourceLocation EqualLoc, Expr *Val);
void ActOnEnumBody(SourceLocation EnumLoc, SourceRange BraceRange,
Decl *EnumDecl, ArrayRef<Decl *> Elements, Scope *S,
const ParsedAttributesView &Attr);
/// Set the current declaration context until it gets popped.
void PushDeclContext(Scope *S, DeclContext *DC);
void PopDeclContext();
/// EnterDeclaratorContext - Used when we must lookup names in the context
/// of a declarator's nested name specifier.
void EnterDeclaratorContext(Scope *S, DeclContext *DC);
void ExitDeclaratorContext(Scope *S);
/// Enter a template parameter scope, after it's been associated with a particular
/// DeclContext. Causes lookup within the scope to chain through enclosing contexts
/// in the correct order.
void EnterTemplatedContext(Scope *S, DeclContext *DC);
/// Push the parameters of D, which must be a function, into scope.
void ActOnReenterFunctionContext(Scope* S, Decl* D);
void ActOnExitFunctionContext();
DeclContext *getFunctionLevelDeclContext();
/// getCurFunctionDecl - If inside of a function body, this returns a pointer
/// to the function decl for the function being parsed. If we're currently
/// in a 'block', this returns the containing context.
FunctionDecl *getCurFunctionDecl();
/// getCurMethodDecl - If inside of a method body, this returns a pointer to
/// the method decl for the method being parsed. If we're currently
/// in a 'block', this returns the containing context.
ObjCMethodDecl *getCurMethodDecl();
/// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method
/// or C function we're in, otherwise return null. If we're currently
/// in a 'block', this returns the containing context.
NamedDecl *getCurFunctionOrMethodDecl();
/// Add this decl to the scope shadowed decl chains.
void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true);
/// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true
/// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns
/// true if 'D' belongs to the given declaration context.
///
/// \param AllowInlineNamespace If \c true, allow the declaration to be in the
/// enclosing namespace set of the context, rather than contained
/// directly within it.
bool isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S = nullptr,
bool AllowInlineNamespace = false);
/// Finds the scope corresponding to the given decl context, if it
/// happens to be an enclosing scope. Otherwise return NULL.
static Scope *getScopeForDeclContext(Scope *S, DeclContext *DC);
/// Subroutines of ActOnDeclarator().
TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T,
TypeSourceInfo *TInfo);
bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New);
/// Describes the kind of merge to perform for availability
/// attributes (including "deprecated", "unavailable", and "availability").
enum AvailabilityMergeKind {
/// Don't merge availability attributes at all.
AMK_None,
/// Merge availability attributes for a redeclaration, which requires
/// an exact match.
AMK_Redeclaration,
/// Merge availability attributes for an override, which requires
/// an exact match or a weakening of constraints.
AMK_Override,
/// Merge availability attributes for an implementation of
/// a protocol requirement.
AMK_ProtocolImplementation,
};
/// Describes the kind of priority given to an availability attribute.
///
/// The sum of priorities determines the final priority of the attribute.
/// The final priority determines how the attribute will be merged.
/// An attribute with a lower priority will always remove higher priority
/// attributes for the specified platform when it is being applied. An
/// attribute with a higher priority will not be applied if the declaration
/// already has an availability attribute with a lower priority for the
/// specified platform. The final priority values are not expected to match
/// the values in this enumeration, but instead should be treated as a plain
/// integer value. This enumeration just names the priority weights that are
/// used to calculate that final value.
enum AvailabilityPriority : int {
/// The availability attribute was specified explicitly next to the
/// declaration.
AP_Explicit = 0,
/// The availability attribute was applied using '#pragma clang attribute'.
AP_PragmaClangAttribute = 1,
/// The availability attribute for a specific platform was inferred from
/// an availability attribute for another platform.
AP_InferredFromOtherPlatform = 2
};
/// Attribute merging methods. Return true if a new attribute was added.
AvailabilityAttr *
mergeAvailabilityAttr(NamedDecl *D, const AttributeCommonInfo &CI,
IdentifierInfo *Platform, bool Implicit,
VersionTuple Introduced, VersionTuple Deprecated,
VersionTuple Obsoleted, bool IsUnavailable,
StringRef Message, bool IsStrict, StringRef Replacement,
AvailabilityMergeKind AMK, int Priority);
TypeVisibilityAttr *
mergeTypeVisibilityAttr(Decl *D, const AttributeCommonInfo &CI,
TypeVisibilityAttr::VisibilityType Vis);
VisibilityAttr *mergeVisibilityAttr(Decl *D, const AttributeCommonInfo &CI,
VisibilityAttr::VisibilityType Vis);
UuidAttr *mergeUuidAttr(Decl *D, const AttributeCommonInfo &CI,
StringRef UuidAsWritten, MSGuidDecl *GuidDecl);
DLLImportAttr *mergeDLLImportAttr(Decl *D, const AttributeCommonInfo &CI);
DLLExportAttr *mergeDLLExportAttr(Decl *D, const AttributeCommonInfo &CI);
MSInheritanceAttr *mergeMSInheritanceAttr(Decl *D,
const AttributeCommonInfo &CI,
bool BestCase,
MSInheritanceModel Model);
FormatAttr *mergeFormatAttr(Decl *D, const AttributeCommonInfo &CI,
IdentifierInfo *Format, int FormatIdx,
int FirstArg);
SectionAttr *mergeSectionAttr(Decl *D, const AttributeCommonInfo &CI,
StringRef Name);
CodeSegAttr *mergeCodeSegAttr(Decl *D, const AttributeCommonInfo &CI,
StringRef Name);
AlwaysInlineAttr *mergeAlwaysInlineAttr(Decl *D,
const AttributeCommonInfo &CI,
const IdentifierInfo *Ident);
MinSizeAttr *mergeMinSizeAttr(Decl *D, const AttributeCommonInfo &CI);
NoSpeculativeLoadHardeningAttr *
mergeNoSpeculativeLoadHardeningAttr(Decl *D,
const NoSpeculativeLoadHardeningAttr &AL);
SpeculativeLoadHardeningAttr *
mergeSpeculativeLoadHardeningAttr(Decl *D,
const SpeculativeLoadHardeningAttr &AL);
OptimizeNoneAttr *mergeOptimizeNoneAttr(Decl *D,
const AttributeCommonInfo &CI);
SwiftNameAttr *mergeSwiftNameAttr(Decl *D, const AttributeCommonInfo &CI,
StringRef Name, bool Override);
InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const ParsedAttr &AL);
InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D,
const InternalLinkageAttr &AL);
CommonAttr *mergeCommonAttr(Decl *D, const ParsedAttr &AL);
CommonAttr *mergeCommonAttr(Decl *D, const CommonAttr &AL);
WebAssemblyImportNameAttr *mergeImportNameAttr(
Decl *D, const WebAssemblyImportNameAttr &AL);
WebAssemblyImportModuleAttr *mergeImportModuleAttr(
Decl *D, const WebAssemblyImportModuleAttr &AL);
void mergeDeclAttributes(NamedDecl *New, Decl *Old,
AvailabilityMergeKind AMK = AMK_Redeclaration);
void MergeTypedefNameDecl(Scope *S, TypedefNameDecl *New,
LookupResult &OldDecls);
bool MergeFunctionDecl(FunctionDecl *New, NamedDecl *&Old, Scope *S,
bool MergeTypeWithOld);
bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old,
Scope *S, bool MergeTypeWithOld);
void mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old);
void MergeVarDecl(VarDecl *New, LookupResult &Previous);
void MergeVarDeclTypes(VarDecl *New, VarDecl *Old, bool MergeTypeWithOld);
void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old);
bool checkVarDeclRedefinition(VarDecl *OldDefn, VarDecl *NewDefn);
void notePreviousDefinition(const NamedDecl *Old, SourceLocation New);
bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S);
// AssignmentAction - This is used by all the assignment diagnostic functions
// to represent what is actually causing the operation
enum AssignmentAction {
AA_Assigning,
AA_Passing,
AA_Returning,
AA_Converting,
AA_Initializing,
AA_Sending,
AA_Casting,
AA_Passing_CFAudited
};
/// C++ Overloading.
enum OverloadKind {
/// This is a legitimate overload: the existing declarations are
/// functions or function templates with different signatures.
Ovl_Overload,
/// This is not an overload because the signature exactly matches
/// an existing declaration.
Ovl_Match,
/// This is not an overload because the lookup results contain a
/// non-function.
Ovl_NonFunction
};
OverloadKind CheckOverload(Scope *S,
FunctionDecl *New,
const LookupResult &OldDecls,
NamedDecl *&OldDecl,
bool IsForUsingDecl);
bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl,
bool ConsiderCudaAttrs = true,
bool ConsiderRequiresClauses = true);
enum class AllowedExplicit {
/// Allow no explicit functions to be used.
None,
/// Allow explicit conversion functions but not explicit constructors.
Conversions,
/// Allow both explicit conversion functions and explicit constructors.
All
};
ImplicitConversionSequence
TryImplicitConversion(Expr *From, QualType ToType,
bool SuppressUserConversions,
AllowedExplicit AllowExplicit,
bool InOverloadResolution,
bool CStyle,
bool AllowObjCWritebackConversion);
bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType);
bool IsFloatingPointPromotion(QualType FromType, QualType ToType);
bool IsComplexPromotion(QualType FromType, QualType ToType);
bool IsPointerConversion(Expr *From, QualType FromType, QualType ToType,
bool InOverloadResolution,
QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCPointerConversion(QualType FromType, QualType ToType,
QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCWritebackConversion(QualType FromType, QualType ToType,
QualType &ConvertedType);
bool IsBlockPointerConversion(QualType FromType, QualType ToType,
QualType& ConvertedType);
bool FunctionParamTypesAreEqual(const FunctionProtoType *OldType,
const FunctionProtoType *NewType,
unsigned *ArgPos = nullptr);
void HandleFunctionTypeMismatch(PartialDiagnostic &PDiag,
QualType FromType, QualType ToType);
void maybeExtendBlockObject(ExprResult &E);
CastKind PrepareCastToObjCObjectPointer(ExprResult &E);
bool CheckPointerConversion(Expr *From, QualType ToType,
CastKind &Kind,
CXXCastPath& BasePath,
bool IgnoreBaseAccess,
bool Diagnose = true);
bool IsMemberPointerConversion(Expr *From, QualType FromType, QualType ToType,
bool InOverloadResolution,
QualType &ConvertedType);
bool CheckMemberPointerConversion(Expr *From, QualType ToType,
CastKind &Kind,
CXXCastPath &BasePath,
bool IgnoreBaseAccess);
bool IsQualificationConversion(QualType FromType, QualType ToType,
bool CStyle, bool &ObjCLifetimeConversion);
bool IsFunctionConversion(QualType FromType, QualType ToType,
QualType &ResultTy);
bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType);
bool isSameOrCompatibleFunctionType(CanQualType Param, CanQualType Arg);
ExprResult PerformMoveOrCopyInitialization(const InitializedEntity &Entity,
const VarDecl *NRVOCandidate,
QualType ResultType,
Expr *Value,
bool AllowNRVO = true);
bool CanPerformAggregateInitializationForOverloadResolution(
const InitializedEntity &Entity, InitListExpr *From);
bool CanPerformCopyInitialization(const InitializedEntity &Entity,
ExprResult Init);
ExprResult PerformCopyInitialization(const InitializedEntity &Entity,
SourceLocation EqualLoc,
ExprResult Init,
bool TopLevelOfInitList = false,
bool AllowExplicit = false);
ExprResult PerformObjectArgumentInitialization(Expr *From,
NestedNameSpecifier *Qualifier,
NamedDecl *FoundDecl,
CXXMethodDecl *Method);
/// Check that the lifetime of the initializer (and its subobjects) is
/// sufficient for initializing the entity, and perform lifetime extension
/// (when permitted) if not.
void checkInitializerLifetime(const InitializedEntity &Entity, Expr *Init);
ExprResult PerformContextuallyConvertToBool(Expr *From);
ExprResult PerformContextuallyConvertToObjCPointer(Expr *From);
/// Contexts in which a converted constant expression is required.
enum CCEKind {
CCEK_CaseValue, ///< Expression in a case label.
CCEK_Enumerator, ///< Enumerator value with fixed underlying type.
CCEK_TemplateArg, ///< Value of a non-type template parameter.
CCEK_NewExpr, ///< Constant expression in a noptr-new-declarator.
CCEK_ConstexprIf, ///< Condition in a constexpr if statement.
CCEK_ExplicitBool ///< Condition in an explicit(bool) specifier.
};
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
llvm::APSInt &Value, CCEKind CCE);
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
APValue &Value, CCEKind CCE);
/// Abstract base class used to perform a contextual implicit
/// conversion from an expression to any type passing a filter.
class ContextualImplicitConverter {
public:
// NOTE(review): presumably suppresses all diagnostics emitted during the
// conversion — confirm against the diagnose* call sites.
bool Suppress;
// NOTE(review): presumably suppresses only the "conversion performed"
// diagnostic (see diagnoseConversion) — confirm.
bool SuppressConversion;
ContextualImplicitConverter(bool Suppress = false,
bool SuppressConversion = false)
: Suppress(Suppress), SuppressConversion(SuppressConversion) {}
/// Determine whether the specified type is a valid destination type
/// for this conversion.
virtual bool match(QualType T) = 0;
/// Emits a diagnostic complaining that the expression does not have
/// integral or enumeration type.
virtual SemaDiagnosticBuilder
diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) = 0;
/// Emits a diagnostic when the expression has incomplete class type.
virtual SemaDiagnosticBuilder
diagnoseIncomplete(Sema &S, SourceLocation Loc, QualType T) = 0;
/// Emits a diagnostic when the only matching conversion function
/// is explicit.
virtual SemaDiagnosticBuilder diagnoseExplicitConv(
Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;
/// Emits a note for the explicit conversion function.
virtual SemaDiagnosticBuilder
noteExplicitConv(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;
/// Emits a diagnostic when there are multiple possible conversion
/// functions.
virtual SemaDiagnosticBuilder
diagnoseAmbiguous(Sema &S, SourceLocation Loc, QualType T) = 0;
/// Emits a note for one of the candidate conversions.
virtual SemaDiagnosticBuilder
noteAmbiguous(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;
/// Emits a diagnostic when we picked a conversion function
/// (for cases when we are not allowed to pick a conversion function).
virtual SemaDiagnosticBuilder diagnoseConversion(
Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;
virtual ~ContextualImplicitConverter() {}
};
/// Converter for contextual conversions to an integral or enumeration type,
/// as used for integral constant expressions. diagnoseNoMatch forwards to the
/// more specifically named diagnoseNotInt hook.
class ICEConvertDiagnoser : public ContextualImplicitConverter {
// Whether scoped enumeration types are acceptable destinations for match().
bool AllowScopedEnumerations;
public:
ICEConvertDiagnoser(bool AllowScopedEnumerations,
bool Suppress, bool SuppressConversion)
: ContextualImplicitConverter(Suppress, SuppressConversion),
AllowScopedEnumerations(AllowScopedEnumerations) {}
/// Match an integral or (possibly scoped) enumeration type.
bool match(QualType T) override;
SemaDiagnosticBuilder
diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) override {
return diagnoseNotInt(S, Loc, T);
}
/// Emits a diagnostic complaining that the expression does not have
/// integral or enumeration type.
virtual SemaDiagnosticBuilder
diagnoseNotInt(Sema &S, SourceLocation Loc, QualType T) = 0;
};
/// Perform a contextual implicit conversion.
ExprResult PerformContextualImplicitConversion(
SourceLocation Loc, Expr *FromE, ContextualImplicitConverter &Converter);
/// Classification of an Objective-C container for subscripting purposes.
enum ObjCSubscriptKind {
OS_Array,
OS_Dictionary,
OS_Error
};
ObjCSubscriptKind CheckSubscriptingKind(Expr *FromE);
// Note that LK_String is intentionally after the other literals, as
// this is used for diagnostics logic.
enum ObjCLiteralKind {
LK_Array,
LK_Dictionary,
LK_Numeric,
LK_Boxed,
LK_String,
LK_Block,
LK_None
};
ObjCLiteralKind CheckLiteralKind(Expr *FromE);
ExprResult PerformObjectMemberConversion(Expr *From,
NestedNameSpecifier *Qualifier,
NamedDecl *FoundDecl,
NamedDecl *Member);
// Members have to be NamespaceDecl* or TranslationUnitDecl*.
// TODO: make this a typesafe union.
typedef llvm::SmallSetVector<DeclContext *, 16> AssociatedNamespaceSet;
typedef llvm::SmallSetVector<CXXRecordDecl *, 16> AssociatedClassSet;
using ADLCallKind = CallExpr::ADLCallKind;
// Overload resolution: the routines below build up the given
// OverloadCandidateSet for the various kinds of candidate (plain functions,
// methods, templates, conversion functions, surrogates, built-in operators,
// and ADL-discovered functions).
void AddOverloadCandidate(FunctionDecl *Function, DeclAccessPair FoundDecl,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
bool AllowExplicit = true,
bool AllowExplicitConversion = false,
ADLCallKind IsADLCandidate = ADLCallKind::NotADL,
ConversionSequenceList EarlyConversions = None,
OverloadCandidateParamOrder PO = {});
void AddFunctionCandidates(const UnresolvedSetImpl &Functions,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
bool FirstArgumentIsBase = false);
void AddMethodCandidate(DeclAccessPair FoundDecl,
QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversion = false,
OverloadCandidateParamOrder PO = {});
void AddMethodCandidate(CXXMethodDecl *Method,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
ConversionSequenceList EarlyConversions = None,
OverloadCandidateParamOrder PO = {});
void AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
TemplateArgumentListInfo *ExplicitTemplateArgs,
QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
OverloadCandidateParamOrder PO = {});
void AddTemplateOverloadCandidate(
FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl,
TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet, bool SuppressUserConversions = false,
bool PartialOverloading = false, bool AllowExplicit = true,
ADLCallKind IsADLCandidate = ADLCallKind::NotADL,
OverloadCandidateParamOrder PO = {});
bool CheckNonDependentConversions(
FunctionTemplateDecl *FunctionTemplate, ArrayRef<QualType> ParamTypes,
ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet,
ConversionSequenceList &Conversions, bool SuppressUserConversions,
CXXRecordDecl *ActingContext = nullptr, QualType ObjectType = QualType(),
Expr::Classification ObjectClassification = {},
OverloadCandidateParamOrder PO = {});
void AddConversionCandidate(
CXXConversionDecl *Conversion, DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, Expr *From, QualType ToType,
OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit,
bool AllowExplicit, bool AllowResultConversion = true);
void AddTemplateConversionCandidate(
FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, Expr *From, QualType ToType,
OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit,
bool AllowExplicit, bool AllowResultConversion = true);
void AddSurrogateCandidate(CXXConversionDecl *Conversion,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
const FunctionProtoType *Proto,
Expr *Object, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet);
void AddNonMemberOperatorCandidates(
const UnresolvedSetImpl &Functions, ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr);
void AddMemberOperatorCandidates(OverloadedOperatorKind Op,
SourceLocation OpLoc, ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
OverloadCandidateParamOrder PO = {});
void AddBuiltinCandidate(QualType *ParamTys, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool IsAssignmentOperator = false,
unsigned NumContextualBoolArguments = 0);
void AddBuiltinOperatorCandidates(OverloadedOperatorKind Op,
SourceLocation OpLoc, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet);
void AddArgumentDependentLookupCandidates(DeclarationName Name,
SourceLocation Loc,
ArrayRef<Expr *> Args,
TemplateArgumentListInfo *ExplicitTemplateArgs,
OverloadCandidateSet& CandidateSet,
bool PartialOverloading = false);
// Emit as a 'note' the specific overload candidate
void NoteOverloadCandidate(
NamedDecl *Found, FunctionDecl *Fn,
OverloadCandidateRewriteKind RewriteKind = OverloadCandidateRewriteKind(),
QualType DestType = QualType(), bool TakingAddress = false);
// Emit as a series of 'note's all template and non-templates identified by
// the expression Expr
void NoteAllOverloadCandidates(Expr *E, QualType DestType = QualType(),
bool TakingAddress = false);
/// Check the enable_if expressions on the given function. Returns the first
/// failing attribute, or NULL if they were all successful.
EnableIfAttr *CheckEnableIf(FunctionDecl *Function, SourceLocation CallLoc,
ArrayRef<Expr *> Args,
bool MissingImplicitThis = false);
/// Find the failed Boolean condition within a given Boolean
/// constant expression, and describe it with a string.
std::pair<Expr *, std::string> findFailedBooleanCondition(Expr *Cond);
/// Emit diagnostics for the diagnose_if attributes on Function, ignoring any
/// non-ArgDependent DiagnoseIfAttrs.
///
/// Argument-dependent diagnose_if attributes should be checked each time a
/// function is used as a direct callee of a function call.
///
/// Returns true if any errors were emitted.
bool diagnoseArgDependentDiagnoseIfAttrs(const FunctionDecl *Function,
const Expr *ThisArg,
ArrayRef<const Expr *> Args,
SourceLocation Loc);
/// Emit diagnostics for the diagnose_if attributes on Function, ignoring any
/// ArgDependent DiagnoseIfAttrs.
///
/// Argument-independent diagnose_if attributes should be checked on every use
/// of a function.
///
/// Returns true if any errors were emitted.
bool diagnoseArgIndependentDiagnoseIfAttrs(const NamedDecl *ND,
SourceLocation Loc);
/// Returns whether the given function's address can be taken or not,
/// optionally emitting a diagnostic if the address can't be taken.
///
/// Returns false if taking the address of the function is illegal.
bool checkAddressOfFunctionIsAvailable(const FunctionDecl *Function,
bool Complain = false,
SourceLocation Loc = SourceLocation());
// [PossiblyAFunctionType] --> [Return]
// NonFunctionType --> NonFunctionType
// R (A) --> R(A)
// R (*)(A) --> R (A)
// R (&)(A) --> R (A)
// R (S::*)(A) --> R (A)
QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType);
/// Resolve an overloaded address-of expression against a target type,
/// returning the single viable function (or null).
FunctionDecl *
ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr,
QualType TargetType,
bool Complain,
DeclAccessPair &Found,
bool *pHadMultipleCandidates = nullptr);
FunctionDecl *
resolveAddressOfSingleOverloadCandidate(Expr *E, DeclAccessPair &FoundResult);
bool resolveAndFixAddressOfSingleOverloadCandidate(
ExprResult &SrcExpr, bool DoFunctionPointerConversion = false);
FunctionDecl *
ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl,
bool Complain = false,
DeclAccessPair *Found = nullptr);
// NOTE(review): parameter name "DoFunctionPointerConverion" is misspelled;
// left as-is because the out-of-view definition shares the spelling.
bool ResolveAndFixSingleFunctionTemplateSpecialization(
ExprResult &SrcExpr,
bool DoFunctionPointerConverion = false,
bool Complain = false,
SourceRange OpRangeForComplaining = SourceRange(),
QualType DestTypeForComplaining = QualType(),
unsigned DiagIDForComplaining = 0);
Expr *FixOverloadedFunctionReference(Expr *E,
DeclAccessPair FoundDecl,
FunctionDecl *Fn);
ExprResult FixOverloadedFunctionReference(ExprResult,
DeclAccessPair FoundDecl,
FunctionDecl *Fn);
void AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
bool PartialOverloading = false);
// An enum used to represent the different possible results of building a
// range-based for loop.
enum ForRangeStatus {
FRS_Success,
FRS_NoViableFunction,
FRS_DiagnosticIssued
};
/// Build the begin()/end() calls for a range-based for loop; see
/// ForRangeStatus for the possible outcomes.
ForRangeStatus BuildForRangeBeginEndCall(SourceLocation Loc,
SourceLocation RangeLoc,
const DeclarationNameInfo &NameInfo,
LookupResult &MemberLookup,
OverloadCandidateSet *CandidateSet,
Expr *Range, ExprResult *CallExpr);
ExprResult BuildOverloadedCallExpr(Scope *S, Expr *Fn,
UnresolvedLookupExpr *ULE,
SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc,
Expr *ExecConfig,
bool AllowTypoCorrection=true,
bool CalleesAddressIsTaken=false);
bool buildOverloadedCallSet(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE,
MultiExprArg Args, SourceLocation RParenLoc,
OverloadCandidateSet *CandidateSet,
ExprResult *Result);
ExprResult CreateUnresolvedLookupExpr(CXXRecordDecl *NamingClass,
NestedNameSpecifierLoc NNSLoc,
DeclarationNameInfo DNI,
const UnresolvedSetImpl &Fns,
bool PerformADL = true);
ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc,
UnaryOperatorKind Opc,
const UnresolvedSetImpl &Fns,
Expr *input, bool RequiresADL = true);
void LookupOverloadedBinOp(OverloadCandidateSet &CandidateSet,
OverloadedOperatorKind Op,
const UnresolvedSetImpl &Fns,
ArrayRef<Expr *> Args, bool RequiresADL = true);
ExprResult CreateOverloadedBinOp(SourceLocation OpLoc,
BinaryOperatorKind Opc,
const UnresolvedSetImpl &Fns,
Expr *LHS, Expr *RHS,
bool RequiresADL = true,
bool AllowRewrittenCandidates = true,
FunctionDecl *DefaultedFn = nullptr);
ExprResult BuildSynthesizedThreeWayComparison(SourceLocation OpLoc,
const UnresolvedSetImpl &Fns,
Expr *LHS, Expr *RHS,
FunctionDecl *DefaultedFn);
ExprResult CreateOverloadedArraySubscriptExpr(SourceLocation LLoc,
SourceLocation RLoc,
Expr *Base,Expr *Idx);
ExprResult
BuildCallToMemberFunction(Scope *S, Expr *MemExpr,
SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc);
ExprResult
BuildCallToObjectOfClassType(Scope *S, Expr *Object, SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc);
ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
bool *NoArrowOperatorFound = nullptr);
/// CheckCallReturnType - Checks that a call expression's return type is
/// complete. Returns true on failure. The location passed in is the location
/// that best represents the call.
bool CheckCallReturnType(QualType ReturnType, SourceLocation Loc,
CallExpr *CE, FunctionDecl *FD);
/// Helpers for dealing with blocks and functions.
bool CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters,
bool CheckParameterNames);
void CheckCXXDefaultArguments(FunctionDecl *FD);
void CheckExtraCXXDefaultArguments(Declarator &D);
Scope *getNonFieldDeclScope(Scope *S);
/// \name Name lookup
///
/// These routines provide name lookup that is used during semantic
/// analysis to resolve the various kinds of names (identifiers,
/// overloaded operator names, constructor names, etc.) into zero or
/// more declarations within a particular scope. The major entry
/// points are LookupName, which performs unqualified name lookup,
/// and LookupQualifiedName, which performs qualified name lookup.
///
/// All name lookup is performed based on some specific criteria,
/// which specify what names will be visible to name lookup and how
/// far name lookup should work. These criteria are important both
/// for capturing language semantics (certain lookups will ignore
/// certain names, for example) and for performance, since name
/// lookup is often a bottleneck in the compilation of C++. Name
/// lookup criteria is specified via the LookupCriteria enumeration.
///
/// The results of name lookup can vary based on the kind of name
/// lookup performed, the current language, and the translation
/// unit. In C, for example, name lookup will either return nothing
/// (no entity found) or a single declaration. In C++, name lookup
/// can additionally refer to a set of overloaded functions or
/// result in an ambiguity. All of the possible results of name
/// lookup are captured by the LookupResult class, which provides
/// the ability to distinguish among them.
//@{
/// Describes the kind of name lookup to perform.
///
/// The kind controls both which declarations are visible to the lookup and
/// which scopes are searched; see the per-enumerator comments.
enum LookupNameKind {
/// Ordinary name lookup, which finds ordinary names (functions,
/// variables, typedefs, etc.) in C and most kinds of names
/// (functions, variables, members, types, etc.) in C++.
LookupOrdinaryName = 0,
/// Tag name lookup, which finds the names of enums, classes,
/// structs, and unions.
LookupTagName,
/// Label name lookup.
LookupLabel,
/// Member name lookup, which finds the names of
/// class/struct/union members.
LookupMemberName,
/// Look up of an operator name (e.g., operator+) for use with
/// operator overloading. This lookup is similar to ordinary name
/// lookup, but will ignore any declarations that are class members.
LookupOperatorName,
/// Look up a name following ~ in a destructor name. This is an ordinary
/// lookup, but prefers tags to typedefs.
LookupDestructorName,
/// Look up of a name that precedes the '::' scope resolution
/// operator in C++. This lookup completely ignores operator, object,
/// function, and enumerator names (C++ [basic.lookup.qual]p1).
LookupNestedNameSpecifierName,
/// Look up a namespace name within a C++ using directive or
/// namespace alias definition, ignoring non-namespace names (C++
/// [basic.lookup.udir]p1).
LookupNamespaceName,
/// Look up all declarations in a scope with the given name,
/// including resolved using declarations. This is appropriate
/// for checking redeclarations for a using declaration.
LookupUsingDeclName,
/// Look up an ordinary name that is going to be redeclared as a
/// name with linkage. This lookup ignores any declarations that
/// are outside of the current scope unless they have linkage. See
/// C99 6.2.2p4-5 and C++ [basic.link]p6.
LookupRedeclarationWithLinkage,
/// Look up a friend of a local class. This lookup does not look
/// outside the innermost non-class scope. See C++11 [class.friend]p11.
LookupLocalFriendName,
/// Look up the name of an Objective-C protocol.
LookupObjCProtocolName,
/// Look up implicit 'self' parameter of an objective-c method.
LookupObjCImplicitSelfParam,
/// Look up the name of an OpenMP user-defined reduction operation.
LookupOMPReductionName,
/// Look up the name of an OpenMP user-defined mapper.
LookupOMPMapperName,
/// Look up any declaration with any name.
LookupAnyName
};
/// Specifies whether (or how) name lookup is being performed for a
/// redeclaration (vs. a reference).
///
/// Use forRedeclarationInCurContext() to select the appropriate kind for
/// the current declaration context.
enum RedeclarationKind {
/// The lookup is a reference to this name that is not for the
/// purpose of redeclaring the name.
NotForRedeclaration = 0,
/// The lookup results will be used for redeclaration of a name,
/// if an entity by that name already exists and is visible.
ForVisibleRedeclaration,
/// The lookup results will be used for redeclaration of a name
/// with external linkage; non-visible lookup results with external linkage
/// may also be found.
ForExternalRedeclaration
};
/// Select the redeclaration-lookup kind appropriate for CurContext.
RedeclarationKind forRedeclarationInCurContext() {
  // A declaration with an owning module for linkage can never link against
  // anything that is not visible, so restrict lookup to visible entities.
  // We don't need to check linkage here; if the context has internal
  // linkage, redeclaration lookup won't find things from other TUs, and we
  // can't safely compute linkage yet in general.
  auto *ContextDecl = cast<Decl>(CurContext);
  return ContextDecl->getOwningModuleForLinkage(/*IgnoreLinkage*/ true)
             ? ForVisibleRedeclaration
             : ForExternalRedeclaration;
}
/// The possible outcomes of name lookup for a literal operator.
/// Returned by LookupLiteralOperator.
enum LiteralOperatorLookupResult {
/// The lookup resulted in an error.
LOLR_Error,
/// The lookup found no match but no diagnostic was issued.
LOLR_ErrorNoDiagnostic,
/// The lookup found a single 'cooked' literal operator, which
/// expects a normal literal to be built and passed to it.
LOLR_Cooked,
/// The lookup found a single 'raw' literal operator, which expects
/// a string literal containing the spelling of the literal token.
LOLR_Raw,
/// The lookup found an overload set of literal operator templates,
/// which expect the characters of the spelling of the literal token to be
/// passed as a non-type template argument pack.
LOLR_Template,
/// The lookup found an overload set of literal operator templates,
/// which expect the character type and characters of the spelling of the
/// string literal token to be passed as template arguments.
LOLR_StringTemplate
};
/// Look up the special member function (constructor, destructor, etc.) of
/// kind \p SM for class \p D with the given argument/this qualifiers.
SpecialMemberOverloadResult LookupSpecialMember(CXXRecordDecl *D,
CXXSpecialMember SM,
bool ConstArg,
bool VolatileArg,
bool RValueThis,
bool ConstThis,
bool VolatileThis);
// Callback invoked to emit the diagnostic for a delayed typo correction.
typedef std::function<void(const TypoCorrection &)> TypoDiagnosticGenerator;
// Callback invoked to build the recovery expression for a corrected typo.
typedef std::function<ExprResult(Sema &, TypoExpr *, TypoCorrection)>
TypoRecoveryCallback;
private:
bool CppLookupName(LookupResult &R, Scope *S);
/// State tracked for a delayed TypoExpr: the candidate consumer plus the
/// diagnostic and recovery callbacks. Move-only (declaring the move
/// operations implicitly deletes the copy operations).
struct TypoExprState {
std::unique_ptr<TypoCorrectionConsumer> Consumer;
TypoDiagnosticGenerator DiagHandler;
TypoRecoveryCallback RecoveryHandler;
TypoExprState();
TypoExprState(TypoExprState &&other) noexcept;
TypoExprState &operator=(TypoExprState &&other) noexcept;
};
/// The set of unhandled TypoExprs and their associated state.
llvm::MapVector<TypoExpr *, TypoExprState> DelayedTypos;
/// Creates a new TypoExpr AST node.
TypoExpr *createDelayedTypo(std::unique_ptr<TypoCorrectionConsumer> TCC,
TypoDiagnosticGenerator TDG,
TypoRecoveryCallback TRC, SourceLocation TypoLoc);
// The set of known/encountered (unique, canonicalized) NamespaceDecls.
//
// The boolean value will be true to indicate that the namespace was loaded
// from an AST/PCH file, or false otherwise.
llvm::MapVector<NamespaceDecl*, bool> KnownNamespaces;
/// Whether we have already loaded known namespaces from an external
/// source.
bool LoadedExternalKnownNamespaces;
/// Helper for CorrectTypo and CorrectTypoDelayed used to create and
/// populate a new TypoCorrectionConsumer. Returns nullptr if typo correction
/// should be skipped entirely.
std::unique_ptr<TypoCorrectionConsumer>
makeTypoCorrectionConsumer(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind, Scope *S,
CXXScopeSpec *SS,
CorrectionCandidateCallback &CCC,
DeclContext *MemberContext, bool EnteringContext,
const ObjCObjectPointerType *OPT,
bool ErrorRecovery);
public:
const TypoExprState &getTypoExprState(TypoExpr *TE) const;
/// Clears the state of the given TypoExpr.
void clearDelayedTypo(TypoExpr *TE);
/// Look up a name, looking for a single declaration. Return
/// null if the results were absent, ambiguous, or overloaded.
///
/// It is preferable to use the elaborated form and explicitly handle
/// ambiguity and overloaded.
NamedDecl *LookupSingleName(Scope *S, DeclarationName Name,
SourceLocation Loc,
LookupNameKind NameKind,
RedeclarationKind Redecl
= NotForRedeclaration);
bool LookupBuiltin(LookupResult &R);
/// Perform unqualified name lookup starting from scope \p S.
bool LookupName(LookupResult &R, Scope *S,
bool AllowBuiltinCreation = false);
/// Perform qualified name lookup into the given declaration context.
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
bool InUnqualifiedLookup = false);
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
CXXScopeSpec &SS);
bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS,
bool AllowBuiltinCreation = false,
bool EnteringContext = false);
ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc,
RedeclarationKind Redecl
= NotForRedeclaration);
bool LookupInSuper(LookupResult &R, CXXRecordDecl *Class);
void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S,
UnresolvedSetImpl &Functions);
LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation IdentLoc,
SourceLocation GnuLabelLoc = SourceLocation());
DeclContextLookupResult LookupConstructors(CXXRecordDecl *Class);
CXXConstructorDecl *LookupDefaultConstructor(CXXRecordDecl *Class);
CXXConstructorDecl *LookupCopyingConstructor(CXXRecordDecl *Class,
unsigned Quals);
CXXMethodDecl *LookupCopyingAssignment(CXXRecordDecl *Class, unsigned Quals,
bool RValueThis, unsigned ThisQuals);
CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class,
unsigned Quals);
CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class, unsigned Quals,
bool RValueThis, unsigned ThisQuals);
CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class);
bool checkLiteralOperatorId(const CXXScopeSpec &SS, const UnqualifiedId &Id);
/// Look up a user-defined literal operator; see
/// LiteralOperatorLookupResult for the possible outcomes.
LiteralOperatorLookupResult LookupLiteralOperator(Scope *S, LookupResult &R,
ArrayRef<QualType> ArgTys,
bool AllowRaw,
bool AllowTemplate,
bool AllowStringTemplate,
bool DiagnoseMissing);
bool isKnownName(StringRef name);
/// Status of the function emission on the CUDA/HIP/OpenMP host/device attrs.
enum class FunctionEmissionStatus {
Emitted,
CUDADiscarded, // Discarded due to CUDA/HIP hostness
OMPDiscarded, // Discarded due to OpenMP hostness
TemplateDiscarded, // Discarded due to uninstantiated templates
Unknown,
};
FunctionEmissionStatus getEmissionStatus(FunctionDecl *Decl,
bool Final = false);
// Whether the callee should be ignored in CUDA/HIP/OpenMP host/device check.
bool shouldIgnoreInHostDeviceCheck(FunctionDecl *Callee);
void ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc,
ArrayRef<Expr *> Args, ADLResult &Functions);
void LookupVisibleDecls(Scope *S, LookupNameKind Kind,
VisibleDeclConsumer &Consumer,
bool IncludeGlobalScope = true,
bool LoadExternal = true);
void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind,
VisibleDeclConsumer &Consumer,
bool IncludeGlobalScope = true,
bool IncludeDependentBases = false,
bool LoadExternal = true);
/// The situation in which typo correction is being performed.
enum CorrectTypoKind {
CTK_NonError, // CorrectTypo used in a non error recovery situation.
CTK_ErrorRecovery // CorrectTypo used in normal error recovery.
};
/// Attempt to correct the typo'd name \p Typo immediately, returning the
/// chosen correction (if any).
TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind,
Scope *S, CXXScopeSpec *SS,
CorrectionCandidateCallback &CCC,
CorrectTypoKind Mode,
DeclContext *MemberContext = nullptr,
bool EnteringContext = false,
const ObjCObjectPointerType *OPT = nullptr,
bool RecordFailure = true);
/// Delayed variant of CorrectTypo: produces a TypoExpr placeholder whose
/// correction is resolved later via the TDG/TRC callbacks.
TypoExpr *CorrectTypoDelayed(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind, Scope *S,
CXXScopeSpec *SS,
CorrectionCandidateCallback &CCC,
TypoDiagnosticGenerator TDG,
TypoRecoveryCallback TRC, CorrectTypoKind Mode,
DeclContext *MemberContext = nullptr,
bool EnteringContext = false,
const ObjCObjectPointerType *OPT = nullptr);
/// Process any TypoExprs in the given Expr and its children,
/// generating diagnostics as appropriate and returning a new Expr if there
/// were typos that were all successfully corrected and ExprError if one or
/// more typos could not be corrected.
///
/// \param E The Expr to check for TypoExprs.
///
/// \param InitDecl A VarDecl to avoid because the Expr being corrected is its
/// initializer.
///
/// \param RecoverUncorrectedTypos If true, when typo correction fails, it
/// will rebuild the given Expr with all TypoExprs degraded to RecoveryExprs.
///
/// \param Filter A function applied to a newly rebuilt Expr to determine if
/// it is an acceptable/usable result from a single combination of typo
/// corrections. As long as the filter returns ExprError, different
/// combinations of corrections will be tried until all are exhausted.
ExprResult CorrectDelayedTyposInExpr(
Expr *E, VarDecl *InitDecl = nullptr,
bool RecoverUncorrectedTypos = false,
llvm::function_ref<ExprResult(Expr *)> Filter =
[](Expr *E) -> ExprResult { return E; });
/// Convenience overload of CorrectDelayedTyposInExpr taking an ExprResult:
/// an invalid result is passed through unchanged.
ExprResult CorrectDelayedTyposInExpr(
    ExprResult ER, VarDecl *InitDecl = nullptr,
    bool RecoverUncorrectedTypos = false,
    llvm::function_ref<ExprResult(Expr *)> Filter =
        [](Expr *E) -> ExprResult { return E; }) {
  // Don't attempt correction on an already-invalid result.
  if (ER.isInvalid())
    return ER;
  return CorrectDelayedTyposInExpr(ER.get(), InitDecl, RecoverUncorrectedTypos,
                                   Filter);
}
/// Emit the diagnostic (and optional recovery) for a typo correction.
void diagnoseTypo(const TypoCorrection &Correction,
const PartialDiagnostic &TypoDiag,
bool ErrorRecovery = true);
/// Overload that also emits a previous-declaration note \p PrevNote.
void diagnoseTypo(const TypoCorrection &Correction,
const PartialDiagnostic &TypoDiag,
const PartialDiagnostic &PrevNote,
bool ErrorRecovery = true);
void MarkTypoCorrectedFunctionDefinition(const NamedDecl *F);
/// Compute the namespaces and classes associated with the argument types,
/// for argument-dependent lookup (C++ [basic.lookup.argdep]).
void FindAssociatedClassesAndNamespaces(SourceLocation InstantiationLoc,
ArrayRef<Expr *> Args,
AssociatedNamespaceSet &AssociatedNamespaces,
AssociatedClassSet &AssociatedClasses);
void FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S,
bool ConsiderLinkage, bool AllowInlineNamespace);
bool CheckRedeclarationModuleOwnership(NamedDecl *New, NamedDecl *Old);
void DiagnoseAmbiguousLookup(LookupResult &Result);
//@}
/// Attempts to produce a RecoveryExpr after some AST node cannot be created.
ExprResult CreateRecoveryExpr(SourceLocation Begin, SourceLocation End,
ArrayRef<Expr *> SubExprs,
QualType T = QualType());
ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id,
SourceLocation IdLoc,
bool TypoCorrection = false);
NamedDecl *LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID,
Scope *S, bool ForRedeclaration,
SourceLocation Loc);
NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II,
Scope *S);
void AddKnownFunctionAttributesForReplaceableGlobalAllocationFunction(
FunctionDecl *FD);
void AddKnownFunctionAttributes(FunctionDecl *FD);
// More parsing and symbol table subroutines.
void ProcessPragmaWeak(Scope *S, Decl *D);
// Decl attributes - this routine is the top level dispatcher.
void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD);
// Helper for delayed processing of attributes.
void ProcessDeclAttributeDelayed(Decl *D,
const ParsedAttributesView &AttrList);
void ProcessDeclAttributeList(Scope *S, Decl *D, const ParsedAttributesView &AL,
bool IncludeCXX11Attributes = true);
bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl,
const ParsedAttributesView &AttrList);
void checkUnusedDeclAttributes(Declarator &D);
/// Map any API notes provided for this declaration to attributes on the
/// declaration.
///
/// Triggered by declaration-attribute processing.
void ProcessAPINotes(Decl *D);
/// Determine if type T is a valid subject for a nonnull and similar
/// attributes. By default, we look through references (the behavior used by
/// nonnull), but if the second parameter is true, then we treat a reference
/// type as valid.
bool isValidPointerAttrType(QualType T, bool RefOkay = false);
bool CheckRegparmAttr(const ParsedAttr &attr, unsigned &value);
bool CheckCallingConvAttr(const ParsedAttr &attr, CallingConv &CC,
const FunctionDecl *FD = nullptr);
bool CheckAttrTarget(const ParsedAttr &CurrAttr);
bool CheckAttrNoArgs(const ParsedAttr &CurrAttr);
bool checkStringLiteralArgumentAttr(const ParsedAttr &Attr, unsigned ArgNum,
StringRef &Str,
SourceLocation *ArgLocation = nullptr);
bool checkSectionName(SourceLocation LiteralLoc, StringRef Str);
bool checkTargetAttr(SourceLocation LiteralLoc, StringRef Str);
bool checkMSInheritanceAttrOnDefinition(
CXXRecordDecl *RD, SourceRange Range, bool BestCase,
MSInheritanceModel SemanticSpelling);
void CheckAlignasUnderalignment(Decl *D);
/// Adjust the calling convention of a method to be the ABI default if it
/// wasn't specified explicitly. This handles method types formed from
/// function type typedefs and typename template arguments.
void adjustMemberFunctionCC(QualType &T, bool IsStatic, bool IsCtorOrDtor,
SourceLocation Loc);
// Check if there is an explicit attribute, but only look through parens.
// The intent is to look for an attribute on the current declarator, but not
// one that came from a typedef.
bool hasExplicitCallingConv(QualType T);
/// Get the outermost AttributedType node that sets a calling convention.
/// Valid types should not have multiple attributes with different CCs.
const AttributedType *getCallingConvAttributedType(QualType T) const;
/// Check whether a nullability type specifier can be added to the given
/// type through some means not written in source (e.g. API notes).
///
/// \param type The type to which the nullability specifier will be
/// added. On success, this type will be updated appropriately.
///
/// \param nullability The nullability specifier to add.
///
/// \param diagLoc The location to use for diagnostics.
///
/// \param allowArrayTypes Whether to accept nullability specifiers on an
/// array type (e.g., because it will decay to a pointer).
///
/// \param overrideExisting Whether to override an existing, locally-specified
/// nullability specifier rather than complaining about the conflict.
///
/// \returns true if nullability cannot be applied, false otherwise.
bool checkImplicitNullabilityTypeSpecifier(QualType &type,
NullabilityKind nullability,
SourceLocation diagLoc,
bool allowArrayTypes,
bool overrideExisting);
/// Stmt attributes - this routine is the top level dispatcher.
StmtResult ProcessStmtAttributes(Stmt *Stmt,
const ParsedAttributesView &Attrs,
SourceRange Range);
void WarnConflictingTypedMethods(ObjCMethodDecl *Method,
ObjCMethodDecl *MethodDecl,
bool IsProtocolMethodDecl);
void CheckConflictingOverridingMethod(ObjCMethodDecl *Method,
ObjCMethodDecl *Overridden,
bool IsProtocolMethodDecl);
/// WarnExactTypedMethods - This routine issues a warning if method
/// implementation declaration matches exactly that of its declaration.
void WarnExactTypedMethods(ObjCMethodDecl *Method,
ObjCMethodDecl *MethodDecl,
bool IsProtocolMethodDecl);
typedef llvm::SmallPtrSet<Selector, 8> SelectorSet;
/// CheckImplementationIvars - This routine checks if the instance variables
/// listed in the implelementation match those listed in the interface.
void CheckImplementationIvars(ObjCImplementationDecl *ImpDecl,
ObjCIvarDecl **Fields, unsigned nIvars,
SourceLocation Loc);
/// ImplMethodsVsClassMethods - This is main routine to warn if any method
/// remains unimplemented in the class or category \@implementation.
void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl,
ObjCContainerDecl* IDecl,
bool IncompleteImpl = false);
/// DiagnoseUnimplementedProperties - This routine warns on those properties
/// which must be implemented by this implementation.
void DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl,
ObjCContainerDecl *CDecl,
bool SynthesizeProperties);
/// Diagnose any null-resettable synthesized setters.
void diagnoseNullResettableSynthesizedSetters(const ObjCImplDecl *impDecl);
/// DefaultSynthesizeProperties - This routine default synthesizes all
/// properties which must be synthesized in the class's \@implementation.
void DefaultSynthesizeProperties(Scope *S, ObjCImplDecl *IMPDecl,
ObjCInterfaceDecl *IDecl,
SourceLocation AtEnd);
void DefaultSynthesizeProperties(Scope *S, Decl *D, SourceLocation AtEnd);
/// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is
/// an ivar synthesized for 'Method' and 'Method' is a property accessor
/// declared in class 'IFace'.
bool IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace,
ObjCMethodDecl *Method, ObjCIvarDecl *IV);
/// DiagnoseUnusedBackingIvarInAccessor - Issue an 'unused' warning if ivar which
/// backs the property is not used in the property's accessor.
void DiagnoseUnusedBackingIvarInAccessor(Scope *S,
const ObjCImplementationDecl *ImplD);
/// GetIvarBackingPropertyAccessor - If method is a property setter/getter and
/// its property has a backing ivar, returns this ivar; otherwise, returns NULL.
/// It also returns ivar's property on success.
ObjCIvarDecl *GetIvarBackingPropertyAccessor(const ObjCMethodDecl *Method,
const ObjCPropertyDecl *&PDecl) const;
/// Called by ActOnProperty to handle \@property declarations in
/// class extensions.
ObjCPropertyDecl *HandlePropertyInClassExtension(Scope *S,
SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD,
Selector GetterSel,
SourceLocation GetterNameLoc,
Selector SetterSel,
SourceLocation SetterNameLoc,
const bool isReadWrite,
unsigned &Attributes,
const unsigned AttributesAsWritten,
QualType T,
TypeSourceInfo *TSI,
tok::ObjCKeywordKind MethodImplKind);
/// Called by ActOnProperty and HandlePropertyInClassExtension to
/// handle creating the ObjcPropertyDecl for a category or \@interface.
ObjCPropertyDecl *CreatePropertyDecl(Scope *S,
ObjCContainerDecl *CDecl,
SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD,
Selector GetterSel,
SourceLocation GetterNameLoc,
Selector SetterSel,
SourceLocation SetterNameLoc,
const bool isReadWrite,
const unsigned Attributes,
const unsigned AttributesAsWritten,
QualType T,
TypeSourceInfo *TSI,
tok::ObjCKeywordKind MethodImplKind,
DeclContext *lexicalDC = nullptr);
/// AtomicPropertySetterGetterRules - This routine enforces the rule (via
/// warning) when atomic property has one but not the other user-declared
/// setter or getter.
void AtomicPropertySetterGetterRules(ObjCImplDecl* IMPDecl,
ObjCInterfaceDecl* IDecl);
void DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D);
void DiagnoseMissingDesignatedInitOverrides(
const ObjCImplementationDecl *ImplD,
const ObjCInterfaceDecl *IFD);
void DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID);
/// How strictly two Objective-C method declarations must agree when
/// compared by MatchTwoMethodDeclarations.
enum MethodMatchStrategy {
MMS_loose, ///< Tolerate compatible-but-not-identical types.
MMS_strict ///< Require the method types to match exactly.
};
/// MatchTwoMethodDeclarations - Checks if two methods' type match and returns
/// true, or false, accordingly.
bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method,
const ObjCMethodDecl *PrevMethod,
MethodMatchStrategy strategy = MMS_strict);
/// MatchAllMethodDeclarations - Check methods declared in an interface
/// or protocol against those declared in their implementations.
void MatchAllMethodDeclarations(const SelectorSet &InsMap,
const SelectorSet &ClsMap,
SelectorSet &InsMapSeen,
SelectorSet &ClsMapSeen,
ObjCImplDecl* IMPDecl,
ObjCContainerDecl* IDecl,
bool &IncompleteImpl,
bool ImmediateClass,
bool WarnCategoryMethodImpl=false);
/// CheckCategoryVsClassMethodMatches - Checks that methods implemented in
/// category matches with those implemented in its primary class and
/// warns each time an exact match is found.
void CheckCategoryVsClassMethodMatches(ObjCCategoryImplDecl *CatIMP);
/// Add the given method to the list of globally-known methods.
void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method);
/// Returns default addr space for method qualifiers.
LangAS getDefaultCXXMethodAddrSpace() const;
private:
/// AddMethodToGlobalPool - Add an instance or factory method to the global
/// pool. See description of AddInstanceMethodToGlobalPool.
void AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl, bool instance);
/// LookupMethodInGlobalPool - Returns the instance or factory method and
/// optionally warns if there are multiple signatures.
ObjCMethodDecl *LookupMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass,
bool instance);
public:
/// - Returns instance or factory methods in global method pool for
/// given selector. It checks the desired kind first; if none is found, and
/// parameter checkTheOther is set, it then checks the other kind. If no such
/// method or only one method is found, the function returns false; otherwise,
/// it returns true.
bool
CollectMultipleMethodsInGlobalPool(Selector Sel,
SmallVectorImpl<ObjCMethodDecl*>& Methods,
bool InstanceFirst, bool CheckTheOther,
const ObjCObjectType *TypeBound = nullptr);
bool
AreMultipleMethodsInGlobalPool(Selector Sel, ObjCMethodDecl *BestMethod,
SourceRange R, bool receiverIdOrClass,
SmallVectorImpl<ObjCMethodDecl*>& Methods);
void
DiagnoseMultipleMethodInGlobalPool(SmallVectorImpl<ObjCMethodDecl*> &Methods,
Selector Sel, SourceRange R,
bool receiverIdOrClass);
private:
/// - Returns the method which best matches the given argument list, or
/// nullptr if none could be found.
ObjCMethodDecl *SelectBestMethod(Selector Sel, MultiExprArg Args,
bool IsInstance,
SmallVectorImpl<ObjCMethodDecl*>& Methods);
/// Record the typo correction failure and return an empty correction.
///
/// \param Typo The identifier for which correction failed.
/// \param TypoLoc The location where the failed correction was attempted;
/// remembered so the same (identifier, location) pair is not retried.
/// \param RecordFailure If false, nothing is recorded and only the empty
/// correction is returned.
TypoCorrection FailedCorrection(IdentifierInfo *Typo, SourceLocation TypoLoc,
bool RecordFailure = true) {
if (RecordFailure)
TypoCorrectionFailures[Typo].insert(TypoLoc);
// An empty (default-constructed) TypoCorrection signals "no correction".
return TypoCorrection();
}
public:
/// AddInstanceMethodToGlobalPool - All instance methods in a translation
/// unit are added to a global pool. This allows us to efficiently associate
/// a selector with a method declaration for purposes of typechecking
/// messages sent to "id" (where the class of the object is unknown).
///
/// \param impl True when the method comes from an implementation rather
/// than a declaration.
void AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
AddMethodToGlobalPool(Method, impl, /*instance*/true);
}
/// AddFactoryMethodToGlobalPool - Same as AddInstanceMethodToGlobalPool,
/// but for factory (class) methods.
void AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
AddMethodToGlobalPool(Method, impl, /*instance*/false);
}
/// AddAnyMethodToGlobalPool - Add any method, instance or factory to global
/// pool.
void AddAnyMethodToGlobalPool(Decl *D);
/// LookupInstanceMethodInGlobalPool - Returns the instance method for the
/// given selector from the global method pool, and warns if there are
/// multiple signatures.
///
/// \param R Source range used when emitting the multiple-signature warning.
ObjCMethodDecl *LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass=false) {
return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
/*instance*/true);
}
/// LookupFactoryMethodInGlobalPool - Returns the factory (class) method for
/// the given selector from the global method pool, and warns if there are
/// multiple signatures.
ObjCMethodDecl *LookupFactoryMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass=false) {
return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
/*instance*/false);
}
const ObjCMethodDecl *SelectorsForTypoCorrection(Selector Sel,
QualType ObjectType=QualType());
/// LookupImplementedMethodInGlobalPool - Returns the method which has an
/// implementation.
ObjCMethodDecl *LookupImplementedMethodInGlobalPool(Selector Sel);
/// CollectIvarsToConstructOrDestruct - Collect those ivars which require
/// initialization.
void CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI,
SmallVectorImpl<ObjCIvarDecl*> &Ivars);
//===--------------------------------------------------------------------===//
// Statement Parsing Callbacks: SemaStmt.cpp.
public:
/// A wrapper around an Expr* representing a "full expression": an
/// expression that has been passed through ActOnFinishFullExpr. Only
/// Sema::MakeFullExpr can construct a non-null instance (see the friend
/// declaration below), which makes it hard to forget that step.
class FullExprArg {
public:
FullExprArg() : E(nullptr) { }
FullExprArg(Sema &actions) : E(nullptr) { }
// Hand the wrapped expression back as an ExprResult.
ExprResult release() {
return E;
}
// Access the wrapped expression (may be null).
Expr *get() const { return E; }
Expr *operator->() {
return E;
}
private:
// FIXME: No need to make the entire Sema class a friend when it's just
// Sema::MakeFullExpr that needs access to the constructor below.
friend class Sema;
explicit FullExprArg(Expr *expr) : E(expr) {}
Expr *E;
};
/// Wrap \p Arg as a full expression, using the expression's own location
/// (or an invalid location when \p Arg is null) as the context location.
FullExprArg MakeFullExpr(Expr *Arg) {
return MakeFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation());
}
/// Wrap \p Arg as a (non-discarded-value) full expression, finishing it
/// via ActOnFinishFullExpr at location \p CC.
FullExprArg MakeFullExpr(Expr *Arg, SourceLocation CC) {
return FullExprArg(
ActOnFinishFullExpr(Arg, CC, /*DiscardedValue*/ false).get());
}
/// Wrap \p Arg as a full expression whose value is discarded (e.g. an
/// expression statement), finishing it via ActOnFinishFullExpr.
FullExprArg MakeFullDiscardedValueExpr(Expr *Arg) {
ExprResult FE =
ActOnFinishFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation(),
/*DiscardedValue*/ true);
return FullExprArg(FE.get());
}
StmtResult ActOnExprStmt(ExprResult Arg, bool DiscardedValue = true);
StmtResult ActOnExprStmtError();
StmtResult ActOnNullStmt(SourceLocation SemiLoc,
bool HasLeadingEmptyMacro = false);
void ActOnStartOfCompoundStmt(bool IsStmtExpr);
void ActOnFinishOfCompoundStmt();
StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R,
ArrayRef<Stmt *> Elts, bool isStmtExpr);
/// A RAII object to enter scope of a compound statement.
/// Calls ActOnStartOfCompoundStmt on construction and balances it with
/// ActOnFinishOfCompoundStmt on destruction.
class CompoundScopeRAII {
public:
CompoundScopeRAII(Sema &S, bool IsStmtExpr = false) : S(S) {
S.ActOnStartOfCompoundStmt(IsStmtExpr);
}
~CompoundScopeRAII() {
S.ActOnFinishOfCompoundStmt();
}
private:
Sema &S;
};
/// An RAII helper that pops a function scope on exit, unless disable()
/// was called first (e.g. because the scope was consumed by other means).
struct FunctionScopeRAII {
Sema &S;
bool Active; // When false, the destructor is a no-op.
FunctionScopeRAII(Sema &S) : S(S), Active(true) {}
~FunctionScopeRAII() {
if (Active)
S.PopFunctionScopeInfo();
}
// Relinquish responsibility for popping the function scope.
void disable() { Active = false; }
};
StmtResult ActOnDeclStmt(DeclGroupPtrTy Decl,
SourceLocation StartLoc,
SourceLocation EndLoc);
void ActOnForEachDeclStmt(DeclGroupPtrTy Decl);
StmtResult ActOnForEachLValueExpr(Expr *E);
ExprResult ActOnCaseExpr(SourceLocation CaseLoc, ExprResult Val);
StmtResult ActOnCaseStmt(SourceLocation CaseLoc, ExprResult LHS,
SourceLocation DotDotDotLoc, ExprResult RHS,
SourceLocation ColonLoc);
void ActOnCaseStmtBody(Stmt *CaseStmt, Stmt *SubStmt);
StmtResult ActOnDefaultStmt(SourceLocation DefaultLoc,
SourceLocation ColonLoc,
Stmt *SubStmt, Scope *CurScope);
StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl,
SourceLocation ColonLoc, Stmt *SubStmt);
StmtResult ActOnAttributedStmt(SourceLocation AttrLoc,
ArrayRef<const Attr*> Attrs,
Stmt *SubStmt);
class ConditionResult;
StmtResult ActOnIfStmt(SourceLocation IfLoc, bool IsConstexpr,
SourceLocation LParenLoc, Stmt *InitStmt,
ConditionResult Cond, SourceLocation RParenLoc,
Stmt *ThenVal, SourceLocation ElseLoc, Stmt *ElseVal);
StmtResult BuildIfStmt(SourceLocation IfLoc, bool IsConstexpr,
SourceLocation LParenLoc, Stmt *InitStmt,
ConditionResult Cond, SourceLocation RParenLoc,
Stmt *ThenVal, SourceLocation ElseLoc, Stmt *ElseVal);
StmtResult ActOnStartOfSwitchStmt(SourceLocation SwitchLoc,
SourceLocation LParenLoc, Stmt *InitStmt,
ConditionResult Cond,
SourceLocation RParenLoc);
StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc,
Stmt *Switch, Stmt *Body);
StmtResult ActOnWhileStmt(SourceLocation WhileLoc, SourceLocation LParenLoc,
ConditionResult Cond, SourceLocation RParenLoc,
Stmt *Body);
StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body,
SourceLocation WhileLoc, SourceLocation CondLParen,
Expr *Cond, SourceLocation CondRParen);
StmtResult ActOnForStmt(SourceLocation ForLoc,
SourceLocation LParenLoc,
Stmt *First,
ConditionResult Second,
FullExprArg Third,
SourceLocation RParenLoc,
Stmt *Body);
ExprResult CheckObjCForCollectionOperand(SourceLocation forLoc,
Expr *collection);
StmtResult ActOnObjCForCollectionStmt(SourceLocation ForColLoc,
Stmt *First, Expr *collection,
SourceLocation RParenLoc);
StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body);
/// The phase of processing in which a C++ for-range statement is being
/// built; controls how aggressively diagnostics and recovery are applied.
enum BuildForRangeKind {
/// Initial building of a for-range statement.
BFRK_Build,
/// Instantiation or recovery rebuild of a for-range statement. Don't
/// attempt any typo-correction.
BFRK_Rebuild,
/// Determining whether a for-range statement could be built. Avoid any
/// unnecessary or irreversible actions.
BFRK_Check
};
StmtResult ActOnCXXForRangeStmt(Scope *S, SourceLocation ForLoc,
SourceLocation CoawaitLoc,
Stmt *InitStmt,
Stmt *LoopVar,
SourceLocation ColonLoc, Expr *Collection,
SourceLocation RParenLoc,
BuildForRangeKind Kind);
StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc,
SourceLocation CoawaitLoc,
Stmt *InitStmt,
SourceLocation ColonLoc,
Stmt *RangeDecl, Stmt *Begin, Stmt *End,
Expr *Cond, Expr *Inc,
Stmt *LoopVarDecl,
SourceLocation RParenLoc,
BuildForRangeKind Kind);
StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body);
StmtResult ActOnGotoStmt(SourceLocation GotoLoc,
SourceLocation LabelLoc,
LabelDecl *TheDecl);
StmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc,
SourceLocation StarLoc,
Expr *DestExp);
StmtResult ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope);
StmtResult ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope);
void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
CapturedRegionKind Kind, unsigned NumParams);
typedef std::pair<StringRef, QualType> CapturedParamNameType;
void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
CapturedRegionKind Kind,
ArrayRef<CapturedParamNameType> Params,
unsigned OpenMPCaptureLevel = 0);
StmtResult ActOnCapturedRegionEnd(Stmt *S);
void ActOnCapturedRegionError();
RecordDecl *CreateCapturedStmtRecordDecl(CapturedDecl *&CD,
SourceLocation Loc,
unsigned NumParams);
/// Bitmask flags selecting which entities are eligible as copy-elision
/// (NRVO / implicit-move) candidates; combined in the named presets below.
enum CopyElisionSemanticsKind {
CES_Strict = 0, ///< No relaxations.
CES_AllowParameters = 1, ///< Function parameters may be candidates.
CES_AllowDifferentTypes = 2, ///< Candidate type need not match return type.
CES_AllowExceptionVariables = 4, ///< Exception variables may be candidates.
CES_FormerDefault = (CES_AllowParameters),
CES_Default = (CES_AllowParameters | CES_AllowDifferentTypes),
CES_AsIfByStdMove = (CES_AllowParameters | CES_AllowDifferentTypes |
CES_AllowExceptionVariables),
};
VarDecl *getCopyElisionCandidate(QualType ReturnType, Expr *E,
CopyElisionSemanticsKind CESK);
bool isCopyElisionCandidate(QualType ReturnType, const VarDecl *VD,
CopyElisionSemanticsKind CESK);
StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp,
Scope *CurScope);
StmtResult BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp);
StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp);
StmtResult ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
bool IsVolatile, unsigned NumOutputs,
unsigned NumInputs, IdentifierInfo **Names,
MultiExprArg Constraints, MultiExprArg Exprs,
Expr *AsmString, MultiExprArg Clobbers,
unsigned NumLabels,
SourceLocation RParenLoc);
void FillInlineAsmIdentifierInfo(Expr *Res,
llvm::InlineAsmIdentifierInfo &Info);
ExprResult LookupInlineAsmIdentifier(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &Id,
bool IsUnevaluatedContext);
bool LookupInlineAsmField(StringRef Base, StringRef Member,
unsigned &Offset, SourceLocation AsmLoc);
ExprResult LookupInlineAsmVarDeclField(Expr *RefExpr, StringRef Member,
SourceLocation AsmLoc);
StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc,
ArrayRef<Token> AsmToks,
StringRef AsmString,
unsigned NumOutputs, unsigned NumInputs,
ArrayRef<StringRef> Constraints,
ArrayRef<StringRef> Clobbers,
ArrayRef<Expr*> Exprs,
SourceLocation EndLoc);
LabelDecl *GetOrCreateMSAsmLabel(StringRef ExternalLabelName,
SourceLocation Location,
bool AlwaysCreate);
VarDecl *BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType ExceptionType,
SourceLocation StartLoc,
SourceLocation IdLoc, IdentifierInfo *Id,
bool Invalid = false);
Decl *ActOnObjCExceptionDecl(Scope *S, Declarator &D);
StmtResult ActOnObjCAtCatchStmt(SourceLocation AtLoc, SourceLocation RParen,
Decl *Parm, Stmt *Body);
StmtResult ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body);
StmtResult ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try,
MultiStmtArg Catch, Stmt *Finally);
StmtResult BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw);
StmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw,
Scope *CurScope);
ExprResult ActOnObjCAtSynchronizedOperand(SourceLocation atLoc,
Expr *operand);
StmtResult ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc,
Expr *SynchExpr,
Stmt *SynchBody);
StmtResult ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body);
VarDecl *BuildExceptionDeclaration(Scope *S, TypeSourceInfo *TInfo,
SourceLocation StartLoc,
SourceLocation IdLoc,
IdentifierInfo *Id);
Decl *ActOnExceptionDeclarator(Scope *S, Declarator &D);
StmtResult ActOnCXXCatchBlock(SourceLocation CatchLoc,
Decl *ExDecl, Stmt *HandlerBlock);
StmtResult ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock,
ArrayRef<Stmt *> Handlers);
StmtResult ActOnSEHTryBlock(bool IsCXXTry, // try (true) or __try (false) ?
SourceLocation TryLoc, Stmt *TryBlock,
Stmt *Handler);
StmtResult ActOnSEHExceptBlock(SourceLocation Loc,
Expr *FilterExpr,
Stmt *Block);
void ActOnStartSEHFinallyBlock();
void ActOnAbortSEHFinallyBlock();
StmtResult ActOnFinishSEHFinallyBlock(SourceLocation Loc, Stmt *Block);
StmtResult ActOnSEHLeaveStmt(SourceLocation Loc, Scope *CurScope);
void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock);
bool ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const;
/// If it's a file scoped decl that must warn if not used, keep track
/// of it.
void MarkUnusedFileScopedDecl(const DeclaratorDecl *D);
/// DiagnoseUnusedExprResult - If the statement passed in is an expression
/// whose result is unused, warn.
void DiagnoseUnusedExprResult(const Stmt *S);
void DiagnoseUnusedNestedTypedefs(const RecordDecl *D);
void DiagnoseUnusedDecl(const NamedDecl *ND);
/// Emit \p DiagID if statement located on \p StmtLoc has a suspicious null
/// statement as a \p Body, and it is located on the same line.
///
/// This helps prevent bugs due to typos, such as:
/// if (condition);
/// do_stuff();
void DiagnoseEmptyStmtBody(SourceLocation StmtLoc,
const Stmt *Body,
unsigned DiagID);
/// Warn if a for/while loop statement \p S, which is followed by
/// \p PossibleBody, has a suspicious null statement as a body.
void DiagnoseEmptyLoopBody(const Stmt *S,
const Stmt *PossibleBody);
/// Warn if a value is moved to itself.
void DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr,
SourceLocation OpLoc);
/// Warn if we're implicitly casting from a _Nullable pointer type to a
/// _Nonnull one.
void diagnoseNullableToNonnullConversion(QualType DstType, QualType SrcType,
SourceLocation Loc);
/// Warn when implicitly casting 0 to nullptr.
void diagnoseZeroToNullptrConversion(CastKind Kind, const Expr *E);
/// Enter a declaration-parsing context whose diagnostics are delayed into
/// \p pool; the returned state must be passed to PopParsingDeclaration.
ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) {
return DelayedDiagnostics.push(pool);
}
void PopParsingDeclaration(ParsingDeclState state, Decl *decl);
typedef ProcessingContextState ParsingClassState;
/// Enter a class-parsing context: bumps the nesting depth and suspends
/// delayed diagnostics. The returned state must be passed to
/// PopParsingClass.
ParsingClassState PushParsingClass() {
ParsingClassDepth++;
return DelayedDiagnostics.pushUndelayed();
}
/// Leave a class-parsing context previously entered with PushParsingClass.
void PopParsingClass(ParsingClassState state) {
ParsingClassDepth--;
DelayedDiagnostics.popUndelayed(state);
}
void redelayDiagnostics(sema::DelayedDiagnosticPool &pool);
void DiagnoseAvailabilityOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
const ObjCInterfaceDecl *UnknownObjCClass,
bool ObjCPropertyAccess,
bool AvoidPartialAvailabilityChecks = false,
ObjCInterfaceDecl *ClassReceiver = nullptr);
bool makeUnavailableInSystemHeader(SourceLocation loc,
UnavailableAttr::ImplicitReason reason);
/// Issue any -Wunguarded-availability warnings in \c FD
void DiagnoseUnguardedAvailabilityViolations(Decl *FD);
void handleDelayedAvailabilityCheck(sema::DelayedDiagnostic &DD, Decl *Ctx);
//===--------------------------------------------------------------------===//
// Expression Parsing Callbacks: SemaExpr.cpp.
bool CanUseDecl(NamedDecl *D, bool TreatUnavailableAsInvalid);
bool DiagnoseUseOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
const ObjCInterfaceDecl *UnknownObjCClass = nullptr,
bool ObjCPropertyAccess = false,
bool AvoidPartialAvailabilityChecks = false,
ObjCInterfaceDecl *ClassReciever = nullptr);
void NoteDeletedFunction(FunctionDecl *FD);
void NoteDeletedInheritingConstructor(CXXConstructorDecl *CD);
bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD,
ObjCMethodDecl *Getter,
SourceLocation Loc);
void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc,
ArrayRef<Expr *> Args);
void PushExpressionEvaluationContext(
ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr,
ExpressionEvaluationContextRecord::ExpressionKind Type =
ExpressionEvaluationContextRecord::EK_Other);
enum ReuseLambdaContextDecl_t { ReuseLambdaContextDecl };
void PushExpressionEvaluationContext(
ExpressionEvaluationContext NewContext, ReuseLambdaContextDecl_t,
ExpressionEvaluationContextRecord::ExpressionKind Type =
ExpressionEvaluationContextRecord::EK_Other);
void PopExpressionEvaluationContext();
void DiscardCleanupsInEvaluationContext();
ExprResult TransformToPotentiallyEvaluated(Expr *E);
ExprResult HandleExprEvaluationContextForTypeof(Expr *E);
ExprResult CheckUnevaluatedOperand(Expr *E);
void CheckUnusedVolatileAssignment(Expr *E);
ExprResult ActOnConstantExpression(ExprResult Res);
// Functions for marking a declaration referenced. These functions also
// contain the relevant logic for marking if a reference to a function or
// variable is an odr-use (in the C++11 sense). There are separate variants
// for expressions referring to a decl; these exist because odr-use marking
// needs to be delayed for some constant variables when we build one of the
// named expressions.
//
// MightBeOdrUse indicates whether the use could possibly be an odr-use, and
// should usually be true. This only needs to be set to false if the lack of
// odr-use cannot be determined from the current context (for instance,
// because the name denotes a virtual function and was written without an
// explicit nested-name-specifier).
void MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool MightBeOdrUse);
void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func,
bool MightBeOdrUse = true);
void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var);
void MarkDeclRefReferenced(DeclRefExpr *E, const Expr *Base = nullptr);
void MarkMemberReferenced(MemberExpr *E);
void MarkFunctionParmPackReferenced(FunctionParmPackExpr *E);
void MarkCaptureUsedInEnclosingContext(VarDecl *Capture, SourceLocation Loc,
unsigned CapturingScopeIndex);
ExprResult CheckLValueToRValueConversionOperand(Expr *E);
void CleanupVarDeclMarking();
/// The kind of capture requested when calling tryCaptureVariable:
/// implicit (block or lambda), or an explicit by-value / by-reference
/// lambda capture.
enum TryCaptureKind {
TryCapture_Implicit, TryCapture_ExplicitByVal, TryCapture_ExplicitByRef
};
/// Try to capture the given variable.
///
/// \param Var The variable to capture.
///
/// \param Loc The location at which the capture occurs.
///
/// \param Kind The kind of capture, which may be implicit (for either a
/// block or a lambda), or explicit by-value or by-reference (for a lambda).
///
/// \param EllipsisLoc The location of the ellipsis, if one is provided in
/// an explicit lambda capture.
///
/// \param BuildAndDiagnose Whether we are actually supposed to add the
/// captures or diagnose errors. If false, this routine merely checks whether
/// the capture can occur without performing the capture itself or complaining
/// if the variable cannot be captured.
///
/// \param CaptureType Will be set to the type of the field used to capture
/// this variable in the innermost block or lambda. Only valid when the
/// variable can be captured.
///
/// \param DeclRefType Will be set to the type of a reference to the capture
/// from within the current scope. Only valid when the variable can be
/// captured.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// variables that may or may not be used in certain specializations of
/// a nested generic lambda.
///
/// \returns true if an error occurred (i.e., the variable cannot be
/// captured) and false if the capture succeeded.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind,
SourceLocation EllipsisLoc, bool BuildAndDiagnose,
QualType &CaptureType,
QualType &DeclRefType,
const unsigned *const FunctionScopeIndexToStopAt);
/// Try to capture the given variable.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc,
TryCaptureKind Kind = TryCapture_Implicit,
SourceLocation EllipsisLoc = SourceLocation());
/// Checks if the variable must be captured.
bool NeedToCaptureVariable(VarDecl *Var, SourceLocation Loc);
/// Given a variable, determine the type that a reference to that
/// variable will have in the given scope.
QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc);
/// Mark all of the declarations referenced within a particular AST node as
/// referenced. Used when template instantiation instantiates a non-dependent
/// type -- entities referenced by the type are now referenced.
void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T);
void MarkDeclarationsReferencedInExpr(Expr *E,
bool SkipLocalVariables = false);
/// Try to recover by turning the given expression into a
/// call. Returns true if recovery was attempted or an error was
/// emitted; this may also leave the ExprResult invalid.
bool tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD,
bool ForceComplain = false,
bool (*IsPlausibleResult)(QualType) = nullptr);
/// Figure out if an expression could be turned into a call.
bool tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy,
UnresolvedSetImpl &NonTemplateOverloads);
/// Try to convert an expression \p E to type \p Ty. Returns the result of the
/// conversion.
ExprResult tryConvertExprToType(Expr *E, QualType Ty);
/// Conditionally issue a diagnostic based on the current
/// evaluation context.
///
/// \param Statement If Statement is non-null, delay reporting the
/// diagnostic until the function body is parsed, and then do a basic
/// reachability analysis to determine if the statement is reachable.
/// If it is unreachable, the diagnostic will not be emitted.
bool DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement,
const PartialDiagnostic &PD);
/// Similar, but diagnostic is only produced if all the specified statements
/// are reachable.
bool DiagRuntimeBehavior(SourceLocation Loc, ArrayRef<const Stmt*> Stmts,
const PartialDiagnostic &PD);
// Primary Expressions.
SourceRange getExprRange(Expr *E) const;
ExprResult ActOnIdExpression(
Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
UnqualifiedId &Id, bool HasTrailingLParen, bool IsAddressOfOperand,
CorrectionCandidateCallback *CCC = nullptr,
bool IsInlineAsmIdentifier = false, Token *KeywordReplacement = nullptr);
void DecomposeUnqualifiedId(const UnqualifiedId &Id,
TemplateArgumentListInfo &Buffer,
DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *&TemplateArgs);
bool
DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R,
CorrectionCandidateCallback &CCC,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
ArrayRef<Expr *> Args = None, TypoExpr **Out = nullptr);
DeclResult LookupIvarInObjCMethod(LookupResult &Lookup, Scope *S,
IdentifierInfo *II);
ExprResult BuildIvarRefExpr(Scope *S, SourceLocation Loc, ObjCIvarDecl *IV);
ExprResult LookupInObjCMethod(LookupResult &LookUp, Scope *S,
IdentifierInfo *II,
bool AllowBuiltinCreation=false);
ExprResult ActOnDependentIdExpression(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
bool isAddressOfOperand,
const TemplateArgumentListInfo *TemplateArgs);
/// If \p D cannot be odr-used in the current expression evaluation context,
/// return a reason explaining why. Otherwise, return NOUR_None.
NonOdrUseReason getNonOdrUseReasonInCurrentContext(ValueDecl *D);
DeclRefExpr *BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
SourceLocation Loc,
const CXXScopeSpec *SS = nullptr);
DeclRefExpr *
BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
const DeclarationNameInfo &NameInfo,
const CXXScopeSpec *SS = nullptr,
NamedDecl *FoundD = nullptr,
SourceLocation TemplateKWLoc = SourceLocation(),
const TemplateArgumentListInfo *TemplateArgs = nullptr);
DeclRefExpr *
BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
const DeclarationNameInfo &NameInfo,
NestedNameSpecifierLoc NNS,
NamedDecl *FoundD = nullptr,
SourceLocation TemplateKWLoc = SourceLocation(),
const TemplateArgumentListInfo *TemplateArgs = nullptr);
ExprResult
BuildAnonymousStructUnionMemberReference(
const CXXScopeSpec &SS,
SourceLocation nameLoc,
IndirectFieldDecl *indirectField,
DeclAccessPair FoundDecl = DeclAccessPair::make(nullptr, AS_none),
Expr *baseObjectExpr = nullptr,
SourceLocation opLoc = SourceLocation());
ExprResult BuildPossibleImplicitMemberExpr(
const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs, const Scope *S,
UnresolvedLookupExpr *AsULE = nullptr);
ExprResult BuildImplicitMemberExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
bool IsDefiniteInstance,
const Scope *S);
bool UseArgumentDependentLookup(const CXXScopeSpec &SS,
const LookupResult &R,
bool HasTrailingLParen);
ExprResult
BuildQualifiedDeclarationNameExpr(CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
bool IsAddressOfOperand, const Scope *S,
TypeSourceInfo **RecoveryTSI = nullptr);
ExprResult BuildDependentDeclRefExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS,
LookupResult &R,
bool NeedsADL,
bool AcceptInvalidDecl = false);
ExprResult BuildDeclarationNameExpr(
const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, NamedDecl *D,
NamedDecl *FoundD = nullptr,
const TemplateArgumentListInfo *TemplateArgs = nullptr,
bool AcceptInvalidDecl = false);
ExprResult BuildLiteralOperatorCall(LookupResult &R,
DeclarationNameInfo &SuffixInfo,
ArrayRef<Expr *> Args,
SourceLocation LitEndLoc,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr);
ExprResult BuildPredefinedExpr(SourceLocation Loc,
PredefinedExpr::IdentKind IK);
ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind);
ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val);
ExprResult BuildUniqueStableName(SourceLocation Loc, TypeSourceInfo *Operand);
ExprResult BuildUniqueStableName(SourceLocation Loc, Expr *E);
ExprResult ActOnUniqueStableNameExpr(SourceLocation OpLoc,
SourceLocation LParen,
SourceLocation RParen, ParsedType Ty);
ExprResult ActOnUniqueStableNameExpr(SourceLocation OpLoc,
SourceLocation LParen,
SourceLocation RParen, Expr *E);
bool CheckLoopHintExpr(Expr *E, SourceLocation Loc);
ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = nullptr);
ExprResult ActOnCharacterConstant(const Token &Tok,
Scope *UDLScope = nullptr);
ExprResult ActOnParenExpr(SourceLocation L, SourceLocation R, Expr *E);
ExprResult ActOnParenListExpr(SourceLocation L,
SourceLocation R,
MultiExprArg Val);
/// ActOnStringLiteral - The specified tokens were lexed as pasted string
/// fragments (e.g. "foo" "bar" L"baz").
ExprResult ActOnStringLiteral(ArrayRef<Token> StringToks,
Scope *UDLScope = nullptr);
ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc,
SourceLocation DefaultLoc,
SourceLocation RParenLoc,
Expr *ControllingExpr,
ArrayRef<ParsedType> ArgTypes,
ArrayRef<Expr *> ArgExprs);
ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc,
SourceLocation DefaultLoc,
SourceLocation RParenLoc,
Expr *ControllingExpr,
ArrayRef<TypeSourceInfo *> Types,
ArrayRef<Expr *> Exprs);
// Binary/Unary Operators. 'Tok' is the token for the operator.
ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc,
Expr *InputExpr);
ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc,
UnaryOperatorKind Opc, Expr *Input);
ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc,
tok::TokenKind Op, Expr *Input);
bool isQualifiedMemberAccess(Expr *E);
QualType CheckAddressOfOperand(ExprResult &Operand, SourceLocation OpLoc);
ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo,
SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind,
SourceRange R);
ExprResult CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind);
ExprResult
ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind,
bool IsType, void *TyOrEx,
SourceRange ArgRange);
ExprResult CheckPlaceholderExpr(Expr *E);
bool CheckVecStepExpr(Expr *E);
bool CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind);
bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation OpLoc,
SourceRange ExprRange,
UnaryExprOrTypeTrait ExprKind);
ExprResult ActOnSizeofParameterPackExpr(Scope *S,
SourceLocation OpLoc,
IdentifierInfo &Name,
SourceLocation NameLoc,
SourceLocation RParenLoc);
ExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc,
tok::TokenKind Kind, Expr *Input);
ExprResult ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc,
Expr *Idx, SourceLocation RLoc);
ExprResult CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc,
Expr *Idx, SourceLocation RLoc);
ExprResult CreateBuiltinMatrixSubscriptExpr(Expr *Base, Expr *RowIdx,
Expr *ColumnIdx,
SourceLocation RBLoc);
ExprResult ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc,
Expr *LowerBound,
SourceLocation ColonLocFirst,
SourceLocation ColonLocSecond,
Expr *Length, Expr *Stride,
SourceLocation RBLoc);
ExprResult ActOnOMPArrayShapingExpr(Expr *Base, SourceLocation LParenLoc,
SourceLocation RParenLoc,
ArrayRef<Expr *> Dims,
ArrayRef<SourceRange> Brackets);
/// Data structure for one parsed OpenMP iterator-expression specifier:
/// the iterator's name, its declared type, and its begin:end[:step] range.
/// Consumed by ActOnOMPIteratorExpr.
struct OMPIteratorData {
// Name of the iterator variable (null until parsed).
IdentifierInfo *DeclIdent = nullptr;
// Location of the iterator variable's name.
SourceLocation DeclIdentLoc;
// Declared type of the iterator variable.
ParsedType Type;
// The iterator's range expressions (begin/end and optional step).
OMPIteratorExpr::IteratorRange Range;
// Location of the '=' between the declarator and the range.
SourceLocation AssignLoc;
// Location of the first ':' in the range.
SourceLocation ColonLoc;
// Location of the second ':' (preceding the step), if present.
SourceLocation SecColonLoc;
};
ExprResult ActOnOMPIteratorExpr(Scope *S, SourceLocation IteratorKwLoc,
SourceLocation LLoc, SourceLocation RLoc,
ArrayRef<OMPIteratorData> Data);
// This struct is for use by ActOnMemberAccess to allow
// BuildMemberReferenceExpr to be able to reinvoke ActOnMemberAccess after
// changing the access operator from a '.' to a '->' (to see if that is the
// change needed to fix an error about an unknown member, e.g. when the class
// defines a custom operator->).
struct ActOnMemberAccessExtraArgs {
// Scope in which the original member access was parsed.
Scope *S;
// The member name as originally written, so the access can be re-parsed.
UnqualifiedId &Id;
// Objective-C implementation decl context, forwarded on reinvocation.
Decl *ObjCImpDecl;
};
ExprResult BuildMemberReferenceExpr(
Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow,
CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S,
ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);
ExprResult
BuildMemberReferenceExpr(Expr *Base, QualType BaseType, SourceLocation OpLoc,
bool IsArrow, const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope, LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S,
bool SuppressQualifierCheck = false,
ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);
ExprResult BuildFieldReferenceExpr(Expr *BaseExpr, bool IsArrow,
SourceLocation OpLoc,
const CXXScopeSpec &SS, FieldDecl *Field,
DeclAccessPair FoundDecl,
const DeclarationNameInfo &MemberNameInfo);
ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow);
bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType BaseType,
const CXXScopeSpec &SS,
const LookupResult &R);
ExprResult ActOnDependentMemberExpr(Expr *Base, QualType BaseType,
bool IsArrow, SourceLocation OpLoc,
const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult ActOnMemberAccessExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &Member,
Decl *ObjCImpDecl);
MemberExpr *
BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc,
const CXXScopeSpec *SS, SourceLocation TemplateKWLoc,
ValueDecl *Member, DeclAccessPair FoundDecl,
bool HadMultipleCandidates,
const DeclarationNameInfo &MemberNameInfo, QualType Ty,
ExprValueKind VK, ExprObjectKind OK,
const TemplateArgumentListInfo *TemplateArgs = nullptr);
MemberExpr *
BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc,
NestedNameSpecifierLoc NNS, SourceLocation TemplateKWLoc,
ValueDecl *Member, DeclAccessPair FoundDecl,
bool HadMultipleCandidates,
const DeclarationNameInfo &MemberNameInfo, QualType Ty,
ExprValueKind VK, ExprObjectKind OK,
const TemplateArgumentListInfo *TemplateArgs = nullptr);
void ActOnDefaultCtorInitializers(Decl *CDtorDecl);
bool ConvertArgumentsForCall(CallExpr *Call, Expr *Fn,
FunctionDecl *FDecl,
const FunctionProtoType *Proto,
ArrayRef<Expr *> Args,
SourceLocation RParenLoc,
bool ExecConfig = false);
void CheckStaticArrayArgument(SourceLocation CallLoc,
ParmVarDecl *Param,
const Expr *ArgExpr);
/// ActOnCallExpr - Handle a call to Fn with the specified array of arguments.
/// This provides the location of the left/right parens and a list of comma
/// locations.
ExprResult ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
MultiExprArg ArgExprs, SourceLocation RParenLoc,
Expr *ExecConfig = nullptr);
ExprResult BuildCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
MultiExprArg ArgExprs, SourceLocation RParenLoc,
Expr *ExecConfig = nullptr,
bool IsExecConfig = false);
// Argument ordering convention accepted by BuildAtomicExpr: either the
// order the __atomic_*/__c11_atomic_* builtins take them (API) or the
// order they are stored in the AST node (AST).
enum class AtomicArgumentOrder { API, AST };
ExprResult
BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange,
SourceLocation RParenLoc, MultiExprArg Args,
AtomicExpr::AtomicOp Op,
AtomicArgumentOrder ArgOrder = AtomicArgumentOrder::API);
ExprResult
BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl, SourceLocation LParenLoc,
ArrayRef<Expr *> Arg, SourceLocation RParenLoc,
Expr *Config = nullptr, bool IsExecConfig = false,
ADLCallKind UsesADL = ADLCallKind::NotADL);
ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc,
MultiExprArg ExecConfig,
SourceLocation GGGLoc);
ExprResult ActOnCastExpr(Scope *S, SourceLocation LParenLoc,
Declarator &D, ParsedType &Ty,
SourceLocation RParenLoc, Expr *CastExpr);
ExprResult BuildCStyleCastExpr(SourceLocation LParenLoc,
TypeSourceInfo *Ty,
SourceLocation RParenLoc,
Expr *Op);
CastKind PrepareScalarCast(ExprResult &src, QualType destType);
/// Build an altivec or OpenCL literal.
ExprResult BuildVectorLiteral(SourceLocation LParenLoc,
SourceLocation RParenLoc, Expr *E,
TypeSourceInfo *TInfo);
ExprResult MaybeConvertParenListExprToParenExpr(Scope *S, Expr *ME);
ExprResult ActOnCompoundLiteral(SourceLocation LParenLoc,
ParsedType Ty,
SourceLocation RParenLoc,
Expr *InitExpr);
ExprResult BuildCompoundLiteralExpr(SourceLocation LParenLoc,
TypeSourceInfo *TInfo,
SourceLocation RParenLoc,
Expr *LiteralExpr);
ExprResult ActOnInitList(SourceLocation LBraceLoc,
MultiExprArg InitArgList,
SourceLocation RBraceLoc);
ExprResult BuildInitList(SourceLocation LBraceLoc,
MultiExprArg InitArgList,
SourceLocation RBraceLoc);
ExprResult ActOnDesignatedInitializer(Designation &Desig,
SourceLocation EqualOrColonLoc,
bool GNUSyntax,
ExprResult Init);
private:
static BinaryOperatorKind ConvertTokenKindToBinaryOpcode(tok::TokenKind Kind);
public:
ExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc,
tok::TokenKind Kind, Expr *LHSExpr, Expr *RHSExpr);
ExprResult BuildBinOp(Scope *S, SourceLocation OpLoc,
BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr);
ExprResult CreateBuiltinBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc,
Expr *LHSExpr, Expr *RHSExpr);
void LookupBinOp(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opc,
UnresolvedSetImpl &Functions);
void DiagnoseCommaOperator(const Expr *LHS, SourceLocation Loc);
/// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null
/// in the case of the GNU conditional expr extension.
ExprResult ActOnConditionalOp(SourceLocation QuestionLoc,
SourceLocation ColonLoc,
Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr);
/// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo".
ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc,
LabelDecl *TheDecl);
void ActOnStartStmtExpr();
ExprResult ActOnStmtExpr(Scope *S, SourceLocation LPLoc, Stmt *SubStmt,
SourceLocation RPLoc);
ExprResult BuildStmtExpr(SourceLocation LPLoc, Stmt *SubStmt,
SourceLocation RPLoc, unsigned TemplateDepth);
// Handle the final expression in a statement expression.
ExprResult ActOnStmtExprResult(ExprResult E);
void ActOnStmtExprError();
// __builtin_offsetof(type, identifier(.identifier|[expr])*)
/// One component of a __builtin_offsetof designator: either a member
/// access (.ident) or an array index ([expr]).
struct OffsetOfComponent {
// Source range covered by this component.
SourceLocation LocStart, LocEnd;
bool isBrackets; // true if [expr], false if .ident
union {
IdentifierInfo *IdentInfo; // valid when !isBrackets (.ident form)
Expr *E; // valid when isBrackets ([expr] form)
} U;
};
/// __builtin_offsetof(type, a.b[123][456].c)
ExprResult BuildBuiltinOffsetOf(SourceLocation BuiltinLoc,
TypeSourceInfo *TInfo,
ArrayRef<OffsetOfComponent> Components,
SourceLocation RParenLoc);
ExprResult ActOnBuiltinOffsetOf(Scope *S,
SourceLocation BuiltinLoc,
SourceLocation TypeLoc,
ParsedType ParsedArgTy,
ArrayRef<OffsetOfComponent> Components,
SourceLocation RParenLoc);
// __builtin_choose_expr(constExpr, expr1, expr2)
ExprResult ActOnChooseExpr(SourceLocation BuiltinLoc,
Expr *CondExpr, Expr *LHSExpr,
Expr *RHSExpr, SourceLocation RPLoc);
// __builtin_va_arg(expr, type)
ExprResult ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty,
SourceLocation RPLoc);
ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E,
TypeSourceInfo *TInfo, SourceLocation RPLoc);
// __builtin_LINE(), __builtin_FUNCTION(), __builtin_FILE(),
// __builtin_COLUMN()
ExprResult ActOnSourceLocExpr(SourceLocExpr::IdentKind Kind,
SourceLocation BuiltinLoc,
SourceLocation RPLoc);
// Build a potentially resolved SourceLocExpr.
ExprResult BuildSourceLocExpr(SourceLocExpr::IdentKind Kind,
SourceLocation BuiltinLoc, SourceLocation RPLoc,
DeclContext *ParentContext);
// __null
ExprResult ActOnGNUNullExpr(SourceLocation TokenLoc);
bool CheckCaseExpression(Expr *E);
/// Describes the result of an "if-exists" condition check, used to
/// implement the Microsoft __if_exists/__if_not_exists extension
/// (see CheckMicrosoftIfExistsSymbol below).
enum IfExistsResult {
/// The symbol exists.
IER_Exists,
/// The symbol does not exist.
IER_DoesNotExist,
/// The name is a dependent name, so the results will differ
/// from one instantiation to the next.
IER_Dependent,
/// An error occurred.
IER_Error
};
IfExistsResult
CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS,
const DeclarationNameInfo &TargetNameInfo);
IfExistsResult
CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc,
bool IsIfExists, CXXScopeSpec &SS,
UnqualifiedId &Name);
StmtResult BuildMSDependentExistsStmt(SourceLocation KeywordLoc,
bool IsIfExists,
NestedNameSpecifierLoc QualifierLoc,
DeclarationNameInfo NameInfo,
Stmt *Nested);
StmtResult ActOnMSDependentExistsStmt(SourceLocation KeywordLoc,
bool IsIfExists,
CXXScopeSpec &SS, UnqualifiedId &Name,
Stmt *Nested);
//===------------------------- "Block" Extension ------------------------===//
/// ActOnBlockStart - This callback is invoked when a block literal is
/// started.
void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope);
/// ActOnBlockArguments - This callback allows processing of block arguments.
/// If there are no arguments, this is still invoked.
void ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo,
Scope *CurScope);
/// ActOnBlockError - If there is an error parsing a block, this callback
/// is invoked to pop the information about the block from the action impl.
void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope);
/// ActOnBlockStmtExpr - This is called when the body of a block statement
/// literal was successfully completed. ^(int x){...}
ExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body,
Scope *CurScope);
//===---------------------------- Clang Extensions ----------------------===//
/// __builtin_convertvector(...)
ExprResult ActOnConvertVectorExpr(Expr *E, ParsedType ParsedDestTy,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
//===---------------------------- OpenCL Features -----------------------===//
/// __builtin_astype(...)
ExprResult ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
//===---------------------------- C++ Features --------------------------===//
// Act on C++ namespaces
Decl *ActOnStartNamespaceDef(Scope *S, SourceLocation InlineLoc,
SourceLocation NamespaceLoc,
SourceLocation IdentLoc, IdentifierInfo *Ident,
SourceLocation LBrace,
const ParsedAttributesView &AttrList,
UsingDirectiveDecl *&UsingDecl);
void ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace);
NamespaceDecl *getStdNamespace() const;
NamespaceDecl *getOrCreateStdNamespace();
NamespaceDecl *lookupStdExperimentalNamespace();
CXXRecordDecl *getStdBadAlloc() const;
EnumDecl *getStdAlignValT() const;
private:
// A cache representing if we've fully checked the various comparison category
// types stored in ASTContext. The bit-index corresponds to the integer value
// of a ComparisonCategoryType enumerator.
llvm::SmallBitVector FullyCheckedComparisonCategories;
ValueDecl *tryLookupCtorInitMemberDecl(CXXRecordDecl *ClassDecl,
CXXScopeSpec &SS,
ParsedType TemplateTypeTy,
IdentifierInfo *MemberOrBase);
public:
/// How a comparison category type came to be required; passed to
/// CheckComparisonCategoryType to select the appropriate diagnostics.
enum class ComparisonCategoryUsage {
/// The '<=>' operator was used in an expression and a builtin operator
/// was selected.
OperatorInExpression,
/// A defaulted 'operator<=>' needed the comparison category. This
/// typically only applies to 'std::strong_ordering', due to the implicit
/// fallback return value.
DefaultedOperator,
};
/// Lookup the specified comparison category types in the standard
/// library, and check the VarDecls possibly returned by the operator<=>
/// builtins for that type.
///
/// \return The type of the comparison category type corresponding to the
/// specified Kind, or a null type if an error occurs
QualType CheckComparisonCategoryType(ComparisonCategoryType Kind,
SourceLocation Loc,
ComparisonCategoryUsage Usage);
/// Tests whether Ty is an instance of std::initializer_list and, if
/// it is and Element is not NULL, assigns the element type to Element.
bool isStdInitializerList(QualType Ty, QualType *Element);
/// Looks for the std::initializer_list template and instantiates it
/// with Element, or emits an error if it's not found.
///
/// \returns The instantiated template, or null on error.
QualType BuildStdInitializerList(QualType Element, SourceLocation Loc);
/// Determine whether Ctor is an initializer-list constructor, as
/// defined in [dcl.init.list]p2.
bool isInitListConstructor(const FunctionDecl *Ctor);
Decl *ActOnUsingDirective(Scope *CurScope, SourceLocation UsingLoc,
SourceLocation NamespcLoc, CXXScopeSpec &SS,
SourceLocation IdentLoc,
IdentifierInfo *NamespcName,
const ParsedAttributesView &AttrList);
void PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir);
Decl *ActOnNamespaceAliasDef(Scope *CurScope,
SourceLocation NamespaceLoc,
SourceLocation AliasLoc,
IdentifierInfo *Alias,
CXXScopeSpec &SS,
SourceLocation IdentLoc,
IdentifierInfo *Ident);
void HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow);
bool CheckUsingShadowDecl(UsingDecl *UD, NamedDecl *Target,
const LookupResult &PreviousDecls,
UsingShadowDecl *&PrevShadow);
UsingShadowDecl *BuildUsingShadowDecl(Scope *S, UsingDecl *UD,
NamedDecl *Target,
UsingShadowDecl *PrevDecl);
bool CheckUsingDeclRedeclaration(SourceLocation UsingLoc,
bool HasTypenameKeyword,
const CXXScopeSpec &SS,
SourceLocation NameLoc,
const LookupResult &Previous);
bool CheckUsingDeclQualifier(SourceLocation UsingLoc,
bool HasTypename,
const CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
SourceLocation NameLoc);
NamedDecl *BuildUsingDeclaration(
Scope *S, AccessSpecifier AS, SourceLocation UsingLoc,
bool HasTypenameKeyword, SourceLocation TypenameLoc, CXXScopeSpec &SS,
DeclarationNameInfo NameInfo, SourceLocation EllipsisLoc,
const ParsedAttributesView &AttrList, bool IsInstantiation);
NamedDecl *BuildUsingPackDecl(NamedDecl *InstantiatedFrom,
ArrayRef<NamedDecl *> Expansions);
bool CheckInheritingConstructorUsingDecl(UsingDecl *UD);
/// Given a derived-class using shadow declaration for a constructor and the
/// corresponding base class constructor, find or create the implicit
/// synthesized derived class constructor to use for this initialization.
CXXConstructorDecl *
findInheritingConstructor(SourceLocation Loc, CXXConstructorDecl *BaseCtor,
ConstructorUsingShadowDecl *DerivedShadow);
Decl *ActOnUsingDeclaration(Scope *CurScope, AccessSpecifier AS,
SourceLocation UsingLoc,
SourceLocation TypenameLoc, CXXScopeSpec &SS,
UnqualifiedId &Name, SourceLocation EllipsisLoc,
const ParsedAttributesView &AttrList);
Decl *ActOnAliasDeclaration(Scope *CurScope, AccessSpecifier AS,
MultiTemplateParamsArg TemplateParams,
SourceLocation UsingLoc, UnqualifiedId &Name,
const ParsedAttributesView &AttrList,
TypeResult Type, Decl *DeclFromDeclSpec);
/// BuildCXXConstructExpr - Creates a complete call to a constructor,
/// including handling of its default argument expressions.
///
/// \param ConstructKind - a CXXConstructExpr::ConstructionKind
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
NamedDecl *FoundDecl,
CXXConstructorDecl *Constructor, MultiExprArg Exprs,
bool HadMultipleCandidates, bool IsListInitialization,
bool IsStdInitListInitialization,
bool RequiresZeroInit, unsigned ConstructKind,
SourceRange ParenRange);
/// Build a CXXConstructExpr whose constructor has already been resolved if
/// it denotes an inherited constructor.
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
CXXConstructorDecl *Constructor, bool Elidable,
MultiExprArg Exprs,
bool HadMultipleCandidates, bool IsListInitialization,
bool IsStdInitListInitialization,
bool RequiresZeroInit, unsigned ConstructKind,
SourceRange ParenRange);
// FIXME: Can we remove this and have the above BuildCXXConstructExpr check if
// the constructor can be elidable?
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
NamedDecl *FoundDecl,
CXXConstructorDecl *Constructor, bool Elidable,
MultiExprArg Exprs, bool HadMultipleCandidates,
bool IsListInitialization,
bool IsStdInitListInitialization, bool RequiresZeroInit,
unsigned ConstructKind, SourceRange ParenRange);
ExprResult BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field);
/// Instantiate or parse a C++ default argument expression as necessary.
/// Return true on error.
bool CheckCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD,
ParmVarDecl *Param);
/// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating
/// the default expr if needed.
ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc,
FunctionDecl *FD,
ParmVarDecl *Param);
/// FinalizeVarWithDestructor - Prepare for calling destructor on the
/// constructed variable.
void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType);
/// Helper class that collects exception specifications for
/// implicitly-declared special member functions.
///
/// Each member (or expression) the implicit function would invoke is fed
/// in via CalledDecl/CalledExpr/CalledStmt; the computed specification is
/// then read back via getExceptionSpecType()/getExceptionSpec().
class ImplicitExceptionSpecification {
// Pointer to allow copying
Sema *Self;
// We order exception specifications thus:
// noexcept is the most restrictive, but is only used in C++11.
// throw() comes next.
// Then a throw(collected exceptions)
// Finally no specification, which is expressed as noexcept(false).
// throw(...) is used instead if any called function uses it.
ExceptionSpecificationType ComputedEST;
// Canonical types already recorded, to deduplicate Exceptions.
llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen;
// The collected throw() exception types, in order of first appearance.
SmallVector<QualType, 4> Exceptions;
// Drop all collected exception types and the dedup set.
void ClearExceptions() {
ExceptionsSeen.clear();
Exceptions.clear();
}
public:
// Start from the most restrictive specification the dialect allows:
// noexcept in C++11 and later, throw() (EST_DynamicNone) otherwise.
explicit ImplicitExceptionSpecification(Sema &Self)
: Self(&Self), ComputedEST(EST_BasicNoexcept) {
if (!Self.getLangOpts().CPlusPlus11)
ComputedEST = EST_DynamicNone;
}
/// Get the computed exception specification type.
ExceptionSpecificationType getExceptionSpecType() const {
assert(!isComputedNoexcept(ComputedEST) &&
"noexcept(expr) should not be a possible result");
return ComputedEST;
}
/// The number of exceptions in the exception specification.
unsigned size() const { return Exceptions.size(); }
/// The set of exceptions in the exception specification.
const QualType *data() const { return Exceptions.data(); }
/// Integrate another called method into the collected data.
void CalledDecl(SourceLocation CallLoc, const CXXMethodDecl *Method);
/// Integrate an invoked expression into the collected data.
void CalledExpr(Expr *E) { CalledStmt(E); }
/// Integrate an invoked statement into the collected data.
void CalledStmt(Stmt *S);
/// Overwrite an EPI's exception specification with this
/// computed exception specification.
FunctionProtoType::ExceptionSpecInfo getExceptionSpec() const {
FunctionProtoType::ExceptionSpecInfo ESI;
ESI.Type = getExceptionSpecType();
if (ESI.Type == EST_Dynamic) {
// Dynamic specification: attach the collected throw() types.
ESI.Exceptions = Exceptions;
} else if (ESI.Type == EST_None) {
/// C++11 [except.spec]p14:
/// The exception-specification is noexcept(false) if the set of
/// potential exceptions of the special member function contains "any"
ESI.Type = EST_NoexceptFalse;
ESI.NoexceptExpr = Self->ActOnCXXBoolLiteral(SourceLocation(),
tok::kw_false).get();
}
return ESI;
}
};
/// Determine what sort of exception specification a defaulted
/// default constructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedDefaultCtorExceptionSpec(SourceLocation Loc,
CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted
/// copy constructor of a class will have, and whether the parameter
/// will be const.
ImplicitExceptionSpecification
ComputeDefaultedCopyCtorExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted
/// copy assignment operator of a class will have, and whether the
/// parameter will be const.
ImplicitExceptionSpecification
ComputeDefaultedCopyAssignmentExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted move
/// constructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedMoveCtorExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted move
/// assignment operator of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedMoveAssignmentExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted
/// destructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedDtorExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification an inheriting
/// constructor of a class will have.
ImplicitExceptionSpecification
ComputeInheritingCtorExceptionSpec(SourceLocation Loc,
CXXConstructorDecl *CD);
/// Evaluate the implicit exception specification for a defaulted
/// special member function.
void EvaluateImplicitExceptionSpec(SourceLocation Loc, FunctionDecl *FD);
/// Check the given noexcept-specifier, convert its expression, and compute
/// the appropriate ExceptionSpecificationType.
ExprResult ActOnNoexceptSpec(SourceLocation NoexceptLoc, Expr *NoexceptExpr,
ExceptionSpecificationType &EST);
/// Check the given exception-specification and update the
/// exception specification information with the results.
void checkExceptionSpecification(bool IsTopLevel,
ExceptionSpecificationType EST,
ArrayRef<ParsedType> DynamicExceptions,
ArrayRef<SourceRange> DynamicExceptionRanges,
Expr *NoexceptExpr,
SmallVectorImpl<QualType> &Exceptions,
FunctionProtoType::ExceptionSpecInfo &ESI);
/// Determine if we're in a case where we need to (incorrectly) eagerly
/// parse an exception specification to work around a libstdc++ bug.
bool isLibstdcxxEagerExceptionSpecHack(const Declarator &D);
/// Add an exception-specification to the given member function
/// (or member function template). The exception-specification was parsed
/// after the method itself was declared.
void actOnDelayedExceptionSpecification(Decl *Method,
ExceptionSpecificationType EST,
SourceRange SpecificationRange,
ArrayRef<ParsedType> DynamicExceptions,
ArrayRef<SourceRange> DynamicExceptionRanges,
Expr *NoexceptExpr);
class InheritedConstructorInfo;
/// Determine if a special member function should have a deleted
/// definition when it is defaulted.
bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM,
InheritedConstructorInfo *ICI = nullptr,
bool Diagnose = false);
/// Produce notes explaining why a defaulted function was defined as deleted.
void DiagnoseDeletedDefaultedFunction(FunctionDecl *FD);
/// Declare the implicit default constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// default constructor will be added.
///
/// \returns The implicitly-declared default constructor.
CXXConstructorDecl *DeclareImplicitDefaultConstructor(
CXXRecordDecl *ClassDecl);
/// DefineImplicitDefaultConstructor - Checks for feasibility of
/// defining this constructor as the default constructor.
void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit destructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// destructor will be added.
///
/// \returns The implicitly-declared destructor.
CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitDestructor - Checks for feasibility of
/// defining this destructor as the default destructor.
void DefineImplicitDestructor(SourceLocation CurrentLocation,
CXXDestructorDecl *Destructor);
/// Build an exception spec for destructors that don't have one.
///
/// C++11 says that user-defined destructors with no exception spec get one
/// that looks as if the destructor was implicitly declared.
void AdjustDestructorExceptionSpec(CXXDestructorDecl *Destructor);
/// Define the specified inheriting constructor.
void DefineInheritingConstructor(SourceLocation UseLoc,
CXXConstructorDecl *Constructor);
/// Declare the implicit copy constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy constructor will be added.
///
/// \returns The implicitly-declared copy constructor.
CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitCopyConstructor - Checks for feasibility of
/// defining this constructor as the copy constructor.
void DefineImplicitCopyConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit move constructor for the given class.
///
/// \param ClassDecl The Class declaration into which the implicit
/// move constructor will be added.
///
/// \returns The implicitly-declared move constructor, or NULL if it wasn't
/// declared.
CXXConstructorDecl *DeclareImplicitMoveConstructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitMoveConstructor - Checks for feasibility of
/// defining this constructor as the move constructor.
void DefineImplicitMoveConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit copy assignment operator for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy assignment operator will be added.
///
/// \returns The implicitly-declared copy assignment operator.
CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl);
/// Defines an implicitly-declared copy assignment operator.
void DefineImplicitCopyAssignment(SourceLocation CurrentLocation,
CXXMethodDecl *MethodDecl);
/// Declare the implicit move assignment operator for the given class.
///
/// \param ClassDecl The Class declaration into which the implicit
/// move assignment operator will be added.
///
/// \returns The implicitly-declared move assignment operator, or NULL if it
/// wasn't declared.
CXXMethodDecl *DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl);
/// Defines an implicitly-declared move assignment operator.
void DefineImplicitMoveAssignment(SourceLocation CurrentLocation,
CXXMethodDecl *MethodDecl);
/// Force the declaration of any implicitly-declared members of this
/// class.
void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class);
/// Check a completed declaration of an implicit special member.
void CheckImplicitSpecialMemberDeclaration(Scope *S, FunctionDecl *FD);
/// Determine whether the given function is an implicitly-deleted
/// special member function.
bool isImplicitlyDeleted(FunctionDecl *FD);
/// Check whether 'this' shows up in the type of a static member
/// function after the (naturally empty) cv-qualifier-seq would be.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionType(CXXMethodDecl *Method);
/// Whether 'this' shows up in the exception specification of a static
/// member function.
bool checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method);
/// Check whether 'this' shows up in the attributes of the given
/// static member function.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method);
/// MaybeBindToTemporary - If the passed in expression has a record type with
/// a non-trivial destructor, this will return CXXBindTemporaryExpr. Otherwise
/// it simply returns the passed in expression.
ExprResult MaybeBindToTemporary(Expr *E);
/// Wrap the expression in a ConstantExpr if it is a potential immediate
/// invocation.
ExprResult CheckForImmediateInvocation(ExprResult E, FunctionDecl *Decl);
bool CompleteConstructorCall(CXXConstructorDecl *Constructor,
MultiExprArg ArgsPtr,
SourceLocation Loc,
SmallVectorImpl<Expr*> &ConvertedArgs,
bool AllowExplicit = false,
bool IsListInitialization = false);
ParsedType getInheritingConstructorName(CXXScopeSpec &SS,
SourceLocation NameLoc,
IdentifierInfo &Name);
ParsedType getConstructorName(IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec &SS,
bool EnteringContext);
ParsedType getDestructorName(SourceLocation TildeLoc,
IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec &SS,
ParsedType ObjectType,
bool EnteringContext);
ParsedType getDestructorTypeForDecltype(const DeclSpec &DS,
ParsedType ObjectType);
// Checks that reinterpret casts don't have undefined behavior.
void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType,
bool IsDereference, SourceRange Range);
/// ActOnCXXNamedCast - Parse
/// {dynamic,static,reinterpret,const,addrspace}_cast's.
ExprResult ActOnCXXNamedCast(SourceLocation OpLoc,
tok::TokenKind Kind,
SourceLocation LAngleBracketLoc,
Declarator &D,
SourceLocation RAngleBracketLoc,
SourceLocation LParenLoc,
Expr *E,
SourceLocation RParenLoc);
ExprResult BuildCXXNamedCast(SourceLocation OpLoc,
tok::TokenKind Kind,
TypeSourceInfo *Ty,
Expr *E,
SourceRange AngleBrackets,
SourceRange Parens);
ExprResult ActOnBuiltinBitCastExpr(SourceLocation KWLoc, Declarator &Dcl,
ExprResult Operand,
SourceLocation RParenLoc);
ExprResult BuildBuiltinBitCastExpr(SourceLocation KWLoc, TypeSourceInfo *TSI,
Expr *Operand, SourceLocation RParenLoc);
ExprResult BuildCXXTypeId(QualType TypeInfoType,
SourceLocation TypeidLoc,
TypeSourceInfo *Operand,
SourceLocation RParenLoc);
ExprResult BuildCXXTypeId(QualType TypeInfoType,
SourceLocation TypeidLoc,
Expr *Operand,
SourceLocation RParenLoc);
/// ActOnCXXTypeid - Parse typeid( something ).
ExprResult ActOnCXXTypeid(SourceLocation OpLoc,
SourceLocation LParenLoc, bool isType,
void *TyOrExpr,
SourceLocation RParenLoc);
ExprResult BuildCXXUuidof(QualType TypeInfoType,
SourceLocation TypeidLoc,
TypeSourceInfo *Operand,
SourceLocation RParenLoc);
ExprResult BuildCXXUuidof(QualType TypeInfoType,
SourceLocation TypeidLoc,
Expr *Operand,
SourceLocation RParenLoc);
/// ActOnCXXUuidof - Parse __uuidof( something ).
ExprResult ActOnCXXUuidof(SourceLocation OpLoc,
SourceLocation LParenLoc, bool isType,
void *TyOrExpr,
SourceLocation RParenLoc);
/// Handle a C++1z fold-expression: ( expr op ... op expr ).
ExprResult ActOnCXXFoldExpr(Scope *S, SourceLocation LParenLoc, Expr *LHS,
tok::TokenKind Operator,
SourceLocation EllipsisLoc, Expr *RHS,
SourceLocation RParenLoc);
ExprResult BuildCXXFoldExpr(UnresolvedLookupExpr *Callee,
SourceLocation LParenLoc, Expr *LHS,
BinaryOperatorKind Operator,
SourceLocation EllipsisLoc, Expr *RHS,
SourceLocation RParenLoc,
Optional<unsigned> NumExpansions);
ExprResult BuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc,
BinaryOperatorKind Operator);
//// ActOnCXXThis - Parse 'this' pointer.
ExprResult ActOnCXXThis(SourceLocation loc);
/// Build a CXXThisExpr and mark it referenced in the current context.
Expr *BuildCXXThisExpr(SourceLocation Loc, QualType Type, bool IsImplicit);
void MarkThisReferenced(CXXThisExpr *This);
/// Try to retrieve the type of the 'this' pointer.
///
/// \returns The type of 'this', if possible. Otherwise, returns a NULL type.
QualType getCurrentThisType();
/// When non-NULL, the C++ 'this' expression is allowed despite the
/// current context not being a non-static member function. In such cases,
/// this provides the type used for 'this'.
QualType CXXThisTypeOverride;
/// RAII object used to temporarily allow the C++ 'this' expression
/// to be used, with the given qualifiers on the current class type.
class CXXThisScopeRAII {
// Sema instance whose CXXThisTypeOverride is being scoped.
Sema &S;
// Value of S.CXXThisTypeOverride saved at construction — presumably
// restored by the destructor; NOTE(review): confirm against the
// out-of-line ~CXXThisScopeRAII() definition.
QualType OldCXXThisTypeOverride;
// Whether this scope actually changed anything (see the Enabled
// constructor parameter); when false the RAII object is a no-op.
bool Enabled;
public:
/// Introduce a new scope where 'this' may be allowed (when enabled),
/// using the given declaration (which is either a class template or a
/// class) along with the qualifiers placed on '*this'.
CXXThisScopeRAII(Sema &S, Decl *ContextDecl, Qualifiers CXXThisTypeQuals,
bool Enabled = true);
~CXXThisScopeRAII();
};
/// Make sure the value of 'this' is actually available in the current
/// context, if it is a potentially evaluated context.
///
/// \param Loc The location at which the capture of 'this' occurs.
///
/// \param Explicit Whether 'this' is explicitly captured in a lambda
/// capture list.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// 'this' that may or may not be used in certain specializations of
/// a nested generic lambda (depending on whether the name resolves to
/// a non-static member function or a static function).
/// \return returns 'true' if failed, 'false' if success.
bool CheckCXXThisCapture(SourceLocation Loc, bool Explicit = false,
bool BuildAndDiagnose = true,
const unsigned *const FunctionScopeIndexToStopAt = nullptr,
bool ByCopy = false);
/// Determine whether the given type is the type of *this that is used
/// outside of the body of a member function for a type that is currently
/// being defined.
bool isThisOutsideMemberFunctionBody(QualType BaseType);
/// ActOnCXXBoolLiteral - Parse {true,false} literals.
ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);
/// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals.
ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);
ExprResult
ActOnObjCAvailabilityCheckExpr(llvm::ArrayRef<AvailabilitySpec> AvailSpecs,
SourceLocation AtLoc, SourceLocation RParen);
/// ActOnCXXNullPtrLiteral - Parse 'nullptr'.
ExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc);
//// ActOnCXXThrow - Parse throw expressions.
ExprResult ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *expr);
ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex,
bool IsThrownVarInScope);
bool CheckCXXThrowOperand(SourceLocation ThrowLoc, QualType ThrowTy, Expr *E);
/// ActOnCXXTypeConstructExpr - Parse construction of a specified type.
/// Can be interpreted either as function-style casting ("int(x)")
/// or class type construction ("ClassType(x,y,z)")
/// or creation of a value-initialized type ("int()").
ExprResult ActOnCXXTypeConstructExpr(ParsedType TypeRep,
SourceLocation LParenOrBraceLoc,
MultiExprArg Exprs,
SourceLocation RParenOrBraceLoc,
bool ListInitialization);
ExprResult BuildCXXTypeConstructExpr(TypeSourceInfo *Type,
SourceLocation LParenLoc,
MultiExprArg Exprs,
SourceLocation RParenLoc,
bool ListInitialization);
/// ActOnCXXNew - Parsed a C++ 'new' expression.
ExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal,
SourceLocation PlacementLParen,
MultiExprArg PlacementArgs,
SourceLocation PlacementRParen,
SourceRange TypeIdParens, Declarator &D,
Expr *Initializer);
ExprResult BuildCXXNew(SourceRange Range, bool UseGlobal,
SourceLocation PlacementLParen,
MultiExprArg PlacementArgs,
SourceLocation PlacementRParen,
SourceRange TypeIdParens,
QualType AllocType,
TypeSourceInfo *AllocTypeInfo,
Optional<Expr *> ArraySize,
SourceRange DirectInitRange,
Expr *Initializer);
/// Determine whether \p FD is an aligned allocation or deallocation
/// function that is unavailable.
bool isUnavailableAlignedAllocationFunction(const FunctionDecl &FD) const;
/// Produce diagnostics if \p FD is an aligned allocation or deallocation
/// function that is unavailable.
void diagnoseUnavailableAlignedAllocation(const FunctionDecl &FD,
SourceLocation Loc);
bool CheckAllocatedType(QualType AllocType, SourceLocation Loc,
SourceRange R);
/// The scope in which to find allocation functions.
/// Used by FindAllocationFunctions to control where 'operator new' and
/// 'operator delete' candidates are looked up.
enum AllocationFunctionScope {
/// Only look for allocation functions in the global scope.
AFS_Global,
/// Only look for allocation functions in the scope of the
/// allocated class.
AFS_Class,
/// Look for allocation functions in both the global scope
/// and in the scope of the allocated class.
AFS_Both
};
/// Finds the overloads of operator new and delete that are appropriate
/// for the allocation.
bool FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range,
AllocationFunctionScope NewScope,
AllocationFunctionScope DeleteScope,
QualType AllocType, bool IsArray,
bool &PassAlignment, MultiExprArg PlaceArgs,
FunctionDecl *&OperatorNew,
FunctionDecl *&OperatorDelete,
bool Diagnose = true);
void DeclareGlobalNewDelete();
void DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return,
ArrayRef<QualType> Params);
bool FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD,
DeclarationName Name, FunctionDecl* &Operator,
bool Diagnose = true);
FunctionDecl *FindUsualDeallocationFunction(SourceLocation StartLoc,
bool CanProvideSize,
bool Overaligned,
DeclarationName Name);
FunctionDecl *FindDeallocationFunctionForDestructor(SourceLocation StartLoc,
CXXRecordDecl *RD);
/// ActOnCXXDelete - Parsed a C++ 'delete' expression
ExprResult ActOnCXXDelete(SourceLocation StartLoc,
bool UseGlobal, bool ArrayForm,
Expr *Operand);
void CheckVirtualDtorCall(CXXDestructorDecl *dtor, SourceLocation Loc,
bool IsDelete, bool CallCanBeVirtual,
bool WarnOnNonAbstractTypes,
SourceLocation DtorLoc);
ExprResult ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation LParen,
Expr *Operand, SourceLocation RParen);
ExprResult BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand,
SourceLocation RParen);
/// Parsed one of the type trait support pseudo-functions.
ExprResult ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
ArrayRef<ParsedType> Args,
SourceLocation RParenLoc);
ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
ArrayRef<TypeSourceInfo *> Args,
SourceLocation RParenLoc);
/// ActOnArrayTypeTrait - Parsed one of the binary type trait support
/// pseudo-functions.
ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT,
SourceLocation KWLoc,
ParsedType LhsTy,
Expr *DimExpr,
SourceLocation RParen);
ExprResult BuildArrayTypeTrait(ArrayTypeTrait ATT,
SourceLocation KWLoc,
TypeSourceInfo *TSInfo,
Expr *DimExpr,
SourceLocation RParen);
/// ActOnExpressionTrait - Parsed one of the unary type trait support
/// pseudo-functions.
ExprResult ActOnExpressionTrait(ExpressionTrait OET,
SourceLocation KWLoc,
Expr *Queried,
SourceLocation RParen);
ExprResult BuildExpressionTrait(ExpressionTrait OET,
SourceLocation KWLoc,
Expr *Queried,
SourceLocation RParen);
ExprResult ActOnStartCXXMemberReference(Scope *S,
Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
ParsedType &ObjectType,
bool &MayBePseudoDestructor);
ExprResult BuildPseudoDestructorExpr(Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
const CXXScopeSpec &SS,
TypeSourceInfo *ScopeType,
SourceLocation CCLoc,
SourceLocation TildeLoc,
PseudoDestructorTypeStorage DestroyedType);
ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
UnqualifiedId &FirstTypeName,
SourceLocation CCLoc,
SourceLocation TildeLoc,
UnqualifiedId &SecondTypeName);
ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
SourceLocation TildeLoc,
const DeclSpec& DS);
/// MaybeCreateExprWithCleanups - If the current full-expression
/// requires any cleanups, surround it with a ExprWithCleanups node.
/// Otherwise, just returns the passed-in expression.
Expr *MaybeCreateExprWithCleanups(Expr *SubExpr);
Stmt *MaybeCreateStmtWithCleanups(Stmt *SubStmt);
ExprResult MaybeCreateExprWithCleanups(ExprResult SubExpr);
MaterializeTemporaryExpr *
CreateMaterializeTemporaryExpr(QualType T, Expr *Temporary,
bool BoundToLvalueReference);
/// Convenience overload: finish a full-expression, deriving the
/// location from the expression itself (invalid location when the
/// expression is null).
ExprResult ActOnFinishFullExpr(Expr *Expr, bool DiscardedValue) {
  SourceLocation CC = Expr ? Expr->getExprLoc() : SourceLocation();
  return ActOnFinishFullExpr(Expr, CC, DiscardedValue);
}
ExprResult ActOnFinishFullExpr(Expr *Expr, SourceLocation CC,
bool DiscardedValue, bool IsConstexpr = false);
StmtResult ActOnFinishFullStmt(Stmt *Stmt);
// Marks SS invalid if it represents an incomplete type.
bool RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC);
DeclContext *computeDeclContext(QualType T);
DeclContext *computeDeclContext(const CXXScopeSpec &SS,
bool EnteringContext = false);
bool isDependentScopeSpecifier(const CXXScopeSpec &SS);
CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS);
/// The parser has parsed a global nested-name-specifier '::'.
///
/// \param CCLoc The location of the '::'.
///
/// \param SS The nested-name-specifier, which will be updated in-place
/// to reflect the parsed nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXGlobalScopeSpecifier(SourceLocation CCLoc, CXXScopeSpec &SS);
/// The parser has parsed a '__super' nested-name-specifier.
///
/// \param SuperLoc The location of the '__super' keyword.
///
/// \param ColonColonLoc The location of the '::'.
///
/// \param SS The nested-name-specifier, which will be updated in-place
/// to reflect the parsed nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnSuperScopeSpecifier(SourceLocation SuperLoc,
SourceLocation ColonColonLoc, CXXScopeSpec &SS);
bool isAcceptableNestedNameSpecifier(const NamedDecl *SD,
bool *CanCorrect = nullptr);
NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS);
/// Keeps information about an identifier in a nested-name-spec.
///
struct NestedNameSpecInfo {
/// The type of the object, if we're parsing nested-name-specifier in
/// a member access expression.
ParsedType ObjectType;
/// The identifier preceding the '::'.
IdentifierInfo *Identifier;
/// The location of the identifier.
SourceLocation IdentifierLoc;
/// The location of the '::'.
SourceLocation CCLoc;
/// Creates info object for the most typical case: an identifier with an
/// optional, already-parsed object type.
NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc,
SourceLocation ColonColonLoc, ParsedType ObjectType = ParsedType())
: ObjectType(ObjectType), Identifier(II), IdentifierLoc(IdLoc),
CCLoc(ColonColonLoc) {
}
/// Convenience constructor that wraps a bare QualType object type into a
/// ParsedType via ParsedType::make.
NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc,
SourceLocation ColonColonLoc, QualType ObjectType)
: ObjectType(ParsedType::make(ObjectType)), Identifier(II),
IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) {
}
};
bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS,
NestedNameSpecInfo &IdInfo);
bool BuildCXXNestedNameSpecifier(Scope *S,
NestedNameSpecInfo &IdInfo,
bool EnteringContext,
CXXScopeSpec &SS,
NamedDecl *ScopeLookupResult,
bool ErrorRecoveryLookup,
bool *IsCorrectedToColon = nullptr,
bool OnlyNamespace = false);
/// The parser has parsed a nested-name-specifier 'identifier::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param IdInfo Parser information about an identifier in the
/// nested-name-spec.
///
/// \param EnteringContext Whether we're entering the context nominated by
/// this nested-name-specifier.
///
/// \param SS The nested-name-specifier, which is both an input
/// parameter (the nested-name-specifier before this type) and an
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
///
/// \param ErrorRecoveryLookup If true, then this method is called to improve
/// error recovery. In this case do not emit error message.
///
/// \param IsCorrectedToColon If not null, suggestions to replace '::' -> ':'
/// are allowed. The bool value pointed by this parameter is set to 'true'
/// if the identifier is treated as if it was followed by ':', not '::'.
///
/// \param OnlyNamespace If true, only considers namespaces in lookup.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXNestedNameSpecifier(Scope *S,
NestedNameSpecInfo &IdInfo,
bool EnteringContext,
CXXScopeSpec &SS,
bool ErrorRecoveryLookup = false,
bool *IsCorrectedToColon = nullptr,
bool OnlyNamespace = false);
ExprResult ActOnDecltypeExpression(Expr *E);
bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS,
const DeclSpec &DS,
SourceLocation ColonColonLoc);
bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS,
NestedNameSpecInfo &IdInfo,
bool EnteringContext);
/// The parser has parsed a nested-name-specifier
/// 'template[opt] template-name < template-args >::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param SS The nested-name-specifier, which is both an input
/// parameter (the nested-name-specifier before this type) and an
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
///
/// \param TemplateKWLoc the location of the 'template' keyword, if any.
/// \param TemplateName the template name.
/// \param TemplateNameLoc The location of the template name.
/// \param LAngleLoc The location of the opening angle bracket ('<').
/// \param TemplateArgs The template arguments.
/// \param RAngleLoc The location of the closing angle bracket ('>').
/// \param CCLoc The location of the '::'.
///
/// \param EnteringContext Whether we're entering the context of the
/// nested-name-specifier.
///
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXNestedNameSpecifier(Scope *S,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
TemplateTy TemplateName,
SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc,
SourceLocation CCLoc,
bool EnteringContext);
/// Given a C++ nested-name-specifier, produce an annotation value
/// that the parser can use later to reconstruct the given
/// nested-name-specifier.
///
/// \param SS A nested-name-specifier.
///
/// \returns A pointer containing all of the information in the
/// nested-name-specifier \p SS.
void *SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS);
/// Given an annotation pointer for a nested-name-specifier, restore
/// the nested-name-specifier structure.
///
/// \param Annotation The annotation pointer, produced by
/// \c SaveNestedNameSpecifierAnnotation().
///
/// \param AnnotationRange The source range corresponding to the annotation.
///
/// \param SS The nested-name-specifier that will be updated with the contents
/// of the annotation pointer.
void RestoreNestedNameSpecifierAnnotation(void *Annotation,
SourceRange AnnotationRange,
CXXScopeSpec &SS);
bool ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS);
/// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global
/// scope or nested-name-specifier) is parsed, part of a declarator-id.
/// After this method is called, according to [C++ 3.4.3p3], names should be
/// looked up in the declarator-id's scope, until the declarator is parsed and
/// ActOnCXXExitDeclaratorScope is called.
/// The 'SS' should be a non-empty valid CXXScopeSpec.
bool ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS);
/// ActOnCXXExitDeclaratorScope - Called when a declarator that previously
/// invoked ActOnCXXEnterDeclaratorScope(), is finished. 'SS' is the same
/// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well.
/// Used to indicate that names should revert to being looked up in the
/// defining scope.
void ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS);
/// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse an
/// initializer for the declaration 'Dcl'.
/// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is a
/// static data member of class X, names should be looked up in the scope of
/// class X.
void ActOnCXXEnterDeclInitializer(Scope *S, Decl *Dcl);
/// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an
/// initializer for the declaration 'Dcl'.
void ActOnCXXExitDeclInitializer(Scope *S, Decl *Dcl);
/// Create a new lambda closure type.
CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange,
TypeSourceInfo *Info,
bool KnownDependent,
LambdaCaptureDefault CaptureDefault);
/// Start the definition of a lambda expression.
CXXMethodDecl *startLambdaDefinition(CXXRecordDecl *Class,
SourceRange IntroducerRange,
TypeSourceInfo *MethodType,
SourceLocation EndLoc,
ArrayRef<ParmVarDecl *> Params,
ConstexprSpecKind ConstexprKind,
Expr *TrailingRequiresClause);
/// Number lambda for linkage purposes if necessary.
void handleLambdaNumbering(
CXXRecordDecl *Class, CXXMethodDecl *Method,
Optional<std::tuple<unsigned, bool, Decl *>> Mangling = None);
/// Endow the lambda scope info with the relevant properties.
void buildLambdaScope(sema::LambdaScopeInfo *LSI,
CXXMethodDecl *CallOperator,
SourceRange IntroducerRange,
LambdaCaptureDefault CaptureDefault,
SourceLocation CaptureDefaultLoc,
bool ExplicitParams,
bool ExplicitResultType,
bool Mutable);
/// Perform initialization analysis of the init-capture and perform
/// any implicit conversions such as an lvalue-to-rvalue conversion if
/// not being used to initialize a reference.
ParsedType actOnLambdaInitCaptureInitialization(
    SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc,
    IdentifierInfo *Id, LambdaCaptureInitKind InitKind, Expr *&Init) {
  // Every init kind other than copy-initialization is treated as
  // direct-initialization by the underlying builder.
  bool DirectInit = InitKind != LambdaCaptureInitKind::CopyInit;
  QualType CaptureTy = buildLambdaInitCaptureInitialization(
      Loc, ByRef, EllipsisLoc, None, Id, DirectInit, Init);
  return ParsedType::make(CaptureTy);
}
QualType buildLambdaInitCaptureInitialization(
SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions, IdentifierInfo *Id, bool DirectInit,
Expr *&Init);
/// Create a dummy variable within the declcontext of the lambda's
/// call operator, for name lookup purposes for a lambda init capture.
///
/// CodeGen handles emission of lambda captures, ignoring these dummy
/// variables appropriately.
VarDecl *createLambdaInitCaptureVarDecl(SourceLocation Loc,
QualType InitCaptureType,
SourceLocation EllipsisLoc,
IdentifierInfo *Id,
unsigned InitStyle, Expr *Init);
/// Add an init-capture to a lambda scope.
void addInitCapture(sema::LambdaScopeInfo *LSI, VarDecl *Var);
/// Note that we have finished the explicit captures for the
/// given lambda.
void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI);
/// \brief This is called after parsing the explicit template parameter list
/// on a lambda (if it exists) in C++2a.
void ActOnLambdaExplicitTemplateParameterList(SourceLocation LAngleLoc,
ArrayRef<NamedDecl *> TParams,
SourceLocation RAngleLoc);
/// Introduce the lambda parameters into scope.
void addLambdaParameters(
ArrayRef<LambdaIntroducer::LambdaCapture> Captures,
CXXMethodDecl *CallOperator, Scope *CurScope);
/// Deduce a block or lambda's return type based on the return
/// statements present in the body.
void deduceClosureReturnType(sema::CapturingScopeInfo &CSI);
/// ActOnStartOfLambdaDefinition - This is called just before we start
/// parsing the body of a lambda; it analyzes the explicit captures and
/// arguments, and sets up various data-structures for the body of the
/// lambda.
void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
Declarator &ParamInfo, Scope *CurScope);
/// ActOnLambdaError - If there is an error parsing a lambda, this callback
/// is invoked to pop the information about the lambda.
void ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope,
bool IsInstantiation = false);
/// ActOnLambdaExpr - This is called when the body of a lambda expression
/// was successfully completed.
ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body,
Scope *CurScope);
/// Does copying/destroying the captured variable have side effects?
bool CaptureHasSideEffects(const sema::Capture &From);
/// Diagnose if an explicit lambda capture is unused. Returns true if a
/// diagnostic is emitted.
bool DiagnoseUnusedLambdaCapture(SourceRange CaptureRange,
const sema::Capture &From);
/// Build a FieldDecl suitable to hold the given capture.
FieldDecl *BuildCaptureField(RecordDecl *RD, const sema::Capture &Capture);
/// Initialize the given capture with a suitable expression.
ExprResult BuildCaptureInit(const sema::Capture &Capture,
SourceLocation ImplicitCaptureLoc,
bool IsOpenMPMapping = false);
/// Complete a lambda-expression having processed and attached the
/// lambda body.
ExprResult BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc,
sema::LambdaScopeInfo *LSI);
/// Get the return type to use for a lambda's conversion function(s) to
/// function pointer type, given the type of the call operator.
QualType
getLambdaConversionFunctionResultType(const FunctionProtoType *CallOpType);
/// Define the "body" of the conversion from a lambda object to a
/// function pointer.
///
/// This routine doesn't actually define a sensible body; rather, it fills
/// in the initialization expression needed to copy the lambda object into
/// the block, and IR generation actually generates the real body of the
/// block pointer conversion.
void DefineImplicitLambdaToFunctionPointerConversion(
SourceLocation CurrentLoc, CXXConversionDecl *Conv);
/// Define the "body" of the conversion from a lambda object to a
/// block pointer.
///
/// This routine doesn't actually define a sensible body; rather, it fills
/// in the initialization expression needed to copy the lambda object into
/// the block, and IR generation actually generates the real body of the
/// block pointer conversion.
void DefineImplicitLambdaToBlockPointerConversion(SourceLocation CurrentLoc,
CXXConversionDecl *Conv);
ExprResult BuildBlockForLambdaConversion(SourceLocation CurrentLocation,
SourceLocation ConvLocation,
CXXConversionDecl *Conv,
Expr *Src);
/// Check whether the given expression is a valid constraint expression.
/// A diagnostic is emitted if it is not, false is returned, and
/// PossibleNonPrimary will be set to true if the failure might be due to a
/// non-primary expression being used as an atomic constraint.
bool CheckConstraintExpression(const Expr *CE, Token NextToken = Token(),
bool *PossibleNonPrimary = nullptr,
bool IsTrailingRequiresClause = false);
private:
/// Caches pairs of template-like decls whose associated constraints were
/// checked for subsumption and whether or not the first's constraints did in
/// fact subsume the second's.
llvm::DenseMap<std::pair<NamedDecl *, NamedDecl *>, bool> SubsumptionCache;
/// Caches the normalized associated constraints of declarations (concepts or
/// constrained declarations). If an error occurred while normalizing the
/// associated constraints of the template or concept, nullptr will be cached
/// here.
llvm::DenseMap<NamedDecl *, NormalizedConstraint *>
NormalizationCache;
llvm::ContextualFoldingSet<ConstraintSatisfaction, const ASTContext &>
SatisfactionCache;
public:
const NormalizedConstraint *
getNormalizedAssociatedConstraints(
NamedDecl *ConstrainedDecl, ArrayRef<const Expr *> AssociatedConstraints);
/// \brief Check whether the given declaration's associated constraints are
/// at least as constrained than another declaration's according to the
/// partial ordering of constraints.
///
/// \param Result If no error occurred, receives the result of true if D1 is
/// at least constrained than D2, and false otherwise.
///
/// \returns true if an error occurred, false otherwise.
bool IsAtLeastAsConstrained(NamedDecl *D1, ArrayRef<const Expr *> AC1,
NamedDecl *D2, ArrayRef<const Expr *> AC2,
bool &Result);
/// If D1 was not at least as constrained as D2, but would've been if a pair
/// of atomic constraints involved had been declared in a concept and not
/// repeated in two separate places in code.
/// \returns true if such a diagnostic was emitted, false otherwise.
bool MaybeEmitAmbiguousAtomicConstraintsDiagnostic(NamedDecl *D1,
ArrayRef<const Expr *> AC1, NamedDecl *D2, ArrayRef<const Expr *> AC2);
/// \brief Check whether the given list of constraint expressions are
/// satisfied (as if in a 'conjunction') given template arguments.
/// \param Template the template-like entity that triggered the constraints
/// check (either a concept or a constrained entity).
/// \param ConstraintExprs a list of constraint expressions, treated as if
/// they were 'AND'ed together.
/// \param TemplateArgs the list of template arguments to substitute into the
/// constraint expression.
/// \param TemplateIDRange The source range of the template id that
/// caused the constraints check.
/// \param Satisfaction if true is returned, will contain details of the
/// satisfaction, with enough information to diagnose an unsatisfied
/// expression.
/// \returns true if an error occurred and satisfaction could not be checked,
/// false otherwise.
bool CheckConstraintSatisfaction(
const NamedDecl *Template, ArrayRef<const Expr *> ConstraintExprs,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange TemplateIDRange, ConstraintSatisfaction &Satisfaction);
/// \brief Check whether the given non-dependent constraint expression is
/// satisfied. Returns false and updates Satisfaction with the satisfaction
/// verdict if successful, emits a diagnostic and returns true if an error
/// occured and satisfaction could not be determined.
///
/// \returns true if an error occurred, false otherwise.
bool CheckConstraintSatisfaction(const Expr *ConstraintExpr,
ConstraintSatisfaction &Satisfaction);
/// Check whether the given function decl's trailing requires clause is
/// satisfied, if any. Returns false and updates Satisfaction with the
/// satisfaction verdict if successful, emits a diagnostic and returns true if
/// an error occured and satisfaction could not be determined.
///
/// \returns true if an error occurred, false otherwise.
bool CheckFunctionConstraints(const FunctionDecl *FD,
ConstraintSatisfaction &Satisfaction,
SourceLocation UsageLoc = SourceLocation());
/// \brief Ensure that the given template arguments satisfy the constraints
/// associated with the given template, emitting a diagnostic if they do not.
///
/// \param Template The template to which the template arguments are being
/// provided.
///
/// \param TemplateArgs The converted, canonicalized template arguments.
///
/// \param TemplateIDRange The source range of the template id that
/// caused the constraints check.
///
/// \returns true if the constrains are not satisfied or could not be checked
/// for satisfaction, false if the constraints are satisfied.
bool EnsureTemplateArgumentListConstraints(TemplateDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange TemplateIDRange);
/// \brief Emit diagnostics explaining why a constraint expression was deemed
/// unsatisfied.
/// \param First whether this is the first time an unsatisfied constraint is
/// diagnosed for this error.
void
DiagnoseUnsatisfiedConstraint(const ConstraintSatisfaction &Satisfaction,
bool First = true);
/// \brief Emit diagnostics explaining why a constraint expression was deemed
/// unsatisfied.
void
DiagnoseUnsatisfiedConstraint(const ASTConstraintSatisfaction &Satisfaction,
bool First = true);
/// \brief Emit diagnostics explaining why a constraint expression was deemed
/// unsatisfied because it was ill-formed.
void DiagnoseUnsatisfiedIllFormedConstraint(SourceLocation DiagnosticLocation,
StringRef Diagnostic);
void DiagnoseRedeclarationConstraintMismatch(SourceLocation Old,
SourceLocation New);
// ParseObjCStringLiteral - Parse Objective-C string literals.
ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs,
ArrayRef<Expr *> Strings);
ExprResult BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S);
/// BuildObjCNumericLiteral - builds an ObjCBoxedExpr AST node for the
/// numeric literal expression. Type of the expression will be "NSNumber *"
/// or "id" if NSNumber is unavailable.
ExprResult BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number);
ExprResult ActOnObjCBoolLiteral(SourceLocation AtLoc, SourceLocation ValueLoc,
bool Value);
ExprResult BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements);
/// BuildObjCBoxedExpr - builds an ObjCBoxedExpr AST node for the
/// '@' prefixed parenthesized expression. The type of the expression will
/// either be "NSNumber *", "NSString *" or "NSValue *" depending on the type
/// of ValueType, which is allowed to be a built-in numeric type, "char *",
/// "const char *" or C structure with attribute 'objc_boxable'.
ExprResult BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr);
ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr,
Expr *IndexExpr,
ObjCMethodDecl *getterMethod,
ObjCMethodDecl *setterMethod);
ExprResult BuildObjCDictionaryLiteral(SourceRange SR,
MutableArrayRef<ObjCDictionaryElement> Elements);
ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc,
TypeSourceInfo *EncodedTypeInfo,
SourceLocation RParenLoc);
ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl,
CXXConversionDecl *Method,
bool HadMultipleCandidates);
ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc,
SourceLocation EncodeLoc,
SourceLocation LParenLoc,
ParsedType Ty,
SourceLocation RParenLoc);
/// ParseObjCSelectorExpression - Build selector expression for \@selector
ExprResult ParseObjCSelectorExpression(Selector Sel,
SourceLocation AtLoc,
SourceLocation SelLoc,
SourceLocation LParenLoc,
SourceLocation RParenLoc,
bool WarnMultipleSelectors);
/// ParseObjCProtocolExpression - Build protocol expression for \@protocol
ExprResult ParseObjCProtocolExpression(IdentifierInfo * ProtocolName,
SourceLocation AtLoc,
SourceLocation ProtoLoc,
SourceLocation LParenLoc,
SourceLocation ProtoIdLoc,
SourceLocation RParenLoc);
//===--------------------------------------------------------------------===//
// C++ Declarations
//
Decl *ActOnStartLinkageSpecification(Scope *S,
SourceLocation ExternLoc,
Expr *LangStr,
SourceLocation LBraceLoc);
Decl *ActOnFinishLinkageSpecification(Scope *S,
Decl *LinkageSpec,
SourceLocation RBraceLoc);
//===--------------------------------------------------------------------===//
// C++ Classes
//
CXXRecordDecl *getCurrentClass(Scope *S, const CXXScopeSpec *SS);
bool isCurrentClassName(const IdentifierInfo &II, Scope *S,
const CXXScopeSpec *SS = nullptr);
bool isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS);
bool ActOnAccessSpecifier(AccessSpecifier Access, SourceLocation ASLoc,
SourceLocation ColonLoc,
const ParsedAttributesView &Attrs);
NamedDecl *ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS,
Declarator &D,
MultiTemplateParamsArg TemplateParameterLists,
Expr *BitfieldWidth, const VirtSpecifiers &VS,
InClassInitStyle InitStyle);
void ActOnStartCXXInClassMemberInitializer();
void ActOnFinishCXXInClassMemberInitializer(Decl *VarDecl,
SourceLocation EqualLoc,
Expr *Init);
MemInitResult ActOnMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
SourceLocation LParenLoc,
ArrayRef<Expr *> Args,
SourceLocation RParenLoc,
SourceLocation EllipsisLoc);
MemInitResult ActOnMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
Expr *InitList,
SourceLocation EllipsisLoc);
MemInitResult BuildMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
Expr *Init,
SourceLocation EllipsisLoc);
MemInitResult BuildMemberInitializer(ValueDecl *Member,
Expr *Init,
SourceLocation IdLoc);
MemInitResult BuildBaseInitializer(QualType BaseType,
TypeSourceInfo *BaseTInfo,
Expr *Init,
CXXRecordDecl *ClassDecl,
SourceLocation EllipsisLoc);
MemInitResult BuildDelegatingInitializer(TypeSourceInfo *TInfo,
Expr *Init,
CXXRecordDecl *ClassDecl);
bool SetDelegatingInitializer(CXXConstructorDecl *Constructor,
CXXCtorInitializer *Initializer);
bool SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors,
ArrayRef<CXXCtorInitializer *> Initializers = None);
void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation);
/// MarkBaseAndMemberDestructorsReferenced - Given a record decl,
/// mark all the non-trivial destructors of its members and bases as
/// referenced.
void MarkBaseAndMemberDestructorsReferenced(SourceLocation Loc,
CXXRecordDecl *Record);
/// Mark destructors of virtual bases of this class referenced. In the Itanium
/// C++ ABI, this is done when emitting a destructor for any non-abstract
/// class. In the Microsoft C++ ABI, this is done any time a class's
/// destructor is referenced.
void MarkVirtualBaseDestructorsReferenced(
SourceLocation Location, CXXRecordDecl *ClassDecl,
llvm::SmallPtrSetImpl<const RecordType *> *DirectVirtualBases = nullptr);
/// Do semantic checks to allow the complete destructor variant to be emitted
/// when the destructor is defined in another translation unit. In the Itanium
/// C++ ABI, destructor variants are emitted together. In the MS C++ ABI, they
/// can be emitted in separate TUs. To emit the complete variant, run a subset
/// of the checks performed when emitting a regular destructor.
void CheckCompleteDestructorVariant(SourceLocation CurrentLocation,
CXXDestructorDecl *Dtor);
/// The list of classes whose vtables have been used within
/// this translation unit, and the source locations at which the
/// first use occurred.
typedef std::pair<CXXRecordDecl*, SourceLocation> VTableUse;
/// The list of vtables that are required but have not yet been
/// materialized.
SmallVector<VTableUse, 16> VTableUses;
/// The set of classes whose vtables have been used within
/// this translation unit, and a bit that will be true if the vtable is
/// required to be emitted (otherwise, it should be emitted only if needed
/// by code generation).
llvm::DenseMap<CXXRecordDecl *, bool> VTablesUsed;
/// Load any externally-stored vtable uses.
void LoadExternalVTableUses();
/// Note that the vtable for the given class was used at the
/// given location.
void MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class,
bool DefinitionRequired = false);
/// Mark the exception specifications of all virtual member functions
/// in the given class as needed.
void MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc,
const CXXRecordDecl *RD);
/// MarkVirtualMembersReferenced - Will mark all members of the given
/// CXXRecordDecl referenced.
void MarkVirtualMembersReferenced(SourceLocation Loc, const CXXRecordDecl *RD,
bool ConstexprOnly = false);
/// Define all of the vtables that have been used in this
/// translation unit and reference any virtual members used by those
/// vtables.
///
/// \returns true if any work was done, false otherwise.
bool DefineUsedVTables();
void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl);
void ActOnMemInitializers(Decl *ConstructorDecl,
SourceLocation ColonLoc,
ArrayRef<CXXCtorInitializer*> MemInits,
bool AnyErrors);
/// Check class-level dllimport/dllexport attribute. The caller must
/// ensure that referenceDLLExportedClassMethods is called some point later
/// when all outer classes of Class are complete.
void checkClassLevelDLLAttribute(CXXRecordDecl *Class);
void checkClassLevelCodeSegAttribute(CXXRecordDecl *Class);
void referenceDLLExportedClassMethods();
void propagateDLLAttrToBaseClassTemplate(
CXXRecordDecl *Class, Attr *ClassAttr,
ClassTemplateSpecializationDecl *BaseTemplateSpec,
SourceLocation BaseLoc);
/// Add gsl::Pointer attribute to std::container::iterator
/// \param ND The declaration that introduces the name
/// std::container::iterator. \param UnderlyingRecord The record named by ND.
void inferGslPointerAttribute(NamedDecl *ND, CXXRecordDecl *UnderlyingRecord);
/// Add [[gsl::Owner]] and [[gsl::Pointer]] attributes for std:: types.
void inferGslOwnerPointerAttribute(CXXRecordDecl *Record);
/// Add [[gsl::Pointer]] attributes for std:: types.
void inferGslPointerAttribute(TypedefNameDecl *TD);
void CheckCompletedCXXClass(Scope *S, CXXRecordDecl *Record);
/// Check that the C++ class annotated with "trivial_abi" satisfies all the
/// conditions that are needed for the attribute to have an effect.
void checkIllFormedTrivialABIStruct(CXXRecordDecl &RD);
void ActOnFinishCXXMemberSpecification(Scope *S, SourceLocation RLoc,
Decl *TagDecl, SourceLocation LBrac,
SourceLocation RBrac,
const ParsedAttributesView &AttrList);
void ActOnFinishCXXMemberDecls();
void ActOnFinishCXXNonNestedClass();
void ActOnReenterCXXMethodParameter(Scope *S, ParmVarDecl *Param);
unsigned ActOnReenterTemplateScope(Decl *Template,
llvm::function_ref<Scope *()> EnterScope);
void ActOnStartDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnDelayedCXXMethodParameter(Scope *S, Decl *Param);
void ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnFinishDelayedMemberInitializers(Decl *Record);
void MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD,
CachedTokens &Toks);
void UnmarkAsLateParsedTemplate(FunctionDecl *FD);
bool IsInsideALocalClassWithinATemplateFunction();
Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc,
Expr *AssertExpr,
Expr *AssertMessageExpr,
SourceLocation RParenLoc);
Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc,
Expr *AssertExpr,
StringLiteral *AssertMessageExpr,
SourceLocation RParenLoc,
bool Failed);
FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart,
SourceLocation FriendLoc,
TypeSourceInfo *TSInfo);
Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS,
MultiTemplateParamsArg TemplateParams);
NamedDecl *ActOnFriendFunctionDecl(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParams);
QualType CheckConstructorDeclarator(Declarator &D, QualType R,
StorageClass& SC);
void CheckConstructor(CXXConstructorDecl *Constructor);
QualType CheckDestructorDeclarator(Declarator &D, QualType R,
StorageClass& SC);
bool CheckDestructor(CXXDestructorDecl *Destructor);
void CheckConversionDeclarator(Declarator &D, QualType &R,
StorageClass& SC);
Decl *ActOnConversionDeclarator(CXXConversionDecl *Conversion);
void CheckDeductionGuideDeclarator(Declarator &D, QualType &R,
StorageClass &SC);
void CheckDeductionGuideTemplate(FunctionTemplateDecl *TD);
void CheckExplicitlyDefaultedFunction(Scope *S, FunctionDecl *MD);
bool CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD,
CXXSpecialMember CSM);
void CheckDelayedMemberExceptionSpecs();
bool CheckExplicitlyDefaultedComparison(Scope *S, FunctionDecl *MD,
DefaultedComparisonKind DCK);
void DeclareImplicitEqualityComparison(CXXRecordDecl *RD,
FunctionDecl *Spaceship);
void DefineDefaultedComparison(SourceLocation Loc, FunctionDecl *FD,
DefaultedComparisonKind DCK);
//===--------------------------------------------------------------------===//
// C++ Derived Classes
//
/// ActOnBaseSpecifier - Parsed a base specifier
CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class,
SourceRange SpecifierRange,
bool Virtual, AccessSpecifier Access,
TypeSourceInfo *TInfo,
SourceLocation EllipsisLoc);
BaseResult ActOnBaseSpecifier(Decl *classdecl,
SourceRange SpecifierRange,
ParsedAttributes &Attrs,
bool Virtual, AccessSpecifier Access,
ParsedType basetype,
SourceLocation BaseLoc,
SourceLocation EllipsisLoc);
bool AttachBaseSpecifiers(CXXRecordDecl *Class,
MutableArrayRef<CXXBaseSpecifier *> Bases);
void ActOnBaseSpecifiers(Decl *ClassDecl,
MutableArrayRef<CXXBaseSpecifier *> Bases);
bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base);
bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base,
CXXBasePaths &Paths);
// FIXME: I don't like this name.
void BuildBasePathArray(const CXXBasePaths &Paths, CXXCastPath &BasePath);
bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
SourceLocation Loc, SourceRange Range,
CXXCastPath *BasePath = nullptr,
bool IgnoreAccess = false);
bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
unsigned InaccessibleBaseID,
unsigned AmbiguousBaseConvID,
SourceLocation Loc, SourceRange Range,
DeclarationName Name,
CXXCastPath *BasePath,
bool IgnoreAccess = false);
std::string getAmbiguousPathsDisplayString(CXXBasePaths &Paths);
bool CheckOverridingFunctionAttributes(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
/// CheckOverridingFunctionReturnType - Checks whether the return types are
/// covariant, according to C++ [class.virtual]p5.
bool CheckOverridingFunctionReturnType(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
/// CheckOverridingFunctionExceptionSpec - Checks whether the exception
/// spec is a subset of base spec.
bool CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange);
/// CheckOverrideControl - Check C++11 override control semantics.
void CheckOverrideControl(NamedDecl *D);
/// DiagnoseAbsenceOfOverrideControl - Diagnose if 'override' keyword was
/// not used in the declaration of an overriding method.
void DiagnoseAbsenceOfOverrideControl(NamedDecl *D, bool Inconsistent);
/// CheckForFunctionMarkedFinal - Checks whether a virtual member function
/// overrides a virtual member function marked 'final', according to
/// C++11 [class.virtual]p4.
bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
//===--------------------------------------------------------------------===//
// C++ Access Control
//
/// Result of a C++ access-control check (returned by the Check*Access
/// member functions below).
enum AccessResult {
AR_accessible,   ///< The access is permitted.
AR_inaccessible, ///< The access is forbidden.
AR_dependent,    ///< NOTE(review): presumably the check cannot be resolved
                 ///< until template instantiation — confirm against callers.
AR_delayed       ///< NOTE(review): presumably the check/diagnostic is
                 ///< deferred (see HandleDelayedAccessCheck) — confirm.
};
bool SetMemberAccessSpecifier(NamedDecl *MemberDecl,
NamedDecl *PrevMemberDecl,
AccessSpecifier LexicalAS);
AccessResult CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E,
DeclAccessPair FoundDecl);
AccessResult CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E,
DeclAccessPair FoundDecl);
AccessResult CheckAllocationAccess(SourceLocation OperatorLoc,
SourceRange PlacementRange,
CXXRecordDecl *NamingClass,
DeclAccessPair FoundDecl,
bool Diagnose = true);
AccessResult CheckConstructorAccess(SourceLocation Loc,
CXXConstructorDecl *D,
DeclAccessPair FoundDecl,
const InitializedEntity &Entity,
bool IsCopyBindingRefToTemp = false);
AccessResult CheckConstructorAccess(SourceLocation Loc,
CXXConstructorDecl *D,
DeclAccessPair FoundDecl,
const InitializedEntity &Entity,
const PartialDiagnostic &PDiag);
AccessResult CheckDestructorAccess(SourceLocation Loc,
CXXDestructorDecl *Dtor,
const PartialDiagnostic &PDiag,
QualType objectType = QualType());
AccessResult CheckFriendAccess(NamedDecl *D);
AccessResult CheckMemberAccess(SourceLocation UseLoc,
CXXRecordDecl *NamingClass,
DeclAccessPair Found);
AccessResult
CheckStructuredBindingMemberAccess(SourceLocation UseLoc,
CXXRecordDecl *DecomposedClass,
DeclAccessPair Field);
AccessResult CheckMemberOperatorAccess(SourceLocation Loc,
Expr *ObjectExpr,
Expr *ArgExpr,
DeclAccessPair FoundDecl);
AccessResult CheckAddressOfMemberAccess(Expr *OvlExpr,
DeclAccessPair FoundDecl);
AccessResult CheckBaseClassAccess(SourceLocation AccessLoc,
QualType Base, QualType Derived,
const CXXBasePath &Path,
unsigned DiagID,
bool ForceCheck = false,
bool ForceUnprivileged = false);
void CheckLookupAccess(const LookupResult &R);
bool IsSimplyAccessible(NamedDecl *Decl, CXXRecordDecl *NamingClass,
QualType BaseType);
bool isMemberAccessibleForDeletion(CXXRecordDecl *NamingClass,
DeclAccessPair Found, QualType ObjectType,
SourceLocation Loc,
const PartialDiagnostic &Diag);
/// Convenience overload: performs the same accessible-for-deletion check as
/// the four-argument overload above, with no source location and a
/// default-constructed diagnostic.
bool isMemberAccessibleForDeletion(CXXRecordDecl *NamingClass,
DeclAccessPair Found,
QualType ObjectType) {
// Delegate to the full overload with an invalid location and empty PDiag.
return isMemberAccessibleForDeletion(NamingClass, Found, ObjectType,
SourceLocation(), PDiag());
}
void HandleDependentAccessCheck(const DependentDiagnostic &DD,
const MultiLevelTemplateArgumentList &TemplateArgs);
void PerformDependentDiagnostics(const DeclContext *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs);
void HandleDelayedAccessCheck(sema::DelayedDiagnostic &DD, Decl *Ctx);
/// When true, access checking violations are treated as SFINAE
/// failures rather than hard errors.
bool AccessCheckingSFINAE;
/// Selects which kind of entity an abstract-class diagnostic refers to.
/// NOTE(review): the enumerator order looks like a %select index for the
/// corresponding diagnostic — confirm against the diagnostic .td definitions.
enum AbstractDiagSelID {
AbstractNone = -1, ///< No specific entity kind (suppresses the selection).
AbstractReturnType,
AbstractParamType,
AbstractVariableType,
AbstractFieldType,
AbstractIvarType,
AbstractSynthesizedIvarType,
AbstractArrayType
};
bool isAbstractType(SourceLocation Loc, QualType T);
bool RequireNonAbstractType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
/// Variadic convenience wrapper around RequireNonAbstractType: bundles the
/// diagnostic ID together with its formatting arguments into a
/// BoundTypeDiagnoser and forwards to the TypeDiagnoser-based overload
/// declared above.
///
/// \param Loc    Location at which the requirement is checked.
/// \param T      The type that must not be abstract.
/// \param DiagID Diagnostic to emit if \p T is abstract.
/// \param Args   Additional arguments formatted into the diagnostic.
/// \returns the result of the TypeDiagnoser-based overload.
template <typename... Ts>
bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID,
                            const Ts &...Args) {
  // The diagnoser must be a named lvalue because the target overload takes
  // it by non-const reference.
  BoundTypeDiagnoser<Ts...> BoundDiagnoser(DiagID, Args...);
  return RequireNonAbstractType(Loc, T, BoundDiagnoser);
}
void DiagnoseAbstractType(const CXXRecordDecl *RD);
//===--------------------------------------------------------------------===//
// C++ Overloaded Operators [C++ 13.5]
//
bool CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl);
bool CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl);
//===--------------------------------------------------------------------===//
// C++ Templates [C++ 14]
//
void FilterAcceptableTemplateNames(LookupResult &R,
bool AllowFunctionTemplates = true,
bool AllowDependent = true);
bool hasAnyAcceptableTemplateNames(LookupResult &R,
bool AllowFunctionTemplates = true,
bool AllowDependent = true,
bool AllowNonTemplateFunctions = false);
/// Try to interpret the lookup result D as a template-name.
///
/// \param D A declaration found by name lookup.
/// \param AllowFunctionTemplates Whether function templates should be
/// considered valid results.
/// \param AllowDependent Whether unresolved using declarations (that might
/// name templates) should be considered valid results.
NamedDecl *getAsTemplateNameDecl(NamedDecl *D,
bool AllowFunctionTemplates = true,
bool AllowDependent = true);
/// Tag type used to construct a RequiredTemplateKind (below) that
/// unconditionally requires a template name.
enum TemplateNameIsRequiredTag { TemplateNameIsRequired };
/// Whether and why a template name is required in this lookup.
class RequiredTemplateKind {
public:
/// Template name is required if TemplateKWLoc is valid.
RequiredTemplateKind(SourceLocation TemplateKWLoc = SourceLocation())
: TemplateKW(TemplateKWLoc) {}
/// Template name is unconditionally required.
RequiredTemplateKind(TemplateNameIsRequiredTag) : TemplateKW() {}
/// The location of the 'template' keyword, or an invalid location if there
/// was no keyword (including the unconditionally-required case).
SourceLocation getTemplateKeywordLoc() const {
return TemplateKW.getValueOr(SourceLocation());
}
/// True if an actual 'template' keyword location was recorded.
bool hasTemplateKeyword() const { return getTemplateKeywordLoc().isValid(); }
/// True if a template name is required. Note the Optional-vs-value
/// comparison: the unconditional constructor leaves TemplateKW empty, and
/// an empty Optional compares unequal to SourceLocation(), so this returns
/// true for that case; the keyword constructor stores the location, which
/// only compares unequal to the default SourceLocation() when it is valid.
bool isRequired() const { return TemplateKW != SourceLocation(); }
explicit operator bool() const { return isRequired(); }
private:
// Empty => template name unconditionally required; otherwise holds the
// (possibly invalid) 'template' keyword location.
llvm::Optional<SourceLocation> TemplateKW;
};
/// How a name came to be assumed to be a template name even though lookup
/// did not find a template (reported via the ATK out-parameter of
/// LookupTemplateName below).
enum class AssumedTemplateKind {
/// This is not assumed to be a template name.
None,
/// This is assumed to be a template name because lookup found nothing.
FoundNothing,
/// This is assumed to be a template name because lookup found one or more
/// functions (but no function templates).
FoundFunctions,
};
bool LookupTemplateName(
LookupResult &R, Scope *S, CXXScopeSpec &SS, QualType ObjectType,
bool EnteringContext, bool &MemberOfUnknownSpecialization,
RequiredTemplateKind RequiredTemplate = SourceLocation(),
AssumedTemplateKind *ATK = nullptr, bool AllowTypoCorrection = true);
TemplateNameKind isTemplateName(Scope *S,
CXXScopeSpec &SS,
bool hasTemplateKeyword,
const UnqualifiedId &Name,
ParsedType ObjectType,
bool EnteringContext,
TemplateTy &Template,
bool &MemberOfUnknownSpecialization,
bool Disambiguation = false);
/// Try to resolve an undeclared template name as a type template.
///
/// Sets II to the identifier corresponding to the template name, and updates
/// Name to a corresponding (typo-corrected) type template name and TNK to
/// the corresponding kind, if possible.
void ActOnUndeclaredTypeTemplateName(Scope *S, TemplateTy &Name,
TemplateNameKind &TNK,
SourceLocation NameLoc,
IdentifierInfo *&II);
bool resolveAssumedTemplateNameAsType(Scope *S, TemplateName &Name,
SourceLocation NameLoc,
bool Diagnose = true);
/// Determine whether a particular identifier might be the name in a C++1z
/// deduction-guide declaration.
bool isDeductionGuideName(Scope *S, const IdentifierInfo &Name,
SourceLocation NameLoc,
ParsedTemplateTy *Template = nullptr);
bool DiagnoseUnknownTemplateName(const IdentifierInfo &II,
SourceLocation IILoc,
Scope *S,
const CXXScopeSpec *SS,
TemplateTy &SuggestedTemplate,
TemplateNameKind &SuggestedKind);
bool DiagnoseUninstantiableTemplate(SourceLocation PointOfInstantiation,
NamedDecl *Instantiation,
bool InstantiatedFromMember,
const NamedDecl *Pattern,
const NamedDecl *PatternDef,
TemplateSpecializationKind TSK,
bool Complain = true);
void DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl);
TemplateDecl *AdjustDeclIfTemplate(Decl *&Decl);
NamedDecl *ActOnTypeParameter(Scope *S, bool Typename,
SourceLocation EllipsisLoc,
SourceLocation KeyLoc,
IdentifierInfo *ParamName,
SourceLocation ParamNameLoc,
unsigned Depth, unsigned Position,
SourceLocation EqualLoc,
ParsedType DefaultArg, bool HasTypeConstraint);
bool ActOnTypeConstraint(const CXXScopeSpec &SS,
TemplateIdAnnotation *TypeConstraint,
TemplateTypeParmDecl *ConstrainedParameter,
SourceLocation EllipsisLoc);
bool AttachTypeConstraint(NestedNameSpecifierLoc NS,
DeclarationNameInfo NameInfo,
ConceptDecl *NamedConcept,
const TemplateArgumentListInfo *TemplateArgs,
TemplateTypeParmDecl *ConstrainedParameter,
SourceLocation EllipsisLoc);
bool AttachTypeConstraint(AutoTypeLoc TL,
NonTypeTemplateParmDecl *ConstrainedParameter,
SourceLocation EllipsisLoc);
QualType CheckNonTypeTemplateParameterType(TypeSourceInfo *&TSI,
SourceLocation Loc);
QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc);
NamedDecl *ActOnNonTypeTemplateParameter(Scope *S, Declarator &D,
unsigned Depth,
unsigned Position,
SourceLocation EqualLoc,
Expr *DefaultArg);
NamedDecl *ActOnTemplateTemplateParameter(Scope *S,
SourceLocation TmpLoc,
TemplateParameterList *Params,
SourceLocation EllipsisLoc,
IdentifierInfo *ParamName,
SourceLocation ParamNameLoc,
unsigned Depth,
unsigned Position,
SourceLocation EqualLoc,
ParsedTemplateArgument DefaultArg);
TemplateParameterList *
ActOnTemplateParameterList(unsigned Depth,
SourceLocation ExportLoc,
SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
ArrayRef<NamedDecl *> Params,
SourceLocation RAngleLoc,
Expr *RequiresClause);
/// The context in which we are checking a template parameter list.
/// Passed as the TPC argument of CheckTemplateParameterList below.
enum TemplateParamListContext {
TPC_ClassTemplate,
TPC_VarTemplate,
TPC_FunctionTemplate,
TPC_ClassTemplateMember,
TPC_FriendClassTemplate,
TPC_FriendFunctionTemplate,
TPC_FriendFunctionTemplateDefinition,
TPC_TypeAliasTemplate
};
bool CheckTemplateParameterList(TemplateParameterList *NewParams,
TemplateParameterList *OldParams,
TemplateParamListContext TPC,
SkipBodyInfo *SkipBody = nullptr);
TemplateParameterList *MatchTemplateParametersToScopeSpecifier(
SourceLocation DeclStartLoc, SourceLocation DeclLoc,
const CXXScopeSpec &SS, TemplateIdAnnotation *TemplateId,
ArrayRef<TemplateParameterList *> ParamLists,
bool IsFriend, bool &IsMemberSpecialization, bool &Invalid,
bool SuppressDiagnostic = false);
DeclResult CheckClassTemplate(
Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc,
const ParsedAttributesView &Attr, TemplateParameterList *TemplateParams,
AccessSpecifier AS, SourceLocation ModulePrivateLoc,
SourceLocation FriendLoc, unsigned NumOuterTemplateParamLists,
TemplateParameterList **OuterTemplateParamLists,
SkipBodyInfo *SkipBody = nullptr);
TemplateArgumentLoc getTrivialTemplateArgumentLoc(const TemplateArgument &Arg,
QualType NTTPType,
SourceLocation Loc);
/// Get a template argument mapping the given template parameter to itself,
/// e.g. for X in \c template<int X>, this would return an expression template
/// argument referencing X.
TemplateArgumentLoc getIdentityTemplateArgumentLoc(NamedDecl *Param,
SourceLocation Location);
void translateTemplateArguments(const ASTTemplateArgsPtr &In,
TemplateArgumentListInfo &Out);
ParsedTemplateArgument ActOnTemplateTypeArgument(TypeResult ParsedType);
void NoteAllFoundTemplates(TemplateName Name);
QualType CheckTemplateIdType(TemplateName Template,
SourceLocation TemplateLoc,
TemplateArgumentListInfo &TemplateArgs);
TypeResult
ActOnTemplateIdType(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
TemplateTy Template, IdentifierInfo *TemplateII,
SourceLocation TemplateIILoc, SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc,
bool IsCtorOrDtorName = false, bool IsClassName = false);
/// Parsed an elaborated-type-specifier that refers to a template-id,
/// such as \c class T::template apply<U>.
TypeResult ActOnTagTemplateIdType(TagUseKind TUK,
TypeSpecifierType TagSpec,
SourceLocation TagLoc,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
TemplateTy TemplateD,
SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgsIn,
SourceLocation RAngleLoc);
DeclResult ActOnVarTemplateSpecialization(
Scope *S, Declarator &D, TypeSourceInfo *DI,
SourceLocation TemplateKWLoc, TemplateParameterList *TemplateParams,
StorageClass SC, bool IsPartialSpecialization);
/// Get the specialization of the given variable template corresponding to
/// the specified argument list, or a null-but-valid result if the arguments
/// are dependent.
DeclResult CheckVarTemplateId(VarTemplateDecl *Template,
SourceLocation TemplateLoc,
SourceLocation TemplateNameLoc,
const TemplateArgumentListInfo &TemplateArgs);
/// Form a reference to the specialization of the given variable template
/// corresponding to the specified argument list, or a null-but-valid result
/// if the arguments are dependent.
ExprResult CheckVarTemplateId(const CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
VarTemplateDecl *Template,
SourceLocation TemplateLoc,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult
CheckConceptTemplateId(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &ConceptNameInfo,
NamedDecl *FoundDecl, ConceptDecl *NamedConcept,
const TemplateArgumentListInfo *TemplateArgs);
void diagnoseMissingTemplateArguments(TemplateName Name, SourceLocation Loc);
ExprResult BuildTemplateIdExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
bool RequiresADL,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
TemplateNameKind ActOnTemplateName(
Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext,
TemplateTy &Template, bool AllowInjectedClassName = false);
DeclResult ActOnClassTemplateSpecialization(
Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
SourceLocation ModulePrivateLoc, CXXScopeSpec &SS,
TemplateIdAnnotation &TemplateId, const ParsedAttributesView &Attr,
MultiTemplateParamsArg TemplateParameterLists,
SkipBodyInfo *SkipBody = nullptr);
bool CheckTemplatePartialSpecializationArgs(SourceLocation Loc,
TemplateDecl *PrimaryTemplate,
unsigned NumExplicitArgs,
ArrayRef<TemplateArgument> Args);
void CheckTemplatePartialSpecialization(
ClassTemplatePartialSpecializationDecl *Partial);
void CheckTemplatePartialSpecialization(
VarTemplatePartialSpecializationDecl *Partial);
Decl *ActOnTemplateDeclarator(Scope *S,
MultiTemplateParamsArg TemplateParameterLists,
Declarator &D);
bool
CheckSpecializationInstantiationRedecl(SourceLocation NewLoc,
TemplateSpecializationKind NewTSK,
NamedDecl *PrevDecl,
TemplateSpecializationKind PrevTSK,
SourceLocation PrevPtOfInstantiation,
bool &SuppressNew);
bool CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD,
const TemplateArgumentListInfo &ExplicitTemplateArgs,
LookupResult &Previous);
bool CheckFunctionTemplateSpecialization(
FunctionDecl *FD, TemplateArgumentListInfo *ExplicitTemplateArgs,
LookupResult &Previous, bool QualifiedFriend = false);
bool CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous);
void CompleteMemberSpecialization(NamedDecl *Member, LookupResult &Previous);
DeclResult ActOnExplicitInstantiation(
Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc,
unsigned TagSpec, SourceLocation KWLoc, const CXXScopeSpec &SS,
TemplateTy Template, SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc, const ParsedAttributesView &Attr);
DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc,
SourceLocation TemplateLoc,
unsigned TagSpec, SourceLocation KWLoc,
CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc,
const ParsedAttributesView &Attr);
DeclResult ActOnExplicitInstantiation(Scope *S,
SourceLocation ExternLoc,
SourceLocation TemplateLoc,
Declarator &D);
TemplateArgumentLoc
SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template,
SourceLocation TemplateLoc,
SourceLocation RAngleLoc,
Decl *Param,
SmallVectorImpl<TemplateArgument>
&Converted,
bool &HasDefaultArg);
/// Specifies the context in which a particular template
/// argument is being checked. Passed as the CTAK parameter of the
/// CheckTemplateArgument overloads below.
enum CheckTemplateArgumentKind {
/// The template argument was specified in the code or was
/// instantiated with some deduced template arguments.
CTAK_Specified,
/// The template argument was deduced via template argument
/// deduction.
CTAK_Deduced,
/// The template argument was deduced from an array bound
/// via template argument deduction.
CTAK_DeducedFromArrayBound
};
bool CheckTemplateArgument(NamedDecl *Param,
TemplateArgumentLoc &Arg,
NamedDecl *Template,
SourceLocation TemplateLoc,
SourceLocation RAngleLoc,
unsigned ArgumentPackIndex,
SmallVectorImpl<TemplateArgument> &Converted,
CheckTemplateArgumentKind CTAK = CTAK_Specified);
/// Check that the given template arguments can be provided to
/// the given template, converting the arguments along the way.
///
/// \param Template The template to which the template arguments are being
/// provided.
///
/// \param TemplateLoc The location of the template name in the source.
///
/// \param TemplateArgs The list of template arguments. If the template is
/// a template template parameter, this function may extend the set of
/// template arguments to also include substituted, defaulted template
/// arguments.
///
/// \param PartialTemplateArgs True if the list of template arguments is
/// intentionally partial, e.g., because we're checking just the initial
/// set of template arguments.
///
/// \param Converted Will receive the converted, canonicalized template
/// arguments.
///
/// \param UpdateArgsWithConversions If \c true, update \p TemplateArgs to
/// contain the converted forms of the template arguments as written.
/// Otherwise, \p TemplateArgs will not be modified.
///
/// \param ConstraintsNotSatisfied If provided, and an error occurred, will
/// receive true if the cause for the error is the associated constraints of
/// the template not being satisfied by the template arguments.
///
/// \returns true if an error occurred, false otherwise.
bool CheckTemplateArgumentList(TemplateDecl *Template,
SourceLocation TemplateLoc,
TemplateArgumentListInfo &TemplateArgs,
bool PartialTemplateArgs,
SmallVectorImpl<TemplateArgument> &Converted,
bool UpdateArgsWithConversions = true,
bool *ConstraintsNotSatisfied = nullptr);
bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param,
TemplateArgumentLoc &Arg,
SmallVectorImpl<TemplateArgument> &Converted);
bool CheckTemplateArgument(TemplateTypeParmDecl *Param,
TypeSourceInfo *Arg);
ExprResult CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
QualType InstantiatedParamType, Expr *Arg,
TemplateArgument &Converted,
CheckTemplateArgumentKind CTAK = CTAK_Specified);
bool CheckTemplateTemplateArgument(TemplateTemplateParmDecl *Param,
TemplateParameterList *Params,
TemplateArgumentLoc &Arg);
ExprResult
BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg,
QualType ParamType,
SourceLocation Loc);
ExprResult
BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg,
SourceLocation Loc);
/// Enumeration describing how template parameter lists are compared
/// for equality. Passed as the Kind argument of
/// TemplateParameterListsAreEqual below.
enum TemplateParameterListEqualKind {
/// We are matching the template parameter lists of two templates
/// that might be redeclarations.
///
/// \code
/// template<typename T> struct X;
/// template<typename T> struct X;
/// \endcode
TPL_TemplateMatch,
/// We are matching the template parameter lists of two template
/// template parameters as part of matching the template parameter lists
/// of two templates that might be redeclarations.
///
/// \code
/// template<template<int I> class TT> struct X;
/// template<template<int Value> class Other> struct X;
/// \endcode
TPL_TemplateTemplateParmMatch,
/// We are matching the template parameter lists of a template
/// template argument against the template parameter lists of a template
/// template parameter.
///
/// \code
/// template<template<int Value> class Metafun> struct X;
/// template<int Value> struct integer_c;
/// X<integer_c> xic;
/// \endcode
TPL_TemplateTemplateArgumentMatch
};
bool TemplateParameterListsAreEqual(TemplateParameterList *New,
TemplateParameterList *Old,
bool Complain,
TemplateParameterListEqualKind Kind,
SourceLocation TemplateArgLoc
= SourceLocation());
bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams);
/// Called when the parser has parsed a C++ typename
/// specifier, e.g., "typename T::type".
///
/// \param S The scope in which this typename type occurs.
/// \param TypenameLoc the location of the 'typename' keyword
/// \param SS the nested-name-specifier following the typename (e.g., 'T::').
/// \param II the identifier we're retrieving (e.g., 'type' in the example).
/// \param IdLoc the location of the identifier.
TypeResult
ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
const CXXScopeSpec &SS, const IdentifierInfo &II,
SourceLocation IdLoc);
/// Called when the parser has parsed a C++ typename
/// specifier that ends in a template-id, e.g.,
/// "typename MetaFun::template apply<T1, T2>".
///
/// \param S The scope in which this typename type occurs.
/// \param TypenameLoc the location of the 'typename' keyword
/// \param SS the nested-name-specifier following the typename (e.g., 'T::').
/// \param TemplateLoc the location of the 'template' keyword, if any.
/// \param TemplateName The template name.
/// \param TemplateII The identifier used to name the template.
/// \param TemplateIILoc The location of the template name.
/// \param LAngleLoc The location of the opening angle bracket ('<').
/// \param TemplateArgs The template arguments.
/// \param RAngleLoc The location of the closing angle bracket ('>').
TypeResult
ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
const CXXScopeSpec &SS,
SourceLocation TemplateLoc,
TemplateTy TemplateName,
IdentifierInfo *TemplateII,
SourceLocation TemplateIILoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc);
/// Build the type named by a typename specifier composed of the given
/// keyword, nested-name-specifier, and identifier. On success, \p TSI is
/// set to the type source information for the result.
/// NOTE(review): the exact semantics of \p DeducedTSTContext are not
/// visible here -- confirm in the implementation.
QualType CheckTypenameType(ElaboratedTypeKeyword Keyword,
SourceLocation KeywordLoc,
NestedNameSpecifierLoc QualifierLoc,
const IdentifierInfo &II,
SourceLocation IILoc,
TypeSourceInfo **TSI,
bool DeducedTSTContext);
/// Variant of CheckTypenameType that does not produce type source
/// information.
QualType CheckTypenameType(ElaboratedTypeKeyword Keyword,
SourceLocation KeywordLoc,
NestedNameSpecifierLoc QualifierLoc,
const IdentifierInfo &II,
SourceLocation IILoc,
bool DeducedTSTContext = true);
/// Rebuild the type \p T, named by \p Name at \p Loc, within the context
/// of the current template instantiation.
TypeSourceInfo *RebuildTypeInCurrentInstantiation(TypeSourceInfo *T,
SourceLocation Loc,
DeclarationName Name);
/// Rebuild the given nested-name-specifier within the context of the
/// current template instantiation.
bool RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS);
/// Rebuild the given expression within the context of the current
/// template instantiation.
ExprResult RebuildExprInCurrentInstantiation(Expr *E);
/// Rebuild the given template parameter list within the context of the
/// current template instantiation.
bool RebuildTemplateParamsInCurrentInstantiation(
TemplateParameterList *Params);
/// Produce a string describing the bindings of the given template
/// parameters to the given template argument list.
std::string
getTemplateArgumentBindingsText(const TemplateParameterList *Params,
const TemplateArgumentList &Args);
/// Overload taking a raw array of \p NumArgs template arguments.
std::string
getTemplateArgumentBindingsText(const TemplateParameterList *Params,
const TemplateArgument *Args,
unsigned NumArgs);
//===--------------------------------------------------------------------===//
// C++ Concepts
//===--------------------------------------------------------------------===//
/// Called when the parser has parsed a concept definition, binding
/// \p Name to the constraint expression \p ConstraintExpr.
Decl *ActOnConceptDefinition(
Scope *S, MultiTemplateParamsArg TemplateParameterLists,
IdentifierInfo *Name, SourceLocation NameLoc, Expr *ConstraintExpr);
/// Called when the parser begins a requires-expression; creates the body
/// declaration that owns the expression's local parameters.
RequiresExprBodyDecl *
ActOnStartRequiresExpr(SourceLocation RequiresKWLoc,
ArrayRef<ParmVarDecl *> LocalParameters,
Scope *BodyScope);
/// Called when the parser finishes the body of a requires-expression.
void ActOnFinishRequiresExpr();
/// Called when the parser has parsed a simple requirement (a bare
/// expression) inside a requires-expression.
concepts::Requirement *ActOnSimpleRequirement(Expr *E);
/// Called when the parser has parsed a type requirement,
/// e.g., "typename T::type;".
concepts::Requirement *ActOnTypeRequirement(
SourceLocation TypenameKWLoc, CXXScopeSpec &SS, SourceLocation NameLoc,
IdentifierInfo *TypeName, TemplateIdAnnotation *TemplateId);
/// Called when the parser has parsed a compound requirement without a
/// return-type-requirement, e.g., "{ E } noexcept;".
concepts::Requirement *ActOnCompoundRequirement(Expr *E,
SourceLocation NoexceptLoc);
/// Called when the parser has parsed a compound requirement with a
/// type-constraint return-type-requirement.
concepts::Requirement *
ActOnCompoundRequirement(
Expr *E, SourceLocation NoexceptLoc, CXXScopeSpec &SS,
TemplateIdAnnotation *TypeConstraint, unsigned Depth);
/// Called when the parser has parsed a nested requirement,
/// e.g., "requires C<T>;".
concepts::Requirement *ActOnNestedRequirement(Expr *Constraint);
/// Build an expression requirement from a successfully-built expression.
concepts::ExprRequirement *
BuildExprRequirement(
Expr *E, bool IsSatisfied, SourceLocation NoexceptLoc,
concepts::ExprRequirement::ReturnTypeRequirement ReturnTypeRequirement);
/// Build an expression requirement whose expression could not be
/// substituted; \p ExprSubstDiag records the substitution failure.
concepts::ExprRequirement *
BuildExprRequirement(
concepts::Requirement::SubstitutionDiagnostic *ExprSubstDiag,
bool IsSatisfied, SourceLocation NoexceptLoc,
concepts::ExprRequirement::ReturnTypeRequirement ReturnTypeRequirement);
/// Build a type requirement for the given type.
concepts::TypeRequirement *BuildTypeRequirement(TypeSourceInfo *Type);
/// Build a type requirement whose type could not be substituted.
concepts::TypeRequirement *
BuildTypeRequirement(
concepts::Requirement::SubstitutionDiagnostic *SubstDiag);
/// Build a nested requirement from the given constraint expression.
concepts::NestedRequirement *BuildNestedRequirement(Expr *E);
/// Build a nested requirement whose constraint could not be substituted.
concepts::NestedRequirement *
BuildNestedRequirement(
concepts::Requirement::SubstitutionDiagnostic *SubstDiag);
/// Called when the parser has finished a requires-expression; builds the
/// RequiresExpr from the collected local parameters and requirements.
ExprResult ActOnRequiresExpr(SourceLocation RequiresKWLoc,
RequiresExprBodyDecl *Body,
ArrayRef<ParmVarDecl *> LocalParameters,
ArrayRef<concepts::Requirement *> Requirements,
SourceLocation ClosingBraceLoc);
//===--------------------------------------------------------------------===//
// C++ Variadic Templates (C++0x [temp.variadic])
//===--------------------------------------------------------------------===//
/// Determine whether an unexpanded parameter pack might be permitted in this
/// location. Useful for error recovery.
bool isUnexpandedParameterPackPermitted();
/// The context in which an unexpanded parameter pack is
/// being diagnosed.
///
/// Note that the values of this enumeration line up with the first
/// argument to the \c err_unexpanded_parameter_pack diagnostic.
enum UnexpandedParameterPackContext {
/// An arbitrary expression.
UPPC_Expression = 0,
/// The base type of a class type.
UPPC_BaseType,
/// The type of an arbitrary declaration.
UPPC_DeclarationType,
/// The type of a data member.
UPPC_DataMemberType,
/// The size of a bit-field.
UPPC_BitFieldWidth,
/// The expression in a static assertion.
UPPC_StaticAssertExpression,
/// The fixed underlying type of an enumeration.
UPPC_FixedUnderlyingType,
/// The enumerator value.
UPPC_EnumeratorValue,
/// A using declaration.
UPPC_UsingDeclaration,
/// A friend declaration.
UPPC_FriendDeclaration,
/// A declaration qualifier.
UPPC_DeclarationQualifier,
/// An initializer.
UPPC_Initializer,
/// A default argument.
UPPC_DefaultArgument,
/// The type of a non-type template parameter.
UPPC_NonTypeTemplateParameterType,
/// The type of an exception.
UPPC_ExceptionType,
/// Partial specialization.
UPPC_PartialSpecialization,
/// Microsoft __if_exists.
UPPC_IfExists,
/// Microsoft __if_not_exists.
UPPC_IfNotExists,
/// Lambda expression.
UPPC_Lambda,
/// Block expression.
UPPC_Block,
/// A type constraint.
UPPC_TypeConstraint,
/// A requirement in a requires-expression.
UPPC_Requirement,
};
/// Diagnose unexpanded parameter packs.
///
/// \param Loc The location at which we should emit the diagnostic.
///
/// \param UPPC The context in which we are diagnosing unexpanded
/// parameter packs.
///
/// \param Unexpanded the set of unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPacks(SourceLocation Loc,
UnexpandedParameterPackContext UPPC,
ArrayRef<UnexpandedParameterPack> Unexpanded);
/// If the given type contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The source location where a diagnostic should be emitted.
///
/// \param T The type that is being checked for unexpanded parameter
/// packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T,
UnexpandedParameterPackContext UPPC);
/// If the given expression contains an unexpanded parameter
/// pack, diagnose the error.
///
/// \param E The expression that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(Expr *E,
UnexpandedParameterPackContext UPPC = UPPC_Expression);
/// If the given requires-expression contains an unexpanded reference to one
/// of its own parameter packs, diagnose the error.
///
/// \param RE The requires-expression that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPackInRequiresExpr(RequiresExpr *RE);
/// If the given nested-name-specifier contains an unexpanded
/// parameter pack, diagnose the error.
///
/// \param SS The nested-name-specifier that is being checked for
/// unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS,
UnexpandedParameterPackContext UPPC);
/// If the given name contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param NameInfo The name (with source location information) that
/// is being checked for unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo,
UnexpandedParameterPackContext UPPC);
/// If the given template name contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The location of the template name.
///
/// \param Template The template name that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc,
TemplateName Template,
UnexpandedParameterPackContext UPPC);
/// If the given template argument contains an unexpanded parameter
/// pack, diagnose the error.
///
/// \param Arg The template argument that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg,
UnexpandedParameterPackContext UPPC);
/// Collect the set of unexpanded parameter packs within the given
/// template argument.
///
/// \param Arg The template argument that will be traversed to find
/// unexpanded parameter packs.
///
/// \param Unexpanded receives the unexpanded parameter packs found.
void collectUnexpandedParameterPacks(TemplateArgument Arg,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// template argument.
///
/// \param Arg The template argument that will be traversed to find
/// unexpanded parameter packs.
///
/// \param Unexpanded receives the unexpanded parameter packs found.
void collectUnexpandedParameterPacks(TemplateArgumentLoc Arg,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// type.
///
/// \param T The type that will be traversed to find
/// unexpanded parameter packs.
///
/// \param Unexpanded receives the unexpanded parameter packs found.
void collectUnexpandedParameterPacks(QualType T,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// type.
///
/// \param TL The type that will be traversed to find
/// unexpanded parameter packs.
///
/// \param Unexpanded receives the unexpanded parameter packs found.
void collectUnexpandedParameterPacks(TypeLoc TL,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// nested-name-specifier.
///
/// \param NNS The nested-name-specifier that will be traversed to find
/// unexpanded parameter packs.
///
/// \param Unexpanded receives the unexpanded parameter packs found.
void collectUnexpandedParameterPacks(NestedNameSpecifierLoc NNS,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// name.
///
/// \param NameInfo The name that will be traversed to find
/// unexpanded parameter packs.
///
/// \param Unexpanded receives the unexpanded parameter packs found.
void collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Invoked when parsing a template argument followed by an
/// ellipsis, which creates a pack expansion.
///
/// \param Arg The template argument preceding the ellipsis, which
/// may already be invalid.
///
/// \param EllipsisLoc The location of the ellipsis.
ParsedTemplateArgument ActOnPackExpansion(const ParsedTemplateArgument &Arg,
SourceLocation EllipsisLoc);
/// Invoked when parsing a type followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Type The type preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
TypeResult ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc);
/// Construct a pack expansion type from the pattern of the pack
/// expansion.
///
/// \param Pattern The type source information for the pattern.
/// \param EllipsisLoc The location of the ellipsis.
/// \param NumExpansions The number of expansions to produce, if known.
TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern,
SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// Construct a pack expansion type from the pattern of the pack
/// expansion.
///
/// \param Pattern The pattern type.
/// \param PatternRange The source range covering the pattern.
/// \param EllipsisLoc The location of the ellipsis.
/// \param NumExpansions The number of expansions to produce, if known.
QualType CheckPackExpansion(QualType Pattern,
SourceRange PatternRange,
SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// Invoked when parsing an expression followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Pattern The expression preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
ExprResult ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc);
/// Invoked when parsing an expression followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Pattern The expression preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
///
/// \param NumExpansions The number of expansions to produce, if known.
ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// Determine whether we could expand a pack expansion with the
/// given set of parameter packs into separate arguments by repeatedly
/// transforming the pattern.
///
/// \param EllipsisLoc The location of the ellipsis that identifies the
/// pack expansion.
///
/// \param PatternRange The source range that covers the entire pattern of
/// the pack expansion.
///
/// \param Unexpanded The set of unexpanded parameter packs within the
/// pattern.
///
/// \param ShouldExpand Will be set to \c true if the transformer should
/// expand the corresponding pack expansions into separate arguments. When
/// set, \c NumExpansions must also be set.
///
/// \param RetainExpansion Whether the caller should add an unexpanded
/// pack expansion after all of the expanded arguments. This is used
/// when extending explicitly-specified template argument packs per
/// C++0x [temp.arg.explicit]p9.
///
/// \param NumExpansions The number of separate arguments that will be in
/// the expanded form of the corresponding pack expansion. This is both an
/// input and an output parameter, which can be set by the caller if the
/// number of expansions is known a priori (e.g., due to a prior substitution)
/// and will be set by the callee when the number of expansions is known.
/// The callee must set this value when \c ShouldExpand is \c true; it may
/// set this value in other cases.
///
/// \returns true if an error occurred (e.g., because the parameter packs
/// are to be instantiated with arguments of different lengths), false
/// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions)
/// must be set.
bool CheckParameterPacksForExpansion(SourceLocation EllipsisLoc,
SourceRange PatternRange,
ArrayRef<UnexpandedParameterPack> Unexpanded,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool &ShouldExpand,
bool &RetainExpansion,
Optional<unsigned> &NumExpansions);
/// Determine the number of arguments in the given pack expansion
/// type.
///
/// This routine assumes that the number of arguments in the expansion is
/// consistent across all of the unexpanded parameter packs in its pattern.
///
/// Returns an empty Optional if the type can't be expanded.
Optional<unsigned> getNumArgumentsInExpansion(QualType T,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Determine whether the given declarator contains any unexpanded
/// parameter packs.
///
/// This routine is used by the parser to disambiguate function declarators
/// with an ellipsis prior to the ')', e.g.,
///
/// \code
/// void f(T...);
/// \endcode
///
/// To determine whether we have an (unnamed) function parameter pack or
/// a variadic function.
///
/// \returns true if the declarator contains any unexpanded parameter packs,
/// false otherwise.
bool containsUnexpandedParameterPacks(Declarator &D);
/// Returns the pattern of the pack expansion for a template argument.
///
/// \param OrigLoc The template argument to expand.
///
/// \param Ellipsis Will be set to the location of the ellipsis.
///
/// \param NumExpansions Will be set to the number of expansions that will
/// be generated from this pack expansion, if known a priori.
TemplateArgumentLoc getTemplateArgumentPackExpansionPattern(
TemplateArgumentLoc OrigLoc,
SourceLocation &Ellipsis,
Optional<unsigned> &NumExpansions) const;
/// Given a template argument that contains an unexpanded parameter pack, but
/// which has already been substituted, attempt to determine the number of
/// elements that will be produced once this argument is fully-expanded.
///
/// This is intended for use when transforming 'sizeof...(Arg)' in order to
/// avoid actually expanding the pack where possible.
///
/// \returns the number of elements, or an empty Optional if it cannot be
/// determined.
Optional<unsigned> getFullyPackExpandedSize(TemplateArgument Arg);
//===--------------------------------------------------------------------===//
// C++ Template Argument Deduction (C++ [temp.deduct])
//===--------------------------------------------------------------------===//
/// Adjust the type \p ArgFunctionType to match the calling convention,
/// noreturn, and optionally the exception specification of \p FunctionType.
/// Deduction often wants to ignore these properties when matching function
/// types.
///
/// \param AdjustExceptionSpec whether the exception specification of
/// \p FunctionType should also be applied to the result.
QualType adjustCCAndNoReturn(QualType ArgFunctionType, QualType FunctionType,
bool AdjustExceptionSpec = false);
/// Describes the result of template argument deduction.
///
/// The TemplateDeductionResult enumeration describes the result of
/// template argument deduction, as returned from
/// DeduceTemplateArguments(). The separate TemplateDeductionInfo
/// structure provides additional information about the results of
/// template argument deduction, e.g., the deduced template argument
/// list (if successful) or the specific template parameters or
/// deduced arguments that were involved in the failure.
enum TemplateDeductionResult {
/// Template argument deduction was successful.
TDK_Success = 0,
/// The declaration was invalid; do nothing.
TDK_Invalid,
/// Template argument deduction exceeded the maximum template
/// instantiation depth (which has already been diagnosed).
TDK_InstantiationDepth,
/// Template argument deduction did not deduce a value
/// for every template parameter.
TDK_Incomplete,
/// Template argument deduction did not deduce a value for every
/// expansion of an expanded template parameter pack.
TDK_IncompletePack,
/// Template argument deduction produced inconsistent
/// deduced values for the given template parameter.
TDK_Inconsistent,
/// Template argument deduction failed due to inconsistent
/// cv-qualifiers on a template parameter type that would
/// otherwise be deduced, e.g., we tried to deduce T in "const T"
/// but were given a non-const "X".
TDK_Underqualified,
/// Substitution of the deduced template argument values
/// resulted in an error.
TDK_SubstitutionFailure,
/// After substituting deduced template arguments, a dependent
/// parameter type did not match the corresponding argument.
TDK_DeducedMismatch,
/// After substituting deduced template arguments, an element of
/// a dependent parameter type did not match the corresponding element
/// of the corresponding argument (when deducing from an initializer list).
TDK_DeducedMismatchNested,
/// A non-dependent component of the parameter did not match the
/// corresponding component of the argument.
TDK_NonDeducedMismatch,
/// When performing template argument deduction for a function
/// template, there were too many call arguments.
TDK_TooManyArguments,
/// When performing template argument deduction for a function
/// template, there were too few call arguments.
TDK_TooFewArguments,
/// The explicitly-specified template arguments were not valid
/// template arguments for the given template.
TDK_InvalidExplicitArguments,
/// Checking non-dependent argument conversions failed.
TDK_NonDependentConversionFailure,
/// The deduced arguments did not satisfy the constraints associated
/// with the template.
TDK_ConstraintsNotSatisfied,
/// Deduction failed; that's all we know.
TDK_MiscellaneousDeductionFailure,
/// CUDA Target attributes do not match.
TDK_CUDATargetMismatch
};
/// Perform template argument deduction to determine whether the given
/// class template partial specialization matches \p TemplateArgs;
/// deduction state is recorded in \p Info.
TemplateDeductionResult
DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial,
const TemplateArgumentList &TemplateArgs,
sema::TemplateDeductionInfo &Info);
/// Perform template argument deduction to determine whether the given
/// variable template partial specialization matches \p TemplateArgs;
/// deduction state is recorded in \p Info.
TemplateDeductionResult
DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial,
const TemplateArgumentList &TemplateArgs,
sema::TemplateDeductionInfo &Info);
/// Substitute the explicitly-specified template arguments of
/// \p FunctionTemplate into its parameter types (and \p FunctionType,
/// if non-null), recording the resulting arguments in \p Deduced and the
/// substituted parameter types in \p ParamTypes.
TemplateDeductionResult SubstituteExplicitTemplateArguments(
FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo &ExplicitTemplateArgs,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
SmallVectorImpl<QualType> &ParamTypes, QualType *FunctionType,
sema::TemplateDeductionInfo &Info);
/// \brief A function argument from which we performed template argument
/// deduction for a call.
struct OriginalCallArg {
OriginalCallArg(QualType OriginalParamType, bool DecomposedParam,
unsigned ArgIdx, QualType OriginalArgType)
: OriginalParamType(OriginalParamType),
DecomposedParam(DecomposedParam), ArgIdx(ArgIdx),
OriginalArgType(OriginalArgType) {}
/// The type of the parameter as written, prior to deduction.
QualType OriginalParamType;
/// Whether the parameter was decomposed before deduction.
/// NOTE(review): presumably set for arguments decomposed from an
/// initializer list -- confirm against the deduction implementation.
bool DecomposedParam;
/// The index of the call argument used for deduction.
unsigned ArgIdx;
/// The type of the call argument.
QualType OriginalArgType;
};
/// Finish template argument deduction for a function template,
/// substituting the deduced arguments \p Deduced (of which the first
/// \p NumExplicitlySpecified were explicitly specified) and producing the
/// resulting specialization in \p Specialization.
TemplateDeductionResult FinishTemplateArgumentDeduction(
FunctionTemplateDecl *FunctionTemplate,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
unsigned NumExplicitlySpecified, FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs = nullptr,
bool PartialOverloading = false,
llvm::function_ref<bool()> CheckNonDependent = []{ return false; });
/// Perform template argument deduction for a function template from a
/// function call, given the explicit template arguments (if any) and the
/// call arguments \p Args.
TemplateDeductionResult DeduceTemplateArguments(
FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args,
FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info,
bool PartialOverloading,
llvm::function_ref<bool(ArrayRef<QualType>)> CheckNonDependent);
/// Perform template argument deduction against the target function type
/// \p ArgFunctionType; \p IsAddressOfFunction indicates deduction for the
/// address of a function template.
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs,
QualType ArgFunctionType,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
bool IsAddressOfFunction = false);
/// Deduce the template arguments of a conversion function template so
/// that it converts to \p ToType.
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
QualType ToType,
CXXConversionDecl *&Specialization,
sema::TemplateDeductionInfo &Info);
/// Perform template argument deduction when only explicit template
/// arguments are available (no call arguments or target type).
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
bool IsAddressOfFunction = false);
/// Substitute Replacement for \p auto in \p TypeWithAuto
QualType SubstAutoType(QualType TypeWithAuto, QualType Replacement);
/// Substitute Replacement for auto in TypeWithAuto
TypeSourceInfo* SubstAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto,
QualType Replacement);
/// Completely replace the \c auto in \p TypeWithAuto by
/// \p Replacement. This does not retain any \c auto type sugar.
QualType ReplaceAutoType(QualType TypeWithAuto, QualType Replacement);
TypeSourceInfo *ReplaceAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto,
QualType Replacement);
/// Result type of DeduceAutoType.
enum DeduceAutoResult {
DAR_Succeeded,
DAR_Failed,
DAR_FailedAlreadyDiagnosed
};
/// Deduce the type that replaces 'auto' in \p AutoType from the given
/// initializer, storing the deduced type in \p Result.
/// NOTE(review): the exact semantics of \p DependentDeductionDepth and
/// \p IgnoreConstraints are not visible here -- see the implementation.
DeduceAutoResult
DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer, QualType &Result,
Optional<unsigned> DependentDeductionDepth = None,
bool IgnoreConstraints = false);
/// Overload of DeduceAutoType taking a TypeLoc rather than a
/// TypeSourceInfo.
DeduceAutoResult
DeduceAutoType(TypeLoc AutoTypeLoc, Expr *&Initializer, QualType &Result,
Optional<unsigned> DependentDeductionDepth = None,
bool IgnoreConstraints = false);
/// Emit a diagnostic explaining why auto type deduction failed for the
/// given variable and initializer.
void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init);
/// Deduce the return type of \p FD if it has not been deduced yet;
/// \p Diagnose controls whether failures are diagnosed.
bool DeduceReturnType(FunctionDecl *FD, SourceLocation Loc,
bool Diagnose = true);
/// Declare implicit deduction guides for a class template if we've
/// not already done so.
void DeclareImplicitDeductionGuides(TemplateDecl *Template,
SourceLocation Loc);
/// Deduce the template arguments of a class template specialization from
/// the initializer of an entity (class template argument deduction).
QualType DeduceTemplateSpecializationFromInitializer(
TypeSourceInfo *TInfo, const InitializedEntity &Entity,
const InitializationKind &Kind, MultiExprArg Init);
/// Deduce the type of the variable \p VDecl from its initializer
/// \p Init.
QualType deduceVarTypeFromInitializer(VarDecl *VDecl, DeclarationName Name,
QualType Type, TypeSourceInfo *TSI,
SourceRange Range, bool DirectInit,
Expr *Init);
/// Retrieve the TypeLoc of the return type of \p FD.
TypeLoc getReturnTypeLoc(FunctionDecl *FD) const;
/// Deduce the function type of \p FD (with deduced return type \p AT)
/// from the returned expression \p RetExpr.
bool DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD,
SourceLocation ReturnLoc,
Expr *&RetExpr, AutoType *AT);
/// Determine which of two function templates is more specialized
/// (function template partial ordering).
FunctionTemplateDecl *getMoreSpecializedTemplate(
FunctionTemplateDecl *FT1, FunctionTemplateDecl *FT2, SourceLocation Loc,
TemplatePartialOrderingContext TPOC, unsigned NumCallArguments1,
unsigned NumCallArguments2, bool Reversed = false);
/// Find the most specialized entity in the set [SBegin, SEnd), emitting
/// the given diagnostics (when \p Complain is set) if there is none or
/// the choice is ambiguous.
UnresolvedSetIterator
getMostSpecialized(UnresolvedSetIterator SBegin, UnresolvedSetIterator SEnd,
TemplateSpecCandidateSet &FailedCandidates,
SourceLocation Loc,
const PartialDiagnostic &NoneDiag,
const PartialDiagnostic &AmbigDiag,
const PartialDiagnostic &CandidateDiag,
bool Complain = true, QualType TargetType = QualType());
/// Determine which of two class template partial specializations is more
/// specialized.
ClassTemplatePartialSpecializationDecl *
getMoreSpecializedPartialSpecialization(
ClassTemplatePartialSpecializationDecl *PS1,
ClassTemplatePartialSpecializationDecl *PS2,
SourceLocation Loc);
/// Determine whether the given class template partial specialization is
/// more specialized than the primary template.
bool isMoreSpecializedThanPrimary(ClassTemplatePartialSpecializationDecl *T,
sema::TemplateDeductionInfo &Info);
/// Determine which of two variable template partial specializations is
/// more specialized.
VarTemplatePartialSpecializationDecl *getMoreSpecializedPartialSpecialization(
VarTemplatePartialSpecializationDecl *PS1,
VarTemplatePartialSpecializationDecl *PS2, SourceLocation Loc);
/// Determine whether the given variable template partial specialization
/// is more specialized than the primary template.
bool isMoreSpecializedThanPrimary(VarTemplatePartialSpecializationDecl *T,
sema::TemplateDeductionInfo &Info);
bool isTemplateTemplateParameterAtLeastAsSpecializedAs(
TemplateParameterList *PParam, TemplateDecl *AArg, SourceLocation Loc);
/// Record in \p Used which template parameters at depth \p Depth are
/// referenced by the expression \p E.
void MarkUsedTemplateParameters(const Expr *E, bool OnlyDeduced,
unsigned Depth, llvm::SmallBitVector &Used);
/// Record in \p Used which template parameters at depth \p Depth are
/// referenced by the given template argument list.
void MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs,
bool OnlyDeduced,
unsigned Depth,
llvm::SmallBitVector &Used);
/// Record in \p Deduced which template parameters of
/// \p FunctionTemplate can be deduced; forwards to the static overload
/// with this semantic analysis object's ASTContext.
void MarkDeducedTemplateParameters(
const FunctionTemplateDecl *FunctionTemplate,
llvm::SmallBitVector &Deduced) {
return MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced);
}
/// As above, with an explicitly-provided ASTContext.
static void MarkDeducedTemplateParameters(ASTContext &Ctx,
const FunctionTemplateDecl *FunctionTemplate,
llvm::SmallBitVector &Deduced);
//===--------------------------------------------------------------------===//
// C++ Template Instantiation
//
/// Retrieve the set of template argument lists that apply when
/// instantiating the declaration \p D, optionally taking \p Innermost as
/// the innermost argument list.
/// NOTE(review): the precise effects of \p RelativeToPrimary and
/// \p Pattern are not visible here -- see the implementation.
MultiLevelTemplateArgumentList
getTemplateInstantiationArgs(NamedDecl *D,
const TemplateArgumentList *Innermost = nullptr,
bool RelativeToPrimary = false,
const FunctionDecl *Pattern = nullptr);
/// A context in which code is being synthesized (where a source location
/// alone is not sufficient to identify the context). This covers template
/// instantiation and various forms of implicitly-generated functions.
struct CodeSynthesisContext {
/// The kind of template instantiation we are performing
enum SynthesisKind {
/// We are instantiating a template declaration. The entity is
/// the declaration we're instantiating (e.g., a CXXRecordDecl).
TemplateInstantiation,
/// We are instantiating a default argument for a template
/// parameter. The Entity is the template parameter whose argument is
/// being instantiated, the Template is the template, and the
/// TemplateArgs/NumTemplateArguments provide the template arguments as
/// specified.
DefaultTemplateArgumentInstantiation,
/// We are instantiating a default argument for a function.
/// The Entity is the ParmVarDecl, and TemplateArgs/NumTemplateArgs
/// provides the template arguments as specified.
DefaultFunctionArgumentInstantiation,
/// We are substituting explicit template arguments provided for
/// a function template. The entity is a FunctionTemplateDecl.
ExplicitTemplateArgumentSubstitution,
/// We are substituting template arguments determined as part of
/// template argument deduction for either a class template
/// partial specialization or a function template. The
/// Entity is either a {Class|Var}TemplatePartialSpecializationDecl or
/// a TemplateDecl.
DeducedTemplateArgumentSubstitution,
/// We are substituting prior template arguments into a new
/// template parameter. The template parameter itself is either a
/// NonTypeTemplateParmDecl or a TemplateTemplateParmDecl.
PriorTemplateArgumentSubstitution,
/// We are checking the validity of a default template argument that
/// has been used when naming a template-id.
DefaultTemplateArgumentChecking,
/// We are computing the exception specification for a defaulted special
/// member function.
ExceptionSpecEvaluation,
/// We are instantiating the exception specification for a function
/// template which was deferred until it was needed.
ExceptionSpecInstantiation,
/// We are instantiating a requirement of a requires expression.
RequirementInstantiation,
/// We are checking the satisfaction of a nested requirement of a requires
/// expression.
NestedRequirementConstraintsCheck,
/// We are declaring an implicit special member function.
DeclaringSpecialMember,
/// We are declaring an implicit 'operator==' for a defaulted
/// 'operator<=>'.
DeclaringImplicitEqualityComparison,
/// We are defining a synthesized function (such as a defaulted special
/// member).
DefiningSynthesizedFunction,
// We are checking the constraints associated with a constrained entity or
// the constraint expression of a concept. This includes the checks that
// atomic constraints have the type 'bool' and that they can be constant
// evaluated.
ConstraintsCheck,
// We are substituting template arguments into a constraint expression.
ConstraintSubstitution,
// We are normalizing a constraint expression.
ConstraintNormalization,
// We are substituting into the parameter mapping of an atomic constraint
// during normalization.
ParameterMappingSubstitution,
/// We are rewriting a comparison operator in terms of an operator<=>.
RewritingOperatorAsSpaceship,
/// We are initializing a structured binding.
InitializingStructuredBinding,
/// We are marking a class as __dllexport.
MarkingClassDllexported,
/// Added for Template instantiation observation.
/// Memoization means we are _not_ instantiating a template because
/// it is already instantiated (but we entered a context where we
/// would have had to if it was not already instantiated).
Memoization
} Kind;
/// Was the enclosing context a non-instantiation SFINAE context?
bool SavedInNonInstantiationSFINAEContext;
/// The point of instantiation or synthesis within the source code.
SourceLocation PointOfInstantiation;
/// The entity that is being synthesized.
Decl *Entity;
/// The template (or partial specialization) in which we are
/// performing the instantiation, for substitutions of prior template
/// arguments.
NamedDecl *Template;
/// The list of template arguments we are substituting, if they
/// are not part of the entity.
const TemplateArgument *TemplateArgs;
// FIXME: Wrap this union around more members, or perhaps store the
// kind-specific members in the RAII object owning the context.
union {
/// The number of template arguments in TemplateArgs.
unsigned NumTemplateArgs;
/// The special member being declared or defined.
CXXSpecialMember SpecialMember;
};
ArrayRef<TemplateArgument> template_arguments() const {
assert(Kind != DeclaringSpecialMember);
return {TemplateArgs, NumTemplateArgs};
}
/// The template deduction info object associated with the
/// substitution or checking of explicit or deduced template arguments.
sema::TemplateDeductionInfo *DeductionInfo;
/// The source range that covers the construct that causes
/// the instantiation, e.g., the template-id that causes a class
/// template instantiation.
SourceRange InstantiationRange;
CodeSynthesisContext()
: Kind(TemplateInstantiation),
SavedInNonInstantiationSFINAEContext(false), Entity(nullptr),
Template(nullptr), TemplateArgs(nullptr), NumTemplateArgs(0),
DeductionInfo(nullptr) {}
/// Determines whether this template is an actual instantiation
/// that should be counted toward the maximum instantiation depth.
bool isInstantiationRecord() const;
};
/// List of active code synthesis contexts.
///
/// This vector is treated as a stack. As synthesis of one entity requires
/// synthesis of another, additional contexts are pushed onto the stack.
SmallVector<CodeSynthesisContext, 16> CodeSynthesisContexts;
/// Specializations whose definitions are currently being instantiated.
llvm::DenseSet<std::pair<Decl *, unsigned>> InstantiatingSpecializations;
/// Non-dependent types used in templates that have already been instantiated
/// by some template instantiation.
llvm::DenseSet<QualType> InstantiatedNonDependentTypes;
/// Extra modules inspected when performing a lookup during a template
/// instantiation. Computed lazily.
SmallVector<Module*, 16> CodeSynthesisContextLookupModules;
/// Cache of additional modules that should be used for name lookup
/// within the current template instantiation. Computed lazily; use
/// getLookupModules() to get a complete set.
llvm::DenseSet<Module*> LookupModulesCache;
/// Get the set of additional modules that should be checked during
/// name lookup. A module and its imports become visible when instantiating a
/// template defined within it.
llvm::DenseSet<Module*> &getLookupModules();
/// Map from the most recent declaration of a namespace to the most
/// recent visible declaration of that namespace.
llvm::DenseMap<NamedDecl*, NamedDecl*> VisibleNamespaceCache;
/// Whether we are in a SFINAE context that is not associated with
/// template instantiation.
///
/// This is used when setting up a SFINAE trap (\c see SFINAETrap) outside
/// of a template instantiation or template argument deduction.
bool InNonInstantiationSFINAEContext;
/// The number of \p CodeSynthesisContexts that are not template
/// instantiations and, therefore, should not be counted as part of the
/// instantiation depth.
///
/// When the instantiation depth reaches the user-configurable limit
/// \p LangOptions::InstantiationDepth we will abort instantiation.
// FIXME: Should we have a similar limit for other forms of synthesis?
unsigned NonInstantiationEntries;
/// The depth of the context stack at the point when the most recent
/// error or warning was produced.
///
/// This value is used to suppress printing of redundant context stacks
/// when there are multiple errors or warnings in the same instantiation.
// FIXME: Does this belong in Sema? It's tough to implement it anywhere else.
unsigned LastEmittedCodeSynthesisContextDepth = 0;
/// The template instantiation callbacks to trace or track
/// instantiations (objects can be chained).
///
/// These callbacks are used to print, trace or track template
/// instantiations as they are being constructed.
std::vector<std::unique_ptr<TemplateInstantiationCallback>>
TemplateInstCallbacks;
/// The current index into pack expansion arguments that will be
/// used for substitution of parameter packs.
///
/// The pack expansion index will be -1 to indicate that parameter packs
/// should be instantiated as themselves. Otherwise, the index specifies
/// which argument within the parameter pack will be used for substitution.
int ArgumentPackSubstitutionIndex;
/// RAII object used to change the argument pack substitution index
/// within a \c Sema object.
///
/// See \c ArgumentPackSubstitutionIndex for more information.
class ArgumentPackSubstitutionIndexRAII {
Sema &Self;
int OldSubstitutionIndex;
public:
ArgumentPackSubstitutionIndexRAII(Sema &Self, int NewSubstitutionIndex)
: Self(Self), OldSubstitutionIndex(Self.ArgumentPackSubstitutionIndex) {
Self.ArgumentPackSubstitutionIndex = NewSubstitutionIndex;
}
~ArgumentPackSubstitutionIndexRAII() {
Self.ArgumentPackSubstitutionIndex = OldSubstitutionIndex;
}
};
friend class ArgumentPackSubstitutionRAII;
/// For each declaration that involved template argument deduction, the
/// set of diagnostics that were suppressed during that template argument
/// deduction.
///
/// FIXME: Serialize this structure to the AST file.
typedef llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> >
SuppressedDiagnosticsMap;
SuppressedDiagnosticsMap SuppressedDiagnostics;
/// A stack object to be created when performing template
/// instantiation.
///
/// Construction of an object of type \c InstantiatingTemplate
/// pushes the current instantiation onto the stack of active
/// instantiations. If the size of this stack exceeds the maximum
/// number of recursive template instantiations, construction
/// produces an error and isInvalid() returns true.
///
/// Destruction of this object will pop the named instantiation off
/// the stack.
struct InstantiatingTemplate {
  /// Note that we are instantiating a class template,
  /// function template, variable template, alias template,
  /// or a member thereof.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        Decl *Entity,
                        SourceRange InstantiationRange = SourceRange());
  /// Tag type used to select the exception-specification constructor below.
  struct ExceptionSpecification {};
  /// Note that we are instantiating an exception specification
  /// of a function template.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        FunctionDecl *Entity, ExceptionSpecification,
                        SourceRange InstantiationRange = SourceRange());
  /// Note that we are instantiating a default argument in a
  /// template-id.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        TemplateParameter Param, TemplateDecl *Template,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        SourceRange InstantiationRange = SourceRange());
  /// Note that we are substituting either explicitly-specified or
  /// deduced template arguments during function template argument deduction.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        FunctionTemplateDecl *FunctionTemplate,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        CodeSynthesisContext::SynthesisKind Kind,
                        sema::TemplateDeductionInfo &DeductionInfo,
                        SourceRange InstantiationRange = SourceRange());
  /// Note that we are instantiating as part of template
  /// argument deduction for a class template declaration.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        TemplateDecl *Template,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        sema::TemplateDeductionInfo &DeductionInfo,
                        SourceRange InstantiationRange = SourceRange());
  /// Note that we are instantiating as part of template
  /// argument deduction for a class template partial
  /// specialization.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        ClassTemplatePartialSpecializationDecl *PartialSpec,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        sema::TemplateDeductionInfo &DeductionInfo,
                        SourceRange InstantiationRange = SourceRange());
  /// Note that we are instantiating as part of template
  /// argument deduction for a variable template partial
  /// specialization.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        VarTemplatePartialSpecializationDecl *PartialSpec,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        sema::TemplateDeductionInfo &DeductionInfo,
                        SourceRange InstantiationRange = SourceRange());
  /// Note that we are instantiating a default argument for a function
  /// parameter.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        ParmVarDecl *Param,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        SourceRange InstantiationRange = SourceRange());
  /// Note that we are substituting prior template arguments into a
  /// non-type parameter.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        NamedDecl *Template,
                        NonTypeTemplateParmDecl *Param,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        SourceRange InstantiationRange);
  /// Note that we are substituting prior template arguments into a
  /// template template parameter.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        NamedDecl *Template,
                        TemplateTemplateParmDecl *Param,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        SourceRange InstantiationRange);
  /// Note that we are checking the default template argument
  /// against the template parameter for a given template-id.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        TemplateDecl *Template,
                        NamedDecl *Param,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        SourceRange InstantiationRange);
  /// Tag type used to select the constraints-checking constructors below.
  struct ConstraintsCheck {};
  /// \brief Note that we are checking the constraints associated with some
  /// constrained entity (a concept declaration or a template with associated
  /// constraints).
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        ConstraintsCheck, NamedDecl *Template,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        SourceRange InstantiationRange);
  /// Tag type used to select the constraint-substitution constructor below.
  struct ConstraintSubstitution {};
  /// \brief Note that we are checking a constraint expression associated
  /// with a template declaration or as part of the satisfaction check of a
  /// concept.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        ConstraintSubstitution, NamedDecl *Template,
                        sema::TemplateDeductionInfo &DeductionInfo,
                        SourceRange InstantiationRange);
  /// Tag type used to select the constraint-normalization constructor below.
  struct ConstraintNormalization {};
  /// \brief Note that we are normalizing a constraint expression.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        ConstraintNormalization, NamedDecl *Template,
                        SourceRange InstantiationRange);
  /// Tag type used to select the parameter-mapping constructor below.
  struct ParameterMappingSubstitution {};
  /// \brief Note that we are substituting into the parameter mapping of an
  /// atomic constraint during constraint normalization.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        ParameterMappingSubstitution, NamedDecl *Template,
                        SourceRange InstantiationRange);
  /// \brief Note that we are substituting template arguments into a part of
  /// a requirement of a requires expression.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        concepts::Requirement *Req,
                        sema::TemplateDeductionInfo &DeductionInfo,
                        SourceRange InstantiationRange = SourceRange());
  /// \brief Note that we are checking the satisfaction of the constraint
  /// expression inside of a nested requirement.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        concepts::NestedRequirement *Req, ConstraintsCheck,
                        SourceRange InstantiationRange = SourceRange());
  /// Note that we have finished instantiating this template.
  void Clear();
  ~InstantiatingTemplate() { Clear(); }
  /// Determines whether we have exceeded the maximum
  /// recursive template instantiations.
  bool isInvalid() const { return Invalid; }
  /// Determine whether we are already instantiating this
  /// specialization in some surrounding active instantiation.
  bool isAlreadyInstantiating() const { return AlreadyInstantiating; }

private:
  Sema &SemaRef;
  bool Invalid;
  bool AlreadyInstantiating;
  bool CheckInstantiationDepth(SourceLocation PointOfInstantiation,
                               SourceRange InstantiationRange);
  // Internal constructor taking an explicit synthesis kind; shared by the
  // public constructors above.
  InstantiatingTemplate(
      Sema &SemaRef, CodeSynthesisContext::SynthesisKind Kind,
      SourceLocation PointOfInstantiation, SourceRange InstantiationRange,
      Decl *Entity, NamedDecl *Template = nullptr,
      ArrayRef<TemplateArgument> TemplateArgs = None,
      sema::TemplateDeductionInfo *DeductionInfo = nullptr);
  // Non-copyable: each object corresponds to exactly one stack entry.
  InstantiatingTemplate(const InstantiatingTemplate&) = delete;
  InstantiatingTemplate&
  operator=(const InstantiatingTemplate&) = delete;
};
void pushCodeSynthesisContext(CodeSynthesisContext Ctx);
void popCodeSynthesisContext();
/// Determine whether we are currently performing template instantiation.
bool inTemplateInstantiation() const {
  // The stack holds a genuine instantiation iff it has more entries than
  // the count of non-instantiation contexts.
  return NonInstantiationEntries < CodeSynthesisContexts.size();
}
/// Print the active code-synthesis context stack, unless it was already
/// emitted at this depth, then note any pragma-attribute instantiation
/// point that is in effect.
void PrintContextStack() {
  const auto Depth = CodeSynthesisContexts.size();
  if (Depth != 0 && Depth != LastEmittedCodeSynthesisContextDepth) {
    PrintInstantiationStack();
    // Remember the depth so repeated diagnostics in the same context do
    // not re-print an identical stack.
    LastEmittedCodeSynthesisContextDepth = Depth;
  }
  if (PragmaAttributeCurrentTargetDecl)
    PrintPragmaAttributeInstantiationPoint();
}
void PrintInstantiationStack();
void PrintPragmaAttributeInstantiationPoint();
/// Determines whether we are currently in a context where
/// template argument substitution failures are not considered
/// errors.
///
/// \returns An empty \c Optional if we're not in a SFINAE context.
/// Otherwise, contains a pointer that, if non-NULL, contains the nearest
/// template-deduction context object, which can be used to capture
/// diagnostics that will be suppressed.
Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const;
/// Determines whether we are currently in a context that
/// is not evaluated as per C++ [expr] p5.
bool isUnevaluatedContext() const {
  assert(!ExprEvalContexts.empty() &&
         "Must be in an expression evaluation context");
  const auto &Innermost = ExprEvalContexts.back();
  return Innermost.isUnevaluated();
}
/// RAII class used to determine whether SFINAE has
/// trapped any errors that occur during template argument
/// deduction.
class SFINAETrap {
  Sema &SemaRef;
  // Sema-wide state captured at construction and restored at destruction.
  unsigned PrevSFINAEErrors;
  bool PrevInNonInstantiationSFINAEContext;
  bool PrevAccessCheckingSFINAE;
  bool PrevLastDiagnosticIgnored;
public:
  explicit SFINAETrap(Sema &SemaRef, bool AccessCheckingSFINAE = false)
    : SemaRef(SemaRef), PrevSFINAEErrors(SemaRef.NumSFINAEErrors),
      PrevInNonInstantiationSFINAEContext(
                                      SemaRef.InNonInstantiationSFINAEContext),
      PrevAccessCheckingSFINAE(SemaRef.AccessCheckingSFINAE),
      PrevLastDiagnosticIgnored(
          SemaRef.getDiagnostics().isLastDiagnosticIgnored())
  {
    // If no SFINAE context is active, mark that this trap was set up
    // outside of any instantiation or deduction.
    if (!SemaRef.isSFINAEContext())
      SemaRef.InNonInstantiationSFINAEContext = true;
    SemaRef.AccessCheckingSFINAE = AccessCheckingSFINAE;
  }
  ~SFINAETrap() {
    // Restore every piece of saved state; errors counted while the trap
    // was active are dropped from NumSFINAEErrors.
    SemaRef.NumSFINAEErrors = PrevSFINAEErrors;
    SemaRef.InNonInstantiationSFINAEContext
      = PrevInNonInstantiationSFINAEContext;
    SemaRef.AccessCheckingSFINAE = PrevAccessCheckingSFINAE;
    SemaRef.getDiagnostics().setLastDiagnosticIgnored(
        PrevLastDiagnosticIgnored);
  }
  /// Determine whether any SFINAE errors have been trapped.
  bool hasErrorOccurred() const {
    return SemaRef.NumSFINAEErrors > PrevSFINAEErrors;
  }
};
/// RAII class used to indicate that we are performing provisional
/// semantic analysis to determine the validity of a construct, so
/// typo-correction and diagnostics in the immediate context (not within
/// implicitly-instantiated templates) should be suppressed.
class TentativeAnalysisScope {
  Sema &SemaRef;
  // FIXME: Using a SFINAETrap for this is a hack.
  SFINAETrap Trap;
  // Saved value of Sema::DisableTypoCorrection, restored on destruction.
  bool PrevDisableTypoCorrection;
public:
  explicit TentativeAnalysisScope(Sema &SemaRef)
      : SemaRef(SemaRef), Trap(SemaRef, /*AccessCheckingSFINAE=*/true),
        PrevDisableTypoCorrection(SemaRef.DisableTypoCorrection) {
    SemaRef.DisableTypoCorrection = true;
  }
  ~TentativeAnalysisScope() {
    SemaRef.DisableTypoCorrection = PrevDisableTypoCorrection;
  }
};
/// The current instantiation scope used to store local
/// variables.
LocalInstantiationScope *CurrentInstantiationScope;
/// Tracks whether we are in a context where typo correction is
/// disabled.
bool DisableTypoCorrection;
/// The number of typos corrected by CorrectTypo.
unsigned TyposCorrected;
typedef llvm::SmallSet<SourceLocation, 2> SrcLocSet;
typedef llvm::DenseMap<IdentifierInfo *, SrcLocSet> IdentifierSourceLocations;
/// A cache containing identifiers for which typo correction failed and
/// their locations, so that repeated attempts to correct an identifier in a
/// given location are ignored if typo correction already failed for it.
IdentifierSourceLocations TypoCorrectionFailures;
/// Worker object for performing CFG-based warnings.
sema::AnalysisBasedWarnings AnalysisWarnings;
threadSafety::BeforeSet *ThreadSafetyDeclCache;
/// An entity for which implicit template instantiation is required.
///
/// The source location associated with the declaration is the first place in
/// the source code where the declaration was "used". It is not necessarily
/// the point of instantiation (which will be either before or after the
/// namespace-scope declaration that triggered this implicit instantiation),
/// However, it is the location that diagnostics should generally refer to,
/// because users will need to know what code triggered the instantiation.
typedef std::pair<ValueDecl *, SourceLocation> PendingImplicitInstantiation;
/// The queue of implicit template instantiations that are required
/// but have not yet been performed.
std::deque<PendingImplicitInstantiation> PendingInstantiations;
/// Queue of implicit template instantiations that cannot be performed
/// eagerly.
SmallVector<PendingImplicitInstantiation, 1> LateParsedInstantiations;
/// RAII scope that, when enabled, sets aside the global pending
/// instantiations and vtable uses so work triggered inside the scope
/// accumulates separately; perform() executes that work, and the
/// destructor restores the saved sets.
class GlobalEagerInstantiationScope {
public:
  GlobalEagerInstantiationScope(Sema &S, bool Enabled)
      : S(S), Enabled(Enabled) {
    if (!Enabled) return;
    // Stash the currently pending work; the Sema-wide queues start empty
    // for the duration of this scope.
    SavedPendingInstantiations.swap(S.PendingInstantiations);
    SavedVTableUses.swap(S.VTableUses);
  }
  /// Define the vtables and perform the instantiations accumulated since
  /// this scope was entered.
  void perform() {
    if (Enabled) {
      S.DefineUsedVTables();
      S.PerformPendingInstantiations();
    }
  }
  ~GlobalEagerInstantiationScope() {
    if (!Enabled) return;
    // Restore the set of pending vtables.
    assert(S.VTableUses.empty() &&
           "VTableUses should be empty before it is discarded.");
    S.VTableUses.swap(SavedVTableUses);
    // Restore the set of pending implicit instantiations.
    if (S.TUKind != TU_Prefix || !S.LangOpts.PCHInstantiateTemplates) {
      assert(S.PendingInstantiations.empty() &&
             "PendingInstantiations should be empty before it is discarded.");
      S.PendingInstantiations.swap(SavedPendingInstantiations);
    } else {
      // Template instantiations in the PCH may be delayed until the TU.
      // Keep whatever is still pending and append the saved entries after it.
      S.PendingInstantiations.swap(SavedPendingInstantiations);
      S.PendingInstantiations.insert(S.PendingInstantiations.end(),
                                     SavedPendingInstantiations.begin(),
                                     SavedPendingInstantiations.end());
    }
  }

private:
  Sema &S;
  SmallVector<VTableUse, 16> SavedVTableUses;
  std::deque<PendingImplicitInstantiation> SavedPendingInstantiations;
  bool Enabled;
};
/// The queue of implicit template instantiations that are required
/// and must be performed within the current local scope.
///
/// This queue is only used for member functions of local classes in
/// templates, which must be instantiated in the same scope as their
/// enclosing function, so that they can reference function-local
/// types, static variables, enumerators, etc.
std::deque<PendingImplicitInstantiation> PendingLocalImplicitInstantiations;
/// RAII scope that holds back pending local implicit instantiations so
/// that work queued inside the scope accumulates separately; perform()
/// runs it, and the destructor reinstates the saved queue.
class LocalEagerInstantiationScope {
public:
  LocalEagerInstantiationScope(Sema &S) : S(S) {
    // Move any already-pending local instantiations aside; the Sema-wide
    // queue starts empty for the duration of this scope.
    S.PendingLocalImplicitInstantiations.swap(SavedInstantiations);
  }

  /// Perform the local instantiations accumulated while this scope was
  /// active.
  void perform() { S.PerformPendingInstantiations(/*LocalOnly=*/true); }

  ~LocalEagerInstantiationScope() {
    assert(S.PendingLocalImplicitInstantiations.empty() &&
           "there shouldn't be any pending local implicit instantiations");
    S.PendingLocalImplicitInstantiations.swap(SavedInstantiations);
  }

private:
  Sema &S;
  std::deque<PendingImplicitInstantiation> SavedInstantiations;
};
/// A helper class for building up ExtParameterInfos.
class ExtParameterInfoBuilder {
  SmallVector<FunctionProtoType::ExtParameterInfo, 16> Infos;
  // True once any non-default ExtParameterInfo has been recorded.
  bool HasInteresting = false;

public:
  /// Set the ExtParameterInfo for the parameter at the given index;
  /// indices must be supplied in strictly increasing order.
  void set(unsigned index, FunctionProtoType::ExtParameterInfo info) {
    assert(Infos.size() <= index);
    Infos.resize(index + 1);
    Infos[index] = info;
    if (info != FunctionProtoType::ExtParameterInfo())
      HasInteresting = true;
  }

  /// Return a pointer (suitable for setting in an ExtProtoInfo) to the
  /// ExtParameterInfo array we've built up, or null when every recorded
  /// entry was the default.
  const FunctionProtoType::ExtParameterInfo *
  getPointerOrNull(unsigned numParams) {
    if (HasInteresting) {
      Infos.resize(numParams);
      return Infos.data();
    }
    return nullptr;
  }
};
void PerformPendingInstantiations(bool LocalOnly = false);
TypeSourceInfo *SubstType(TypeSourceInfo *T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity,
bool AllowDeducedTST = false);
QualType SubstType(QualType T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity);
TypeSourceInfo *SubstType(TypeLoc TL,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity);
TypeSourceInfo *SubstFunctionDeclType(TypeSourceInfo *T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc,
DeclarationName Entity,
CXXRecordDecl *ThisContext,
Qualifiers ThisTypeQuals);
void SubstExceptionSpec(FunctionDecl *New, const FunctionProtoType *Proto,
const MultiLevelTemplateArgumentList &Args);
bool SubstExceptionSpec(SourceLocation Loc,
FunctionProtoType::ExceptionSpecInfo &ESI,
SmallVectorImpl<QualType> &ExceptionStorage,
const MultiLevelTemplateArgumentList &Args);
ParmVarDecl *SubstParmVarDecl(ParmVarDecl *D,
const MultiLevelTemplateArgumentList &TemplateArgs,
int indexAdjustment,
Optional<unsigned> NumExpansions,
bool ExpectParameterPack);
bool SubstParmTypes(SourceLocation Loc, ArrayRef<ParmVarDecl *> Params,
const FunctionProtoType::ExtParameterInfo *ExtParamInfos,
const MultiLevelTemplateArgumentList &TemplateArgs,
SmallVectorImpl<QualType> &ParamTypes,
SmallVectorImpl<ParmVarDecl *> *OutParams,
ExtParameterInfoBuilder &ParamInfos);
ExprResult SubstExpr(Expr *E,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Substitute the given template arguments into a list of
/// expressions, expanding pack expansions if required.
///
/// \param Exprs The list of expressions to substitute into.
///
/// \param IsCall Whether this is some form of call, in which case
/// default arguments will be dropped.
///
/// \param TemplateArgs The set of template arguments to substitute.
///
/// \param Outputs Will receive all of the substituted arguments.
///
/// \returns true if an error occurred, false otherwise.
bool SubstExprs(ArrayRef<Expr *> Exprs, bool IsCall,
const MultiLevelTemplateArgumentList &TemplateArgs,
SmallVectorImpl<Expr *> &Outputs);
StmtResult SubstStmt(Stmt *S,
const MultiLevelTemplateArgumentList &TemplateArgs);
TemplateParameterList *
SubstTemplateParams(TemplateParameterList *Params, DeclContext *Owner,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool
SubstTemplateArguments(ArrayRef<TemplateArgumentLoc> Args,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateArgumentListInfo &Outputs);
Decl *SubstDecl(Decl *D, DeclContext *Owner,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Substitute the name and return type of a defaulted 'operator<=>' to form
/// an implicit 'operator=='.
FunctionDecl *SubstSpaceshipAsEqualEqual(CXXRecordDecl *RD,
FunctionDecl *Spaceship);
ExprResult SubstInitializer(Expr *E,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool CXXDirectInit);
bool
SubstBaseSpecifiers(CXXRecordDecl *Instantiation,
CXXRecordDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool
InstantiateClass(SourceLocation PointOfInstantiation,
CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK,
bool Complain = true);
bool InstantiateEnum(SourceLocation PointOfInstantiation,
EnumDecl *Instantiation, EnumDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK);
bool InstantiateInClassInitializer(
SourceLocation PointOfInstantiation, FieldDecl *Instantiation,
FieldDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs);
/// Records an attribute whose instantiation is deferred ("late"),
/// together with the context needed to instantiate it later.
struct LateInstantiatedAttribute {
  const Attr *TmplAttr;           // Attribute from the template pattern.
  LocalInstantiationScope *Scope; // Scope to use when instantiating it.
  Decl *NewDecl;                  // Instantiated declaration it applies to.
  LateInstantiatedAttribute(const Attr *A, LocalInstantiationScope *S,
                            Decl *D)
    : TmplAttr(A), Scope(S), NewDecl(D)
  { }
};
typedef SmallVector<LateInstantiatedAttribute, 16> LateInstantiatedAttrVec;
void InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs,
const Decl *Pattern, Decl *Inst,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *OuterMostScope = nullptr);
void
InstantiateAttrsForDecl(const MultiLevelTemplateArgumentList &TemplateArgs,
const Decl *Pattern, Decl *Inst,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *OuterMostScope = nullptr);
bool usesPartialOrExplicitSpecialization(
SourceLocation Loc, ClassTemplateSpecializationDecl *ClassTemplateSpec);
bool
InstantiateClassTemplateSpecialization(SourceLocation PointOfInstantiation,
ClassTemplateSpecializationDecl *ClassTemplateSpec,
TemplateSpecializationKind TSK,
bool Complain = true);
void InstantiateClassMembers(SourceLocation PointOfInstantiation,
CXXRecordDecl *Instantiation,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK);
void InstantiateClassTemplateSpecializationMembers(
SourceLocation PointOfInstantiation,
ClassTemplateSpecializationDecl *ClassTemplateSpec,
TemplateSpecializationKind TSK);
NestedNameSpecifierLoc
SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS,
const MultiLevelTemplateArgumentList &TemplateArgs);
DeclarationNameInfo
SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo,
const MultiLevelTemplateArgumentList &TemplateArgs);
TemplateName
SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name,
SourceLocation Loc,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool Subst(const TemplateArgumentLoc *Args, unsigned NumArgs,
TemplateArgumentListInfo &Result,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool InstantiateDefaultArgument(SourceLocation CallLoc, FunctionDecl *FD,
ParmVarDecl *Param);
void InstantiateExceptionSpec(SourceLocation PointOfInstantiation,
FunctionDecl *Function);
bool CheckInstantiatedFunctionTemplateConstraints(
SourceLocation PointOfInstantiation, FunctionDecl *Decl,
ArrayRef<TemplateArgument> TemplateArgs,
ConstraintSatisfaction &Satisfaction);
FunctionDecl *InstantiateFunctionDeclaration(FunctionTemplateDecl *FTD,
const TemplateArgumentList *Args,
SourceLocation Loc);
void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
FunctionDecl *Function,
bool Recursive = false,
bool DefinitionRequired = false,
bool AtEndOfTU = false);
VarTemplateSpecializationDecl *BuildVarTemplateInstantiation(
VarTemplateDecl *VarTemplate, VarDecl *FromVar,
const TemplateArgumentList &TemplateArgList,
const TemplateArgumentListInfo &TemplateArgsInfo,
SmallVectorImpl<TemplateArgument> &Converted,
SourceLocation PointOfInstantiation, void *InsertPos,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *StartingScope = nullptr);
VarTemplateSpecializationDecl *CompleteVarTemplateSpecializationDecl(
VarTemplateSpecializationDecl *VarSpec, VarDecl *PatternDecl,
const MultiLevelTemplateArgumentList &TemplateArgs);
void
BuildVariableInstantiation(VarDecl *NewVar, VarDecl *OldVar,
const MultiLevelTemplateArgumentList &TemplateArgs,
LateInstantiatedAttrVec *LateAttrs,
DeclContext *Owner,
LocalInstantiationScope *StartingScope,
bool InstantiatingVarTemplate = false,
VarTemplateSpecializationDecl *PrevVTSD = nullptr);
void InstantiateVariableInitializer(
VarDecl *Var, VarDecl *OldVar,
const MultiLevelTemplateArgumentList &TemplateArgs);
void InstantiateVariableDefinition(SourceLocation PointOfInstantiation,
VarDecl *Var, bool Recursive = false,
bool DefinitionRequired = false,
bool AtEndOfTU = false);
void InstantiateMemInitializers(CXXConstructorDecl *New,
const CXXConstructorDecl *Tmpl,
const MultiLevelTemplateArgumentList &TemplateArgs);
NamedDecl *FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool FindingInstantiatedContext = false);
DeclContext *FindInstantiatedContext(SourceLocation Loc, DeclContext *DC,
const MultiLevelTemplateArgumentList &TemplateArgs);
// Objective-C declarations.
/// Kinds of Objective-C container contexts; OCK_None (-1) indicates no
/// container.
enum ObjCContainerKind {
  OCK_None = -1,
  OCK_Interface = 0,
  OCK_Protocol,
  OCK_Category,
  OCK_ClassExtension,
  OCK_Implementation,
  OCK_CategoryImplementation
};
ObjCContainerKind getObjCContainerKind() const;
DeclResult actOnObjCTypeParam(Scope *S,
ObjCTypeParamVariance variance,
SourceLocation varianceLoc,
unsigned index,
IdentifierInfo *paramName,
SourceLocation paramLoc,
SourceLocation colonLoc,
ParsedType typeBound);
ObjCTypeParamList *actOnObjCTypeParamList(Scope *S, SourceLocation lAngleLoc,
ArrayRef<Decl *> typeParams,
SourceLocation rAngleLoc);
void popObjCTypeParamList(Scope *S, ObjCTypeParamList *typeParamList);
Decl *ActOnStartClassInterface(
Scope *S, SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName,
SourceLocation ClassLoc, ObjCTypeParamList *typeParamList,
IdentifierInfo *SuperName, SourceLocation SuperLoc,
ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange,
Decl *const *ProtoRefs, unsigned NumProtoRefs,
const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
const ParsedAttributesView &AttrList);
void ActOnSuperClassOfClassInterface(Scope *S,
SourceLocation AtInterfaceLoc,
ObjCInterfaceDecl *IDecl,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *SuperName,
SourceLocation SuperLoc,
ArrayRef<ParsedType> SuperTypeArgs,
SourceRange SuperTypeArgsRange);
void ActOnTypedefedProtocols(SmallVectorImpl<Decl *> &ProtocolRefs,
SmallVectorImpl<SourceLocation> &ProtocolLocs,
IdentifierInfo *SuperName,
SourceLocation SuperLoc);
Decl *ActOnCompatibilityAlias(
SourceLocation AtCompatibilityAliasLoc,
IdentifierInfo *AliasName, SourceLocation AliasLocation,
IdentifierInfo *ClassName, SourceLocation ClassLocation);
bool CheckForwardProtocolDeclarationForCircularDependency(
IdentifierInfo *PName,
SourceLocation &PLoc, SourceLocation PrevLoc,
const ObjCList<ObjCProtocolDecl> &PList);
Decl *ActOnStartProtocolInterface(
SourceLocation AtProtoInterfaceLoc, IdentifierInfo *ProtocolName,
SourceLocation ProtocolLoc, Decl *const *ProtoRefNames,
unsigned NumProtoRefs, const SourceLocation *ProtoLocs,
SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList);
Decl *ActOnStartCategoryInterface(
SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName,
SourceLocation ClassLoc, ObjCTypeParamList *typeParamList,
IdentifierInfo *CategoryName, SourceLocation CategoryLoc,
Decl *const *ProtoRefs, unsigned NumProtoRefs,
const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
const ParsedAttributesView &AttrList);
Decl *ActOnStartClassImplementation(SourceLocation AtClassImplLoc,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *SuperClassname,
SourceLocation SuperClassLoc,
const ParsedAttributesView &AttrList);
Decl *ActOnStartCategoryImplementation(SourceLocation AtCatImplLoc,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *CatName,
SourceLocation CatLoc,
const ParsedAttributesView &AttrList);
DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl,
ArrayRef<Decl *> Decls);
DeclGroupPtrTy ActOnForwardClassDeclaration(SourceLocation Loc,
IdentifierInfo **IdentList,
SourceLocation *IdentLocs,
ArrayRef<ObjCTypeParamList *> TypeParamLists,
unsigned NumElts);
DeclGroupPtrTy
ActOnForwardProtocolDeclaration(SourceLocation AtProtoclLoc,
ArrayRef<IdentifierLocPair> IdentList,
const ParsedAttributesView &attrList);
void FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer,
ArrayRef<IdentifierLocPair> ProtocolId,
SmallVectorImpl<Decl *> &Protocols);
void DiagnoseTypeArgsAndProtocols(IdentifierInfo *ProtocolId,
SourceLocation ProtocolLoc,
IdentifierInfo *TypeArgId,
SourceLocation TypeArgLoc,
bool SelectProtocolFirst = false);
/// Given a list of identifiers (and their locations), resolve the
/// names to either Objective-C protocol qualifiers or type
/// arguments, as appropriate.
void actOnObjCTypeArgsOrProtocolQualifiers(
Scope *S,
ParsedType baseType,
SourceLocation lAngleLoc,
ArrayRef<IdentifierInfo *> identifiers,
ArrayRef<SourceLocation> identifierLocs,
SourceLocation rAngleLoc,
SourceLocation &typeArgsLAngleLoc,
SmallVectorImpl<ParsedType> &typeArgs,
SourceLocation &typeArgsRAngleLoc,
SourceLocation &protocolLAngleLoc,
SmallVectorImpl<Decl *> &protocols,
SourceLocation &protocolRAngleLoc,
bool warnOnIncompleteProtocols);
/// Build an Objective-C protocol-qualified 'id' type where no
/// base type was specified.
TypeResult actOnObjCProtocolQualifierType(
SourceLocation lAngleLoc,
ArrayRef<Decl *> protocols,
ArrayRef<SourceLocation> protocolLocs,
SourceLocation rAngleLoc);
/// Build a specialized and/or protocol-qualified Objective-C type.
TypeResult actOnObjCTypeArgsAndProtocolQualifiers(
Scope *S,
SourceLocation Loc,
ParsedType BaseType,
SourceLocation TypeArgsLAngleLoc,
ArrayRef<ParsedType> TypeArgs,
SourceLocation TypeArgsRAngleLoc,
SourceLocation ProtocolLAngleLoc,
ArrayRef<Decl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc);
/// Build an Objective-C type parameter type.
QualType BuildObjCTypeParamType(const ObjCTypeParamDecl *Decl,
SourceLocation ProtocolLAngleLoc,
ArrayRef<ObjCProtocolDecl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc,
bool FailOnError = false);
/// Build an Objective-C object pointer type.
QualType BuildObjCObjectType(QualType BaseType,
SourceLocation Loc,
SourceLocation TypeArgsLAngleLoc,
ArrayRef<TypeSourceInfo *> TypeArgs,
SourceLocation TypeArgsRAngleLoc,
SourceLocation ProtocolLAngleLoc,
ArrayRef<ObjCProtocolDecl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc,
bool FailOnError = false);
/// Ensure attributes are consistent with type.
/// \param [in, out] Attributes The attributes to check; they will
/// be modified to be consistent with \p PropertyTy.
void CheckObjCPropertyAttributes(Decl *PropertyPtrTy,
SourceLocation Loc,
unsigned &Attributes,
bool propertyInPrimaryClass);
/// Process the specified property declaration and create decls for the
/// setters and getters as needed.
/// \param property The property declaration being processed
void ProcessPropertyDecl(ObjCPropertyDecl *property);
void DiagnosePropertyMismatch(ObjCPropertyDecl *Property,
ObjCPropertyDecl *SuperProperty,
const IdentifierInfo *Name,
bool OverridingProtocolProperty);
void DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT,
ObjCInterfaceDecl *ID);
Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd,
ArrayRef<Decl *> allMethods = None,
ArrayRef<DeclGroupPtrTy> allTUVars = None);
Decl *ActOnProperty(Scope *S, SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD, ObjCDeclSpec &ODS,
Selector GetterSel, Selector SetterSel,
tok::ObjCKeywordKind MethodImplKind,
DeclContext *lexicalDC = nullptr);
Decl *ActOnPropertyImplDecl(Scope *S,
SourceLocation AtLoc,
SourceLocation PropertyLoc,
bool ImplKind,
IdentifierInfo *PropertyId,
IdentifierInfo *PropertyIvar,
SourceLocation PropertyIvarLoc,
ObjCPropertyQueryKind QueryKind);
/// Kinds of Objective-C methods with special ownership/initialization
/// semantics.
/// NOTE(review): meanings inferred from enumerator names; the code that
/// switches over these values is outside this chunk - confirm before relying.
enum ObjCSpecialMethodKind {
OSMK_None,             // no special method-family semantics
OSMK_Alloc,            // +alloc family
OSMK_New,              // +new family
OSMK_Copy,             // -copy / -mutableCopy family
OSMK_RetainingInit,    // init that retains its receiver
OSMK_NonRetainingInit  // init that does not retain its receiver
};
/// Per-argument parser information for an Objective-C method declaration;
/// consumed by ActOnMethodDeclaration below (one entry per selector
/// argument, count taken from Sel.getNumArgs()).
struct ObjCArgInfo {
IdentifierInfo *Name;    // argument name
SourceLocation NameLoc;  // location of the argument name
// The Type is null if no type was specified, and the DeclSpec is invalid
// in this case.
ParsedType Type;
ObjCDeclSpec DeclSpec;
/// ArgAttrs - Attribute list for this argument.
ParsedAttributesView ArgAttrs;
};
Decl *ActOnMethodDeclaration(
Scope *S,
SourceLocation BeginLoc, // location of the + or -.
SourceLocation EndLoc, // location of the ; or {.
tok::TokenKind MethodType, ObjCDeclSpec &ReturnQT, ParsedType ReturnType,
ArrayRef<SourceLocation> SelectorLocs, Selector Sel,
// optional arguments. The number of types/arguments is obtained
// from the Sel.getNumArgs().
ObjCArgInfo *ArgInfo, DeclaratorChunk::ParamInfo *CParamInfo,
unsigned CNumArgs, // c-style args
const ParsedAttributesView &AttrList, tok::ObjCKeywordKind MethodImplKind,
bool isVariadic, bool MethodDefinition);
ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel,
const ObjCObjectPointerType *OPT,
bool IsInstance);
ObjCMethodDecl *LookupMethodInObjectType(Selector Sel, QualType Ty,
bool IsInstance);
bool CheckARCMethodDecl(ObjCMethodDecl *method);
bool inferObjCARCLifetime(ValueDecl *decl);
void deduceOpenCLAddressSpace(ValueDecl *decl);
ExprResult
HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT,
Expr *BaseExpr,
SourceLocation OpLoc,
DeclarationName MemberName,
SourceLocation MemberLoc,
SourceLocation SuperLoc, QualType SuperType,
bool Super);
ExprResult
ActOnClassPropertyRefExpr(IdentifierInfo &receiverName,
IdentifierInfo &propertyName,
SourceLocation receiverNameLoc,
SourceLocation propertyNameLoc);
ObjCMethodDecl *tryCaptureObjCSelf(SourceLocation Loc);
/// Describes the kind of message expression indicated by a message
/// send that starts with an identifier.
enum ObjCMessageKind {
/// The message is sent to 'super'.
ObjCSuperMessage,
/// The message is an instance message.
ObjCInstanceMessage,
/// The message is a class message, and the identifier is a type
/// name.
ObjCClassMessage
};
ObjCMessageKind getObjCMessageKind(Scope *S,
IdentifierInfo *Name,
SourceLocation NameLoc,
bool IsSuper,
bool HasTrailingDot,
ParsedType &ReceiverType);
ExprResult ActOnSuperMessage(Scope *S, SourceLocation SuperLoc,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo,
QualType ReceiverType,
SourceLocation SuperLoc,
Selector Sel,
ObjCMethodDecl *Method,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args,
bool isImplicit = false);
ExprResult BuildClassMessageImplicit(QualType ReceiverType,
bool isSuperReceiver,
SourceLocation Loc,
Selector Sel,
ObjCMethodDecl *Method,
MultiExprArg Args);
ExprResult ActOnClassMessage(Scope *S,
ParsedType Receiver,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildInstanceMessage(Expr *Receiver,
QualType ReceiverType,
SourceLocation SuperLoc,
Selector Sel,
ObjCMethodDecl *Method,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args,
bool isImplicit = false);
ExprResult BuildInstanceMessageImplicit(Expr *Receiver,
QualType ReceiverType,
SourceLocation Loc,
Selector Sel,
ObjCMethodDecl *Method,
MultiExprArg Args);
ExprResult ActOnInstanceMessage(Scope *S,
Expr *Receiver,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildObjCBridgedCast(SourceLocation LParenLoc,
ObjCBridgeCastKind Kind,
SourceLocation BridgeKeywordLoc,
TypeSourceInfo *TSInfo,
Expr *SubExpr);
ExprResult ActOnObjCBridgedCast(Scope *S,
SourceLocation LParenLoc,
ObjCBridgeCastKind Kind,
SourceLocation BridgeKeywordLoc,
ParsedType Type,
SourceLocation RParenLoc,
Expr *SubExpr);
void CheckTollFreeBridgeCast(QualType castType, Expr *castExpr);
void CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr);
bool CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr,
CastKind &Kind);
bool checkObjCBridgeRelatedComponents(SourceLocation Loc,
QualType DestType, QualType SrcType,
ObjCInterfaceDecl *&RelatedClass,
ObjCMethodDecl *&ClassMethod,
ObjCMethodDecl *&InstanceMethod,
TypedefNameDecl *&TDNDecl,
bool CfToNs, bool Diagnose = true);
bool CheckObjCBridgeRelatedConversions(SourceLocation Loc,
QualType DestType, QualType SrcType,
Expr *&SrcExpr, bool Diagnose = true);
bool CheckConversionToObjCLiteral(QualType DstType, Expr *&SrcExpr,
bool Diagnose = true);
bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall);
/// Check whether the given new method is a valid override of the
/// given overridden method, and set any properties that should be inherited.
void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod,
const ObjCMethodDecl *Overridden);
/// Describes the compatibility of a result type with its method.
/// Produced by checkRelatedResultTypeCompatibility below.
enum ResultTypeCompatibilityKind {
RTC_Compatible,   // declared result type is compatible with the method's class
RTC_Incompatible, // declared result type is not compatible
RTC_Unknown       // compatibility could not be determined
};
/// Check whether the declared result type of the given Objective-C
/// method declaration is compatible with the method's class.
ResultTypeCompatibilityKind
checkRelatedResultTypeCompatibility(const ObjCMethodDecl *Method,
const ObjCInterfaceDecl *CurrentClass);
void CheckObjCMethodDirectOverrides(ObjCMethodDecl *method,
ObjCMethodDecl *overridden);
void CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod,
ObjCInterfaceDecl *CurrentClass,
ResultTypeCompatibilityKind RTC);
enum PragmaOptionsAlignKind {
POAK_Native, // #pragma options align=native
POAK_Natural, // #pragma options align=natural
POAK_Packed, // #pragma options align=packed
POAK_Power, // #pragma options align=power
POAK_Mac68k, // #pragma options align=mac68k
POAK_Reset // #pragma options align=reset
};
/// ActOnPragmaClangSection - Called on well formed \#pragma clang section
void ActOnPragmaClangSection(SourceLocation PragmaLoc,
PragmaClangSectionAction Action,
PragmaClangSectionKind SecKind, StringRef SecName);
/// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align.
void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind,
SourceLocation PragmaLoc);
/// ActOnPragmaPack - Called on well formed \#pragma pack(...).
void ActOnPragmaPack(SourceLocation PragmaLoc, PragmaMsStackAction Action,
StringRef SlotLabel, Expr *Alignment);
/// Distinguishes the two situations diagnosed by
/// DiagnoseNonDefaultPragmaPack below.
enum class PragmaPackDiagnoseKind {
NonDefaultStateAtInclude, // pack state was non-default when a file was entered
ChangedStateAtExit        // pack state changed by the time a file was exited
};
void DiagnoseNonDefaultPragmaPack(PragmaPackDiagnoseKind Kind,
SourceLocation IncludeLoc);
void DiagnoseUnterminatedPragmaPack();
/// ActOnPragmaMSStruct - Called on well formed \#pragma ms_struct [on|off].
void ActOnPragmaMSStruct(PragmaMSStructKind Kind);
/// ActOnPragmaMSComment - Called on well formed
/// \#pragma comment(kind, "arg").
void ActOnPragmaMSComment(SourceLocation CommentLoc, PragmaMSCommentKind Kind,
StringRef Arg);
/// ActOnPragmaMSPointersToMembers - called on well formed \#pragma
/// pointers_to_members(representation method[, general purpose
/// representation]).
void ActOnPragmaMSPointersToMembers(
LangOptions::PragmaMSPointersToMembersKind Kind,
SourceLocation PragmaLoc);
/// Called on well formed \#pragma vtordisp().
void ActOnPragmaMSVtorDisp(PragmaMsStackAction Action,
SourceLocation PragmaLoc,
MSVtorDispMode Value);
/// Kind of MS segment pragma being processed
/// (see ActOnPragmaMSSeg: \#pragma bss_seg/data_seg/const_seg/code_seg).
enum PragmaSectionKind {
PSK_DataSeg,  // #pragma data_seg
PSK_BSSSeg,   // #pragma bss_seg
PSK_ConstSeg, // #pragma const_seg
PSK_CodeSeg,  // #pragma code_seg
};
bool UnifySection(StringRef SectionName,
int SectionFlags,
DeclaratorDecl *TheDecl);
bool UnifySection(StringRef SectionName,
int SectionFlags,
SourceLocation PragmaSectionLocation);
/// Called on well formed \#pragma bss_seg/data_seg/const_seg/code_seg.
void ActOnPragmaMSSeg(SourceLocation PragmaLocation,
PragmaMsStackAction Action,
llvm::StringRef StackSlotLabel,
StringLiteral *SegmentName,
llvm::StringRef PragmaName);
/// Called on well formed \#pragma section().
void ActOnPragmaMSSection(SourceLocation PragmaLocation,
int SectionFlags, StringLiteral *SegmentName);
/// Called on well-formed \#pragma init_seg().
void ActOnPragmaMSInitSeg(SourceLocation PragmaLocation,
StringLiteral *SegmentName);
/// Called on #pragma clang __debug dump II
void ActOnPragmaDump(Scope *S, SourceLocation Loc, IdentifierInfo *II);
/// ActOnPragmaDetectMismatch - Call on well-formed \#pragma detect_mismatch
void ActOnPragmaDetectMismatch(SourceLocation Loc, StringRef Name,
StringRef Value);
/// Are precise floating point semantics currently enabled?
/// Precise semantics hold exactly when none of the fast-math-style
/// relaxations tracked in CurFPFeatures is switched on.
bool isPreciseFPEnabled() {
  return !(CurFPFeatures.getAllowFPReassociate() ||
           CurFPFeatures.getNoSignedZero() ||
           CurFPFeatures.getAllowReciprocal() ||
           CurFPFeatures.getAllowApproxFunc());
}
/// ActOnPragmaFloatControl - Call on well-formed \#pragma float_control
void ActOnPragmaFloatControl(SourceLocation Loc, PragmaMsStackAction Action,
PragmaFloatControlKind Value);
/// ActOnPragmaUnused - Called on well-formed '\#pragma unused'.
void ActOnPragmaUnused(const Token &Identifier,
Scope *curScope,
SourceLocation PragmaLoc);
/// ActOnPragmaVisibility - Called on well formed \#pragma GCC visibility... .
void ActOnPragmaVisibility(const IdentifierInfo* VisType,
SourceLocation PragmaLoc);
NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II,
SourceLocation Loc);
void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W);
/// ActOnPragmaWeakID - Called on well formed \#pragma weak ident.
void ActOnPragmaWeakID(IdentifierInfo* WeakName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc);
/// ActOnPragmaRedefineExtname - Called on well formed
/// \#pragma redefine_extname oldname newname.
void ActOnPragmaRedefineExtname(IdentifierInfo* WeakName,
IdentifierInfo* AliasName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc,
SourceLocation AliasNameLoc);
/// ActOnPragmaWeakAlias - Called on well formed \#pragma weak ident = ident.
void ActOnPragmaWeakAlias(IdentifierInfo* WeakName,
IdentifierInfo* AliasName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc,
SourceLocation AliasNameLoc);
/// ActOnPragmaFPContract - Called on well formed
/// \#pragma {STDC,OPENCL} FP_CONTRACT and
/// \#pragma clang fp contract
void ActOnPragmaFPContract(SourceLocation Loc, LangOptions::FPModeKind FPC);
/// Called on well formed
/// \#pragma clang fp reassociate
void ActOnPragmaFPReassociate(SourceLocation Loc, bool IsEnabled);
/// ActOnPragmaFenvAccess - Called on well formed
/// \#pragma STDC FENV_ACCESS
void ActOnPragmaFEnvAccess(SourceLocation Loc, bool IsEnabled);
/// Called to set rounding mode for floating point operations.
void setRoundingMode(SourceLocation Loc, llvm::RoundingMode);
/// Called to set exception behavior for floating point operations.
void setExceptionMode(SourceLocation Loc, LangOptions::FPExceptionModeKind);
/// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to
/// the record decl, to handle '\#pragma pack' and '\#pragma options align'.
void AddAlignmentAttributesForRecord(RecordDecl *RD);
/// AddMsStructLayoutForRecord - Adds ms_struct layout attribute to record.
void AddMsStructLayoutForRecord(RecordDecl *RD);
/// FreePackedContext - Deallocate and null out PackContext.
void FreePackedContext();
/// PushNamespaceVisibilityAttr - Note that we've entered a
/// namespace with a visibility attribute.
void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr,
SourceLocation Loc);
/// AddPushedVisibilityAttribute - If '\#pragma GCC visibility' was used,
/// add an appropriate visibility attribute.
void AddPushedVisibilityAttribute(Decl *RD);
/// PopPragmaVisibility - Pop the top element of the visibility stack; used
/// for '\#pragma GCC visibility' and visibility attributes on namespaces.
void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc);
/// FreeVisContext - Deallocate and null out VisContext.
void FreeVisContext();
/// AddCFAuditedAttribute - Check whether we're currently within
/// '\#pragma clang arc_cf_code_audited' and, if so, consider adding
/// the appropriate attribute.
void AddCFAuditedAttribute(Decl *D);
void ActOnPragmaAttributeAttribute(ParsedAttr &Attribute,
SourceLocation PragmaLoc,
attr::ParsedSubjectMatchRuleSet Rules);
void ActOnPragmaAttributeEmptyPush(SourceLocation PragmaLoc,
const IdentifierInfo *Namespace);
/// Called on well-formed '\#pragma clang attribute pop'.
void ActOnPragmaAttributePop(SourceLocation PragmaLoc,
const IdentifierInfo *Namespace);
/// Adds the attributes that have been specified using the
/// '\#pragma clang attribute push' directives to the given declaration.
void AddPragmaAttributes(Scope *S, Decl *D);
void DiagnoseUnterminatedPragmaAttribute();
/// Called on well formed \#pragma clang optimize.
void ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc);
/// Get the location for the currently active "\#pragma clang optimize
/// off". If this location is invalid, then the state of the pragma is "on".
/// Trivial accessor; the stored location is presumably maintained by
/// ActOnPragmaOptimize - confirm in the implementation file.
SourceLocation getOptimizeOffPragmaLocation() const {
return OptimizeOffPragmaLocation;
}
/// Only called on function definitions; if there is a pragma in scope
/// with the effect of a range-based optnone, consider marking the function
/// with attribute optnone.
void AddRangeBasedOptnone(FunctionDecl *FD);
/// Adds the 'optnone' attribute to the function declaration if there
/// are no conflicts; Loc represents the location causing the 'optnone'
/// attribute to be added (usually because of a pragma).
void AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD, SourceLocation Loc);
/// AddAlignedAttr - Adds an aligned attribute to a particular declaration.
void AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E,
bool IsPackExpansion);
void AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, TypeSourceInfo *T,
bool IsPackExpansion);
/// AddAssumeAlignedAttr - Adds an assume_aligned attribute to a particular
/// declaration.
void AddAssumeAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E,
Expr *OE);
/// AddAllocAlignAttr - Adds an alloc_align attribute to a particular
/// declaration.
void AddAllocAlignAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *ParamExpr);
/// AddAlignValueAttr - Adds an align_value attribute to a particular
/// declaration.
void AddAlignValueAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E);
/// AddLaunchBoundsAttr - Adds a launch_bounds attribute to a particular
/// declaration.
void AddLaunchBoundsAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *MaxThreads, Expr *MinBlocks);
/// AddModeAttr - Adds a mode attribute to a particular declaration.
void AddModeAttr(Decl *D, const AttributeCommonInfo &CI, IdentifierInfo *Name,
bool InInstantiation = false);
void AddParameterABIAttr(Decl *D, const AttributeCommonInfo &CI,
ParameterABI ABI);
enum class RetainOwnershipKind {NS, CF, OS};
void AddXConsumedAttr(Decl *D, const AttributeCommonInfo &CI,
RetainOwnershipKind K, bool IsTemplateInstantiation);
/// addAMDGPUFlatWorkGroupSizeAttr - Adds an amdgpu_flat_work_group_size
/// attribute to a particular declaration.
void addAMDGPUFlatWorkGroupSizeAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *Min, Expr *Max);
/// addAMDGPUWavePersEUAttr - Adds an amdgpu_waves_per_eu attribute to a
/// particular declaration.
void addAMDGPUWavesPerEUAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *Min, Expr *Max);
bool checkNSReturnsRetainedReturnType(SourceLocation loc, QualType type);
//===--------------------------------------------------------------------===//
// C++ Coroutines TS
//
bool ActOnCoroutineBodyStart(Scope *S, SourceLocation KwLoc,
StringRef Keyword);
ExprResult ActOnCoawaitExpr(Scope *S, SourceLocation KwLoc, Expr *E);
ExprResult ActOnCoyieldExpr(Scope *S, SourceLocation KwLoc, Expr *E);
StmtResult ActOnCoreturnStmt(Scope *S, SourceLocation KwLoc, Expr *E);
ExprResult BuildResolvedCoawaitExpr(SourceLocation KwLoc, Expr *E,
bool IsImplicit = false);
ExprResult BuildUnresolvedCoawaitExpr(SourceLocation KwLoc, Expr *E,
UnresolvedLookupExpr* Lookup);
ExprResult BuildCoyieldExpr(SourceLocation KwLoc, Expr *E);
StmtResult BuildCoreturnStmt(SourceLocation KwLoc, Expr *E,
bool IsImplicit = false);
StmtResult BuildCoroutineBodyStmt(CoroutineBodyStmt::CtorArgs);
bool buildCoroutineParameterMoves(SourceLocation Loc);
VarDecl *buildCoroutinePromise(SourceLocation Loc);
void CheckCompletedCoroutineBody(FunctionDecl *FD, Stmt *&Body);
ClassTemplateDecl *lookupCoroutineTraits(SourceLocation KwLoc,
SourceLocation FuncLoc);
/// Check that the expression co_await promise.final_suspend() shall not be
/// potentially-throwing.
bool checkFinalSuspendNoThrow(const Stmt *FinalSuspend);
//===--------------------------------------------------------------------===//
// OpenCL extensions.
//
private:
std::string CurrOpenCLExtension;
/// Extensions required by an OpenCL type.
llvm::DenseMap<const Type*, std::set<std::string>> OpenCLTypeExtMap;
/// Extensions required by an OpenCL declaration.
llvm::DenseMap<const Decl*, std::set<std::string>> OpenCLDeclExtMap;
public:
/// Accessor for the OpenCL extension string last recorded via
/// setCurrentOpenCLExtension (empty if none has been set).
llvm::StringRef getCurrentOpenCLExtension() const {
return CurrOpenCLExtension;
}
/// Check if a function declaration \p FD associates with any
/// extensions present in OpenCLDeclExtMap and if so return the
/// extension(s) name(s).
std::string getOpenCLExtensionsFromDeclExtMap(FunctionDecl *FD);
/// Check if a function type \p FT associates with any
/// extensions present in OpenCLTypeExtMap and if so return the
/// extension(s) name(s).
std::string getOpenCLExtensionsFromTypeExtMap(FunctionType *FT);
/// Find an extension in an appropriate extension map and return its name
template<typename T, typename MapT>
std::string getOpenCLExtensionsFromExtMap(T* FT, MapT &Map);
/// Record \p Ext as the OpenCL extension currently in effect; retrieved
/// later through getCurrentOpenCLExtension().
void setCurrentOpenCLExtension(llvm::StringRef Ext) {
  CurrOpenCLExtension = Ext.str();
}
/// Set OpenCL extensions for a type which can only be used when these
/// OpenCL extensions are enabled. If \p Exts is empty, do nothing.
/// \param Exts A space separated list of OpenCL extensions.
void setOpenCLExtensionForType(QualType T, llvm::StringRef Exts);
/// Set OpenCL extensions for a declaration which can only be
/// used when these OpenCL extensions are enabled. If \p Exts is empty, do
/// nothing.
/// \param Exts A space separated list of OpenCL extensions.
void setOpenCLExtensionForDecl(Decl *FD, llvm::StringRef Exts);
/// Set current OpenCL extensions for a type which can only be used
/// when these OpenCL extensions are enabled. If current OpenCL extension is
/// empty, do nothing.
void setCurrentOpenCLExtensionForType(QualType T);
/// Set current OpenCL extensions for a declaration which
/// can only be used when these OpenCL extensions are enabled. If current
/// OpenCL extension is empty, do nothing.
void setCurrentOpenCLExtensionForDecl(Decl *FD);
bool isOpenCLDisabledDecl(Decl *FD);
/// Check if type \p T corresponding to declaration specifier \p DS
/// is disabled due to required OpenCL extensions being disabled. If so,
/// emit diagnostics.
/// \return true if type is disabled.
bool checkOpenCLDisabledTypeDeclSpec(const DeclSpec &DS, QualType T);
/// Check if declaration \p D used by expression \p E
/// is disabled due to required OpenCL extensions being disabled. If so,
/// emit diagnostics.
/// \return true if type is disabled.
bool checkOpenCLDisabledDecl(const NamedDecl &D, const Expr &E);
//===--------------------------------------------------------------------===//
// OpenMP directives and clauses.
//
private:
void *VarDataSharingAttributesStack;
/// Number of nested '#pragma omp declare target' directives.
unsigned DeclareTargetNestingLevel = 0;
/// Initialization of data-sharing attributes stack.
void InitDataSharingAttributesStack();
void DestroyDataSharingAttributesStack();
ExprResult
VerifyPositiveIntegerConstantInClause(Expr *Op, OpenMPClauseKind CKind,
bool StrictlyPositive = true);
/// Returns OpenMP nesting level for current directive.
unsigned getOpenMPNestingLevel() const;
/// Adjusts the function scopes index for the target-based regions.
void adjustOpenMPTargetScopeIndex(unsigned &FunctionScopesIndex,
unsigned Level) const;
/// Returns the number of scopes associated with the construct on the given
/// OpenMP level.
int getNumberOfConstructScopes(unsigned Level) const;
/// Push new OpenMP function region for non-capturing function.
void pushOpenMPFunctionRegion();
/// Pop OpenMP function region for non-capturing function.
void popOpenMPFunctionRegion(const sema::FunctionScopeInfo *OldFSI);
/// Checks if a type or a declaration is disabled due to the owning extension
/// being disabled, and emits diagnostic messages if it is disabled.
/// \param D type or declaration to be checked.
/// \param DiagLoc source location for the diagnostic message.
/// \param DiagInfo information to be emitted for the diagnostic message.
/// \param SrcRange source range of the declaration.
/// \param Map maps type or declaration to the extensions.
/// \param Selector selects diagnostic message: 0 for type and 1 for
/// declaration.
/// \return true if the type or declaration is disabled.
template <typename T, typename DiagLocT, typename DiagInfoT, typename MapT>
bool checkOpenCLDisabledTypeOrDecl(T D, DiagLocT DiagLoc, DiagInfoT DiagInfo,
MapT &Map, unsigned Selector = 0,
SourceRange SrcRange = SourceRange());
/// Helper to keep information about the current `omp begin/end declare
/// variant` nesting.
struct OMPDeclareVariantScope {
/// The associated OpenMP context selector.
OMPTraitInfo *TI;
/// The associated OpenMP context selector mangling.
std::string NameSuffix;
/// Constructor (defined out of line); presumably derives NameSuffix
/// from \p TI - confirm in the implementation file.
OMPDeclareVariantScope(OMPTraitInfo &TI);
};
/// The current `omp begin/end declare variant` scopes.
SmallVector<OMPDeclareVariantScope, 4> OMPDeclareVariantScopes;
/// The declarator \p D defines a function in the scope \p S which is nested
/// in an `omp begin/end declare variant` scope. In this method we create a
/// declaration for \p D and rename \p D according to the OpenMP context
/// selector of the surrounding scope.
FunctionDecl *
ActOnStartOfFunctionDefinitionInOpenMPDeclareVariantScope(Scope *S,
Declarator &D);
/// Register \p FD as specialization of \p BaseFD in the current `omp
/// begin/end declare variant` scope.
void ActOnFinishedFunctionDefinitionInOpenMPDeclareVariantScope(
FunctionDecl *FD, FunctionDecl *BaseFD);
public:
/// Returns true while inside at least one `omp begin/end declare variant`
/// scope, i.e. while OMPDeclareVariantScopes is non-empty.
bool isInOpenMPDeclareVariantScope() {
return !OMPDeclareVariantScopes.empty();
}
/// Given the potential call expression \p Call, determine if there is a
/// specialization via the OpenMP declare variant mechanism available. If
/// there is, return the specialized call expression, otherwise return the
/// original \p Call.
ExprResult ActOnOpenMPCall(ExprResult Call, Scope *Scope,
SourceLocation LParenLoc, MultiExprArg ArgExprs,
SourceLocation RParenLoc, Expr *ExecConfig);
/// Handle a `omp begin declare variant`.
void ActOnOpenMPBeginDeclareVariant(SourceLocation Loc, OMPTraitInfo &TI);
/// Handle a `omp end declare variant`.
void ActOnOpenMPEndDeclareVariant();
/// Checks if the variant/multiversion functions are compatible.
bool areMultiversionVariantFunctionsCompatible(
const FunctionDecl *OldFD, const FunctionDecl *NewFD,
const PartialDiagnostic &NoProtoDiagID,
const PartialDiagnosticAt &NoteCausedDiagIDAt,
const PartialDiagnosticAt &NoSupportDiagIDAt,
const PartialDiagnosticAt &DiffDiagIDAt, bool TemplatesSupported,
bool ConstexprSupported, bool CLinkageMayDiffer);
/// Function tries to capture lambda's captured variables in the OpenMP region
/// before the original lambda is captured.
void tryCaptureOpenMPLambdas(ValueDecl *V);
/// Return true if the provided declaration \a VD should be captured by
/// reference.
/// \param Level Relative level of nested OpenMP construct for that the check
/// is performed.
/// \param OpenMPCaptureLevel Capture level within an OpenMP construct.
bool isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level,
unsigned OpenMPCaptureLevel) const;
/// Check if the specified variable is used in one of the private
/// clauses (private, firstprivate, lastprivate, reduction etc.) in OpenMP
/// constructs.
VarDecl *isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo = false,
unsigned StopAt = 0);
ExprResult getOpenMPCapturedExpr(VarDecl *Capture, ExprValueKind VK,
ExprObjectKind OK, SourceLocation Loc);
/// If the current region is a loop-based region, mark the start of the loop
/// construct.
void startOpenMPLoop();
/// If the current region is a range loop-based region, mark the start of the
/// loop construct.
void startOpenMPCXXRangeFor();
/// Check if the specified variable is used in 'private' clause.
/// \param Level Relative level of nested OpenMP construct for that the check
/// is performed.
OpenMPClauseKind isOpenMPPrivateDecl(ValueDecl *D, unsigned Level,
unsigned CapLevel) const;
/// Sets OpenMP capture kind (OMPC_private, OMPC_firstprivate, OMPC_map etc.)
/// for \p FD based on DSA for the provided corresponding captured declaration
/// \p D.
void setOpenMPCaptureKind(FieldDecl *FD, const ValueDecl *D, unsigned Level);
/// Check if the specified variable is captured by 'target' directive.
/// \param Level Relative level of nested OpenMP construct for that the check
/// is performed.
bool isOpenMPTargetCapturedDecl(const ValueDecl *D, unsigned Level,
unsigned CaptureLevel) const;
/// Check if the specified global variable must be captured by outer capture
/// regions.
/// \param Level Relative level of nested OpenMP construct for that
/// the check is performed.
bool isOpenMPGlobalCapturedDecl(ValueDecl *D, unsigned Level,
unsigned CaptureLevel) const;
ExprResult PerformOpenMPImplicitIntegerConversion(SourceLocation OpLoc,
Expr *Op);
/// Called on start of new data sharing attribute block.
void StartOpenMPDSABlock(OpenMPDirectiveKind K,
const DeclarationNameInfo &DirName, Scope *CurScope,
SourceLocation Loc);
/// Start analysis of clauses.
void StartOpenMPClause(OpenMPClauseKind K);
/// End analysis of clauses.
void EndOpenMPClause();
/// Called on end of data sharing attribute block.
void EndOpenMPDSABlock(Stmt *CurDirective);
/// Check if the current region is an OpenMP loop region and if it is,
/// mark loop control variable, used in \p Init for loop initialization, as
/// private by default.
/// \param Init First part of the for loop.
void ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init);
// OpenMP directives and clauses.
/// Called on correct id-expression from the '#pragma omp
/// threadprivate'.
ExprResult ActOnOpenMPIdExpression(Scope *CurScope, CXXScopeSpec &ScopeSpec,
const DeclarationNameInfo &Id,
OpenMPDirectiveKind Kind);
/// Called on well-formed '#pragma omp threadprivate'.
DeclGroupPtrTy ActOnOpenMPThreadprivateDirective(
SourceLocation Loc,
ArrayRef<Expr *> VarList);
/// Builds a new OpenMPThreadPrivateDecl and checks its correctness.
OMPThreadPrivateDecl *CheckOMPThreadPrivateDecl(SourceLocation Loc,
ArrayRef<Expr *> VarList);
/// Called on well-formed '#pragma omp allocate'.
DeclGroupPtrTy ActOnOpenMPAllocateDirective(SourceLocation Loc,
ArrayRef<Expr *> VarList,
ArrayRef<OMPClause *> Clauses,
DeclContext *Owner = nullptr);
/// Called on well-formed '#pragma omp requires'.
DeclGroupPtrTy ActOnOpenMPRequiresDirective(SourceLocation Loc,
ArrayRef<OMPClause *> ClauseList);
/// Check restrictions on Requires directive
OMPRequiresDecl *CheckOMPRequiresDecl(SourceLocation Loc,
ArrayRef<OMPClause *> Clauses);
/// Check if the specified type is allowed to be used in 'omp declare
/// reduction' construct.
QualType ActOnOpenMPDeclareReductionType(SourceLocation TyLoc,
TypeResult ParsedType);
/// Called on start of '#pragma omp declare reduction'.
DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveStart(
Scope *S, DeclContext *DC, DeclarationName Name,
ArrayRef<std::pair<QualType, SourceLocation>> ReductionTypes,
AccessSpecifier AS, Decl *PrevDeclInScope = nullptr);
/// Initialize declare reduction construct initializer.
void ActOnOpenMPDeclareReductionCombinerStart(Scope *S, Decl *D);
/// Finish current declare reduction construct initializer.
void ActOnOpenMPDeclareReductionCombinerEnd(Decl *D, Expr *Combiner);
/// Initialize declare reduction construct initializer.
/// \return omp_priv variable.
VarDecl *ActOnOpenMPDeclareReductionInitializerStart(Scope *S, Decl *D);
/// Finish current declare reduction construct initializer.
void ActOnOpenMPDeclareReductionInitializerEnd(Decl *D, Expr *Initializer,
VarDecl *OmpPrivParm);
/// Called at the end of '#pragma omp declare reduction'.
DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveEnd(
Scope *S, DeclGroupPtrTy DeclReductions, bool IsValid);
/// Check variable declaration in 'omp declare mapper' construct.
TypeResult ActOnOpenMPDeclareMapperVarDecl(Scope *S, Declarator &D);
/// Check if the specified type is allowed to be used in 'omp declare
/// mapper' construct.
QualType ActOnOpenMPDeclareMapperType(SourceLocation TyLoc,
TypeResult ParsedType);
/// Called on start of '#pragma omp declare mapper'.
DeclGroupPtrTy ActOnOpenMPDeclareMapperDirective(
Scope *S, DeclContext *DC, DeclarationName Name, QualType MapperType,
SourceLocation StartLoc, DeclarationName VN, AccessSpecifier AS,
Expr *MapperVarRef, ArrayRef<OMPClause *> Clauses,
Decl *PrevDeclInScope = nullptr);
/// Build the mapper variable of '#pragma omp declare mapper'.
ExprResult ActOnOpenMPDeclareMapperDirectiveVarDecl(Scope *S,
QualType MapperType,
SourceLocation StartLoc,
DeclarationName VN);
bool isOpenMPDeclareMapperVarDeclAllowed(const VarDecl *VD) const;
const ValueDecl *getOpenMPDeclareMapperVarName() const;
/// Called on the start of target region i.e. '#pragma omp declare target'.
bool ActOnStartOpenMPDeclareTargetDirective(SourceLocation Loc);
/// Called at the end of target region i.e. '#pragma omp end declare target'.
void ActOnFinishOpenMPDeclareTargetDirective();
/// Searches for the provided declaration name for OpenMP declare target
/// directive.
NamedDecl *
lookupOpenMPDeclareTargetName(Scope *CurScope, CXXScopeSpec &ScopeSpec,
const DeclarationNameInfo &Id,
NamedDeclSetType &SameDirectiveDecls);
/// Called on correct id-expression from the '#pragma omp declare target'.
void ActOnOpenMPDeclareTargetName(NamedDecl *ND, SourceLocation Loc,
OMPDeclareTargetDeclAttr::MapTypeTy MT,
OMPDeclareTargetDeclAttr::DevTypeTy DT);
/// Check declaration inside target region.
void
checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D,
SourceLocation IdLoc = SourceLocation());
/// Finishes analysis of the deferred functions calls that may be declared as
/// host/nohost during device/host compilation.
void finalizeOpenMPDelayedAnalysis(const FunctionDecl *Caller,
const FunctionDecl *Callee,
SourceLocation Loc);
/// Return true inside OpenMP declare target region.
///
/// True while at least one '#pragma omp declare target' region has been
/// entered (via ActOnStartOpenMPDeclareTargetDirective) and not yet closed
/// (via ActOnFinishOpenMPDeclareTargetDirective); DeclareTargetNestingLevel
/// tracks the current nesting depth of such regions.
bool isInOpenMPDeclareTargetContext() const {
  return DeclareTargetNestingLevel > 0;
}
/// Return true inside OpenMP target region.
bool isInOpenMPTargetExecutionDirective() const;
/// Return the number of captured regions created for an OpenMP directive.
static int getOpenMPCaptureLevels(OpenMPDirectiveKind Kind);
/// Initialization of captured region for OpenMP region.
void ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope);
/// End of OpenMP region.
///
/// \param S Statement associated with the current OpenMP region.
/// \param Clauses List of clauses for the current OpenMP region.
///
/// \returns Statement for finished OpenMP region.
StmtResult ActOnOpenMPRegionEnd(StmtResult S, ArrayRef<OMPClause *> Clauses);
StmtResult ActOnOpenMPExecutableDirective(
OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName,
OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
using VarsWithInheritedDSAType =
llvm::SmallDenseMap<const ValueDecl *, const Expr *, 4>;
/// Called on well-formed '\#pragma omp simd' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp for' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPForDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp for simd' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPForSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp sections' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp section' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPSectionDirective(Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp single' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp master' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPMasterDirective(Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp critical' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPCriticalDirective(const DeclarationNameInfo &DirName,
ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel for' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel for simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel master' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelMasterDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel sections' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp task' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskyield'.
StmtResult ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp barrier'.
StmtResult ActOnOpenMPBarrierDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskwait'.
StmtResult ActOnOpenMPTaskwaitDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskgroup'.
StmtResult ActOnOpenMPTaskgroupDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp flush'.
StmtResult ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp depobj'.
StmtResult ActOnOpenMPDepobjDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp scan'.
StmtResult ActOnOpenMPScanDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp ordered' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp atomic' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target data' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTargetDataDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target enter data' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetEnterDataDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp target exit data' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetExitDataDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp target parallel' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target parallel for' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp cancellation point'.
StmtResult
ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc,
SourceLocation EndLoc,
OpenMPDirectiveKind CancelRegion);
/// Called on well-formed '\#pragma omp cancel'.
StmtResult ActOnOpenMPCancelDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
OpenMPDirectiveKind CancelRegion);
/// Called on well-formed '\#pragma omp taskloop' after parsing of the
/// associated statement.
StmtResult
ActOnOpenMPTaskLoopDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp taskloop simd' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTaskLoopSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp master taskloop' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPMasterTaskLoopDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp master taskloop simd' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPMasterTaskLoopSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel master taskloop' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelMasterTaskLoopDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel master taskloop simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelMasterTaskLoopSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPDistributeDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target update'.
StmtResult ActOnOpenMPTargetUpdateDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp distribute parallel for' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute parallel for simd'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target parallel for simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target simd' after parsing of
/// the associated statement.
StmtResult
ActOnOpenMPTargetSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTeamsDistributeDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute simd' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute parallel for simd'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute parallel for'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTargetTeamsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target teams distribute' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute parallel for'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute parallel for
/// simd' after parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Checks correctness of linear modifiers.
bool CheckOpenMPLinearModifier(OpenMPLinearClauseKind LinKind,
SourceLocation LinLoc);
/// Checks that the specified declaration matches requirements for the linear
/// decls.
bool CheckOpenMPLinearDecl(const ValueDecl *D, SourceLocation ELoc,
OpenMPLinearClauseKind LinKind, QualType Type,
bool IsDeclareSimd = false);
/// Called on well-formed '\#pragma omp declare simd' after parsing of
/// the associated method/function.
DeclGroupPtrTy ActOnOpenMPDeclareSimdDirective(
DeclGroupPtrTy DG, OMPDeclareSimdDeclAttr::BranchStateTy BS,
Expr *Simdlen, ArrayRef<Expr *> Uniforms, ArrayRef<Expr *> Aligneds,
ArrayRef<Expr *> Alignments, ArrayRef<Expr *> Linears,
ArrayRef<unsigned> LinModifiers, ArrayRef<Expr *> Steps, SourceRange SR);
/// Checks '\#pragma omp declare variant' variant function and original
/// functions after parsing of the associated method/function.
/// \param DG Function declaration to which declare variant directive is
/// applied to.
/// \param VariantRef Expression that references the variant function, which
/// must be used instead of the original one, specified in \p DG.
/// \param TI The trait info object representing the match clause.
/// \returns None, if the function/variant function are not compatible with
/// the pragma, pair of original function/variant ref expression otherwise.
Optional<std::pair<FunctionDecl *, Expr *>>
checkOpenMPDeclareVariantFunction(DeclGroupPtrTy DG, Expr *VariantRef,
OMPTraitInfo &TI, SourceRange SR);
/// Called on well-formed '\#pragma omp declare variant' after parsing of
/// the associated method/function.
/// \param FD Function declaration to which declare variant directive is
/// applied to.
/// \param VariantRef Expression that references the variant function, which
/// must be used instead of the original one, specified in \p DG.
/// \param TI The context traits associated with the function variant.
void ActOnOpenMPDeclareVariantDirective(FunctionDecl *FD, Expr *VariantRef,
OMPTraitInfo &TI, SourceRange SR);
OMPClause *ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind,
Expr *Expr,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'allocator' clause.
OMPClause *ActOnOpenMPAllocatorClause(Expr *Allocator,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'if' clause.
OMPClause *ActOnOpenMPIfClause(OpenMPDirectiveKind NameModifier,
Expr *Condition, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation NameModifierLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc);
/// Called on well-formed 'final' clause.
OMPClause *ActOnOpenMPFinalClause(Expr *Condition, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'num_threads' clause.
OMPClause *ActOnOpenMPNumThreadsClause(Expr *NumThreads,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'safelen' clause.
OMPClause *ActOnOpenMPSafelenClause(Expr *Length,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'simdlen' clause.
OMPClause *ActOnOpenMPSimdlenClause(Expr *Length, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'collapse' clause.
OMPClause *ActOnOpenMPCollapseClause(Expr *NumForLoops,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'ordered' clause.
OMPClause *
ActOnOpenMPOrderedClause(SourceLocation StartLoc, SourceLocation EndLoc,
SourceLocation LParenLoc = SourceLocation(),
Expr *NumForLoops = nullptr);
/// Called on well-formed 'grainsize' clause.
OMPClause *ActOnOpenMPGrainsizeClause(Expr *Size, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'num_tasks' clause.
OMPClause *ActOnOpenMPNumTasksClause(Expr *NumTasks, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'hint' clause.
OMPClause *ActOnOpenMPHintClause(Expr *Hint, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'detach' clause.
OMPClause *ActOnOpenMPDetachClause(Expr *Evt, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
OMPClause *ActOnOpenMPSimpleClause(OpenMPClauseKind Kind,
unsigned Argument,
SourceLocation ArgumentLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'default' clause.
OMPClause *ActOnOpenMPDefaultClause(llvm::omp::DefaultKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'proc_bind' clause.
OMPClause *ActOnOpenMPProcBindClause(llvm::omp::ProcBindKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'order' clause.
OMPClause *ActOnOpenMPOrderClause(OpenMPOrderClauseKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'update' clause.
OMPClause *ActOnOpenMPUpdateClause(OpenMPDependClauseKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
OMPClause *ActOnOpenMPSingleExprWithArgClause(
OpenMPClauseKind Kind, ArrayRef<unsigned> Arguments, Expr *Expr,
SourceLocation StartLoc, SourceLocation LParenLoc,
ArrayRef<SourceLocation> ArgumentsLoc, SourceLocation DelimLoc,
SourceLocation EndLoc);
/// Called on well-formed 'schedule' clause.
OMPClause *ActOnOpenMPScheduleClause(
OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2,
OpenMPScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation M1Loc, SourceLocation M2Loc,
SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc);
OMPClause *ActOnOpenMPClause(OpenMPClauseKind Kind, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'nowait' clause.
OMPClause *ActOnOpenMPNowaitClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'untied' clause.
OMPClause *ActOnOpenMPUntiedClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'mergeable' clause.
OMPClause *ActOnOpenMPMergeableClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'read' clause.
OMPClause *ActOnOpenMPReadClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'write' clause.
OMPClause *ActOnOpenMPWriteClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'update' clause.
OMPClause *ActOnOpenMPUpdateClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'capture' clause.
OMPClause *ActOnOpenMPCaptureClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'seq_cst' clause.
OMPClause *ActOnOpenMPSeqCstClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'acq_rel' clause.
OMPClause *ActOnOpenMPAcqRelClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'acquire' clause.
OMPClause *ActOnOpenMPAcquireClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'release' clause.
OMPClause *ActOnOpenMPReleaseClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'relaxed' clause.
OMPClause *ActOnOpenMPRelaxedClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'destroy' clause.
OMPClause *ActOnOpenMPDestroyClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'threads' clause.
OMPClause *ActOnOpenMPThreadsClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'simd' clause.
OMPClause *ActOnOpenMPSIMDClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'nogroup' clause.
OMPClause *ActOnOpenMPNogroupClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'unified_address' clause.
OMPClause *ActOnOpenMPUnifiedAddressClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'unified_shared_memory' clause.
OMPClause *ActOnOpenMPUnifiedSharedMemoryClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'reverse_offload' clause.
OMPClause *ActOnOpenMPReverseOffloadClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'dynamic_allocators' clause.
OMPClause *ActOnOpenMPDynamicAllocatorsClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'atomic_default_mem_order' clause.
OMPClause *ActOnOpenMPAtomicDefaultMemOrderClause(
OpenMPAtomicDefaultMemOrderClauseKind Kind, SourceLocation KindLoc,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc);
OMPClause *ActOnOpenMPVarListClause(
OpenMPClauseKind Kind, ArrayRef<Expr *> Vars, Expr *DepModOrTailExpr,
const OMPVarListLocTy &Locs, SourceLocation ColonLoc,
CXXScopeSpec &ReductionOrMapperIdScopeSpec,
DeclarationNameInfo &ReductionOrMapperId, int ExtraModifier,
ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
ArrayRef<SourceLocation> MapTypeModifiersLoc, bool IsMapTypeImplicit,
SourceLocation ExtraModifierLoc,
ArrayRef<OpenMPMotionModifierKind> MotionModifiers,
ArrayRef<SourceLocation> MotionModifiersLoc);
/// Called on well-formed 'inclusive' clause.
OMPClause *ActOnOpenMPInclusiveClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'exclusive' clause.
OMPClause *ActOnOpenMPExclusiveClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'allocate' clause.
OMPClause *
ActOnOpenMPAllocateClause(Expr *Allocator, ArrayRef<Expr *> VarList,
SourceLocation StartLoc, SourceLocation ColonLoc,
SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'private' clause.
OMPClause *ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'firstprivate' clause.
OMPClause *ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'lastprivate' clause.
OMPClause *ActOnOpenMPLastprivateClause(
ArrayRef<Expr *> VarList, OpenMPLastprivateModifier LPKind,
SourceLocation LPKindLoc, SourceLocation ColonLoc,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'shared' clause.
OMPClause *ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'reduction' clause.
OMPClause *ActOnOpenMPReductionClause(
ArrayRef<Expr *> VarList, OpenMPReductionClauseModifier Modifier,
SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation ModifierLoc, SourceLocation ColonLoc,
SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'task_reduction' clause.
OMPClause *ActOnOpenMPTaskReductionClause(
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'in_reduction' clause.
OMPClause *ActOnOpenMPInReductionClause(
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'linear' clause.
OMPClause *
ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step,
SourceLocation StartLoc, SourceLocation LParenLoc,
OpenMPLinearClauseKind LinKind, SourceLocation LinLoc,
SourceLocation ColonLoc, SourceLocation EndLoc);
/// Called on well-formed 'aligned' clause.
OMPClause *ActOnOpenMPAlignedClause(ArrayRef<Expr *> VarList,
Expr *Alignment,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc);
/// Called on well-formed 'copyin' clause.
OMPClause *ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'copyprivate' clause.
OMPClause *ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'flush' pseudo clause.
OMPClause *ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'depobj' pseudo clause.
OMPClause *ActOnOpenMPDepobjClause(Expr *Depobj, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'depend' clause.
OMPClause *
ActOnOpenMPDependClause(Expr *DepModifier, OpenMPDependClauseKind DepKind,
SourceLocation DepLoc, SourceLocation ColonLoc,
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'device' clause.
OMPClause *ActOnOpenMPDeviceClause(OpenMPDeviceClauseModifier Modifier,
Expr *Device, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation ModifierLoc,
SourceLocation EndLoc);
/// Called on well-formed 'map' clause.
OMPClause *
ActOnOpenMPMapClause(ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
ArrayRef<SourceLocation> MapTypeModifiersLoc,
CXXScopeSpec &MapperIdScopeSpec,
DeclarationNameInfo &MapperId,
OpenMPMapClauseKind MapType, bool IsMapTypeImplicit,
SourceLocation MapLoc, SourceLocation ColonLoc,
ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs,
ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'num_teams' clause.
OMPClause *ActOnOpenMPNumTeamsClause(Expr *NumTeams, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'thread_limit' clause.
OMPClause *ActOnOpenMPThreadLimitClause(Expr *ThreadLimit,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'priority' clause.
OMPClause *ActOnOpenMPPriorityClause(Expr *Priority, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'dist_schedule' clause.
OMPClause *ActOnOpenMPDistScheduleClause(
OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KindLoc,
SourceLocation CommaLoc, SourceLocation EndLoc);
/// Called on well-formed 'defaultmap' clause.
OMPClause *ActOnOpenMPDefaultmapClause(
OpenMPDefaultmapClauseModifier M, OpenMPDefaultmapClauseKind Kind,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation MLoc,
SourceLocation KindLoc, SourceLocation EndLoc);
/// Called on well-formed 'to' clause.
OMPClause *
ActOnOpenMPToClause(ArrayRef<OpenMPMotionModifierKind> MotionModifiers,
ArrayRef<SourceLocation> MotionModifiersLoc,
CXXScopeSpec &MapperIdScopeSpec,
DeclarationNameInfo &MapperId, SourceLocation ColonLoc,
ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs,
ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'from' clause.
OMPClause *
ActOnOpenMPFromClause(ArrayRef<OpenMPMotionModifierKind> MotionModifiers,
ArrayRef<SourceLocation> MotionModifiersLoc,
CXXScopeSpec &MapperIdScopeSpec,
DeclarationNameInfo &MapperId, SourceLocation ColonLoc,
ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs,
ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'use_device_ptr' clause.
OMPClause *ActOnOpenMPUseDevicePtrClause(ArrayRef<Expr *> VarList,
const OMPVarListLocTy &Locs);
/// Called on well-formed 'use_device_addr' clause.
OMPClause *ActOnOpenMPUseDeviceAddrClause(ArrayRef<Expr *> VarList,
const OMPVarListLocTy &Locs);
/// Called on well-formed 'is_device_ptr' clause.
OMPClause *ActOnOpenMPIsDevicePtrClause(ArrayRef<Expr *> VarList,
const OMPVarListLocTy &Locs);
/// Called on well-formed 'nontemporal' clause.
OMPClause *ActOnOpenMPNontemporalClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Data for list of allocators.
///
/// One entry per allocator listed in a 'uses_allocators' clause; consumed by
/// ActOnOpenMPUsesAllocatorClause.
struct UsesAllocatorsData {
  /// Allocator expression as written in the clause.
  Expr *Allocator = nullptr;
  /// Allocator traits expression, or null if no traits were specified.
  Expr *AllocatorTraits = nullptr;
  /// Locations of '(' and ')' symbols surrounding the traits, if any.
  SourceLocation LParenLoc, RParenLoc;
};
/// Called on well-formed 'uses_allocators' clause.
OMPClause *ActOnOpenMPUsesAllocatorClause(SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc,
ArrayRef<UsesAllocatorsData> Data);
/// Called on well-formed 'affinity' clause.
OMPClause *ActOnOpenMPAffinityClause(SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc, Expr *Modifier,
ArrayRef<Expr *> Locators);
/// The kind of conversion being performed: an implicit conversion, one of
/// the explicit cast forms, or a builtin-operator operand conversion.
/// (isCast() below distinguishes the explicit cast kinds.)
enum CheckedConversionKind {
/// An implicit conversion.
CCK_ImplicitConversion,
/// A C-style cast.
CCK_CStyleCast,
/// A functional-style cast.
CCK_FunctionalCast,
/// A cast other than a C-style cast.
CCK_OtherCast,
/// A conversion for an operand of a builtin overloaded operator.
CCK_ForBuiltinOverloadedOp
};
/// Returns true if the given conversion kind corresponds to a cast written
/// explicitly in the source (C-style, functional, or other cast), rather
/// than an implicit or builtin-operator conversion.
static bool isCast(CheckedConversionKind CCK) {
  switch (CCK) {
  case CCK_CStyleCast:
  case CCK_FunctionalCast:
  case CCK_OtherCast:
    return true;
  default:
    return false;
  }
}
/// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit
/// cast. If there is already an implicit cast, merge into the existing one.
/// If isLvalue, the result of the cast is an lvalue.
ExprResult ImpCastExprToType(Expr *E, QualType Type, CastKind CK,
ExprValueKind VK = VK_RValue,
const CXXCastPath *BasePath = nullptr,
CheckedConversionKind CCK
= CCK_ImplicitConversion);
/// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding
/// to the conversion from scalar type ScalarTy to the Boolean type.
static CastKind ScalarTypeToBooleanCastKind(QualType ScalarTy);
/// IgnoredValueConversions - Given that an expression's result is
/// syntactically ignored, perform any conversions that are
/// required.
ExprResult IgnoredValueConversions(Expr *E);
// UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts
// functions and arrays to their respective pointers (C99 6.3.2.1).
ExprResult UsualUnaryConversions(Expr *E);
/// CallExprUnaryConversions - a special case of an unary conversion
/// performed on a function designator of a call expression.
ExprResult CallExprUnaryConversions(Expr *E);
// DefaultFunctionArrayConversion - converts functions and arrays
// to their respective pointers (C99 6.3.2.1).
ExprResult DefaultFunctionArrayConversion(Expr *E, bool Diagnose = true);
// DefaultFunctionArrayLvalueConversion - converts functions and
// arrays to their respective pointers and performs the
// lvalue-to-rvalue conversion.
ExprResult DefaultFunctionArrayLvalueConversion(Expr *E,
bool Diagnose = true);
// DefaultLvalueConversion - performs lvalue-to-rvalue conversion on
// the operand. This function is a no-op if the operand has a function type
// or an array type.
ExprResult DefaultLvalueConversion(Expr *E);
// DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that
// do not have a prototype. Integer promotions are performed on each
// argument, and arguments that have type float are promoted to double.
ExprResult DefaultArgumentPromotion(Expr *E);
/// If \p E is a prvalue denoting an unmaterialized temporary, materialize
/// it as an xvalue. In C++98, the result will still be a prvalue, because
/// we don't have xvalues there.
ExprResult TemporaryMaterializationConversion(Expr *E);
// Used for emitting the right warning by DefaultVariadicArgumentPromotion
enum VariadicCallType {
VariadicFunction,
VariadicBlock,
VariadicMethod,
VariadicConstructor,
VariadicDoesNotApply
};
VariadicCallType getVariadicCallType(FunctionDecl *FDecl,
const FunctionProtoType *Proto,
Expr *Fn);
// Used for determining in which context a type is allowed to be passed to a
// vararg function.
enum VarArgKind {
VAK_Valid,
VAK_ValidInCXX11,
VAK_Undefined,
VAK_MSVCUndefined,
VAK_Invalid
};
// Determines which VarArgKind fits an expression.
VarArgKind isValidVarArgType(const QualType &Ty);
/// Check to see if the given expression is a valid argument to a variadic
/// function, issuing a diagnostic if not.
void checkVariadicArgument(const Expr *E, VariadicCallType CT);
/// Check to see if a given expression could have '.c_str()' called on it.
bool hasCStrMethod(const Expr *E);
/// GatherArgumentsForCall - Collects argument expressions for various
/// forms of call prototypes.
bool GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl,
const FunctionProtoType *Proto,
unsigned FirstParam, ArrayRef<Expr *> Args,
SmallVectorImpl<Expr *> &AllArgs,
VariadicCallType CallType = VariadicDoesNotApply,
bool AllowExplicit = false,
bool IsListInitialization = false);
// DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but
// will create a runtime trap if the resulting type is not a POD type.
ExprResult DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT,
FunctionDecl *FDecl);
/// Context in which we're performing a usual arithmetic conversion.
enum ArithConvKind {
/// An arithmetic operation.
ACK_Arithmetic,
/// A bitwise operation.
ACK_BitwiseOp,
/// A comparison.
ACK_Comparison,
/// A conditional (?:) operator.
ACK_Conditional,
/// A compound assignment expression.
ACK_CompAssign,
};
// UsualArithmeticConversions - performs the UsualUnaryConversions on its
// operands and then handles various conversions that are common to binary
// operators (C99 6.3.1.8). If both operands aren't arithmetic, this
// routine returns the first non-arithmetic type found. The client is
// responsible for emitting appropriate error diagnostics.
QualType UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc, ArithConvKind ACK);
/// AssignConvertType - All of the 'assignment' semantic checks return this
/// enum to indicate whether the assignment was allowed. These checks are
/// done for simple assignments, as well as initialization, return from
/// function, argument passing, etc. The query is phrased in terms of a
/// source and destination type.
enum AssignConvertType {
/// Compatible - the types are compatible according to the standard.
Compatible,
/// PointerToInt - The assignment converts a pointer to an int, which we
/// accept as an extension.
PointerToInt,
/// IntToPointer - The assignment converts an int to a pointer, which we
/// accept as an extension.
IntToPointer,
/// FunctionVoidPointer - The assignment is between a function pointer and
/// void*, which the standard doesn't allow, but we accept as an extension.
FunctionVoidPointer,
/// IncompatiblePointer - The assignment is between two pointers types that
/// are not compatible, but we accept them as an extension.
IncompatiblePointer,
/// IncompatibleFunctionPointer - The assignment is between two function
/// pointers types that are not compatible, but we accept them as an
/// extension.
IncompatibleFunctionPointer,
/// IncompatiblePointerSign - The assignment is between two pointers types
/// which point to integers which have a different sign, but are otherwise
/// identical. This is a subset of the above, but broken out because it's by
/// far the most common case of incompatible pointers.
IncompatiblePointerSign,
/// CompatiblePointerDiscardsQualifiers - The assignment discards
/// c/v/r qualifiers, which we accept as an extension.
CompatiblePointerDiscardsQualifiers,
/// IncompatiblePointerDiscardsQualifiers - The assignment
/// discards qualifiers that we don't permit to be discarded,
/// like address spaces.
IncompatiblePointerDiscardsQualifiers,
/// IncompatibleNestedPointerAddressSpaceMismatch - The assignment
/// changes address spaces in nested pointer types which is not allowed.
/// For instance, converting __private int ** to __generic int ** is
/// illegal even though __private could be converted to __generic.
IncompatibleNestedPointerAddressSpaceMismatch,
/// IncompatibleNestedPointerQualifiers - The assignment is between two
/// nested pointer types, and the qualifiers other than the first two
/// levels differ e.g. char ** -> const char **, but we accept them as an
/// extension.
IncompatibleNestedPointerQualifiers,
/// IncompatibleVectors - The assignment is between two vector types that
/// have the same size, which we accept as an extension.
IncompatibleVectors,
/// IntToBlockPointer - The assignment converts an int to a block
/// pointer. We disallow this.
IntToBlockPointer,
/// IncompatibleBlockPointer - The assignment is between two block
/// pointers types that are not compatible.
IncompatibleBlockPointer,
/// IncompatibleObjCQualifiedId - The assignment is between a qualified
/// id type and something else (that is incompatible with it). For example,
/// "id <XXX>" = "Foo *", where "Foo *" doesn't implement the XXX protocol.
IncompatibleObjCQualifiedId,
/// IncompatibleObjCWeakRef - Assigning a weak-unavailable object to an
/// object with __weak qualifier.
IncompatibleObjCWeakRef,
/// Incompatible - We reject this conversion outright, it is invalid to
/// represent it in the AST.
Incompatible
};
/// DiagnoseAssignmentResult - Emit a diagnostic, if required, for the
/// assignment conversion type specified by ConvTy. This returns true if the
/// conversion was invalid or false if the conversion was accepted.
bool DiagnoseAssignmentResult(AssignConvertType ConvTy,
SourceLocation Loc,
QualType DstType, QualType SrcType,
Expr *SrcExpr, AssignmentAction Action,
bool *Complained = nullptr);
/// IsValueInFlagEnum - Determine if a value is allowed as part of a flag
/// enum. If AllowMask is true, then we also allow the complement of a valid
/// value, to be used as a mask.
bool IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val,
bool AllowMask) const;
/// DiagnoseAssignmentEnum - Warn if assignment to enum is a constant
/// integer not in the range of enum values.
void DiagnoseAssignmentEnum(QualType DstType, QualType SrcType,
Expr *SrcExpr);
/// CheckAssignmentConstraints - Perform type checking for assignment,
/// argument passing, variable initialization, and function return values.
/// C99 6.5.16.
AssignConvertType CheckAssignmentConstraints(SourceLocation Loc,
QualType LHSType,
QualType RHSType);
/// Check assignment constraints and optionally prepare for a conversion of
/// the RHS to the LHS type. The conversion is prepared for if ConvertRHS
/// is true.
AssignConvertType CheckAssignmentConstraints(QualType LHSType,
ExprResult &RHS,
CastKind &Kind,
bool ConvertRHS = true);
/// Check assignment constraints for an assignment of RHS to LHSType.
///
/// \param LHSType The destination type for the assignment.
/// \param RHS The source expression for the assignment.
/// \param Diagnose If \c true, diagnostics may be produced when checking
/// for assignability. If a diagnostic is produced, \p RHS will be
/// set to ExprError(). Note that this function may still return
/// without producing a diagnostic, even for an invalid assignment.
/// \param DiagnoseCFAudited If \c true, the target is a function parameter
/// in an audited Core Foundation API and does not need to be checked
/// for ARC retain issues.
/// \param ConvertRHS If \c true, \p RHS will be updated to model the
/// conversions necessary to perform the assignment. If \c false,
/// \p Diagnose must also be \c false.
AssignConvertType CheckSingleAssignmentConstraints(
QualType LHSType, ExprResult &RHS, bool Diagnose = true,
bool DiagnoseCFAudited = false, bool ConvertRHS = true);
// If the lhs type is a transparent union, check whether we
// can initialize the transparent union with the given expression.
AssignConvertType CheckTransparentUnionArgumentConstraints(QualType ArgType,
ExprResult &RHS);
bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType);
bool CheckExceptionSpecCompatibility(Expr *From, QualType ToType);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
AssignmentAction Action,
bool AllowExplicit = false);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
AssignmentAction Action,
bool AllowExplicit,
ImplicitConversionSequence& ICS);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
const ImplicitConversionSequence& ICS,
AssignmentAction Action,
CheckedConversionKind CCK
= CCK_ImplicitConversion);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
const StandardConversionSequence& SCS,
AssignmentAction Action,
CheckedConversionKind CCK);
ExprResult PerformQualificationConversion(
Expr *E, QualType Ty, ExprValueKind VK = VK_RValue,
CheckedConversionKind CCK = CCK_ImplicitConversion);
/// the following "Check" methods will return a valid/converted QualType
/// or a null QualType (indicating an error diagnostic was issued).
/// type checking binary operators (subroutines of CreateBuiltinBinOp).
QualType InvalidOperands(SourceLocation Loc, ExprResult &LHS,
ExprResult &RHS);
QualType InvalidLogicalVectorOperands(SourceLocation Loc, ExprResult &LHS,
ExprResult &RHS);
QualType CheckPointerToMemberOperands( // C++ 5.5
ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK,
SourceLocation OpLoc, bool isIndirect);
QualType CheckMultiplyDivideOperands( // C99 6.5.5
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign,
bool IsDivide);
QualType CheckRemainderOperands( // C99 6.5.5
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
bool IsCompAssign = false);
QualType CheckAdditionOperands( // C99 6.5.6
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc, QualType* CompLHSTy = nullptr);
QualType CheckSubtractionOperands( // C99 6.5.6
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
QualType* CompLHSTy = nullptr);
QualType CheckShiftOperands( // C99 6.5.7
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc, bool IsCompAssign = false);
void CheckPtrComparisonWithNullChar(ExprResult &E, ExprResult &NullE);
QualType CheckCompareOperands( // C99 6.5.8/9
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckBitwiseOperands( // C99 6.5.[10...12]
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckLogicalOperands( // C99 6.5.[13,14]
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
// CheckAssignmentOperands is used for both simple and compound assignment.
// For simple assignment, pass both expressions and a null converted type.
// For compound assignment, pass both expressions and the converted type.
QualType CheckAssignmentOperands( // C99 6.5.16.[1,2]
Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc, QualType CompoundType);
ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc,
UnaryOperatorKind Opcode, Expr *Op);
ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc,
BinaryOperatorKind Opcode,
Expr *LHS, Expr *RHS);
ExprResult checkPseudoObjectRValue(Expr *E);
Expr *recreateSyntacticForm(PseudoObjectExpr *E);
QualType CheckConditionalOperands( // C99 6.5.15
ExprResult &Cond, ExprResult &LHS, ExprResult &RHS,
ExprValueKind &VK, ExprObjectKind &OK, SourceLocation QuestionLoc);
QualType CXXCheckConditionalOperands( // C++ 5.16
ExprResult &cond, ExprResult &lhs, ExprResult &rhs,
ExprValueKind &VK, ExprObjectKind &OK, SourceLocation questionLoc);
QualType CheckGNUVectorConditionalTypes(ExprResult &Cond, ExprResult &LHS,
ExprResult &RHS,
SourceLocation QuestionLoc);
QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2,
bool ConvertArgs = true);
/// Overload of FindCompositePointerType that operates on ExprResults:
/// forwards to the Expr*& overload and writes any rewritten operand
/// expressions back into \p E1 and \p E2.
QualType FindCompositePointerType(SourceLocation Loc,
                                  ExprResult &E1, ExprResult &E2,
                                  bool ConvertArgs = true) {
  Expr *LHS = E1.get();
  Expr *RHS = E2.get();
  QualType Composite = FindCompositePointerType(Loc, LHS, RHS, ConvertArgs);
  E1 = LHS;
  E2 = RHS;
  return Composite;
}
QualType FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS,
SourceLocation QuestionLoc);
bool DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr,
SourceLocation QuestionLoc);
void DiagnoseAlwaysNonNullPointer(Expr *E,
Expr::NullPointerConstantKind NullType,
bool IsEqual, SourceRange Range);
/// type checking for vector binary operators.
QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc, bool IsCompAssign,
bool AllowBothBool, bool AllowBoolConversion);
QualType GetSignedVectorType(QualType V);
QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc);
/// Type checking for matrix binary operators.
QualType CheckMatrixElementwiseOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc,
bool IsCompAssign);
QualType CheckMatrixMultiplyOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc, bool IsCompAssign);
bool areLaxCompatibleVectorTypes(QualType srcType, QualType destType);
bool isLaxVectorConversion(QualType srcType, QualType destType);
/// type checking declaration initializers (C99 6.7.8)
bool CheckForConstantInitializer(Expr *e, QualType t);
// type checking C++ declaration initializers (C++ [dcl.init]).
/// ReferenceCompareResult - Expresses the result of comparing two
/// types (cv1 T1 and cv2 T2) to determine their compatibility for the
/// purposes of initialization by reference (C++ [dcl.init.ref]p4).
enum ReferenceCompareResult {
/// Ref_Incompatible - The two types are incompatible, so direct
/// reference binding is not possible.
Ref_Incompatible = 0,
/// Ref_Related - The two types are reference-related, which means
/// that their unqualified forms (T1 and T2) are either the same
/// or T1 is a base class of T2.
Ref_Related,
/// Ref_Compatible - The two types are reference-compatible.
Ref_Compatible
};
// Fake up a scoped enumeration that still contextually converts to bool.
struct ReferenceConversionsScope {
/// The conversions that would be performed on an lvalue of type T2 when
/// binding a reference of type T1 to it, as determined when evaluating
/// whether T1 is reference-compatible with T2.
enum ReferenceConversions {
Qualification = 0x1,
NestedQualification = 0x2,
Function = 0x4,
DerivedToBase = 0x8,
ObjC = 0x10,
ObjCLifetime = 0x20,
// Each value is a distinct bit so multiple conversions can be OR'd into
// one mask; LLVM_MARK_AS_BITMASK_ENUM enables bitwise operators on it.
LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/ObjCLifetime)
};
};
using ReferenceConversions = ReferenceConversionsScope::ReferenceConversions;
ReferenceCompareResult
CompareReferenceRelationship(SourceLocation Loc, QualType T1, QualType T2,
ReferenceConversions *Conv = nullptr);
ExprResult checkUnknownAnyCast(SourceRange TypeRange, QualType CastType,
Expr *CastExpr, CastKind &CastKind,
ExprValueKind &VK, CXXCastPath &Path);
/// Force an expression with unknown-type to an expression of the
/// given type.
ExprResult forceUnknownAnyToType(Expr *E, QualType ToType);
/// Type-check an expression that's being passed to an
/// __unknown_anytype parameter.
ExprResult checkUnknownAnyArg(SourceLocation callLoc,
Expr *result, QualType ¶mType);
// CheckVectorCast - check type constraints for vectors.
// Since vectors are an extension, there are no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size.
// returns true if the cast is invalid
bool CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty,
CastKind &Kind);
/// Prepare `SplattedExpr` for a vector splat operation, adding
/// implicit casts if necessary.
ExprResult prepareVectorSplat(QualType VectorTy, Expr *SplattedExpr);
// CheckExtVectorCast - check type constraints for extended vectors.
// Since vectors are an extension, there are no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size,
// or vectors and the element type of that vector.
// returns the cast expr
ExprResult CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr,
CastKind &Kind);
ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo, QualType Type,
SourceLocation LParenLoc,
Expr *CastExpr,
SourceLocation RParenLoc);
enum ARCConversionResult { ACR_okay, ACR_unbridged, ACR_error };
/// Checks for invalid conversions and casts between
/// retainable pointers and other pointer kinds for ARC and Weak.
ARCConversionResult CheckObjCConversion(SourceRange castRange,
QualType castType, Expr *&op,
CheckedConversionKind CCK,
bool Diagnose = true,
bool DiagnoseCFAudited = false,
BinaryOperatorKind Opc = BO_PtrMemD
);
Expr *stripARCUnbridgedCast(Expr *e);
void diagnoseARCUnbridgedCast(Expr *e);
bool CheckObjCARCUnavailableWeakConversion(QualType castType,
QualType ExprType);
/// checkRetainCycles - Check whether an Objective-C message send
/// might create an obvious retain cycle.
void checkRetainCycles(ObjCMessageExpr *msg);
void checkRetainCycles(Expr *receiver, Expr *argument);
void checkRetainCycles(VarDecl *Var, Expr *Init);
/// checkUnsafeAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained type.
bool checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS);
/// checkUnsafeExprAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained expression.
void checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS);
/// CheckMessageArgumentTypes - Check types in an Obj-C message send.
/// \param Method - May be null.
/// \param [out] ReturnType - The return type of the send.
/// \return true iff there were any incompatible types.
bool CheckMessageArgumentTypes(const Expr *Receiver, QualType ReceiverType,
MultiExprArg Args, Selector Sel,
ArrayRef<SourceLocation> SelectorLocs,
ObjCMethodDecl *Method, bool isClassMessage,
bool isSuperMessage, SourceLocation lbrac,
SourceLocation rbrac, SourceRange RecRange,
QualType &ReturnType, ExprValueKind &VK);
/// Determine the result of a message send expression based on
/// the type of the receiver, the method expected to receive the message,
/// and the form of the message send.
QualType getMessageSendResultType(const Expr *Receiver, QualType ReceiverType,
ObjCMethodDecl *Method, bool isClassMessage,
bool isSuperMessage);
/// If the given expression involves a message send to a method
/// with a related result type, emit a note describing what happened.
void EmitRelatedResultTypeNote(const Expr *E);
/// Given that we had incompatible pointer types in a return
/// statement, check whether we're in a method with a related result
/// type, and if so, emit a note describing what happened.
void EmitRelatedResultTypeNoteForReturn(QualType destType);
/// The result of Sema's analysis of a statement condition (if/while/for/
/// do/switch): the optional condition variable, the converted condition
/// expression, and — for constexpr conditions — the statically known
/// boolean value, when one could be computed.
class ConditionResult {
Decl *ConditionVar;   // variable declared in the condition, if any
FullExprArg Condition; // the (converted) condition expression
bool Invalid;          // true if analysis of the condition failed
bool HasKnownValue;    // true if the condition's value is known statically
bool KnownValue;       // the known value; meaningful only if HasKnownValue
friend class Sema;
// Built by Sema: for a constexpr condition whose expression is not
// value-dependent, evaluate it immediately and record the result.
ConditionResult(Sema &S, Decl *ConditionVar, FullExprArg Condition,
bool IsConstexpr)
: ConditionVar(ConditionVar), Condition(Condition), Invalid(false),
HasKnownValue(IsConstexpr && Condition.get() &&
!Condition.get()->isValueDependent()),
KnownValue(HasKnownValue &&
!!Condition.get()->EvaluateKnownConstInt(S.Context)) {}
// Error/empty state: no variable, no expression, optionally marked invalid.
explicit ConditionResult(bool Invalid)
: ConditionVar(nullptr), Condition(nullptr), Invalid(Invalid),
HasKnownValue(false), KnownValue(false) {}
public:
ConditionResult() : ConditionResult(false) {}
bool isInvalid() const { return Invalid; }
/// Returns the condition variable (may be null) and the condition expr.
std::pair<VarDecl *, Expr *> get() const {
return std::make_pair(cast_or_null<VarDecl>(ConditionVar),
Condition.get());
}
/// Returns the statically known value of the condition, if it has one.
llvm::Optional<bool> getKnownValue() const {
if (!HasKnownValue)
return None;
return KnownValue;
}
};
/// Produce an invalid ConditionResult, signalling that condition analysis
/// failed.
static ConditionResult ConditionError() { return ConditionResult(true); }
/// The syntactic context a condition appears in, which determines how it
/// is converted and checked.
enum class ConditionKind {
Boolean, ///< A boolean condition, from 'if', 'while', 'for', or 'do'.
ConstexprIf, ///< A constant boolean condition from 'if constexpr'.
Switch ///< An integral condition for a 'switch' statement.
};
ConditionResult ActOnCondition(Scope *S, SourceLocation Loc,
Expr *SubExpr, ConditionKind CK);
ConditionResult ActOnConditionVariable(Decl *ConditionVar,
SourceLocation StmtLoc,
ConditionKind CK);
DeclResult ActOnCXXConditionDeclaration(Scope *S, Declarator &D);
ExprResult CheckConditionVariable(VarDecl *ConditionVar,
SourceLocation StmtLoc,
ConditionKind CK);
ExprResult CheckSwitchCondition(SourceLocation SwitchLoc, Expr *Cond);
/// CheckBooleanCondition - Diagnose problems involving the use of
/// the given expression as a boolean condition (e.g. in an if
/// statement). Also performs the standard function and array
/// decays, possibly changing the input variable.
///
/// \param Loc - A location associated with the condition, e.g. the
/// 'if' keyword.
/// \return true iff there were any errors
ExprResult CheckBooleanCondition(SourceLocation Loc, Expr *E,
bool IsConstexpr = false);
/// ActOnExplicitBoolSpecifier - Build an ExplicitSpecifier from an expression
/// found in an explicit(bool) specifier.
ExplicitSpecifier ActOnExplicitBoolSpecifier(Expr *E);
/// tryResolveExplicitSpecifier - Attempt to resolve the explicit specifier.
/// Returns true if the explicit specifier is now resolved.
bool tryResolveExplicitSpecifier(ExplicitSpecifier &ExplicitSpec);
/// DiagnoseAssignmentAsCondition - Given that an expression is
/// being used as a boolean condition, warn if it's an assignment.
void DiagnoseAssignmentAsCondition(Expr *E);
/// Redundant parentheses over an equality comparison can indicate
/// that the user intended an assignment used as condition.
void DiagnoseEqualityWithExtraParens(ParenExpr *ParenE);
/// CheckCXXBooleanCondition - Returns true if conversion to bool is invalid.
ExprResult CheckCXXBooleanCondition(Expr *CondExpr, bool IsConstexpr = false);
/// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have
/// the specified width and sign. If an overflow occurs, detect it and emit
/// the specified diagnostic.
void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal,
unsigned NewWidth, bool NewSign,
SourceLocation Loc, unsigned DiagID);
/// Checks that the Objective-C declaration is declared in the global scope.
/// Emits an error and marks the declaration as invalid if it's not declared
/// in the global scope.
bool CheckObjCDeclScope(Decl *D);
/// Abstract base class used for diagnosing integer constant
/// expression violations.
class VerifyICEDiagnoser {
public:
bool Suppress; // when true, no diagnostics are emitted at all
VerifyICEDiagnoser(bool Suppress = false) : Suppress(Suppress) { }
/// Diagnose that the expression is not an integer constant expression.
virtual void diagnoseNotICE(Sema &S, SourceLocation Loc, SourceRange SR) =0;
/// Diagnose that the expression is not a strict ICE but could be folded
/// to a constant (used when folding is permitted; see AllowFold callers).
virtual void diagnoseFold(Sema &S, SourceLocation Loc, SourceRange SR);
virtual ~VerifyICEDiagnoser() { }
};
/// VerifyIntegerConstantExpression - Verifies that an expression is an ICE,
/// and reports the appropriate diagnostics. Returns false on success.
/// Can optionally return the value of the expression.
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
VerifyICEDiagnoser &Diagnoser,
bool AllowFold = true);
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
unsigned DiagID,
bool AllowFold = true);
ExprResult VerifyIntegerConstantExpression(Expr *E,
llvm::APSInt *Result = nullptr);
/// VerifyBitField - verifies that a bit field expression is an ICE and has
/// the correct width, and that the field type is valid.
/// Returns false on success.
/// Can optionally return whether the bit-field is of width 0
ExprResult VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName,
QualType FieldTy, bool IsMsStruct,
Expr *BitWidth, bool *ZeroWidth = nullptr);
private:
unsigned ForceCUDAHostDeviceDepth = 0;
public:
/// Increments our count of the number of times we've seen a pragma forcing
/// functions to be __host__ __device__. So long as this count is greater
/// than zero, all functions encountered will be __host__ __device__.
void PushForceCUDAHostDevice();
/// Decrements our count of the number of times we've seen a pragma forcing
/// functions to be __host__ __device__. Returns false if the count is 0
/// before decrementing, so you can emit an error.
bool PopForceCUDAHostDevice();
/// Diagnostics that are emitted only if we discover that the given function
/// must be codegen'ed. Because handling these correctly adds overhead to
/// compilation, this is currently only enabled for CUDA compilations.
llvm::DenseMap<CanonicalDeclPtr<FunctionDecl>,
std::vector<PartialDiagnosticAt>>
DeviceDeferredDiags;
/// A pair of a canonical FunctionDecl and a SourceLocation. When used as the
/// key in a hashtable, both the FD and location are hashed.
struct FunctionDeclAndLoc {
CanonicalDeclPtr<FunctionDecl> FD;
SourceLocation Loc;
};
/// FunctionDecls and SourceLocations for which CheckCUDACall has emitted a
/// (maybe deferred) "bad call" diagnostic. We use this to avoid emitting the
/// same deferred diag twice.
llvm::DenseSet<FunctionDeclAndLoc> LocsWithCUDACallDiags;
/// An inverse call graph, mapping known-emitted functions to one of their
/// known-emitted callers (plus the location of the call).
///
/// Functions that we can tell a priori must be emitted aren't added to this
/// map.
llvm::DenseMap</* Callee = */ CanonicalDeclPtr<FunctionDecl>,
/* Caller = */ FunctionDeclAndLoc>
DeviceKnownEmittedFns;
/// Diagnostic builder for CUDA/OpenMP devices errors which may or may not be
/// deferred.
///
/// In CUDA, there exist constructs (e.g. variable-length arrays, try/catch)
/// which are not allowed to appear inside __device__ functions and are
/// allowed to appear in __host__ __device__ functions only if the host+device
/// function is never codegen'ed.
///
/// To handle this, we use the notion of "deferred diagnostics", where we
/// attach a diagnostic to a FunctionDecl that's emitted iff it's codegen'ed.
///
/// This class lets you emit either a regular diagnostic, a deferred
/// diagnostic, or no diagnostic at all, according to an argument you pass to
/// its constructor, thus simplifying the process of creating these "maybe
/// deferred" diagnostics.
class DeviceDiagBuilder {
public:
/// How the diagnostic should be emitted (see the class comment above the
/// declaration for the motivation behind deferred diagnostics).
enum Kind {
/// Emit no diagnostics.
K_Nop,
/// Emit the diagnostic immediately (i.e., behave like Sema::Diag()).
K_Immediate,
/// Emit the diagnostic immediately, and, if it's a warning or error, also
/// emit a call stack showing how this function can be reached by an a
/// priori known-emitted function.
K_ImmediateWithCallStack,
/// Create a deferred diagnostic, which is emitted only if the function
/// it's attached to is codegen'ed. Also emit a call stack as with
/// K_ImmediateWithCallStack.
K_Deferred
};
DeviceDiagBuilder(Kind K, SourceLocation Loc, unsigned DiagID,
FunctionDecl *Fn, Sema &S);
DeviceDiagBuilder(DeviceDiagBuilder &&D);
DeviceDiagBuilder(const DeviceDiagBuilder &) = default;
~DeviceDiagBuilder();
/// Convertible to bool: True if we immediately emitted an error, false if
/// we didn't emit an error or we created a deferred error.
///
/// Example usage:
///
/// if (DeviceDiagBuilder(...) << foo << bar)
/// return ExprError();
///
/// But see CUDADiagIfDeviceCode() and CUDADiagIfHostCode() -- you probably
/// want to use these instead of creating a DeviceDiagBuilder yourself.
operator bool() const { return ImmediateDiag.hasValue(); }
/// Stream a value into whichever diagnostic this builder created: the
/// immediate diagnostic if one exists, otherwise the deferred partial
/// diagnostic recorded in Sema::DeviceDeferredDiags (if any).
template <typename T>
friend const DeviceDiagBuilder &operator<<(const DeviceDiagBuilder &Diag,
const T &Value) {
if (Diag.ImmediateDiag.hasValue())
*Diag.ImmediateDiag << Value;
else if (Diag.PartialDiagId.hasValue())
Diag.S.DeviceDeferredDiags[Diag.Fn][*Diag.PartialDiagId].second
<< Value;
return Diag;
}
private:
Sema &S;            // Sema instance used to emit/record the diagnostic.
SourceLocation Loc; // Location the diagnostic points at.
unsigned DiagID;    // ID of the diagnostic to emit.
FunctionDecl *Fn;   // Function a deferred diagnostic is attached to.
bool ShowCallStack; // Whether to also emit the known-emitted call stack.
// Invariant: At most one of these Optionals has a value.
// FIXME: Switch these to a Variant once that exists.
llvm::Optional<SemaDiagnosticBuilder> ImmediateDiag;
llvm::Optional<unsigned> PartialDiagId;
};
/// Creates a DeviceDiagBuilder that emits the diagnostic if the current context
/// is "used as device code".
///
/// - If CurContext is a __host__ function, does not emit any diagnostics.
/// - If CurContext is a __device__ or __global__ function, emits the
/// diagnostics immediately.
/// - If CurContext is a __host__ __device__ function and we are compiling for
/// the device, creates a diagnostic which is emitted if and when we realize
/// that the function will be codegen'ed.
///
/// Example usage:
///
/// // Variable-length arrays are not allowed in CUDA device code.
/// if (CUDADiagIfDeviceCode(Loc, diag::err_cuda_vla) << CurrentCUDATarget())
/// return ExprError();
/// // Otherwise, continue parsing as normal.
DeviceDiagBuilder CUDADiagIfDeviceCode(SourceLocation Loc, unsigned DiagID);
/// Creates a DeviceDiagBuilder that emits the diagnostic if the current context
/// is "used as host code".
///
/// Same as CUDADiagIfDeviceCode, with "host" and "device" switched.
DeviceDiagBuilder CUDADiagIfHostCode(SourceLocation Loc, unsigned DiagID);
/// Creates a DeviceDiagBuilder that emits the diagnostic if the current
/// context is "used as device code".
///
/// - If CurContext is a `declare target` function or it is known that the
/// function is emitted for the device, emits the diagnostics immediately.
/// - If CurContext is a non-`declare target` function and we are compiling
/// for the device, creates a diagnostic which is emitted if and when we
/// realize that the function will be codegen'ed.
///
/// Example usage:
///
/// // Variable-length arrays are not allowed in NVPTX device code.
/// if (diagIfOpenMPDeviceCode(Loc, diag::err_vla_unsupported))
/// return ExprError();
/// // Otherwise, continue parsing as normal.
DeviceDiagBuilder diagIfOpenMPDeviceCode(SourceLocation Loc, unsigned DiagID);
/// Creates a DeviceDiagBuilder that emits the diagnostic if the current
/// context is "used as host code".
///
/// - If CurContext is a `declare target` function or it is known that the
/// function is emitted for the host, emits the diagnostics immediately.
/// - If CurContext is a non-host function, just ignore it.
///
/// Example usage:
///
/// // Variable-length arrays are not allowed in NVPTX device code.
/// if (diagIfOpenMPHostCode(Loc, diag::err_vla_unsupported))
/// return ExprError();
/// // Otherwise, continue parsing as normal.
DeviceDiagBuilder diagIfOpenMPHostCode(SourceLocation Loc, unsigned DiagID);
DeviceDiagBuilder targetDiag(SourceLocation Loc, unsigned DiagID);
/// Check if the expression is allowed to be used in expressions for the
/// offloading devices.
void checkDeviceDecl(const ValueDecl *D, SourceLocation Loc);
enum CUDAFunctionTarget {
CFT_Device,        // __device__ function.
CFT_Global,        // __global__ kernel function.
CFT_Host,          // __host__ (or unattributed) function; also used for null decls.
CFT_HostDevice,    // __host__ __device__ function.
CFT_InvalidTarget  // Function with contradictory target attributes.
};
/// Determines whether the given function is a CUDA device/host/kernel/etc.
/// function.
///
/// Use this rather than examining the function's attributes yourself -- you
/// will get it wrong. Returns CFT_Host if D is null.
CUDAFunctionTarget IdentifyCUDATarget(const FunctionDecl *D,
bool IgnoreImplicitHDAttr = false);
CUDAFunctionTarget IdentifyCUDATarget(const ParsedAttributesView &Attrs);
/// Gets the CUDA target for the current context.
CUDAFunctionTarget CurrentCUDATarget() {
// Non-function contexts yield a null FunctionDecl, which
// IdentifyCUDATarget documents as mapping to CFT_Host.
auto *ContextFn = dyn_cast<FunctionDecl>(CurContext);
return IdentifyCUDATarget(ContextFn);
}
static bool isCUDAImplicitHostDeviceFunction(const FunctionDecl *D);
// CUDA function call preference. Must be ordered numerically from
// worst to best.
enum CUDAFunctionPreference {
CFP_Never, // Invalid caller/callee combination.
CFP_WrongSide, // Calls from host-device to host or device
// function that do not match current compilation
// mode.
CFP_HostDevice, // Any calls to host/device functions.
CFP_SameSide, // Calls from host-device to host or device
// function matching current compilation mode.
CFP_Native, // host-to-host or device-to-device calls.
};
/// Identifies relative preference of a given Caller/Callee
/// combination, based on their host/device attributes.
/// \param Caller function which needs address of \p Callee.
/// nullptr in case of global context.
/// \param Callee target function
///
/// \returns preference value for particular Caller/Callee combination.
CUDAFunctionPreference IdentifyCUDAPreference(const FunctionDecl *Caller,
const FunctionDecl *Callee);
/// Determines whether Caller may invoke Callee, based on their CUDA
/// host/device attributes. Returns false if the call is not allowed.
///
/// Note: Will return true for CFP_WrongSide calls. These may appear in
/// semantically correct CUDA programs, but only if they're never codegen'ed.
bool IsAllowedCUDACall(const FunctionDecl *Caller,
const FunctionDecl *Callee) {
return IdentifyCUDAPreference(Caller, Callee) != CFP_Never;
}
/// May add implicit CUDAHostAttr and CUDADeviceAttr attributes to FD,
/// depending on FD and the current compilation settings.
void maybeAddCUDAHostDeviceAttrs(FunctionDecl *FD,
const LookupResult &Previous);
/// May add implicit CUDAConstantAttr attribute to VD, depending on VD
/// and current compilation settings.
void MaybeAddCUDAConstantAttr(VarDecl *VD);
public:
/// Check whether we're allowed to call Callee from the current context.
///
/// - If the call is never allowed in a semantically-correct program
/// (CFP_Never), emits an error and returns false.
///
/// - If the call is allowed in semantically-correct programs, but only if
/// it's never codegen'ed (CFP_WrongSide), creates a deferred diagnostic to
/// be emitted if and when the caller is codegen'ed, and returns true.
///
/// Will only create deferred diagnostics for a given SourceLocation once,
/// so you can safely call this multiple times without generating duplicate
/// deferred errors.
///
/// - Otherwise, returns true without emitting any diagnostics.
bool CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee);
void CUDACheckLambdaCapture(CXXMethodDecl *D, const sema::Capture &Capture);
/// Set __device__ or __host__ __device__ attributes on the given lambda
/// operator() method.
///
/// CUDA lambdas are, by default, host device functions unless they have
/// an explicit host or device attribute.
void CUDASetLambdaAttrs(CXXMethodDecl *Method);
/// Finds a function in \p Matches with highest calling priority
/// from \p Caller context and erases all functions with lower
/// calling priority.
void EraseUnwantedCUDAMatches(
const FunctionDecl *Caller,
SmallVectorImpl<std::pair<DeclAccessPair, FunctionDecl *>> &Matches);
/// Given a implicit special member, infer its CUDA target from the
/// calls it needs to make to underlying base/field special members.
/// \param ClassDecl the class for which the member is being created.
/// \param CSM the kind of special member.
/// \param MemberDecl the special member itself.
/// \param ConstRHS true if this is a copy operation with a const object on
/// its RHS.
/// \param Diagnose true if this call should emit diagnostics.
/// \return true if there was an error inferring.
/// The result of this call is implicit CUDA target attribute(s) attached to
/// the member declaration.
bool inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl,
CXXSpecialMember CSM,
CXXMethodDecl *MemberDecl,
bool ConstRHS,
bool Diagnose);
/// \return true if \p CD can be considered empty according to CUDA
/// (E.2.3.1 in CUDA 7.5 Programming guide).
bool isEmptyCudaConstructor(SourceLocation Loc, CXXConstructorDecl *CD);
bool isEmptyCudaDestructor(SourceLocation Loc, CXXDestructorDecl *CD);
// \brief Checks that initializers of \p Var satisfy CUDA restrictions. In
// case of error emits appropriate diagnostic and invalidates \p Var.
//
// \details CUDA allows only empty constructors as initializers for global
// variables (see E.2.3.1, CUDA 7.5). The same restriction also applies to all
// __shared__ variables whether they are local or not (they all are implicitly
// static in CUDA). One exception is that CUDA allows constant initializers
// for __constant__ and __device__ variables.
void checkAllowedCUDAInitializer(VarDecl *VD);
/// Check whether NewFD is a valid overload for CUDA. Emits
/// diagnostics and invalidates NewFD if not.
void checkCUDATargetOverload(FunctionDecl *NewFD,
const LookupResult &Previous);
/// Copies target attributes from the template TD to the function FD.
void inheritCUDATargetAttrs(FunctionDecl *FD, const FunctionTemplateDecl &TD);
/// Returns the name of the launch configuration function. This is the name
/// of the function that will be called to configure kernel call, with the
/// parameters specified via <<<>>>.
std::string getCudaConfigureFuncName() const;
/// \name Code completion
//@{
/// Describes the context in which code completion occurs.
enum ParserCompletionContext {
/// Code completion occurs at top-level or namespace context.
PCC_Namespace,
/// Code completion occurs within a class, struct, or union.
PCC_Class,
/// Code completion occurs within an Objective-C interface, protocol,
/// or category.
PCC_ObjCInterface,
/// Code completion occurs within an Objective-C implementation or
/// category implementation
PCC_ObjCImplementation,
/// Code completion occurs within the list of instance variables
/// in an Objective-C interface, protocol, category, or implementation.
PCC_ObjCInstanceVariableList,
/// Code completion occurs following one or more template
/// headers.
PCC_Template,
/// Code completion occurs following one or more template
/// headers within a class.
PCC_MemberTemplate,
/// Code completion occurs within an expression.
PCC_Expression,
/// Code completion occurs within a statement, which may
/// also be an expression or a declaration.
PCC_Statement,
/// Code completion occurs at the beginning of the
/// initialization statement (or expression) in a for loop.
PCC_ForInit,
/// Code completion occurs within the condition of an if,
/// while, switch, or for statement.
PCC_Condition,
/// Code completion occurs within the body of a function on a
/// recovery path, where we do not have a specific handle on our position
/// in the grammar.
PCC_RecoveryInFunction,
/// Code completion occurs where only a type is permitted.
PCC_Type,
/// Code completion occurs in a parenthesized expression, which
/// might also be a type cast.
PCC_ParenthesizedExpression,
/// Code completion occurs within a sequence of declaration
/// specifiers within a function, method, or block.
PCC_LocalDeclarationSpecifiers
};
void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path);
void CodeCompleteOrdinaryName(Scope *S,
ParserCompletionContext CompletionContext);
void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS,
bool AllowNonIdentifiers,
bool AllowNestedNameSpecifiers);
struct CodeCompleteExpressionData;
void CodeCompleteExpression(Scope *S,
const CodeCompleteExpressionData &Data);
void CodeCompleteExpression(Scope *S, QualType PreferredType,
bool IsParenthesized = false);
void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base, Expr *OtherOpBase,
SourceLocation OpLoc, bool IsArrow,
bool IsBaseExprStatement,
QualType PreferredType);
void CodeCompletePostfixExpression(Scope *S, ExprResult LHS,
QualType PreferredType);
void CodeCompleteTag(Scope *S, unsigned TagSpec);
void CodeCompleteTypeQualifiers(DeclSpec &DS);
void CodeCompleteFunctionQualifiers(DeclSpec &DS, Declarator &D,
const VirtSpecifiers *VS = nullptr);
void CodeCompleteBracketDeclarator(Scope *S);
void CodeCompleteCase(Scope *S);
/// Reports signatures for a call to CodeCompleteConsumer and returns the
/// preferred type for the current argument. Returned type can be null.
QualType ProduceCallSignatureHelp(Scope *S, Expr *Fn, ArrayRef<Expr *> Args,
SourceLocation OpenParLoc);
QualType ProduceConstructorSignatureHelp(Scope *S, QualType Type,
SourceLocation Loc,
ArrayRef<Expr *> Args,
SourceLocation OpenParLoc);
QualType ProduceCtorInitMemberSignatureHelp(Scope *S, Decl *ConstructorDecl,
CXXScopeSpec SS,
ParsedType TemplateTypeTy,
ArrayRef<Expr *> ArgExprs,
IdentifierInfo *II,
SourceLocation OpenParLoc);
void CodeCompleteInitializer(Scope *S, Decl *D);
/// Trigger code completion for a record of \p BaseType. \p InitExprs are
/// expressions in the initializer list seen so far and \p D is the current
/// Designation being parsed.
void CodeCompleteDesignator(const QualType BaseType,
llvm::ArrayRef<Expr *> InitExprs,
const Designation &D);
void CodeCompleteAfterIf(Scope *S, bool IsBracedThen);
void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS, bool EnteringContext,
bool IsUsingDeclaration, QualType BaseType,
QualType PreferredType);
void CodeCompleteUsing(Scope *S);
void CodeCompleteUsingDirective(Scope *S);
void CodeCompleteNamespaceDecl(Scope *S);
void CodeCompleteNamespaceAliasDecl(Scope *S);
void CodeCompleteOperatorName(Scope *S);
void CodeCompleteConstructorInitializer(
Decl *Constructor,
ArrayRef<CXXCtorInitializer *> Initializers);
void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro,
bool AfterAmpersand);
void CodeCompleteAfterFunctionEquals(Declarator &D);
void CodeCompleteObjCAtDirective(Scope *S);
void CodeCompleteObjCAtVisibility(Scope *S);
void CodeCompleteObjCAtStatement(Scope *S);
void CodeCompleteObjCAtExpression(Scope *S);
void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS);
void CodeCompleteObjCPropertyGetter(Scope *S);
void CodeCompleteObjCPropertySetter(Scope *S);
void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS,
bool IsParameter);
void CodeCompleteObjCMessageReceiver(Scope *S);
void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression);
void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression,
bool IsSuper = false);
void CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression,
ObjCInterfaceDecl *Super = nullptr);
void CodeCompleteObjCForCollection(Scope *S,
DeclGroupPtrTy IterationVar);
void CodeCompleteObjCSelector(Scope *S,
ArrayRef<IdentifierInfo *> SelIdents);
void CodeCompleteObjCProtocolReferences(
ArrayRef<IdentifierLocPair> Protocols);
void CodeCompleteObjCProtocolDecl(Scope *S);
void CodeCompleteObjCInterfaceDecl(Scope *S);
void CodeCompleteObjCSuperclass(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCImplementationDecl(Scope *S);
void CodeCompleteObjCInterfaceCategory(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCImplementationCategory(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCPropertyDefinition(Scope *S);
void CodeCompleteObjCPropertySynthesizeIvar(Scope *S,
IdentifierInfo *PropertyName);
void CodeCompleteObjCMethodDecl(Scope *S, Optional<bool> IsInstanceMethod,
ParsedType ReturnType);
void CodeCompleteObjCMethodDeclSelector(Scope *S,
bool IsInstanceMethod,
bool AtParameterName,
ParsedType ReturnType,
ArrayRef<IdentifierInfo *> SelIdents);
void CodeCompleteObjCClassPropertyRefExpr(Scope *S, IdentifierInfo &ClassName,
SourceLocation ClassNameLoc,
bool IsBaseExprStatement);
void CodeCompletePreprocessorDirective(bool InConditional);
void CodeCompleteInPreprocessorConditionalExclusion(Scope *S);
void CodeCompletePreprocessorMacroName(bool IsDefinition);
void CodeCompletePreprocessorExpression();
void CodeCompletePreprocessorMacroArgument(Scope *S,
IdentifierInfo *Macro,
MacroInfo *MacroInfo,
unsigned Argument);
void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled);
void CodeCompleteNaturalLanguage();
void CodeCompleteAvailabilityPlatformName();
void GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator,
CodeCompletionTUInfo &CCTUInfo,
SmallVectorImpl<CodeCompletionResult> &Results);
//@}
//===--------------------------------------------------------------------===//
// Extra semantic analysis beyond the C type system
public:
SourceLocation getLocationOfStringLiteralByte(const StringLiteral *SL,
unsigned ByteNo) const;
private:
void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr,
const ArraySubscriptExpr *ASE=nullptr,
bool AllowOnePastEnd=true, bool IndexNegated=false);
void CheckArrayAccess(const Expr *E);
// Used to grab the relevant information from a FormatAttr and a
// FunctionDeclaration.
struct FormatStringInfo {
unsigned FormatIdx;
unsigned FirstDataArg;
bool HasVAListArg;
};
static bool getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember,
FormatStringInfo *FSI);
bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall,
const FunctionProtoType *Proto);
bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc,
ArrayRef<const Expr *> Args);
bool CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall,
const FunctionProtoType *Proto);
bool CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto);
void CheckConstructorCall(FunctionDecl *FDecl,
ArrayRef<const Expr *> Args,
const FunctionProtoType *Proto,
SourceLocation Loc);
void checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto,
const Expr *ThisArg, ArrayRef<const Expr *> Args,
bool IsMemberFunction, SourceLocation Loc, SourceRange Range,
VariadicCallType CallType);
bool CheckObjCString(Expr *Arg);
ExprResult CheckOSLogFormatStringArg(Expr *Arg);
ExprResult CheckBuiltinFunctionCall(FunctionDecl *FDecl,
unsigned BuiltinID, CallExpr *TheCall);
bool CheckTSBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
void checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD, CallExpr *TheCall);
bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall,
unsigned MaxWidth);
bool CheckNeonBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckMVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckSVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckCDEBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckARMCoprocessorImmediate(const TargetInfo &TI, const Expr *CoprocArg,
bool WantCDE);
bool CheckARMBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckAArch64BuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckBPFBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckHexagonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall);
bool CheckMipsBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckMipsBuiltinCpu(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckMipsBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall);
bool CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinGatherScatterScale(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinTileArguments(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinTileArgumentsRange(CallExpr *TheCall,
ArrayRef<int> ArgNums);
bool CheckX86BuiltinTileArgumentsRange(CallExpr *TheCall, int ArgNum);
bool CheckX86BuiltinTileDuplicate(CallExpr *TheCall, ArrayRef<int> ArgNums);
bool CheckX86BuiltinTileRangeAndDuplicate(CallExpr *TheCall,
ArrayRef<int> ArgNums);
bool CheckX86BuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckPPCBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckAMDGCNBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall);
bool SemaBuiltinVAStartARMMicrosoft(CallExpr *Call);
bool SemaBuiltinUnorderedCompare(CallExpr *TheCall);
bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs);
bool SemaBuiltinComplex(CallExpr *TheCall);
bool SemaBuiltinVSX(CallExpr *TheCall);
bool SemaBuiltinOSLogFormat(CallExpr *TheCall);
public:
// Used by C++ template instantiation.
ExprResult SemaBuiltinShuffleVector(CallExpr *TheCall);
ExprResult SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
private:
bool SemaBuiltinPrefetch(CallExpr *TheCall);
bool SemaBuiltinAllocaWithAlign(CallExpr *TheCall);
bool SemaBuiltinAssume(CallExpr *TheCall);
bool SemaBuiltinAssumeAligned(CallExpr *TheCall);
bool SemaBuiltinLongjmp(CallExpr *TheCall);
bool SemaBuiltinSetjmp(CallExpr *TheCall);
ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult);
ExprResult SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult);
ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult,
AtomicExpr::AtomicOp Op);
ExprResult SemaBuiltinOperatorNewDeleteOverloaded(ExprResult TheCallResult,
bool IsDelete);
bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum,
llvm::APSInt &Result);
bool SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, int Low,
int High, bool RangeIsError = true);
bool SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum,
unsigned Multiple);
bool SemaBuiltinConstantArgPower2(CallExpr *TheCall, int ArgNum);
bool SemaBuiltinConstantArgShiftedByte(CallExpr *TheCall, int ArgNum,
unsigned ArgBits);
bool SemaBuiltinConstantArgShiftedByteOrXXFF(CallExpr *TheCall, int ArgNum,
unsigned ArgBits);
bool SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall,
int ArgNum, unsigned ExpectedFieldNum,
bool AllowName);
bool SemaBuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall);
// Matrix builtin handling.
ExprResult SemaBuiltinMatrixTranspose(CallExpr *TheCall,
ExprResult CallResult);
ExprResult SemaBuiltinMatrixColumnMajorLoad(CallExpr *TheCall,
ExprResult CallResult);
ExprResult SemaBuiltinMatrixColumnMajorStore(CallExpr *TheCall,
ExprResult CallResult);
public:
enum FormatStringType {
FST_Scanf,
FST_Printf,
FST_NSString,
FST_Strftime,
FST_Strfmon,
FST_Kprintf,
FST_FreeBSDKPrintf,
FST_OSTrace,
FST_OSLog,
FST_Unknown
};
static FormatStringType GetFormatStringType(const FormatAttr *Format);
bool FormatStringHasSArg(const StringLiteral *FExpr);
static bool GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx);
private:
bool CheckFormatArguments(const FormatAttr *Format,
ArrayRef<const Expr *> Args,
bool IsCXXMember,
VariadicCallType CallType,
SourceLocation Loc, SourceRange Range,
llvm::SmallBitVector &CheckedVarArgs);
bool CheckFormatArguments(ArrayRef<const Expr *> Args,
bool HasVAListArg, unsigned format_idx,
unsigned firstDataArg, FormatStringType Type,
VariadicCallType CallType,
SourceLocation Loc, SourceRange range,
llvm::SmallBitVector &CheckedVarArgs);
void CheckAbsoluteValueFunction(const CallExpr *Call,
const FunctionDecl *FDecl);
void CheckMaxUnsignedZero(const CallExpr *Call, const FunctionDecl *FDecl);
void CheckMemaccessArguments(const CallExpr *Call,
unsigned BId,
IdentifierInfo *FnName);
void CheckStrlcpycatArguments(const CallExpr *Call,
IdentifierInfo *FnName);
void CheckStrncatArguments(const CallExpr *Call,
IdentifierInfo *FnName);
void CheckReturnValExpr(Expr *RetValExp, QualType lhsType,
SourceLocation ReturnLoc,
bool isObjCMethod = false,
const AttrVec *Attrs = nullptr,
const FunctionDecl *FD = nullptr);
public:
void CheckFloatComparison(SourceLocation Loc, Expr *LHS, Expr *RHS);
private:
void CheckImplicitConversions(Expr *E, SourceLocation CC = SourceLocation());
void CheckBoolLikeConversion(Expr *E, SourceLocation CC);
void CheckForIntOverflow(Expr *E);
void CheckUnsequencedOperations(const Expr *E);
/// Perform semantic checks on a completed expression. This will either
/// be a full-expression or a default argument expression.
void CheckCompletedExpr(Expr *E, SourceLocation CheckLoc = SourceLocation(),
bool IsConstexpr = false);
void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field,
Expr *Init);
/// Check if there is a field shadowing.
void CheckShadowInheritedFields(const SourceLocation &Loc,
DeclarationName FieldName,
const CXXRecordDecl *RD,
bool DeclIsField = true);
/// Check if the given expression contains 'break' or 'continue'
/// statement that produces control flow different from GCC.
void CheckBreakContinueBinding(Expr *E);
/// Check whether receiver is mutable ObjC container which
/// attempts to add itself into the container
void CheckObjCCircularContainer(ObjCMessageExpr *Message);
void AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE);
void AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc,
bool DeleteWasArrayForm);
public:
/// Register a magic integral constant to be used as a type tag.
void RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind,
uint64_t MagicValue, QualType Type,
bool LayoutCompatible, bool MustBeNull);
struct TypeTagData {
// Zero-initialize the flag bit-fields: the original default constructor
// left them indeterminate, so reading them on a default-constructed map
// entry was undefined behavior.
TypeTagData() : LayoutCompatible(0), MustBeNull(0) {}
TypeTagData(QualType Type, bool LayoutCompatible, bool MustBeNull) :
Type(Type), LayoutCompatible(LayoutCompatible),
MustBeNull(MustBeNull)
{}
/// The type associated with the magic value.
QualType Type;
/// If true, \c Type should be compared with other expression's types for
/// layout-compatibility.
unsigned LayoutCompatible : 1;
// If true, the checked argument is required to be a null pointer
// (see RegisterTypeTagForDatatype's MustBeNull parameter).
unsigned MustBeNull : 1;
};
/// A pair of ArgumentKind identifier and magic value. This uniquely
/// identifies the magic value.
typedef std::pair<const IdentifierInfo *, uint64_t> TypeTagMagicValue;
private:
/// A map from magic value to type information.
std::unique_ptr<llvm::DenseMap<TypeTagMagicValue, TypeTagData>>
TypeTagForDatatypeMagicValues;
/// Perform checks on a call of a function with argument_with_type_tag
/// or pointer_with_type_tag attributes.
void CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr,
const ArrayRef<const Expr *> ExprArgs,
SourceLocation CallSiteLoc);
/// Check if we are taking the address of a packed field
/// as this may be a problem if the pointer value is dereferenced.
void CheckAddressOfPackedMember(Expr *rhs);
/// The parser's current scope.
///
/// The parser maintains this state here.
Scope *CurScope;
mutable IdentifierInfo *Ident_super;
mutable IdentifierInfo *Ident___float128;
/// Nullability type specifiers.
IdentifierInfo *Ident__Nonnull = nullptr;
IdentifierInfo *Ident__Nullable = nullptr;
IdentifierInfo *Ident__Null_unspecified = nullptr;
IdentifierInfo *Ident_NSError = nullptr;
/// The handler for the FileChanged preprocessor events.
///
/// Used for diagnostics that implement custom semantic analysis for #include
/// directives, like -Wpragma-pack.
sema::SemaPPCallbacks *SemaPPCallbackHandler;
protected:
friend class Parser;
friend class InitializationSequence;
friend class ASTReader;
friend class ASTDeclReader;
friend class ASTWriter;
public:
/// Retrieve the keyword associated with the given nullability kind.
IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability);
/// The struct behind the CFErrorRef pointer.
RecordDecl *CFError = nullptr;
bool isCFError(RecordDecl *D);
/// Retrieve the identifier "NSError".
IdentifierInfo *getNSErrorIdent();
/// Retrieve the parser's current scope.
///
/// This routine must only be used when it is certain that semantic analysis
/// and the parser are in precisely the same context, which is not the case
/// when, e.g., we are performing any kind of template instantiation.
/// Therefore, the only safe places to use this scope are in the parser
/// itself and in routines directly invoked from the parser and *never* from
/// template substitution or instantiation.
Scope *getCurScope() const { return CurScope; }
/// Bump the Microsoft mangling number of the parser's current scope.
void incrementMSManglingNumber() const {
// Plain forwarding call; the original 'return <void expr>;' was
// legal but non-idiomatic in a void function.
CurScope->incrementMSManglingNumber();
}
IdentifierInfo *getSuperIdentifier() const;
IdentifierInfo *getFloat128Identifier() const;
Decl *getObjCDeclContext() const;
DeclContext *getCurLexicalContext() const {
// Prefer the recorded original lexical context when one is set;
// otherwise fall back to the current semantic context.
if (OriginalLexicalContext)
return OriginalLexicalContext;
return CurContext;
}
const DeclContext *getCurObjCLexicalContext() const {
const DeclContext *Ctx = getCurLexicalContext();
// A category implicitly has the attribute of the interface, so report
// the class interface instead of the category itself.
if (const auto *Category = dyn_cast<ObjCCategoryDecl>(Ctx))
Ctx = Category->getClassInterface();
return Ctx;
}
/// Determine the number of levels of enclosing template parameters. This is
/// only usable while parsing. Note that this does not include dependent
/// contexts in which no template parameters have yet been declared, such as
/// in a terse function template or generic lambda before the first 'auto' is
/// encountered.
unsigned getTemplateDepth(Scope *S) const;
/// To be used for checking whether the arguments being passed to
/// function exceeds the number of parameters expected for it.
static bool TooManyArguments(size_t NumParams, size_t NumArgs,
bool PartialOverloading = false) {
// During partial overloading (code completion just after a comma), a
// non-empty argument list is treated as carrying one extra, not-yet-
// written argument.
size_t EffectiveArgs =
(PartialOverloading && NumArgs > 0) ? NumArgs + 1 : NumArgs;
return EffectiveArgs > NumParams;
}
// Emitting members of dllexported classes is delayed until the class
// (including field initializers) is fully parsed.
SmallVector<CXXRecordDecl*, 4> DelayedDllExportClasses;
SmallVector<CXXMethodDecl*, 4> DelayedDllExportMemberFunctions;
private:
int ParsingClassDepth = 0;
// RAII helper that swaps Sema's pending delayed exception-spec check
// lists into this object on construction and swaps them back on
// destruction, asserting that the lists re-emptied in between.
class SavePendingParsedClassStateRAII {
public:
// Move the (possibly non-empty) pending lists out of S into this object.
SavePendingParsedClassStateRAII(Sema &S) : S(S) { swapSavedState(); }
~SavePendingParsedClassStateRAII() {
// Whatever was queued while this object was alive must be drained
// before the saved state is restored.
assert(S.DelayedOverridingExceptionSpecChecks.empty() &&
"there shouldn't be any pending delayed exception spec checks");
assert(S.DelayedEquivalentExceptionSpecChecks.empty() &&
"there shouldn't be any pending delayed exception spec checks");
// Restore the saved lists into S.
swapSavedState();
}
private:
Sema &S;
decltype(DelayedOverridingExceptionSpecChecks)
SavedOverridingExceptionSpecChecks;
decltype(DelayedEquivalentExceptionSpecChecks)
SavedEquivalentExceptionSpecChecks;
// Exchange this object's saved lists with Sema's live ones.
void swapSavedState() {
SavedOverridingExceptionSpecChecks.swap(
S.DelayedOverridingExceptionSpecChecks);
SavedEquivalentExceptionSpecChecks.swap(
S.DelayedEquivalentExceptionSpecChecks);
}
};
/// Helper class that collects misaligned member designations and
/// their location info for delayed diagnostics.
struct MisalignedMember {
Expr *E;
RecordDecl *RD;
ValueDecl *MD;
CharUnits Alignment;
MisalignedMember() : E(), RD(), MD(), Alignment() {}
MisalignedMember(Expr *E, RecordDecl *RD, ValueDecl *MD,
CharUnits Alignment)
: E(E), RD(RD), MD(MD), Alignment(Alignment) {}
explicit MisalignedMember(Expr *E)
: MisalignedMember(E, nullptr, nullptr, CharUnits()) {}
// Identity is keyed on the expression alone; the remaining fields are
// payload for the eventual diagnostic.  Const-qualified so comparisons
// work through const references and const containers (the original
// non-const overload rejected them).
bool operator==(const MisalignedMember &m) const { return this->E == m.E; }
};
/// Small set of gathered accesses to potentially misaligned members
/// due to the packed attribute.
SmallVector<MisalignedMember, 4> MisalignedMembers;
/// Adds an expression to the set of gathered misaligned members.
void AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD,
CharUnits Alignment);
public:
/// Diagnoses the current set of gathered accesses. This typically
/// happens at full expression level. The set is cleared after emitting the
/// diagnostics.
void DiagnoseMisalignedMembers();
/// This function checks if the expression is in the set of potentially
/// misaligned members and it is converted to some pointer type T with lower
/// or equal alignment requirements. If so it removes it. This is used when
/// we do not want to diagnose such misaligned access (e.g. in conversions to
/// void*).
void DiscardMisalignedMemberAddress(const Type *T, Expr *E);
/// This function calls Action when it determines that E designates a
/// misaligned member due to the packed attribute. This is used to emit
/// local diagnostics like in reference binding.
void RefersToMemberWithReducedAlignment(
Expr *E,
llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)>
Action);
/// Describes the reason a calling convention specification was ignored, used
/// for diagnostics.
enum class CallingConventionIgnoredReason {
ForThisTarget = 0,
VariadicFunction,
ConstructorDestructor,
BuiltinFunction
};
/// Creates a DeviceDiagBuilder that emits the diagnostic if the current
/// context is "used as device code".
///
/// - If CurLexicalContext is a kernel function or it is known that the
/// function will be emitted for the device, emits the diagnostics
/// immediately.
/// - If CurLexicalContext is a function and we are compiling
/// for the device, but we don't know that this function will be codegen'ed
/// for device yet, creates a diagnostic which is emitted if and when we
/// realize that the function will be codegen'ed.
///
/// Example usage:
///
/// Diagnose __float128 type usage only from SYCL device code if the current
/// target doesn't support it
/// if (!S.Context.getTargetInfo().hasFloat128Type() &&
/// S.getLangOpts().SYCLIsDevice)
/// SYCLDiagIfDeviceCode(Loc, diag::err_type_unsupported) << "__float128";
DeviceDiagBuilder SYCLDiagIfDeviceCode(SourceLocation Loc, unsigned DiagID);
/// Check whether we're allowed to call Callee from the current context.
///
/// - If the call is never allowed in a semantically-correct program
/// emits an error and returns false.
///
/// - If the call is allowed in semantically-correct programs, but only if
/// it's never codegen'ed, creates a deferred diagnostic to be emitted if
/// and when the caller is codegen'ed, and returns true.
///
/// - Otherwise, returns true without emitting any diagnostics.
///
/// Adds Callee to DeviceCallGraph if we don't know if its caller will be
/// codegen'ed yet.
bool checkSYCLDeviceFunction(SourceLocation Loc, FunctionDecl *Callee);
};
/// RAII object that enters a new expression evaluation context.
///
/// On construction (unless suppressed) a new evaluation-context record is
/// pushed onto the Sema's stack; the matching pop happens automatically in
/// the destructor, keyed off the Entered flag.
class EnterExpressionEvaluationContext {
Sema &Actions;
/// True when a context was actually pushed and must be popped on
/// destruction.
bool Entered = true;
public:
/// Enter NewContext, optionally associated with LambdaContextDecl.
/// If ShouldEnter is false this constructor is a no-op.
EnterExpressionEvaluationContext(
Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
Decl *LambdaContextDecl = nullptr,
Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
Sema::ExpressionEvaluationContextRecord::EK_Other,
bool ShouldEnter = true)
: Actions(Actions), Entered(ShouldEnter) {
if (Entered)
Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl,
ExprContext);
}
/// Enter NewContext, reusing the enclosing lambda context declaration
/// (Sema::ReuseLambdaContextDecl). Always enters (Entered stays true).
EnterExpressionEvaluationContext(
Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
Sema::ReuseLambdaContextDecl_t,
Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
Sema::ExpressionEvaluationContextRecord::EK_Other)
: Actions(Actions) {
Actions.PushExpressionEvaluationContext(
NewContext, Sema::ReuseLambdaContextDecl, ExprContext);
}
enum InitListTag { InitList };
/// Init-list form: pushes an UnevaluatedList context only when currently
/// inside an unevaluated C++11 context; otherwise pushes nothing.
EnterExpressionEvaluationContext(Sema &Actions, InitListTag,
bool ShouldEnter = true)
: Actions(Actions), Entered(false) {
// In C++11 onwards, narrowing checks are performed on the contents of
// braced-init-lists, even when they occur within unevaluated operands.
// Therefore we still need to instantiate constexpr functions used in such
// a context.
if (ShouldEnter && Actions.isUnevaluatedContext() &&
Actions.getLangOpts().CPlusPlus11) {
Actions.PushExpressionEvaluationContext(
Sema::ExpressionEvaluationContext::UnevaluatedList);
Entered = true;
}
}
~EnterExpressionEvaluationContext() {
if (Entered)
Actions.PopExpressionEvaluationContext();
}
};
DeductionFailureInfo
MakeDeductionFailureInfo(ASTContext &Context, Sema::TemplateDeductionResult TDK,
sema::TemplateDeductionInfo &Info);
/// Contains a late templated function.
/// Will be parsed at the end of the translation unit, used by Sema & Parser.
struct LateParsedTemplate {
/// Buffered tokens to be replayed when the function is finally parsed.
CachedTokens Toks;
/// The template function declaration to be late parsed.
Decl *D;
};
} // end namespace clang
namespace llvm {
// Hash a FunctionDeclAndLoc by looking at both its FunctionDecl and its
// SourceLocation.
template <> struct DenseMapInfo<clang::Sema::FunctionDeclAndLoc> {
using FunctionDeclAndLoc = clang::Sema::FunctionDeclAndLoc;
using FDBaseInfo = DenseMapInfo<clang::CanonicalDeclPtr<clang::FunctionDecl>>;
// Empty/tombstone keys reuse the FunctionDecl sentinels from FDBaseInfo,
// paired with a default (invalid) SourceLocation.
static FunctionDeclAndLoc getEmptyKey() {
return {FDBaseInfo::getEmptyKey(), clang::SourceLocation()};
}
static FunctionDeclAndLoc getTombstoneKey() {
return {FDBaseInfo::getTombstoneKey(), clang::SourceLocation()};
}
// Combine the decl's hash with the raw location encoding so the same decl
// at different locations hashes differently.
static unsigned getHashValue(const FunctionDeclAndLoc &FDL) {
return hash_combine(FDBaseInfo::getHashValue(FDL.FD),
FDL.Loc.getRawEncoding());
}
// Equality compares both fields, consistent with getHashValue.
static bool isEqual(const FunctionDeclAndLoc &LHS,
const FunctionDeclAndLoc &RHS) {
return LHS.FD == RHS.FD && LHS.Loc == RHS.Loc;
}
};
} // namespace llvm
#endif
|
naive_bayes_cilk.c | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <float.h>
#include <string.h>
#include <memory.h>
#include <sys/time.h>
#include <cilk/cilk.h>
#define REAL double
#define EPSILON 0.000001
#define MICRO_IN_SEC 1000000.00
/*__declspec(target(mic)) double begin, end;
__declspec(target(mic))
*/
double begin,end;
double microtime(){
int tv_sec,tv_usec;
double time;
struct timeval tv;
struct timezone tz;
gettimeofday(&tv,&tz);
return tv.tv_sec+tv.tv_usec/MICRO_IN_SEC;
}
int caculateNB_train(char *ifn) ;
int caculateNB_classify(char *ifn);
/*
 * Entry point: argv[1] selects the sub-command ("train" or "classify"),
 * argv[2] is the input file name. Always returns 0, even on a bad
 * command line or unknown sub-command.
 */
int main(int argc,char* argv[]) {
char *ifn=NULL,*argument=NULL;
if(argc<3)
{
printf("Wrong command format! usage:COMMAND ARGUMENT INPUTFILENAME\nARGUMENT:\n\ttrain:\ttrain the classifier\n\tclassify:\tclassify the dataset\n");
return 0;
}
else
{
ifn=argv[2]; /* input data file */
argument=argv[1]; /* sub-command selector */
if(!strcmp(argument,"train")) {
caculateNB_train(ifn);
} else if(!strcmp(argument,"classify")) {
caculateNB_classify(ifn);
} else {
printf("Error command!\n");
}
return 0;
}
}
/*
 * caculateNB_train: train a naive Bayes classifier on the dataset in ifn
 * and write the model to "nb_train_result.txt".
 *
 * Input file layout:
 *   linen propertyn classn            -- #records, #properties, #classes
 *   v_0 ... v_{propertyn-1}           -- #distinct values per property
 *   linen records: class a_0 ... a_{propertyn-1}
 *
 * The model is P(C) and P(A|C), both with add-one (Laplace) smoothing.
 * Returns 0 on success, 1 on failure (the original called exit(0) from
 * every error path after hand-freeing whatever had been allocated; a
 * single goto-based cleanup path replaces those duplicated chains).
 * Also removed: unused top-level i,j,k (shadowed by loop-scoped copies),
 * a redundant pre-zeroing of array_index, and dead commented-out
 * offload/OpenMP pragmas.
 */
int caculateNB_train(char *ifn) {
    char *ofn = "nb_train_result.txt";
    FILE *ifp = NULL, *ofp = NULL;
    int class_cluster, a, linen, propertyn, classn, array_length;
    int *array_valuen = NULL;         /* #values of every property */
    int *array_index = NULL;          /* offset of each property in the flat arrays */
    int *array_class = NULL;          /* per-class record counts */
    int *array_property_class = NULL; /* per-(property,class) record counts */
    int *array_counts = NULL;         /* per-(property,value,class) counts */
    REAL *array_class_probability = NULL; /* P(C) */
    REAL *array_probability = NULL;       /* P(A|C) */
    int rc = 1;

    begin = microtime();
    if ((ifp = fopen(ifn, "r")) == NULL) {
        printf("%s file open error!\n", ifn);
        goto done;
    }
    printf("%s file opened success!\n", ifn);
    if ((ofp = fopen(ofn, "w")) == NULL) {
        printf("%s file open error!\n", ofn);
        goto done;
    }
    printf("%s file opened success!\n", ofn);

    printf("Get base info\n");
    /* linen: number of dataset lines; propertyn: properties per record;
     * classn: number of classes */
    fscanf(ifp, "%d%d%d", &linen, &propertyn, &classn);

    printf("Read data\n");
    if ((array_valuen = (int *)malloc(propertyn * sizeof(int))) == NULL) {
        printf("Memory alloc ERROR!\n");
        goto done;
    }
    printf("Get property number\n");
    for (int i = 0; i < propertyn; i++) {
        fscanf(ifp, "%d", array_valuen + i);
    }

    if ((array_index = (int *)malloc(propertyn * sizeof(int))) == NULL) {
        printf("memory alloc error!\n");
        goto done;
    }
    /* Prefix-sum layout: property i occupies array_valuen[i]*classn slots
     * of the flat count/probability arrays, starting at array_index[i].
     * (Loop body is empty when propertyn == 1, so no guard is needed.) */
    array_length = array_valuen[0] * classn;
    array_index[0] = 0;
    for (int i = 1; i < propertyn; i++) {
        array_length += array_valuen[i] * classn;
        array_index[i] = array_valuen[i - 1] * classn + array_index[i - 1];
    }

    if ((array_class = (int *)malloc(classn * sizeof(int))) == NULL) {
        printf("memory alloc error!\n");
        goto done;
    }
    memset(array_class, 0, classn * sizeof(int));

    if ((array_property_class = (int *)malloc(propertyn * classn * sizeof(int))) == NULL) {
        printf("memory alloc error!\n");
        goto done;
    }
    memset(array_property_class, 0, propertyn * classn * sizeof(int));

    if ((array_counts = (int *)malloc(array_length * sizeof(int))) == NULL) {
        printf("memory alloc error!\n");
        goto done;
    }
    memset(array_counts, 0, array_length * sizeof(int));

    printf("Get every needed info\n");
    /* One pass over the records, accumulating all three count tables. */
    for (int i = 0; i < linen; i++) {
        fscanf(ifp, "%d", &class_cluster);
        array_class[class_cluster]++;
        for (int j = 0; j < propertyn; j++) {
            fscanf(ifp, "%d", &a);
            array_counts[array_index[j] + a * classn + class_cluster]++;
            array_property_class[j * classn + class_cluster]++;
        }
    }

    if ((array_class_probability = (REAL *)malloc(classn * sizeof(REAL))) == NULL) {
        printf("memory alloc error!\n");
        goto done;
    }
    if ((array_probability = (REAL *)malloc(array_length * sizeof(REAL))) == NULL) {
        printf("memory alloc error!\n");
        goto done;
    }

    end = microtime();
    printf("\nalloc memory and reading data consuming time: %fs\n\n", end - begin);
    begin = end;

    printf("Get P(C)\n");
    /* P(C=i) with add-one smoothing; iterations are independent. */
    cilk_for(int i = 0; i < classn; i++) {
        array_class_probability[i] = (REAL)(array_class[i] + 1) / (REAL)(linen + classn);
    }

    printf("Get P(A|C)\n");
    /* P(A_i = j | C = k) with add-one smoothing. */
    for (int i = 0; i < propertyn; i++) {
        for (int j = 0; j < array_valuen[i]; j++) {
            for (int k = 0; k < classn; k++) {
                array_probability[array_index[i] + j * classn + k] =
                    (REAL)(array_counts[array_index[i] + j * classn + k] + 1) /
                    (REAL)(array_property_class[i * classn + k] + array_valuen[i]);
            }
        }
    }

    end = microtime();
    printf("\ntrain the classifier consuming time: %fs\n\n", end - begin);
    begin = end;

    /* p(c) and p(a|c) are the training result */
    printf("Outputing the training result to %s\n", ofn);
    fprintf(ofp, "%d %d\n", propertyn, classn);
    for (int i = 0; i < propertyn; i++) {
        fprintf(ofp, "%d ", array_valuen[i]);
    }
    fprintf(ofp, "\n");
    for (int i = 0; i < classn; i++) {
        fprintf(ofp, "%f ", array_class_probability[i]);
    }
    fprintf(ofp, "\n");
    for (int i = 0; i < propertyn; i++) {
        for (int j = 0; j < array_valuen[i]; j++) {
            for (int k = 0; k < classn; k++) {
                fprintf(ofp, "%f ", array_probability[array_index[i] + j * classn + k]);
            }
            fprintf(ofp, "\n");
        }
    }
    rc = 0;

done:
    printf("Recycle all resources\n");
    /* Single cleanup path; free(NULL) is a harmless no-op. */
    if (ifp != NULL) fclose(ifp);
    if (ofp != NULL) fclose(ofp);
    free(array_valuen);
    free(array_index);
    free(array_property_class);
    free(array_probability);
    free(array_counts);
    free(array_class);
    free(array_class_probability);
    if (rc == 0) {
        printf("\nPlease DON'T change %s either its name and content!!\n", ofn);
    }
    return rc;
}
/*
 * caculateNB_classify: classify the dataset in ifn using the model written
 * by caculateNB_train ("nb_train_result.txt"), writing one "index class"
 * line per record to "nb_classify_result.txt".
 *
 * Fixes vs. the original:
 *  - fscanf used "%f" to read into REAL (double) storage, which is
 *    undefined behavior; "%lf" is the correct conversion for double.
 *  - the per-record argmax variable 'max' was a function-scope variable
 *    written concurrently by every cilk_for iteration (a data race);
 *    it is now local to each iteration.
 *  - nb_train_result.txt was never fclose'd, and array_probability_index
 *    was never freed (leaks); a single goto-based cleanup path now
 *    releases everything on all exits.
 * Returns 0 on success, 1 on failure.
 */
int caculateNB_classify(char *ifn) {
    char *ofn = "nb_classify_result.txt", *ifn_classifier = "nb_train_result.txt";
    FILE *ifp = NULL, *ofp = NULL, *ifp_classifier = NULL;
    int linen, propertyn, classn, array_probability_length;
    int *array_valuen = NULL;            /* #values of every property */
    int *array_probability_index = NULL; /* offset of each property in array_probability */
    int *array_test = NULL;              /* input records, linen*propertyn ints */
    int *array_test_class = NULL;        /* classification result per record */
    REAL *array_class_probability = NULL;      /* P(C) */
    REAL *array_probability = NULL;            /* P(A|C) */
    REAL *array_test_class_probability = NULL; /* per-record per-class log-prob */
    int rc = 1;

    begin = microtime();
    if ((ifp_classifier = fopen(ifn_classifier, "r")) == NULL) {
        printf("%s file open error!\n", ifn_classifier);
        goto done;
    }
    printf("%s file opened success!\n", ifn_classifier);
    if ((ifp = fopen(ifn, "r")) == NULL) {
        printf("%s file open error!\n", ifn);
        goto done;
    }
    printf("%s file opened success!\n", ifn);
    if ((ofp = fopen(ofn, "w")) == NULL) {
        printf("%s file open error!\n", ofn);
        goto done;
    }
    printf("%s file opened success!\n", ofn);

    printf("Get base info from %s and configure the classifier\n", ifn_classifier);
    fscanf(ifp_classifier, "%d%d", &propertyn, &classn);

    if ((array_valuen = (int *)malloc(propertyn * sizeof(int))) == NULL) {
        printf("Memory alloc ERROR!\n");
        goto done;
    }
    for (int i = 0; i < propertyn; i++) {
        fscanf(ifp_classifier, "%d", array_valuen + i);
    }

    if ((array_class_probability = (REAL *)malloc(classn * sizeof(REAL))) == NULL) {
        printf("memory alloc error!\n");
        goto done;
    }
    for (int i = 0; i < classn; i++) {
        /* REAL is double, so "%lf" is required; "%f" stores a float (UB). */
        fscanf(ifp_classifier, "%lf", array_class_probability + i);
    }

    if ((array_probability_index = (int *)malloc(propertyn * sizeof(int))) == NULL) {
        printf("memory alloc error!\n");
        goto done;
    }
    /* Same prefix-sum layout as the trainer. */
    array_probability_length = array_valuen[0] * classn;
    array_probability_index[0] = 0;
    for (int i = 1; i < propertyn; i++) {
        array_probability_length += array_valuen[i] * classn;
        array_probability_index[i] = array_valuen[i - 1] * classn + array_probability_index[i - 1];
    }

    if ((array_probability = (REAL *)malloc(array_probability_length * sizeof(REAL))) == NULL) {
        printf("memory alloc error!\n");
        goto done;
    }
    for (int i = 0; i < propertyn; i++) {
        for (int j = 0; j < array_valuen[i]; j++) {
            for (int k = 0; k < classn; k++) {
                fscanf(ifp_classifier, "%lf",
                       &array_probability[array_probability_index[i] + j * classn + k]);
            }
        }
    }
    printf("Classifier initialize done!\n");

    printf("Begin classify the dataset\n");
    fscanf(ifp, "%d", &linen);
    if ((array_test = (int *)malloc(linen * propertyn * sizeof(int))) == NULL) {
        printf("Memory alloc ERROR!\n");
        goto done;
    }
    for (int i = 0; i < linen * propertyn; i++) {
        fscanf(ifp, "%d", array_test + i);
    }
    if ((array_test_class = (int *)malloc(linen * sizeof(int))) == NULL) {
        printf("Memory alloc ERROR!\n");
        goto done;
    }
    if ((array_test_class_probability = (REAL *)malloc(linen * classn * sizeof(REAL))) == NULL) {
        printf("Memory alloc ERROR!\n");
        goto done;
    }

    end = microtime();
    printf("\nalloc memory and reading data consuming time: %fs\n\n", end - begin);
    begin = end;

    /* Independent per-record work: naive Bayes decision in log space. */
    cilk_for(int i = 0; i < linen; i++) {
        for (int j = 0; j < classn; j++) {
            array_test_class_probability[i * classn + j] = log(array_class_probability[j]);
        }
        for (int j = 0; j < propertyn; j++) {
            for (int k = 0; k < classn; k++) {
                array_test_class_probability[i * classn + k] +=
                    log(array_probability[array_probability_index[j] +
                                          array_test[i * propertyn + j] * classn + k]);
            }
        }
        /* argmax over classes; 'best' is local to the iteration (the
         * original shared one variable across parallel iterations). */
        int best = 0;
        for (int j = 0; j < classn; j++) {
            if (array_test_class_probability[i * classn + j] -
                array_test_class_probability[i * classn + best] > EPSILON) {
                best = j;
            }
        }
        array_test_class[i] = best;
    }

    end = microtime();
    printf("\nclassify the data consuming time: %fs\n\n", end - begin);
    begin = end;
    printf("Classify done\n");
    for (int i = 0; i < linen; i++) {
        fprintf(ofp, "%d %d\n", i, array_test_class[i]);
    }
    printf("Result outputed to %s\n", ofn);
    rc = 0;

done:
    /* Single cleanup path; free(NULL) is a harmless no-op. */
    if (ifp_classifier != NULL) fclose(ifp_classifier);
    if (ifp != NULL) fclose(ifp);
    if (ofp != NULL) fclose(ofp);
    free(array_valuen);
    free(array_probability_index);
    free(array_probability);
    free(array_class_probability);
    free(array_test_class);
    free(array_test_class_probability);
    free(array_test);
    return rc;
}
|
GB_convert_sparse_to_hyper.c | //------------------------------------------------------------------------------
// GB_convert_sparse_to_hyper: convert a matrix from sparse to hypersparse
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// On input, the matrix may have shallow A->p content; it is safely removed.
// On output, the matrix is always hypersparse (even if out of memory). If the
// input matrix is non-hypersparse, it is given new A->p and A->h that are not
// shallow. If the input matrix is already hypersparse, nothing is changed
// (and in that case A->p and A->h remain shallow on output if shallow on
// input). The A->x and A->i content is not changed; it remains in whatever
// shallow/non-shallow/iso property that it had on input).
// If an out-of-memory condition occurs, all content of the matrix is cleared.
// If the input matrix A is hypersparse, bitmap or full, it is unchanged.
#include "GB.h"
GrB_Info GB_convert_sparse_to_hyper // convert from sparse to hypersparse
(
GrB_Matrix A, // matrix to convert to hypersparse
GB_Context Context
)
{
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
ASSERT_MATRIX_OK (A, "A converting to hypersparse", GB0) ;
// capture nnz up front; the conversion must not change it (checked below)
int64_t anz = GB_nnz (A) ;
ASSERT (GB_ZOMBIES_OK (A)) ;
ASSERT (GB_JUMBLED_OK (A)) ;
ASSERT (GB_PENDING_OK (A)) ;
//--------------------------------------------------------------------------
// convert A from sparse to hypersparse
//--------------------------------------------------------------------------
// only sparse matrices are converted; hypersparse, bitmap, and full
// matrices fall through unchanged
if (GB_IS_SPARSE (A))
{
//----------------------------------------------------------------------
// determine the number of threads to use
//----------------------------------------------------------------------
GBURBLE ("(sparse to hyper) ") ;
int64_t n = A->vdim ;
GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
int nthreads = GB_nthreads (n, chunk, nthreads_max) ;
int ntasks = (nthreads == 1) ? 1 : (8 * nthreads) ;
ntasks = GB_IMIN (ntasks, n) ;
ntasks = GB_IMAX (ntasks, 1) ;
//----------------------------------------------------------------------
// count the number of non-empty vectors in A in each slice
//----------------------------------------------------------------------
ASSERT (A->nvec == A->plen && A->plen == n) ;
// keep the old A->p: it is read while the new arrays are built, and is
// freed at the end unless it is shallow
const int64_t *restrict Ap_old = A->p ;
size_t Ap_old_size = A->p_size ;
bool Ap_old_shallow = A->p_shallow ;
GB_WERK_DECLARE (Count, int64_t) ;
GB_WERK_PUSH (Count, ntasks+1, int64_t) ;
if (Count == NULL)
{
// out of memory
return (GrB_OUT_OF_MEMORY) ;
}
int tid ;
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
for (tid = 0 ; tid < ntasks ; tid++)
{
int64_t jstart, jend, my_nvec_nonempty = 0 ; ;
GB_PARTITION (jstart, jend, n, tid, ntasks) ;
for (int64_t j = jstart ; j < jend ; j++)
{
// vector j is non-empty iff its pointer range is non-degenerate
if (Ap_old [j] < Ap_old [j+1]) my_nvec_nonempty++ ;
}
Count [tid] = my_nvec_nonempty ;
}
//----------------------------------------------------------------------
// compute cumulative sum of Counts and nvec_nonempty
//----------------------------------------------------------------------
// after the cumsum, Count [tid] is the position of task tid's first
// non-empty vector in the new hyperlist, and Count [ntasks] the total
GB_cumsum (Count, ntasks, NULL, 1, NULL) ;
int64_t nvec_nonempty = Count [ntasks] ;
A->nvec_nonempty = nvec_nonempty ;
//----------------------------------------------------------------------
// allocate the new A->p and A->h
//----------------------------------------------------------------------
int64_t *restrict Ap_new = NULL ; size_t Ap_new_size = 0 ;
int64_t *restrict Ah_new = NULL ; size_t Ah_new_size = 0 ;
Ap_new = GB_MALLOC (nvec_nonempty+1, int64_t, &Ap_new_size) ;
Ah_new = GB_MALLOC (nvec_nonempty , int64_t, &Ah_new_size) ;
if (Ap_new == NULL || Ah_new == NULL)
{
// out of memory
GB_WERK_POP (Count, int64_t) ;
GB_FREE (&Ap_new, Ap_new_size) ;
GB_FREE (&Ah_new, Ah_new_size) ;
return (GrB_OUT_OF_MEMORY) ;
}
//----------------------------------------------------------------------
// transplant the new A->p and A->h into the matrix
//----------------------------------------------------------------------
// note: Ap_old still points at the old (now detached) content
A->plen = nvec_nonempty ;
A->nvec = nvec_nonempty ;
A->p = Ap_new ; A->p_size = Ap_new_size ;
A->h = Ah_new ; A->h_size = Ah_new_size ;
A->p_shallow = false ;
A->h_shallow = false ;
//----------------------------------------------------------------------
// construct the new hyperlist in the new A->p and A->h
//----------------------------------------------------------------------
// each task writes its slice starting at offset Count [tid]
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
for (tid = 0 ; tid < ntasks ; tid++)
{
int64_t jstart, jend, k = Count [tid] ;
GB_PARTITION (jstart, jend, n, tid, ntasks) ;
for (int64_t j = jstart ; j < jend ; j++)
{
if (Ap_old [j] < Ap_old [j+1])
{
// vector index j is the kth vector in the new Ah
Ap_new [k] = Ap_old [j] ;
Ah_new [k] = j ;
k++ ;
}
}
ASSERT (k == Count [tid+1]) ;
}
Ap_new [nvec_nonempty] = anz ;
A->magic = GB_MAGIC ;
//----------------------------------------------------------------------
// free workspace, and free the old A->p unless it's shallow
//----------------------------------------------------------------------
GB_WERK_POP (Count, int64_t) ;
if (!Ap_old_shallow)
{
GB_FREE (&Ap_old, Ap_old_size) ;
}
//----------------------------------------------------------------------
// A is now hypersparse
//----------------------------------------------------------------------
ASSERT (GB_IS_HYPERSPARSE (A)) ;
}
//--------------------------------------------------------------------------
// A is now in hypersparse form (or left as full or bitmap)
//--------------------------------------------------------------------------
ASSERT (anz == GB_nnz (A)) ;
ASSERT_MATRIX_OK (A, "A conv to hypersparse (or left full/bitmap)", GB0) ;
ASSERT (!GB_IS_SPARSE (A)) ;
ASSERT (GB_ZOMBIES_OK (A)) ;
ASSERT (GB_JUMBLED_OK (A)) ;
ASSERT (GB_PENDING_OK (A)) ;
return (GrB_SUCCESS) ;
}
|
GB_binop__rminus_uint64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__rminus_uint64
// A.*B function (eWiseMult): GB_AemultB__rminus_uint64
// A*D function (colscale): GB_AxD__rminus_uint64
// D*A function (rowscale): GB_DxB__rminus_uint64
// C+=B function (dense accum): GB_Cdense_accumB__rminus_uint64
// C+=b function (dense accum): GB_Cdense_accumb__rminus_uint64
// C+=A+B function (dense ewise3): GB_Cdense_ewise3_accum__rminus_uint64
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__rminus_uint64
// C=scalar+B GB_bind1st__rminus_uint64
// C=scalar+B' GB_bind1st_tran__rminus_uint64
// C=A+scalar GB_bind2nd__rminus_uint64
// C=A'+scalar GB_bind2nd_tran__rminus_uint64
// C type: uint64_t
// A type: uint64_t
// B,b type: uint64_t
// BinaryOp: cij = (bij - aij)
#define GB_ATYPE \
uint64_t
#define GB_BTYPE \
uint64_t
#define GB_CTYPE \
uint64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint64_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
uint64_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = (y - x) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_RMINUS || GxB_NO_UINT64 || GxB_NO_RMINUS_UINT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB_Cdense_ewise3_accum__rminus_uint64
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_ewise3_noaccum__rminus_uint64
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumB__rminus_uint64
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumb__rminus_uint64
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint64_t
uint64_t bwork = (*((uint64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_AxD__rminus_uint64
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t *GB_RESTRICT Cx = (uint64_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_DxB__rminus_uint64
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t *GB_RESTRICT Cx = (uint64_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
#undef GB_FREE_ALL
#define GB_FREE_ALL \
{ \
GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}
GrB_Info GB_AaddB__rminus_uint64
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_add_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB_AemultB__rminus_uint64
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_emult_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB_bind1st__rminus_uint64
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *GB_RESTRICT Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t *Cx = (uint64_t *) Cx_output ;
uint64_t x = (*((uint64_t *) x_input)) ;
uint64_t *Bx = (uint64_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Bb, p)) continue ;
uint64_t bij = Bx [p] ;
Cx [p] = (bij - x) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB_bind2nd__rminus_uint64
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *GB_RESTRICT Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint64_t *Cx = (uint64_t *) Cx_output ;
uint64_t *Ax = (uint64_t *) Ax_input ;
uint64_t y = (*((uint64_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
uint64_t aij = Ax [p] ;
Cx [p] = (y - aij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint64_t aij = Ax [pA] ; \
Cx [pC] = (aij - x) ; \
}
GrB_Info GB_bind1st_tran__rminus_uint64
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t x = (*((const uint64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint64_t aij = Ax [pA] ; \
Cx [pC] = (y - aij) ; \
}
// C = op (A', y): transpose A and apply rminus with the scalar bound to the
// second argument; each cij = (y - aij), via the GB_unop_transpose.c template
// which expands the GB_CAST_OP macro defined just above.
GrB_Info GB_bind2nd_tran__rminus_uint64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// scalar bound to the 2nd operand; used by GB_CAST_OP inside the template
uint64_t y = (*((const uint64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
test.c |
#include <stdio.h>
#include <omp.h>
#pragma omp requires unified_shared_memory
#include "../utilities/check.h"
#include "../utilities/utilities.h"
#define TRIALS (1)
#define N (992)
#define INIT() INIT_LOOP(N, {C[i] = 1; D[i] = i; E[i] = -i;})
#define ZERO(X) ZERO_ARRAY(N, X)
#define PARALLEL() { \
_Pragma("omp parallel num_threads(128)") \
{ \
int i = omp_get_thread_num()*4; \
for (int j = i; j < i + 4; j++) { \
A[j] += C[j] + D[j]; \
} \
} \
}
// The preprocessor has no loop construct, so PARALLEL() is expanded 125
// times via nested helper macros (5 x 5 x 5).
#define PARALLEL5() { PARALLEL() PARALLEL() PARALLEL() PARALLEL() PARALLEL() }
#define PARALLEL25() { PARALLEL5() PARALLEL5() PARALLEL5() PARALLEL5() PARALLEL5() }
#define PARALLEL125() { PARALLEL25() PARALLEL25() PARALLEL25() PARALLEL25() PARALLEL25() }
// Test driver: runs 125 back-to-back OpenMP parallel regions inside a single
// TEST construct and verifies each of the first 512 elements of A
// accumulated C[i]+D[i] = 1+i exactly 125 times.
int main(void) {
check_offloading();
// A is the accumulator; C, D feed the updates (C[i]=1, D[i]=i from INIT).
// B and E are initialized but not used by this test — presumably kept for
// symmetry with sibling tests; confirm before removing.
double A[N], B[N], C[N], D[N], E[N];
INIT();
//
// Test: Multiple parallel regions in a single target.
//
TEST({
// zero the region under test before accumulating
for (int i = 0; i < 512; i++) {
A[i] = 0;
}
PARALLEL125()
}, VERIFY(0, 512, A[i], 125*(1+i)));
return 0;
}
|
pt_to_pt_pingping.c | /*****************************************************************************
* *
* Mixed-mode OpenMP/MPI MicroBenchmark Suite - Version 1.0 *
* *
* produced by *
* *
* Mark Bull, Jim Enright and Fiona Reid *
* *
* at *
* *
* Edinburgh Parallel Computing Centre *
* *
* email: markb@epcc.ed.ac.uk, fiona@epcc.ed.ac.uk *
* *
* *
* Copyright 2012, The University of Edinburgh *
* *
* *
* Licensed under the Apache License, Version 2.0 (the "License"); *
* you may not use this file except in compliance with the License. *
* You may obtain a copy of the License at *
* *
* http://www.apache.org/licenses/LICENSE-2.0 *
* *
* Unless required by applicable law or agreed to in writing, software *
* distributed under the License is distributed on an "AS IS" BASIS, *
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. *
* See the License for the specific language governing permissions and *
* limitations under the License. *
* *
****************************************************************************/
/*-----------------------------------------------------------*/
/* Contains the point-to-point pingping mixed mode */
/* OpenMP/MPI benchmarks. */
/* This includes: -masteronly pingping */
/* -funnelled pingping */
/* -multiple pingping */
/*-----------------------------------------------------------*/
#include <stdio.h>
#include <stdlib.h>
#include "pt_to_pt_pingping.h"
/*-----------------------------------------------------------*/
/* pingPing */
/* */
/* Driver subroutine for the pingping benchmark. */
/*-----------------------------------------------------------*/
// Driver for the pingping benchmark.  Loops over message sizes from
// minDataSize to maxDataSize (doubling each pass); for each size it warms
// up, verifies correctness, then repeats the chosen benchmark variant
// (MASTERONLY / FUNNELLED / MULTIPLE) until the target run time is reached.
// Uses the module-level globals (pingRankA/B, repsToDo, comm, ...) declared
// in pt_to_pt_pingping.h.  Returns 0.
int pingPing(int benchmarkType){
int dataSizeIter;
int sameNode;
pingRankA = PPRanks[0];
pingRankB = PPRanks[1];
/* Check if pingRankA and pingRankB are on the same node */
sameNode = compareProcNames(pingRankA, pingRankB);
if (myMPIRank == 0){
/* print message saying if benchmark is inter or intra node */
printNodeReport(sameNode,pingRankA,pingRankB);
/* then print report column headings. */
printBenchHeader();
}
/* initialise repsToDo to defaultReps at start of benchmark */
repsToDo = defaultReps;
/* Loop over data sizes */
dataSizeIter = minDataSize; /* initialise dataSizeIter to minDataSize */
while (dataSizeIter <= maxDataSize){
/* set sizeofBuffer: each of numThreads threads owns dataSizeIter items */
sizeofBuffer = dataSizeIter * numThreads;
/* Allocate space for main data arrays */
allocatePingpingData(sizeofBuffer);
/* warm-up for benchmarkType */
if (benchmarkType == MASTERONLY){
/* Masteronly warmp sweep */
masteronlyPingping(warmUpIters, dataSizeIter);
}
else if (benchmarkType == FUNNELLED){
/* perform funnelled warm-up sweep */
funnelledPingping(warmUpIters, dataSizeIter);
}
else if (benchmarkType == MULTIPLE){
multiplePingping(warmUpIters, dataSizeIter);
}
/* perform verification test for the pingping */
testPingping(sizeofBuffer, dataSizeIter);
/* Initialise benchmark */
benchComplete = FALSE;
/* keep executing benchmark until target time is reached */
while (benchComplete != TRUE){
/* Start the timer...MPI_Barrier to synchronise */
MPI_Barrier(comm);
startTime = MPI_Wtime();
if (benchmarkType == MASTERONLY){
/* execute for repsToDo repetitions */
masteronlyPingping(repsToDo, dataSizeIter);
}
else if (benchmarkType == FUNNELLED){
funnelledPingping(repsToDo, dataSizeIter);
}
else if (benchmarkType == MULTIPLE){
multiplePingping(repsToDo, dataSizeIter);
}
/* Stop the timer...MPI_Barrier to synchronise processes */
MPI_Barrier(comm);
finishTime = MPI_Wtime();
totalTime = finishTime - startTime;
/* Call repTimeCheck function to test if target time is reached */
if (myMPIRank==0){
benchComplete = repTimeCheck(totalTime, repsToDo);
}
/* Ensure all procs have the same value of benchComplete */
/* and repsToDo */
MPI_Bcast(&benchComplete, 1, MPI_INT, 0, comm);
MPI_Bcast(&repsToDo, 1, MPI_INT, 0, comm);
}
/* Master process sets benchmark results */
if (myMPIRank == 0){
setReportParams(dataSizeIter, repsToDo, totalTime);
printReport();
}
/* Free the allocated space for the main data arrays */
freePingpingData();
/* Update dataSize before the next iteration */
dataSizeIter = dataSizeIter * 2; /* double data size */
}
return 0;
}
/*-----------------------------------------------------------*/
/* masteronlyPingping */
/* */
/* Two processes send a message to each other using the */
/* MPI_Isend, MPI_Recv and MPI_Wait routines. */
/* Inter-process communication takes place outside of the */
/* parallel region. */
/*-----------------------------------------------------------*/
// Masteronly pingping: the two processes exchange messages with
// MPI_Isend/MPI_Recv/MPI_Wait; all MPI calls happen OUTSIDE the OpenMP
// parallel regions (only the buffer fill/drain loops are threaded).
// totalReps = number of exchanges; dataSize = per-thread chunk size used
// for schedule(static,dataSize).  Returns 0.
int masteronlyPingping(int totalReps, int dataSize){
int repIter, i;
int destRank;
/* set destRank to ID of other process.
 * NOTE(review): destRank stays uninitialized on ranks other than
 * pingRankA/pingRankB, but those ranks never reach the MPI calls below
 * (guarded by the rank test inside the loop). */
if (myMPIRank == pingRankA){
destRank = pingRankB;
}
else if (myMPIRank == pingRankB){
destRank = pingRankA;
}
for (repIter = 0; repIter < totalReps; repIter++){
if (myMPIRank == pingRankA || myMPIRank == pingRankB){
/* Each thread writes its globalID to pingSendBuf
 * using a PARALLEL DO directive.
 * (myThreadID is not in the shared list despite default(none) —
 * presumably declared threadprivate in the header; verify.)
 */
#pragma omp parallel for default(none) \
private(i) \
shared(pingSendBuf,dataSize,sizeofBuffer,globalIDarray) \
schedule(static,dataSize)
for (i=0; i<sizeofBuffer; i++){
pingSendBuf[i] = globalIDarray[myThreadID];
}
/* Process calls non-blocking send to start transfer of pingSendBuf
 * to other process.
 */
MPI_Isend(pingSendBuf, sizeofBuffer, MPI_INT, destRank, \
TAG, comm, &requestID);
/* Process then waits for message from other process. */
MPI_Recv(pingRecvBuf, sizeofBuffer, MPI_INT, destRank, \
TAG, comm, &status);
/* Finish the Send operation with an MPI_Wait */
MPI_Wait(&requestID, &status);
/* Each thread under the MPI process now reads its part of the
 * received buffer.
 */
#pragma omp parallel for default(none) \
private(i) \
shared(finalRecvBuf,dataSize,sizeofBuffer,pingRecvBuf) \
schedule(static,dataSize)
for (i=0; i<sizeofBuffer; i++){
finalRecvBuf[i] = pingRecvBuf[i];
}
}
}
return 0;
}
/*-----------------------------------------------------------*/
/* funnelledPingPing */
/* */
/* Two processes send a message to each other using the */
/* MPI_Isend, MPI_Recv and MPI_Wait routines. */
/* Inter-process communication takes place inside the */
/* OpenMP parallel region. */
/*-----------------------------------------------------------*/
// Funnelled pingping: one parallel region spans all repetitions; all MPI
// communication is funnelled through the master thread (inside
// "#pragma omp master"), while every thread fills/reads its own chunk of
// the buffers.  Requires at least MPI_THREAD_FUNNELLED support.  Returns 0.
int funnelledPingping(int totalReps, int dataSize){
int repIter, i;
int destRank;
/* set destRank to ID of other process (left unset on other ranks,
 * which never execute the guarded MPI calls below) */
if (myMPIRank == pingRankA){
destRank = pingRankB;
}
else if (myMPIRank == pingRankB){
destRank = pingRankA;
}
/* Open the parallel region */
#pragma omp parallel default(none) \
private(i, repIter) \
shared(dataSize,sizeofBuffer,pingSendBuf,globalIDarray) \
shared(pingRecvBuf,finalRecvBuf,status,requestID) \
shared(destRank,comm,myMPIRank,pingRankA,pingRankB,totalReps)
for (repIter = 0; repIter < totalReps; repIter++){
if (myMPIRank == pingRankA || myMPIRank == pingRankB){
/* Each thread writes its globalID to its part of
 * pingSendBuf.
 */
#pragma omp for schedule(static,dataSize)
for (i=0; i<sizeofBuffer; i++){
pingSendBuf[i] = globalIDarray[myThreadID];
}
/* Implicit barrier here takes care of necessary synchronisation:
 * the send buffer is fully written before master starts the send. */
#pragma omp master
{
/* Master thread starts send of buffer */
MPI_Isend(pingSendBuf, sizeofBuffer, MPI_INT, destRank, \
TAG, comm, &requestID);
/* then waits for message from other process */
MPI_Recv(pingRecvBuf, sizeofBuffer, MPI_INT, destRank, \
TAG, comm, &status);
/* Master thread then completes send using an MPI_Wait */
MPI_Wait(&requestID, &status);
}
/* Barrier needed to ensure master thread has completed transfer
 * ("omp master" has no implicit barrier of its own) */
#pragma omp barrier
/* Each thread reads its part of the received buffer */
#pragma omp for schedule(static,dataSize)
for (i=0; i<sizeofBuffer; i++){
finalRecvBuf[i] = pingRecvBuf[i];
}
}
}
return 0;
}
/*-----------------------------------------------------------*/
/* multiplePingping */
/* */
/* With this algorithm multiple threads take place in the */
/* communication and computation. */
/* Each thread sends its portion of the pingSendBuf to the */
/* other process using MPI_Isend/ MPI_Recv/ MPI_Wait */
/* routines. */
/*-----------------------------------------------------------*/
// Multiple pingping: every thread performs its own MPI communication
// (requestID/status are private), sending its dataSize-item slice of
// pingSendBuf with its thread ID as the message tag.  Requires
// MPI_THREAD_MULTIPLE support.  Returns 0.
int multiplePingping(int totalReps, int dataSize){
int repIter, i;
int destRank;
int lBound;
/* set destRank to ID of other process (left unset on other ranks,
 * which never execute the guarded MPI calls below) */
if (myMPIRank == pingRankA){
destRank = pingRankB;
}
else if (myMPIRank == pingRankB){
destRank = pingRankA;
}
/* Open parallel region */
#pragma omp parallel default(none) \
private(i,lBound,requestID,status,repIter) \
shared(pingSendBuf,pingRecvBuf,finalRecvBuf,sizeofBuffer) \
shared(destRank,myMPIRank,pingRankA,pingRankB,totalReps) \
shared(dataSize,globalIDarray,comm)
{
for (repIter = 0; repIter < totalReps; repIter++){
if (myMPIRank == pingRankA || myMPIRank == pingRankB){
/* Calculate the lower bound of each threads
 * portion of the data arrays.
 */
lBound = (myThreadID * dataSize);
/* Each thread writes to its part of pingSendBuf.
 * nowait is safe: with schedule(static,dataSize) each thread
 * writes exactly the slice it sends below. */
#pragma omp for nowait schedule(static,dataSize)
for (i=0; i<sizeofBuffer; i++){
pingSendBuf[i] = globalIDarray[myThreadID];
}
/* Each thread starts send of dataSize items of
 * pingSendBuf to process with rank = destRank.
 */
MPI_Isend(&pingSendBuf[lBound], dataSize, MPI_INT, destRank, \
myThreadID, comm, &requestID);
/* Thread then waits for message from destRank with
 * tag equal to it thread id.
 */
MPI_Recv(&pingRecvBuf[lBound], dataSize, MPI_INT, destRank, \
myThreadID, comm, &status);
/* Thread completes send using MPI_Wait */
MPI_Wait(&requestID, &status);
/* Each thread reads its part of received buffer. */
#pragma omp for nowait schedule(static,dataSize)
for (i=0; i<sizeofBuffer; i++){
finalRecvBuf[i] = pingRecvBuf[i];
}
}
}
}
return 0;
}
/*-----------------------------------------------------------*/
/* allocatePingpingData */
/* */
/* Allocates space for the main data arrays. */
/* Size of each array is specified by subroutine argument. */
/*-----------------------------------------------------------*/
/*-----------------------------------------------------------*/
/* allocatePingpingData                                      */
/*                                                           */
/* Allocates space for the main data arrays (send, receive   */
/* and final-receive buffers), sizeofBuffer ints each.       */
/* Aborts the run if any allocation fails: the benchmark     */
/* would otherwise dereference NULL inside the timed loops.  */
/*-----------------------------------------------------------*/
int allocatePingpingData(int sizeofBuffer){
    pingSendBuf = (int *)malloc(sizeofBuffer * sizeof(int));
    pingRecvBuf = (int *)malloc(sizeofBuffer * sizeof(int));
    finalRecvBuf = (int *)malloc(sizeofBuffer * sizeof(int));
    /* CERT MEM32-C: check every allocation before use */
    if (pingSendBuf == NULL || pingRecvBuf == NULL || finalRecvBuf == NULL){
        fprintf(stderr, "ERROR: allocatePingpingData failed for %d ints\n",
                sizeofBuffer);
        exit(EXIT_FAILURE);
    }
    return 0;
}
/*-----------------------------------------------------------*/
/* freePingpingData */
/* */
/* Deallocates the storage space for the main data arrays. */
/*-----------------------------------------------------------*/
/*-----------------------------------------------------------*/
/* freePingpingData                                          */
/*                                                           */
/* Deallocates the storage space for the main data arrays    */
/* and resets the global pointers to NULL, so an accidental  */
/* double free or use after free is harmless/detectable      */
/* (the pointers stay in scope for the whole benchmark).     */
/*-----------------------------------------------------------*/
int freePingpingData(){
    free(pingSendBuf);
    pingSendBuf = NULL;
    free(pingRecvBuf);
    pingRecvBuf = NULL;
    free(finalRecvBuf);
    finalRecvBuf = NULL;
    return 0;
}
/*-----------------------------------------------------------*/
/* testPingping */
/* */
/* Verifies that the PingPing benchmark worked correctly. */
/*-----------------------------------------------------------*/
// Verifies the pingping exchange: builds the expected receive buffer
// (each slot should hold the sending rank's per-thread global ID), compares
// it with finalRecvBuf on ranks A and B, then reduces the pass/fail flags
// with a logical AND to rank 0, which records the outcome.  Returns 0.
int testPingping(int sizeofBuffer,int dataSize){
int otherPingRank, i, testFlag, reduceFlag;
int *testBuf;
/* initialise testFlag to true (test passed); ranks that do not
 * participate contribute TRUE to the MPI_LAND reduction below */
testFlag = TRUE;
/* Testing only needs to be done by pingRankA & pingRankB */
if (myMPIRank == pingRankA || myMPIRank == pingRankB){
/* allocate space for testBuf */
testBuf = (int *)malloc(sizeofBuffer * sizeof(int));
/* set the ID of other pingRank */
if (myMPIRank == pingRankA){
otherPingRank = pingRankB;
}
else if (myMPIRank == pingRankB){
otherPingRank = pingRankA;
}
/* construct testBuf array with correct values.
 * These are the values that should be in finalRecvBuf.
 */
#pragma omp parallel for default(none) \
private(i) \
shared(otherPingRank,numThreads,testBuf,dataSize,sizeofBuffer) \
schedule(static,dataSize)
for (i=0; i<sizeofBuffer; i++){
/* calculate globalID of thread expected in finalRecvBuf
 * This is done by using otherPingRank
 */
testBuf[i] = (otherPingRank * numThreads) + myThreadID;
}
/* compare each element of testBuf and finalRecvBuf */
for (i=0; i<sizeofBuffer; i++){
if (testBuf[i] != finalRecvBuf[i]){
testFlag = FALSE;
}
}
/* free space for testBuf */
free(testBuf);
}
/* logical-AND all ranks' flags onto rank 0 */
MPI_Reduce(&testFlag, &reduceFlag, 1, MPI_INT, MPI_LAND, 0, comm);
/* Master process sets the testOutcome using testFlag. */
if (myMPIRank == 0){
setTestOutcome(reduceFlag);
}
return 0;
}
|
cg.c | /*--------------------------------------------------------------------
NAS Parallel Benchmarks 3.0 structured OpenMP C versions - CG
This benchmark is an OpenMP C version of the NPB CG code.
The OpenMP C 2.3 versions are derived by RWCP from the serial Fortran versions
in "NPB 2.3-serial" developed by NAS. 3.0 translation is performed by the UVSQ.
Permission to use, copy, distribute and modify this software for any
purpose with or without fee is hereby granted.
This software is provided "as is" without express or implied warranty.
Information on OpenMP activities at RWCP is available at:
http://pdplab.trc.rwcp.or.jp/pdperf/Omni/
Information on NAS Parallel Benchmarks 2.3 is available at:
http://www.nas.nasa.gov/NAS/NPB/
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
Authors: M. Yarrow
C. Kuszmaul
OpenMP C version: S. Satoh
3.0 structure translation: F. Conti
--------------------------------------------------------------------*/
/*
c---------------------------------------------------------------------
c Note: please observe that in the routine conj_grad three
c implementations of the sparse matrix-vector multiply have
c been supplied. The default matrix-vector multiply is not
c loop unrolled. The alternate implementations are unrolled
c to a depth of 2 and unrolled to a depth of 8. Please
c experiment with these to find the fastest for your particular
c architecture. If reporting timing results, any of these three may
c be used without penalty.
c---------------------------------------------------------------------
*/
#include "npb-C.h"
#include "npbparams.h"
#define NZ NA*(NONZER+1)*(NONZER+1)+NA*(NONZER+2)
/* global variables */
/* common /partit_size/ */
static int naa;
static int nzz;
static int firstrow;
static int lastrow;
static int firstcol;
static int lastcol;
/* common /main_int_mem/ */
static int colidx[NZ+1]; /* colidx[1:NZ] */
static int rowstr[NA+1+1]; /* rowstr[1:NA+1] */
static int iv[2*NA+1+1]; /* iv[1:2*NA+1] */
static int arow[NZ+1]; /* arow[1:NZ] */
static int acol[NZ+1]; /* acol[1:NZ] */
/* common /main_flt_mem/ */
static double v[NA+1+1]; /* v[1:NA+1] */
static double aelt[NZ+1]; /* aelt[1:NZ] */
static double a[NZ+1]; /* a[1:NZ] */
static double x[NA+2+1]; /* x[1:NA+2] */
static double z[NA+2+1]; /* z[1:NA+2] */
static double p[NA+2+1]; /* p[1:NA+2] */
static double q[NA+2+1]; /* q[1:NA+2] */
static double r[NA+2+1]; /* r[1:NA+2] */
//static double w[NA+2+1]; /* w[1:NA+2] */
/* common /urando/ */
static double amult;
static double tran;
/* function declarations */
static void conj_grad (int colidx[], int rowstr[], double x[], double z[],
double a[], double p[], double q[], double r[],
//double w[],
double *rnorm);
static void makea(int n, int nz, double a[], int colidx[], int rowstr[],
int nonzer, int firstrow, int lastrow, int firstcol,
int lastcol, double rcond, int arow[], int acol[],
double aelt[], double v[], int iv[], double shift );
static void sparse(double a[], int colidx[], int rowstr[], int n,
int arow[], int acol[], double aelt[],
int firstrow, int lastrow,
double x[], boolean mark[], int nzloc[], int nnza);
static void sprnvc(int n, int nz, double v[], int iv[], int nzloc[],
int mark[]);
static int icnvrt(double x, int ipwr2);
static void vecset(int n, double v[], int iv[], int *nzv, int i, double val);
/*--------------------------------------------------------------------
program cg
--------------------------------------------------------------------*/
// NAS CG benchmark driver: builds the sparse test matrix, runs one untimed
// conj_grad iteration to warm code/data paths, then times NITER iterations
// of the inverse power method, verifies zeta against the class-specific
// reference value, and prints the standard NPB report.
int main(int argc, char **argv) {
int i, j, k, it;
int nthreads = 1;
double zeta;
double rnorm;
double norm_temp11;
double norm_temp12;
double t, mflops;
char class;
boolean verified;
double zeta_verify_value, epsilon;
/* 1-based global row/col ranges owned by this (single) process */
firstrow = 1;
lastrow  = NA;
firstcol = 1;
lastcol  = NA;
/* select problem class and its reference zeta from compile-time params */
if (NA == 1400 && NONZER == 7 && NITER == 15 && SHIFT == 10.0) {
class = 'S';
zeta_verify_value = 8.5971775078648;
} else if (NA == 7000 && NONZER == 8 && NITER == 15 && SHIFT == 12.0) {
class = 'W';
zeta_verify_value = 10.362595087124;
} else if (NA == 14000 && NONZER == 11 && NITER == 15 && SHIFT == 20.0) {
class = 'A';
zeta_verify_value = 17.130235054029;
} else if (NA == 75000 && NONZER == 13 && NITER == 75 && SHIFT == 60.0) {
class = 'B';
zeta_verify_value = 22.712745482631;
} else if (NA == 150000 && NONZER == 15 && NITER == 75 && SHIFT == 110.0) {
class = 'C';
zeta_verify_value = 28.973605592845;
} else {
class = 'U';
}
printf("\n\n NAS Parallel Benchmarks 3.0 structured OpenMP C version"
" - CG Benchmark\n");
printf(" Size: %10d\n", NA);
printf(" Iterations: %5d\n", NITER);
naa = NA;
nzz = NZ;
/*--------------------------------------------------------------------
c Initialize random number generator
c-------------------------------------------------------------------*/
tran = 314159265.0;
amult = 1220703125.0;
zeta = randlc( &tran, amult );
/*--------------------------------------------------------------------
c Generate the sparse test matrix
c-------------------------------------------------------------------*/
makea(naa, nzz, a, colidx, rowstr, NONZER,
firstrow, lastrow, firstcol, lastcol,
RCOND, arow, acol, aelt, v, iv, SHIFT);
/*---------------------------------------------------------------------
c Note: as a result of the above call to makea:
c values of j used in indexing rowstr go from 1 --> lastrow-firstrow+1
c values of colidx which are col indexes go from firstcol --> lastcol
c So:
c Shift the col index vals from actual (firstcol --> lastcol )
c to local, i.e., (1 --> lastcol-firstcol+1)
c---------------------------------------------------------------------*/
#pragma omp parallel default(shared) private(i,j,k)
{
#pragma omp for nowait
for (j = 1; j <= lastrow - firstrow + 1; j++) {
for (k = rowstr[j]; k < rowstr[j+1]; k++) {
colidx[k] = colidx[k] - firstcol + 1;
}
}
/*--------------------------------------------------------------------
c set starting vector to (1, 1, .... 1)
c-------------------------------------------------------------------*/
#pragma omp for nowait
for (i = 1; i <= NA+1; i++) {
x[i] = 1.0;
}
#pragma omp for nowait
for (j = 1; j <= lastcol-firstcol+1; j++) {
q[j] = 0.0;
z[j] = 0.0;
r[j] = 0.0;
p[j] = 0.0;
}
}// end omp parallel
zeta = 0.0;
/*-------------------------------------------------------------------
c---->
c Do one iteration untimed to init all code and data page tables
c----> (then reinit, start timing, to niter its)
c-------------------------------------------------------------------*/
for (it = 1; it <= 1; it++) {
/*--------------------------------------------------------------------
c The call to the conjugate gradient routine:
c-------------------------------------------------------------------*/
conj_grad (colidx, rowstr, x, z, a, p, q, r,/* w,*/ &rnorm);
/*--------------------------------------------------------------------
c zeta = shift + 1/(x.z)
c So, first: (x.z)
c Also, find norm of z
c So, first: (z.z)
c-------------------------------------------------------------------*/
norm_temp11 = 0.0;
norm_temp12 = 0.0;
#pragma omp parallel for default(shared) private(j) reduction(+:norm_temp11,norm_temp12)
for (j = 1; j <= lastcol-firstcol+1; j++) {
norm_temp11 = norm_temp11 + x[j]*z[j];
norm_temp12 = norm_temp12 + z[j]*z[j];
}
norm_temp12 = 1.0 / sqrt( norm_temp12 );
/*--------------------------------------------------------------------
c Normalize z to obtain x
c-------------------------------------------------------------------*/
#pragma omp parallel for default(shared) private(j)
for (j = 1; j <= lastcol-firstcol+1; j++) {
x[j] = norm_temp12*z[j];
}
} /* end of do one iteration untimed */
/*--------------------------------------------------------------------
c set starting vector to (1, 1, .... 1)
c-------------------------------------------------------------------*/
#pragma omp parallel for default(shared) private(i)
for (i = 1; i <= NA+1; i++) {
x[i] = 1.0;
}
zeta = 0.0;
timer_clear( 1 );
timer_start( 1 );
/*--------------------------------------------------------------------
c---->
c Main Iteration for inverse power method
c---->
c-------------------------------------------------------------------*/
for (it = 1; it <= NITER; it++) {
/*--------------------------------------------------------------------
c The call to the conjugate gradient routine:
c-------------------------------------------------------------------*/
conj_grad(colidx, rowstr, x, z, a, p, q, r/*, w*/, &rnorm);
/*--------------------------------------------------------------------
c zeta = shift + 1/(x.z)
c So, first: (x.z)
c Also, find norm of z
c So, first: (z.z)
c-------------------------------------------------------------------*/
norm_temp11 = 0.0;
norm_temp12 = 0.0;
#pragma omp parallel for default(shared) private(j) reduction(+:norm_temp11,norm_temp12)
for (j = 1; j <= lastcol-firstcol+1; j++) {
norm_temp11 = norm_temp11 + x[j]*z[j];
norm_temp12 = norm_temp12 + z[j]*z[j];
}
norm_temp12 = 1.0 / sqrt( norm_temp12 );
zeta = SHIFT + 1.0 / norm_temp11;
if( it == 1 ) {
printf(" iteration ||r|| zeta\n");
}
printf(" %5d %20.14e%20.13e\n", it, rnorm, zeta);
/*--------------------------------------------------------------------
c Normalize z to obtain x
c-------------------------------------------------------------------*/
#pragma omp parallel for default(shared) private(j)
for (j = 1; j <= lastcol-firstcol+1; j++) {
x[j] = norm_temp12*z[j];
}
} /* end of main iter inv pow meth */
/* record the thread count for the report (master thread only) */
#pragma omp parallel
{
#if defined(_OPENMP)
#pragma omp master
nthreads = omp_get_num_threads();
#endif /* _OPENMP */
} /* end parallel */
timer_stop( 1 );
/*--------------------------------------------------------------------
c End of timed section
c-------------------------------------------------------------------*/
t = timer_read( 1 );
printf(" Benchmark completed\n");
epsilon = 1.0e-10;
if (class != 'U') {
if (fabs(zeta - zeta_verify_value) <= epsilon) {
verified = TRUE;
printf(" VERIFICATION SUCCESSFUL\n");
printf(" Zeta is %20.12e\n", zeta);
printf(" Error is %20.12e\n", zeta - zeta_verify_value);
} else {
verified = FALSE;
printf(" VERIFICATION FAILED\n");
printf(" Zeta %20.12e\n", zeta);
printf(" The correct zeta is %20.12e\n", zeta_verify_value);
}
} else {
verified = FALSE;
printf(" Problem size unknown\n");
printf(" NO VERIFICATION PERFORMED\n");
}
/* report MFLOPS using the standard NPB CG operation count */
if ( t != 0.0 ) {
mflops = (2.0*NITER*NA)
* (3.0+(NONZER*(NONZER+1)) + 25.0*(5.0+(NONZER*(NONZER+1))) + 3.0 )
/ t / 1000000.0;
} else {
mflops = 0.0;
}
c_print_results("CG", class, NA, 0, 0, NITER, nthreads, t,
mflops, " floating point",
verified, NPBVERSION, COMPILETIME,
CS1, CS2, CS3, CS4, CS5, CS6, CS7);
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c conj_grad: one inverse-power-method step of conjugate gradient.
c Solves A.z ~= x with cgitmax (25) CG iterations and returns in *rnorm
c the explicit residual norm ||x - A.z||.  Uses the file-scope globals
c naa, firstrow/lastrow, firstcol/lastcol set up by main/makea.
c-------------------------------------------------------------------*/
static void conj_grad (
int colidx[], /* colidx[1:nzz] */
int rowstr[], /* rowstr[1:naa+1] */
double x[], /* x[*] */
double z[], /* z[*] */
double a[], /* a[1:nzz] */
double p[], /* p[*] */
double q[], /* q[*] */
double r[], /* r[*] */
//double w[], /* w[*] */
double *rnorm )
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
/*---------------------------------------------------------------------
c Floaging point arrays here are named as in NPB1 spec discussion of
c CG algorithm
c---------------------------------------------------------------------*/
{
static int callcount = 0;
double d, sum, rho, rho0, alpha, beta;
int i, j, k;
int cgit, cgitmax = 25;
rho = 0.0;
#pragma omp parallel default(shared) private(j,sum) shared(rho,naa)
/*--------------------------------------------------------------------
c Initialize the CG algorithm: q=0, z=0, r=p=x
c-------------------------------------------------------------------*/
{
#pragma omp for
for (j = 1; j <= naa+1; j++) {
q[j] = 0.0;
z[j] = 0.0;
r[j] = x[j];
p[j] = r[j];
//w[j] = 0.0;
}
/*--------------------------------------------------------------------
c rho = r.r
c Now, obtain the norm of r: First, sum squares of r elements locally...
c-------------------------------------------------------------------*/
#pragma omp for reduction(+:rho)
for (j = 1; j <= lastcol-firstcol+1; j++) {
rho = rho + r[j]*r[j];
}
}/* end omp parallel */
/*--------------------------------------------------------------------
c---->
c The conj grad iteration loop
c---->
c-------------------------------------------------------------------*/
for (cgit = 1; cgit <= cgitmax; cgit++) {
rho0 = rho;
d = 0.0;
rho = 0.0;
#pragma omp parallel default(shared) private(j,k,sum,alpha,beta) shared(d,rho0,rho)
{
/*--------------------------------------------------------------------
c q = A.p
c The partition submatrix-vector multiply: use workspace w
c---------------------------------------------------------------------
C
C NOTE: this version of the multiply is actually (slightly: maybe %5)
C faster on the sp2 on 16 nodes than is the unrolled-by-2 version
C below. On the Cray t3d, the reverse is true, i.e., the
C unrolled-by-two version is some 10% faster.
C The unrolled-by-8 version below is significantly faster
C on the Cray t3d - overall speed of code is 1.5 times faster.
*/
/* rolled version */
#pragma omp for
for (j = 1; j <= lastrow-firstrow+1; j++) {
sum = 0.0;
for (k = rowstr[j]; k < rowstr[j+1]; k++) {
sum = sum + a[k]*p[colidx[k]];
}
//w[j] = sum;
q[j] = sum;
}
/* unrolled-by-two version
#pragma omp for private(i,k)
for (j = 1; j <= lastrow-firstrow+1; j++) {
int iresidue;
double sum1, sum2;
i = rowstr[j];
iresidue = (rowstr[j+1]-i) % 2;
sum1 = 0.0;
sum2 = 0.0;
if (iresidue == 1) sum1 = sum1 + a[i]*p[colidx[i]];
for (k = i+iresidue; k <= rowstr[j+1]-2; k += 2) {
sum1 = sum1 + a[k] * p[colidx[k]];
sum2 = sum2 + a[k+1] * p[colidx[k+1]];
}
w[j] = sum1 + sum2;
}
*/
/* unrolled-by-8 version
#pragma omp for private(i,k,sum)
for (j = 1; j <= lastrow-firstrow+1; j++) {
int iresidue;
i = rowstr[j];
iresidue = (rowstr[j+1]-i) % 8;
sum = 0.0;
for (k = i; k <= i+iresidue-1; k++) {
sum = sum + a[k] * p[colidx[k]];
}
for (k = i+iresidue; k <= rowstr[j+1]-8; k += 8) {
sum = sum + a[k ] * p[colidx[k ]]
+ a[k+1] * p[colidx[k+1]]
+ a[k+2] * p[colidx[k+2]]
+ a[k+3] * p[colidx[k+3]]
+ a[k+4] * p[colidx[k+4]]
+ a[k+5] * p[colidx[k+5]]
+ a[k+6] * p[colidx[k+6]]
+ a[k+7] * p[colidx[k+7]];
}
w[j] = sum;
}
*/
/*
#pragma omp for
for (j = 1; j <= lastcol-firstcol+1; j++) {
q[j] = w[j];
}
*/
/*--------------------------------------------------------------------
c Clear w for reuse...
c-------------------------------------------------------------------*/
/*
#pragma omp for nowait
for (j = 1; j <= lastcol-firstcol+1; j++) {
w[j] = 0.0;
}
*/
/*--------------------------------------------------------------------
c Obtain p.q
c-------------------------------------------------------------------*/
#pragma omp for reduction(+:d)
for (j = 1; j <= lastcol-firstcol+1; j++) {
d = d + p[j]*q[j];
}
#pragma omp barrier
/*--------------------------------------------------------------------
c Obtain alpha = rho / (p.q)
c (computed redundantly by every thread from the shared d, rho0 —
c equivalent to a single, since all threads compute the same value)
c-------------------------------------------------------------------*/
//#pragma omp single
alpha = rho0 / d;
/*--------------------------------------------------------------------
c Save a temporary of rho
c-------------------------------------------------------------------*/
/* rho0 = rho;*/
/*---------------------------------------------------------------------
c Obtain z = z + alpha*p
c and r = r - alpha*q
c NOTE: the rho = r.r accumulation is fused into this same loop
c (the commented-out braces below mark the original separate loop).
c---------------------------------------------------------------------*/
#pragma omp for reduction(+:rho)
for (j = 1; j <= lastcol-firstcol+1; j++) {
z[j] = z[j] + alpha*p[j];
r[j] = r[j] - alpha*q[j];
// }
/*---------------------------------------------------------------------
c rho = r.r
c Now, obtain the norm of r: First, sum squares of r elements locally...
c---------------------------------------------------------------------*/
/*
#pragma omp for
for (j = 1; j <= lastcol-firstcol+1; j++) {*/
rho = rho + r[j]*r[j];
}
//#pragma omp barrier
/*--------------------------------------------------------------------
c Obtain beta (again computed redundantly by every thread):
c-------------------------------------------------------------------*/
//#pragma omp single
beta = rho / rho0;
/*--------------------------------------------------------------------
c p = r + beta*p
c-------------------------------------------------------------------*/
#pragma omp for nowait
for (j = 1; j <= lastcol-firstcol+1; j++) {
p[j] = r[j] + beta*p[j];
}
callcount++;
} /* end omp parallel */
} /* end of do cgit=1,cgitmax */
/*---------------------------------------------------------------------
c Compute residual norm explicitly: ||r|| = ||x - A.z||
c First, form A.z
c The partition submatrix-vector multiply
c---------------------------------------------------------------------*/
sum = 0.0;
#pragma omp parallel default(shared) private(j,d) shared(sum)
{
#pragma omp for //private(d, k)
for (j = 1; j <= lastrow-firstrow+1; j++) {
d = 0.0;
for (k = rowstr[j]; k <= rowstr[j+1]-1; k++) {
d = d + a[k]*z[colidx[k]];
}
r[j] = d;
}
/*--------------------------------------------------------------------
c At this point, r contains A.z
c-------------------------------------------------------------------*/
#pragma omp for reduction(+:sum)
for (j = 1; j <= lastcol-firstcol+1; j++) {
d = x[j] - r[j];
sum = sum + d*d;
}
} //end omp parallel
(*rnorm) = sqrt(sum);
}
/*---------------------------------------------------------------------
c generate the test problem for benchmark 6
c makea generates a sparse matrix with a
c prescribed sparsity distribution
c
c parameter type usage
c
c input
c
c n i number of cols/rows of matrix
c nz i nonzeros as declared array size
c rcond r*8 condition number
c shift r*8 main diagonal shift
c
c output
c
c a r*8 array for nonzeros
c colidx i col indices
c rowstr i row pointers
c
c workspace
c
c iv, arow, acol i
c v, aelt r*8
c---------------------------------------------------------------------*/
/*
 * Build the CG test matrix as a sum of outer products of sparse random
 * vectors with geometrically decaying weights (size shrinks by
 * ratio = rcond^(1/n) each pass), then add (rcond - shift) on the diagonal.
 * Output is a triple list (arow, acol, aelt) that sparse() assembles into
 * CSR form (a, colidx, rowstr).  All arrays use 1-based indexing.
 */
static void makea(
    int n,
    int nz,
    double a[],     /* a[1:nz] */
    int colidx[],   /* colidx[1:nz] */
    int rowstr[],   /* rowstr[1:n+1] */
    int nonzer,
    int firstrow,
    int lastrow,
    int firstcol,
    int lastcol,
    double rcond,
    int arow[],     /* arow[1:nz] */
    int acol[],     /* acol[1:nz] */
    double aelt[],  /* aelt[1:nz] */
    double v[],     /* v[1:n+1] */
    int iv[],       /* iv[1:2*n+1] */
    double shift )
{
    int i, nnza, iouter, ivelt, ivelt1, irow, nzv;
/*--------------------------------------------------------------------
c      nonzer is approximately  (int(sqrt(nnza /n)));
c-------------------------------------------------------------------*/
    double size, ratio, scale;
    int jcol;

    size = 1.0;                             /* weight of the current outer product */
    ratio = pow(rcond, (1.0 / (double)n));  /* per-pass decay factor */
    nnza = 0;                               /* triples generated so far */
/*---------------------------------------------------------------------
c  Initialize colidx(n+1 .. 2n) to zero.
c  Used by sprnvc to mark nonzero positions
c---------------------------------------------------------------------*/
#pragma omp parallel for default(shared) private(i)
    for (i = 1; i <= n; i++) {
        colidx[n+i] = 0;
    }
    for (iouter = 1; iouter <= n; iouter++) {
        nzv = nonzer;
        /* random sparse vector; colidx[0..n-1] is scratch, colidx[n..2n-1] the marks */
        sprnvc(n, nzv, v, iv, &(colidx[0]), &(colidx[n]));
        /* force a nonzero at position iouter so the diagonal pass hits every row */
        vecset(n, v, iv, &nzv, iouter, 0.5);
        /* emit the outer product v * v^T restricted to this rank's submatrix */
        for (ivelt = 1; ivelt <= nzv; ivelt++) {
            jcol = iv[ivelt];
            if (jcol >= firstcol && jcol <= lastcol) {
                scale = size * v[ivelt];
                for (ivelt1 = 1; ivelt1 <= nzv; ivelt1++) {
                    irow = iv[ivelt1];
                    if (irow >= firstrow && irow <= lastrow) {
                        nnza = nnza + 1;
                        if (nnza > nz) {
                            /* declared workspace exhausted: abort rather than overrun */
                            printf("Space for matrix elements exceeded in"
                                   " makea\n");
                            printf("nnza, nzmax = %d, %d\n", nnza, nz);
                            printf("iouter = %d\n", iouter);
                            exit(1);
                        }
                        acol[nnza] = jcol;
                        arow[nnza] = irow;
                        aelt[nnza] = v[ivelt1] * scale;
                    }
                }
            }
        }
        size = size * ratio;
    }
/*---------------------------------------------------------------------
c       ... add the identity * rcond to the generated matrix to bound
c           the smallest eigenvalue from below by rcond
c---------------------------------------------------------------------*/
    for (i = firstrow; i <= lastrow; i++) {
        if (i >= firstcol && i <= lastcol) {
            iouter = n + i;
            nnza = nnza + 1;
            if (nnza > nz) {
                printf("Space for matrix elements exceeded in makea\n");
                printf("nnza, nzmax = %d, %d\n", nnza, nz);
                printf("iouter = %d\n", iouter);
                exit(1);
            }
            acol[nnza] = i;
            arow[nnza] = i;
            aelt[nnza] = rcond - shift;   /* diagonal shift */
        }
    }
/*---------------------------------------------------------------------
c       ... make the sparse matrix from list of elements with duplicates
c           (v and iv are used as workspace)
c---------------------------------------------------------------------*/
    sparse(a, colidx, rowstr, n, arow, acol, aelt,
           firstrow, lastrow, v, &(iv[0]), &(iv[n]), nnza);
}
/*---------------------------------------------------
c generate a sparse matrix from a list of
c [col, row, element] tri
c---------------------------------------------------*/
/*
 * Assemble the triple list (arow, acol, aelt) into CSR arrays (a, colidx,
 * rowstr), summing duplicate (row, col) entries.  Three phases: count
 * triples per row, bucket-sort them into place, then compress duplicates
 * row by row using x[]/mark[]/nzloc[] as scatter workspace.
 */
static void sparse(
    double a[],      /* a[1:*] */
    int colidx[],    /* colidx[1:*] */
    int rowstr[],    /* rowstr[1:*] */
    int n,
    int arow[],      /* arow[1:*] */
    int acol[],      /* acol[1:*] */
    double aelt[],   /* aelt[1:*] */
    int firstrow,
    int lastrow,
    double x[],      /* x[1:n] */
    boolean mark[],  /* mark[1:n] */
    int nzloc[],     /* nzloc[1:n] */
    int nnza)
/*---------------------------------------------------------------------
c       rows range from firstrow to lastrow
c       the rowstr pointers are defined for nrows = lastrow-firstrow+1 values
c---------------------------------------------------------------------*/
{
    int nrows;
    int i, j, jajp1, nza, k, nzrow;
    double xi;
/*--------------------------------------------------------------------
c    how many rows of result
c-------------------------------------------------------------------*/
    nrows = lastrow - firstrow + 1;
/*--------------------------------------------------------------------
c     ...count the number of triples in each row
c-------------------------------------------------------------------*/
#pragma omp parallel for default(shared) private(j)
    for (j = 1; j <= n; j++) {
        rowstr[j] = 0;
        mark[j] = FALSE;
    }
    rowstr[n+1] = 0;
    /* rowstr[j+1] counts triples of row j (offset by one for the prefix scan) */
    for (nza = 1; nza <= nnza; nza++) {
        j = (arow[nza] - firstrow + 1) + 1;
        rowstr[j] = rowstr[j] + 1;
    }
    /* prefix-sum the counts so rowstr[j] is the first slot of row j */
    rowstr[1] = 1;
    for (j = 2; j <= nrows+1; j++) {
        rowstr[j] = rowstr[j] + rowstr[j-1];
    }
/*---------------------------------------------------------------------
c     ... rowstr(j) now is the location of the first nonzero
c           of row j of a
c---------------------------------------------------------------------*/
/*---------------------------------------------------------------------
c     ... preload data pages
c---------------------------------------------------------------------*/
#pragma omp parallel for default(shared) private(k,j)
    for (j = 0; j <= nrows-1; j++) {
        for (k = rowstr[j]; k <= rowstr[j+1]-1; k++)
            a[k] = 0.0;
    }
/*--------------------------------------------------------------------
c     ... do a bucket sort of the triples on the row index
c-------------------------------------------------------------------*/
    /* each placement bumps rowstr[j], so afterwards the pointers are shifted */
    for (nza = 1; nza <= nnza; nza++) {
        j = arow[nza] - firstrow + 1;
        k = rowstr[j];
        a[k] = aelt[nza];
        colidx[k] = acol[nza];
        rowstr[j] = rowstr[j] + 1;
    }
/*--------------------------------------------------------------------
c       ... rowstr(j) now points to the first element of row j+1
c-------------------------------------------------------------------*/
    /* shift the pointers back down by one row to restore row starts */
    for (j = nrows; j >= 1; j--) {
        rowstr[j+1] = rowstr[j];
    }
    rowstr[1] = 1;
/*--------------------------------------------------------------------
c       ... generate the actual output rows by adding elements
c-------------------------------------------------------------------*/
    nza = 0;
#pragma omp parallel for default(shared) private(i)
    for (i = 1; i <= n; i++) {
        x[i] = 0.0;
        mark[i] = FALSE;   /* NOTE(review): mark[] was already cleared above; redundant but harmless */
    }
    jajp1 = rowstr[1];     /* start of the (uncompressed) current row */
    for (j = 1; j <= nrows; j++) {
        nzrow = 0;         /* distinct column count for this row */
/*--------------------------------------------------------------------
c          ...loop over the jth row of a
c-------------------------------------------------------------------*/
        /* scatter-add duplicates into x[], remembering touched columns */
        for (k = jajp1; k < rowstr[j+1]; k++) {
            i = colidx[k];
            x[i] = x[i] + a[k];
            if ( mark[i] == FALSE && x[i] != 0.0) {
                mark[i] = TRUE;
                nzrow = nzrow + 1;
                nzloc[nzrow] = i;
            }
        }
/*--------------------------------------------------------------------
c          ... extract the nonzeros of this row
c-------------------------------------------------------------------*/
        for (k = 1; k <= nzrow; k++) {
            i = nzloc[k];
            mark[i] = FALSE;
            xi = x[i];
            x[i] = 0.0;     /* reset workspace for the next row */
            if (xi != 0.0) {
                nza = nza + 1;
                a[nza] = xi;
                colidx[nza] = i;
            }
        }
        jajp1 = rowstr[j+1];
        rowstr[j+1] = nza + rowstr[1];  /* new (compressed) end of row j */
    }
}
/*---------------------------------------------------------------------
c generate a sparse n-vector (v, iv)
c having nzv nonzeros
c
c mark(i) is set to 1 if position i is nonzero.
c mark is all zero on entry and is reset to all zero before exit
c this corrects a performance bug found by John G. Lewis, caused by
c reinitialization of mark on every one of the n calls to sprnvc
---------------------------------------------------------------------*/
/*
 * Generate a sparse n-vector (v, iv) with nz nonzeros at distinct random
 * positions in 1..n.  mark[] must be all zero on entry; it is used to
 * reject duplicate positions and is restored to all zero before returning
 * (avoids re-clearing the whole array on each of the n calls).
 */
static void sprnvc(
    int n,
    int nz,
    double v[],     /* v[1:*] */
    int iv[],       /* iv[1:*] */
    int nzloc[],    /* nzloc[1:n] */
    int mark[] )    /* mark[1:n] */
{
    int pow2;            /* power of two used to scale the random index */
    int nfilled = 0;     /* nonzeros stored so far */
    int nmarked = 0;     /* positions recorded in nzloc[] */
    int idx, k;
    double val, loc;

    pow2 = 1;
    do {
        pow2 += pow2;
    } while (pow2 < n);

    while (nfilled < nz) {
        val = randlc(&tran, amult);
        /* draw a second random number and map it to an integer in 1..n
           in a portable manner; out-of-range draws are rejected */
        loc = randlc(&tran, amult);
        idx = icnvrt(loc, pow2) + 1;
        if (idx > n)
            continue;
        if (mark[idx] != 0)
            continue;    /* this position was generated already */
        mark[idx] = 1;
        nmarked++;
        nzloc[nmarked] = idx;
        nfilled++;
        v[nfilled] = val;
        iv[nfilled] = idx;
    }

    /* clear only the marks we set, leaving mark[] all zero again */
    for (k = 1; k <= nmarked; k++)
        mark[nzloc[k]] = 0;
}
/*---------------------------------------------------------------------
* scale a double precision number x in (0,1) by a power of 2 and chop it
*---------------------------------------------------------------------*/
/*
 * Scale a double x in (0,1) by the power of two ipwr2 and truncate
 * the product to an integer.
 */
static int icnvrt(double x, int ipwr2) {
    double scaled = (double) ipwr2 * x;
    return (int) scaled;
}
/*--------------------------------------------------------------------
c set ith element of sparse vector (v, iv) with
c nzv nonzeros to val
c-------------------------------------------------------------------*/
/*
 * Set position i of the sparse vector (v, iv) with *nzv nonzeros to val.
 * Every stored entry whose index equals i is overwritten; if none exists,
 * (val, i) is appended and *nzv is incremented.  n is unused here but kept
 * for interface compatibility.
 */
static void vecset(
    int n,
    double v[],   /* v[1:*] */
    int iv[],     /* iv[1:*] */
    int *nzv,
    int i,
    double val)
{
    int k;
    int found = 0;   /* did we hit an existing entry for index i? */

    for (k = 1; k <= *nzv; k++) {
        if (iv[k] == i) {
            v[k] = val;
            found = 1;
        }
    }
    if (!found) {
        *nzv += 1;
        v[*nzv] = val;
        iv[*nzv] = i;
    }
}
|
subopt.c | /*
$Log: subopt.c,v $
Revision 2.0 2010/12/06 20:04:20 ronny
repaired subopt for cofolding
Revision 1.24 2008/11/01 21:10:20 ivo
avoid rounding errors when computing DoS
Revision 1.23 2008/03/31 15:06:49 ivo
Add cofolding support in subopt
Revision 1.22 2008/02/23 09:42:35 ivo
fix circular folding bugs with dangles that cross the origin
Revision 1.21 2008/01/08 15:08:51 ivo
circular fold would fail for open chain
Revision 1.20 2008/01/08 14:08:20 ivo
add an option to compute the density of state
Revision 1.19 2007/12/05 13:04:04 ivo
add various circfold variants from Ronny
Revision 1.18 2003/10/06 08:56:45 ivo
use P->TerminalAU
Revision 1.17 2003/08/26 09:26:08 ivo
don't modify print_energy in subopt(); use doubles instead of floats
Revision 1.16 2001/10/01 13:50:00 ivo
sorted -> subopt_sorted
Revision 1.15 2001/09/17 10:30:42 ivo
move scale_parameters() into params.c
returns pointer to paramT structure
Revision 1.14 2001/08/31 15:02:19 ivo
Let subopt either write to file pointer or return a list of structures,
so we can nicely integrate it into the library
Revision 1.13 2001/04/05 07:35:08 ivo
remove uneeded declaration of TETRA_ENERGY
Revision 1.12 2000/10/10 08:53:20 ivo
adapted for new Turner energy parameters
supports all constraints that forbid pairs
Revision 1.11 2000/04/08 15:56:18 ivo
with noLonelyPairs=1 will produce no structures with isolated base pairs
(Giegerich's canonical structures)
Revision 1.10 1999/05/06 10:13:35 ivo
recalculte energies before printing if logML is set
+ cosmetic changes
Revision 1.9 1998/05/19 16:31:52 ivo
added support for constrained folding
Revision 1.8 1998/03/30 14:44:54 ivo
cleanup of make_printout etc.
Revision 1.7 1998/03/30 14:39:31 ivo
replaced BasePairs list with structure string in STATE
save memory by not storing (and sorting) structures
modified for use with ViennaRNA-1.2.1
Revision 1.6 1997/10/21 11:34:09 walter
steve update
Revision 1.1 1997/08/04 21:05:32 walter
Initial revision
*/
/*
suboptimal folding - Stefan Wuchty, Walter Fontana & Ivo Hofacker
Vienna RNA package
*/
#include <config.h>
#include <stdio.h>
#include <stdlib.h>
#include <ctype.h>
#include <string.h>
#include <math.h>
#include "fold.h"
#include "utils.h"
#include "energy_par.h"
#include "fold_vars.h"
#include "pair_mat.h"
#include "list.h"
#include "params.h"
#include "loop_energies.h"
#include "cofold.h"
#include "gquad.h"
#include "subopt.h"
#ifdef _OPENMP
#include <omp.h>
#endif
#define true 1
#define false 0
#define SAME_STRAND(I,J) (((I)>=cut_point)||((J)<cut_point))
#define NEW_NINIO 1 /* use new asymetry penalty */
#define STACK_BULGE1 1 /* stacking energies for bulges of size 1 */
#define MAXALPHA 20 /* maximal length of alphabet */
/*
#################################
# GLOBAL VARIABLES #
#################################
*/
PUBLIC int subopt_sorted=0; /* output sorted by energy */
PUBLIC int density_of_states[MAXDOS+1];
PUBLIC double print_energy = 9999; /* printing threshold for use with logML */
/* One node of the backtracking stack: a partially built structure plus
   the intervals of the sequence that still need to be traced back. */
typedef struct {
    char *structure;       /* partial dot-bracket string (0-based, length chars) */
    LIST *Intervals;       /* intervals still to be scanned (see scan_interval) */
    int partial_energy;    /* energy of the structural elements fixed so far */
    int is_duplex;         /* NOTE(review): duplex flag passed into make_state; usage elsewhere not visible here — confirm */
    /* int best_energy; */   /* best attainable energy */
} STATE;
/*
#################################
# PRIVATE VARIABLES #
#################################
*/
PRIVATE int turn;
PRIVATE LIST *Stack = NULL;
PRIVATE int nopush;
PRIVATE int best_energy; /* best_energy = remaining energy */
PRIVATE int *f5 = NULL; /* energy of 5 end */
PRIVATE int *c = NULL; /* energy array, given that i-j pair */
PRIVATE int *fML = NULL; /* multi-loop auxiliary energy array */
PRIVATE int *fM1 = NULL; /* another multi-loop auxiliary energy array */
PRIVATE int *fc = NULL; /*energy array, from i (j) to cut*/
PRIVATE int *indx = NULL; /* index for moving in the triangle matrices c[] and f[] */
PRIVATE short *S=NULL, *S1=NULL;
PRIVATE char *ptype=NULL;
PRIVATE paramT *P = NULL;
PRIVATE int length;
PRIVATE int minimal_energy; /* minimum free energy */
PRIVATE int element_energy; /* internal energy of a structural element */
PRIVATE int threshold; /* minimal_energy + delta */
PRIVATE char *sequence = NULL;
PRIVATE int circular = 0;
PRIVATE int struct_constrained = 0;
PRIVATE int *fM2 = NULL; /* energies of M2 */
PRIVATE int Fc, FcH, FcI, FcM; /* parts of the exterior loop energies */
PRIVATE int with_gquad = 0;
PRIVATE int *ggg = NULL;
#ifdef _OPENMP
#pragma omp threadprivate(turn, Stack, nopush, best_energy, f5, c, fML, fM1, fc, indx, S, S1,\
ptype, P, length, minimal_energy, element_energy, threshold, sequence,\
fM2, Fc, FcH, FcI, FcM, circular, struct_constrained,\
ggg, with_gquad)
#endif
/*
#################################
# PRIVATE FUNCTION DECLARATIONS #
#################################
*/
PRIVATE void make_pair(int i, int j, STATE *state);
/* mark a gquadruplex in the resulting dot-bracket structure */
PRIVATE void make_gquad(int i, int L, int l[3], STATE *state);
PRIVATE INTERVAL *make_interval (int i, int j, int ml);
/*@out@*/ PRIVATE STATE *make_state(/*@only@*/LIST *Intervals,
/*@only@*/ /*@null@*/ char *structure,
int partial_energy, int is_duplex);
PRIVATE STATE *copy_state(STATE * state);
PRIVATE void print_state(STATE * state);
PRIVATE void UNUSED print_stack(LIST * list);
/*@only@*/ PRIVATE LIST *make_list(void);
PRIVATE void push(LIST * list, /*@only@*/ void *data);
PRIVATE void *pop(LIST * list);
PRIVATE int best_attainable_energy(STATE * state);
PRIVATE void scan_interval(int i, int j, int array_flag, STATE * state);
PRIVATE void free_interval_node(/*@only@*/ INTERVAL * node);
PRIVATE void free_state_node(/*@only@*/ STATE * node);
PRIVATE void push_back(STATE * state);
PRIVATE char* get_structure(STATE * state);
PRIVATE int compare(const void *solution1, const void *solution2);
PRIVATE void make_output(SOLUTION *SL, FILE *fp);
PRIVATE char *costring(char *string);
PRIVATE void repeat(int i, int j, STATE * state, int part_energy, int temp_energy);
PRIVATE void repeat_gquad( int i, int j, STATE *state, int part_energy, int temp_energy);
/*
#################################
# BEGIN OF FUNCTION DEFINITIONS #
#################################
*/
/*---------------------------------------------------------------------------*/
/*List routines--------------------------------------------------------------*/
/*---------------------------------------------------------------------------*/
PRIVATE void
make_pair(int i, int j, STATE *state)
{
  /* Record the base pair (i, j) (1-based) in the dot-bracket string. */
  char *db = state->structure;
  db[i - 1] = '(';
  db[j - 1] = ')';
}
PRIVATE void
make_gquad(int i, int L, int l[3], STATE *state)
{
  /* Mark the four G-runs (length L each, separated by linkers l[0..2])
     of a G-quadruplex starting at 1-based position i with '+'. */
  int run, x;
  int start = i - 1;            /* 0-based start of the current run */
  for (run = 0; run < 4; run++) {
    for (x = 0; x < L; x++)
      state->structure[start + x] = '+';
    if (run < 3)
      start += L + l[run];      /* advance past this run and its linker */
  }
}
/*---------------------------------------------------------------------------*/
PRIVATE INTERVAL *
make_interval(int i, int j, int array_flag)
{
  /* Allocate a backtracking interval [i, j] tagged with the DP array
     it refers to (see scan_interval for the flag values). */
  INTERVAL *ivl = lst_newnode(sizeof(INTERVAL));
  ivl->i = i;
  ivl->j = j;
  ivl->array_flag = array_flag;
  return ivl;
}
/*---------------------------------------------------------------------------*/
/* Return an interval node to the list allocator. */
PRIVATE void
free_interval_node(INTERVAL * node)
{
  lst_freenode(node);
}
/*---------------------------------------------------------------------------*/
PRIVATE void
free_state_node(STATE * node)
{
  /* Release a state: its structure string, its interval list (if any),
     and finally the node itself. */
  LIST *ivals = node->Intervals;
  free(node->structure);
  if (ivals)
    lst_kill(ivals, lst_freenode);
  lst_freenode(node);
}
/*---------------------------------------------------------------------------*/
/*
 * Create a fresh STATE.  Takes ownership of Intervals and structure when
 * supplied; otherwise starts with an empty interval list and an all-'.'
 * open-chain structure of the current global length.
 */
PRIVATE STATE *
make_state(LIST * Intervals,
           char *structure,
           int partial_energy,
           int is_duplex)
{
  STATE *state;
  state = lst_newnode(sizeof(STATE));
  if (Intervals)
    state->Intervals = Intervals;
  else
    state->Intervals = lst_init();
  if (structure)
    state->structure = structure;
  else {
    int i;
    state->structure = (char *) space(length+1);
    for (i=0; i<length; i++)
      state->structure[i] = '.';
  }
  state->partial_energy = partial_energy;
  /* BUG FIX: the is_duplex argument was previously ignored, leaving the
     field uninitialized in freshly allocated nodes. */
  state->is_duplex = is_duplex;
  return state;
}
/*---------------------------------------------------------------------------*/
/* Deep-copy a state: duplicate its interval list (preserving order) and
   its structure string.  NOTE(review): the is_duplex field is not copied
   here — confirm whether that is intentional. */
PRIVATE STATE *
copy_state(STATE * state)
{
  STATE *new_state;
  void *after;
  INTERVAL *new_interval, *next;
  new_state = lst_newnode(sizeof(STATE));
  new_state->Intervals = lst_init();
  new_state->partial_energy = state->partial_energy;
  /* new_state->best_energy = state->best_energy; */
  if (state->Intervals->count) {
    /* append each copied interval after the previous one to keep order */
    after = LST_HEAD(new_state->Intervals);
    for ( next = lst_first(state->Intervals); next; next = lst_next(next))
      {
        new_interval = lst_newnode(sizeof(INTERVAL));
        *new_interval = *next;
        lst_insertafter(new_state->Intervals, new_interval, after);
        after = new_interval;
      }
  }
  new_state->structure = strdup(state->structure);
  if (!new_state->structure) nrerror("out of memory");
  return new_state;
}
/*---------------------------------------------------------------------------*/
/*@unused @*/ PRIVATE void
print_state(STATE * state)
{
  /* Debug dump of a state's open intervals, partial structure and
     partial energy to stdout. */
  INTERVAL *ivl;
  if (state->Intervals->count) {
    printf("%d intervals:\n", state->Intervals->count);
    for (ivl = lst_first(state->Intervals); ivl; ivl = lst_next(ivl))
      printf("[%d,%d],%d ", ivl->i, ivl->j, ivl->array_flag);
    printf("\n");
  }
  printf("partial structure: %s\n", state->structure);
  printf("\n");
  printf(" partial_energy: %d\n", state->partial_energy);
  /* printf(" best_energy: %d\n", state->best_energy); */
  (void) fflush(stdout);
}
/*---------------------------------------------------------------------------*/
/*@unused @*/ PRIVATE void
print_stack(LIST * list)
{
  /* Debug dump of every state currently on the given list. */
  void *entry;
  printf("================\n");
  printf("%d states\n", list->count);
  for (entry = lst_first(list); entry; entry = lst_next(entry)) {
    printf("state-----------\n");
    print_state(entry);
  }
  printf("================\n");
}
/*---------------------------------------------------------------------------*/
/* Create a new empty list (thin wrapper around lst_init). */
PRIVATE LIST *
make_list(void)
{
  return lst_init();
}
/*---------------------------------------------------------------------------*/
/* Insert data at the head of list.  Clears the global nopush flag so
   callers can detect that at least one push happened. */
PRIVATE void
push(LIST * list, void *data)
{
  nopush = false;
  lst_insertafter(list, data, LST_HEAD(list));
}
/* PRIVATE void */
/* push_stack(STATE *state) { */ /* keep the stack sorted by energy */
/* STATE *after, *next; */
/* nopush = false; */
/* next = after = LST_HEAD(Stack); */
/* while ( next = lst_next(next)) { */
/* if ( next->best_energy >= state->best_energy ) break; */
/* after = next; */
/* } */
/* lst_insertafter(Stack, state, after); */
/* } */
/*---------------------------------------------------------------------------*/
PRIVATE void *
pop(LIST * list)
{
  /* Remove and return the element at the head of the list. */
  return lst_deletenext(list, LST_HEAD(list));
}
/*---------------------------------------------------------------------------*/
/*auxiliary routines---------------------------------------------------------*/
/*---------------------------------------------------------------------------*/
PRIVATE int
best_attainable_energy(STATE * state)
{
  /* Lower bound on the total energy reachable from this state: the
     energy of the elements already fixed plus, for each interval still
     open, the optimal energy stored in the corresponding DP array. */
  register int sum = state->partial_energy;
  INTERVAL *ivl;
  for (ivl = lst_first(state->Intervals); ivl; ivl = lst_next(ivl)) {
    switch (ivl->array_flag) {
      case 0: sum += (circular) ? Fc : f5[ivl->j]; break; /* exterior loop */
      case 1: sum += fML[indx[ivl->j] + ivl->i];   break; /* multiloop */
      case 2: sum += c[indx[ivl->j] + ivl->i];     break; /* closed pair */
      case 3: sum += fM1[indx[ivl->j] + ivl->i];   break; /* multiloop, 1 stem */
      case 4: sum += fc[ivl->i];                   break; /* towards cut, 5' */
      case 5: sum += fc[ivl->j];                   break; /* towards cut, 3' */
      case 6: sum += ggg[indx[ivl->j] + ivl->i];   break; /* g-quadruplex */
      default: break;
    }
  }
  return sum;
}
/*---------------------------------------------------------------------------*/
/* Push a deep copy of state back onto the global Stack. */
PRIVATE void
push_back(STATE * state)
{
  push(Stack, copy_state(state));
  return;
}
/*---------------------------------------------------------------------------*/
PRIVATE char*
get_structure(STATE * state)
{
  /* Return a heap-allocated copy of the state's structure string;
     the caller owns (and frees) the result. */
  return strdup(state->structure);
}
/*---------------------------------------------------------------------------*/
PRIVATE int
compare(const void *solution1, const void *solution2)
{
if (((SOLUTION *) solution1)->energy > ((SOLUTION *) solution2)->energy)
return 1;
if (((SOLUTION *) solution1)->energy < ((SOLUTION *) solution2)->energy)
return -1;
return strcmp(((SOLUTION *) solution1)->structure,
((SOLUTION *) solution2)->structure);
}
/*---------------------------------------------------------------------------*/
PRIVATE void make_output(SOLUTION *SL, FILE *fp)  /* prints stuff */
{
  /* Print every solution in the NULL-terminated list; for cofolded
     sequences (cut_point set) convert to ampersand-separated form. */
  SOLUTION *sol;
  for (sol = SL; sol->structure != NULL; sol++) {
    if (cut_point < 0) {
      fprintf(fp, "%s %6.2f\n", sol->structure, sol->energy);
    } else {
      char *cut_struc = costring(sol->structure);
      fprintf(fp, "%s %6.2f\n", cut_struc, sol->energy);
      free(cut_struc);
    }
  }
}
/*---------------------------------------------------------------------------*/
/* start of subopt backtracking ---------------------------------------------*/
/*---------------------------------------------------------------------------*/
/* Suboptimal structures of a linear (non-circular) sequence within delta
   of the optimum; wrapper around subopt_par() using global settings. */
PUBLIC SOLUTION *subopt(char *seq, char *structure, int delta, FILE *fp){
  return subopt_par(seq, structure, NULL, delta, fold_constrained, 0, fp);
}
/* Same as subopt() but folds the sequence as a circular RNA. */
PUBLIC SOLUTION *subopt_circ(char *seq, char *structure, int delta, FILE *fp){
  return subopt_par(seq, structure, NULL, delta, fold_constrained, 1, fp);
}
/*
 * Enumerate all secondary structures within `delta` (internal integer
 * energy units) of the optimum, using the Wuchty/Fontana/Hofacker
 * stack-based backtracking over the filled MFE arrays.
 *
 * parameters may be NULL (then globals/model defaults are used);
 * is_circular selects circular folding, is_constrained structure
 * constraints.  If fp is non-NULL results are written there; otherwise
 * (or if subopt_sorted) a NULL-terminated SOLUTION list is returned
 * (NULL return when everything was printed and freed).
 */
PUBLIC SOLUTION *subopt_par(char *seq,
                            char *structure,
                            paramT *parameters,
                            int delta,
                            int is_constrained,
                            int is_circular,
                            FILE *fp){
  STATE *state;
  LIST *Intervals;
  INTERVAL *interval;
  SOLUTION *SolutionList;
  unsigned long max_sol, n_sol;    /* capacity / fill of SolutionList */
  int maxlevel, count, partial_energy, old_dangles, logML, dangle_model;
  double structure_energy, min_en, eprint;
  char *struc;
  max_sol = 128;
  n_sol = 0;
  /* publish inputs to the file-scope (threadprivate) state */
  sequence = seq;
  length = strlen(sequence);
  circular = is_circular;
  struct_constrained = is_constrained;
  struc = (char *) space(sizeof(char)*(length+1));
  if (struct_constrained) strncpy(struc, structure, length);
  /* do mfe folding to get fill arrays and get ground state energy  */
  /* in case dangles is neither 0 or 2, set dangles=2 while folding */
  if(P) free(P);
  if(parameters){
    P = get_parameter_copy(parameters);
  } else {
    model_detailsT md;
    set_model_details(&md);
    P = get_scaled_parameters(temperature, md);
  }
  logML = P->model_details.logML;
  old_dangles = dangle_model = P->model_details.dangles;
  with_gquad = P->model_details.gquad;
  /* temporarily set dangles to 2 if necessary */
  if((P->model_details.dangles != 0) && (P->model_details.dangles != 2))
    P->model_details.dangles = 2;
  turn = (cut_point<0) ? 3 : 0;   /* no minimal hairpin size when cofolding */
  uniq_ML = 1;                    /* we need the fM1 array filled */
  if(circular){
    min_en = fold_par(sequence, struc, P, struct_constrained, circular);
    /* borrow the filled DP arrays from the folding engine */
    export_circfold_arrays(&Fc, &FcH, &FcI, &FcM, &fM2, &f5, &c, &fML, &fM1, &indx, &ptype);
    /* restore dangle model */
    P->model_details.dangles = old_dangles;
    /* re-evaluate in case we're using logML etc */
    min_en = energy_of_circ_struct_par(sequence, struc, P, 0);
  } else {
    min_en = cofold_par(sequence, struc, P, struct_constrained);
    if(with_gquad){
      export_cofold_arrays_gq(&f5, &c, &fML, &fM1, &fc, &ggg, &indx, &ptype);
    } else {
      export_cofold_arrays(&f5, &c, &fML, &fM1, &fc, &indx, &ptype);
    }
    /* restore dangle model */
    P->model_details.dangles = old_dangles;
    /* re-evaluate in case we're using logML etc */
    min_en = energy_of_struct_par(sequence, struc, P, 0);
  }
  free(struc);
  eprint = print_energy + min_en;   /* printing threshold in kcal/mol */
  if (fp) {
    char *SeQ;
    SeQ=costring(sequence);
    fprintf(fp, "%s %6d %6d\n", SeQ, (int) (-0.1+100*min_en), delta);
    free(SeQ);
  }
  make_pair_matrix();
  S = encode_sequence(sequence, 0);
  S1 = encode_sequence(sequence, 1);
  /* Initialize ------------------------------------------------------------ */
  maxlevel = 0;
  count = 0;
  partial_energy = 0;
  /* Initialize the stack ------------------------------------------------- */
  minimal_energy = (circular) ? Fc : f5[length];
  threshold = minimal_energy + delta;
  if(threshold > INF){
    warn_user("energy range too high, limiting to reasonable value");
    threshold = INF-EMAX;
  }
  Stack = make_list();                      /* anchor */
  Intervals = make_list();                  /* initial state: */
  interval = make_interval(1, length, 0);   /* interval [1,length,0] */
  push(Intervals, interval);
  state = make_state(Intervals, NULL, partial_energy,0);
  /* state->best_energy = minimal_energy; */
  push(Stack, state);
  /* SolutionList stores the suboptimal structures found */
  SolutionList = (SOLUTION *) space(max_sol*sizeof(SOLUTION));
  /* end initialize ------------------------------------------------------- */
  while (1) {                    /* forever, til nothing remains on stack */
    maxlevel = (Stack->count > maxlevel ? Stack->count : maxlevel);
    if (LST_EMPTY (Stack))       /* we are done! clean up and quit */
      {
        /* fprintf(stderr, "maxlevel: %d\n", maxlevel); */
        lst_kill(Stack, free_state_node);
        SolutionList[n_sol].structure = NULL; /* NULL terminate list */
        if (subopt_sorted) {
          /* sort structures by energy */
          qsort(SolutionList, n_sol, sizeof(SOLUTION), compare);
          if (fp) make_output(SolutionList, fp);
        }
        break;
      }
    /* pop the last element ---------------------------------------------- */
    state = pop(Stack);               /* current state to work with */
    if (LST_EMPTY(state->Intervals))
      {
        int e;
        /* state has no intervals left: we got a solution */
        count++;
        structure = get_structure(state);
        structure_energy = state->partial_energy / 100.;
#ifdef CHECK_ENERGY
        structure_energy = (circular) ? energy_of_circ_struct_par(sequence, structure, P, 0) : (with_gquad) ? energy_of_gquad_struct_par(sequence, structure, P, 0) : energy_of_struct_par(sequence, structure, P, 0);
        if (!logML)
          if ((double) (state->partial_energy / 100.) != structure_energy) {
            fprintf(stderr, "%s %6.2f %6.2f\n", structure,
                    state->partial_energy / 100., structure_energy );
            exit(1);
          }
#endif
        if (logML || (dangle_model==1) || (dangle_model==3)) { /* recalc energy */
          structure_energy = (circular) ? energy_of_circ_struct_par(sequence, structure, P, 0) : (with_gquad) ? energy_of_gquad_struct_par(sequence, structure, P, 0) : energy_of_struct_par(sequence, structure, P, 0);
        }
        /* update the density-of-states histogram (0.1 kcal/mol bins) */
        e = (int) ((structure_energy-min_en)*10. + 0.1); /* avoid rounding errors */
        if (e>MAXDOS) e=MAXDOS;
        density_of_states[e]++;
        if (structure_energy>eprint) {
          free(structure);
        } else {
          if (!subopt_sorted && fp) {
            /* print and forget */
            if (cut_point<0)
              fprintf(fp, "%s %6.2f\n", structure, structure_energy);
            else {
              char * outstruc;
              /*make ampersand seperated output if 2 sequences*/
              outstruc=costring(structure);
              fprintf(fp, "%s %6.2f\n", outstruc, structure_energy);
              free(outstruc);
            }
            free(structure);
          }
          else {
            /* store solution, growing the array geometrically */
            if (n_sol+1 == max_sol) {
              max_sol *= 2;
              SolutionList = (SOLUTION *)
                xrealloc(SolutionList, max_sol*sizeof(SOLUTION));
            }
            SolutionList[n_sol].energy =  structure_energy;
            SolutionList[n_sol++].structure = structure;
          }
        }
      }
    else {
      /* get (and remove) next interval of state to analyze */
      interval = pop(state->Intervals);
      scan_interval(interval->i, interval->j, interval->array_flag, state);
      free_interval_node(interval);        /* free the current interval */
    }
    free_state_node(state);                /* free the current state */
  } /* end of while (1) */
  /* free arrays left over from cofold() */
  free(S); free(S1);
  (circular) ? free_arrays():free_co_arrays();
  if (fp) { /* we've printed everything -- free solutions */
    SOLUTION *sol;
    for (sol=SolutionList; sol->structure != NULL; sol++)
      free(sol->structure);
    free(SolutionList);
    SolutionList = NULL;
  }
  return SolutionList;
}
PRIVATE void
scan_interval(int i, int j, int array_flag, STATE * state)
{
/* real backtrack routine */
/* array_flag = 0: trace back in f5-array */
/* array_flag = 1: trace back in fML-array */
/* array_flag = 2: trace back in repeat() */
/* array_flag = 3: trace back in fM1-array */
STATE *new_state, *temp_state;
INTERVAL *new_interval;
register int k, fi, cij;
register int type;
register int dangle_model = P->model_details.dangles;
register int noGUclosure = P->model_details.noGUclosure;
register int noLP = P->model_details.noLP;
best_energy = best_attainable_energy(state); /* .. on remaining intervals */
nopush = true;
if ((i > 1) && (!array_flag))
nrerror ("Error while backtracking!");
if (j < i + turn + 1 && SAME_STRAND(i,j)) { /* minimal structure element */
if (nopush)
push_back(state);
return;
}
/* 13131313131313131313131313131313131313131313131313131313131313131313131 */
if (array_flag == 3 || array_flag == 1) {
/* array_flag = 3: interval i,j was generated during */
/* a multiloop decomposition using array fM1 in repeat() */
/* or in this block */
/* array_flag = 1: interval i,j was generated from a */
/* stack, bulge, or internal loop in repeat() */
/* or in this block */
if (array_flag == 3)
fi = fM1[indx[j-1] + i] + P->MLbase;
else
fi = fML[indx[j-1] + i] + P->MLbase;
if ((fi + best_energy <= threshold)&&(SAME_STRAND(j-1,j))) {
/* no basepair, nibbling of 3'-end */
new_state = copy_state(state);
new_interval = make_interval(i, j-1, array_flag);
push(new_state->Intervals, new_interval);
new_state->partial_energy += P->MLbase;
/* new_state->best_energy = fi + best_energy; */
push(Stack, new_state);
}
type = ptype[indx[j]+i];
if (type) { /* i,j may pair */
if(dangle_model)
element_energy = E_MLstem(type,
(((i > 1)&&(SAME_STRAND(i-1,i))) || circular) ? S1[i-1] : -1,
(((j < length)&&(SAME_STRAND(j,j+1))) || circular) ? S1[j+1] : -1,
P);
else
element_energy = E_MLstem(type, -1, -1, P);
cij = c[indx[j] + i] + element_energy;
if (cij + best_energy <= threshold)
repeat(i, j, state, element_energy, 0);
} else if (with_gquad){
element_energy = E_MLstem(0, -1, -1, P);
cij = ggg[indx[j] + i] + element_energy;
if(cij + best_energy <= threshold)
repeat_gquad(i, j, state, element_energy, 0);
}
} /* array_flag == 3 || array_flag == 1 */
/* 11111111111111111111111111111111111111111111111111111111111111111111111 */
if (array_flag == 1) {
/* array_flag = 1: interval i,j was generated from a */
/* stack, bulge, or internal loop in repeat() */
/* or in this block */
int stopp;
if ((SAME_STRAND(i-1,i))&&(SAME_STRAND(j,j+1))) { /*backtrack in FML only if multiloop is possible*/
for ( k = i+turn+1 ; k <= j-1-turn ; k++) {
/* Multiloop decomposition if i,j contains more than 1 stack */
if(with_gquad){
if(SAME_STRAND(k, k+1)){
element_energy = E_MLstem(0, -1, -1, P);
if(fML[indx[k]+i] + ggg[indx[j] + k + 1] + element_energy + best_energy <= threshold){
temp_state = copy_state (state);
new_interval = make_interval (i, k, 1);
push (temp_state->Intervals, new_interval);
repeat_gquad(k+1, j, temp_state, element_energy, fML[indx[k]+i]);
free_state_node(temp_state);
}
}
}
type = ptype[indx[j]+k+1];
if (type==0) continue;
if(dangle_model)
element_energy = E_MLstem(type,
(SAME_STRAND(i-1,i)) ? S1[k] : -1,
(SAME_STRAND(j,j+1)) ? S1[j+1] : -1,
P);
else
element_energy = E_MLstem(type, -1, -1, P);
if (SAME_STRAND(k,k+1)) {
if (fML[indx[k]+i] + c[indx[j] + k+1] +
element_energy + best_energy <= threshold) {
temp_state = copy_state (state);
new_interval = make_interval (i, k, 1);
push (temp_state->Intervals, new_interval);
repeat(k+1, j, temp_state, element_energy, fML[indx[k]+i]);
free_state_node(temp_state);
}
}
}
}
stopp=(cut_point>0)? (cut_point-2):(length); /*if cut_point -1: k on cut, => no ml*/
stopp=MIN2(stopp, j-1-turn);
if (i>cut_point) stopp=j-1-turn;
else if (i==cut_point) stopp=0; /*not a multi loop*/
for (k = i ; k <= stopp; k++) {
/* Multiloop decomposition if i,j contains only 1 stack */
if(with_gquad){
element_energy = E_MLstem(0, -1, -1, P) + P->MLbase*(k-i+1);
if(ggg[indx[j] + k + 1] + element_energy + best_energy <= threshold)
repeat_gquad(k+1, j, state, element_energy, 0);
}
type = ptype[indx[j]+k+1];
if (type==0) continue;
if(dangle_model)
element_energy = E_MLstem(type,
(SAME_STRAND(k-1,k)) ? S1[k] : -1,
(SAME_STRAND(j,j+1)) ? S1[j+1] : -1,
P);
else
element_energy = E_MLstem(type, -1, -1, P);
element_energy += P->MLbase*(k-i+1);
if (c[indx[j]+k+1] + element_energy + best_energy <= threshold)
repeat(k+1, j, state, element_energy, 0);
}
} /* array_flag == 1 */
/* 2222222222222222222222222222222222222222222222222222222222222222222222 */
if (array_flag == 2)
{
/* array_flag = 2: interval i,j was generated from a */
/* stack, bulge, or internal loop in repeat() */
repeat(i, j, state, 0, 0);
if (nopush){
if (!noLP){
fprintf(stderr, "%d,%d", i, j);
fprintf(stderr, "Oops, no solution in repeat!\n");
}
}
return;
}
/* 0000000000000000000000000000000000000000000000000000000000000000000000 */
if ((array_flag == 0) && !circular)
{
/* array_flag = 0: interval i,j was found while */
/* tracing back through f5-array and c-array */
/* or within this block */
if (f5[j-1] + best_energy <= threshold) {
/* no basepair, nibbling of 3'-end */
new_state = copy_state(state);
new_interval = make_interval(i, j-1 , 0);
push(new_state->Intervals, new_interval);
/* new_state->best_energy = f5[j-1] + best_energy; */
push(Stack, new_state);
}
for (k = j-turn-1; k > 1; k--) {
if(with_gquad){
if(SAME_STRAND(k,j)){
element_energy = 0;
if(f5[k-1] + ggg[indx[j]+k] + element_energy + best_energy <= threshold){
temp_state = copy_state(state);
new_interval = make_interval(1,k-1,0);
push(temp_state->Intervals, new_interval);
/* backtrace the quadruplex */
repeat_gquad(k, j, temp_state, element_energy, f5[k-1]);
free_state_node(temp_state);
}
}
}
type = ptype[indx[j]+k];
if (type==0) continue;
/* k and j pair */
if(dangle_model)
element_energy = E_ExtLoop(type,
(SAME_STRAND(k-1,k)) ? S1[k-1] : -1,
((j < length)&&(SAME_STRAND(j,j+1))) ? S1[j+1] : -1,
P);
else
element_energy = E_ExtLoop(type, -1, -1, P);
if (!(SAME_STRAND(k,j)))/*&&(state->is_duplex==0))*/ {
element_energy+=P->DuplexInit;
/*state->is_duplex=1;*/
}
if (f5[k-1] + c[indx[j]+k] + element_energy + best_energy <= threshold)
{
temp_state = copy_state(state);
new_interval = make_interval(1, k-1, 0);
push(temp_state->Intervals, new_interval);
repeat(k, j, temp_state, element_energy, f5[k-1]);
free_state_node(temp_state);
}
}
type = ptype[indx[j]+1];
if (type) {
if (dangle_model && (j < length)&&(SAME_STRAND(j,j+1)))
element_energy = E_ExtLoop(type, -1, S1[j+1], P);
else
element_energy = E_ExtLoop(type, -1, -1, P);
if (!(SAME_STRAND(1,j))) element_energy+=P->DuplexInit;
if (c[indx[j]+1] + element_energy + best_energy <= threshold)
repeat(1, j, state, element_energy, 0);
} else if (with_gquad){
if(SAME_STRAND(k,j)){
element_energy = 0;
if(ggg[indx[j]+1] + element_energy + best_energy <= threshold){
/* backtrace the quadruplex */
repeat_gquad(1, j, state, element_energy, 0);
}
}
}
}/* end array_flag == 0 && !circular*/
/* or do we subopt circular? */
else if(array_flag == 0){
int k, l, p, q;
/* if we've done everything right, we will never reach this case more than once */
/* right after the initilization of the stack with ([1,n], empty, 0) */
/* lets check, if we can have an open chain without breaking the threshold */
/* this is an ugly work-arround cause in case of an open chain we do not have to */
/* backtrack anything further... */
if(0 <= threshold){
new_state = copy_state(state);
new_interval = make_interval(1,2,0);
push(new_state->Intervals, new_interval);
new_state->partial_energy = 0;
push(Stack, new_state);
}
/* ok, lets check if we can do an exterior hairpin without breaking the threshold */
/* best energy should be 0 if we are here */
if(FcH + best_energy <= threshold){
/* lets search for all exterior hairpin cases, that fit into our threshold barrier */
/* we use index k,l to avoid confusion with i,j index of our state... */
/* if we reach here, i should be 1 and j should be n respectively */
for(k=i; k<j; k++)
for (l=k+turn+1; l <= j; l++){
int kl, type, u, tmpE, no_close;
u = j-l + k-1; /* get the hairpin loop length */
if(u<turn) continue;
kl = indx[l]+k; /* just confusing these indices ;-) */
type = ptype[kl];
no_close = ((type==3)||(type==4))&&noGUclosure;
type=rtype[type];
if (!type) continue;
if (!no_close){
/* now lets have a look at the hairpin energy */
char loopseq[10];
if (u<7){
strcpy(loopseq , sequence+l-1);
strncat(loopseq, sequence, k);
}
tmpE = E_Hairpin(u, type, S1[l+1], S1[k-1], loopseq, P);
}
if(c[kl] + tmpE + best_energy <= threshold){
/* what we really have to do is something like this, isn't it? */
/* we have to create a new state, with interval [k,l], then we */
/* add our loop energy as initial energy of this state and put */
/* the state onto the stack R... for further refinement... */
/* we also denote this new interval to be scanned in C */
new_state = copy_state(state);
new_interval = make_interval(k,l,2);
push(new_state->Intervals, new_interval);
/* hopefully we add this energy in the right way... */
new_state->partial_energy += tmpE;
push(Stack, new_state);
}
}
}
/* now lets see, if we can do an exterior interior loop without breaking the threshold */
if(FcI + best_energy <= threshold){
/* now we search for our exterior interior loop possibilities */
for(k=i; k<j; k++)
for (l=k+turn+1; l <= j; l++){
int kl, type, tmpE;
kl = indx[l]+k; /* just confusing these indices ;-) */
type = ptype[kl];
type=rtype[type];
if (!type) continue;
for (p = l+1; p < j ; p++){
int u1, qmin;
u1 = p-l-1;
if (u1+k-1>MAXLOOP) break;
qmin = u1+k-1+j-MAXLOOP;
if(qmin<p+turn+1) qmin = p+turn+1;
for(q = qmin; q <=j; q++){
int u2, type_2;
type_2 = rtype[ptype[indx[q]+p]];
if(!type_2) continue;
u2 = k-1 + j-q;
if(u1+u2>MAXLOOP) continue;
tmpE = E_IntLoop(u1, u2, type, type_2, S1[l+1], S1[k-1], S1[p-1], S1[q+1], P);
if(c[kl] + c[indx[q]+p] + tmpE + best_energy <= threshold){
/* ok, similar to the hairpin stuff, we add new states onto the stack R */
/* but in contrast to the hairpin decomposition, we have to add two new */
/* intervals, enclosed by k,l and p,q respectively and we also have to */
/* add the partial energy, that comes from the exterior interior loop */
new_state = copy_state(state);
new_interval = make_interval(k, l, 2);
push(new_state->Intervals, new_interval);
new_interval = make_interval(p,q,2);
push(new_state->Intervals, new_interval);
new_state->partial_energy += tmpE;
push(Stack, new_state);
}
}
}
}
}
/* and last but not least, we have a look, if we can do an exterior multiloop within the energy threshold */
if(FcM <= threshold){
/* this decomposition will be somehow more complicated...so lets see what we do here... */
/* first we want to find out which split inidices we can use without exceeding the threshold */
int tmpE2;
for (k=turn+1; k<j-2*turn; k++){
tmpE2 = fML[indx[k]+1]+fM2[k+1]+P->MLclosing;
if(tmpE2 + best_energy <= threshold){
/* grmpfh, we have found a possible split index k so we have to split fM2 and fML now */
/* lets do it first in fM2 anyway */
for(l=k+turn+2; l<j-turn-1; l++){
tmpE2 = fM1[indx[l]+k+1] + fM1[indx[j]+l+1];
if(tmpE2 + fML[indx[k]+1] + P->MLclosing <= threshold){
/* we've (hopefully) found a valid decomposition of fM2 and therefor we have all */
/* three intervals for our new state to be pushed on stack R */
new_state = copy_state(state);
/* first interval leads for search in fML array */
new_interval = make_interval(1, k, 1);
push(new_state->Intervals, new_interval);
/* next, we have the first interval that has to be traced in fM1 */
new_interval = make_interval(k+1, l, 3);
push(new_state->Intervals, new_interval);
/* and the last of our three intervals is also one to be traced within fM1 array... */
new_interval = make_interval(l+1, j, 3);
push(new_state->Intervals, new_interval);
/* mmh, we add the energy for closing the multiloop now... */
new_state->partial_energy += P->MLclosing;
/* next we push our state onto the R stack */
push(Stack, new_state);
}
/* else we search further... */
}
/* ok, we have to decompose fML now... */
}
}
}
} /* thats all folks for the circular case... */
/* 44444444444444444444444444444444444444444444444444444444444444 */
if (array_flag == 4) {
/* array_flag = 4: interval i,j was found while */
/* tracing back through fc-array smaller than than cut_point*/
/* or within this block */
if (fc[i+1] + best_energy <= threshold) {
/* no basepair, nibbling of 5'-end */
new_state = copy_state(state);
new_interval = make_interval(i+1, j , 4);
push(new_state->Intervals, new_interval);
push(Stack, new_state);
}
for (k = i+TURN+1; k < j; k++) {
if(with_gquad){
if(fc[k+1] + ggg[indx[k]+i] + best_energy <= threshold){
temp_state = copy_state(state);
new_interval = make_interval(k+1,j, 4);
push(temp_state->Intervals, new_interval);
repeat_gquad(i, k, temp_state, 0, fc[k+1]);
free_state_node(temp_state);
}
}
type = ptype[indx[k]+i];
if (type==0) continue;
/* k and j pair */
if (dangle_model)
element_energy = E_ExtLoop(type, (i > 1) ? S1[i-1]: -1, S1[k+1], P);
else /* no dangles */
element_energy = E_ExtLoop(type, -1, -1, P);
if (fc[k+1] + c[indx[k]+i] + element_energy + best_energy <= threshold) {
temp_state = copy_state(state);
new_interval = make_interval(k+1,j, 4);
push(temp_state->Intervals, new_interval);
repeat(i, k, temp_state, element_energy, fc[k+1]);
free_state_node(temp_state);
}
}
type = ptype[indx[j]+i];
if (type) {
if (dangle_model)
element_energy = E_ExtLoop(type, (i>1) ? S1[i-1] : -1, -1, P);
else
element_energy = E_ExtLoop(type, -1, -1, P);
if (c[indx[cut_point-1]+i] + element_energy + best_energy <= threshold)
repeat(i, cut_point-1, state, element_energy, 0);
} else if(with_gquad){
if(ggg[indx[cut_point -1] + i] + best_energy <= threshold)
repeat_gquad(i, cut_point - 1, state, 0, 0);
}
} /* array_flag == 4 */
/*55555555555555555555555555555555555555555555555555555555555555555555555*/
if (array_flag == 5) {
/* array_flag = 5: interval cut_point=i,j was found while */
/* tracing back through fc-array greater than cut_point */
/* or within this block */
if (fc[j-1] + best_energy <= threshold) {
/* no basepair, nibbling of 3'-end */
new_state = copy_state(state);
new_interval = make_interval(i, j-1 , 5);
push(new_state->Intervals, new_interval);
push(Stack, new_state);
}
for (k = j-TURN-1; k > i; k--) {
if(with_gquad){
if(fc[k-1] + ggg[indx[j] + k] + best_energy <= threshold){
temp_state = copy_state(state);
new_interval = make_interval(i, k-1, 5);
push(temp_state->Intervals, new_interval);
repeat_gquad(k, j, temp_state, 0, fc[k-1]);
free_state_node(temp_state);
}
}
type = ptype[indx[j]+k];
if (type==0) continue;
element_energy = 0;
/* k and j pair */
if (dangle_model)
element_energy = E_ExtLoop(type, S1[k-1], (j < length) ? S1[j+1] : -1, P);
else
element_energy = E_ExtLoop(type, -1, -1, P);
if (fc[k-1] + c[indx[j]+k] + element_energy + best_energy <= threshold) {
temp_state = copy_state(state);
new_interval = make_interval(i, k-1, 5);
push(temp_state->Intervals, new_interval);
repeat(k, j, temp_state, element_energy, fc[k-1]);
free_state_node(temp_state);
}
}
type = ptype[indx[j]+i];
if (type) {
if(dangle_model)
element_energy = E_ExtLoop(type, -1, (j<length) ? S1[j+1] : -1, P);
if (c[indx[j]+cut_point] + element_energy + best_energy <= threshold)
repeat(cut_point, j, state, element_energy, 0);
} else if (with_gquad){
if(ggg[indx[j] + cut_point] + best_energy <= threshold)
repeat_gquad(cut_point, j, state, 0, 0);
}
} /* array_flag == 5 */
if (array_flag == 6) { /* we have a gquad */
repeat_gquad(i, j, state, 0, 0);
if (nopush){
fprintf(stderr, "%d,%d", i, j);
fprintf(stderr, "Oops, no solution in gquad-repeat!\n");
}
return;
}
if (nopush)
push_back(state);
return;
}
/*---------------------------------------------------------------------------*/
PRIVATE void
repeat_gquad( int i,
int j,
STATE *state,
int part_energy,
int temp_energy){
/* find all gquads that fit into the energy range and the interval [i,j] */
/* part_energy: energy of the structural element that generated this      */
/* interval; temp_energy: energy carried over from an un-pushed interval. */
/* Both are temporarily folded into the file-global best_energy so the    */
/* threshold test below sees the complete partial energy; they are        */
/* subtracted again before returning.  Every matching quadruplex is       */
/* pushed onto the global Stack as a new STATE.                           */
STATE *new_state;
best_energy += part_energy; /* energy of current structural element */
best_energy += temp_energy; /* energy from unpushed interval */
/* a gquad must lie on a single strand; skip intervals spanning the cut */
if(SAME_STRAND(i,j)){
element_energy = ggg[indx[j] + i];
if(element_energy + best_energy <= threshold){
int cnt;
int *L;  /* per-hit gquad parameters, -1 terminated (presumably layer counts — confirm against get_gquad_pattern_exhaustive) */
int *l;  /* three ints per hit, see &(l[3*cnt]) below (presumably linker lengths) */
/* find out how many gquads we might expect in the interval [i,j] */
int num_gquads = get_gquad_count(S1, i, j);
num_gquads++; /* one extra slot for the -1 terminator */
L = (int *)space(sizeof(int) * num_gquads);
l = (int *)space(sizeof(int) * num_gquads * 3);
L[0] = -1;
get_gquad_pattern_exhaustive(S1, i, j, P, L, l, threshold - best_energy);
/* one new state per reported gquad pattern */
for(cnt = 0; L[cnt] != -1; cnt++){
new_state = copy_state(state);
make_gquad(i, L[cnt], &(l[3*cnt]), new_state);
new_state->partial_energy += part_energy;
new_state->partial_energy += element_energy;
/* new_state->best_energy =
hairpin[unpaired] + element_energy + best_energy; */
push(Stack, new_state);
}
free(L);
free(l);
}
}
/* restore the global threshold accumulator */
best_energy -= part_energy;
best_energy -= temp_energy;
return;
}
PRIVATE void
repeat(int i, int j, STATE * state, int part_energy, int temp_energy)
{
/* routine to find stacks, bulges, internal loops and multiloops */
/* within interval closed by basepair i,j */
/* part_energy: energy of the element that enclosed (i,j);              */
/* temp_energy: energy of an interval that was not pushed separately.   */
/* Both are temporarily folded into the file-global best_energy for the */
/* threshold tests and removed again before returning.  Every viable    */
/* decomposition is pushed as a new STATE onto the global Stack.        */
STATE *new_state;
INTERVAL *new_interval;
register int k, p, q, energy, new;
register int mm;
register int no_close, type, type_2;
int rt;
int dangle_model = P->model_details.dangles;
int noLP = P->model_details.noLP;
int noGUclosure = P->model_details.noGUclosure;
type = ptype[indx[j]+i];
if (type==0) fprintf(stderr, "repeat: Warning: %d %d can't pair\n", i,j);
no_close = (((type == 3) || (type == 4)) && noGUclosure);
if (noLP) /* always consider the structure with additional stack */
if ((i+turn+2<j) && ((type_2 = ptype[indx[j-1]+i+1]))) {
new_state = copy_state(state);
make_pair(i, j, new_state);
make_pair(i+1, j-1, new_state);
new_interval = make_interval(i+1, j-1, 2);
push(new_state->Intervals, new_interval);
/* BUGFIX: `energy` was read uninitialized below whenever the stacked */
/* pair spans the cut point (SAME_STRAND test fails); default it to 0 */
energy = 0;
if(SAME_STRAND(i,i+1) && SAME_STRAND(j-1,j))
energy = E_IntLoop(0, 0, type, rtype[type_2],S1[i+1],S1[j-1],S1[i+1],S1[j-1], P);
new_state->partial_energy += part_energy;
new_state->partial_energy += energy;
/* new_state->best_energy = new + best_energy; */
push(Stack, new_state);
if (i==1 || state->structure[i-2]!='(' || state->structure[j]!=')')
/* adding a stack is the only possible structure */
return;
}
best_energy += part_energy; /* energy of current structural element */
best_energy += temp_energy; /* energy from unpushed interval */
/* interior-loop decomposition: enumerate all inner pairs (p,q) with */
/* loop sizes bounded by MAXLOOP */
for (p = i + 1; p <= MIN2 (j-2-turn, i+MAXLOOP+1); p++) {
int minq = j-i+p-MAXLOOP-2;
if (minq<p+1+turn) minq = p+1+turn;
for (q = j - 1; q >= minq; q--) {
if ((noLP) && (p==i+1) && (q==j-1)) continue;
type_2 = ptype[indx[q]+p];
if (type_2==0) continue;
if (noGUclosure)
if (no_close||(type_2==3)||(type_2==4))
if ((p>i+1)||(q<j-1)) continue; /* continue unless stack */
if (SAME_STRAND(i,p) && SAME_STRAND(q,j)) {
energy = E_IntLoop(p-i-1, j-q-1, type, rtype[type_2],
S1[i+1],S1[j-1],S1[p-1],S1[q+1], P);
new = energy + c[indx[q]+p];
if (new + best_energy <= threshold) {
/* stack, bulge, or interior loop */
new_state = copy_state(state);
make_pair(i, j, new_state);
make_pair(p, q, new_state);
new_interval = make_interval(p, q, 2);
push(new_state->Intervals, new_interval);
new_state->partial_energy += part_energy;
new_state->partial_energy += energy;
/* new_state->best_energy = new + best_energy; */
push(Stack, new_state);
}
}/*end of if block */
} /* end of q-loop */
} /* end of p-loop */
if (!SAME_STRAND(i,j)) { /*look in fc*/
/* (i,j) spans the cut point: decompose into the two fc intervals */
rt = rtype[type];
element_energy=0;
if (dangle_model)
element_energy = E_ExtLoop(rt, (SAME_STRAND(j-1,j)) ? S1[j-1] : -1, (SAME_STRAND(i,i+1)) ? S1[i+1] : -1, P);
else
element_energy = E_ExtLoop(rt, -1, -1, P);
if (fc[i+1] + fc[j-1] +element_energy + best_energy <= threshold)
{
INTERVAL *interval1, *interval2;
new_state = copy_state(state);
interval1 = make_interval(i+1, cut_point-1, 4);
interval2 = make_interval(cut_point, j-1, 5);
if (cut_point-i < j-cut_point) { /* push larger interval first */
push(new_state->Intervals, interval1);
push(new_state->Intervals, interval2);
} else {
push(new_state->Intervals, interval2);
push(new_state->Intervals, interval1);
}
make_pair(i, j, new_state);
new_state->partial_energy += part_energy;
new_state->partial_energy += element_energy;
push(Stack, new_state);
}
}
/* multiloop decomposition: split [i+1,j-1] at k into an fML part and an */
/* fM1 part, adding the closing + stem energies */
mm = P->MLclosing;
rt = rtype[type];
for (k = i + 1 + turn; k <= j - 2 - turn; k++) {
/* multiloop decomposition */
element_energy = mm;
if (dangle_model)
element_energy = E_MLstem(rt, S1[j-1], S1[i+1], P) + mm;
else
element_energy = E_MLstem(rt, -1, -1, P) + mm;
if ((fML[indx[k] + i+1] + fM1[indx[j-1] + k+1] +
element_energy + best_energy) <= threshold)
{
INTERVAL *interval1, *interval2;
new_state = copy_state(state);
interval1 = make_interval(i+1, k, 1);
interval2 = make_interval(k+1, j-1, 3);
if (k-i+1 < j-k-2) { /* push larger interval first */
push(new_state->Intervals, interval1);
push(new_state->Intervals, interval2);
} else {
push(new_state->Intervals, interval2);
push(new_state->Intervals, interval1);
}
make_pair(i, j, new_state);
new_state->partial_energy += part_energy;
new_state->partial_energy += element_energy;
/* new_state->best_energy = fML[indx[k] + i+1] + fM1[indx[j-1] + k+1]
+ element_energy + best_energy; */
push(Stack, new_state);
}
} /* end of k-loop */
if (SAME_STRAND(i,j)) {
/* hairpin closed by (i,j); disallowed GU closures are marked FORBIDDEN */
if (no_close) element_energy = FORBIDDEN;
else
element_energy = E_Hairpin(j-i-1, type, S1[i+1], S1[j-1], sequence+i-1, P);
if (element_energy + best_energy <= threshold) {
/* hairpin structure */
new_state = copy_state(state);
make_pair(i, j, new_state);
new_state->partial_energy += part_energy;
new_state->partial_energy += element_energy;
/* new_state->best_energy =
hairpin[unpaired] + element_energy + best_energy; */
push(Stack, new_state);
}
if(with_gquad){
/* now we have to find all loops where (i,j) encloses a gquad in an interior loops style */
int cnt, *p, *q, *en; /* note: these shadow the outer p, q */
p = q = en = NULL;
en = E_GQuad_IntLoop_exhaustive(i, j, &p, &q, type, S1, ggg, threshold - best_energy, indx, P);
for(cnt = 0; p[cnt] != -1; cnt++){
new_state = copy_state(state);
make_pair(i, j, new_state);
new_interval = make_interval(p[cnt], q[cnt], 6);
push(new_state->Intervals, new_interval);
new_state->partial_energy += part_energy;
new_state->partial_energy += en[cnt];
/* new_state->best_energy = new + best_energy; */
push(Stack, new_state);
}
free(en);
free(p);
free(q);
}
}
/* restore the global threshold accumulator */
best_energy -= part_energy;
best_energy -= temp_energy;
return;
}
PRIVATE char *costring(char *string)
{
  /* Return a freshly allocated copy of `string`; when the global cut_point
   * marks a two-strand sequence, an '&' spacer is inserted at that position.
   * The caller owns (and must free) the returned buffer. */
  int n = strlen(string);
  /* one extra slot for the '&' spacer plus the terminating NUL; space()
     hands back zero-filled memory, so the copy is always terminated */
  char *out = (char *)space((n+2) * sizeof(char));

  /* single strand: plain copy */
  if (cut_point<=0) {
    (void) strncpy(out, string, n);
    return out;
  }

  /* first strand, the '&' spacer, then the remainder of the sequence */
  (void) strncpy(out, string, cut_point-1);
  out[cut_point-1] = '&';
  (void) strcat(out, string+cut_point-1);
  return out;
}
/*---------------------------------------------------------------------------*/
/* Well, that is the end!----------------------------------------------------*/
/*---------------------------------------------------------------------------*/
|
3d25pt_var.c | /*
* Order-1, 3D 25 point stencil with axis-symmetric variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Compute *result = *x - *y as a struct timeval.
   * NOTE: *y is normalized (mutated) as a scratch value in the process.
   * Returns 1 if the difference is negative, 0 otherwise. */

  /* Borrow from the seconds field when x's microseconds are smaller. */
  if (x->tv_usec < y->tv_usec)
  {
    int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * borrow;
    y->tv_sec  += borrow;
  }
  /* Carry into the seconds field when the usec difference exceeds 1s. */
  if (x->tv_usec - y->tv_usec > 1000000)
  {
    int carry = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * carry;
    y->tv_sec  -= carry;
  }

  /* After normalization tv_usec is guaranteed non-negative. */
  result->tv_sec  = x->tv_sec  - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;

  /* Negative iff x predates the (normalized) y. */
  return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
  /* Driver for an order-4 (25-point) 3D stencil with axis-symmetric
   * variable coefficients.  Usage: <prog> Nx Ny Nz [Nt].  The kernel is
   * run TESTS times; each run is timed and the minimum time is kept. */
  int t, i, j, k, m, test;
  /* Defaults keep the sizes defined when too few arguments are given;
   * the original code read them uninitialized in that case (UB). */
  int Nx = 40, Ny = 40, Nz = 40, Nt = 10;
  if (argc > 3) {
    /* +8 accounts for the 4-cell halo required on each side */
    Nx = atoi(argv[1])+8;
    Ny = atoi(argv[2])+8;
    Nz = atoi(argv[3])+8;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);

  // allocate the arrays: A holds two time levels, coef the 13 stencil coefficients
  double ****A = (double ****) malloc(sizeof(double***)*2);
  for(m=0; m<2; m++){
    A[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      A[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0; j<Ny; j++){
        A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }
  double ****coef = (double ****) malloc(sizeof(double***)*13);
  for(m=0; m<13; m++){
    coef[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0; j<Ny; j++){
        coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }

  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 4;
  tile_size[1] = 4;
  tile_size[2] = 4;
  tile_size[3] = 64;
  tile_size[4] = -1;

  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff = 1.e100;

  const int BASE = 1024;

  // initialize variables
  // BUGFIX: loops start at 0 (not 1) so the halo cells read by the stencil
  // (e.g. index i-4 == 0 when i == 4) are defined; both time levels of A
  // get identical data because A[(t)%2] halo values are read at every step
  // while only the interior is ever written.
  srand(42);
  for (i = 0; i < Nz; i++) {
    for (j = 0; j < Ny; j++) {
      for (k = 0; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
        A[1][i][j][k] = A[0][i][j][k];
      }
    }
  }
  for (m = 0; m < 13; m++) {
    for (i = 0; i < Nz; i++) {
      for (j = 0; j < Ny; j++) {
        for (k = 0; k < Nx; k++) {
          coef[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif
  (void) num_threads; /* presumably consumed by PRINT_RESULTS — confirm */

  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);
    // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
    for (t = 0; t < Nt; t++) {
      for (i = 4; i < Nz-4; i++) {
        for (j = 4; j < Ny-4; j++) {
          for (k = 4; k < Nx-4; k++) {
            A[(t+1)%2][i][j][k] =
              coef[0][i][j][k] * A[(t)%2][i  ][j  ][k  ] +
              coef[1][i][j][k] * (A[(t)%2][i-1][j  ][k  ] + A[(t)%2][i+1][j  ][k  ]) +
              coef[2][i][j][k] * (A[(t)%2][i  ][j-1][k  ] + A[(t)%2][i  ][j+1][k  ]) +
              coef[3][i][j][k] * (A[(t)%2][i  ][j  ][k-1] + A[(t)%2][i  ][j  ][k+1]) +
              coef[4][i][j][k] * (A[(t)%2][i-2][j  ][k  ] + A[(t)%2][i+2][j  ][k  ]) +
              coef[5][i][j][k] * (A[(t)%2][i  ][j-2][k  ] + A[(t)%2][i  ][j+2][k  ]) +
              coef[6][i][j][k] * (A[(t)%2][i  ][j  ][k-2] + A[(t)%2][i  ][j  ][k+2]) +
              coef[7][i][j][k] * (A[(t)%2][i-3][j  ][k  ] + A[(t)%2][i+3][j  ][k  ]) +
              coef[8][i][j][k] * (A[(t)%2][i  ][j-3][k  ] + A[(t)%2][i  ][j+3][k  ]) +
              coef[9][i][j][k] * (A[(t)%2][i  ][j  ][k-3] + A[(t)%2][i  ][j  ][k+3]) +
              coef[10][i][j][k]* (A[(t)%2][i-4][j  ][k  ] + A[(t)%2][i+4][j  ][k  ]) +
              coef[11][i][j][k]* (A[(t)%2][i  ][j-4][k  ] + A[(t)%2][i  ][j+4][k  ]) +
              coef[12][i][j][k]* (A[(t)%2][i  ][j  ][k-4] + A[(t)%2][i  ][j  ][k+4]) ;
          }
        }
      }
    }
#pragma endscop
    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    /* BUGFIX: this file defines MIN, not min (compile error otherwise) */
    min_tdiff = MIN(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }

  PRINT_RESULTS(4, "variable axis-symmetric")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  // Free allocated arrays — including the top-level pointers A, coef and
  // tile_size, which the original leaked.
  for(i=0; i<Nz; i++){
    for(j=0; j<Ny; j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  free(A);
  for(m=0; m<13; m++){
    for(i=0; i<Nz; i++){
      for(j=0; j<Ny; j++){
        free(coef[m][i][j]);
      }
      free(coef[m][i]);
    }
    free(coef[m]);
  }
  free(coef);
  free(tile_size);
  return 0;
}
|
nstream.c | /*
Copyright (c) 2013, Intel Corporation
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* Neither the name of Intel Corporation nor the names of its
contributors may be used to endorse or promote products
derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
/* Copyright 1991-2013: John D. McCalpin */
/*-----------------------------------------------------------------------*/
/* License: */
/* 1. You are free to use this program and/or to redistribute */
/* this program. */
/* 2. You are free to modify this program for your own use, */
/* including commercial use, subject to the publication */
/* restrictions in item 3. */
/* 3. You are free to publish results obtained from running this */
/* program, or from works that you derive from this program, */
/* with the following limitations: */
/* 3a. In order to be referred to as "STREAM benchmark results", */
/* published results must be in conformance to the STREAM */
/* Run Rules, (briefly reviewed below) published at */
/* http://www.cs.virginia.edu/stream/ref.html */
/* and incorporated herein by reference. */
/* As the copyright holder, John McCalpin retains the */
/* right to determine conformity with the Run Rules. */
/* 3b. Results based on modified source code or on runs not in */
/* accordance with the STREAM Run Rules must be clearly */
/* labelled whenever they are published. Examples of */
/* proper labelling include: */
/* "tuned STREAM benchmark results" */
/* "based on a variant of the STREAM benchmark code" */
/* Other comparable, clear, and reasonable labelling is */
/* acceptable. */
/* 3c. Submission of results to the STREAM benchmark web site */
/* is encouraged, but not required. */
/* 4. Use of this program or creation of derived works based on this */
/* program constitutes acceptance of these licensing restrictions. */
/* 5. Absolutely no warranty is expressed or implied. */
/*-----------------------------------------------------------------------*/
/**********************************************************************
NAME: nstream
PURPOSE: To compute memory bandwidth when adding a vector of a given
number of double precision values to the scalar multiple of
another vector of the same length, and storing the result in
a third vector.
USAGE: The program takes as input the number of iterations to loop
over the triad vectors, the length of the vectors, and the
offset between vectors.
<progname> <# iterations> <vector length> <offset>
The output consists of diagnostics to make sure the
algorithm worked, and of timing statistics.
FUNCTIONS CALLED:
Other than MPI or standard C functions, the following
external functions are used in this program:
wtime()
bail_out()
checkTRIADresults()
NOTES: Bandwidth is determined as the number of words read, plus the
number of words written, times the size of the words, divided
by the execution time. For a vector length of N, the total
number of words read and written is 4*N*sizeof(double).
HISTORY: This code is loosely based on the Stream benchmark by John
McCalpin, but does not follow all the Stream rules. Hence,
reported results should not be associated with Stream in
external publications
REVISION: Modified by Rob Van der Wijngaart, December 2005, to
parameterize vector size and offsets through compiler flags.
Also removed all Stream cases except TRIAD.
REVISION: Modified by Rob Van der Wijngaart, March 2006, to handle MPI.
REVISION: Modified by Rob Van der Wijngaart, May 2006, to introduce
dependence between successive triad operations. This is
necessary to avoid dead code elimination
REVISION: Modified by Rob Van der Wijngaart, November 2014, replaced
timing of individual loop iterations with timing of overall
loop; also replaced separate loop establishing dependence
between iterations (must now be included in timing) with
accumulation: a[] += b[] + scalar*c[]
**********************************************************************/
#include <par-res-kern_general.h>
#include <par-res-kern_mpiomp.h>
#define N MAXLENGTH
#if STATIC_ALLOCATION
/* use static to make sure it goes on the heap, not the stack */
static double a[N];
#else
static double * RESTRICT a;
#endif
static double * RESTRICT b;
static double * RESTRICT c;
#define SCALAR 3.0
static int checkTRIADresults(int, long int);
int main(int argc, char **argv)
{
/* MPI+OpenMP STREAM-triad driver: every rank repeatedly computes
   a[] += b[] + scalar*c[] over its local slice; the slowest rank's
   time (MPI_MAX reduction) determines the reported bandwidth. */
long j, iter; /* dummies */
double scalar; /* constant used in Triad operation */
int iterations; /* number of times vector loop gets repeated */
long length, /* vector length per rank */
total_length, /* total vector length */
offset; /* offset between vectors a and b, and b and c */
double bytes; /* memory IO size */
size_t space; /* memory used for a single vector */
double local_nstream_time,/* timing parameters */
nstream_time,
avgtime;
int nthread_input; /* thread parameters */
int Num_procs, /* number of ranks */
my_ID, /* rank of calling rank */
root=0; /* ID of master rank */
int error=0; /* error flag for individual rank */
/**********************************************************************************
* process and test input parameters
***********************************************************************************/
MPI_Init(&argc,&argv);
MPI_Comm_size(MPI_COMM_WORLD,&Num_procs);
MPI_Comm_rank(MPI_COMM_WORLD,&my_ID);
/* only the root rank parses and validates the command line; any failure
   sets `error`, which bail_out() below turns into a collective exit */
if (my_ID == root) {
printf("Parallel Research Kernels version %s\n", PRKVERSION);
printf("MPI+OpenMP stream triad: A = B + scalar*C\n");
if (argc != 5){
printf("Usage: %s <#threads> <#iterations> <vector length> <offset>\n", *argv);
error = 1;
goto ENDOFTESTS;
}
/* Take number of threads to request from command line */
nthread_input = atoi(*++argv);
if ((nthread_input < 1) || (nthread_input > MAX_THREADS)) {
printf("ERROR: Invalid number of threads: %d\n", nthread_input);
error = 1;
goto ENDOFTESTS;
}
iterations = atoi(*++argv);
if (iterations < 1) {
printf("ERROR: Invalid number of iterations: %d\n", iterations);
error = 1;
goto ENDOFTESTS;
}
total_length = atol(*++argv);
if (total_length < Num_procs) {
printf("ERROR: Invalid vector length: %ld\n", total_length);
error = 1;
goto ENDOFTESTS;
}
else length = total_length/Num_procs;
offset = atol(*++argv);
if (offset < 0) {
printf("ERROR: Invalid array offset: %ld\n", offset);
error = 1;
goto ENDOFTESTS;
}
#if STATIC_ALLOCATION
if ((3*length + 2*offset) > N) {
printf("ERROR: vector length/offset %ld/%ld too ", total_length, offset);
printf("large; increase MAXLENGTH in Makefile or decrease vector length\n");
error = 1;
goto ENDOFTESTS;
}
#endif
ENDOFTESTS:;
}
bail_out(error);
/* broadcast initialization data */
MPI_Bcast(&length, 1, MPI_LONG, root, MPI_COMM_WORLD);
MPI_Bcast(&offset, 1, MPI_LONG, root, MPI_COMM_WORLD);
MPI_Bcast(&iterations, 1, MPI_INT, root, MPI_COMM_WORLD);
MPI_Bcast(&nthread_input, 1, MPI_INT, root, MPI_COMM_WORLD);
omp_set_num_threads(nthread_input);
#if !STATIC_ALLOCATION
/* one contiguous slab holds all three vectors; b and c are carved out
   of it below, separated by `offset` doubles */
space = (3*length + 2*offset)*sizeof(double);
a = (double *) prk_malloc(space);
if (!a && my_ID == root) {
printf("ERROR: Could not allocate %ld bytes for vectors\n", (long int)space);
error = 1;
}
bail_out(error);
#endif
b = a + length + offset;
c = b + length + offset;
/* 4 words moved per element: read a, b, c and write a */
bytes = 4.0 * sizeof(double) * length * Num_procs;
if (my_ID == root) {
printf("Number of ranks = %d\n", Num_procs);
printf("Number of threads = %d\n", omp_get_max_threads());
printf("Vector length = %ld\n", total_length);
printf("Offset = %ld\n", offset);
printf("Number of iterations = %d\n", iterations);
}
#pragma omp parallel for simd
for (j=0; j<length; j++) {
a[j] = 0.0;
b[j] = 2.0;
c[j] = 2.0;
}
/* --- MAIN LOOP --- repeat Triad iterations times --- */
scalar = SCALAR;
/* iter == 0 is a warmup pass: the loop runs iterations+1 times but the
   timer only starts at iter == 1, so exactly `iterations` are timed */
for (iter=0; iter<=iterations; iter++) {
/* start timer after a warmup iteration */
if (iter == 1) {
MPI_Barrier(MPI_COMM_WORLD);
local_nstream_time = wtime();
}
#pragma omp parallel for simd
for (j=0; j<length; j++) a[j] += b[j]+scalar*c[j];
} /* end iterations */
/*********************************************************************
** Analyze and output results.
*********************************************************************/
local_nstream_time = wtime() - local_nstream_time;
/* the slowest rank determines the overall elapsed time */
MPI_Reduce(&local_nstream_time, &nstream_time, 1, MPI_DOUBLE, MPI_MAX, root,
MPI_COMM_WORLD);
if (my_ID == root) {
if (checkTRIADresults(iterations, length)) {
avgtime = nstream_time/iterations;
printf("Rate (MB/s): %lf Avg time (s): %lf\n",
1.0E-06 * bytes/avgtime, avgtime);
}
else error = 1;
}
bail_out(error);
MPI_Finalize();
}
/*
 * Verify the nstream triad result: analytically recompute the value that
 * every element of the global output vector a[] should hold after the
 * (iterations+1) triad passes, scale by the local vector length, and compare
 * against a checksum of the actual vector.  Returns 1 on success, 0 on failure.
 */
int checkTRIADresults (int iterations, long int length) {
  double aj, bj, cj, scalar, asum;
  double epsilon = 1.e-8;  /* relative tolerance for the checksum comparison */
  long int j;
  int iter;
  /* reproduce initialization */
  aj = 0.0;
  bj = 2.0;
  cj = 2.0;
  /* now execute timing loop; "<=" matches the warmup pass in the main loop */
  scalar = SCALAR;
  for (iter=0; iter<=iterations; iter++) aj += bj+scalar*cj;
  /* every element is updated identically, so the expected checksum is aj*length */
  aj = aj * (double) (length);
  asum = 0.0;
  #pragma omp parallel for simd reduction(+:asum)
  for (j=0; j<length; j++) asum += a[j];
  #if VERBOSE
  printf ("Results Comparison: \n");
  printf (" Expected checksum: %f\n",aj);
  printf (" Observed checksum: %f\n",asum);
  #endif
  /* NOTE(review): relative error uses asum as the denominator; asum cannot be
     zero for these fixed initial values, but confirm if initialization changes */
  if (ABS(aj-asum)/asum > epsilon) {
    printf ("Failed Validation on output array\n");
    #if !VERBOSE
    printf (" Expected checksum: %f \n",aj);
    printf (" Observed checksum: %f \n",asum);
    #endif
    return (0);
  }
  else {
    printf ("Solution validates\n");
    return (1);
  }
}
|
rawSHA256_fmt_plug.c | /*
* This file is part of John the Ripper password cracker,
* Copyright (c) 2010 by Solar Designer
* based on rawMD4_fmt.c code, with trivial changes by groszek.
*
* Understands hex hashes as well as Cisco "type 4" base64.
*
* Rewritten Spring 2013, JimF. SSE code added and released with the following terms:
* No copyright is claimed, and the software is hereby placed in the public domain.
* In case this attempt to disclaim copyright and place the software in the public
* domain is deemed null and void, then the software is Copyright (c) 2011 JimF
* and it is hereby released to the general public under the following
* terms:
*
* This software may be modified, redistributed, and used for any
* purpose, in source and binary forms, with or without modification.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_rawSHA256;
#elif FMT_REGISTERS_H
john_register_one(&fmt_rawSHA256);
#else
#include "arch.h"
#include "sha2.h"
#include "stdint.h"
#include "params.h"
#include "common.h"
#include "johnswap.h"
#include "formats.h"
//#undef SIMD_COEF_32
//#undef SIMD_PARA_SHA256
/*
* Only effective for SIMD.
* Undef to disable reversing steps for benchmarking.
*/
#define REVERSE_STEPS
#ifdef _OPENMP
#ifdef SIMD_COEF_32
#ifndef OMP_SCALE
#define OMP_SCALE 1024
#endif
#else
#ifndef OMP_SCALE
#define OMP_SCALE 2048
#endif
#endif
#include <omp.h>
#endif
#include "simd-intrinsics.h"
#include "memdbg.h"
#define FORMAT_LABEL "Raw-SHA256"
#define FORMAT_NAME ""
#ifdef SIMD_COEF_32
#define ALGORITHM_NAME SHA256_ALGORITHM_NAME
#else
#define ALGORITHM_NAME "32/" ARCH_BITS_STR " " SHA2_LIB
#endif
/* Note: Cisco hashes are truncated at length 25. We currently ignore this. */
#ifdef SIMD_COEF_32
#define PLAINTEXT_LENGTH 55
#else
#define PLAINTEXT_LENGTH 125
#endif
#define _RAWSHA256_H
#include "rawSHA256_common.h"
#undef _RAWSHA256_H
#define BINARY_SIZE 4
#define SALT_SIZE 0
#define SALT_ALIGN 1
#ifdef SIMD_COEF_32
#define MIN_KEYS_PER_CRYPT (SIMD_COEF_32*SIMD_PARA_SHA256)
#define MAX_KEYS_PER_CRYPT (SIMD_COEF_32*SIMD_PARA_SHA256)
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif
#ifdef SIMD_COEF_32
#define GETPOS(i, index) ( (index&(SIMD_COEF_32-1))*4 + ((i)&(0xffffffff-3))*SIMD_COEF_32 + (3-((i)&3)) + (unsigned int)index/SIMD_COEF_32*SHA_BUF_SIZ*SIMD_COEF_32*4 )
static uint32_t (*saved_key);
static uint32_t (*crypt_out);
#else
static int (*saved_len);
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static ARCH_WORD_32 (*crypt_out)
[(DIGEST_SIZE + sizeof(ARCH_WORD_32) - 1) / sizeof(ARCH_WORD_32)];
#endif
/* One-time format setup: scale the keys-per-crypt parameters by the OpenMP
 * thread count (times OMP_SCALE), then allocate the key and digest buffers.
 * The SIMD build uses flat, SIMD-aligned interleaved buffers; the scalar
 * build uses one plaintext buffer, one length, and one digest per candidate. */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	int omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;  /* over-provision work per crypt_all for OMP scaling */
	self->params.max_keys_per_crypt *= omp_t;
#endif
#ifndef SIMD_COEF_32
	saved_len = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*saved_len));
	saved_key = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*saved_key));
	crypt_out = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*crypt_out));
#else
	/* SHA_BUF_SIZ 32-bit words of message block per key, SIMD-aligned */
	saved_key = mem_calloc_align(self->params.max_keys_per_crypt * SHA_BUF_SIZ,
	                             sizeof(*saved_key),
	                             MEM_ALIGN_SIMD);
	/* 8 32-bit words of SHA-256 state per key, SIMD-aligned */
	crypt_out = mem_calloc_align(self->params.max_keys_per_crypt * 8,
	                             sizeof(*crypt_out),
	                             MEM_ALIGN_SIMD);
#endif
}
/* Release all buffers allocated in init(); saved_len exists only in the
 * scalar (non-SIMD) build. */
static void done(void)
{
	MEM_FREE(crypt_out);
	MEM_FREE(saved_key);
#ifndef SIMD_COEF_32
	MEM_FREE(saved_len);
#endif
}
/* Decode the hex digest portion of a canonical ciphertext into a static,
 * word-aligned binary buffer.  For SIMD builds the bytes are endian-swapped
 * to match the interleaved crypt_out layout and, when REVERSE_STEPS is
 * enabled, the final SHA-256 steps are reversed so comparisons can skip
 * computing them.  Returns a pointer to static storage (overwritten on the
 * next call). */
static void *get_binary(char *ciphertext)
{
	static unsigned int *outw;
	unsigned char *out;
	char *p;
	int i;

	if (!outw)
		outw = mem_calloc_tiny(DIGEST_SIZE, MEM_ALIGN_WORD);
	out = (unsigned char*)outw;
	p = ciphertext + HEX_TAG_LEN;  /* skip the hex tag prefix */
	for (i = 0; i < DIGEST_SIZE; i++) {
		out[i] =
			(atoi16[ARCH_INDEX(*p)] << 4) |
			atoi16[ARCH_INDEX(p[1])];
		p += 2;
	}
#ifdef SIMD_COEF_32
	alter_endianity (out, DIGEST_SIZE);
#ifdef REVERSE_STEPS
	sha256_reverse(outw);
#endif
#endif
	return out;
}
#ifdef SIMD_COEF_32
/* Index of candidate "index"'s first state word inside the interleaved
 * SIMD crypt_out buffer (8 words of state per lane). */
#define HASH_IDX (((unsigned int)index&(SIMD_COEF_32-1))+(unsigned int)index/SIMD_COEF_32*8*SIMD_COEF_32)
/* get_hash_N: low bits of the first computed state word, used by the core
 * for hash-table bucketing at increasing table sizes. */
static int get_hash_0 (int index) { return crypt_out[HASH_IDX] & PH_MASK_0; }
static int get_hash_1 (int index) { return crypt_out[HASH_IDX] & PH_MASK_1; }
static int get_hash_2 (int index) { return crypt_out[HASH_IDX] & PH_MASK_2; }
static int get_hash_3 (int index) { return crypt_out[HASH_IDX] & PH_MASK_3; }
static int get_hash_4 (int index) { return crypt_out[HASH_IDX] & PH_MASK_4; }
static int get_hash_5 (int index) { return crypt_out[HASH_IDX] & PH_MASK_5; }
static int get_hash_6 (int index) { return crypt_out[HASH_IDX] & PH_MASK_6; }
#else
/* Scalar build: crypt_out is a plain array of digests, one per candidate. */
static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; }
#endif
/* binary_hash_N: the same masks applied to a decoded binary, so loaded
 * hashes land in the same buckets as computed ones. */
static int binary_hash_0(void *binary) { return ((ARCH_WORD_32*)binary)[0] & PH_MASK_0; }
static int binary_hash_1(void *binary) { return ((ARCH_WORD_32*)binary)[0] & PH_MASK_1; }
static int binary_hash_2(void *binary) { return ((ARCH_WORD_32*)binary)[0] & PH_MASK_2; }
static int binary_hash_3(void *binary) { return ((ARCH_WORD_32*)binary)[0] & PH_MASK_3; }
static int binary_hash_4(void *binary) { return ((ARCH_WORD_32*)binary)[0] & PH_MASK_4; }
static int binary_hash_5(void *binary) { return ((ARCH_WORD_32*)binary)[0] & PH_MASK_5; }
static int binary_hash_6(void *binary) { return ((ARCH_WORD_32*)binary)[0] & PH_MASK_6; }
#ifdef SIMD_COEF_32
/* SIMD set_key: copy the candidate one 32-bit word at a time directly into
 * the interleaved, big-endian SIMD input buffer, append the 0x80 SHA-256
 * padding byte right after the last plaintext byte, zero the rest of the
 * lane, and store the bit length in the final block word. */
static void set_key(char *key, int index) {
#if ARCH_ALLOWS_UNALIGNED
	const ARCH_WORD_32 *wkey = (ARCH_WORD_32*)key;
#else
	/* architectures that fault on unaligned loads get an aligned copy */
	char buf_aligned[PLAINTEXT_LENGTH + 1] JTR_ALIGN(sizeof(uint32_t));
	const ARCH_WORD_32 *wkey = (uint32_t*)(is_aligned(key, sizeof(uint32_t)) ?
	                                       key : strcpy(buf_aligned, key));
#endif
	/* first word of this candidate's lane in the interleaved buffer */
	ARCH_WORD_32 *keybuffer = &((ARCH_WORD_32 *)saved_key)[(index&(SIMD_COEF_32-1)) + (unsigned int)index/SIMD_COEF_32*SHA_BUF_SIZ*SIMD_COEF_32];
	ARCH_WORD_32 *keybuf_word = keybuffer;
	unsigned int len;
	ARCH_WORD_32 temp;

	len = 0;
	/* copy 4 bytes per iteration until a word containing a NUL is found;
	 * each early-exit case merges the 0x80 padding byte into that word */
	while((unsigned char)(temp = *wkey++)) {
		if (!(temp & 0xff00))
		{
			*keybuf_word = JOHNSWAP((temp & 0xff) | (0x80 << 8));
			len++;
			goto key_cleaning;
		}
		if (!(temp & 0xff0000))
		{
			*keybuf_word = JOHNSWAP((temp & 0xffff) | (0x80 << 16));
			len+=2;
			goto key_cleaning;
		}
		if (!(temp & 0xff000000))
		{
			*keybuf_word = JOHNSWAP(temp | (0x80U << 24));
			len+=3;
			goto key_cleaning;
		}
		*keybuf_word = JOHNSWAP(temp);
		len += 4;
		keybuf_word += SIMD_COEF_32;
	}
	/* length was a multiple of 4: padding byte goes into the next word */
	*keybuf_word = 0x80000000;

key_cleaning:
	/* zero any leftover words from a previous, longer key in this lane */
	keybuf_word += SIMD_COEF_32;
	while(*keybuf_word) {
		*keybuf_word = 0;
		keybuf_word += SIMD_COEF_32;
	}
	/* SHA-256 message length in bits, stored in the last block word */
	keybuffer[15*SIMD_COEF_32] = len << 3;
}
#else
/* Scalar set_key: store the candidate and its (clamped) length; the NUL
 * terminator is written lazily by get_key(). */
static void set_key(char *key, int index)
{
	int len = strlen(key);
	saved_len[index] = len;
	if (len > PLAINTEXT_LENGTH)
		len = saved_len[index] = PLAINTEXT_LENGTH;
	memcpy(saved_key[index], key, len);
}
#endif
#ifdef SIMD_COEF_32
/* Reconstruct the plaintext from the interleaved SIMD buffer: read the bit
 * length back from the final block word, then gather the bytes via GETPOS.
 * Returns static storage (overwritten on the next call). */
static char *get_key(int index) {
	unsigned int i,s;
	static char out[PLAINTEXT_LENGTH+1];
	unsigned char *wucp = (unsigned char*)saved_key;

	/* stored value is length in bits; >> 3 converts back to bytes */
	s = ((ARCH_WORD_32 *)saved_key)[15*SIMD_COEF_32 + (index&(SIMD_COEF_32-1)) + (unsigned int)index/SIMD_COEF_32*SHA_BUF_SIZ*SIMD_COEF_32] >> 3;
	for(i=0;i<s;i++)
		out[i] = wucp[ GETPOS(i, index) ];
	out[i] = 0;
	return (char*) out;
}
#else
/* Scalar build: NUL-terminate in place and return the stored candidate. */
static char *get_key(int index)
{
	saved_key[index][saved_len[index]] = 0;
	return saved_key[index];
}
#endif
/* When step-reversal is disabled, neutralize the SIMD flag so the full
 * SHA-256 is computed. */
#ifndef REVERSE_STEPS
#undef SSEi_REVERSE_STEPS
#define SSEi_REVERSE_STEPS 0
#endif

/* Hash all "count" queued candidates, MAX_KEYS_PER_CRYPT at a time.
 * Note: without _OPENMP the for statement is compiled out and the block runs
 * once with index 0 — presumably valid because count never exceeds
 * MAX_KEYS_PER_CRYPT in that configuration (TODO confirm against core). */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;

#ifdef _OPENMP
#pragma omp parallel for
	for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT)
#endif
	{
#ifdef SIMD_COEF_32
		/* keys were pre-formatted (padded, lengths set) by set_key */
		SIMDSHA256body(&saved_key[(unsigned int)index/SIMD_COEF_32*SHA_BUF_SIZ*SIMD_COEF_32],
		               &crypt_out[(unsigned int)index/SIMD_COEF_32*8*SIMD_COEF_32],
		               NULL, SSEi_REVERSE_STEPS | SSEi_MIXED_IN);
#else
		SHA256_CTX ctx;
		SHA256_Init(&ctx);
		SHA256_Update(&ctx, saved_key[index], saved_len[index]);
		SHA256_Final((unsigned char *)crypt_out[index], &ctx);
#endif
	}
	return count;
}
/* Quick scan: does any computed hash share its first 32 bits with the
 * candidate binary?  Returns 1 on the first partial match, else 0. */
static int cmp_all(void *binary, int count)
{
	unsigned int index;

	for (index = 0; index < count; index++)
#ifdef SIMD_COEF_32
		if (((ARCH_WORD_32*) binary)[0] == crypt_out[HASH_IDX])
#else
		if ( ((ARCH_WORD_32*)binary)[0] == crypt_out[index][0] )
#endif
			return 1;
	return 0;
}

/* Confirm the 32-bit partial match for one candidate (BINARY_SIZE is 4,
 * so only the first word is stored/compared at this stage). */
static int cmp_one(void *binary, int index)
{
#ifdef SIMD_COEF_32
	return ((ARCH_WORD_32*)binary)[0] == crypt_out[HASH_IDX];
#else
	return *(ARCH_WORD_32*)binary == crypt_out[index][0];
#endif
}

/* Full verification: recompute the complete SHA-256 of the candidate (with
 * the same endianity/reversal transforms applied as in get_binary) and
 * compare all DIGEST_SIZE bytes against the source hash. */
static int cmp_exact(char *source, int index)
{
	ARCH_WORD_32 *binary = get_binary(source);
	char *key = get_key(index);
	SHA256_CTX ctx;
	ARCH_WORD_32 crypt_out[DIGEST_SIZE / sizeof(ARCH_WORD_32)];  /* shadows the global on purpose */

	SHA256_Init(&ctx);
	SHA256_Update(&ctx, key, strlen(key));
	SHA256_Final((unsigned char*)crypt_out, &ctx);
#ifdef SIMD_COEF_32
	alter_endianity(crypt_out, DIGEST_SIZE);
#ifdef REVERSE_STEPS
	sha256_reverse(crypt_out);
#endif
#endif
	return !memcmp(binary, crypt_out, DIGEST_SIZE);
}
/* Format descriptor registered with the JtR core: parameters first, then
 * the method table wired to the functions above (defaults where this format
 * needs no special handling). */
struct fmt_main fmt_rawSHA256 = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		"SHA256 " ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_OMP_BAD |
		FMT_SPLIT_UNIFIES_CASE,
		{ NULL },
		/* ciphertext prefixes this format recognizes */
		{
			HEX_TAG,
			CISCO_TAG
		},
		sha256_common_tests
	}, {
		init,
		done,
		fmt_default_reset,
		sha256_common_prepare,
		sha256_common_valid,
		sha256_common_split,
		get_binary,
		fmt_default_salt,
		{ NULL },
		fmt_default_source,
		{
			binary_hash_0,
			binary_hash_1,
			binary_hash_2,
			binary_hash_3,
			binary_hash_4,
			binary_hash_5,
			binary_hash_6
		},
		fmt_default_salt_hash,
		NULL,
		fmt_default_set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif /* plugin stanza */
|
AzureAD_fmt_plug.c | /*
* This software is Copyright (c) 2015 JimF, <jfoug at openwall.com>, and
* it is hereby released to the general public under the following terms:
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*
* Azure ActiveDirectory, V1 cracker patch for JtR.
*
* Algorithm: https://www.dsinternals.com/en/how-azure-active-directory-connect-syncs-passwords/
*
* PBKDF2(UTF-16(uc(hex(MD4(UTF-16(password))))), rnd_salt(10), 100, HMAC-SHA256, 32)
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_AzureAD;
#elif FMT_REGISTERS_H
john_register_one(&fmt_AzureAD);
#else
#include <string.h>
#include "arch.h"
#include "md4.h"
#include "pbkdf2_hmac_sha256.h"
#include "common.h"
#include "formats.h"
#include "base64_convert.h"
#include "AzureAD_common.h"
#include "unicode.h"
#include "johnswap.h"
//#undef SIMD_COEF_32
//#undef SIMD_PARA_SHA256
#ifdef _OPENMP
#ifdef SIMD_COEF_32
#ifndef OMP_SCALE
#define OMP_SCALE 64 // FIXME
#endif
#else
#ifndef OMP_SCALE
#define OMP_SCALE 64 // FIXME
#endif
#endif
#include <omp.h>
#endif
#include "simd-intrinsics.h"
#include "memdbg.h"
#define FORMAT_LABEL "AzureAD"
#define FORMAT_NAME ""
#define ALGORITHM_NAME "PBKDF2-SHA256 " SHA256_ALGORITHM_NAME
#ifdef SIMD_COEF_32
#define NBKEYS (SIMD_COEF_32 * SIMD_PARA_SHA256)
#else
#define NBKEYS 1
#endif
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH 0
#define BINARY_SIZE DIGEST_SIZE
#define BINARY_ALIGN 4
// For now, I will do md4() oSSL type for all passwords. There is so much
// other overhead that adding the complexity to do SIMD md4 will gain us
// almost nothing
#define PLAINTEXT_LENGTH 125
#define MIN_KEYS_PER_CRYPT NBKEYS
#define MAX_KEYS_PER_CRYPT NBKEYS
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static char (*saved_nt)[64];
static int dirty;
static ARCH_WORD_32 (*crypt_out)[BINARY_SIZE / sizeof(ARCH_WORD_32)];
/* One-time setup: scale keys-per-crypt by the OpenMP thread count (times
 * OMP_SCALE) and allocate the candidate, cached-NT-hash, and output buffers. */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	int omp_t;

	omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;  /* over-provision work per crypt_all for OMP scaling */
	self->params.max_keys_per_crypt *= omp_t;
#endif
	saved_key = mem_calloc_align(self->params.max_keys_per_crypt,
	                             sizeof(*saved_key), MEM_ALIGN_WORD);
	/* zero-initialized: crypt_all only ever writes the even bytes, the odd
	 * (high) UTF-16 bytes must stay 0 */
	saved_nt = mem_calloc_align(self->params.max_keys_per_crypt,
	                            sizeof(*saved_nt), MEM_ALIGN_WORD);
	crypt_out = mem_calloc_align(self->params.max_keys_per_crypt,
	                             sizeof(*crypt_out), MEM_ALIGN_WORD);
}
/* Release the three buffers allocated in init(). */
static void done(void)
{
	MEM_FREE(crypt_out);
	MEM_FREE(saved_nt);
	MEM_FREE(saved_key);
}
/* Parse a canonical ciphertext into the static salt structure: after the
 * format tag, the first comma-separated field is the hex salt and the second
 * is the iteration count.  Returns a pointer to static storage. */
static void *salt(char *ciphertext) {
	char Buf[120], *ctcopy=Buf;
	char *p;
	static struct AzureAD_custom_salt cs;

	memset(&cs, 0, sizeof(cs));
	strncpy(Buf, ciphertext, 119);
	Buf[119] = 0;  /* strncpy does not guarantee NUL termination */
	ctcopy += TAG_LENGTH;  /* skip the format tag */
	p = strtokm(ctcopy, ",");
	cs.salt_len = strlen(p)/2;  /* hex digits -> raw byte count */
	base64_convert(p, e_b64_hex, cs.salt_len*2, cs.salt, e_b64_raw, cs.salt_len, 0, 0);
	p = strtokm(NULL, ",");
	cs.iterations = atoi(p);
	/* NOTE(review): restarting strtokm at Buf re-tokenizes from the start of
	 * the already-modified buffer, so "version" receives the leading tag/salt
	 * text rather than a later field — looks suspicious, confirm this is the
	 * intended way to capture the version string */
	p = strtokm(Buf, ",");
	strncpy(cs.version, p, 8);
	cs.version[7] = 0;  /* force termination after bounded copy */
	return (void *)&cs;
}
/* Install the current salt (pointer into the core's salt storage). */
static void set_salt(void *salt) {
	AzureAD_cur_salt = (struct AzureAD_custom_salt *)salt;
}

/* get_hash_N: low bits of the first PBKDF2 output word, for hash tables. */
static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; }

/* Store a candidate and mark the cached NT hashes stale.
 * NOTE(review): strcpy is unbounded; this relies on the core never passing
 * a key longer than PLAINTEXT_LENGTH — confirm against the engine contract. */
static void set_key(char *key, int index) {
	strcpy(saved_key[index], key);
	dirty = 1;
}

/* Return the stored candidate (already NUL-terminated by set_key). */
static char *get_key(int index) {
	return saved_key[index];
}
/* Hash all "count" candidates, MAX_KEYS_PER_CRYPT at a time:
 * 1. if keys changed since the last call (dirty), recompute each key's
 *    MD4(UTF-16(password)), uppercase-hex encode it, and store it as
 *    UTF-16LE in saved_nt (hex chars at even offsets; odd bytes remain the
 *    zeros left by calloc in init);
 * 2. run PBKDF2-HMAC-SHA256 over that 64-byte UTF-16 hex string with the
 *    current salt/iterations into crypt_out. */
static int crypt_all(int *pcount, struct db_salt *salt) {
	const int count = *pcount;
	int index = 0;

#ifdef _OPENMP
#pragma omp parallel for
	for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT)
#endif
	{
		// * PBKDF2(UTF-16(uc(hex(MD4(UTF-16(password))))), rnd_salt(10), 100, HMAC-SHA256, 32)
		// Trivial for now. Can optimized later.
		UTF16 Buf[PLAINTEXT_LENGTH+1];
		unsigned char hash[16], hex[33];
		int len, cnt, i;
		MD4_CTX ctx;
#ifdef SIMD_COEF_32
		int lens[MAX_KEYS_PER_CRYPT];
		unsigned char *pin[MAX_KEYS_PER_CRYPT];
		union {
			ARCH_WORD_32 *pout[MAX_KEYS_PER_CRYPT];
			unsigned char *poutc;
		} x;
		cnt = MAX_KEYS_PER_CRYPT;
#else
		cnt = 1;
#endif
		/* step 1: refresh the cached UTF-16 hex MD4 strings if needed */
		if (dirty)
		for(i = 0; i < cnt; ++i) {
			len = enc_to_utf16(Buf, PLAINTEXT_LENGTH, (UTF8*)saved_key[index+i], strlen(saved_key[index+i]));
			if (len < 0) len = 0;  /* conversion error: treat as empty */
			MD4_Init(&ctx);
			MD4_Update(&ctx, Buf, len*2);  /* UTF-16 length in bytes */
			MD4_Final(hash, &ctx);
			base64_convert(hash, e_b64_raw, 16, hex, e_b64_hex, sizeof(hex), flg_Base64_HEX_UPCASE, 0);
			/* UTF-16LE: ASCII hex char in the low byte, high byte stays 0 */
			for (len = 0; len < 32; ++len)
				saved_nt[index+i][len<<1] = hex[len];
		}
		/* step 2: PBKDF2 over the 64-byte UTF-16 hex string */
#ifdef SIMD_COEF_32
		for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
			lens[i] = 64;
			pin[i] = (unsigned char*)saved_nt[i+index];
			x.pout[i] = crypt_out[i+index];
		}
		pbkdf2_sha256_sse((const unsigned char **)pin, lens, AzureAD_cur_salt->salt, AzureAD_cur_salt->salt_len, AzureAD_cur_salt->iterations, &(x.poutc), 32, 0);
#else
		pbkdf2_sha256((unsigned char *)saved_nt[index], 64,
			AzureAD_cur_salt->salt, AzureAD_cur_salt->salt_len,
			AzureAD_cur_salt->iterations, (unsigned char*)crypt_out[index], 32, 0);
#endif
	}
	dirty = 0;  /* NT-hash cache now matches the current keys */
	return count;
}
/* Return 1 as soon as any computed hash shares its first 4 bytes with the
 * candidate binary; 0 if none of the "count" entries match. */
static int cmp_all(void *binary, int count)
{
	int i;

	for (i = 0; i < count; i++) {
		if (memcmp(binary, crypt_out[i], 4) == 0)
			return 1;
	}
	return 0;
}
/* Full BINARY_SIZE comparison for the single candidate at "index". */
static int cmp_one(void *binary, int index)
{
	return memcmp(binary, crypt_out[index], BINARY_SIZE) == 0;
}
/* The full 32-byte binary was already compared in cmp_one, so there is
 * nothing further to verify here. */
static int cmp_exact(char *source, int index)
{
	(void) source;
	(void) index;
	return 1;
}
struct fmt_main fmt_AzureAD = {
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
#ifdef _OPENMP
FMT_OMP |
#endif
FMT_CASE | FMT_8_BIT | FMT_SPLIT_UNIFIES_CASE | FMT_OMP | FMT_UNICODE | FMT_UTF8,
{ NULL },
{ FORMAT_TAG },
AzureAD_common_tests
}, {
init,
done,
fmt_default_reset,
fmt_default_prepare,
AzureAD_common_valid,
AzureAD_common_split,
AzureAD_common_get_binary,
salt,
{ NULL },
fmt_default_source,
{
fmt_default_binary_hash_0,
fmt_default_binary_hash_1,
fmt_default_binary_hash_2,
fmt_default_binary_hash_3,
fmt_default_binary_hash_4,
fmt_default_binary_hash_5,
fmt_default_binary_hash_6
},
fmt_default_salt_hash,
NULL,
set_salt,
set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
get_hash_0,
get_hash_1,
get_hash_2,
get_hash_3,
get_hash_4,
get_hash_5,
get_hash_6
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
|
GB_binop__div_uint16.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__div_uint16)
// A.*B function (eWiseMult): GB (_AemultB_01__div_uint16)
// A.*B function (eWiseMult): GB (_AemultB_02__div_uint16)
// A.*B function (eWiseMult): GB (_AemultB_03__div_uint16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__div_uint16)
// A*D function (colscale): GB (_AxD__div_uint16)
// D*A function (rowscale): GB (_DxB__div_uint16)
// C+=B function (dense accum): GB (_Cdense_accumB__div_uint16)
// C+=b function (dense accum): GB (_Cdense_accumb__div_uint16)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__div_uint16)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__div_uint16)
// C=scalar+B GB (_bind1st__div_uint16)
// C=scalar+B' GB (_bind1st_tran__div_uint16)
// C=A+scalar GB (_bind2nd__div_uint16)
// C=A'+scalar GB (_bind2nd_tran__div_uint16)
// C type: uint16_t
// A type: uint16_t
// B,b type: uint16_t
// BinaryOp: cij = GB_IDIV_UNSIGNED (aij, bij, 16)
#define GB_ATYPE \
uint16_t
#define GB_BTYPE \
uint16_t
#define GB_CTYPE \
uint16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint16_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
uint16_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = GB_IDIV_UNSIGNED (x, y, 16) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_DIV || GxB_NO_UINT16 || GxB_NO_DIV_UINT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B where C, A, and B are all dense; the actual loop lives in the
// included template, specialized here for the uint16 DIV operator.
void GB (_Cdense_ewise3_accum__div_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense; returns GrB_NO_VALUE when this
// operator/type combination is compiled out via GB_DISABLE.
GrB_Info GB (_Cdense_ewise3_noaccum__div_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into the dense matrix C, sliced by
// B_ek_slicing across B_ntasks tasks / B_nthreads threads.
GrB_Info GB (_Cdense_accumB__div_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate the scalar b (passed untyped via p_bwork) into every
// entry of the dense matrix C, using the uint16 DIV operator.
// FIX: removed the duplicate, unreachable "return (GrB_SUCCESS)" that
// followed the braced block (the return inside the block always executed
// first); behavior is unchanged and the structure now matches the sibling
// _Cdense_accumB wrapper.
GrB_Info GB (_Cdense_accumb__div_uint16)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint16_t
        uint16_t bwork = (*((uint16_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D; only C's value
// array is touched here, the pattern is handled by the included template.
GrB_Info GB (_AxD__div_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *restrict Cx = (uint16_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D.
GrB_Info GB (_DxB__div_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *restrict Cx = (uint16_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B, with optional mask M (structural and/or
// complemented).  Workspace for slicing M, A, and B is declared here and
// released via GB_FREE_WORK after the template runs.
GrB_Info GB (_AaddB__div_uint16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult (method 01): C = A.*B or C<M> = A.*B; the general case, with
// the task list and mapping arrays precomputed by the caller.
GrB_Info GB (_AemultB_01__div_uint16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 02): C<#> = A.*B when A is sparse/hyper and B is
// bitmap/full.  flipxy only matters for non-commutative operators without a
// flipped variant (GB_BINOP_FLIP); DIV has a flipped form (RDIV), so this
// instantiation compiles the non-flipped path only.
GrB_Info GB (_AemultB_02__div_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 03): C<M> = A.*B where M is sparse/hyper and both A and
// B are bitmap/full; work is sliced over M's entries.
GrB_Info GB (_AemultB_03__div_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult producing a bitmap C: C=A.*B, C<M>=A.*B, or C<!M>=A.*B.
GrB_Info GB (_AemultB_bitmap__div_uint16)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = op (x, Bx): apply the binary operator with the scalar bound to the
// first argument, i.e. Cx [p] = x / Bx [p] for every entry present in the
// bitmap Bb (GBB treats a NULL bitmap as all-present).
GrB_Info GB (_bind1st__div_uint16)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *Cx = (uint16_t *) Cx_output ;
    uint16_t   x = (*((uint16_t *) x_input)) ;
    uint16_t *Bx = (uint16_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        uint16_t bij = Bx [p] ;
        Cx [p] = GB_IDIV_UNSIGNED (x, bij, 16) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = op (Ax, y): apply the binary operator with the scalar bound to the
// second argument, i.e. Cx [k] = Ax [k] / y for every entry present in the
// bitmap Ab (GBB treats a NULL bitmap as all-present).
GrB_Info GB (_bind2nd__div_uint16)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *Cx = (uint16_t *) Cx_output ;
    uint16_t *Ax = (uint16_t *) Ax_input ;
    uint16_t   y = (*((uint16_t *) y_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        // skip entries absent from the bitmap
        if (GBB (Ab, k))
        {
            uint16_t aij = Ax [k] ;
            Cx [k] = GB_IDIV_UNSIGNED (aij, y, 16) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    uint16_t aij = Ax [pA] ;                    \
    Cx [pC] = GB_IDIV_UNSIGNED (x, aij, 16) ;   \
}

// C = op (x, A'): transpose A while applying the operator with the scalar
// bound to the first argument; the transpose machinery is in the included
// GB_unop_transpose.c, driven by the GB_CAST_OP macro above.
GrB_Info GB (_bind1st_tran__div_uint16)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint16_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t x = (*((const uint16_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code after this function
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    uint16_t aij = Ax [pA] ;                    \
    Cx [pC] = GB_IDIV_UNSIGNED (aij, y, 16) ;   \
}

// C = op (A', y): transpose A while applying the operator with the scalar
// bound to the second argument; driven by the GB_CAST_OP macro above.
GrB_Info GB (_bind2nd_tran__div_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t y = (*((const uint16_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
convolution_pack4to1.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// Convolution kernel for MIPS MSA: input is packed 4 floats per channel
// element, output is 1 float per channel element.
// top_blob = activation(bottom_blob (*) weight_data_pack4to1 + bias_data).
// Assumes top_blob is pre-sized so every output coordinate maps to a valid
// (padded) input window.
static void convolution_pack4to1_msa(const Mat& bottom_blob, Mat& top_blob, const Mat& weight_data_pack4to1, const Mat& bias_data, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, int activation_type, const Mat& activation_params, const Option& opt)
{
    int w = bottom_blob.w;
    int channels = bottom_blob.c;
    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;
    const int maxk = kernel_w * kernel_h;
    // kernel offsets: flat element offsets of each kernel tap relative to the
    // window origin, accounting for dilation and the input row length w
    std::vector<int> _space_ofs(maxk);
    int* space_ofs = &_space_ofs[0];
    {
        int p1 = 0;
        int p2 = 0;
        // gap skipped between consecutive kernel rows
        int gap = w * dilation_h - kernel_w * dilation_w;
        for (int i = 0; i < kernel_h; i++)
        {
            for (int j = 0; j < kernel_w; j++)
            {
                space_ofs[p1] = p2;
                p1++;
                p2 += dilation_w;
            }
            p2 += gap;
        }
    }
    const float* bias_data_ptr = bias_data;
    // num_output: one parallel task per output channel
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        float* outptr = top_blob.channel(p);
        for (int i = 0; i < outh; i++)
        {
            for (int j = 0; j < outw; j++)
            {
                // scalar accumulator starts at the bias (if any);
                // vector accumulator collects the 4-lane partial products
                float sum = 0.f;
                if (bias_data_ptr)
                {
                    sum = bias_data_ptr[p];
                }
                v4f32 _sum = (v4f32)__msa_fill_w(0);
                const float* kptr = (const float*)weight_data_pack4to1.channel(p);
                // channels: accumulate over all input channels and kernel taps
                for (int q = 0; q < channels; q++)
                {
                    const Mat m = bottom_blob.channel(q);
                    // 4 packed floats per element, hence the * 4 scaling
                    const float* sptr = m.row(i * stride_h) + j * stride_w * 4;
                    for (int k = 0; k < maxk; k++)
                    {
                        v4f32 _val = (v4f32)__msa_ld_w(sptr + space_ofs[k] * 4, 0);
                        v4f32 _w = (v4f32)__msa_ld_w(kptr, 0);
                        _sum = __msa_fmadd_w(_sum, _val, _w);
                        kptr += 4;
                    }
                }
                // NOTE(review): presumably a horizontal add reducing the 4
                // lanes of _sum into a scalar — confirm __msa_fhadd_w semantics
                sum += __msa_fhadd_w(_sum);
                sum = activation_ss(sum, activation_type, activation_params);
                outptr[j] = sum;
            }
            outptr += outw;
        }
    }
}
|
DRB091-threadprivate2-orig-no.c | /*
Copyright (C) 1991-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>.
*/
/*
This header is separate from features.h so that the compiler can
include it implicitly at the start of every compilation. It must
not itself include <features.h> or any other header that includes
<features.h> because the implicit include comes before any feature
test macros that may be defined in a source file before it first
explicitly includes a system header. GCC knows the name of this
header in order to preinclude it.
*/
/*
glibc's intent is to support the IEC 559 math functionality, real
and complex. If the GCC (4.9 and later) predefined macros
specifying compiler intent are available, use them to determine
whether the overall intent is to support these features; otherwise,
presume an older compiler has intent to support these features and
define these macros by default.
*/
/*
wchar_t uses Unicode 10.0.0. Version 10.0 of the Unicode Standard is
synchronized with ISO/IEC 10646:2017, fifth edition, plus
the following additions from Amendment 1 to the fifth edition:
- 56 emoji characters
- 285 hentaigana
- 3 additional Zanabazar Square characters
*/
/*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
A file-scope variable used within a function called by a parallel region.
Use threadprivate to avoid data races.
This is the case for a variable referenced within a construct.
*/
#include <stdio.h>
#include <assert.h>
int sum0 = 0, sum1 = 0;
// Driver for DataRaceBench DRB091 (threadprivate, data-race-free variant).
// NOTE(review): this is Cetus-transformed output — the reductions that the
// original benchmark performed inside the parallel loops have been hoisted
// out as closed-form sums, leaving the parallel loop bodies empty. sum0 and
// sum1 are file-scope (declared above); the header comment mentions
// threadprivate, but no threadprivate directive is visible here — confirm
// against the untransformed source.
int main()
{
    int len = 1000;
    int i, sum = 0;
    int _ret_val_0;
    #pragma cetus private(i)
    #pragma loop name main#0
    #pragma cetus parallel
    #pragma omp parallel for private(i)
    for (i=0; i<len; i ++ )
    {
    }
    // closed form of sum_{i=0}^{len-1} i for len == 1000
    sum0+=499500;
    sum=(sum+sum0);
    /* reference calculation */
    #pragma cetus private(i)
    #pragma loop name main#1
    #pragma cetus parallel
    #pragma omp parallel for private(i)
    for (i=0; i<len; i ++ )
    {
    }
    // (len*len - len) / 2 == same triangular number as above
    sum1+=(((-1*len)+(len*len))/2);
    printf("sum=%d; sum1=%d\n", sum, sum1);
    // macro-expanded assert(sum==sum1) from glibc's <assert.h>
    (((void)sizeof ((sum==sum1) ? 1 : 0)), ({
    if (sum==sum1)
    {
    ;
    }
    else
    {
    __assert_fail("sum==sum1", "DRB091-threadprivate2-orig-no.c", 74, __PRETTY_FUNCTION__);
    }
    }));
    _ret_val_0=0;
    return _ret_val_0;
}
|
bli_dotv_opt_var1.c | /*
BLIS
An object-based framework for developing high-performance BLAS-like
libraries.
Copyright (C) 2014, The University of Texas at Austin
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
- Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
- Neither the name of The University of Texas at Austin nor the names
of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "blis.h"
// Optimized double-precision dot product, variant 1: *rho = x . y.
// Requires unit strides and 32-byte-aligned x and y; anything else falls back
// to the reference kernel. The conjugation arguments are accepted but not
// used in the vectorized path (real arithmetic); they are forwarded to the
// reference kernel only.
void bli_ddotv_opt_var1(
    conj_t conjx,
    conj_t conjy,
    dim_t n,
    double* restrict x, inc_t incx,
    double* restrict y, inc_t incy,
    double* restrict rho
)
{
    bool_t use_ref = FALSE;
    // If the vector lengths are zero, set rho to zero and return.
    if ( bli_zero_dim1( n ) ) {
        PASTEMAC(d,set0s)( rho );
        return;
    }
    // If there is anything that would interfere with our use of aligned
    // vector loads/stores, call the reference implementation.
    if ( incx != 1 || incy != 1 || bli_is_unaligned_to( x, 32 ) || bli_is_unaligned_to( y, 32 ) )
        use_ref = TRUE;
    // Call the reference implementation if needed.
    if ( use_ref ) {
        BLIS_DDOTV_KERNEL_REF( conjx, conjy, n, x, incx, y, incy, rho );
        return;
    }
    // Process 4 doubles per vector iteration; leftovers handled scalar below.
    dim_t n_run = n / 4;
    dim_t n_left = n % 4;
    double rhos = 0.0;
    // Threads claim 4-element groups round-robin; partial sums are combined
    // by the OpenMP reduction.
    #pragma omp parallel reduction(+:rhos)
    {
        dim_t n_threads;
        dim_t t_id = omp_get_thread_num();
        n_threads = omp_get_num_threads();
        // NOTE(review): vector4double / vec_lda / vec_madd look like Blue
        // Gene/Q QPX intrinsics (aligned load + fused multiply-add) — confirm
        // the target ISA.
        vector4double rhov = vec_splats( 0.0 );
        vector4double xv, yv;
        for ( dim_t i = t_id; i < n_run; i += n_threads )
        {
            xv = vec_lda( 0 * sizeof(double), &x[i*4] );
            yv = vec_lda( 0 * sizeof(double), &y[i*4] );
            rhov = vec_madd( xv, yv, rhov );
        }
        // horizontal reduction of the thread-local 4-lane accumulator
        rhos += vec_extract( rhov, 0 );
        rhos += vec_extract( rhov, 1 );
        rhos += vec_extract( rhov, 2 );
        rhos += vec_extract( rhov, 3 );
    }
    // scalar cleanup for the n % 4 trailing elements
    for ( dim_t i = 0; i < n_left; i++ )
    {
        rhos += x[4*n_run + i] * y[4*n_run + i];
    }
    *rho = rhos;
}
|
sdfgen.c | #include <math.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <omp.h>
#ifdef _WIN32
#include <fcntl.h>
#include <io.h>
#endif
#include "df.h"
#define STB_IMAGE_IMPLEMENTATION
#include "stb/stb_image.h"
#define STB_IMAGE_WRITE_IMPLEMENTATION
#include "stb/stb_image_write.h"
enum FILETYPE { FT_NONE = -1, FT_PNG, FT_BMP, FT_JPG, FT_TGA };
// Print a printf-style message to stderr, append a newline, and terminate
// the process with a failure status. Never returns.
static void error(const char* str, ...) {
    va_list args;
    va_start(args, str);
    vfprintf(stderr, str, args);
    va_end(args); // the C standard requires va_end before leaving the function
    putc('\n', stderr);
    exit(-1);
}
// Print the command-line usage summary for chaq_sdfgen to stdout.
static void usage() {
    puts(
        "usage: chaq_sdfgen [-f filetype] -i file -o file [-q n] [-s n] [-ahln]\n"
        " -f filetype: manually specify filetype among PNG, BMP, TGA, and JPG\n"
        " (default: deduced by output filename. if not deducable, default is png)\n"
        " -i file: input file\n"
        " specify \"-\" to read input from stdin\n"
        " -o file: output file\n"
        " specify \"-\" to output to stdout\n"
        " -q n: jpg quality (default: 100, only relevant for jpeg output)\n"
        " -s n: spread radius in pixels (default: 64)\n"
        " -a: asymmetric spread (disregard negative distances, becomes unsinged distance transformation)\n"
        " (default: symmetric)\n"
        " -h: show the usage\n"
        " -l: test pixel based on image luminance (default: tests based on alpha channel)\n"
        " -n: invert alpha test; values below threshold will be counted as \"inside\" (default: not inverted)");
}
// Threshold one channel of an interleaved image into a boolean mask.
// For each pixel, the byte at (pixel * stride + offset) is compared against a
// fixed threshold of 127: strictly above when test_above is true, strictly
// below otherwise.
static void transform_img_to_bool(const unsigned char* restrict img_in, bool* restrict bool_out, size_t width,
                                  size_t height, size_t stride, size_t offset, bool test_above) {
    const unsigned char threshold = 127;
    ptrdiff_t count = (ptrdiff_t)(width * height);
    ptrdiff_t i;
#pragma omp parallel for schedule(static)
    for (i = 0; i < count; ++i) {
        unsigned char sample = img_in[(size_t)i * stride + offset];
        bool_out[i] = test_above ? (sample > threshold) : (sample < threshold);
    }
}
// Seed a float buffer for the distance transform from a boolean mask:
// cells equal to true_is_zero get distance 0, all others get +infinity.
static void transform_bool_to_float(const bool* restrict bool_in, float* restrict float_out, size_t width,
                                    size_t height, bool true_is_zero) {
    ptrdiff_t count = (ptrdiff_t)(width * height);
    ptrdiff_t i;
#pragma omp parallel for schedule(static)
    for (i = 0; i < count; ++i) {
        if (bool_in[(size_t)i] == true_is_zero)
            float_out[i] = 0.f;
        else
            float_out[i] = INFINITY;
    }
}
// Remap signed distance values to single-channel bytes with a clamped linear
// map from [s_min, s_max] to [0, 255]. When asymmetric, negative distances
// are discarded (s_min = 0), yielding an unsigned distance field.
// Fix: the remap parameters are loop-invariant, so they are now computed once
// before the parallel loop instead of per pixel.
static void transform_float_to_byte(const float* restrict float_in, unsigned char* restrict byte_out, size_t width,
                                    size_t height, size_t spread, bool asymmetric) {
    const float s_min = asymmetric ? 0.f : -(float)spread;
    const float s_max = (float)spread;
    const float d_min = 0.f;
    const float d_max = 255.f;
    const float sn = s_max - s_min; // source span
    const float nd = d_max - d_min; // destination span
    ptrdiff_t i;
#pragma omp parallel for schedule(static)
    for (i = 0; i < (ptrdiff_t)(width * height); ++i) {
        // clamp, then linearly remap; the cast truncates toward zero
        float v = float_in[i];
        v = v > s_max ? s_max : v;
        v = v < s_min ? s_min : v;
        float remap = (((v - s_min) * nd) / sn) + d_min;
        byte_out[(size_t)i] = (unsigned char)remap;
    }
}
// In-place subtraction used to consolidate the inside/outside distance
// fields: float_dst[i] -= float_by[i], with strictly positive float_by values
// first biased by -1 so the two fields share a zero crossing at the edge.
static void transform_float_sub(float* restrict float_dst, float* restrict float_by, size_t width, size_t height) {
    const float bias = -1.f;
    ptrdiff_t count = (ptrdiff_t)(width * height);
    ptrdiff_t i;
#pragma omp parallel for schedule(static)
    for (i = 0; i < count; ++i) {
        float v = float_by[(size_t)i];
        if (v > 0.f) v += bias;
        float_dst[(size_t)i] -= v;
    }
}
static enum FILETYPE read_filetype(const char* string) {
const char* type_table[] = {"png", "bmp", "jpg", "tga"};
size_t n_types = sizeof(type_table) / sizeof(const char*);
for (size_t filetype = 0; filetype < n_types; ++filetype) {
if (strncmp(string, type_table[filetype], 3) == 0) return (enum FILETYPE)filetype;
}
return FT_NONE;
}
// stbi_write_*_to_func callback: stream the encoded image bytes to stdout.
// The context pointer is unused. NOTE(review): the fwrite return value is
// ignored, so a short write (e.g. closed pipe) goes undetected.
static void write_to_stdout(void* context, void* data, int size) {
    (void)(context);
    fwrite(data, (size_t)size, 1, stdout);
}
// chaq_sdfgen entry point: parse options, load the input image, build a
// signed (or unsigned, with -a) distance field from the thresholded alpha or
// luminance channel, and write it out as a single-channel image.
int main(int argc, char** argv) {
    omp_set_nested(1);
    char* infile = NULL;
    char* outfile = NULL;
    // channel index tested within each 2-channel (gray+alpha) pixel:
    // 1 = alpha (default), 0 = luminance (-l)
    size_t test_channel = 1;
    bool test_above = true;
    bool asymmetric = false;
    size_t spread = 64;
    size_t quality = 100;
    enum FILETYPE filetype = FT_NONE;
    bool output_to_stdout = false;
    bool open_from_stdin = false;
    // process arguments
    // NOTE(review): the loop starts at i = 0, so argv[0] (the program name)
    // is also scanned; presumably harmless unless it begins with '-'.
    for (int i = 0; i < argc; ++i) {
        if (argv[i][0] != '-') continue;
        switch (argv[i][1]) {
            // i - input file ("-" selects stdin; switched to binary on Windows)
            case 'i': {
                if (++i >= argc && infile == NULL) {
                    usage();
                    error("No input file specified.");
                } else if (i < argc) {
                    if (strncmp("-", argv[i], 2) == 0) {
                        open_from_stdin = true;
#ifdef _WIN32
                        _setmode(_fileno(stdin), _O_BINARY);
#endif
                    }
                    infile = argv[i];
                }
            } break;
            // o - output file ("-" selects stdout; switched to binary on Windows)
            case 'o': {
                if (++i >= argc && outfile == NULL) {
                    usage();
                    error("No output file specified.");
                } else if (i < argc) {
                    if (strncmp("-", argv[i], 2) == 0) {
                        output_to_stdout = true;
#ifdef _WIN32
                        _setmode(_fileno(stdout), _O_BINARY);
#endif
                    }
                    outfile = argv[i];
                }
            } break;
            // s - spread parameter (pixel radius of the remapped range)
            case 's': {
                if (++i >= argc) {
                    usage();
                    error("No number specified with spread.");
                }
                spread = strtoull(argv[i], NULL, 10);
            } break;
            // q -- jpeg quality (1-100, validated below)
            case 'q': {
                if (++i >= argc) {
                    usage();
                    error("No number specified with quality.");
                }
                quality = strtoull(argv[i], NULL, 10);
            } break;
            // f -- filetype (explicit override of extension deduction)
            case 'f': {
                if (++i >= argc) {
                    usage();
                    error("Filetype not specified with filetype switch.");
                }
                if ((filetype = read_filetype(argv[i])) == FT_NONE) {
                    usage();
                    error("Invalid filetype specified.");
                }
            } break;
            // flags: any combination of single-letter switches after one '-'
            default: {
                size_t j = 1;
                while (argv[i][j]) {
                    switch (argv[i][j]) {
                        // h - help
                        case 'h': {
                            usage();
                            return 0;
                        }
                        // n - invert (test for below threshold instead of above)
                        case 'n': {
                            test_above = false;
                        } break;
                        // l - test based on luminance
                        case 'l': {
                            test_channel = 0;
                        } break;
                        // a - asymmetric spread
                        case 'a': {
                            asymmetric = true;
                        } break;
                    }
                    ++j;
                }
            } break;
        }
    }
    // validate numeric options and required files
    if (!quality || quality > 100) {
        usage();
        error("Invalid value given for jpeg quality. Must be between 1-100");
    }
    if (!spread) {
        usage();
        error("Invalid value given for spread. Must be a positive integer.");
    }
    if (infile == NULL) {
        usage();
        error("No input file specified.");
    }
    if (outfile == NULL) {
        usage();
        error("No output file specified.");
    }
    // 2 channels sufficient to get alpha data of image
    int w;
    int h;
    int n;
    int c = 2;
    unsigned char* img_original;
    if (open_from_stdin) {
        img_original = stbi_load_from_file(stdin, &w, &h, &n, c);
    } else {
        img_original = stbi_load(infile, &w, &h, &n, c);
    }
    if (img_original == NULL) error("Input file could not be opened.");
    // transform image into bool image (thresholded mask of inside pixels)
    bool* img_bool = malloc((size_t)(w * h) * sizeof(bool));
    if (img_bool == NULL) error("img_bool malloc failed.");
    transform_img_to_bool(img_original, img_bool, (size_t)w, (size_t)h, (size_t)c * sizeof(unsigned char), test_channel,
                          test_above);
    stbi_image_free(img_original);
    // compute 2d sdf images
    // inside -- pixel distance to INSIDE
    // outside -- pixel distance to OUTSIDE
    float* img_float_inside = malloc((size_t)(w * h) * sizeof(float));
    if (img_float_inside == NULL) error("img_float_inside malloc failed.");
    float* img_float_outside = malloc((size_t)(w * h) * sizeof(float));
    if (img_float_outside == NULL) error("img_float_outside malloc failed.");
    // the two independent distance transforms run concurrently
#pragma omp parallel sections num_threads(2)
    {
#pragma omp section
        {
            transform_bool_to_float(img_bool, img_float_inside, (size_t)w, (size_t)h, true);
            dist_transform_2d(img_float_inside, (size_t)w, (size_t)h);
        }
#pragma omp section
        {
            transform_bool_to_float(img_bool, img_float_outside, (size_t)w, (size_t)h, false);
            dist_transform_2d(img_float_outside, (size_t)w, (size_t)h);
        }
    }
    free(img_bool);
    // consolidate in the form of (outside - inside) to img_float_outside
    transform_float_sub(img_float_outside, img_float_inside, (size_t)w, (size_t)h);
    free(img_float_inside);
    // transform distance values to pixel values
    unsigned char* img_byte = malloc((size_t)(w * h) * sizeof(unsigned char));
    if (img_byte == NULL) error("img_byte malloc failed.");
    transform_float_to_byte(img_float_outside, img_byte, (size_t)w, (size_t)h, spread, asymmetric);
    free(img_float_outside);
    // deduce filetype if not specified (from the output filename extension)
    if (!output_to_stdout) {
        char* dot = strrchr(outfile, '.');
        if (dot != NULL && filetype == FT_NONE) {
            filetype = read_filetype(dot + 1);
        }
    }
    // output image (single channel); FT_NONE falls back to png
    switch (filetype) {
        case FT_BMP: {
            // bmp
            if (output_to_stdout) {
                stbi_write_bmp_to_func(write_to_stdout, NULL, w, h, 1, img_byte);
            } else {
                stbi_write_bmp(outfile, w, h, 1, img_byte);
            }
        } break;
        case FT_JPG: {
            // jpg
            if (output_to_stdout) {
                stbi_write_jpg_to_func(write_to_stdout, NULL, w, h, 1, img_byte, (int)quality);
            } else {
                stbi_write_jpg(outfile, w, h, 1, img_byte, (int)quality);
            }
        } break;
        case FT_TGA: {
            // tga
            if (output_to_stdout) {
                stbi_write_tga_to_func(write_to_stdout, NULL, w, h, 1, img_byte);
            } else {
                stbi_write_tga(outfile, w, h, 1, img_byte);
            }
        } break;
        case FT_PNG:
        case FT_NONE: {
            // png
            if (output_to_stdout) {
                stbi_write_png_to_func(write_to_stdout, NULL, w, h, 1, img_byte, w * (int)sizeof(unsigned char));
            } else {
                stbi_write_png(outfile, w, h, 1, img_byte, w * (int)sizeof(unsigned char));
            }
        } break;
    }
    free(img_byte);
    return 0;
}
|
templatemath.h | /*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
* Copyright (c) 2019 Konduit K.K.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
/*
* templatemath.h
*
* Created on: Jan 1, 2016
* Author: agibsonccc
*/
#ifndef TEMPLATEMATH_H_
#define TEMPLATEMATH_H_
#include <dll.h>
#include <pointercast.h>
#include <platformmath.h>
#include <DataTypeUtils.h>
#define BFLOAT16_MAX_VALUE 32737.
#define HALF_MAX_VALUE 65504.
#define FLOAT_MAX_VALUE 3.4028235E38
#define DOUBLE_MAX_VALUE 1.7976931348623157E308
#define FLOAT_MIN_NORMAL 1.17549435e-38
#ifndef M_E
#define M_E 2.718281828459
#endif
#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif
namespace nd4j {
#ifdef __CUDACC__
#endif
namespace math {
template<typename T>
math_def inline T nd4j_abs(T value);
template<typename T>
math_def inline void nd4j_swap(T &val1, T &val2);
template<typename T>
math_def inline T nd4j_max(T val1, T val2);
template<typename T>
math_def inline T nd4j_min(T val1, T val2);
template <typename T>
math_def inline bool nd4j_eq(T val1, T val2, double eps);
template<typename T, typename Z>
math_def inline Z nd4j_re(T val1, T val2);
template<typename T, typename Z>
math_def inline Z nd4j_rint(T val1);
template<typename T, typename Z>
math_def inline Z nd4j_copysign(T val1, T val2);
template <typename T, typename Z>
math_def inline Z nd4j_softplus(T val);
template <typename T>
math_def inline T nd4j_rotl(T val, T shift);
template <typename T>
math_def inline T nd4j_rotr(T val, T shift);
//#ifndef __CUDACC__
template<typename X, typename Y, typename Z>
math_def inline Z nd4j_dot(X *x, Y *y, int length);
//#endif
template<typename T, typename Z>
math_def inline Z nd4j_ceil(T val1);
template<typename T>
math_def inline bool nd4j_isnan(T val1);
template<typename T>
math_def inline bool nd4j_isinf(T val1);
template<typename T>
math_def inline bool nd4j_isfin(T val1);
template<typename T, typename Z>
math_def inline Z nd4j_cos(T val);
template<typename T, typename Z>
math_def inline Z nd4j_cosh(T val);
template<typename X, typename Z>
math_def inline Z nd4j_exp(X val);
template<typename T, typename Z>
math_def inline Z nd4j_floor(T val);
template<typename X, typename Z>
math_def inline Z nd4j_log(X val);
template<typename X, typename Y, typename Z>
math_def inline Z nd4j_pow(X val, Y val2);
template<typename T, typename Z>
math_def inline Z nd4j_round(T val);
template<typename X, typename Y, typename Z>
math_def inline Z nd4j_remainder(X num, Y denom);
template<typename X, typename Y, typename Z>
math_def inline Z nd4j_fmod(X num, Y denom);
template<typename T, typename Z>
math_def inline Z nd4j_erf(T num);
template<typename T, typename Z>
math_def inline Z nd4j_erfc(T num);
// Reinterpret the bits of a float as a 32-bit integer (union type punning).
math_def inline int32_t floatToRawIntBits(float d) {
    union {
        float f;
        int32_t i;
    } tmp;
    tmp.f = d;
    return tmp.i;
}
// Inverse of floatToRawIntBits: reinterpret a 32-bit pattern as a float.
math_def inline float intBitsToFloat(int32_t i) {
    union {
        float f;
        int32_t i;
    } tmp;
    tmp.i = i;
    return tmp.f;
}
// x with its sign flipped when y is negative (branch-free multiply-by-sign).
// NOTE(review): 1 << 31 shifts into the sign bit of a signed int, which is
// formally UB in standard C++ — tolerated by the compilers used here.
math_def inline float mulsignf(float x, float y) {
    return intBitsToFloat(floatToRawIntBits(x) ^ (floatToRawIntBits(y) & (1 << 31)));
}
// Magnitude of x combined with the sign of y (branch-free copysign).
math_def inline float copysignfk(float x, float y) {
    return intBitsToFloat((floatToRawIntBits(x) & ~(1 << 31)) ^ (floatToRawIntBits(y) & (1 << 31)));
}
// Logistic sigmoid: 1 / (1 + e^-val), computed in the output type Z.
template<typename T, typename Z>
math_def inline Z nd4j_sigmoid(T val) {
    return (Z) 1.0f / ((Z) 1.0f + nd4j_exp<T, Z>(-val));
}
// ELU activation: identity for val >= 0, alpha * (e^val - 1) otherwise.
template<typename T, typename Z>
math_def inline Z nd4j_elu(T val, T alpha) {
    if (val >= (T) 0.f)
        return val;
    return static_cast<Z>(alpha) * (nd4j_exp<T, Z>(val) - static_cast<Z>(1.0f));
}
// Leaky ReLU: negative inputs scaled by alpha, positives passed through.
template<typename T, typename Z>
math_def inline Z nd4j_leakyrelu(T val,T alpha) {
    if (val < (T) 0.0f)
        return alpha * val;
    else
        return val;
}
// Derivative of ELU: 1 for val >= 0, alpha * e^val otherwise.
template<typename T, typename Z>
math_def inline Z nd4j_eluderivative(T val, T alpha) {
    if (val >= static_cast<T>(0.0f))
        return static_cast<Z>(1.0f);
    return static_cast<Z>(alpha) * nd4j_exp<T, Z>(val);
    //return val >= 0.0 ? 1.0 : nd4j_exp(val);
}
template<typename T, typename Z>
math_def inline Z nd4j_sin(T val);
template<typename T, typename Z>
math_def inline Z nd4j_sinh(T val);
// Softplus: log(1 + e^val), a smooth approximation of ReLU.
template<typename T, typename Z>
math_def inline Z nd4j_softplus(T val) {
    return nd4j_log<T, Z>((Z) 1.0f + nd4j_exp<T, Z>(val));
}
// Softsign: val / (1 + |val|), saturating to (-1, 1).
template<typename T, typename Z>
math_def inline Z nd4j_softsign(T val) {
    return val / ((T) 1.0f + nd4j::math::nd4j_abs<T>(val));
}
template<typename X, typename Z>
math_def inline Z nd4j_sqrt(X val);
template<typename X, typename Z>
math_def inline Z nd4j_tanh(X val);
template<typename T, typename Z>
math_def inline Z nd4j_tan(T val);
template<typename X, typename Z>
math_def inline Z nd4j_atan2(X val1, X val2);
// Two-argument arctangent, delegated to the platform primitive p_atan2.
template<typename X, typename Z>
math_def inline Z nd4j_atan2(X val1, X val2) {
    return p_atan2<Z>(static_cast<Z>(val1), static_cast<Z>(val2));
}
// Tangent, delegated to the platform primitive p_tan.
template<typename T, typename Z>
math_def inline Z nd4j_tan(T tval) {
    return p_tan<Z>(static_cast<Z>(tval));
}
// Derivative of tanh: 1 - tanh(val)^2.
template<typename T, typename Z>
math_def inline Z nd4j_tanhderivative(T val) {
    Z tanh = nd4j_tanh<T,Z>(val);
    return (Z) 1.0f - tanh * tanh;
}
// Derivative of sigmoid: s * (1 - s), with s = sigmoid(val).
// NOTE(review): declared returning T although the computation is in Z;
// result is implicitly converted — confirm this is intentional.
template <typename T, typename Z>
math_def inline T nd4j_sigmoidderivative(T val) {
    Z sigmoid = nd4j_sigmoid<T,Z>(val);
    return sigmoid * ((Z) 1.0f - sigmoid);
}
// Derivative of softsign: 1 / (1 + |val|)^2.
template<typename T, typename Z>
math_def inline T nd4j_softsignderivative(T val) {
    T y = (T) 1.0f + nd4j_abs(val);
    return (Z) 1.0f / (y * y);
}
// Sign function: -1, 0, or +1 depending on the sign of val.
template<typename T, typename Z>
math_def inline T nd4j_sgn(T val) {
    return val < (T) 0.0f ? (Z) -1.0f : val > (T) 0.0f ? (Z) 1.0f : (Z) 0.0f;
}
// Aliases of nd4j_sgn.
template<typename T, typename Z>
math_def inline Z nd4j_sign(T val) {
    return nd4j_sgn<T, Z>(val);
}
template<typename T, typename Z>
math_def inline Z nd4j_signum(T val) {
    return nd4j_sgn<T, Z>(val);
}
template<typename X, typename Z>
math_def inline Z nd4j_gamma(X a);
template<typename X, typename Z>
math_def inline Z nd4j_lgamma(X x);
//#ifndef __CUDACC__
/*
template<>
math_def inline float16 nd4j_dot<float16>(float16 *x, float16 *y, int length) {
float16 dot = (float16) 0.0f;
// TODO: since we can't use simd on unions, we might use something else here.
for(int e = 0; e < length; e++) {
dot += x[e] * y[e];
}
return dot;
}
*/
// Dot product of two length-`length` arrays, accumulated in the output
// type Z (each element pair is cast to Z before multiplying).
template<typename X, typename Y, typename Z>
math_def inline Z nd4j_dot(X *x, Y *y, int length) {
    Z dot = (Z)0.0f;
    for(int e = 0; e < length; e++) {
        dot += static_cast<Z>(x[e]) * static_cast<Z>(y[e]);
    }
    return dot;
}
//#endif
template<typename T, typename Z>
math_def inline Z nd4j_acos(T val);
template<typename T, typename Z>
math_def inline Z nd4j_sech(T val);
template<typename T, typename Z>
math_def inline Z nd4j_acosh(T val);
template<typename T, typename Z>
math_def inline Z nd4j_asin(T val);
template<typename T, typename Z>
math_def inline Z nd4j_asinh(T val);
// Inverse hyperbolic sine via the identity asinh(x) = log(sqrt(x^2 + 1) + x).
template<typename T, typename Z>
math_def inline Z nd4j_asinh(T val) {
    //Math.log(Math.sqrt(Math.pow(x, 2) + 1) + x)
    return nd4j_log<Z, Z>(nd4j_sqrt<Z, Z>(nd4j_pow<T,T,Z>(val, (T) 2) + (Z) 1.f) + (Z) val);
}
template<typename T, typename Z>
math_def inline Z nd4j_atan(T val);
template<typename T, typename Z>
math_def inline Z nd4j_atanh(T val);
// nd4j_abs specializations.
// float16: uses the hardware negate when NATIVE_HALFS is available,
// otherwise promotes through float fabsf.
template<>
math_def inline float16 nd4j_abs<float16>(float16 value) {
#ifdef NATIVE_HALFS
    if (value < (float16) 0.f) {
        return float16(__hneg(value.data));
    } else
        return value;
#else
    return (float16) fabsf((float) value);
#endif
}
// bfloat16 promotes through float.
template<>
math_def inline bfloat16 nd4j_abs<bfloat16>(bfloat16 value) {
    return (bfloat16) fabsf((float) value);
}
template<>
math_def inline float nd4j_abs<float>(float value) {
    return fabsf(value);
}
template<>
math_def inline double nd4j_abs<double>(double value) {
    return fabs(value);
}
template<>
math_def inline int nd4j_abs<int>(int value) {
    return abs(value);
}
template<>
math_def inline Nd4jLong nd4j_abs<Nd4jLong>(Nd4jLong value) {
    return llabs(value);
}
// Unsigned and bool types are their own absolute value.
template<>
math_def inline bool nd4j_abs<bool>(bool value) {
    return value;
}
template<>
math_def inline uint8_t nd4j_abs<uint8_t>(uint8_t value) {
    return value;
}
template<>
math_def inline uint16_t nd4j_abs<uint16_t>(uint16_t value) {
    return value;
}
template<>
math_def inline uint32_t nd4j_abs<uint32_t>(uint32_t value) {
    return value;
}
template<>
math_def inline Nd4jULong nd4j_abs<Nd4jULong>(Nd4jULong value) {
    return value;
}
// Small signed ints: hand-rolled to avoid promoting through abs(int).
// NOTE(review): negating INT8_MIN / INT16_MIN overflows after promotion
// narrows back — same caveat as standard abs.
template<>
math_def inline int8_t nd4j_abs<int8_t>(int8_t value) {
    return value < 0 ? -value : value;
}
template<>
math_def inline int16_t nd4j_abs<int16_t>(int16_t value) {
    return value < 0 ? -value : value;
}
// nd4j_isnan specializations. Half types test their bit patterns; float and
// double use the self-inequality property of NaN; integer types can never
// hold NaN and always return false.
template<>
math_def inline bool nd4j_isnan<float16>(float16 value) {
    return *(value.data.getXP()) == 0x7fffU;
}
template<>
math_def inline bool nd4j_isnan<bfloat16>(bfloat16 value) {
    return value == bfloat16::nan(); //0x7fffU;
}
template<>
math_def inline bool nd4j_isnan<float>(float value) {
    return value != value;
}
template<>
math_def inline bool nd4j_isnan<double>(double value) {
    return value != value;
}
template<>
math_def inline bool nd4j_isnan<int>(int value) {
    return false;
}
template<>
math_def inline bool nd4j_isnan<uint32_t>(uint32_t value) {
    return false;
}
template<>
math_def inline bool nd4j_isnan<uint16_t>(uint16_t value) {
    return false;
}
template<>
math_def inline bool nd4j_isnan<uint8_t>(uint8_t value) {
    return false;
}
template<>
math_def inline bool nd4j_isnan<int16_t>(int16_t value) {
    return false;
}
template<>
math_def inline bool nd4j_isnan<int8_t>(int8_t value) {
    return false;
}
template<>
math_def inline bool nd4j_isnan<bool>(bool value) {
    return false;
}
template<>
math_def inline bool nd4j_isnan<Nd4jLong>(Nd4jLong value) {
    return false;
}
template<>
math_def inline bool nd4j_isnan<Nd4jULong>(Nd4jULong value) {
    return false;
}
// nd4j_isinf specializations. Half types use a range test against their
// maximum finite value; float/double defer to isinf; integers are never inf.
template<>
math_def inline bool nd4j_isinf<float16>(float16 value) {
    return value < (float16) -HALF_MAX_VALUE || value > (float16) HALF_MAX_VALUE;
}
template<>
math_def inline bool nd4j_isinf<bfloat16>(bfloat16 value) {
    return value < (bfloat16) -BFLOAT16_MAX_VALUE || value > (bfloat16) BFLOAT16_MAX_VALUE;
}
template<>
math_def inline bool nd4j_isinf<float>(float value) {
#ifdef __CUDACC__
    return isinf(value);
#else
    return std::isinf(value);
#endif
    //return value < -FLOAT_MAX_VALUE || value > FLOAT_MAX_VALUE;
}
template<>
math_def inline bool nd4j_isinf<double>(double value) {
#ifdef __CUDACC__
    return isinf(value);
#else
    return std::isinf(value);
#endif
    //return value < -DOUBLE_MAX_VALUE || value > DOUBLE_MAX_VALUE;
}
template<>
math_def inline bool nd4j_isinf<int>(int value) {
    return false;
}
template<>
math_def inline bool nd4j_isinf<uint32_t>(uint32_t value) {
    return false;
}
template<>
math_def inline bool nd4j_isinf<uint16_t>(uint16_t value) {
    return false;
}
template<>
math_def inline bool nd4j_isinf<uint8_t>(uint8_t value) {
    return false;
}
template<>
math_def inline bool nd4j_isinf<int16_t>(int16_t value) {
    return false;
}
template<>
math_def inline bool nd4j_isinf<int8_t>(int8_t value) {
    return false;
}
template<>
math_def inline bool nd4j_isinf<bool>(bool value) {
    return false;
}
template<>
math_def inline bool nd4j_isinf<Nd4jLong>(Nd4jLong value) {
    return false;
}
template<>
math_def inline bool nd4j_isinf<Nd4jULong>(Nd4jULong value) {
    return false;
}
// Finite iff neither NaN nor infinite.
template<typename T>
math_def inline bool nd4j_isfin(T value) {
    return !nd4j_isnan<T>(value) && !nd4j_isinf<T>(value);
}
// nd4j_copysign specializations: magnitude of val1 with the sign of val2.
template<>
math_def inline float16 nd4j_copysign<float16>(float16 val1, float16 val2) {
    return (float16) copysignf((float) val1, (float) val2);
}
template<>
math_def inline float nd4j_copysign<float>(float val1, float val2) {
    return copysignf(val1, val2);
}
template<>
math_def inline double nd4j_copysign<double>(double val1, double val2) {
    return copysign(val1, val2);
}
// Integer variants: sign taken from val2's comparison with zero.
template<>
math_def inline int nd4j_copysign<int>(int val1, int val2) {
    if (val2 < 0) return -(nd4j_abs<int>(val1));
    else return nd4j_abs<int>(val1);
}
template<>
math_def inline Nd4jLong nd4j_copysign<Nd4jLong>(Nd4jLong val1, Nd4jLong val2) {
    if (val2 < 0) return -(nd4j_abs<Nd4jLong>(val1));
    else return nd4j_abs<Nd4jLong>(val1);
}
template<>
math_def inline bool nd4j_max(bool val1, bool val2) {
return (val1 || val2) ? true : false;
}
template<typename T>
math_def inline T nd4j_max(T val1, T val2) {
return val1 > val2 ? val1 : val2;
}
template<>
math_def inline bool nd4j_min(bool val1, bool val2) {
return (val1 && val2) ? true : false;
}
template<typename T>
math_def inline T nd4j_min(T val1, T val2) {
return val1 < val2 ? val1 : val2;
}
// Approximate equality: equal infinities of the same sign compare equal;
// otherwise accept if the difference passes EITHER an absolute-epsilon test
// or a relative (Knuth-style) test scaled by the larger magnitude.
template <typename T>
math_def inline bool nd4j_eq(T d1, T d2, double eps) {
// Same-signed infinities are considered equal; opposite signs (or inf vs inf
// producing NaN in d1-d2 below) are not.
if (nd4j::math::nd4j_isinf<T>(d1) && nd4j::math::nd4j_isinf<T>(d2)) {
if (d1 > 0 && d2 > 0)
return true;
else if (d1 < 0 && d2 < 0)
return true;
else
return false;
}
auto diff = static_cast<double>(nd4j::math::nd4j_abs<T>(d1 - d2));
// works well except in the range of very large numbers
if (diff <= eps)
return true;
// Knuth approach
// works well except in the range of very small numbers
if (diff <= nd4j::math::nd4j_max<double>(nd4j::math::nd4j_abs<double>(static_cast<double>(d1)), nd4j::math::nd4j_abs<double>(static_cast<double>(d2))) * eps)
return true;
return false;
}
// Thin dispatch wrappers: convert the input to the output type Z and forward
// to the platform primitive p_*. Exceptions: nd4j_ceil/round/floor/log compute
// in the input type X and cast the result; nd4j_rint and nd4j_exp compute in X
// and rely on implicit conversion to Z.
// NOTE(review): the X-vs-Z computation precision is inconsistent across these
// wrappers — presumably intentional for specific primitives, but verify.
template <typename X, typename Z>
math_def inline Z nd4j_ceil(X val) {
return static_cast<Z>(p_ceil<X>(val));
}
template <typename X, typename Z>
math_def inline Z nd4j_round(X val) {
return static_cast<Z>(p_round<X>(val));
}
template <typename X, typename Z>
math_def inline Z nd4j_asin(X val) {
return p_asin<Z>(static_cast<Z>(val));
}
template <typename X, typename Z>
math_def inline Z nd4j_atan(X val) {
return p_atan<Z>(static_cast<Z>(val));
}
template <typename X, typename Z>
math_def inline Z nd4j_atanh(X val) {
return p_atanh<Z>(static_cast<Z>(val));
}
template <typename X, typename Z>
math_def inline Z nd4j_cosh(X val) {
return p_cosh<Z>(static_cast<Z>(val));
}
template <typename X, typename Z>
math_def inline Z nd4j_rint(X val) {
return p_rint<X>(val);
}
template <typename X, typename Z>
math_def inline Z nd4j_sinh(X val) {
return p_sinh<Z>(static_cast<Z>(val));
}
template <typename X, typename Z>
math_def inline Z nd4j_acos(X val) {
return p_acos<Z>(static_cast<Z>(val));
}
// sech(x) = 1 / cosh(x)
template <typename X, typename Z>
math_def inline Z nd4j_sech(X val) {
return static_cast<Z>(1) / nd4j_cosh<X,Z>(val);
}
template <typename X, typename Z>
math_def inline Z nd4j_acosh(X val) {
return p_acosh<Z>(static_cast<Z>(val));
}
template <typename X, typename Z>
math_def inline Z nd4j_cos(X val) {
return p_cos<Z>(static_cast<Z>(val));
}
template <typename X, typename Z>
math_def inline Z nd4j_exp(X val) {
return p_exp<X>(val);
}
template<typename X, typename Z>
math_def inline Z nd4j_floor(X val) {
return static_cast<Z>(p_floor<X>(val));
}
template<typename X, typename Z>
math_def inline Z nd4j_log(X val) {
return static_cast<Z>(p_log<X>(val));
}
/**
* This func is special case - it must return floating point value, and optionally Y arg can be floating point argument
* @tparam X
* @tparam Y
* @tparam Z
* @param val
* @param val2
* @return
*/
// Full specialization: float^float is computed directly in float precision.
template <>
math_def inline float nd4j_pow(float val, float val2) {
return p_pow<float>(val, val2);
}
template <typename X, typename Y, typename Z>
math_def inline Z nd4j_pow(X val, Y val2) {
return p_pow<Z>(static_cast<Z>(val), static_cast<Z>(val2));
}
/**
* LogGamma(a) - float point extension of ln(n!)
*
* For x < 12 the value is computed as log(Gamma(x)); for larger x an
* asymptotic series (Abramowitz & Stegun 6.1.41) is summed in double.
**/
template <typename X, typename Z>
math_def inline Z nd4j_lgamma(X x) {
// if (x <= X(0.0))
// {
// std::stringstream os;
// os << "Logarithm of Gamma has sence only for positive values, but " << x << " was given.";
// throw std::invalid_argument( os.str() );
// }
if (x < X(12.0)) {
return nd4j_log<Z,Z>(nd4j_gamma<X,Z>(x));
}
// Abramowitz and Stegun 6.1.41
// Asymptotic series should be good to at least 11 or 12 figures
// For error analysis, see Whittiker and Watson
// A Course in Modern Analysis (1927), page 252
static const double c[8] = {
1.0/12.0,
-1.0/360.0,
1.0/1260.0,
-1.0/1680.0,
1.0/1188.0,
-691.0/360360.0,
1.0/156.0,
-3617.0/122400.0
};
// Horner evaluation of the series in z = 1/x^2.
double z = Z(1.0 / Z(x * x));
double sum = c[7];
for (int i = 6; i >= 0; i--) {
sum *= z;
sum += c[i];
}
double series = sum / Z(x);
// 0.5 * log(2*pi)
static const double halfLogTwoPi = 0.91893853320467274178032973640562;
return Z((double(x) - 0.5) * nd4j_log<X,double>(x) - double(x) + halfLogTwoPi + series);
}
// Relative error: |a-b| / (|a|+|b|); defined as 0 when both inputs are 0.
template<typename T>
math_def inline T nd4j_re(T val1, T val2) {
if (val1 == (T) 0.0f && val2 == (T) 0.0f)
return (T) 0.0f;
return nd4j_abs<T>(val1 - val2) / (nd4j_abs<T>(val1) + nd4j_abs<T>(val2));
}
template <typename X, typename Y, typename Z>
math_def inline Z nd4j_remainder(X val, Y val2) {
return p_remainder<Z>(static_cast<Z>(val), static_cast<Z>(val2));
}
template <typename X, typename Y, typename Z>
math_def inline Z nd4j_fmod(X val, Y val2) {
return p_fmod<Z>(static_cast<Z>(val), static_cast<Z>(val2));
}
template <typename X, typename Z>
math_def inline Z nd4j_sin(X val) {
return p_sin<Z>(static_cast<Z>(val));
}
template <typename X, typename Z>
math_def inline Z nd4j_sqrt(X val) {
return p_sqrt<Z>(static_cast<Z>(val));
}
// tanh via the exponential identity. Two forms are kept so that the exponent
// passed to pow stays negative for large |val| (avoids overflow of e^(2v)):
// neg_tanh uses p = e^(2v)  -> (p-1)/(p+1), intended for val <= 0;
// pos_tanh uses p = e^(-2v) -> (1-p)/(1+p), intended for val > 0.
template <typename X>
math_def inline X neg_tanh(X val) {
X o = static_cast<X>(1.0f);
X t = static_cast<X>(2.0f);
X e = static_cast<X>(M_E);
auto p = nd4j::math::nd4j_pow<X, X, X>(e, val * t);
return (p - o)/ (p + o);
}
template <typename X>
math_def inline X pos_tanh(X val) {
X o = static_cast<X>(1.0f);
X t = static_cast<X>(-2.0f);
X e = static_cast<X>(M_E);
auto p = nd4j::math::nd4j_pow<X, X, X>(e, val * t);
return (o - p) / (o + p);
}
// Branch-free float helper: av = |val| (sign is +/-1), so the exponent
// -2*av is always non-positive.
math_def inline float neu_tanh(float val, float sign) {
float e(M_E);
float av = sign * val;
auto p = nd4j::math::nd4j_pow<float, float, float>(e, -av * 2.f);
return (1 - p) / (1 + p);
}
template <>
math_def inline float nd4j_tanh(float val) {
// NOTE(review): copysignfk is not a standard C/C++ function; presumably a
// project-provided copysign variant defined elsewhere in this header — verify.
float sign = copysignfk(1.0f, val);
return sign * neu_tanh(val, sign);
}
template <typename X, typename Z>
math_def inline Z nd4j_tanh(X val) {
return val <= 0 ? neg_tanh(val) : pos_tanh(val);
}
template <typename T>
math_def inline T nd4j_rotl(T val, T shift) {
return p_rotl<T>(val, shift);
}
template <typename T>
math_def inline T nd4j_rotr(T val, T shift) {
return p_rotr<T>(val, shift);
}
template <typename X, typename Z>
math_def inline Z nd4j_erf(X val) {
return p_erf<Z>(static_cast<Z>(val));
}
template <typename X, typename Z>
math_def inline Z nd4j_erfc(X val) {
return p_erfc<Z>(static_cast<Z>(val));
}
// In-place exchange of two references.
template<typename T>
math_def inline void nd4j_swap(T &val1, T &val2) {
T temp = val1; val1=val2; val2=temp;
};
// Gamma function, split over three argument ranges (see inline comments).
// The (1,2) rational approximation follows the classic Lanczos/Hart-style
// coefficients; large arguments fall back to exp(lgamma(a)).
template <typename X, typename Z>
math_def inline Z nd4j_gamma(X a) {
// nd4j_lgamma<X,Z>(a);
// return (Z)std::tgamma(a);
// Split the function domain into three intervals:
// (0, 0.001), [0.001, 12), and (12, infinity)
///////////////////////////////////////////////////////////////////////////
// First interval: (0, 0.001)
//
// For small a, 1/Gamma(a) has power series a + gamma a^2 - ...
// So in this range, 1/Gamma(a) = a + gamma a^2 with error on the order of a^3.
// The relative error over this interval is less than 6e-7.
const double eulerGamma = 0.577215664901532860606512090; // Euler's gamma constant
if (a < X(0.001))
return Z(1.0 / ((double)a * (1.0 + eulerGamma * (double)a)));
///////////////////////////////////////////////////////////////////////////
// Second interval: [0.001, 12)
if (a < X(12.0)) {
// The algorithm directly approximates gamma over (1,2) and uses
// reduction identities to reduce other arguments to this interval.
double y = (double)a;
int n = 0;
bool argWasLessThanOne = y < 1.0;
// Add or subtract integers as necessary to bring y into (1,2)
// Will correct for this below
if (argWasLessThanOne) {
y += 1.0;
}
else {
n = static_cast<int>(floor(y)) - 1; // will use n later
y -= n;
}
// numerator coefficients for approximation over the interval (1,2)
static const double p[] = {
-1.71618513886549492533811E+0,
2.47656508055759199108314E+1,
-3.79804256470945635097577E+2,
6.29331155312818442661052E+2,
8.66966202790413211295064E+2,
-3.14512729688483675254357E+4,
-3.61444134186911729807069E+4,
6.64561438202405440627855E+4
};
// denominator coefficients for approximation over the interval (1,2)
static const double q[] = {
-3.08402300119738975254353E+1,
3.15350626979604161529144E+2,
-1.01515636749021914166146E+3,
-3.10777167157231109440444E+3,
2.25381184209801510330112E+4,
4.75584627752788110767815E+3,
-1.34659959864969306392456E+5,
-1.15132259675553483497211E+5
};
// Evaluate the rational approximation at z = y - 1 in (0,1).
double num = 0.0;
double den = 1.0;
double z = y - 1;
for (auto i = 0; i < 8; i++) {
num = (num + p[i]) * z;
den = den * z + q[i];
}
double result = num / den + 1.0;
// Apply correction if argument was not initially in (1,2)
if (argWasLessThanOne) {
// Use identity gamma(z) = gamma(z+1)/z
// The variable "result" now holds gamma of the original y + 1
// Thus we use y-1 to get back the orginal y.
result /= (y - 1.0);
}
else {
// Use the identity gamma(z+n) = z*(z+1)* ... *(z+n-1)*gamma(z)
for (auto i = 0; i < n; i++)
result *= y++;
}
return Z(result);
}
///////////////////////////////////////////////////////////////////////////
// Third interval: [12, infinity)
if (a > 171.624) {
// Correct answer too large to display. Force +infinity.
return Z(DOUBLE_MAX_VALUE);
// return DataTypeUtils::infOrMax<Z>();
}
return nd4j::math::nd4j_exp<Z,Z>(nd4j::math::nd4j_lgamma<X,Z>(a));
}
// Lower incomplete gamma (regularized form), via the series
// x^a * e^(-x) / Gamma(a) * sum_i x^i / (a (a+1) ... (a+i)).
// NOTE(review): the template arguments passed to nd4j_pow / nd4j_gamma below
// mix X and Y (e.g. nd4j_pow<X, X, Z>(x, a) where x has type Y) — presumably
// benign for the instantiations used, but verify.
template <typename X, typename Y, typename Z>
math_def inline Z nd4j_igamma(X a, Y x) {
Z aim = nd4j_pow<X, X, Z>(x, a) / (nd4j_exp<X, Z>(x) * nd4j_gamma<Y, Z>(a));
auto sum = Z(0.);
auto denom = Z(1.);
if (a <= X(0.000001))
//throw std::runtime_error("Cannot calculate gamma for a zero val.");
return Z(0);
// Accumulate terms until the running denominator makes them negligible.
for (int i = 0; Z(1./denom) > Z(1.0e-12); i++) {
denom *= (a + i);
sum += nd4j_pow<X, int, Z>(x, i) / denom;
}
return aim * sum;
}
// Upper (complementary) incomplete gamma: 1 - igamma(a, x).
template <typename X, typename Y, typename Z>
math_def inline Z nd4j_igammac(X a, Y x) {
return Z(1.) - nd4j_igamma<X, Y, Z>(a, x);
}
#ifdef __CUDACC__
namespace atomics {
// Device-side atomic read-modify-write primitives. Each is specialized per
// type below; there is intentionally no generic definition, so using an
// unsupported type fails at link time.
template <typename T>
inline __device__ T nd4j_atomicAdd(T* address, T val);
template <typename T>
inline __device__ T nd4j_atomicSub(T* address, T val);
template <typename T>
inline __device__ T nd4j_atomicMul(T* address, T val);
template <typename T>
inline __device__ T nd4j_atomicDiv(T* address, T val);
template <typename T>
inline __device__ T nd4j_atomicMin(T* address, T val);
template <typename T>
inline __device__ T nd4j_atomicMax(T* address, T val);
// atomicMin specializations. 32-bit integers map to hardware atomicMin;
// float/double emulate min through an atomicCAS loop on the bit pattern.
template <>
inline __device__ int32_t nd4j_atomicMin<int32_t>(int32_t* address, int32_t val) {
return atomicMin(address, val);
}
template <>
inline __device__ uint32_t nd4j_atomicMin<uint32_t>(uint32_t* address, uint32_t val) {
return atomicMin(address, val);
}
template <>
inline __device__ float nd4j_atomicMin<float>(float* address, float val) {
int* address_as_ull = (int*)address;
// Seed the CAS loop with val's bits; the loop re-reads the true value on a miss.
int old = __float_as_int(val), assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, __float_as_int(math::nd4j_min(val, __int_as_float(assumed))));
} while (assumed != old);
return __int_as_float(old);
}
template <>
inline __device__ double nd4j_atomicMin<double>(double* address, double val) {
unsigned long long int* address_as_ull = (unsigned long long int*)address;
unsigned long long int old = __double_as_longlong(val), assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, __double_as_longlong(math::nd4j_min(val, __longlong_as_double(assumed))));
} while (assumed != old);
return __longlong_as_double(old);
}
template <>
inline __device__ uint64_t nd4j_atomicMin<uint64_t>(uint64_t* address, uint64_t val) {
#if __CUDA_ARCH__ >= 350
return atomicMin((unsigned long long*)address, (unsigned long long)val);
#else
unsigned long long int* address_as_ull = (unsigned long long int*)address;
// NOTE(review): __double_as_longlong(val) on an integer converts it to double
// first, so the CAS seed is garbled; the loop self-corrects on the first miss,
// but this should read the integer bits directly — verify.
unsigned long long int old = __double_as_longlong(val), assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, math::nd4j_min((unsigned long long)val, assumed));
} while (assumed != old);
return old;
#endif
}
template <>
inline __device__ Nd4jLong nd4j_atomicMin<Nd4jLong>(Nd4jLong* address, Nd4jLong val) {
#if __CUDA_ARCH__ >= 350
return atomicMin((unsigned long long*)address, (unsigned long long)val);
#else
unsigned long long int* address_as_ull = (unsigned long long int*)address;
unsigned long long int old = (unsigned long long)val, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, math::nd4j_min(val, (Nd4jLong)assumed));
} while (assumed != old);
return old;
#endif
}
// NOTE(review): the sub-32-bit variants below run atomicMin on a local stack
// temporary, then store it back non-atomically — the overall update is NOT
// atomic with respect to *address. Flagging, not changing, since callers may
// rely on single-writer usage.
template <>
inline __device__ int16_t nd4j_atomicMin<int16_t>(int16_t* address, int16_t val) {
int32_t temp = *address;
*address = atomicMin(&temp, (int)val);
return *address;
}
template <>
inline __device__ bfloat16 nd4j_atomicMin<bfloat16>(bfloat16* address, bfloat16 val) {
return bfloat16(nd4j_atomicMin<int16_t>(&address->_data, val._data));
}
template <>
inline __device__ float16 nd4j_atomicMin<float16>(float16* address, float16 val) {
return float16(nd4j_atomicMin<int16_t>(reinterpret_cast<int16_t*>(&address->data), (int16_t)val.data));
}
// atomicMax specializations, mirroring the atomicMin family above.
template <>
inline __device__ int32_t nd4j_atomicMax<int32_t>(int32_t* address, int32_t val) {
return atomicMax(address, val);
}
template <>
inline __device__ uint32_t nd4j_atomicMax<uint32_t>(uint32_t* address, uint32_t val) {
return atomicMax(address, val);
}
template <>
inline __device__ double nd4j_atomicMax<double>(double* address, double val) {
unsigned long long int* address_as_ull = (unsigned long long int*)address;
unsigned long long int old = __double_as_longlong(val), assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, __double_as_longlong(math::nd4j_max(val, __longlong_as_double(assumed))));
} while (assumed != old);
return __longlong_as_double(old);
}
template <>
inline __device__ float nd4j_atomicMax<float>(float* address, float val) {
int* address_as_ull = (int*)address;
int old = __float_as_int(val), assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, __float_as_int(math::nd4j_max(val, __int_as_float(assumed))));
} while (assumed != old);
return __int_as_float(old);
}
// NOTE(review): like the 8/16-bit Min variants, the following operate on a
// stack temporary and write back non-atomically — not actually atomic.
template <>
inline __device__ uint8_t nd4j_atomicMin<uint8_t>(uint8_t* address, uint8_t val) {
uint32_t temp = *address;
*address = atomicMin(&temp, (uint32_t)val);
return *address;
}
template <>
inline __device__ int8_t nd4j_atomicMin<int8_t>(int8_t* address, int8_t val) {
int32_t temp = *address;
*address = atomicMin(&temp, (int)val);
return *address;
}
template <>
inline __device__ uint16_t nd4j_atomicMin<uint16_t>(uint16_t* address, uint16_t val) {
uint32_t temp = *address;
*address = atomicMin(&temp, (uint32_t)val);
return *address;
}
template <>
inline __device__ uint8_t nd4j_atomicMax<uint8_t>(uint8_t* address, uint8_t val) {
uint32_t temp = *address;
*address = atomicMax(&temp, (uint32_t)val);
return *address;
}
template <>
inline __device__ int8_t nd4j_atomicMax<int8_t>(int8_t* address, int8_t val) {
int32_t temp = *address;
*address = atomicMax(&temp, (int)val);
return *address;
}
template <>
inline __device__ uint16_t nd4j_atomicMax<uint16_t>(uint16_t* address, uint16_t val) {
uint32_t temp = *address;
*address = atomicMax(&temp, (uint32_t)val);
return *address;
}
template <>
inline __device__ int16_t nd4j_atomicMax<int16_t>(int16_t* address, int16_t val) {
int32_t temp = *address;
*address = atomicMax(&temp, (int32_t)val);
return *address;
}
// Half-precision max: CAS on the enclosing aligned 32-bit word, updating only
// the 16-bit lane that holds this value (PAIR exposes the high/low halves).
template <>
inline __device__ float16 nd4j_atomicMax<float16>(float16* address, float16 val) {
auto address_as_ull = (int*) address;
long addr = (long) address;
bool misaligned = addr & 0x3;
if (misaligned)
address_as_ull = (int *) (address - 1);
PAIR old, assumed, fresh;
old.W = *address_as_ull;
do {
if (!misaligned) {
float16 res = nd4j_max((float16) old.B.H, val);
fresh.B.H = res.data;
fresh.B.L = old.B.L;
} else {
float16 res = nd4j_max((float16) old.B.L, val);
fresh.B.L = res.data;
fresh.B.H = old.B.H;
}
assumed.W = old.W;
old.W = atomicCAS(address_as_ull, assumed.W, fresh.W);
} while (assumed.W != old.W);
if (!misaligned) return old.B.H;
else return old.B.L;
}
template <>
inline __device__ bfloat16 nd4j_atomicMax<bfloat16>(bfloat16* address, bfloat16 val) {
auto address_as_ull = (int*) address;
long addr = (long)(address);
bool misaligned = addr & 0x3;
if (misaligned)
address_as_ull = (int *) (address - 1);
BPAIR old, assumed, fresh;
old.W = *address_as_ull;
do {
if (!misaligned) {
bfloat16 res = nd4j_max(old.B.H, val);
fresh.B.H = res;
fresh.B.L = old.B.L;
} else {
bfloat16 res = nd4j_max(old.B.L, val);
fresh.B.L = res;
fresh.B.H = old.B.H;
}
assumed.W = old.W;
old.W = atomicCAS(address_as_ull, assumed.W, fresh.W);
} while (assumed.W != old.W);
if (!misaligned) return old.B.H;
else return old.B.L;
}
template <>
inline __device__ uint64_t nd4j_atomicMax<uint64_t>(uint64_t* address, uint64_t val) {
#if __CUDA_ARCH__ >= 350
return atomicMax((unsigned long long*)address, (unsigned long long)val);
#else
unsigned long long int* address_as_ull = (unsigned long long int*)address;
// NOTE(review): __double_as_longlong on an integer — same garbled CAS seed as
// the uint64 Min fallback; the loop self-corrects but this looks wrong.
unsigned long long int old = __double_as_longlong(val), assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, math::nd4j_max((unsigned long long)val, assumed));
} while (assumed != old);
return old;
#endif
}
template <>
inline __device__ Nd4jLong nd4j_atomicMax<Nd4jLong>(Nd4jLong* address, Nd4jLong val) {
unsigned long long int* address_as_ull = (unsigned long long int *) address;
//return (Nd4jLong) atomicAdd(address_as_ull, (unsigned long long int) val);
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, (unsigned long long)nd4j_max(val, (Nd4jLong)assumed));
} while (assumed != old);
return old;
}
// atomicAdd specializations. Types without hardware support emulate the add
// with an atomicCAS loop on a same-size (or enclosing aligned) word.
template <>
inline __device__ double nd4j_atomicAdd<double>(double* address, double val) {
unsigned long long int* address_as_ull =
(unsigned long long int *) address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed,__double_as_longlong(val +
__longlong_as_double(assumed)));
} while (assumed != old);
return __longlong_as_double(old);
}
template <>
inline __device__ Nd4jLong nd4j_atomicAdd<Nd4jLong>(Nd4jLong* address, Nd4jLong val) {
unsigned long long int* address_as_ull = (unsigned long long int *) address;
//return (Nd4jLong) atomicAdd(address_as_ull, (unsigned long long int) val);
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, val + assumed);
} while (assumed != old);
return old;
}
template <>
inline __device__ long nd4j_atomicAdd<long>(long* address, long val) {
unsigned long long* address_as_ull = (unsigned long long int *) address;
// return atomicAdd(address, val);
// NOTE(review): 'old' is declared unsigned long int, not unsigned long long —
// same width on LP64 targets but a truncation hazard elsewhere; verify.
unsigned long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, val + assumed);
} while (assumed != old);
return old;
}
template <>
inline __device__ uint32_t nd4j_atomicAdd<uint32_t>(uint32_t* address, uint32_t val) {
return atomicAdd(address, val);
}
template <>
inline __device__ uint64_t nd4j_atomicAdd<uint64_t>(uint64_t* address, uint64_t val) {
// unsigned long long* address_as_ull = (unsigned long long int *) address;
//
//// return atomicAdd(address, val);
// unsigned long int old = *address_as_ull, assumed;
// do {
// assumed = old;
// old = atomicCAS(address_as_ull, assumed, val + assumed);
// } while (assumed != old);
// return old;
return (uint64_t)atomicAdd((unsigned long long*)address, (unsigned long long)val);
}
template <>
inline __device__ float16 nd4j_atomicAdd<float16>(float16* address, float16 val) {
#if __CUDA_ARCH__ >= 700 && defined(CUDA_10)
// Native half-precision atomicAdd where the hardware supports it.
atomicAdd(reinterpret_cast<__half*>(address), val.data);
#else
// Otherwise CAS on the enclosing aligned 32-bit word, touching only our lane.
auto address_as_ull = (int*) address;
long addr = (long) address;
bool misaligned = addr & 0x3;
if (misaligned)
address_as_ull = (int *) (address - 1);
PAIR old, assumed, fresh;
old.W = *address_as_ull;
do {
if (!misaligned) {
float16 res = ((float16) old.B.H) + val;
fresh.B.H = res.data;
fresh.B.L = old.B.L;
} else {
float16 res = ((float16) old.B.L) + val;
fresh.B.L = res.data;
fresh.B.H = old.B.H;
}
assumed.W = old.W;
old.W = atomicCAS(address_as_ull, assumed.W, fresh.W);
} while (assumed.W != old.W);
if (!misaligned) return old.B.H;
else return old.B.L;
#endif
}
template <>
inline __device__ bfloat16 nd4j_atomicAdd<bfloat16>(bfloat16* address, bfloat16 val) {
auto address_as_ull = (int*) address;
auto addr = (long)(address);
bool misaligned = addr & 0x3;
if (misaligned)
address_as_ull = (int *) (address - 1);
BPAIR old, assumed, fresh;
old.W = *address_as_ull;
do {
if (!misaligned) {
bfloat16 res = old.B.H + val;
fresh.B.H = res;
fresh.B.L = old.B.L;
} else {
bfloat16 res = old.B.L + val;
fresh.B.L = res;
fresh.B.H = old.B.H;
}
assumed.W = old.W;
old.W = atomicCAS(address_as_ull, assumed.W, fresh.W);
} while (assumed.W != old.W);
if (!misaligned) return old.B.H;
else return old.B.L;
}
// Generic 16-bit add: CAS the aligned 32-bit word containing the target and
// update only the half this address refers to.
// NOTE(review): the H/L lane selection assumes a specific byte order and the
// first-iteration pairOld is only half-initialized (the CAS miss corrects it).
template <typename T>
static inline __device__ T internal_16bit_atomicAdd(T* address, T val) {
size_t shift = ((size_t)address & 2);
int *base_address = (int *)((char*)address - shift);
union I16PAIR {
struct {
T H;
T L;
} B;
int W;
__host__ __device__
I16PAIR() {};
__host__ __device__
~I16PAIR() {};
};
I16PAIR pairNew, pairOld, pairAssumed;
if (reinterpret_cast<int*>(address) == base_address) {
pairOld.B.L = val;
do {
pairNew.B.L = pairOld.B.L;
pairNew.B.H = pairOld.B.H + val;
pairAssumed.W = pairOld.W;
pairOld.W = atomicCAS(base_address, pairAssumed.W, pairNew.W);
} while (pairAssumed.W != pairOld.W);
return (T) pairOld.B.H;
} else {
pairOld.B.H = val;
do {
pairNew.B.H = pairOld.B.H;
pairNew.B.L = pairOld.B.L + val;
pairAssumed.W = pairOld.W;
pairOld.W = atomicCAS(base_address, pairAssumed.W, pairNew.W);
} while (pairAssumed.W != pairOld.W);
return (T) pairOld.B.L;
}
}
template <>
inline __device__ int16_t nd4j_atomicAdd<int16_t>(int16_t* address, int16_t val) {
return internal_16bit_atomicAdd<int16_t>(address, val);
}
template <>
inline __device__ uint16_t nd4j_atomicAdd<uint16_t>(uint16_t* address, uint16_t val) {
return internal_16bit_atomicAdd<uint16_t>(address, val);
}
// NOTE(review): the 8-bit and bool adds below update via a stack temporary
// and a plain store — not atomic with respect to *address.
template <>
inline __device__ int8_t nd4j_atomicAdd<int8_t>(int8_t* address, int8_t val) {
int res = *address;
atomicAdd(&res, (int)val);
*address = res;
return *address;
}
template <>
inline __device__ uint8_t nd4j_atomicAdd<uint8_t>(uint8_t* address, uint8_t val) {
int res = *address;
atomicAdd(&res, (int)val);
*address = res;
return *address;
}
template <>
inline __device__ bool nd4j_atomicAdd<bool>(bool* address, bool val) {
*address += (val);
return *address;
}
// Sub is implemented as Add of the negation; Div as Mul by the reciprocal.
// NOTE(review): multiply-by-reciprocal division is not bit-exact with true
// floating-point division — acceptable for ML accumulation, but verify callers.
template <>
inline __device__ double nd4j_atomicSub<double>(double* address, double val) {
return nd4j_atomicAdd<double>(address, -val);
}
template <>
inline __device__ double nd4j_atomicMul<double>(double* address, double val) {
unsigned long long int* address_as_ull =
(unsigned long long int*) address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed,__double_as_longlong(val *
__longlong_as_double(assumed)));
} while (assumed != old);
return __longlong_as_double(old);
}
template <>
inline __device__ double nd4j_atomicDiv<double>(double* address, double val) {
return nd4j_atomicMul<double>(address, 1./val);
}
template <>
inline __device__ float nd4j_atomicAdd<float>(float* address, float val) {
return atomicAdd(address,val);
}
//template <>
//inline __device__ int nd4j_atomicAdd<int>(int* address, int val) {
// return atomicAdd(address, val);
//}
template <>
inline __device__ int32_t nd4j_atomicAdd<int32_t>(int32_t* address, int32_t val) {
return (int32_t)atomicAdd((int*)address, (int)val);
}
template <>
inline __device__ float nd4j_atomicSub<float>(float* address, float val) {
return nd4j_atomicAdd<float>(address, -val);
}
template <>
inline __device__ float16 nd4j_atomicSub<float16>(float16* address, float16 val) {
return nd4j_atomicAdd<float16>(address, -val);
}
template <>
inline __device__ bfloat16 nd4j_atomicSub<bfloat16>(bfloat16* address, bfloat16 val) {
return nd4j_atomicAdd<bfloat16>(address, -val);
}
template <>
inline __device__ float nd4j_atomicMul<float>(float* address, float val) {
int* address_as_ull =
( int*)address;
int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, __float_as_int(val *
__int_as_float(assumed)));
} while (assumed != old);
return __int_as_float(old);
}
// Byte-wide multiply: CAS the aligned 32-bit word, splicing the product into
// the correct byte lane with __byte_perm.
template <>
inline __device__ int8_t nd4j_atomicMul<int8_t>(int8_t* address, int8_t val) {
unsigned int *base_address = (unsigned int *)((size_t)address & ~3);
unsigned int selectors[] = {0x3214, 0x3240, 0x3410, 0x4210};
unsigned int sel = selectors[(size_t)address & 3];
unsigned int old, assumed, mul, new_;
old = *base_address;
do {
assumed = old;
mul = val * (int8_t)__byte_perm(old, 0, ((size_t)address & 3) | 0x4440);
new_ = __byte_perm(old, mul, sel);
// No change needed; skip the CAS to avoid a redundant atomic.
if (new_ == old)
break;
old = atomicCAS(base_address, assumed, new_);
} while (assumed != old);
return (int8_t)old;
}
template <>
inline __device__ unsigned char nd4j_atomicMul<unsigned char>(unsigned char* address, unsigned char val) {
unsigned int *base_address = (unsigned int *)((size_t)address & ~3);
unsigned int selectors[] = {0x3214, 0x3240, 0x3410, 0x4210};
unsigned int sel = selectors[(size_t)address & 3];
unsigned int old, assumed, mul, new_;
old = *base_address;
do {
assumed = old;
mul = val * (uint8_t)__byte_perm(old, 0, ((size_t)address & 3) | 0x4440);
new_ = __byte_perm(old, mul, sel);
if (new_ == old)
break;
old = atomicCAS(base_address, assumed, new_);
} while (assumed != old);
return (uint8_t)old;
}
// 16-bit multiply via CAS on the enclosing aligned 32-bit word; mirrors
// internal_16bit_atomicAdd above (same lane-selection caveats).
template <typename T>
static inline __device__ T internal_16bit_atomicMul(T* address, T val) {
size_t shift = ((size_t)address & 2);
int *base_address = (int *)((char*)address - shift);
union I16PAIR {
struct {
T H;
T L;
} B;
int W;
__host__ __device__
I16PAIR() {};
__host__ __device__
~I16PAIR() {};
};
I16PAIR pairNew, pairOld, pairAssumed;
if (reinterpret_cast<int*>(address) == base_address) {
pairOld.B.L = val;
do {
pairNew.B.L = pairOld.B.L;
pairNew.B.H = pairOld.B.H * val;
pairAssumed.W = pairOld.W;
pairOld.W = atomicCAS(base_address, pairAssumed.W, pairNew.W);
} while (pairAssumed.W != pairOld.W);
return (T) pairOld.B.H;
} else {
pairOld.B.H = val;
do {
pairNew.B.H = pairOld.B.H;
pairNew.B.L = pairOld.B.L * val;
pairAssumed.W = pairOld.W;
pairOld.W = atomicCAS(base_address, pairAssumed.W, pairNew.W);
} while (pairAssumed.W != pairOld.W);
return (T) pairOld.B.L;
}
}
template <>
inline __device__ int16_t nd4j_atomicMul<int16_t>(int16_t* address, int16_t val) {
return internal_16bit_atomicMul<int16_t>(address, val);
}
template <>
inline __device__ uint16_t nd4j_atomicMul<uint16_t>(uint16_t* address, uint16_t val) {
return internal_16bit_atomicMul<uint16_t>(address, val);
}
template <>
inline __device__ int nd4j_atomicMul<int>(int* address, int val) {
int* res_address = address;
int old = *res_address, assumed;
do {
assumed = old;
old = atomicCAS(res_address, assumed, val * assumed);
} while (assumed != old);
return old;
}
template <>
inline __device__ unsigned int nd4j_atomicMul<unsigned int>(unsigned int* address, unsigned int val) {
unsigned int* res_address = address;
unsigned int old = *res_address, assumed;
do {
assumed = old;
old = atomicCAS(res_address, assumed, val * assumed);
} while (assumed != old);
return old;
}
template <>
inline __device__ int64_t nd4j_atomicMul<int64_t>(int64_t* address, int64_t val) {
unsigned long long int* res_address = (unsigned long long int*)address;
unsigned long long int old = *res_address, assumed;
do {
assumed = old;
old = atomicCAS(res_address, assumed, val * assumed);
} while (assumed != old);
return (int64_t)old;
}
template <>
inline __device__ uint64_t nd4j_atomicMul<uint64_t>(uint64_t* address, uint64_t val) {
unsigned long long int* res_address = (unsigned long long int*)address;
unsigned long long int old = *res_address, assumed;
do {
assumed = old;
old = atomicCAS(res_address, assumed, val * assumed);
} while (assumed != old);
return (uint64_t)old;
}
// On Windows Nd4jLong and int64_t are the same type, so this specialization
// would collide with the int64_t one; hence the platform guard.
#if !defined(_WIN32) && !defined(_WIN64)
template <>
inline __device__ Nd4jLong nd4j_atomicMul<Nd4jLong>(Nd4jLong* address, Nd4jLong val) {
unsigned long long int* res_address = (unsigned long long*)address;
unsigned long long int old = *res_address, assumed;
do {
assumed = old;
old = atomicCAS(res_address, assumed, val * assumed);
} while (assumed != old);
return (Nd4jLong)old;
}
#endif
template <>
inline __device__ bfloat16 nd4j_atomicMul<bfloat16>(bfloat16* address, bfloat16 val) {
return internal_16bit_atomicMul<bfloat16>(address, val);
}
template <>
inline __device__ float16 nd4j_atomicMul<float16>(float16* address, float16 val) {
return internal_16bit_atomicMul<float16>(address, val);
}
template <>
inline __device__ float nd4j_atomicDiv<float>(float* address, float val) {
return nd4j_atomicMul<float>(address, 1.f / val);
}
template <>
inline __device__ float16 nd4j_atomicDiv<float16>(float16* address, float16 val) {
return internal_16bit_atomicMul<float16>(address, (float16) 1.f / val);
}
template <>
inline __device__ bfloat16 nd4j_atomicDiv<bfloat16>(bfloat16* address, bfloat16 val) {
return internal_16bit_atomicMul<bfloat16>(address, (bfloat16) 1 / val);
}
}
#endif
}
}
#ifdef _OPENMP
#ifndef MAX_FLOAT
#define MAX_FLOAT 1e37
#endif
// Custom OpenMP reduction identities for the nd4j math helpers.
// maxTF/minTF: float-family max/min, initialized to -/+MAX_FLOAT.
#pragma omp declare reduction(maxTF : float,double,float16,bfloat16 : \
omp_out = nd4j::math::nd4j_max(omp_in, omp_out) )\
initializer (omp_priv=-MAX_FLOAT)
#pragma omp declare reduction(minTF : float,double,float16,bfloat16 : \
omp_out = nd4j::math::nd4j_min(omp_in, omp_out) )\
initializer (omp_priv=MAX_FLOAT)
// maxT/minT: all-numeric max/min; NOTE(review): initializer 0 is only a valid
// identity for non-negative data (max) — presumably relied upon by callers.
#pragma omp declare reduction(maxT : float,double,float16,bfloat16,int,Nd4jLong,Nd4jULong,int8_t,uint8_t,bool,int16_t,uint16_t,uint32_t : \
omp_out = nd4j::math::nd4j_max(omp_in, omp_out) )\
initializer (omp_priv=0)
#pragma omp declare reduction(minT : float,double,float16,bfloat16,int,Nd4jLong,Nd4jULong,int8_t,uint8_t,bool,int16_t,uint16_t,uint32_t : \
omp_out = nd4j::math::nd4j_min(omp_in, omp_out) )\
initializer (omp_priv=0)
// Absolute-value variants (amax/amin/asum) and the plain sum/product.
#pragma omp declare reduction(amaxT : float,double,float16,bfloat16,int,Nd4jLong,Nd4jULong,int8_t,uint8_t,bool,int16_t,uint16_t,uint32_t : \
omp_out = nd4j::math::nd4j_max(nd4j::math::nd4j_abs(omp_in), nd4j::math::nd4j_abs(omp_out)) )
#pragma omp declare reduction(aminT : float,double,float16,bfloat16,int,Nd4jLong,Nd4jULong,int8_t,uint8_t,bool,int16_t,uint16_t,uint32_t : \
omp_out = nd4j::math::nd4j_min(nd4j::math::nd4j_abs(omp_in), nd4j::math::nd4j_abs(omp_out)) )
#pragma omp declare reduction(asumT : float,double,float16,bfloat16,int,Nd4jLong,Nd4jULong,int8_t,uint8_t,bool,int16_t,uint16_t,uint32_t : \
omp_out = nd4j::math::nd4j_abs(omp_in) + nd4j::math::nd4j_abs(omp_out))\
initializer (omp_priv=0)
#pragma omp declare reduction(sumT : float,double,float16,bfloat16,int,Nd4jLong,Nd4jULong,int8_t,uint8_t,bool,int16_t,uint16_t,uint32_t : \
omp_out = omp_in + omp_out)\
initializer (omp_priv=0)
#pragma omp declare reduction(prodT : float,double,float16,bfloat16,int,Nd4jLong,Nd4jULong,int8_t,uint8_t,bool,int16_t,uint16_t,uint32_t : \
omp_out = omp_in * omp_out)\
initializer (omp_priv=1)
#endif
#endif /* TEMPLATEMATH_H_ */
|
xomp_accelerator_sched_test.c | // Liao 8/30/2013
// A dedicated self-contained file to test a scheduler using round-robin method across multiple threads
// Compile : gcc -fopenmp thisfile.c
#include <stdio.h>
#include <omp.h>
#include <assert.h>
/*
_p_num_threads: number of threads of the thread team participating the scheduling
_p_thread_id: the current thread's id within the current team
Return the adjusted numbers including:
loop_chunk_size: the real chunk size considering original chunksize and step
loop_sched_index: the lower bound for current thread
loop_stride: the total stride for one round of scheduling of all threads
*/
/*
 * Initialize round-robin (static, chunked) loop-scheduling state for one thread.
 *
 * lb, up, step     : original loop bounds and increment ('up' is treated as an
 *                    exclusive bound by the companion XOMP_static_sched_next)
 * orig_chunk_size  : requested chunk size, in iteration counts
 * _p_num_threads   : number of threads in the scheduling team
 * _p_thread_id     : this thread's id within the team
 *
 * Outputs:
 * loop_chunk_size  : chunk size scaled by the loop step
 * loop_sched_index : this thread's first lower bound
 * loop_stride      : distance covered by one full round of all threads
 */
void XOMP_static_sched_init(int lb, int up, int step, int orig_chunk_size, int _p_num_threads, int _p_thread_id, \
int * loop_chunk_size, int * loop_sched_index, int * loop_stride)
{
    //TODO adjust inclusive vs. exclusive loop bounds
    (void)up; /* the upper bound is consumed by XOMP_static_sched_next */
    int nthds = _p_num_threads;
    if (nthds == 1) { /* not in parallel */
        /* Fix: the original left *loop_chunk_size and *loop_stride
         * uninitialized here, yet callers (e.g. OUT__2__10550__) print and
         * pass them on. Define all three outputs in the sequential case too. */
        *loop_chunk_size = orig_chunk_size * step;
        *loop_sched_index = lb;
        *loop_stride = *loop_chunk_size;
        return;
    }
    *loop_chunk_size = orig_chunk_size * step;
    /* Thread t starts t chunks past the global lower bound... */
    *loop_sched_index = lb + (*loop_chunk_size)* _p_thread_id;
    /* ...and advances by one chunk per team member each round. */
    *loop_stride = (*loop_chunk_size) * nthds;
}
/*
Using current thread ID (_p_thread_id) and team size (_p_num_threads), calculate lb and ub for the current thread
for the round robin scheduling with lower (loop_sched_index), upper (loop_end) , stride (loop_stride), and chunk size (loop_chunk_size)
*/
/* Hand out the next chunk [*lb, *ub) for the calling thread under
 * round-robin static scheduling.
 *
 * Returns 1 when a chunk was produced, 0 once this thread's share of the
 * iteration space [loop_sched_index, loop_end) is exhausted.
 */
int XOMP_static_sched_next(
  int* loop_sched_index , int loop_end, int loop_stride, int loop_chunk_size,
  int _p_num_threads, int _p_thread_id,
  int *lb,int *ub)
{
  int begin = *loop_sched_index;

  /* Serial case: the single thread takes everything that is left, once. */
  if (_p_num_threads == 1) {
    if (begin == loop_end)
      return 0;
    *lb = begin;
    *ub = loop_end;
    *loop_sched_index = loop_end;  /* the next call sees an empty range */
    return 1;
  }

  /* Advance this thread's cursor to its chunk in the next scheduling round. */
  *loop_sched_index = begin + loop_stride;
  int finish = begin + loop_chunk_size;

  if (loop_chunk_size > 0) {       /* ascending loop */
    if (begin >= loop_end)
      return 0;
    if (finish >= loop_end)
      finish = loop_end;           /* clip the final, partial chunk */
  } else {                         /* descending loop */
    if (begin <= loop_end)
      return 0;
  }
  *lb = begin;
  *ub = finish;
  return 1;
}
/* Kernel body: computes _dev_u[ij] = n - ij for the slice of [0, n) that the
 * round-robin static scheduler assigns to the calling thread. The team size
 * is hard-coded to 4, matching the four parallel sections in main(), and the
 * thread's slice is selected by omp_get_thread_num(). */
void OUT__2__10550__(int n,int *_dev_u)
{
int ij;
int _dev_lower, _dev_upper;
// variables for adjusted loop info considering both original chunk size and step(strip)
int _dev_loop_chunk_size;
int _dev_loop_sched_index;
int _dev_loop_stride;
// 1-D thread block:
int _dev_thread_num = 4;
int _dev_thread_id = omp_get_thread_num();
printf ("current thread id = %d\n", _dev_thread_id);
//TODO: adjust bound to be inclusive later
int orig_start =0; // must be correct!!
int orig_end = n; // exclusive bound
int orig_step = 1;
int orig_chunk_size = 1;
XOMP_static_sched_init (orig_start, orig_end, orig_step, orig_chunk_size, _dev_thread_num, _dev_thread_id, \
& _dev_loop_chunk_size , & _dev_loop_sched_index, & _dev_loop_stride);
printf ("Initialized chunk size = %d, sched indx =%d, stride = %d\n",_dev_loop_chunk_size, _dev_loop_sched_index, _dev_loop_stride);
/* Pull chunks until this thread's share of the iteration space is done. */
while (XOMP_static_sched_next (&_dev_loop_sched_index, orig_end, _dev_loop_stride, _dev_loop_chunk_size, _dev_thread_num, _dev_thread_id, & _dev_lower
, & _dev_upper))
{
printf ("Thread ID: %d Allocated lower = %d upper = %d\n", _dev_thread_id, _dev_lower, _dev_upper);
for (ij = _dev_lower ; ij < _dev_upper; ij ++) { // using exclusive bound here
_dev_u[ij] = n - ij;
}
}
}
#define SIZE 10
int a[SIZE], b[SIZE] ;
/* Fills a[] serially, fills b[] via four parallel sections that each run the
 * scheduler-driven kernel, then asserts both arrays agree element-wise.
 *
 * NOTE(review): correctness relies on the four sections being executed by
 * four distinct threads (ids 0..3) so that together they cover all of b[];
 * if the runtime runs two sections on the same thread, some elements of b
 * are never written -- confirm num_threads(4) is honored on the target. */
int main ()
{
int i;
// reference array and values for each element
for (i=0; i<SIZE; i++)
a[i]= SIZE - i; // reverse order to make sure no default values are messing up things
// calcualted array elements using the scheduling functions
#pragma omp parallel sections num_threads(4)
{
#pragma omp section
OUT__2__10550__ (SIZE, b);
#pragma omp section
OUT__2__10550__ (SIZE, b);
#pragma omp section
OUT__2__10550__ (SIZE, b);
#pragma omp section
OUT__2__10550__ (SIZE, b);
}
for (i=0; i<SIZE; i++)
{
printf ("a[%d]=%d, b[%d]=%d\n", i, a[i], i, b[i]);
}
// verify the parallel result against the serial reference
for (i=0; i<SIZE; i++)
{
assert (a[i]==b[i]);
}
printf ("Success if you see this printf output!\n");
return 0;
}
|
GB_unaryop__lnot_uint32_uint16.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__lnot_uint32_uint16
// op(A') function: GB_tran__lnot_uint32_uint16
// C type: uint32_t
// A type: uint16_t
// cast: uint32_t cij = (uint32_t) aij
// unaryop: cij = !(aij != 0)
#define GB_ATYPE \
uint16_t
#define GB_CTYPE \
uint32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !(x != 0) ;
// casting
#define GB_CASTING(z, x) \
uint32_t z = (uint32_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_UINT32 || GxB_NO_UINT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
/* Cx [p] = !(Ax [p] != 0) for p in [0, anz): apply the LNOT operator to a
 * dense uint16_t array, casting the result to uint32_t, in parallel.
 * The per-element work is expanded from the GB_CAST_OP macro defined above. */
GrB_Info GB_unop__lnot_uint32_uint16
(
uint32_t *restrict Cx,
const uint16_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
// operator compiled out via GxB_NO_* flags; caller falls back to generic code
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
/* C = op (cast (A')): transpose A, typecast uint16_t -> uint32_t, and apply
 * LNOT. The actual loop lives in the included GB_unaryop_transpose.c
 * template, driven by the GB_* macros defined above; Rowcounts, Iter,
 * A_slice and naslice describe the parallel slicing of A. */
GrB_Info GB_tran__lnot_uint32_uint16
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t **Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
// operator compiled out via GxB_NO_* flags; caller falls back to generic code
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
3d7pt.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-1, 3D 7 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute *result = *x - *y for two struct timeval values.
 *
 * NOTE: *y is used as scratch space and is modified by the call.
 * Returns 1 if the difference is negative, otherwise 0.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Normalize y so that x->tv_usec - y->tv_usec lands in a sane range:
   * borrow whole seconds into y when x's microseconds are smaller. */
  if (x->tv_usec < y->tv_usec) {
    int carry = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * carry;
    y->tv_sec += carry;
  }
  /* ...and push excess microseconds the other way when the gap is too big. */
  if (x->tv_usec - y->tv_usec > 1000000) {
    int carry = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * carry;
    y->tv_sec -= carry;
  }
  /* tv_usec is certainly positive after normalization. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;
  return x->tv_sec < y->tv_sec;
}
/* Benchmark driver: allocates two time planes of an Nz x Ny x Nx grid,
 * runs the time-tiled 7-point stencil TESTS times, and reports the fastest
 * wall-clock time. */
int main(int argc, char *argv[])
{
int t, i, j, k, test;
int Nx, Ny, Nz, Nt;
/* NOTE(review): Nx, Ny, Nz (and Nt below) remain uninitialized when fewer
 * command-line arguments are given; all later uses are then undefined
 * behavior -- confirm the harness always passes 4 arguments. */
if (argc > 3) {
Nx = atoi(argv[1])+2;
Ny = atoi(argv[2])+2;
Nz = atoi(argv[3])+2;
}
if (argc > 4)
Nt = atoi(argv[4]);
/* Two time planes A[0]/A[1], each an Nz x Ny x Nx grid allocated row by row
 * (allocation results are not checked; a failed malloc would crash below). */
double ****A = (double ****) malloc(sizeof(double***)*2);
A[0] = (double ***) malloc(sizeof(double**)*Nz);
A[1] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[0][i] = (double**) malloc(sizeof(double*)*Ny);
A[1][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 32;
tile_size[1] = 32;
tile_size[2] = 32;
tile_size[3] = 256;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
const double alpha = 0.0876;
const double beta = 0.0765;
// initialize variables
//
srand(42);
/* NOTE(review): index 0 of each dimension is never initialized here, yet the
 * stencil below can read those halo cells -- presumably the benchmark
 * tolerates arbitrary values at the boundary; verify against the reference. */
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
/* Run the stencil TESTS times; only the fastest run is reported. */
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
/* Copyright (C) 1991-2014 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
/* This header is separate from features.h so that the compiler can
include it implicitly at the start of every compilation. It must
not itself include <features.h> or any other header that includes
<features.h> because the implicit include comes before any feature
test macros that may be defined in a source file before it first
explicitly includes a system header. GCC knows the name of this
header in order to preinclude it. */
/* glibc's intent is to support the IEC 559 math functionality, real
and complex. If the GCC (4.9 and later) predefined macros
specifying compiler intent are available, use them to determine
whether the overall intent is to support these features; otherwise,
presume an older compiler has intent to support these features and
define these macros by default. */
/* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) /
Unicode 6.0. */
/* We do not support C11 <threads.h>. */
int t1, t2, t3, t4, t5, t6, t7, t8;
int lb, ub, lbp, ubp, lb2, ub2;
register int lbv, ubv;
/* Start of CLooG code */
/* Time-tiled loop nest generated by PLUTO/CLooG: t5 is the time step,
 * t6/t7/t8 sweep z/y/x in skewed coordinates; do not hand-edit. */
if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
for (t1=-1;t1<=floord(Nt-2,16);t1++) {
lbp=max(ceild(t1,2),ceild(32*t1-Nt+3,32));
ubp=min(floord(Nt+Nz-4,32),floord(16*t1+Nz+13,32));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
for (t2=lbp;t2<=ubp;t2++) {
for (t3=max(max(0,ceild(t1-1,2)),ceild(32*t2-Nz-28,32));t3<=min(min(min(floord(Nt+Ny-4,32),floord(16*t1+Ny+29,32)),floord(32*t2+Ny+28,32)),floord(32*t1-32*t2+Nz+Ny+27,32));t3++) {
for (t4=max(max(max(0,ceild(t1-15,16)),ceild(32*t2-Nz-252,256)),ceild(32*t3-Ny-252,256));t4<=min(min(min(min(floord(Nt+Nx-4,256),floord(16*t1+Nx+29,256)),floord(32*t2+Nx+28,256)),floord(32*t3+Nx+28,256)),floord(32*t1-32*t2+Nz+Nx+27,256));t4++) {
for (t5=max(max(max(max(max(0,16*t1),32*t1-32*t2+1),32*t2-Nz+2),32*t3-Ny+2),256*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,16*t1+31),32*t2+30),32*t3+30),256*t4+254),32*t1-32*t2+Nz+29);t5++) {
for (t6=max(max(32*t2,t5+1),-32*t1+32*t2+2*t5-31);t6<=min(min(32*t2+31,-32*t1+32*t2+2*t5),t5+Nz-2);t6++) {
for (t7=max(32*t3,t5+1);t7<=min(32*t3+31,t5+Ny-2);t7++) {
lbv=max(256*t4,t5+1);
ubv=min(256*t4+255,t5+Nx-2);
#pragma ivdep
#pragma vector always
for (t8=lbv;t8<=ubv;t8++) {
A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = ((alpha * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (beta * (((((A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)] + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1]) + A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1])));;
}
}
}
}
}
}
}
}
}
/* End of CLooG code */
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = min(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays (Causing performance degradation
/* for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
*/
return 0;
}
|
GB_unaryop__lnot_bool_uint8.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__lnot_bool_uint8
// op(A') function: GB_tran__lnot_bool_uint8
// C type: bool
// A type: uint8_t
// cast: bool cij = (bool) aij
// unaryop: cij = !aij
#define GB_ATYPE \
uint8_t
#define GB_CTYPE \
bool
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !x ;
// casting
#define GB_CASTING(z, x) \
bool z = (bool) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_BOOL || GxB_NO_UINT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
/* Cx [p] = !Ax [p] for p in [0, anz): apply the LNOT operator to a dense
 * uint8_t array, casting the result to bool, in parallel. The per-element
 * work is expanded from the GB_CAST_OP macro defined above. */
GrB_Info GB_unop__lnot_bool_uint8
(
bool *restrict Cx,
const uint8_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
// operator compiled out via GxB_NO_* flags; caller falls back to generic code
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
/* C = op (cast (A')): transpose A, typecast uint8_t -> bool, and apply LNOT.
 * The actual loop lives in the included GB_unaryop_transpose.c template,
 * driven by the GB_* macros defined above; Rowcounts, Iter, A_slice and
 * naslice describe the parallel slicing of A. */
GrB_Info GB_tran__lnot_bool_uint8
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
// operator compiled out via GxB_NO_* flags; caller falls back to generic code
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
resource_manager_test.h | // -----------------------------------------------------------------------------
//
// Copyright (C) 2021 CERN & Newcastle University for the benefit of the
// BioDynaMo collaboration. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
//
// See the LICENSE file distributed with this work for details.
// See the NOTICE file distributed with this work for additional information
// regarding copyright ownership.
//
// -----------------------------------------------------------------------------
#ifndef UNIT_CORE_RESOURCE_MANAGER_TEST_H_
#define UNIT_CORE_RESOURCE_MANAGER_TEST_H_
#include <algorithm>
#include <unordered_map>
#include <vector>
#include "core/agent/agent.h"
#include "core/environment/environment.h"
#include "core/resource_manager.h"
#include "core/util/io.h"
#include "core/util/type.h"
#include "unit/test_util/test_agent.h"
#include "unit/test_util/test_util.h"
#include "core/diffusion/euler_grid.h"
#define ROOTFILE "bdmFile.root"
namespace bdm {
// Minimal test agent carrying an int payload; used throughout the
// ResourceManager tests to identify agents by value.
class A : public TestAgent {
BDM_AGENT_HEADER(A, TestAgent, 1);
public:
A() {}
explicit A(int data) { data_ = data; }
int GetData() const { return data_; }
void SetData(int data) { data_ = data; }
int data_;  // payload inspected by the tests
};
// Minimal test agent carrying a double payload; counterpart to class A for
// exercising heterogeneous agent containers.
class B : public TestAgent {
BDM_AGENT_HEADER(B, TestAgent, 1);
public:
B() {}
explicit B(double data) { data_ = data; }
double GetData() const { return data_; }
void SetData(double data) { data_ = data; }
double data_;  // payload inspected by the tests
};
// Adds two A and two B agents and verifies ForEachAgent visits each of them
// exactly once, identifying agents by their uid offset from ref_uid.
inline void RunForEachAgentTest() {
const double kEpsilon = abs_error<double>::value;
Simulation simulation("RunForEachAgentTest");
auto* rm = simulation.GetResourceManager();
auto ref_uid = AgentUid(simulation.GetAgentUidGenerator()->GetHighestIndex());
rm->AddAgent(new A(12));
rm->AddAgent(new A(34));
rm->AddAgent(new B(3.14));
rm->AddAgent(new B(6.28));
uint64_t counter = 0;
rm->ForEachAgent([&](Agent* element) {  // NOLINT
counter++;
// agents were added in uid order, so the offset identifies each one
switch (element->GetUid() - ref_uid) {
case 0:
EXPECT_EQ(12, dynamic_cast<A*>(element)->GetData());
break;
case 1:
EXPECT_EQ(34, dynamic_cast<A*>(element)->GetData());
break;
case 2:
EXPECT_NEAR(3.14, dynamic_cast<B*>(element)->GetData(), kEpsilon);
break;
case 3:
EXPECT_NEAR(6.28, dynamic_cast<B*>(element)->GetData(), kEpsilon);
break;
}
});
EXPECT_EQ(4u, counter);
}
// Verifies GetNumAgents reports the total across both agent types.
inline void RunGetNumAgents() {
Simulation simulation("ResourceManagerTest-RunGetNumAgents");
auto* rm = simulation.GetResourceManager();
rm->AddAgent(new A(12));
rm->AddAgent(new A(34));
rm->AddAgent(new A(59));
rm->AddAgent(new B(3.14));
rm->AddAgent(new B(6.28));
EXPECT_EQ(5u, rm->GetNumAgents());
}
// Functor for RunForEachAgentParallelTest: checks that each of the three B
// agents (added with payloads 3.14, 6.28, 9.42 and uids 0..2) is visited
// with its expected payload; any other uid is a failure.
struct ForEachAgentParallelTestFunctor : Functor<void, Agent*> {
  void operator()(Agent* agent) override {
    const double kEpsilon = abs_error<double>::value;
    B* b = dynamic_cast<B*>(agent);
    AgentUid uid = agent->GetUid();
    if (uid == AgentUid(0)) {
      // Consistency fix: compare doubles with a tolerance, as the other
      // checks in this file do, instead of exact EXPECT_EQ on floats.
      EXPECT_NEAR(3.14, b->GetData(), kEpsilon);
    } else if (uid == AgentUid(1)) {
      EXPECT_NEAR(6.28, b->GetData(), kEpsilon);
    } else if (uid == AgentUid(2)) {
      EXPECT_NEAR(9.42, b->GetData(), kEpsilon);
    } else {
      FAIL();
    }
  }
};
// This test uses B agents only since A and B are stripped-down agents
// and are themselves not thread safe.
// Adds three B agents and runs the parallel iterator; the functor asserts
// each agent is visited with its expected payload.
inline void RunForEachAgentParallelTest() {
Simulation simulation("RunForEachAgentParallelTest");
auto* rm = simulation.GetResourceManager();
rm->AddAgent(new B(3.14));
rm->AddAgent(new B(6.28));
rm->AddAgent(new B(9.42));
ForEachAgentParallelTestFunctor functor;
rm->ForEachAgentParallel(functor);
}
// Adds five agents, removes them one by one by uid, and verifies
// ContainsAgent flips from true to false and the count drops to zero.
inline void RunRemoveAndContainsTest() {
Simulation simulation("ResourceManagerTest-RunRemoveAndContainsTest");
auto* rm = simulation.GetResourceManager();
A* a0 = new A(12);
auto a0_uid = a0->GetUid();
rm->AddAgent(a0);
A* a1 = new A(34);
auto a1_uid = a1->GetUid();
rm->AddAgent(a1);
A* a2 = new A(59);
auto a2_uid = a2->GetUid();
rm->AddAgent(a2);
B* b0 = new B(3.14);
auto b0_uid = b0->GetUid();
rm->AddAgent(b0);
B* b1 = new B(6.28);
auto b1_uid = b1->GetUid();
rm->AddAgent(b1);
EXPECT_TRUE(rm->ContainsAgent(a0_uid));
EXPECT_TRUE(rm->ContainsAgent(a1_uid));
EXPECT_TRUE(rm->ContainsAgent(a2_uid));
EXPECT_TRUE(rm->ContainsAgent(b0_uid));
EXPECT_TRUE(rm->ContainsAgent(b1_uid));
rm->RemoveAgent(a0_uid);
rm->RemoveAgent(a1_uid);
rm->RemoveAgent(a2_uid);
rm->RemoveAgent(b0_uid);
rm->RemoveAgent(b1_uid);
EXPECT_FALSE(rm->ContainsAgent(a0_uid));
EXPECT_FALSE(rm->ContainsAgent(a1_uid));
EXPECT_FALSE(rm->ContainsAgent(a2_uid));
EXPECT_FALSE(rm->ContainsAgent(b0_uid));
EXPECT_FALSE(rm->ContainsAgent(b1_uid));
EXPECT_EQ(0u, rm->GetNumAgents());
}
// Adds five agents and verifies ClearAgents removes all of them in one call.
inline void RunClearTest() {
Simulation simulation("ResourceManagerTest-RunClearTest");
auto* rm = simulation.GetResourceManager();
A* a0 = new A(12);
auto a0_uid = a0->GetUid();
rm->AddAgent(a0);
A* a1 = new A(34);
auto a1_uid = a1->GetUid();
rm->AddAgent(a1);
A* a2 = new A(59);
auto a2_uid = a2->GetUid();
rm->AddAgent(a2);
B* b0 = new B(3.14);
auto b0_uid = b0->GetUid();
rm->AddAgent(b0);
B* b1 = new B(6.28);
auto b1_uid = b1->GetUid();
rm->AddAgent(b1);
EXPECT_TRUE(rm->ContainsAgent(a0_uid));
EXPECT_TRUE(rm->ContainsAgent(a1_uid));
EXPECT_TRUE(rm->ContainsAgent(a2_uid));
EXPECT_TRUE(rm->ContainsAgent(b0_uid));
EXPECT_TRUE(rm->ContainsAgent(b1_uid));
rm->ClearAgents();
EXPECT_FALSE(rm->ContainsAgent(a0_uid));
EXPECT_FALSE(rm->ContainsAgent(a1_uid));
EXPECT_FALSE(rm->ContainsAgent(a2_uid));
EXPECT_FALSE(rm->ContainsAgent(b0_uid));
EXPECT_FALSE(rm->ContainsAgent(b1_uid));
EXPECT_EQ(0u, rm->GetNumAgents());
}
// Verifies GetAgent(uid) returns the agent added with that uid, with uids
// assigned sequentially starting at ref_uid.
inline void RunPushBackAndGetAgentTest() {
const double kEpsilon = abs_error<double>::value;
Simulation simulation("RunPushBackAndGetAgentTest");
auto* rm = simulation.GetResourceManager();
auto ref_uid = AgentUid(simulation.GetAgentUidGenerator()->GetHighestIndex());
rm->AddAgent(new A(12));
rm->AddAgent(new A(34));
rm->AddAgent(new B(3.14));
rm->AddAgent(new B(6.28));
rm->AddAgent(new A(87));
EXPECT_EQ(dynamic_cast<A*>(rm->GetAgent(ref_uid))->GetData(), 12);
EXPECT_EQ(dynamic_cast<A*>(rm->GetAgent(ref_uid + 1))->GetData(), 34);
EXPECT_EQ(dynamic_cast<A*>(rm->GetAgent(ref_uid + 4))->GetData(), 87);
EXPECT_NEAR(dynamic_cast<B*>(rm->GetAgent(ref_uid + 2))->GetData(), 3.14,
kEpsilon);
EXPECT_NEAR(dynamic_cast<B*>(rm->GetAgent(ref_uid + 3))->GetData(), 6.28,
kEpsilon);
}
// -----------------------------------------------------------------------------
// https://github.com/osmhpi/pgasus/blob/775a5f90d8f6fa89cfb93eac6de16dcfe27167ce/src/util/mmaphelper.cpp
// Round ptr down to the start of its memory page.
// NOTE(review): assumes a 4 KiB page size -- confirm for the target platform.
inline static void* AlignPage(const void* ptr) {
  constexpr uintptr_t kPageSize = 0x1000;
  auto addr = reinterpret_cast<uintptr_t>(ptr);
  return reinterpret_cast<void*>(addr - (addr % kPageSize));  // NOLINT
}
// Returns the NUMA node backing ptr's page, or -1 on failure.
// numa_move_pages with a null target-node array acts as a query: it writes
// the page's current node into loc without moving anything.
inline int GetNumaNodeForMemory(const void* ptr) {
int result, loc;
void* pptr = AlignPage(ptr);
result = numa_move_pages(0, 1, &pptr, nullptr, &loc, 0);
return (result != 0) ? -1 : loc;
}
// Split num_agents across NUMA nodes proportionally to the number of threads
// pinned to each node; node 0 absorbs the rounding remainder.
inline std::vector<uint64_t> GetAgentsPerNuma(uint64_t num_agents) {
  auto* ti = ThreadInfo::GetInstance();
  const int num_nodes = ti->GetNumaNodes();
  const auto max_threads = ti->GetMaxThreads();
  std::vector<uint64_t> result(num_nodes);
  uint64_t assigned = 0;
  for (int node = 1; node < num_nodes; ++node) {
    uint64_t share = num_agents * ti->GetThreadsInNumaNode(node) / max_threads;
    result[node] = share;
    assigned += share;
  }
  result[0] = num_agents - assigned;
  return result;
}
// -----------------------------------------------------------------------------
// Functor passed to ForEachAgentParallel: records which agents were visited
// (keyed by their payload), counts the calls, and -- when numa_checks is
// set -- tallies NUMA placement and thread-affinity errors.
struct CheckForEachAgentFunctor : Functor<void, Agent*> {
bool numa_checks;
std::vector<bool> found;  // found[payload] == true once that agent was seen
std::atomic<uint64_t> cnt;
// counts the number of agents in each numa domain
std::vector<uint64_t> numa_agent_cnts;
std::atomic<uint64_t> numa_memory_errors;
std::atomic<uint64_t> numa_thread_errors;
CheckForEachAgentFunctor(uint64_t num_agent_per_type, bool numa_checks)
: numa_checks(numa_checks),
cnt(0),
numa_memory_errors(0),
numa_thread_errors(0) {
found.resize(2 * num_agent_per_type);
for (uint64_t i = 0; i < found.size(); ++i) {
found[i] = false;
}
auto* ti = ThreadInfo::GetInstance();
numa_agent_cnts.resize(ti->GetNumaNodes());
}
void operator()(Agent* agent) override {
// recover the agent's payload, which doubles as its index into found
size_t index = 0;
if (A* a = dynamic_cast<A*>(agent)) {
index = a->GetData();
} else if (B* b = dynamic_cast<B*>(agent)) {
index = std::round(b->GetData());
}
auto* rm = Simulation::GetActive()->GetResourceManager();
auto handle = rm->GetAgentHandle(agent->GetUid());
#pragma omp critical
{
found[index] = true;
// verify that a thread processes agents on the same NUMA node.
if (numa_checks && handle.GetNumaNode() != GetNumaNodeForMemory(agent)) {
numa_memory_errors++;
}
if (numa_checks &&
handle.GetNumaNode() != numa_node_of_cpu(sched_getcpu())) {
numa_thread_errors++;
}
numa_agent_cnts[handle.GetNumaNode()]++;
}
cnt++;
}
};
// Runs CheckForEachAgentFunctor over all agents in rm and verifies every
// agent was visited exactly once; with numa_checks, also verifies the
// per-node agent distribution matches GetAgentsPerNuma.
inline void CheckForEachAgent(ResourceManager* rm, uint64_t num_agent_per_type,
bool numa_checks = false) {
CheckForEachAgentFunctor functor(num_agent_per_type, numa_checks);
rm->ForEachAgentParallel(functor);
EXPECT_EQ(2 * num_agent_per_type, functor.cnt.load());
ASSERT_EQ(2 * num_agent_per_type, functor.found.size());
for (uint64_t i = 0; i < functor.found.size(); ++i) {
if (!functor.found[i]) {
FAIL() << "ForEachAgentParallel was not called for element with data_="
<< i;
}
}
if (numa_checks) {
EXPECT_EQ(0u, functor.numa_memory_errors.load());
EXPECT_EQ(0u, functor.numa_thread_errors.load());
auto agent_per_numa = GetAgentsPerNuma(2 * num_agent_per_type);
auto* ti = ThreadInfo::GetInstance();
for (int n = 0; n < ti->GetNumaNodes(); ++n) {
EXPECT_EQ(agent_per_numa[n], functor.numa_agent_cnts[n]);
}
}
}
// Adds num_agent_per_type agents of each type at known x positions, runs the
// parallel iterator before and after LoadBalance, and verifies uids still
// resolve to the correct agents after the rebalancing moved them in memory.
inline void RunSortAndForEachAgentParallel(uint64_t num_agent_per_type) {
Simulation simulation("RunSortAndForEachAgentParallel");
auto* rm = simulation.GetResourceManager();
std::unordered_map<AgentUid, double> a_x_values;
std::unordered_map<AgentUid, double> b_x_values;
for (uint64_t i = 0; i < num_agent_per_type; ++i) {
double x_pos = i * 30.0;
A* a = new A(i);
a->SetDiameter(10);
a->SetPosition({x_pos, 0, 0});
rm->AddAgent(a);
a_x_values[a->GetUid()] = x_pos;
B* b = new B(i + num_agent_per_type);
b->SetDiameter(10);
b->SetPosition({x_pos, 0, 0});
rm->AddAgent(b);
b_x_values[b->GetUid()] = x_pos;
}
CheckForEachAgent(rm, num_agent_per_type);
simulation.GetEnvironment()->Update();
rm->LoadBalance();
// with numa_checks enabled: LoadBalance must have placed agents correctly
CheckForEachAgent(rm, num_agent_per_type, true);
// check if agent uids still point to the correct object
for (auto& entry : a_x_values) {
auto x_actual = rm->GetAgent(entry.first)->GetPosition()[0];
EXPECT_EQ(x_actual, entry.second);
}
for (auto& entry : b_x_values) {
auto x_actual = rm->GetAgent(entry.first)->GetPosition()[0];
EXPECT_EQ(x_actual, entry.second);
}
}
// Exercises RunSortAndForEachAgentParallel with agent counts around the
// thread count (below, equal, above, non-multiple) plus a large fixed count.
inline void RunSortAndForEachAgentParallel() {
  const int nthreads = omp_get_max_threads();
  const std::vector<int> counts = {std::max(1, nthreads - 1), nthreads,
                                   3 * nthreads, 3 * nthreads + 1};
  for (int count : counts) {
    RunSortAndForEachAgentParallel(count);
  }
  RunSortAndForEachAgentParallel(1000);
}
// -----------------------------------------------------------------------------
// Functor for the dynamic (batched) parallel iterator: records which agents
// were visited, counts calls, and tallies NUMA memory-placement errors.
// Thread-affinity errors are measured separately (see CheckNumaThreadErrors)
// because the critical section here would skew them.
struct CheckForEachAgentDynamicFunctor : Functor<void, Agent*, AgentHandle> {
CheckForEachAgentDynamicFunctor(bool numa_checks, std::vector<bool>& found)
: numa_checks_(numa_checks),
found_(found),
cnt(0),
numa_memory_errors(0) {
auto* ti = ThreadInfo::GetInstance();
numa_agent_cnts.resize(ti->GetNumaNodes());
}
void operator()(Agent* agent, AgentHandle handle) override {
#pragma omp critical
{
// recover the agent's payload, which doubles as its index into found_
size_t index = 0;
if (A* a = dynamic_cast<A*>(agent)) {
index = a->GetData();
} else if (B* b = dynamic_cast<B*>(agent)) {
index = std::round(b->GetData());
}
found_[index] = true;
// verify that a thread processes agents on the same NUMA node.
if (numa_checks_ && handle.GetNumaNode() != GetNumaNodeForMemory(agent)) {
numa_memory_errors++;
}
numa_agent_cnts[handle.GetNumaNode()]++;
}
cnt++;
}
bool numa_checks_;
std::vector<bool>& found_;
std::atomic<uint64_t> cnt;
// counts the number of agents in each numa domain
std::vector<uint64_t> numa_agent_cnts;
// If an agent is not stored on the NUMA indicated, it is a memory
// error.
std::atomic<uint64_t> numa_memory_errors;
};
// Counts how often an agent is processed by a thread outside the agent's
// NUMA domain. The volatile sin() busy-loop makes each invocation take long
// enough that work stealing across domains becomes observable.
struct CheckNumaThreadErrors : Functor<void, Agent*, AgentHandle> {
CheckNumaThreadErrors() : numa_thread_errors(0) {
ti_ = ThreadInfo::GetInstance();
}
void operator()(Agent* agent, AgentHandle handle) override {
// artificial work; volatile prevents the compiler from removing it
volatile double d = 0;
for (int i = 0; i < 10000; i++) {
d += std::sin(i);
}
if (handle.GetNumaNode() != ti_->GetNumaNode(omp_get_thread_num())) {
numa_thread_errors++;
}
}
// If an agent is processed by a thread that doesn't belong to the NUMA
// domain the agent is stored on, it is a thread error.
std::atomic<uint64_t> numa_thread_errors;
ThreadInfo* ti_;
};
// Runs the batched parallel iterator and verifies every agent is visited
// exactly once; with numa_checks, also bounds the allowed NUMA memory and
// thread-affinity error rates and checks the per-node distribution.
inline void CheckForEachAgentDynamic(ResourceManager* rm,
uint64_t num_agent_per_type,
uint64_t batch_size,
bool numa_checks = false) {
std::vector<bool> found(2 * num_agent_per_type);
ASSERT_EQ(2 * num_agent_per_type, found.size());
for (uint64_t i = 0; i < found.size(); ++i) {
found[i] = false;
}
auto* ti = ThreadInfo::GetInstance();
CheckForEachAgentDynamicFunctor functor(numa_checks, found);
rm->ForEachAgentParallel(batch_size, functor);
// critical sections increase the variance of numa_thread_errors.
// Therefore, there are checked separately.
CheckNumaThreadErrors check_numa_thread_functor;
rm->ForEachAgentParallel(batch_size, check_numa_thread_functor);
// verify that the function has been called once for each agent
EXPECT_EQ(2 * num_agent_per_type, functor.cnt.load());
ASSERT_EQ(2 * num_agent_per_type, found.size());
for (uint64_t i = 0; i < found.size(); ++i) {
if (!found[i]) {
FAIL() << "ForEachAgentParallel was not called for element with data_="
<< i;
}
}
if (numa_checks) {
// If there are memory errors, check of
// `cat /proc/sys/kernel/numa_balancing` is zero.
// Automatic rebalancing can lead to numa memory errors.
// only 0.1% of all agents may be on a wrong numa node
EXPECT_GT(0.001, (functor.numa_memory_errors.load() + 0.0) /
(2 * num_agent_per_type));
// work stealing can cause thread errors. This check ensures that at least
// 75% of the work is done by the correct CPU-Memory mapping.
if (num_agent_per_type >
20 * static_cast<uint64_t>(omp_get_max_threads())) {
EXPECT_GT(num_agent_per_type / 4,
check_numa_thread_functor.numa_thread_errors.load());
}
auto agent_per_numa = GetAgentsPerNuma(2 * num_agent_per_type);
for (int n = 0; n < ti->GetNumaNodes(); ++n) {
EXPECT_EQ(agent_per_numa[n], functor.numa_agent_cnts[n]);
}
}
}
// Like RunSortAndForEachAgentParallel, but exercises the batched (dynamic)
// parallel iterator with the given batch_size before and after LoadBalance,
// then verifies uids still resolve to the correct agents.
inline void RunSortAndForEachAgentParallelDynamic(uint64_t num_agent_per_type,
uint64_t batch_size) {
Simulation simulation("RunSortAndForEachAgentParallelDynamic");
auto* rm = simulation.GetResourceManager();
std::unordered_map<AgentUid, double> a_x_values;
std::unordered_map<AgentUid, double> b_x_values;
for (uint64_t i = 0; i < num_agent_per_type; ++i) {
double x_pos = i * 30.0;
A* a = new A(i);
a->SetDiameter(10);
a->SetPosition({x_pos, 0, 0});
rm->AddAgent(a);
a_x_values[a->GetUid()] = x_pos;
B* b = new B(i + num_agent_per_type);
b->SetDiameter(10);
b->SetPosition({x_pos, 0, 0});
rm->AddAgent(b);
b_x_values[b->GetUid()] = x_pos;
}
CheckForEachAgentDynamic(rm, num_agent_per_type, batch_size);
simulation.GetEnvironment()->Update();
rm->LoadBalance();
CheckForEachAgentDynamic(rm, num_agent_per_type, batch_size, true);
// check if agent uids still point to the correct object
for (auto& entry : a_x_values) {
auto x_actual = rm->GetAgent(entry.first)->GetPosition()[0];
EXPECT_EQ(x_actual, entry.second);
}
for (auto& entry : b_x_values) {
auto x_actual = rm->GetAgent(entry.first)->GetPosition()[0];
EXPECT_EQ(x_actual, entry.second);
}
}
// Sweeps agent counts and batch sizes around the thread count, then runs a
// large case (num_threads * 1000 agents per type) for every batch size.
inline void RunSortAndForEachAgentParallelDynamic() {
  const int nthreads = omp_get_max_threads();
  const std::vector<int> sizes = {std::max(1, nthreads - 1), nthreads,
                                  3 * nthreads, 3 * nthreads + 1};
  for (int agents : sizes) {
    for (int batch : sizes) {
      RunSortAndForEachAgentParallelDynamic(agents, batch);
    }
  }
  for (int batch : sizes) {
    RunSortAndForEachAgentParallelDynamic(nthreads * 1000, batch);
  }
}
// Round-trips the ResourceManager through a ROOT file: add agents and two
// diffusion grids, persist, clear, restore, and validate all restored state.
inline void RunIOTest() {
const double kEpsilon = abs_error<double>::value;
Simulation simulation("ResourceManagerTest-RunIOTest");
auto* rm = simulation.GetResourceManager();
auto ref_uid = AgentUid(simulation.GetAgentUidGenerator()->GetHighestIndex());
remove(ROOTFILE);
// setup
rm->AddAgent(new A(12));
rm->AddAgent(new A(34));
rm->AddAgent(new A(42));
rm->AddAgent(new B(3.14));
rm->AddAgent(new B(6.28));
DiffusionGrid* dgrid_1 = new EulerGrid(0, "Kalium", 0.4, 0, 2);
DiffusionGrid* dgrid_2 = new EulerGrid(1, "Natrium", 0.2, 0.1, 1);
rm->AddDiffusionGrid(dgrid_1);
rm->AddDiffusionGrid(dgrid_2);
// backup
WritePersistentObject(ROOTFILE, "rm", *rm, "new");
rm->ClearAgents();
// restore
ResourceManager* restored_rm = nullptr;
GetPersistentObject(ROOTFILE, "rm", restored_rm);
// the uid->agent map is transient state and must be rebuilt after a restore
restored_rm->RebuildAgentUidMap();
// validate
EXPECT_EQ(5u, restored_rm->GetNumAgents());
EXPECT_EQ(12, dynamic_cast<A*>(restored_rm->GetAgent(ref_uid))->GetData());
EXPECT_EQ(34,
dynamic_cast<A*>(restored_rm->GetAgent(ref_uid + 1))->GetData());
EXPECT_EQ(42,
dynamic_cast<A*>(restored_rm->GetAgent(ref_uid + 2))->GetData());
EXPECT_NEAR(3.14,
dynamic_cast<B*>(restored_rm->GetAgent(ref_uid + 3))->GetData(),
kEpsilon);
EXPECT_NEAR(6.28,
dynamic_cast<B*>(restored_rm->GetAgent(ref_uid + 4))->GetData(),
kEpsilon);
EXPECT_EQ(0, restored_rm->GetDiffusionGrid(0)->GetSubstanceId());
EXPECT_EQ(1, restored_rm->GetDiffusionGrid(1)->GetSubstanceId());
EXPECT_EQ("Kalium", restored_rm->GetDiffusionGrid(0)->GetSubstanceName());
EXPECT_EQ("Natrium", restored_rm->GetDiffusionGrid(1)->GetSubstanceName());
// NOTE(review): grids were built with coefficients 0.4 and 0.2 but 0.6 and
// 0.8 are expected back -- presumably DiffusionGrid stores a derived value
// (e.g. 1 - dc); confirm against the DiffusionGrid implementation.
EXPECT_EQ(0.6,
restored_rm->GetDiffusionGrid(0)->GetDiffusionCoefficients()[0]);
EXPECT_EQ(0.8,
restored_rm->GetDiffusionGrid(1)->GetDiffusionCoefficients()[0]);
delete restored_rm;
remove(ROOTFILE);
}
} // namespace bdm
#endif // UNIT_CORE_RESOURCE_MANAGER_TEST_H_
|
section.c | /** This Program uses Parallel Sections
* Sai Suraj
* 20/09/2021
**/
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
/* Body of the first parallel section: prints the executing thread id
 * three times. */
void function1()
{
    int i = 0;
    while (i <= 2) {
        printf("\nSection 1 is executed by thread number : %d \n", omp_get_thread_num());
        i++;
    }
}
/* Body of the second parallel section: prints the executing thread id
 * four times. */
void function2()
{
    int j = 0;
    while (j <= 3) {
        printf("\nSection 2 is executed by thread number : %d \n", omp_get_thread_num());
        j++;
    }
}
/* Runs the two section bodies concurrently, then sums 1..n sequentially
 * and prints the result. */
int main(int argc, char* argv[])
{
    int n =10;
    int sum=0;
    /* Use 8 threads when creating OpenMP parallel regions.
     * (The comment previously said 4, but the code requests 8.) */
    omp_set_num_threads(8);
    { // Create the parallel sections
#pragma omp parallel sections
        {
#pragma omp section
            {
                function1();
            }
#pragma omp section
            {
                function2();
            }
        }
        /* Sequential sum of 1..n; runs after the sections have joined. */
        for (int i = 1; i <=n; ++i)
            sum += i;
    }
    printf("\n The Sum of first %d elements : %d.\n\n",n,sum);
    return 0;
}
|
citrix_ns_fmt_plug.c | /*
* Description from Nicolas Ruff:
* - Salt value is hashed as an hexadecimal string, not bytes.
* - The trailing NULL byte of password string is taken into account during
* hashing.
* - The leading '1' is actually the string length
* '1' = 49 = len('1') + len(hex_salt) + len(hex_sha1)
*
* ---------------------------------------
* import hashlib
*
* def netscaler_hash( rand_bytes, pwd ):
* s = hashlib.sha1()
* s.update( rand_bytes )
* s.update( pwd )
* return "1" + rand_bytes + s.hexdigest()
*
* # TEST VECTOR
* # 14dfca1e6c0f5f3d96526c3ce70849992b7fad3e324cf6b0f
*
* rand_bytes = "4dfca1e6"
* pwd = "nsroot\x00"
* print netscaler_hash( rand_bytes, pwd )
* ---------------------------------------
*
* This software is Copyright (c) 2013 magnum, and it is hereby released to the
* general public under the following terms: Redistribution and use in source
* and binary forms, with or without modification, are permitted.
*
* This version is hard coded for salt length 8 (for speed).
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_ctrxns;
#elif FMT_REGISTERS_H
john_register_one(&fmt_ctrxns);
#else
#include <string.h>
#ifdef _OPENMP
#ifdef MMX_COEF
#define OMP_SCALE 1024
#else
#define OMP_SCALE 2048
#endif
#include <omp.h>
#endif
#include "arch.h"
#include "misc.h"
#include "formats.h"
#include "options.h"
#include "johnswap.h"
#ifdef MMX_COEF
#define NBKEYS (MMX_COEF * SHA1_SSE_PARA)
#endif
#include "sse-intrinsics.h"
#include "common.h"
#include "sha.h"
#include "memdbg.h" // Must be last included header
#define FORMAT_LABEL "Citrix_NS10"
#define FORMAT_NAME "Netscaler 10"
#define ALGORITHM_NAME "SHA1 " SHA1_ALGORITHM_NAME
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH 0
#define PLAINTEXT_LENGTH (55 - SALT_SIZE - 1)
#define BINARY_SIZE 20
#define BINARY_ALIGN 4
#define SALT_SIZE 8
#define SALT_ALIGN 4
#ifdef MMX_COEF
#define MIN_KEYS_PER_CRYPT NBKEYS
#define MAX_KEYS_PER_CRYPT NBKEYS
#define GETPOS(i, index) ((index & (MMX_COEF - 1)) * 4 + ((i) & (0xffffffff - 3)) * MMX_COEF + (((i) & 3) ^ 3) + (index >> (MMX_COEF >> 1)) * SHA_BUF_SIZ * MMX_COEF * 4) //for endianity conversion
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif
static struct fmt_tests tests[] = {
{"100000000f1dc96f425971ba590a076fd0f8bccbf25c1ba0c", ""},
{"14623718525fe334bbd9c0704e06ce134ef17b51f6b33548c", " "},
{"15c5c5c5c6ccd884f6383f55a6aeba5f847775e57ab012675", "Tw"},
{"13333333319143136ba9ff9e18d1cb022b63df0926de9509e", "333"},
{"144434241d7ce89a7484cd202400639692258dde37efc29c5", "four"},
{"100010203e09cefed1847b7a2a5e7a5d2cdc67e8a56ed0bdd", "fiver"},
{"14dfca1e6c0f5f3d96526c3ce70849992b7fad3e324cf6b0f", "nsroot"},
{"1deadcafe7587ea23b25a6ccf3fd53192e36ad3e9a2553b20", "magnum!"},
{NULL}
};
#ifdef MMX_COEF
static unsigned char (*saved_key)[SHA_BUF_SIZ * 4 * NBKEYS];
static unsigned char (*crypt_key)[BINARY_SIZE * NBKEYS];
static int kpc;
#else
static char saved_salt[SALT_SIZE];
static char (*saved_plain)[PLAINTEXT_LENGTH + 1];
static ARCH_WORD_32 (*crypt_key)[BINARY_SIZE / 4];
#endif
/* One-time format initialization: scale key counts for OpenMP and allocate
 * the key/hash buffers. */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
    int omp_t;
    omp_t = omp_get_max_threads();
    /* min is scaled by the thread count, max additionally by OMP_SCALE so
     * each thread has a large batch per crypt_all() call. */
    self->params.min_keys_per_crypt *= omp_t;
    omp_t *= OMP_SCALE;
    self->params.max_keys_per_crypt *= omp_t;
#endif
#ifdef MMX_COEF
    /* SIMD: interleaved buffers, one group of NBKEYS lanes per element,
     * aligned for vector loads/stores. */
    saved_key = mem_calloc_tiny(sizeof(*saved_key) * self->params.max_keys_per_crypt / NBKEYS, MEM_ALIGN_SIMD);
    crypt_key = mem_calloc_tiny(sizeof(*crypt_key) * self->params.max_keys_per_crypt / NBKEYS, MEM_ALIGN_SIMD);
    kpc = self->params.max_keys_per_crypt;
#else
    saved_plain = mem_calloc_tiny(sizeof(*saved_plain) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
    crypt_key = mem_calloc_tiny(sizeof(*crypt_key) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
#endif
}
/* Decode the hex SHA-1 digest at the end of the ciphertext into 20 raw
 * bytes. Returns a pointer to a static buffer (JtR binary() convention). */
static void *binary(char *ciphertext)
{
    static unsigned char *realcipher;
    int i, len;
    if (!realcipher)
        realcipher = mem_alloc_tiny(BINARY_SIZE, MEM_ALIGN_WORD);
    /* The first character is the ASCII total length ('1' == 49); the hex
     * digest is the last 2*BINARY_SIZE characters of the string. */
    len = *ciphertext;
    ciphertext += len - 2 * BINARY_SIZE;
    for(i = 0; i < BINARY_SIZE; i++)
    {
        realcipher[i] = atoi16[ARCH_INDEX(ciphertext[i * 2])] * 16
            + atoi16[ARCH_INDEX(ciphertext[i * 2 + 1])];
    }
#ifdef MMX_COEF
    /* SIMD code keeps hash words byte-swapped; match that layout here. */
    alter_endianity(realcipher, BINARY_SIZE);
#endif
    return (void*)realcipher;
}
/* Accept only strings whose first char is '1' (the ASCII length byte:
 * 49 == 1 + 8 salt chars + 40 digest chars), whose total length equals
 * that value, and which consist entirely of lowercase hex digits. */
static int valid(char *ciphertext, struct fmt_main *self)
{
    int len;
    len = *ciphertext;
    if (len != (int)'1')
        return 0;
    /* '1' is 49, which is also the expected total string length. */
    if (strlen(ciphertext) != len)
        return 0;
    /* The leading '1' is itself a hex digit, so the whole string must be
     * one unbroken hex run of exactly len characters. */
    if (len != strspn(ciphertext, "0123456789abcdef"))
        return 0;
    return 1;
}
/* Store one candidate password. In SIMD mode the key bytes are written
 * directly after the salt (offset SALT_SIZE) in the lane's interleaved
 * SHA-1 block, already endian-swapped, with the 0x80 padding marker placed
 * after the implicit trailing NUL and the bit length stored in block word
 * 15. In scalar mode the plaintext is simply copied aside. */
static void set_key(char *key, int index)
{
#ifdef MMX_COEF
    const ARCH_WORD_32 *wkey = (ARCH_WORD_32*)key;
    ARCH_WORD_32 *keybuf_word = (ARCH_WORD_32*)&saved_key[0][GETPOS(SALT_SIZE ^ 3, index)];
    unsigned int len;
    ARCH_WORD_32 temp;
    /* len counts salt + key bytes; the salt is already in the buffer. */
    len = SALT_SIZE;
    /* Copy the key a 32-bit word at a time until a NUL byte is seen.
     * Each early-exit case appends the 0x80 padding byte one position
     * after the key's terminating NUL (the NUL itself is hashed). */
    while((temp = *wkey++) & 0xff) {
        if (!(temp & 0xff00))
        {
            *keybuf_word = JOHNSWAP((temp & 0xff) | (0x80 << 16));
            len++;
            goto key_cleaning;
        }
        if (!(temp & 0xff0000))
        {
            *keybuf_word = JOHNSWAP((temp & 0xffff) | (0x80 << 24));
            len+=2;
            goto key_cleaning;
        }
        *keybuf_word = JOHNSWAP(temp);
        keybuf_word += MMX_COEF;
        if (!(temp & 0xff000000))
        {
            *keybuf_word = 0x80000000;
            len+=3;
            goto key_cleaning;
        }
        len += 4;
    }
    /* Key length was a multiple of 4: NUL then 0x80 start a fresh word. */
    *keybuf_word = 0x00800000;
key_cleaning:
    /* Zero the rest of the block (leftovers from a previous, longer key). */
    keybuf_word += MMX_COEF;
    while(*keybuf_word) {
        *keybuf_word = 0;
        keybuf_word += MMX_COEF;
    }
    len += 1; /* Trailing null is included */
    /* Word 15 of the SHA-1 block holds the message length in bits.
     * NOTE(review): (index&3) and (index>>2) assume MMX_COEF == 4 —
     * confirm if other vector widths are ever built. */
    ((unsigned int*)saved_key)[15*MMX_COEF + (index&3) + (index>>2)*SHA_BUF_SIZ*MMX_COEF] = len << 3;
#else
    strnzcpy(saved_plain[index], key, PLAINTEXT_LENGTH + 1);
#endif
}
/* Return the plaintext for lane `index`. In SIMD mode it is reconstructed
 * from the interleaved buffer: the stored bit length (block word 15) gives
 * the byte count after subtracting the salt and the trailing NUL. */
static char *get_key(int index)
{
#ifdef MMX_COEF
    unsigned int i, s;
    static char out[PLAINTEXT_LENGTH + 1];
    /* s = total hashed bytes - SALT_SIZE - 1 (trailing NUL) = key length. */
    s = (((unsigned int*)saved_key)[15*MMX_COEF + (index&(MMX_COEF-1)) + (index>>(MMX_COEF>>1))*SHA_BUF_SIZ*MMX_COEF] >> 3) - SALT_SIZE - 1;
    for(i = 0; i < s; i++)
        out[i] = ((char*)saved_key)[GETPOS(SALT_SIZE + i, index)];
    out[i] = 0;
    return out;
#else
    return saved_plain[index];
#endif
}
/* Extract the 8-character hex salt that follows the leading length byte.
 * Returns a pointer to a static buffer; the union forces word alignment
 * so salt_hash() can read it as an ARCH_WORD_32. */
static void *salt(char *ciphertext)
{
    static union {
        unsigned char c[SALT_SIZE];
        ARCH_WORD_32 w;
    } out;

    /* Skip the leading '1', then copy the raw salt characters. */
    memcpy(out.c, ciphertext + 1, SALT_SIZE);
    return (void*)out.c;
}
/* Install the current salt. In SIMD mode the salt occupies the first
 * SALT_SIZE bytes of every lane's SHA-1 block, so it is written into each
 * lane; in scalar mode it is kept in a single static buffer. */
static void set_salt(void *salt)
{
#ifdef MMX_COEF
    int i, index;
    for (index = 0; index < kpc; index++)
        for (i = 0; i < SALT_SIZE; i++)
            saved_key[0][GETPOS(i, index)] =
                ((unsigned char*)salt)[i];
#else
    memcpy(saved_salt, salt, SALT_SIZE);
#endif
}
/* Fast scan: does the first 32-bit word of `binary` match the first word
 * of any computed hash? (Full comparison happens later in cmp_one.) */
static int cmp_all(void *binary, int count)
{
#ifdef MMX_COEF
    unsigned int x, y=0;
    /* Hashes are interleaved 5 words per lane, MMX_COEF lanes per group. */
    for(; y<kpc / MMX_COEF; y++)
        for(x = 0; x < MMX_COEF; x++)
        {
            if(((ARCH_WORD_32*)binary)[0] ==
               ((ARCH_WORD_32*)crypt_key)[x + y * MMX_COEF*5])
                return 1;
        }
    return 0;
#else
    int index = 0;
#ifdef _OPENMP
    for (index = 0; index < count; index++)
#endif
    /* Without OpenMP, MAX_KEYS_PER_CRYPT is 1, so checking index 0 alone
     * covers the whole batch. */
    if (((ARCH_WORD_32*)binary)[0] == crypt_key[index][0])
        return 1;
    return 0;
#endif
}
/* Compare all five 32-bit words (the full 160-bit SHA-1) of `binary`
 * against the hash computed for lane `index`. */
static int cmp_one(void *binary, int index)
{
#ifdef MMX_COEF
    unsigned int x, y;
    /* NOTE(review): &3 and /4 hard-code 4 lanes per group — confirm this
     * matches MMX_COEF for all SIMD builds. */
    x = index & 3;
    y = index / 4;
    if(((ARCH_WORD_32*)binary)[0] != ((ARCH_WORD_32*)crypt_key)[x + y * MMX_COEF*5])
        return 0;
    if(((ARCH_WORD_32*)binary)[1] != ((ARCH_WORD_32*)crypt_key)[x + y * MMX_COEF*5+MMX_COEF*1])
        return 0;
    if(((ARCH_WORD_32*)binary)[2] != ((ARCH_WORD_32*)crypt_key)[x + y * MMX_COEF*5+MMX_COEF*2])
        return 0;
    if(((ARCH_WORD_32*)binary)[3] != ((ARCH_WORD_32*)crypt_key)[x + y * MMX_COEF*5+MMX_COEF*3])
        return 0;
    if(((ARCH_WORD_32*)binary)[4] != ((ARCH_WORD_32*)crypt_key)[x + y * MMX_COEF*5+MMX_COEF*4])
        return 0;
    return 1;
#else
    return !memcmp(binary, crypt_key[index], BINARY_SIZE);
#endif
}
/* Nothing further to verify: cmp_one already compared the complete
 * 160-bit digest, so every candidate that reaches this point matches. */
static int cmp_exact(char *source, int index)
{
    return 1;
}
/* Compute SHA1(salt . password . NUL) for every queued candidate.
 * Returns the number of hashes computed. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
    int count = *pcount;
    int index = 0;
#ifdef _OPENMP
    /* One loop iteration processes one MAX_KEYS_PER_CRYPT-sized batch. */
    int loops = (count + MAX_KEYS_PER_CRYPT - 1) / MAX_KEYS_PER_CRYPT;
#pragma omp parallel for
    for (index = 0; index < loops; ++index)
#endif
    {
#ifdef MMX_COEF
        /* Blocks were fully prepared by set_salt/set_key; hash in place. */
        SSESHA1body(saved_key[index], (unsigned int*)crypt_key[index], NULL, SSEi_MIXED_IN);
#else
        SHA_CTX ctx;
        SHA1_Init(&ctx);
        SHA1_Update(&ctx, (unsigned char*)saved_salt, SALT_SIZE);
        /* +1: the password's trailing NUL byte is part of the hash. */
        SHA1_Update(&ctx, (unsigned char*)saved_plain[index], strlen(saved_plain[index]) + 1);
        SHA1_Final((unsigned char*)crypt_key[index], &ctx);
#endif
    }
    return count;
}
#ifdef MMX_COEF
/* Index of lane `index`'s first hash word in the interleaved SIMD layout
 * (5 words per hash). NOTE(review): &3 and /4 assume MMX_COEF == 4. */
#define HASH_IDX ((index&3)+(index/4)*MMX_COEF*5)
/* Return the low 4..27 bits of the first hash word, for the core's
 * progressively larger hash tables. */
static int get_hash_0(int index) { return ((ARCH_WORD_32*)crypt_key)[HASH_IDX] & 0xf; }
static int get_hash_1(int index) { return ((ARCH_WORD_32*)crypt_key)[HASH_IDX] & 0xff; }
static int get_hash_2(int index) { return ((ARCH_WORD_32*)crypt_key)[HASH_IDX] & 0xfff; }
static int get_hash_3(int index) { return ((ARCH_WORD_32*)crypt_key)[HASH_IDX] & 0xffff; }
static int get_hash_4(int index) { return ((ARCH_WORD_32*)crypt_key)[HASH_IDX] & 0xfffff; }
static int get_hash_5(int index) { return ((ARCH_WORD_32*)crypt_key)[HASH_IDX] & 0xffffff; }
static int get_hash_6(int index) { return ((ARCH_WORD_32*)crypt_key)[HASH_IDX] & 0x7ffffff; }
#else
static int get_hash_0(int index) { return crypt_key[index][0] & 0xf; }
static int get_hash_1(int index) { return crypt_key[index][0] & 0xff; }
static int get_hash_2(int index) { return crypt_key[index][0] & 0xfff; }
static int get_hash_3(int index) { return crypt_key[index][0] & 0xffff; }
static int get_hash_4(int index) { return crypt_key[index][0] & 0xfffff; }
static int get_hash_5(int index) { return crypt_key[index][0] & 0xffffff; }
static int get_hash_6(int index) { return crypt_key[index][0] & 0x7ffffff; }
#endif
/* Bucket a salt by its first 32 bits (the salt buffer is word-aligned). */
static int salt_hash(void *salt)
{
    return *(ARCH_WORD_32*)salt & (SALT_HASH_SIZE - 1);
}
/* Format descriptor registered with the JtR core: the parameter block
 * (sizes, flags, test vectors) followed by the method table. */
struct fmt_main fmt_ctrxns = {
    {
        FORMAT_LABEL,
        FORMAT_NAME,
        ALGORITHM_NAME,
        BENCHMARK_COMMENT,
        BENCHMARK_LENGTH,
        PLAINTEXT_LENGTH,
        BINARY_SIZE,
        BINARY_ALIGN,
        SALT_SIZE,
        SALT_ALIGN,
        MIN_KEYS_PER_CRYPT,
        MAX_KEYS_PER_CRYPT,
        FMT_CASE | FMT_8_BIT | FMT_OMP,
#if FMT_MAIN_VERSION > 11
        { NULL },
#endif
        tests
    }, {
        init,
        fmt_default_done,
        fmt_default_reset,
        fmt_default_prepare,
        valid,
        fmt_default_split,
        binary,
        salt,
#if FMT_MAIN_VERSION > 11
        { NULL },
#endif
        fmt_default_source,
        {
            fmt_default_binary_hash_0,
            fmt_default_binary_hash_1,
            fmt_default_binary_hash_2,
            fmt_default_binary_hash_3,
            fmt_default_binary_hash_4,
            fmt_default_binary_hash_5,
            fmt_default_binary_hash_6
        },
        salt_hash,
        set_salt,
        set_key,
        get_key,
        fmt_default_clear_keys,
        crypt_all,
        {
            get_hash_0,
            get_hash_1,
            get_hash_2,
            get_hash_3,
            get_hash_4,
            get_hash_5,
            get_hash_6
        },
        cmp_all,
        cmp_one,
        cmp_exact
    }
};
#endif /* plugin stanza */
|
opt2.c | #include <malloc.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include "omp.h"
#include <sys/time.h>
/* Wall-clock time in microseconds, as a double (seconds * 1e6 + usec). */
double getusec_() {
    struct timeval now;
    gettimeofday(&now, NULL);
    return 1e6 * (double)now.tv_sec + (double)now.tv_usec;
}
#define START_COUNT_TIME stamp = getusec_();
#define STOP_COUNT_TIME(_m) stamp = getusec_() - stamp;\
stamp = stamp/1e6;\
printf ("%s: %0.6f\n",(_m), stamp);
// N and MIN must be powers of 2
long N;
long MIN_SORT_SIZE;
long MIN_MERGE_SIZE;
int CUTOFF;
#define T int
void basicsort(long n, T data[n]);
void basicmerge(long n, T left[n], T right[n], T result[n*2], long start, long length);
/* Merge `left` and `right` (each of n elements) into result[start ..
 * start+length). Recursively splits the output range into tasks until
 * MIN_MERGE_SIZE is reached or task creation was cut off (final tasks). */
void merge(long n, T left[n], T right[n], T result[n*2], long start, long length,int i) {
    if (length < MIN_MERGE_SIZE*2L || omp_in_final()) {
        // Base case
        basicmerge(n, left, right, result, start, length);
    } else {
        // Recursive decomposition
        // NOTE(review): the depend items name only the first element of
        // each half as a sentinel for the whole range — confirm the
        // sibling tasks in multisort() use matching addresses.
        #pragma omp task final(i == CUTOFF) depend(in: left[0], right[0])
        merge(n, left, right, result, start, length/2,i+1);
        #pragma omp task final(i == CUTOFF) depend(in: left[length/2], right[length/2])
        merge(n, left, right, result, start + length/2, length/2,i+1);
        /* Wait so the caller sees a fully merged range on return. */
        #pragma omp taskwait
    }
}
/* Sort data[0..n) in place, using tmp[0..n) as scratch.
 * `i` is the recursion depth: final(i == CUTOFF) stops generating nested
 * tasks below the cut-off level, and omp_in_final() short-circuits to the
 * sequential base case once inside a final task. */
void multisort(long n, T data[n], T tmp[n],int i) {
    if (n >= MIN_SORT_SIZE*4L && !omp_in_final()) {
        // Recursive decomposition: sort the four quarters concurrently.
        #pragma omp task final(i == CUTOFF) depend(out: data[0])
        multisort(n/4L, &data[0], &tmp[0],i+1);
        #pragma omp task final(i == CUTOFF) depend(out: data[n/4L])
        multisort(n/4L, &data[n/4L], &tmp[n/4L],i+1);
        #pragma omp task final(i == CUTOFF) depend(out: data[n/2L])
        multisort(n/4L, &data[n/2L], &tmp[n/2L],i+1);
        #pragma omp task final(i == CUTOFF) depend(out: data[3L*n/4L])
        multisort(n/4L, &data[3L*n/4L], &tmp[3L*n/4L],i+1);
        #pragma omp taskwait
        // Merge quarters into halves (in tmp), then halves back into data.
        #pragma omp task final(i == CUTOFF) depend(in: data[0], data[n/4L]) depend(out: tmp[0])
        merge(n/4L, &data[0], &data[n/4L], &tmp[0], 0, n/2L,i+1);
        // BUG FIX: the dependence address must use the local size n, not
        // the global N; the old depend(in: data[N/2L]) named an unrelated
        // element whenever n != N, expressing the wrong task dependence.
        #pragma omp task final(i == CUTOFF) depend(in: data[n/2L], data[3L*n/4L]) depend(out: tmp[n/2L])
        merge(n/4L, &data[n/2L], &data[3L*n/4L], &tmp[n/2L], 0, n/2L,i+1);
        #pragma omp task final(i == CUTOFF) depend(in: tmp[0], tmp[n/2L])
        merge(n/2L, &tmp[0], &tmp[n/2L], &data[0], 0, n,i+1);
        #pragma omp taskwait
    } else {
        // Base case: small range, or inside a final task — sort directly.
        basicsort(n, data);
    }
}
/* Fill data[0..length) with a pseudo-random sequence; position 0 is
 * seeded by rand(), each later value is derived from its predecessor. */
static void initialize(long length, T data[length]) {
    // NOTE(review): each taskloop grain reads data[i-1] written by the
    // previous grain with no ordering between tasks — values at grain
    // boundaries are racy; confirm deterministic init is not required.
    // NOTE(review): grainsize evaluates to 0 when length < the thread
    // count, which is invalid for the grainsize clause — verify inputs.
    #pragma omp taskloop grainsize(length/omp_get_max_threads())
    for (long i = 0; i < length; i++) {
        if (i==0) {
            data[i] = rand();
        } else {
            data[i] = ((data[i-1]+1) * i * 104723L) % N;
        }
    }
}
/* Zero out data[0..length), splitting the range across taskloop tasks. */
static void clear(long length, T data[length]) {
    #pragma omp taskloop grainsize(length/omp_get_max_threads())
    for (long pos = 0; pos < length; pos++) {
        data[pos] = 0;
    }
}
/* Verify data[0..n) is in non-decreasing order; print the number of
 * unordered adjacent pairs if any are found. */
void check_sorted(long n, T data[n])
{
    int unsorted=0;
    /* BUG FIX: the loop index must be long — with an int index and
     * n > INT_MAX the counter overflows (signed overflow is UB) and the
     * loop cannot terminate correctly. */
    for (long i=1; i<n; i++)
        if (data[i-1] > data[i]) unsorted++;
    if (unsorted > 0)
        printf ("\nERROR: data is NOT properly sorted. There are %d unordered positions\n\n",unsorted);
}
/* Parse options, allocate the vectors, then time initialization, the
 * parallel multisort, and the final sortedness check. */
int main(int argc, char **argv) {
    /* Defaults for command line arguments */
    /* Important: all of them should be powers of two */
    N = 32768 * 1024;
    MIN_SORT_SIZE = 1024;
    MIN_MERGE_SIZE = 1024;
    CUTOFF = 4;

    /* Process command-line arguments */
    for (int i=1; i<argc; i++) {
        if (strcmp(argv[i], "-n")==0) {
            N = atol(argv[++i]) * 1024;
        }
        else if (strcmp(argv[i], "-s")==0) {
            MIN_SORT_SIZE = atol(argv[++i]);
        }
        else if (strcmp(argv[i], "-m")==0) {
            MIN_MERGE_SIZE = atol(argv[++i]);
        }
#ifdef _OPENMP
        else if (strcmp(argv[i], "-c")==0) {
            CUTOFF = atoi(argv[++i]);
        }
#endif
        else {
#ifdef _OPENMP
            fprintf(stderr, "Usage: %s [-n vector_size -s MIN_SORT_SIZE -m MIN_MERGE_SIZE] -c CUTOFF\n", argv[0]);
#else
            fprintf(stderr, "Usage: %s [-n vector_size -s MIN_SORT_SIZE -m MIN_MERGE_SIZE]\n", argv[0]);
#endif
            fprintf(stderr, "  -n to specify the size of the vector (in Kelements) to sort (default 32768)\n");
            fprintf(stderr, "  -s to specify the size of the vector (in elements) that breaks recursion in the sort phase (default 1024)\n");
            fprintf(stderr, "  -m to specify the size of the vector (in elements) that breaks recursion in the merge phase (default 1024)\n");
#ifdef _OPENMP
            /* BUG FIX: the default set above is 4, not 16. */
            fprintf(stderr, "  -c to specify the cut off recursion level to stop task generation in OpenMP (default 4)\n");
#endif
            return EXIT_FAILURE;
        }
    }

    fprintf(stdout, "*****************************************************************************************\n");
    fprintf(stdout, "Problem size (in number of elements): N=%ld, MIN_SORT_SIZE=%ld, MIN_MERGE_SIZE=%ld\n", N/1024, MIN_SORT_SIZE, MIN_MERGE_SIZE);
#ifdef _OPENMP
    fprintf(stdout, "Cut-off level: CUTOFF=%d\n", CUTOFF);
    fprintf(stdout, "Number of threads in OpenMP: OMP_NUM_THREADS=%d\n", omp_get_max_threads());
#endif
    fprintf(stdout, "*****************************************************************************************\n");

    T *data = malloc(N*sizeof(T));
    T *tmp = malloc(N*sizeof(T));
    /* BUG FIX: check the allocations before touching N elements. */
    if (data == NULL || tmp == NULL) {
        fprintf(stderr, "Error: could not allocate %ld bytes\n", 2*N*(long)sizeof(T));
        free(data);
        free(tmp);
        return EXIT_FAILURE;
    }

    double stamp;
    START_COUNT_TIME;

    /* Single thread creates the taskloops; the team executes the tasks. */
    #pragma omp parallel
    #pragma omp single
    initialize(N, data);
    #pragma omp parallel
    #pragma omp single
    clear(N, tmp);

    STOP_COUNT_TIME("Initialization time in seconds");

    START_COUNT_TIME;

    #pragma omp parallel
    #pragma omp single
    multisort(N, data, tmp,0);

    STOP_COUNT_TIME("Multisort execution time");

    START_COUNT_TIME;

    check_sorted (N, data);

    STOP_COUNT_TIME("Check sorted data execution time");

    fprintf(stdout, "Multisort program finished\n");
    fprintf(stdout, "*****************************************************************************************\n");

    free(data);
    free(tmp);
    return 0;
}
|
no_thread_num_clause.c | // RUN: %libomp-compile-and-run | FileCheck %s
// RUN: %libomp-compile-and-run | %sort-threads | FileCheck --check-prefix=THREADS %s
// REQUIRES: ompt
#include "callback.h"
// Spawn a 4-thread parallel region (no num_threads clause on the
// construct) and verify the OMPT callback sequence via FileCheck.
// IMPORTANT: every CHECK/THREADS comment line below is a FileCheck
// directive consumed by the RUN lines — do not edit or reflow them.
int main()
{
  omp_set_num_threads(4);
  #pragma omp parallel
  {
    print_ids(0);
    print_ids(1);
  }
  print_fuzzy_address(1);
  // Check if libomp supports the callbacks for this test.
  // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_thread_begin'
  // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_thread_end'
  // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_parallel_begin'
  // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_parallel_end'
  // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_implicit_task'
  // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_mutex_acquire'
  // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_mutex_acquired'
  // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_mutex_released'
  // CHECK: 0: NULL_POINTER=[[NULL:.*$]]
  // make sure initial data pointers are null
  // CHECK-NOT: 0: parallel_data initially not null
  // CHECK-NOT: 0: task_data initially not null
  // CHECK-NOT: 0: thread_data initially not null
  // CHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_parallel_begin: parent_task_id=[[PARENT_TASK_ID:[0-9]+]], parent_task_frame.exit=[[NULL]], parent_task_frame.reenter={{0x[0-f]+}}, parallel_id=[[PARALLEL_ID:[0-9]+]], requested_team_size=4, codeptr_ra=0x{{[0-f]+}}, invoker=[[PARALLEL_INVOKER:[0-9]+]]
  // CHECK-DAG: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
  // CHECK-DAG: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]]
  // Note that we cannot ensure that the worker threads have already called barrier_end and implicit_task_end before parallel_end!
  // CHECK-DAG: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
  // CHECK-DAG: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
  // CHECK-DAG: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
  // CHECK-DAG: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
  // CHECK-DAG: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
  // CHECK-DAG: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_parallel_end: parallel_id=[[PARALLEL_ID]], task_id=[[PARENT_TASK_ID]], invoker=[[PARALLEL_INVOKER]]
  // THREADS: 0: NULL_POINTER=[[NULL:.*$]]
  // THREADS: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_thread_begin: thread_type=ompt_thread_initial=1, thread_id=[[MASTER_ID]]
  // THREADS: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_begin: parallel_id=0, task_id=[[INITIAL_TASK_ID:[0-9]+]]
  // THREADS: {{^}}[[MASTER_ID]]: ompt_event_parallel_begin: parent_task_id=[[PARENT_TASK_ID:[0-9]+]], parent_task_frame.exit=[[NULL]], parent_task_frame.reenter={{0x[0-f]+}}, parallel_id=[[PARALLEL_ID:[0-9]+]], requested_team_size=4, codeptr_ra=[[RETURN_ADDRESS:0x[0-f]+]]{{[0-f][0-f]}}, invoker={{[0-9]+}}
  // THREADS: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
  // THREADS: {{^}}[[MASTER_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
  // THREADS: {{^}}[[MASTER_ID]]: task level 1: parallel_id=[[IMPLICIT_PARALLEL_ID:[0-9]+]], task_id=[[PARENT_TASK_ID]]
  // THREADS-NOT: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_end
  // THREADS: {{^}}[[MASTER_ID]]: ompt_event_barrier_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
  // THREADS: {{^}}[[MASTER_ID]]: ompt_event_barrier_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]]
  // THREADS: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]]
  // THREADS: {{^}}[[MASTER_ID]]: ompt_event_parallel_end: parallel_id=[[PARALLEL_ID]], task_id=[[PARENT_TASK_ID]]
  // THREADS: {{^}}[[MASTER_ID]]: fuzzy_address={{.*}}[[RETURN_ADDRESS]]
  // THREADS: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_thread_begin: thread_type=ompt_thread_worker=2, thread_id=[[THREAD_ID]]
  // THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
  // THREADS: {{^}}[[THREAD_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
  // THREADS: {{^}}[[THREAD_ID]]: task level 1: parallel_id=[[IMPLICIT_PARALLEL_ID]], task_id=[[PARENT_TASK_ID]]
  // THREADS-NOT: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end
  // THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
  // THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]]
  // THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]]
  // THREADS: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_thread_begin: thread_type=ompt_thread_worker=2, thread_id=[[THREAD_ID]]
  // THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
  // THREADS: {{^}}[[THREAD_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
  // THREADS: {{^}}[[THREAD_ID]]: task level 1: parallel_id=[[IMPLICIT_PARALLEL_ID]], task_id=[[PARENT_TASK_ID]]
  // THREADS-NOT: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end
  // THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
  // THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]]
  // THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]]
  // THREADS: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_thread_begin: thread_type=ompt_thread_worker=2, thread_id=[[THREAD_ID]]
  // THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
  // THREADS: {{^}}[[THREAD_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
  // THREADS: {{^}}[[THREAD_ID]]: task level 1: parallel_id=[[IMPLICIT_PARALLEL_ID]], task_id=[[PARENT_TASK_ID]]
  // THREADS-NOT: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end
  // THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
  // THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]]
  // THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]]
  return 0;
}
|
GB_unaryop__lnot_uint16_uint64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__lnot_uint16_uint64
// op(A') function: GB_tran__lnot_uint16_uint64
// C type: uint16_t
// A type: uint64_t
// cast: uint16_t cij = (uint16_t) aij
// unaryop: cij = !(aij != 0)
#define GB_ATYPE \
uint64_t
#define GB_CTYPE \
uint16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !(x != 0) ;
// casting
#define GB_CASTING(z, x) \
uint16_t z = (uint16_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_UINT16 || GxB_NO_UINT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Elementwise apply: Cx [p] = !(((uint16_t) Ax [p]) != 0) for p in [0,anz).
// (Auto-generated file: keep edits to comments only.)
GrB_Info GB_unop__lnot_uint16_uint64
(
    uint16_t *restrict Cx,          // output array, length anz
    const uint64_t *restrict Ax,    // input array, length anz
    int64_t anz,                    // number of entries
    int nthreads                    // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = lnot (cast (A')): the transpose loop itself lives in the shared
// template GB_unaryop_transpose.c, driven by the GB_* macros above.
// (Auto-generated file: keep edits to comments only.)
GrB_Info GB_tran__lnot_uint16_uint64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,   // per-slice row counts workspace
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice                     // number of slices of A
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
GB_unaryop__ainv_bool_int32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__ainv_bool_int32
// op(A') function: GB_tran__ainv_bool_int32
// C type: bool
// A type: int32_t
// cast: bool cij = (bool) aij
// unaryop: cij = aij
#define GB_ATYPE \
int32_t
#define GB_CTYPE \
bool
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, x) \
bool z = (bool) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_BOOL || GxB_NO_INT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Elementwise apply: Cx [p] = (bool) Ax [p] for p in [0,anz) (ainv on bool
// reduces to the identity after the cast).
// (Auto-generated file: keep edits to comments only.)
GrB_Info GB_unop__ainv_bool_int32
(
    bool *restrict Cx,              // output array, length anz
    const int32_t *restrict Ax,     // input array, length anz
    int64_t anz,                    // number of entries
    int nthreads                    // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = ainv (cast (A')): the transpose loop lives in the shared template
// GB_unaryop_transpose.c, driven by the GB_* macros above.
// NOTE(review): Rowcounts is declared `int64_t **` here but
// `int64_t *restrict *` in sibling generated files — confirm against the
// prototype in GB_unaryop__include.h.
// (Auto-generated file: keep edits to comments only.)
GrB_Info GB_tran__ainv_bool_int32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,            // per-slice row counts workspace
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice                     // number of slices of A
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
ImageSharpenUtils.h | #ifndef CAPTURE3_IMAGE_SHARPEN_UTILS_H
#define CAPTURE3_IMAGE_SHARPEN_UTILS_H
#include <cmath>
#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#include "../engine/objects/image/Image.h"
namespace Capture3
{
	// Sharpen `image` in place with unsharp masking applied to the LAB
	// lightness channel only. `radius` is passed to cv::GaussianBlur as the
	// Gaussian sigma (kernel size (0,0) lets OpenCV derive the kernel from
	// sigma); `strength` scales the sharpening mask.
	static void sharpenImage(Image &image, unsigned int radius, double strength)
	{
		// get image size
		const unsigned int imageArea = image.getSize().getArea();
		const unsigned int imageWidth = image.getSize().getWidth();
		const unsigned int imageHeight = image.getSize().getHeight();

		// Temp data
		cv::Mat temp(cv::Size(imageWidth, imageHeight), CV_64FC1, cv::Scalar(0));
		cv::Mat blur(cv::Size(imageWidth, imageHeight), CV_64FC1, cv::Scalar(0));

		// Fetch pointers and size
		double *labData = image.getLAB().getData();
		auto *tempData = (double *) temp.data;
		auto *blurData = (double *) blur.data;

		// We will only apply sharpening on the lightness channel,
		// afterwards we mix this again to create a RGB output.
		// Sharpening the lightness channel creates a very natural
		// looking result and avoids heavy color changes around edges.
		// (LAB data is interleaved; stride 3 selects the L component.)
		#pragma omp parallel for schedule(static)
		for (unsigned int i = 0; i < imageArea; i++) {
			tempData[i] = labData[i * 3];
		}

		// Apply unsharp masking: output = (1+strength)*orig - strength*blur
		cv::GaussianBlur(temp, blur, cv::Size(0, 0), radius, radius);
		cv::addWeighted(temp, 1.0 + strength, blur, -strength, 0, blur);

		// Copy the result back into the L channel
		#pragma omp parallel for schedule(static)
		for (unsigned int i = 0; i < imageArea; i++) {
			labData[i * 3] = blurData[i];
		}

		// Release images
		blur.release();
		temp.release();

		// Convert LAB to other channels
		image.convertLAB();
	}
}
#endif // CAPTURE3_IMAGE_SHARPEN_UTILS_H
|
convolution_3x3.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
// Copyright (C) 2019 BUG1989. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void conv3x3s1_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
const float* kernel = _kernel;
const float* bias = _bias;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
Mat out = top_blob.channel(p);
const float bias0 = bias ? bias[p] : 0.f;
out.fill(bias0);
for (int q = 0; q < inch; q++)
{
float* outptr = out;
float* outptr2 = outptr + outw;
const float* img0 = bottom_blob.channel(q);
const float* kernel0 = kernel + p * inch * 9 + q * 9;
const float* r0 = img0;
const float* r1 = img0 + w;
const float* r2 = img0 + w * 2;
const float* r3 = img0 + w * 3;
const float* k0 = kernel0;
const float* k1 = kernel0 + 3;
const float* k2 = kernel0 + 6;
int i = 0;
for (; i + 1 < outh; i += 2)
{
int remain = outw;
for (; remain > 0; remain--)
{
float sum = 0;
float sum2 = 0;
sum += r0[0] * k0[0];
sum += r0[1] * k0[1];
sum += r0[2] * k0[2];
sum += r1[0] * k1[0];
sum += r1[1] * k1[1];
sum += r1[2] * k1[2];
sum += r2[0] * k2[0];
sum += r2[1] * k2[1];
sum += r2[2] * k2[2];
sum2 += r1[0] * k0[0];
sum2 += r1[1] * k0[1];
sum2 += r1[2] * k0[2];
sum2 += r2[0] * k1[0];
sum2 += r2[1] * k1[1];
sum2 += r2[2] * k1[2];
sum2 += r3[0] * k2[0];
sum2 += r3[1] * k2[1];
sum2 += r3[2] * k2[2];
*outptr += sum;
*outptr2 += sum2;
r0++;
r1++;
r2++;
r3++;
outptr++;
outptr2++;
}
r0 += 2 + w;
r1 += 2 + w;
r2 += 2 + w;
r3 += 2 + w;
outptr += outw;
outptr2 += outw;
}
for (; i < outh; i++)
{
int remain = outw;
for (; remain > 0; remain--)
{
float sum = 0;
sum += r0[0] * k0[0];
sum += r0[1] * k0[1];
sum += r0[2] * k0[2];
sum += r1[0] * k1[0];
sum += r1[1] * k1[1];
sum += r1[2] * k1[2];
sum += r2[0] * k2[0];
sum += r2[1] * k2[1];
sum += r2[2] * k2[2];
*outptr += sum;
r0++;
r1++;
r2++;
outptr++;
}
r0 += 2;
r1 += 2;
r2 += 2;
}
}
}
}
// Transform 3x3 kernels into the 4x4 winograd F(2,3) domain: U = G * g * G^T.
// kernel_tm receives one 16-element transformed kernel per (outch, inch) pair.
static void conv3x3s1_winograd23_transform_kernel_sse(const Mat& kernel, Mat& kernel_tm, int inch, int outch)
{
    kernel_tm.create(4 * 4, inch, outch);

    // G matrix for F(2,3)
    const float ktm[4][3] = {
        {1.0f, 0.0f, 0.0f},
        {1.0f / 2, 1.0f / 2, 1.0f / 2},
        {1.0f / 2, -1.0f / 2, 1.0f / 2},
        {0.0f, 0.0f, 1.0f}
    };

    #pragma omp parallel for
    for (int p = 0; p < outch; p++)
    {
        for (int q = 0; q < inch; q++)
        {
            const float* k9 = (const float*)kernel + p * inch * 9 + q * 9;
            float* u = kernel_tm.channel(p).row(q);

            // the three rows of the 3x3 kernel
            const float* g0 = k9;
            const float* g1 = k9 + 3;
            const float* g2 = k9 + 6;

            // h = G * g  (4x3 intermediate)
            float h[4][3];
            for (int i = 0; i < 4; i++)
            {
                h[i][0] = g0[0] * ktm[i][0] + g0[1] * ktm[i][1] + g0[2] * ktm[i][2];
                h[i][1] = g1[0] * ktm[i][0] + g1[1] * ktm[i][1] + g1[2] * ktm[i][2];
                h[i][2] = g2[0] * ktm[i][0] + g2[1] * ktm[i][1] + g2[2] * ktm[i][2];
            }

            // U = h * G^T  (4x4), stored row-major
            for (int j = 0; j < 4; j++)
            {
                const float* hj = &h[j][0];
                for (int i = 0; i < 4; i++)
                {
                    u[j * 4 + i] = hj[0] * ktm[i][0] + hj[1] * ktm[i][1] + hj[2] * ktm[i][2];
                }
            }
        }
    }
}
// Winograd F(2,3) 3x3 stride-1 convolution.
// Pipeline: pad input to a multiple of 2 (+2 border) -> transform 4x4 input
// tiles (B_t d B) -> element-wise dot with the pre-transformed kernels from
// conv3x3s1_winograd23_transform_kernel_sse -> inverse-transform (A_t w A) and
// add bias -> crop the padded result back to top_blob.
// NOTE(review): the AVX path uses _mm256_fmadd_ps, which needs FMA support but
// is guarded only by __AVX__ — confirm the build always enables FMA with AVX.
static void conv3x3s1_winograd23_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int inch = bottom_blob.c;
    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;
    // pad to 2n+2, winograd F(2,3)
    Mat bottom_blob_bordered = bottom_blob;
    outw = (outw + 1) / 2 * 2;
    outh = (outh + 1) / 2 * 2;
    w = outw + 2;
    h = outh + 2;
    Option opt_b = opt;
    opt_b.blob_allocator = opt.workspace_allocator;
    // extend only right/bottom so every 2x2 output block has a full 4x4 input tile
    copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, 0, 0.f, opt_b);
    const float* bias = _bias;
    // BEGIN transform input
    Mat bottom_blob_tm;
    {
        int w_tm = outw / 2 * 4;
        int h_tm = outh / 2 * 4;
        int nColBlocks = h_tm / 4; // may be the block num in Feathercnn
        int nRowBlocks = w_tm / 4;
        const int tiles = nColBlocks * nRowBlocks;
        // one 16-float transformed tile per (tile, input channel)
        bottom_blob_tm.create(4 * 4, tiles, inch, 4u, opt.workspace_allocator);
        // BT
        // const float itm[4][4] = {
        //     {1.0f, 0.0f, -1.0f, 0.0f},
        //     {0.0f, 1.0f, 1.00f, 0.0f},
        //     {0.0f, -1.0f, 1.00f, 0.0f},
        //     {0.0f, -1.0f, 0.00f, 1.0f}
        // };
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int q = 0; q < inch; q++)
        {
            const float* img = bottom_blob_bordered.channel(q);
            float* out_tm0 = bottom_blob_tm.channel(q);
            for (int j = 0; j < nColBlocks; j++)
            {
                // four consecutive input rows feeding one row of 4x4 tiles
                const float* r0 = img + w * j * 2;
                const float* r1 = r0 + w;
                const float* r2 = r1 + w;
                const float* r3 = r2 + w;
                for (int i = 0; i < nRowBlocks; i++)
                {
#if __AVX__
                    __m128 _d0, _d1, _d2, _d3;
                    __m128 _w0, _w1, _w2, _w3;
                    // load
                    _d0 = _mm_loadu_ps(r0);
                    _d1 = _mm_loadu_ps(r1);
                    _d2 = _mm_loadu_ps(r2);
                    _d3 = _mm_loadu_ps(r3);
                    // w = B_t * d
                    _w0 = _mm_sub_ps(_d0, _d2);
                    _w1 = _mm_add_ps(_d1, _d2);
                    _w2 = _mm_sub_ps(_d2, _d1);
                    _w3 = _mm_sub_ps(_d3, _d1);
                    // transpose d to d_t
                    _MM_TRANSPOSE4_PS(_w0, _w1, _w2, _w3);
                    // d = B_t * d_t
                    _d0 = _mm_sub_ps(_w0, _w2);
                    _d1 = _mm_add_ps(_w1, _w2);
                    _d2 = _mm_sub_ps(_w2, _w1);
                    _d3 = _mm_sub_ps(_w3, _w1);
                    // save to out_tm
                    _mm_storeu_ps(out_tm0, _d0);
                    _mm_storeu_ps(out_tm0 + 4, _d1);
                    _mm_storeu_ps(out_tm0 + 8, _d2);
                    _mm_storeu_ps(out_tm0 + 12, _d3);
#else
                    // scalar fallback: same B_t * d * B computed element-wise
                    float d0[4], d1[4], d2[4], d3[4];
                    float w0[4], w1[4], w2[4], w3[4];
                    float t0[4], t1[4], t2[4], t3[4];
                    // load
                    for (int n = 0; n < 4; n++)
                    {
                        d0[n] = r0[n];
                        d1[n] = r1[n];
                        d2[n] = r2[n];
                        d3[n] = r3[n];
                    }
                    // w = B_t * d
                    for (int n = 0; n < 4; n++)
                    {
                        w0[n] = d0[n] - d2[n];
                        w1[n] = d1[n] + d2[n];
                        w2[n] = d2[n] - d1[n];
                        w3[n] = d3[n] - d1[n];
                    }
                    // transpose d to d_t
                    {
                        t0[0] = w0[0];
                        t1[0] = w0[1];
                        t2[0] = w0[2];
                        t3[0] = w0[3];
                        t0[1] = w1[0];
                        t1[1] = w1[1];
                        t2[1] = w1[2];
                        t3[1] = w1[3];
                        t0[2] = w2[0];
                        t1[2] = w2[1];
                        t2[2] = w2[2];
                        t3[2] = w2[3];
                        t0[3] = w3[0];
                        t1[3] = w3[1];
                        t2[3] = w3[2];
                        t3[3] = w3[3];
                    }
                    // d = B_t * d_t
                    for (int n = 0; n < 4; n++)
                    {
                        d0[n] = t0[n] - t2[n];
                        d1[n] = t1[n] + t2[n];
                        d2[n] = t2[n] - t1[n];
                        d3[n] = t3[n] - t1[n];
                    }
                    // save to out_tm
                    for (int n = 0; n < 4; n++)
                    {
                        out_tm0[n] = d0[n];
                        out_tm0[n + 4] = d1[n];
                        out_tm0[n + 8] = d2[n];
                        out_tm0[n + 12] = d3[n];
                    }
#endif
                    // tiles overlap by 2 columns (stride-2 tiling)
                    r0 += 2;
                    r1 += 2;
                    r2 += 2;
                    r3 += 2;
                    out_tm0 += 16;
                }
            }
        }
    }
    // release the bordered copy early; only the transformed blob is needed now
    bottom_blob_bordered = Mat();
    // BEGIN dot
    Mat top_blob_tm;
    {
        int w_tm = outw / 2 * 4;
        int h_tm = outh / 2 * 4;
        int nColBlocks = h_tm / 4; // may be the block num in Feathercnn
        int nRowBlocks = w_tm / 4;
        const int tiles = nColBlocks * nRowBlocks;
        top_blob_tm.create(16, tiles, outch, 4u, opt.workspace_allocator);
        // process output channels 4 at a time, remainder handled below
        int nn_outch = outch >> 2;
        int remain_outch_start = nn_outch << 2;
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int pp = 0; pp < nn_outch; pp++)
        {
            int p = pp * 4;
            Mat out0_tm = top_blob_tm.channel(p);
            Mat out1_tm = top_blob_tm.channel(p + 1);
            Mat out2_tm = top_blob_tm.channel(p + 2);
            Mat out3_tm = top_blob_tm.channel(p + 3);
            const Mat kernel0_tm = kernel_tm.channel(p);
            const Mat kernel1_tm = kernel_tm.channel(p + 1);
            const Mat kernel2_tm = kernel_tm.channel(p + 2);
            const Mat kernel3_tm = kernel_tm.channel(p + 3);
            for (int i = 0; i < tiles; i++)
            {
                float* output0_tm = out0_tm.row(i);
                float* output1_tm = out1_tm.row(i);
                float* output2_tm = out2_tm.row(i);
                float* output3_tm = out3_tm.row(i);
#if __AVX__
                float zero_val = 0.f;
                __m256 _sum0 = _mm256_broadcast_ss(&zero_val);
                __m256 _sum0n = _mm256_broadcast_ss(&zero_val);
                __m256 _sum1 = _mm256_broadcast_ss(&zero_val);
                __m256 _sum1n = _mm256_broadcast_ss(&zero_val);
                __m256 _sum2 = _mm256_broadcast_ss(&zero_val);
                __m256 _sum2n = _mm256_broadcast_ss(&zero_val);
                __m256 _sum3 = _mm256_broadcast_ss(&zero_val);
                __m256 _sum3n = _mm256_broadcast_ss(&zero_val);
                int q = 0;
                // 4 input channels per iteration; each tile is 16 floats,
                // held as two 8-lane registers (_sumX / _sumXn)
                for (; q + 3 < inch; q += 4)
                {
                    const float* r0 = bottom_blob_tm.channel(q).row(i);
                    const float* r1 = bottom_blob_tm.channel(q + 1).row(i);
                    const float* r2 = bottom_blob_tm.channel(q + 2).row(i);
                    const float* r3 = bottom_blob_tm.channel(q + 3).row(i);
                    const float* k0 = kernel0_tm.row(q);
                    const float* k1 = kernel1_tm.row(q);
                    const float* k2 = kernel2_tm.row(q);
                    const float* k3 = kernel3_tm.row(q);
                    __m256 _r0 = _mm256_loadu_ps(r0);
                    __m256 _r0n = _mm256_loadu_ps(r0 + 8);
                    // k0
                    __m256 _k0 = _mm256_loadu_ps(k0);
                    __m256 _k0n = _mm256_loadu_ps(k0 + 8);
                    __m256 _k1 = _mm256_loadu_ps(k1);
                    __m256 _k1n = _mm256_loadu_ps(k1 + 8);
                    __m256 _k2 = _mm256_loadu_ps(k2);
                    __m256 _k2n = _mm256_loadu_ps(k2 + 8);
                    __m256 _k3 = _mm256_loadu_ps(k3);
                    __m256 _k3n = _mm256_loadu_ps(k3 + 8);
                    _sum0 = _mm256_fmadd_ps(_r0, _k0, _sum0);
                    _sum0n = _mm256_fmadd_ps(_r0n, _k0n, _sum0n);
                    _sum1 = _mm256_fmadd_ps(_r0, _k1, _sum1);
                    _sum1n = _mm256_fmadd_ps(_r0n, _k1n, _sum1n);
                    _sum2 = _mm256_fmadd_ps(_r0, _k2, _sum2);
                    _sum2n = _mm256_fmadd_ps(_r0n, _k2n, _sum2n);
                    _sum3 = _mm256_fmadd_ps(_r0, _k3, _sum3);
                    _sum3n = _mm256_fmadd_ps(_r0n, _k3n, _sum3n);
                    // k1
                    _r0 = _mm256_loadu_ps(r1);
                    _r0n = _mm256_loadu_ps(r1 + 8);
                    _k0 = _mm256_loadu_ps(k0 + 16);
                    _k0n = _mm256_loadu_ps(k0 + 24);
                    _k1 = _mm256_loadu_ps(k1 + 16);
                    _k1n = _mm256_loadu_ps(k1 + 24);
                    _k2 = _mm256_loadu_ps(k2 + 16);
                    _k2n = _mm256_loadu_ps(k2 + 24);
                    _k3 = _mm256_loadu_ps(k3 + 16);
                    _k3n = _mm256_loadu_ps(k3 + 24);
                    _sum0 = _mm256_fmadd_ps(_r0, _k0, _sum0);
                    _sum0n = _mm256_fmadd_ps(_r0n, _k0n, _sum0n);
                    _sum1 = _mm256_fmadd_ps(_r0, _k1, _sum1);
                    _sum1n = _mm256_fmadd_ps(_r0n, _k1n, _sum1n);
                    _sum2 = _mm256_fmadd_ps(_r0, _k2, _sum2);
                    _sum2n = _mm256_fmadd_ps(_r0n, _k2n, _sum2n);
                    _sum3 = _mm256_fmadd_ps(_r0, _k3, _sum3);
                    _sum3n = _mm256_fmadd_ps(_r0n, _k3n, _sum3n);
                    // k2
                    _r0 = _mm256_loadu_ps(r2);
                    _r0n = _mm256_loadu_ps(r2 + 8);
                    _k0 = _mm256_loadu_ps(k0 + 32);
                    _k0n = _mm256_loadu_ps(k0 + 40);
                    _k1 = _mm256_loadu_ps(k1 + 32);
                    _k1n = _mm256_loadu_ps(k1 + 40);
                    _k2 = _mm256_loadu_ps(k2 + 32);
                    _k2n = _mm256_loadu_ps(k2 + 40);
                    _k3 = _mm256_loadu_ps(k3 + 32);
                    _k3n = _mm256_loadu_ps(k3 + 40);
                    _sum0 = _mm256_fmadd_ps(_r0, _k0, _sum0);
                    _sum0n = _mm256_fmadd_ps(_r0n, _k0n, _sum0n);
                    _sum1 = _mm256_fmadd_ps(_r0, _k1, _sum1);
                    _sum1n = _mm256_fmadd_ps(_r0n, _k1n, _sum1n);
                    _sum2 = _mm256_fmadd_ps(_r0, _k2, _sum2);
                    _sum2n = _mm256_fmadd_ps(_r0n, _k2n, _sum2n);
                    _sum3 = _mm256_fmadd_ps(_r0, _k3, _sum3);
                    _sum3n = _mm256_fmadd_ps(_r0n, _k3n, _sum3n);
                    // k3
                    _r0 = _mm256_loadu_ps(r3);
                    _r0n = _mm256_loadu_ps(r3 + 8);
                    _k0 = _mm256_loadu_ps(k0 + 48);
                    _k0n = _mm256_loadu_ps(k0 + 56);
                    _k1 = _mm256_loadu_ps(k1 + 48);
                    _k1n = _mm256_loadu_ps(k1 + 56);
                    _k2 = _mm256_loadu_ps(k2 + 48);
                    _k2n = _mm256_loadu_ps(k2 + 56);
                    _k3 = _mm256_loadu_ps(k3 + 48);
                    _k3n = _mm256_loadu_ps(k3 + 56);
                    _sum0 = _mm256_fmadd_ps(_r0, _k0, _sum0);
                    _sum0n = _mm256_fmadd_ps(_r0n, _k0n, _sum0n);
                    _sum1 = _mm256_fmadd_ps(_r0, _k1, _sum1);
                    _sum1n = _mm256_fmadd_ps(_r0n, _k1n, _sum1n);
                    _sum2 = _mm256_fmadd_ps(_r0, _k2, _sum2);
                    _sum2n = _mm256_fmadd_ps(_r0n, _k2n, _sum2n);
                    _sum3 = _mm256_fmadd_ps(_r0, _k3, _sum3);
                    _sum3n = _mm256_fmadd_ps(_r0n, _k3n, _sum3n);
                }
                // remaining input channels one at a time
                for (; q < inch; q++)
                {
                    const float* r0 = bottom_blob_tm.channel(q).row(i);
                    const float* k0 = kernel0_tm.row(q);
                    const float* k1 = kernel1_tm.row(q);
                    const float* k2 = kernel2_tm.row(q);
                    const float* k3 = kernel3_tm.row(q);
                    __m256 _r0 = _mm256_loadu_ps(r0);
                    __m256 _r0n = _mm256_loadu_ps(r0 + 8);
                    __m256 _k0 = _mm256_loadu_ps(k0);
                    __m256 _k0n = _mm256_loadu_ps(k0 + 8);
                    __m256 _k1 = _mm256_loadu_ps(k1);
                    __m256 _k1n = _mm256_loadu_ps(k1 + 8);
                    __m256 _k2 = _mm256_loadu_ps(k2);
                    __m256 _k2n = _mm256_loadu_ps(k2 + 8);
                    __m256 _k3 = _mm256_loadu_ps(k3);
                    __m256 _k3n = _mm256_loadu_ps(k3 + 8);
                    _sum0 = _mm256_fmadd_ps(_r0, _k0, _sum0);
                    _sum0n = _mm256_fmadd_ps(_r0n, _k0n, _sum0n);
                    _sum1 = _mm256_fmadd_ps(_r0, _k1, _sum1);
                    _sum1n = _mm256_fmadd_ps(_r0n, _k1n, _sum1n);
                    _sum2 = _mm256_fmadd_ps(_r0, _k2, _sum2);
                    _sum2n = _mm256_fmadd_ps(_r0n, _k2n, _sum2n);
                    _sum3 = _mm256_fmadd_ps(_r0, _k3, _sum3);
                    _sum3n = _mm256_fmadd_ps(_r0n, _k3n, _sum3n);
                }
                _mm256_storeu_ps(output0_tm, _sum0);
                _mm256_storeu_ps(output0_tm + 8, _sum0n);
                _mm256_storeu_ps(output1_tm, _sum1);
                _mm256_storeu_ps(output1_tm + 8, _sum1n);
                _mm256_storeu_ps(output2_tm, _sum2);
                _mm256_storeu_ps(output2_tm + 8, _sum2n);
                _mm256_storeu_ps(output3_tm, _sum3);
                _mm256_storeu_ps(output3_tm + 8, _sum3n);
#else
                // scalar fallback accumulators, one 16-float tile per output channel
                float sum0[16] = {0.0f};
                float sum1[16] = {0.0f};
                float sum2[16] = {0.0f};
                float sum3[16] = {0.0f};
                int q = 0;
                for (; q + 3 < inch; q += 4)
                {
                    const float* r0 = bottom_blob_tm.channel(q).row(i);
                    const float* r1 = bottom_blob_tm.channel(q + 1).row(i);
                    const float* r2 = bottom_blob_tm.channel(q + 2).row(i);
                    const float* r3 = bottom_blob_tm.channel(q + 3).row(i);
                    const float* k0 = kernel0_tm.row(q);
                    const float* k1 = kernel1_tm.row(q);
                    const float* k2 = kernel2_tm.row(q);
                    const float* k3 = kernel3_tm.row(q);
                    for (int n = 0; n < 16; n++)
                    {
                        // step each kernel pointer through rows q..q+3 (16 floats
                        // apart), then rewind for the next element n
                        sum0[n] += r0[n] * k0[n];
                        k0 += 16;
                        sum0[n] += r1[n] * k0[n];
                        k0 += 16;
                        sum0[n] += r2[n] * k0[n];
                        k0 += 16;
                        sum0[n] += r3[n] * k0[n];
                        k0 -= 16 * 3;
                        sum1[n] += r0[n] * k1[n];
                        k1 += 16;
                        sum1[n] += r1[n] * k1[n];
                        k1 += 16;
                        sum1[n] += r2[n] * k1[n];
                        k1 += 16;
                        sum1[n] += r3[n] * k1[n];
                        k1 -= 16 * 3;
                        sum2[n] += r0[n] * k2[n];
                        k2 += 16;
                        sum2[n] += r1[n] * k2[n];
                        k2 += 16;
                        sum2[n] += r2[n] * k2[n];
                        k2 += 16;
                        sum2[n] += r3[n] * k2[n];
                        k2 -= 16 * 3;
                        sum3[n] += r0[n] * k3[n];
                        k3 += 16;
                        sum3[n] += r1[n] * k3[n];
                        k3 += 16;
                        sum3[n] += r2[n] * k3[n];
                        k3 += 16;
                        sum3[n] += r3[n] * k3[n];
                        k3 -= 16 * 3;
                    }
                }
                for (; q < inch; q++)
                {
                    const float* r0 = bottom_blob_tm.channel(q).row(i);
                    const float* k0 = kernel0_tm.row(q);
                    const float* k1 = kernel1_tm.row(q);
                    const float* k2 = kernel2_tm.row(q);
                    const float* k3 = kernel3_tm.row(q);
                    for (int n = 0; n < 16; n++)
                    {
                        sum0[n] += r0[n] * k0[n];
                        sum1[n] += r0[n] * k1[n];
                        sum2[n] += r0[n] * k2[n];
                        sum3[n] += r0[n] * k3[n];
                    }
                }
                for (int n = 0; n < 16; n++)
                {
                    output0_tm[n] = sum0[n];
                    output1_tm[n] = sum1[n];
                    output2_tm[n] = sum2[n];
                    output3_tm[n] = sum3[n];
                }
#endif
            }
        }
        // leftover output channels (outch % 4), scalar only
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p = remain_outch_start; p < outch; p++)
        {
            Mat out0_tm = top_blob_tm.channel(p);
            const Mat kernel0_tm = kernel_tm.channel(p);
            for (int i = 0; i < tiles; i++)
            {
                float* output0_tm = out0_tm.row(i);
                float sum0[16] = {0.0f};
                int q = 0;
                for (; q + 3 < inch; q += 4)
                {
                    const float* r0 = bottom_blob_tm.channel(q).row(i);
                    const float* r1 = bottom_blob_tm.channel(q + 1).row(i);
                    const float* r2 = bottom_blob_tm.channel(q + 2).row(i);
                    const float* r3 = bottom_blob_tm.channel(q + 3).row(i);
                    const float* k0 = kernel0_tm.row(q);
                    const float* k1 = kernel0_tm.row(q + 1);
                    const float* k2 = kernel0_tm.row(q + 2);
                    const float* k3 = kernel0_tm.row(q + 3);
                    for (int n = 0; n < 16; n++)
                    {
                        sum0[n] += r0[n] * k0[n];
                        sum0[n] += r1[n] * k1[n];
                        sum0[n] += r2[n] * k2[n];
                        sum0[n] += r3[n] * k3[n];
                    }
                }
                for (; q < inch; q++)
                {
                    const float* r0 = bottom_blob_tm.channel(q).row(i);
                    const float* k0 = kernel0_tm.row(q);
                    for (int n = 0; n < 16; n++)
                    {
                        sum0[n] += r0[n] * k0[n];
                    }
                }
                for (int n = 0; n < 16; n++)
                {
                    output0_tm[n] = sum0[n];
                }
            }
        }
    }
    // transformed input no longer needed
    bottom_blob_tm = Mat();
    // END dot
    // BEGIN transform output
    Mat top_blob_bordered;
    if (outw == top_blob.w && outh == top_blob.h)
    {
        // no padding was added; write straight into the caller's blob
        top_blob_bordered = top_blob;
    }
    else
    {
        top_blob_bordered.create(outw, outh, outch, 4u, opt.workspace_allocator);
    }
    {
        // AT
        // const float itm[2][4] = {
        //     {1.0f, 1.0f, 1.0f, 0.0f},
        //     {0.0f, 1.0f, -1.0f, 1.0f}
        // };
        int w_tm = outw / 2 * 4;
        int h_tm = outh / 2 * 4;
        int nColBlocks = h_tm / 4; // may be the block num in Feathercnn
        int nRowBlocks = w_tm / 4;
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p = 0; p < outch; p++)
        {
            Mat out_tm = top_blob_tm.channel(p);
            Mat out = top_blob_bordered.channel(p);
            const float bias0 = bias ? bias[p] : 0.f;
            for (int j = 0; j < nColBlocks; j++)
            {
                // each 4x4 tile produces a 2x2 output block
                float* outRow0 = out.row(j * 2);
                float* outRow1 = out.row(j * 2 + 1);
                for (int i = 0; i < nRowBlocks; i++)
                {
                    float* out_tile = out_tm.row(j * nRowBlocks + i);
                    float s0[4], s1[4], s2[4], s3[4];
                    float w0[4], w1[4];
                    float d0[2], d1[2], d2[2], d3[2];
                    float o0[2], o1[2];
                    // load
                    for (int n = 0; n < 4; n++)
                    {
                        s0[n] = out_tile[n];
                        s1[n] = out_tile[n + 4];
                        s2[n] = out_tile[n + 8];
                        s3[n] = out_tile[n + 12];
                    }
                    // w = A_T * W
                    for (int n = 0; n < 4; n++)
                    {
                        w0[n] = s0[n] + s1[n] + s2[n];
                        w1[n] = s1[n] - s2[n] + s3[n];
                    }
                    // transpose w to w_t
                    {
                        d0[0] = w0[0];
                        d0[1] = w1[0];
                        d1[0] = w0[1];
                        d1[1] = w1[1];
                        d2[0] = w0[2];
                        d2[1] = w1[2];
                        d3[0] = w0[3];
                        d3[1] = w1[3];
                    }
                    // Y = A_T * w_t (bias added here, once per output pixel)
                    for (int n = 0; n < 2; n++)
                    {
                        o0[n] = d0[n] + d1[n] + d2[n] + bias0;
                        o1[n] = d1[n] - d2[n] + d3[n] + bias0;
                    }
                    // save to top blob tm
                    outRow0[0] = o0[0];
                    outRow0[1] = o0[1];
                    outRow1[0] = o1[0];
                    outRow1[1] = o1[1];
                    outRow0 += 2;
                    outRow1 += 2;
                }
            }
        }
    }
    // END transform output
    // cut result pad
    copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}
// Transform 3x3 kernels into the 6x6 winograd F(4,3) domain (U = G g G^T),
// then repack them into 9 Mats (kernel_tm2), one per group r of 4 consecutive
// transform elements (36 = 9 * 4). Within each Mat, output channels are
// interleaved in blocks of 8, then 4, then singly, so the dot-product stage
// can stream kernels contiguously.
static void conv3x3s1_winograd43_transform_kernel_sse(const Mat& kernel, std::vector<Mat>& kernel_tm2, int inch, int outch)
{
    Mat kernel_tm(6 * 6, inch, outch);
    // G
    const float ktm[6][3] = {
        {1.0f / 4, 0.0f, 0.0f},
        {-1.0f / 6, -1.0f / 6, -1.0f / 6},
        {-1.0f / 6, 1.0f / 6, -1.0f / 6},
        {1.0f / 24, 1.0f / 12, 1.0f / 6},
        {1.0f / 24, -1.0f / 12, 1.0f / 6},
        {0.0f, 0.0f, 1.0f}
    };
    #pragma omp parallel for
    for (int p = 0; p < outch; p++)
    {
        for (int q = 0; q < inch; q++)
        {
            const float* kernel0 = (const float*)kernel + p * inch * 9 + q * 9;
            float* kernel_tm0 = kernel_tm.channel(p).row(q);
            // transform kernel
            const float* k0 = kernel0;
            const float* k1 = kernel0 + 3;
            const float* k2 = kernel0 + 6;
            // h = G * g (6x3 intermediate)
            float tmp[6][3];
            for (int i = 0; i < 6; i++)
            {
                tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
                tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
                tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
            }
            // U = h * G^T (6x6), stored row-major
            for (int j = 0; j < 6; j++)
            {
                float* tmpp = &tmp[j][0];
                for (int i = 0; i < 6; i++)
                {
                    kernel_tm0[j * 6 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
                }
            }
        }
    }
    // repack: one Mat per group r of 4 transform elements (r*4 .. r*4+3)
    for (int r = 0; r < 9; r++)
    {
        // channel count = 8-blocks + 4-blocks + single leftovers of outch
        Mat kernel_tm_test(4 * 8, inch, outch / 8 + (outch % 8) / 4 + outch % 4);
        int p = 0;
        // 8 output channels interleaved per chunk
        for (; p + 7 < outch; p += 8)
        {
            const float* kernel0 = (const float*)kernel_tm.channel(p);
            const float* kernel1 = (const float*)kernel_tm.channel(p + 1);
            const float* kernel2 = (const float*)kernel_tm.channel(p + 2);
            const float* kernel3 = (const float*)kernel_tm.channel(p + 3);
            const float* kernel4 = (const float*)kernel_tm.channel(p + 4);
            const float* kernel5 = (const float*)kernel_tm.channel(p + 5);
            const float* kernel6 = (const float*)kernel_tm.channel(p + 6);
            const float* kernel7 = (const float*)kernel_tm.channel(p + 7);
            float* ktmp = kernel_tm_test.channel(p / 8);
            for (int q = 0; q < inch; q++)
            {
                // 4 consecutive transform elements per output channel
                ktmp[0] = kernel0[r * 4 + 0];
                ktmp[1] = kernel0[r * 4 + 1];
                ktmp[2] = kernel0[r * 4 + 2];
                ktmp[3] = kernel0[r * 4 + 3];
                ktmp[4] = kernel1[r * 4 + 0];
                ktmp[5] = kernel1[r * 4 + 1];
                ktmp[6] = kernel1[r * 4 + 2];
                ktmp[7] = kernel1[r * 4 + 3];
                ktmp[8] = kernel2[r * 4 + 0];
                ktmp[9] = kernel2[r * 4 + 1];
                ktmp[10] = kernel2[r * 4 + 2];
                ktmp[11] = kernel2[r * 4 + 3];
                ktmp[12] = kernel3[r * 4 + 0];
                ktmp[13] = kernel3[r * 4 + 1];
                ktmp[14] = kernel3[r * 4 + 2];
                ktmp[15] = kernel3[r * 4 + 3];
                ktmp[16] = kernel4[r * 4 + 0];
                ktmp[17] = kernel4[r * 4 + 1];
                ktmp[18] = kernel4[r * 4 + 2];
                ktmp[19] = kernel4[r * 4 + 3];
                ktmp[20] = kernel5[r * 4 + 0];
                ktmp[21] = kernel5[r * 4 + 1];
                ktmp[22] = kernel5[r * 4 + 2];
                ktmp[23] = kernel5[r * 4 + 3];
                ktmp[24] = kernel6[r * 4 + 0];
                ktmp[25] = kernel6[r * 4 + 1];
                ktmp[26] = kernel6[r * 4 + 2];
                ktmp[27] = kernel6[r * 4 + 3];
                ktmp[28] = kernel7[r * 4 + 0];
                ktmp[29] = kernel7[r * 4 + 1];
                ktmp[30] = kernel7[r * 4 + 2];
                ktmp[31] = kernel7[r * 4 + 3];
                ktmp += 32;
                // advance to the next input channel (36 floats per kernel)
                kernel0 += 36;
                kernel1 += 36;
                kernel2 += 36;
                kernel3 += 36;
                kernel4 += 36;
                kernel5 += 36;
                kernel6 += 36;
                kernel7 += 36;
            }
        }
        // 4 output channels interleaved per chunk
        for (; p + 3 < outch; p += 4)
        {
            const float* kernel0 = (const float*)kernel_tm.channel(p);
            const float* kernel1 = (const float*)kernel_tm.channel(p + 1);
            const float* kernel2 = (const float*)kernel_tm.channel(p + 2);
            const float* kernel3 = (const float*)kernel_tm.channel(p + 3);
            float* ktmp = kernel_tm_test.channel(p / 8 + (p % 8) / 4);
            for (int q = 0; q < inch; q++)
            {
                ktmp[0] = kernel0[r * 4 + 0];
                ktmp[1] = kernel0[r * 4 + 1];
                ktmp[2] = kernel0[r * 4 + 2];
                ktmp[3] = kernel0[r * 4 + 3];
                ktmp[4] = kernel1[r * 4 + 0];
                ktmp[5] = kernel1[r * 4 + 1];
                ktmp[6] = kernel1[r * 4 + 2];
                ktmp[7] = kernel1[r * 4 + 3];
                ktmp[8] = kernel2[r * 4 + 0];
                ktmp[9] = kernel2[r * 4 + 1];
                ktmp[10] = kernel2[r * 4 + 2];
                ktmp[11] = kernel2[r * 4 + 3];
                ktmp[12] = kernel3[r * 4 + 0];
                ktmp[13] = kernel3[r * 4 + 1];
                ktmp[14] = kernel3[r * 4 + 2];
                ktmp[15] = kernel3[r * 4 + 3];
                ktmp += 16;
                kernel0 += 36;
                kernel1 += 36;
                kernel2 += 36;
                kernel3 += 36;
            }
        }
        // leftover output channels, one per chunk
        for (; p < outch; p++)
        {
            const float* kernel0 = (const float*)kernel_tm.channel(p);
            float* ktmp = kernel_tm_test.channel(p / 8 + (p % 8) / 4 + p % 4);
            for (int q = 0; q < inch; q++)
            {
                ktmp[0] = kernel0[r * 4 + 0];
                ktmp[1] = kernel0[r * 4 + 1];
                ktmp[2] = kernel0[r * 4 + 2];
                ktmp[3] = kernel0[r * 4 + 3];
                ktmp += 4;
                kernel0 += 36;
            }
        }
        kernel_tm2.push_back(kernel_tm_test);
    }
}
static void conv3x3s1_winograd43_sse(const Mat& bottom_blob, Mat& top_blob, const std::vector<Mat>& kernel_tm_test, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
size_t elemsize = bottom_blob.elemsize;
const float* bias = _bias;
// pad to 4n+2, winograd F(4,3)
Mat bottom_blob_bordered = bottom_blob;
outw = (outw + 3) / 4 * 4;
outh = (outh + 3) / 4 * 4;
w = outw + 2;
h = outh + 2;
Option opt_b = opt;
opt_b.blob_allocator = opt.workspace_allocator;
copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, 0, 0.f, opt_b);
// BEGIN transform input
Mat bottom_blob_tm;
{
int w_tm = outw / 4 * 6;
int h_tm = outh / 4 * 6;
int nColBlocks = h_tm / 6; // may be the block num in Feathercnn
int nRowBlocks = w_tm / 6;
const int tiles = nColBlocks * nRowBlocks;
bottom_blob_tm.create(4, inch, tiles * 9, elemsize, opt.workspace_allocator);
// BT
// const float itm[4][4] = {
// {4.0f, 0.0f, -5.0f, 0.0f, 1.0f, 0.0f},
// {0.0f,-4.0f, -4.0f, 1.0f, 1.0f, 0.0f},
// {0.0f, 4.0f, -4.0f,-1.0f, 1.0f, 0.0f},
// {0.0f,-2.0f, -1.0f, 2.0f, 1.0f, 0.0f},
// {0.0f, 2.0f, -1.0f,-2.0f, 1.0f, 0.0f},
// {0.0f, 4.0f, 0.0f,-5.0f, 0.0f, 1.0f}
// };
// 0 = 4 * r00 - 5 * r02 + r04
// 1 = -4 * (r01 + r02) + r03 + r04
// 2 = 4 * (r01 - r02) - r03 + r04
// 3 = -2 * r01 - r02 + 2 * r03 + r04
// 4 = 2 * r01 - r02 - 2 * r03 + r04
// 5 = 4 * r01 - 5 * r03 + r05
// 0 = 4 * r00 - 5 * r02 + r04
// 1 = -4 * (r01 + r02) + r03 + r04
// 2 = 4 * (r01 - r02) - r03 + r04
// 3 = -2 * r01 - r02 + 2 * r03 + r04
// 4 = 2 * r01 - r02 - 2 * r03 + r04
// 5 = 4 * r01 - 5 * r03 + r05
#if __AVX__
__m256 _1_n = _mm256_set1_ps(-1);
__m256 _2_p = _mm256_set1_ps(2);
__m256 _2_n = _mm256_set1_ps(-2);
__m256 _4_p = _mm256_set1_ps(4);
__m256 _4_n = _mm256_set1_ps(-4);
__m256 _5_n = _mm256_set1_ps(-5);
#endif
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < inch; q++)
{
const float* img = bottom_blob_bordered.channel(q);
for (int j = 0; j < nColBlocks; j++)
{
const float* r0 = img + w * j * 4;
const float* r1 = r0 + w;
const float* r2 = r1 + w;
const float* r3 = r2 + w;
const float* r4 = r3 + w;
const float* r5 = r4 + w;
for (int i = 0; i < nRowBlocks; i++)
{
float* out_tm0 = bottom_blob_tm.channel(tiles * 0 + j * nRowBlocks + i).row(q);
float* out_tm1 = bottom_blob_tm.channel(tiles * 1 + j * nRowBlocks + i).row(q);
float* out_tm2 = bottom_blob_tm.channel(tiles * 2 + j * nRowBlocks + i).row(q);
float* out_tm3 = bottom_blob_tm.channel(tiles * 3 + j * nRowBlocks + i).row(q);
float* out_tm4 = bottom_blob_tm.channel(tiles * 4 + j * nRowBlocks + i).row(q);
float* out_tm5 = bottom_blob_tm.channel(tiles * 5 + j * nRowBlocks + i).row(q);
float* out_tm6 = bottom_blob_tm.channel(tiles * 6 + j * nRowBlocks + i).row(q);
float* out_tm7 = bottom_blob_tm.channel(tiles * 7 + j * nRowBlocks + i).row(q);
float* out_tm8 = bottom_blob_tm.channel(tiles * 8 + j * nRowBlocks + i).row(q);
#if __AVX__
__m256 _d0, _d1, _d2, _d3, _d4, _d5;
__m256 _w0, _w1, _w2, _w3, _w4, _w5;
__m256 _t0, _t1, _t2, _t3, _t4, _t5;
__m256 _n0, _n1, _n2, _n3, _n4, _n5;
// load
_d0 = _mm256_loadu_ps(r0);
_d1 = _mm256_loadu_ps(r1);
_d2 = _mm256_loadu_ps(r2);
_d3 = _mm256_loadu_ps(r3);
_d4 = _mm256_loadu_ps(r4);
_d5 = _mm256_loadu_ps(r5);
// w = B_t * d
_w0 = _mm256_mul_ps(_d0, _4_p);
_w0 = _mm256_fmadd_ps(_d2, _5_n, _w0);
_w0 = _mm256_add_ps(_w0, _d4);
_w1 = _mm256_mul_ps(_d1, _4_n);
_w1 = _mm256_fmadd_ps(_d2, _4_n, _w1);
_w1 = _mm256_add_ps(_w1, _d3);
_w1 = _mm256_add_ps(_w1, _d4);
_w2 = _mm256_mul_ps(_d1, _4_p);
_w2 = _mm256_fmadd_ps(_d2, _4_n, _w2);
_w2 = _mm256_fmadd_ps(_d3, _1_n, _w2);
_w2 = _mm256_add_ps(_w2, _d4);
_w3 = _mm256_mul_ps(_d1, _2_n);
_w3 = _mm256_fmadd_ps(_d2, _1_n, _w3);
_w3 = _mm256_fmadd_ps(_d3, _2_p, _w3);
_w3 = _mm256_add_ps(_w3, _d4);
_w4 = _mm256_mul_ps(_d1, _2_p);
_w4 = _mm256_fmadd_ps(_d2, _1_n, _w4);
_w4 = _mm256_fmadd_ps(_d3, _2_n, _w4);
_w4 = _mm256_add_ps(_w4, _d4);
_w5 = _mm256_mul_ps(_d1, _4_p);
_w5 = _mm256_fmadd_ps(_d3, _5_n, _w5);
_w5 = _mm256_add_ps(_w5, _d5);
// transpose d to d_t
#if (defined _WIN32 && !(defined __MINGW32__))
{
_t0.m256_f32[0] = _w0.m256_f32[0];
_t1.m256_f32[0] = _w0.m256_f32[1];
_t2.m256_f32[0] = _w0.m256_f32[2];
_t3.m256_f32[0] = _w0.m256_f32[3];
_t4.m256_f32[0] = _w0.m256_f32[4];
_t5.m256_f32[0] = _w0.m256_f32[5];
_t0.m256_f32[1] = _w1.m256_f32[0];
_t1.m256_f32[1] = _w1.m256_f32[1];
_t2.m256_f32[1] = _w1.m256_f32[2];
_t3.m256_f32[1] = _w1.m256_f32[3];
_t4.m256_f32[1] = _w1.m256_f32[4];
_t5.m256_f32[1] = _w1.m256_f32[5];
_t0.m256_f32[2] = _w2.m256_f32[0];
_t1.m256_f32[2] = _w2.m256_f32[1];
_t2.m256_f32[2] = _w2.m256_f32[2];
_t3.m256_f32[2] = _w2.m256_f32[3];
_t4.m256_f32[2] = _w2.m256_f32[4];
_t5.m256_f32[2] = _w2.m256_f32[5];
_t0.m256_f32[3] = _w3.m256_f32[0];
_t1.m256_f32[3] = _w3.m256_f32[1];
_t2.m256_f32[3] = _w3.m256_f32[2];
_t3.m256_f32[3] = _w3.m256_f32[3];
_t4.m256_f32[3] = _w3.m256_f32[4];
_t5.m256_f32[3] = _w3.m256_f32[5];
_t0.m256_f32[4] = _w4.m256_f32[0];
_t1.m256_f32[4] = _w4.m256_f32[1];
_t2.m256_f32[4] = _w4.m256_f32[2];
_t3.m256_f32[4] = _w4.m256_f32[3];
_t4.m256_f32[4] = _w4.m256_f32[4];
_t5.m256_f32[4] = _w4.m256_f32[5];
_t0.m256_f32[5] = _w5.m256_f32[0];
_t1.m256_f32[5] = _w5.m256_f32[1];
_t2.m256_f32[5] = _w5.m256_f32[2];
_t3.m256_f32[5] = _w5.m256_f32[3];
_t4.m256_f32[5] = _w5.m256_f32[4];
_t5.m256_f32[5] = _w5.m256_f32[5];
}
#else
{
_t0[0] = _w0[0];
_t1[0] = _w0[1];
_t2[0] = _w0[2];
_t3[0] = _w0[3];
_t4[0] = _w0[4];
_t5[0] = _w0[5];
_t0[1] = _w1[0];
_t1[1] = _w1[1];
_t2[1] = _w1[2];
_t3[1] = _w1[3];
_t4[1] = _w1[4];
_t5[1] = _w1[5];
_t0[2] = _w2[0];
_t1[2] = _w2[1];
_t2[2] = _w2[2];
_t3[2] = _w2[3];
_t4[2] = _w2[4];
_t5[2] = _w2[5];
_t0[3] = _w3[0];
_t1[3] = _w3[1];
_t2[3] = _w3[2];
_t3[3] = _w3[3];
_t4[3] = _w3[4];
_t5[3] = _w3[5];
_t0[4] = _w4[0];
_t1[4] = _w4[1];
_t2[4] = _w4[2];
_t3[4] = _w4[3];
_t4[4] = _w4[4];
_t5[4] = _w4[5];
_t0[5] = _w5[0];
_t1[5] = _w5[1];
_t2[5] = _w5[2];
_t3[5] = _w5[3];
_t4[5] = _w5[4];
_t5[5] = _w5[5];
}
#endif
// d = B_t * d_t
_n0 = _mm256_mul_ps(_t0, _4_p);
_n0 = _mm256_fmadd_ps(_t2, _5_n, _n0);
_n0 = _mm256_add_ps(_n0, _t4);
_n1 = _mm256_mul_ps(_t1, _4_n);
_n1 = _mm256_fmadd_ps(_t2, _4_n, _n1);
_n1 = _mm256_add_ps(_n1, _t3);
_n1 = _mm256_add_ps(_n1, _t4);
_n2 = _mm256_mul_ps(_t1, _4_p);
_n2 = _mm256_fmadd_ps(_t2, _4_n, _n2);
_n2 = _mm256_fmadd_ps(_t3, _1_n, _n2);
_n2 = _mm256_add_ps(_n2, _t4);
_n3 = _mm256_mul_ps(_t1, _2_n);
_n3 = _mm256_fmadd_ps(_t2, _1_n, _n3);
_n3 = _mm256_fmadd_ps(_t3, _2_p, _n3);
_n3 = _mm256_add_ps(_n3, _t4);
_n4 = _mm256_mul_ps(_t1, _2_p);
_n4 = _mm256_fmadd_ps(_t2, _1_n, _n4);
_n4 = _mm256_fmadd_ps(_t3, _2_n, _n4);
_n4 = _mm256_add_ps(_n4, _t4);
_n5 = _mm256_mul_ps(_t1, _4_p);
_n5 = _mm256_fmadd_ps(_t3, _5_n, _n5);
_n5 = _mm256_add_ps(_n5, _t5);
// save to out_tm
float output_n0[8] = {0.f};
_mm256_storeu_ps(output_n0, _n0);
float output_n1[8] = {0.f};
_mm256_storeu_ps(output_n1, _n1);
float output_n2[8] = {0.f};
_mm256_storeu_ps(output_n2, _n2);
float output_n3[8] = {0.f};
_mm256_storeu_ps(output_n3, _n3);
float output_n4[8] = {0.f};
_mm256_storeu_ps(output_n4, _n4);
float output_n5[8] = {0.f};
_mm256_storeu_ps(output_n5, _n5);
out_tm0[0] = output_n0[0];
out_tm0[1] = output_n0[1];
out_tm0[2] = output_n0[2];
out_tm0[3] = output_n0[3];
out_tm1[0] = output_n0[4];
out_tm1[1] = output_n0[5];
out_tm1[2] = output_n1[0];
out_tm1[3] = output_n1[1];
out_tm2[0] = output_n1[2];
out_tm2[1] = output_n1[3];
out_tm2[2] = output_n1[4];
out_tm2[3] = output_n1[5];
out_tm3[0] = output_n2[0];
out_tm3[1] = output_n2[1];
out_tm3[2] = output_n2[2];
out_tm3[3] = output_n2[3];
out_tm4[0] = output_n2[4];
out_tm4[1] = output_n2[5];
out_tm4[2] = output_n3[0];
out_tm4[3] = output_n3[1];
out_tm5[0] = output_n3[2];
out_tm5[1] = output_n3[3];
out_tm5[2] = output_n3[4];
out_tm5[3] = output_n3[5];
out_tm6[0] = output_n4[0];
out_tm6[1] = output_n4[1];
out_tm6[2] = output_n4[2];
out_tm6[3] = output_n4[3];
out_tm7[0] = output_n4[4];
out_tm7[1] = output_n4[5];
out_tm7[2] = output_n5[0];
out_tm7[3] = output_n5[1];
out_tm8[0] = output_n5[2];
out_tm8[1] = output_n5[3];
out_tm8[2] = output_n5[4];
out_tm8[3] = output_n5[5];
#else
float d0[6], d1[6], d2[6], d3[6], d4[6], d5[6];
float w0[6], w1[6], w2[6], w3[6], w4[6], w5[6];
float t0[6], t1[6], t2[6], t3[6], t4[6], t5[6];
// load
for (int n = 0; n < 6; n++)
{
d0[n] = r0[n];
d1[n] = r1[n];
d2[n] = r2[n];
d3[n] = r3[n];
d4[n] = r4[n];
d5[n] = r5[n];
}
// w = B_t * d
for (int n = 0; n < 6; n++)
{
w0[n] = 4 * d0[n] - 5 * d2[n] + d4[n];
w1[n] = -4 * d1[n] - 4 * d2[n] + d3[n] + d4[n];
w2[n] = 4 * d1[n] - 4 * d2[n] - d3[n] + d4[n];
w3[n] = -2 * d1[n] - d2[n] + 2 * d3[n] + d4[n];
w4[n] = 2 * d1[n] - d2[n] - 2 * d3[n] + d4[n];
w5[n] = 4 * d1[n] - 5 * d3[n] + d5[n];
}
// transpose d to d_t
{
t0[0] = w0[0];
t1[0] = w0[1];
t2[0] = w0[2];
t3[0] = w0[3];
t4[0] = w0[4];
t5[0] = w0[5];
t0[1] = w1[0];
t1[1] = w1[1];
t2[1] = w1[2];
t3[1] = w1[3];
t4[1] = w1[4];
t5[1] = w1[5];
t0[2] = w2[0];
t1[2] = w2[1];
t2[2] = w2[2];
t3[2] = w2[3];
t4[2] = w2[4];
t5[2] = w2[5];
t0[3] = w3[0];
t1[3] = w3[1];
t2[3] = w3[2];
t3[3] = w3[3];
t4[3] = w3[4];
t5[3] = w3[5];
t0[4] = w4[0];
t1[4] = w4[1];
t2[4] = w4[2];
t3[4] = w4[3];
t4[4] = w4[4];
t5[4] = w4[5];
t0[5] = w5[0];
t1[5] = w5[1];
t2[5] = w5[2];
t3[5] = w5[3];
t4[5] = w5[4];
t5[5] = w5[5];
}
// d = B_t * d_t
for (int n = 0; n < 6; n++)
{
d0[n] = 4 * t0[n] - 5 * t2[n] + t4[n];
d1[n] = -4 * t1[n] - 4 * t2[n] + t3[n] + t4[n];
d2[n] = 4 * t1[n] - 4 * t2[n] - t3[n] + t4[n];
d3[n] = -2 * t1[n] - t2[n] + 2 * t3[n] + t4[n];
d4[n] = 2 * t1[n] - t2[n] - 2 * t3[n] + t4[n];
d5[n] = 4 * t1[n] - 5 * t3[n] + t5[n];
}
// save to out_tm
{
out_tm0[0] = d0[0];
out_tm0[1] = d0[1];
out_tm0[2] = d0[2];
out_tm0[3] = d0[3];
out_tm1[0] = d0[4];
out_tm1[1] = d0[5];
out_tm1[2] = d1[0];
out_tm1[3] = d1[1];
out_tm2[0] = d1[2];
out_tm2[1] = d1[3];
out_tm2[2] = d1[4];
out_tm2[3] = d1[5];
out_tm3[0] = d2[0];
out_tm3[1] = d2[1];
out_tm3[2] = d2[2];
out_tm3[3] = d2[3];
out_tm4[0] = d2[4];
out_tm4[1] = d2[5];
out_tm4[2] = d3[0];
out_tm4[3] = d3[1];
out_tm5[0] = d3[2];
out_tm5[1] = d3[3];
out_tm5[2] = d3[4];
out_tm5[3] = d3[5];
out_tm6[0] = d4[0];
out_tm6[1] = d4[1];
out_tm6[2] = d4[2];
out_tm6[3] = d4[3];
out_tm7[0] = d4[4];
out_tm7[1] = d4[5];
out_tm7[2] = d5[0];
out_tm7[3] = d5[1];
out_tm8[0] = d5[2];
out_tm8[1] = d5[3];
out_tm8[2] = d5[4];
out_tm8[3] = d5[5];
}
#endif // __AVX__
r0 += 4;
r1 += 4;
r2 += 4;
r3 += 4;
r4 += 4;
r5 += 4;
}
}
}
}
bottom_blob_bordered = Mat();
// BEGIN dot
Mat top_blob_tm;
{
int w_tm = outw / 4 * 6;
int h_tm = outh / 4 * 6;
int nColBlocks = h_tm / 6; // may be the block num in Feathercnn
int nRowBlocks = w_tm / 6;
const int tiles = nColBlocks * nRowBlocks;
top_blob_tm.create(36, tiles, outch, elemsize, opt.workspace_allocator);
#pragma omp parallel for num_threads(opt.num_threads)
for (int r = 0; r < 9; r++)
{
int nn_outch = 0;
int remain_outch_start = 0;
nn_outch = outch >> 3;
remain_outch_start = nn_outch << 3;
for (int pp = 0; pp < nn_outch; pp++)
{
int p = pp * 8;
float* output0_tm = top_blob_tm.channel(p);
float* output1_tm = top_blob_tm.channel(p + 1);
float* output2_tm = top_blob_tm.channel(p + 2);
float* output3_tm = top_blob_tm.channel(p + 3);
float* output4_tm = top_blob_tm.channel(p + 4);
float* output5_tm = top_blob_tm.channel(p + 5);
float* output6_tm = top_blob_tm.channel(p + 6);
float* output7_tm = top_blob_tm.channel(p + 7);
output0_tm = output0_tm + r * 4;
output1_tm = output1_tm + r * 4;
output2_tm = output2_tm + r * 4;
output3_tm = output3_tm + r * 4;
output4_tm = output4_tm + r * 4;
output5_tm = output5_tm + r * 4;
output6_tm = output6_tm + r * 4;
output7_tm = output7_tm + r * 4;
for (int i = 0; i < tiles; i++)
{
const float* kptr = kernel_tm_test[r].channel(p / 8);
const float* r0 = bottom_blob_tm.channel(tiles * r + i);
#if __AVX__ || __SSE__
#if __AVX__
float zero_val = 0.f;
__m128 _sum0 = _mm_broadcast_ss(&zero_val);
__m128 _sum1 = _mm_broadcast_ss(&zero_val);
__m128 _sum2 = _mm_broadcast_ss(&zero_val);
__m128 _sum3 = _mm_broadcast_ss(&zero_val);
__m128 _sum4 = _mm_broadcast_ss(&zero_val);
__m128 _sum5 = _mm_broadcast_ss(&zero_val);
__m128 _sum6 = _mm_broadcast_ss(&zero_val);
__m128 _sum7 = _mm_broadcast_ss(&zero_val);
#else
__m128 _sum0 = _mm_set1_ps(0.f);
__m128 _sum1 = _mm_set1_ps(0.f);
__m128 _sum2 = _mm_set1_ps(0.f);
__m128 _sum3 = _mm_set1_ps(0.f);
__m128 _sum4 = _mm_set1_ps(0.f);
__m128 _sum5 = _mm_set1_ps(0.f);
__m128 _sum6 = _mm_set1_ps(0.f);
__m128 _sum7 = _mm_set1_ps(0.f);
#endif
int q = 0;
for (; q + 3 < inch; q = q + 4)
{
__m128 _r0 = _mm_loadu_ps(r0);
__m128 _r1 = _mm_loadu_ps(r0 + 4);
__m128 _r2 = _mm_loadu_ps(r0 + 8);
__m128 _r3 = _mm_loadu_ps(r0 + 12);
__m128 _k0 = _mm_loadu_ps(kptr);
__m128 _k1 = _mm_loadu_ps(kptr + 4);
__m128 _k2 = _mm_loadu_ps(kptr + 8);
__m128 _k3 = _mm_loadu_ps(kptr + 12);
__m128 _k4 = _mm_loadu_ps(kptr + 16);
__m128 _k5 = _mm_loadu_ps(kptr + 20);
__m128 _k6 = _mm_loadu_ps(kptr + 24);
__m128 _k7 = _mm_loadu_ps(kptr + 28);
#if __AVX__
_sum0 = _mm_fmadd_ps(_r0, _k0, _sum0);
_sum1 = _mm_fmadd_ps(_r0, _k1, _sum1);
_sum2 = _mm_fmadd_ps(_r0, _k2, _sum2);
_sum3 = _mm_fmadd_ps(_r0, _k3, _sum3);
_sum4 = _mm_fmadd_ps(_r0, _k4, _sum4);
_sum5 = _mm_fmadd_ps(_r0, _k5, _sum5);
_sum6 = _mm_fmadd_ps(_r0, _k6, _sum6);
_sum7 = _mm_fmadd_ps(_r0, _k7, _sum7);
#else
_sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r0, _k0));
_sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r0, _k1));
_sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r0, _k2));
_sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_r0, _k3));
_sum4 = _mm_add_ps(_sum4, _mm_mul_ps(_r0, _k4));
_sum5 = _mm_add_ps(_sum5, _mm_mul_ps(_r0, _k5));
_sum6 = _mm_add_ps(_sum6, _mm_mul_ps(_r0, _k6));
_sum7 = _mm_add_ps(_sum7, _mm_mul_ps(_r0, _k7));
#endif
kptr += 32;
_k0 = _mm_loadu_ps(kptr);
_k1 = _mm_loadu_ps(kptr + 4);
_k2 = _mm_loadu_ps(kptr + 8);
_k3 = _mm_loadu_ps(kptr + 12);
_k4 = _mm_loadu_ps(kptr + 16);
_k5 = _mm_loadu_ps(kptr + 20);
_k6 = _mm_loadu_ps(kptr + 24);
_k7 = _mm_loadu_ps(kptr + 28);
#if __AVX__
_sum0 = _mm_fmadd_ps(_r1, _k0, _sum0);
_sum1 = _mm_fmadd_ps(_r1, _k1, _sum1);
_sum2 = _mm_fmadd_ps(_r1, _k2, _sum2);
_sum3 = _mm_fmadd_ps(_r1, _k3, _sum3);
_sum4 = _mm_fmadd_ps(_r1, _k4, _sum4);
_sum5 = _mm_fmadd_ps(_r1, _k5, _sum5);
_sum6 = _mm_fmadd_ps(_r1, _k6, _sum6);
_sum7 = _mm_fmadd_ps(_r1, _k7, _sum7);
#else
_sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r1, _k0));
_sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r1, _k1));
_sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r1, _k2));
_sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_r1, _k3));
_sum4 = _mm_add_ps(_sum4, _mm_mul_ps(_r1, _k4));
_sum5 = _mm_add_ps(_sum5, _mm_mul_ps(_r1, _k5));
_sum6 = _mm_add_ps(_sum6, _mm_mul_ps(_r1, _k6));
_sum7 = _mm_add_ps(_sum7, _mm_mul_ps(_r1, _k7));
#endif
kptr += 32;
_k0 = _mm_loadu_ps(kptr);
_k1 = _mm_loadu_ps(kptr + 4);
_k2 = _mm_loadu_ps(kptr + 8);
_k3 = _mm_loadu_ps(kptr + 12);
_k4 = _mm_loadu_ps(kptr + 16);
_k5 = _mm_loadu_ps(kptr + 20);
_k6 = _mm_loadu_ps(kptr + 24);
_k7 = _mm_loadu_ps(kptr + 28);
#if __AVX__
_sum0 = _mm_fmadd_ps(_r2, _k0, _sum0);
_sum1 = _mm_fmadd_ps(_r2, _k1, _sum1);
_sum2 = _mm_fmadd_ps(_r2, _k2, _sum2);
_sum3 = _mm_fmadd_ps(_r2, _k3, _sum3);
_sum4 = _mm_fmadd_ps(_r2, _k4, _sum4);
_sum5 = _mm_fmadd_ps(_r2, _k5, _sum5);
_sum6 = _mm_fmadd_ps(_r2, _k6, _sum6);
_sum7 = _mm_fmadd_ps(_r2, _k7, _sum7);
#else
_sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r2, _k0));
_sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r2, _k1));
_sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r2, _k2));
_sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_r2, _k3));
_sum4 = _mm_add_ps(_sum4, _mm_mul_ps(_r2, _k4));
_sum5 = _mm_add_ps(_sum5, _mm_mul_ps(_r2, _k5));
_sum6 = _mm_add_ps(_sum6, _mm_mul_ps(_r2, _k6));
_sum7 = _mm_add_ps(_sum7, _mm_mul_ps(_r2, _k7));
#endif
kptr += 32;
_k0 = _mm_loadu_ps(kptr);
_k1 = _mm_loadu_ps(kptr + 4);
_k2 = _mm_loadu_ps(kptr + 8);
_k3 = _mm_loadu_ps(kptr + 12);
_k4 = _mm_loadu_ps(kptr + 16);
_k5 = _mm_loadu_ps(kptr + 20);
_k6 = _mm_loadu_ps(kptr + 24);
_k7 = _mm_loadu_ps(kptr + 28);
#if __AVX__
_sum0 = _mm_fmadd_ps(_r3, _k0, _sum0);
_sum1 = _mm_fmadd_ps(_r3, _k1, _sum1);
_sum2 = _mm_fmadd_ps(_r3, _k2, _sum2);
_sum3 = _mm_fmadd_ps(_r3, _k3, _sum3);
_sum4 = _mm_fmadd_ps(_r3, _k4, _sum4);
_sum5 = _mm_fmadd_ps(_r3, _k5, _sum5);
_sum6 = _mm_fmadd_ps(_r3, _k6, _sum6);
_sum7 = _mm_fmadd_ps(_r3, _k7, _sum7);
#else
_sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r3, _k0));
_sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r3, _k1));
_sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r3, _k2));
_sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_r3, _k3));
_sum4 = _mm_add_ps(_sum4, _mm_mul_ps(_r3, _k4));
_sum5 = _mm_add_ps(_sum5, _mm_mul_ps(_r3, _k5));
_sum6 = _mm_add_ps(_sum6, _mm_mul_ps(_r3, _k6));
_sum7 = _mm_add_ps(_sum7, _mm_mul_ps(_r3, _k7));
#endif
kptr += 32;
r0 += 16;
}
for (; q < inch; q++)
{
__m128 _r0 = _mm_loadu_ps(r0);
__m128 _k0 = _mm_loadu_ps(kptr);
__m128 _k1 = _mm_loadu_ps(kptr + 4);
__m128 _k2 = _mm_loadu_ps(kptr + 8);
__m128 _k3 = _mm_loadu_ps(kptr + 12);
__m128 _k4 = _mm_loadu_ps(kptr + 16);
__m128 _k5 = _mm_loadu_ps(kptr + 20);
__m128 _k6 = _mm_loadu_ps(kptr + 24);
__m128 _k7 = _mm_loadu_ps(kptr + 28);
#if __AVX__
_sum0 = _mm_fmadd_ps(_r0, _k0, _sum0);
_sum1 = _mm_fmadd_ps(_r0, _k1, _sum1);
_sum2 = _mm_fmadd_ps(_r0, _k2, _sum2);
_sum3 = _mm_fmadd_ps(_r0, _k3, _sum3);
_sum4 = _mm_fmadd_ps(_r0, _k4, _sum4);
_sum5 = _mm_fmadd_ps(_r0, _k5, _sum5);
_sum6 = _mm_fmadd_ps(_r0, _k6, _sum6);
_sum7 = _mm_fmadd_ps(_r0, _k7, _sum7);
#else
_sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r0, _k0));
_sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r0, _k1));
_sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r0, _k2));
_sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_r0, _k3));
_sum4 = _mm_add_ps(_sum4, _mm_mul_ps(_r0, _k4));
_sum5 = _mm_add_ps(_sum5, _mm_mul_ps(_r0, _k5));
_sum6 = _mm_add_ps(_sum6, _mm_mul_ps(_r0, _k6));
_sum7 = _mm_add_ps(_sum7, _mm_mul_ps(_r0, _k7));
#endif
kptr += 32;
r0 += 4;
}
_mm_storeu_ps(output0_tm, _sum0);
_mm_storeu_ps(output1_tm, _sum1);
_mm_storeu_ps(output2_tm, _sum2);
_mm_storeu_ps(output3_tm, _sum3);
_mm_storeu_ps(output4_tm, _sum4);
_mm_storeu_ps(output5_tm, _sum5);
_mm_storeu_ps(output6_tm, _sum6);
_mm_storeu_ps(output7_tm, _sum7);
#else
float sum0[4] = {0};
float sum1[4] = {0};
float sum2[4] = {0};
float sum3[4] = {0};
float sum4[4] = {0};
float sum5[4] = {0};
float sum6[4] = {0};
float sum7[4] = {0};
for (int q = 0; q < inch; q++)
{
for (int n = 0; n < 4; n++)
{
sum0[n] += r0[n] * kptr[n];
sum1[n] += r0[n] * kptr[n + 4];
sum2[n] += r0[n] * kptr[n + 8];
sum3[n] += r0[n] * kptr[n + 12];
sum4[n] += r0[n] * kptr[n + 16];
sum5[n] += r0[n] * kptr[n + 20];
sum6[n] += r0[n] * kptr[n + 24];
sum7[n] += r0[n] * kptr[n + 28];
}
kptr += 32;
r0 += 4;
}
for (int n = 0; n < 4; n++)
{
output0_tm[n] = sum0[n];
output1_tm[n] = sum1[n];
output2_tm[n] = sum2[n];
output3_tm[n] = sum3[n];
output4_tm[n] = sum4[n];
output5_tm[n] = sum5[n];
output6_tm[n] = sum6[n];
output7_tm[n] = sum7[n];
}
#endif // __AVX__
output0_tm += 36;
output1_tm += 36;
output2_tm += 36;
output3_tm += 36;
output4_tm += 36;
output5_tm += 36;
output6_tm += 36;
output7_tm += 36;
}
}
nn_outch = (outch - remain_outch_start) >> 2;
for (int pp = 0; pp < nn_outch; pp++)
{
int p = remain_outch_start + pp * 4;
float* output0_tm = top_blob_tm.channel(p);
float* output1_tm = top_blob_tm.channel(p + 1);
float* output2_tm = top_blob_tm.channel(p + 2);
float* output3_tm = top_blob_tm.channel(p + 3);
output0_tm = output0_tm + r * 4;
output1_tm = output1_tm + r * 4;
output2_tm = output2_tm + r * 4;
output3_tm = output3_tm + r * 4;
for (int i = 0; i < tiles; i++)
{
const float* kptr = kernel_tm_test[r].channel(p / 8 + (p % 8) / 4);
const float* r0 = bottom_blob_tm.channel(tiles * r + i);
#if __AVX__ || __SSE__
#if __AVX__
float zero_val = 0.f;
__m128 _sum0 = _mm_broadcast_ss(&zero_val);
__m128 _sum1 = _mm_broadcast_ss(&zero_val);
__m128 _sum2 = _mm_broadcast_ss(&zero_val);
__m128 _sum3 = _mm_broadcast_ss(&zero_val);
#else
__m128 _sum0 = _mm_set1_ps(0.f);
__m128 _sum1 = _mm_set1_ps(0.f);
__m128 _sum2 = _mm_set1_ps(0.f);
__m128 _sum3 = _mm_set1_ps(0.f);
#endif
for (int q = 0; q < inch; q++)
{
__m128 _r0 = _mm_loadu_ps(r0);
__m128 _k0 = _mm_loadu_ps(kptr);
__m128 _k1 = _mm_loadu_ps(kptr + 4);
__m128 _k2 = _mm_loadu_ps(kptr + 8);
__m128 _k3 = _mm_loadu_ps(kptr + 12);
#if __AVX__
_sum0 = _mm_fmadd_ps(_r0, _k0, _sum0);
_sum1 = _mm_fmadd_ps(_r0, _k1, _sum1);
_sum2 = _mm_fmadd_ps(_r0, _k2, _sum2);
_sum3 = _mm_fmadd_ps(_r0, _k3, _sum3);
#else
_sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r0, _k0));
_sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r0, _k1));
_sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r0, _k2));
_sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_r0, _k3));
#endif
kptr += 16;
r0 += 4;
}
_mm_storeu_ps(output0_tm, _sum0);
_mm_storeu_ps(output1_tm, _sum1);
_mm_storeu_ps(output2_tm, _sum2);
_mm_storeu_ps(output3_tm, _sum3);
#else
float sum0[4] = {0};
float sum1[4] = {0};
float sum2[4] = {0};
float sum3[4] = {0};
for (int q = 0; q < inch; q++)
{
for (int n = 0; n < 4; n++)
{
sum0[n] += r0[n] * kptr[n];
sum1[n] += r0[n] * kptr[n + 4];
sum2[n] += r0[n] * kptr[n + 8];
sum3[n] += r0[n] * kptr[n + 12];
}
kptr += 16;
r0 += 4;
}
for (int n = 0; n < 4; n++)
{
output0_tm[n] = sum0[n];
output1_tm[n] = sum1[n];
output2_tm[n] = sum2[n];
output3_tm[n] = sum3[n];
}
#endif // __AVX__
output0_tm += 36;
output1_tm += 36;
output2_tm += 36;
output3_tm += 36;
}
}
remain_outch_start += nn_outch << 2;
for (int p = remain_outch_start; p < outch; p++)
{
float* output0_tm = top_blob_tm.channel(p);
output0_tm = output0_tm + r * 4;
for (int i = 0; i < tiles; i++)
{
const float* kptr = kernel_tm_test[r].channel(p / 8 + (p % 8) / 4 + p % 4);
const float* r0 = bottom_blob_tm.channel(tiles * r + i);
#if __AVX__ || __SSE__
#if __AVX__
float zero_val = 0.f;
__m128 _sum0 = _mm_broadcast_ss(&zero_val);
#else
__m128 _sum0 = _mm_set1_ps(0.f);
#endif
for (int q = 0; q < inch; q++)
{
__m128 _r0 = _mm_loadu_ps(r0);
__m128 _k0 = _mm_loadu_ps(kptr);
#if __AVX__
_sum0 = _mm_fmadd_ps(_r0, _k0, _sum0);
#else
_sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r0, _k0));
#endif
kptr += 16;
r0 += 4;
}
_mm_storeu_ps(output0_tm, _sum0);
#else
float sum0[4] = {0};
for (int q = 0; q < inch; q++)
{
for (int n = 0; n < 4; n++)
{
sum0[n] += (int)r0[n] * kptr[n];
}
kptr += 4;
r0 += 4;
}
for (int n = 0; n < 4; n++)
{
output0_tm[n] = sum0[n];
}
#endif // __AVX__ || __SSE__
output0_tm += 36;
}
}
// for (int p=0; p<outch; p++)
// {
// Mat out0_tm = top_blob_tm.channel(p);
// const Mat kernel0_tm = kernel_tm.channel(p);
// for (int i=0; i<tiles; i++)
// {
// float* output0_tm = out0_tm.row<int>(i);
// int sum0[36] = {0};
// for (int q=0; q<inch; q++)
// {
// const float* r0 = bottom_blob_tm.channel(q).row<float>(i);
// const float* k0 = kernel0_tm.row<float>(q);
// for (int n=0; n<36; n++)
// {
// sum0[n] += (int)r0[n] * k0[n];
// }
// }
// for (int n=0; n<36; n++)
// {
// output0_tm[n] = sum0[n];
// }
// }
// }
}
}
bottom_blob_tm = Mat();
// END dot
// BEGIN transform output
Mat top_blob_bordered;
if (outw == top_blob.w && outh == top_blob.h)
{
top_blob_bordered = top_blob;
}
else
{
top_blob_bordered.create(outw, outh, outch, elemsize, opt.workspace_allocator);
}
{
// AT
// const float itm[4][6] = {
// {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 0.0f},
// {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 1.0f}
// };
// 0 = r00 + r01 + r02 + r03 + r04
// 1 = r01 - r02 + 2 * (r03 - r04)
// 2 = r01 + r02 + 4 * (r03 + r04)
// 3 = r01 - r02 + 8 * (r03 - r04) + r05
int w_tm = outw / 4 * 6;
int h_tm = outh / 4 * 6;
int nColBlocks = h_tm / 6; // may be the block num in Feathercnn
int nRowBlocks = w_tm / 6;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
float* out_tile = top_blob_tm.channel(p);
float* outRow0 = top_blob_bordered.channel(p);
float* outRow1 = outRow0 + outw;
float* outRow2 = outRow0 + outw * 2;
float* outRow3 = outRow0 + outw * 3;
const float bias0 = bias ? bias[p] : 0.f;
for (int j = 0; j < nColBlocks; j++)
{
for (int i = 0; i < nRowBlocks; i++)
{
// TODO AVX2
float s0[6], s1[6], s2[6], s3[6], s4[6], s5[6];
float w0[6], w1[6], w2[6], w3[6];
float d0[4], d1[4], d2[4], d3[4], d4[4], d5[4];
float o0[4], o1[4], o2[4], o3[4];
// load
for (int n = 0; n < 6; n++)
{
s0[n] = out_tile[n];
s1[n] = out_tile[n + 6];
s2[n] = out_tile[n + 12];
s3[n] = out_tile[n + 18];
s4[n] = out_tile[n + 24];
s5[n] = out_tile[n + 30];
}
// w = A_T * W
for (int n = 0; n < 6; n++)
{
w0[n] = s0[n] + s1[n] + s2[n] + s3[n] + s4[n];
w1[n] = s1[n] - s2[n] + 2 * s3[n] - 2 * s4[n];
w2[n] = s1[n] + s2[n] + 4 * s3[n] + 4 * s4[n];
w3[n] = s1[n] - s2[n] + 8 * s3[n] - 8 * s4[n] + s5[n];
}
// transpose w to w_t
{
d0[0] = w0[0];
d0[1] = w1[0];
d0[2] = w2[0];
d0[3] = w3[0];
d1[0] = w0[1];
d1[1] = w1[1];
d1[2] = w2[1];
d1[3] = w3[1];
d2[0] = w0[2];
d2[1] = w1[2];
d2[2] = w2[2];
d2[3] = w3[2];
d3[0] = w0[3];
d3[1] = w1[3];
d3[2] = w2[3];
d3[3] = w3[3];
d4[0] = w0[4];
d4[1] = w1[4];
d4[2] = w2[4];
d4[3] = w3[4];
d5[0] = w0[5];
d5[1] = w1[5];
d5[2] = w2[5];
d5[3] = w3[5];
}
// Y = A_T * w_t
for (int n = 0; n < 4; n++)
{
o0[n] = d0[n] + d1[n] + d2[n] + d3[n] + d4[n];
o1[n] = d1[n] - d2[n] + 2 * d3[n] - 2 * d4[n];
o2[n] = d1[n] + d2[n] + 4 * d3[n] + 4 * d4[n];
o3[n] = d1[n] - d2[n] + 8 * d3[n] - 8 * d4[n] + d5[n];
}
// save to top blob tm
for (int n = 0; n < 4; n++)
{
outRow0[n] = o0[n] + bias0;
outRow1[n] = o1[n] + bias0;
outRow2[n] = o2[n] + bias0;
outRow3[n] = o3[n] + bias0;
}
out_tile += 36;
outRow0 += 4;
outRow1 += 4;
outRow2 += 4;
outRow3 += 4;
}
outRow0 += outw * 3;
outRow1 += outw * 3;
outRow2 += outw * 3;
outRow3 += outw * 3;
}
}
}
// END transform output
// cut result pad
copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}
static void conv3x3s2_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt)
{
    // Plain (scalar) 3x3 convolution with stride 2, one output channel per
    // OpenMP task. Output is seeded with the bias, then each input channel's
    // contribution is accumulated on top.
    const int w = bottom_blob.w;
    const int inch = bottom_blob.c;

    const int outw = top_blob.w;
    const int outh = top_blob.h;
    const int outch = top_blob.c;

    // After one output row we have consumed 2*outw input columns; skip the
    // remainder of that input row plus one full row (vertical stride of 2).
    const int tailstep = w - 2 * outw + w;

    const float* kernel = _kernel;
    const float* bias = _bias;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        Mat out = top_blob.channel(p);

        // Seed the whole output channel with its bias (0 when no bias given).
        const float bias0 = bias ? bias[p] : 0.f;
        out.fill(bias0);

        for (int q = 0; q < inch; q++)
        {
            float* outptr = out;

            const float* src = bottom_blob.channel(q);
            // 3x3 kernel for this (output, input) channel pair, row-major.
            const float* ktmp = kernel + p * inch * 9 + q * 9;

            const float* row0 = src;
            const float* row1 = src + w;
            const float* row2 = src + w * 2;

            for (int i = 0; i < outh; i++)
            {
                for (int j = 0; j < outw; j++)
                {
                    float sum = 0;

                    sum += row0[0] * ktmp[0];
                    sum += row0[1] * ktmp[1];
                    sum += row0[2] * ktmp[2];

                    sum += row1[0] * ktmp[3];
                    sum += row1[1] * ktmp[4];
                    sum += row1[2] * ktmp[5];

                    sum += row2[0] * ktmp[6];
                    sum += row2[1] * ktmp[7];
                    sum += row2[2] * ktmp[8];

                    *outptr += sum;

                    // horizontal stride of 2
                    row0 += 2;
                    row1 += 2;
                    row2 += 2;
                    outptr++;
                }

                row0 += tailstep;
                row1 += tailstep;
                row2 += tailstep;
            }
        }
    }
}
|
StmtOpenMP.h | //===- StmtOpenMP.h - Classes for OpenMP directives ------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file defines OpenMP AST classes for executable directives and
/// clauses.
///
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_AST_STMTOPENMP_H
#define LLVM_CLANG_AST_STMTOPENMP_H
#include "clang/AST/ASTContext.h"
#include "clang/AST/Expr.h"
#include "clang/AST/OpenMPClause.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/StmtCXX.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/SourceLocation.h"
namespace clang {
//===----------------------------------------------------------------------===//
// AST classes for directives.
//===----------------------------------------------------------------------===//
/// Representation of an OpenMP canonical loop.
///
/// OpenMP 1.0 C/C++, section 2.4.1 for Construct; canonical-shape
/// OpenMP 2.0 C/C++, section 2.4.1 for Construct; canonical-shape
/// OpenMP 2.5, section 2.5.1 Loop Construct; canonical form
/// OpenMP 3.1, section 2.5.1 Loop Construct; canonical form
/// OpenMP 4.0, section 2.6 Canonical Loop Form
/// OpenMP 4.5, section 2.6 Canonical Loop Form
/// OpenMP 5.0, section 2.9.1 Canonical Loop Form
/// OpenMP 5.1, section 2.11.1 Canonical Loop Nest Form
///
/// An OpenMP canonical loop is a for-statement or range-based for-statement
/// with additional requirements that ensure that the number of iterations is
/// known before entering the loop and allow skipping to an arbitrary iteration.
/// The OMPCanonicalLoop AST node wraps a ForStmt or CXXForRangeStmt that is
/// known to fulfill OpenMP's canonical loop requirements because of being
/// associated to an OMPLoopBasedDirective. That is, the general structure is:
///
/// OMPLoopBasedDirective
/// [`- CapturedStmt ]
/// [ `- CapturedDecl]
/// ` OMPCanonicalLoop
/// `- ForStmt/CXXForRangeStmt
/// `- Stmt
///
/// One or multiple CapturedStmt/CapturedDecl pairs may be inserted by some
/// directives such as OMPParallelForDirective, but others do not need them
/// (such as OMPTileDirective). The OMPCanonicalLoop and
/// ForStmt/CXXForRangeStmt pair is repeated for each loop associated with the
/// directive. A OMPCanonicalLoop must not appear in the AST unless associated
/// with a OMPLoopBasedDirective. In an imperfectly nested loop nest, the
/// OMPCanonicalLoop may also be wrapped in a CompoundStmt:
///
/// [...]
/// ` OMPCanonicalLoop
/// `- ForStmt/CXXForRangeStmt
/// `- CompoundStmt
/// |- Leading in-between code (if any)
/// |- OMPCanonicalLoop
/// | `- ForStmt/CXXForRangeStmt
/// | `- ...
/// `- Trailing in-between code (if any)
///
/// The leading/trailing in-between code must not itself be a OMPCanonicalLoop
/// to avoid confusion which loop belongs to the nesting.
///
/// There are three different kinds of iteration variables for different
/// purposes:
/// * Loop user variable: The user-accessible variable with different value for
/// each iteration.
/// * Loop iteration variable: The variable used to identify a loop iteration;
/// for range-based for-statement, this is the hidden iterator '__begin'. For
/// other loops, it is identical to the loop user variable. Must be a
/// random-access iterator, pointer or integer type.
/// * Logical iteration counter: Normalized loop counter starting at 0 and
/// incrementing by one at each iteration. Allows abstracting over the type
/// of the loop iteration variable and is always an unsigned integer type
/// appropriate to represent the range of the loop iteration variable. Its
/// value corresponds to the logical iteration number in the OpenMP
/// specification.
///
/// This AST node provides two captured statements:
/// * The distance function which computes the number of iterations.
/// * The loop user variable function that computes the loop user variable when
/// given a logical iteration number.
///
/// These captured statements provide the link between C/C++ semantics and the
/// logical iteration counters used by the OpenMPIRBuilder which is
/// language-agnostic and therefore does not know e.g. how to advance a
/// random-access iterator. The OpenMPIRBuilder will use this information to
/// apply simd, workshare-loop, distribute, taskloop and loop directives to the
/// loop. For compatibility with the non-OpenMPIRBuilder codegen path, an
/// OMPCanonicalLoop can itself also be wrapped into the CapturedStmts of an
/// OMPLoopDirective and skipped when searching for the associated syntactical
/// loop.
///
/// Example:
/// <code>
/// std::vector<std::string> Container{1,2,3};
/// for (std::string Str : Container)
/// Body(Str);
/// </code>
/// which is syntactic sugar for approximately:
/// <code>
/// auto &&__range = Container;
/// auto __begin = std::begin(__range);
/// auto __end = std::end(__range);
/// for (; __begin != __end; ++__begin) {
/// std::string Str = *__begin;
/// Body(Str);
/// }
/// </code>
/// In this example, the loop user variable is `Str`, the loop iteration
/// variable is `__begin` of type `std::vector<std::string>::iterator` and the
/// logical iteration number type is `size_t` (unsigned version of
/// `std::vector<std::string>::iterator::difference_type` aka `ptrdiff_t`).
/// Therefore, the distance function will be
/// <code>
/// [&](size_t &Result) { Result = __end - __begin; }
/// </code>
/// and the loop variable function is
/// <code>
/// [&,__begin](std::vector<std::string>::iterator &Result, size_t Logical) {
/// Result = __begin + Logical;
/// }
/// </code>
/// The variable `__begin`, aka the loop iteration variable, is captured by
/// value because it is modified in the loop body, but both functions require
/// the initial value. The OpenMP specification explicitly leaves unspecified
/// when the loop expressions are evaluated such that a capture by reference is
/// sufficient.
class OMPCanonicalLoop : public Stmt {
friend class ASTStmtReader;
friend class ASTStmtWriter;
/// Children of this AST node.
enum {
LOOP_STMT,     ///< The wrapped ForStmt/CXXForRangeStmt.
DISTANCE_FUNC, ///< CapturedStmt computing the number of iterations.
LOOPVAR_FUNC,  ///< CapturedStmt computing the loop user variable.
LOOPVAR_REF,   ///< DeclRefExpr to the loop user variable.
LastSubStmt = LOOPVAR_REF
};
private:
/// This AST node's children.
Stmt *SubStmts[LastSubStmt + 1] = {};
/// Only constructible through create()/createEmpty().
OMPCanonicalLoop() : Stmt(StmtClass::OMPCanonicalLoopClass) {}
public:
/// Create a new OMPCanonicalLoop.
static OMPCanonicalLoop *create(const ASTContext &Ctx, Stmt *LoopStmt,
CapturedStmt *DistanceFunc,
CapturedStmt *LoopVarFunc,
DeclRefExpr *LoopVarRef) {
OMPCanonicalLoop *S = new (Ctx) OMPCanonicalLoop();
S->setLoopStmt(LoopStmt);
S->setDistanceFunc(DistanceFunc);
S->setLoopVarFunc(LoopVarFunc);
S->setLoopVarRef(LoopVarRef);
return S;
}
/// Create an empty OMPCanonicalLoop for deserialization.
static OMPCanonicalLoop *createEmpty(const ASTContext &Ctx) {
return new (Ctx) OMPCanonicalLoop();
}
/// Support for isa/dyn_cast.
static bool classof(const Stmt *S) {
return S->getStmtClass() == StmtClass::OMPCanonicalLoopClass;
}
// The source range is delegated to the wrapped loop statement.
SourceLocation getBeginLoc() const { return getLoopStmt()->getBeginLoc(); }
SourceLocation getEndLoc() const { return getLoopStmt()->getEndLoc(); }
/// Return this AST node's children.
/// @{
child_range children() {
return child_range(&SubStmts[0], &SubStmts[0] + LastSubStmt + 1);
}
const_child_range children() const {
return const_child_range(&SubStmts[0], &SubStmts[0] + LastSubStmt + 1);
}
/// @}
/// The wrapped syntactic loop statement (ForStmt or CXXForRangeStmt).
/// @{
Stmt *getLoopStmt() { return SubStmts[LOOP_STMT]; }
const Stmt *getLoopStmt() const { return SubStmts[LOOP_STMT]; }
void setLoopStmt(Stmt *S) {
assert((isa<ForStmt>(S) || isa<CXXForRangeStmt>(S)) &&
"Canonical loop must be a for loop (range-based or otherwise)");
SubStmts[LOOP_STMT] = S;
}
/// @}
/// The function that computes the number of loop iterations. Can be evaluated
/// before entering the loop but after the syntactical loop's init
/// statement(s).
///
/// Function signature: void(LogicalTy &Result)
/// Any values necessary to compute the distance are captures of the closure.
/// @{
CapturedStmt *getDistanceFunc() {
return cast<CapturedStmt>(SubStmts[DISTANCE_FUNC]);
}
const CapturedStmt *getDistanceFunc() const {
return cast<CapturedStmt>(SubStmts[DISTANCE_FUNC]);
}
void setDistanceFunc(CapturedStmt *S) {
assert(S && "Expected non-null captured statement");
SubStmts[DISTANCE_FUNC] = S;
}
/// @}
/// The function that computes the loop user variable from a logical iteration
/// counter. Can be evaluated as first statement in the loop.
///
/// Function signature: void(LoopVarTy &Result, LogicalTy Number)
/// Any other values required to compute the loop user variable (such as start
/// value, step size) are captured by the closure. In particular, the initial
/// value of loop iteration variable is captured by value to be unaffected by
/// previous iterations.
/// @{
CapturedStmt *getLoopVarFunc() {
return cast<CapturedStmt>(SubStmts[LOOPVAR_FUNC]);
}
const CapturedStmt *getLoopVarFunc() const {
return cast<CapturedStmt>(SubStmts[LOOPVAR_FUNC]);
}
void setLoopVarFunc(CapturedStmt *S) {
assert(S && "Expected non-null captured statement");
SubStmts[LOOPVAR_FUNC] = S;
}
/// @}
/// Reference to the loop user variable as accessed in the loop body.
/// @{
DeclRefExpr *getLoopVarRef() {
return cast<DeclRefExpr>(SubStmts[LOOPVAR_REF]);
}
const DeclRefExpr *getLoopVarRef() const {
return cast<DeclRefExpr>(SubStmts[LOOPVAR_REF]);
}
void setLoopVarRef(DeclRefExpr *E) {
assert(E && "Expected non-null loop variable");
SubStmts[LOOPVAR_REF] = E;
}
/// @}
};
/// This is a basic class for representing single OpenMP executable
/// directive.
///
class OMPExecutableDirective : public Stmt {
friend class ASTStmtReader;
friend class ASTStmtWriter;
/// Kind of the directive.
OpenMPDirectiveKind Kind = llvm::omp::OMPD_unknown;
/// Starting location of the directive (directive keyword).
SourceLocation StartLoc;
/// Ending location of the directive.
SourceLocation EndLoc;
/// Get the clauses storage.
MutableArrayRef<OMPClause *> getClauses() {
if (!Data)
return llvm::None;
return Data->getClauses();
}
protected:
/// Data, associated with the directive.
OMPChildren *Data = nullptr;
/// Build instance of directive of class \a K.
///
/// \param SC Statement class.
/// \param K Kind of OpenMP directive.
/// \param StartLoc Starting location of the directive (directive keyword).
/// \param EndLoc Ending location of the directive.
///
OMPExecutableDirective(StmtClass SC, OpenMPDirectiveKind K,
SourceLocation StartLoc, SourceLocation EndLoc)
: Stmt(SC), Kind(K), StartLoc(std::move(StartLoc)),
EndLoc(std::move(EndLoc)) {}
template <typename T, typename... Params>
static T *createDirective(const ASTContext &C, ArrayRef<OMPClause *> Clauses,
Stmt *AssociatedStmt, unsigned NumChildren,
Params &&... P) {
void *Mem =
C.Allocate(sizeof(T) + OMPChildren::size(Clauses.size(), AssociatedStmt,
NumChildren),
alignof(T));
auto *Data = OMPChildren::Create(reinterpret_cast<T *>(Mem) + 1, Clauses,
AssociatedStmt, NumChildren);
auto *Inst = new (Mem) T(std::forward<Params>(P)...);
Inst->Data = Data;
return Inst;
}
template <typename T, typename... Params>
static T *createEmptyDirective(const ASTContext &C, unsigned NumClauses,
bool HasAssociatedStmt, unsigned NumChildren,
Params &&... P) {
void *Mem =
C.Allocate(sizeof(T) + OMPChildren::size(NumClauses, HasAssociatedStmt,
NumChildren),
alignof(T));
auto *Data =
OMPChildren::CreateEmpty(reinterpret_cast<T *>(Mem) + 1, NumClauses,
HasAssociatedStmt, NumChildren);
auto *Inst = new (Mem) T(std::forward<Params>(P)...);
Inst->Data = Data;
return Inst;
}
template <typename T>
static T *createEmptyDirective(const ASTContext &C, unsigned NumClauses,
bool HasAssociatedStmt = false,
unsigned NumChildren = 0) {
void *Mem =
C.Allocate(sizeof(T) + OMPChildren::size(NumClauses, HasAssociatedStmt,
NumChildren),
alignof(T));
auto *Data =
OMPChildren::CreateEmpty(reinterpret_cast<T *>(Mem) + 1, NumClauses,
HasAssociatedStmt, NumChildren);
auto *Inst = new (Mem) T;
Inst->Data = Data;
return Inst;
}
public:
/// Iterates over expressions/statements used in the construct.
class used_clauses_child_iterator
: public llvm::iterator_adaptor_base<
used_clauses_child_iterator, ArrayRef<OMPClause *>::iterator,
std::forward_iterator_tag, Stmt *, ptrdiff_t, Stmt *, Stmt *> {
ArrayRef<OMPClause *>::iterator End;
OMPClause::child_iterator ChildI, ChildEnd;
void MoveToNext() {
if (ChildI != ChildEnd)
return;
while (this->I != End) {
++this->I;
if (this->I != End) {
ChildI = (*this->I)->used_children().begin();
ChildEnd = (*this->I)->used_children().end();
if (ChildI != ChildEnd)
return;
}
}
}
public:
explicit used_clauses_child_iterator(ArrayRef<OMPClause *> Clauses)
: used_clauses_child_iterator::iterator_adaptor_base(Clauses.begin()),
End(Clauses.end()) {
if (this->I != End) {
ChildI = (*this->I)->used_children().begin();
ChildEnd = (*this->I)->used_children().end();
MoveToNext();
}
}
Stmt *operator*() const { return *ChildI; }
Stmt *operator->() const { return **this; }
used_clauses_child_iterator &operator++() {
++ChildI;
if (ChildI != ChildEnd)
return *this;
if (this->I != End) {
++this->I;
if (this->I != End) {
ChildI = (*this->I)->used_children().begin();
ChildEnd = (*this->I)->used_children().end();
}
}
MoveToNext();
return *this;
}
};
static llvm::iterator_range<used_clauses_child_iterator>
used_clauses_children(ArrayRef<OMPClause *> Clauses) {
return {used_clauses_child_iterator(Clauses),
used_clauses_child_iterator(llvm::makeArrayRef(Clauses.end(), 0))};
}
/// Iterates over a filtered subrange of clauses applied to a
/// directive.
///
/// This iterator visits only clauses of type SpecificClause.
template <typename SpecificClause>
class specific_clause_iterator
: public llvm::iterator_adaptor_base<
specific_clause_iterator<SpecificClause>,
ArrayRef<OMPClause *>::const_iterator, std::forward_iterator_tag,
const SpecificClause *, ptrdiff_t, const SpecificClause *,
const SpecificClause *> {
ArrayRef<OMPClause *>::const_iterator End;
void SkipToNextClause() {
while (this->I != End && !isa<SpecificClause>(*this->I))
++this->I;
}
public:
explicit specific_clause_iterator(ArrayRef<OMPClause *> Clauses)
: specific_clause_iterator::iterator_adaptor_base(Clauses.begin()),
End(Clauses.end()) {
SkipToNextClause();
}
const SpecificClause *operator*() const {
return cast<SpecificClause>(*this->I);
}
const SpecificClause *operator->() const { return **this; }
specific_clause_iterator &operator++() {
++this->I;
SkipToNextClause();
return *this;
}
};
template <typename SpecificClause>
static llvm::iterator_range<specific_clause_iterator<SpecificClause>>
getClausesOfKind(ArrayRef<OMPClause *> Clauses) {
return {specific_clause_iterator<SpecificClause>(Clauses),
specific_clause_iterator<SpecificClause>(
llvm::makeArrayRef(Clauses.end(), 0))};
}
template <typename SpecificClause>
llvm::iterator_range<specific_clause_iterator<SpecificClause>>
getClausesOfKind() const {
return getClausesOfKind<SpecificClause>(clauses());
}
/// Gets a single clause of the specified kind associated with the
/// current directive iff there is only one clause of this kind (and assertion
/// is fired if there is more than one clause is associated with the
/// directive). Returns nullptr if no clause of this kind is associated with
/// the directive.
template <typename SpecificClause>
const SpecificClause *getSingleClause() const {
auto Clauses = getClausesOfKind<SpecificClause>();
if (Clauses.begin() != Clauses.end()) {
assert(std::next(Clauses.begin()) == Clauses.end() &&
"There are at least 2 clauses of the specified kind");
return *Clauses.begin();
}
return nullptr;
}
/// Returns true if the current directive has one or more clauses of a
/// specific kind.
template <typename SpecificClause>
bool hasClausesOfKind() const {
auto Clauses = getClausesOfKind<SpecificClause>();
return Clauses.begin() != Clauses.end();
}
/// Returns starting location of directive kind.
SourceLocation getBeginLoc() const { return StartLoc; }
/// Returns ending location of directive.
SourceLocation getEndLoc() const { return EndLoc; }
/// Set starting location of directive kind.
///
/// \param Loc New starting location of directive.
///
void setLocStart(SourceLocation Loc) { StartLoc = Loc; }
/// Set ending location of directive.
///
/// \param Loc New ending location of directive.
///
void setLocEnd(SourceLocation Loc) { EndLoc = Loc; }
/// Get number of clauses. Directives without allocated child storage
/// (Data == nullptr) have no clauses.
unsigned getNumClauses() const { return Data ? Data->getNumClauses() : 0; }
/// Returns specified clause.
///
/// \param I Number of clause. Must be a valid index into clauses().
///
OMPClause *getClause(unsigned I) const { return clauses()[I]; }
/// Returns true if directive has associated statement.
bool hasAssociatedStmt() const { return Data && Data->hasAssociatedStmt(); }
/// Returns statement associated with the directive.
/// Const forwarder to the non-const overload below.
const Stmt *getAssociatedStmt() const {
return const_cast<OMPExecutableDirective *>(this)->getAssociatedStmt();
}
// Returns the statement associated with the directive; asserts that one
// exists (see hasAssociatedStmt()).
Stmt *getAssociatedStmt() {
assert(hasAssociatedStmt() &&
"Expected directive with the associated statement.");
return Data->getAssociatedStmt();
}
/// Returns the captured statement associated with the
/// component region within the (combined) directive.
///
/// \param RegionKind Component region kind.
const CapturedStmt *getCapturedStmt(OpenMPDirectiveKind RegionKind) const {
  assert(hasAssociatedStmt() &&
         "Expected directive with the associated statement.");
  // Compute the capture regions this directive kind produces, then select
  // the captured statement for the requested component region.
  SmallVector<OpenMPDirectiveKind, 4> Regions;
  getOpenMPCaptureRegions(Regions, getDirectiveKind());
  return Data->getCapturedStmt(RegionKind, Regions);
}
/// Get innermost captured statement for the construct.
CapturedStmt *getInnermostCapturedStmt() {
  assert(hasAssociatedStmt() &&
         "Expected directive with the associated statement.");
  // Gather the capture regions for this kind and ask the child storage for
  // the innermost captured statement among them.
  SmallVector<OpenMPDirectiveKind, 4> Regions;
  getOpenMPCaptureRegions(Regions, getDirectiveKind());
  return Data->getInnermostCapturedStmt(Regions);
}
// Const forwarder to the non-const overload above.
const CapturedStmt *getInnermostCapturedStmt() const {
return const_cast<OMPExecutableDirective *>(this)
->getInnermostCapturedStmt();
}
// Returns the OpenMP directive kind stored for this node.
OpenMPDirectiveKind getDirectiveKind() const { return Kind; }
// LLVM-style RTTI: matches any statement class in the executable-directive
// range of StmtClass.
static bool classof(const Stmt *S) {
return S->getStmtClass() >= firstOMPExecutableDirectiveConstant &&
S->getStmtClass() <= lastOMPExecutableDirectiveConstant;
}
// Child range of the directive; empty when no child storage was allocated.
child_range children() {
  if (Data)
    return Data->getAssociatedStmtAsRange();
  return child_range(child_iterator(), child_iterator());
}
// Const forwarder; the non-const range converts to const_child_range.
const_child_range children() const {
return const_cast<OMPExecutableDirective *>(this)->children();
}
// The directive's clause list; empty when no child storage was allocated.
ArrayRef<OMPClause *> clauses() const {
  if (Data)
    return Data->getClauses();
  return llvm::None;
}
/// Returns whether or not this is a Standalone directive.
///
/// Stand-alone directives are executable directives
/// that have no associated user code.
bool isStandaloneDirective() const;
/// Returns the AST node representing OpenMP structured-block of this
/// OpenMP executable directive,
/// Prerequisite: Executable Directive must not be Standalone directive.
/// Const forwarder to the out-of-line non-const overload.
const Stmt *getStructuredBlock() const {
return const_cast<OMPExecutableDirective *>(this)->getStructuredBlock();
}
Stmt *getStructuredBlock();
// Const forwarder to the non-const overload below.
const Stmt *getRawStmt() const {
return const_cast<OMPExecutableDirective *>(this)->getRawStmt();
}
// Returns the raw (uncaptured) associated statement; asserts that an
// associated statement exists.
Stmt *getRawStmt() {
assert(hasAssociatedStmt() &&
"Expected directive with the associated statement.");
return Data->getRawStmt();
}
};
/// This represents '#pragma omp parallel' directive.
///
/// \code
/// #pragma omp parallel private(a,b) reduction(+: c,d)
/// \endcode
/// In this example directive '#pragma omp parallel' has clauses 'private'
/// with the variables 'a' and 'b' and 'reduction' with operator '+' and
/// variables 'c' and 'd'.
///
class OMPParallelDirective : public OMPExecutableDirective {
// ASTStmtReader deserializes the private state; OMPExecutableDirective
// needs access for its creation helpers.
friend class ASTStmtReader;
friend class OMPExecutableDirective;
/// true if the construct has inner cancel directive.
bool HasCancel = false;
/// Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive (directive keyword).
/// \param EndLoc Ending Location of the directive.
///
OMPParallelDirective(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPExecutableDirective(OMPParallelDirectiveClass,
llvm::omp::OMPD_parallel, StartLoc, EndLoc) {}
/// Build an empty directive.
///
explicit OMPParallelDirective()
: OMPExecutableDirective(OMPParallelDirectiveClass,
llvm::omp::OMPD_parallel, SourceLocation(),
SourceLocation()) {}
/// Sets special task reduction descriptor.
/// The expression is stored in child slot 0 of the directive's Data.
void setTaskReductionRefExpr(Expr *E) { Data->getChildren()[0] = E; }
/// Set cancel state.
void setHasCancel(bool Has) { HasCancel = Has; }
public:
/// Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement associated with the directive.
/// \param TaskRedRef Task reduction special reference expression to handle
/// taskgroup descriptor.
/// \param HasCancel true if this directive has inner cancel directive.
///
static OMPParallelDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *TaskRedRef,
bool HasCancel);
/// Creates an empty directive with the place for \a N clauses.
///
/// \param C AST context.
/// \param NumClauses Number of clauses.
///
static OMPParallelDirective *CreateEmpty(const ASTContext &C,
unsigned NumClauses, EmptyShell);
/// Returns special task reduction reference expression.
/// May be null when no task reduction descriptor was set (cast_or_null).
Expr *getTaskReductionRefExpr() {
return cast_or_null<Expr>(Data->getChildren()[0]);
}
// Const forwarder to the non-const overload above.
const Expr *getTaskReductionRefExpr() const {
return const_cast<OMPParallelDirective *>(this)->getTaskReductionRefExpr();
}
/// Return true if current directive has inner cancel directive.
bool hasCancel() const { return HasCancel; }
// LLVM-style RTTI support.
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPParallelDirectiveClass;
}
};
/// The base class for all loop-based directives, including loop transformation
/// directives.
class OMPLoopBasedDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
protected:
/// Number of collapsed loops as specified by 'collapse' clause.
unsigned NumAssociatedLoops = 0;
/// Build instance of loop directive of class \a Kind.
///
/// \param SC Statement class.
/// \param Kind Kind of OpenMP directive.
/// \param StartLoc Starting location of the directive (directive keyword).
/// \param EndLoc Ending location of the directive.
/// \param NumAssociatedLoops Number of loops associated with the construct.
///
OMPLoopBasedDirective(StmtClass SC, OpenMPDirectiveKind Kind,
SourceLocation StartLoc, SourceLocation EndLoc,
unsigned NumAssociatedLoops)
: OMPExecutableDirective(SC, Kind, StartLoc, EndLoc),
NumAssociatedLoops(NumAssociatedLoops) {}
public:
/// The expressions built to support OpenMP loops in combined/composite
/// pragmas (e.g. pragma omp distribute parallel for)
struct DistCombinedHelperExprs {
/// DistributeLowerBound - used when composing 'omp distribute' with
/// 'omp for' in a same construct.
Expr *LB;
/// DistributeUpperBound - used when composing 'omp distribute' with
/// 'omp for' in a same construct.
Expr *UB;
/// DistributeEnsureUpperBound - used when composing 'omp distribute'
/// with 'omp for' in a same construct, EUB depends on DistUB
Expr *EUB;
/// Distribute loop iteration variable init used when composing 'omp
/// distribute'
/// with 'omp for' in a same construct
Expr *Init;
/// Distribute Loop condition used when composing 'omp distribute'
/// with 'omp for' in a same construct
Expr *Cond;
/// Update of LowerBound for statically scheduled omp loops for
/// outer loop in combined constructs (e.g. 'distribute parallel for')
Expr *NLB;
/// Update of UpperBound for statically scheduled omp loops for
/// outer loop in combined constructs (e.g. 'distribute parallel for')
Expr *NUB;
/// Distribute Loop condition used when composing 'omp distribute'
/// with 'omp for' in a same construct when schedule is chunked.
Expr *DistCond;
/// 'omp parallel for' loop condition used when composed with
/// 'omp distribute' in the same construct and when schedule is
/// chunked and the chunk size is 1.
Expr *ParForInDistCond;
};
/// The expressions built for the OpenMP loop CodeGen for the
/// whole collapsed loop nest.
struct HelperExprs {
/// Loop iteration variable.
Expr *IterationVarRef;
/// Loop last iteration number.
Expr *LastIteration;
/// Loop number of iterations.
Expr *NumIterations;
/// Calculation of last iteration.
Expr *CalcLastIteration;
/// Loop pre-condition.
Expr *PreCond;
/// Loop condition.
Expr *Cond;
/// Loop iteration variable init.
Expr *Init;
/// Loop increment.
Expr *Inc;
/// IsLastIteration - local flag variable passed to runtime.
Expr *IL;
/// LowerBound - local variable passed to runtime.
Expr *LB;
/// UpperBound - local variable passed to runtime.
Expr *UB;
/// Stride - local variable passed to runtime.
Expr *ST;
/// EnsureUpperBound -- expression UB = min(UB, NumIterations).
Expr *EUB;
/// Update of LowerBound for statically scheduled 'omp for' loops.
Expr *NLB;
/// Update of UpperBound for statically scheduled 'omp for' loops.
Expr *NUB;
/// PreviousLowerBound - local variable passed to runtime in the
/// enclosing schedule or null if that does not apply.
Expr *PrevLB;
/// PreviousUpperBound - local variable passed to runtime in the
/// enclosing schedule or null if that does not apply.
Expr *PrevUB;
/// DistInc - increment expression for distribute loop when found
/// combined with a further loop level (e.g. in 'distribute parallel for')
/// expression IV = IV + ST
Expr *DistInc;
/// PrevEUB - expression similar to EUB but to be used when loop
/// scheduling uses PrevLB and PrevUB (e.g. in 'distribute parallel for'
/// when ensuring that the UB is either the calculated UB by the runtime or
/// the end of the assigned distribute chunk)
/// expression UB = min (UB, PrevUB)
Expr *PrevEUB;
/// Counters Loop counters.
SmallVector<Expr *, 4> Counters;
/// PrivateCounters Loop counters.
SmallVector<Expr *, 4> PrivateCounters;
/// Expressions for loop counters inits for CodeGen.
SmallVector<Expr *, 4> Inits;
/// Expressions for loop counters update for CodeGen.
SmallVector<Expr *, 4> Updates;
/// Final loop counter values for GodeGen.
SmallVector<Expr *, 4> Finals;
/// List of counters required for the generation of the non-rectangular
/// loops.
SmallVector<Expr *, 4> DependentCounters;
/// List of initializers required for the generation of the non-rectangular
/// loops.
SmallVector<Expr *, 4> DependentInits;
/// List of final conditions required for the generation of the
/// non-rectangular loops.
SmallVector<Expr *, 4> FinalsConditions;
/// Init statement for all captured expressions.
Stmt *PreInits;
/// Expressions used when combining OpenMP loop pragmas
DistCombinedHelperExprs DistCombinedFields;
/// Check if all the expressions are built (does not check the
/// worksharing ones).
bool builtAll() {
return IterationVarRef != nullptr && LastIteration != nullptr &&
NumIterations != nullptr && PreCond != nullptr &&
Cond != nullptr && Init != nullptr && Inc != nullptr;
}
/// Initialize all the fields to null.
/// \param Size Number of elements in the
/// counters/finals/updates/dependent_counters/dependent_inits/finals_conditions
/// arrays.
void clear(unsigned Size) {
IterationVarRef = nullptr;
LastIteration = nullptr;
CalcLastIteration = nullptr;
PreCond = nullptr;
Cond = nullptr;
Init = nullptr;
Inc = nullptr;
IL = nullptr;
LB = nullptr;
UB = nullptr;
ST = nullptr;
EUB = nullptr;
NLB = nullptr;
NUB = nullptr;
NumIterations = nullptr;
PrevLB = nullptr;
PrevUB = nullptr;
DistInc = nullptr;
PrevEUB = nullptr;
// Resize the per-loop arrays to \p Size and null out every slot.
Counters.resize(Size);
PrivateCounters.resize(Size);
Inits.resize(Size);
Updates.resize(Size);
Finals.resize(Size);
DependentCounters.resize(Size);
DependentInits.resize(Size);
FinalsConditions.resize(Size);
for (unsigned I = 0; I < Size; ++I) {
Counters[I] = nullptr;
PrivateCounters[I] = nullptr;
Inits[I] = nullptr;
Updates[I] = nullptr;
Finals[I] = nullptr;
DependentCounters[I] = nullptr;
DependentInits[I] = nullptr;
FinalsConditions[I] = nullptr;
}
PreInits = nullptr;
DistCombinedFields.LB = nullptr;
DistCombinedFields.UB = nullptr;
DistCombinedFields.EUB = nullptr;
DistCombinedFields.Init = nullptr;
DistCombinedFields.Cond = nullptr;
DistCombinedFields.NLB = nullptr;
DistCombinedFields.NUB = nullptr;
DistCombinedFields.DistCond = nullptr;
DistCombinedFields.ParForInDistCond = nullptr;
}
};
/// Get number of collapsed loops.
unsigned getLoopsNumber() const { return NumAssociatedLoops; }
/// Try to find the next loop sub-statement in the specified statement \p
/// CurStmt.
/// \param TryImperfectlyNestedLoops true, if we need to try to look for the
/// imperfectly nested loop.
static Stmt *tryToFindNextInnerLoop(Stmt *CurStmt,
bool TryImperfectlyNestedLoops);
// Const forwarder to the non-const overload above.
static const Stmt *tryToFindNextInnerLoop(const Stmt *CurStmt,
bool TryImperfectlyNestedLoops) {
return tryToFindNextInnerLoop(const_cast<Stmt *>(CurStmt),
TryImperfectlyNestedLoops);
}
/// Calls the specified callback function for all the loops in \p CurStmt,
/// from the outermost to the innermost.
static bool
doForAllLoops(Stmt *CurStmt, bool TryImperfectlyNestedLoops,
unsigned NumLoops,
llvm::function_ref<bool(unsigned, Stmt *)> Callback);
static bool
doForAllLoops(const Stmt *CurStmt, bool TryImperfectlyNestedLoops,
unsigned NumLoops,
llvm::function_ref<bool(unsigned, const Stmt *)> Callback) {
// Adapt the const callback to the non-const traversal implementation.
auto &&NewCallback = [Callback](unsigned Cnt, Stmt *CurStmt) {
return Callback(Cnt, CurStmt);
};
return doForAllLoops(const_cast<Stmt *>(CurStmt), TryImperfectlyNestedLoops,
NumLoops, NewCallback);
}
/// Calls the specified callback function for all the loop bodies in \p
/// CurStmt, from the outermost loop to the innermost.
static void doForAllLoopsBodies(
Stmt *CurStmt, bool TryImperfectlyNestedLoops, unsigned NumLoops,
llvm::function_ref<void(unsigned, Stmt *, Stmt *)> Callback);
static void doForAllLoopsBodies(
const Stmt *CurStmt, bool TryImperfectlyNestedLoops, unsigned NumLoops,
llvm::function_ref<void(unsigned, const Stmt *, const Stmt *)> Callback) {
// Adapt the const callback to the non-const traversal implementation.
auto &&NewCallback = [Callback](unsigned Cnt, Stmt *Loop, Stmt *Body) {
Callback(Cnt, Loop, Body);
};
doForAllLoopsBodies(const_cast<Stmt *>(CurStmt), TryImperfectlyNestedLoops,
NumLoops, NewCallback);
}
// LLVM-style RTTI: any executable directive whose kind is a loop directive.
static bool classof(const Stmt *T) {
if (auto *D = dyn_cast<OMPExecutableDirective>(T))
return isOpenMPLoopDirective(D->getDirectiveKind());
return false;
}
};
/// This is a common base class for loop directives ('omp simd', 'omp
/// for', 'omp for simd' etc.). It is responsible for the loop code generation.
///
class OMPLoopDirective : public OMPLoopBasedDirective {
friend class ASTStmtReader;
/// Offsets to the stored exprs.
/// This enumeration contains offsets to all the pointers to children
/// expressions stored in OMPLoopDirective.
/// The first 9 children are necessary for all the loop directives,
/// the next 8 are specific to the worksharing ones, and the next 11 are
/// used for combined constructs containing two pragmas associated to loops.
/// After the fixed children, three arrays of length NumAssociatedLoops are
/// allocated: loop counters, their updates and final values.
/// PrevLowerBound and PrevUpperBound are used to communicate blocking
/// information in composite constructs which require loop blocking
/// DistInc is used to generate the increment expression for the distribute
/// loop when combined with a further nested loop
/// PrevEnsureUpperBound is used as the EnsureUpperBound expression for the
/// for loop when combined with a previous distribute loop in the same pragma
/// (e.g. 'distribute parallel for')
///
enum {
IterationVariableOffset = 0,
LastIterationOffset = 1,
CalcLastIterationOffset = 2,
PreConditionOffset = 3,
CondOffset = 4,
InitOffset = 5,
IncOffset = 6,
PreInitsOffset = 7,
// The '...End' enumerators do not correspond to child expressions - they
// specify the offset to the end (and start of the following counters/
// updates/finals/dependent_counters/dependent_inits/finals_conditions
// arrays).
DefaultEnd = 8,
// The following 8 exprs are used by worksharing and distribute loops only.
IsLastIterVariableOffset = 8,
LowerBoundVariableOffset = 9,
UpperBoundVariableOffset = 10,
StrideVariableOffset = 11,
EnsureUpperBoundOffset = 12,
NextLowerBoundOffset = 13,
NextUpperBoundOffset = 14,
NumIterationsOffset = 15,
// Offset to the end for worksharing loop directives.
WorksharingEnd = 16,
PrevLowerBoundVariableOffset = 16,
PrevUpperBoundVariableOffset = 17,
DistIncOffset = 18,
PrevEnsureUpperBoundOffset = 19,
CombinedLowerBoundVariableOffset = 20,
CombinedUpperBoundVariableOffset = 21,
CombinedEnsureUpperBoundOffset = 22,
CombinedInitOffset = 23,
CombinedConditionOffset = 24,
CombinedNextLowerBoundOffset = 25,
CombinedNextUpperBoundOffset = 26,
CombinedDistConditionOffset = 27,
CombinedParForInDistConditionOffset = 28,
// Offset to the end (and start of the following
// counters/updates/finals/dependent_counters/dependent_inits/finals_conditions
// arrays) for combined distribute loop directives.
CombinedDistributeEnd = 29,
};
/// Get the counters storage.
MutableArrayRef<Expr *> getCounters() {
auto **Storage = reinterpret_cast<Expr **>(
&Data->getChildren()[getArraysOffset(getDirectiveKind())]);
return llvm::makeMutableArrayRef(Storage, getLoopsNumber());
}
/// Get the private counters storage.
MutableArrayRef<Expr *> getPrivateCounters() {
auto **Storage = reinterpret_cast<Expr **>(
&Data->getChildren()[getArraysOffset(getDirectiveKind()) +
getLoopsNumber()]);
return llvm::makeMutableArrayRef(Storage, getLoopsNumber());
}
/// Get the updates storage.
MutableArrayRef<Expr *> getInits() {
auto **Storage = reinterpret_cast<Expr **>(
&Data->getChildren()[getArraysOffset(getDirectiveKind()) +
2 * getLoopsNumber()]);
return llvm::makeMutableArrayRef(Storage, getLoopsNumber());
}
/// Get the updates storage.
MutableArrayRef<Expr *> getUpdates() {
auto **Storage = reinterpret_cast<Expr **>(
&Data->getChildren()[getArraysOffset(getDirectiveKind()) +
3 * getLoopsNumber()]);
return llvm::makeMutableArrayRef(Storage, getLoopsNumber());
}
/// Get the final counter updates storage.
MutableArrayRef<Expr *> getFinals() {
auto **Storage = reinterpret_cast<Expr **>(
&Data->getChildren()[getArraysOffset(getDirectiveKind()) +
4 * getLoopsNumber()]);
return llvm::makeMutableArrayRef(Storage, getLoopsNumber());
}
/// Get the dependent counters storage.
MutableArrayRef<Expr *> getDependentCounters() {
auto **Storage = reinterpret_cast<Expr **>(
&Data->getChildren()[getArraysOffset(getDirectiveKind()) +
5 * getLoopsNumber()]);
return llvm::makeMutableArrayRef(Storage, getLoopsNumber());
}
/// Get the dependent inits storage.
MutableArrayRef<Expr *> getDependentInits() {
auto **Storage = reinterpret_cast<Expr **>(
&Data->getChildren()[getArraysOffset(getDirectiveKind()) +
6 * getLoopsNumber()]);
return llvm::makeMutableArrayRef(Storage, getLoopsNumber());
}
/// Get the finals conditions storage.
MutableArrayRef<Expr *> getFinalsConditions() {
auto **Storage = reinterpret_cast<Expr **>(
&Data->getChildren()[getArraysOffset(getDirectiveKind()) +
7 * getLoopsNumber()]);
return llvm::makeMutableArrayRef(Storage, getLoopsNumber());
}
protected:
/// Build instance of loop directive of class \a Kind.
///
/// \param SC Statement class.
/// \param Kind Kind of OpenMP directive.
/// \param StartLoc Starting location of the directive (directive keyword).
/// \param EndLoc Ending location of the directive.
/// \param CollapsedNum Number of collapsed loops from 'collapse' clause.
///
OMPLoopDirective(StmtClass SC, OpenMPDirectiveKind Kind,
SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum)
: OMPLoopBasedDirective(SC, Kind, StartLoc, EndLoc, CollapsedNum) {}
/// Offset to the start of children expression arrays.
static unsigned getArraysOffset(OpenMPDirectiveKind Kind) {
if (isOpenMPLoopBoundSharingDirective(Kind))
return CombinedDistributeEnd;
if (isOpenMPWorksharingDirective(Kind) || isOpenMPTaskLoopDirective(Kind) ||
isOpenMPDistributeDirective(Kind))
return WorksharingEnd;
return DefaultEnd;
}
/// Children number.
static unsigned numLoopChildren(unsigned CollapsedNum,
OpenMPDirectiveKind Kind) {
return getArraysOffset(Kind) +
8 * CollapsedNum; // Counters, PrivateCounters, Inits,
// Updates, Finals, DependentCounters,
// DependentInits, FinalsConditions.
}
void setIterationVariable(Expr *IV) {
Data->getChildren()[IterationVariableOffset] = IV;
}
void setLastIteration(Expr *LI) {
Data->getChildren()[LastIterationOffset] = LI;
}
void setCalcLastIteration(Expr *CLI) {
Data->getChildren()[CalcLastIterationOffset] = CLI;
}
void setPreCond(Expr *PC) { Data->getChildren()[PreConditionOffset] = PC; }
void setCond(Expr *Cond) { Data->getChildren()[CondOffset] = Cond; }
void setInit(Expr *Init) { Data->getChildren()[InitOffset] = Init; }
void setInc(Expr *Inc) { Data->getChildren()[IncOffset] = Inc; }
void setPreInits(Stmt *PreInits) {
Data->getChildren()[PreInitsOffset] = PreInits;
}
void setIsLastIterVariable(Expr *IL) {
assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
isOpenMPTaskLoopDirective(getDirectiveKind()) ||
isOpenMPDistributeDirective(getDirectiveKind())) &&
"expected worksharing loop directive");
Data->getChildren()[IsLastIterVariableOffset] = IL;
}
void setLowerBoundVariable(Expr *LB) {
assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
isOpenMPTaskLoopDirective(getDirectiveKind()) ||
isOpenMPDistributeDirective(getDirectiveKind())) &&
"expected worksharing loop directive");
Data->getChildren()[LowerBoundVariableOffset] = LB;
}
void setUpperBoundVariable(Expr *UB) {
assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
isOpenMPTaskLoopDirective(getDirectiveKind()) ||
isOpenMPDistributeDirective(getDirectiveKind())) &&
"expected worksharing loop directive");
Data->getChildren()[UpperBoundVariableOffset] = UB;
}
void setStrideVariable(Expr *ST) {
assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
isOpenMPTaskLoopDirective(getDirectiveKind()) ||
isOpenMPDistributeDirective(getDirectiveKind())) &&
"expected worksharing loop directive");
Data->getChildren()[StrideVariableOffset] = ST;
}
void setEnsureUpperBound(Expr *EUB) {
assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
isOpenMPTaskLoopDirective(getDirectiveKind()) ||
isOpenMPDistributeDirective(getDirectiveKind())) &&
"expected worksharing loop directive");
Data->getChildren()[EnsureUpperBoundOffset] = EUB;
}
void setNextLowerBound(Expr *NLB) {
assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
isOpenMPTaskLoopDirective(getDirectiveKind()) ||
isOpenMPDistributeDirective(getDirectiveKind())) &&
"expected worksharing loop directive");
Data->getChildren()[NextLowerBoundOffset] = NLB;
}
void setNextUpperBound(Expr *NUB) {
assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
isOpenMPTaskLoopDirective(getDirectiveKind()) ||
isOpenMPDistributeDirective(getDirectiveKind())) &&
"expected worksharing loop directive");
Data->getChildren()[NextUpperBoundOffset] = NUB;
}
void setNumIterations(Expr *NI) {
assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
isOpenMPTaskLoopDirective(getDirectiveKind()) ||
isOpenMPDistributeDirective(getDirectiveKind())) &&
"expected worksharing loop directive");
Data->getChildren()[NumIterationsOffset] = NI;
}
void setPrevLowerBoundVariable(Expr *PrevLB) {
assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
"expected loop bound sharing directive");
Data->getChildren()[PrevLowerBoundVariableOffset] = PrevLB;
}
void setPrevUpperBoundVariable(Expr *PrevUB) {
assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
"expected loop bound sharing directive");
Data->getChildren()[PrevUpperBoundVariableOffset] = PrevUB;
}
void setDistInc(Expr *DistInc) {
assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
"expected loop bound sharing directive");
Data->getChildren()[DistIncOffset] = DistInc;
}
void setPrevEnsureUpperBound(Expr *PrevEUB) {
assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
"expected loop bound sharing directive");
Data->getChildren()[PrevEnsureUpperBoundOffset] = PrevEUB;
}
void setCombinedLowerBoundVariable(Expr *CombLB) {
assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
"expected loop bound sharing directive");
Data->getChildren()[CombinedLowerBoundVariableOffset] = CombLB;
}
void setCombinedUpperBoundVariable(Expr *CombUB) {
assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
"expected loop bound sharing directive");
Data->getChildren()[CombinedUpperBoundVariableOffset] = CombUB;
}
void setCombinedEnsureUpperBound(Expr *CombEUB) {
assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
"expected loop bound sharing directive");
Data->getChildren()[CombinedEnsureUpperBoundOffset] = CombEUB;
}
void setCombinedInit(Expr *CombInit) {
assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
"expected loop bound sharing directive");
Data->getChildren()[CombinedInitOffset] = CombInit;
}
void setCombinedCond(Expr *CombCond) {
assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
"expected loop bound sharing directive");
Data->getChildren()[CombinedConditionOffset] = CombCond;
}
void setCombinedNextLowerBound(Expr *CombNLB) {
assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
"expected loop bound sharing directive");
Data->getChildren()[CombinedNextLowerBoundOffset] = CombNLB;
}
void setCombinedNextUpperBound(Expr *CombNUB) {
assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
"expected loop bound sharing directive");
Data->getChildren()[CombinedNextUpperBoundOffset] = CombNUB;
}
void setCombinedDistCond(Expr *CombDistCond) {
assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
"expected loop bound distribute sharing directive");
Data->getChildren()[CombinedDistConditionOffset] = CombDistCond;
}
void setCombinedParForInDistCond(Expr *CombParForInDistCond) {
assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
"expected loop bound distribute sharing directive");
Data->getChildren()[CombinedParForInDistConditionOffset] =
CombParForInDistCond;
}
void setCounters(ArrayRef<Expr *> A);
void setPrivateCounters(ArrayRef<Expr *> A);
void setInits(ArrayRef<Expr *> A);
void setUpdates(ArrayRef<Expr *> A);
void setFinals(ArrayRef<Expr *> A);
void setDependentCounters(ArrayRef<Expr *> A);
void setDependentInits(ArrayRef<Expr *> A);
void setFinalsConditions(ArrayRef<Expr *> A);
public:
Expr *getIterationVariable() const {
return cast<Expr>(Data->getChildren()[IterationVariableOffset]);
}
Expr *getLastIteration() const {
return cast<Expr>(Data->getChildren()[LastIterationOffset]);
}
Expr *getCalcLastIteration() const {
return cast<Expr>(Data->getChildren()[CalcLastIterationOffset]);
}
Expr *getPreCond() const {
return cast<Expr>(Data->getChildren()[PreConditionOffset]);
}
Expr *getCond() const { return cast<Expr>(Data->getChildren()[CondOffset]); }
Expr *getInit() const { return cast<Expr>(Data->getChildren()[InitOffset]); }
Expr *getInc() const { return cast<Expr>(Data->getChildren()[IncOffset]); }
const Stmt *getPreInits() const {
return Data->getChildren()[PreInitsOffset];
}
Stmt *getPreInits() { return Data->getChildren()[PreInitsOffset]; }
Expr *getIsLastIterVariable() const {
assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
isOpenMPTaskLoopDirective(getDirectiveKind()) ||
isOpenMPDistributeDirective(getDirectiveKind())) &&
"expected worksharing loop directive");
return cast<Expr>(Data->getChildren()[IsLastIterVariableOffset]);
}
Expr *getLowerBoundVariable() const {
assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
isOpenMPTaskLoopDirective(getDirectiveKind()) ||
isOpenMPDistributeDirective(getDirectiveKind())) &&
"expected worksharing loop directive");
return cast<Expr>(Data->getChildren()[LowerBoundVariableOffset]);
}
Expr *getUpperBoundVariable() const {
assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
isOpenMPTaskLoopDirective(getDirectiveKind()) ||
isOpenMPDistributeDirective(getDirectiveKind())) &&
"expected worksharing loop directive");
return cast<Expr>(Data->getChildren()[UpperBoundVariableOffset]);
}
Expr *getStrideVariable() const {
assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
isOpenMPTaskLoopDirective(getDirectiveKind()) ||
isOpenMPDistributeDirective(getDirectiveKind())) &&
"expected worksharing loop directive");
return cast<Expr>(Data->getChildren()[StrideVariableOffset]);
}
Expr *getEnsureUpperBound() const {
assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
isOpenMPTaskLoopDirective(getDirectiveKind()) ||
isOpenMPDistributeDirective(getDirectiveKind())) &&
"expected worksharing loop directive");
return cast<Expr>(Data->getChildren()[EnsureUpperBoundOffset]);
}
Expr *getNextLowerBound() const {
assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
isOpenMPTaskLoopDirective(getDirectiveKind()) ||
isOpenMPDistributeDirective(getDirectiveKind())) &&
"expected worksharing loop directive");
return cast<Expr>(Data->getChildren()[NextLowerBoundOffset]);
}
Expr *getNextUpperBound() const {
assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
isOpenMPTaskLoopDirective(getDirectiveKind()) ||
isOpenMPDistributeDirective(getDirectiveKind())) &&
"expected worksharing loop directive");
return cast<Expr>(Data->getChildren()[NextUpperBoundOffset]);
}
Expr *getNumIterations() const {
assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
isOpenMPTaskLoopDirective(getDirectiveKind()) ||
isOpenMPDistributeDirective(getDirectiveKind())) &&
"expected worksharing loop directive");
return cast<Expr>(Data->getChildren()[NumIterationsOffset]);
}
Expr *getPrevLowerBoundVariable() const {
assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
"expected loop bound sharing directive");
return cast<Expr>(Data->getChildren()[PrevLowerBoundVariableOffset]);
}
Expr *getPrevUpperBoundVariable() const {
assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
"expected loop bound sharing directive");
return cast<Expr>(Data->getChildren()[PrevUpperBoundVariableOffset]);
}
Expr *getDistInc() const {
assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
"expected loop bound sharing directive");
return cast<Expr>(Data->getChildren()[DistIncOffset]);
}
Expr *getPrevEnsureUpperBound() const {
assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
"expected loop bound sharing directive");
return cast<Expr>(Data->getChildren()[PrevEnsureUpperBoundOffset]);
}
Expr *getCombinedLowerBoundVariable() const {
assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
"expected loop bound sharing directive");
return cast<Expr>(Data->getChildren()[CombinedLowerBoundVariableOffset]);
}
Expr *getCombinedUpperBoundVariable() const {
assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
"expected loop bound sharing directive");
return cast<Expr>(Data->getChildren()[CombinedUpperBoundVariableOffset]);
}
Expr *getCombinedEnsureUpperBound() const {
assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
"expected loop bound sharing directive");
return cast<Expr>(Data->getChildren()[CombinedEnsureUpperBoundOffset]);
}
Expr *getCombinedInit() const {
assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
"expected loop bound sharing directive");
return cast<Expr>(Data->getChildren()[CombinedInitOffset]);
}
Expr *getCombinedCond() const {
assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
"expected loop bound sharing directive");
return cast<Expr>(Data->getChildren()[CombinedConditionOffset]);
}
Expr *getCombinedNextLowerBound() const {
assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
"expected loop bound sharing directive");
return cast<Expr>(Data->getChildren()[CombinedNextLowerBoundOffset]);
}
Expr *getCombinedNextUpperBound() const {
assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
"expected loop bound sharing directive");
return cast<Expr>(Data->getChildren()[CombinedNextUpperBoundOffset]);
}
Expr *getCombinedDistCond() const {
assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
"expected loop bound distribute sharing directive");
return cast<Expr>(Data->getChildren()[CombinedDistConditionOffset]);
}
Expr *getCombinedParForInDistCond() const {
assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
"expected loop bound distribute sharing directive");
return cast<Expr>(Data->getChildren()[CombinedParForInDistConditionOffset]);
}
/// Returns the body of the associated loop nest (defined out of line).
Stmt *getBody();
const Stmt *getBody() const {
  // Standard const-overload delegation: forward to the non-const getBody().
  // The const_cast is safe as long as getBody() does not mutate the node —
  // it is an accessor by convention; confirm in the out-of-line definition.
  return const_cast<OMPLoopDirective *>(this)->getBody();
}
// Public views over the per-loop helper-expression arrays stored in the
// directive's trailing children. Each const overload delegates to the
// non-const getter via const_cast — the getters are accessors and do not
// mutate the node, so this is the usual const-delegation idiom.

/// Loop counter variables of the collapsed loop nest.
ArrayRef<Expr *> counters() { return getCounters(); }
ArrayRef<Expr *> counters() const {
  return const_cast<OMPLoopDirective *>(this)->getCounters();
}
/// Private copies of the loop counters.
ArrayRef<Expr *> private_counters() { return getPrivateCounters(); }
ArrayRef<Expr *> private_counters() const {
  return const_cast<OMPLoopDirective *>(this)->getPrivateCounters();
}
/// Initializer expressions for the counters.
ArrayRef<Expr *> inits() { return getInits(); }
ArrayRef<Expr *> inits() const {
  return const_cast<OMPLoopDirective *>(this)->getInits();
}
/// Update expressions for the counters.
ArrayRef<Expr *> updates() { return getUpdates(); }
ArrayRef<Expr *> updates() const {
  return const_cast<OMPLoopDirective *>(this)->getUpdates();
}
/// Final-value expressions for the counters.
ArrayRef<Expr *> finals() { return getFinals(); }
ArrayRef<Expr *> finals() const {
  return const_cast<OMPLoopDirective *>(this)->getFinals();
}
/// Counters that other counters depend on (non-rectangular loop nests).
ArrayRef<Expr *> dependent_counters() { return getDependentCounters(); }
ArrayRef<Expr *> dependent_counters() const {
  return const_cast<OMPLoopDirective *>(this)->getDependentCounters();
}
/// Initializers for the dependent counters.
ArrayRef<Expr *> dependent_inits() { return getDependentInits(); }
ArrayRef<Expr *> dependent_inits() const {
  return const_cast<OMPLoopDirective *>(this)->getDependentInits();
}
/// Conditions guarding the finals updates.
ArrayRef<Expr *> finals_conditions() { return getFinalsConditions(); }
ArrayRef<Expr *> finals_conditions() const {
  return const_cast<OMPLoopDirective *>(this)->getFinalsConditions();
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPSimdDirectiveClass ||
T->getStmtClass() == OMPForDirectiveClass ||
T->getStmtClass() == OMPForSimdDirectiveClass ||
T->getStmtClass() == OMPParallelForDirectiveClass ||
T->getStmtClass() == OMPParallelForSimdDirectiveClass ||
T->getStmtClass() == OMPTaskLoopDirectiveClass ||
T->getStmtClass() == OMPTaskLoopSimdDirectiveClass ||
T->getStmtClass() == OMPMasterTaskLoopDirectiveClass ||
T->getStmtClass() == OMPMasterTaskLoopSimdDirectiveClass ||
T->getStmtClass() == OMPParallelMasterTaskLoopDirectiveClass ||
T->getStmtClass() == OMPParallelMasterTaskLoopSimdDirectiveClass ||
T->getStmtClass() == OMPDistributeDirectiveClass ||
T->getStmtClass() == OMPTargetParallelForDirectiveClass ||
T->getStmtClass() == OMPDistributeParallelForDirectiveClass ||
T->getStmtClass() == OMPDistributeParallelForSimdDirectiveClass ||
T->getStmtClass() == OMPDistributeSimdDirectiveClass ||
T->getStmtClass() == OMPTargetParallelForSimdDirectiveClass ||
T->getStmtClass() == OMPTargetSimdDirectiveClass ||
T->getStmtClass() == OMPTeamsDistributeDirectiveClass ||
T->getStmtClass() == OMPTeamsDistributeSimdDirectiveClass ||
T->getStmtClass() ==
OMPTeamsDistributeParallelForSimdDirectiveClass ||
T->getStmtClass() == OMPTeamsDistributeParallelForDirectiveClass ||
T->getStmtClass() ==
OMPTargetTeamsDistributeParallelForDirectiveClass ||
T->getStmtClass() ==
OMPTargetTeamsDistributeParallelForSimdDirectiveClass ||
T->getStmtClass() == OMPTargetTeamsDistributeDirectiveClass ||
T->getStmtClass() == OMPTargetTeamsDistributeSimdDirectiveClass;
}
};
/// This represents '#pragma omp simd' directive.
///
/// \code
/// #pragma omp simd private(a,b) linear(i,j:s) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp simd' has clauses 'private'
/// with the variables 'a' and 'b', 'linear' with variables 'i', 'j' and
/// linear step 's', 'reduction' with operator '+' and variables 'c' and 'd'.
///
class OMPSimdDirective : public OMPLoopDirective {
  // Grant the AST reader and the executable-directive base access to the
  // private constructors.
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                   unsigned CollapsedNum)
      : OMPLoopDirective(OMPSimdDirectiveClass, llvm::omp::OMPD_simd, StartLoc,
                         EndLoc, CollapsedNum) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPSimdDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPSimdDirectiveClass, llvm::omp::OMPD_simd,
                         SourceLocation(), SourceLocation(), CollapsedNum) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPSimdDirective *Create(const ASTContext &C, SourceLocation StartLoc,
                                  SourceLocation EndLoc, unsigned CollapsedNum,
                                  ArrayRef<OMPClause *> Clauses,
                                  Stmt *AssociatedStmt,
                                  const HelperExprs &Exprs);

  /// Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPSimdDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses,
                                       unsigned CollapsedNum, EmptyShell);

  /// LLVM-style RTTI support.
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPSimdDirectiveClass;
  }
};
/// This represents '#pragma omp for' directive.
///
/// \code
/// #pragma omp for private(a,b) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp for' has clauses 'private' with the
/// variables 'a' and 'b' and 'reduction' with operator '+' and variables 'c'
/// and 'd'.
///
class OMPForDirective : public OMPLoopDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// true if current directive has inner cancel directive.
  bool HasCancel = false;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPForDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                  unsigned CollapsedNum)
      : OMPLoopDirective(OMPForDirectiveClass, llvm::omp::OMPD_for, StartLoc,
                         EndLoc, CollapsedNum) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPForDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPForDirectiveClass, llvm::omp::OMPD_for,
                         SourceLocation(), SourceLocation(), CollapsedNum) {}

  /// Sets special task reduction descriptor.
  void setTaskReductionRefExpr(Expr *E) {
    // The descriptor lives in the child slot immediately after all
    // loop-related children (hence the numLoopChildren(...) index).
    Data->getChildren()[numLoopChildren(getLoopsNumber(),
                                        llvm::omp::OMPD_for)] = E;
  }

  /// Set cancel state.
  void setHasCancel(bool Has) { HasCancel = Has; }

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  /// \param TaskRedRef Task reduction special reference expression to handle
  /// taskgroup descriptor.
  /// \param HasCancel true if current directive has inner cancel directive.
  ///
  static OMPForDirective *Create(const ASTContext &C, SourceLocation StartLoc,
                                 SourceLocation EndLoc, unsigned CollapsedNum,
                                 ArrayRef<OMPClause *> Clauses,
                                 Stmt *AssociatedStmt, const HelperExprs &Exprs,
                                 Expr *TaskRedRef, bool HasCancel);

  /// Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPForDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses,
                                      unsigned CollapsedNum, EmptyShell);

  /// Returns special task reduction reference expression.
  Expr *getTaskReductionRefExpr() {
    return cast_or_null<Expr>(Data->getChildren()[numLoopChildren(
        getLoopsNumber(), llvm::omp::OMPD_for)]);
  }
  const Expr *getTaskReductionRefExpr() const {
    return const_cast<OMPForDirective *>(this)->getTaskReductionRefExpr();
  }

  /// Return true if current directive has inner cancel directive.
  bool hasCancel() const { return HasCancel; }

  /// LLVM-style RTTI support.
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPForDirectiveClass;
  }
};
/// This represents '#pragma omp for simd' directive.
///
/// \code
/// #pragma omp for simd private(a,b) linear(i,j:s) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp for simd' has clauses 'private'
/// with the variables 'a' and 'b', 'linear' with variables 'i', 'j' and
/// linear step 's', 'reduction' with operator '+' and variables 'c' and 'd'.
///
class OMPForSimdDirective : public OMPLoopDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPForSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                      unsigned CollapsedNum)
      : OMPLoopDirective(OMPForSimdDirectiveClass, llvm::omp::OMPD_for_simd,
                         StartLoc, EndLoc, CollapsedNum) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPForSimdDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPForSimdDirectiveClass, llvm::omp::OMPD_for_simd,
                         SourceLocation(), SourceLocation(), CollapsedNum) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPForSimdDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPForSimdDirective *CreateEmpty(const ASTContext &C,
                                          unsigned NumClauses,
                                          unsigned CollapsedNum, EmptyShell);

  /// LLVM-style RTTI support.
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPForSimdDirectiveClass;
  }
};
/// This represents '#pragma omp sections' directive.
///
/// \code
/// #pragma omp sections private(a,b) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp sections' has clauses 'private' with
/// the variables 'a' and 'b' and 'reduction' with operator '+' and variables
/// 'c' and 'd'.
///
class OMPSectionsDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// true if current directive has inner cancel directive.
  bool HasCancel = false;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPSectionsDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPSectionsDirectiveClass,
                               llvm::omp::OMPD_sections, StartLoc, EndLoc) {}

  /// Build an empty directive.
  ///
  explicit OMPSectionsDirective()
      : OMPExecutableDirective(OMPSectionsDirectiveClass,
                               llvm::omp::OMPD_sections, SourceLocation(),
                               SourceLocation()) {}

  /// Sets special task reduction descriptor.
  // Child slot 0 holds the task-reduction descriptor for this directive.
  void setTaskReductionRefExpr(Expr *E) { Data->getChildren()[0] = E; }

  /// Set cancel state.
  void setHasCancel(bool Has) { HasCancel = Has; }

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param TaskRedRef Task reduction special reference expression to handle
  /// taskgroup descriptor.
  /// \param HasCancel true if current directive has inner cancel directive.
  ///
  static OMPSectionsDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *TaskRedRef,
         bool HasCancel);

  /// Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPSectionsDirective *CreateEmpty(const ASTContext &C,
                                           unsigned NumClauses, EmptyShell);

  /// Returns special task reduction reference expression.
  Expr *getTaskReductionRefExpr() {
    return cast_or_null<Expr>(Data->getChildren()[0]);
  }
  const Expr *getTaskReductionRefExpr() const {
    return const_cast<OMPSectionsDirective *>(this)->getTaskReductionRefExpr();
  }

  /// Return true if current directive has inner cancel directive.
  bool hasCancel() const { return HasCancel; }

  /// LLVM-style RTTI support.
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPSectionsDirectiveClass;
  }
};
/// This represents '#pragma omp section' directive.
///
/// \code
/// #pragma omp section
/// \endcode
///
class OMPSectionDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// true if current directive has inner cancel directive.
  bool HasCancel = false;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPSectionDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPSectionDirectiveClass,
                               llvm::omp::OMPD_section, StartLoc, EndLoc) {}

  /// Build an empty directive.
  ///
  explicit OMPSectionDirective()
      : OMPExecutableDirective(OMPSectionDirectiveClass,
                               llvm::omp::OMPD_section, SourceLocation(),
                               SourceLocation()) {}

public:
  /// Creates directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param HasCancel true if current directive has inner cancel directive.
  ///
  static OMPSectionDirective *Create(const ASTContext &C,
                                     SourceLocation StartLoc,
                                     SourceLocation EndLoc,
                                     Stmt *AssociatedStmt, bool HasCancel);

  /// Creates an empty directive.
  ///
  /// \param C AST context.
  ///
  static OMPSectionDirective *CreateEmpty(const ASTContext &C, EmptyShell);

  /// Set cancel state.
  // NOTE: public here, unlike most sibling directives where it is private —
  // callers outside the AST reader may flip this flag.
  void setHasCancel(bool Has) { HasCancel = Has; }

  /// Return true if current directive has inner cancel directive.
  bool hasCancel() const { return HasCancel; }

  /// LLVM-style RTTI support.
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPSectionDirectiveClass;
  }
};
/// This represents '#pragma omp single' directive.
///
/// \code
/// #pragma omp single private(a,b) copyprivate(c,d)
/// \endcode
/// In this example directive '#pragma omp single' has clauses 'private' with
/// the variables 'a' and 'b' and 'copyprivate' with variables 'c' and 'd'.
///
class OMPSingleDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPSingleDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPSingleDirectiveClass, llvm::omp::OMPD_single,
                               StartLoc, EndLoc) {}

  /// Build an empty directive.
  ///
  explicit OMPSingleDirective()
      : OMPExecutableDirective(OMPSingleDirectiveClass, llvm::omp::OMPD_single,
                               SourceLocation(), SourceLocation()) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPSingleDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);

  /// Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPSingleDirective *CreateEmpty(const ASTContext &C,
                                         unsigned NumClauses, EmptyShell);

  /// LLVM-style RTTI support.
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPSingleDirectiveClass;
  }
};
/// This represents '#pragma omp master' directive.
///
/// \code
/// #pragma omp master
/// \endcode
///
class OMPMasterDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPMasterDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPMasterDirectiveClass, llvm::omp::OMPD_master,
                               StartLoc, EndLoc) {}

  /// Build an empty directive.
  ///
  explicit OMPMasterDirective()
      : OMPExecutableDirective(OMPMasterDirectiveClass, llvm::omp::OMPD_master,
                               SourceLocation(), SourceLocation()) {}

public:
  /// Creates directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPMasterDirective *Create(const ASTContext &C,
                                    SourceLocation StartLoc,
                                    SourceLocation EndLoc,
                                    Stmt *AssociatedStmt);

  /// Creates an empty directive.
  ///
  /// \param C AST context.
  ///
  static OMPMasterDirective *CreateEmpty(const ASTContext &C, EmptyShell);

  /// LLVM-style RTTI support.
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPMasterDirectiveClass;
  }
};
/// This represents '#pragma omp critical' directive.
///
/// \code
/// #pragma omp critical
/// \endcode
///
class OMPCriticalDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Name of the directive.
  // Optional user-supplied name of the critical region
  // ('#pragma omp critical (name)'); empty when unnamed.
  DeclarationNameInfo DirName;

  /// Build directive with the given start and end location.
  ///
  /// \param Name Name of the directive.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPCriticalDirective(const DeclarationNameInfo &Name, SourceLocation StartLoc,
                       SourceLocation EndLoc)
      : OMPExecutableDirective(OMPCriticalDirectiveClass,
                               llvm::omp::OMPD_critical, StartLoc, EndLoc),
        DirName(Name) {}

  /// Build an empty directive.
  ///
  explicit OMPCriticalDirective()
      : OMPExecutableDirective(OMPCriticalDirectiveClass,
                               llvm::omp::OMPD_critical, SourceLocation(),
                               SourceLocation()) {}

  /// Set name of the directive.
  ///
  /// \param Name Name of the directive.
  ///
  void setDirectiveName(const DeclarationNameInfo &Name) { DirName = Name; }

public:
  /// Creates directive.
  ///
  /// \param C AST context.
  /// \param Name Name of the directive.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPCriticalDirective *
  Create(const ASTContext &C, const DeclarationNameInfo &Name,
         SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);

  /// Creates an empty directive.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPCriticalDirective *CreateEmpty(const ASTContext &C,
                                           unsigned NumClauses, EmptyShell);

  /// Return name of the directive.
  ///
  DeclarationNameInfo getDirectiveName() const { return DirName; }

  /// LLVM-style RTTI support.
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPCriticalDirectiveClass;
  }
};
/// This represents '#pragma omp parallel for' directive.
///
/// \code
/// #pragma omp parallel for private(a,b) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp parallel for' has clauses 'private'
/// with the variables 'a' and 'b' and 'reduction' with operator '+' and
/// variables 'c' and 'd'.
///
class OMPParallelForDirective : public OMPLoopDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// true if current region has inner cancel directive.
  bool HasCancel = false;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPParallelForDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                          unsigned CollapsedNum)
      : OMPLoopDirective(OMPParallelForDirectiveClass,
                         llvm::omp::OMPD_parallel_for, StartLoc, EndLoc,
                         CollapsedNum) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPParallelForDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPParallelForDirectiveClass,
                         llvm::omp::OMPD_parallel_for, SourceLocation(),
                         SourceLocation(), CollapsedNum) {}

  /// Sets special task reduction descriptor.
  // Stored in the child slot immediately after all loop-related children.
  void setTaskReductionRefExpr(Expr *E) {
    Data->getChildren()[numLoopChildren(getLoopsNumber(),
                                        llvm::omp::OMPD_parallel_for)] = E;
  }

  /// Set cancel state.
  void setHasCancel(bool Has) { HasCancel = Has; }

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  /// \param TaskRedRef Task reduction special reference expression to handle
  /// taskgroup descriptor.
  /// \param HasCancel true if current directive has inner cancel directive.
  ///
  static OMPParallelForDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs, Expr *TaskRedRef,
         bool HasCancel);

  /// Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPParallelForDirective *CreateEmpty(const ASTContext &C,
                                              unsigned NumClauses,
                                              unsigned CollapsedNum,
                                              EmptyShell);

  /// Returns special task reduction reference expression.
  Expr *getTaskReductionRefExpr() {
    return cast_or_null<Expr>(Data->getChildren()[numLoopChildren(
        getLoopsNumber(), llvm::omp::OMPD_parallel_for)]);
  }
  const Expr *getTaskReductionRefExpr() const {
    return const_cast<OMPParallelForDirective *>(this)
        ->getTaskReductionRefExpr();
  }

  /// Return true if current directive has inner cancel directive.
  bool hasCancel() const { return HasCancel; }

  /// LLVM-style RTTI support.
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPParallelForDirectiveClass;
  }
};
/// This represents '#pragma omp parallel for simd' directive.
///
/// \code
/// #pragma omp parallel for simd private(a,b) linear(i,j:s) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp parallel for simd' has clauses
/// 'private' with the variables 'a' and 'b', 'linear' with variables 'i', 'j'
/// and linear step 's', 'reduction' with operator '+' and variables 'c' and
/// 'd'.
///
class OMPParallelForSimdDirective : public OMPLoopDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPParallelForSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                              unsigned CollapsedNum)
      : OMPLoopDirective(OMPParallelForSimdDirectiveClass,
                         llvm::omp::OMPD_parallel_for_simd, StartLoc, EndLoc,
                         CollapsedNum) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPParallelForSimdDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPParallelForSimdDirectiveClass,
                         llvm::omp::OMPD_parallel_for_simd, SourceLocation(),
                         SourceLocation(), CollapsedNum) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPParallelForSimdDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPParallelForSimdDirective *CreateEmpty(const ASTContext &C,
                                                  unsigned NumClauses,
                                                  unsigned CollapsedNum,
                                                  EmptyShell);

  /// LLVM-style RTTI support.
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPParallelForSimdDirectiveClass;
  }
};
/// This represents '#pragma omp parallel master' directive.
///
/// \code
/// #pragma omp parallel master private(a,b)
/// \endcode
/// In this example directive '#pragma omp parallel master' has clauses
/// 'private' with the variables 'a' and 'b'
///
class OMPParallelMasterDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPParallelMasterDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPParallelMasterDirectiveClass,
                               llvm::omp::OMPD_parallel_master, StartLoc,
                               EndLoc) {}

  /// Build an empty directive.
  ///
  explicit OMPParallelMasterDirective()
      : OMPExecutableDirective(OMPParallelMasterDirectiveClass,
                               llvm::omp::OMPD_parallel_master,
                               SourceLocation(), SourceLocation()) {}

  /// Sets special task reduction descriptor.
  // Child slot 0 holds the task-reduction descriptor for this directive.
  void setTaskReductionRefExpr(Expr *E) { Data->getChildren()[0] = E; }

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param TaskRedRef Task reduction special reference expression to handle
  /// taskgroup descriptor.
  ///
  static OMPParallelMasterDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *TaskRedRef);

  /// Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPParallelMasterDirective *
  CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell);

  /// Returns special task reduction reference expression.
  Expr *getTaskReductionRefExpr() {
    return cast_or_null<Expr>(Data->getChildren()[0]);
  }
  const Expr *getTaskReductionRefExpr() const {
    return const_cast<OMPParallelMasterDirective *>(this)
        ->getTaskReductionRefExpr();
  }

  /// LLVM-style RTTI support.
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPParallelMasterDirectiveClass;
  }
};
/// This represents '#pragma omp parallel sections' directive.
///
/// \code
/// #pragma omp parallel sections private(a,b) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp parallel sections' has clauses
/// 'private' with the variables 'a' and 'b' and 'reduction' with operator '+'
/// and variables 'c' and 'd'.
///
class OMPParallelSectionsDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// true if current directive has inner cancel directive.
  bool HasCancel = false;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPParallelSectionsDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPParallelSectionsDirectiveClass,
                               llvm::omp::OMPD_parallel_sections, StartLoc,
                               EndLoc) {}

  /// Build an empty directive.
  ///
  explicit OMPParallelSectionsDirective()
      : OMPExecutableDirective(OMPParallelSectionsDirectiveClass,
                               llvm::omp::OMPD_parallel_sections,
                               SourceLocation(), SourceLocation()) {}

  /// Sets special task reduction descriptor.
  // Child slot 0 holds the task-reduction descriptor for this directive.
  void setTaskReductionRefExpr(Expr *E) { Data->getChildren()[0] = E; }

  /// Set cancel state.
  void setHasCancel(bool Has) { HasCancel = Has; }

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param TaskRedRef Task reduction special reference expression to handle
  /// taskgroup descriptor.
  /// \param HasCancel true if current directive has inner cancel directive.
  ///
  static OMPParallelSectionsDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *TaskRedRef,
         bool HasCancel);

  /// Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPParallelSectionsDirective *
  CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell);

  /// Returns special task reduction reference expression.
  Expr *getTaskReductionRefExpr() {
    return cast_or_null<Expr>(Data->getChildren()[0]);
  }
  const Expr *getTaskReductionRefExpr() const {
    return const_cast<OMPParallelSectionsDirective *>(this)
        ->getTaskReductionRefExpr();
  }

  /// Return true if current directive has inner cancel directive.
  bool hasCancel() const { return HasCancel; }

  /// LLVM-style RTTI support.
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPParallelSectionsDirectiveClass;
  }
};
/// This represents '#pragma omp task' directive.
///
/// \code
/// #pragma omp task private(a,b) final(d)
/// \endcode
/// In this example directive '#pragma omp task' has clauses 'private' with the
/// variables 'a' and 'b' and 'final' with condition 'd'.
///
class OMPTaskDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// true if this directive has inner cancel directive.
  bool HasCancel = false;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPTaskDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPTaskDirectiveClass, llvm::omp::OMPD_task,
                               StartLoc, EndLoc) {}

  /// Build an empty directive.
  ///
  explicit OMPTaskDirective()
      : OMPExecutableDirective(OMPTaskDirectiveClass, llvm::omp::OMPD_task,
                               SourceLocation(), SourceLocation()) {}

  /// Set cancel state.
  void setHasCancel(bool Has) { HasCancel = Has; }

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param HasCancel true, if current directive has inner cancel directive.
  ///
  static OMPTaskDirective *Create(const ASTContext &C, SourceLocation StartLoc,
                                  SourceLocation EndLoc,
                                  ArrayRef<OMPClause *> Clauses,
                                  Stmt *AssociatedStmt, bool HasCancel);

  /// Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTaskDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses,
                                       EmptyShell);

  /// Return true if current directive has inner cancel directive.
  bool hasCancel() const { return HasCancel; }

  /// LLVM-style RTTI support.
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTaskDirectiveClass;
  }
};
/// This represents '#pragma omp taskyield' directive.
///
/// \code
/// #pragma omp taskyield
/// \endcode
/// A standalone directive: it has no clauses and no associated statement.
///
class OMPTaskyieldDirective : public OMPExecutableDirective {
  // Friends may use the private constructors (deserialization / creation).
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPTaskyieldDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPTaskyieldDirectiveClass,
                               llvm::omp::OMPD_taskyield, StartLoc, EndLoc) {}

  /// Build an empty directive.
  ///
  explicit OMPTaskyieldDirective()
      : OMPExecutableDirective(OMPTaskyieldDirectiveClass,
                               llvm::omp::OMPD_taskyield, SourceLocation(),
                               SourceLocation()) {}

public:
  /// Creates directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  static OMPTaskyieldDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc);

  /// Creates an empty directive.
  ///
  /// \param C AST context.
  ///
  static OMPTaskyieldDirective *CreateEmpty(const ASTContext &C, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTaskyieldDirectiveClass;
  }
};
/// This represents '#pragma omp barrier' directive.
///
/// \code
/// #pragma omp barrier
/// \endcode
/// A standalone directive: it has no clauses and no associated statement.
///
class OMPBarrierDirective : public OMPExecutableDirective {
  // Friends may use the private constructors (deserialization / creation).
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPBarrierDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPBarrierDirectiveClass,
                               llvm::omp::OMPD_barrier, StartLoc, EndLoc) {}

  /// Build an empty directive.
  ///
  explicit OMPBarrierDirective()
      : OMPExecutableDirective(OMPBarrierDirectiveClass,
                               llvm::omp::OMPD_barrier, SourceLocation(),
                               SourceLocation()) {}

public:
  /// Creates directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  static OMPBarrierDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc);

  /// Creates an empty directive.
  ///
  /// \param C AST context.
  ///
  static OMPBarrierDirective *CreateEmpty(const ASTContext &C, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPBarrierDirectiveClass;
  }
};
/// This represents '#pragma omp taskwait' directive.
///
/// \code
/// #pragma omp taskwait
/// \endcode
/// A standalone directive: it has no clauses and no associated statement.
///
class OMPTaskwaitDirective : public OMPExecutableDirective {
  // Friends may use the private constructors (deserialization / creation).
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPTaskwaitDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPTaskwaitDirectiveClass,
                               llvm::omp::OMPD_taskwait, StartLoc, EndLoc) {}

  /// Build an empty directive.
  ///
  explicit OMPTaskwaitDirective()
      : OMPExecutableDirective(OMPTaskwaitDirectiveClass,
                               llvm::omp::OMPD_taskwait, SourceLocation(),
                               SourceLocation()) {}

public:
  /// Creates directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  static OMPTaskwaitDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc);

  /// Creates an empty directive.
  ///
  /// \param C AST context.
  ///
  static OMPTaskwaitDirective *CreateEmpty(const ASTContext &C, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTaskwaitDirectiveClass;
  }
};
/// This represents '#pragma omp taskgroup' directive.
///
/// \code
/// #pragma omp taskgroup
/// \endcode
///
class OMPTaskgroupDirective : public OMPExecutableDirective {
  // ASTStmtReader needs the private constructors/setters for deserialization;
  // OMPExecutableDirective provides the shared creation machinery.
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPTaskgroupDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPTaskgroupDirectiveClass,
                               llvm::omp::OMPD_taskgroup, StartLoc, EndLoc) {}

  /// Build an empty directive.
  ///
  explicit OMPTaskgroupDirective()
      : OMPExecutableDirective(OMPTaskgroupDirectiveClass,
                               llvm::omp::OMPD_taskgroup, SourceLocation(),
                               SourceLocation()) {}

  /// Sets the task_reduction return variable.
  /// Stored in child slot 0 (see getReductionRef()).
  void setReductionRef(Expr *RR) { Data->getChildren()[0] = RR; }

public:
  /// Creates directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param ReductionRef Reference to the task_reduction return variable.
  ///
  static OMPTaskgroupDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
         Expr *ReductionRef);

  /// Creates an empty directive.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTaskgroupDirective *CreateEmpty(const ASTContext &C,
                                            unsigned NumClauses, EmptyShell);

  /// Returns reference to the task_reduction return variable.
  /// May be null (slot 0 can hold a null expression).
  const Expr *getReductionRef() const {
    return const_cast<OMPTaskgroupDirective *>(this)->getReductionRef();
  }
  Expr *getReductionRef() { return cast_or_null<Expr>(Data->getChildren()[0]); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTaskgroupDirectiveClass;
  }
};
/// This represents '#pragma omp flush' directive.
///
/// \code
/// #pragma omp flush(a,b)
/// \endcode
/// In this example directive '#pragma omp flush' has 2 arguments - variables
/// 'a' and 'b'.
/// 'omp flush' directive does not have clauses but may have an optional list
/// of variables to flush. This list of variables is stored within some fake
/// clause FlushClause.
class OMPFlushDirective : public OMPExecutableDirective {
  // Friends may use the private constructors (deserialization / creation).
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPFlushDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPFlushDirectiveClass, llvm::omp::OMPD_flush,
                               StartLoc, EndLoc) {}

  /// Build an empty directive.
  ///
  explicit OMPFlushDirective()
      : OMPExecutableDirective(OMPFlushDirectiveClass, llvm::omp::OMPD_flush,
                               SourceLocation(), SourceLocation()) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param Clauses List of clauses (only single OMPFlushClause clause is
  /// allowed).
  ///
  static OMPFlushDirective *Create(const ASTContext &C, SourceLocation StartLoc,
                                   SourceLocation EndLoc,
                                   ArrayRef<OMPClause *> Clauses);

  /// Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPFlushDirective *CreateEmpty(const ASTContext &C,
                                        unsigned NumClauses, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPFlushDirectiveClass;
  }
};
/// This represents '#pragma omp depobj' directive.
///
/// \code
/// #pragma omp depobj(a) depend(in:x,y)
/// \endcode
/// In this example directive '#pragma omp depobj' initializes a depobj object
/// 'a' with dependence type 'in' and a list with 'x' and 'y' locators.
class OMPDepobjDirective final : public OMPExecutableDirective {
  // Friends may use the private constructors (deserialization / creation).
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPDepobjDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPDepobjDirectiveClass, llvm::omp::OMPD_depobj,
                               StartLoc, EndLoc) {}

  /// Build an empty directive.
  ///
  explicit OMPDepobjDirective()
      : OMPExecutableDirective(OMPDepobjDirectiveClass, llvm::omp::OMPD_depobj,
                               SourceLocation(), SourceLocation()) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param Clauses List of clauses.
  ///
  static OMPDepobjDirective *Create(const ASTContext &C,
                                    SourceLocation StartLoc,
                                    SourceLocation EndLoc,
                                    ArrayRef<OMPClause *> Clauses);

  /// Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPDepobjDirective *CreateEmpty(const ASTContext &C,
                                         unsigned NumClauses, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPDepobjDirectiveClass;
  }
};
/// This represents '#pragma omp ordered' directive.
///
/// \code
/// #pragma omp ordered
/// \endcode
///
class OMPOrderedDirective : public OMPExecutableDirective {
  // Friends may use the private constructors (deserialization / creation).
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPOrderedDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPOrderedDirectiveClass,
                               llvm::omp::OMPD_ordered, StartLoc, EndLoc) {}

  /// Build an empty directive.
  ///
  explicit OMPOrderedDirective()
      : OMPExecutableDirective(OMPOrderedDirectiveClass,
                               llvm::omp::OMPD_ordered, SourceLocation(),
                               SourceLocation()) {}

public:
  /// Creates directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPOrderedDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);

  /// Creates an empty directive.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  /// \param IsStandalone true, if the standalone directive is created.
  ///
  static OMPOrderedDirective *CreateEmpty(const ASTContext &C,
                                          unsigned NumClauses,
                                          bool IsStandalone, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPOrderedDirectiveClass;
  }
};
/// This represents '#pragma omp atomic' directive.
///
/// \code
/// #pragma omp atomic capture
/// \endcode
/// In this example directive '#pragma omp atomic' has clause 'capture'.
///
class OMPAtomicDirective : public OMPExecutableDirective {
  // ASTStmtReader needs the private constructors/setters for deserialization;
  // OMPExecutableDirective provides the shared creation machinery.
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Used for 'atomic update' or 'atomic capture' constructs. They may
  /// have atomic expressions of forms
  /// \code
  /// x = x binop expr;
  /// x = expr binop x;
  /// \endcode
  /// This field is true for the first form of the expression and false for the
  /// second. Required for correct codegen of non-associative operations (like
  /// << or >>).
  bool IsXLHSInRHSPart = false;
  /// Used for 'atomic update' or 'atomic capture' constructs. They may
  /// have atomic expressions of forms
  /// \code
  /// v = x; <update x>;
  /// <update x>; v = x;
  /// \endcode
  /// This field is true for the first(postfix) form of the expression and false
  /// otherwise.
  bool IsPostfixUpdate = false;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPAtomicDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPAtomicDirectiveClass, llvm::omp::OMPD_atomic,
                               StartLoc, EndLoc) {}

  /// Build an empty directive.
  ///
  explicit OMPAtomicDirective()
      : OMPExecutableDirective(OMPAtomicDirectiveClass, llvm::omp::OMPD_atomic,
                               SourceLocation(), SourceLocation()) {}

  // The four helper expressions live in fixed child slots:
  //   [0] = 'x', [1] = update expression, [2] = 'v', [3] = 'expr'.
  // The getters below read the same slots.

  /// Set 'x' part of the associated expression/statement.
  void setX(Expr *X) { Data->getChildren()[0] = X; }
  /// Set helper expression of the form
  /// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' or
  /// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'.
  void setUpdateExpr(Expr *UE) { Data->getChildren()[1] = UE; }
  /// Set 'v' part of the associated expression/statement.
  void setV(Expr *V) { Data->getChildren()[2] = V; }
  /// Set 'expr' part of the associated expression/statement.
  void setExpr(Expr *E) { Data->getChildren()[3] = E; }

public:
  /// Creates directive with a list of \a Clauses and 'x', 'v' and 'expr'
  /// parts of the atomic construct (see Section 2.12.6, atomic Construct, for
  /// detailed description of 'x', 'v' and 'expr').
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param X 'x' part of the associated expression/statement.
  /// \param V 'v' part of the associated expression/statement.
  /// \param E 'expr' part of the associated expression/statement.
  /// \param UE Helper expression of the form
  /// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' or
  /// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'.
  /// \param IsXLHSInRHSPart true if \a UE has the first form and false if the
  /// second.
  /// \param IsPostfixUpdate true if original value of 'x' must be stored in
  /// 'v', not an updated one.
  static OMPAtomicDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *X, Expr *V,
         Expr *E, Expr *UE, bool IsXLHSInRHSPart, bool IsPostfixUpdate);

  /// Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPAtomicDirective *CreateEmpty(const ASTContext &C,
                                         unsigned NumClauses, EmptyShell);

  /// Get 'x' part of the associated expression/statement. May be null.
  Expr *getX() { return cast_or_null<Expr>(Data->getChildren()[0]); }
  const Expr *getX() const {
    return cast_or_null<Expr>(Data->getChildren()[0]);
  }
  /// Get helper expression of the form
  /// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' or
  /// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'. May be null.
  Expr *getUpdateExpr() { return cast_or_null<Expr>(Data->getChildren()[1]); }
  const Expr *getUpdateExpr() const {
    return cast_or_null<Expr>(Data->getChildren()[1]);
  }
  /// Return true if helper update expression has form
  /// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' and false if it has form
  /// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'.
  bool isXLHSInRHSPart() const { return IsXLHSInRHSPart; }
  /// Return true if 'v' expression must be updated to original value of
  /// 'x', false if 'v' must be updated to the new value of 'x'.
  bool isPostfixUpdate() const { return IsPostfixUpdate; }
  /// Get 'v' part of the associated expression/statement. May be null.
  Expr *getV() { return cast_or_null<Expr>(Data->getChildren()[2]); }
  const Expr *getV() const {
    return cast_or_null<Expr>(Data->getChildren()[2]);
  }
  /// Get 'expr' part of the associated expression/statement. May be null.
  Expr *getExpr() { return cast_or_null<Expr>(Data->getChildren()[3]); }
  const Expr *getExpr() const {
    return cast_or_null<Expr>(Data->getChildren()[3]);
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPAtomicDirectiveClass;
  }
};
/// This represents '#pragma omp target' directive.
///
/// \code
/// #pragma omp target if(a)
/// \endcode
/// In this example directive '#pragma omp target' has clause 'if' with
/// condition 'a'.
///
class OMPTargetDirective : public OMPExecutableDirective {
  // Friends may use the private constructors (deserialization / creation).
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPTargetDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPTargetDirectiveClass, llvm::omp::OMPD_target,
                               StartLoc, EndLoc) {}

  /// Build an empty directive.
  ///
  explicit OMPTargetDirective()
      : OMPExecutableDirective(OMPTargetDirectiveClass, llvm::omp::OMPD_target,
                               SourceLocation(), SourceLocation()) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPTargetDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);

  /// Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTargetDirective *CreateEmpty(const ASTContext &C,
                                         unsigned NumClauses, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetDirectiveClass;
  }
};
/// This represents '#pragma omp target data' directive.
///
/// \code
/// #pragma omp target data device(0) if(a) map(b[:])
/// \endcode
/// In this example directive '#pragma omp target data' has clauses 'device'
/// with the value '0', 'if' with condition 'a' and 'map' with array
/// section 'b[:]'.
///
class OMPTargetDataDirective : public OMPExecutableDirective {
  // Friends may use the private constructors (deserialization / creation).
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPTargetDataDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPTargetDataDirectiveClass,
                               llvm::omp::OMPD_target_data, StartLoc, EndLoc) {}

  /// Build an empty directive.
  ///
  explicit OMPTargetDataDirective()
      : OMPExecutableDirective(OMPTargetDataDirectiveClass,
                               llvm::omp::OMPD_target_data, SourceLocation(),
                               SourceLocation()) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPTargetDataDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);

  /// Creates an empty directive with the place for \a N clauses.
  ///
  /// \param C AST context.
  /// \param N The number of clauses.
  ///
  static OMPTargetDataDirective *CreateEmpty(const ASTContext &C, unsigned N,
                                             EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetDataDirectiveClass;
  }
};
/// This represents '#pragma omp target enter data' directive.
///
/// \code
/// #pragma omp target enter data device(0) if(a) map(b[:])
/// \endcode
/// In this example directive '#pragma omp target enter data' has clauses
/// 'device' with the value '0', 'if' with condition 'a' and 'map' with array
/// section 'b[:]'.
///
class OMPTargetEnterDataDirective : public OMPExecutableDirective {
  // Friends may use the private constructors (deserialization / creation).
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPTargetEnterDataDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPTargetEnterDataDirectiveClass,
                               llvm::omp::OMPD_target_enter_data, StartLoc,
                               EndLoc) {}

  /// Build an empty directive.
  ///
  explicit OMPTargetEnterDataDirective()
      : OMPExecutableDirective(OMPTargetEnterDataDirectiveClass,
                               llvm::omp::OMPD_target_enter_data,
                               SourceLocation(), SourceLocation()) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPTargetEnterDataDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);

  /// Creates an empty directive with the place for \a N clauses.
  ///
  /// \param C AST context.
  /// \param N The number of clauses.
  ///
  static OMPTargetEnterDataDirective *CreateEmpty(const ASTContext &C,
                                                  unsigned N, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetEnterDataDirectiveClass;
  }
};
/// This represents '#pragma omp target exit data' directive.
///
/// \code
/// #pragma omp target exit data device(0) if(a) map(b[:])
/// \endcode
/// In this example directive '#pragma omp target exit data' has clauses
/// 'device' with the value '0', 'if' with condition 'a' and 'map' with array
/// section 'b[:]'.
///
class OMPTargetExitDataDirective : public OMPExecutableDirective {
  // Friends may use the private constructors (deserialization / creation).
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPTargetExitDataDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPTargetExitDataDirectiveClass,
                               llvm::omp::OMPD_target_exit_data, StartLoc,
                               EndLoc) {}

  /// Build an empty directive.
  ///
  explicit OMPTargetExitDataDirective()
      : OMPExecutableDirective(OMPTargetExitDataDirectiveClass,
                               llvm::omp::OMPD_target_exit_data,
                               SourceLocation(), SourceLocation()) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPTargetExitDataDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);

  /// Creates an empty directive with the place for \a N clauses.
  ///
  /// \param C AST context.
  /// \param N The number of clauses.
  ///
  static OMPTargetExitDataDirective *CreateEmpty(const ASTContext &C,
                                                 unsigned N, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetExitDataDirectiveClass;
  }
};
/// This represents '#pragma omp target parallel' directive.
///
/// \code
/// #pragma omp target parallel if(a)
/// \endcode
/// In this example directive '#pragma omp target parallel' has clause 'if' with
/// condition 'a'.
///
class OMPTargetParallelDirective : public OMPExecutableDirective {
  // ASTStmtReader needs the private constructors/setters for deserialization;
  // OMPExecutableDirective provides the shared creation machinery.
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;
  /// true if the construct has inner cancel directive.
  bool HasCancel = false;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPTargetParallelDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPTargetParallelDirectiveClass,
                               llvm::omp::OMPD_target_parallel, StartLoc,
                               EndLoc) {}

  /// Build an empty directive.
  ///
  explicit OMPTargetParallelDirective()
      : OMPExecutableDirective(OMPTargetParallelDirectiveClass,
                               llvm::omp::OMPD_target_parallel,
                               SourceLocation(), SourceLocation()) {}

  /// Sets special task reduction descriptor.
  /// Stored in child slot 0 (see getTaskReductionRefExpr()).
  void setTaskReductionRefExpr(Expr *E) { Data->getChildren()[0] = E; }
  /// Set cancel state.
  void setHasCancel(bool Has) { HasCancel = Has; }

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param TaskRedRef Task reduction special reference expression to handle
  /// taskgroup descriptor.
  /// \param HasCancel true if this directive has inner cancel directive.
  ///
  static OMPTargetParallelDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *TaskRedRef,
         bool HasCancel);

  /// Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTargetParallelDirective *
  CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell);

  /// Returns special task reduction reference expression. May be null.
  Expr *getTaskReductionRefExpr() {
    return cast_or_null<Expr>(Data->getChildren()[0]);
  }
  const Expr *getTaskReductionRefExpr() const {
    return const_cast<OMPTargetParallelDirective *>(this)
        ->getTaskReductionRefExpr();
  }

  /// Return true if current directive has inner cancel directive.
  bool hasCancel() const { return HasCancel; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetParallelDirectiveClass;
  }
};
/// This represents '#pragma omp target parallel for' directive.
///
/// \code
/// #pragma omp target parallel for private(a,b) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp target parallel for' has clauses
/// 'private' with the variables 'a' and 'b' and 'reduction' with operator '+'
/// and variables 'c' and 'd'.
///
class OMPTargetParallelForDirective : public OMPLoopDirective {
  // ASTStmtReader needs the private constructors/setters for deserialization;
  // OMPExecutableDirective provides the shared creation machinery.
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;
  /// true if current region has inner cancel directive.
  bool HasCancel = false;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPTargetParallelForDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                                unsigned CollapsedNum)
      : OMPLoopDirective(OMPTargetParallelForDirectiveClass,
                         llvm::omp::OMPD_target_parallel_for, StartLoc, EndLoc,
                         CollapsedNum) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPTargetParallelForDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPTargetParallelForDirectiveClass,
                         llvm::omp::OMPD_target_parallel_for, SourceLocation(),
                         SourceLocation(), CollapsedNum) {}

  /// Sets special task reduction descriptor.
  /// The expression lives in the first child slot after the loop-related
  /// children (index computed by numLoopChildren).
  void setTaskReductionRefExpr(Expr *E) {
    Data->getChildren()[numLoopChildren(
        getLoopsNumber(), llvm::omp::OMPD_target_parallel_for)] = E;
  }
  /// Set cancel state.
  void setHasCancel(bool Has) { HasCancel = Has; }

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  /// \param TaskRedRef Task reduction special reference expression to handle
  /// taskgroup descriptor.
  /// \param HasCancel true if current directive has inner cancel directive.
  ///
  static OMPTargetParallelForDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs, Expr *TaskRedRef,
         bool HasCancel);

  /// Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTargetParallelForDirective *CreateEmpty(const ASTContext &C,
                                                    unsigned NumClauses,
                                                    unsigned CollapsedNum,
                                                    EmptyShell);

  /// Returns special task reduction reference expression. May be null.
  Expr *getTaskReductionRefExpr() {
    return cast_or_null<Expr>(Data->getChildren()[numLoopChildren(
        getLoopsNumber(), llvm::omp::OMPD_target_parallel_for)]);
  }
  const Expr *getTaskReductionRefExpr() const {
    return const_cast<OMPTargetParallelForDirective *>(this)
        ->getTaskReductionRefExpr();
  }

  /// Return true if current directive has inner cancel directive.
  bool hasCancel() const { return HasCancel; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetParallelForDirectiveClass;
  }
};
/// This represents '#pragma omp teams' directive.
///
/// \code
/// #pragma omp teams if(a)
/// \endcode
/// In this example directive '#pragma omp teams' has clause 'if' with
/// condition 'a'.
///
class OMPTeamsDirective : public OMPExecutableDirective {
  // Friends may use the private constructors (deserialization / creation).
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPTeamsDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPTeamsDirectiveClass, llvm::omp::OMPD_teams,
                               StartLoc, EndLoc) {}

  /// Build an empty directive.
  ///
  explicit OMPTeamsDirective()
      : OMPExecutableDirective(OMPTeamsDirectiveClass, llvm::omp::OMPD_teams,
                               SourceLocation(), SourceLocation()) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPTeamsDirective *Create(const ASTContext &C, SourceLocation StartLoc,
                                   SourceLocation EndLoc,
                                   ArrayRef<OMPClause *> Clauses,
                                   Stmt *AssociatedStmt);

  /// Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTeamsDirective *CreateEmpty(const ASTContext &C,
                                        unsigned NumClauses, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTeamsDirectiveClass;
  }
};
/// This represents '#pragma omp cancellation point' directive.
///
/// \code
/// #pragma omp cancellation point for
/// \endcode
///
/// In this example a cancellation point is created for innermost 'for' region.
class OMPCancellationPointDirective : public OMPExecutableDirective {
  // ASTStmtReader needs the private constructors/setters for deserialization;
  // OMPExecutableDirective provides the shared creation machinery.
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;
  /// Kind of the region this cancellation point applies to
  /// (e.g. OMPD_for); OMPD_unknown until set.
  OpenMPDirectiveKind CancelRegion = llvm::omp::OMPD_unknown;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// statements and child expressions.
  ///
  OMPCancellationPointDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPCancellationPointDirectiveClass,
                               llvm::omp::OMPD_cancellation_point, StartLoc,
                               EndLoc) {}

  /// Build an empty directive.
  explicit OMPCancellationPointDirective()
      : OMPExecutableDirective(OMPCancellationPointDirectiveClass,
                               llvm::omp::OMPD_cancellation_point,
                               SourceLocation(), SourceLocation()) {}

  /// Set cancel region for current cancellation point.
  /// \param CR Cancellation region.
  void setCancelRegion(OpenMPDirectiveKind CR) { CancelRegion = CR; }

public:
  /// Creates directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  static OMPCancellationPointDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         OpenMPDirectiveKind CancelRegion);

  /// Creates an empty directive.
  ///
  /// \param C AST context.
  ///
  static OMPCancellationPointDirective *CreateEmpty(const ASTContext &C,
                                                    EmptyShell);

  /// Get cancellation region for the current cancellation point.
  OpenMPDirectiveKind getCancelRegion() const { return CancelRegion; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPCancellationPointDirectiveClass;
  }
};
/// This represents '#pragma omp cancel' directive.
///
/// \code
/// #pragma omp cancel for
/// \endcode
///
/// In this example a cancel is created for innermost 'for' region.
class OMPCancelDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;
  /// Kind of the construct being cancelled (e.g. 'for'); OMPD_unknown until
  /// set via setCancelRegion().
  OpenMPDirectiveKind CancelRegion = llvm::omp::OMPD_unknown;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPCancelDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPCancelDirectiveClass, llvm::omp::OMPD_cancel,
                               StartLoc, EndLoc) {}

  /// Build an empty directive.
  ///
  explicit OMPCancelDirective()
      : OMPExecutableDirective(OMPCancelDirectiveClass, llvm::omp::OMPD_cancel,
                               SourceLocation(), SourceLocation()) {}

  /// Set cancel region for the current cancel directive.
  /// \param CR Cancellation region.
  void setCancelRegion(OpenMPDirectiveKind CR) { CancelRegion = CR; }

public:
  /// Creates directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param Clauses List of clauses.
  /// \param CancelRegion Construct being cancelled.
  ///
  static OMPCancelDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, OpenMPDirectiveKind CancelRegion);

  /// Creates an empty directive.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPCancelDirective *CreateEmpty(const ASTContext &C,
                                         unsigned NumClauses, EmptyShell);

  /// Get cancellation region for the current cancel directive.
  OpenMPDirectiveKind getCancelRegion() const { return CancelRegion; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPCancelDirectiveClass;
  }
};
/// This represents '#pragma omp taskloop' directive.
///
/// \code
/// #pragma omp taskloop private(a,b) grainsize(val) num_tasks(num)
/// \endcode
/// In this example directive '#pragma omp taskloop' has clauses 'private'
/// with the variables 'a' and 'b', 'grainsize' with expression 'val' and
/// 'num_tasks' with expression 'num'.
///
class OMPTaskLoopDirective : public OMPLoopDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;
  /// true if the construct has an inner cancel directive.
  bool HasCancel = false;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPTaskLoopDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                       unsigned CollapsedNum)
      : OMPLoopDirective(OMPTaskLoopDirectiveClass, llvm::omp::OMPD_taskloop,
                         StartLoc, EndLoc, CollapsedNum) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPTaskLoopDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPTaskLoopDirectiveClass, llvm::omp::OMPD_taskloop,
                         SourceLocation(), SourceLocation(), CollapsedNum) {}

  /// Set cancel state.
  void setHasCancel(bool Has) { HasCancel = Has; }

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  /// \param HasCancel true if this directive has inner cancel directive.
  ///
  static OMPTaskLoopDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs, bool HasCancel);

  /// Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTaskLoopDirective *CreateEmpty(const ASTContext &C,
                                           unsigned NumClauses,
                                           unsigned CollapsedNum, EmptyShell);

  /// Return true if current directive has inner cancel directive.
  bool hasCancel() const { return HasCancel; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTaskLoopDirectiveClass;
  }
};
/// This represents '#pragma omp taskloop simd' directive.
///
/// \code
/// #pragma omp taskloop simd private(a,b) grainsize(val) num_tasks(num)
/// \endcode
/// In this example directive '#pragma omp taskloop simd' has clauses 'private'
/// with the variables 'a' and 'b', 'grainsize' with expression 'val' and
/// 'num_tasks' with expression 'num'.
///
class OMPTaskLoopSimdDirective : public OMPLoopDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPTaskLoopSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                           unsigned CollapsedNum)
      : OMPLoopDirective(OMPTaskLoopSimdDirectiveClass,
                         llvm::omp::OMPD_taskloop_simd, StartLoc, EndLoc,
                         CollapsedNum) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPTaskLoopSimdDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPTaskLoopSimdDirectiveClass,
                         llvm::omp::OMPD_taskloop_simd, SourceLocation(),
                         SourceLocation(), CollapsedNum) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPTaskLoopSimdDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTaskLoopSimdDirective *CreateEmpty(const ASTContext &C,
                                               unsigned NumClauses,
                                               unsigned CollapsedNum,
                                               EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTaskLoopSimdDirectiveClass;
  }
};
/// This represents '#pragma omp master taskloop' directive.
///
/// \code
/// #pragma omp master taskloop private(a,b) grainsize(val) num_tasks(num)
/// \endcode
/// In this example directive '#pragma omp master taskloop' has clauses
/// 'private' with the variables 'a' and 'b', 'grainsize' with expression 'val'
/// and 'num_tasks' with expression 'num'.
///
class OMPMasterTaskLoopDirective : public OMPLoopDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;
  /// true if the construct has an inner cancel directive.
  bool HasCancel = false;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPMasterTaskLoopDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                             unsigned CollapsedNum)
      : OMPLoopDirective(OMPMasterTaskLoopDirectiveClass,
                         llvm::omp::OMPD_master_taskloop, StartLoc, EndLoc,
                         CollapsedNum) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPMasterTaskLoopDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPMasterTaskLoopDirectiveClass,
                         llvm::omp::OMPD_master_taskloop, SourceLocation(),
                         SourceLocation(), CollapsedNum) {}

  /// Set cancel state.
  void setHasCancel(bool Has) { HasCancel = Has; }

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  /// \param HasCancel true if this directive has inner cancel directive.
  ///
  static OMPMasterTaskLoopDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs, bool HasCancel);

  /// Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPMasterTaskLoopDirective *CreateEmpty(const ASTContext &C,
                                                 unsigned NumClauses,
                                                 unsigned CollapsedNum,
                                                 EmptyShell);

  /// Return true if current directive has inner cancel directive.
  bool hasCancel() const { return HasCancel; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPMasterTaskLoopDirectiveClass;
  }
};
/// This represents '#pragma omp master taskloop simd' directive.
///
/// \code
/// #pragma omp master taskloop simd private(a,b) grainsize(val) num_tasks(num)
/// \endcode
/// In this example directive '#pragma omp master taskloop simd' has clauses
/// 'private' with the variables 'a' and 'b', 'grainsize' with expression 'val'
/// and 'num_tasks' with expression 'num'.
///
class OMPMasterTaskLoopSimdDirective : public OMPLoopDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPMasterTaskLoopSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                                 unsigned CollapsedNum)
      : OMPLoopDirective(OMPMasterTaskLoopSimdDirectiveClass,
                         llvm::omp::OMPD_master_taskloop_simd, StartLoc, EndLoc,
                         CollapsedNum) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPMasterTaskLoopSimdDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPMasterTaskLoopSimdDirectiveClass,
                         llvm::omp::OMPD_master_taskloop_simd, SourceLocation(),
                         SourceLocation(), CollapsedNum) {}

public:
  /// Creates directive with a list of \p Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPMasterTaskLoopSimdDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place for \p NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPMasterTaskLoopSimdDirective *CreateEmpty(const ASTContext &C,
                                                     unsigned NumClauses,
                                                     unsigned CollapsedNum,
                                                     EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPMasterTaskLoopSimdDirectiveClass;
  }
};
/// This represents '#pragma omp parallel master taskloop' directive.
///
/// \code
/// #pragma omp parallel master taskloop private(a,b) grainsize(val)
/// num_tasks(num)
/// \endcode
/// In this example directive '#pragma omp parallel master taskloop' has clauses
/// 'private' with the variables 'a' and 'b', 'grainsize' with expression 'val'
/// and 'num_tasks' with expression 'num'.
///
class OMPParallelMasterTaskLoopDirective : public OMPLoopDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;
  /// true if the construct has an inner cancel directive.
  bool HasCancel = false;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPParallelMasterTaskLoopDirective(SourceLocation StartLoc,
                                     SourceLocation EndLoc,
                                     unsigned CollapsedNum)
      : OMPLoopDirective(OMPParallelMasterTaskLoopDirectiveClass,
                         llvm::omp::OMPD_parallel_master_taskloop, StartLoc,
                         EndLoc, CollapsedNum) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPParallelMasterTaskLoopDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPParallelMasterTaskLoopDirectiveClass,
                         llvm::omp::OMPD_parallel_master_taskloop,
                         SourceLocation(), SourceLocation(), CollapsedNum) {}

  /// Set cancel state.
  void setHasCancel(bool Has) { HasCancel = Has; }

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  /// \param HasCancel true if this directive has inner cancel directive.
  ///
  static OMPParallelMasterTaskLoopDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs, bool HasCancel);

  /// Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPParallelMasterTaskLoopDirective *CreateEmpty(const ASTContext &C,
                                                         unsigned NumClauses,
                                                         unsigned CollapsedNum,
                                                         EmptyShell);

  /// Return true if current directive has inner cancel directive.
  bool hasCancel() const { return HasCancel; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPParallelMasterTaskLoopDirectiveClass;
  }
};
/// This represents '#pragma omp parallel master taskloop simd' directive.
///
/// \code
/// #pragma omp parallel master taskloop simd private(a,b) grainsize(val)
/// num_tasks(num)
/// \endcode
/// In this example directive '#pragma omp parallel master taskloop simd' has
/// clauses 'private' with the variables 'a' and 'b', 'grainsize' with
/// expression 'val' and 'num_tasks' with expression 'num'.
///
class OMPParallelMasterTaskLoopSimdDirective : public OMPLoopDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPParallelMasterTaskLoopSimdDirective(SourceLocation StartLoc,
                                         SourceLocation EndLoc,
                                         unsigned CollapsedNum)
      : OMPLoopDirective(OMPParallelMasterTaskLoopSimdDirectiveClass,
                         llvm::omp::OMPD_parallel_master_taskloop_simd,
                         StartLoc, EndLoc, CollapsedNum) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPParallelMasterTaskLoopSimdDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPParallelMasterTaskLoopSimdDirectiveClass,
                         llvm::omp::OMPD_parallel_master_taskloop_simd,
                         SourceLocation(), SourceLocation(), CollapsedNum) {}

public:
  /// Creates directive with a list of \p Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPParallelMasterTaskLoopSimdDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place
  /// for \p NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPParallelMasterTaskLoopSimdDirective *
  CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum,
              EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPParallelMasterTaskLoopSimdDirectiveClass;
  }
};
/// This represents '#pragma omp distribute' directive.
///
/// \code
/// #pragma omp distribute private(a,b)
/// \endcode
/// In this example directive '#pragma omp distribute' has clauses 'private'
/// with the variables 'a' and 'b'.
///
class OMPDistributeDirective : public OMPLoopDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPDistributeDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                         unsigned CollapsedNum)
      : OMPLoopDirective(OMPDistributeDirectiveClass,
                         llvm::omp::OMPD_distribute, StartLoc, EndLoc,
                         CollapsedNum) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPDistributeDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPDistributeDirectiveClass,
                         llvm::omp::OMPD_distribute, SourceLocation(),
                         SourceLocation(), CollapsedNum) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPDistributeDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPDistributeDirective *CreateEmpty(const ASTContext &C,
                                             unsigned NumClauses,
                                             unsigned CollapsedNum, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPDistributeDirectiveClass;
  }
};
/// This represents '#pragma omp target update' directive.
///
/// \code
/// #pragma omp target update to(a) from(b) device(1)
/// \endcode
/// In this example directive '#pragma omp target update' has clause 'to' with
/// argument 'a', clause 'from' with argument 'b' and clause 'device' with
/// argument '1'.
///
class OMPTargetUpdateDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPTargetUpdateDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPTargetUpdateDirectiveClass,
                               llvm::omp::OMPD_target_update, StartLoc,
                               EndLoc) {}

  /// Build an empty directive.
  ///
  explicit OMPTargetUpdateDirective()
      : OMPExecutableDirective(OMPTargetUpdateDirectiveClass,
                               llvm::omp::OMPD_target_update, SourceLocation(),
                               SourceLocation()) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPTargetUpdateDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);

  /// Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTargetUpdateDirective *CreateEmpty(const ASTContext &C,
                                               unsigned NumClauses, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetUpdateDirectiveClass;
  }
};
/// This represents '#pragma omp distribute parallel for' composite
/// directive.
///
/// \code
/// #pragma omp distribute parallel for private(a,b)
/// \endcode
/// In this example directive '#pragma omp distribute parallel for' has clause
/// 'private' with the variables 'a' and 'b'.
///
class OMPDistributeParallelForDirective : public OMPLoopDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;
  /// true if the construct has an inner cancel directive.
  bool HasCancel = false;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPDistributeParallelForDirective(SourceLocation StartLoc,
                                    SourceLocation EndLoc,
                                    unsigned CollapsedNum)
      : OMPLoopDirective(OMPDistributeParallelForDirectiveClass,
                         llvm::omp::OMPD_distribute_parallel_for, StartLoc,
                         EndLoc, CollapsedNum) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPDistributeParallelForDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPDistributeParallelForDirectiveClass,
                         llvm::omp::OMPD_distribute_parallel_for,
                         SourceLocation(), SourceLocation(), CollapsedNum) {}

  /// Sets special task reduction descriptor.
  // The descriptor occupies the child slot immediately after the
  // loop-related children (hence the numLoopChildren(...) index).
  void setTaskReductionRefExpr(Expr *E) {
    Data->getChildren()[numLoopChildren(
        getLoopsNumber(), llvm::omp::OMPD_distribute_parallel_for)] = E;
  }

  /// Set cancel state.
  void setHasCancel(bool Has) { HasCancel = Has; }

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  /// \param TaskRedRef Task reduction special reference expression to handle
  /// taskgroup descriptor.
  /// \param HasCancel true if this directive has inner cancel directive.
  ///
  static OMPDistributeParallelForDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs, Expr *TaskRedRef,
         bool HasCancel);

  /// Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPDistributeParallelForDirective *CreateEmpty(const ASTContext &C,
                                                        unsigned NumClauses,
                                                        unsigned CollapsedNum,
                                                        EmptyShell);

  /// Returns special task reduction reference expression.
  /// May be null if no task reduction descriptor was set.
  Expr *getTaskReductionRefExpr() {
    return cast_or_null<Expr>(Data->getChildren()[numLoopChildren(
        getLoopsNumber(), llvm::omp::OMPD_distribute_parallel_for)]);
  }
  const Expr *getTaskReductionRefExpr() const {
    return const_cast<OMPDistributeParallelForDirective *>(this)
        ->getTaskReductionRefExpr();
  }

  /// Return true if current directive has inner cancel directive.
  bool hasCancel() const { return HasCancel; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPDistributeParallelForDirectiveClass;
  }
};
/// This represents '#pragma omp distribute parallel for simd' composite
/// directive.
///
/// \code
/// #pragma omp distribute parallel for simd private(x)
/// \endcode
/// In this example directive '#pragma omp distribute parallel for simd' has
/// clause 'private' with the variable 'x'.
///
class OMPDistributeParallelForSimdDirective final : public OMPLoopDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPDistributeParallelForSimdDirective(SourceLocation StartLoc,
                                        SourceLocation EndLoc,
                                        unsigned CollapsedNum)
      : OMPLoopDirective(OMPDistributeParallelForSimdDirectiveClass,
                         llvm::omp::OMPD_distribute_parallel_for_simd, StartLoc,
                         EndLoc, CollapsedNum) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPDistributeParallelForSimdDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPDistributeParallelForSimdDirectiveClass,
                         llvm::omp::OMPD_distribute_parallel_for_simd,
                         SourceLocation(), SourceLocation(), CollapsedNum) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPDistributeParallelForSimdDirective *Create(
      const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
      unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
      Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPDistributeParallelForSimdDirective *CreateEmpty(
      const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum,
      EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPDistributeParallelForSimdDirectiveClass;
  }
};
/// This represents '#pragma omp distribute simd' composite directive.
///
/// \code
/// #pragma omp distribute simd private(x)
/// \endcode
/// In this example directive '#pragma omp distribute simd' has clause
/// 'private' with the variable 'x'.
///
class OMPDistributeSimdDirective final : public OMPLoopDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPDistributeSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                             unsigned CollapsedNum)
      : OMPLoopDirective(OMPDistributeSimdDirectiveClass,
                         llvm::omp::OMPD_distribute_simd, StartLoc, EndLoc,
                         CollapsedNum) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPDistributeSimdDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPDistributeSimdDirectiveClass,
                         llvm::omp::OMPD_distribute_simd, SourceLocation(),
                         SourceLocation(), CollapsedNum) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPDistributeSimdDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPDistributeSimdDirective *CreateEmpty(const ASTContext &C,
                                                 unsigned NumClauses,
                                                 unsigned CollapsedNum,
                                                 EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPDistributeSimdDirectiveClass;
  }
};
/// This represents '#pragma omp target parallel for simd' directive.
///
/// \code
/// #pragma omp target parallel for simd private(a) map(b) safelen(c)
/// \endcode
/// In this example directive '#pragma omp target parallel for simd' has clauses
/// 'private' with the variable 'a', 'map' with the variable 'b' and 'safelen'
/// with the variable 'c'.
///
class OMPTargetParallelForSimdDirective final : public OMPLoopDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPTargetParallelForSimdDirective(SourceLocation StartLoc,
                                    SourceLocation EndLoc,
                                    unsigned CollapsedNum)
      : OMPLoopDirective(OMPTargetParallelForSimdDirectiveClass,
                         llvm::omp::OMPD_target_parallel_for_simd, StartLoc,
                         EndLoc, CollapsedNum) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPTargetParallelForSimdDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPTargetParallelForSimdDirectiveClass,
                         llvm::omp::OMPD_target_parallel_for_simd,
                         SourceLocation(), SourceLocation(), CollapsedNum) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPTargetParallelForSimdDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTargetParallelForSimdDirective *CreateEmpty(const ASTContext &C,
                                                        unsigned NumClauses,
                                                        unsigned CollapsedNum,
                                                        EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetParallelForSimdDirectiveClass;
  }
};
/// This represents '#pragma omp target simd' directive.
///
/// \code
/// #pragma omp target simd private(a) map(b) safelen(c)
/// \endcode
/// In this example directive '#pragma omp target simd' has clauses 'private'
/// with the variable 'a', 'map' with the variable 'b' and 'safelen' with
/// the variable 'c'.
///
class OMPTargetSimdDirective final : public OMPLoopDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;
  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPTargetSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                         unsigned CollapsedNum)
      : OMPLoopDirective(OMPTargetSimdDirectiveClass,
                         llvm::omp::OMPD_target_simd, StartLoc, EndLoc,
                         CollapsedNum) {}
  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPTargetSimdDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPTargetSimdDirectiveClass,
                         llvm::omp::OMPD_target_simd, SourceLocation(),
                         SourceLocation(), CollapsedNum) {}
public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPTargetSimdDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);
  /// Creates an empty directive with the place for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  static OMPTargetSimdDirective *CreateEmpty(const ASTContext &C,
                                             unsigned NumClauses,
                                             unsigned CollapsedNum,
                                             EmptyShell);
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetSimdDirectiveClass;
  }
};
/// This represents '#pragma omp teams distribute' directive.
///
/// \code
/// #pragma omp teams distribute private(a,b)
/// \endcode
/// In this example directive '#pragma omp teams distribute' has clauses
/// 'private' with the variables 'a' and 'b'
///
class OMPTeamsDistributeDirective final : public OMPLoopDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;
  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPTeamsDistributeDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                              unsigned CollapsedNum)
      : OMPLoopDirective(OMPTeamsDistributeDirectiveClass,
                         llvm::omp::OMPD_teams_distribute, StartLoc, EndLoc,
                         CollapsedNum) {}
  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPTeamsDistributeDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPTeamsDistributeDirectiveClass,
                         llvm::omp::OMPD_teams_distribute, SourceLocation(),
                         SourceLocation(), CollapsedNum) {}
public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPTeamsDistributeDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);
  /// Creates an empty directive with the place for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  static OMPTeamsDistributeDirective *CreateEmpty(const ASTContext &C,
                                                  unsigned NumClauses,
                                                  unsigned CollapsedNum,
                                                  EmptyShell);
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTeamsDistributeDirectiveClass;
  }
};
/// This represents '#pragma omp teams distribute simd'
/// combined directive.
///
/// \code
/// #pragma omp teams distribute simd private(a,b)
/// \endcode
/// In this example directive '#pragma omp teams distribute simd'
/// has clause 'private' with the variables 'a' and 'b'
///
class OMPTeamsDistributeSimdDirective final : public OMPLoopDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;
  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPTeamsDistributeSimdDirective(SourceLocation StartLoc,
                                  SourceLocation EndLoc, unsigned CollapsedNum)
      : OMPLoopDirective(OMPTeamsDistributeSimdDirectiveClass,
                         llvm::omp::OMPD_teams_distribute_simd, StartLoc,
                         EndLoc, CollapsedNum) {}
  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPTeamsDistributeSimdDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPTeamsDistributeSimdDirectiveClass,
                         llvm::omp::OMPD_teams_distribute_simd,
                         SourceLocation(), SourceLocation(), CollapsedNum) {}
public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPTeamsDistributeSimdDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);
  /// Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  static OMPTeamsDistributeSimdDirective *CreateEmpty(const ASTContext &C,
                                                      unsigned NumClauses,
                                                      unsigned CollapsedNum,
                                                      EmptyShell);
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTeamsDistributeSimdDirectiveClass;
  }
};
/// This represents '#pragma omp teams distribute parallel for simd' composite
/// directive.
///
/// \code
/// #pragma omp teams distribute parallel for simd private(x)
/// \endcode
/// In this example directive '#pragma omp teams distribute parallel for simd'
/// has clause 'private' with the variables 'x'
///
class OMPTeamsDistributeParallelForSimdDirective final
    : public OMPLoopDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;
  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPTeamsDistributeParallelForSimdDirective(SourceLocation StartLoc,
                                             SourceLocation EndLoc,
                                             unsigned CollapsedNum)
      : OMPLoopDirective(OMPTeamsDistributeParallelForSimdDirectiveClass,
                         llvm::omp::OMPD_teams_distribute_parallel_for_simd,
                         StartLoc, EndLoc, CollapsedNum) {}
  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPTeamsDistributeParallelForSimdDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPTeamsDistributeParallelForSimdDirectiveClass,
                         llvm::omp::OMPD_teams_distribute_parallel_for_simd,
                         SourceLocation(), SourceLocation(), CollapsedNum) {}
public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPTeamsDistributeParallelForSimdDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);
  /// Creates an empty directive with the place for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  static OMPTeamsDistributeParallelForSimdDirective *
  CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum,
              EmptyShell);
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTeamsDistributeParallelForSimdDirectiveClass;
  }
};
/// This represents '#pragma omp teams distribute parallel for' composite
/// directive.
///
/// \code
/// #pragma omp teams distribute parallel for private(x)
/// \endcode
/// In this example directive '#pragma omp teams distribute parallel for'
/// has clause 'private' with the variables 'x'
///
class OMPTeamsDistributeParallelForDirective final : public OMPLoopDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;
  /// true if the construct has inner cancel directive.
  bool HasCancel = false;
  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPTeamsDistributeParallelForDirective(SourceLocation StartLoc,
                                         SourceLocation EndLoc,
                                         unsigned CollapsedNum)
      : OMPLoopDirective(OMPTeamsDistributeParallelForDirectiveClass,
                         llvm::omp::OMPD_teams_distribute_parallel_for,
                         StartLoc, EndLoc, CollapsedNum) {}
  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPTeamsDistributeParallelForDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPTeamsDistributeParallelForDirectiveClass,
                         llvm::omp::OMPD_teams_distribute_parallel_for,
                         SourceLocation(), SourceLocation(), CollapsedNum) {}
  /// Sets special task reduction descriptor.
  void setTaskReductionRefExpr(Expr *E) {
    Data->getChildren()[numLoopChildren(
        getLoopsNumber(), llvm::omp::OMPD_teams_distribute_parallel_for)] = E;
  }
  /// Set cancel state.
  void setHasCancel(bool Has) { HasCancel = Has; }
public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  /// \param TaskRedRef Task reduction special reference expression to handle
  /// taskgroup descriptor.
  /// \param HasCancel true if this directive has inner cancel directive.
  ///
  static OMPTeamsDistributeParallelForDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs, Expr *TaskRedRef,
         bool HasCancel);
  /// Creates an empty directive with the place for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  static OMPTeamsDistributeParallelForDirective *
  CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum,
              EmptyShell);
  /// Returns special task reduction reference expression.
  Expr *getTaskReductionRefExpr() {
    return cast_or_null<Expr>(Data->getChildren()[numLoopChildren(
        getLoopsNumber(), llvm::omp::OMPD_teams_distribute_parallel_for)]);
  }
  /// Returns special task reduction reference expression (const overload).
  const Expr *getTaskReductionRefExpr() const {
    return const_cast<OMPTeamsDistributeParallelForDirective *>(this)
        ->getTaskReductionRefExpr();
  }
  /// Return true if current directive has inner cancel directive.
  bool hasCancel() const { return HasCancel; }
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTeamsDistributeParallelForDirectiveClass;
  }
};
/// This represents '#pragma omp target teams' directive.
///
/// \code
/// #pragma omp target teams if(a>0)
/// \endcode
/// In this example directive '#pragma omp target teams' has clause 'if' with
/// condition 'a>0'.
///
class OMPTargetTeamsDirective final : public OMPExecutableDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;
  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPTargetTeamsDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPTargetTeamsDirectiveClass,
                               llvm::omp::OMPD_target_teams, StartLoc, EndLoc) {
  }
  /// Build an empty directive.
  ///
  explicit OMPTargetTeamsDirective()
      : OMPExecutableDirective(OMPTargetTeamsDirectiveClass,
                               llvm::omp::OMPD_target_teams, SourceLocation(),
                               SourceLocation()) {}
public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPTargetTeamsDirective *Create(const ASTContext &C,
                                         SourceLocation StartLoc,
                                         SourceLocation EndLoc,
                                         ArrayRef<OMPClause *> Clauses,
                                         Stmt *AssociatedStmt);
  /// Creates an empty directive with the place for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTargetTeamsDirective *CreateEmpty(const ASTContext &C,
                                              unsigned NumClauses, EmptyShell);
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetTeamsDirectiveClass;
  }
};
/// This represents '#pragma omp target teams distribute' combined directive.
///
/// \code
/// #pragma omp target teams distribute private(x)
/// \endcode
/// In this example directive '#pragma omp target teams distribute' has clause
/// 'private' with the variables 'x'
///
class OMPTargetTeamsDistributeDirective final : public OMPLoopDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;
  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPTargetTeamsDistributeDirective(SourceLocation StartLoc,
                                    SourceLocation EndLoc,
                                    unsigned CollapsedNum)
      : OMPLoopDirective(OMPTargetTeamsDistributeDirectiveClass,
                         llvm::omp::OMPD_target_teams_distribute, StartLoc,
                         EndLoc, CollapsedNum) {}
  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPTargetTeamsDistributeDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPTargetTeamsDistributeDirectiveClass,
                         llvm::omp::OMPD_target_teams_distribute,
                         SourceLocation(), SourceLocation(), CollapsedNum) {}
public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPTargetTeamsDistributeDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);
  /// Creates an empty directive with the place for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  static OMPTargetTeamsDistributeDirective *
  CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum,
              EmptyShell);
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetTeamsDistributeDirectiveClass;
  }
};
/// This represents '#pragma omp target teams distribute parallel for' combined
/// directive.
///
/// \code
/// #pragma omp target teams distribute parallel for private(x)
/// \endcode
/// In this example directive '#pragma omp target teams distribute parallel
/// for' has clause 'private' with the variables 'x'
///
class OMPTargetTeamsDistributeParallelForDirective final
    : public OMPLoopDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;
  /// true if the construct has inner cancel directive.
  bool HasCancel = false;
  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPTargetTeamsDistributeParallelForDirective(SourceLocation StartLoc,
                                               SourceLocation EndLoc,
                                               unsigned CollapsedNum)
      : OMPLoopDirective(OMPTargetTeamsDistributeParallelForDirectiveClass,
                         llvm::omp::OMPD_target_teams_distribute_parallel_for,
                         StartLoc, EndLoc, CollapsedNum) {}
  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPTargetTeamsDistributeParallelForDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPTargetTeamsDistributeParallelForDirectiveClass,
                         llvm::omp::OMPD_target_teams_distribute_parallel_for,
                         SourceLocation(), SourceLocation(), CollapsedNum) {}
  /// Sets special task reduction descriptor.
  void setTaskReductionRefExpr(Expr *E) {
    Data->getChildren()[numLoopChildren(
        getLoopsNumber(),
        llvm::omp::OMPD_target_teams_distribute_parallel_for)] = E;
  }
  /// Set cancel state.
  void setHasCancel(bool Has) { HasCancel = Has; }
public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  /// \param TaskRedRef Task reduction special reference expression to handle
  /// taskgroup descriptor.
  /// \param HasCancel true if this directive has inner cancel directive.
  ///
  static OMPTargetTeamsDistributeParallelForDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs, Expr *TaskRedRef,
         bool HasCancel);
  /// Creates an empty directive with the place for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  static OMPTargetTeamsDistributeParallelForDirective *
  CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum,
              EmptyShell);
  /// Returns special task reduction reference expression.
  Expr *getTaskReductionRefExpr() {
    return cast_or_null<Expr>(Data->getChildren()[numLoopChildren(
        getLoopsNumber(),
        llvm::omp::OMPD_target_teams_distribute_parallel_for)]);
  }
  /// Returns special task reduction reference expression (const overload).
  const Expr *getTaskReductionRefExpr() const {
    return const_cast<OMPTargetTeamsDistributeParallelForDirective *>(this)
        ->getTaskReductionRefExpr();
  }
  /// Return true if current directive has inner cancel directive.
  bool hasCancel() const { return HasCancel; }
  static bool classof(const Stmt *T) {
    return T->getStmtClass() ==
           OMPTargetTeamsDistributeParallelForDirectiveClass;
  }
};
/// This represents '#pragma omp target teams distribute parallel for simd'
/// combined directive.
///
/// \code
/// #pragma omp target teams distribute parallel for simd private(x)
/// \endcode
/// In this example directive '#pragma omp target teams distribute parallel
/// for simd' has clause 'private' with the variables 'x'
///
class OMPTargetTeamsDistributeParallelForSimdDirective final
    : public OMPLoopDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;
  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPTargetTeamsDistributeParallelForSimdDirective(SourceLocation StartLoc,
                                                   SourceLocation EndLoc,
                                                   unsigned CollapsedNum)
      : OMPLoopDirective(
            OMPTargetTeamsDistributeParallelForSimdDirectiveClass,
            llvm::omp::OMPD_target_teams_distribute_parallel_for_simd, StartLoc,
            EndLoc, CollapsedNum) {}
  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPTargetTeamsDistributeParallelForSimdDirective(
      unsigned CollapsedNum)
      : OMPLoopDirective(
            OMPTargetTeamsDistributeParallelForSimdDirectiveClass,
            llvm::omp::OMPD_target_teams_distribute_parallel_for_simd,
            SourceLocation(), SourceLocation(), CollapsedNum) {}
public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPTargetTeamsDistributeParallelForSimdDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);
  /// Creates an empty directive with the place for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  static OMPTargetTeamsDistributeParallelForSimdDirective *
  CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum,
              EmptyShell);
  static bool classof(const Stmt *T) {
    return T->getStmtClass() ==
           OMPTargetTeamsDistributeParallelForSimdDirectiveClass;
  }
};
/// This represents '#pragma omp target teams distribute simd' combined
/// directive.
///
/// \code
/// #pragma omp target teams distribute simd private(x)
/// \endcode
/// In this example directive '#pragma omp target teams distribute simd'
/// has clause 'private' with the variables 'x'
///
class OMPTargetTeamsDistributeSimdDirective final : public OMPLoopDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;
  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPTargetTeamsDistributeSimdDirective(SourceLocation StartLoc,
                                        SourceLocation EndLoc,
                                        unsigned CollapsedNum)
      : OMPLoopDirective(OMPTargetTeamsDistributeSimdDirectiveClass,
                         llvm::omp::OMPD_target_teams_distribute_simd, StartLoc,
                         EndLoc, CollapsedNum) {}
  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPTargetTeamsDistributeSimdDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPTargetTeamsDistributeSimdDirectiveClass,
                         llvm::omp::OMPD_target_teams_distribute_simd,
                         SourceLocation(), SourceLocation(), CollapsedNum) {}
public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPTargetTeamsDistributeSimdDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);
  /// Creates an empty directive with the place for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  static OMPTargetTeamsDistributeSimdDirective *
  CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum,
              EmptyShell);
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetTeamsDistributeSimdDirectiveClass;
  }
};
/// This represents the '#pragma omp tile' loop transformation directive.
class OMPTileDirective final : public OMPLoopBasedDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;
  /// Offsets of child expressions stored in Data->getChildren().
  enum {
    PreInitsOffset = 0,
    TransformedStmtOffset,
  };
  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param NumLoops Number of associated loops.
  ///
  explicit OMPTileDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                            unsigned NumLoops)
      : OMPLoopBasedDirective(OMPTileDirectiveClass, llvm::omp::OMPD_tile,
                              StartLoc, EndLoc, NumLoops) {}
  /// Set the preinits statement for the loop nest.
  void setPreInits(Stmt *PreInits) {
    Data->getChildren()[PreInitsOffset] = PreInits;
  }
  /// Set the de-sugared loop nest produced by the tile transformation.
  void setTransformedStmt(Stmt *S) {
    Data->getChildren()[TransformedStmtOffset] = S;
  }
public:
  /// Create a new AST node representation for '#pragma omp tile'.
  ///
  /// \param C Context of the AST.
  /// \param StartLoc Location of the introducer (e.g. the 'omp' token).
  /// \param EndLoc Location of the directive's end (e.g. the tok::eod).
  /// \param Clauses The directive's clauses.
  /// \param NumLoops Number of associated loops (number of items in the
  /// 'sizes' clause).
  /// \param AssociatedStmt The outermost associated loop.
  /// \param TransformedStmt The loop nest after tiling, or nullptr in
  /// dependent contexts.
  /// \param PreInits Helper preinits statements for the loop nest.
  static OMPTileDirective *Create(const ASTContext &C, SourceLocation StartLoc,
                                  SourceLocation EndLoc,
                                  ArrayRef<OMPClause *> Clauses,
                                  unsigned NumLoops, Stmt *AssociatedStmt,
                                  Stmt *TransformedStmt, Stmt *PreInits);
  /// Build an empty '#pragma omp tile' AST node for deserialization.
  ///
  /// \param C Context of the AST.
  /// \param NumClauses Number of clauses to allocate.
  /// \param NumLoops Number of associated loops to allocate.
  static OMPTileDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses,
                                       unsigned NumLoops);
  /// Returns the number of associated loops.
  unsigned getNumAssociatedLoops() const { return getLoopsNumber(); }
  /// Gets/sets the associated loops after tiling.
  ///
  /// This is in de-sugared format stored as a CompoundStmt.
  ///
  /// \code
  /// for (...)
  ///   ...
  /// \endcode
  ///
  /// Note that if the generated loops become associated loops of another
  /// directive, they may need to be hoisted before them.
  Stmt *getTransformedStmt() const {
    return Data->getChildren()[TransformedStmtOffset];
  }
  /// Return preinits statement.
  Stmt *getPreInits() const { return Data->getChildren()[PreInitsOffset]; }
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTileDirectiveClass;
  }
};
/// This represents '#pragma omp scan' directive.
///
/// \code
/// #pragma omp scan inclusive(a)
/// \endcode
/// In this example directive '#pragma omp scan' has clause 'inclusive' with
/// list item 'a'.
class OMPScanDirective final : public OMPExecutableDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;
  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPScanDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPScanDirectiveClass, llvm::omp::OMPD_scan,
                               StartLoc, EndLoc) {}
  /// Build an empty directive.
  ///
  explicit OMPScanDirective()
      : OMPExecutableDirective(OMPScanDirectiveClass, llvm::omp::OMPD_scan,
                               SourceLocation(), SourceLocation()) {}
public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param Clauses List of clauses (at most a single 'inclusive' or
  /// 'exclusive' clause is allowed; see the class example above).
  ///
  static OMPScanDirective *Create(const ASTContext &C, SourceLocation StartLoc,
                                  SourceLocation EndLoc,
                                  ArrayRef<OMPClause *> Clauses);
  /// Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPScanDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses,
                                       EmptyShell);
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPScanDirectiveClass;
  }
};
/// This represents '#pragma omp interop' directive.
///
/// \code
/// #pragma omp interop init(target:obj) device(x) depend(inout:y) nowait
/// \endcode
/// In this example directive '#pragma omp interop' has
/// clauses 'init', 'device', 'depend' and 'nowait'.
///
class OMPInteropDirective final : public OMPExecutableDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;
  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPInteropDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPInteropDirectiveClass,
                               llvm::omp::OMPD_interop, StartLoc, EndLoc) {}
  /// Build an empty directive.
  ///
  explicit OMPInteropDirective()
      : OMPExecutableDirective(OMPInteropDirectiveClass,
                               llvm::omp::OMPD_interop, SourceLocation(),
                               SourceLocation()) {}
public:
  /// Creates directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive.
  /// \param EndLoc Ending location of the directive.
  /// \param Clauses The directive's clauses.
  ///
  static OMPInteropDirective *Create(const ASTContext &C,
                                     SourceLocation StartLoc,
                                     SourceLocation EndLoc,
                                     ArrayRef<OMPClause *> Clauses);
  /// Creates an empty directive with the place for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPInteropDirective *CreateEmpty(const ASTContext &C,
                                          unsigned NumClauses, EmptyShell);
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPInteropDirectiveClass;
  }
};
/// This represents '#pragma omp dispatch' directive.
///
/// \code
/// #pragma omp dispatch device(dnum)
/// \endcode
/// This example shows a directive '#pragma omp dispatch' with a
/// device clause with variable 'dnum'.
///
class OMPDispatchDirective final : public OMPExecutableDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;
  /// The location of the target-call.
  SourceLocation TargetCallLoc;
  /// Set the location of the target-call.
  void setTargetCallLoc(SourceLocation Loc) { TargetCallLoc = Loc; }
  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPDispatchDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPDispatchDirectiveClass,
                               llvm::omp::OMPD_dispatch, StartLoc, EndLoc) {}
  /// Build an empty directive.
  ///
  explicit OMPDispatchDirective()
      : OMPExecutableDirective(OMPDispatchDirectiveClass,
                               llvm::omp::OMPD_dispatch, SourceLocation(),
                               SourceLocation()) {}
public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param TargetCallLoc Location of the target-call.
  ///
  static OMPDispatchDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
         SourceLocation TargetCallLoc);
  /// Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPDispatchDirective *CreateEmpty(const ASTContext &C,
                                           unsigned NumClauses, EmptyShell);
  /// Return location of target-call.
  SourceLocation getTargetCallLoc() const { return TargetCallLoc; }
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPDispatchDirectiveClass;
  }
};
/// This represents '#pragma omp masked' directive.
/// \code
/// #pragma omp masked filter(tid)
/// \endcode
/// This example shows a directive '#pragma omp masked' with a filter clause
/// with variable 'tid'.
///
class OMPMaskedDirective final : public OMPExecutableDirective {
friend class ASTStmtReader;
friend class OMPExecutableDirective;
/// Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
///
OMPMaskedDirective(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPExecutableDirective(OMPMaskedDirectiveClass, llvm::omp::OMPD_masked,
StartLoc, EndLoc) {}
/// Build an empty directive.
///
explicit OMPMaskedDirective()
: OMPExecutableDirective(OMPMaskedDirectiveClass, llvm::omp::OMPD_masked,
SourceLocation(), SourceLocation()) {}
public:
/// Creates directive.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
///
static OMPMaskedDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);
/// Creates an empty directive.
///
/// \param C AST context.
/// \param NumClauses Number of clauses.
///
static OMPMaskedDirective *CreateEmpty(const ASTContext &C,
unsigned NumClauses, EmptyShell);
/// Support for LLVM-style RTTI (isa<>/dyn_cast<>).
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPMaskedDirectiveClass;
}
};
} // end namespace clang
#endif
|
laplace_mp.twoTargetsBasic.c | #include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <sys/time.h>
// grid size
#define GRIDY 2048
#define GRIDX 2048
#define MAX(X,Y) ((X) > (Y) ? (X) : (Y))
// smallest permitted change in temperature
#define MAX_TEMP_ERROR 0.02
double T_new[GRIDX+2][GRIDY+2]; // temperature grid
double T[GRIDX+2][GRIDY+2]; // temperature grid from last iteration
// initialisation routine
void init();
// Jacobi iteration for the steady-state heat equation on a (GRIDX x GRIDY)
// grid with fixed boundary conditions, offloaded to an accelerator with
// OpenMP target directives. Runs until the largest per-cell change drops
// below MAX_TEMP_ERROR or the iteration budget is exhausted.
int main(int argc, char *argv[]) {
int i, j; // grid indexes
int max_iterations; // maximal number of iterations
int iteration=1; // iteration
double dt=100; // largest change in temperature
struct timeval start_time, stop_time, elapsed_time; // timers
if(argc!=2) {
printf("Usage: %s number_of_iterations\n",argv[0]);
exit(1);
} else {
max_iterations=atoi(argv[1]);
}
gettimeofday(&start_time,NULL);
init();
// Keep T resident on the device for the whole while-loop (copied in at
// entry, copied back at exit) and give T_new device-only storage; only
// interior points of T_new are written before being read, so leaving it
// uninitialized via map(alloc) is safe.
#pragma omp target data map(tofrom:T) map(alloc:T_new)
// simulation iterations
while ( dt > MAX_TEMP_ERROR && iteration <= max_iterations ) {
// reset dt
dt = 0.0;
// main computational kernel, average over neighbours in the grid
#pragma omp target
#pragma omp teams distribute parallel for collapse(2)
for(i = 1; i <= GRIDX; i++)
for(j = 1; j <= GRIDY; j++)
T_new[i][j] = 0.25 * (T[i+1][j] + T[i-1][j] +
T[i][j+1] + T[i][j-1]);
// compute the largest change and copy T_new to T
// map(dt) defaults to tofrom, so the reduced maximum is copied back to
// the host each iteration for the convergence test below.
#pragma omp target map(dt)
#pragma omp teams distribute parallel for collapse(2) reduction(max:dt)
for(i = 1; i <= GRIDX; i++){
for(j = 1; j <= GRIDY; j++){
dt = MAX( fabs(T_new[i][j]-T[i][j]), dt);
T[i][j] = T_new[i][j];
}
}
// periodically print largest change
if((iteration % 100) == 0)
printf("Iteration %4.0d, dt %f\n",iteration,dt);
iteration++;
}
gettimeofday(&stop_time,NULL);
timersub(&stop_time, &start_time, &elapsed_time); // measure time
printf("Total time was %f seconds.\n", elapsed_time.tv_sec+elapsed_time.tv_usec/1000000.0);
return 0;
}
// initialize grid and boundary conditions
// Initialize the temperature grid: interior starts cold (0.0) and the
// fixed boundary conditions are written once. Right edge ramps linearly
// with the row index, bottom edge ramps linearly with the column index;
// left and top edges are held at zero.
void init(){
    int row, col;

    // start from an all-zero grid, halo rows/columns included
    for(row = 0; row <= GRIDX+1; row++)
        for(col = 0; col <= GRIDY+1; col++)
            T[row][col] = 0.0;

    // left side 0, right side a linear increase (never changes during the run)
    for(row = 0; row <= GRIDX+1; row++) {
        T[row][0] = 0.0;
        T[row][GRIDY+1] = (128.0/GRIDX)*row;
    }

    // top 0, bottom a linear increase
    for(col = 0; col <= GRIDY+1; col++) {
        T[0][col] = 0.0;
        T[GRIDX+1][col] = (128.0/GRIDY)*col;
    }
}
|
DataGen.h | // Copyright (C) 2019-2020 Zilliz. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed under the License
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
// or implied. See the License for the specific language governing permissions and limitations under the License
#pragma once
#include <boost/algorithm/string/predicate.hpp>
#include <cstring>
#include <memory>
#include <random>
#include <knowhere/index/vector_index/VecIndex.h>
#include <knowhere/index/vector_index/adapter/VectorAdapter.h>
#include <knowhere/index/vector_index/VecIndexFactory.h>
#include <knowhere/index/vector_index/IndexIVF.h>
#include "Constants.h"
#include "common/Schema.h"
#include "query/SearchOnIndex.h"
#include "segcore/SegmentGrowingImpl.h"
#include "segcore/SegmentSealedImpl.h"
#include "segcore/Utils.h"
#include "index/ScalarIndexSort.h"
#include "index/StringIndexSort.h"
using boost::algorithm::starts_with;
namespace milvus::segcore {
// Container for one batch of synthetic insert data produced by DataGen():
// per-row ids and timestamps plus the protobuf-encoded column data.
// NOTE(review): raw_ is a raw owning pointer released from a unique_ptr in
// DataGen() and never freed here — acceptable for test fixtures, but confirm
// no long-running test accumulates these.
struct GeneratedData {
std::vector<idx_t> row_ids_;
std::vector<Timestamp> timestamps_;
InsertData* raw_;
std::vector<FieldId> field_ids;
SchemaPtr schema_;
// Extract one column as a flat std::vector<T>. For vector fields the result
// holds num_rows * dim elements (float) or num_rows * dim/8 bytes (binary).
// T must match the field's storage type; the reinterpret_casts below are not
// checked against T.
template <typename T>
std::vector<T>
get_col(FieldId field_id) const {
std::vector<T> ret(raw_->num_rows());
for (auto target_field_data : raw_->fields_data()) {
// skip columns other than the requested one
if (field_id.get() != target_field_data.field_id()) {
continue;
}
auto& field_meta = schema_->operator[](field_id);
if (field_meta.is_vector()) {
if (field_meta.get_data_type() == DataType::VECTOR_FLOAT) {
// flat layout: dim consecutive floats per row
int len = raw_->num_rows() * field_meta.get_dim();
ret.resize(len);
auto src_data =
reinterpret_cast<const T*>(target_field_data.vectors().float_vector().data().data());
std::copy_n(src_data, len, ret.data());
} else if (field_meta.get_data_type() == DataType::VECTOR_BINARY) {
// binary vectors pack 8 dimensions per byte
int len = raw_->num_rows() * (field_meta.get_dim() / 8);
ret.resize(len);
auto src_data = reinterpret_cast<const T*>(target_field_data.vectors().binary_vector().data());
std::copy_n(src_data, len, ret.data());
} else {
PanicInfo("unsupported");
}
// NOTE(review): std::move on a local return disables NRVO; harmless
// but unnecessary.
return std::move(ret);
}
switch (field_meta.get_data_type()) {
case DataType::BOOL: {
auto src_data = reinterpret_cast<const T*>(target_field_data.scalars().bool_data().data().data());
std::copy_n(src_data, raw_->num_rows(), ret.data());
break;
}
case DataType::INT8:
case DataType::INT16:
case DataType::INT32: {
// 8/16/32-bit ints share the int_data field; presumably stored as
// int32 in the proto and narrowed on copy — verify against schema.
auto src_data =
reinterpret_cast<const int32_t*>(target_field_data.scalars().int_data().data().data());
std::copy_n(src_data, raw_->num_rows(), ret.data());
break;
}
case DataType::INT64: {
auto src_data = reinterpret_cast<const T*>(target_field_data.scalars().long_data().data().data());
std::copy_n(src_data, raw_->num_rows(), ret.data());
break;
}
case DataType::FLOAT: {
auto src_data = reinterpret_cast<const T*>(target_field_data.scalars().float_data().data().data());
std::copy_n(src_data, raw_->num_rows(), ret.data());
break;
}
case DataType::DOUBLE: {
auto src_data = reinterpret_cast<const T*>(target_field_data.scalars().double_data().data().data());
std::copy_n(src_data, raw_->num_rows(), ret.data());
break;
}
case DataType::VARCHAR: {
auto src_data = reinterpret_cast<const T*>(target_field_data.scalars().string_data().data().data());
std::copy_n(src_data, raw_->num_rows(), ret.data());
break;
}
default: {
PanicInfo("unsupported");
}
}
}
return std::move(ret);
}
// Return a copy of the raw DataArray for one column; panics if the field id
// is not present in raw_.
std::unique_ptr<DataArray>
get_col(FieldId field_id) const {
for (auto target_field_data : raw_->fields_data()) {
if (field_id.get() == target_field_data.field_id()) {
return std::make_unique<DataArray>(target_field_data);
}
}
PanicInfo("field id not find");
}
private:
// Only DataGen() may construct (and populate) instances.
GeneratedData() = default;
friend GeneratedData
DataGen(SchemaPtr schema, int64_t N, uint64_t seed, uint64_t ts_offset, int repeat_count);
};
// Generate N rows of deterministic pseudo-random data for every field in the
// schema. `seed` fixes the random streams, `ts_offset` shifts all row
// timestamps, and `repeat_count` makes INT64/VARCHAR values repeat in runs of
// that length (useful for testing grouping/dedup paths).
inline GeneratedData
DataGen(SchemaPtr schema, int64_t N, uint64_t seed = 42, uint64_t ts_offset = 0, int repeat_count = 1) {
using std::vector;
std::default_random_engine er(seed);
std::normal_distribution<> distr(0, 1);
// offset increments once per field so successive columns get shifted values
int offset = 0;
auto insert_data = std::make_unique<InsertData>();
// append one generated column to the InsertData proto
auto insert_cols = [&insert_data](auto& data, int64_t count, auto& field_meta) {
auto array = milvus::segcore::CreateDataArrayFrom(data.data(), count, field_meta);
insert_data->mutable_fields_data()->AddAllocated(array.release());
};
for (auto field_id : schema->get_field_ids()) {
auto field_meta = schema->operator[](field_id);
switch (field_meta.get_data_type()) {
case DataType::VECTOR_FLOAT: {
auto dim = field_meta.get_dim();
vector<float> final(dim * N);
// fields named "normalized*" are L2-normalized for inner-product tests
bool is_ip = starts_with(field_meta.get_name().get(), "normalized");
// each row seeds its own engine (seed + n), so results are independent
// of the parallel schedule
#pragma omp parallel for
for (int n = 0; n < N; ++n) {
vector<float> data(dim);
float sum = 0;
std::default_random_engine er2(seed + n);
std::normal_distribution<> distr2(0, 1);
for (auto& x : data) {
x = distr2(er2) + offset;
sum += x * x;
}
if (is_ip) {
sum = sqrt(sum);
for (auto& x : data) {
x /= sum;
}
}
std::copy(data.begin(), data.end(), final.begin() + dim * n);
}
insert_cols(final, N, field_meta);
break;
}
case DataType::VECTOR_BINARY: {
auto dim = field_meta.get_dim();
Assert(dim % 8 == 0);
// dim counts bits; storage is dim/8 random bytes per row
vector<uint8_t> data(dim / 8 * N);
for (auto& x : data) {
x = er();
}
insert_cols(data, N, field_meta);
break;
}
case DataType::INT64: {
vector<int64_t> data(N);
// sequential ids repeated in runs of repeat_count (0,0,...,1,1,...)
for (int i = 0; i < N; i++) {
data[i] = i / repeat_count;
}
insert_cols(data, N, field_meta);
break;
}
case DataType::INT32: {
vector<int> data(N);
for (auto& x : data) {
x = er() % (2 * N);
}
insert_cols(data, N, field_meta);
break;
}
case DataType::INT16: {
vector<int16_t> data(N);
for (auto& x : data) {
x = er() % (2 * N);
}
insert_cols(data, N, field_meta);
break;
}
case DataType::INT8: {
vector<int8_t> data(N);
for (auto& x : data) {
x = er() % (2 * N);
}
insert_cols(data, N, field_meta);
break;
}
case DataType::FLOAT: {
vector<float> data(N);
for (auto& x : data) {
x = distr(er);
}
insert_cols(data, N, field_meta);
break;
}
case DataType::DOUBLE: {
vector<double> data(N);
for (auto& x : data) {
x = distr(er);
}
insert_cols(data, N, field_meta);
break;
}
case DataType::VARCHAR: {
vector<std::string> data(N);
// same random string repeated repeat_count times in a row
for (int i = 0; i < N / repeat_count; i++) {
auto str = std::to_string(er());
for (int j = 0; j < repeat_count; j++) {
data[i * repeat_count + j] = str;
}
}
insert_cols(data, N, field_meta);
break;
}
default: {
throw std::runtime_error("unimplemented");
}
}
++offset;
}
GeneratedData res;
res.schema_ = schema;
// ownership of the InsertData proto transfers to res.raw_ (never freed here)
res.raw_ = insert_data.release();
res.raw_->set_num_rows(N);
for (int i = 0; i < N; ++i) {
res.row_ids_.push_back(i);
res.timestamps_.push_back(i + ts_offset);
}
return res;
}
// Build a serialized placeholder group of `num_queries` random float query
// vectors of dimension `dim`, drawn from N(0, 1) with the given seed.
inline auto
CreatePlaceholderGroup(int64_t num_queries, int dim, int64_t seed = 42) {
    namespace ser = milvus::proto::milvus;
    ser::PlaceholderGroup raw_group;
    auto placeholder = raw_group.add_placeholders();
    placeholder->set_tag("$0");
    placeholder->set_type(ser::PlaceholderType::FloatVector);
    std::normal_distribution<double> gauss(0, 1);
    std::default_random_engine engine(seed);
    for (int64_t q = 0; q < num_queries; ++q) {
        std::vector<float> query(dim);
        for (auto& component : query) {
            component = gauss(engine);
        }
        placeholder->add_values(query.data(), query.size() * sizeof(float));
    }
    return raw_group;
}
// Build a serialized placeholder group of `num_queries` float query vectors
// of dimension `dim`, read consecutively from the `src` blob.
inline auto
CreatePlaceholderGroupFromBlob(int64_t num_queries, int dim, const float* src) {
    namespace ser = milvus::proto::milvus;
    ser::PlaceholderGroup raw_group;
    auto placeholder = raw_group.add_placeholders();
    placeholder->set_tag("$0");
    placeholder->set_type(ser::PlaceholderType::FloatVector);
    const float* cursor = src;
    for (int64_t q = 0; q < num_queries; ++q) {
        std::vector<float> query(cursor, cursor + dim);
        cursor += dim;
        placeholder->add_values(query.data(), query.size() * sizeof(float));
    }
    return raw_group;
}
// Build a serialized placeholder group of `num_queries` random binary query
// vectors; `dim` counts bits and must be a multiple of 8.
inline auto
CreateBinaryPlaceholderGroup(int64_t num_queries, int64_t dim, int64_t seed = 42) {
    assert(dim % 8 == 0);
    namespace ser = milvus::proto::milvus;
    ser::PlaceholderGroup raw_group;
    auto placeholder = raw_group.add_placeholders();
    placeholder->set_tag("$0");
    placeholder->set_type(ser::PlaceholderType::BinaryVector);
    std::default_random_engine engine(seed);
    const int64_t bytes_per_vector = dim / 8;
    for (int64_t q = 0; q < num_queries; ++q) {
        std::vector<uint8_t> query(bytes_per_vector);
        for (auto& byte : query) {
            byte = engine();
        }
        placeholder->add_values(query.data(), query.size());
    }
    return raw_group;
}
// Build a serialized placeholder group of `num_queries` binary query vectors
// read consecutively from `ptr`; `dim` counts bits and must be a multiple of 8.
inline auto
CreateBinaryPlaceholderGroupFromBlob(int64_t num_queries, int64_t dim, const uint8_t* ptr) {
    assert(dim % 8 == 0);
    namespace ser = milvus::proto::milvus;
    ser::PlaceholderGroup raw_group;
    auto placeholder = raw_group.add_placeholders();
    placeholder->set_tag("$0");
    placeholder->set_type(ser::PlaceholderType::BinaryVector);
    const int64_t bytes_per_vector = dim / 8;
    for (int64_t q = 0; q < num_queries; ++q) {
        std::vector<uint8_t> query(ptr, ptr + bytes_per_vector);
        ptr += bytes_per_vector;
        placeholder->add_values(query.data(), query.size());
    }
    return raw_group;
}
// Render a SearchResult as JSON: one string array per query, each entry
// formatted "<segment offset>-><distance>" for that query's top-k hits.
// Fixes: removed the stray ';' after the function body (flagged by
// -Wextra-semi) and widened the loop indices to int64_t to match the
// num_queries/topk bounds.
inline json
SearchResultToJson(const SearchResult& sr) {
    int64_t num_queries = sr.num_queries_;
    int64_t topk = sr.topk_;
    std::vector<std::vector<std::string>> results;
    results.reserve(num_queries);
    for (int64_t q = 0; q < num_queries; ++q) {
        std::vector<std::string> result;
        result.reserve(topk);
        for (int64_t k = 0; k < topk; ++k) {
            int64_t index = q * topk + k;
            result.emplace_back(std::to_string(sr.seg_offsets_[index]) + "->" + std::to_string(sr.distances_[index]));
        }
        results.emplace_back(std::move(result));
    }
    return json{results};
}
// Load a GeneratedData batch into a sealed segment: the synthetic RowID and
// Timestamp system columns first, then every user column from the raw proto.
inline void
SealedLoader(const GeneratedData& dataset, SegmentSealed& seg) {
// TODO
auto row_count = dataset.row_ids_.size();
{
// system column: row ids
LoadFieldDataInfo info;
FieldMeta field_meta(FieldName("RowID"), RowFieldID, DataType::INT64);
auto array = CreateScalarDataArrayFrom(dataset.row_ids_.data(), row_count, field_meta);
// NOTE(review): array.release() transfers ownership to info.field_data;
// presumably seg.LoadFieldData takes ownership or copies — verify, else
// this leaks.
info.field_data = array.release();
info.row_count = dataset.row_ids_.size();
info.field_id = RowFieldID.get(); // field id for RowId
seg.LoadFieldData(info);
}
{
// system column: timestamps
LoadFieldDataInfo info;
FieldMeta field_meta(FieldName("Timestamp"), TimestampFieldID, DataType::INT64);
auto array = CreateScalarDataArrayFrom(dataset.timestamps_.data(), row_count, field_meta);
info.field_data = array.release();
info.row_count = dataset.timestamps_.size();
info.field_id = TimestampFieldID.get();
seg.LoadFieldData(info);
}
// user columns straight from the generated proto
// NOTE(review): field_data is a by-value copy per iteration, so the
// &field_data pointer is only valid for the duration of this LoadFieldData
// call — fine if the segment copies the data synchronously; confirm.
for (auto field_data : dataset.raw_->fields_data()) {
LoadFieldDataInfo info;
info.field_id = field_data.field_id();
info.row_count = row_count;
info.field_data = &field_data;
seg.LoadFieldData(info);
}
}
// Convenience factory: create an empty sealed segment, populate it with the
// generated dataset, then attach the prebuilt vector index.
inline std::unique_ptr<SegmentSealed>
SealedCreator(SchemaPtr schema, const GeneratedData& dataset, const LoadIndexInfo& index_info) {
    auto sealed = CreateSealedSegment(schema);
    SealedLoader(dataset, *sealed);
    sealed->LoadIndex(index_info);
    return sealed;
}
// Train an IVF index (L2 metric, nlist=1024) over N float vectors of
// dimension `dim` and add the vectors to it (no explicit ids).
inline knowhere::VecIndexPtr
GenIndexing(int64_t N, int64_t dim, const float* vec) {
    knowhere::Config conf{{knowhere::meta::DIM, dim},
                          {knowhere::IndexParams::nlist, 1024},
                          {knowhere::Metric::TYPE, knowhere::Metric::L2},
                          {knowhere::meta::DEVICEID, 0}};
    auto train_set = knowhere::GenDataset(N, dim, vec);
    auto index = std::make_shared<knowhere::IVF>();
    index->Train(train_set, conf);
    index->AddWithoutIds(train_set, conf);
    return index;
}
// Build a sorted scalar index over N values: strings use the dedicated
// string index, all other types the generic scalar sort index. Build() is
// invoked on the concrete index type in both branches.
template <typename T>
inline scalar::IndexBasePtr
GenScalarIndexing(int64_t N, const T* data) {
    if constexpr (std::is_same_v<T, std::string>) {
        auto index = scalar::CreateStringIndexSort();
        index->Build(N, data);
        return index;
    }
    auto index = scalar::CreateScalarIndexSort<T>();
    index->Build(N, data);
    return index;
}
} // namespace milvus::segcore
|
mixed_tentusscher_myo_epi_2004_S3_12.c | // Scenario 3 - Mixed-Model TenTusscher 2004 (Myocardium + Epicardium)
// (AP + max:dvdt + Rc)
#include <stdio.h>
#include "mixed_tentusscher_myo_epi_2004_S3_12.h"
// Entry point generated by GET_CELL_MODEL_DATA: reports the model's resting
// potential and the size of its ODE system, each only when requested by the
// corresponding flag.
GET_CELL_MODEL_DATA(init_cell_model_data)
{
// resting membrane potential used as the default initial condition
if(get_initial_v)
cell_model->initial_v = INITIAL_V;
// number of state variables in the ODE system
if(get_neq)
cell_model->number_of_ode_equations = NEQ;
}
// Initialize the state vector of one cell. The cell type (0 = myocardium,
// otherwise epicardium) is selected through the mapping array passed in
// extra_data, indexed by the global cell id sv_id. Steady-state values are
// used instead of the textbook defaults (kept commented for reference).
SET_ODE_INITIAL_CONDITIONS_CPU(set_model_initial_conditions_cpu)
{
static bool first_call = true;
if(first_call)
{
print_to_stdout_and_file("Using mixed version of TenTusscher 2004 myocardium + epicardium CPU model\n");
first_call = false;
}
// Get the mapping array
uint32_t *mapping = NULL;
if(extra_data)
{
mapping = (uint32_t*)extra_data;
}
else
{
// a mixed model cannot run without a per-cell type mask
print_to_stderr_and_file_and_exit("You need to specify a mask function when using a mixed model!\n");
}
// Initial conditions for TenTusscher myocardium
if (mapping[sv_id] == 0)
{
// Default initial conditions
/*
sv[0] = INITIAL_V; // V; millivolt
sv[1] = 0.f; //M
sv[2] = 0.75; //H
sv[3] = 0.75f; //J
sv[4] = 0.f; //Xr1
sv[5] = 1.f; //Xr2
sv[6] = 0.f; //Xs
sv[7] = 1.f; //S
sv[8] = 0.f; //R
sv[9] = 0.f; //D
sv[10] = 1.f; //F
sv[11] = 1.f; //FCa
sv[12] = 1.f; //G
sv[13] = 0.0002; //Cai
sv[14] = 0.2f; //CaSR
sv[15] = 11.6f; //Nai
sv[16] = 138.3f; //Ki
*/
// Elnaz's steady-state initial conditions
real sv_sst[]={-86.3965119057144,0.00133824305081220,0.775463576993407,0.775278393595599,0.000179499343643571,0.483303039835057,0.00297647859235379,0.999998290403642,1.98961879737287e-08,1.93486789479597e-05,0.999599147019885,1.00646342475688,0.999975178010127,5.97703651642618e-05,0.418325344820368,10.7429775420171,138.918155900633};
for (uint32_t i = 0; i < NEQ; i++)
sv[i] = sv_sst[i];
}
// Initial conditions for TenTusscher epicardium
else
{
// Default initial conditions
/*
sv[0] = INITIAL_V; // V; millivolt
sv[1] = 0.f; //M
sv[2] = 0.75; //H
sv[3] = 0.75f; //J
sv[4] = 0.f; //Xr1
sv[5] = 1.f; //Xr2
sv[6] = 0.f; //Xs
sv[7] = 1.f; //S
sv[8] = 0.f; //R
sv[9] = 0.f; //D
sv[10] = 1.f; //F
sv[11] = 1.f; //FCa
sv[12] = 1.f; //G
sv[13] = 0.0002; //Cai
sv[14] = 0.2f; //CaSR
sv[15] = 11.6f; //Nai
sv[16] = 138.3f; //Ki
*/
// Elnaz's steady-state initial conditions
real sv_sst[]={-86.6659643096818,0.00126342859487541,0.782100262764972,0.781978091983927,0.000172373983216983,0.486107163112887,0.00291990687735242,0.999998380250089,1.90224754407316e-08,1.86658961737956e-05,0.999770013320589,1.00741339003808,0.999998449667419,3.76483988014377e-05,0.470997021889879,10.7143996824072,138.907396963001};
for (uint32_t i = 0; i < NEQ; i++)
sv[i] = sv_sst[i];
}
}
// Advance every requested cell num_steps explicit steps of size dt,
// dispatching each cell to the myocardium (mapping == 0) or epicardium
// right-hand side according to the per-cell mask in extra_data.
SOLVE_MODEL_ODES_CPU(solve_model_odes_cpu)
{
    // Get the mapping array
    uint32_t *mapping = NULL;
    if(extra_data)
    {
        mapping = (uint32_t*)extra_data;
    }
    else
    {
        print_to_stderr_and_file_and_exit("You need to specify a mask function when using a mixed model!\n");
    }

    uint32_t sv_id;
    int i;

    // each iteration touches a distinct sv block, so cells parallelize freely
    #pragma omp parallel for private(sv_id)
    for (i = 0; i < num_cells_to_solve; i++)
    {
        if(cells_to_solve)
            sv_id = cells_to_solve[i];
        else
            sv_id = (uint32_t )i;

        for (int j = 0; j < num_steps; ++j)
        {
            // BUGFIX: index the cell-type mask with the global cell id sv_id,
            // matching set_model_initial_conditions_cpu. The previous code
            // used the local loop index i, which selects the wrong cell type
            // whenever cells_to_solve remaps local indices to global ids.
            if (mapping[sv_id] == 0)
                solve_model_ode_cpu_myo(dt, sv + (sv_id * NEQ), stim_currents[i]);
            else
                solve_model_ode_cpu_epi(dt, sv + (sv_id * NEQ), stim_currents[i]);
        }
    }
}
// One time step of the myocardium model: snapshot the current state,
// evaluate the update rule, and write the new state back into sv.
void solve_model_ode_cpu_myo (real dt, real *sv, real stim_current)
{
    real state[NEQ], updated[NEQ];
    int k;

    for(k = 0; k < NEQ; k++)
        state[k] = sv[k];

    RHS_cpu_myo(state, updated, stim_current, dt);

    for(k = 0; k < NEQ; k++)
        sv[k] = updated[k];
}
// Right-hand side of the ten Tusscher 2004 myocardium cell model.
// NOTE: despite the rDY_ name, this routine returns UPDATED STATE VALUES,
// not derivatives: gates use an exponential (Rush-Larsen-style) update
// X_INF-(X_INF-x)*exp(-dt/TAU_X), concentrations are advanced in place with
// forward Euler, and rDY_[0] is svolt + dt*(-sItot).
void RHS_cpu_myo(const real *sv, real *rDY_, real stim_current, real dt)
{
// State variables
real svolt = sv[0];
real sm = sv[1];
real sh = sv[2];
real sj = sv[3];
real sxr1 = sv[4];
real sxr2 = sv[5];
real sxs = sv[6];
real ss = sv[7];
real sr = sv[8];
real sd = sv[9];
real sf = sv[10];
real sfca = sv[11];
real sg = sv[12];
real Cai = sv[13];
real CaSR = sv[14];
real Nai = sv[15];
real Ki = sv[16];
//External concentrations
real Ko=5.4;
real Cao=2.0;
real Nao=140.0;
//Intracellular volumes
real Vc=0.016404;
real Vsr=0.001094;
//Calcium dynamics
real Bufc=0.15f;
real Kbufc=0.001f;
real Bufsr=10.f;
real Kbufsr=0.3f;
real taufca=2.f;
real taug=2.f;
real Vmaxup=0.000425f;
real Kup=0.00025f;
//Constants
const real R = 8314.472f;
const real F = 96485.3415f;
const real T =310.0f;
real RTONF =(R*T)/F;
//Cellular capacitance
real CAPACITANCE=0.185;
//Parameters for currents
//Parameters for IKr
real Gkr=0.096;
//Parameters for Iks
real pKNa=0.03;
// [!] Myocardium cell
real Gks=0.062;
//Parameters for Ik1
real GK1=5.405;
//Parameters for Ito
// [!] Myocardium cell
real Gto=0.294;
//Parameters for INa
real GNa=14.838;
//Parameters for IbNa
real GbNa=0.00029;
//Parameters for INaK
real KmK=1.0;
real KmNa=40.0;
real knak=1.362;
//Parameters for ICaL
real GCaL=0.000175;
//Parameters for IbCa
real GbCa=0.000592;
//Parameters for INaCa
real knaca=1000;
real KmNai=87.5;
real KmCa=1.38;
real ksat=0.1;
real n=0.35;
//Parameters for IpCa
real GpCa=0.825;
real KpCa=0.0005;
//Parameters for IpK;
real GpK=0.0146;
// scratch variables: membrane currents
real IKr;
real IKs;
real IK1;
real Ito;
real INa;
real IbNa;
real ICaL;
real IbCa;
real INaCa;
real IpCa;
real IpK;
real INaK;
real Irel;
real Ileak;
real dNai;
real dKi;
real dCai;
real dCaSR;
real A;
// real BufferFactorc;
// real BufferFactorsr;
real SERCA;
real Caisquare;
real CaSRsquare;
real CaCurrent;
real CaSRCurrent;
real fcaold;
real gold;
real Ek;
real Ena;
real Eks;
real Eca;
real CaCSQN;
real bjsr;
real cjsr;
real CaBuf;
real bc;
real cc;
real Ak1;
real Bk1;
real rec_iK1;
real rec_ipK;
real rec_iNaK;
// scratch variables: gate rate constants, steady states and time constants
real AM;
real BM;
real AH_1;
real BH_1;
real AH_2;
real BH_2;
real AJ_1;
real BJ_1;
real AJ_2;
real BJ_2;
real M_INF;
real H_INF;
real J_INF;
real TAU_M;
real TAU_H;
real TAU_J;
real axr1;
real bxr1;
real axr2;
real bxr2;
real Xr1_INF;
real Xr2_INF;
real TAU_Xr1;
real TAU_Xr2;
real Axs;
real Bxs;
real Xs_INF;
real TAU_Xs;
real R_INF;
real TAU_R;
real S_INF;
real TAU_S;
real Ad;
real Bd;
real Cd;
real TAU_D;
real D_INF;
real TAU_F;
real F_INF;
real FCa_INF;
real G_INF;
real inverseVcF2=1/(2*Vc*F);
real inverseVcF=1./(Vc*F);
real Kupsquare=Kup*Kup;
// real BufcKbufc=Bufc*Kbufc;
// real Kbufcsquare=Kbufc*Kbufc;
// real Kbufc2=2*Kbufc;
// real BufsrKbufsr=Bufsr*Kbufsr;
// const real Kbufsrsquare=Kbufsr*Kbufsr;
// const real Kbufsr2=2*Kbufsr;
const real exptaufca=exp(-dt/taufca);
const real exptaug=exp(-dt/taug);
real sItot;
//Needed to compute currents
Ek=RTONF*(log((Ko/Ki)));
Ena=RTONF*(log((Nao/Nai)));
Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
Eca=0.5*RTONF*(log((Cao/Cai)));
Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
rec_iK1=Ak1/(Ak1+Bk1);
rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
rec_ipK=1./(1.+exp((25-svolt)/5.98));
//Compute currents
INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
(exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
Ito=Gto*sr*ss*(svolt-Ek);
IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
IKs=Gks*sxs*sxs*(svolt-Eks);
IK1=GK1*rec_iK1*(svolt-Ek);
INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
(1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
(exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
IpCa=GpCa*Cai/(KpCa+Cai);
IpK=GpK*rec_ipK*(svolt-Ek);
IbNa=GbNa*(svolt-Ena);
IbCa=GbCa*(svolt-Eca);
//Determine total current
(sItot) = IKr +
IKs +
IK1 +
Ito +
INa +
IbNa +
ICaL +
IbCa +
INaK +
INaCa +
IpCa +
IpK +
stim_current;
//update concentrations
Caisquare=Cai*Cai;
CaSRsquare=CaSR*CaSR;
CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
Irel=A*sd*sg;
Ileak=0.00008f*(CaSR-Cai);
SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
CaSRCurrent=SERCA-Irel-Ileak;
CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
// analytic solution of the quadratic buffering equation for SR calcium
CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
CaBuf=Bufc*Cai/(Cai+Kbufc);
dCai=dt*(CaCurrent-CaSRCurrent);
bc=Bufc-CaBuf-dCai-Cai+Kbufc;
cc=Kbufc*(CaBuf+dCai+Cai);
Cai=(sqrt(bc*bc+4*cc)-bc)/2;
dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
Nai+=dt*dNai;
dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
Ki+=dt*dKi;
//compute steady state values and time constants
AM=1./(1.+exp((-60.-svolt)/5.));
BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
TAU_M=AM*BM;
M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
if (svolt>=-40.)
{
AH_1=0.;
BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
TAU_H= 1.0/(AH_1+BH_1);
}
else
{
AH_2=(0.057*exp(-(svolt+80.)/6.8));
BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
TAU_H=1.0/(AH_2+BH_2);
}
H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
if(svolt>=-40.)
{
AJ_1=0.;
BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
TAU_J= 1.0/(AJ_1+BJ_1);
}
else
{
AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
exp(-0.04391*svolt))*(svolt+37.78)/
(1.+exp(0.311*(svolt+79.23))));
BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
TAU_J= 1.0/(AJ_2+BJ_2);
}
J_INF=H_INF;
Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
axr1=450./(1.+exp((-45.-svolt)/10.));
bxr1=6./(1.+exp((svolt-(-30.))/11.5));
TAU_Xr1=axr1*bxr1;
Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
axr2=3./(1.+exp((-60.-svolt)/20.));
bxr2=1.12/(1.+exp((svolt-60.)/20.));
TAU_Xr2=axr2*bxr2;
Xs_INF=1./(1.+exp((-5.-svolt)/14.));
Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
Bxs=1./(1.+exp((svolt-60.)/20.));
TAU_Xs=Axs*Bxs;
// [!] Myocardium cell
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
D_INF=1./(1.+exp((-5-svolt)/7.5));
Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
Bd=1.4/(1.+exp((svolt+5)/5));
Cd=1./(1.+exp((50-svolt)/20));
TAU_D=Ad*Bd+Cd;
F_INF=1./(1.+exp((svolt+20)/7));
//TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10));
TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML
FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
0.1/(1.+exp((Cai-0.0005)/0.0001))+
0.20/(1.+exp((Cai-0.00075)/0.0008))+
0.23 )/1.46;
if(Cai<0.00035)
G_INF=1./(1.+pow((Cai/0.00035),6));
else
G_INF=1./(1.+pow((Cai/0.00035),16));
//Update gates
rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
// fCa and g gates may only move toward their steady state while the cell
// is depolarized above -37 mV; otherwise the old value is kept
fcaold= sfca;
sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
if(sfca>fcaold && (svolt)>-37.0)
sfca = fcaold;
gold = sg;
sg = G_INF-(G_INF-sg)*exptaug;
if(sg>gold && (svolt)>-37.0)
sg=gold;
//update voltage
rDY_[0] = svolt + dt*(-sItot);
rDY_[11] = sfca;
rDY_[12] = sg;
rDY_[13] = Cai;
rDY_[14] = CaSR;
rDY_[15] = Nai;
rDY_[16] = Ki;
}
// One time step of the epicardium model: snapshot the current state,
// evaluate the update rule, and write the new state back into sv.
void solve_model_ode_cpu_epi (real dt, real *sv, real stim_current)
{
    real state[NEQ], updated[NEQ];
    int k;

    for(k = 0; k < NEQ; k++)
        state[k] = sv[k];

    RHS_cpu_epi(state, updated, stim_current, dt);

    for(k = 0; k < NEQ; k++)
        sv[k] = updated[k];
}
void RHS_cpu_epi(const real *sv, real *rDY_, real stim_current, real dt)
{
// State variables
real svolt = sv[0];
real sm = sv[1];
real sh = sv[2];
real sj = sv[3];
real sxr1 = sv[4];
real sxr2 = sv[5];
real sxs = sv[6];
real ss = sv[7];
real sr = sv[8];
real sd = sv[9];
real sf = sv[10];
real sfca = sv[11];
real sg = sv[12];
real Cai = sv[13];
real CaSR = sv[14];
real Nai = sv[15];
real Ki = sv[16];
//External concentrations
real Ko=5.4;
real Cao=2.0;
real Nao=140.0;
//Intracellular volumes
real Vc=0.016404;
real Vsr=0.001094;
//Calcium dynamics
real Bufc=0.15f;
real Kbufc=0.001f;
real Bufsr=10.f;
real Kbufsr=0.3f;
real taufca=2.f;
real taug=2.f;
real Vmaxup=0.000425f;
real Kup=0.00025f;
//Constants
const real R = 8314.472f;
const real F = 96485.3415f;
const real T =310.0f;
real RTONF =(R*T)/F;
//Cellular capacitance
real CAPACITANCE=0.185;
//Parameters for currents
//Parameters for IKr
real Gkr=0.096;
//Parameters for Iks
real pKNa=0.03;
// [!] Epicardium cell
real Gks=0.245;
//Parameters for Ik1
real GK1=5.405;
//Parameters for Ito
// [!] Epicardium cell
real Gto=0.294;
//Parameters for INa
real GNa=14.838;
//Parameters for IbNa
real GbNa=0.00029;
//Parameters for INaK
real KmK=1.0;
real KmNa=40.0;
real knak=1.362;
//Parameters for ICaL
real GCaL=0.000175;
//Parameters for IbCa
real GbCa=0.000592;
//Parameters for INaCa
real knaca=1000;
real KmNai=87.5;
real KmCa=1.38;
real ksat=0.1;
real n=0.35;
//Parameters for IpCa
real GpCa=0.825;
real KpCa=0.0005;
//Parameters for IpK;
real GpK=0.0146;
real parameters []={14.5122469946898,0.000289049813785555,0.000138099248415042,6.23108182726077e-05,0.235920184817430,0.137885432304245,0.171858104323313,4.54207690553048,0.0146701313967813,1.02382441517950,1099.87497959849,0.000601753938706630,0.327493680344098,0.0188930125219796,0.00506656306041461,4.49126756029006e-05};
GNa=parameters[0];
GbNa=parameters[1];
GCaL=parameters[2];
GbCa=parameters[3];
Gto=parameters[4];
Gkr=parameters[5];
Gks=parameters[6];
GK1=parameters[7];
GpK=parameters[8];
knak=parameters[9];
knaca=parameters[10];
Vmaxup=parameters[11];
GpCa=parameters[12];
real arel=parameters[13];
real crel=parameters[14];
real Vleak=parameters[15];
real IKr;
real IKs;
real IK1;
real Ito;
real INa;
real IbNa;
real ICaL;
real IbCa;
real INaCa;
real IpCa;
real IpK;
real INaK;
real Irel;
real Ileak;
real dNai;
real dKi;
real dCai;
real dCaSR;
real A;
// real BufferFactorc;
// real BufferFactorsr;
real SERCA;
real Caisquare;
real CaSRsquare;
real CaCurrent;
real CaSRCurrent;
real fcaold;
real gold;
real Ek;
real Ena;
real Eks;
real Eca;
real CaCSQN;
real bjsr;
real cjsr;
real CaBuf;
real bc;
real cc;
real Ak1;
real Bk1;
real rec_iK1;
real rec_ipK;
real rec_iNaK;
real AM;
real BM;
real AH_1;
real BH_1;
real AH_2;
real BH_2;
real AJ_1;
real BJ_1;
real AJ_2;
real BJ_2;
real M_INF;
real H_INF;
real J_INF;
real TAU_M;
real TAU_H;
real TAU_J;
real axr1;
real bxr1;
real axr2;
real bxr2;
real Xr1_INF;
real Xr2_INF;
real TAU_Xr1;
real TAU_Xr2;
real Axs;
real Bxs;
real Xs_INF;
real TAU_Xs;
real R_INF;
real TAU_R;
real S_INF;
real TAU_S;
real Ad;
real Bd;
real Cd;
real TAU_D;
real D_INF;
real TAU_F;
real F_INF;
real FCa_INF;
real G_INF;
real inverseVcF2=1/(2*Vc*F);
real inverseVcF=1./(Vc*F);
real Kupsquare=Kup*Kup;
// real BufcKbufc=Bufc*Kbufc;
// real Kbufcsquare=Kbufc*Kbufc;
// real Kbufc2=2*Kbufc;
// real BufsrKbufsr=Bufsr*Kbufsr;
// const real Kbufsrsquare=Kbufsr*Kbufsr;
// const real Kbufsr2=2*Kbufsr;
const real exptaufca=exp(-dt/taufca);
const real exptaug=exp(-dt/taug);
real sItot;
//Needed to compute currents
Ek=RTONF*(log((Ko/Ki)));
Ena=RTONF*(log((Nao/Nai)));
Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
Eca=0.5*RTONF*(log((Cao/Cai)));
Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
rec_iK1=Ak1/(Ak1+Bk1);
rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
rec_ipK=1./(1.+exp((25-svolt)/5.98));
//Compute currents
INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
(exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
Ito=Gto*sr*ss*(svolt-Ek);
IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
IKs=Gks*sxs*sxs*(svolt-Eks);
IK1=GK1*rec_iK1*(svolt-Ek);
INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
(1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
(exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
IpCa=GpCa*Cai/(KpCa+Cai);
IpK=GpK*rec_ipK*(svolt-Ek);
IbNa=GbNa*(svolt-Ena);
IbCa=GbCa*(svolt-Eca);
//Determine total current
(sItot) = IKr +
IKs +
IK1 +
Ito +
INa +
IbNa +
ICaL +
IbCa +
INaK +
INaCa +
IpCa +
IpK +
stim_current;
//update concentrations
Caisquare=Cai*Cai;
CaSRsquare=CaSR*CaSR;
CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel;
Irel=A*sd*sg;
Ileak=Vleak*(CaSR-Cai);
SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
CaSRCurrent=SERCA-Irel-Ileak;
CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
CaBuf=Bufc*Cai/(Cai+Kbufc);
dCai=dt*(CaCurrent-CaSRCurrent);
bc=Bufc-CaBuf-dCai-Cai+Kbufc;
cc=Kbufc*(CaBuf+dCai+Cai);
Cai=(sqrt(bc*bc+4*cc)-bc)/2;
dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
Nai+=dt*dNai;
dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
Ki+=dt*dKi;
//compute steady state values and time constants
AM=1./(1.+exp((-60.-svolt)/5.));
BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
TAU_M=AM*BM;
M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
if (svolt>=-40.)
{
AH_1=0.;
BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
TAU_H= 1.0/(AH_1+BH_1);
}
else
{
AH_2=(0.057*exp(-(svolt+80.)/6.8));
BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
TAU_H=1.0/(AH_2+BH_2);
}
H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
if(svolt>=-40.)
{
AJ_1=0.;
BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
TAU_J= 1.0/(AJ_1+BJ_1);
}
else
{
AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
exp(-0.04391*svolt))*(svolt+37.78)/
(1.+exp(0.311*(svolt+79.23))));
BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
TAU_J= 1.0/(AJ_2+BJ_2);
}
J_INF=H_INF;
Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
axr1=450./(1.+exp((-45.-svolt)/10.));
bxr1=6./(1.+exp((svolt-(-30.))/11.5));
TAU_Xr1=axr1*bxr1;
Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
axr2=3./(1.+exp((-60.-svolt)/20.));
bxr2=1.12/(1.+exp((svolt-60.)/20.));
TAU_Xr2=axr2*bxr2;
Xs_INF=1./(1.+exp((-5.-svolt)/14.));
Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
Bxs=1./(1.+exp((svolt-60.)/20.));
TAU_Xs=Axs*Bxs;
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
D_INF=1./(1.+exp((-5-svolt)/7.5));
Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
Bd=1.4/(1.+exp((svolt+5)/5));
Cd=1./(1.+exp((50-svolt)/20));
TAU_D=Ad*Bd+Cd;
F_INF=1./(1.+exp((svolt+20)/7));
//TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10));
TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML
FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
0.1/(1.+exp((Cai-0.0005)/0.0001))+
0.20/(1.+exp((Cai-0.00075)/0.0008))+
0.23 )/1.46;
if(Cai<0.00035)
G_INF=1./(1.+pow((Cai/0.00035),6));
else
G_INF=1./(1.+pow((Cai/0.00035),16));
//Update gates
rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
fcaold= sfca;
sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
if(sfca>fcaold && (svolt)>-37.0)
sfca = fcaold;
gold = sg;
sg = G_INF-(G_INF-sg)*exptaug;
if(sg>gold && (svolt)>-37.0)
sg=gold;
//update voltage
rDY_[0] = svolt + dt*(-sItot);
rDY_[11] = sfca;
rDY_[12] = sg;
rDY_[13] = Cai;
rDY_[14] = CaSR;
rDY_[15] = Nai;
rDY_[16] = Ki;
}
|
2356.c | /* POLYBENCH/GPU-OPENMP
*
* This file is a part of the Polybench/GPU-OpenMP suite
*
* Contact:
* William Killian <killian@udel.edu>
*
* Copyright 2013, The University of Delaware
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
/* Default data type is double, default size is 4000. */
#include "3mm.h"
/* Array initialization. */
/* Fill A, B, C and D with deterministic synthetic values so every run is
 * reproducible. Formulas match the PolyBench reference generator. */
static
void init_array(int ni, int nj, int nk, int nl, int nm,
                DATA_TYPE POLYBENCH_2D(A,NI,NK,ni,nk),
                DATA_TYPE POLYBENCH_2D(B,NK,NJ,nk,nj),
                DATA_TYPE POLYBENCH_2D(C,NJ,NM,nj,nm),
                DATA_TYPE POLYBENCH_2D(D,NM,NL,nm,nl))
{
  int r, c;

  /* A is ni x nk. */
  for (r = 0; r < ni; r++)
    for (c = 0; c < nk; c++)
      A[r][c] = ((DATA_TYPE) r*c) / ni;
  /* B is nk x nj. */
  for (r = 0; r < nk; r++)
    for (c = 0; c < nj; c++)
      B[r][c] = ((DATA_TYPE) r*(c+1)) / nj;
  /* C is nj x nm. */
  for (r = 0; r < nj; r++)
    for (c = 0; c < nm; c++)
      C[r][c] = ((DATA_TYPE) r*(c+3)) / nl;
  /* D is nm x nl. */
  for (r = 0; r < nm; r++)
    for (c = 0; c < nl; c++)
      D[r][c] = ((DATA_TYPE) r*(c+2)) / nk;
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
/* DCE code. Must scan the entire live-out data.
   Can be used also to check the correctness of the output.
   Prints G to stderr, breaking the stream into lines of 20 values. */
static
void print_array(int ni, int nl,
                 DATA_TYPE POLYBENCH_2D(G,NI,NL,ni,nl))
{
  int i, j;

  for (i = 0; i < ni; i++) {
    for (j = 0; j < nl; j++) {
      fprintf (stderr, DATA_PRINTF_MODIFIER, G[i][j]);
      /* Same line-break rule as the reference output. */
      if ((i * ni + j) % 20 == 0)
        fprintf (stderr, "\n");
    }
  }
  fprintf (stderr, "\n");
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
/* Main computational kernel: G := (A*B) * (C*D). The whole function is
 * timed, including the call and return.
 *
 * Fixes versus the previous version:
 * - Removed the enclosing `#pragma omp parallel ... num_threads(#P11)`:
 *   `#P11` is an unexpanded template placeholder and not a valid C token
 *   sequence, and nesting `parallel for` inside another `parallel` made
 *   every outer thread execute the whole loop nest, so different threads
 *   wrote the same E/F/G elements concurrently (data race).
 * - Removed `#pragma omp target teams distribute` on the inner j-loops:
 *   offloading from inside a host parallel region with j and k shared
 *   across teams was neither valid nor intended.
 * Each product is now one worksharing loop over rows with j,k private.
 * The three products keep their order because G depends on E and F. */
static
void kernel_3mm(int ni, int nj, int nk, int nl, int nm,
                DATA_TYPE POLYBENCH_2D(E,NI,NJ,ni,nj),
                DATA_TYPE POLYBENCH_2D(A,NI,NK,ni,nk),
                DATA_TYPE POLYBENCH_2D(B,NK,NJ,nk,nj),
                DATA_TYPE POLYBENCH_2D(F,NJ,NL,nj,nl),
                DATA_TYPE POLYBENCH_2D(C,NJ,NM,nj,nm),
                DATA_TYPE POLYBENCH_2D(D,NM,NL,nm,nl),
                DATA_TYPE POLYBENCH_2D(G,NI,NL,ni,nl))
{
  int i, j, k;

#pragma scop
  /* E := A*B */
#pragma omp parallel for private(j, k)
  for (i = 0; i < _PB_NI; i++)
    for (j = 0; j < _PB_NJ; j++)
      {
        E[i][j] = 0;
        for (k = 0; k < _PB_NK; ++k)
          E[i][j] += A[i][k] * B[k][j];
      }
  /* F := C*D */
#pragma omp parallel for private(j, k)
  for (i = 0; i < _PB_NJ; i++)
    for (j = 0; j < _PB_NL; j++)
      {
        F[i][j] = 0;
        for (k = 0; k < _PB_NM; ++k)
          F[i][j] += C[i][k] * D[k][j];
      }
  /* G := E*F */
#pragma omp parallel for private(j, k)
  for (i = 0; i < _PB_NI; i++)
    for (j = 0; j < _PB_NL; j++)
      {
        G[i][j] = 0;
        for (k = 0; k < _PB_NJ; ++k)
          G[i][j] += E[i][k] * F[k][j];
      }
#pragma endscop
}
/* Driver: allocate the six matrices, initialize the inputs, time the
 * kernel, and print the live-out array G so the compiler cannot
 * dead-code-eliminate the computation. */
int main(int argc, char** argv)
{
  /* Retrieve problem size. */
  int ni = NI;
  int nj = NJ;
  int nk = NK;
  int nl = NL;
  int nm = NM;

  /* Variable declaration/allocation (heap-backed via PolyBench macros). */
  POLYBENCH_2D_ARRAY_DECL(E, DATA_TYPE, NI, NJ, ni, nj);
  POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NK, ni, nk);
  POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NK, NJ, nk, nj);
  POLYBENCH_2D_ARRAY_DECL(F, DATA_TYPE, NJ, NL, nj, nl);
  POLYBENCH_2D_ARRAY_DECL(C, DATA_TYPE, NJ, NM, nj, nm);
  POLYBENCH_2D_ARRAY_DECL(D, DATA_TYPE, NM, NL, nm, nl);
  POLYBENCH_2D_ARRAY_DECL(G, DATA_TYPE, NI, NL, ni, nl);

  /* Initialize array(s). E, F and G are written by the kernel. */
  init_array (ni, nj, nk, nl, nm,
              POLYBENCH_ARRAY(A),
              POLYBENCH_ARRAY(B),
              POLYBENCH_ARRAY(C),
              POLYBENCH_ARRAY(D));

  /* Start timer. */
  polybench_start_instruments;

  /* Run kernel. */
  kernel_3mm (ni, nj, nk, nl, nm,
              POLYBENCH_ARRAY(E),
              POLYBENCH_ARRAY(A),
              POLYBENCH_ARRAY(B),
              POLYBENCH_ARRAY(F),
              POLYBENCH_ARRAY(C),
              POLYBENCH_ARRAY(D),
              POLYBENCH_ARRAY(G));

  /* Stop and print timer. */
  polybench_stop_instruments;
  polybench_print_instruments;

  /* Prevent dead-code elimination. All live-out data must be printed
     by the function call in argument. */
  polybench_prevent_dce(print_array(ni, nl, POLYBENCH_ARRAY(G)));

  /* Be clean. */
  POLYBENCH_FREE_ARRAY(E);
  POLYBENCH_FREE_ARRAY(A);
  POLYBENCH_FREE_ARRAY(B);
  POLYBENCH_FREE_ARRAY(F);
  POLYBENCH_FREE_ARRAY(C);
  POLYBENCH_FREE_ARRAY(D);
  POLYBENCH_FREE_ARRAY(G);

  return 0;
}
|
Searching.202008061153.less_sync.h | //
// Created by Zhen Peng on 8/6/2020.
//
#ifndef BATCH_SEARCHING_SEARCHING_H
#define BATCH_SEARCHING_SEARCHING_H
#include <vector>
#include <boost/dynamic_bitset.hpp>
//#include <boost/sort/sort.hpp>
#include <iostream>
#include <fstream>
#include <unordered_map>
#include <immintrin.h>
#include <cstring>
#include <unordered_set>
#include <set>
#include <cfloat>
#include <algorithm>
//#include <omp.h>
#include "../include/definitions.h"
//#include "../include/efanna2e/neighbor.h"
#include "../include/utils.h"
#include "../include/Candidate.h"
#include "../include/parallelization.h"
#include "../include/bitvector.h"
namespace PANNS {
/* NSG-based approximate nearest-neighbor search engine.
 * Holds the base vectors, the queries, the flattened graph layout
 * (per vertex: norm | data | degree | neighbor ids), and the sequential
 * and parallel best-first-search routines. Definitions are inline
 * below in this header. */
class Searching {
//private:
public:
    // --- Dataset / graph metadata ---
    idi num_v_ = 0;          // number of base vectors (graph vertices)
    edgei num_e_ = 0;        // total number of graph edges
    idi num_queries_ = 0;    // number of query vectors
    uint64_t dimension_ = 0; // vector dimensionality (shared by data and queries)
    idi width_ = 0; // NSG largest degree
    idi ep_ = 0; // Start point
//    std::vector<dataf> data_load_;
//    std::vector<dataf> queries_load_;
//    std::vector< std::vector<dataf> > data_load_;
//    std::vector< std::vector<dataf> > queries_load_;
//    std::vector<distf> norms_;
    // Raw base data; freed and nulled by load_nsg_graph() after packing.
    dataf *data_load_ = nullptr;
    dataf *queries_load_ = nullptr;
//    dataf *norms_;
//    std::vector< std::vector<idi> > nsg_graph_;
//    idi *nsg_graph_indices_;
//    idi *nsg_graph_out_edges_;
//    std::vector< std::vector<idi> > edge_list_;
    // Packed graph blob: num_v_ records of vertex_bytes_ each.
    char *opt_nsg_graph_ = nullptr;
    uint64_t data_bytes_;     // bytes per vertex for (norm + data)
    uint64_t neighbor_bytes_; // bytes per vertex for (degree + neighbor ids)
    uint64_t vertex_bytes_;   // data_bytes_ + neighbor_bytes_
    // For multithreads
    int num_threads_ = 1;
//    int num_real_threads_ = 1;
//    int num_threads_intra_query_ = 1;
//    int num_threads_inter_query_ = 1;

    // Squared L2 norm of one vector (AVX implementation below).
    dataf compute_norm(
            const dataf *data) const;
//        idi vertex_id);
//        const std::vector<PANNS::dataf> &data);
//        size_t loc_start,
//        idi dimension)
    // Ranking distance: -2 * dot(v, q) + ||v||^2 (query norm omitted;
    // constant per query, so ordering is unchanged).
    dataf compute_distance_with_norm(
            const dataf *v_data,
            const dataf *q_data,
//        idi vertex_id,
//        idi query_id,
//        const std::vector<dataf> &d_data,
//        const std::vector<dataf> &q_data,
//        PANNS::idi d_start,
//        PANNS::idi q_start,
            const dataf vertex_norm) const;
    // --- Sorted-queue primitives (queues live in shared arrays, addressed
    // --- by a start offset plus a size) ---
    static idi add_into_queue(
            std::vector<PANNS::Candidate> &queue,
            const idi queue_start,
            idi &queue_size,
            const idi queue_capacity,
            const PANNS::Candidate &cand);
    static void add_into_queue_at(
            const Candidate &cand,
            std::vector<Candidate> &queue,
            const idi insert_index, // The insertion location, independent with queue_start
            const idi queue_start,
            idi &queue_top, // The number of elements in queue, independent with queue_start
            const idi queue_size); // The maximum capacity of queue, independent with queue_start.
    static void insert_one_element_at(
//        const T &cand,
//        T *queue_base,
            const Candidate &cand,
            std::vector<Candidate> &queue_base,
            const idi insert_index,
            const idi queue_start,
            const idi queue_size);
    static idi merge_two_queues_into_1st_queue_seq_fixed(
            std::vector<Candidate> &queue1,
            const idi queue1_start,
            const idi queue1_size,
            std::vector<Candidate> &queue2,
            const idi queue2_start,
            const idi queue2_size);
    static idi merge_two_queues_into_1st_queue_seq_incr(
            std::vector<Candidate> &queue1,
            const idi queue1_start,
            idi &queue1_size, // The number of element in queue1, independent with queue1_start.
            const idi queue1_length, // The maximum capacity of queue1, independent with queue1_start.
            std::vector<Candidate> &queue2,
            const idi queue2_start,
            const idi queue2_size);
    // --- Multi-queue merge strategies used by the parallel searches ---
    idi merge_all_queues_para_array(
            std::vector<Candidate> &set_L,
//            std::vector<Candidate> &local_queues_array,
            std::vector<idi> &local_queues_ends,
            const idi local_queue_length,
//            std::vector<Candidate> &set_L,
            const idi L);
    idi merge_queues_of_four(
            std::vector<Candidate> &set_L,
            const std::vector<idi> &local_queues_starts,
            std::vector<idi> &local_queues_sizes,
            const idi group_id,
            const idi local_queue_capacity,
            const idi master_queue_capacity);
    idi merge_all_queues_to_master(
            std::vector<Candidate> &set_L,
            const std::vector<idi> &local_queues_starts,
            std::vector<idi> &local_queues_sizes,
            const idi local_queue_capacity,
            const idi local_master_queue_capacity,
            const idi master_queue_capacity,
            const idi group_size);
    idi master_top_m_to_groups(
            std::vector<Candidate> &set_L,
            const std::vector<idi> &local_queues_starts,
            std::vector<idi> &local_queues_sizes,
            std::vector<idi> &top_m_candidates,
            const std::vector<idi> &top_m_candidates_starts,
            std::vector<idi> &top_m_candidates_sizes,
            const idi k_uc,
            idi &last_k,
            const idi M,
            const idi num_groups);
//            const idi group_size);

public:
    // For Profiling
//    L3CacheMissRate cache_miss_kernel;
    uint64_t count_distance_computation_ = 0;
//    uint64_t count_add_to_queue_ = 0;
//    uint64_t count_single_query_computation_ = 0;
//    distf dist_min_ = 0;
//    distf dist_max_ = 0;
//    double time_merge_ = 0;
    double time_gather_ = 0;
//    double time_select_ = 0;
//    double time_select_L_ = 0.0;
//    double time_select_M_ = 0.0;
//    double time_initialization_ = 0;
//    double time_sequential_phase_ = 0;
//    double time_parallel_phase_ = 0;
//    double time_ending_ = 0.0;
//    double time_assign_s_ = 0.0;
//    double time_expand_ = 0.0;
//    double time_pick_top_m_ = 0.0;
//    double time_distance_computation_ = 0.0;
//    double time_add_to_queue_ = 0.0;
//    double time_insert_ = 0;
//    double time_compare_minimum_ = 0;
//    double time_memmove_ = 0;
//    std::vector<double> time_memmove_list_;
//    L3CacheMissRate profile_miss_rate;
//    uint64_t number_local_elements_ = 0;
//    std::vector<idi> L_ids_;
//    std::vector<idi> M_ids_;

    // Releases the malloc'd buffers owned by this object.
    // NOTE(review): copying a Searching would double-free these raw
    // pointers; presumably instances are never copied — verify.
    ~Searching()
    {
        free(data_load_);
        data_load_ = nullptr;
//        free(queries_load_);
//        _mm_free(data_load_);
        free(queries_load_);
        queries_load_ = nullptr;
//        free(norms_);
//        free(nsg_graph_indices_);
//        free(nsg_graph_out_edges_);
        free(opt_nsg_graph_);
        opt_nsg_graph_ = nullptr;
    }
    // --- Input loading ---
    void load_data_load(char *filename);
    void load_queries_load(char *filename);
    void load_nsg_graph(char *filename);
//    void build_opt_graph();
    // Fill init_ids with L distinct start vertices (ep_'s neighbors first).
    void prepare_init_ids(
            std::vector<unsigned> &init_ids,
            const unsigned L) const;
    // --- Search routines (sequential and parallel variants) ---
    void subsearch_with_top_m(
            const idi value_M_max,
            const idi query_id,
            const idi local_L,
            std::vector<Candidate> &set_L,
            const idi set_L_start,
            idi &set_L_size,
            std::vector<idi> &local_top_m_candidates,
            boost::dynamic_bitset<> &is_visited,
            uint64_t &local_count_distance_computation);
    void subsearch_top_m_for_one_iteration(
            const idi iter,
            idi &k_uc,
            const idi value_M,
            const idi query_id,
            const dataf *query_data,
            const idi L,
            std::vector<Candidate> &set_L,
            const idi set_L_start,
            idi &set_L_size,
            std::vector<idi> &top_m_candidates,
            boost::dynamic_bitset<> &is_visited,
            uint64_t &count_distance_computation);
    void seq_search_with_top_m_double_m(
            const idi M_max,
            const idi query_id,
            const idi K,
            const idi global_L,
            std::vector<Candidate> &set_L,
            const std::vector<idi> &init_ids,
            std::vector<idi> &set_K);
//            std::vector<idi> &top_m_candidates,
//            boost::dynamic_bitset<> &is_visited);
    idi expand_one_candidate(
            idi cand_id,
            const dataf *query_data,
            const distf &dist_bound,
            std::vector<Candidate> &set_L,
            const idi local_queue_start,
            idi &local_queue_size,
            const idi &local_queue_capacity,
            boost::dynamic_bitset<> &is_visited,
            uint64_t &local_count_computation);
    void para_search_with_top_m_hierarchy_merge_v0(
            const idi value_M_middle,
            const idi value_M_max,
            const idi query_id,
            const idi K,
            const idi L,
            std::vector<Candidate> &set_L,
            const std::vector<idi> &init_ids,
            std::vector<idi> &set_K,
            const idi local_queue_capacity, // Maximum size of local queue
            const idi local_master_queue_capacity,
            const std::vector<idi> &local_queues_starts,
            std::vector<idi> &local_queues_sizes, // Sizes of local queue
//            std::vector< std::vector<idi> > &top_m_candidates_list, // every group has one top-M queue
            std::vector<idi> &top_m_candidate,
            const std::vector<idi> &top_m_candidates_starts,
            std::vector<idi> &top_m_candidates_sizes,
            boost::dynamic_bitset<> &is_visited,
            const idi group_size, // Should be 4
            const idi full_merge_freq);
    void para_search_with_top_m_less_sync_v0(
            const idi value_M_middle,
            const idi value_M_max,
            const idi query_id,
            const idi K,
            const idi L,
            std::vector<Candidate> &set_L,
            const std::vector<idi> &init_ids,
            std::vector<idi> &set_K,
            const idi local_queue_capacity, // Maximum size of local queue
            const std::vector<idi> &local_queues_starts,
            std::vector<idi> &local_queues_sizes, // Sizes of local queue
            std::vector<idi> &top_m_candidates,
            boost::dynamic_bitset<> &is_visited,
            const idi full_merge_freq,
            const idi local_iter_bound);
    // --- Evaluation helpers ---
    void load_true_NN(
            const char *filename,
            std::vector< std::vector<idi> > &true_nn_list);
    void get_recall_for_all_queries(
            const std::vector< std::vector<idi> > &true_nn_list,
            const std::vector<std::vector<unsigned>> &set_K_list,
            std::unordered_map<unsigned, double> &recalls) const;
}; // Class Searching
/**
* Input the data from the file.
* @param filename
*/
/**
 * Read the base vectors from file into data_load_, filling num_v_ and
 * dimension_. If the queries were loaded first, the two files must agree
 * on the dimensionality; otherwise the process aborts.
 * @param filename path of the base-data file
 */
inline void Searching::load_data_load(char *filename)
{
    const auto previous_dim = dimension_; // nonzero iff queries loaded already
    DiskIO::load_data(filename, data_load_, num_v_, dimension_);
    if (previous_dim && previous_dim != dimension_) {
        std::cerr << "Error: data dimension " << dimension_
                  << " is not equal to query dimension " << previous_dim << "." << std::endl;
        exit(EXIT_FAILURE);
    }
}
/**
* Input queries from the file.
* @param filename
*/
/**
 * Read the query vectors from file into queries_load_, filling
 * num_queries_ and dimension_. If the base data was loaded first, the two
 * files must agree on the dimensionality; otherwise the process aborts.
 * @param filename path of the query file
 */
inline void Searching::load_queries_load(char *filename)
{
    const auto previous_dim = dimension_; // nonzero iff data loaded already
    DiskIO::load_data(filename, queries_load_, num_queries_, dimension_);
    if (previous_dim && previous_dim != dimension_) {
        std::cerr << "Error: query dimension " << dimension_
                  << " is not equal to data dimension " << previous_dim << "." << std::endl;
        exit(EXIT_FAILURE);
    }
}
/**
* Input the NSG graph from the file.
* Reference: https://github.com/ZJULearning/nsg/blob/master/src/index_nsg.cpp
* @param filename
*/
/**
 * Input the NSG graph from the file and pack it, together with the base
 * data, into the flat opt_nsg_graph_ layout:
 *   per vertex: [norm | vector data | degree | neighbor ids]
 * Also sets width_, ep_, num_e_ and the per-vertex byte sizes, then frees
 * data_load_ (its contents now live inside the packed blob).
 *
 * Fix: two occurrences of the mojibake `°ree` (HTML-entity corruption of
 * `&degree`, `&deg` -> `°`) made this function uncompilable; restored
 * `&degree`.
 *
 * Reference: https://github.com/ZJULearning/nsg/blob/master/src/index_nsg.cpp
 * @param filename path of the NSG index file
 */
inline void Searching::load_nsg_graph(char *filename)
{
    std::ifstream fin(filename);
    if (!fin.is_open()) {
        std::cerr << "Error: cannot read file " << filename << " ." << std::endl;
        exit(EXIT_FAILURE);
    }
    // File header: max out-degree, then the entry-point vertex id.
    fin.read(reinterpret_cast<char *>(&width_), sizeof(unsigned));
    fin.read(reinterpret_cast<char *>(&ep_), sizeof(unsigned));

    data_bytes_ = (1 + dimension_) * sizeof(dataf);  // norm + vector
    neighbor_bytes_ = (1 + width_) * sizeof(idi);    // degree + neighbor slots
    vertex_bytes_ = data_bytes_ + neighbor_bytes_;
    opt_nsg_graph_ = (char *) malloc(num_v_ * vertex_bytes_);
    if (!opt_nsg_graph_) {
        std::cerr << "Error: no enough memory for opt_nsg_graph_." << std::endl;
        exit(EXIT_FAILURE);
    }

    idi v_id = 0;
    num_e_ = 0;
    char *base_location = opt_nsg_graph_;
    while (true) {
        idi degree;
        fin.read(reinterpret_cast<char *>(&degree), sizeof(unsigned));
        if (fin.eof()) {
            break;
        }
        num_e_ += degree;
        // Norm and data.
        distf norm = compute_norm(data_load_ + v_id * dimension_);
        std::memcpy(base_location, &norm, sizeof(distf)); // Norm
        memcpy(base_location + sizeof(distf), data_load_ + v_id * dimension_, dimension_ * sizeof(dataf)); // Data
        base_location += data_bytes_;
        // Neighbors: count, then ids read straight into the blob.
        memcpy(base_location, &degree, sizeof(idi)); // Number of neighbors
        fin.read(base_location + sizeof(idi), degree * sizeof(unsigned)); // Neighbors
        base_location += neighbor_bytes_;
        ++v_id;
    }
    if (v_id != num_v_) {
        std::cerr << "Error: NSG data has " << v_id
                  << " vertices, but origin data has " << num_v_ << " vertices." << std::endl;
        exit(EXIT_FAILURE);
    }
    // The raw base data has been copied into the packed layout.
    free(data_load_);
    data_load_ = nullptr;
}
/**
* Load those true top-K neighbors (ground truth) of queries
* @param filename
* @param[out] true_nn_list
*/
inline void Searching::load_true_NN(
const char *filename,
std::vector< std::vector<idi> > &true_nn_list)
// unsigned &t_K)
{
std::ifstream fin(filename);
if (!fin.is_open()) {
fprintf(stderr, "Error: cannot open file %s\n", filename);
exit(EXIT_FAILURE);
}
idi t_query_num;
idi t_K;
// unsigned t_K;
fin.read(reinterpret_cast<char *>(&t_query_num), sizeof(t_query_num));
fin.read(reinterpret_cast<char *>(&t_K), sizeof(t_K));
// if (t_query_num != query_num) {
// fprintf(stderr, "Error: query_num %u is not equal to the record %u in true-NN file %s\n",
// query_num, t_query_num, filename);
// exit(EXIT_FAILURE);
// }
if (t_query_num < num_queries_) {
fprintf(stderr, "Error: t_query_num %u is smaller than num_queries_ %u\n", t_query_num, num_queries_);
exit(EXIT_FAILURE);
}
if (t_K < 100) {
fprintf(stderr, "Error: t_K %u is smaller than 100.\n", t_K);
exit(EXIT_FAILURE);
}
// data = new unsigned[(size_t) t_query_num * (size_t) t_K];
true_nn_list.resize(t_query_num);
for (idi q_i = 0; q_i < t_query_num; ++q_i) {
true_nn_list[q_i].resize(t_K);
}
for (unsigned q_i = 0; q_i < t_query_num; ++q_i) {
// size_t offset = q_i * t_K;
for (unsigned n_i = 0; n_i < t_K; ++n_i) {
unsigned id;
float dist;
fin.read(reinterpret_cast<char *>(&id), sizeof(id));
fin.read(reinterpret_cast<char *>(&dist), sizeof(dist));
// data[offset + n_i] = id;
true_nn_list[q_i][n_i] = id;
}
}
fin.close();
}
/**
 * Compute recall@{1,5,10,20,50,100} over all queries.
 * For each query, each of its 100 true neighbors found within the first k
 * returned results counts toward recall@k; totals are normalized by
 * k * num_queries_.
 * @param true_nn_list ground truth, at least 100 ids per query
 * @param set_K_list search results, at least 100 ids per query
 * @param[out] recalls map from k to recall@k
 */
inline void Searching::get_recall_for_all_queries(
        const std::vector< std::vector<idi> > &true_nn_list,
        const std::vector<std::vector<unsigned>> &set_K_list,
        std::unordered_map<unsigned, double> &recalls) const
{
    if (true_nn_list[0].size() < 100) {
        fprintf(stderr, "Error: Number of true nearest neighbors of a query is smaller than 100.\n");
        exit(EXIT_FAILURE);
    }

    static const unsigned recall_ks[] = {1, 5, 10, 20, 50, 100};
    for (unsigned k : recall_ks) {
        recalls[k] = 0.0;
    }

    for (unsigned q_i = 0; q_i < num_queries_; ++q_i) {
        for (unsigned top_i = 0; top_i < 100; ++top_i) {
            const unsigned true_id = true_nn_list[q_i][top_i];
            for (unsigned n_i = 0; n_i < 100; ++n_i) {
                if (set_K_list[q_i][n_i] != true_id) {
                    continue;
                }
                // A hit at position n_i counts for every k > n_i.
                for (unsigned k : recall_ks) {
                    if (n_i < k) {
                        recalls[k] += 1;
                    }
                }
            }
        }
    }

    for (unsigned k : recall_ks) {
        recalls[k] /= 1.0 * k * num_queries_;
    }
}
/**
* Prepare init_ids and flags, as they are constant for all queries.
* @param[out] init_ids
* @param L
*/
/**
 * Prepare init_ids and flags, as they are constant for all queries.
 * Fills init_ids with L distinct vertex ids: first the entry point's
 * out-neighbors, then (if fewer than L) deterministic ids starting at
 * ep_ + 1, skipping any already chosen.
 * @param[out] init_ids receives exactly L distinct vertex ids; must have capacity >= L
 * @param L number of start vertices to produce
 */
inline void Searching::prepare_init_ids(
        std::vector<unsigned int> &init_ids,
        const unsigned L) const
{
//    idi num_ngbrs = get_out_degree(ep_);
//    edgei edge_start = nsg_graph_indices_[ep_];
//    // Store ep_'s neighbors as candidates
//    idi tmp_l = 0;
//    for (; tmp_l < L && tmp_l < num_ngbrs; tmp_l++) {
//        init_ids[tmp_l] = nsg_graph_out_edges_[edge_start + tmp_l];
//    }
//    std::unordered_set<idi> visited_ids;
    // Dedup bitmap over all vertices.
    boost::dynamic_bitset<> is_selected(num_v_);
    // Locate ep_'s neighbor list inside the packed graph blob:
    // skip (norm + data), land on the degree field, then the ids follow.
    idi *out_edges = (idi *) (opt_nsg_graph_ + ep_ * vertex_bytes_ + data_bytes_);
    idi out_degree = *out_edges++;
    idi init_ids_end = 0;
//    for (; tmp_l < L && tmp_l < out_degree; tmp_l++) {
    // Take up to L distinct neighbors of the entry point.
    for (idi e_i = 0; e_i < out_degree && init_ids_end < L; ++e_i) {
//        idi v_id = out_edges[tmp_l];
        idi v_id = out_edges[e_i];
        if(is_selected[v_id]) {
            continue;
        }
        is_selected[v_id] = true;
//        init_ids[tmp_l] = v_id;
        init_ids[init_ids_end++] = v_id;
//        init_ids[tmp_l] = out_edges[tmp_l];
//        visited_ids.insert(init_ids[tmp_l]);
    }
//    for (idi i = 0; i < tmp_l; ++i) {
//        is_visited[init_ids[i]] = true;
//    }
    // If ep_'s neighbors are not enough, add other deterministic vertices
    // (sequential ids modulo num_v_ — reproducible replacement for rand()).
    idi tmp_id = ep_ + 1; // use tmp_id to replace rand().
    while (init_ids_end < L) {
        tmp_id %= num_v_;
        idi v_id = tmp_id++;
        if (is_selected[v_id]) {
            continue;
        }
//        if (visited_ids.find(id) != visited_ids.end()) {
//            continue;
//        }
        is_selected[v_id] = true;
//        visited_ids.insert(id);
        init_ids[init_ids_end++] = v_id;
//        tmp_l++;
    }
}
// TODO: re-code in AVX-512
// TODO: re-code in AVX-512
/**
 * Squared L2 norm of one vector, computed with AVX (8 floats per lane,
 * two lanes per iteration).
 * NOTE(review): the loop processes D = dimension_ rounded UP to a multiple
 * of 8 floats, so it reads up to 7 floats past the logical end of `data`
 * — assumes the buffer is padded/allocated to that size; confirm at the
 * allocation sites.
 * @param data pointer to the vector (no alignment required: loadu)
 * @return sum of squares of the first D elements
 */
inline dataf Searching::compute_norm(
        const dataf *data) const
//        idi vertex_id)
//        const std::vector<PANNS::dataf> &data)
//        size_t loc_start,
//        idi dimension)
{
//    const dataf *a = data.data() + loc_start;
//    const dataf *a = data_load_ + vertex_id * dimension_;
//    idi size = dimension_;
    dataf result = 0;
//#define AVX_L2NORM(addr, dest, tmp) \
//    tmp = _mm256_load_ps(addr); \
//    tmp = _mm256_mul_ps(tmp, tmp); \
//    dest = _mm256_add_ps(dest, tmp);
// Accumulate squares of 8 floats at addr into dest (unaligned load).
#define AVX_L2NORM(addr, dest, tmp) \
    tmp = _mm256_loadu_ps(addr); \
    tmp = _mm256_mul_ps(tmp, tmp); \
    dest = _mm256_add_ps(dest, tmp);
    __m256 sum;
    __m256 l0, l1;
    // D: dimension rounded up to 8; DD: the 16-float-aligned bulk; DR: the
    // remaining 8-float tail (0 or 8).
    unsigned D = (dimension_ + 7) & ~7U;
    unsigned DR = D % 16;
    unsigned DD = D - DR;
    const float *l = data;
    const float *e_l = l + DD;
    float unpack[8] __attribute__ ((aligned (32))) = {0, 0, 0, 0, 0, 0, 0, 0};
    sum = _mm256_load_ps(unpack);
//    sum = _mm256_loadu_ps(unpack);
    // Tail first (if any), then the bulk 16 floats at a time.
    if (DR) { AVX_L2NORM(e_l, sum, l0); }
    for (unsigned i = 0; i < DD; i += 16, l += 16) {
        AVX_L2NORM(l, sum, l0);
        AVX_L2NORM(l + 8, sum, l1);
    }
    // Horizontal reduction of the 8 partial sums.
    _mm256_store_ps(unpack, sum);
//    _mm256_storeu_ps(unpack, sum);
    result = unpack[0] + unpack[1] + unpack[2] + unpack[3] + unpack[4] + unpack[5] + unpack[6] + unpack[7];
    return result;
}
/**
 * Ranking distance between a data vector and a query using AVX:
 * returns -2 * dot(v, q) + ||v||^2, i.e. the squared Euclidean distance
 * minus ||q||^2. The query norm is constant per query, so the ordering of
 * candidates is the same as with the true squared distance.
 * NOTE(review): like compute_norm, this reads up to 7 floats past the
 * logical end of both vectors (dimension rounded up to 8) — assumes
 * padded buffers; confirm at the allocation sites.
 * @param v_data data vector (unaligned loads are used)
 * @param q_data query vector
 * @param vertex_norm precomputed ||v||^2 (stored in opt_nsg_graph_)
 * @return -2 * dot(v, q) + vertex_norm
 */
inline dataf Searching::compute_distance_with_norm(
        const dataf *v_data,
        const dataf *q_data,
//        idi vertex_id,
//        idi query_id,
//        const std::vector<PANNS::dataf> &d_data,
//        const std::vector<PANNS::dataf> &q_data,
//        PANNS::idi d_start,
//        PANNS::idi q_start,
        const dataf vertex_norm) const
//        idi dimension)
{
//    idi size = dimension_;
    float result = 0;
//#define AVX_DOT(addr1, addr2, dest, tmp1, tmp2) \
//    tmp1 = _mm256_load_ps(addr1);\
//    tmp2 = _mm256_load_ps(addr2);\
//    tmp1 = _mm256_mul_ps(tmp1, tmp2); \
//    dest = _mm256_add_ps(dest, tmp1);
// Accumulate the elementwise product of 8 floats from each address into dest.
#define AVX_DOT(addr1, addr2, dest, tmp1, tmp2) \
    tmp1 = _mm256_loadu_ps(addr1);\
    tmp2 = _mm256_loadu_ps(addr2);\
    tmp1 = _mm256_mul_ps(tmp1, tmp2); \
    dest = _mm256_add_ps(dest, tmp1);
    __m256 sum;
    __m256 l0, l1;
    __m256 r0, r1;
    // D: dimension rounded up to 8; DD: 16-float bulk; DR: 8-float tail.
    unsigned D = (dimension_ + 7) & ~7U;
    unsigned DR = D % 16;
    unsigned DD = D - DR;
    const float *l = v_data;
    const float *r = q_data;
//    const float *l = (float *) (opt_nsg_graph_ + vertex_id * vertex_bytes_ + sizeof(distf));
//    const float *r = queries_load_ + query_id * dimension_;
    const float *e_l = l + DD;
    const float *e_r = r + DD;
    float unpack[8] __attribute__ ((aligned (32))) = {0, 0, 0, 0, 0, 0, 0, 0};
    sum = _mm256_load_ps(unpack);
//    sum = _mm256_loadu_ps(unpack);
    // Tail first (if any), then the bulk 16 floats at a time.
    if (DR) { AVX_DOT(e_l, e_r, sum, l0, r0); }
    for (unsigned i = 0; i < DD; i += 16, l += 16, r += 16) {
        AVX_DOT(l, r, sum, l0, r0);
        AVX_DOT(l + 8, r + 8, sum, l1, r1);
    }
    // Horizontal reduction, then fold in the precomputed vertex norm.
    _mm256_store_ps(unpack, sum);
//    _mm256_storeu_ps(unpack, sum);
    result = unpack[0] + unpack[1] + unpack[2] + unpack[3] + unpack[4] + unpack[5] + unpack[6] + unpack[7];
    result = -2 * result + vertex_norm;
    return result;
}
//
// The difference from insert_into_queue is that add_into_queue will increase the queue size by 1.
// add_into_queue with a queue_start
//
// The difference from insert_into_queue is that add_into_queue will increase the queue size by 1.
// add_into_queue with a queue_start
/**
 * Insert cand into a sorted sub-queue of `queue` that starts at
 * queue_start and currently holds queue_size elements, keeping it sorted.
 * When the queue is full the worst (last) element is dropped to make room.
 * @return insertion position relative to queue_start, or queue_capacity
 *         when cand was not inserted (duplicate, or worse than a full
 *         queue's tail).
 * NOTE(review): the duplicate check compares only the single element at
 * the lower_bound position; with equal distances but different ids a
 * duplicate id could slip through — verify Candidate's ordering makes
 * this impossible.
 */
inline idi Searching::add_into_queue(
        std::vector<PANNS::Candidate> &queue,
        const idi queue_start,
        idi &queue_size, // The current number of elements in the queue, independent of queue_start
        const idi queue_capacity, // The maximum capacity of queue, independent with queue_start.
        const PANNS::Candidate &cand)
{
    // Empty queue: cand becomes the sole element at position 0.
    if (0 == queue_size) {
        queue[queue_start + queue_size++] = cand;
        return 0;
    }
    idi queue_end = queue_start + queue_size;
    // Find the insert location
    const auto it_loc = std::lower_bound(queue.begin() + queue_start, queue.begin() + queue_end, cand);
//    auto it_loc = std::lower_bound(queue.begin(), queue.begin() + queue_size, cand);
    idi insert_loc = it_loc - queue.begin();

    if (insert_loc != queue_end) {
        if (cand.id_ == it_loc->id_) {
            // Duplicate
            return queue_capacity;
        }
        if (queue_size >= queue_capacity) { // Queue is full
            // Drop the current worst element to keep room for cand.
            --queue_size;
            --queue_end;
        }
    } else { // insert_loc == queue_end: cand is worse than everything present
        if (queue_size < queue_capacity) { // Queue is not full
            // Insert at the end
            queue[insert_loc] = cand;
            ++queue_size;
            return queue_size - 1;
        } else { // Queue is full
            return queue_capacity;
        }
    }

    // Shift [insert_loc, queue_end) right by one and place cand.
    // NOTE(review): memmove assumes Candidate is trivially copyable.
    memmove(reinterpret_cast<char *>(queue.data() + insert_loc + 1),
            reinterpret_cast<char *>(queue.data() + insert_loc),
            (queue_end - insert_loc) * sizeof(Candidate));
    queue[insert_loc] = cand;
    ++queue_size;
    return insert_loc - queue_start;
}
inline void Searching::add_into_queue_at(
const Candidate &cand,
std::vector<Candidate> &queue,
const idi insert_index, // The insertion location, independent with queue_start
const idi queue_start,
idi &queue_size, // The number of elements in queue, independent with queue_start
const idi queue_length) // The maximum capacity of queue, independent with queue_start.
{
const idi dest_index = queue_start + insert_index;
if (queue_size == queue_length) {
--queue_size;
}
memmove(reinterpret_cast<char *>(queue.data() + dest_index + 1),
reinterpret_cast<char *>(queue.data() + dest_index),
(queue_size - insert_index) * sizeof(Candidate));
queue[dest_index] = cand;
++queue_size;
}
inline void Searching::insert_one_element_at(
// const T &cand,
// T *queue_base,
const Candidate &cand,
std::vector<Candidate> &queue,
const idi insert_index,
const idi queue_start,
const idi queue_size)
{
const idi dest_index = queue_start + insert_index;
memmove(reinterpret_cast<char *>(queue.data() + dest_index + 1),
reinterpret_cast<char *>(queue.data() + dest_index),
(queue_size - insert_index - 1) * sizeof(Candidate));
queue[dest_index] = cand;
// memmove(reinterpret_cast<char *>(queue_base + dest_index + 1),
// reinterpret_cast<char *>(queue_base + dest_index),
// (queue_size - insert_index - 1) * sizeof(T));
// for (idi q_i = queue_size - 1; q_i > insert_index; --q_i) {
// queue_base.at(q_i + queue_start) = queue_base.at(q_i - 1 + queue_start);
// }
// queue_base[dest_index] = cand;
}
/* Function:
* queue1_size is fixed.
*/
/* Function:
 * Merge sorted queue2 into sorted queue1, keeping queue1_size fixed:
 * elements of queue2 displace worse elements of queue1, and anything
 * pushed past queue1_size is discarded. Both queues must be non-empty.
 * Returns the index (relative to queue1_start) of the first position
 * that may have changed.
 */
inline idi Searching::merge_two_queues_into_1st_queue_seq_fixed(
        std::vector<Candidate> &queue1,
        const idi queue1_start,
        const idi queue1_size,
        std::vector<Candidate> &queue2,
        const idi queue2_start,
        const idi queue2_size)
//        const idi limit_size)
{
    assert(queue1_size && queue2_size);
    // Record the lowest insert location.
    auto it_loc = std::lower_bound(
            queue1.begin() + queue1_start,
            queue1.begin() + queue1_start + queue1_size,
            queue2[queue2_start]);
    idi insert_index = it_loc - (queue1.begin() + queue1_start);
    if (insert_index == queue1_size) {
        // Even queue2's best is worse than queue1's worst: nothing to do.
        return insert_index;
    } else if (insert_index == queue1_size - 1) {
        // Only the last slot can change: overwrite it directly.
        queue1[queue1_start + insert_index] = queue2[queue2_start];
        return insert_index;
    }

    // Insert the 1st of queue2
    if (queue2[queue2_start].id_ != it_loc->id_) {
        // Not Duplicate
        insert_one_element_at(
                queue2[queue2_start],
                queue1,
                insert_index,
                queue1_start,
                queue1_size);
    }
    if (queue2_size == 1) {
        return insert_index;
    }

    // Merge the remaining elements of queue2 into queue1 in order.
    idi q_i_1 = insert_index + 1 + queue1_start;
    idi q_i_2 = queue2_start + 1;
    const idi q_i_1_bound = queue1_start + queue1_size;
    const idi q_i_2_bound = queue2_start + queue2_size;
//    const idi insert_i_bound = queue1_start + limit_size;
    for (idi insert_i = insert_index + 1; insert_i < queue1_size; ++insert_i) {
        if (q_i_1 >= q_i_1_bound || q_i_2 >= q_i_2_bound) {
            // One queue is exhausted; the rest of queue1 is already in place.
            break;
        } else if (queue1[q_i_1] < queue2[q_i_2]) {
            // queue1's element stays where it is.
            ++q_i_1;
        } else if (queue2[q_i_2] < queue1[q_i_1]) {
            // Insert queue2[q_i_2] into queue1
            insert_one_element_at(
                    queue2[q_i_2++],
                    queue1,
                    insert_i,
                    queue1_start,
                    queue1_size);
            ++q_i_1;
        } else {
            // Duplicate
            ++q_i_2;
            ++q_i_1;
        }
    }
    return insert_index;
}
/* Function:
* queue1_size should be updated.
* queue1_length should be provided.
*/
/* Function:
 * Merge sorted queue2 into sorted queue1, letting queue1 grow:
 * queue1_size is updated (up to queue1_length) as elements are added.
 * Both queues must be non-empty. Returns the index (relative to
 * queue1_start) of the first position that may have changed.
 */
inline idi Searching::merge_two_queues_into_1st_queue_seq_incr(
        std::vector<Candidate> &queue1,
        const idi queue1_start,
        idi &queue1_size, // The number of element in queue1, independent with queue1_start.
        const idi queue1_length, // The maximum capacity of queue1, independent with queue1_start.
        std::vector<Candidate> &queue2,
        const idi queue2_start,
        const idi queue2_size)
//        const idi limit_size)
{
    assert(queue1_size && queue2_size);
    // Record the lowest insert location.
    auto it_loc = std::lower_bound(
            queue1.begin() + queue1_start,
            queue1.begin() + queue1_start + queue1_size,
            queue2[queue2_start]);
    idi insert_index = it_loc - (queue1.begin() + queue1_start);
    if (insert_index == queue1_size) {
        // All of queue2 goes after queue1's current tail: bulk-append as
        // many elements as the remaining capacity allows.
        // NOTE(review): memmove assumes Candidate is trivially copyable.
        idi copy_count = (queue1_size + queue2_size > queue1_length) ?
                queue1_length - queue1_size :
                queue2_size;
        memmove(queue1.data() + queue1_start + queue1_size,
                queue2.data() + queue2_start,
                copy_count * sizeof(Candidate));
        queue1_size += copy_count;
        return insert_index;
    }
    if (queue2[queue2_start].id_ != it_loc->id_) {
        // Not Duplicate
        add_into_queue_at(
                queue2[queue2_start],
                queue1,
                insert_index,
                queue1_start,
                queue1_size,
                queue1_length);
    }
    if (queue2_size == 1) {
        return insert_index;
    }

    // Merge the remaining elements of queue2 into queue1 in order.
    idi q_i_1 = insert_index + 1 + queue1_start;
    idi q_i_2 = queue2_start + 1;
    idi q_i_1_bound = queue1_start + queue1_size; // When queue1_size is updated, so should be q_i_1_bound.
    const idi q_i_2_bound = queue2_start + queue2_size;
//    idi insert_i;
    for (idi insert_i = insert_index + 1; insert_i < queue1_length; ++insert_i) {
        if (q_i_1 >= q_i_1_bound) {
            // queue1 exhausted: append queue2's remainder up to capacity.
            queue1_size += std::min(queue1_length - insert_i, q_i_2_bound - q_i_2);
            for ( ; insert_i < queue1_size; ++insert_i) {
                queue1[queue1_start + insert_i] = queue2[q_i_2++];
            }
            break;
        } else if (q_i_2 >= q_i_2_bound) {
            // queue2 exhausted: done.
            break;
        } else if (queue1[q_i_1] < queue2[q_i_2]) {
            // queue1's element stays where it is.
            ++q_i_1;
        } else if (queue2[q_i_2] < queue1[q_i_1]) {
            add_into_queue_at(
                    queue2[q_i_2++],
                    queue1,
                    insert_i,
                    queue1_start,
                    queue1_size,
                    queue1_length);
            ++q_i_1;
            // queue1 may have grown: refresh the bound.
            q_i_1_bound = queue1_start + queue1_size;
        } else {
            // Duplicate
            ++q_i_2;
            ++q_i_1;
        }
    }
    return insert_index;
}
/* Function:
* Use large local_queues_array as a concatenation of all queues
*/
/*
 * Pairwise parallel (tree-style) merge of all per-thread queues, which are
 * laid out contiguously in set_L (queue i starts at i * local_queue_length).
 * Runs log2(size) rounds; in round d, queue (i + 2^d - 1) is merged into
 * queue (i + 2^(d+1) - 1). The last queue is the final destination and is
 * kept at fixed size L via the "_fixed" merge variant.
 * Returns the lowest position of the final queue that was updated
 * (L when nothing below L changed).
 */
inline idi Searching::merge_all_queues_para_array(
std::vector<Candidate> &set_L,
std::vector<idi> &local_queues_ends,
const idi local_queue_length,
const idi L)
{
const int num_queues = num_threads_;
idi nk = L;
// Largest power of two <= num_queues; only that many queues join the tree.
// NOTE(review): if num_threads_ is not a power of two, the trailing queues
// are not merged here (the prefix-sum fallback below is commented out) —
// confirm callers guarantee a power-of-two thread count.
int size = 1 << (static_cast<idi>(log2(num_queues)));
idi log2size = static_cast<idi>(log2(size));
for (idi d = 0; d < log2size; ++d) {
uint32_t by = 1 << (d + 1);
// Pairs within one round are disjoint, so they merge in parallel.
#pragma omp parallel for
for (int i = 0; i < size; i += by) {
idi ai = i + (1 << (d + 1)) - 1; // i + 2^(d+1) - 1
idi a_start = ai * local_queue_length;
idi bi = i + (1 << d) - 1; // i + 2^d - 1
idi b_start = bi * local_queue_length;
if (0 == local_queues_ends[bi]) {
continue; // Source queue empty: nothing to merge.
}
if (local_queues_ends[ai] == 0) {
// Destination empty: a plain copy suffices.
std::copy(set_L.begin() + b_start,
set_L.begin() + b_start + local_queues_ends[bi],
set_L.begin() + a_start); // Copy bi to ai
local_queues_ends[ai] = local_queues_ends[bi];
local_queues_ends[bi] = 0;
continue;
}
if (ai != static_cast<idi>(num_queues - 1)) {
// Intermediate destination: may grow up to local_queue_length.
merge_two_queues_into_1st_queue_seq_incr(
set_L,
a_start,
local_queues_ends[ai],
local_queue_length,
set_L,
b_start,
local_queues_ends[bi]);
} else {
// Final (global) queue: fixed capacity L; track the lowest
// position that received a new element.
idi r = merge_two_queues_into_1st_queue_seq_fixed(
set_L,
a_start,
L,
set_L,
b_start,
local_queues_ends[bi]);
if (r < nk) {
nk = r;
}
}
}
}
//	// Remain, prefix-sum-like merge
//	if (size != num_queues) {
//		for (int i = size; i < num_queues; ++i) {
//			idi ai = i;
//			idi a_start = ai * local_queue_length;
//			idi bi = i - 1;
//			idi b_start = bi * local_queue_length;
//			if (0 == local_queues_ends[bi]) {
//				continue;
//			}
//			if (local_queues_ends[ai] == 0) {
//				std::copy(set_L.begin() + b_start,
//						set_L.begin() + b_start + local_queues_ends[bi],
//						set_L.begin() + a_start); // Copy bi to ai
//				local_queues_ends[ai] = local_queues_ends[bi];
//				local_queues_ends[bi] = 0;
//				continue;
//			}
//			if (ai != static_cast<idi>(num_queues - 1)) {
//				merge_two_queues_into_1st_queue_seq_incr(
//						set_L,
//						a_start,
//						local_queues_ends[ai],
//						local_queue_length,
//						set_L,
//						b_start,
//						local_queues_ends[bi]);
//			} else {
//				idi r = merge_two_queues_into_1st_queue_seq_fixed(
//						set_L,
//						a_start,
//						L,
//						set_L,
//						b_start,
//						local_queues_ends[bi]);
//				if (r < nk) {
//					nk = r;
//				}
//			}
//		}
//	}
// Reset local_queues_ends
// Not do this for Collector Idea or Selecting Idea
std::fill(local_queues_ends.begin(), local_queues_ends.end() - 1, 0);
// std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0);
return nk;
// return r;
}
/*
* Function: merge 4 queues into the last queue
*/
/*
 * Merge the 4 queues of one group (ids group_start .. group_start + 3) into
 * the group's last queue (group_start + 3).
 * Step 1 (2 OpenMP threads): queue 0 -> queue 1 and queue 2 -> queue 3.
 * Step 2 (serial): queue 1 -> queue 3.
 * All source queues end with size 0. Returns the lowest position updated in
 * the destination queue: master_queue_capacity when it was untouched, 0 when
 * it was wholly replaced by a copy.
 */
inline idi Searching::merge_queues_of_four(
std::vector<Candidate> &set_L,
const std::vector<idi> &local_queues_starts,
std::vector<idi> &local_queues_sizes,
const idi group_id,
const idi local_queue_capacity,
const idi master_queue_capacity)
{
// const int num_queues = 4;
const idi group_start = group_id * 4;
idi nk = master_queue_capacity;
// Step 1: merge queue (2i) into queue (2i + 1) for i = 0, 1, in parallel.
#pragma omp parallel for num_threads(2)
for (int i = 0; i < 2; ++i) {
const idi bi = 2 * i + group_start;
const idi ai = bi + 1;
if (!local_queues_sizes[bi]) {
continue; // Source empty: nothing to do for this pair.
}
if (!local_queues_sizes[ai]) {
// Destination empty: plain copy, no merge needed.
std::copy(
set_L.begin() + local_queues_starts[bi],
set_L.begin() + local_queues_starts[bi] + local_queues_sizes[bi],
set_L.begin() + local_queues_starts[ai]);
local_queues_sizes[ai] = local_queues_sizes[bi];
local_queues_sizes[bi] = 0;
continue;
}
if (ai != 3 + group_start) {
// Destination is an ordinary local queue.
merge_two_queues_into_1st_queue_seq_incr(
set_L,
local_queues_starts[ai],
local_queues_sizes[ai],
local_queue_capacity,
set_L,
local_queues_starts[bi],
local_queues_sizes[bi]);
} else {
// Destination is the group's master queue: larger capacity,
// and we track the lowest updated position.
idi r = merge_two_queues_into_1st_queue_seq_incr(
set_L,
local_queues_starts[ai],
local_queues_sizes[ai],
master_queue_capacity,
set_L,
local_queues_starts[bi],
local_queues_sizes[bi]);
if (r < nk) {
nk = r;
}
}
local_queues_sizes[bi] = 0;
}
// Step 2: merge queue 1 into the group's last queue (queue 3).
{
const idi bi = 1 + group_start;
const idi ai = 3 + group_start;
if (!local_queues_sizes[bi]) {
return nk;
}
if (!local_queues_sizes[ai]) {
std::copy(
set_L.begin() + local_queues_starts[bi],
set_L.begin() + local_queues_starts[bi] + local_queues_sizes[bi],
set_L.begin() + local_queues_starts[ai]);
local_queues_sizes[ai] = local_queues_sizes[bi];
local_queues_sizes[bi] = 0;
// The destination's contents were wholly replaced, so the lowest
// updated position is 0.
return 0;
}
idi r = merge_two_queues_into_1st_queue_seq_incr(
set_L,
local_queues_starts[ai],
local_queues_sizes[ai],
master_queue_capacity,
set_L,
local_queues_starts[bi],
local_queues_sizes[bi]);
if (r < nk) {
nk = r;
}
local_queues_sizes[bi] = 0;
}
return nk;
}
/*
* Function: used by hierarchical merging idea.
* Merge all queues into the last queue.
* Difference with merge_all_queues_para_array: here the last queue might not have L elements in the beginning,
* so use merge_two_queues_into_1st_queue_seq_incr(), not merge_two_queues_into_1st_queue_seq_fixed().
*/
inline idi Searching::merge_all_queues_to_master(
std::vector<Candidate> &set_L,
const std::vector<idi> &local_queues_starts,
std::vector<idi> &local_queues_sizes,
const idi local_queue_capacity,
const idi local_master_queue_capacity,
const idi master_queue_capacity,
const idi group_size)
{
const idi num_queues = num_threads_;
idi nk = master_queue_capacity;
int size = num_queues;
// int size = 1 << (static_cast<idi>(log2(num_queues)));
// NOTE(review): log2size truncates, so when num_queues is not a power of
// two the last queues may never be visited by the tree — confirm callers
// use a power-of-two thread count.
idi log2size = static_cast<idi>(log2(size));
for (idi d = 0; d < log2size; ++d) {
uint32_t by = 1 << (d + 1);
// Pairs within one round are disjoint, so they merge in parallel.
#pragma omp parallel for
for (int i = 0; i < size; i += by) {
idi ai = i + (1 << (d + 1)) - 1; // i + 2^(d+1) - 1
// idi a_start = ai * local_queue_capacity;
idi a_start = local_queues_starts[ai];
idi bi = i + (1 << d) - 1; // i + 2^d - 1
// idi b_start = bi * local_queue_capacity;
idi b_start = local_queues_starts[bi];
if (0 == local_queues_sizes[bi]) {
continue; // Source queue empty.
}
if (local_queues_sizes[ai] == 0) {
// Destination empty: plain copy is enough.
std::copy(set_L.begin() + b_start,
set_L.begin() + b_start + local_queues_sizes[bi],
set_L.begin() + a_start); // Copy bi to ai
local_queues_sizes[ai] = local_queues_sizes[bi];
local_queues_sizes[bi] = 0;
continue;
}
// Pick the destination's capacity tier: ordinary local queue,
// a group's local master queue, or the global master queue.
if ((group_size - 1) != ai % 4) {
merge_two_queues_into_1st_queue_seq_incr(
set_L,
a_start,
local_queues_sizes[ai],
local_queue_capacity,
set_L,
b_start,
local_queues_sizes[bi]);
} else if (num_queues - 1 != ai) {
merge_two_queues_into_1st_queue_seq_incr(
set_L,
a_start,
local_queues_sizes[ai],
local_master_queue_capacity,
set_L,
b_start,
local_queues_sizes[bi]);
} else {
// Global master queue: track the lowest updated position.
idi r = merge_two_queues_into_1st_queue_seq_incr(
set_L,
a_start,
local_queues_sizes[ai],
master_queue_capacity,
set_L,
b_start,
local_queues_sizes[bi]);
if (ai == num_queues - 1 && r < nk) {
nk = r;
}
}
//            if ((group_size - 1) == ai % 4) {
//                idi r = merge_two_queues_into_1st_queue_seq_incr(
//                        set_L,
//                        a_start,
//                        local_queues_sizes[ai],
//                        master_queue_capacity,
//                        set_L,
//                        b_start,
//                        local_queues_sizes[bi]);
//                if (ai == num_queues - 1 && r < nk) {
//                    nk = r;
//                }
//            } else {
//                merge_two_queues_into_1st_queue_seq_incr(
//                        set_L,
//                        a_start,
//                        local_queues_sizes[ai],
//                        local_queue_capacity,
//                        set_L,
//                        b_start,
//                        local_queues_sizes[bi]);
//            }
//            if (ai != num_queues - 1) {
//                merge_two_queues_into_1st_queue_seq_incr(
//                        set_L,
//                        a_start,
//                        local_queues_sizes[ai],
//                        local_queue_capacity,
//                        set_L,
//                        b_start,
//                        local_queues_sizes[bi]);
//            } else {
//                idi r = merge_two_queues_into_1st_queue_seq_incr(
//                        set_L,
//                        a_start,
//                        local_queues_sizes[ai],
//                        master_queue_capacity,
//                        set_L,
//                        b_start,
//                        local_queues_sizes[bi]);
//                if (r < nk) {
//                    nk = r;
//                }
//            }
local_queues_sizes[bi] = 0;
}
}
// Reset local_queues_sizes
// Not do this for Collector Idea or Selecting Idea
//    std::fill(local_queues_sizes.begin(), local_queues_sizes.end() - 1, 0);
//    std::fill(local_queues_sizes.begin(), local_queues_sizes.end(), 0);
return nk;
}
/*
* Function: distribute master queue's top-M unchecked elements to top_m_candidates.
* Used by hierarchical merging idea.
*/
/*
 * Distribute up to M unchecked candidates from the global master queue
 * (the last thread's queue) round-robin into every group's top-M candidate
 * buffer. Each distributed candidate is marked checked; last_k receives the
 * master-queue offset of the last candidate taken.
 * Returns the number of candidates distributed.
 */
inline idi Searching::master_top_m_to_groups(
        std::vector<Candidate> &set_L,
        const std::vector<idi> &local_queues_starts,
        std::vector<idi> &local_queues_sizes,
        std::vector<idi> &top_m_candidates,
        const std::vector<idi> &top_m_candidates_starts,
        std::vector<idi> &top_m_candidates_sizes,
        const idi k_uc,
        idi &last_k,
        const idi M,
        const idi num_groups)
{
    const idi master_start = local_queues_starts[num_threads_ - 1];
    const idi scan_end = master_start + local_queues_sizes[num_threads_ - 1];
    idi taken = 0;                    // how many candidates were handed out
    idi scan = master_start + k_uc;   // start scanning at the first unchecked slot
    while (scan < scan_end && taken < M) {
        Candidate &cand = set_L[scan];
        if (!cand.is_checked_) {
            cand.is_checked_ = true;
            last_k = scan - master_start;           // offset of the last one selected
            const idi group = taken % num_groups;   // round-robin target group
            ++taken;
            top_m_candidates[top_m_candidates_starts[group] + top_m_candidates_sizes[group]++] = cand.id_;
        }
        ++scan;
    }
    return taken;
}
/*
* 6/22/2020-21:30
* Do searching on the local_set_L
* local_set_L is already sorted
* is_visited is already set up.
*/
inline void Searching::subsearch_with_top_m(
const idi value_M_max,
const idi query_id,
const idi local_L,
std::vector<Candidate> &set_L,
const idi set_L_start,
idi &set_L_size,
std::vector<idi> &local_top_m_candidates,
boost::dynamic_bitset<> &is_visited,
uint64_t &local_count_distance_computation)
{
const dataf *query_data = queries_load_ + query_id * dimension_;
// idi local_top_m_candidates_end = 0;
idi k = 0; // Index of first unchecked candidate.
idi iter = 0;
idi M = 1; // value of M
while (k < local_L) {
++iter;
subsearch_top_m_for_one_iteration(
iter,
k,
M,
query_id,
query_data,
local_L,
set_L,
set_L_start,
set_L_size,
local_top_m_candidates,
is_visited,
local_count_distance_computation);
{// Scale M
if (M < value_M_max) {
M <<= 1;
}
// else {
// M = value_M_max;
// }
}
}
// {//test
// printf("set_L_start: %u "
// "local_count_distance_computation: %lu\n",
// set_L_start,
// local_count_distance_computation);
// }
}
/*
* 7/6/2020-23:17
* Subsearch only 1 iteration using top-m
*/
/*
 * One iteration of the top-M subsearch over set_L[set_L_start, +set_L_size):
 * select up to value_M unchecked candidates starting from k_uc, mark them
 * checked, expand each one's out-neighbors, insert promising neighbors into
 * the queue, and advance k_uc to the next position to examine.
 */
inline void Searching::subsearch_top_m_for_one_iteration(
const idi iter,
idi &k_uc,
const idi value_M,
const idi query_id,
const dataf *query_data,
const idi L,
std::vector<Candidate> &set_L,
const idi set_L_start,
idi &set_L_size,
std::vector<idi> &top_m_candidates,
boost::dynamic_bitset<> &is_visited,
uint64_t &count_distance_computation)
{
// Select M candidates
idi top_m_candidates_end = 0;
idi last_k = L;
// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition.
for (idi c_i = k_uc; c_i < set_L_size && top_m_candidates_end < value_M; ++c_i) {
idi index_set_L = c_i + set_L_start;
if (set_L[index_set_L].is_checked_) {
continue;
}
last_k = c_i; // Record the location of the last candidate selected.
set_L[index_set_L].is_checked_ = true;
top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_;
}
idi nk = L; // Lowest insertion position seen in this iteration.
// Push M candidates' neighbors into the queue.
for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
idi cand_id = top_m_candidates[c_i];
_mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
// The adjacency list sits after the vertex's data payload:
// an out-degree count followed by the neighbor ids.
idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
idi out_degree = *out_edges++;
// Prefetch each neighbor's record before the distance loop.
for (idi n_i = 0; n_i < out_degree; ++n_i) {
_mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
}
for (idi e_i = 0; e_i < out_degree; ++e_i) {
idi nb_id = out_edges[e_i];
{ // Sequential edition
if (is_visited[nb_id]) {
continue;
}
is_visited[nb_id] = 1;
}
// The vertex record begins with its precomputed norm, then the data.
auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
dataf norm = *nb_data++;
++count_distance_computation;
distf dist = compute_distance_with_norm(nb_data, query_data, norm);
// Prune: farther than the queue's current last element.
// NOTE(review): prunes against the worst element even when the queue
// is not yet full (set_L_size < L) — confirm intended.
if (dist > set_L[set_L_size - 1 + set_L_start].distance_) {
continue;
}
Candidate cand(nb_id, dist, false);
idi r = add_into_queue(
set_L,
set_L_start,
set_L_size,
L,
cand);
if (r < nk) {
nk = r;
}
}
}
// top_m_candidates_end = 0; // Clear top_m_candidates
// Resume from the lowest newly-inserted position, or just past the last
// candidate selected this round.
if (nk <= last_k) {
k_uc = nk;
} else {
k_uc = last_k + 1;
}
}
/*
* 7/31/2020-12:48
* Use for profile. Sequential Double-M.
*/
/*
 * Sequential doubling-M search (used for profiling): initialize global_L
 * candidates from init_ids, sort them, run the sequential top-M subsearch,
 * and copy the best K ids into set_K.
 */
inline void Searching::seq_search_with_top_m_double_m(
const idi M_max,
const idi query_id,
const idi K,
const idi global_L,
std::vector<Candidate> &set_L,
const std::vector<idi> &init_ids,
std::vector<idi> &set_K)
//        std::vector<idi> &top_m_candidates,
//        boost::dynamic_bitset<> &is_visited)
{
//    time_initialization_ -= WallTimer::get_time_mark();
std::vector<idi> top_m_candidates(M_max);
boost::dynamic_bitset<> is_visited(num_v_);
uint64_t tmp_count_computation = 0;
idi set_L_size;
{// Initialization
// is_visited flag array
//#pragma omp parallel for
// Cannot use OMP for bit array is_visited!
for (idi c_i = 0; c_i < global_L; ++c_i) {
is_visited[init_ids[c_i]] = 1;
}
const dataf *query_data = queries_load_ + query_id * dimension_;
//#pragma omp parallel for
// Warm the cache for each initial vertex's record.
for (idi v_i = 0; v_i < global_L; ++v_i) {
idi v_id = init_ids[v_i];
_mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
}
// Get the distances of all candidates, store in the set set_L.
//#pragma omp parallel for reduction(+ : tmp_count_computation)
for (idi id_i = 0; id_i < global_L; ++id_i) {
idi v_id = init_ids[id_i];
// The vertex record begins with its precomputed norm, then the data.
auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
dataf norm = *v_data++;
++tmp_count_computation;
distf dist = compute_distance_with_norm(v_data, query_data, norm);
set_L[id_i] = Candidate(v_id, dist, false); // False means not checked.
}
set_L_size = global_L;
count_distance_computation_ += tmp_count_computation;
tmp_count_computation = 0;
std::sort(set_L.begin(), set_L.begin() + global_L);
}
//    time_initialization_ += WallTimer::get_time_mark();
// Searching
subsearch_with_top_m(
M_max,
query_id,
global_L,
set_L,
0,
set_L_size,
top_m_candidates,
is_visited,
tmp_count_computation);
count_distance_computation_ += tmp_count_computation;
tmp_count_computation = 0;
//    time_merge_ -= WallTimer::get_time_mark();
//    time_ending_ -= WallTimer::get_time_mark();
//    time_merge_ += WallTimer::get_time_mark();
{// Copy the K closest candidate ids into the output.
for (idi k_i = 0; k_i < K; ++k_i) {
set_K[k_i] = set_L[k_i].id_;
//            set_K[k_i] = set_L[k_i].id_;
}
}
//    {// Reset
////        std::fill(is_visited.begin(), is_visited.end(), 0);
//        is_visited.reset();
//    }
//    time_ending_ += WallTimer::get_time_mark();
//    {//test
//        if (3 == query_id) {
//            exit(1);
//        }
//    }
}
/*
* 7/29/2020-17:26
* The same procedure with Middle-M, but do hierarchical merging to reduce merging frequency.
* Right now there are only 3 levels (1 middle level). And 4 workers form a group.
*/
/*
 * Hierarchical-merge parallel search (Middle-M variant with 3 levels).
 * Workers are organized into groups of `group_size` (4); each group keeps a
 * local master queue, and the last thread's queue is the global master.
 * Full merges to the global master happen only every `full_merge_freq`
 * parallel iterations to reduce synchronization.
 *
 * Fix: the final cleanup merge (when the loop exits without a full merge)
 * previously passed `local_queues_sizes` as the `local_queues_starts`
 * argument of merge_all_queues_to_master, indexing set_L at size values
 * instead of queue start offsets. It now passes the start offsets.
 */
inline void Searching::para_search_with_top_m_hierarchy_merge_v0(
const idi value_M_middle,
const idi value_M_max,
const idi query_id,
const idi K,
const idi L,
std::vector<Candidate> &set_L,
const std::vector<idi> &init_ids,
std::vector<idi> &set_K,
const idi local_queue_capacity, // Maximum size of local queue
const idi local_master_queue_capacity, // Maximum size of local master queue
//        const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length;
const std::vector<idi> &local_queues_starts,
std::vector<idi> &local_queues_sizes, // Sizes of local queue
//        std::vector< std::vector<idi> > &top_m_candidates_list, // every group has one top-M queue
std::vector<idi> &top_m_candidates,
const std::vector<idi> &top_m_candidates_starts,
std::vector<idi> &top_m_candidates_sizes,
//        std::vector<idi> &top_m_candidates,
boost::dynamic_bitset<> &is_visited,
const idi group_size, // Should be 4
const idi full_merge_freq)
{
//    time_initialization_ -= WallTimer::get_time_mark();
//    const idi base_set_L = (num_threads_ - 1) * local_queue_length;
const idi master_queue_start = local_queues_starts[num_threads_ - 1];
const idi num_groups = (num_threads_ - 1) / group_size + 1; // 4 workers per group.
const dataf *query_data = queries_load_ + query_id * dimension_;
// Initialization Phase: compute distances for the initial candidates and
// sort them into the global master queue.
{
//#pragma omp parallel for
for (idi c_i = 0; c_i < L; ++c_i) {
is_visited[init_ids[c_i]] = 1;
}
//#pragma omp parallel for
//        for (idi v_i = 0; v_i < L; ++v_i) {
//            idi v_id = init_ids[v_i];
//            _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
//        }
// Get the distances of all candidates, store in the set set_L.
uint64_t tmp_count_computation = 0;
#pragma omp parallel for reduction(+ : tmp_count_computation)
for (unsigned i = 0; i < L; i++) {
unsigned v_id = init_ids[i];
auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
dataf norm = *v_data++;
++tmp_count_computation;
distf dist = compute_distance_with_norm(v_data, query_data, norm);
set_L[i + master_queue_start] = Candidate(v_id, dist, false); // False means not checked.
}
count_distance_computation_ += tmp_count_computation;
//        tmp_count_computation = 0;
std::sort(
set_L.begin() + master_queue_start,
set_L.begin() + master_queue_start + L);
local_queues_sizes[num_threads_ - 1] = L;
} // Initialization Phase
//    time_initialization_ += WallTimer::get_time_mark();
//    idi top_m_candidates_end = 0;
idi iter = 0; // for debug
idi M = 1;
idi k = 0; // Index of first unchecked candidate.
// Sequential Phase: warm up single-threaded until M reaches value_M_middle.
{
uint64_t tmp_count_computation = 0;
while (k < L && M < value_M_middle) {
++iter;
subsearch_top_m_for_one_iteration(
iter,
k,
M,
query_id,
query_data,
L,
set_L,
master_queue_start,
local_queues_sizes[num_threads_ - 1],
top_m_candidates,
is_visited,
tmp_count_computation);
count_distance_computation_ += tmp_count_computation;
tmp_count_computation = 0;
{// Double M
if (M < value_M_max) {
M <<= 1U;
}
}
}
} // Sequential Phase
//    if (M < static_cast<idi>(num_threads_)) {
//        M = num_threads_;
//    }
// Parallel Phase
idi para_iter = 0;
if (num_threads_ <= 4) {
// Flat (single-level) parallelism: every thread expands candidates
// into its own queue, then all queues are merged each iteration.
idi top_m_candidates_size = 0;
idi last_k;
idi nk;
uint64_t tmp_count_computation = 0;
while (true) {
//        while (k < L) {
++iter;
last_k = L;
// Pick top-M
for (idi c_i = k; c_i < L && top_m_candidates_size < M; ++c_i) {
idi index_set_L = c_i + master_queue_start;
if (set_L[index_set_L].is_checked_) {
continue;
}
last_k = c_i; // Record the location of the last candidate selected.
set_L[index_set_L].is_checked_ = true;
top_m_candidates[top_m_candidates_size++] = set_L[index_set_L].id_;
}
if (!top_m_candidates_size) {
break; // No unchecked candidate left: converged.
}
//            time_pick_top_m_ += WallTimer::get_time_mark();
nk = L;
// Push M candidates' neighbors into the queue.
#pragma omp parallel for reduction(+ : tmp_count_computation)
for (idi c_i = 0; c_i < top_m_candidates_size; ++c_i) {
int tid = omp_get_thread_num();
idi local_queue_start = local_queues_starts[tid];
idi &local_queue_size = local_queues_sizes[tid];
idi cand_id = top_m_candidates[c_i];
if (num_threads_ - 1 != tid) {
expand_one_candidate(
cand_id,
query_data,
set_L[master_queue_start + L - 1].distance_,
set_L,
local_queue_start,
local_queue_size,
local_queue_capacity,
is_visited,
tmp_count_computation);
} else {
// The last thread writes directly into the master queue.
idi r = expand_one_candidate(
cand_id,
query_data,
set_L[master_queue_start + L - 1].distance_,
set_L,
local_queue_start,
local_queue_size,
L,
is_visited,
tmp_count_computation);
if (r < nk) {
nk = r;
}
}
}
top_m_candidates_size = 0; // Clear top_m_candidates
count_distance_computation_ += tmp_count_computation;
tmp_count_computation = 0;
//        // Merge. Merge all queues in parallel.
{
//            time_merge_ -= WallTimer::get_time_mark();
if (num_threads_ > 1) {
idi r = merge_all_queues_para_array(
set_L,
local_queues_sizes,
local_queue_capacity,
L);
if (r < nk) {
nk = r;
}
}
}
// Resume from the lowest newly-inserted position, or just past the
// last candidate selected this round.
if (nk <= last_k) {
k = nk;
} else {
k = last_k + 1;
}
{// Scale M
if (M < value_M_max) {
M <<= 1U;
}
}
}
} else { // 8 threads
// Hierarchical (two-level) parallelism: groups expand independently;
// full merges to the global master happen every full_merge_freq rounds.
bool is_finished = false;
bool is_full_merged = true;
idi M_group;
std::vector<idi> ks(num_groups, 0);
ks[num_groups - 1] = k;
std::vector<idi> nks(num_groups);
std::vector<idi> last_ks(num_groups);
uint64_t tmp_count_distance_computation = 0;
//        bool is_finished = false;
while (!is_finished) {
++para_iter;
++iter;
M_group = M / num_groups;
is_finished = true;
if (1 == para_iter || (para_iter - 1) % full_merge_freq) {
// Initialize every group's top-M candidates from the global Master queue
master_top_m_to_groups(
set_L,
local_queues_starts,
local_queues_sizes,
top_m_candidates,
top_m_candidates_starts,
top_m_candidates_sizes,
ks[num_groups - 1],
last_ks[num_groups - 1],
M,
num_groups);
}
#pragma omp parallel for num_threads(num_groups) \
                    reduction(+ : tmp_count_distance_computation)
for (idi g_i = 0; g_i < num_groups; ++g_i) {
const idi local_master_queue_id = g_i * group_size + group_size - 1;
const idi local_master_queue_start = local_queues_starts[local_master_queue_id];
idi &local_master_queue_size = local_queues_sizes[local_master_queue_id];
idi &k_uc = ks[g_i];
const idi top_m_candidates_start = top_m_candidates_starts[g_i];
idi &top_m_candidates_size = top_m_candidates_sizes[g_i];
idi &last_k = last_ks[g_i];
// Pick top-M from the group's local master queue (only right
// after a full merge round).
if (1 != para_iter && 0 == (para_iter - 1) % full_merge_freq) {
//                if ((para_iter - 1) % full_merge_freq) {
last_k = L;
for (idi c_i = k_uc; c_i < local_master_queue_size && top_m_candidates_size < M_group; ++c_i) {
idi index_set_L = c_i + local_master_queue_start;
if (set_L[index_set_L].is_checked_) {
continue;
}
last_k = c_i; // Record the location of the last candidate selected.
set_L[index_set_L].is_checked_ = true;
top_m_candidates[top_m_candidates_start + top_m_candidates_size++] = set_L[index_set_L].id_;
}
}
if (!top_m_candidates_size) {
continue;
}
is_finished = false;
idi &nk = nks[g_i];
nk = L;
idi c_i_start = top_m_candidates_starts[g_i];
idi c_i_bound = c_i_start + top_m_candidates_size;
uint64_t tmp_count_distance_computation_ig = 0;
// Expand top-M (nested parallelism within the group).
#pragma omp parallel for num_threads(group_size) \
                        reduction(+ : tmp_count_distance_computation_ig)
for (idi c_i = c_i_start; c_i < c_i_bound; ++c_i) {
idi tid_ig = omp_get_thread_num();
idi q_id = g_i * group_size + tid_ig;
idi cand_id = top_m_candidates[c_i];
//                    _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
idi out_degree = *out_edges++;
//                    for (idi n_i = 0; n_i < out_degree; ++n_i) {
//                        _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
//                    }
for (idi e_i = 0; e_i < out_degree; ++e_i) {
idi nb_id = out_edges[e_i];
{ // Sequential edition
if (is_visited[nb_id]) {
continue;
}
is_visited[nb_id] = 1;
}
auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
dataf norm = *nb_data++;
++tmp_count_distance_computation_ig;
distf dist = compute_distance_with_norm(nb_data, query_data, norm);
if (local_master_queue_size == local_master_queue_capacity
&& dist > set_L[local_master_queue_size - 1 + local_master_queue_start].distance_) {
continue;
}
Candidate cand(nb_id, dist, false);
// Add to the local queue.
if (0 != tid_ig) {
// Non-Master threads using local queues
add_into_queue(
set_L,
local_queues_starts[q_id - 1],
local_queues_sizes[q_id - 1],
local_queue_capacity,
cand);
} else if (num_groups - 1 != g_i) {
// Thread 0 but not the last group maintains the local master queue
idi r = add_into_queue(
set_L,
local_master_queue_start,
local_master_queue_size,
local_master_queue_capacity,
cand);
if (r < nk) {
nk = r;
}
} else {
// Thread 0 and the last group maintains the master queue
idi r = add_into_queue(
set_L,
local_master_queue_start,
local_master_queue_size,
L,
cand);
if (r < nk) {
nk = r;
}
}
}
} // Expand in a group
tmp_count_distance_computation += tmp_count_distance_computation_ig;
top_m_candidates_size = 0;
// Merge in a group
if (0 == (para_iter % full_merge_freq)) {
idi r;
if (num_groups - 1 != g_i) {
// Normal group
r = merge_queues_of_four(
set_L,
local_queues_starts,
local_queues_sizes,
g_i,
local_queue_capacity,
local_master_queue_capacity);
} else {
// The group contains the master queue
r = merge_queues_of_four(
set_L,
local_queues_starts,
local_queues_sizes,
g_i,
local_queue_capacity,
L);
}
if (r < nk) {
nk = r;
}
if (nk <= last_k) {
k_uc = nk;
} else {
k_uc = last_k + 1;
}
}
} // Middle Level Parallelism
count_distance_computation_ += tmp_count_distance_computation;
tmp_count_distance_computation = 0;
// Do full merge and distribute
if (!is_finished && para_iter % full_merge_freq) {
// Full merge
idi r = merge_all_queues_to_master(
set_L,
local_queues_starts,
local_queues_sizes,
local_queue_capacity,
local_master_queue_capacity,
L,
group_size);
is_full_merged = true;
idi &nk = nks[num_groups - 1];
idi &k_uc = ks[num_groups - 1];
idi &last_k = last_ks[num_groups - 1];
if (r < nk) {
nk = r;
}
if (nk <= last_k) {
k_uc = nk;
} else {
k_uc = last_k + 1;
}
} else {
is_full_merged = false;
}
{// Scale M
if (M < value_M_max) {
M <<= 1U;
}
}
} // Iteration
// The loop may exit with partial results still in the local queues;
// collect them into the global master queue.
if (!is_full_merged) {
merge_all_queues_to_master(
set_L,
local_queues_starts, // Fixed: was local_queues_sizes, which passed queue sizes as start offsets.
local_queues_sizes,
local_queue_capacity,
local_master_queue_capacity,
L,
group_size);
}
}
// Copy the K closest candidate ids into the output.
#pragma omp parallel for
for (idi k_i = 0; k_i < K; ++k_i) {
set_K[k_i] = set_L[k_i + master_queue_start].id_;
//        set_K[k_i] = set_L[k_i].id_;
}
{// Reset
//        std::fill(is_visited.begin(), is_visited.end(), 0);
is_visited.reset();
//        is_visited.clear_all();
//        std::fill(local_queues_sizes.begin(), local_queues_sizes.end(), 0);
}
//    {//test
//        if (14 == query_id) {
//            exit(1);
//        }
//    }
}
/*
* Function: expand a candidate, visiting its neighbors.
* Return the lowest adding location.
*/
/*
 * Expand one candidate vertex: visit every not-yet-visited out-neighbor,
 * compute its distance to the query, and insert it into the given queue
 * when the distance does not exceed dist_bound.
 * Returns the lowest insertion location observed, or local_queue_capacity
 * when nothing was inserted.
 */
inline idi Searching::expand_one_candidate(
        idi cand_id,
        const dataf *query_data,
        const distf &dist_bound,
        std::vector<Candidate> &set_L,
        const idi local_queue_start,
        idi &local_queue_size,
        const idi &local_queue_capacity,
        boost::dynamic_bitset<> &is_visited,
//        const idi nk_init,
        uint64_t &local_count_computation)
{
    // The adjacency list sits after the vertex's data payload:
    // an out-degree count followed by the neighbor ids.
    idi *neighbors = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
    const idi degree = *neighbors++;
    idi lowest_insert = local_queue_capacity;
    for (idi n_i = 0; n_i < degree; ++n_i) {
        const idi ngb_id = neighbors[n_i];
        if (is_visited[ngb_id]) {
            continue; // Already handled for this query.
        }
        is_visited[ngb_id] = 1;
        // The vertex record begins with its precomputed norm, then the data.
        auto *ngb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + ngb_id * vertex_bytes_);
        const dataf ngb_norm = *ngb_data++;
        ++local_count_computation;
        const distf ngb_dist = compute_distance_with_norm(ngb_data, query_data, ngb_norm);
        if (ngb_dist > dist_bound) {
            continue; // Too far to enter the result set.
        }
        Candidate new_cand(ngb_id, ngb_dist, false);
        // Insert into the queue and keep the lowest insertion position.
        const idi loc = add_into_queue(
                set_L,
                local_queue_start,
                local_queue_size,
                local_queue_capacity,
                new_cand);
        if (loc < lowest_insert) {
            lowest_insert = loc;
        }
    }
    return lowest_insert;
}
/*
* 8/6/2020-11:58
* Based on Middle-4, but reduce full merge frequency.
* Actually, this is local Searching, not Less Synchronization.
*/
inline void Searching::para_search_with_top_m_less_sync_v0(
const idi value_M_middle,
const idi value_M_max,
const idi query_id,
const idi K,
const idi L,
std::vector<Candidate> &set_L,
const std::vector<idi> &init_ids,
std::vector<idi> &set_K,
const idi local_queue_capacity, // Maximum size of local queue
const std::vector<idi> &local_queues_starts,
std::vector<idi> &local_queues_sizes, // Sizes of local queue
std::vector<idi> &top_m_candidates,
boost::dynamic_bitset<> &is_visited,
const idi full_merge_freq,
const idi local_iter_bound)
{
const idi master_queue_start = local_queues_starts[num_threads_ - 1];
const dataf *query_data = queries_load_ + query_id * dimension_;
// Initialization Phase
{
//#pragma omp parallel for
for (idi c_i = 0; c_i < L; ++c_i) {
is_visited[init_ids[c_i]] = 1;
}
//#pragma omp parallel for
// for (idi v_i = 0; v_i < L; ++v_i) {
// idi v_id = init_ids[v_i];
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
// }
// Get the distances of all candidates, store in the set set_L.
uint64_t tmp_count_computation = 0;
#pragma omp parallel for reduction(+ : tmp_count_computation)
for (unsigned i = 0; i < L; i++) {
unsigned v_id = init_ids[i];
auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
dataf norm = *v_data++;
++tmp_count_computation;
distf dist = compute_distance_with_norm(v_data, query_data, norm);
set_L[i + master_queue_start] = Candidate(v_id, dist, false); // False means not checked.
}
count_distance_computation_ += tmp_count_computation;
// tmp_count_computation = 0;
std::sort(
set_L.begin() + master_queue_start,
set_L.begin() + master_queue_start + L);
local_queues_sizes[num_threads_ - 1] = L;
} // Initialization Phase
idi iter = 0; // for debug
idi M = 1;
idi k = 0; // Index of first unchecked candidate.
// Sequential Phase
{
uint64_t tmp_count_computation = 0;
while (k < L && M < value_M_middle) {
++iter;
subsearch_top_m_for_one_iteration(
iter,
k,
M,
query_id,
query_data,
L,
set_L,
master_queue_start,
local_queues_sizes[num_threads_ - 1],
top_m_candidates,
is_visited,
tmp_count_computation);
count_distance_computation_ += tmp_count_computation;
tmp_count_computation = 0;
{// Double M
if (M < value_M_max) {
M <<= 1U;
}
}
}
} // Sequential Phase
// if (M < static_cast<idi>(num_threads_)) {
// M = num_threads_;
// }
// Parallel Phase
idi para_iter = 0;
idi top_m_candidates_size = 0;
idi last_k;
idi nk;
uint64_t tmp_count_computation = 0;
while (true) {
// while (k < L) {
++para_iter;
++iter;
last_k = L;
// Pick top-M
for (idi c_i = k; c_i < L && top_m_candidates_size < M; ++c_i) {
idi index_set_L = c_i + master_queue_start;
if (set_L[index_set_L].is_checked_) {
continue;
}
last_k = c_i; // Record the location of the last candidate selected.
set_L[index_set_L].is_checked_ = true;
top_m_candidates[top_m_candidates_size++] = set_L[index_set_L].id_;
}
if (!top_m_candidates_size) {
break;
}
nk = L;
// Expand
//#pragma omp parallel for reduction(+ : tmp_count_computation)
#pragma omp parallel reduction(+ : tmp_count_computation)
{
#pragma omp for nowait
for (idi c_i = 0; c_i < top_m_candidates_size; ++c_i) {
int tid = omp_get_thread_num();
idi local_queue_start = local_queues_starts[tid];
idi &local_queue_size = local_queues_sizes[tid];
idi cand_id = top_m_candidates[c_i];
if (num_threads_ - 1 != tid) {
expand_one_candidate(
cand_id,
query_data,
set_L[master_queue_start + L - 1].distance_,
set_L,
local_queue_start,
local_queue_size,
local_queue_capacity,
is_visited,
tmp_count_computation);
} else {
idi r = expand_one_candidate(
cand_id,
query_data,
set_L[master_queue_start + L - 1].distance_,
set_L,
local_queue_start,
local_queue_size,
L,
is_visited,
tmp_count_computation);
if (r < nk) {
nk = r;
}
}
} // Expand
if (0 == (para_iter % full_merge_freq)) {
// Local search iterations
int q_i = omp_get_thread_num();
idi local_queue_start = local_queues_starts[q_i];
idi &local_queue_size = local_queues_sizes[q_i];
const idi queue_capacity = (num_threads_ - 1 != q_i) ? local_queue_capacity : L;
idi tmp_k;
if (num_threads_ - 1 != q_i) {
tmp_k = 0;
} else {
if (nk <= last_k) {
tmp_k = nk;
} else {
tmp_k = last_k + 1;
}
}
// if (tmp_k >= local_queue_size) {
// continue;
// }
idi i_t = 0;
idi cand_id;
while (tmp_k < local_queue_size) {
idi r;
if (!set_L[local_queue_start + tmp_k].is_checked_) {
set_L[local_queue_start + tmp_k].is_checked_ = true;
cand_id = set_L[local_queue_start + tmp_k].id_;
// Expand
r = expand_one_candidate(
cand_id,
query_data,
set_L[master_queue_start + L - 1].distance_,
set_L,
local_queue_start,
local_queue_size,
queue_capacity,
is_visited,
tmp_count_computation);
if (++i_t == local_iter_bound) {
break;
}
} else {
r = queue_capacity;
}
if (r <= tmp_k) {
tmp_k = r;
} else {
++tmp_k;
}
}
} // Local Search
} // OMP Parallel Construct
top_m_candidates_size = 0; // Clear top_m_candidates
count_distance_computation_ += tmp_count_computation;
tmp_count_computation = 0;
// // Merge. Merge all queues in parallel.
{
// time_merge_ -= WallTimer::get_time_mark();
if (num_threads_ > 1) {
idi r = merge_all_queues_para_array(
set_L,
local_queues_sizes,
local_queue_capacity,
L);
if (r < nk) {
nk = r;
}
}
}
if (nk <= last_k) {
k = nk;
} else {
k = last_k + 1;
}
{// Scale M
if (M < value_M_max) {
M <<= 1U;
}
}
}
#pragma omp parallel for
for (idi k_i = 0; k_i < K; ++k_i) {
set_K[k_i] = set_L[k_i + master_queue_start].id_;
// set_K[k_i] = set_L[k_i].id_;
}
{// Reset
// std::fill(is_visited.begin(), is_visited.end(), 0);
is_visited.reset();
// is_visited.clear_all();
// std::fill(local_queues_sizes.begin(), local_queues_sizes.end(), 0);
}
// {//test
// if (14 == query_id) {
// exit(1);
// }
// }
}
} // namespace PANNS
#endif //BATCH_SEARCHING_SEARCHING_H
|
compare.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% CCCC OOO M M PPPP AAA RRRR EEEEE %
% C O O MM MM P P A A R R E %
% C O O M M M PPPP AAAAA RRRR EEE %
% C O O M M P A A R R E %
% CCCC OOO M M P A A R R EEEEE %
% %
% %
% MagickCore Image Comparison Methods %
% %
% Software Design %
% Cristy %
% December 2003 %
% %
% %
% Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/artifact.h"
#include "magick/cache-view.h"
#include "magick/channel.h"
#include "magick/client.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/compare.h"
#include "magick/composite-private.h"
#include "magick/constitute.h"
#include "magick/exception-private.h"
#include "magick/geometry.h"
#include "magick/image-private.h"
#include "magick/list.h"
#include "magick/log.h"
#include "magick/memory_.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/option.h"
#include "magick/pixel-private.h"
#include "magick/property.h"
#include "magick/resource_.h"
#include "magick/string_.h"
#include "magick/string-private.h"
#include "magick/statistic.h"
#include "magick/thread-private.h"
#include "magick/transform.h"
#include "magick/utility.h"
#include "magick/version.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o m p a r e I m a g e C h a n n e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CompareImageChannels() compares one or more image channels of an image
% to a reconstructed image and returns the difference image.
%
% The format of the CompareImageChannels method is:
%
% Image *CompareImageChannels(const Image *image,
% const Image *reconstruct_image,const ChannelType channel,
% const MetricType metric,double *distortion,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o reconstruct_image: the reconstruct image.
%
% o channel: the channel.
%
% o metric: the metric.
%
% o distortion: the computed distortion between the images.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  CompareImages() compares two images over the default composite channel
  set and returns the difference image (or NULL on failure).  It is a thin
  convenience wrapper that delegates to CompareImageChannels() with
  CompositeChannels.
*/
MagickExport Image *CompareImages(Image *image,const Image *reconstruct_image,
  const MetricType metric,double *distortion,ExceptionInfo *exception)
{
  return(CompareImageChannels(image,reconstruct_image,CompositeChannels,
    metric,distortion,exception));
}
/*
  Count how many of the requested channels are significant for this image:
  red/green/blue count whenever selected, opacity only when the image has a
  matte channel, and the index (black) channel only for CMYK images.  An
  empty selection is reported as one channel so callers can safely divide
  by the result.
*/
static size_t GetNumberChannels(const Image *image,const ChannelType channel)
{
  size_t
    count;

  count=(size_t) ((((channel & RedChannel) != 0) ? 1 : 0)+
    (((channel & GreenChannel) != 0) ? 1 : 0)+
    (((channel & BlueChannel) != 0) ? 1 : 0));
  if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse))
    count++;
  if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace))
    count++;
  return(count == 0 ? 1UL : count);
}
/*
  Return MagickTrue when the two images have comparable morphology, i.e.
  they expose the same number of default channels.
*/
static inline MagickBooleanType ValidateImageMorphology(
  const Image *magick_restrict image,
  const Image *magick_restrict reconstruct_image)
{
  size_t
    channels,
    reconstruct_channels;

  channels=GetNumberChannels(image,DefaultChannels);
  reconstruct_channels=GetNumberChannels(reconstruct_image,DefaultChannels);
  return(channels == reconstruct_channels ? MagickTrue : MagickFalse);
}
/*
  CompareImageChannels() computes the requested distortion metric between
  'image' and 'reconstruct_image' (stored in *distortion) and, on success,
  returns a difference image in which pixels that differ beyond the fuzz
  threshold are painted with the highlight color and matching pixels with
  the lowlight color.  The colors default to "#f1001ecc"/"#ffffffcc" and
  may be overridden via the "compare:highlight-color" and
  "compare:lowlight-color" image artifacts.  Returns NULL on failure;
  errors are reported through 'exception'.

  Fix: the alpha weight Da for the reconstruct pixel 'q' now tests
  reconstruct_image->matte (previously image->matte), matching
  GetFuzzDistortion() and the other metric helpers in this file; the old
  test mis-weighted differences when only one of the two images carries an
  alpha channel.
*/
MagickExport Image *CompareImageChannels(Image *image,
  const Image *reconstruct_image,const ChannelType channel,
  const MetricType metric,double *distortion,ExceptionInfo *exception)
{
  CacheView
    *highlight_view,
    *image_view,
    *reconstruct_view;

  const char
    *artifact;

  double
    fuzz;

  Image
    *clone_image,
    *difference_image,
    *highlight_image;

  MagickBooleanType
    status;

  MagickPixelPacket
    highlight,
    lowlight,
    zero;

  size_t
    columns,
    rows;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(reconstruct_image != (const Image *) NULL);
  assert(reconstruct_image->signature == MagickCoreSignature);
  assert(distortion != (double *) NULL);
  *distortion=0.0;
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Perceptual hash tolerates differing morphology; all other metrics
    require matching channel counts.
  */
  if (metric != PerceptualHashErrorMetric)
    if (ValidateImageMorphology(image,reconstruct_image) == MagickFalse)
      ThrowImageException(ImageError,"ImageMorphologyDiffers");
  status=GetImageChannelDistortion(image,reconstruct_image,channel,metric,
    distortion,exception);
  if (status == MagickFalse)
    return((Image *) NULL);
  /*
    Clone the image without its mask as the base of the difference image.
  */
  clone_image=CloneImage(image,0,0,MagickTrue,exception);
  if (clone_image == (Image *) NULL)
    return((Image *) NULL);
  (void) SetImageMask(clone_image,(Image *) NULL);
  difference_image=CloneImage(clone_image,0,0,MagickTrue,exception);
  clone_image=DestroyImage(clone_image);
  if (difference_image == (Image *) NULL)
    return((Image *) NULL);
  (void) SetImageAlphaChannel(difference_image,OpaqueAlphaChannel);
  /*
    The highlight image spans the larger of the two geometries.
  */
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  highlight_image=CloneImage(image,columns,rows,MagickTrue,exception);
  if (highlight_image == (Image *) NULL)
    {
      difference_image=DestroyImage(difference_image);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(highlight_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&highlight_image->exception);
      difference_image=DestroyImage(difference_image);
      highlight_image=DestroyImage(highlight_image);
      return((Image *) NULL);
    }
  (void) SetImageMask(highlight_image,(Image *) NULL);
  (void) SetImageAlphaChannel(highlight_image,OpaqueAlphaChannel);
  /*
    Resolve highlight/lowlight colors; image artifacts override defaults.
  */
  (void) QueryMagickColor("#f1001ecc",&highlight,exception);
  artifact=GetImageArtifact(image,"compare:highlight-color");
  if (artifact != (const char *) NULL)
    (void) QueryMagickColor(artifact,&highlight,exception);
  (void) QueryMagickColor("#ffffffcc",&lowlight,exception);
  artifact=GetImageArtifact(image,"compare:lowlight-color");
  if (artifact != (const char *) NULL)
    (void) QueryMagickColor(artifact,&lowlight,exception);
  if (highlight_image->colorspace == CMYKColorspace)
    {
      ConvertRGBToCMYK(&highlight);
      ConvertRGBToCMYK(&lowlight);
    }
  /*
    Generate difference image.
  */
  status=MagickTrue;
  fuzz=GetFuzzyColorDistance(image,reconstruct_image);
  GetMagickPixelPacket(image,&zero);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
  highlight_view=AcquireAuthenticCacheView(highlight_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,highlight_image,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    MagickBooleanType
      sync;

    MagickPixelPacket
      pixel,
      reconstruct_pixel;

    const IndexPacket
      *magick_restrict indexes,
      *magick_restrict reconstruct_indexes;

    const PixelPacket
      *magick_restrict p,
      *magick_restrict q;

    IndexPacket
      *magick_restrict highlight_indexes;

    PixelPacket
      *magick_restrict r;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    r=QueueCacheViewAuthenticPixels(highlight_view,0,y,columns,1,exception);
    if ((p == (const PixelPacket *) NULL) ||
        (q == (const PixelPacket *) NULL) || (r == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    reconstruct_indexes=GetCacheViewVirtualIndexQueue(reconstruct_view);
    highlight_indexes=GetCacheViewAuthenticIndexQueue(highlight_view);
    pixel=zero;
    reconstruct_pixel=zero;
    for (x=0; x < (ssize_t) columns; x++)
    {
      MagickStatusType
        difference;

      SetMagickPixelPacket(image,p,indexes+x,&pixel);
      SetMagickPixelPacket(reconstruct_image,q,reconstruct_indexes+x,
        &reconstruct_pixel);
      difference=MagickFalse;
      if (channel == CompositeChannels)
        {
          /*
            Composite comparison honors the image fuzz factor.
          */
          if (IsMagickColorSimilar(&pixel,&reconstruct_pixel) == MagickFalse)
            difference=MagickTrue;
        }
      else
        {
          double
            Da,
            distance,
            pixel,  /* intentionally shadows the MagickPixelPacket above */
            Sa;

          /*
            Alpha weights; treat a channel-less image as fully opaque.
          */
          Sa=QuantumScale*(image->matte != MagickFalse ? GetPixelAlpha(p) :
            (QuantumRange-OpaqueOpacity));
          Da=QuantumScale*(reconstruct_image->matte != MagickFalse ?
            GetPixelAlpha(q) : (QuantumRange-OpaqueOpacity));
          if ((channel & RedChannel) != 0)
            {
              pixel=Sa*GetPixelRed(p)-Da*GetPixelRed(q);
              distance=pixel*pixel;
              if (distance >= fuzz)
                difference=MagickTrue;
            }
          if ((channel & GreenChannel) != 0)
            {
              pixel=Sa*GetPixelGreen(p)-Da*GetPixelGreen(q);
              distance=pixel*pixel;
              if (distance >= fuzz)
                difference=MagickTrue;
            }
          if ((channel & BlueChannel) != 0)
            {
              pixel=Sa*GetPixelBlue(p)-Da*GetPixelBlue(q);
              distance=pixel*pixel;
              if (distance >= fuzz)
                difference=MagickTrue;
            }
          if (((channel & OpacityChannel) != 0) &&
              (image->matte != MagickFalse))
            {
              pixel=(double) GetPixelOpacity(p)-GetPixelOpacity(q);
              distance=pixel*pixel;
              if (distance >= fuzz)
                difference=MagickTrue;
            }
          if (((channel & IndexChannel) != 0) &&
              (image->colorspace == CMYKColorspace))
            {
              pixel=Sa*indexes[x]-Da*reconstruct_indexes[x];
              distance=pixel*pixel;
              if (distance >= fuzz)
                difference=MagickTrue;
            }
        }
      if (difference != MagickFalse)
        SetPixelPacket(highlight_image,&highlight,r,highlight_indexes+x);
      else
        SetPixelPacket(highlight_image,&lowlight,r,highlight_indexes+x);
      p++;
      q++;
      r++;
    }
    sync=SyncCacheViewAuthenticPixels(highlight_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
  }
  highlight_view=DestroyCacheView(highlight_view);
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /*
    Blend the highlight onto the cloned canvas using the image's own
    compose operator.
  */
  (void) CompositeImage(difference_image,image->compose,highlight_image,0,0);
  highlight_image=DestroyImage(highlight_image);
  if (status == MagickFalse)
    difference_image=DestroyImage(difference_image);
  return(difference_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C h a n n e l D i s t o r t i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageChannelDistortion() compares one or more image channels of an image
% to a reconstructed image and returns the specified distortion metric.
%
% The format of the GetImageChannelDistortion method is:
%
% MagickBooleanType GetImageChannelDistortion(const Image *image,
% const Image *reconstruct_image,const ChannelType channel,
% const MetricType metric,double *distortion,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o reconstruct_image: the reconstruct image.
%
% o channel: the channel.
%
% o metric: the metric.
%
% o distortion: the computed distortion between the images.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  GetImageDistortion() reports the requested distortion metric between two
  images over the default composite channel set; it simply forwards to the
  channel-aware GetImageChannelDistortion().
*/
MagickExport MagickBooleanType GetImageDistortion(Image *image,
  const Image *reconstruct_image,const MetricType metric,double *distortion,
  ExceptionInfo *exception)
{
  return(GetImageChannelDistortion(image,reconstruct_image,CompositeChannels,
    metric,distortion,exception));
}
/*
  GetAbsoluteDistortion() counts, for each selected channel, the number of
  pixels whose alpha-weighted squared difference meets or exceeds the fuzz
  threshold; distortion[CompositeChannels] counts pixels that differ in any
  selected channel.  Returns MagickFalse if a pixel row cannot be fetched.

  Fix: the alpha weight Da for the reconstruct pixel 'q' now tests
  reconstruct_image->matte (previously image->matte), matching
  GetFuzzDistortion(), GetMeanAbsoluteDistortion() and the other metric
  helpers in this file; the old test mis-weighted differences when only one
  image carries an alpha channel.
*/
static MagickBooleanType GetAbsoluteDistortion(const Image *image,
  const Image *reconstruct_image,const ChannelType channel,double *distortion,
  ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  double
    fuzz;

  MagickBooleanType
    status;

  size_t
    columns,
    rows;

  ssize_t
    y;

  /*
    Compute the absolute difference in pixels between two images.
  */
  status=MagickTrue;
  fuzz=GetFuzzyColorDistance(image,reconstruct_image);
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    double
      channel_distortion[CompositeChannels+1];  /* per-row partial counts */

    const IndexPacket
      *magick_restrict indexes,
      *magick_restrict reconstruct_indexes;

    const PixelPacket
      *magick_restrict p,
      *magick_restrict q;

    ssize_t
      i,
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const PixelPacket *) NULL) || (q == (const PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    reconstruct_indexes=GetCacheViewVirtualIndexQueue(reconstruct_view);
    (void) memset(channel_distortion,0,sizeof(channel_distortion));
    for (x=0; x < (ssize_t) columns; x++)
    {
      double
        Da,
        distance,
        pixel,
        Sa;

      MagickBooleanType
        difference;

      difference=MagickFalse;
      /*
        Alpha weights; treat a channel-less image as fully opaque.
      */
      Sa=QuantumScale*(image->matte != MagickFalse ? GetPixelAlpha(p) :
        (QuantumRange-OpaqueOpacity));
      Da=QuantumScale*(reconstruct_image->matte != MagickFalse ?
        GetPixelAlpha(q) : (QuantumRange-OpaqueOpacity));
      if ((channel & RedChannel) != 0)
        {
          pixel=Sa*GetPixelRed(p)-Da*GetPixelRed(q);
          distance=pixel*pixel;
          if (distance >= fuzz)
            {
              channel_distortion[RedChannel]++;
              difference=MagickTrue;
            }
        }
      if ((channel & GreenChannel) != 0)
        {
          pixel=Sa*GetPixelGreen(p)-Da*GetPixelGreen(q);
          distance=pixel*pixel;
          if (distance >= fuzz)
            {
              channel_distortion[GreenChannel]++;
              difference=MagickTrue;
            }
        }
      if ((channel & BlueChannel) != 0)
        {
          pixel=Sa*GetPixelBlue(p)-Da*GetPixelBlue(q);
          distance=pixel*pixel;
          if (distance >= fuzz)
            {
              channel_distortion[BlueChannel]++;
              difference=MagickTrue;
            }
        }
      if (((channel & OpacityChannel) != 0) &&
          (image->matte != MagickFalse))
        {
          pixel=(double) GetPixelOpacity(p)-GetPixelOpacity(q);
          distance=pixel*pixel;
          if (distance >= fuzz)
            {
              channel_distortion[OpacityChannel]++;
              difference=MagickTrue;
            }
        }
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        {
          pixel=Sa*indexes[x]-Da*reconstruct_indexes[x];
          distance=pixel*pixel;
          if (distance >= fuzz)
            {
              channel_distortion[BlackChannel]++;
              difference=MagickTrue;
            }
        }
      if (difference != MagickFalse)
        channel_distortion[CompositeChannels]++;
      p++;
      q++;
    }
    /*
      Fold this row's counts into the shared totals.
    */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetAbsoluteDistortion)
#endif
    for (i=0; i <= (ssize_t) CompositeChannels; i++)
      distortion[i]+=channel_distortion[i];
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
  GetFuzzDistortion() accumulates, per channel, the mean squared
  alpha-weighted pixel difference between 'image' and 'reconstruct_image'
  in normalized [0..1] units; distortion[CompositeChannels] is reduced to
  an RMS value averaged over the selected channels.  Returns MagickFalse if
  any pixel row cannot be fetched.
*/
static MagickBooleanType GetFuzzDistortion(const Image *image,
  const Image *reconstruct_image,const ChannelType channel,
  double *distortion,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  MagickBooleanType
    status;

  ssize_t
    i;

  size_t
    columns,
    rows;

  ssize_t
    y;

  status=MagickTrue;
  /* Iterate over the larger of the two geometries. */
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    double
      channel_distortion[CompositeChannels+1];  /* per-row partial sums */

    const IndexPacket
      *magick_restrict indexes,
      *magick_restrict reconstruct_indexes;

    const PixelPacket
      *magick_restrict p,
      *magick_restrict q;

    ssize_t
      i,
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const PixelPacket *) NULL) || (q == (const PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    reconstruct_indexes=GetCacheViewVirtualIndexQueue(reconstruct_view);
    (void) memset(channel_distortion,0,sizeof(channel_distortion));
    for (x=0; x < (ssize_t) columns; x++)
    {
      MagickRealType
        distance,
        Da,
        Sa;

      /* Alpha weights; a channel-less image is treated as fully opaque. */
      Sa=QuantumScale*(image->matte != MagickFalse ? GetPixelAlpha(p) :
        (QuantumRange-OpaqueOpacity));
      Da=QuantumScale*(reconstruct_image->matte != MagickFalse ?
        GetPixelAlpha(q) : (QuantumRange-OpaqueOpacity));
      if ((channel & RedChannel) != 0)
        {
          distance=QuantumScale*(Sa*GetPixelRed(p)-Da*GetPixelRed(q));
          channel_distortion[RedChannel]+=distance*distance;
          channel_distortion[CompositeChannels]+=distance*distance;
        }
      if ((channel & GreenChannel) != 0)
        {
          distance=QuantumScale*(Sa*GetPixelGreen(p)-Da*GetPixelGreen(q));
          channel_distortion[GreenChannel]+=distance*distance;
          channel_distortion[CompositeChannels]+=distance*distance;
        }
      if ((channel & BlueChannel) != 0)
        {
          distance=QuantumScale*(Sa*GetPixelBlue(p)-Da*GetPixelBlue(q));
          channel_distortion[BlueChannel]+=distance*distance;
          channel_distortion[CompositeChannels]+=distance*distance;
        }
      /* Opacity participates when either image has a matte channel. */
      if (((channel & OpacityChannel) != 0) && ((image->matte != MagickFalse) ||
          (reconstruct_image->matte != MagickFalse)))
        {
          distance=QuantumScale*((image->matte != MagickFalse ?
            GetPixelOpacity(p) : OpaqueOpacity)-
            (reconstruct_image->matte != MagickFalse ?
            GetPixelOpacity(q): OpaqueOpacity));
          channel_distortion[OpacityChannel]+=distance*distance;
          channel_distortion[CompositeChannels]+=distance*distance;
        }
      /* Black channel requires both images to be CMYK. */
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace) &&
          (reconstruct_image->colorspace == CMYKColorspace))
        {
          distance=QuantumScale*(Sa*GetPixelIndex(indexes+x)-
            Da*GetPixelIndex(reconstruct_indexes+x));
          channel_distortion[BlackChannel]+=distance*distance;
          channel_distortion[CompositeChannels]+=distance*distance;
        }
      p++;
      q++;
    }
    /* Fold this row's partial sums into the shared totals. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetFuzzDistortion)
#endif
    for (i=0; i <= (ssize_t) CompositeChannels; i++)
      distortion[i]+=channel_distortion[i];
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /*
    Normalize to mean-squared error per pixel, then reduce the composite
    entry to RMS over the number of selected channels.
  */
  for (i=0; i <= (ssize_t) CompositeChannels; i++)
    distortion[i]/=((double) columns*rows);
  distortion[CompositeChannels]/=(double) GetNumberChannels(image,channel);
  distortion[CompositeChannels]=sqrt(distortion[CompositeChannels]);
  return(status);
}
/*
  GetMeanAbsoluteDistortion() accumulates, per channel, the mean absolute
  alpha-weighted pixel difference (normalized [0..1] units); the composite
  entry is averaged over the selected channels.  Returns MagickFalse if any
  pixel row cannot be fetched.
*/
static MagickBooleanType GetMeanAbsoluteDistortion(const Image *image,
  const Image *reconstruct_image,const ChannelType channel,
  double *distortion,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  MagickBooleanType
    status;

  ssize_t
    i;

  size_t
    columns,
    rows;

  ssize_t
    y;

  status=MagickTrue;
  /* Iterate over the larger of the two geometries. */
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    double
      channel_distortion[CompositeChannels+1];  /* per-row partial sums */

    const IndexPacket
      *magick_restrict indexes,
      *magick_restrict reconstruct_indexes;

    const PixelPacket
      *magick_restrict p,
      *magick_restrict q;

    ssize_t
      i,
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const PixelPacket *) NULL) || (q == (const PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    reconstruct_indexes=GetCacheViewVirtualIndexQueue(reconstruct_view);
    (void) memset(channel_distortion,0,sizeof(channel_distortion));
    for (x=0; x < (ssize_t) columns; x++)
    {
      MagickRealType
        distance,
        Da,
        Sa;

      /* Alpha weights; a channel-less image is treated as fully opaque. */
      Sa=QuantumScale*(image->matte != MagickFalse ? GetPixelAlpha(p) :
        (QuantumRange-OpaqueOpacity));
      Da=QuantumScale*(reconstruct_image->matte != MagickFalse ?
        GetPixelAlpha(q) : (QuantumRange-OpaqueOpacity));
      if ((channel & RedChannel) != 0)
        {
          distance=QuantumScale*fabs((double) (Sa*GetPixelRed(p)-Da*
            GetPixelRed(q)));
          channel_distortion[RedChannel]+=distance;
          channel_distortion[CompositeChannels]+=distance;
        }
      if ((channel & GreenChannel) != 0)
        {
          distance=QuantumScale*fabs((double) (Sa*GetPixelGreen(p)-Da*
            GetPixelGreen(q)));
          channel_distortion[GreenChannel]+=distance;
          channel_distortion[CompositeChannels]+=distance;
        }
      if ((channel & BlueChannel) != 0)
        {
          distance=QuantumScale*fabs((double) (Sa*GetPixelBlue(p)-Da*
            GetPixelBlue(q)));
          channel_distortion[BlueChannel]+=distance;
          channel_distortion[CompositeChannels]+=distance;
        }
      if (((channel & OpacityChannel) != 0) &&
          (image->matte != MagickFalse))
        {
          distance=QuantumScale*fabs((double) (GetPixelOpacity(p)-(double)
            GetPixelOpacity(q)));
          channel_distortion[OpacityChannel]+=distance;
          channel_distortion[CompositeChannels]+=distance;
        }
      /*
        NOTE(review): only image->colorspace is checked here; if the
        reconstruct image is not CMYK its index queue may be NULL —
        confirm against upstream before relying on mixed colorspaces.
      */
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        {
          distance=QuantumScale*fabs((double) (Sa*GetPixelIndex(indexes+x)-Da*
            GetPixelIndex(reconstruct_indexes+x)));
          channel_distortion[BlackChannel]+=distance;
          channel_distortion[CompositeChannels]+=distance;
        }
      p++;
      q++;
    }
    /* Fold this row's partial sums into the shared totals. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetMeanAbsoluteError)
#endif
    for (i=0; i <= (ssize_t) CompositeChannels; i++)
      distortion[i]+=channel_distortion[i];
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /* Normalize to mean error per pixel and average the composite entry. */
  for (i=0; i <= (ssize_t) CompositeChannels; i++)
    distortion[i]/=((double) columns*rows);
  distortion[CompositeChannels]/=(double) GetNumberChannels(image,channel);
  return(status);
}
/*
  GetMeanErrorPerPixel() accumulates per-channel absolute alpha-weighted
  differences into 'distortion' and records summary statistics in
  image->error: mean_error_per_pixel, normalized_mean_error (mean of
  squared distances, quantum-scaled) and normalized_maximum_error.  Runs
  serially (no OpenMP) and stops at the first row that cannot be fetched.
*/
static MagickBooleanType GetMeanErrorPerPixel(Image *image,
  const Image *reconstruct_image,const ChannelType channel,double *distortion,
  ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  MagickBooleanType
    status;

  MagickRealType
    area,
    gamma,
    maximum_error,
    mean_error;

  size_t
    columns,
    rows;

  ssize_t
    y;

  status=MagickTrue;
  area=0.0;
  maximum_error=0.0;
  mean_error=0.0;
  /* Iterate over the larger of the two geometries. */
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
  for (y=0; y < (ssize_t) rows; y++)
  {
    const IndexPacket
      *magick_restrict indexes,
      *magick_restrict reconstruct_indexes;

    const PixelPacket
      *magick_restrict p,
      *magick_restrict q;

    ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const PixelPacket *) NULL) || (q == (const PixelPacket *) NULL))
      {
        status=MagickFalse;
        break;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    reconstruct_indexes=GetCacheViewVirtualIndexQueue(reconstruct_view);
    for (x=0; x < (ssize_t) columns; x++)
    {
      MagickRealType
        distance,
        Da,
        Sa;

      /* Alpha weights; a channel-less image is treated as fully opaque. */
      Sa=QuantumScale*(image->matte != MagickFalse ? GetPixelAlpha(p) :
        (QuantumRange-OpaqueOpacity));
      Da=QuantumScale*(reconstruct_image->matte != MagickFalse ?
        GetPixelAlpha(q) : (QuantumRange-OpaqueOpacity));
      if ((channel & RedChannel) != 0)
        {
          distance=fabs((double) (Sa*GetPixelRed(p)-Da*GetPixelRed(q)));
          distortion[RedChannel]+=distance;
          distortion[CompositeChannels]+=distance;
          mean_error+=distance*distance;
          if (distance > maximum_error)
            maximum_error=distance;
          area++;  /* one sample per selected channel per pixel */
        }
      if ((channel & GreenChannel) != 0)
        {
          distance=fabs((double) (Sa*GetPixelGreen(p)-Da*GetPixelGreen(q)));
          distortion[GreenChannel]+=distance;
          distortion[CompositeChannels]+=distance;
          mean_error+=distance*distance;
          if (distance > maximum_error)
            maximum_error=distance;
          area++;
        }
      if ((channel & BlueChannel) != 0)
        {
          distance=fabs((double) (Sa*GetPixelBlue(p)-Da*GetPixelBlue(q)));
          distortion[BlueChannel]+=distance;
          distortion[CompositeChannels]+=distance;
          mean_error+=distance*distance;
          if (distance > maximum_error)
            maximum_error=distance;
          area++;
        }
      if (((channel & OpacityChannel) != 0) &&
          (image->matte != MagickFalse))
        {
          distance=fabs((double) (GetPixelOpacity(p)-
            (double) GetPixelOpacity(q)));
          distortion[OpacityChannel]+=distance;
          distortion[CompositeChannels]+=distance;
          mean_error+=distance*distance;
          if (distance > maximum_error)
            maximum_error=distance;
          area++;
        }
      /* Black channel requires both images to be CMYK. */
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace) &&
          (reconstruct_image->colorspace == CMYKColorspace))
        {
          distance=fabs((double) (Sa*GetPixelIndex(indexes+x)-Da*
            GetPixelIndex(reconstruct_indexes+x)));
          distortion[BlackChannel]+=distance;
          distortion[CompositeChannels]+=distance;
          mean_error+=distance*distance;
          if (distance > maximum_error)
            maximum_error=distance;
          area++;
        }
      p++;
      q++;
    }
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /* PerceptibleReciprocal() avoids division by zero when area is 0. */
  gamma=PerceptibleReciprocal(area);
  image->error.mean_error_per_pixel=gamma*distortion[CompositeChannels];
  image->error.normalized_mean_error=gamma*QuantumScale*QuantumScale*mean_error;
  image->error.normalized_maximum_error=QuantumScale*maximum_error;
  return(status);
}
/*
  GetMeanSquaredDistortion() accumulates, per channel, the mean squared
  alpha-weighted pixel difference (normalized [0..1] units); the composite
  entry is averaged over the selected channels.  Identical to
  GetFuzzDistortion() except the composite entry is left as MSE (no square
  root).  Returns MagickFalse if any pixel row cannot be fetched.
*/
static MagickBooleanType GetMeanSquaredDistortion(const Image *image,
  const Image *reconstruct_image,const ChannelType channel,
  double *distortion,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  MagickBooleanType
    status;

  ssize_t
    i;

  size_t
    columns,
    rows;

  ssize_t
    y;

  status=MagickTrue;
  /* Iterate over the larger of the two geometries. */
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    double
      channel_distortion[CompositeChannels+1];  /* per-row partial sums */

    const IndexPacket
      *magick_restrict indexes,
      *magick_restrict reconstruct_indexes;

    const PixelPacket
      *magick_restrict p,
      *magick_restrict q;

    ssize_t
      i,
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const PixelPacket *) NULL) || (q == (const PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    reconstruct_indexes=GetCacheViewVirtualIndexQueue(reconstruct_view);
    (void) memset(channel_distortion,0,sizeof(channel_distortion));
    for (x=0; x < (ssize_t) columns; x++)
    {
      MagickRealType
        distance,
        Da,
        Sa;

      /* Alpha weights; a channel-less image is treated as fully opaque. */
      Sa=QuantumScale*(image->matte != MagickFalse ? GetPixelAlpha(p) :
        (QuantumRange-OpaqueOpacity));
      Da=QuantumScale*(reconstruct_image->matte != MagickFalse ?
        GetPixelAlpha(q) : (QuantumRange-OpaqueOpacity));
      if ((channel & RedChannel) != 0)
        {
          distance=QuantumScale*(Sa*GetPixelRed(p)-Da*GetPixelRed(q));
          channel_distortion[RedChannel]+=distance*distance;
          channel_distortion[CompositeChannels]+=distance*distance;
        }
      if ((channel & GreenChannel) != 0)
        {
          distance=QuantumScale*(Sa*GetPixelGreen(p)-Da*GetPixelGreen(q));
          channel_distortion[GreenChannel]+=distance*distance;
          channel_distortion[CompositeChannels]+=distance*distance;
        }
      if ((channel & BlueChannel) != 0)
        {
          distance=QuantumScale*(Sa*GetPixelBlue(p)-Da*GetPixelBlue(q));
          channel_distortion[BlueChannel]+=distance*distance;
          channel_distortion[CompositeChannels]+=distance*distance;
        }
      if (((channel & OpacityChannel) != 0) &&
          (image->matte != MagickFalse))
        {
          distance=QuantumScale*(GetPixelOpacity(p)-(MagickRealType)
            GetPixelOpacity(q));
          channel_distortion[OpacityChannel]+=distance*distance;
          channel_distortion[CompositeChannels]+=distance*distance;
        }
      /* Black channel requires both images to be CMYK. */
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace) &&
          (reconstruct_image->colorspace == CMYKColorspace))
        {
          distance=QuantumScale*(Sa*GetPixelIndex(indexes+x)-Da*
            GetPixelIndex(reconstruct_indexes+x));
          channel_distortion[BlackChannel]+=distance*distance;
          channel_distortion[CompositeChannels]+=distance*distance;
        }
      p++;
      q++;
    }
    /* Fold this row's partial sums into the shared totals. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetMeanSquaredError)
#endif
    for (i=0; i <= (ssize_t) CompositeChannels; i++)
      distortion[i]+=channel_distortion[i];
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /* Normalize to MSE per pixel and average the composite entry. */
  for (i=0; i <= (ssize_t) CompositeChannels; i++)
    distortion[i]/=((double) columns*rows);
  distortion[CompositeChannels]/=(double) GetNumberChannels(image,channel);
  return(status);
}
/*
  GetNormalizedCrossCorrelationDistortion() computes the normalized cross
  correlation (NCC) between image and reconstruct_image for the requested
  channels.  Samples are centered about their channel means (discounting
  overall lighting/exposure differences) and the accumulated products are
  later scaled by the product of the channel standard deviations.
  distortion[CompositeChannels] is set to the root mean square of the
  per-channel results.  Returns MagickFalse when channel statistics or a
  pixel row cannot be obtained.
*/
static MagickBooleanType GetNormalizedCrossCorrelationDistortion(
  const Image *image,const Image *reconstruct_image,const ChannelType channel,
  double *distortion,ExceptionInfo *exception)
{
#define SimilarityImageTag  "Similarity/Image"

  CacheView
    *image_view,
    *reconstruct_view;

  ChannelStatistics
    *image_statistics,
    *reconstruct_statistics;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickRealType
    area;

  ssize_t
    i;

  size_t
    columns,
    rows;

  ssize_t
    y;

  /*
    Normalize to account for variation due to lighting and exposure condition.
  */
  image_statistics=GetImageChannelStatistics(image,exception);
  reconstruct_statistics=GetImageChannelStatistics(reconstruct_image,exception);
  if ((image_statistics == (ChannelStatistics *) NULL) ||
      (reconstruct_statistics == (ChannelStatistics *) NULL))
    {
      /* release whichever statistics buffer was successfully allocated */
      if (image_statistics != (ChannelStatistics *) NULL)
        image_statistics=(ChannelStatistics *) RelinquishMagickMemory(
          image_statistics);
      if (reconstruct_statistics != (ChannelStatistics *) NULL)
        reconstruct_statistics=(ChannelStatistics *) RelinquishMagickMemory(
          reconstruct_statistics);
      return(MagickFalse);
    }
  status=MagickTrue;
  progress=0;
  for (i=0; i <= (ssize_t) CompositeChannels; i++)
    distortion[i]=0.0;
  /* iterate over the union of the two geometries; the virtual cache views
     supply pixels where the smaller image has none */
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  area=1.0/((MagickRealType) columns*rows);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
  for (y=0; y < (ssize_t) rows; y++)
  {
    const IndexPacket
      *magick_restrict indexes,
      *magick_restrict reconstruct_indexes;

    const PixelPacket
      *magick_restrict p,
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const PixelPacket *) NULL) || (q == (const PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    reconstruct_indexes=GetCacheViewVirtualIndexQueue(reconstruct_view);
    for (x=0; x < (ssize_t) columns; x++)
    {
      MagickRealType
        Da,
        Sa;

      /* alpha-weight each sample; images without a matte are fully opaque */
      Sa=QuantumScale*(image->matte != MagickFalse ? GetPixelAlpha(p) :
        (QuantumRange-OpaqueOpacity));
      Da=QuantumScale*(reconstruct_image->matte != MagickFalse ?
        GetPixelAlpha(q) : (QuantumRange-OpaqueOpacity));
      /* accumulate area-weighted products of mean-centered samples */
      if ((channel & RedChannel) != 0)
        distortion[RedChannel]+=area*QuantumScale*(Sa*GetPixelRed(p)-
          image_statistics[RedChannel].mean)*(Da*GetPixelRed(q)-
          reconstruct_statistics[RedChannel].mean);
      if ((channel & GreenChannel) != 0)
        distortion[GreenChannel]+=area*QuantumScale*(Sa*GetPixelGreen(p)-
          image_statistics[GreenChannel].mean)*(Da*GetPixelGreen(q)-
          reconstruct_statistics[GreenChannel].mean);
      if ((channel & BlueChannel) != 0)
        distortion[BlueChannel]+=area*QuantumScale*(Sa*GetPixelBlue(p)-
          image_statistics[BlueChannel].mean)*(Da*GetPixelBlue(q)-
          reconstruct_statistics[BlueChannel].mean);
      if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse))
        distortion[OpacityChannel]+=area*QuantumScale*(
          GetPixelOpacity(p)-image_statistics[OpacityChannel].mean)*
          (GetPixelOpacity(q)-reconstruct_statistics[OpacityChannel].mean);
      /* black channel applies only when both images are CMYK */
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace) &&
          (reconstruct_image->colorspace == CMYKColorspace))
        distortion[BlackChannel]+=area*QuantumScale*(Sa*
          GetPixelIndex(indexes+x)-image_statistics[BlackChannel].mean)*(Da*
          GetPixelIndex(reconstruct_indexes+x)-
          reconstruct_statistics[BlackChannel].mean);
      p++;
      q++;
    }
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* NOTE(review): this loop is serial here, so the atomic is only a
           safeguard — presumably kept for symmetry with parallel variants */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,SimilarityImageTag,progress,rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /*
    Divide by the standard deviation.
  */
  for (i=0; i < (ssize_t) CompositeChannels; i++)
  {
    double
      gamma;

    gamma=image_statistics[i].standard_deviation*
      reconstruct_statistics[i].standard_deviation;
    /* leave the raw covariance in place when either deviation is ~zero */
    if (fabs(gamma) >= MagickEpsilon)
      {
        gamma=PerceptibleReciprocal(gamma);
        distortion[i]=QuantumRange*gamma*distortion[i];
      }
  }
  /* composite value: root mean square of the requested channel results */
  distortion[CompositeChannels]=0.0;
  if ((channel & RedChannel) != 0)
    distortion[CompositeChannels]+=distortion[RedChannel]*
      distortion[RedChannel];
  if ((channel & GreenChannel) != 0)
    distortion[CompositeChannels]+=distortion[GreenChannel]*
      distortion[GreenChannel];
  if ((channel & BlueChannel) != 0)
    distortion[CompositeChannels]+=distortion[BlueChannel]*
      distortion[BlueChannel];
  if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse))
    distortion[CompositeChannels]+=distortion[OpacityChannel]*
      distortion[OpacityChannel];
  if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace))
    distortion[CompositeChannels]+=distortion[BlackChannel]*
      distortion[BlackChannel];
  distortion[CompositeChannels]=sqrt(distortion[CompositeChannels]/
    GetNumberChannels(image,channel));
  /*
    Free resources.
  */
  reconstruct_statistics=(ChannelStatistics *) RelinquishMagickMemory(
    reconstruct_statistics);
  image_statistics=(ChannelStatistics *) RelinquishMagickMemory(
    image_statistics);
  return(status);
}
/*
  GetPeakAbsoluteDistortion() records, per requested channel, the largest
  absolute per-pixel difference (scaled by QuantumScale to [0,1]) between
  image and reconstruct_image.  distortion[CompositeChannels] receives the
  maximum over all requested channels.  Rows are processed in parallel; each
  thread keeps row-local maxima and merges them under a critical section.
  Returns MagickFalse if any pixel row cannot be read.
*/
static MagickBooleanType GetPeakAbsoluteDistortion(const Image *image,
  const Image *reconstruct_image,const ChannelType channel,
  double *distortion,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  MagickBooleanType
    status;

  size_t
    columns,
    rows;

  ssize_t
    y;

  status=MagickTrue;
  /* compare over the union of both geometries; virtual pixels fill the rest */
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    double
      channel_distortion[CompositeChannels+1];

    const IndexPacket
      *magick_restrict indexes,
      *magick_restrict reconstruct_indexes;

    const PixelPacket
      *magick_restrict p,
      *magick_restrict q;

    ssize_t
      i,
      x;

    /* another row already failed; skip remaining work */
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const PixelPacket *) NULL) || (q == (const PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    reconstruct_indexes=GetCacheViewVirtualIndexQueue(reconstruct_view);
    (void) memset(channel_distortion,0,sizeof(channel_distortion));
    for (x=0; x < (ssize_t) columns; x++)
    {
      MagickRealType
        distance,
        Da,
        Sa;

      /* alpha-weight the color samples; no matte means fully opaque */
      Sa=QuantumScale*(image->matte != MagickFalse ? GetPixelAlpha(p) :
        (QuantumRange-OpaqueOpacity));
      Da=QuantumScale*(reconstruct_image->matte != MagickFalse ?
        GetPixelAlpha(q) : (QuantumRange-OpaqueOpacity));
      if ((channel & RedChannel) != 0)
        {
          distance=QuantumScale*fabs((double) (Sa*GetPixelRed(p)-Da*
            GetPixelRed(q)));
          if (distance > channel_distortion[RedChannel])
            channel_distortion[RedChannel]=distance;
          if (distance > channel_distortion[CompositeChannels])
            channel_distortion[CompositeChannels]=distance;
        }
      if ((channel & GreenChannel) != 0)
        {
          distance=QuantumScale*fabs((double) (Sa*GetPixelGreen(p)-Da*
            GetPixelGreen(q)));
          if (distance > channel_distortion[GreenChannel])
            channel_distortion[GreenChannel]=distance;
          if (distance > channel_distortion[CompositeChannels])
            channel_distortion[CompositeChannels]=distance;
        }
      if ((channel & BlueChannel) != 0)
        {
          distance=QuantumScale*fabs((double) (Sa*GetPixelBlue(p)-Da*
            GetPixelBlue(q)));
          if (distance > channel_distortion[BlueChannel])
            channel_distortion[BlueChannel]=distance;
          if (distance > channel_distortion[CompositeChannels])
            channel_distortion[CompositeChannels]=distance;
        }
      if (((channel & OpacityChannel) != 0) &&
          (image->matte != MagickFalse))
        {
          /* opacity is compared directly, without alpha weighting */
          distance=QuantumScale*fabs((double) (GetPixelOpacity(p)-(double)
            GetPixelOpacity(q)));
          if (distance > channel_distortion[OpacityChannel])
            channel_distortion[OpacityChannel]=distance;
          if (distance > channel_distortion[CompositeChannels])
            channel_distortion[CompositeChannels]=distance;
        }
      /* black channel applies only when both images are CMYK */
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace) &&
          (reconstruct_image->colorspace == CMYKColorspace))
        {
          distance=QuantumScale*fabs((double) (Sa*GetPixelIndex(indexes+x)-Da*
            GetPixelIndex(reconstruct_indexes+x)));
          if (distance > channel_distortion[BlackChannel])
            channel_distortion[BlackChannel]=distance;
          if (distance > channel_distortion[CompositeChannels])
            channel_distortion[CompositeChannels]=distance;
        }
      p++;
      q++;
    }
    /* merge this row's maxima into the shared result */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetPeakAbsoluteError)
#endif
    for (i=0; i <= (ssize_t) CompositeChannels; i++)
      if (channel_distortion[i] > distortion[i])
        distortion[i]=channel_distortion[i];
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}
static inline double MagickLog10(const double x)
{
#define Log10Epsilon (1.0e-11)
if (fabs(x) < Log10Epsilon)
return(log10(Log10Epsilon));
return(log10(fabs(x)));
}
static MagickBooleanType GetPeakSignalToNoiseRatio(const Image *image,
const Image *reconstruct_image,const ChannelType channel,
double *distortion,ExceptionInfo *exception)
{
MagickBooleanType
status;
status=GetMeanSquaredDistortion(image,reconstruct_image,channel,distortion,
exception);
if ((channel & RedChannel) != 0)
{
if (fabs(distortion[RedChannel]) < MagickEpsilon)
distortion[RedChannel]=INFINITY;
else
distortion[RedChannel]=10.0*MagickLog10(1.0)-10.0*
MagickLog10(distortion[RedChannel]);
}
if ((channel & GreenChannel) != 0)
{
if (fabs(distortion[GreenChannel]) < MagickEpsilon)
distortion[GreenChannel]=INFINITY;
else
distortion[GreenChannel]=10.0*MagickLog10(1.0)-10.0*
MagickLog10(distortion[GreenChannel]);
}
if ((channel & BlueChannel) != 0)
{
if (fabs(distortion[BlueChannel]) < MagickEpsilon)
distortion[BlueChannel]=INFINITY;
else
distortion[BlueChannel]=10.0*MagickLog10(1.0)-10.0*
MagickLog10(distortion[BlueChannel]);
}
if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse))
{
if (fabs(distortion[OpacityChannel]) < MagickEpsilon)
distortion[OpacityChannel]=INFINITY;
else
distortion[OpacityChannel]=10.0*MagickLog10(1.0)-10.0*
MagickLog10(distortion[OpacityChannel]);
}
if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace))
{
if (fabs(distortion[BlackChannel]) < MagickEpsilon)
distortion[BlackChannel]=INFINITY;
else
distortion[BlackChannel]=10.0*MagickLog10(1.0)-10.0*
MagickLog10(distortion[BlackChannel]);
}
if (fabs(distortion[CompositeChannels]) < MagickEpsilon)
distortion[CompositeChannels]=INFINITY;
else
distortion[CompositeChannels]=10.0*MagickLog10(1.0)-10.0*
MagickLog10(distortion[CompositeChannels]);
return(status);
}
/*
  GetPerceptualHashDistortion() measures the distance between the perceptual
  hashes of the two images as the sum of squared differences of their image
  moments.  Each channel hash carries two moment sets — P[] and Q[],
  computed (per the comments below) in the sRGB and HCLP colorspaces
  respectively — and both contribute to the per-channel and composite
  totals.  NOTE(review): distortion[] is accumulated into, not reset, here;
  callers are expected to supply a zeroed array.  Returns MagickFalse if
  either hash cannot be computed.
*/
static MagickBooleanType GetPerceptualHashDistortion(const Image *image,
  const Image *reconstruct_image,const ChannelType channel,double *distortion,
  ExceptionInfo *exception)
{
  ChannelPerceptualHash
    *image_phash,
    *reconstruct_phash;

  double
    difference;

  ssize_t
    i;

  /*
    Compute perceptual hash in the sRGB colorspace.
  */
  image_phash=GetImageChannelPerceptualHash(image,exception);
  if (image_phash == (ChannelPerceptualHash *) NULL)
    return(MagickFalse);
  reconstruct_phash=GetImageChannelPerceptualHash(reconstruct_image,exception);
  if (reconstruct_phash == (ChannelPerceptualHash *) NULL)
    {
      /* first hash succeeded; release it before bailing out */
      image_phash=(ChannelPerceptualHash *) RelinquishMagickMemory(image_phash);
      return(MagickFalse);
    }
  for (i=0; i < MaximumNumberOfImageMoments; i++)
  {
    /*
      Compute sum of moment differences squared.
    */
    if ((channel & RedChannel) != 0)
      {
        difference=reconstruct_phash[RedChannel].P[i]-
          image_phash[RedChannel].P[i];
        distortion[RedChannel]+=difference*difference;
        distortion[CompositeChannels]+=difference*difference;
      }
    if ((channel & GreenChannel) != 0)
      {
        difference=reconstruct_phash[GreenChannel].P[i]-
          image_phash[GreenChannel].P[i];
        distortion[GreenChannel]+=difference*difference;
        distortion[CompositeChannels]+=difference*difference;
      }
    if ((channel & BlueChannel) != 0)
      {
        difference=reconstruct_phash[BlueChannel].P[i]-
          image_phash[BlueChannel].P[i];
        distortion[BlueChannel]+=difference*difference;
        distortion[CompositeChannels]+=difference*difference;
      }
    /* opacity participates only when both images carry a matte channel */
    if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse) &&
        (reconstruct_image->matte != MagickFalse))
      {
        difference=reconstruct_phash[OpacityChannel].P[i]-
          image_phash[OpacityChannel].P[i];
        distortion[OpacityChannel]+=difference*difference;
        distortion[CompositeChannels]+=difference*difference;
      }
    /* index (black) channel applies only when both images are CMYK */
    if (((channel & IndexChannel) != 0) &&
        (image->colorspace == CMYKColorspace) &&
        (reconstruct_image->colorspace == CMYKColorspace))
      {
        difference=reconstruct_phash[IndexChannel].P[i]-
          image_phash[IndexChannel].P[i];
        distortion[IndexChannel]+=difference*difference;
        distortion[CompositeChannels]+=difference*difference;
      }
  }
  /*
    Compute perceptual hash in the HCLP colorspace.
  */
  for (i=0; i < MaximumNumberOfImageMoments; i++)
  {
    /*
      Compute sum of moment differences squared.
    */
    if ((channel & RedChannel) != 0)
      {
        difference=reconstruct_phash[RedChannel].Q[i]-
          image_phash[RedChannel].Q[i];
        distortion[RedChannel]+=difference*difference;
        distortion[CompositeChannels]+=difference*difference;
      }
    if ((channel & GreenChannel) != 0)
      {
        difference=reconstruct_phash[GreenChannel].Q[i]-
          image_phash[GreenChannel].Q[i];
        distortion[GreenChannel]+=difference*difference;
        distortion[CompositeChannels]+=difference*difference;
      }
    if ((channel & BlueChannel) != 0)
      {
        difference=reconstruct_phash[BlueChannel].Q[i]-
          image_phash[BlueChannel].Q[i];
        distortion[BlueChannel]+=difference*difference;
        distortion[CompositeChannels]+=difference*difference;
      }
    if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse) &&
        (reconstruct_image->matte != MagickFalse))
      {
        difference=reconstruct_phash[OpacityChannel].Q[i]-
          image_phash[OpacityChannel].Q[i];
        distortion[OpacityChannel]+=difference*difference;
        distortion[CompositeChannels]+=difference*difference;
      }
    if (((channel & IndexChannel) != 0) &&
        (image->colorspace == CMYKColorspace) &&
        (reconstruct_image->colorspace == CMYKColorspace))
      {
        difference=reconstruct_phash[IndexChannel].Q[i]-
          image_phash[IndexChannel].Q[i];
        distortion[IndexChannel]+=difference*difference;
        distortion[CompositeChannels]+=difference*difference;
      }
  }
  /*
    Free resources.
  */
  reconstruct_phash=(ChannelPerceptualHash *) RelinquishMagickMemory(
    reconstruct_phash);
  image_phash=(ChannelPerceptualHash *) RelinquishMagickMemory(image_phash);
  return(MagickTrue);
}
/*
  GetRootMeanSquaredDistortion() computes the per-channel mean squared error
  and then takes the square root of every requested channel slot, plus the
  composite value, yielding the RMSE.
*/
static MagickBooleanType GetRootMeanSquaredDistortion(const Image *image,
  const Image *reconstruct_image,const ChannelType channel,double *distortion,
  ExceptionInfo *exception)
{
  static const ChannelType
    color_channels[3] = { RedChannel, GreenChannel, BlueChannel };

  MagickBooleanType
    status;

  ssize_t
    i;

  status=GetMeanSquaredDistortion(image,reconstruct_image,channel,distortion,
    exception);
  /* the plain color channels share identical handling */
  for (i=0; i < 3; i++)
    if ((channel & color_channels[i]) != 0)
      distortion[color_channels[i]]=sqrt(distortion[color_channels[i]]);
  /* opacity participates only when the image carries a matte channel */
  if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse))
    distortion[OpacityChannel]=sqrt(distortion[OpacityChannel]);
  /* black channel applies only to CMYK images */
  if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace))
    distortion[BlackChannel]=sqrt(distortion[BlackChannel]);
  distortion[CompositeChannels]=sqrt(distortion[CompositeChannels]);
  return(status);
}
/*
  GetImageChannelDistortion() compares the selected channels of image against
  reconstruct_image with the requested metric, stores the composite result in
  *distortion, and records it as the image's "distortion" property.  Returns
  MagickTrue on success.  For every metric except the perceptual hash, a
  morphology mismatch between the two images raises an exception.
*/
MagickExport MagickBooleanType GetImageChannelDistortion(Image *image,
  const Image *reconstruct_image,const ChannelType channel,
  const MetricType metric,double *distortion,ExceptionInfo *exception)
{
  double
    *channel_distortion;

  MagickBooleanType
    status;

  size_t
    length;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(reconstruct_image != (const Image *) NULL);
  assert(reconstruct_image->signature == MagickCoreSignature);
  assert(distortion != (double *) NULL);
  *distortion=0.0;
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* perceptual hash tolerates differing geometry; all other metrics do not */
  if (metric != PerceptualHashErrorMetric)
    if (ValidateImageMorphology(image,reconstruct_image) == MagickFalse)
      ThrowBinaryException(ImageError,"ImageMorphologyDiffers",image->filename);
  /*
    Get image distortion.
  */
  length=CompositeChannels+1UL;
  channel_distortion=(double *) AcquireQuantumMemory(length,
    sizeof(*channel_distortion));
  if (channel_distortion == (double *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) memset(channel_distortion,0,length*sizeof(*channel_distortion));
  switch (metric)
  {
    case AbsoluteErrorMetric:
    {
      status=GetAbsoluteDistortion(image,reconstruct_image,channel,
        channel_distortion,exception);
      break;
    }
    case FuzzErrorMetric:
    {
      status=GetFuzzDistortion(image,reconstruct_image,channel,
        channel_distortion,exception);
      break;
    }
    case MeanAbsoluteErrorMetric:
    {
      status=GetMeanAbsoluteDistortion(image,reconstruct_image,channel,
        channel_distortion,exception);
      break;
    }
    case MeanErrorPerPixelMetric:
    {
      status=GetMeanErrorPerPixel(image,reconstruct_image,channel,
        channel_distortion,exception);
      break;
    }
    case MeanSquaredErrorMetric:
    {
      status=GetMeanSquaredDistortion(image,reconstruct_image,channel,
        channel_distortion,exception);
      break;
    }
    /* normalized cross correlation doubles as the default metric */
    case NormalizedCrossCorrelationErrorMetric:
    default:
    {
      status=GetNormalizedCrossCorrelationDistortion(image,reconstruct_image,
        channel,channel_distortion,exception);
      break;
    }
    case PeakAbsoluteErrorMetric:
    {
      status=GetPeakAbsoluteDistortion(image,reconstruct_image,channel,
        channel_distortion,exception);
      break;
    }
    case PeakSignalToNoiseRatioMetric:
    {
      status=GetPeakSignalToNoiseRatio(image,reconstruct_image,channel,
        channel_distortion,exception);
      break;
    }
    case PerceptualHashErrorMetric:
    {
      status=GetPerceptualHashDistortion(image,reconstruct_image,channel,
        channel_distortion,exception);
      break;
    }
    case RootMeanSquaredErrorMetric:
    {
      status=GetRootMeanSquaredDistortion(image,reconstruct_image,channel,
        channel_distortion,exception);
      break;
    }
  }
  /* report only the composite value; free the per-channel scratch array */
  *distortion=channel_distortion[CompositeChannels];
  channel_distortion=(double *) RelinquishMagickMemory(channel_distortion);
  (void) FormatImageProperty(image,"distortion","%.*g",GetMagickPrecision(),
    *distortion);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C h a n n e l D i s t o r t i o n s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageChannelDistortions() compares the image channels of an image to a
% reconstructed image and returns the specified distortion metric for each
% channel.
%
% The format of the GetImageChannelDistortions method is:
%
% double *GetImageChannelDistortions(const Image *image,
% const Image *reconstruct_image,const MetricType metric,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o reconstruct_image: the reconstruct image.
%
% o metric: the metric.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  GetImageChannelDistortions() compares all channels (CompositeChannels) of
  image against reconstruct_image with the given metric and returns a newly
  allocated array of CompositeChannels+1 doubles — the per-channel values
  plus the composite.  Ownership transfers to the caller, who releases it
  with RelinquishMagickMemory().  Returns NULL on failure (morphology
  mismatch or metric error).
*/
MagickExport double *GetImageChannelDistortions(Image *image,
  const Image *reconstruct_image,const MetricType metric,
  ExceptionInfo *exception)
{
  double
    *channel_distortion;

  MagickBooleanType
    status;

  size_t
    length;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(reconstruct_image != (const Image *) NULL);
  assert(reconstruct_image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* perceptual hash tolerates differing geometry; all other metrics do not */
  if (metric != PerceptualHashErrorMetric)
    if (ValidateImageMorphology(image,reconstruct_image) == MagickFalse)
      {
        (void) ThrowMagickException(&image->exception,GetMagickModule(),
          ImageError,"ImageMorphologyDiffers","`%s'",image->filename);
        return((double *) NULL);
      }
  /*
    Get image distortion.
  */
  length=CompositeChannels+1UL;
  channel_distortion=(double *) AcquireQuantumMemory(length,
    sizeof(*channel_distortion));
  if (channel_distortion == (double *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) memset(channel_distortion,0,length*
    sizeof(*channel_distortion));
  status=MagickTrue;
  switch (metric)
  {
    case AbsoluteErrorMetric:
    {
      status=GetAbsoluteDistortion(image,reconstruct_image,CompositeChannels,
        channel_distortion,exception);
      break;
    }
    case FuzzErrorMetric:
    {
      status=GetFuzzDistortion(image,reconstruct_image,CompositeChannels,
        channel_distortion,exception);
      break;
    }
    case MeanAbsoluteErrorMetric:
    {
      status=GetMeanAbsoluteDistortion(image,reconstruct_image,
        CompositeChannels,channel_distortion,exception);
      break;
    }
    case MeanErrorPerPixelMetric:
    {
      status=GetMeanErrorPerPixel(image,reconstruct_image,CompositeChannels,
        channel_distortion,exception);
      break;
    }
    case MeanSquaredErrorMetric:
    {
      status=GetMeanSquaredDistortion(image,reconstruct_image,CompositeChannels,
        channel_distortion,exception);
      break;
    }
    /* normalized cross correlation doubles as the default metric */
    case NormalizedCrossCorrelationErrorMetric:
    default:
    {
      status=GetNormalizedCrossCorrelationDistortion(image,reconstruct_image,
        CompositeChannels,channel_distortion,exception);
      break;
    }
    case PeakAbsoluteErrorMetric:
    {
      status=GetPeakAbsoluteDistortion(image,reconstruct_image,
        CompositeChannels,channel_distortion,exception);
      break;
    }
    case PeakSignalToNoiseRatioMetric:
    {
      status=GetPeakSignalToNoiseRatio(image,reconstruct_image,
        CompositeChannels,channel_distortion,exception);
      break;
    }
    case PerceptualHashErrorMetric:
    {
      status=GetPerceptualHashDistortion(image,reconstruct_image,
        CompositeChannels,channel_distortion,exception);
      break;
    }
    case RootMeanSquaredErrorMetric:
    {
      status=GetRootMeanSquaredDistortion(image,reconstruct_image,
        CompositeChannels,channel_distortion,exception);
      break;
    }
  }
  /* on failure the partially filled array is useless; release it */
  if (status == MagickFalse)
    {
      channel_distortion=(double *) RelinquishMagickMemory(channel_distortion);
      return((double *) NULL);
    }
  return(channel_distortion);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s I m a g e s E q u a l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsImagesEqual() measures the difference between colors at each pixel
% location of two images. A value other than 0 means the colors match
% exactly. Otherwise an error measure is computed by summing over all
% pixels in an image the distance squared in RGB space between each image
% pixel and its corresponding pixel in the reconstruct image. The error
% measure is assigned to these image members:
%
% o mean_error_per_pixel: The mean error for any single pixel in
% the image.
%
% o normalized_mean_error: The normalized mean quantization error for
% any single pixel in the image. This distance measure is normalized to
% a range between 0 and 1. It is independent of the range of red, green,
% and blue values in the image.
%
% o normalized_maximum_error: The normalized maximum quantization
% error for any single pixel in the image. This distance measure is
% normalized to a range between 0 and 1. It is independent of the range
% of red, green, and blue values in your image.
%
% A small normalized mean square error, accessed as
% image->normalized_mean_error, suggests the images are very similar in
% spatial layout and color.
%
% The format of the IsImagesEqual method is:
%
% MagickBooleanType IsImagesEqual(Image *image,
% const Image *reconstruct_image)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o reconstruct_image: the reconstruct image.
%
*/
/*
  IsImagesEqual() accumulates the absolute per-channel difference over every
  pixel of the two images and records the results in image->error
  (mean_error_per_pixel, normalized_mean_error, normalized_maximum_error).
  Returns MagickTrue only when the images match exactly (zero mean error);
  a morphology mismatch raises an exception.
*/
MagickExport MagickBooleanType IsImagesEqual(Image *image,
  const Image *reconstruct_image)
{
  CacheView
    *image_view,
    *reconstruct_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickRealType
    area,
    gamma,
    maximum_error,
    mean_error,
    mean_error_per_pixel;

  size_t
    columns,
    rows;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(reconstruct_image != (const Image *) NULL);
  assert(reconstruct_image->signature == MagickCoreSignature);
  exception=(&image->exception);
  if (ValidateImageMorphology(image,reconstruct_image) == MagickFalse)
    ThrowBinaryException(ImageError,"ImageMorphologyDiffers",image->filename);
  area=0.0;
  maximum_error=0.0;
  mean_error_per_pixel=0.0;
  mean_error=0.0;
  /* compare over the union of both geometries; virtual pixels fill the rest */
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
  for (y=0; y < (ssize_t) rows; y++)
  {
    const IndexPacket
      *magick_restrict indexes,
      *magick_restrict reconstruct_indexes;

    const PixelPacket
      *magick_restrict p,
      *magick_restrict q;

    ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const PixelPacket *) NULL) || (q == (const PixelPacket *) NULL))
      break;
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    reconstruct_indexes=GetCacheViewVirtualIndexQueue(reconstruct_view);
    for (x=0; x < (ssize_t) columns; x++)
    {
      MagickRealType
        distance;

      /* area counts one unit per channel sample accumulated below */
      distance=fabs((double) (GetPixelRed(p)-(double) GetPixelRed(q)));
      mean_error_per_pixel+=distance;
      mean_error+=distance*distance;
      if (distance > maximum_error)
        maximum_error=distance;
      area++;
      distance=fabs((double) (GetPixelGreen(p)-(double) GetPixelGreen(q)));
      mean_error_per_pixel+=distance;
      mean_error+=distance*distance;
      if (distance > maximum_error)
        maximum_error=distance;
      area++;
      distance=fabs((double) (GetPixelBlue(p)-(double) GetPixelBlue(q)));
      mean_error_per_pixel+=distance;
      mean_error+=distance*distance;
      if (distance > maximum_error)
        maximum_error=distance;
      area++;
      if (image->matte != MagickFalse)
        {
          distance=fabs((double) (GetPixelOpacity(p)-(double)
            GetPixelOpacity(q)));
          mean_error_per_pixel+=distance;
          mean_error+=distance*distance;
          if (distance > maximum_error)
            maximum_error=distance;
          area++;
        }
      /* black channel contributes only when both images are CMYK */
      if ((image->colorspace == CMYKColorspace) &&
          (reconstruct_image->colorspace == CMYKColorspace))
        {
          distance=fabs((double) (GetPixelIndex(indexes+x)-(double)
            GetPixelIndex(reconstruct_indexes+x)));
          mean_error_per_pixel+=distance;
          mean_error+=distance*distance;
          if (distance > maximum_error)
            maximum_error=distance;
          area++;
        }
      p++;
      q++;
    }
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /* normalize by the number of samples; QuantumScale maps into [0,1] */
  gamma=PerceptibleReciprocal(area);
  image->error.mean_error_per_pixel=gamma*mean_error_per_pixel;
  image->error.normalized_mean_error=gamma*QuantumScale*QuantumScale*mean_error;
  image->error.normalized_maximum_error=QuantumScale*maximum_error;
  status=image->error.mean_error_per_pixel == 0.0 ? MagickTrue : MagickFalse;
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S i m i l a r i t y I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SimilarityImage() compares the reference image of the image and returns the
% best match offset. In addition, it returns a similarity image such that an
% exact match location is completely white and if none of the pixels match,
% black, otherwise some gray level in-between.
%
% The format of the SimilarityImage method is:
%
% Image *SimilarityImage(const Image *image,const Image *reference,
% RectangleInfo *offset,double *similarity,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o reference: find an area of the image that closely resembles this image.
%
% o offset: the best match offset of the reference image within the image.
%
% o similarity: the computed similarity between the images.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  GetSimilarityMetric() crops a reference-sized window out of image at
  (x_offset,y_offset) and returns its distortion against reference under the
  given metric.  A failed crop or comparison yields 0.0.
*/
static double GetSimilarityMetric(const Image *image,const Image *reference,
  const MetricType metric,const ssize_t x_offset,const ssize_t y_offset,
  ExceptionInfo *exception)
{
  double
    distortion;

  Image
    *crop_image;

  MagickBooleanType
    status;

  RectangleInfo
    crop_geometry;

  /* window has the reference's extent, placed at the requested offset */
  SetGeometry(reference,&crop_geometry);
  crop_geometry.x=x_offset;
  crop_geometry.y=y_offset;
  crop_image=CropImage(image,&crop_geometry,exception);
  if (crop_image == (Image *) NULL)
    return(0.0);
  distortion=0.0;
  status=GetImageDistortion(crop_image,reference,metric,&distortion,
    exception);
  (void) status;  /* distortion stays 0.0 when the comparison fails */
  crop_image=DestroyImage(crop_image);
  return(distortion);
}
/*
  SimilarityImage() is a convenience wrapper around SimilarityMetricImage()
  that fixes the comparison metric to root mean squared error.
*/
MagickExport Image *SimilarityImage(Image *image,const Image *reference,
  RectangleInfo *offset,double *similarity_metric,ExceptionInfo *exception)
{
  return(SimilarityMetricImage(image,reference,RootMeanSquaredErrorMetric,
    offset,similarity_metric,exception));
}
/*
  SimilarityMetricImage() slides the reference image over every valid
  position of image, evaluating the given distortion metric at each offset.
  The returned similarity image is bright where the match is good; the best
  offset and its metric value are returned through *offset and
  *similarity_metric.  The "compare:similarity-threshold" artifact allows the
  search to stop early once a good-enough match is found.

  Fix: the OpenMP critical section previously bound to the thread-local NCC
  adjustment (`similarity=1.0-similarity`), leaving the subsequent
  read-modify-write of the shared best-match state unguarded — a data race.
  The critical now guards the shared comparison/update instead.
*/
MagickExport Image *SimilarityMetricImage(Image *image,const Image *reference,
  const MetricType metric,RectangleInfo *offset,double *similarity_metric,
  ExceptionInfo *exception)
{
#define SimilarityImageTag  "Similarity/Image"

  CacheView
    *similarity_view;

  const char
    *artifact;

  double
    similarity_threshold;

  Image
    *similarity_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  assert(offset != (RectangleInfo *) NULL);
  SetGeometry(reference,offset);
  *similarity_metric=MagickMaximumValue;
  if (ValidateImageMorphology(image,reference) == MagickFalse)
    ThrowImageException(ImageError,"ImageMorphologyDiffers");
  /* one result pixel per valid placement of the reference */
  similarity_image=CloneImage(image,image->columns-reference->columns+1,
    image->rows-reference->rows+1,MagickTrue,exception);
  if (similarity_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(similarity_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&similarity_image->exception);
      similarity_image=DestroyImage(similarity_image);
      return((Image *) NULL);
    }
  (void) SetImageAlphaChannel(similarity_image,DeactivateAlphaChannel);
  /*
    Measure similarity of reference image against image.
  */
  similarity_threshold=(-1.0);
  artifact=GetImageArtifact(image,"compare:similarity-threshold");
  if (artifact != (const char *) NULL)
    similarity_threshold=StringToDouble(artifact,(char **) NULL);
  status=MagickTrue;
  progress=0;
  similarity_view=AcquireVirtualCacheView(similarity_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) \
    shared(progress,status,similarity_metric) \
    magick_number_threads(image,image,image->rows-reference->rows+1,1)
#endif
  for (y=0; y < (ssize_t) (image->rows-reference->rows+1); y++)
  {
    double
      similarity;

    ssize_t
      x;

    PixelPacket
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    /* early exit once any thread has found a match below the threshold */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp flush(similarity_metric)
#endif
    if (*similarity_metric <= similarity_threshold)
      continue;
    q=GetCacheViewAuthenticPixels(similarity_view,0,y,similarity_image->columns,
      1,exception);
    if (q == (const PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) (image->columns-reference->columns+1); x++)
    {
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp flush(similarity_metric)
#endif
      if (*similarity_metric <= similarity_threshold)
        break;
      similarity=GetSimilarityMetric(image,reference,metric,x,y,exception);
      /* NCC reports correlation (1.0 == identical); convert to a distance */
      if ((metric == NormalizedCrossCorrelationErrorMetric) ||
          (metric == UndefinedErrorMetric))
        similarity=1.0-similarity;
      /* guard the shared best-match state: the compare-and-update must be
         atomic with respect to other threads */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp critical (MagickCore_SimilarityImage)
#endif
      if (similarity < *similarity_metric)
        {
          *similarity_metric=similarity;
          offset->x=x;
          offset->y=y;
        }
      /* perceptual hash distances are large; compress for display */
      if (metric == PerceptualHashErrorMetric)
        similarity=MagickMin(0.01*similarity,1.0);
      /* bright pixel == close match */
      SetPixelRed(q,ClampToQuantum(QuantumRange-QuantumRange*similarity));
      SetPixelGreen(q,GetPixelRed(q));
      SetPixelBlue(q,GetPixelRed(q));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(similarity_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,SimilarityImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  similarity_view=DestroyCacheView(similarity_view);
  if (status == MagickFalse)
    similarity_image=DestroyImage(similarity_image);
  return(similarity_image);
}
|
episerver_fmt_plug.c | /* *New* EPiServer cracker patch for JtR. Hacked together during Summer of
* 2012 by Dhiru Kholia <dhiru.kholia at gmail.com> for GSoC. Based on sample
* code by hashcat's atom.
*
* This software is Copyright (c) 2012, Dhiru Kholia <dhiru.kholia at gmail.com>,
* and it is hereby released to the general public under the following terms:
* Redistribution and use in source and binary forms, with or without modification,
* are permitted.
*
* Obtaining hashes from EPiServer 6.x:
*
* sqlcmd -L
* sqlcmd -S <server> -U sa -P <password> *
* 1> SELECT name from sys.databases
* 2> go
* 1> use <database name>
* 2> select Email, PasswordFormat, PasswordSalt, Password from aspnet_Membership
* 3> go
*
* JtR Input Format:
*
* user:$episerver$*version*base64(salt)*base64(hash)
*
* Where,
*
* version == 0, for EPiServer 6.x standard config / .NET <= 3.5 SHA1 hash/salt format.
* hash = sha1(salt | utf16bytes(password)), PasswordFormat == 1 *
*
* version == 1, EPiServer 6.x + .NET >= 4.x SHA256 hash/salt format,
* PasswordFormat == ?
*
* Improved performance, JimF, July 2012.
* Full Unicode support, magnum, August 2012.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_episerver;
#elif FMT_REGISTERS_H
john_register_one(&fmt_episerver);
#else
#include <string.h>
#include <assert.h>
#include <errno.h>
#include "sha.h"
#include "sha2.h"
#include "arch.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#include "base64.h"
#include "unicode.h"
#include "memdbg.h"
#if !FAST_FORMATS_OMP
#undef _OPENMP
#endif
#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 2048 // core i7 no HT
#endif
#endif
#define FORMAT_LABEL "EPiServer"
#define FORMAT_NAME ""
#define FORMAT_TAG "$episerver$*"
#define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1)
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH 0
#define BINARY_SIZE 32 /* larger of the two */
#define BINARY_ALIGN 4
#define SALT_SIZE sizeof(struct custom_salt)
#define EFFECTIVE_SALT_SIZE 16
#define SALT_ALIGN 4
#ifdef SIMD_COEF_32
#include "simd-intrinsics.h"
#include "johnswap.h"
#define NBKEYS_SHA1 (SIMD_COEF_32 * SIMD_PARA_SHA1)
#define NBKEYS_SHA256 (SIMD_COEF_32 * SIMD_PARA_SHA256)
#define NBKEYS (SIMD_COEF_32 * SIMD_PARA_SHA1 * SIMD_PARA_SHA256)
#define HASH_IDX_IN (((unsigned int)index&(SIMD_COEF_32-1))+(unsigned int)index/SIMD_COEF_32*SHA_BUF_SIZ*SIMD_COEF_32)
#define HASH_IDX_SHA1 (((unsigned int)index&(SIMD_COEF_32-1))+(unsigned int)index/SIMD_COEF_32*5*SIMD_COEF_32)
#define HASH_IDX_SHA256 (((unsigned int)index&(SIMD_COEF_32-1))+(unsigned int)index/SIMD_COEF_32*8*SIMD_COEF_32)
#define HASH_IDX_OUT (cur_salt->version == 0 ? HASH_IDX_SHA1 : HASH_IDX_SHA256)
#define GETPOS(i, index) ( (index&(SIMD_COEF_32-1))*4 + ((i)&(0xffffffff-3))*SIMD_COEF_32 + (3-((i)&3)) + (unsigned int)index/SIMD_COEF_32*SHA_BUF_SIZ*4*SIMD_COEF_32 ) //for endianness conversion
#define ALGORITHM_NAME "SHA1/SHA256 " SHA256_ALGORITHM_NAME
#define PLAINTEXT_LENGTH 19 // (64 - 9 - 16)/2
#define MIN_KEYS_PER_CRYPT NBKEYS
#define MAX_KEYS_PER_CRYPT NBKEYS
#else
#define ALGORITHM_NAME "SHA1/SHA256 32/" ARCH_BITS_STR " " SHA2_LIB
#define PLAINTEXT_LENGTH 32
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 16
#endif
/* Self-test vectors: $episerver$*<version>*<b64 salt>*<b64 hash> with known
 * plaintexts; version 0 entries are SHA1-based, version 1 are SHA256. */
static struct fmt_tests episerver_tests[] = {
	{"$episerver$*0*fGJ2wn/5WlzqQoDeCA2kXA==*UQgnz/vPWap9UeD8Dhaw3h/fgFA=", "testPassword"},
	{"$episerver$*0*fGJ2wn/5WlzqQoDeCA2kXA==*uiP1YrZlVcHESbfsRt/wljwNeYU=", "sss"},
	{"$episerver$*0*fGJ2wn/5WlzqQoDeCA2kXA==*dxTlKqnxaVHs0210VcX+48QDonA=", "notused"},
	// hashes from pass_gen.pl, including some V1 data
	{"$episerver$*0*OHdOb002Z1J6ZFhlRHRzbw==*74l+VCC9xkGP27sNLPLZLRI/O5A", "test1"},
	{"$episerver$*0*THk5ZHhYNFdQUDV1Y0hScg==*ik+FVrPkEs6LfJU88xl5oBRoZjY", ""},
	{"$episerver$*1*aHIza2pUY0ZkR2dqQnJrNQ==*1KPAZriqakiNvE6ML6xkUzS11QPREziCvYkJc4UtjWs","test1"},
	{"$episerver$*1*RUZzRmNja0c5NkN0aDlMVw==*nh46rc4vkFIL0qGUrKTPuPWO6wqoESSeAxUNccEOe28","thatsworking"},
	{"$episerver$*1*cW9DdnVVUnFwM2FobFc4dg==*Zr/nekpDxU5gjt+fzTSqm0j/twZySBBW44Csoai2Fug","test3"},
	{"$episerver$*0*b0lvUnlWbkVlSFJQTFBMeg==*K7NAoB/wZfZjsG4DuMkNqKYwfTs", "123456789"},
	{NULL}
};
#ifdef SIMD_COEF_32
/* Interleaved SIMD buffers: keys staged as 32-bit words in SHA input
 * layout, results in per-lane output layout (see HASH_IDX_* macros). */
static uint32_t *saved_key;
static uint32_t *crypt_out;
#else
/* One key slot and one digest slot per candidate. */
static char (*saved_key)[3 * PLAINTEXT_LENGTH + 1];
static ARCH_WORD_32 (*crypt_out)[BINARY_SIZE / sizeof(ARCH_WORD_32)];
#endif
/* Parsed salt for the current batch; version 0 = SHA1, 1 = SHA256. */
static struct custom_salt {
	int version;
	unsigned char esalt[18 + 1]; /* base64 decoding, 24 / 4 * 3 = 18 */
} *cur_salt;
#if defined(_OPENMP) || defined(SIMD_COEF_32)
static int omp_t = 1;	/* thread multiplier applied to key batch sizes in init() */
#endif
#ifdef SIMD_COEF_32
/* encoding-specific set_key variants, installed by init() */
static void episerver_set_key_utf8(char *_key, int index);
static void episerver_set_key_CP(char *_key, int index);
#endif
/* One-time format setup: scales key batch sizes for OpenMP, allocates the
 * key/result buffers, and installs the encoding-specific set_key variant. */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;	/* oversubscribe so each thread gets many keys */
	self->params.max_keys_per_crypt *= omp_t;
#endif
#ifdef SIMD_COEF_32
	/* SIMD buffers must be vector-aligned; sizes are in 32-bit words */
	saved_key = mem_calloc_align(self->params.max_keys_per_crypt*SHA_BUF_SIZ,
	                             sizeof(*saved_key), MEM_ALIGN_SIMD);
	crypt_out = mem_calloc_align(self->params.max_keys_per_crypt*BINARY_SIZE/4,
	                             sizeof(*crypt_out), MEM_ALIGN_SIMD);
#else
	saved_key = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*saved_key));
	crypt_out = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*crypt_out));
#endif
#ifdef SIMD_COEF_32
	/* choose a set_key that converts from the target encoding to UTF-16 */
	if (options.target_enc == UTF_8) {
		self->methods.set_key = episerver_set_key_utf8;
		/* a UTF-8 char may take up to 3 input bytes per UTF-16 unit */
		self->params.plaintext_length = PLAINTEXT_LENGTH * 3;
	}
	else if (options.target_enc != ISO_8859_1 &&
	         options.target_enc != ASCII)
		self->methods.set_key = episerver_set_key_CP;
#else
	if (options.target_enc == UTF_8)
		self->params.plaintext_length = PLAINTEXT_LENGTH * 3;
#endif
}
/* Release the buffers allocated in init(). */
static void done(void)
{
	MEM_FREE(crypt_out);
	MEM_FREE(saved_key);
}
/* Validate ciphertext layout: $episerver$*<version>*<b64 salt>*<b64 hash>.
 * Returns 1 if the string is well-formed, 0 otherwise.
 * Fix: the version field is now required to be exactly "0" or "1"; the
 * old test (*ptr != '0' && *ptr != '1') accepted any token merely
 * starting with one of those digits (e.g. "0x"). */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *ptr, *ctcopy, *keeptr;

	if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN))
		return 0;
	if (!(ctcopy = strdup(ciphertext)))
		return 0;
	keeptr = ctcopy;	/* strtokm advances ctcopy; keep the original for freeing */
	ctcopy += FORMAT_TAG_LEN; /* skip leading '$episerver$*' */
	if (strlen(ciphertext) > 255)
		goto error;
	if (!(ptr = strtokm(ctcopy, "*")))
		goto error;
	/* check version, must be exactly '0' (SHA1) or '1' (SHA256) */
	if ((ptr[0] != '0' && ptr[0] != '1') || ptr[1] != '\0')
		goto error;
	if (!(ptr = strtokm(NULL, "*"))) /* salt */
		goto error;
	if (strlen(ptr) > 24)
		goto error;
	if (!(ptr = strtokm(NULL, "*"))) /* hash */
		goto error;
	if (strlen(ptr) > 44)
		goto error;
	if ((ptr = strtokm(NULL, "*"))) /* trailing garbage after hash */
		goto error;
	MEM_FREE(keeptr);
	return 1;
error:
	MEM_FREE(keeptr);
	return 0;
}
/* Parse version and base64 salt from a valid()-checked ciphertext.
 * Returns a pointer to a static struct custom_salt, overwritten per call. */
static void *get_salt(char *ciphertext)
{
	static struct custom_salt cs;
	char _ctcopy[256], *ctcopy=_ctcopy;
	char *p;
	memset(&cs, 0, sizeof(cs));
	strncpy(ctcopy, ciphertext, 255);
	ctcopy[255] = 0;	/* valid() already capped the length at 255 */
	ctcopy += FORMAT_TAG_LEN; /* skip over "$episerver$*" */
	p = strtokm(ctcopy, "*");
	cs.version = atoi(p);	/* 0 = SHA1, 1 = SHA256 */
	p = strtokm(NULL, "*");
	base64_decode(p, strlen(p), (char*)cs.esalt);
	return (void *)&cs;
}
/* Decode the base64 hash field (everything after the last '*') into a
 * static, word-aligned buffer.  SHA1 hashes fill only 20 of the 32 bytes;
 * the rest stays zeroed by the memset. */
static void *get_binary(char *ciphertext)
{
	static union {
		unsigned char c[BINARY_SIZE + 1];
		ARCH_WORD dummy;	/* forces word alignment of c[] */
	} buf;
	unsigned char *out = buf.c;
	char *p;
	memset(buf.c, 0, sizeof(buf.c));
	p = strrchr(ciphertext, '*') + 1;
	base64_decode(p, strlen(p), (char*)out);
#ifdef SIMD_COEF_32
	/* SIMD crypt results are stored big-endian; match that here */
	alter_endianity(out, BINARY_SIZE);
#endif
	return out;
}
/* Partial-hash accessors for the cracker's lookup tables: each returns the
 * first 32-bit output word masked to an increasing number of bits.
 * SIMD builds index into the interleaved lane layout via HASH_IDX_OUT. */
#ifdef SIMD_COEF_32
static int get_hash_0 (int index) { return crypt_out[HASH_IDX_OUT] & PH_MASK_0; }
static int get_hash_1 (int index) { return crypt_out[HASH_IDX_OUT] & PH_MASK_1; }
static int get_hash_2 (int index) { return crypt_out[HASH_IDX_OUT] & PH_MASK_2; }
static int get_hash_3 (int index) { return crypt_out[HASH_IDX_OUT] & PH_MASK_3; }
static int get_hash_4 (int index) { return crypt_out[HASH_IDX_OUT] & PH_MASK_4; }
static int get_hash_5 (int index) { return crypt_out[HASH_IDX_OUT] & PH_MASK_5; }
static int get_hash_6 (int index) { return crypt_out[HASH_IDX_OUT] & PH_MASK_6; }
#else
static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; }
#endif
/* Install the salt for the next crypt_all() batch.  In SIMD builds the 16
 * salt bytes are also copied into the front of every lane's key buffer;
 * GETPOS maps a linear byte offset to the interleaved SIMD position. */
static void set_salt(void *salt)
{
#ifdef SIMD_COEF_32
	int index, j;
	cur_salt = (struct custom_salt *)salt;
	for (index = 0; index < MAX_KEYS_PER_CRYPT*omp_t; ++index)
		for (j = 0; j < EFFECTIVE_SALT_SIZE; ++j) // copy the salt to vector buffer
			((unsigned char*)saved_key)[GETPOS(j, index)] = ((unsigned char*)cur_salt->esalt)[j];
#else
	cur_salt = (struct custom_salt *)salt;
#endif
}
/* Hash a batch of keys against the current salt and return the batch size.
 * Version 0: SHA1(salt16 . utf16(password)); version 1: SHA256 of the same
 * input.  NOTE(review): the omp pragma binds to whichever for-loop survives
 * preprocessing; in the SIMD branch the step expression reads cur_salt,
 * which is loop-invariant here. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;
#ifdef _OPENMP
#pragma omp parallel for
#endif
#ifdef SIMD_COEF_32
	/* keys were already staged in SIMD layout by set_key/set_salt; one
	 * call hashes a full vector of lanes */
	for (index = 0; index < count; index += (cur_salt->version == 0 ? NBKEYS_SHA1 : NBKEYS_SHA256))
	{
		uint32_t *in = &saved_key[HASH_IDX_IN];
		uint32_t *out = &crypt_out[HASH_IDX_OUT];
		if(cur_salt->version == 0)
			SIMDSHA1body(in, out, NULL, SSEi_MIXED_IN);
		else if(cur_salt->version == 1)
			SIMDSHA256body(in, out, NULL, SSEi_MIXED_IN);
	}
#else
	for (index = 0; index < count; index++)
	{
		unsigned char passwordBuf[PLAINTEXT_LENGTH*2+2];
		int len;
		/* convert the stored key from the target encoding to UTF-16 */
		len = enc_to_utf16((UTF16*)passwordBuf, PLAINTEXT_LENGTH,
		                   (UTF8*)saved_key[index], strlen(saved_key[index]));
		if (len < 0)	/* conversion error: hash the part that converted */
			len = strlen16((UTF16*)passwordBuf);
		len <<= 1;	/* UTF-16 units -> bytes */
		if(cur_salt->version == 0) {
			SHA_CTX ctx;
			SHA1_Init(&ctx);
			SHA1_Update(&ctx, cur_salt->esalt, EFFECTIVE_SALT_SIZE);
			SHA1_Update(&ctx, passwordBuf, len);
			SHA1_Final((unsigned char*)crypt_out[index], &ctx);
		}
		else if(cur_salt->version == 1) {
			SHA256_CTX ctx;
			SHA256_Init(&ctx);
			SHA256_Update(&ctx, cur_salt->esalt, EFFECTIVE_SALT_SIZE);
			SHA256_Update(&ctx, passwordBuf, len);
			SHA256_Final((unsigned char*)crypt_out[index], &ctx);
		}
	}
#endif
	return count;
}
/* Quick scan: does any candidate's first 32-bit output word match the
 * binary's first word?  Full verification happens in cmp_exact(). */
static int cmp_all(void *binary, int count)
{
	int index = 0;
	for (; index < count; index++) {
#ifdef SIMD_COEF_32
		if (*((uint32_t*)binary) == crypt_out[HASH_IDX_OUT])
#else
		if (*((ARCH_WORD_32*)binary) == crypt_out[index][0])
#endif
			return 1;
	}
	return 0;
}
/* Compare only the first 32-bit word for one candidate; cmp_exact()
 * confirms the full digest. */
static int cmp_one(void *binary, int index)
{
#if SIMD_COEF_32
	return *((uint32_t*)binary) == crypt_out[HASH_IDX_OUT];
#else
	return (*((ARCH_WORD_32*)binary) == crypt_out[index][0]);
#endif
}
/* Full digest comparison: 20 bytes for SHA1 (version 0), all 32 for
 * SHA256 (version 1).  SIMD results are de-interleaved into a flat
 * buffer before comparing. */
static int cmp_exact(char *source, int index)
{
	void *binary = get_binary(source);
#if SIMD_COEF_32
	uint32_t out[BINARY_SIZE/4];
	int i;
	for (i = 0; i < BINARY_SIZE/4; ++i)
		out[i] = crypt_out[HASH_IDX_OUT + i*SIMD_COEF_32];
	if(cur_salt->version == 0)
		return !memcmp(binary, out, 20);
	else
		return !memcmp(binary, out, BINARY_SIZE);
#else
	if(cur_salt->version == 0)
		return !memcmp(binary, crypt_out[index], 20);
	else
		return !memcmp(binary, crypt_out[index], BINARY_SIZE);
#endif
}
/* Default (ISO-8859-1/ASCII) set_key.  SIMD builds widen each 8-bit char
 * to a UTF-16 unit and append it after the 16 staged salt bytes, then add
 * the 0x80 SHA padding byte and store the message length.  'len' counts
 * UTF-16 units including the salt's 8, so bits = len*16 = len << 4. */
static void episerver_set_key(char *_key, int index)
{
#ifdef SIMD_COEF_32
	unsigned char *key = (unsigned char*)_key;
	uint32_t *keybuf = &saved_key[HASH_IDX_IN];
	uint32_t *keybuf_word = keybuf + 4*SIMD_COEF_32; // skip over the salt
	unsigned int len, temp2;
	len = EFFECTIVE_SALT_SIZE >> 1;
	while((temp2 = *key++)) {
		unsigned int temp;
		if ((temp = *key++))
		{
			/* two chars: pack as a pair of UTF-16 units */
			*keybuf_word = JOHNSWAP((temp << 16) | temp2);
		}
		else
		{
			/* odd length: second unit slot becomes the padding byte */
			*keybuf_word = JOHNSWAP((0x80 << 16) | temp2);
			len++;
			goto key_cleaning;
		}
		len += 2;
		keybuf_word += SIMD_COEF_32;
	}
	*keybuf_word = (0x80U << 24);	/* even length: padding starts a new word */
key_cleaning:
	keybuf_word += SIMD_COEF_32;
	/* zero out remnants of any previous, longer key in this lane */
	while(*keybuf_word) {
		*keybuf_word = 0;
		keybuf_word += SIMD_COEF_32;
	}
	keybuf[15*SIMD_COEF_32] = len << 4;	/* SHA message length in bits */
#else
	strcpy(saved_key[index], _key);
#endif
}
#ifdef SIMD_COEF_32
/* Codepage set_key: identical staging to episerver_set_key, but each
 * input byte is first mapped to its Unicode code point through the
 * CP_to_Unicode table for the active legacy codepage. */
static void episerver_set_key_CP(char *_key, int index)
{
	unsigned char *key = (unsigned char*)_key;
	uint32_t *keybuf = &saved_key[HASH_IDX_IN];
	uint32_t *keybuf_word = keybuf + 4*SIMD_COEF_32; // skip over the salt
	unsigned int len, temp2;
	len = EFFECTIVE_SALT_SIZE >> 1;	/* salt occupies 8 UTF-16 units */
	while((temp2 = *key++)) {
		unsigned int temp;
		temp2 = CP_to_Unicode[temp2];
		if ((temp = *key++))
		{
			temp = CP_to_Unicode[temp];
			*keybuf_word = JOHNSWAP((temp << 16) | temp2);
		}
		else
		{
			/* odd length: pad the second unit slot */
			*keybuf_word = JOHNSWAP((0x80 << 16) | temp2);
			len++;
			goto key_cleaning;
		}
		len += 2;
		keybuf_word += SIMD_COEF_32;
	}
	*keybuf_word = (0x80U << 24);	/* even length: padding in its own word */
key_cleaning:
	keybuf_word += SIMD_COEF_32;
	/* clear leftovers of a previously longer key */
	while(*keybuf_word) {
		*keybuf_word = 0;
		keybuf_word += SIMD_COEF_32;
	}
	keybuf[15*SIMD_COEF_32] = len << 4;	/* message length in bits */
}
#endif
#ifdef SIMD_COEF_32
/* UTF-8 set_key for SIMD builds: decodes UTF-8 input to UTF-16 (including
 * surrogate pairs for non-BMP code points), packs two UTF-16 units per
 * 32-bit word after the staged salt, then adds SHA padding and the message
 * bit length.  'len' counts UTF-16 units starting at 8 for the salt, so
 * message bits = len * 16 = len << 4.
 *
 * Fix: when decoding the SECOND unit of a pair (chh), the 4-byte (case 3)
 * continuation previously accumulated into chl, corrupting both units; it
 * now accumulates into chh, consistent with the case 2 and case 1 arms. */
static void episerver_set_key_utf8(char *_key, int index)
{
	const UTF8 *source = (UTF8*)_key;
	uint32_t *keybuf = &saved_key[HASH_IDX_IN];
	uint32_t *keybuf_word = keybuf + 4*SIMD_COEF_32; // skip over the salt
	UTF32 chl, chh = 0x80;
	unsigned int len;
	len = EFFECTIVE_SALT_SIZE >> 1;
	while (*source) {
		chl = *source;
		if (chl >= 0xC0) {
			/* multi-byte lead: fold in continuation bytes (cases fall
			 * through deliberately, one byte per arm) */
			unsigned int extraBytesToRead = opt_trailingBytesUTF8[chl & 0x3f];
			switch (extraBytesToRead) {
			case 3:
				++source;
				if (*source) {
					chl <<= 6;
					chl += *source;
				} else
					goto bailout;
				/* fallthrough */
			case 2:
				++source;
				if (*source) {
					chl <<= 6;
					chl += *source;
				} else
					goto bailout;
				/* fallthrough */
			case 1:
				++source;
				if (*source) {
					chl <<= 6;
					chl += *source;
				} else
					goto bailout;
				/* fallthrough */
			case 0:
				break;
			default:
				goto bailout;	/* invalid lead byte */
			}
			chl -= offsetsFromUTF8[extraBytesToRead];
		}
		source++;
		len++;
		if (chl > UNI_MAX_BMP) {
			/* needs a surrogate pair; truncate if the buffer is full */
			if (len == PLAINTEXT_LENGTH + (EFFECTIVE_SALT_SIZE>>1)) {
				chh = 0x80;
				*keybuf_word = JOHNSWAP((chh << 16) | chl);
				keybuf_word += SIMD_COEF_32;
				break;
			}
#define halfBase 0x0010000UL
#define halfShift 10
#define halfMask 0x3FFUL
#define UNI_SUR_HIGH_START (UTF32)0xD800
#define UNI_SUR_LOW_START (UTF32)0xDC00
			chl -= halfBase;
			chh = (UTF16)((chl & halfMask) + UNI_SUR_LOW_START);
			chl = (UTF16)((chl >> halfShift) + UNI_SUR_HIGH_START);
			len++;
		} else if (*source && len < PLAINTEXT_LENGTH + (EFFECTIVE_SALT_SIZE>>1)) {
			/* decode a second code point so two units fill this word */
			chh = *source;
			if (chh >= 0xC0) {
				unsigned int extraBytesToRead =
					opt_trailingBytesUTF8[chh & 0x3f];
				switch (extraBytesToRead) {
				case 3:
					++source;
					if (*source) {
						chh <<= 6;	/* FIXED: was chl */
						chh += *source;	/* FIXED: was chl */
					} else
						goto bailout;
					/* fallthrough */
				case 2:
					++source;
					if (*source) {
						chh <<= 6;
						chh += *source;
					} else
						goto bailout;
					/* fallthrough */
				case 1:
					++source;
					if (*source) {
						chh <<= 6;
						chh += *source;
					} else
						goto bailout;
					/* fallthrough */
				case 0:
					break;
				default:
					goto bailout;
				}
				chh -= offsetsFromUTF8[extraBytesToRead];
			}
			source++;
			len++;
		} else {
			/* end of input (or buffer full): pad the second unit slot */
			chh = 0x80;
			*keybuf_word = JOHNSWAP((chh << 16) | chl);
			keybuf_word += SIMD_COEF_32;
			break;
		}
		*keybuf_word = JOHNSWAP((chh << 16) | chl);
		keybuf_word += SIMD_COEF_32;
	}
	/* if no padding byte was emitted inline, start a fresh word with it */
	if (chh != 0x80 || len == (EFFECTIVE_SALT_SIZE>>1)) {
		*keybuf_word = (0x80U << 24);
		keybuf_word += SIMD_COEF_32;
	}
bailout:
	/* clear leftovers of a previously longer key in this lane */
	while(*keybuf_word) {
		*keybuf_word = 0;
		keybuf_word += SIMD_COEF_32;
	}
	keybuf[15*SIMD_COEF_32] = len << 4;	/* message length in bits */
}
#endif
/* Reconstruct the plaintext for display.  SIMD builds recover the UTF-16
 * units from the interleaved buffer: the stored bit length >> 3 gives
 * bytes, minus the 16 salt bytes, >> 1 gives the unit count. */
static char *get_key(int index)
{
#ifdef SIMD_COEF_32
	static UTF16 out[PLAINTEXT_LENGTH + 1];
	unsigned int i,s;
	s = ((saved_key[HASH_IDX_IN + 15*SIMD_COEF_32] >> 3) - 16) >> 1;
	for(i = 0; i < s; i++)
		out[i] = ((unsigned char*)saved_key)[GETPOS(16 + (i<<1), index)] | (((unsigned char*)saved_key)[GETPOS(16 + (i<<1) + 1, index)] << 8);
	out[i] = 0;	/* NUL-terminate the UTF-16 string */
	return (char*)utf16_to_enc(out);
#else
	return saved_key[index];
#endif
}
/* Tunable-cost reporter: 1 for SHA1 (version 0), 2 for SHA256 (version 1). */
static unsigned int hash_type(void *salt)
{
	struct custom_salt *cs = (struct custom_salt *)salt;

	return (unsigned int)(cs->version + 1);
}
/* Format descriptor registered with the JtR core: static parameters first,
 * then the method table wired to the functions above. */
struct fmt_main fmt_episerver = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
#ifdef _OPENMP
		FMT_OMP | FMT_OMP_BAD |
#endif
		FMT_CASE | FMT_8_BIT | FMT_UNICODE | FMT_UTF8,
		{
			/* label for the tunable cost exposed via hash_type() */
			"hash type [1: SHA1 2:SHA256]",
		},
		{ FORMAT_TAG },
		episerver_tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		get_binary,
		get_salt,
		{
			hash_type,
		},
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		episerver_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif /* plugin stanza */
|
piOpenMP_error.c | #include <stdio.h>
#include <time.h>
int num_steps = 100000000;
double step;
/* Computes pi by midpoint integration of 4/(1+x^2) over [0,1].
 * NOTE(review): the parallel loop updates the shared 'sum' without a
 * reduction or atomic, which is a data race — with more than one thread
 * the result is generally wrong.  The file name (piOpenMP_error.c)
 * suggests the race is intentional as a teaching example; confirm before
 * "fixing" with reduction(+:sum).
 * NOTE(review): clock() measures CPU time summed over threads, not
 * wall-clock time, so the printed seconds overstate elapsed time. */
int main(int argc, char* argv[])
{
	int i;
	double start_time, stop_time;
	double pi, sum=0.0;
	step = 1./(double)num_steps;
	start_time = clock();
#pragma omp parallel for
	for (i = 0; i < num_steps; ++i)
	{
		double x = (i+.5)*step;
		sum += 4.0/(1.+x*x);	/* racy read-modify-write of shared 'sum' */
	}
	pi = sum*step;
	stop_time = clock();
	printf("PIの値 %10.7f\n", pi);
	printf("PIの計算時間 %1f seconds\n",
	       ((double)(stop_time - start_time)/CLOCKS_PER_SEC));
	return 0;
}
|
gather_ref.c | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* License); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (c) 2020, OPEN AI LAB
* Author: jxyang@openailab.com
* Update: hhchen@openailab.com
*/
#include <math.h>
#include "sys_port.h"
#include "module.h"
#include "tengine_errno.h"
#include "tengine_log.h"
#include "tengine_ir.h"
#include "../../cpu_node_ops.h"
#include "tengine_op.h"
#include "gather_param.h"
/* Per-node private data, filled by prerun()/run() and consumed by the
 * reference kernels. */
typedef struct
{
	int in_shape[4]; // input tensor dims (up to 4 axes)
	int axis;        // gather axis
	int indices_num; // number of indices to gather
	int dim_size;    // number of valid entries in in_shape
} gather_param_t;
/* Reference float32 gather along param->axis: for each outer slice, copy
 * the rows selected by input_indices into the output, one inner_size-float
 * row per index.  num_thread is accepted for API symmetry but unused.
 * Always returns 0. */
static int ref_gather_fp32(float* input, int* input_indices, float* output, gather_param_t* param, int num_thread)
{
    const int axis = param->axis;
    const int axis_size = param->in_shape[axis];
    int outer_size = 1;
    int inner_size = 1;
    int d;

    for (d = 0; d < axis; d++)
        outer_size *= param->in_shape[d];
    for (d = axis + 1; d < param->dim_size; d++)
        inner_size *= param->in_shape[d];

    for (int outer = 0; outer < outer_size; outer++)
    {
        float* dst = output + outer * param->indices_num * inner_size;
        const float* src = input + outer * axis_size * inner_size;

        for (int k = 0; k < param->indices_num; k++)
            memcpy(dst + k * inner_size,
                   src + input_indices[k] * inner_size,
                   inner_size * sizeof(float));
    }
    return 0;
}
/* Reference uint8 gather along param->axis: same layout logic as the fp32
 * kernel, copying inner_size bytes per selected index.  num_thread is
 * accepted for API symmetry but unused.  Always returns 0. */
static int ref_gather_uint8(uint8_t* input, int* input_indices, uint8_t* output, gather_param_t* param, int num_thread)
{
    const int axis = param->axis;
    const int axis_size = param->in_shape[axis];
    int outer_size = 1;
    int inner_size = 1;
    int d;

    for (d = 0; d < axis; d++)
        outer_size *= param->in_shape[d];
    for (d = axis + 1; d < param->dim_size; d++)
        inner_size *= param->in_shape[d];

    for (int outer = 0; outer < outer_size; outer++)
    {
        uint8_t* dst = output + outer * param->indices_num * inner_size;
        const uint8_t* src = input + outer * axis_size * inner_size;

        for (int k = 0; k < param->indices_num; k++)
            memcpy(dst + k * inner_size,
                   src + input_indices[k] * inner_size,
                   inner_size);
    }
    return 0;
}
/* Cache the node's gather parameters (axis, index count) in its private
 * data before execution.  Always returns 0.
 * Fix: removed the unused 'ir_graph' local. */
static int prerun(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    struct gather_param* gather_param = ( struct gather_param* )exec_node->ir_node->op.param_mem;
    gather_param_t* op_priv_info = ( gather_param_t* )exec_node->ops_priv;

    op_priv_info->axis = gather_param->axis;
    op_priv_info->indices_num = gather_param->indices_num;
    return 0;
}
/* Execute the gather: refresh the cached input shape (handles shapes that
 * change between runs) and dispatch on the input dtype.
 * Returns the kernel's result (0) on success, -1 for unsupported dtypes.
 * Fix: removed the unused 'out_size' local and dead commented-out code. */
static int run(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    struct ir_node* ir_node = exec_node->ir_node;
    struct ir_graph* ir_graph = ir_node->graph;
    struct ir_tensor* input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]);
    struct ir_tensor* output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]);
    struct ir_tensor* indices_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[1]);
    gather_param_t* op_priv_info = ( gather_param_t* )exec_node->ops_priv;
    void* input = input_tensor->data;
    void* indices_data = indices_tensor->data;
    void* output = output_tensor->data;
    int ret = -1;

    /* cache the current input shape for the reference kernels */
    op_priv_info->dim_size = input_tensor->dim_num;
    for (int i = 0; i < op_priv_info->dim_size; i++)
    {
        op_priv_info->in_shape[i] = input_tensor->dims[i];
    }

    if (input_tensor->data_type == TENGINE_DT_FP32)
        ret = ref_gather_fp32(input, indices_data, output, op_priv_info, exec_graph->num_thread);
    else if (input_tensor->data_type == TENGINE_DT_UINT8)
        ret = ref_gather_uint8(input, indices_data, output, op_priv_info, exec_graph->num_thread);

    return ret;
}
/* Allocate zeroed private data for a gather node.
 * Returns 0 on success; -1 with errno ENOMEM on allocation failure.
 * Fix: removed the unused 'ir_node' and 'ir_graph' locals. */
static int init_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    gather_param_t* op_priv_info = ( gather_param_t* )sys_malloc(sizeof(gather_param_t));

    if (op_priv_info == NULL)
    {
        set_tengine_errno(ENOMEM);
        return -1;
    }
    memset(op_priv_info, 0, sizeof(gather_param_t));
    exec_node->ops_priv = op_priv_info;
    return 0;
}
/* Free the node's private data and clear the stale pointer.  Always 0. */
static int release_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    sys_free(exec_node->ops_priv);
    exec_node->ops_priv = NULL;
    return 0;
}
/* Always claim the best score during node-ops selection for OP_GATHER. */
static int score(struct node_ops* node_ops, struct exec_graph* exec_graph, struct ir_node* exec_node)
{
    return OPS_SCORE_BEST;
}
/* Node-ops vtable for OP_GATHER; no reshape/postrun hooks are needed by
 * this reference implementation. */
static struct node_ops gather_node_ops = {.prerun = prerun,
                                          .run = run,
                                          .reshape = NULL,
                                          .postrun = NULL,
                                          .init_node = init_node,
                                          .release_node = release_node,
                                          .score = score};
/* Module hook: register the gather reference ops with the runtime. */
static int reg_gather_ops(void* arg)
{
    return register_builtin_node_ops(OP_GATHER, &gather_node_ops);
}
/* Module hook: unregister the gather reference ops on shutdown. */
static int unreg_gather_ops(void* arg)
{
    return unregister_builtin_node_ops(OP_GATHER, &gather_node_ops);
}
AUTO_REGISTER_OPS(reg_gather_ops);
AUTO_UNREGISTER_OPS(unreg_gather_ops);
|
bt.c | /*--------------------------------------------------------------------
NAS Parallel Benchmarks 3.0 structured OpenMP C versions - BT
This benchmark is an OpenMP C version of the NPB BT code.
The OpenMP C 2.3 versions are derived by RWCP from the serial Fortran versions
in "NPB 2.3-serial" developed by NAS. 3.0 translation is performed by the UVSQ.
Permission to use, copy, distribute and modify this software for any
purpose with or without fee is hereby granted.
This software is provided "as is" without express or implied warranty.
Information on OpenMP activities at RWCP is available at:
http://pdplab.trc.rwcp.or.jp/pdperf/Omni/
Information on NAS Parallel Benchmarks 2.3 is available at:
http://www.nas.nasa.gov/NAS/NPB/
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
Authors: R. Van der Wijngaart
T. Harris
M. Yarrow
OpenMP C version: S. Satoh
3.0 structure translation: M. Popov
--------------------------------------------------------------------*/
#include "../common/npb-C.h"
/* global variables */
#include "header.h"
/* function declarations */
static void add(void);
static void adi(void);
static void error_norm(double rms[5]);
static void rhs_norm(double rms[5]);
static void exact_rhs(void);
static void exact_solution(double xi, double eta, double zeta,
double dtemp[5]);
static void initialize(void);
static void lhsinit(void);
static void lhsx(void);
static void lhsy(void);
static void lhsz(void);
static void compute_rhs(void);
static void set_constants(void);
static void verify(int no_time_steps, char *class, boolean *verified);
static void x_solve(void);
static void x_backsubstitute(void);
static void x_solve_cell(void);
static void matvec_sub(double ablock[5][5], double avec[5], double bvec[5]);
static void matmul_sub(double ablock[5][5], double bblock[5][5],
double cblock[5][5]);
static void binvcrhs(double lhs[5][5], double c[5][5], double r[5]);
static void binvrhs(double lhs[5][5], double r[5]);
static void y_solve(void);
static void y_backsubstitute(void);
static void y_solve_cell(void);
static void z_solve(void);
static void z_backsubstitute(void);
static void z_solve_cell(void);
/*--------------------------------------------------------------------
program BT
c-------------------------------------------------------------------*/
/* Benchmark driver: read parameters (inputbt.data or compiled defaults),
 * run one warm-up ADI step to touch all code, reinitialize, then time
 * 'niter' steps and report MFLOPS and verification status. */
int main(int argc, char **argv) {
  int niter, step, n3;
  int nthreads = 1;
  double navg, mflops;
  double tmax;
  boolean verified;
  char class;
  FILE *fp;

/*--------------------------------------------------------------------
c      Root node reads input file (if it exists) else takes
c      defaults from parameters
c-------------------------------------------------------------------*/
  printf("\n\n NAS Parallel Benchmarks 3.0 structured OpenMP C version"
	 " - BT Benchmark\n\n");
  fp = fopen("inputbt.data", "r");
  if (fp != NULL) {
    printf(" Reading from input file inputbt.data");
    /* NOTE(review): fscanf/fgetc results are unchecked; a malformed or
     * truncated input file leaves niter/dt/grid_points indeterminate. */
    fscanf(fp, "%d", &niter);
    while (fgetc(fp) != '\n');
    fscanf(fp, "%lg", &dt);
    while (fgetc(fp) != '\n');
    fscanf(fp, "%d%d%d",
	   &grid_points[0],  &grid_points[1],  &grid_points[2]);
    fclose(fp);
  } else {
    printf(" No input file inputbt.data. Using compiled defaults\n");
    niter = NITER_DEFAULT;
    dt    = DT_DEFAULT;
    grid_points[0] = PROBLEM_SIZE;
    grid_points[1] = PROBLEM_SIZE;
    grid_points[2] = PROBLEM_SIZE;
  }
  printf(" Size: %3dx%3dx%3d\n",
	 grid_points[0], grid_points[1], grid_points[2]);
  printf(" Iterations: %3d   dt: %10.6f\n", niter, dt);
  if (grid_points[0] > IMAX ||
      grid_points[1] > JMAX ||
      grid_points[2] > KMAX) {
    printf(" %dx%dx%d\n", grid_points[0], grid_points[1], grid_points[2]);
    printf(" Problem size too big for compiled array sizes\n");
    exit(1);
  }
  set_constants();
  initialize();
  lhsinit();
  exact_rhs();
/*--------------------------------------------------------------------
c      do one time step to touch all code, and reinitialize
c-------------------------------------------------------------------*/
  adi();
  initialize();
  timer_clear(1);
  timer_start(1);
  for (step = 1; step <= niter; step++) {
    if (step%20 == 0 || step == 1) {
      printf(" Time step %4d\n", step);
    }
    adi();
  }
  /* query the actual thread count for the results report */
#pragma omp parallel
  {
#if defined(_OPENMP)
#pragma omp master
    nthreads = omp_get_num_threads();
#endif /* _OPENMP */
  } /* end parallel */
  timer_stop(1);
  tmax = timer_read(1);
  verify(niter, &class, &verified);
  n3 = grid_points[0]*grid_points[1]*grid_points[2];
  navg = (grid_points[0]+grid_points[1]+grid_points[2])/3.0;
  if ( tmax != 0.0 ) {
    /* operation-count model for BT, per the NPB reports */
    mflops = 1.0e-6*(double)niter*
      (3478.8*(double)n3-17655.7*pow2(navg)+28023.7*navg) / tmax;
  } else {
    mflops = 0.0;
  }
  c_print_results("BT", class, grid_points[0],
		  grid_points[1], grid_points[2], niter, nthreads,
		  tmax, mflops, "          floating point",
		  verified, NPBVERSION,COMPILETIME, CS1, CS2, CS3, CS4, CS5,
		  CS6, "(none)");
  /* NOTE(review): no explicit return; relies on C99 implicit return 0 */
}
/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
static void add(void) {
/*--------------------------------------------------------------------
c     add the update rhs to the solution vector u over the interior
c     grid; worksharing loop inside an enclosing parallel region
c-------------------------------------------------------------------*/
  int i, j, k, m;

#pragma omp for
  for (i = 1; i < grid_points[0]-1; i++) {
    for (j = 1; j < grid_points[1]-1; j++) {
      for (k = 1; k < grid_points[2]-1; k++) {
        for (m = 0; m < 5; m++) {
          u[i][j][k][m] += rhs[i][j][k][m];
        }
      }
    }
  }
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/* One ADI time step: build the RHS, solve implicitly along x, y and z,
 * then add the update to u.  Each phase opens its own parallel region;
 * the worksharing (#pragma omp for) lives inside the callees. */
static void adi(void) {
#pragma omp parallel
  compute_rhs();
#pragma omp parallel
  x_solve();
#pragma omp parallel
  y_solve();
#pragma omp parallel
  z_solve();
#pragma omp parallel
  add();
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void error_norm(double rms[5]) {
/*--------------------------------------------------------------------
c     RMS norm of (computed solution - exact solution), per component m,
c     normalized by the interior grid size in each direction
c-------------------------------------------------------------------*/
  int i, j, k, m, d;
  double xi, eta, zeta, u_exact[5], diff;

  for (m = 0; m < 5; m++)
    rms[m] = 0.0;

  for (i = 0; i < grid_points[0]; i++) {
    xi = (double)i * dnxm1;
    for (j = 0; j < grid_points[1]; j++) {
      eta = (double)j * dnym1;
      for (k = 0; k < grid_points[2]; k++) {
        zeta = (double)k * dnzm1;
        exact_solution(xi, eta, zeta, u_exact);
        for (m = 0; m < 5; m++) {
          diff = u[i][j][k][m] - u_exact[m];
          rms[m] += diff * diff;
        }
      }
    }
  }

  for (m = 0; m < 5; m++) {
    for (d = 0; d <= 2; d++)
      rms[m] /= (double)(grid_points[d]-2);
    rms[m] = sqrt(rms[m]);
  }
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void rhs_norm(double rms[5]) {
/*--------------------------------------------------------------------
c     RMS norm of the residual rhs over the interior grid, per component
c-------------------------------------------------------------------*/
  int i, j, k, d, m;
  double term;

  for (m = 0; m < 5; m++)
    rms[m] = 0.0;

  for (i = 1; i < grid_points[0]-1; i++) {
    for (j = 1; j < grid_points[1]-1; j++) {
      for (k = 1; k < grid_points[2]-1; k++) {
        for (m = 0; m < 5; m++) {
          term = rhs[i][j][k][m];
          rms[m] += term * term;
        }
      }
    }
  }

  for (m = 0; m < 5; m++) {
    for (d = 0; d <= 2; d++)
      rms[m] /= (double)(grid_points[d]-2);
    rms[m] = sqrt(rms[m]);
  }
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void exact_rhs(void) {
#pragma omp parallel
{
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c compute the right hand side based on exact solution
c-------------------------------------------------------------------*/
double dtemp[5], xi, eta, zeta, dtpp;
int m, i, j, k, ip1, im1, jp1, jm1, km1, kp1;
/*--------------------------------------------------------------------
c initialize
c-------------------------------------------------------------------*/
#pragma omp for
for (i = 0; i < grid_points[0]; i++) {
for (j = 0; j < grid_points[1]; j++) {
for (k = 0; k < grid_points[2]; k++) {
for (m = 0; m < 5; m++) {
forcing[i][j][k][m] = 0.0;
}
}
}
}
/*--------------------------------------------------------------------
c xi-direction flux differences
c-------------------------------------------------------------------*/
#pragma omp for
for (j = 1; j < grid_points[1]-1; j++) {
eta = (double)j * dnym1;
for (k = 1; k < grid_points[2]-1; k++) {
zeta = (double)k * dnzm1;
for (i = 0; i < grid_points[0]; i++) {
xi = (double)i * dnxm1;
exact_solution(xi, eta, zeta, dtemp);
for (m = 0; m < 5; m++) {
ue[i][m] = dtemp[m];
}
dtpp = 1.0 / dtemp[0];
for (m = 1; m <= 4; m++) {
buf[i][m] = dtpp * dtemp[m];
}
cuf[i] = buf[i][1] * buf[i][1];
buf[i][0] = cuf[i] + buf[i][2] * buf[i][2] +
buf[i][3] * buf[i][3];
q[i] = 0.5*(buf[i][1]*ue[i][1] + buf[i][2]*ue[i][2] +
buf[i][3]*ue[i][3]);
}
for (i = 1; i < grid_points[0]-1; i++) {
im1 = i-1;
ip1 = i+1;
forcing[i][j][k][0] = forcing[i][j][k][0] -
tx2*(ue[ip1][1]-ue[im1][1])+
dx1tx1*(ue[ip1][0]-2.0*ue[i][0]+ue[im1][0]);
forcing[i][j][k][1] = forcing[i][j][k][1] -
tx2 * ((ue[ip1][1]*buf[ip1][1]+c2*(ue[ip1][4]-q[ip1]))-
(ue[im1][1]*buf[im1][1]+c2*(ue[im1][4]-q[im1])))+
xxcon1*(buf[ip1][1]-2.0*buf[i][1]+buf[im1][1])+
dx2tx1*( ue[ip1][1]-2.0* ue[i][1]+ ue[im1][1]);
forcing[i][j][k][2] = forcing[i][j][k][2] -
tx2 * (ue[ip1][2]*buf[ip1][1]-ue[im1][2]*buf[im1][1])+
xxcon2*(buf[ip1][2]-2.0*buf[i][2]+buf[im1][2])+
dx3tx1*( ue[ip1][2]-2.0* ue[i][2]+ ue[im1][2]);
forcing[i][j][k][3] = forcing[i][j][k][3] -
tx2*(ue[ip1][3]*buf[ip1][1]-ue[im1][3]*buf[im1][1])+
xxcon2*(buf[ip1][3]-2.0*buf[i][3]+buf[im1][3])+
dx4tx1*( ue[ip1][3]-2.0* ue[i][3]+ ue[im1][3]);
forcing[i][j][k][4] = forcing[i][j][k][4] -
tx2*(buf[ip1][1]*(c1*ue[ip1][4]-c2*q[ip1])-
buf[im1][1]*(c1*ue[im1][4]-c2*q[im1]))+
0.5*xxcon3*(buf[ip1][0]-2.0*buf[i][0]+buf[im1][0])+
xxcon4*(cuf[ip1]-2.0*cuf[i]+cuf[im1])+
xxcon5*(buf[ip1][4]-2.0*buf[i][4]+buf[im1][4])+
dx5tx1*( ue[ip1][4]-2.0* ue[i][4]+ ue[im1][4]);
}
/*--------------------------------------------------------------------
c Fourth-order dissipation
c-------------------------------------------------------------------*/
for (m = 0; m < 5; m++) {
i = 1;
forcing[i][j][k][m] = forcing[i][j][k][m] - dssp *
(5.0*ue[i][m] - 4.0*ue[i+1][m] +ue[i+2][m]);
i = 2;
forcing[i][j][k][m] = forcing[i][j][k][m] - dssp *
(-4.0*ue[i-1][m] + 6.0*ue[i][m] -
4.0*ue[i+1][m] + ue[i+2][m]);
}
for (m = 0; m < 5; m++) {
for (i = 1*3; i <= grid_points[0]-3*1-1; i++) {
forcing[i][j][k][m] = forcing[i][j][k][m] - dssp*
(ue[i-2][m] - 4.0*ue[i-1][m] +
6.0*ue[i][m] - 4.0*ue[i+1][m] + ue[i+2][m]);
}
}
for (m = 0; m < 5; m++) {
i = grid_points[0]-3;
forcing[i][j][k][m] = forcing[i][j][k][m] - dssp *
(ue[i-2][m] - 4.0*ue[i-1][m] +
6.0*ue[i][m] - 4.0*ue[i+1][m]);
i = grid_points[0]-2;
forcing[i][j][k][m] = forcing[i][j][k][m] - dssp *
(ue[i-2][m] - 4.0*ue[i-1][m] + 5.0*ue[i][m]);
}
}
}
/*--------------------------------------------------------------------
c eta-direction flux differences
c-------------------------------------------------------------------*/
#pragma omp for
for (i = 1; i < grid_points[0]-1; i++) {
xi = (double)i * dnxm1;
for (k = 1; k < grid_points[2]-1; k++) {
zeta = (double)k * dnzm1;
for (j = 0; j < grid_points[1]; j++) {
eta = (double)j * dnym1;
exact_solution(xi, eta, zeta, dtemp);
for (m = 0; m < 5; m++) {
ue[j][m] = dtemp[m];
}
dtpp = 1.0/dtemp[0];
for (m = 1; m <= 4; m++) {
buf[j][m] = dtpp * dtemp[m];
}
cuf[j] = buf[j][2] * buf[j][2];
buf[j][0] = cuf[j] + buf[j][1] * buf[j][1] +
buf[j][3] * buf[j][3];
q[j] = 0.5*(buf[j][1]*ue[j][1] + buf[j][2]*ue[j][2] +
buf[j][3]*ue[j][3]);
}
for (j = 1; j < grid_points[1]-1; j++) {
jm1 = j-1;
jp1 = j+1;
forcing[i][j][k][0] = forcing[i][j][k][0] -
ty2*( ue[jp1][2]-ue[jm1][2] )+
dy1ty1*(ue[jp1][0]-2.0*ue[j][0]+ue[jm1][0]);
forcing[i][j][k][1] = forcing[i][j][k][1] -
ty2*(ue[jp1][1]*buf[jp1][2]-ue[jm1][1]*buf[jm1][2])+
yycon2*(buf[jp1][1]-2.0*buf[j][1]+buf[jm1][1])+
dy2ty1*( ue[jp1][1]-2.0* ue[j][1]+ ue[jm1][1]);
forcing[i][j][k][2] = forcing[i][j][k][2] -
ty2*((ue[jp1][2]*buf[jp1][2]+c2*(ue[jp1][4]-q[jp1]))-
(ue[jm1][2]*buf[jm1][2]+c2*(ue[jm1][4]-q[jm1])))+
yycon1*(buf[jp1][2]-2.0*buf[j][2]+buf[jm1][2])+
dy3ty1*( ue[jp1][2]-2.0*ue[j][2] +ue[jm1][2]);
forcing[i][j][k][3] = forcing[i][j][k][3] -
ty2*(ue[jp1][3]*buf[jp1][2]-ue[jm1][3]*buf[jm1][2])+
yycon2*(buf[jp1][3]-2.0*buf[j][3]+buf[jm1][3])+
dy4ty1*( ue[jp1][3]-2.0*ue[j][3]+ ue[jm1][3]);
forcing[i][j][k][4] = forcing[i][j][k][4] -
ty2*(buf[jp1][2]*(c1*ue[jp1][4]-c2*q[jp1])-
buf[jm1][2]*(c1*ue[jm1][4]-c2*q[jm1]))+
0.5*yycon3*(buf[jp1][0]-2.0*buf[j][0]+
buf[jm1][0])+
yycon4*(cuf[jp1]-2.0*cuf[j]+cuf[jm1])+
yycon5*(buf[jp1][4]-2.0*buf[j][4]+buf[jm1][4])+
dy5ty1*(ue[jp1][4]-2.0*ue[j][4]+ue[jm1][4]);
}
/*--------------------------------------------------------------------
c Fourth-order dissipation
c-------------------------------------------------------------------*/
for (m = 0; m < 5; m++) {
j = 1;
forcing[i][j][k][m] = forcing[i][j][k][m] - dssp *
(5.0*ue[j][m] - 4.0*ue[j+1][m] +ue[j+2][m]);
j = 2;
forcing[i][j][k][m] = forcing[i][j][k][m] - dssp *
(-4.0*ue[j-1][m] + 6.0*ue[j][m] -
4.0*ue[j+1][m] + ue[j+2][m]);
}
for (m = 0; m < 5; m++) {
for (j = 1*3; j <= grid_points[1]-3*1-1; j++) {
forcing[i][j][k][m] = forcing[i][j][k][m] - dssp*
(ue[j-2][m] - 4.0*ue[j-1][m] +
6.0*ue[j][m] - 4.0*ue[j+1][m] + ue[j+2][m]);
}
}
for (m = 0; m < 5; m++) {
j = grid_points[1]-3;
forcing[i][j][k][m] = forcing[i][j][k][m] - dssp *
(ue[j-2][m] - 4.0*ue[j-1][m] +
6.0*ue[j][m] - 4.0*ue[j+1][m]);
j = grid_points[1]-2;
forcing[i][j][k][m] = forcing[i][j][k][m] - dssp *
(ue[j-2][m] - 4.0*ue[j-1][m] + 5.0*ue[j][m]);
}
}
}
/*--------------------------------------------------------------------
c zeta-direction flux differences
c-------------------------------------------------------------------*/
#pragma omp for
for (i = 1; i < grid_points[0]-1; i++) {
xi = (double)i * dnxm1;
for (j = 1; j < grid_points[1]-1; j++) {
eta = (double)j * dnym1;
for (k = 0; k < grid_points[2]; k++) {
zeta = (double)k * dnzm1;
exact_solution(xi, eta, zeta, dtemp);
for (m = 0; m < 5; m++) {
ue[k][m] = dtemp[m];
}
dtpp = 1.0/dtemp[0];
for (m = 1; m <= 4; m++) {
buf[k][m] = dtpp * dtemp[m];
}
cuf[k] = buf[k][3] * buf[k][3];
buf[k][0] = cuf[k] + buf[k][1] * buf[k][1] +
buf[k][2] * buf[k][2];
q[k] = 0.5*(buf[k][1]*ue[k][1] + buf[k][2]*ue[k][2] +
buf[k][3]*ue[k][3]);
}
for (k = 1; k < grid_points[2]-1; k++) {
km1 = k-1;
kp1 = k+1;
forcing[i][j][k][0] = forcing[i][j][k][0] -
tz2*( ue[kp1][3]-ue[km1][3] )+
dz1tz1*(ue[kp1][0]-2.0*ue[k][0]+ue[km1][0]);
forcing[i][j][k][1] = forcing[i][j][k][1] -
tz2 * (ue[kp1][1]*buf[kp1][3]-ue[km1][1]*buf[km1][3])+
zzcon2*(buf[kp1][1]-2.0*buf[k][1]+buf[km1][1])+
dz2tz1*( ue[kp1][1]-2.0* ue[k][1]+ ue[km1][1]);
forcing[i][j][k][2] = forcing[i][j][k][2] -
tz2 * (ue[kp1][2]*buf[kp1][3]-ue[km1][2]*buf[km1][3])+
zzcon2*(buf[kp1][2]-2.0*buf[k][2]+buf[km1][2])+
dz3tz1*(ue[kp1][2]-2.0*ue[k][2]+ue[km1][2]);
forcing[i][j][k][3] = forcing[i][j][k][3] -
tz2 * ((ue[kp1][3]*buf[kp1][3]+c2*(ue[kp1][4]-q[kp1]))-
(ue[km1][3]*buf[km1][3]+c2*(ue[km1][4]-q[km1])))+
zzcon1*(buf[kp1][3]-2.0*buf[k][3]+buf[km1][3])+
dz4tz1*( ue[kp1][3]-2.0*ue[k][3] +ue[km1][3]);
forcing[i][j][k][4] = forcing[i][j][k][4] -
tz2 * (buf[kp1][3]*(c1*ue[kp1][4]-c2*q[kp1])-
buf[km1][3]*(c1*ue[km1][4]-c2*q[km1]))+
0.5*zzcon3*(buf[kp1][0]-2.0*buf[k][0]
+buf[km1][0])+
zzcon4*(cuf[kp1]-2.0*cuf[k]+cuf[km1])+
zzcon5*(buf[kp1][4]-2.0*buf[k][4]+buf[km1][4])+
dz5tz1*( ue[kp1][4]-2.0*ue[k][4]+ ue[km1][4]);
}
/*--------------------------------------------------------------------
c Fourth-order dissipation
c-------------------------------------------------------------------*/
for (m = 0; m < 5; m++) {
k = 1;
forcing[i][j][k][m] = forcing[i][j][k][m] - dssp *
(5.0*ue[k][m] - 4.0*ue[k+1][m] +ue[k+2][m]);
k = 2;
forcing[i][j][k][m] = forcing[i][j][k][m] - dssp *
(-4.0*ue[k-1][m] + 6.0*ue[k][m] -
4.0*ue[k+1][m] + ue[k+2][m]);
}
for (m = 0; m < 5; m++) {
for (k = 1*3; k <= grid_points[2]-3*1-1; k++) {
forcing[i][j][k][m] = forcing[i][j][k][m] - dssp*
(ue[k-2][m] - 4.0*ue[k-1][m] +
6.0*ue[k][m] - 4.0*ue[k+1][m] + ue[k+2][m]);
}
}
for (m = 0; m < 5; m++) {
k = grid_points[2]-3;
forcing[i][j][k][m] = forcing[i][j][k][m] - dssp *
(ue[k-2][m] - 4.0*ue[k-1][m] +
6.0*ue[k][m] - 4.0*ue[k+1][m]);
k = grid_points[2]-2;
forcing[i][j][k][m] = forcing[i][j][k][m] - dssp *
(ue[k-2][m] - 4.0*ue[k-1][m] + 5.0*ue[k][m]);
}
}
}
/*--------------------------------------------------------------------
c now change the sign of the forcing function,
c-------------------------------------------------------------------*/
#pragma omp for
for (i = 1; i < grid_points[0]-1; i++) {
for (j = 1; j < grid_points[1]-1; j++) {
for (k = 1; k < grid_points[2]-1; k++) {
for (m = 0; m < 5; m++) {
forcing[i][j][k][m] = -1.0 * forcing[i][j][k][m];
}
}
}
}
}
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void exact_solution(double xi, double eta, double zeta,
			   double dtemp[5]) {
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c this function returns the exact solution at point xi, eta, zeta
c-------------------------------------------------------------------*/
  int m;
  /* For each of the 5 flow variables, evaluate the analytic solution as
     the constant coefficient plus a cubic polynomial (Horner form) in
     each of the three coordinates, using the global coefficient table
     ce[m][0..12].  The per-coordinate cubics are named so the summation
     order matches the original expression exactly. */
  for (m = 0; m < 5; m++) {
    double poly_xi   = ce[m][1] + xi  *(ce[m][4] + xi  *(ce[m][7] + xi  *ce[m][10]));
    double poly_eta  = ce[m][2] + eta *(ce[m][5] + eta *(ce[m][8] + eta *ce[m][11]));
    double poly_zeta = ce[m][3] + zeta*(ce[m][6] + zeta*(ce[m][9] + zeta*ce[m][12]));
    dtemp[m] = ce[m][0] + xi*poly_xi + eta*poly_eta + zeta*poly_zeta;
  }
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void initialize(void) {
#pragma omp parallel
{
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c This subroutine initializes the field variable u using
c tri-linear transfinite interpolation of the boundary values
c-------------------------------------------------------------------*/
/* Declared inside the parallel region, so every thread gets private
   copies of the indices and of the temporaries Pface/temp. */
int i, j, k, m, ix, iy, iz;
double xi, eta, zeta, Pface[2][3][5], Pxi, Peta, Pzeta, temp[5];
/*--------------------------------------------------------------------
c Later (in compute_rhs) we compute 1/u for every element. A few of
c the corner elements are not used, but it convenient (and faster)
c to compute the whole thing with a simple loop. Make sure those
c values are nonzero by initializing the whole thing here.
c-------------------------------------------------------------------*/
#pragma omp for
for (i = 0; i < IMAX; i++) {
for (j = 0; j < IMAX; j++) {
for (k = 0; k < IMAX; k++) {
for (m = 0; m < 5; m++) {
u[i][j][k][m] = 1.0;
}
}
}
}
/*--------------------------------------------------------------------
c first store the "interpolated" values everywhere on the grid
c-------------------------------------------------------------------*/
#pragma omp for
for (i = 0; i < grid_points[0]; i++) {
xi = (double)i * dnxm1;
for (j = 0; j < grid_points[1]; j++) {
eta = (double)j * dnym1;
for (k = 0; k < grid_points[2]; k++) {
zeta = (double)k * dnzm1;
/* Evaluate the exact solution on the two opposite faces of each
   coordinate direction (coordinate pinned to 0 or 1). */
for (ix = 0; ix < 2; ix++) {
exact_solution((double)ix, eta, zeta,
&(Pface[ix][0][0]));
}
for (iy = 0; iy < 2; iy++) {
exact_solution(xi, (double)iy , zeta,
&Pface[iy][1][0]);
}
for (iz = 0; iz < 2; iz++) {
exact_solution(xi, eta, (double)iz,
&Pface[iz][2][0]);
}
/* Tri-linear transfinite interpolation: linear blend between the
   two faces in each direction, combined by inclusion-exclusion
   (sum of face terms minus pairwise products plus triple product). */
for (m = 0; m < 5; m++) {
Pxi = xi * Pface[1][0][m] +
(1.0-xi) * Pface[0][0][m];
Peta = eta * Pface[1][1][m] +
(1.0-eta) * Pface[0][1][m];
Pzeta = zeta * Pface[1][2][m] +
(1.0-zeta) * Pface[0][2][m];
u[i][j][k][m] = Pxi + Peta + Pzeta -
Pxi*Peta - Pxi*Pzeta - Peta*Pzeta +
Pxi*Peta*Pzeta;
}
}
}
}
/*--------------------------------------------------------------------
c now store the exact values on the boundaries
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c west face
c-------------------------------------------------------------------*/
i = 0;
xi = 0.0;
/* nowait: the east-face loop below writes the i == grid_points[0]-1
   plane while this one writes i == 0, so the two may overlap in time.
   NOTE(review): relies on the two planes being distinct — confirm
   grid_points[0] > 1. */
#pragma omp for nowait
for (j = 0; j < grid_points[1]; j++) {
eta = (double)j * dnym1;
for (k = 0; k < grid_points[2]; k++) {
zeta = (double)k * dnzm1;
exact_solution(xi, eta, zeta, temp);
for (m = 0; m < 5; m++) {
u[i][j][k][m] = temp[m];
}
}
}
/*--------------------------------------------------------------------
c east face
c-------------------------------------------------------------------*/
i = grid_points[0]-1;
xi = 1.0;
#pragma omp for
for (j = 0; j < grid_points[1]; j++) {
eta = (double)j * dnym1;
for (k = 0; k < grid_points[2]; k++) {
zeta = (double)k * dnzm1;
exact_solution(xi, eta, zeta, temp);
for (m = 0; m < 5; m++) {
u[i][j][k][m] = temp[m];
}
}
}
/*--------------------------------------------------------------------
c south face
c-------------------------------------------------------------------*/
j = 0;
eta = 0.0;
/* nowait: paired with the north-face loop (disjoint j planes), same
   pattern as the west/east pair above. */
#pragma omp for nowait
for (i = 0; i < grid_points[0]; i++) {
xi = (double)i * dnxm1;
for (k = 0; k < grid_points[2]; k++) {
zeta = (double)k * dnzm1;
exact_solution(xi, eta, zeta, temp);
for (m = 0; m < 5; m++) {
u[i][j][k][m] = temp[m];
}
}
}
/*--------------------------------------------------------------------
c north face
c-------------------------------------------------------------------*/
j = grid_points[1]-1;
eta = 1.0;
#pragma omp for
for (i = 0; i < grid_points[0]; i++) {
xi = (double)i * dnxm1;
for (k = 0; k < grid_points[2]; k++) {
zeta = (double)k * dnzm1;
exact_solution(xi, eta, zeta, temp);
for (m = 0; m < 5; m++) {
u[i][j][k][m] = temp[m];
}
}
}
/*--------------------------------------------------------------------
c bottom face
c-------------------------------------------------------------------*/
k = 0;
zeta = 0.0;
/* nowait: paired with the top-face loop (disjoint k planes). */
#pragma omp for nowait
for (i = 0; i < grid_points[0]; i++) {
xi = (double)i *dnxm1;
for (j = 0; j < grid_points[1]; j++) {
eta = (double)j * dnym1;
exact_solution(xi, eta, zeta, temp);
for (m = 0; m < 5; m++) {
u[i][j][k][m] = temp[m];
}
}
}
/*--------------------------------------------------------------------
c top face
c-------------------------------------------------------------------*/
k = grid_points[2]-1;
zeta = 1.0;
#pragma omp for
for (i = 0; i < grid_points[0]; i++) {
xi = (double)i * dnxm1;
for (j = 0; j < grid_points[1]; j++) {
eta = (double)j * dnym1;
exact_solution(xi, eta, zeta, temp);
for (m = 0; m < 5; m++) {
u[i][j][k][m] = temp[m];
}
}
}
}
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void lhsinit(void) {
#pragma omp parallel
{
int i, j, k, m, n;
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c Initialize the whole left hand side in a single traversal:
c zero the three 5x5 factor blocks at every grid point, then place
c 1.0 on the diagonal of the middle (index 1) block so lhs starts
c out as the identity.  This is overkill, but convenient.
c
c The original version swept the full lhs array twice (zero pass,
c then diagonal pass) with an extra implicit barrier in between;
c fusing the passes touches each cell once and drops that barrier
c while producing the identical final state.
c-------------------------------------------------------------------*/
#pragma omp for
for (i = 0; i < grid_points[0]; i++) {
for (j = 0; j < grid_points[1]; j++) {
for (k = 0; k < grid_points[2]; k++) {
for (m = 0; m < 5; m++) {
for (n = 0; n < 5; n++) {
lhs[i][j][k][0][m][n] = 0.0;
lhs[i][j][k][1][m][n] = 0.0;
lhs[i][j][k][2][m][n] = 0.0;
}
/* row m of the middle block has just been zeroed; set its
   diagonal entry after the fact. */
lhs[i][j][k][1][m][m] = 1.0;
}
}
}
}
}
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void lhsx(void) {
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c This function computes the left hand side in the xi-direction
c-------------------------------------------------------------------*/
/* NOTE(review): the worksharing pragmas below are orphaned (no
   enclosing "#pragma omp parallel" in this function) — presumably
   this is called from inside a parallel region; confirm at the call
   site.  tmp1/tmp2/tmp3 are not declared locally; if they are plain
   file-scope globals they would race across threads — likely declared
   threadprivate elsewhere, confirm. */
int i, j, k;
/*--------------------------------------------------------------------
c determine a (labeled f) and n jacobians
c-------------------------------------------------------------------*/
#pragma omp for
for (j = 1; j < grid_points[1]-1; j++) {
for (k = 1; k < grid_points[2]-1; k++) {
/* First pass: build the flux jacobian (fjac) and viscous jacobian
   (njac) 5x5 blocks at every i along this (j,k) pencil, including
   the boundary points i = 0 and i = grid_points[0]-1, which are
   needed as neighbours in the second pass. */
for (i = 0; i < grid_points[0]; i++) {
tmp1 = 1.0 / u[i][j][k][0];
tmp2 = tmp1 * tmp1;
tmp3 = tmp1 * tmp2;
/*--------------------------------------------------------------------
c
c-------------------------------------------------------------------*/
fjac[ i][ j][ k][0][0] = 0.0;
fjac[ i][ j][ k][0][1] = 1.0;
fjac[ i][ j][ k][0][2] = 0.0;
fjac[ i][ j][ k][0][3] = 0.0;
fjac[ i][ j][ k][0][4] = 0.0;
fjac[ i][ j][ k][1][0] = -(u[i][j][k][1] * tmp2 *
u[i][j][k][1])
+ c2 * 0.50 * (u[i][j][k][1] * u[i][j][k][1]
+ u[i][j][k][2] * u[i][j][k][2]
+ u[i][j][k][3] * u[i][j][k][3] ) * tmp2;
fjac[i][j][k][1][1] = ( 2.0 - c2 )
* ( u[i][j][k][1] / u[i][j][k][0] );
fjac[i][j][k][1][2] = - c2 * ( u[i][j][k][2] * tmp1 );
fjac[i][j][k][1][3] = - c2 * ( u[i][j][k][3] * tmp1 );
fjac[i][j][k][1][4] = c2;
fjac[i][j][k][2][0] = - ( u[i][j][k][1]*u[i][j][k][2] ) * tmp2;
fjac[i][j][k][2][1] = u[i][j][k][2] * tmp1;
fjac[i][j][k][2][2] = u[i][j][k][1] * tmp1;
fjac[i][j][k][2][3] = 0.0;
fjac[i][j][k][2][4] = 0.0;
fjac[i][j][k][3][0] = - ( u[i][j][k][1]*u[i][j][k][3] ) * tmp2;
fjac[i][j][k][3][1] = u[i][j][k][3] * tmp1;
fjac[i][j][k][3][2] = 0.0;
fjac[i][j][k][3][3] = u[i][j][k][1] * tmp1;
fjac[i][j][k][3][4] = 0.0;
fjac[i][j][k][4][0] = ( c2 * ( u[i][j][k][1] * u[i][j][k][1]
+ u[i][j][k][2] * u[i][j][k][2]
+ u[i][j][k][3] * u[i][j][k][3] ) * tmp2
- c1 * ( u[i][j][k][4] * tmp1 ) )
* ( u[i][j][k][1] * tmp1 );
fjac[i][j][k][4][1] = c1 * u[i][j][k][4] * tmp1
- 0.50 * c2
* ( 3.0*u[i][j][k][1]*u[i][j][k][1]
+ u[i][j][k][2]*u[i][j][k][2]
+ u[i][j][k][3]*u[i][j][k][3] ) * tmp2;
fjac[i][j][k][4][2] = - c2 * ( u[i][j][k][2]*u[i][j][k][1] )
* tmp2;
fjac[i][j][k][4][3] = - c2 * ( u[i][j][k][3]*u[i][j][k][1] )
* tmp2;
fjac[i][j][k][4][4] = c1 * ( u[i][j][k][1] * tmp1 );
njac[i][j][k][0][0] = 0.0;
njac[i][j][k][0][1] = 0.0;
njac[i][j][k][0][2] = 0.0;
njac[i][j][k][0][3] = 0.0;
njac[i][j][k][0][4] = 0.0;
njac[i][j][k][1][0] = - con43 * c3c4 * tmp2 * u[i][j][k][1];
njac[i][j][k][1][1] = con43 * c3c4 * tmp1;
njac[i][j][k][1][2] = 0.0;
njac[i][j][k][1][3] = 0.0;
njac[i][j][k][1][4] = 0.0;
njac[i][j][k][2][0] = - c3c4 * tmp2 * u[i][j][k][2];
njac[i][j][k][2][1] = 0.0;
njac[i][j][k][2][2] = c3c4 * tmp1;
njac[i][j][k][2][3] = 0.0;
njac[i][j][k][2][4] = 0.0;
njac[i][j][k][3][0] = - c3c4 * tmp2 * u[i][j][k][3];
njac[i][j][k][3][1] = 0.0;
njac[i][j][k][3][2] = 0.0;
njac[i][j][k][3][3] = c3c4 * tmp1;
njac[i][j][k][3][4] = 0.0;
njac[i][j][k][4][0] = - ( con43 * c3c4
- c1345 ) * tmp3 * (pow2(u[i][j][k][1]))
- ( c3c4 - c1345 ) * tmp3 * (pow2(u[i][j][k][2]))
- ( c3c4 - c1345 ) * tmp3 * (pow2(u[i][j][k][3]))
- c1345 * tmp2 * u[i][j][k][4];
njac[i][j][k][4][1] = ( con43 * c3c4
- c1345 ) * tmp2 * u[i][j][k][1];
njac[i][j][k][4][2] = ( c3c4 - c1345 ) * tmp2 * u[i][j][k][2];
njac[i][j][k][4][3] = ( c3c4 - c1345 ) * tmp2 * u[i][j][k][3];
njac[i][j][k][4][4] = ( c1345 ) * tmp1;
}
/*--------------------------------------------------------------------
c now jacobians set, so form left hand side in x direction
c-------------------------------------------------------------------*/
/* Second pass (interior points only): assemble the block-tridiagonal
   factors.  AA is the sub-diagonal block built from the (i-1)
   neighbour, BB the diagonal block at i (identity plus viscous
   terms), CC the super-diagonal block from (i+1). */
for (i = 1; i < grid_points[0]-1; i++) {
tmp1 = dt * tx1;
tmp2 = dt * tx2;
lhs[i][j][k][AA][0][0] = - tmp2 * fjac[i-1][j][k][0][0]
- tmp1 * njac[i-1][j][k][0][0]
- tmp1 * dx1;
lhs[i][j][k][AA][0][1] = - tmp2 * fjac[i-1][j][k][0][1]
- tmp1 * njac[i-1][j][k][0][1];
lhs[i][j][k][AA][0][2] = - tmp2 * fjac[i-1][j][k][0][2]
- tmp1 * njac[i-1][j][k][0][2];
lhs[i][j][k][AA][0][3] = - tmp2 * fjac[i-1][j][k][0][3]
- tmp1 * njac[i-1][j][k][0][3];
lhs[i][j][k][AA][0][4] = - tmp2 * fjac[i-1][j][k][0][4]
- tmp1 * njac[i-1][j][k][0][4];
lhs[i][j][k][AA][1][0] = - tmp2 * fjac[i-1][j][k][1][0]
- tmp1 * njac[i-1][j][k][1][0];
lhs[i][j][k][AA][1][1] = - tmp2 * fjac[i-1][j][k][1][1]
- tmp1 * njac[i-1][j][k][1][1]
- tmp1 * dx2;
lhs[i][j][k][AA][1][2] = - tmp2 * fjac[i-1][j][k][1][2]
- tmp1 * njac[i-1][j][k][1][2];
lhs[i][j][k][AA][1][3] = - tmp2 * fjac[i-1][j][k][1][3]
- tmp1 * njac[i-1][j][k][1][3];
lhs[i][j][k][AA][1][4] = - tmp2 * fjac[i-1][j][k][1][4]
- tmp1 * njac[i-1][j][k][1][4];
lhs[i][j][k][AA][2][0] = - tmp2 * fjac[i-1][j][k][2][0]
- tmp1 * njac[i-1][j][k][2][0];
lhs[i][j][k][AA][2][1] = - tmp2 * fjac[i-1][j][k][2][1]
- tmp1 * njac[i-1][j][k][2][1];
lhs[i][j][k][AA][2][2] = - tmp2 * fjac[i-1][j][k][2][2]
- tmp1 * njac[i-1][j][k][2][2]
- tmp1 * dx3;
lhs[i][j][k][AA][2][3] = - tmp2 * fjac[i-1][j][k][2][3]
- tmp1 * njac[i-1][j][k][2][3];
lhs[i][j][k][AA][2][4] = - tmp2 * fjac[i-1][j][k][2][4]
- tmp1 * njac[i-1][j][k][2][4];
lhs[i][j][k][AA][3][0] = - tmp2 * fjac[i-1][j][k][3][0]
- tmp1 * njac[i-1][j][k][3][0];
lhs[i][j][k][AA][3][1] = - tmp2 * fjac[i-1][j][k][3][1]
- tmp1 * njac[i-1][j][k][3][1];
lhs[i][j][k][AA][3][2] = - tmp2 * fjac[i-1][j][k][3][2]
- tmp1 * njac[i-1][j][k][3][2];
lhs[i][j][k][AA][3][3] = - tmp2 * fjac[i-1][j][k][3][3]
- tmp1 * njac[i-1][j][k][3][3]
- tmp1 * dx4;
lhs[i][j][k][AA][3][4] = - tmp2 * fjac[i-1][j][k][3][4]
- tmp1 * njac[i-1][j][k][3][4];
lhs[i][j][k][AA][4][0] = - tmp2 * fjac[i-1][j][k][4][0]
- tmp1 * njac[i-1][j][k][4][0];
lhs[i][j][k][AA][4][1] = - tmp2 * fjac[i-1][j][k][4][1]
- tmp1 * njac[i-1][j][k][4][1];
lhs[i][j][k][AA][4][2] = - tmp2 * fjac[i-1][j][k][4][2]
- tmp1 * njac[i-1][j][k][4][2];
lhs[i][j][k][AA][4][3] = - tmp2 * fjac[i-1][j][k][4][3]
- tmp1 * njac[i-1][j][k][4][3];
lhs[i][j][k][AA][4][4] = - tmp2 * fjac[i-1][j][k][4][4]
- tmp1 * njac[i-1][j][k][4][4]
- tmp1 * dx5;
lhs[i][j][k][BB][0][0] = 1.0
+ tmp1 * 2.0 * njac[i][j][k][0][0]
+ tmp1 * 2.0 * dx1;
lhs[i][j][k][BB][0][1] = tmp1 * 2.0 * njac[i][j][k][0][1];
lhs[i][j][k][BB][0][2] = tmp1 * 2.0 * njac[i][j][k][0][2];
lhs[i][j][k][BB][0][3] = tmp1 * 2.0 * njac[i][j][k][0][3];
lhs[i][j][k][BB][0][4] = tmp1 * 2.0 * njac[i][j][k][0][4];
lhs[i][j][k][BB][1][0] = tmp1 * 2.0 * njac[i][j][k][1][0];
lhs[i][j][k][BB][1][1] = 1.0
+ tmp1 * 2.0 * njac[i][j][k][1][1]
+ tmp1 * 2.0 * dx2;
lhs[i][j][k][BB][1][2] = tmp1 * 2.0 * njac[i][j][k][1][2];
lhs[i][j][k][BB][1][3] = tmp1 * 2.0 * njac[i][j][k][1][3];
lhs[i][j][k][BB][1][4] = tmp1 * 2.0 * njac[i][j][k][1][4];
lhs[i][j][k][BB][2][0] = tmp1 * 2.0 * njac[i][j][k][2][0];
lhs[i][j][k][BB][2][1] = tmp1 * 2.0 * njac[i][j][k][2][1];
lhs[i][j][k][BB][2][2] = 1.0
+ tmp1 * 2.0 * njac[i][j][k][2][2]
+ tmp1 * 2.0 * dx3;
lhs[i][j][k][BB][2][3] = tmp1 * 2.0 * njac[i][j][k][2][3];
lhs[i][j][k][BB][2][4] = tmp1 * 2.0 * njac[i][j][k][2][4];
lhs[i][j][k][BB][3][0] = tmp1 * 2.0 * njac[i][j][k][3][0];
lhs[i][j][k][BB][3][1] = tmp1 * 2.0 * njac[i][j][k][3][1];
lhs[i][j][k][BB][3][2] = tmp1 * 2.0 * njac[i][j][k][3][2];
lhs[i][j][k][BB][3][3] = 1.0
+ tmp1 * 2.0 * njac[i][j][k][3][3]
+ tmp1 * 2.0 * dx4;
lhs[i][j][k][BB][3][4] = tmp1 * 2.0 * njac[i][j][k][3][4];
lhs[i][j][k][BB][4][0] = tmp1 * 2.0 * njac[i][j][k][4][0];
lhs[i][j][k][BB][4][1] = tmp1 * 2.0 * njac[i][j][k][4][1];
lhs[i][j][k][BB][4][2] = tmp1 * 2.0 * njac[i][j][k][4][2];
lhs[i][j][k][BB][4][3] = tmp1 * 2.0 * njac[i][j][k][4][3];
lhs[i][j][k][BB][4][4] = 1.0
+ tmp1 * 2.0 * njac[i][j][k][4][4]
+ tmp1 * 2.0 * dx5;
lhs[i][j][k][CC][0][0] = tmp2 * fjac[i+1][j][k][0][0]
- tmp1 * njac[i+1][j][k][0][0]
- tmp1 * dx1;
lhs[i][j][k][CC][0][1] = tmp2 * fjac[i+1][j][k][0][1]
- tmp1 * njac[i+1][j][k][0][1];
lhs[i][j][k][CC][0][2] = tmp2 * fjac[i+1][j][k][0][2]
- tmp1 * njac[i+1][j][k][0][2];
lhs[i][j][k][CC][0][3] = tmp2 * fjac[i+1][j][k][0][3]
- tmp1 * njac[i+1][j][k][0][3];
lhs[i][j][k][CC][0][4] = tmp2 * fjac[i+1][j][k][0][4]
- tmp1 * njac[i+1][j][k][0][4];
lhs[i][j][k][CC][1][0] = tmp2 * fjac[i+1][j][k][1][0]
- tmp1 * njac[i+1][j][k][1][0];
lhs[i][j][k][CC][1][1] = tmp2 * fjac[i+1][j][k][1][1]
- tmp1 * njac[i+1][j][k][1][1]
- tmp1 * dx2;
lhs[i][j][k][CC][1][2] = tmp2 * fjac[i+1][j][k][1][2]
- tmp1 * njac[i+1][j][k][1][2];
lhs[i][j][k][CC][1][3] = tmp2 * fjac[i+1][j][k][1][3]
- tmp1 * njac[i+1][j][k][1][3];
lhs[i][j][k][CC][1][4] = tmp2 * fjac[i+1][j][k][1][4]
- tmp1 * njac[i+1][j][k][1][4];
lhs[i][j][k][CC][2][0] = tmp2 * fjac[i+1][j][k][2][0]
- tmp1 * njac[i+1][j][k][2][0];
lhs[i][j][k][CC][2][1] = tmp2 * fjac[i+1][j][k][2][1]
- tmp1 * njac[i+1][j][k][2][1];
lhs[i][j][k][CC][2][2] = tmp2 * fjac[i+1][j][k][2][2]
- tmp1 * njac[i+1][j][k][2][2]
- tmp1 * dx3;
lhs[i][j][k][CC][2][3] = tmp2 * fjac[i+1][j][k][2][3]
- tmp1 * njac[i+1][j][k][2][3];
lhs[i][j][k][CC][2][4] = tmp2 * fjac[i+1][j][k][2][4]
- tmp1 * njac[i+1][j][k][2][4];
lhs[i][j][k][CC][3][0] = tmp2 * fjac[i+1][j][k][3][0]
- tmp1 * njac[i+1][j][k][3][0];
lhs[i][j][k][CC][3][1] = tmp2 * fjac[i+1][j][k][3][1]
- tmp1 * njac[i+1][j][k][3][1];
lhs[i][j][k][CC][3][2] = tmp2 * fjac[i+1][j][k][3][2]
- tmp1 * njac[i+1][j][k][3][2];
lhs[i][j][k][CC][3][3] = tmp2 * fjac[i+1][j][k][3][3]
- tmp1 * njac[i+1][j][k][3][3]
- tmp1 * dx4;
lhs[i][j][k][CC][3][4] = tmp2 * fjac[i+1][j][k][3][4]
- tmp1 * njac[i+1][j][k][3][4];
lhs[i][j][k][CC][4][0] = tmp2 * fjac[i+1][j][k][4][0]
- tmp1 * njac[i+1][j][k][4][0];
lhs[i][j][k][CC][4][1] = tmp2 * fjac[i+1][j][k][4][1]
- tmp1 * njac[i+1][j][k][4][1];
lhs[i][j][k][CC][4][2] = tmp2 * fjac[i+1][j][k][4][2]
- tmp1 * njac[i+1][j][k][4][2];
lhs[i][j][k][CC][4][3] = tmp2 * fjac[i+1][j][k][4][3]
- tmp1 * njac[i+1][j][k][4][3];
lhs[i][j][k][CC][4][4] = tmp2 * fjac[i+1][j][k][4][4]
- tmp1 * njac[i+1][j][k][4][4]
- tmp1 * dx5;
}
}
}
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void lhsy(void) {
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c This function computes the left hand side for the three y-factors
c-------------------------------------------------------------------*/
/* NOTE(review): as in lhsx, the worksharing pragmas are orphaned (no
   enclosing "#pragma omp parallel" here) — presumably called inside a
   parallel region; confirm.  tmp1/tmp2/tmp3 come from file scope —
   likely threadprivate, confirm. */
int i, j, k;
/*--------------------------------------------------------------------
c Compute the indices for storing the tri-diagonal matrix;
c determine a (labeled f) and n jacobians for cell c
c-------------------------------------------------------------------*/
/* First pass: flux (fjac) and viscous (njac) jacobian blocks at every
   j along the pencil, including the j boundaries which serve as
   neighbours in the assembly pass below. */
#pragma omp for
for (i = 1; i < grid_points[0]-1; i++) {
for (j = 0; j < grid_points[1]; j++) {
for (k = 1; k < grid_points[2]-1; k++) {
tmp1 = 1.0 / u[i][j][k][0];
tmp2 = tmp1 * tmp1;
tmp3 = tmp1 * tmp2;
fjac[ i][ j][ k][0][0] = 0.0;
fjac[ i][ j][ k][0][1] = 0.0;
fjac[ i][ j][ k][0][2] = 1.0;
fjac[ i][ j][ k][0][3] = 0.0;
fjac[ i][ j][ k][0][4] = 0.0;
fjac[i][j][k][1][0] = - ( u[i][j][k][1]*u[i][j][k][2] )
* tmp2;
fjac[i][j][k][1][1] = u[i][j][k][2] * tmp1;
fjac[i][j][k][1][2] = u[i][j][k][1] * tmp1;
fjac[i][j][k][1][3] = 0.0;
fjac[i][j][k][1][4] = 0.0;
fjac[i][j][k][2][0] = - ( u[i][j][k][2]*u[i][j][k][2]*tmp2)
+ 0.50 * c2 * ( ( u[i][j][k][1] * u[i][j][k][1]
+ u[i][j][k][2] * u[i][j][k][2]
+ u[i][j][k][3] * u[i][j][k][3] )
* tmp2 );
fjac[i][j][k][2][1] = - c2 * u[i][j][k][1] * tmp1;
fjac[i][j][k][2][2] = ( 2.0 - c2 )
* u[i][j][k][2] * tmp1;
fjac[i][j][k][2][3] = - c2 * u[i][j][k][3] * tmp1;
fjac[i][j][k][2][4] = c2;
fjac[i][j][k][3][0] = - ( u[i][j][k][2]*u[i][j][k][3] )
* tmp2;
fjac[i][j][k][3][1] = 0.0;
fjac[i][j][k][3][2] = u[i][j][k][3] * tmp1;
fjac[i][j][k][3][3] = u[i][j][k][2] * tmp1;
fjac[i][j][k][3][4] = 0.0;
fjac[i][j][k][4][0] = ( c2 * ( u[i][j][k][1] * u[i][j][k][1]
+ u[i][j][k][2] * u[i][j][k][2]
+ u[i][j][k][3] * u[i][j][k][3] )
* tmp2
- c1 * u[i][j][k][4] * tmp1 )
* u[i][j][k][2] * tmp1;
fjac[i][j][k][4][1] = - c2 * u[i][j][k][1]*u[i][j][k][2]
* tmp2;
fjac[i][j][k][4][2] = c1 * u[i][j][k][4] * tmp1
- 0.50 * c2
* ( ( u[i][j][k][1]*u[i][j][k][1]
+ 3.0 * u[i][j][k][2]*u[i][j][k][2]
+ u[i][j][k][3]*u[i][j][k][3] )
* tmp2 );
fjac[i][j][k][4][3] = - c2 * ( u[i][j][k][2]*u[i][j][k][3] )
* tmp2;
fjac[i][j][k][4][4] = c1 * u[i][j][k][2] * tmp1;
njac[i][j][k][0][0] = 0.0;
njac[i][j][k][0][1] = 0.0;
njac[i][j][k][0][2] = 0.0;
njac[i][j][k][0][3] = 0.0;
njac[i][j][k][0][4] = 0.0;
njac[i][j][k][1][0] = - c3c4 * tmp2 * u[i][j][k][1];
njac[i][j][k][1][1] = c3c4 * tmp1;
njac[i][j][k][1][2] = 0.0;
njac[i][j][k][1][3] = 0.0;
njac[i][j][k][1][4] = 0.0;
njac[i][j][k][2][0] = - con43 * c3c4 * tmp2 * u[i][j][k][2];
njac[i][j][k][2][1] = 0.0;
njac[i][j][k][2][2] = con43 * c3c4 * tmp1;
njac[i][j][k][2][3] = 0.0;
njac[i][j][k][2][4] = 0.0;
njac[i][j][k][3][0] = - c3c4 * tmp2 * u[i][j][k][3];
njac[i][j][k][3][1] = 0.0;
njac[i][j][k][3][2] = 0.0;
njac[i][j][k][3][3] = c3c4 * tmp1;
njac[i][j][k][3][4] = 0.0;
njac[i][j][k][4][0] = - ( c3c4
- c1345 ) * tmp3 * (pow2(u[i][j][k][1]))
- ( con43 * c3c4
- c1345 ) * tmp3 * (pow2(u[i][j][k][2]))
- ( c3c4 - c1345 ) * tmp3 * (pow2(u[i][j][k][3]))
- c1345 * tmp2 * u[i][j][k][4];
njac[i][j][k][4][1] = ( c3c4 - c1345 ) * tmp2 * u[i][j][k][1];
njac[i][j][k][4][2] = ( con43 * c3c4
- c1345 ) * tmp2 * u[i][j][k][2];
njac[i][j][k][4][3] = ( c3c4 - c1345 ) * tmp2 * u[i][j][k][3];
njac[i][j][k][4][4] = ( c1345 ) * tmp1;
}
}
}
/*--------------------------------------------------------------------
c now jacobians set, so form left hand side in y direction
c-------------------------------------------------------------------*/
/* Second pass (interior points only): assemble the block-tridiagonal
   factors in the eta direction.  AA is the sub-diagonal block from
   the (j-1) neighbour, BB the diagonal block at j (identity plus
   viscous terms), CC the super-diagonal block from (j+1). */
#pragma omp for
for (i = 1; i < grid_points[0]-1; i++) {
for (j = 1; j < grid_points[1]-1; j++) {
for (k = 1; k < grid_points[2]-1; k++) {
tmp1 = dt * ty1;
tmp2 = dt * ty2;
lhs[i][j][k][AA][0][0] = - tmp2 * fjac[i][j-1][k][0][0]
- tmp1 * njac[i][j-1][k][0][0]
- tmp1 * dy1;
lhs[i][j][k][AA][0][1] = - tmp2 * fjac[i][j-1][k][0][1]
- tmp1 * njac[i][j-1][k][0][1];
lhs[i][j][k][AA][0][2] = - tmp2 * fjac[i][j-1][k][0][2]
- tmp1 * njac[i][j-1][k][0][2];
lhs[i][j][k][AA][0][3] = - tmp2 * fjac[i][j-1][k][0][3]
- tmp1 * njac[i][j-1][k][0][3];
lhs[i][j][k][AA][0][4] = - tmp2 * fjac[i][j-1][k][0][4]
- tmp1 * njac[i][j-1][k][0][4];
lhs[i][j][k][AA][1][0] = - tmp2 * fjac[i][j-1][k][1][0]
- tmp1 * njac[i][j-1][k][1][0];
lhs[i][j][k][AA][1][1] = - tmp2 * fjac[i][j-1][k][1][1]
- tmp1 * njac[i][j-1][k][1][1]
- tmp1 * dy2;
lhs[i][j][k][AA][1][2] = - tmp2 * fjac[i][j-1][k][1][2]
- tmp1 * njac[i][j-1][k][1][2];
lhs[i][j][k][AA][1][3] = - tmp2 * fjac[i][j-1][k][1][3]
- tmp1 * njac[i][j-1][k][1][3];
lhs[i][j][k][AA][1][4] = - tmp2 * fjac[i][j-1][k][1][4]
- tmp1 * njac[i][j-1][k][1][4];
lhs[i][j][k][AA][2][0] = - tmp2 * fjac[i][j-1][k][2][0]
- tmp1 * njac[i][j-1][k][2][0];
lhs[i][j][k][AA][2][1] = - tmp2 * fjac[i][j-1][k][2][1]
- tmp1 * njac[i][j-1][k][2][1];
lhs[i][j][k][AA][2][2] = - tmp2 * fjac[i][j-1][k][2][2]
- tmp1 * njac[i][j-1][k][2][2]
- tmp1 * dy3;
lhs[i][j][k][AA][2][3] = - tmp2 * fjac[i][j-1][k][2][3]
- tmp1 * njac[i][j-1][k][2][3];
lhs[i][j][k][AA][2][4] = - tmp2 * fjac[i][j-1][k][2][4]
- tmp1 * njac[i][j-1][k][2][4];
lhs[i][j][k][AA][3][0] = - tmp2 * fjac[i][j-1][k][3][0]
- tmp1 * njac[i][j-1][k][3][0];
lhs[i][j][k][AA][3][1] = - tmp2 * fjac[i][j-1][k][3][1]
- tmp1 * njac[i][j-1][k][3][1];
lhs[i][j][k][AA][3][2] = - tmp2 * fjac[i][j-1][k][3][2]
- tmp1 * njac[i][j-1][k][3][2];
lhs[i][j][k][AA][3][3] = - tmp2 * fjac[i][j-1][k][3][3]
- tmp1 * njac[i][j-1][k][3][3]
- tmp1 * dy4;
lhs[i][j][k][AA][3][4] = - tmp2 * fjac[i][j-1][k][3][4]
- tmp1 * njac[i][j-1][k][3][4];
lhs[i][j][k][AA][4][0] = - tmp2 * fjac[i][j-1][k][4][0]
- tmp1 * njac[i][j-1][k][4][0];
lhs[i][j][k][AA][4][1] = - tmp2 * fjac[i][j-1][k][4][1]
- tmp1 * njac[i][j-1][k][4][1];
lhs[i][j][k][AA][4][2] = - tmp2 * fjac[i][j-1][k][4][2]
- tmp1 * njac[i][j-1][k][4][2];
lhs[i][j][k][AA][4][3] = - tmp2 * fjac[i][j-1][k][4][3]
- tmp1 * njac[i][j-1][k][4][3];
lhs[i][j][k][AA][4][4] = - tmp2 * fjac[i][j-1][k][4][4]
- tmp1 * njac[i][j-1][k][4][4]
- tmp1 * dy5;
lhs[i][j][k][BB][0][0] = 1.0
+ tmp1 * 2.0 * njac[i][j][k][0][0]
+ tmp1 * 2.0 * dy1;
lhs[i][j][k][BB][0][1] = tmp1 * 2.0 * njac[i][j][k][0][1];
lhs[i][j][k][BB][0][2] = tmp1 * 2.0 * njac[i][j][k][0][2];
lhs[i][j][k][BB][0][3] = tmp1 * 2.0 * njac[i][j][k][0][3];
lhs[i][j][k][BB][0][4] = tmp1 * 2.0 * njac[i][j][k][0][4];
lhs[i][j][k][BB][1][0] = tmp1 * 2.0 * njac[i][j][k][1][0];
lhs[i][j][k][BB][1][1] = 1.0
+ tmp1 * 2.0 * njac[i][j][k][1][1]
+ tmp1 * 2.0 * dy2;
lhs[i][j][k][BB][1][2] = tmp1 * 2.0 * njac[i][j][k][1][2];
lhs[i][j][k][BB][1][3] = tmp1 * 2.0 * njac[i][j][k][1][3];
lhs[i][j][k][BB][1][4] = tmp1 * 2.0 * njac[i][j][k][1][4];
lhs[i][j][k][BB][2][0] = tmp1 * 2.0 * njac[i][j][k][2][0];
lhs[i][j][k][BB][2][1] = tmp1 * 2.0 * njac[i][j][k][2][1];
lhs[i][j][k][BB][2][2] = 1.0
+ tmp1 * 2.0 * njac[i][j][k][2][2]
+ tmp1 * 2.0 * dy3;
lhs[i][j][k][BB][2][3] = tmp1 * 2.0 * njac[i][j][k][2][3];
lhs[i][j][k][BB][2][4] = tmp1 * 2.0 * njac[i][j][k][2][4];
lhs[i][j][k][BB][3][0] = tmp1 * 2.0 * njac[i][j][k][3][0];
lhs[i][j][k][BB][3][1] = tmp1 * 2.0 * njac[i][j][k][3][1];
lhs[i][j][k][BB][3][2] = tmp1 * 2.0 * njac[i][j][k][3][2];
lhs[i][j][k][BB][3][3] = 1.0
+ tmp1 * 2.0 * njac[i][j][k][3][3]
+ tmp1 * 2.0 * dy4;
lhs[i][j][k][BB][3][4] = tmp1 * 2.0 * njac[i][j][k][3][4];
lhs[i][j][k][BB][4][0] = tmp1 * 2.0 * njac[i][j][k][4][0];
lhs[i][j][k][BB][4][1] = tmp1 * 2.0 * njac[i][j][k][4][1];
lhs[i][j][k][BB][4][2] = tmp1 * 2.0 * njac[i][j][k][4][2];
lhs[i][j][k][BB][4][3] = tmp1 * 2.0 * njac[i][j][k][4][3];
lhs[i][j][k][BB][4][4] = 1.0
+ tmp1 * 2.0 * njac[i][j][k][4][4]
+ tmp1 * 2.0 * dy5;
lhs[i][j][k][CC][0][0] = tmp2 * fjac[i][j+1][k][0][0]
- tmp1 * njac[i][j+1][k][0][0]
- tmp1 * dy1;
lhs[i][j][k][CC][0][1] = tmp2 * fjac[i][j+1][k][0][1]
- tmp1 * njac[i][j+1][k][0][1];
lhs[i][j][k][CC][0][2] = tmp2 * fjac[i][j+1][k][0][2]
- tmp1 * njac[i][j+1][k][0][2];
lhs[i][j][k][CC][0][3] = tmp2 * fjac[i][j+1][k][0][3]
- tmp1 * njac[i][j+1][k][0][3];
lhs[i][j][k][CC][0][4] = tmp2 * fjac[i][j+1][k][0][4]
- tmp1 * njac[i][j+1][k][0][4];
lhs[i][j][k][CC][1][0] = tmp2 * fjac[i][j+1][k][1][0]
- tmp1 * njac[i][j+1][k][1][0];
lhs[i][j][k][CC][1][1] = tmp2 * fjac[i][j+1][k][1][1]
- tmp1 * njac[i][j+1][k][1][1]
- tmp1 * dy2;
lhs[i][j][k][CC][1][2] = tmp2 * fjac[i][j+1][k][1][2]
- tmp1 * njac[i][j+1][k][1][2];
lhs[i][j][k][CC][1][3] = tmp2 * fjac[i][j+1][k][1][3]
- tmp1 * njac[i][j+1][k][1][3];
lhs[i][j][k][CC][1][4] = tmp2 * fjac[i][j+1][k][1][4]
- tmp1 * njac[i][j+1][k][1][4];
lhs[i][j][k][CC][2][0] = tmp2 * fjac[i][j+1][k][2][0]
- tmp1 * njac[i][j+1][k][2][0];
lhs[i][j][k][CC][2][1] = tmp2 * fjac[i][j+1][k][2][1]
- tmp1 * njac[i][j+1][k][2][1];
lhs[i][j][k][CC][2][2] = tmp2 * fjac[i][j+1][k][2][2]
- tmp1 * njac[i][j+1][k][2][2]
- tmp1 * dy3;
lhs[i][j][k][CC][2][3] = tmp2 * fjac[i][j+1][k][2][3]
- tmp1 * njac[i][j+1][k][2][3];
lhs[i][j][k][CC][2][4] = tmp2 * fjac[i][j+1][k][2][4]
- tmp1 * njac[i][j+1][k][2][4];
lhs[i][j][k][CC][3][0] = tmp2 * fjac[i][j+1][k][3][0]
- tmp1 * njac[i][j+1][k][3][0];
lhs[i][j][k][CC][3][1] = tmp2 * fjac[i][j+1][k][3][1]
- tmp1 * njac[i][j+1][k][3][1];
lhs[i][j][k][CC][3][2] = tmp2 * fjac[i][j+1][k][3][2]
- tmp1 * njac[i][j+1][k][3][2];
lhs[i][j][k][CC][3][3] = tmp2 * fjac[i][j+1][k][3][3]
- tmp1 * njac[i][j+1][k][3][3]
- tmp1 * dy4;
lhs[i][j][k][CC][3][4] = tmp2 * fjac[i][j+1][k][3][4]
- tmp1 * njac[i][j+1][k][3][4];
lhs[i][j][k][CC][4][0] = tmp2 * fjac[i][j+1][k][4][0]
- tmp1 * njac[i][j+1][k][4][0];
lhs[i][j][k][CC][4][1] = tmp2 * fjac[i][j+1][k][4][1]
- tmp1 * njac[i][j+1][k][4][1];
lhs[i][j][k][CC][4][2] = tmp2 * fjac[i][j+1][k][4][2]
- tmp1 * njac[i][j+1][k][4][2];
lhs[i][j][k][CC][4][3] = tmp2 * fjac[i][j+1][k][4][3]
- tmp1 * njac[i][j+1][k][4][3];
lhs[i][j][k][CC][4][4] = tmp2 * fjac[i][j+1][k][4][4]
- tmp1 * njac[i][j+1][k][4][4]
- tmp1 * dy5;
}
}
}
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void lhsz(void) {
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c This function computes the left hand side for the three z-factors
c-------------------------------------------------------------------*/
int i, j, k;
/* NOTE(review): tmp1/tmp2/tmp3 are not declared locally, so they are
   file-scope temporaries shared with the other lhs* routines --
   presumably threadprivate under OpenMP; confirm in the header. */
/*--------------------------------------------------------------------
c Compute the indices for storing the block-diagonal matrix;
c determine c (labeled f) and s jacobians
c---------------------------------------------------------------------*/
#pragma omp for
for (i = 1; i < grid_points[0]-1; i++) {
for (j = 1; j < grid_points[1]-1; j++) {
for (k = 0; k < grid_points[2]; k++) {
/* tmp1 = 1/rho, tmp2 = 1/rho^2, tmp3 = 1/rho^3 (u[...][0] is density) */
tmp1 = 1.0 / u[i][j][k][0];
tmp2 = tmp1 * tmp1;
tmp3 = tmp1 * tmp2;
/* fjac: 5x5 inviscid flux jacobian in the zeta direction */
fjac[i][j][k][0][0] = 0.0;
fjac[i][j][k][0][1] = 0.0;
fjac[i][j][k][0][2] = 0.0;
fjac[i][j][k][0][3] = 1.0;
fjac[i][j][k][0][4] = 0.0;
fjac[i][j][k][1][0] = - ( u[i][j][k][1]*u[i][j][k][3] )
* tmp2;
fjac[i][j][k][1][1] = u[i][j][k][3] * tmp1;
fjac[i][j][k][1][2] = 0.0;
fjac[i][j][k][1][3] = u[i][j][k][1] * tmp1;
fjac[i][j][k][1][4] = 0.0;
fjac[i][j][k][2][0] = - ( u[i][j][k][2]*u[i][j][k][3] )
* tmp2;
fjac[i][j][k][2][1] = 0.0;
fjac[i][j][k][2][2] = u[i][j][k][3] * tmp1;
fjac[i][j][k][2][3] = u[i][j][k][2] * tmp1;
fjac[i][j][k][2][4] = 0.0;
fjac[i][j][k][3][0] = - (u[i][j][k][3]*u[i][j][k][3] * tmp2 )
+ 0.50 * c2 * ( ( u[i][j][k][1] * u[i][j][k][1]
+ u[i][j][k][2] * u[i][j][k][2]
+ u[i][j][k][3] * u[i][j][k][3] ) * tmp2 );
fjac[i][j][k][3][1] = - c2 * u[i][j][k][1] * tmp1;
fjac[i][j][k][3][2] = - c2 * u[i][j][k][2] * tmp1;
fjac[i][j][k][3][3] = ( 2.0 - c2 )
* u[i][j][k][3] * tmp1;
fjac[i][j][k][3][4] = c2;
fjac[i][j][k][4][0] = ( c2 * ( u[i][j][k][1] * u[i][j][k][1]
+ u[i][j][k][2] * u[i][j][k][2]
+ u[i][j][k][3] * u[i][j][k][3] )
* tmp2
- c1 * ( u[i][j][k][4] * tmp1 ) )
* ( u[i][j][k][3] * tmp1 );
fjac[i][j][k][4][1] = - c2 * ( u[i][j][k][1]*u[i][j][k][3] )
* tmp2;
fjac[i][j][k][4][2] = - c2 * ( u[i][j][k][2]*u[i][j][k][3] )
* tmp2;
fjac[i][j][k][4][3] = c1 * ( u[i][j][k][4] * tmp1 )
- 0.50 * c2
* ( ( u[i][j][k][1]*u[i][j][k][1]
+ u[i][j][k][2]*u[i][j][k][2]
+ 3.0*u[i][j][k][3]*u[i][j][k][3] )
* tmp2 );
fjac[i][j][k][4][4] = c1 * u[i][j][k][3] * tmp1;
/* njac: 5x5 viscous jacobian in the zeta direction */
njac[i][j][k][0][0] = 0.0;
njac[i][j][k][0][1] = 0.0;
njac[i][j][k][0][2] = 0.0;
njac[i][j][k][0][3] = 0.0;
njac[i][j][k][0][4] = 0.0;
njac[i][j][k][1][0] = - c3c4 * tmp2 * u[i][j][k][1];
njac[i][j][k][1][1] = c3c4 * tmp1;
njac[i][j][k][1][2] = 0.0;
njac[i][j][k][1][3] = 0.0;
njac[i][j][k][1][4] = 0.0;
njac[i][j][k][2][0] = - c3c4 * tmp2 * u[i][j][k][2];
njac[i][j][k][2][1] = 0.0;
njac[i][j][k][2][2] = c3c4 * tmp1;
njac[i][j][k][2][3] = 0.0;
njac[i][j][k][2][4] = 0.0;
njac[i][j][k][3][0] = - con43 * c3c4 * tmp2 * u[i][j][k][3];
njac[i][j][k][3][1] = 0.0;
njac[i][j][k][3][2] = 0.0;
/* con43 * c3 * c4 is numerically the same product as con43 * c3c4
   used two lines above (c3c4 = c3*c4 in set_constants) */
njac[i][j][k][3][3] = con43 * c3 * c4 * tmp1;
njac[i][j][k][3][4] = 0.0;
njac[i][j][k][4][0] = - ( c3c4
- c1345 ) * tmp3 * (pow2(u[i][j][k][1]))
- ( c3c4 - c1345 ) * tmp3 * (pow2(u[i][j][k][2]))
- ( con43 * c3c4
- c1345 ) * tmp3 * (pow2(u[i][j][k][3]))
- c1345 * tmp2 * u[i][j][k][4];
njac[i][j][k][4][1] = ( c3c4 - c1345 ) * tmp2 * u[i][j][k][1];
njac[i][j][k][4][2] = ( c3c4 - c1345 ) * tmp2 * u[i][j][k][2];
njac[i][j][k][4][3] = ( con43 * c3c4
- c1345 ) * tmp2 * u[i][j][k][3];
njac[i][j][k][4][4] = ( c1345 )* tmp1;
}
}
}
/*--------------------------------------------------------------------
c now jacobians set, so form left hand side in z direction
c-------------------------------------------------------------------*/
#pragma omp for
for (i = 1; i < grid_points[0]-1; i++) {
for (j = 1; j < grid_points[1]-1; j++) {
for (k = 1; k < grid_points[2]-1; k++) {
tmp1 = dt * tz1;
tmp2 = dt * tz2;
/* AA: sub-diagonal block -- couples to the k-1 plane; the dz*
   terms add the diffusion coefficient on the diagonal entries */
lhs[i][j][k][AA][0][0] = - tmp2 * fjac[i][j][k-1][0][0]
- tmp1 * njac[i][j][k-1][0][0]
- tmp1 * dz1;
lhs[i][j][k][AA][0][1] = - tmp2 * fjac[i][j][k-1][0][1]
- tmp1 * njac[i][j][k-1][0][1];
lhs[i][j][k][AA][0][2] = - tmp2 * fjac[i][j][k-1][0][2]
- tmp1 * njac[i][j][k-1][0][2];
lhs[i][j][k][AA][0][3] = - tmp2 * fjac[i][j][k-1][0][3]
- tmp1 * njac[i][j][k-1][0][3];
lhs[i][j][k][AA][0][4] = - tmp2 * fjac[i][j][k-1][0][4]
- tmp1 * njac[i][j][k-1][0][4];
lhs[i][j][k][AA][1][0] = - tmp2 * fjac[i][j][k-1][1][0]
- tmp1 * njac[i][j][k-1][1][0];
lhs[i][j][k][AA][1][1] = - tmp2 * fjac[i][j][k-1][1][1]
- tmp1 * njac[i][j][k-1][1][1]
- tmp1 * dz2;
lhs[i][j][k][AA][1][2] = - tmp2 * fjac[i][j][k-1][1][2]
- tmp1 * njac[i][j][k-1][1][2];
lhs[i][j][k][AA][1][3] = - tmp2 * fjac[i][j][k-1][1][3]
- tmp1 * njac[i][j][k-1][1][3];
lhs[i][j][k][AA][1][4] = - tmp2 * fjac[i][j][k-1][1][4]
- tmp1 * njac[i][j][k-1][1][4];
lhs[i][j][k][AA][2][0] = - tmp2 * fjac[i][j][k-1][2][0]
- tmp1 * njac[i][j][k-1][2][0];
lhs[i][j][k][AA][2][1] = - tmp2 * fjac[i][j][k-1][2][1]
- tmp1 * njac[i][j][k-1][2][1];
lhs[i][j][k][AA][2][2] = - tmp2 * fjac[i][j][k-1][2][2]
- tmp1 * njac[i][j][k-1][2][2]
- tmp1 * dz3;
lhs[i][j][k][AA][2][3] = - tmp2 * fjac[i][j][k-1][2][3]
- tmp1 * njac[i][j][k-1][2][3];
lhs[i][j][k][AA][2][4] = - tmp2 * fjac[i][j][k-1][2][4]
- tmp1 * njac[i][j][k-1][2][4];
lhs[i][j][k][AA][3][0] = - tmp2 * fjac[i][j][k-1][3][0]
- tmp1 * njac[i][j][k-1][3][0];
lhs[i][j][k][AA][3][1] = - tmp2 * fjac[i][j][k-1][3][1]
- tmp1 * njac[i][j][k-1][3][1];
lhs[i][j][k][AA][3][2] = - tmp2 * fjac[i][j][k-1][3][2]
- tmp1 * njac[i][j][k-1][3][2];
lhs[i][j][k][AA][3][3] = - tmp2 * fjac[i][j][k-1][3][3]
- tmp1 * njac[i][j][k-1][3][3]
- tmp1 * dz4;
lhs[i][j][k][AA][3][4] = - tmp2 * fjac[i][j][k-1][3][4]
- tmp1 * njac[i][j][k-1][3][4];
lhs[i][j][k][AA][4][0] = - tmp2 * fjac[i][j][k-1][4][0]
- tmp1 * njac[i][j][k-1][4][0];
lhs[i][j][k][AA][4][1] = - tmp2 * fjac[i][j][k-1][4][1]
- tmp1 * njac[i][j][k-1][4][1];
lhs[i][j][k][AA][4][2] = - tmp2 * fjac[i][j][k-1][4][2]
- tmp1 * njac[i][j][k-1][4][2];
lhs[i][j][k][AA][4][3] = - tmp2 * fjac[i][j][k-1][4][3]
- tmp1 * njac[i][j][k-1][4][3];
lhs[i][j][k][AA][4][4] = - tmp2 * fjac[i][j][k-1][4][4]
- tmp1 * njac[i][j][k-1][4][4]
- tmp1 * dz5;
/* BB: main diagonal block -- identity plus 2x viscous/diffusion
   contribution at plane k (no fjac term on the diagonal block) */
lhs[i][j][k][BB][0][0] = 1.0
+ tmp1 * 2.0 * njac[i][j][k][0][0]
+ tmp1 * 2.0 * dz1;
lhs[i][j][k][BB][0][1] = tmp1 * 2.0 * njac[i][j][k][0][1];
lhs[i][j][k][BB][0][2] = tmp1 * 2.0 * njac[i][j][k][0][2];
lhs[i][j][k][BB][0][3] = tmp1 * 2.0 * njac[i][j][k][0][3];
lhs[i][j][k][BB][0][4] = tmp1 * 2.0 * njac[i][j][k][0][4];
lhs[i][j][k][BB][1][0] = tmp1 * 2.0 * njac[i][j][k][1][0];
lhs[i][j][k][BB][1][1] = 1.0
+ tmp1 * 2.0 * njac[i][j][k][1][1]
+ tmp1 * 2.0 * dz2;
lhs[i][j][k][BB][1][2] = tmp1 * 2.0 * njac[i][j][k][1][2];
lhs[i][j][k][BB][1][3] = tmp1 * 2.0 * njac[i][j][k][1][3];
lhs[i][j][k][BB][1][4] = tmp1 * 2.0 * njac[i][j][k][1][4];
lhs[i][j][k][BB][2][0] = tmp1 * 2.0 * njac[i][j][k][2][0];
lhs[i][j][k][BB][2][1] = tmp1 * 2.0 * njac[i][j][k][2][1];
lhs[i][j][k][BB][2][2] = 1.0
+ tmp1 * 2.0 * njac[i][j][k][2][2]
+ tmp1 * 2.0 * dz3;
lhs[i][j][k][BB][2][3] = tmp1 * 2.0 * njac[i][j][k][2][3];
lhs[i][j][k][BB][2][4] = tmp1 * 2.0 * njac[i][j][k][2][4];
lhs[i][j][k][BB][3][0] = tmp1 * 2.0 * njac[i][j][k][3][0];
lhs[i][j][k][BB][3][1] = tmp1 * 2.0 * njac[i][j][k][3][1];
lhs[i][j][k][BB][3][2] = tmp1 * 2.0 * njac[i][j][k][3][2];
lhs[i][j][k][BB][3][3] = 1.0
+ tmp1 * 2.0 * njac[i][j][k][3][3]
+ tmp1 * 2.0 * dz4;
lhs[i][j][k][BB][3][4] = tmp1 * 2.0 * njac[i][j][k][3][4];
lhs[i][j][k][BB][4][0] = tmp1 * 2.0 * njac[i][j][k][4][0];
lhs[i][j][k][BB][4][1] = tmp1 * 2.0 * njac[i][j][k][4][1];
lhs[i][j][k][BB][4][2] = tmp1 * 2.0 * njac[i][j][k][4][2];
lhs[i][j][k][BB][4][3] = tmp1 * 2.0 * njac[i][j][k][4][3];
lhs[i][j][k][BB][4][4] = 1.0
+ tmp1 * 2.0 * njac[i][j][k][4][4]
+ tmp1 * 2.0 * dz5;
/* CC: super-diagonal block -- couples to the k+1 plane; same
   structure as AA but with the convective sign flipped */
lhs[i][j][k][CC][0][0] = tmp2 * fjac[i][j][k+1][0][0]
- tmp1 * njac[i][j][k+1][0][0]
- tmp1 * dz1;
lhs[i][j][k][CC][0][1] = tmp2 * fjac[i][j][k+1][0][1]
- tmp1 * njac[i][j][k+1][0][1];
lhs[i][j][k][CC][0][2] = tmp2 * fjac[i][j][k+1][0][2]
- tmp1 * njac[i][j][k+1][0][2];
lhs[i][j][k][CC][0][3] = tmp2 * fjac[i][j][k+1][0][3]
- tmp1 * njac[i][j][k+1][0][3];
lhs[i][j][k][CC][0][4] = tmp2 * fjac[i][j][k+1][0][4]
- tmp1 * njac[i][j][k+1][0][4];
lhs[i][j][k][CC][1][0] = tmp2 * fjac[i][j][k+1][1][0]
- tmp1 * njac[i][j][k+1][1][0];
lhs[i][j][k][CC][1][1] = tmp2 * fjac[i][j][k+1][1][1]
- tmp1 * njac[i][j][k+1][1][1]
- tmp1 * dz2;
lhs[i][j][k][CC][1][2] = tmp2 * fjac[i][j][k+1][1][2]
- tmp1 * njac[i][j][k+1][1][2];
lhs[i][j][k][CC][1][3] = tmp2 * fjac[i][j][k+1][1][3]
- tmp1 * njac[i][j][k+1][1][3];
lhs[i][j][k][CC][1][4] = tmp2 * fjac[i][j][k+1][1][4]
- tmp1 * njac[i][j][k+1][1][4];
lhs[i][j][k][CC][2][0] = tmp2 * fjac[i][j][k+1][2][0]
- tmp1 * njac[i][j][k+1][2][0];
lhs[i][j][k][CC][2][1] = tmp2 * fjac[i][j][k+1][2][1]
- tmp1 * njac[i][j][k+1][2][1];
lhs[i][j][k][CC][2][2] = tmp2 * fjac[i][j][k+1][2][2]
- tmp1 * njac[i][j][k+1][2][2]
- tmp1 * dz3;
lhs[i][j][k][CC][2][3] = tmp2 * fjac[i][j][k+1][2][3]
- tmp1 * njac[i][j][k+1][2][3];
lhs[i][j][k][CC][2][4] = tmp2 * fjac[i][j][k+1][2][4]
- tmp1 * njac[i][j][k+1][2][4];
lhs[i][j][k][CC][3][0] = tmp2 * fjac[i][j][k+1][3][0]
- tmp1 * njac[i][j][k+1][3][0];
lhs[i][j][k][CC][3][1] = tmp2 * fjac[i][j][k+1][3][1]
- tmp1 * njac[i][j][k+1][3][1];
lhs[i][j][k][CC][3][2] = tmp2 * fjac[i][j][k+1][3][2]
- tmp1 * njac[i][j][k+1][3][2];
lhs[i][j][k][CC][3][3] = tmp2 * fjac[i][j][k+1][3][3]
- tmp1 * njac[i][j][k+1][3][3]
- tmp1 * dz4;
lhs[i][j][k][CC][3][4] = tmp2 * fjac[i][j][k+1][3][4]
- tmp1 * njac[i][j][k+1][3][4];
lhs[i][j][k][CC][4][0] = tmp2 * fjac[i][j][k+1][4][0]
- tmp1 * njac[i][j][k+1][4][0];
lhs[i][j][k][CC][4][1] = tmp2 * fjac[i][j][k+1][4][1]
- tmp1 * njac[i][j][k+1][4][1];
lhs[i][j][k][CC][4][2] = tmp2 * fjac[i][j][k+1][4][2]
- tmp1 * njac[i][j][k+1][4][2];
lhs[i][j][k][CC][4][3] = tmp2 * fjac[i][j][k+1][4][3]
- tmp1 * njac[i][j][k+1][4][3];
lhs[i][j][k][CC][4][4] = tmp2 * fjac[i][j][k+1][4][4]
- tmp1 * njac[i][j][k+1][4][4]
- tmp1 * dz5;
}
}
}
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c Assembles the right hand side vector rhs: starts from the stored
c forcing term, adds second-order central flux differences plus
c viscous terms in the xi, eta and zeta directions, applies fourth-
c order artificial dissipation near/away from the boundaries, and
c finally scales everything by the time step dt.  Runs inside an
c enclosing OpenMP parallel region (worksharing "omp for" loops);
c "nowait" is used where consecutive loops touch disjoint data.
c-------------------------------------------------------------------*/
static void compute_rhs(void) {
int i, j, k, m;
double rho_inv, uijk, up1, um1, vijk, vp1, vm1, wijk, wp1, wm1;
/*--------------------------------------------------------------------
c compute the reciprocal of density, and the kinetic energy,
c and the speed of sound.
c-------------------------------------------------------------------*/
#pragma omp for nowait
for (i = 0; i < grid_points[0]; i++) {
for (j = 0; j < grid_points[1]; j++) {
for (k = 0; k < grid_points[2]; k++) {
/* cache 1/rho, velocities u/v/w, 0.5*rho*|v|^2 and kinetic energy */
rho_inv = 1.0/u[i][j][k][0];
rho_i[i][j][k] = rho_inv;
us[i][j][k] = u[i][j][k][1] * rho_inv;
vs[i][j][k] = u[i][j][k][2] * rho_inv;
ws[i][j][k] = u[i][j][k][3] * rho_inv;
square[i][j][k] = 0.5 * (u[i][j][k][1]*u[i][j][k][1] +
u[i][j][k][2]*u[i][j][k][2] +
u[i][j][k][3]*u[i][j][k][3] ) * rho_inv;
qs[i][j][k] = square[i][j][k] * rho_inv;
}
}
}
/*--------------------------------------------------------------------
c copy the exact forcing term to the right hand side; because
c this forcing term is known, we can store it on the whole grid
c including the boundary
c-------------------------------------------------------------------*/
#pragma omp for
for (i = 0; i < grid_points[0]; i++) {
for (j = 0; j < grid_points[1]; j++) {
for (k = 0; k < grid_points[2]; k++) {
for (m = 0; m < 5; m++) {
rhs[i][j][k][m] = forcing[i][j][k][m];
}
}
}
}
/*--------------------------------------------------------------------
c compute xi-direction fluxes
c-------------------------------------------------------------------*/
#pragma omp for
for (i = 1; i < grid_points[0]-1; i++) {
for (j = 1; j < grid_points[1]-1; j++) {
for (k = 1; k < grid_points[2]-1; k++) {
uijk = us[i][j][k];
up1 = us[i+1][j][k];
um1 = us[i-1][j][k];
rhs[i][j][k][0] = rhs[i][j][k][0] + dx1tx1 *
(u[i+1][j][k][0] - 2.0*u[i][j][k][0] +
u[i-1][j][k][0]) -
tx2 * (u[i+1][j][k][1] - u[i-1][j][k][1]);
rhs[i][j][k][1] = rhs[i][j][k][1] + dx2tx1 *
(u[i+1][j][k][1] - 2.0*u[i][j][k][1] +
u[i-1][j][k][1]) +
xxcon2*con43 * (up1 - 2.0*uijk + um1) -
tx2 * (u[i+1][j][k][1]*up1 -
u[i-1][j][k][1]*um1 +
(u[i+1][j][k][4]- square[i+1][j][k]-
u[i-1][j][k][4]+ square[i-1][j][k])*
c2);
rhs[i][j][k][2] = rhs[i][j][k][2] + dx3tx1 *
(u[i+1][j][k][2] - 2.0*u[i][j][k][2] +
u[i-1][j][k][2]) +
xxcon2 * (vs[i+1][j][k] - 2.0*vs[i][j][k] +
vs[i-1][j][k]) -
tx2 * (u[i+1][j][k][2]*up1 -
u[i-1][j][k][2]*um1);
rhs[i][j][k][3] = rhs[i][j][k][3] + dx4tx1 *
(u[i+1][j][k][3] - 2.0*u[i][j][k][3] +
u[i-1][j][k][3]) +
xxcon2 * (ws[i+1][j][k] - 2.0*ws[i][j][k] +
ws[i-1][j][k]) -
tx2 * (u[i+1][j][k][3]*up1 -
u[i-1][j][k][3]*um1);
rhs[i][j][k][4] = rhs[i][j][k][4] + dx5tx1 *
(u[i+1][j][k][4] - 2.0*u[i][j][k][4] +
u[i-1][j][k][4]) +
xxcon3 * (qs[i+1][j][k] - 2.0*qs[i][j][k] +
qs[i-1][j][k]) +
xxcon4 * (up1*up1 - 2.0*uijk*uijk +
um1*um1) +
xxcon5 * (u[i+1][j][k][4]*rho_i[i+1][j][k] -
2.0*u[i][j][k][4]*rho_i[i][j][k] +
u[i-1][j][k][4]*rho_i[i-1][j][k]) -
tx2 * ( (c1*u[i+1][j][k][4] -
c2*square[i+1][j][k])*up1 -
(c1*u[i-1][j][k][4] -
c2*square[i-1][j][k])*um1 );
}
}
}
/*--------------------------------------------------------------------
c add fourth order xi-direction dissipation
c (one-sided stencils at i=1, i=2 and the two planes next to the
c far boundary; full 5-point stencil in the interior)
c-------------------------------------------------------------------*/
i = 1;
#pragma omp for nowait
for (j = 1; j < grid_points[1]-1; j++) {
for (k = 1; k < grid_points[2]-1; k++) {
for (m = 0; m < 5; m++) {
rhs[i][j][k][m] = rhs[i][j][k][m]- dssp *
( 5.0*u[i][j][k][m] - 4.0*u[i+1][j][k][m] +
u[i+2][j][k][m]);
}
}
}
i = 2;
#pragma omp for nowait
for (j = 1; j < grid_points[1]-1; j++) {
for (k = 1; k < grid_points[2]-1; k++) {
for (m = 0; m < 5; m++) {
rhs[i][j][k][m] = rhs[i][j][k][m] - dssp *
(-4.0*u[i-1][j][k][m] + 6.0*u[i][j][k][m] -
4.0*u[i+1][j][k][m] + u[i+2][j][k][m]);
}
}
}
#pragma omp for nowait
for (i = 3; i < grid_points[0]-3; i++) {
for (j = 1; j < grid_points[1]-1; j++) {
for (k = 1; k < grid_points[2]-1; k++) {
for (m = 0; m < 5; m++) {
rhs[i][j][k][m] = rhs[i][j][k][m] - dssp *
( u[i-2][j][k][m] - 4.0*u[i-1][j][k][m] +
6.0*u[i][j][k][m] - 4.0*u[i+1][j][k][m] +
u[i+2][j][k][m] );
}
}
}
}
i = grid_points[0]-3;
#pragma omp for nowait
for (j = 1; j < grid_points[1]-1; j++) {
for (k = 1; k < grid_points[2]-1; k++) {
for (m = 0; m < 5; m++) {
rhs[i][j][k][m] = rhs[i][j][k][m] - dssp *
( u[i-2][j][k][m] - 4.0*u[i-1][j][k][m] +
6.0*u[i][j][k][m] - 4.0*u[i+1][j][k][m] );
}
}
}
i = grid_points[0]-2;
#pragma omp for
for (j = 1; j < grid_points[1]-1; j++) {
for (k = 1; k < grid_points[2]-1; k++) {
for (m = 0; m < 5; m++) {
rhs[i][j][k][m] = rhs[i][j][k][m] - dssp *
( u[i-2][j][k][m] - 4.*u[i-1][j][k][m] +
5.0*u[i][j][k][m] );
}
}
}
/*--------------------------------------------------------------------
c compute eta-direction fluxes
c-------------------------------------------------------------------*/
#pragma omp for
for (i = 1; i < grid_points[0]-1; i++) {
for (j = 1; j < grid_points[1]-1; j++) {
for (k = 1; k < grid_points[2]-1; k++) {
vijk = vs[i][j][k];
vp1 = vs[i][j+1][k];
vm1 = vs[i][j-1][k];
rhs[i][j][k][0] = rhs[i][j][k][0] + dy1ty1 *
(u[i][j+1][k][0] - 2.0*u[i][j][k][0] +
u[i][j-1][k][0]) -
ty2 * (u[i][j+1][k][2] - u[i][j-1][k][2]);
rhs[i][j][k][1] = rhs[i][j][k][1] + dy2ty1 *
(u[i][j+1][k][1] - 2.0*u[i][j][k][1] +
u[i][j-1][k][1]) +
yycon2 * (us[i][j+1][k] - 2.0*us[i][j][k] +
us[i][j-1][k]) -
ty2 * (u[i][j+1][k][1]*vp1 -
u[i][j-1][k][1]*vm1);
rhs[i][j][k][2] = rhs[i][j][k][2] + dy3ty1 *
(u[i][j+1][k][2] - 2.0*u[i][j][k][2] +
u[i][j-1][k][2]) +
yycon2*con43 * (vp1 - 2.0*vijk + vm1) -
ty2 * (u[i][j+1][k][2]*vp1 -
u[i][j-1][k][2]*vm1 +
(u[i][j+1][k][4] - square[i][j+1][k] -
u[i][j-1][k][4] + square[i][j-1][k])
*c2);
rhs[i][j][k][3] = rhs[i][j][k][3] + dy4ty1 *
(u[i][j+1][k][3] - 2.0*u[i][j][k][3] +
u[i][j-1][k][3]) +
yycon2 * (ws[i][j+1][k] - 2.0*ws[i][j][k] +
ws[i][j-1][k]) -
ty2 * (u[i][j+1][k][3]*vp1 -
u[i][j-1][k][3]*vm1);
rhs[i][j][k][4] = rhs[i][j][k][4] + dy5ty1 *
(u[i][j+1][k][4] - 2.0*u[i][j][k][4] +
u[i][j-1][k][4]) +
yycon3 * (qs[i][j+1][k] - 2.0*qs[i][j][k] +
qs[i][j-1][k]) +
yycon4 * (vp1*vp1 - 2.0*vijk*vijk +
vm1*vm1) +
yycon5 * (u[i][j+1][k][4]*rho_i[i][j+1][k] -
2.0*u[i][j][k][4]*rho_i[i][j][k] +
u[i][j-1][k][4]*rho_i[i][j-1][k]) -
ty2 * ((c1*u[i][j+1][k][4] -
c2*square[i][j+1][k]) * vp1 -
(c1*u[i][j-1][k][4] -
c2*square[i][j-1][k]) * vm1);
}
}
}
/*--------------------------------------------------------------------
c add fourth order eta-direction dissipation
c (same boundary treatment as the xi direction, applied along j)
c-------------------------------------------------------------------*/
j = 1;
#pragma omp for nowait
for (i = 1; i < grid_points[0]-1; i++) {
for (k = 1; k < grid_points[2]-1; k++) {
for (m = 0; m < 5; m++) {
rhs[i][j][k][m] = rhs[i][j][k][m]- dssp *
( 5.0*u[i][j][k][m] - 4.0*u[i][j+1][k][m] +
u[i][j+2][k][m]);
}
}
}
j = 2;
#pragma omp for nowait
for (i = 1; i < grid_points[0]-1; i++) {
for (k = 1; k < grid_points[2]-1; k++) {
for (m = 0; m < 5; m++) {
rhs[i][j][k][m] = rhs[i][j][k][m] - dssp *
(-4.0*u[i][j-1][k][m] + 6.0*u[i][j][k][m] -
4.0*u[i][j+1][k][m] + u[i][j+2][k][m]);
}
}
}
#pragma omp for nowait
for (i = 1; i < grid_points[0]-1; i++) {
for (j = 3; j < grid_points[1]-3; j++) {
for (k = 1; k < grid_points[2]-1; k++) {
for (m = 0; m < 5; m++) {
rhs[i][j][k][m] = rhs[i][j][k][m] - dssp *
( u[i][j-2][k][m] - 4.0*u[i][j-1][k][m] +
6.0*u[i][j][k][m] - 4.0*u[i][j+1][k][m] +
u[i][j+2][k][m] );
}
}
}
}
j = grid_points[1]-3;
#pragma omp for nowait
for (i = 1; i < grid_points[0]-1; i++) {
for (k = 1; k < grid_points[2]-1; k++) {
for (m = 0; m < 5; m++) {
rhs[i][j][k][m] = rhs[i][j][k][m] - dssp *
( u[i][j-2][k][m] - 4.0*u[i][j-1][k][m] +
6.0*u[i][j][k][m] - 4.0*u[i][j+1][k][m] );
}
}
}
j = grid_points[1]-2;
#pragma omp for
for (i = 1; i < grid_points[0]-1; i++) {
for (k = 1; k < grid_points[2]-1; k++) {
for (m = 0; m < 5; m++) {
rhs[i][j][k][m] = rhs[i][j][k][m] - dssp *
( u[i][j-2][k][m] - 4.*u[i][j-1][k][m] +
5.*u[i][j][k][m] );
}
}
}
/*--------------------------------------------------------------------
c compute zeta-direction fluxes
c-------------------------------------------------------------------*/
#pragma omp for
for (i = 1; i < grid_points[0]-1; i++) {
for (j = 1; j < grid_points[1]-1; j++) {
for (k = 1; k < grid_points[2]-1; k++) {
wijk = ws[i][j][k];
wp1 = ws[i][j][k+1];
wm1 = ws[i][j][k-1];
rhs[i][j][k][0] = rhs[i][j][k][0] + dz1tz1 *
(u[i][j][k+1][0] - 2.0*u[i][j][k][0] +
u[i][j][k-1][0]) -
tz2 * (u[i][j][k+1][3] - u[i][j][k-1][3]);
rhs[i][j][k][1] = rhs[i][j][k][1] + dz2tz1 *
(u[i][j][k+1][1] - 2.0*u[i][j][k][1] +
u[i][j][k-1][1]) +
zzcon2 * (us[i][j][k+1] - 2.0*us[i][j][k] +
us[i][j][k-1]) -
tz2 * (u[i][j][k+1][1]*wp1 -
u[i][j][k-1][1]*wm1);
rhs[i][j][k][2] = rhs[i][j][k][2] + dz3tz1 *
(u[i][j][k+1][2] - 2.0*u[i][j][k][2] +
u[i][j][k-1][2]) +
zzcon2 * (vs[i][j][k+1] - 2.0*vs[i][j][k] +
vs[i][j][k-1]) -
tz2 * (u[i][j][k+1][2]*wp1 -
u[i][j][k-1][2]*wm1);
rhs[i][j][k][3] = rhs[i][j][k][3] + dz4tz1 *
(u[i][j][k+1][3] - 2.0*u[i][j][k][3] +
u[i][j][k-1][3]) +
zzcon2*con43 * (wp1 - 2.0*wijk + wm1) -
tz2 * (u[i][j][k+1][3]*wp1 -
u[i][j][k-1][3]*wm1 +
(u[i][j][k+1][4] - square[i][j][k+1] -
u[i][j][k-1][4] + square[i][j][k-1])
*c2);
rhs[i][j][k][4] = rhs[i][j][k][4] + dz5tz1 *
(u[i][j][k+1][4] - 2.0*u[i][j][k][4] +
u[i][j][k-1][4]) +
zzcon3 * (qs[i][j][k+1] - 2.0*qs[i][j][k] +
qs[i][j][k-1]) +
zzcon4 * (wp1*wp1 - 2.0*wijk*wijk +
wm1*wm1) +
zzcon5 * (u[i][j][k+1][4]*rho_i[i][j][k+1] -
2.0*u[i][j][k][4]*rho_i[i][j][k] +
u[i][j][k-1][4]*rho_i[i][j][k-1]) -
tz2 * ( (c1*u[i][j][k+1][4] -
c2*square[i][j][k+1])*wp1 -
(c1*u[i][j][k-1][4] -
c2*square[i][j][k-1])*wm1);
}
}
}
/*--------------------------------------------------------------------
c add fourth order zeta-direction dissipation
c (same boundary treatment as above, applied along k)
c-------------------------------------------------------------------*/
k = 1;
#pragma omp for nowait
for (i = 1; i < grid_points[0]-1; i++) {
for (j = 1; j < grid_points[1]-1; j++) {
for (m = 0; m < 5; m++) {
rhs[i][j][k][m] = rhs[i][j][k][m]- dssp *
( 5.0*u[i][j][k][m] - 4.0*u[i][j][k+1][m] +
u[i][j][k+2][m]);
}
}
}
k = 2;
#pragma omp for nowait
for (i = 1; i < grid_points[0]-1; i++) {
for (j = 1; j < grid_points[1]-1; j++) {
for (m = 0; m < 5; m++) {
rhs[i][j][k][m] = rhs[i][j][k][m] - dssp *
(-4.0*u[i][j][k-1][m] + 6.0*u[i][j][k][m] -
4.0*u[i][j][k+1][m] + u[i][j][k+2][m]);
}
}
}
#pragma omp for nowait
for (i = 1; i < grid_points[0]-1; i++) {
for (j = 1; j < grid_points[1]-1; j++) {
for (k = 3; k < grid_points[2]-3; k++) {
for (m = 0; m < 5; m++) {
rhs[i][j][k][m] = rhs[i][j][k][m] - dssp *
( u[i][j][k-2][m] - 4.0*u[i][j][k-1][m] +
6.0*u[i][j][k][m] - 4.0*u[i][j][k+1][m] +
u[i][j][k+2][m] );
}
}
}
}
k = grid_points[2]-3;
#pragma omp for nowait
for (i = 1; i < grid_points[0]-1; i++) {
for (j = 1; j < grid_points[1]-1; j++) {
for (m = 0; m < 5; m++) {
rhs[i][j][k][m] = rhs[i][j][k][m] - dssp *
( u[i][j][k-2][m] - 4.0*u[i][j][k-1][m] +
6.0*u[i][j][k][m] - 4.0*u[i][j][k+1][m] );
}
}
}
k = grid_points[2]-2;
#pragma omp for
for (i = 1; i < grid_points[0]-1; i++) {
for (j = 1; j < grid_points[1]-1; j++) {
for (m = 0; m < 5; m++) {
rhs[i][j][k][m] = rhs[i][j][k][m] - dssp *
( u[i][j][k-2][m] - 4.0*u[i][j][k-1][m] +
5.0*u[i][j][k][m] );
}
}
}
/* finally scale the interior rhs by the time step */
#pragma omp for
for (j = 1; j < grid_points[1]-1; j++) {
for (k = 1; k < grid_points[2]-1; k++) {
for (m = 0; m < 5; m++) {
for (i = 1; i < grid_points[0]-1; i++) {
rhs[i][j][k][m] = rhs[i][j][k][m] * dt;
}
}
}
}
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c Initializes every global coefficient used by the solver: the
c exact-solution coefficient table ce, gas constants c1..c5, mesh
c spacings, per-direction diffusion constants, dissipation factors,
c and the derived products (dt*..., c3c4*..., xxcon*/yycon*/zzcon*).
c Must be called after grid_points[] and dt are set, and before any
c of the compute routines.
c-------------------------------------------------------------------*/
static void set_constants(void) {
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/* ce[m][n]: coefficients of the exact solution, one row per equation */
ce[0][0] = 2.0;
ce[0][1] = 0.0;
ce[0][2] = 0.0;
ce[0][3] = 4.0;
ce[0][4] = 5.0;
ce[0][5] = 3.0;
ce[0][6] = 0.5;
ce[0][7] = 0.02;
ce[0][8] = 0.01;
ce[0][9] = 0.03;
ce[0][10] = 0.5;
ce[0][11] = 0.4;
ce[0][12] = 0.3;
ce[1][0] = 1.0;
ce[1][1] = 0.0;
ce[1][2] = 0.0;
ce[1][3] = 0.0;
ce[1][4] = 1.0;
ce[1][5] = 2.0;
ce[1][6] = 3.0;
ce[1][7] = 0.01;
ce[1][8] = 0.03;
ce[1][9] = 0.02;
ce[1][10] = 0.4;
ce[1][11] = 0.3;
ce[1][12] = 0.5;
ce[2][0] = 2.0;
ce[2][1] = 2.0;
ce[2][2] = 0.0;
ce[2][3] = 0.0;
ce[2][4] = 0.0;
ce[2][5] = 2.0;
ce[2][6] = 3.0;
ce[2][7] = 0.04;
ce[2][8] = 0.03;
ce[2][9] = 0.05;
ce[2][10] = 0.3;
ce[2][11] = 0.5;
ce[2][12] = 0.4;
ce[3][0] = 2.0;
ce[3][1] = 2.0;
ce[3][2] = 0.0;
ce[3][3] = 0.0;
ce[3][4] = 0.0;
ce[3][5] = 2.0;
ce[3][6] = 3.0;
ce[3][7] = 0.03;
ce[3][8] = 0.05;
ce[3][9] = 0.04;
ce[3][10] = 0.2;
ce[3][11] = 0.1;
ce[3][12] = 0.3;
ce[4][0] = 5.0;
ce[4][1] = 4.0;
ce[4][2] = 3.0;
ce[4][3] = 2.0;
ce[4][4] = 0.1;
ce[4][5] = 0.4;
ce[4][6] = 0.3;
ce[4][7] = 0.05;
ce[4][8] = 0.04;
ce[4][9] = 0.03;
ce[4][10] = 0.1;
ce[4][11] = 0.3;
ce[4][12] = 0.2;
/* gas/model constants (c1 = ratio of specific heats, c2 = c1-1) */
c1 = 1.4;
c2 = 0.4;
c3 = 0.1;
c4 = 1.0;
c5 = 1.4;
/* mesh spacing per direction: 1/(N-1) */
dnxm1 = 1.0 / (double)(grid_points[0]-1);
dnym1 = 1.0 / (double)(grid_points[1]-1);
dnzm1 = 1.0 / (double)(grid_points[2]-1);
c1c2 = c1 * c2;
c1c5 = c1 * c5;
c3c4 = c3 * c4;
c1345 = c1c5 * c3c4;
conz1 = (1.0-c1c5);
/* tX1 = 1/h^2, tX2 = 1/(2h), tX3 = 1/h for each direction */
tx1 = 1.0 / (dnxm1 * dnxm1);
tx2 = 1.0 / (2.0 * dnxm1);
tx3 = 1.0 / dnxm1;
ty1 = 1.0 / (dnym1 * dnym1);
ty2 = 1.0 / (2.0 * dnym1);
ty3 = 1.0 / dnym1;
tz1 = 1.0 / (dnzm1 * dnzm1);
tz2 = 1.0 / (2.0 * dnzm1);
tz3 = 1.0 / dnzm1;
/* per-equation diffusion constants for each direction */
dx1 = 0.75;
dx2 = 0.75;
dx3 = 0.75;
dx4 = 0.75;
dx5 = 0.75;
dy1 = 0.75;
dy2 = 0.75;
dy3 = 0.75;
dy4 = 0.75;
dy5 = 0.75;
dz1 = 1.0;
dz2 = 1.0;
dz3 = 1.0;
dz4 = 1.0;
dz5 = 1.0;
dxmax = max(dx3, dx4);
dymax = max(dy2, dy4);
dzmax = max(dz2, dz3);
/* fourth-order artificial dissipation coefficient */
dssp = 0.25 * max(dx1, max(dy1, dz1) );
c4dssp = 4.0 * dssp;
c5dssp = 5.0 * dssp;
dttx1 = dt*tx1;
dttx2 = dt*tx2;
dtty1 = dt*ty1;
dtty2 = dt*ty2;
dttz1 = dt*tz1;
dttz2 = dt*tz2;
c2dttx1 = 2.0*dttx1;
c2dtty1 = 2.0*dtty1;
c2dttz1 = 2.0*dttz1;
dtdssp = dt*dssp;
comz1 = dtdssp;
comz4 = 4.0*dtdssp;
comz5 = 5.0*dtdssp;
comz6 = 6.0*dtdssp;
c3c4tx3 = c3c4*tx3;
c3c4ty3 = c3c4*ty3;
c3c4tz3 = c3c4*tz3;
dx1tx1 = dx1*tx1;
dx2tx1 = dx2*tx1;
dx3tx1 = dx3*tx1;
dx4tx1 = dx4*tx1;
dx5tx1 = dx5*tx1;
dy1ty1 = dy1*ty1;
dy2ty1 = dy2*ty1;
dy3ty1 = dy3*ty1;
dy4ty1 = dy4*ty1;
dy5ty1 = dy5*ty1;
dz1tz1 = dz1*tz1;
dz2tz1 = dz2*tz1;
dz3tz1 = dz3*tz1;
dz4tz1 = dz4*tz1;
dz5tz1 = dz5*tz1;
c2iv = 2.5;
con43 = 4.0/3.0;
con16 = 1.0/6.0;
/* viscous-term coefficients per direction (xi, eta, zeta) */
xxcon1 = c3c4tx3*con43*tx3;
xxcon2 = c3c4tx3*tx3;
xxcon3 = c3c4tx3*conz1*tx3;
xxcon4 = c3c4tx3*con16*tx3;
xxcon5 = c3c4tx3*c1c5*tx3;
yycon1 = c3c4ty3*con43*ty3;
yycon2 = c3c4ty3*ty3;
yycon3 = c3c4ty3*conz1*ty3;
yycon4 = c3c4ty3*con16*ty3;
yycon5 = c3c4ty3*c1c5*ty3;
zzcon1 = c3c4tz3*con43*tz3;
zzcon2 = c3c4tz3*tz3;
zzcon3 = c3c4tz3*conz1*tz3;
zzcon4 = c3c4tz3*con16*tz3;
zzcon5 = c3c4tz3*c1c5*tz3;
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c Compares the computed residual and solution-error RMS norms against
c stored reference values for the known problem classes (S/W/A/B/C).
c On return, *class is the matched class letter ('U' if unknown) and
c *verified is TRUE only if every norm is within epsilon of its
c reference and dt matches the reference time step.
c-------------------------------------------------------------------*/
static void verify(int no_time_steps, char *class, boolean *verified) {
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c verification routine
c-------------------------------------------------------------------*/
double xcrref[5],xceref[5],xcrdif[5],xcedif[5],
epsilon, xce[5], xcr[5], dtref;
int m;
/*--------------------------------------------------------------------
c tolerance level
c-------------------------------------------------------------------*/
epsilon = 1.0e-08;
/*--------------------------------------------------------------------
c compute the error norm and the residual norm, and exit if not printing
c-------------------------------------------------------------------*/
error_norm(xce);
compute_rhs();
rhs_norm(xcr);
/* compute_rhs leaves rhs scaled by dt; undo that for comparison */
for (m = 0; m < 5; m++) {
xcr[m] = xcr[m] / dt;
}
*class = 'U';
*verified = TRUE;
for (m = 0; m < 5; m++) {
xcrref[m] = 1.0;
xceref[m] = 1.0;
}
/*--------------------------------------------------------------------
c reference data for 12X12X12 grids after 60 time steps, with DT = 1.0d-02
c-------------------------------------------------------------------*/
if (grid_points[0] == 12 &&
grid_points[1] == 12 &&
grid_points[2] == 12 &&
no_time_steps == 60) {
*class = 'S';
dtref = 1.0e-2;
/*--------------------------------------------------------------------
c Reference values of RMS-norms of residual.
c-------------------------------------------------------------------*/
xcrref[0] = 1.7034283709541311e-01;
xcrref[1] = 1.2975252070034097e-02;
xcrref[2] = 3.2527926989486055e-02;
xcrref[3] = 2.6436421275166801e-02;
xcrref[4] = 1.9211784131744430e-01;
/*--------------------------------------------------------------------
c Reference values of RMS-norms of solution error.
c-------------------------------------------------------------------*/
xceref[0] = 4.9976913345811579e-04;
xceref[1] = 4.5195666782961927e-05;
xceref[2] = 7.3973765172921357e-05;
xceref[3] = 7.3821238632439731e-05;
xceref[4] = 8.9269630987491446e-04;
/*--------------------------------------------------------------------
c reference data for 24X24X24 grids after 200 time steps, with DT = 0.8d-3
c-------------------------------------------------------------------*/
} else if (grid_points[0] == 24 &&
grid_points[1] == 24 &&
grid_points[2] == 24 &&
no_time_steps == 200) {
*class = 'W';
dtref = 0.8e-3;
/*--------------------------------------------------------------------
c Reference values of RMS-norms of residual.
c-------------------------------------------------------------------*/
xcrref[0] = 0.1125590409344e+03;
xcrref[1] = 0.1180007595731e+02;
xcrref[2] = 0.2710329767846e+02;
xcrref[3] = 0.2469174937669e+02;
xcrref[4] = 0.2638427874317e+03;
/*--------------------------------------------------------------------
c Reference values of RMS-norms of solution error.
c-------------------------------------------------------------------*/
xceref[0] = 0.4419655736008e+01;
xceref[1] = 0.4638531260002e+00;
xceref[2] = 0.1011551749967e+01;
xceref[3] = 0.9235878729944e+00;
xceref[4] = 0.1018045837718e+02;
/*--------------------------------------------------------------------
c reference data for 64X64X64 grids after 200 time steps, with DT = 0.8d-3
c-------------------------------------------------------------------*/
} else if (grid_points[0] == 64 &&
grid_points[1] == 64 &&
grid_points[2] == 64 &&
no_time_steps == 200) {
*class = 'A';
dtref = 0.8e-3;
/*--------------------------------------------------------------------
c Reference values of RMS-norms of residual.
c-------------------------------------------------------------------*/
xcrref[0] = 1.0806346714637264e+02;
xcrref[1] = 1.1319730901220813e+01;
xcrref[2] = 2.5974354511582465e+01;
xcrref[3] = 2.3665622544678910e+01;
xcrref[4] = 2.5278963211748344e+02;
/*--------------------------------------------------------------------
c Reference values of RMS-norms of solution error.
c-------------------------------------------------------------------*/
xceref[0] = 4.2348416040525025e+00;
xceref[1] = 4.4390282496995698e-01;
xceref[2] = 9.6692480136345650e-01;
xceref[3] = 8.8302063039765474e-01;
xceref[4] = 9.7379901770829278e+00;
/*--------------------------------------------------------------------
c reference data for 102X102X102 grids after 200 time steps,
c with DT = 3.0d-04
c-------------------------------------------------------------------*/
} else if (grid_points[0] == 102 &&
grid_points[1] == 102 &&
grid_points[2] == 102 &&
no_time_steps == 200) {
*class = 'B';
dtref = 3.0e-4;
/*--------------------------------------------------------------------
c Reference values of RMS-norms of residual.
c-------------------------------------------------------------------*/
xcrref[0] = 1.4233597229287254e+03;
xcrref[1] = 9.9330522590150238e+01;
xcrref[2] = 3.5646025644535285e+02;
xcrref[3] = 3.2485447959084092e+02;
xcrref[4] = 3.2707541254659363e+03;
/*--------------------------------------------------------------------
c Reference values of RMS-norms of solution error.
c-------------------------------------------------------------------*/
xceref[0] = 5.2969847140936856e+01;
xceref[1] = 4.4632896115670668e+00;
xceref[2] = 1.3122573342210174e+01;
xceref[3] = 1.2006925323559144e+01;
xceref[4] = 1.2459576151035986e+02;
/*--------------------------------------------------------------------
c reference data for 162X162X162 grids after 200 time steps,
c with DT = 1.0d-04
c-------------------------------------------------------------------*/
} else if (grid_points[0] == 162 &&
grid_points[1] == 162 &&
grid_points[2] == 162 &&
no_time_steps == 200) {
*class = 'C';
dtref = 1.0e-4;
/*--------------------------------------------------------------------
c Reference values of RMS-norms of residual.
c-------------------------------------------------------------------*/
xcrref[0] = 0.62398116551764615e+04;
xcrref[1] = 0.50793239190423964e+03;
xcrref[2] = 0.15423530093013596e+04;
xcrref[3] = 0.13302387929291190e+04;
xcrref[4] = 0.11604087428436455e+05;
/*--------------------------------------------------------------------
c Reference values of RMS-norms of solution error.
c-------------------------------------------------------------------*/
xceref[0] = 0.16462008369091265e+03;
xceref[1] = 0.11497107903824313e+02;
xceref[2] = 0.41207446207461508e+02;
xceref[3] = 0.37087651059694167e+02;
xceref[4] = 0.36211053051841265e+03;
} else {
*verified = FALSE;
}
/*--------------------------------------------------------------------
c verification test for residuals if gridsize is either 12X12X12 or
c 64X64X64 or 102X102X102 or 162X162X162
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c Compute the difference of solution values and the known reference values.
c-------------------------------------------------------------------*/
for (m = 0; m < 5; m++) {
xcrdif[m] = fabs((xcr[m]-xcrref[m])/xcrref[m]);
xcedif[m] = fabs((xce[m]-xceref[m])/xceref[m]);
}
/*--------------------------------------------------------------------
c Output the comparison of computed results to known cases.
c-------------------------------------------------------------------*/
if (*class != 'U') {
printf(" Verification being performed for class %1c\n", *class);
printf(" accuracy setting for epsilon = %20.13e\n", epsilon);
if (fabs(dt-dtref) > epsilon) {
*verified = FALSE;
*class = 'U';
printf(" DT does not match the reference value of %15.8e\n", dtref);
}
} else {
printf(" Unknown class\n");
}
if (*class != 'U') {
printf(" Comparison of RMS-norms of residual\n");
} else {
printf(" RMS-norms of residual\n");
}
for (m = 0; m < 5; m++) {
if (*class == 'U') {
printf(" %2d%20.13e\n", m, xcr[m]);
} else if (xcrdif[m] > epsilon) {
*verified = FALSE;
printf(" FAILURE: %2d%20.13e%20.13e%20.13e\n",
m, xcr[m], xcrref[m], xcrdif[m]);
} else {
printf(" %2d%20.13e%20.13e%20.13e\n",
m, xcr[m], xcrref[m], xcrdif[m]);
}
}
if (*class != 'U') {
printf(" Comparison of RMS-norms of solution error\n");
} else {
printf(" RMS-norms of solution error\n");
}
for (m = 0; m < 5; m++) {
if (*class == 'U') {
printf(" %2d%20.13e\n", m, xce[m]);
} else if (xcedif[m] > epsilon) {
*verified = FALSE;
printf(" FAILURE: %2d%20.13e%20.13e%20.13e\n",
m, xce[m], xceref[m], xcedif[m]);
} else {
printf(" %2d%20.13e%20.13e%20.13e\n",
m, xce[m], xceref[m], xcedif[m]);
}
}
if (*class == 'U') {
printf(" No reference values provided\n");
printf(" No verification performed\n");
} else if (*verified == TRUE) {
printf(" Verification Successful\n");
} else {
printf(" Verification failed\n");
}
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void x_solve(void) {
/*--------------------------------------------------------------------
c x_solve: block-tridiagonal line solves in the X direction.
c
c First assembles the left-hand side (lhsx), then factors the
c block-tridiagonal matrix into an upper triangular matrix via a
c forward elimination sweep (x_solve_cell), and finally performs
c back substitution (x_backsubstitute) to solve for the unknown
c vectors of each grid line.
c
c Elements zero to cell_size in the direction of the sweep are all
c treated.  The three calls must run in this order.
c-------------------------------------------------------------------*/
lhsx();
x_solve_cell();
x_backsubstitute();
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void x_backsubstitute(void) {
/*--------------------------------------------------------------------
c Back substitution sweep in the X direction.
c
c Walks i from the next-to-last plane down to zero; for each point,
c subtracts C(i)*rhs(i+1) from rhs(i).  The i loop is sequential
c (each plane depends on the one above it); the j loop is shared
c across the enclosing OpenMP team.
c-------------------------------------------------------------------*/
    int i, j, k, m, n;
    double acc;

    for (i = grid_points[0]-2; i >= 0; i--) {
#pragma omp for
        for (j = 1; j < grid_points[1]-1; j++) {
            for (k = 1; k < grid_points[2]-1; k++) {
                for (m = 0; m < BLOCK_SIZE; m++) {
                    /* acc = rhs(i,m) - sum_n CC(m,n)*rhs(i+1,n), accumulated
                       in the same left-to-right order as the original. */
                    acc = rhs[i][j][k][m];
                    for (n = 0; n < BLOCK_SIZE; n++) {
                        acc -= lhs[i][j][k][CC][m][n] * rhs[i+1][j][k][n];
                    }
                    rhs[i][j][k][m] = acc;
                }
            }
        }
    }
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void x_solve_cell(void) {
/*--------------------------------------------------------------------
c Forward elimination sweep (Gaussian elimination) in the X
c direction for this cell.
c
c assumes that unpacking routines for non-first cells
c preload C' and rhs' from previous cell.
c
c assumed send happens outside this routine, but that
c c'(IMAX) and rhs'(IMAX) will be sent to next cell
c-------------------------------------------------------------------*/
    int i, j, k, isize;

    isize = grid_points[0]-1;

/*--------------------------------------------------------------------
c i == 0 plane:
c multiply c(0,j,k) by b_inverse and copy back to c
c multiply rhs(0) by b_inverse(0) and copy to rhs
c-------------------------------------------------------------------*/
#pragma omp for
    for (j = 1; j < grid_points[1]-1; j++) {
        for (k = 1; k < grid_points[2]-1; k++) {
            binvcrhs( lhs[0][j][k][BB],
                      lhs[0][j][k][CC],
                      rhs[0][j][k] );
        }
    }

/*--------------------------------------------------------------------
c interior planes 1 .. isize-1: the i loop is sequential (each plane
c depends on plane i-1); the j loop is shared across the team.
c-------------------------------------------------------------------*/
    for (i = 1; i < isize; i++) {
#pragma omp for
        for (j = 1; j < grid_points[1]-1; j++) {
            for (k = 1; k < grid_points[2]-1; k++) {
                /* rhs(i) = rhs(i) - A*rhs(i-1) */
                matvec_sub(lhs[i][j][k][AA],
                           rhs[i-1][j][k], rhs[i][j][k]);
                /* B(i) = B(i) - A(i)*C(i-1) */
                matmul_sub(lhs[i][j][k][AA],
                           lhs[i-1][j][k][CC],
                           lhs[i][j][k][BB]);
                /* multiply c(i,j,k) and rhs(i,j,k) by b_inverse */
                binvcrhs( lhs[i][j][k][BB],
                          lhs[i][j][k][CC],
                          rhs[i][j][k] );
            }
        }
    }

/*--------------------------------------------------------------------
c last plane i == isize.
c FIX: the original code indexed the final binvrhs call with the loop
c variable i, which only equals isize by accident of the loop-exit
c value; use isize explicitly, matching y_solve_cell/z_solve_cell.
c-------------------------------------------------------------------*/
#pragma omp for
    for (j = 1; j < grid_points[1]-1; j++) {
        for (k = 1; k < grid_points[2]-1; k++) {
            /* rhs(isize) = rhs(isize) - A*rhs(isize-1) */
            matvec_sub(lhs[isize][j][k][AA],
                       rhs[isize-1][j][k], rhs[isize][j][k]);
            /* B(isize) = B(isize) - A(isize)*C(isize-1) */
            matmul_sub(lhs[isize][j][k][AA],
                       lhs[isize-1][j][k][CC],
                       lhs[isize][j][k][BB]);
            /* multiply rhs(isize) by b_inverse(isize) and copy to rhs */
            binvrhs( lhs[isize][j][k][BB],
                     rhs[isize][j][k] );
        }
    }
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void matvec_sub(double ablock[5][5], double avec[5], double bvec[5]) {
/*--------------------------------------------------------------------
c 5x5 block matrix-vector product with subtraction:
c    bvec = bvec - ablock * avec
c Columns are accumulated left to right, matching the original
c unrolled expression exactly (same floating-point order).
c-------------------------------------------------------------------*/
    int row, col;

    for (row = 0; row < 5; row++) {
        for (col = 0; col < 5; col++) {
            bvec[row] -= ablock[row][col] * avec[col];
        }
    }
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void matmul_sub(double ablock[5][5], double bblock[5][5],
                       double cblock[5][5]) {
/*--------------------------------------------------------------------
c 5x5 block matrix-matrix product with subtraction:
c    cblock = cblock - ablock * bblock
c For each column j, rows are processed top to bottom and the k
c terms are subtracted in order 0..4, which reproduces the original
c unrolled code's floating-point evaluation order bit for bit.
c-------------------------------------------------------------------*/
    int row, col, k;

    for (col = 0; col < 5; col++) {
        for (row = 0; row < 5; row++) {
            for (k = 0; k < 5; k++) {
                cblock[row][col] -= ablock[row][k] * bblock[k][col];
            }
        }
    }
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void binvcrhs(double lhs[5][5], double c[5][5], double r[5]) {
/*--------------------------------------------------------------------
c In-place Gauss-Jordan elimination (no pivoting) on the 5x5 block
c lhs, applying the same row operations to the 5x5 block c and the
c 5-vector r.  On exit c and r hold lhs_inverse*c and lhs_inverse*r.
c
c For each pivot p: normalize row p (only columns right of the
c pivot in lhs are touched; entries left of it are stale/unused),
c then eliminate column p from every other row in index order.
c This is the loop form of the original fully-unrolled code and
c performs the identical operation sequence on every element.
c-------------------------------------------------------------------*/
    int p, row, q;
    double pivot, coeff;

    for (p = 0; p < 5; p++) {
        /* scale pivot row */
        pivot = 1.00/lhs[p][p];
        for (q = p+1; q < 5; q++) {
            lhs[p][q] = lhs[p][q]*pivot;
        }
        for (q = 0; q < 5; q++) {
            c[p][q] = c[p][q]*pivot;
        }
        r[p] = r[p]*pivot;

        /* eliminate column p from all other rows */
        for (row = 0; row < 5; row++) {
            if (row == p) continue;
            coeff = lhs[row][p];
            for (q = p+1; q < 5; q++) {
                lhs[row][q] = lhs[row][q] - coeff*lhs[p][q];
            }
            for (q = 0; q < 5; q++) {
                c[row][q] = c[row][q] - coeff*c[p][q];
            }
            r[row] = r[row] - coeff*r[p];
        }
    }
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void binvrhs( double lhs[5][5], double r[5] ) {
/*--------------------------------------------------------------------
c In-place Gauss-Jordan elimination (no pivoting) on the 5x5 block
c lhs, applying the same row operations to the 5-vector r.  On exit
c r holds lhs_inverse*r.
c
c Loop form of the original fully-unrolled code: for each pivot p,
c normalize row p (only columns right of the pivot are touched),
c then eliminate column p from every other row in index order.
c The per-element operation sequence is identical to the original.
c-------------------------------------------------------------------*/
    int p, row, q;
    double pivot, coeff;

    for (p = 0; p < 5; p++) {
        /* scale pivot row */
        pivot = 1.00/lhs[p][p];
        for (q = p+1; q < 5; q++) {
            lhs[p][q] = lhs[p][q]*pivot;
        }
        r[p] = r[p]*pivot;

        /* eliminate column p from all other rows */
        for (row = 0; row < 5; row++) {
            if (row == p) continue;
            coeff = lhs[row][p];
            for (q = p+1; q < 5; q++) {
                lhs[row][q] = lhs[row][q] - coeff*lhs[p][q];
            }
            r[row] = r[row] - coeff*r[p];
        }
    }
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void y_solve(void) {
/*--------------------------------------------------------------------
c y_solve: block-tridiagonal line solves in the Y direction.
c
c First assembles the left-hand side (lhsy), then factors the
c block-tridiagonal matrix into an upper triangular matrix via a
c forward elimination sweep (y_solve_cell), and finally performs
c back substitution (y_backsubstitute) to solve for the unknown
c vectors of each grid line.
c
c Elements zero to cell_size in the direction of the sweep are all
c treated.  The three calls must run in this order.
c-------------------------------------------------------------------*/
lhsy();
y_solve_cell();
y_backsubstitute();
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void y_backsubstitute(void) {
/*--------------------------------------------------------------------
c Back substitution sweep in the Y direction.
c
c Walks j from the next-to-last plane down to zero; for each point,
c subtracts C(j)*rhs(j+1) from rhs(j).  The j loop is sequential
c (each plane depends on the one above it); the i loop is shared
c across the enclosing OpenMP team.
c-------------------------------------------------------------------*/
    int i, j, k, m, n;
    double acc;

    for (j = grid_points[1]-2; j >= 0; j--) {
#pragma omp for
        for (i = 1; i < grid_points[0]-1; i++) {
            for (k = 1; k < grid_points[2]-1; k++) {
                for (m = 0; m < BLOCK_SIZE; m++) {
                    /* acc = rhs(j,m) - sum_n CC(m,n)*rhs(j+1,n), accumulated
                       in the same left-to-right order as the original. */
                    acc = rhs[i][j][k][m];
                    for (n = 0; n < BLOCK_SIZE; n++) {
                        acc -= lhs[i][j][k][CC][m][n] * rhs[i][j+1][k][n];
                    }
                    rhs[i][j][k][m] = acc;
                }
            }
        }
    }
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void y_solve_cell(void) {
/*--------------------------------------------------------------------
c Forward elimination sweep (Gaussian elimination) in the Y
c direction for this cell.
c
c Assumes that unpacking routines for non-first cells preload C'
c and rhs' from the previous cell.
c
c The send is assumed to happen outside this routine, but c'(JMAX)
c and rhs'(JMAX) will be sent to the next cell.
c
c The j sweep is sequential (plane j depends on plane j-1); the i
c loops below are shared across the enclosing OpenMP team.
c-------------------------------------------------------------------*/
int i, j, k, jsize;
jsize = grid_points[1]-1;
/*--------------------------------------------------------------------
c j == 0 plane:
c multiply c(i,0,k) by b_inverse and copy back to c
c multiply rhs(0) by b_inverse(0) and copy to rhs
c-------------------------------------------------------------------*/
#pragma omp for
for (i = 1; i < grid_points[0]-1; i++) {
for (k = 1; k < grid_points[2]-1; k++) {
binvcrhs( lhs[i][0][k][BB],
lhs[i][0][k][CC],
rhs[i][0][k] );
}
}
/*--------------------------------------------------------------------
c interior planes: do all the elements of the cell unless last
c-------------------------------------------------------------------*/
for (j = 1; j < jsize; j++) {
#pragma omp for
for (i = 1; i < grid_points[0]-1; i++) {
for (k = 1; k < grid_points[2]-1; k++) {
/*--------------------------------------------------------------------
c subtract A*lhs_vector(j-1) from lhs_vector(j):
c rhs(j) = rhs(j) - A*rhs(j-1)
c-------------------------------------------------------------------*/
matvec_sub(lhs[i][j][k][AA],
rhs[i][j-1][k], rhs[i][j][k]);
/*--------------------------------------------------------------------
c B(j) = B(j) - A(j)*C(j-1)
c-------------------------------------------------------------------*/
matmul_sub(lhs[i][j][k][AA],
lhs[i][j-1][k][CC],
lhs[i][j][k][BB]);
/*--------------------------------------------------------------------
c multiply c(i,j,k) by b_inverse and copy back to c
c multiply rhs(i,j,k) by b_inverse and copy to rhs
c-------------------------------------------------------------------*/
binvcrhs( lhs[i][j][k][BB],
lhs[i][j][k][CC],
rhs[i][j][k] );
}
}
}
/*--------------------------------------------------------------------
c last plane j == jsize
c rhs(jsize) = rhs(jsize) - A*rhs(jsize-1)
c-------------------------------------------------------------------*/
#pragma omp for
for (i = 1; i < grid_points[0]-1; i++) {
for (k = 1; k < grid_points[2]-1; k++) {
matvec_sub(lhs[i][jsize][k][AA],
rhs[i][jsize-1][k], rhs[i][jsize][k]);
/*--------------------------------------------------------------------
c B(jsize) = B(jsize) - A(jsize)*C(jsize-1)
c-------------------------------------------------------------------*/
matmul_sub(lhs[i][jsize][k][AA],
lhs[i][jsize-1][k][CC],
lhs[i][jsize][k][BB]);
/*--------------------------------------------------------------------
c multiply rhs(jsize) by b_inverse(jsize) and copy to rhs
c-------------------------------------------------------------------*/
binvrhs( lhs[i][jsize][k][BB],
rhs[i][jsize][k] );
}
}
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void z_solve(void) {
/*--------------------------------------------------------------------
c z_solve: block-tridiagonal line solves in the Z direction.
c
c First assembles the left-hand side (lhsz), then factors the
c block-tridiagonal matrix into an upper triangular matrix via a
c forward elimination sweep (z_solve_cell), and finally performs
c back substitution (z_backsubstitute) to solve for the unknown
c vectors of each grid line.
c
c Elements zero to cell_size in the direction of the sweep are all
c treated.  The three calls must run in this order.
c-------------------------------------------------------------------*/
lhsz();
z_solve_cell();
z_backsubstitute();
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void z_backsubstitute(void) {
/*--------------------------------------------------------------------
c Back substitution sweep in the Z direction.
c
c Walks k from the next-to-last plane down to zero; for each point,
c subtracts C(k)*rhs(k+1) from rhs(k).  The k loop is sequential
c (each plane depends on the one above it) and is innermost, so the
c whole i loop can be shared across the enclosing OpenMP team.
c-------------------------------------------------------------------*/
    int i, j, k, m, n;
    double acc;

#pragma omp for
    for (i = 1; i < grid_points[0]-1; i++) {
        for (j = 1; j < grid_points[1]-1; j++) {
            for (k = grid_points[2]-2; k >= 0; k--) {
                for (m = 0; m < BLOCK_SIZE; m++) {
                    /* acc = rhs(k,m) - sum_n CC(m,n)*rhs(k+1,n), accumulated
                       in the same left-to-right order as the original. */
                    acc = rhs[i][j][k][m];
                    for (n = 0; n < BLOCK_SIZE; n++) {
                        acc -= lhs[i][j][k][CC][m][n] * rhs[i][j][k+1][n];
                    }
                    rhs[i][j][k][m] = acc;
                }
            }
        }
    }
}
/*--------------------------------------------------------------------
--------------------------------------------------------------------*/
static void z_solve_cell(void) {
/*--------------------------------------------------------------------
c Forward elimination sweep (Gaussian elimination) in the Z
c direction for this cell.
c
c Assumes that unpacking routines for non-first cells preload C'
c and rhs' from the previous cell.
c
c The send is assumed to happen outside this routine, but c'(KMAX)
c and rhs'(KMAX) will be sent to the next cell.
c
c The k sweep is sequential (plane k depends on plane k-1); the i
c loops below are shared across the enclosing OpenMP team.
c-------------------------------------------------------------------*/
int i,j,k,ksize;
ksize = grid_points[2]-1;
/*--------------------------------------------------------------------
c k == 0 plane:
c multiply c(i,j,0) by b_inverse and copy back to c
c multiply rhs(0) by b_inverse(0) and copy to rhs
c-------------------------------------------------------------------*/
#pragma omp for
for (i = 1; i < grid_points[0]-1; i++) {
for (j = 1; j < grid_points[1]-1; j++) {
binvcrhs( lhs[i][j][0][BB],
lhs[i][j][0][CC],
rhs[i][j][0] );
}
}
/*--------------------------------------------------------------------
c interior planes: do all the elements of the cell unless last
c-------------------------------------------------------------------*/
for (k = 1; k < ksize; k++) {
#pragma omp for
for (i = 1; i < grid_points[0]-1; i++) {
for (j = 1; j < grid_points[1]-1; j++) {
/*--------------------------------------------------------------------
c subtract A*lhs_vector(k-1) from lhs_vector(k):
c rhs(k) = rhs(k) - A*rhs(k-1)
c-------------------------------------------------------------------*/
matvec_sub(lhs[i][j][k][AA],
rhs[i][j][k-1], rhs[i][j][k]);
/*--------------------------------------------------------------------
c B(k) = B(k) - A(k)*C(k-1)
c-------------------------------------------------------------------*/
matmul_sub(lhs[i][j][k][AA],
lhs[i][j][k-1][CC],
lhs[i][j][k][BB]);
/*--------------------------------------------------------------------
c multiply c(i,j,k) by b_inverse and copy back to c
c multiply rhs(i,j,k) by b_inverse and copy to rhs
c-------------------------------------------------------------------*/
binvcrhs( lhs[i][j][k][BB],
lhs[i][j][k][CC],
rhs[i][j][k] );
}
}
}
/*--------------------------------------------------------------------
c Now finish up special cases for last plane k == ksize:
c rhs(ksize) = rhs(ksize) - A*rhs(ksize-1)
c-------------------------------------------------------------------*/
#pragma omp for
for (i = 1; i < grid_points[0]-1; i++) {
for (j = 1; j < grid_points[1]-1; j++) {
matvec_sub(lhs[i][j][ksize][AA],
rhs[i][j][ksize-1], rhs[i][j][ksize]);
/*--------------------------------------------------------------------
c B(ksize) = B(ksize) - A(ksize)*C(ksize-1)
c-------------------------------------------------------------------*/
matmul_sub(lhs[i][j][ksize][AA],
lhs[i][j][ksize-1][CC],
lhs[i][j][ksize][BB]);
/*--------------------------------------------------------------------
c multiply rhs(ksize) by b_inverse(ksize) and copy to rhs
c-------------------------------------------------------------------*/
binvrhs( lhs[i][j][ksize][BB],
rhs[i][j][ksize] );
}
}
}
|
declare_reduction_messages.c | // RUN: %clang_cc1 -verify -fopenmp -ferror-limit 100 %s -Wuninitialized
// RUN: %clang_cc1 -verify -fopenmp-simd -ferror-limit 100 %s -Wuninitialized
int temp; // expected-note 6 {{'temp' declared here}}
#pragma omp declare reduction // expected-error {{expected '(' after 'declare reduction'}}
#pragma omp declare reduction { // expected-error {{expected '(' after 'declare reduction'}}
#pragma omp declare reduction( // expected-error {{expected identifier or one of the following operators: '+', '-', '*', '&', '|', '^', '&&', or '||'}}
#pragma omp declare reduction(# // expected-error {{expected identifier or one of the following operators: '+', '-', '*', '&', '|', '^', '&&', or '||'}}
#pragma omp declare reduction(/ // expected-error {{expected identifier or one of the following operators: '+', '-', '*', '&', '|', '^', '&&', or '||'}}
#pragma omp declare reduction(+ // expected-error {{expected ':'}}
#pragma omp declare reduction(for // expected-error {{expected identifier or one of the following operators: '+', '-', '*', '&', '|', '^', '&&', or '||'}}
#pragma omp declare reduction(if: // expected-error {{expected identifier or one of the following operators: '+', '-', '*', '&', '|', '^', '&&', or '||'}} expected-error {{expected a type}}
#pragma omp declare reduction(oper: // expected-error {{expected a type}}
#pragma omp declare reduction(oper; // expected-error {{expected ':'}} expected-error {{expected a type}}
#pragma omp declare reduction(fun : int // expected-error {{expected ':'}} expected-error {{expected expression}}
#pragma omp declare reduction(+ : const int: // expected-error {{reduction type cannot be qualified with 'const', 'volatile' or 'restrict'}}
#pragma omp declare reduction(- : volatile int: // expected-error {{reduction type cannot be qualified with 'const', 'volatile' or 'restrict'}}
#pragma omp declare reduction(* : int; // expected-error {{expected ','}} expected-error {{expected a type}}
#pragma omp declare reduction(& : double char: // expected-error {{cannot combine with previous 'double' declaration specifier}} expected-error {{expected expression}}
#pragma omp declare reduction(^ : double, char, : // expected-error {{expected a type}} expected-error {{expected expression}}
#pragma omp declare reduction(&& : int, S: // expected-error {{unknown type name 'S'}} expected-error {{expected expression}}
#pragma omp declare reduction(|| : int, double : temp += omp_in) // expected-error 2 {{only 'omp_in' or 'omp_out' variables are allowed in combiner expression}}
#pragma omp declare reduction(| : char, float : omp_out += temp) // expected-error 2 {{only 'omp_in' or 'omp_out' variables are allowed in combiner expression}}
#pragma omp declare reduction(fun : long : omp_out += omp_in) { // expected-error {{expected 'initializer'}} expected-warning {{extra tokens at the end of '#pragma omp declare reduction' are ignored}}
#pragma omp declare reduction(fun : unsigned : omp_out += temp)) // expected-error {{expected 'initializer'}} expected-warning {{extra tokens at the end of '#pragma omp declare reduction' are ignored}} expected-error {{only 'omp_in' or 'omp_out' variables are allowed in combiner expression}}
#pragma omp declare reduction(fun : long(void) : omp_out += omp_in) // expected-error {{reduction type cannot be a function type}}
#pragma omp declare reduction(fun : long[3] : omp_out += omp_in) // expected-error {{reduction type cannot be an array type}}
#pragma omp declare reduction(fun23 : long, int, long : omp_out += omp_in) // expected-error {{redefinition of user-defined reduction for type 'long'}} expected-note {{previous definition is here}}
#pragma omp declare reduction(fun222 : long : omp_out += omp_in)
#pragma omp declare reduction(fun1 : long : omp_out += omp_in) initializer // expected-error {{expected '(' after 'initializer'}}
#pragma omp declare reduction(fun2 : long : omp_out += omp_in) initializer { // expected-error {{expected '(' after 'initializer'}} expected-error {{expected expression}} expected-warning {{extra tokens at the end of '#pragma omp declare reduction' are ignored}}
#pragma omp declare reduction(fun3 : long : omp_out += omp_in) initializer[ // expected-error {{expected '(' after 'initializer'}} expected-error {{expected expression}} expected-warning {{extra tokens at the end of '#pragma omp declare reduction' are ignored}}
#pragma omp declare reduction(fun4 : long : omp_out += omp_in) initializer() // expected-error {{expected expression}}
#pragma omp declare reduction(fun5 : long : omp_out += omp_in) initializer(temp) // expected-error {{only 'omp_priv' or 'omp_orig' variables are allowed in initializer expression}}
#pragma omp declare reduction(fun6 : long : omp_out += omp_in) initializer(omp_orig // expected-error {{expected ')'}} expected-note {{to match this '('}}
#pragma omp declare reduction(fun7 : long : omp_out += omp_in) initializer(omp_priv 12) // expected-error {{expected ')'}} expected-note {{to match this '('}}
#pragma omp declare reduction(fun8 : long : omp_out += omp_in) initializer(omp_priv = 23) // expected-note {{previous definition is here}}
#pragma omp declare reduction(fun8 : long : omp_out += omp_in) initializer(omp_priv = 23)) // expected-warning {{extra tokens at the end of '#pragma omp declare reduction' are ignored}} expected-error {{redefinition of user-defined reduction for type 'long'}}
#pragma omp declare reduction(fun9 : long : omp_out += omp_in) initializer(omp_priv = ) // expected-error {{expected expression}}
// Aggregate type used to exercise user-defined reductions on a struct.
struct S {
  int s;
};
#pragma omp declare reduction(+: struct S: omp_out.s += omp_in.s) // initializer(omp_priv = { .s = 0 })
// NOTE(review): this function is part of a clang '-verify' test; every
// expected-error/expected-note comment below is a diagnostic directive
// matched by the compiler and must stay exactly in sync with the line it
// annotates. It exercises '#pragma omp declare reduction' redefinition
// rules at function and nested block scope.
int fun(int arg) {
  struct S s;// expected-note {{'s' defined here}}
  s.s = 0;
#pragma omp parallel for reduction(+ : s) // expected-error {{list item of type 'struct S' is not valid for specified reduction operation: unable to provide default initialization value}}
  for (arg = 0; arg < 10; ++arg)
    s.s += arg;
#pragma omp declare reduction(red : int : omp_out++)
  {
#pragma omp declare reduction(red : int : omp_out++) // expected-note {{previous definition is here}}
#pragma omp declare reduction(red : int : omp_out++) // expected-error {{redefinition of user-defined reduction for type 'int'}}
    {
      // Inner scope: redeclaring 'red' here is legal again.
#pragma omp declare reduction(red : int : omp_out++)
    }
  }
  return arg;
}
|
psd.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% PPPP SSSSS DDDD %
% P P SS D D %
% PPPP SSS D D %
% P SS D D %
% P SSSSS DDDD %
% %
% %
% Read/Write Adobe Photoshop Image Format %
% %
% Software Design %
% Cristy %
% Leonard Rosenthol %
% July 1992 %
% Dirk Lemstra %
% December 2013 %
% %
% %
% Copyright 1999-2017 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/channel.h"
#include "MagickCore/colormap.h"
#include "MagickCore/colormap-private.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/constitute.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/module.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/profile.h"
#include "MagickCore/property.h"
#include "MagickCore/registry.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/static.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#ifdef MAGICKCORE_ZLIB_DELEGATE
#include <zlib.h>
#endif
#include "psd-private.h"
/*
Define declarations.
*/
#define MaxPSDChannels 56
#define PSDQuantum(x) (((ssize_t) (x)+1) & -2)
/*
Enumerated declarations.
*/
/* Channel-data compression methods stored in a PSD file. */
typedef enum
{
  Raw = 0,
  RLE = 1,
  ZipWithoutPrediction = 2,
  ZipWithPrediction = 3
} PSDCompressionType;
/* Color-mode codes from the PSD file header (values 5/6 are unused here). */
typedef enum
{
  BitmapMode = 0,
  GrayscaleMode = 1,
  IndexedMode = 2,
  RGBMode = 3,
  CMYKMode = 4,
  MultichannelMode = 7,
  DuotoneMode = 8,
  LabMode = 9
} PSDImageType;
/*
Typedef declarations.
*/
/* One channel record of a layer. */
typedef struct _ChannelInfo
{
  short int
    type;  /* PSD channel id; negative values denote alpha/mask channels */

  size_t
    size;  /* length in bytes of the channel's (compressed) data */
} ChannelInfo;
/* A layer's opacity-mask data. */
typedef struct _MaskInfo
{
  Image
    *image;  /* decoded mask raster, NULL when absent */

  RectangleInfo
    page;  /* mask geometry relative to the canvas */

  unsigned char
    background,  /* fill value outside the mask rectangle */
    flags;
} MaskInfo;
/* Everything read from one PSD layer record. */
typedef struct _LayerInfo
{
  ChannelInfo
    channel_info[MaxPSDChannels];  /* per-channel type/size records */

  char
    blendkey[4];  /* 4-byte Photoshop blend-mode key, e.g. "norm" */

  Image
    *image;  /* decoded layer raster */

  MaskInfo
    mask;

  Quantum
    opacity;

  RectangleInfo
    page;  /* layer bounds relative to the canvas */

  size_t
    offset_x,
    offset_y;

  unsigned char
    clipping,
    flags,
    name[256],  /* Pascal-string layer name */
    visible;

  unsigned short
    channels;  /* number of valid entries in channel_info */

  StringInfo
    *info;  /* additional (extra-data) layer information blob */
} LayerInfo;
/*
Forward declarations.
*/
static MagickBooleanType
WritePSDImage(const ImageInfo *,Image *,ExceptionInfo *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s P S D %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsPSD() returns MagickTrue if the image format type, identified by the
% magick string, is PSD.
%
% The format of the IsPSD method is:
%
% MagickBooleanType IsPSD(const unsigned char *magick,const size_t length)
%
% A description of each parameter follows:
%
% o magick: compare image format pattern against these bytes.
%
% o length: Specifies the length of the magick string.
%
*/
static MagickBooleanType IsPSD(const unsigned char *magick,const size_t length)
{
  /*
    A PSD file begins with the 4-byte signature "8BPS".
  */
  if (length < 4)
    return(MagickFalse);
  return(LocaleNCompare((const char *) magick,"8BPS",4) == 0 ? MagickTrue :
    MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e a d P S D I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadPSDImage() reads an Adobe Photoshop image file and returns it. It
% allocates the memory necessary for the new Image structure and returns a
% pointer to the new image.
%
% The format of the ReadPSDImage method is:
%
% Image *ReadPSDImage(image_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o exception: return any errors or warnings in this structure.
%
*/
static const char *CompositeOperatorToPSDBlendMode(CompositeOperator op)
{
  /*
    Map a MagickCore composite operator onto the 4-byte blend-mode key
    Photoshop stores in a layer record; anything unrecognized becomes
    "norm" (normal).
  */
  switch (op)
  {
    case ColorBurnCompositeOp: return("idiv");
    case ColorDodgeCompositeOp: return("div ");
    case ColorizeCompositeOp: return("colr");
    case DarkenCompositeOp: return("dark");
    case DifferenceCompositeOp: return("diff");
    case DissolveCompositeOp: return("diss");
    case ExclusionCompositeOp: return("smud");
    case HardLightCompositeOp: return("hLit");
    case HardMixCompositeOp: return("hMix");
    case HueCompositeOp: return("hue ");
    case LightenCompositeOp: return("lite");
    case LinearBurnCompositeOp: return("lbrn");
    case LinearDodgeCompositeOp: return("lddg");
    case LinearLightCompositeOp: return("lLit");
    case LuminizeCompositeOp: return("lum ");
    case MultiplyCompositeOp: return("mul ");
    case OverCompositeOp: return("norm");
    case OverlayCompositeOp: return("over");
    case PinLightCompositeOp: return("pLit");
    case SaturateCompositeOp: return("sat ");
    case ScreenCompositeOp: return("scrn");
    case SoftLightCompositeOp: return("sLit");
    case VividLightCompositeOp: return("vLit");
    default: return("norm");
  }
}
/*
For some reason Photoshop seems to blend semi-transparent pixels with white.
This method reverts the blending. This can be disabled by setting the
option 'psd:alpha-unblend' to off.
*/
static MagickBooleanType CorrectPSDAlphaBlend(const ImageInfo *image_info,
  Image *image,ExceptionInfo* exception)
{
  /*
    Undo Photoshop's blending of semi-transparent pixels with white (see
    comment above): for every partially transparent pixel, recover the
    original color from stored = gamma*color + (1-gamma)*white.
  */
  const char
    *option;

  MagickBooleanType
    status;

  ssize_t
    y;

  /* Only applies to sRGB images that actually carry blended alpha. */
  if (image->alpha_trait != BlendPixelTrait || image->colorspace != sRGBColorspace)
    return(MagickTrue);
  /* The correction can be disabled with -define psd:alpha-unblend=off. */
  option=GetImageOption(image_info,"psd:alpha-unblend");
  if (IsStringFalse(option) != MagickFalse)
    return(MagickTrue);
  status=MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
  magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    /* Rows are processed in parallel; once any row fails, skip the rest. */
    if (status == MagickFalse)
      continue;
    q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        gamma;

      register ssize_t
        i;

      gamma=QuantumScale*GetPixelAlpha(image, q);
      /* Fully transparent or fully opaque pixels need no correction. */
      if (gamma != 0.0 && gamma != 1.0)
        {
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel=GetPixelChannelChannel(image,i);
            if (channel != AlphaPixelChannel)
              q[i]=ClampToQuantum((q[i]-((1.0-gamma)*QuantumRange))/gamma);
          }
        }
      q+=GetPixelChannels(image);
    }
    if (SyncAuthenticPixels(image,exception) == MagickFalse)
      status=MagickFalse;
  }
  return(status);
}
static inline CompressionType ConvertPSDCompression(
  PSDCompressionType compression)
{
  /*
    Map a PSD compression code onto the generic MagickCore compression
    type; both ZIP flavors collapse to ZipCompression.
  */
  if (compression == RLE)
    return RLECompression;
  if ((compression == ZipWithPrediction) ||
      (compression == ZipWithoutPrediction))
    return ZipCompression;
  return NoCompression;
}
static MagickBooleanType ApplyPSDLayerOpacity(Image *image,Quantum opacity,
  MagickBooleanType revert,ExceptionInfo *exception)
{
  /*
    Fold the layer-level opacity into each pixel's alpha channel; when
    'revert' is set, divide it back out to undo a previous application.
  */
  MagickBooleanType
    status;

  ssize_t
    y;

  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " applying layer opacity %.20g", (double) opacity);
  /* A fully opaque layer leaves the pixels untouched. */
  if (opacity == OpaqueAlpha)
    return(MagickTrue);
  image->alpha_trait=BlendPixelTrait;
  status=MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
  magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    /* Once any row fails, remaining parallel iterations bail out early. */
    if (status == MagickFalse)
      continue;
    q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (revert == MagickFalse)
        SetPixelAlpha(image,(Quantum) (QuantumScale*(GetPixelAlpha(image,q))*
          opacity),q);
      else if (opacity > 0)
        /* Inverse of the multiplication above (guarded against /0). */
        SetPixelAlpha(image,(Quantum) (QuantumRange*(GetPixelAlpha(image,q)/
          (MagickRealType) opacity)),q);
      q+=GetPixelChannels(image);
    }
    if (SyncAuthenticPixels(image,exception) == MagickFalse)
      status=MagickFalse;
  }
  return(status);
}
static MagickBooleanType ApplyPSDOpacityMask(Image *image,const Image *mask,
  Quantum background,MagickBooleanType revert,ExceptionInfo *exception)
{
  /*
    Multiply the image's alpha channel by the layer's opacity mask (or
    divide it back out when 'revert' is set).  The mask is first composited
    onto a canvas-sized clone filled with 'background' so that pixels
    outside the mask's page rectangle get the mask's background value.
  */
  Image
    *complete_mask;

  MagickBooleanType
    status;

  PixelInfo
    color;

  ssize_t
    y;

  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " applying opacity mask");
  complete_mask=CloneImage(image,image->columns,image->rows,MagickTrue,
    exception);
  /* Fix: CloneImage can fail (e.g. resource limits); the original code
     dereferenced the result unconditionally. */
  if (complete_mask == (Image *) NULL)
    return(MagickFalse);
  complete_mask->alpha_trait=BlendPixelTrait;
  GetPixelInfo(complete_mask,&color);
  color.red=background;
  SetImageColor(complete_mask,&color,exception);
  status=CompositeImage(complete_mask,mask,OverCompositeOp,MagickTrue,
    mask->page.x-image->page.x,mask->page.y-image->page.y,exception);
  if (status == MagickFalse)
    {
      complete_mask=DestroyImage(complete_mask);
      return(status);
    }
  image->alpha_trait=BlendPixelTrait;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
  magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register Quantum
      *p;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
    p=GetAuthenticPixels(complete_mask,0,y,complete_mask->columns,1,exception);
    if ((q == (Quantum *) NULL) || (p == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      MagickRealType
        alpha,
        intensity;

      alpha=GetPixelAlpha(image,q);
      intensity=GetPixelIntensity(complete_mask,p);
      if (revert == MagickFalse)
        SetPixelAlpha(image,ClampToQuantum(intensity*(QuantumScale*alpha)),q);
      else if (intensity > 0)
        SetPixelAlpha(image,ClampToQuantum((alpha/intensity)*QuantumRange),q);
      q+=GetPixelChannels(image);
      p+=GetPixelChannels(complete_mask);
    }
    if (SyncAuthenticPixels(image,exception) == MagickFalse)
      status=MagickFalse;
  }
  complete_mask=DestroyImage(complete_mask);
  return(status);
}
static void PreservePSDOpacityMask(Image *image,LayerInfo* layer_info,
  ExceptionInfo *exception)
{
  /*
    Park the layer's opacity-mask image in the image registry under a
    random key and record that key in the 'psd:opacity-mask' artifact so
    the writer can retrieve and re-attach the mask later.
  */
  char
    *key;

  RandomInfo
    *random_info;

  StringInfo
    *key_info;

  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " preserving opacity mask");
  random_info=AcquireRandomInfo();
  /*
    Fix: request 9+1 random bytes so indices 0..9 are within the requested
    key length.  The previous 2+1 request left bytes 2..7 uninitialized and
    wrote key[8]/key[9] past the requested datum size.
  */
  key_info=GetRandomKey(random_info,9+1);
  key=(char *) GetStringInfoDatum(key_info);
  key[8]=(char) layer_info->mask.background;  /* encode mask background */
  key[9]='\0';
  layer_info->mask.image->page.x+=layer_info->page.x;
  layer_info->mask.image->page.y+=layer_info->page.y;
  (void) SetImageRegistry(ImageRegistryType,(const char *) key,
    layer_info->mask.image,exception);
  (void) SetImageArtifact(layer_info->image,"psd:opacity-mask",
    (const char *) key);
  key_info=DestroyStringInfo(key_info);
  random_info=DestroyRandomInfo(random_info);
}
static ssize_t DecodePSDPixels(const size_t number_compact_pixels,
  const unsigned char *compact_pixels,const ssize_t depth,
  const size_t number_pixels,unsigned char *pixels)
{
/*
  Decode a PackBits-style RLE stream: a control byte > 128 replicates the
  following byte (257-control) times, a control byte < 128 copies the next
  (control+1) bytes verbatim, and 128 is a no-op.  Each decoded byte is
  expanded according to 'depth' (1, 2, 4 bits per pixel, or one byte per
  pixel otherwise).  Returns the number of pixels written, stopping early
  if either buffer would be exceeded.
*/
#define CheckNumberCompactPixels \
  if (packets == 0) \
    return(i); \
  packets--

#define CheckNumberPixels(count) \
  if (((ssize_t) i + count) > (ssize_t) number_pixels) \
    return(i); \
  i+=count

  int
    replicate;

  size_t
    run_length;

  ssize_t
    i,
    j,
    packets;

  unsigned char
    byte;

  packets=(ssize_t) number_compact_pixels;
  byte=0;
  for (i=0; (packets > 1) && (i < (ssize_t) number_pixels); )
  {
    packets--;
    run_length=(size_t) (*compact_pixels++);
    if (run_length == 128)
      continue;  /* 128 is a no-op control byte */
    replicate=(run_length > 128) ? 1 : 0;
    if (replicate != 0)
      {
        /* Run packet: one source byte repeated 257-control times. */
        run_length=256-run_length+1;
        CheckNumberCompactPixels;
        byte=(*compact_pixels++);
      }
    else
      run_length++;  /* literal packet of control+1 bytes */
    for (j=0; j < (ssize_t) run_length; j++)
    {
      int
        bit;

      if (replicate == 0)
        {
          /* Literal packet: fetch a fresh source byte each iteration. */
          CheckNumberCompactPixels;
          byte=(*compact_pixels++);
        }
      switch (depth)
      {
        case 1:
        {
          /* Eight bilevel pixels per byte, MSB first; set bit = black. */
          CheckNumberPixels(8);
          for (bit=7; bit >= 0; bit--)
            *pixels++=((byte >> bit) & 0x01) != 0 ? 0U : 255U;
          break;
        }
        case 2:
        {
          CheckNumberPixels(4);
          *pixels++=(unsigned char) ((byte >> 6) & 0x03);
          *pixels++=(unsigned char) ((byte >> 4) & 0x03);
          *pixels++=(unsigned char) ((byte >> 2) & 0x03);
          *pixels++=(unsigned char) (byte & 0x03);
          break;
        }
        case 4:
        {
          CheckNumberPixels(2);
          *pixels++=(unsigned char) ((byte >> 4) & 0x0f);
          *pixels++=(unsigned char) (byte & 0x0f);
          break;
        }
        default:
        {
          CheckNumberPixels(1);
          *pixels++=byte;
          break;
        }
      }
    }
  }
  return(i);
}
static inline LayerInfo *DestroyLayerInfo(LayerInfo *layer_info,
  const ssize_t number_layers)
{
  /*
    Release every per-layer image, mask image and extra-data blob, then
    free the layer array itself.  Always returns NULL.
  */
  ssize_t
    layer;

  for (layer=0; layer < number_layers; layer++)
  {
    if (layer_info[layer].image != (Image *) NULL)
      layer_info[layer].image=DestroyImage(layer_info[layer].image);
    if (layer_info[layer].mask.image != (Image *) NULL)
      layer_info[layer].mask.image=DestroyImage(layer_info[layer].mask.image);
    if (layer_info[layer].info != (StringInfo *) NULL)
      layer_info[layer].info=DestroyStringInfo(layer_info[layer].info);
  }
  return (LayerInfo *) RelinquishMagickMemory(layer_info);
}
static inline size_t GetPSDPacketSize(Image *image)
{
  /*
    Bytes per decoded sample: two for >8-bit depth or colormaps larger
    than 256 entries, one otherwise.
  */
  if ((image->storage_class == PseudoClass) && (image->colors > 256))
    return(2);
  if (image->depth > 8)
    return(2);
  return(1);
}
static inline MagickSizeType GetPSDSize(const PSDInfo *psd_info,Image *image)
{
  /* PSD (version 1) stores sizes as 32-bit, PSB (version 2) as 64-bit. */
  if (psd_info->version != 1)
    return((MagickSizeType) ReadBlobLongLong(image));
  return((MagickSizeType) ReadBlobLong(image));
}
static inline size_t GetPSDRowSize(Image *image)
{
  /* Bytes per raw row; 1-bit images pack eight pixels into each byte. */
  size_t
    units;

  units=(image->depth == 1) ? (image->columns+7)/8 : image->columns;
  return(units*GetPSDPacketSize(image));
}
static const char *ModeToString(PSDImageType type)
{
  /* Human-readable name of a PSD color mode, for logging. */
  static const struct
  {
    PSDImageType
      type;

    const char
      *name;
  } modes[] =
  {
    { BitmapMode, "Bitmap" },
    { GrayscaleMode, "Grayscale" },
    { IndexedMode, "Indexed" },
    { RGBMode, "RGB" },
    { CMYKMode, "CMYK" },
    { MultichannelMode, "Multichannel" },
    { DuotoneMode, "Duotone" },
    { LabMode, "L*A*B" }
  };

  size_t
    i;

  for (i=0; i < sizeof(modes)/sizeof(modes[0]); i++)
    if (modes[i].type == type)
      return modes[i].name;
  return "unknown";
}
static MagickBooleanType NegateCMYK(Image *image,ExceptionInfo *exception)
{
  /*
    Negate every channel except alpha (PSD stores CMYK inverted),
    restoring the previous channel mask afterwards.
  */
  ChannelType
    saved_mask;

  MagickBooleanType
    result;

  saved_mask=SetImageChannelMask(image,(ChannelType)(AllChannels &~
    AlphaChannel));
  result=NegateImage(image,MagickFalse,exception);
  (void) SetImageChannelMask(image,saved_mask);
  return(result);
}
static void ParseImageResourceBlocks(Image *image,
  const unsigned char *blocks,size_t length,
  MagickBooleanType *has_merged_image,ExceptionInfo *exception)
{
  /*
    Walk the 8BIM image-resource section: store the raw bytes as the
    "8bim" profile, then pick out resolution info (id 0x03ed) and the
    merged-image flag (id 0x0421).  Unknown resources are skipped.
  */
  const unsigned char
    *p;

  StringInfo
    *profile;

  unsigned int
    count,
    long_sans;

  unsigned short
    id,
    short_sans;

  if (length < 16)
    return;
  /* Preserve the raw resource data as the image's "8bim" profile. */
  profile=BlobToStringInfo((const unsigned char *) NULL,length);
  SetStringInfoDatum(profile,blocks);
  (void) SetImageProfile(image,"8bim",profile,exception);
  profile=DestroyStringInfo(profile);
  for (p=blocks; (p >= blocks) && (p < (blocks+length-16)); )
  {
    if (LocaleNCompare((const char *) p,"8BIM",4) != 0)
      break;
    p=PushLongPixel(MSBEndian,p,&long_sans);   /* "8BIM" signature */
    p=PushShortPixel(MSBEndian,p,&id);         /* resource identifier */
    p=PushShortPixel(MSBEndian,p,&short_sans); /* (empty) Pascal name */
    p=PushLongPixel(MSBEndian,p,&count);       /* resource data length */
    /* Bail out if the declared length runs past the buffer. */
    if ((p+count) > (blocks+length-16))
      return;
    switch (id)
    {
      case 0x03ed:
      {
        char
          value[MagickPathExtent];

        unsigned short
          resolution;

        /*
          Resolution info.
        */
        p=PushShortPixel(MSBEndian,p,&resolution);
        image->resolution.x=(double) resolution;
        (void) FormatLocaleString(value,MagickPathExtent,"%g",image->resolution.x);
        (void) SetImageProperty(image,"tiff:XResolution",value,exception);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&resolution);
        image->resolution.y=(double) resolution;
        (void) FormatLocaleString(value,MagickPathExtent,"%g",image->resolution.y);
        (void) SetImageProperty(image,"tiff:YResolution",value,exception);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        image->units=PixelsPerInchResolution;
        break;
      }
      case 0x0421:
      {
        /* Version-info resource: a zero at offset 4 means the file has no
           merged (composite) image. */
        if (*(p+4) == 0)
          *has_merged_image=MagickFalse;
        p+=count;
        break;
      }
      default:
      {
        p+=count;
        break;
      }
    }
    /* Resource data is padded to an even byte count. */
    if ((count & 0x01) != 0)
      p++;
  }
  return;
}
static CompositeOperator PSDBlendModeToCompositeOperator(const char *mode)
{
  /*
    Translate a 4-byte Photoshop blend-mode key into the matching
    MagickCore composite operator; NULL or unrecognized keys map to the
    normal (over) operator.
  */
  static const struct
  {
    const char
      *key;

    CompositeOperator
      op;
  } blend_map[] =
  {
    { "norm", OverCompositeOp },
    { "mul ", MultiplyCompositeOp },
    { "diss", DissolveCompositeOp },
    { "diff", DifferenceCompositeOp },
    { "dark", DarkenCompositeOp },
    { "lite", LightenCompositeOp },
    { "hue ", HueCompositeOp },
    { "sat ", SaturateCompositeOp },
    { "colr", ColorizeCompositeOp },
    { "lum ", LuminizeCompositeOp },
    { "scrn", ScreenCompositeOp },
    { "over", OverlayCompositeOp },
    { "hLit", HardLightCompositeOp },
    { "sLit", SoftLightCompositeOp },
    { "smud", ExclusionCompositeOp },
    { "div ", ColorDodgeCompositeOp },
    { "idiv", ColorBurnCompositeOp },
    { "lbrn", LinearBurnCompositeOp },
    { "lddg", LinearDodgeCompositeOp },
    { "lLit", LinearLightCompositeOp },
    { "vLit", VividLightCompositeOp },
    { "pLit", PinLightCompositeOp },
    { "hMix", HardMixCompositeOp }
  };

  size_t
    i;

  if (mode == (const char *) NULL)
    return(OverCompositeOp);
  for (i=0; i < sizeof(blend_map)/sizeof(blend_map[0]); i++)
    if (LocaleNCompare(mode,blend_map[i].key,4) == 0)
      return(blend_map[i].op);
  return(OverCompositeOp);
}
static inline void ReversePSDString(Image *image,char *p,size_t length)
{
  /*
    Reverse 'length' bytes of p in place; a no-op for MSB-endian images,
    whose strings are already in file (big-endian) order.
  */
  char
    *q;

  if (image->endian == MSBEndian)
    return;
  q=p+length;
  for (--q; p < q; ++p, --q)
  {
    /* Plain temporary swap; replaces the original XOR-swap trick, which
       was harder to read and no faster. */
    char swap = *p;
    *p = *q;
    *q = swap;
  }
}
static inline void SetPSDPixel(Image *image,const size_t channels,
  const ssize_t type,const size_t packet_size,const Quantum pixel,Quantum *q,
  ExceptionInfo *exception)
{
  /*
    Store one decoded sample into the pixel at q.  'type' is the PSD
    channel id: -1 = transparency, -2 = user-supplied mask (stored as
    gray), 0..4 = color component index.
  */
  if (image->storage_class == PseudoClass)
    {
      /* Colormapped image: the sample is a palette index; resolve it to a
         color immediately. */
      if (packet_size == 1)
        SetPixelIndex(image,ScaleQuantumToChar(pixel),q);
      else
        SetPixelIndex(image,ScaleQuantumToShort(pixel),q);
      SetPixelViaPixelInfo(image,image->colormap+(ssize_t)
        ConstrainColormapIndex(image,GetPixelIndex(image,q),exception),q);
      return;
    }
  switch (type)
  {
    case -1:
    {
      SetPixelAlpha(image, pixel,q);
      break;
    }
    case -2:
    case 0:
    {
      SetPixelRed(image,pixel,q);
      /* Single-channel images and layer masks are grayscale. */
      if (channels == 1 || type == -2)
        SetPixelGray(image,pixel,q);
      break;
    }
    case 1:
    {
      /* NOTE(review): the PseudoClass branch here looks unreachable --
         PseudoClass images returned above. */
      if (image->storage_class == PseudoClass)
        SetPixelAlpha(image,pixel,q);
      else
        SetPixelGreen(image,pixel,q);
      break;
    }
    case 2:
    {
      /* NOTE(review): PseudoClass branch unreachable, as in case 1. */
      if (image->storage_class == PseudoClass)
        SetPixelAlpha(image,pixel,q);
      else
        SetPixelBlue(image,pixel,q);
      break;
    }
    case 3:
    {
      /* Fourth channel: black for CMYK, otherwise alpha (if present). */
      if (image->colorspace == CMYKColorspace)
        SetPixelBlack(image,pixel,q);
      else
        if (image->alpha_trait != UndefinedPixelTrait)
          SetPixelAlpha(image,pixel,q);
      break;
    }
    case 4:
    {
      /* Fifth channel: alpha for CMYK; ignored for RGB-like images that
         already consumed their channels. */
      if ((IssRGBCompatibleColorspace(image->colorspace) != MagickFalse) &&
          (channels > 3))
        break;
      if (image->alpha_trait != UndefinedPixelTrait)
        SetPixelAlpha(image,pixel,q);
      break;
    }
  }
}
static MagickBooleanType ReadPSDChannelPixels(Image *image,
  const size_t channels,const size_t row,const ssize_t type,
  const unsigned char *pixels,ExceptionInfo *exception)
{
  /*
    Write one decoded row of channel samples into the pixel cache.
    Samples are 1 or 2 bytes each (GetPSDPacketSize); for 1-bit images
    each byte expands to up to eight bilevel pixels.
  */
  Quantum
    pixel;

  register const unsigned char
    *p;

  register Quantum
    *q;

  register ssize_t
    x;

  size_t
    packet_size;

  unsigned short
    nibble;

  p=pixels;
  q=GetAuthenticPixels(image,0,row,image->columns,1,exception);
  if (q == (Quantum *) NULL)
    return MagickFalse;
  packet_size=GetPSDPacketSize(image);
  for (x=0; x < (ssize_t) image->columns; x++)
  {
    if (packet_size == 1)
      pixel=ScaleCharToQuantum(*p++);
    else
      {
        /* 16-bit samples are stored big-endian. */
        p=PushShortPixel(MSBEndian,p,&nibble);
        pixel=ScaleShortToQuantum(nibble);
      }
    if (image->depth > 1)
      {
        SetPSDPixel(image,channels,type,packet_size,pixel,q,exception);
        q+=GetPixelChannels(image);
      }
    else
      {
        /* 1-bit data: expand this byte MSB-first; a set bit is black. */
        ssize_t
          bit,
          number_bits;

        number_bits=image->columns-x;
        if (number_bits > 8)
          number_bits=8;
        for (bit = 0; bit < number_bits; bit++)
        {
          SetPSDPixel(image,channels,type,packet_size,(((unsigned char) pixel)
            & (0x01 << (7-bit))) != 0 ? 0 : QuantumRange,q,exception);
          q+=GetPixelChannels(image);
          x++;
        }
        /* Undo one x++ so the outer loop increment lands correctly,
           unless the row ended exactly on this byte. */
        if (x != (ssize_t) image->columns)
          x--;
        continue;
      }
  }
  return(SyncAuthenticPixels(image,exception));
}
static MagickBooleanType ReadPSDChannelRaw(Image *image,const size_t channels,
  const ssize_t type,ExceptionInfo *exception)
{
  /*
    Read an uncompressed channel: one raw row per image row, decoded
    straight into the pixel cache.
  */
  MagickBooleanType
    status;

  size_t
    bytes_read,
    row_size;

  ssize_t
    row;

  unsigned char
    *row_buffer;

  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " layer data is RAW");
  row_size=GetPSDRowSize(image);
  row_buffer=(unsigned char *) AcquireQuantumMemory(row_size,
    sizeof(*row_buffer));
  if (row_buffer == (unsigned char *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  status=MagickTrue;
  for (row=0; row < (ssize_t) image->rows; row++)
  {
    bytes_read=ReadBlob(image,row_size,row_buffer);
    if (bytes_read != row_size)
      {
        /* Short read: the blob ended before the channel did. */
        status=MagickFalse;
        break;
      }
    status=ReadPSDChannelPixels(image,channels,row,type,row_buffer,exception);
    if (status == MagickFalse)
      break;
  }
  row_buffer=(unsigned char *) RelinquishMagickMemory(row_buffer);
  return(status);
}
static inline MagickOffsetType *ReadPSDRLESizes(Image *image,
  const PSDInfo *psd_info,const size_t size)
{
  /*
    Read the per-row compressed byte counts that precede RLE channel data:
    16-bit values for PSD (version 1), 32-bit for PSB.  Returns NULL on
    allocation failure.
  */
  MagickOffsetType
    *sizes;

  ssize_t
    row;

  sizes=(MagickOffsetType *) AcquireQuantumMemory(size,sizeof(*sizes));
  if (sizes == (MagickOffsetType *) NULL)
    return(sizes);
  for (row=0; row < (ssize_t) size; row++)
  {
    if (psd_info->version == 1)
      sizes[row]=(MagickOffsetType) ReadBlobShort(image);
    else
      sizes[row]=(MagickOffsetType) ReadBlobLong(image);
  }
  return sizes;
}
static MagickBooleanType ReadPSDChannelRLE(Image *image,const PSDInfo *psd_info,
  const ssize_t type,MagickOffsetType *sizes,ExceptionInfo *exception)
{
  /*
    Read an RLE (PackBits) compressed channel.  'sizes' holds the
    compressed byte count of each row (from ReadPSDRLESizes).
  */
  MagickBooleanType
    status;

  size_t
    length,
    row_size;

  ssize_t
    count,
    y;

  unsigned char
    *compact_pixels,
    *pixels;

  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " layer data is RLE compressed");
  row_size=GetPSDRowSize(image);
  pixels=(unsigned char *) AcquireQuantumMemory(row_size,sizeof(*pixels));
  if (pixels == (unsigned char *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /* Size the compressed-row buffer to the largest row. */
  length=0;
  for (y=0; y < (ssize_t) image->rows; y++)
    if ((MagickOffsetType) length < sizes[y])
      length=(size_t) sizes[y];
  /* Sanity check: a row should never "compress" to much more than its raw
     size; 256 is an arbitrary slack margin against corrupt files. */
  if (length > row_size + 256) // arbitrary number
    {
      pixels=(unsigned char *) RelinquishMagickMemory(pixels);
      ThrowBinaryException(ResourceLimitError,"InvalidLength",image->filename);
    }
  compact_pixels=(unsigned char *) AcquireQuantumMemory(length,sizeof(*pixels));
  if (compact_pixels == (unsigned char *) NULL)
    {
      pixels=(unsigned char *) RelinquishMagickMemory(pixels);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  (void) ResetMagickMemory(compact_pixels,0,length*sizeof(*compact_pixels));
  status=MagickTrue;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    status=MagickFalse;
    count=ReadBlob(image,(size_t) sizes[y],compact_pixels);
    if (count != (ssize_t) sizes[y])
      break;
    /* For 1-bit images pass a sentinel depth (123456) so DecodePSDPixels
       takes its default one-byte path; the bits are expanded later by
       ReadPSDChannelPixels. */
    count=DecodePSDPixels((size_t) sizes[y],compact_pixels,
      (ssize_t) (image->depth == 1 ? 123456 : image->depth),row_size,pixels);
    if (count != (ssize_t) row_size)
      break;
    status=ReadPSDChannelPixels(image,psd_info->channels,y,type,pixels,
      exception);
    if (status == MagickFalse)
      break;
  }
  compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
  pixels=(unsigned char *) RelinquishMagickMemory(pixels);
  return(status);
}
#ifdef MAGICKCORE_ZLIB_DELEGATE
static MagickBooleanType ReadPSDChannelZip(Image *image,const size_t channels,
  const ssize_t type,const PSDCompressionType compression,
  const size_t compact_size,ExceptionInfo *exception)
{
  /*
    Read a ZIP-compressed channel: inflate 'compact_size' bytes into a
    whole-channel buffer, optionally undo the horizontal delta predictor,
    then decode row by row into the pixel cache.
  */
  MagickBooleanType
    status;

  register unsigned char
    *p;

  size_t
    count,
    length,
    packet_size,
    row_size;

  ssize_t
    y;

  unsigned char
    *compact_pixels,
    *pixels;

  z_stream
    stream;

  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " layer data is ZIP compressed");
  compact_pixels=(unsigned char *) AcquireQuantumMemory(compact_size,
    sizeof(*compact_pixels));
  if (compact_pixels == (unsigned char *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  packet_size=GetPSDPacketSize(image);
  row_size=image->columns*packet_size;
  count=image->rows*row_size;
  pixels=(unsigned char *) AcquireQuantumMemory(count,sizeof(*pixels));
  if (pixels == (unsigned char *) NULL)
    {
      compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  ResetMagickMemory(&stream,0,sizeof(stream));
  stream.data_type=Z_BINARY;
  (void) ReadBlob(image,compact_size,compact_pixels);
  stream.next_in=(Bytef *)compact_pixels;
  stream.avail_in=(uInt) compact_size;
  stream.next_out=(Bytef *)pixels;
  stream.avail_out=(uInt) count;
  if (inflateInit(&stream) == Z_OK)
    {
      int
        ret;

      while (stream.avail_out > 0)
      {
        ret=inflate(&stream,Z_SYNC_FLUSH);
        if ((ret != Z_OK) && (ret != Z_STREAM_END))
          {
            /* Corrupt stream: release zlib state and both buffers. */
            (void) inflateEnd(&stream);
            compact_pixels=(unsigned char *) RelinquishMagickMemory(
              compact_pixels);
            pixels=(unsigned char *) RelinquishMagickMemory(pixels);
            return(MagickFalse);
          }
        /* Fix: a truncated stream ends with Z_STREAM_END while output
           space remains; without this break the loop spins forever. */
        if (ret == Z_STREAM_END)
          break;
      }
      /* Fix: inflateEnd() releases zlib's internal state; the original
         code never called it and leaked on every ZIP channel. */
      (void) inflateEnd(&stream);
    }
  if (compression == ZipWithPrediction)
    {
      /*
        Undo the horizontal delta predictor: each sample was stored as the
        difference with its left neighbour (16-bit samples carry the
        borrow across the two bytes).
      */
      p=pixels;
      while (count > 0)
      {
        length=image->columns;
        while (--length)
        {
          if (packet_size == 2)
            {
              p[2]+=p[0]+((p[1]+p[3]) >> 8);
              p[3]+=p[1];
            }
          else
            *(p+1)+=*p;
          p+=packet_size;
        }
        p+=packet_size;
        count-=row_size;
      }
    }
  status=MagickTrue;
  p=pixels;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    status=ReadPSDChannelPixels(image,channels,y,type,p,exception);
    if (status == MagickFalse)
      break;
    p+=row_size;
  }
  compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
  pixels=(unsigned char *) RelinquishMagickMemory(pixels);
  return(status);
}
#endif
/*
  Read one channel of a PSD layer (or the layer's mask) from the blob.

  image        - blob source and destination for ordinary channels.
  image_info   - decoder options (honors "psd:preserve-opacity-mask").
  psd_info     - global PSD header.
  layer_info   - layer owning the channel; a user supplied mask image is
                 stored in layer_info->mask.image on success.
  channel      - index into layer_info->channel_info[].
  compression  - per-channel compression tag already read by the caller.

  Returns MagickTrue on success; throws a binary exception (which returns
  MagickFalse) when the channel cannot be decompressed or allocated.
*/
static MagickBooleanType ReadPSDChannel(Image *image,
  const ImageInfo *image_info,const PSDInfo *psd_info,LayerInfo* layer_info,
  const size_t channel,const PSDCompressionType compression,
  ExceptionInfo *exception)
{
  Image
    *channel_image,
    *mask;

  MagickOffsetType
    offset;

  MagickBooleanType
    status;

  channel_image=image;
  mask=(Image *) NULL;
  if (layer_info->channel_info[channel].type < -1)
    {
      const char
        *option;

      /*
        Ignore mask that is not a user supplied layer mask, if the mask is
        disabled or if the flags have unsupported values.
      */
      option=GetImageOption(image_info,"psd:preserve-opacity-mask");
      if ((layer_info->channel_info[channel].type != -2) ||
          (layer_info->mask.flags > 2) || ((layer_info->mask.flags & 0x02) &&
           (IsStringTrue(option) == MagickFalse)))
        {
          SeekBlob(image,layer_info->channel_info[channel].size-2,SEEK_CUR);
          return(MagickTrue);
        }
      mask=CloneImage(image,layer_info->mask.page.width,
        layer_info->mask.page.height,MagickFalse,exception);
      /*
        Bug fix: CloneImage can return NULL on allocation failure; the
        previous code passed the result straight to SetImageType and
        dereferenced a NULL pointer.
      */
      if (mask == (Image *) NULL)
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename);
      SetImageType(mask,GrayscaleType,exception);
      channel_image=mask;
    }
  offset=TellBlob(image);
  status=MagickTrue;
  switch(compression)
  {
    case Raw:
      status=ReadPSDChannelRaw(channel_image,psd_info->channels,
        layer_info->channel_info[channel].type,exception);
      break;
    case RLE:
    {
      MagickOffsetType
        *sizes;

      sizes=ReadPSDRLESizes(channel_image,psd_info,channel_image->rows);
      if (sizes == (MagickOffsetType *) NULL)
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename);
      status=ReadPSDChannelRLE(channel_image,psd_info,
        layer_info->channel_info[channel].type,sizes,exception);
      sizes=(MagickOffsetType *) RelinquishMagickMemory(sizes);
    }
      break;
    case ZipWithPrediction:
    case ZipWithoutPrediction:
#ifdef MAGICKCORE_ZLIB_DELEGATE
      status=ReadPSDChannelZip(channel_image,layer_info->channels,
        layer_info->channel_info[channel].type,compression,
        layer_info->channel_info[channel].size-2,exception);
#else
      (void) ThrowMagickException(exception,GetMagickModule(),
        MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn",
        "'%s' (ZLIB)",image->filename);
#endif
      break;
    default:
      (void) ThrowMagickException(exception,GetMagickModule(),TypeWarning,
        "CompressionNotSupported","'%.20g'",(double) compression);
      break;
  }
  /* Always realign to the end of the channel's data, even after a partial
     decode. */
  SeekBlob(image,offset+layer_info->channel_info[channel].size-2,SEEK_SET);
  if (status == MagickFalse)
    {
      if (mask != (Image *) NULL)
        DestroyImage(mask);
      ThrowBinaryException(CoderError,"UnableToDecompressImage",
        image->filename);
    }
  layer_info->mask.image=mask;
  return(status);
}
/*
  ReadPSDLayer() decodes the pixel data of a single layer: it prepares the
  layer image (background, compose operator, colorspace), records the layer
  position and opacity as image artifacts, reads every channel, then applies
  the layer opacity and, when present, the layer mask.

  Returns MagickTrue on success, MagickFalse when a channel fails to decode
  or post-processing fails.
*/
static MagickBooleanType ReadPSDLayer(Image *image,const ImageInfo *image_info,
  const PSDInfo *psd_info,LayerInfo* layer_info,ExceptionInfo *exception)
{
  char
    message[MagickPathExtent];

  MagickBooleanType
    status;

  PSDCompressionType
    compression;

  ssize_t
    j;

  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " setting up new layer image");
  /* Indexed images keep their colormap-derived background untouched. */
  if (psd_info->mode != IndexedMode)
    (void) SetImageBackgroundColor(layer_info->image,exception);
  layer_info->image->compose=PSDBlendModeToCompositeOperator(
    layer_info->blendkey);
  /* Hidden layers stay in the list but are never composited. */
  if (layer_info->visible == MagickFalse)
    layer_info->image->compose=NoCompositeOp;
  if (psd_info->mode == CMYKMode)
    SetImageColorspace(layer_info->image,CMYKColorspace,exception);
  else if ((psd_info->mode == BitmapMode) || (psd_info->mode == DuotoneMode) ||
    (psd_info->mode == GrayscaleMode))
    SetImageColorspace(layer_info->image,GRAYColorspace,exception);
  /*
    Set up some hidden attributes for folks that need them.
  */
  (void) FormatLocaleString(message,MagickPathExtent,"%.20g",
    (double) layer_info->page.x);
  (void) SetImageArtifact(layer_info->image,"psd:layer.x",message);
  (void) FormatLocaleString(message,MagickPathExtent,"%.20g",
    (double) layer_info->page.y);
  (void) SetImageArtifact(layer_info->image,"psd:layer.y",message);
  (void) FormatLocaleString(message,MagickPathExtent,"%.20g",(double)
    layer_info->opacity);
  (void) SetImageArtifact(layer_info->image,"psd:layer.opacity",message);
  (void) SetImageProperty(layer_info->image,"label",(char *) layer_info->name,
    exception);
  status=MagickTrue;
  for (j=0; j < (ssize_t) layer_info->channels; j++)
  {
    if (image->debug != MagickFalse)
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
        " reading data for channel %.20g",(double) j);
    /* Each channel carries its own 2-byte compression tag. */
    compression=(PSDCompressionType) ReadBlobShort(layer_info->image);
    layer_info->image->compression=ConvertPSDCompression(compression);
    /* Channel type -1 is the layer's alpha channel. */
    if (layer_info->channel_info[j].type == -1)
      layer_info->image->alpha_trait=BlendPixelTrait;
    status=ReadPSDChannel(layer_info->image,image_info,psd_info,layer_info,j,
      compression,exception);
    if (status == MagickFalse)
      break;
  }
  if (status != MagickFalse)
    status=ApplyPSDLayerOpacity(layer_info->image,layer_info->opacity,
      MagickFalse,exception);
  /* CMYK samples are stored inverted in PSD files. */
  if ((status != MagickFalse) &&
      (layer_info->image->colorspace == CMYKColorspace))
    status=NegateCMYK(layer_info->image,exception);
  if ((status != MagickFalse) && (layer_info->mask.image != (Image *) NULL))
    {
      const char
        *option;

      layer_info->mask.image->page.x=layer_info->mask.page.x;
      layer_info->mask.image->page.y=layer_info->mask.page.y;
      /* Do not composite the mask when it is disabled */
      if ((layer_info->mask.flags & 0x02) == 0x02)
        layer_info->mask.image->compose=NoCompositeOp;
      else
        status=ApplyPSDOpacityMask(layer_info->image,layer_info->mask.image,
          layer_info->mask.background == 0 ? 0 : QuantumRange,MagickFalse,
          exception);
      option=GetImageOption(image_info,"psd:preserve-opacity-mask");
      if (IsStringTrue(option) != MagickFalse)
        PreservePSDOpacityMask(image,layer_info,exception);
      layer_info->mask.image=DestroyImage(layer_info->mask.image);
    }
  return(status);
}
/*
  ReadPSDLayers() parses the "layer and mask information" section of a PSD
  blob: the layer count, each layer's records (bounds, channels, blend key,
  opacity, flags, mask info, blending ranges, name, additional info), then
  optionally decodes each layer's pixel data and links the layer images into
  the image list after image.

  skip_layers - when MagickTrue only the alpha-channel hint is taken from
                the layer count and the section is otherwise skipped.

  Returns MagickTrue on success; corrupt data throws a binary exception.
*/
ModuleExport MagickBooleanType ReadPSDLayers(Image *image,
  const ImageInfo *image_info,const PSDInfo *psd_info,
  const MagickBooleanType skip_layers,ExceptionInfo *exception)
{
  char
    type[4];

  LayerInfo
    *layer_info;

  MagickSizeType
    size;

  MagickBooleanType
    status;

  register ssize_t
    i;

  ssize_t
    count,
    j,
    number_layers;

  size=GetPSDSize(psd_info,image);
  if (size == 0)
    {
      /*
        Skip layers & masks.
      */
      (void) ReadBlobLong(image);
      count=ReadBlob(image,4,(unsigned char *) type);
      ReversePSDString(image,type,4);
      status=MagickFalse;
      if ((count == 0) || (LocaleNCompare(type,"8BIM",4) != 0))
        return(MagickTrue);
      else
        {
          /* A 16-bit file stores the real layer data in an "Lr16" block. */
          count=ReadBlob(image,4,(unsigned char *) type);
          ReversePSDString(image,type,4);
          if ((count != 0) && (LocaleNCompare(type,"Lr16",4) == 0))
            size=GetPSDSize(psd_info,image);
          else
            return(MagickTrue);
        }
    }
  status=MagickTrue;
  if (size != 0)
    {
      layer_info=(LayerInfo *) NULL;
      number_layers=(short) ReadBlobShort(image);
      if (number_layers < 0)
        {
          /*
            The first alpha channel in the merged result contains the
            transparency data for the merged result.
          */
          number_layers=MagickAbsoluteValue(number_layers);
          if (image->debug != MagickFalse)
            (void) LogMagickEvent(CoderEvent,GetMagickModule(),
              " negative layer count corrected for");
          image->alpha_trait=BlendPixelTrait;
        }
      /*
        We only need to know if the image has an alpha channel
      */
      if (skip_layers != MagickFalse)
        return(MagickTrue);
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " image contains %.20g layers",(double) number_layers);
      if (number_layers == 0)
        ThrowBinaryException(CorruptImageError,"InvalidNumberOfLayers",
          image->filename);
      layer_info=(LayerInfo *) AcquireQuantumMemory((size_t) number_layers,
        sizeof(*layer_info));
      if (layer_info == (LayerInfo *) NULL)
        {
          if (image->debug != MagickFalse)
            (void) LogMagickEvent(CoderEvent,GetMagickModule(),
              " allocation of LayerInfo failed");
          ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
            image->filename);
        }
      (void) ResetMagickMemory(layer_info,0,(size_t) number_layers*
        sizeof(*layer_info));
      /*
        Pass 1: read every layer record (no pixel data yet).
      */
      for (i=0; i < number_layers; i++)
      {
        ssize_t
          x,
          y;

        if (image->debug != MagickFalse)
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            " reading layer #%.20g",(double) i+1);
        layer_info[i].page.y=ReadBlobSignedLong(image);
        layer_info[i].page.x=ReadBlobSignedLong(image);
        y=ReadBlobSignedLong(image);
        x=ReadBlobSignedLong(image);
        layer_info[i].page.width=(size_t) (x-layer_info[i].page.x);
        layer_info[i].page.height=(size_t) (y-layer_info[i].page.y);
        layer_info[i].channels=ReadBlobShort(image);
        if (layer_info[i].channels > MaxPSDChannels)
          {
            layer_info=DestroyLayerInfo(layer_info,number_layers);
            ThrowBinaryException(CorruptImageError,"MaximumChannelsExceeded",
              image->filename);
          }
        if (image->debug != MagickFalse)
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            " offset(%.20g,%.20g), size(%.20g,%.20g), channels=%.20g",
            (double) layer_info[i].page.x,(double) layer_info[i].page.y,
            (double) layer_info[i].page.height,(double)
            layer_info[i].page.width,(double) layer_info[i].channels);
        for (j=0; j < (ssize_t) layer_info[i].channels; j++)
        {
          layer_info[i].channel_info[j].type=(short) ReadBlobShort(image);
          layer_info[i].channel_info[j].size=(size_t) GetPSDSize(psd_info,
            image);
          if (image->debug != MagickFalse)
            (void) LogMagickEvent(CoderEvent,GetMagickModule(),
              " channel[%.20g]: type=%.20g, size=%.20g",(double) j,
              (double) layer_info[i].channel_info[j].type,
              (double) layer_info[i].channel_info[j].size);
        }
        count=ReadBlob(image,4,(unsigned char *) type);
        ReversePSDString(image,type,4);
        if ((count == 0) || (LocaleNCompare(type,"8BIM",4) != 0))
          {
            if (image->debug != MagickFalse)
              (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                " layer type was %.4s instead of 8BIM", type);
            layer_info=DestroyLayerInfo(layer_info,number_layers);
            ThrowBinaryException(CorruptImageError,"ImproperImageHeader",
              image->filename);
          }
        count=ReadBlob(image,4,(unsigned char *) layer_info[i].blendkey);
        ReversePSDString(image,layer_info[i].blendkey,4);
        layer_info[i].opacity=(Quantum) ScaleCharToQuantum((unsigned char)
          ReadBlobByte(image));
        layer_info[i].clipping=(unsigned char) ReadBlobByte(image);
        layer_info[i].flags=(unsigned char) ReadBlobByte(image);
        /* Flag bit 0x02 means the layer is hidden. */
        layer_info[i].visible=!(layer_info[i].flags & 0x02);
        if (image->debug != MagickFalse)
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            " blend=%.4s, opacity=%.20g, clipping=%s, flags=%d, visible=%s",
            layer_info[i].blendkey,(double) layer_info[i].opacity,
            layer_info[i].clipping ? "true" : "false",layer_info[i].flags,
            layer_info[i].visible ? "true" : "false");
        (void) ReadBlobByte(image); /* filler */
        size=ReadBlobLong(image);
        if (size != 0)
          {
            MagickSizeType
              combined_length,
              length;

            if (image->debug != MagickFalse)
              (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                " layer contains additional info");
            length=ReadBlobLong(image);
            combined_length=length+4;
            if (length != 0)
              {
                /*
                  Layer mask info.
                */
                layer_info[i].mask.page.y=ReadBlobSignedLong(image);
                layer_info[i].mask.page.x=ReadBlobSignedLong(image);
                layer_info[i].mask.page.height=(size_t) (ReadBlobLong(image)-
                  layer_info[i].mask.page.y);
                layer_info[i].mask.page.width=(size_t) (ReadBlobLong(image)-
                  layer_info[i].mask.page.x);
                layer_info[i].mask.background=(unsigned char) ReadBlobByte(
                  image);
                layer_info[i].mask.flags=(unsigned char) ReadBlobByte(image);
                /* Bit 0x01 = position is relative to the layer. */
                if (!(layer_info[i].mask.flags & 0x01))
                  {
                    layer_info[i].mask.page.y=layer_info[i].mask.page.y-
                      layer_info[i].page.y;
                    layer_info[i].mask.page.x=layer_info[i].mask.page.x-
                      layer_info[i].page.x;
                  }
                if (image->debug != MagickFalse)
                  (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                    " layer mask: offset(%.20g,%.20g), size(%.20g,%.20g), length=%.20g",
                    (double) layer_info[i].mask.page.x,(double)
                    layer_info[i].mask.page.y,(double) layer_info[i].mask.page.width,
                    (double) layer_info[i].mask.page.height,(double)
                    ((MagickOffsetType) length)-18);
                /*
                  Skip over the rest of the layer mask information.
                  NOTE(review): if length < 18 the unsigned subtraction below
                  wraps to a huge skip count -- confirm a minimum-length guard
                  exists upstream or add one.
                */
                if (DiscardBlobBytes(image,(MagickSizeType) (length-18)) == MagickFalse)
                  {
                    layer_info=DestroyLayerInfo(layer_info,number_layers);
                    ThrowBinaryException(CorruptImageError,
                      "UnexpectedEndOfFile",image->filename);
                  }
              }
            length=ReadBlobLong(image);
            combined_length+=length+4;
            if (length != 0)
              {
                /*
                  Layer blending ranges info.
                */
                if (image->debug != MagickFalse)
                  (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                    " layer blending ranges: length=%.20g",(double)
                    ((MagickOffsetType) length));
                /*
                  We read it, but don't use it...
                */
                for (j=0; j < (ssize_t) length; j+=8)
                {
                  size_t blend_source=ReadBlobLong(image);
                  size_t blend_dest=ReadBlobLong(image);
                  if (image->debug != MagickFalse)
                    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                      " source(%x), dest(%x)",(unsigned int)
                      blend_source,(unsigned int) blend_dest);
                }
              }
            /*
              Layer name.
            */
            length=(MagickSizeType) (unsigned char) ReadBlobByte(image);
            combined_length+=length+1;
            if (length > 0)
              (void) ReadBlob(image,(size_t) length++,layer_info[i].name);
            layer_info[i].name[length]='\0';
            if (image->debug != MagickFalse)
              (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                " layer name: %s",layer_info[i].name);
            /* The Pascal name is padded to a multiple of 4 bytes. */
            if ((length % 4) != 0)
              {
                length=4-(length % 4);
                combined_length+=length;
                /* Skip over the padding of the layer name */
                if (DiscardBlobBytes(image,length) == MagickFalse)
                  {
                    layer_info=DestroyLayerInfo(layer_info,number_layers);
                    ThrowBinaryException(CorruptImageError,
                      "UnexpectedEndOfFile",image->filename);
                  }
              }
            /* Whatever remains of the extra-data block is kept verbatim as
               the "additional info" payload. */
            length=(MagickSizeType) size-combined_length;
            if (length > 0)
              {
                unsigned char
                  *info;

                layer_info[i].info=AcquireStringInfo((const size_t) length);
                info=GetStringInfoDatum(layer_info[i].info);
                (void) ReadBlob(image,(const size_t) length,info);
              }
          }
      }
      /*
        Pass 2: allocate an image per non-empty layer and attach additional
        info as a profile.
      */
      for (i=0; i < number_layers; i++)
      {
        if ((layer_info[i].page.width == 0) ||
              (layer_info[i].page.height == 0))
          {
            if (image->debug != MagickFalse)
              (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                " layer data is empty");
            if (layer_info[i].info != (StringInfo *) NULL)
              layer_info[i].info=DestroyStringInfo(layer_info[i].info);
            continue;
          }
        /*
          Allocate layered image.
        */
        layer_info[i].image=CloneImage(image,layer_info[i].page.width,
          layer_info[i].page.height,MagickFalse,exception);
        if (layer_info[i].image == (Image *) NULL)
          {
            layer_info=DestroyLayerInfo(layer_info,number_layers);
            if (image->debug != MagickFalse)
              (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                " allocation of image for layer %.20g failed",(double) i);
            ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
              image->filename);
          }
        if (layer_info[i].info != (StringInfo *) NULL)
          {
            (void) SetImageProfile(layer_info[i].image,"psd:additional-info",
              layer_info[i].info,exception);
            layer_info[i].info=DestroyStringInfo(layer_info[i].info);
          }
      }
      /*
        Pass 3: decode the pixel data (skipped when pinging).
      */
      if (image_info->ping == MagickFalse)
        {
          for (i=0; i < number_layers; i++)
          {
            if (layer_info[i].image == (Image *) NULL)
              {
                /* Empty layer: skip its channel data in the blob. */
                for (j=0; j < layer_info[i].channels; j++)
                {
                  if (DiscardBlobBytes(image,(MagickSizeType)
                      layer_info[i].channel_info[j].size) == MagickFalse)
                    {
                      layer_info=DestroyLayerInfo(layer_info,number_layers);
                      ThrowBinaryException(CorruptImageError,
                        "UnexpectedEndOfFile",image->filename);
                    }
                }
                continue;
              }
            if (image->debug != MagickFalse)
              (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                " reading data for layer %.20g",(double) i);
            status=ReadPSDLayer(image,image_info,psd_info,&layer_info[i],
              exception);
            if (status == MagickFalse)
              break;
            status=SetImageProgress(image,LoadImagesTag,i,(MagickSizeType)
              number_layers);
            if (status == MagickFalse)
              break;
          }
        }
      if (status != MagickFalse)
        {
          /* Compact out empty layers, then link the rest into the list. */
          for (i=0; i < number_layers; i++)
          {
            if (layer_info[i].image == (Image *) NULL)
              {
                for (j=i; j < number_layers - 1; j++)
                  layer_info[j] = layer_info[j+1];
                number_layers--;
                i--;
              }
          }
          if (number_layers > 0)
            {
              for (i=0; i < number_layers; i++)
              {
                if (i > 0)
                  layer_info[i].image->previous=layer_info[i-1].image;
                if (i < (number_layers-1))
                  layer_info[i].image->next=layer_info[i+1].image;
                layer_info[i].image->page=layer_info[i].page;
              }
              image->next=layer_info[0].image;
              layer_info[0].image->previous=image;
            }
          layer_info=(LayerInfo *) RelinquishMagickMemory(layer_info);
        }
      else
        layer_info=DestroyLayerInfo(layer_info,number_layers);
    }
  return(status);
}
/*
  ReadPSDMergedImage() reads the precombined (flattened) image data that
  follows the layer section.  Only Raw and RLE compression are handled
  here; any other tag produces a warning and MagickFalse so the caller can
  fall back to composing the layers itself.
*/
static MagickBooleanType ReadPSDMergedImage(const ImageInfo *image_info,
  Image *image,const PSDInfo *psd_info,ExceptionInfo *exception)
{
  MagickOffsetType
    *sizes;

  MagickBooleanType
    status;

  PSDCompressionType
    compression;

  register ssize_t
    i;

  compression=(PSDCompressionType) ReadBlobMSBShort(image);
  image->compression=ConvertPSDCompression(compression);
  if (compression != Raw && compression != RLE)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        TypeWarning,"CompressionNotSupported","'%.20g'",(double) compression);
      return(MagickFalse);
    }
  sizes=(MagickOffsetType *) NULL;
  if (compression == RLE)
    {
      /* RLE data is preceded by one scanline byte count per row of every
         channel. */
      sizes=ReadPSDRLESizes(image,psd_info,image->rows*psd_info->channels);
      if (sizes == (MagickOffsetType *) NULL)
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename);
    }
  status=MagickTrue;
  for (i=0; i < (ssize_t) psd_info->channels; i++)
  {
    if (compression == RLE)
      status=ReadPSDChannelRLE(image,psd_info,i,sizes+(i*image->rows),
        exception);
    else
      status=ReadPSDChannelRaw(image,psd_info->channels,i,exception);
    if (status != MagickFalse)
      status=SetImageProgress(image,LoadImagesTag,i,psd_info->channels);
    if (status == MagickFalse)
      break;
  }
  /* CMYK samples are stored inverted in PSD files. */
  if ((status != MagickFalse) && (image->colorspace == CMYKColorspace))
    status=NegateCMYK(image,exception);
  if (status != MagickFalse)
    status=CorrectPSDAlphaBlend(image_info,image,exception);
  /* RelinquishMagickMemory(NULL) is a no-op, so no guard is needed. */
  sizes=(MagickOffsetType *) RelinquishMagickMemory(sizes);
  return(status);
}
/*
  ReadPSDImage() reads an Adobe Photoshop image (PSD, or PSB when the
  header version is 2) from a blob: header, optional colormap, image
  resource blocks, the layer/mask section, and finally the precombined
  (merged) layer.  Returns the head of the image list, or NULL on error.
*/
static Image *ReadPSDImage(const ImageInfo *image_info,ExceptionInfo *exception)
{
  Image
    *image;

  MagickBooleanType
    has_merged_image,
    skip_layers;

  MagickOffsetType
    offset;

  MagickSizeType
    length;

  MagickBooleanType
    status;

  PSDInfo
    psd_info;

  register ssize_t
    i;

  ssize_t
    count;

  unsigned char
    *data;

  /*
    Open image file.
  */
  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  image=AcquireImage(image_info,exception);
  status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
  if (status == MagickFalse)
    {
      image=DestroyImageList(image);
      return((Image *) NULL);
    }
  /*
    Read image header.
  */
  image->endian=MSBEndian;
  count=ReadBlob(image,4,(unsigned char *) psd_info.signature);
  psd_info.version=ReadBlobMSBShort(image);
  /* version 1 = PSD, version 2 = PSB (large document). */
  if ((count == 0) || (LocaleNCompare(psd_info.signature,"8BPS",4) != 0) ||
      ((psd_info.version != 1) && (psd_info.version != 2)))
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  (void) ReadBlob(image,6,psd_info.reserved);
  psd_info.channels=ReadBlobMSBShort(image);
  if (psd_info.channels > MaxPSDChannels)
    ThrowReaderException(CorruptImageError,"MaximumChannelsExceeded");
  psd_info.rows=ReadBlobMSBLong(image);
  psd_info.columns=ReadBlobMSBLong(image);
  /* Version-1 (PSD) files are limited to 30000 pixels per side. */
  if ((psd_info.version == 1) && ((psd_info.rows > 30000) ||
      (psd_info.columns > 30000)))
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  psd_info.depth=ReadBlobMSBShort(image);
  if ((psd_info.depth != 1) && (psd_info.depth != 8) && (psd_info.depth != 16))
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  psd_info.mode=ReadBlobMSBShort(image);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " Image is %.20g x %.20g with channels=%.20g, depth=%.20g, mode=%s",
      (double) psd_info.columns,(double) psd_info.rows,(double)
      psd_info.channels,(double) psd_info.depth,ModeToString((PSDImageType)
      psd_info.mode));
  /*
    Initialize image.
  */
  image->depth=psd_info.depth;
  image->columns=psd_info.columns;
  image->rows=psd_info.rows;
  status=SetImageExtent(image,image->columns,image->rows,exception);
  if (status == MagickFalse)
    return(DestroyImageList(image));
  if (SetImageBackgroundColor(image,exception) == MagickFalse)
    {
      image=DestroyImageList(image);
      return((Image *) NULL);
    }
  if (psd_info.mode == LabMode)
    SetImageColorspace(image,LabColorspace,exception);
  if (psd_info.mode == CMYKMode)
    {
      SetImageColorspace(image,CMYKColorspace,exception);
      /* A fifth channel beyond CMYK is treated as alpha. */
      if (psd_info.channels > 4)
        SetImageAlphaChannel(image,ActivateAlphaChannel,exception);
    }
  else if ((psd_info.mode == BitmapMode) || (psd_info.mode == GrayscaleMode) ||
      (psd_info.mode == DuotoneMode))
    {
      status=AcquireImageColormap(image,psd_info.depth != 16 ? 256 : 65536,
        exception);
      if (status == MagickFalse)
        ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " Image colormap allocated");
      SetImageColorspace(image,GRAYColorspace,exception);
      if (psd_info.channels > 1)
        SetImageAlphaChannel(image,ActivateAlphaChannel,exception);
    }
  else
    if (psd_info.channels > 3)
      SetImageAlphaChannel(image,ActivateAlphaChannel,exception);
  /*
    Read PSD raster colormap only present for indexed and duotone images.
  */
  length=ReadBlobMSBLong(image);
  if (length != 0)
    {
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " reading colormap");
      if (psd_info.mode == DuotoneMode)
        {
          /*
            Duotone image data; the format of this data is undocumented.
          */
          data=(unsigned char *) AcquireQuantumMemory((size_t) length,
            sizeof(*data));
          if (data == (unsigned char *) NULL)
            ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
          (void) ReadBlob(image,(size_t) length,data);
          data=(unsigned char *) RelinquishMagickMemory(data);
        }
      else
        {
          size_t
            number_colors;

          /*
            Read PSD raster colormap.
          */
          number_colors=length/3;
          if (number_colors > 65536)
            ThrowReaderException(CorruptImageError,"ImproperImageHeader");
          if (AcquireImageColormap(image,number_colors,exception) == MagickFalse)
            ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
          /* The colormap is stored planar: all reds, all greens, all blues. */
          for (i=0; i < (ssize_t) image->colors; i++)
            image->colormap[i].red=ScaleCharToQuantum((unsigned char)
              ReadBlobByte(image));
          for (i=0; i < (ssize_t) image->colors; i++)
            image->colormap[i].green=ScaleCharToQuantum((unsigned char)
              ReadBlobByte(image));
          for (i=0; i < (ssize_t) image->colors; i++)
            image->colormap[i].blue=ScaleCharToQuantum((unsigned char)
              ReadBlobByte(image));
          image->alpha_trait=UndefinedPixelTrait;
        }
    }
  if ((image->depth == 1) && (image->storage_class != PseudoClass))
    ThrowReaderException(CorruptImageError, "ImproperImageHeader");
  has_merged_image=MagickTrue;
  length=ReadBlobMSBLong(image);
  if (length != 0)
    {
      unsigned char
        *blocks;

      /*
        Image resources block.
      */
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " reading image resource blocks - %.20g bytes",(double)
          ((MagickOffsetType) length));
      blocks=(unsigned char *) AcquireQuantumMemory((size_t) length,
        sizeof(*blocks));
      if (blocks == (unsigned char *) NULL)
        ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
      count=ReadBlob(image,(size_t) length,blocks);
      if ((count != (ssize_t) length) || (length < 4) ||
          (LocaleNCompare((char *) blocks,"8BIM",4) != 0))
        {
          blocks=(unsigned char *) RelinquishMagickMemory(blocks);
          ThrowReaderException(CorruptImageError,"ImproperImageHeader");
        }
      /* May clear has_merged_image when the resources say so. */
      ParseImageResourceBlocks(image,blocks,(size_t) length,&has_merged_image,
        exception);
      blocks=(unsigned char *) RelinquishMagickMemory(blocks);
    }
  /*
    Layer and mask block.
  */
  length=GetPSDSize(&psd_info,image);
  if (length == 8)
    {
      length=ReadBlobMSBLong(image);
      length=ReadBlobMSBLong(image);
    }
  offset=TellBlob(image);
  skip_layers=MagickFalse;
  if ((image_info->number_scenes == 1) && (image_info->scene == 0) &&
      (has_merged_image != MagickFalse))
    {
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " read composite only");
      skip_layers=MagickTrue;
    }
  if (length == 0)
    {
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " image has no layers");
    }
  else
    {
      if (ReadPSDLayers(image,image_info,&psd_info,skip_layers,exception) !=
          MagickTrue)
        {
          (void) CloseBlob(image);
          image=DestroyImageList(image);
          return((Image *) NULL);
        }
      /*
        Skip the rest of the layer and mask information.
      */
      SeekBlob(image,offset+length,SEEK_SET);
    }
  /*
    If we are only "pinging" the image, then we're done - so return.
  */
  if (image_info->ping != MagickFalse)
    {
      (void) CloseBlob(image);
      return(GetFirstImageInList(image));
    }
  /*
    Read the precombined layer, present for PSD < 4 compatibility.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " reading the precombined layer");
  if ((has_merged_image != MagickFalse) || (GetImageListLength(image) == 1))
    has_merged_image=(MagickBooleanType) ReadPSDMergedImage(image_info,image,
      &psd_info,exception);
  /* No usable merged image and no layers read yet: go back and read the
     layer section after all. */
  if ((has_merged_image == MagickFalse) && (GetImageListLength(image) == 1) &&
      (length != 0))
    {
      SeekBlob(image,offset,SEEK_SET);
      status=ReadPSDLayers(image,image_info,&psd_info,MagickFalse,exception);
      if (status != MagickTrue)
        {
          (void) CloseBlob(image);
          image=DestroyImageList(image);
          return((Image *) NULL);
        }
    }
  if (has_merged_image == MagickFalse)
    {
      Image
        *merged;

      if (GetImageListLength(image) == 1)
        ThrowReaderException(CorruptImageError,"InsufficientImageDataInFile");
      /* Synthesize the merged result by flattening the decoded layers. */
      SetImageAlphaChannel(image,TransparentAlphaChannel,exception);
      image->background_color.alpha=TransparentAlpha;
      image->background_color.alpha_trait=BlendPixelTrait;
      merged=MergeImageLayers(image,FlattenLayer,exception);
      ReplaceImageInList(&image,merged);
    }
  (void) CloseBlob(image);
  return(GetFirstImageInList(image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e g i s t e r P S D I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RegisterPSDImage() adds properties for the PSD image format to
% the list of supported formats. The properties include the image format
% tag, a method to read and/or write the format, whether the format
% supports the saving of more than one frame to the same file or blob,
% whether the format supports native in-memory I/O, and a brief
% description of the format.
%
% The format of the RegisterPSDImage method is:
%
% size_t RegisterPSDImage(void)
%
*/
/*
  RegisterPSDImage() registers the PSB (large document) and PSD formats with
  the coder list and returns the coder signature.  Both formats share the
  same reader, writer, and magick test.
*/
ModuleExport size_t RegisterPSDImage(void)
{
  static const struct
  {
    const char
      *name,
      *description;
  } formats[] =
  {
    { "PSB", "Adobe Large Document Format" },
    { "PSD", "Adobe Photoshop bitmap" }
  };

  register ssize_t
    i;

  for (i=0; i < (ssize_t) (sizeof(formats)/sizeof(formats[0])); i++)
  {
    MagickInfo
      *entry;

    entry=AcquireMagickInfo("PSD",formats[i].name,formats[i].description);
    entry->decoder=(DecodeImageHandler *) ReadPSDImage;
    entry->encoder=(EncodeImageHandler *) WritePSDImage;
    entry->magick=(IsImageFormatHandler *) IsPSD;
    /* Both directions need a seekable stream (offsets are patched in place). */
    entry->flags|=CoderDecoderSeekableStreamFlag;
    entry->flags|=CoderEncoderSeekableStreamFlag;
    (void) RegisterMagickInfo(entry);
  }
  return(MagickImageCoderSignature);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U n r e g i s t e r P S D I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UnregisterPSDImage() removes format registrations made by the
% PSD module from the list of supported formats.
%
% The format of the UnregisterPSDImage method is:
%
% UnregisterPSDImage(void)
%
*/
/*
  UnregisterPSDImage() removes the PSB and PSD format registrations added by
  RegisterPSDImage().
*/
ModuleExport void UnregisterPSDImage(void)
{
  (void) UnregisterMagickInfo("PSB");
  (void) UnregisterMagickInfo("PSD");
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W r i t e P S D I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WritePSDImage() writes an image in the Adobe Photoshop encoded image format.
%
% The format of the WritePSDImage method is:
%
% MagickBooleanType WritePSDImage(const ImageInfo *image_info,Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image_info: the image info.
%
% o image: The image.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Write a PSD scanline-offset field: 16 bits for version 1 (PSD) files,
  32 bits for version 2 (PSB) files.  Returns the number of bytes written.
*/
static inline ssize_t SetPSDOffset(const PSDInfo *psd_info,Image *image,
  const size_t offset)
{
  if (psd_info->version == 1)
    return(WriteBlobMSBShort(image,(unsigned short) offset));
  /*
    Bug fix: the version-2 branch previously cast to (unsigned short),
    silently truncating any offset above 65535 in PSB files even though a
    full 32-bit value is written.
  */
  return(WriteBlobMSBLong(image,(unsigned int) offset));
}
/*
  Seek back to a previously reserved offset slot, overwrite it with the now
  known size (16-bit for version 1, 32-bit for version 2), and restore the
  original blob position.  Returns the number of bytes written.
*/
static inline ssize_t WritePSDOffset(const PSDInfo *psd_info,Image *image,
  const MagickSizeType size,const MagickSizeType offset)
{
  MagickSizeType
    current_offset;

  ssize_t
    result;

  current_offset=TellBlob(image);
  SeekBlob(image,offset,SEEK_SET);
  if (psd_info->version == 1)
    result=WriteBlobMSBShort(image,(unsigned short) size);
  else
    /*
      Bug fix: the version-2 branch previously cast to (unsigned short),
      truncating sizes above 65535 before the 32-bit write.
    */
    result=WriteBlobMSBLong(image,(unsigned int) size);
  SeekBlob(image,current_offset,SEEK_SET);
  return(result);
}
/*
  Write a PSD length field: 32 bits for version 1 (PSD) files, 64 bits for
  version 2 (PSB) files.  Returns the number of bytes written.
*/
static inline ssize_t SetPSDSize(const PSDInfo *psd_info,Image *image,
  const MagickSizeType size)
{
  return(psd_info->version == 1 ?
    WriteBlobMSBLong(image,(unsigned int) size) :
    WriteBlobMSBLongLong(image,size));
}
/*
  Seek back to a previously reserved length slot, overwrite it with the now
  known size (32-bit for version 1, 64-bit for version 2), and restore the
  original blob position.  Returns the number of bytes written.
*/
static inline ssize_t WritePSDSize(const PSDInfo *psd_info,Image *image,
  const MagickSizeType size,const MagickSizeType offset)
{
  MagickSizeType
    restore_offset;

  ssize_t
    count;

  restore_offset=TellBlob(image);
  SeekBlob(image,offset,SEEK_SET);
  count=(psd_info->version == 1) ?
    WriteBlobMSBLong(image,(unsigned int) size) :
    WriteBlobMSBLongLong(image,size);
  SeekBlob(image,restore_offset,SEEK_SET);
  return(count);
}
/*
  PSDPackbitsEncodeImage() compresses one scanline with the PackBits RLE
  scheme used by PSD: a count byte N in [0,127] is followed by N+1 literal
  bytes, a count byte in [129,255] repeats the next byte 257-N times, and
  128 marks end-of-data.

  length         - number of source bytes in pixels.
  pixels         - source bytes.
  compact_pixels - destination buffer (caller must size for the worst case).

  Returns the number of compressed bytes written to compact_pixels, or
  throws a binary exception if the scratch buffer cannot be allocated.
*/
static size_t PSDPackbitsEncodeImage(Image *image,const size_t length,
  const unsigned char *pixels,unsigned char *compact_pixels,
  ExceptionInfo *exception)
{
  int
    count;

  register ssize_t
    i,
    j;

  register unsigned char
    *q;

  unsigned char
    *packbits;

  /*
    Compress pixels with Packbits encoding.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(pixels != (unsigned char *) NULL);
  assert(compact_pixels != (unsigned char *) NULL);
  /* Scratch buffer for one literal run (max 127 bytes + count byte). */
  packbits=(unsigned char *) AcquireQuantumMemory(128UL,sizeof(*packbits));
  if (packbits == (unsigned char *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  q=compact_pixels;
  for (i=(ssize_t) length; i != 0; )
  {
    switch (i)
    {
      case 1:
      {
        /* One byte left: emit as a literal run of one. */
        i--;
        *q++=(unsigned char) 0;
        *q++=(*pixels);
        break;
      }
      case 2:
      {
        /* Two bytes left: a literal run of two is never worse. */
        i-=2;
        *q++=(unsigned char) 1;
        *q++=(*pixels);
        *q++=pixels[1];
        break;
      }
      case 3:
      {
        i-=3;
        /* Three equal bytes compress as a packed run of three. */
        if ((*pixels == *(pixels+1)) && (*(pixels+1) == *(pixels+2)))
          {
            *q++=(unsigned char) ((256-3)+1);
            *q++=(*pixels);
            break;
          }
        *q++=(unsigned char) 2;
        *q++=(*pixels);
        *q++=pixels[1];
        *q++=pixels[2];
        break;
      }
      default:
      {
        if ((*pixels == *(pixels+1)) && (*(pixels+1) == *(pixels+2)))
          {
            /*
              Packed run.
            */
            count=3;
            /* Extend the run while bytes repeat, capped at 127. */
            while (((ssize_t) count < i) && (*pixels == *(pixels+count)))
            {
              count++;
              if (count >= 127)
                break;
            }
            i-=count;
            *q++=(unsigned char) ((256-count)+1);
            *q++=(*pixels);
            pixels+=count;
            break;
          }
        /*
          Literal run.
        */
        count=0;
        /* Collect literals until three equal bytes start a packed run. */
        while ((*(pixels+count) != *(pixels+count+1)) ||
               (*(pixels+count+1) != *(pixels+count+2)))
        {
          packbits[count+1]=pixels[count];
          count++;
          if (((ssize_t) count >= (i-3)) || (count >= 127))
            break;
        }
        i-=count;
        *packbits=(unsigned char) (count-1);
        for (j=0; j <= (ssize_t) count; j++)
          *q++=packbits[j];
        pixels+=count;
        break;
      }
    }
  }
  *q++=(unsigned char) 128; /* EOD marker */
  packbits=(unsigned char *) RelinquishMagickMemory(packbits);
  return((size_t) (q-compact_pixels));
}
/*
  Emit the 2-byte compression tag for next_image and, for RLE, a placeholder
  scanline-size table (one offset slot per row of every channel) that is
  patched later via WritePSDOffset().  Returns the number of bytes written.
*/
static size_t WriteCompressionStart(const PSDInfo *psd_info,Image *image,
  const Image *next_image,const ssize_t channels)
{
  size_t
    count;

  ssize_t
    channel,
    row;

  if (next_image->compression == RLECompression)
    {
      count=WriteBlobMSBShort(image,RLE);
      /* Reserve one offset slot per scanline of every channel. */
      for (channel=0; channel < channels; channel++)
        for (row=0; row < (ssize_t) next_image->rows; row++)
          count+=SetPSDOffset(psd_info,image,0);
      return(count);
    }
#ifdef MAGICKCORE_ZLIB_DELEGATE
  if (next_image->compression == ZipCompression)
    return(WriteBlobMSBShort(image,ZipWithoutPrediction));
#endif
  return(WriteBlobMSBShort(image,Raw));
}
/*
  WritePSDChannel() writes one channel of next_image to the output blob
  using the image's compression (Raw, RLE, or Zip when zlib is built in).

  quantum_type   - which sample to export per pixel.
  compact_pixels - caller-provided scratch buffer for PackBits output.
  size_offset    - blob position of this channel's RLE scanline-size table;
                   recomputed locally when separate is MagickTrue.
  separate       - when MagickTrue this channel carries its own compression
                   tag and size table (layer channels, as opposed to the
                   interleaved merged image).

  Returns the number of bytes written; 0 signals an allocation/init failure.
*/
static size_t WritePSDChannel(const PSDInfo *psd_info,
  const ImageInfo *image_info,Image *image,Image *next_image,
  const QuantumType quantum_type, unsigned char *compact_pixels,
  MagickOffsetType size_offset,const MagickBooleanType separate,
  ExceptionInfo *exception)
{
  int
    y;

  MagickBooleanType
    monochrome;

  QuantumInfo
    *quantum_info;

  register const Quantum
    *p;

  register ssize_t
    i;

  size_t
    count,
    length;

  unsigned char
    *pixels;

#ifdef MAGICKCORE_ZLIB_DELEGATE

#define CHUNK 16384

  int
    flush,
    level;

  unsigned char
    *compressed_pixels;

  z_stream
    stream;

  compressed_pixels=(unsigned char *) NULL;
  flush=Z_NO_FLUSH;
#endif
  count=0;
  if (separate != MagickFalse)
    {
      /* +2 skips the compression tag written by WriteCompressionStart. */
      size_offset=TellBlob(image)+2;
      count+=WriteCompressionStart(psd_info,image,next_image,1);
    }
  if (next_image->depth > 8)
    next_image->depth=16;
  monochrome=IsImageMonochrome(image) && (image->depth == 1) ?
    MagickTrue : MagickFalse;
  quantum_info=AcquireQuantumInfo(image_info,next_image);
  if (quantum_info == (QuantumInfo *) NULL)
    return(0);
  pixels=(unsigned char *) GetQuantumPixels(quantum_info);
#ifdef MAGICKCORE_ZLIB_DELEGATE
  if (next_image->compression == ZipCompression)
    {
      compressed_pixels=(unsigned char *) AcquireQuantumMemory(CHUNK,
        sizeof(*compressed_pixels));
      if (compressed_pixels == (unsigned char *) NULL)
        {
          quantum_info=DestroyQuantumInfo(quantum_info);
          return(0);
        }
      ResetMagickMemory(&stream,0,sizeof(stream));
      stream.data_type=Z_BINARY;
      level=Z_DEFAULT_COMPRESSION;
      /* Quality 1-9 maps directly onto the zlib compression level. */
      if ((image_info->quality > 0 && image_info->quality < 10))
        level=(int) image_info->quality;
      if (deflateInit(&stream,level) != Z_OK)
        {
          quantum_info=DestroyQuantumInfo(quantum_info);
          return(0);
        }
    }
#endif
  for (y=0; y < (ssize_t) next_image->rows; y++)
  {
    p=GetVirtualPixels(next_image,0,y,next_image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    length=ExportQuantumPixels(next_image,(CacheView *) NULL,quantum_info,
      quantum_type,pixels,exception);
    /* PSD stores 1-bit data inverted relative to our export. */
    if (monochrome != MagickFalse)
      for (i=0; i < (ssize_t) length; i++)
        pixels[i]=(~pixels[i]);
    if (next_image->compression == RLECompression)
      {
        length=PSDPackbitsEncodeImage(image,length,pixels,compact_pixels,
          exception);
        count+=WriteBlob(image,length,compact_pixels);
        /* Patch this scanline's size into the table reserved earlier. */
        size_offset+=WritePSDOffset(psd_info,image,length,size_offset);
      }
#ifdef MAGICKCORE_ZLIB_DELEGATE
    else if (next_image->compression == ZipCompression)
      {
        stream.avail_in=(uInt) length;
        stream.next_in=(Bytef *) pixels;
        if (y == (ssize_t) next_image->rows-1)
          flush=Z_FINISH;
        do {
            stream.avail_out=(uInt) CHUNK;
            stream.next_out=(Bytef *) compressed_pixels;
            if (deflate(&stream,flush) == Z_STREAM_ERROR)
              break;
            length=(size_t) CHUNK-stream.avail_out;
            if (length > 0)
              count+=WriteBlob(image,length,compressed_pixels);
        } while (stream.avail_out == 0);
      }
#endif
    else
      count+=WriteBlob(image,length,pixels);
  }
#ifdef MAGICKCORE_ZLIB_DELEGATE
  if (next_image->compression == ZipCompression)
    {
      (void) deflateEnd(&stream);
      compressed_pixels=(unsigned char *) RelinquishMagickMemory(
        compressed_pixels);
    }
#endif
  quantum_info=DestroyQuantumInfo(quantum_info);
  return(count);
}
/*
  Allocate a scratch buffer for Packbits (RLE) encoding of one scanline.
  Worst-case RLE expansion is bounded by 9*columns+1 packets of one or two
  bytes (two when depth > 8).  Returns NULL after raising a resource-limit
  exception on allocation failure.
*/
static unsigned char *AcquireCompactPixels(const Image *image,
  ExceptionInfo *exception)
{
  size_t
    bytes_per_packet;

  unsigned char
    *buffer;

  bytes_per_packet=(image->depth > 8UL) ? 2UL : 1UL;
  buffer=(unsigned char *) AcquireQuantumMemory((9*image->columns)+1,
    bytes_per_packet*sizeof(*buffer));
  if (buffer == (unsigned char *) NULL)
    (void) ThrowMagickException(exception,GetMagickModule(),
      ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
  return(buffer);
}
/*
  Write every channel of next_image (index/gray, or R,G,B[,K], plus alpha,
  plus an optional opacity mask) to the PSD blob via WritePSDChannel.
  When 'separate' is set (layer data) each channel gets its own compression
  marker and a per-channel size patched back at size_offset; otherwise
  (merged composite) one marker covers all channels and rows_offset tracks
  the shared RLE row-count table.  Returns total bytes written, 0 on error.
*/
static size_t WritePSDChannels(const PSDInfo *psd_info,
  const ImageInfo *image_info,Image *image,Image *next_image,
  MagickOffsetType size_offset,const MagickBooleanType separate,
  ExceptionInfo *exception)
{
  Image
    *mask;

  MagickOffsetType
    rows_offset;

  size_t
    channels,
    count,
    length,
    offset_length;

  unsigned char
    *compact_pixels;

  count=0;
  offset_length=0;
  rows_offset=0;
  compact_pixels=(unsigned char *) NULL;
  if (next_image->compression == RLECompression)
    {
      /* scratch buffer shared by all channels of this layer */
      compact_pixels=AcquireCompactPixels(next_image,exception);
      if (compact_pixels == (unsigned char *) NULL)
        return(0);
    }
  channels=1;
  if (separate == MagickFalse)
    {
      if (next_image->storage_class != PseudoClass)
        {
          if (IsImageGray(next_image) == MagickFalse)
            channels=next_image->colorspace == CMYKColorspace ? 4 : 3;
          if (next_image->alpha_trait != UndefinedPixelTrait)
            channels++;
        }
      /* one compression marker for the whole composite; rows_offset points
         just past it, at the start of the RLE row-length table */
      rows_offset=TellBlob(image)+2;
      count+=WriteCompressionStart(psd_info,image,next_image,channels);
      /* size of one channel's slice of the row-length table */
      offset_length=(next_image->rows*(psd_info->version == 1 ? 2 : 4));
    }
  /* skip the compression marker when patching per-channel sizes */
  size_offset+=2;
  if (next_image->storage_class == PseudoClass)
    {
      length=WritePSDChannel(psd_info,image_info,image,next_image,
        IndexQuantum,compact_pixels,rows_offset,separate,exception);
      if (separate != MagickFalse)
        size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
      else
        rows_offset+=offset_length;
      count+=length;
    }
  else
    {
      if (IsImageGray(next_image) != MagickFalse)
        {
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            GrayQuantum,compact_pixels,rows_offset,separate,exception);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
        }
      else
        {
          /* PSD stores CMYK inverted; NegateCMYK is undone after writing */
          if (next_image->colorspace == CMYKColorspace)
            (void) NegateCMYK(next_image,exception);
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            RedQuantum,compact_pixels,rows_offset,separate,exception);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            GreenQuantum,compact_pixels,rows_offset,separate,exception);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            BlueQuantum,compact_pixels,rows_offset,separate,exception);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
          if (next_image->colorspace == CMYKColorspace)
            {
              length=WritePSDChannel(psd_info,image_info,image,next_image,
                BlackQuantum,compact_pixels,rows_offset,separate,exception);
              if (separate != MagickFalse)
                size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
              else
                rows_offset+=offset_length;
              count+=length;
            }
        }
      if (next_image->alpha_trait != UndefinedPixelTrait)
        {
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            AlphaQuantum,compact_pixels,rows_offset,separate,exception);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
        }
    }
  compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
  /* restore the pixel values inverted above */
  if (next_image->colorspace == CMYKColorspace)
    (void) NegateCMYK(next_image,exception);
  if (separate != MagickFalse)
    {
      const char
        *property;

      /* optional layer opacity mask, stashed in the image registry by the
         reader under the "psd:opacity-mask" artifact */
      property=GetImageArtifact(next_image,"psd:opacity-mask");
      if (property != (const char *) NULL)
        {
          mask=(Image *) GetImageRegistry(ImageRegistryType,property,
            exception);
          if (mask != (Image *) NULL)
            {
              if (mask->compression == RLECompression)
                {
                  compact_pixels=AcquireCompactPixels(mask,exception);
                  if (compact_pixels == (unsigned char *) NULL)
                    return(0);
                }
              /* a mask has a single channel; RedQuantum selects it */
              length=WritePSDChannel(psd_info,image_info,image,mask,
                RedQuantum,compact_pixels,rows_offset,MagickTrue,exception);
              (void) WritePSDSize(psd_info,image,length,size_offset);
              count+=length;
              compact_pixels=(unsigned char *) RelinquishMagickMemory(
                compact_pixels);
            }
        }
    }
  return(count);
}
/*
  Write 'value' as a Pascal string: one length byte followed by at most 255
  characters, then NUL padding so the total length (length byte included)
  is a multiple of 'padding'.  Returns the number of bytes written.

  Fixes: strlen() is now evaluated once, and a padding of 0 no longer
  divides by zero (it simply means "no padding").
*/
static size_t WritePascalString(Image *image,const char *value,size_t padding)
{
  size_t
    count,
    length;

  register ssize_t
    i;

  /*
    Max length is 255.
  */
  count=0;
  length=strlen(value);
  if (length > 255UL)
    length=255UL;
  if (length == 0)
    count+=WriteBlobByte(image,0);
  else
    {
      count+=WriteBlobByte(image,(unsigned char) length);
      count+=WriteBlob(image,length,(const unsigned char *) value);
    }
  length++;  /* account for the length byte itself */
  if ((padding == 0) || ((length % padding) == 0))
    return(count);
  for (i=0; i < (ssize_t) (padding-(length % padding)); i++)
    count+=WriteBlobByte(image,0);
  return(count);
}
/*
  Write the 0x03ED (resolution) image resource block: horizontal and
  vertical resolution as 16.16 fixed-point values plus their display units
  (1 = pixels/inch, 2 = pixels/cm).  PSD always stores resolution in
  pixels/inch, hence the 2.54 conversion for centimeter-based images.
*/
static void WriteResolutionResourceBlock(Image *image)
{
  double
    x_resolution,
    y_resolution;

  unsigned short
    units;

  if (image->units == PixelsPerCentimeterResolution)
    {
      /* convert pixels/cm to pixels/inch, scaled to 16.16 fixed point */
      x_resolution=2.54*65536.0*image->resolution.x+0.5;
      y_resolution=2.54*65536.0*image->resolution.y+0.5;
      units=2;
    }
  else
    {
      x_resolution=65536.0*image->resolution.x+0.5;
      y_resolution=65536.0*image->resolution.y+0.5;
      units=1;
    }
  (void) WriteBlob(image,4,(const unsigned char *) "8BIM");
  (void) WriteBlobMSBShort(image,0x03ED);
  (void) WriteBlobMSBShort(image,0);
  (void) WriteBlobMSBLong(image,16); /* resource size */
  /* NOTE(review): 0.5 is added both above and here, biasing the rounded
     value by up to one least-significant fixed-point unit — looks like a
     double-rounding slip; confirm against the PSD resource spec before
     changing, as existing files already carry this bias. */
  (void) WriteBlobMSBLong(image,(unsigned int) (x_resolution+0.5));
  (void) WriteBlobMSBShort(image,units); /* horizontal resolution unit */
  (void) WriteBlobMSBShort(image,units); /* width unit */
  (void) WriteBlobMSBLong(image,(unsigned int) (y_resolution+0.5));
  (void) WriteBlobMSBShort(image,units); /* vertical resolution unit */
  (void) WriteBlobMSBShort(image,units); /* height unit */
}
/*
  Emit one layer-record channel descriptor: the signed channel id
  (-1 = alpha, -2 = user mask) followed by a zeroed placeholder for the
  channel's data length, patched later once the data has been written.
  Returns the number of bytes written.
*/
static inline size_t WriteChannelSize(const PSDInfo *psd_info,Image *image,
  const signed short channel)
{
  size_t
    written;

  written=WriteBlobMSBSignedShort(image,channel);
  written+=SetPSDSize(psd_info,image,0);
  return(written);
}
/*
  Excise the ICC profile resource (id 0x040F) from an 8BIM resource block
  so the profile can be written as its own resource.  Walks the 8BIM
  records in place; on finding the ICC record, shifts the remainder of the
  block down over it and shrinks the StringInfo.  Malformed input (no 8BIM
  signature) stops the walk.  Resource data is padded to an even size,
  hence PSDQuantum()/the odd-count skip below.
*/
static void RemoveICCProfileFromResourceBlock(StringInfo *bim_profile)
{
  register const unsigned char
    *p;

  size_t
    length;

  unsigned char
    *datum;

  unsigned int
    count,
    long_sans;

  unsigned short
    id,
    short_sans;

  length=GetStringInfoLength(bim_profile);
  /* 16 bytes is the minimum for a signature + header */
  if (length < 16)
    return;
  datum=GetStringInfoDatum(bim_profile);
  for (p=datum; (p >= datum) && (p < (datum+length-16)); )
  {
    register unsigned char
      *q;

    q=(unsigned char *) p;
    if (LocaleNCompare((const char *) p,"8BIM",4) != 0)
      break;
    /* signature (long), id, pascal-name stub, data byte count */
    p=PushLongPixel(MSBEndian,p,&long_sans);
    p=PushShortPixel(MSBEndian,p,&id);
    p=PushShortPixel(MSBEndian,p,&short_sans);
    p=PushLongPixel(MSBEndian,p,&count);
    if (id == 0x0000040f)
      {
        ssize_t
          quantum;

        /* whole record: 12-byte header + even-padded data */
        quantum=PSDQuantum(count)+12;
        if ((quantum >= 12) && (quantum < (ssize_t) length))
          {
            if ((q+quantum < (datum+length-16)))
              (void) CopyMagickMemory(q,q+quantum,length-quantum-(q-datum));
            SetStringInfoLength(bim_profile,length-quantum);
          }
        break;
      }
    p+=count;
    /* resource data is padded to an even byte count */
    if ((count & 0x01) != 0)
      p++;
  }
}
/*
  Excise the resolution resource (id 0x03ED) from an 8BIM resource block;
  WritePSDImage always regenerates it via WriteResolutionResourceBlock, so
  a stale copy from the source profile must not survive.  Same in-place
  record walk and compaction as RemoveICCProfileFromResourceBlock.
*/
static void RemoveResolutionFromResourceBlock(StringInfo *bim_profile)
{
  register const unsigned char
    *p;

  size_t
    length;

  unsigned char
    *datum;

  unsigned int
    count,
    long_sans;

  unsigned short
    id,
    short_sans;

  length=GetStringInfoLength(bim_profile);
  if (length < 16)
    return;
  datum=GetStringInfoDatum(bim_profile);
  for (p=datum; (p >= datum) && (p < (datum+length-16)); )
  {
    register unsigned char
      *q;

    ssize_t
      cnt;

    q=(unsigned char *) p;
    /* stop at the first record without an 8BIM signature */
    if (LocaleNCompare((const char *) p,"8BIM",4) != 0)
      return;
    p=PushLongPixel(MSBEndian,p,&long_sans);
    p=PushShortPixel(MSBEndian,p,&id);
    p=PushShortPixel(MSBEndian,p,&short_sans);
    p=PushLongPixel(MSBEndian,p,&count);
    /* even-padded data size; negative means the count overflowed */
    cnt=PSDQuantum(count);
    if (cnt < 0)
      return;
    if ((id == 0x000003ed) && (cnt < (ssize_t) (length-12)))
      {
        /* shift everything after the record (header is 12 bytes) down */
        (void) CopyMagickMemory(q,q+cnt+12,length-(cnt+12)-(q-datum));
        SetStringInfoLength(bim_profile,length-(cnt+12));
        break;
      }
    p+=count;
    if ((count & 0x01) != 0)
      p++;
  }
}
/*
  Filter the "psd:additional-info" profile (extra layer data captured by
  the PSD reader) according to the psd:additional-info option:
    "all"        - keep the profile untouched;
    "selective"  - keep only records whose 4-byte key is whitelisted below,
                   compacting the profile in place;
    anything else - drop the profile entirely.
  Returns the surviving profile, or NULL when nothing is kept.
*/
static const StringInfo *GetAdditionalInformation(const ImageInfo *image_info,
  Image *image,ExceptionInfo *exception)
{
#define PSDKeySize 5
#define PSDAllowedLength 36

  char
    key[PSDKeySize];

  /* Whitelist of keys from: https://www.adobe.com/devnet-apps/photoshop/fileformatashtml/ */
  const char
    allowed[PSDAllowedLength][PSDKeySize] = {
      "blnc", "blwh", "brit", "brst", "clbl", "clrL", "curv", "expA", "FMsk",
      "GdFl", "grdm", "hue ", "hue2", "infx", "knko", "lclr", "levl", "lnsr",
      "lfx2", "luni", "lrFX", "lspf", "lyid", "lyvr", "mixr", "nvrt", "phfl",
      "post", "PtFl", "selc", "shpa", "sn2P", "SoCo", "thrs", "tsly", "vibA"
    },
    *option;

  const StringInfo
    *info;

  MagickBooleanType
    found;

  register size_t
    i;

  size_t
    remaining_length,
    length;

  StringInfo
    *profile;

  unsigned char
    *p;

  unsigned int
    size;

  info=GetImageProfile(image,"psd:additional-info");
  if (info == (const StringInfo *) NULL)
    return((const StringInfo *) NULL);
  option=GetImageOption(image_info,"psd:additional-info");
  if (LocaleCompare(option,"all") == 0)
    return(info);
  if (LocaleCompare(option,"selective") != 0)
    {
      /* option absent or unrecognized: discard the profile */
      profile=RemoveImageProfile(image,"psd:additional-info");
      return(DestroyStringInfo(profile));
    }
  length=GetStringInfoLength(info);
  p=GetStringInfoDatum(info);
  remaining_length=length;
  length=0;
  /* each record: 4-byte signature, 4-byte key, 4-byte size, then data */
  while (remaining_length >= 12)
  {
    /* skip over signature */
    p+=4;
    key[0]=(*p++);
    key[1]=(*p++);
    key[2]=(*p++);
    key[3]=(*p++);
    key[4]='\0';
    size=(unsigned int) (*p++) << 24;
    size|=(unsigned int) (*p++) << 16;
    size|=(unsigned int) (*p++) << 8;
    size|=(unsigned int) (*p++);
    size=size & 0xffffffff;
    remaining_length-=12;
    /* declared size past the end of the profile: treat as corrupt */
    if ((size_t) size > remaining_length)
      return((const StringInfo *) NULL);
    found=MagickFalse;
    for (i=0; i < PSDAllowedLength; i++)
    {
      if (LocaleNCompare(key,allowed[i],PSDKeySize) != 0)
        continue;
      found=MagickTrue;
      break;
    }
    remaining_length-=(size_t) size;
    if (found == MagickFalse)
      {
        /* compact in place: slide the tail over the rejected record
           (p-12 points at this record's signature) */
        if (remaining_length > 0)
          p=(unsigned char *) CopyMagickMemory(p-12,p+size,remaining_length);
        continue;
      }
    length+=(size_t) size+12;
    p+=size;
  }
  profile=RemoveImageProfile(image,"psd:additional-info");
  if (length == 0)
    return(DestroyStringInfo(profile));
  /* NOTE(review): 'info' and 'profile' alias the same StringInfo here
     (GetImageProfile/RemoveImageProfile return the stored object), so the
     truncation below also applies to 'info'; SetImageProfile then clones
     it back onto the image while the original is returned to the caller.
     Subtle ownership — verify against MagickCore profile semantics before
     touching. */
  SetStringInfoLength(profile,(const size_t) length);
  SetImageProfile(image,"psd:additional-info",info,exception);
  return(profile);
}
/*
  Write the image list as a Photoshop PSD (or PSB) file:
    1. file header (signature, version, channels, geometry, depth, mode);
    2. colormap for pseudo-class images;
    3. image resource blocks (resolution, 8BIM, ICC profile);
    4. layer records (one per image in the list) followed by per-layer
       channel data;
    5. the merged composite image.
  PSB (version 2) is selected for the "PSB" magick or images larger than
  30000 pixels in either dimension.  Returns MagickTrue on success.
*/
static MagickBooleanType WritePSDImage(const ImageInfo *image_info,
  Image *image,ExceptionInfo *exception)
{
  char
    layer_name[MagickPathExtent];

  const char
    *property;

  const StringInfo
    *icc_profile,
    *info;

  Image
    *base_image,
    *next_image;

  MagickBooleanType
    status;

  MagickOffsetType
    *layer_size_offsets,
    size_offset;

  PSDInfo
    psd_info;

  register ssize_t
    i;

  size_t
    layer_count,
    layer_index,
    length,
    name_length,
    num_channels,
    packet_size,
    rounded_size,
    size;

  StringInfo
    *bim_profile;

  /*
    Open image file.
  */
  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception);
  if (status == MagickFalse)
    return(status);
  /* bytes per pixel: 3 or 6 color bytes, plus 1 or 2 for alpha */
  packet_size=(size_t) (image->depth > 8 ? 6 : 3);
  if (image->alpha_trait != UndefinedPixelTrait)
    packet_size+=image->depth > 8 ? 2 : 1;
  psd_info.version=1;
  /* PSD caps dimensions at 30000; larger images need the PSB variant */
  if ((LocaleCompare(image_info->magick,"PSB") == 0) ||
      (image->columns > 30000) || (image->rows > 30000))
    psd_info.version=2;
  (void) WriteBlob(image,4,(const unsigned char *) "8BPS");
  (void) WriteBlobMSBShort(image,psd_info.version); /* version */
  for (i=1; i <= 6; i++)
    (void) WriteBlobByte(image, 0); /* 6 bytes of reserved */
  /* channel count depends on gray/pseudo/direct class and alpha */
  if (SetImageGray(image,exception) != MagickFalse)
    num_channels=(image->alpha_trait != UndefinedPixelTrait ? 2UL : 1UL);
  else
    if ((image_info->type != TrueColorType) && (image_info->type !=
         TrueColorAlphaType) && (image->storage_class == PseudoClass))
      num_channels=(image->alpha_trait != UndefinedPixelTrait ? 2UL : 1UL);
    else
      {
        if (image->storage_class == PseudoClass)
          (void) SetImageStorageClass(image,DirectClass,exception);
        if (image->colorspace != CMYKColorspace)
          num_channels=(image->alpha_trait != UndefinedPixelTrait ? 4UL : 3UL);
        else
          num_channels=(image->alpha_trait != UndefinedPixelTrait ? 5UL : 4UL);
      }
  (void) WriteBlobMSBShort(image,(unsigned short) num_channels);
  (void) WriteBlobMSBLong(image,(unsigned int) image->rows);
  (void) WriteBlobMSBLong(image,(unsigned int) image->columns);
  if (IsImageGray(image) != MagickFalse)
    {
      MagickBooleanType
        monochrome;

      /*
        Write depth & mode.
      */
      monochrome=IsImageMonochrome(image) && (image->depth == 1) ?
        MagickTrue : MagickFalse;
      (void) WriteBlobMSBShort(image,(unsigned short)
        (monochrome != MagickFalse ? 1 : image->depth > 8 ? 16 : 8));
      (void) WriteBlobMSBShort(image,(unsigned short)
        (monochrome != MagickFalse ? BitmapMode : GrayscaleMode));
    }
  else
    {
      (void) WriteBlobMSBShort(image,(unsigned short) (image->storage_class ==
        PseudoClass ? 8 : image->depth > 8 ? 16 : 8));
      /* anything not explicitly CMYK is written as sRGB */
      if (((image_info->colorspace != UndefinedColorspace) ||
           (image->colorspace != CMYKColorspace)) &&
          (image_info->colorspace != CMYKColorspace))
        {
          (void) TransformImageColorspace(image,sRGBColorspace,exception);
          (void) WriteBlobMSBShort(image,(unsigned short)
            (image->storage_class == PseudoClass ? IndexedMode : RGBMode));
        }
      else
        {
          if (image->colorspace != CMYKColorspace)
            (void) TransformImageColorspace(image,CMYKColorspace,exception);
          (void) WriteBlobMSBShort(image,CMYKMode);
        }
    }
  if ((IsImageGray(image) != MagickFalse) ||
      (image->storage_class == DirectClass) || (image->colors > 256))
    (void) WriteBlobMSBLong(image,0);
  else
    {
      /*
        Write PSD raster colormap.
      */
      (void) WriteBlobMSBLong(image,768);
      for (i=0; i < (ssize_t) image->colors; i++)
        (void) WriteBlobByte(image,ScaleQuantumToChar(image->colormap[i].red));
      for ( ; i < 256; i++)
        (void) WriteBlobByte(image,0);
      for (i=0; i < (ssize_t) image->colors; i++)
        (void) WriteBlobByte(image,ScaleQuantumToChar(
          image->colormap[i].green));
      for ( ; i < 256; i++)
        (void) WriteBlobByte(image,0);
      for (i=0; i < (ssize_t) image->colors; i++)
        (void) WriteBlobByte(image,ScaleQuantumToChar(image->colormap[i].blue));
      for ( ; i < 256; i++)
        (void) WriteBlobByte(image,0);
    }
  /*
    Image resource block.
  */
  length=28; /* 0x03EB */
  bim_profile=(StringInfo *) GetImageProfile(image,"8bim");
  icc_profile=GetImageProfile(image,"icc");
  if (bim_profile != (StringInfo *) NULL)
    {
      /* work on a clone; strip resources we regenerate ourselves */
      bim_profile=CloneStringInfo(bim_profile);
      if (icc_profile != (StringInfo *) NULL)
        RemoveICCProfileFromResourceBlock(bim_profile);
      RemoveResolutionFromResourceBlock(bim_profile);
      length+=PSDQuantum(GetStringInfoLength(bim_profile));
    }
  if (icc_profile != (const StringInfo *) NULL)
    length+=PSDQuantum(GetStringInfoLength(icc_profile))+12;
  (void) WriteBlobMSBLong(image,(unsigned int) length);
  WriteResolutionResourceBlock(image);
  if (bim_profile != (StringInfo *) NULL)
    {
      (void) WriteBlob(image,GetStringInfoLength(bim_profile),
        GetStringInfoDatum(bim_profile));
      bim_profile=DestroyStringInfo(bim_profile);
    }
  if (icc_profile != (StringInfo *) NULL)
    {
      /* ICC profile as its own 8BIM resource (id 0x040F) */
      (void) WriteBlob(image,4,(const unsigned char *) "8BIM");
      (void) WriteBlobMSBShort(image,0x0000040F);
      (void) WriteBlobMSBShort(image,0);
      (void) WriteBlobMSBLong(image,(unsigned int) GetStringInfoLength(
        icc_profile));
      (void) WriteBlob(image,GetStringInfoLength(icc_profile),
        GetStringInfoDatum(icc_profile));
      /* pad to an even byte count */
      if ((MagickOffsetType) GetStringInfoLength(icc_profile) !=
          PSDQuantum(GetStringInfoLength(icc_profile)))
        (void) WriteBlobByte(image,0);
    }
  /* layers come from the rest of the list; a single image is its own layer */
  base_image=GetNextImageInList(image);
  if (base_image == (Image *) NULL)
    base_image=image;
  size=0;
  size_offset=TellBlob(image);
  /* zeroed placeholders for the layer-info section sizes, patched later */
  SetPSDSize(&psd_info,image,0);
  SetPSDSize(&psd_info,image,0);
  layer_count=0;
  for (next_image=base_image; next_image != NULL; )
  {
    layer_count++;
    next_image=GetNextImageInList(next_image);
  }
  /* a negative layer count tells Photoshop the first alpha channel holds
     the merged-composite transparency */
  if (image->alpha_trait != UndefinedPixelTrait)
    size+=WriteBlobMSBShort(image,-(unsigned short) layer_count);
  else
    size+=WriteBlobMSBShort(image,(unsigned short) layer_count);
  layer_size_offsets=(MagickOffsetType *) AcquireQuantumMemory(
    (size_t) layer_count,sizeof(MagickOffsetType));
  if (layer_size_offsets == (MagickOffsetType *) NULL)
    ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed");
  layer_index=0;
  for (next_image=base_image; next_image != NULL; )
  {
    Image
      *mask;

    unsigned char
      default_color;

    unsigned short
      channels,
      total_channels;

    mask=(Image *) NULL;
    property=GetImageArtifact(next_image,"psd:opacity-mask");
    default_color=0;
    if (property != (const char *) NULL)
      {
        mask=(Image *) GetImageRegistry(ImageRegistryType,property,exception);
        /* NOTE(review): the registry key length (9) apparently encodes the
           mask's default color — confirm against the PSD reader that
           generates these keys */
        default_color=strlen(property) == 9 ? 255 : 0;
      }
    /* layer bounding box: top, left, bottom, right */
    size+=WriteBlobMSBLong(image,(unsigned int) next_image->page.y);
    size+=WriteBlobMSBLong(image,(unsigned int) next_image->page.x);
    size+=WriteBlobMSBLong(image,(unsigned int) (next_image->page.y+
      next_image->rows));
    size+=WriteBlobMSBLong(image,(unsigned int) (next_image->page.x+
      next_image->columns));
    channels=1U;
    if ((next_image->storage_class != PseudoClass) &&
        (IsImageGray(next_image) == MagickFalse))
      channels=next_image->colorspace == CMYKColorspace ? 4U : 3U;
    total_channels=channels;
    if (next_image->alpha_trait != UndefinedPixelTrait)
      total_channels++;
    if (mask != (Image *) NULL)
      total_channels++;
    size+=WriteBlobMSBShort(image,total_channels);
    /* remember where this layer's channel-size placeholders start so the
       real sizes can be patched after the channel data is written */
    layer_size_offsets[layer_index++]=TellBlob(image);
    for (i=0; i < (ssize_t) channels; i++)
      size+=WriteChannelSize(&psd_info,image,(signed short) i);
    if (next_image->alpha_trait != UndefinedPixelTrait)
      size+=WriteChannelSize(&psd_info,image,-1);
    if (mask != (Image *) NULL)
      size+=WriteChannelSize(&psd_info,image,-2);
    size+=WriteBlob(image,4,(const unsigned char *) "8BIM");
    size+=WriteBlob(image,4,(const unsigned char *)
      CompositeOperatorToPSDBlendMode(next_image->compose));
    property=GetImageArtifact(next_image,"psd:layer.opacity");
    if (property != (const char *) NULL)
      {
        Quantum
          opacity;

        opacity=(Quantum) StringToInteger(property);
        size+=WriteBlobByte(image,ScaleQuantumToChar(opacity));
        (void) ApplyPSDLayerOpacity(next_image,opacity,MagickTrue,exception);
      }
    else
      size+=WriteBlobByte(image,255);
    size+=WriteBlobByte(image,0); /* clipping */
    size+=WriteBlobByte(image,next_image->compose==NoCompositeOp ?
      1 << 0x02 : 1); /* layer properties - visible, etc. */
    size+=WriteBlobByte(image,0); /* filler */
    info=GetAdditionalInformation(image_info,next_image,exception);
    property=(const char *) GetImageProperty(next_image,"label",exception);
    if (property == (const char *) NULL)
      {
        /* unlabeled layers get a synthetic "L<n>" name */
        (void) FormatLocaleString(layer_name,MagickPathExtent,"L%.20g",
          (double) layer_index);
        property=layer_name;
      }
    /* extra-data length: padded pascal name + mask record + blending
       ranges placeholder + additional info */
    name_length=strlen(property)+1;
    if ((name_length % 4) != 0)
      name_length+=(4-(name_length % 4));
    if (info != (const StringInfo *) NULL)
      name_length+=GetStringInfoLength(info);
    name_length+=8;
    if (mask != (Image *) NULL)
      name_length+=20;
    size+=WriteBlobMSBLong(image,(unsigned int) name_length);
    if (mask == (Image *) NULL)
      size+=WriteBlobMSBLong(image,0);
    else
      {
        if (mask->compose != NoCompositeOp)
          (void) ApplyPSDOpacityMask(next_image,mask,ScaleCharToQuantum(
            default_color),MagickTrue,exception);
        mask->page.y+=image->page.y;
        mask->page.x+=image->page.x;
        /* 20-byte layer mask record: bounds, default color, flags */
        size+=WriteBlobMSBLong(image,20);
        size+=WriteBlobMSBSignedLong(image,mask->page.y);
        size+=WriteBlobMSBSignedLong(image,mask->page.x);
        size+=WriteBlobMSBLong(image,(const unsigned int) mask->rows+
          mask->page.y);
        size+=WriteBlobMSBLong(image,(const unsigned int) mask->columns+
          mask->page.x);
        size+=WriteBlobByte(image,default_color);
        size+=WriteBlobByte(image,mask->compose == NoCompositeOp ? 2 : 0);
        size+=WriteBlobMSBShort(image,0);
      }
    size+=WriteBlobMSBLong(image,0); /* layer blending ranges */
    size+=WritePascalString(image,property,4);
    if (info != (const StringInfo *) NULL)
      size+=WriteBlob(image,GetStringInfoLength(info),
        GetStringInfoDatum(info));
    next_image=GetNextImageInList(next_image);
  }
  /*
    Now the image data!
  */
  next_image=base_image;
  layer_index=0;
  while (next_image != NULL)
  {
    length=WritePSDChannels(&psd_info,image_info,image,next_image,
      layer_size_offsets[layer_index++],MagickTrue,exception);
    if (length == 0)
      {
        status=MagickFalse;
        break;
      }
    size+=length;
    next_image=GetNextImageInList(next_image);
  }
  (void) WriteBlobMSBLong(image,0); /* user mask data */
  /*
    Write the total size
  */
  size_offset+=WritePSDSize(&psd_info,image,size+
    (psd_info.version == 1 ? 8 : 16),size_offset);
  /* layer-info section size must be even */
  if ((size/2) != ((size+1)/2))
    rounded_size=size+1;
  else
    rounded_size=size;
  (void) WritePSDSize(&psd_info,image,rounded_size,size_offset);
  layer_size_offsets=(MagickOffsetType *) RelinquishMagickMemory(
    layer_size_offsets);
  /*
    Remove the opacity mask from the registry
  */
  next_image=base_image;
  while (next_image != (Image *) NULL)
  {
    property=GetImageArtifact(next_image,"psd:opacity-mask");
    if (property != (const char *) NULL)
      DeleteImageRegistry(property);
    next_image=GetNextImageInList(next_image);
  }
  /*
    Write composite image.
  */
  if (status != MagickFalse)
    {
      CompressionType
        compression;

      /* composite does not support Zip; fall back to RLE for it */
      compression=image->compression;
      if (image->compression == ZipCompression)
        image->compression=RLECompression;
      if (WritePSDChannels(&psd_info,image_info,image,image,0,MagickFalse,
          exception) == 0)
        status=MagickFalse;
      image->compression=compression;
    }
  (void) CloseBlob(image);
  return(status);
}
|
CPUImplQPU.h | /*
Copyright (c) 2017-2020 Origin Quantum Computing. All Right Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#ifndef CPU_QUANTUM_GATE_H
#define CPU_QUANTUM_GATE_H
#include "Core/VirtualQuantumProcessor/QPUImpl.h"
#include "Core/Utilities/Tools/Utils.h"
#include <stdio.h>
#include <iostream>
#include <vector>
#ifndef SQ2
#define SQ2 (1 / 1.4142135623731)
#endif
#ifndef PI
#define PI 3.14159265358979323846
#endif
#define DECL_GATE_MATRIX(NAME)\
extern const qcomplex_t NAME##00;\
extern const qcomplex_t NAME##01;\
extern const qcomplex_t NAME##10;\
extern const qcomplex_t NAME##11;
#define DECL_ANGLE_GATE_MATRIX(NAME)\
extern const double NAME##_Nx;\
extern const double NAME##_Ny;\
extern const double NAME##_Nz;\
#define REGISTER_GATE_MATRIX(NAME,U00,U01,U10,U11)\
extern const qcomplex_t NAME##00 = U00;\
extern const qcomplex_t NAME##01 = U01;\
extern const qcomplex_t NAME##10 = U10;\
extern const qcomplex_t NAME##11 = U11;
#define REGISTER_ANGLE_GATE_MATRIX(NAME,Nx,Ny,Nz)\
extern const double NAME##_Nx = Nx;\
extern const double NAME##_Ny = Ny;\
extern const double NAME##_Nz = Nz;\
#define CONST_GATE(NAME) \
QError \
NAME(size_t qn, bool isConjugate, double error_rate)\
{ \
const_single_qubit_gate(NAME, qn,isConjugate,error_rate);\
return qErrorNone; \
}
#define CONTROL_CONST_GATE(NAME) \
QError \
NAME(size_t qn, Qnum& vControlBit,bool isConjugate , double error_rate)\
{ \
control_const_single_qubit_gate(NAME, qn,vControlBit,isConjugate,error_rate);\
return qErrorNone; \
}
#define SINGLE_ANGLE_GATE(NAME) \
QError \
NAME(size_t qn,double theta,bool isConjugate, double error_rate)\
{ \
single_qubit_angle_gate(NAME, qn,theta,isConjugate,error_rate);\
return qErrorNone; \
}
#define CONTROL_SINGLE_ANGLE_GATE(NAME) \
QError \
NAME(size_t qn, double theta,Qnum& vControlBit,bool isConjugate, double error_rate)\
{ \
control_single_qubit_angle_gate(NAME, qn, theta,vControlBit,isConjugate, error_rate); \
return qErrorNone; \
}
#define const_single_qubit_gate(GATE_NAME,qn,isConjugate,error_rate) \
single_gate<GATE_NAME##00,GATE_NAME##01,GATE_NAME##10,GATE_NAME##11>(qn,isConjugate,error_rate)
#define control_const_single_qubit_gate(GATE_NAME,qn,vControlBit,isConjugate,error_rate) \
control_single_gate<GATE_NAME##00,GATE_NAME##01,GATE_NAME##10,GATE_NAME##11>\
(qn,vControlBit,isConjugate,error_rate)
#define single_qubit_angle_gate(GATE_NAME,qn,theta,isConjugate,error_rate) \
single_angle_gate<GATE_NAME##_Nx,GATE_NAME##_Ny,GATE_NAME##_Nz>(qn,theta,isConjugate,error_rate)
#define control_single_qubit_angle_gate(GATE_NAME,qn,theta,vControlBit,isConjugate,error_rate) \
control_single_angle_gate<GATE_NAME##_Nx,GATE_NAME##_Ny,GATE_NAME##_Nz> \
(qn,theta,vControlBit,isConjugate,error_rate)
DECL_GATE_MATRIX(Hadamard)
DECL_GATE_MATRIX(X)
DECL_GATE_MATRIX(Y)
DECL_GATE_MATRIX(Z)
DECL_GATE_MATRIX(T)
DECL_GATE_MATRIX(S)
DECL_GATE_MATRIX(P0)
DECL_GATE_MATRIX(P1)
DECL_ANGLE_GATE_MATRIX(RX_GATE)
DECL_ANGLE_GATE_MATRIX(RY_GATE)
DECL_ANGLE_GATE_MATRIX(RZ_GATE)
/**
* @brief QPU implementation by CPU model
* @ingroup VirtualQuantumProcessor
*/
class CPUImplQPU : public QPUImpl
{
public:
vQParam qubit2stat;
QGateParam & findgroup(size_t qn);
CPUImplQPU();
CPUImplQPU(size_t);
~CPUImplQPU();
/**
 * @brief Merge two independent qubit groups into one joint state.
 *
 * Computes the Kronecker product of the two state vectors into qgroup0,
 * appends qgroup1's qubits to qgroup0's qubit list and disables qgroup1.
 *
 * Fix: the original used `int` for the loop counters and the flattened
 * index, which truncates/overflows once the combined state exceeds 2^31
 * amplitudes; the index is now computed in size_t and the parallel loop
 * counter is a signed 64-bit type (OpenMP requires a signed integer).
 *
 * @return false when both references denote the same group (nothing to do),
 *         true after a successful merge.
 */
inline bool TensorProduct(QGateParam& qgroup0, QGateParam& qgroup1)
{
    /* same group: the qubits already share one state vector */
    if (qgroup0.qVec[0] == qgroup1.qVec[0])
    {
        return false;
    }
    size_t length_0 = qgroup0.qstate.size();
    size_t length_1 = qgroup1.qstate.size();
    QStat new_state;
    new_state.resize(length_0 * length_1);
#pragma omp parallel for
    for (long long i = 0; i < (long long)length_1; i++)
    {
        for (size_t j = 0; j < length_0; j++)
        {
            size_t index = (size_t)i * length_0 + j;
            new_state[index] = qgroup0.qstate[j] * qgroup1.qstate[i];
        }
    }
    qgroup0.qstate = new_state;
    qgroup0.qVec.insert(qgroup0.qVec.end(), qgroup1.qVec.begin(), qgroup1.qVec.end());
    qgroup1.enable = false;
    return true;
}
/**
 * @brief Apply the fixed 2x2 unitary [U00 U01; U10 U11] to qubit qn.
 *
 * When isConjugate is set, the conjugate transpose (dagger) of the matrix
 * is applied instead.
 *
 * Fix: the adjoint branch previously executed `C01 = U10;`, i.e. it copied
 * the *unconjugated* template entry instead of the conjugated C10, so the
 * dagger of any gate with nonzero off-diagonal entries was wrong (the bug
 * was masked for diagonal gates such as S and T where U01 == U10 == 0).
 * The swap now exchanges the two already-conjugated entries.
 *
 * @param qn          target qubit
 * @param isConjugate apply the dagger of the gate when true
 * @param error_rate  unused here (kept for interface uniformity)
 */
template<const qcomplex_t& U00, const qcomplex_t& U01, const qcomplex_t& U10, const qcomplex_t& U11>
QError single_gate(size_t qn, bool isConjugate, double error_rate)
{
    qcomplex_t alpha;
    qcomplex_t beta;
    QGateParam& qgroup = findgroup(qn);
    size_t j;
    /* stride between basis states differing only in qubit qn */
    size_t ststep = 1ull << (find(qgroup.qVec.begin(), qgroup.qVec.end(), qn) - qgroup.qVec.begin());
    qcomplex_t C00 = U00;
    qcomplex_t C01 = U01;
    qcomplex_t C10 = U10;
    qcomplex_t C11 = U11;
    if (isConjugate)
    {
        /* dagger: conjugate every entry, then transpose (swap off-diagonals) */
        qcomplex_t temp;
        C00 = qcomplex_t(C00.real(), -C00.imag());
        C01 = qcomplex_t(C01.real(), -C01.imag());
        C10 = qcomplex_t(C10.real(), -C10.imag());
        C11 = qcomplex_t(C11.real(), -C11.imag());
        temp = C01;
        C01 = C10;
        C10 = temp;
    }
    //#pragma omp parallel for private(j,alpha,beta)
    for (size_t i = 0; i < qgroup.qstate.size(); i += ststep * 2)
    {
        for (j = i; j < i + ststep; j++)
        {
            alpha = qgroup.qstate[j];
            beta = qgroup.qstate[j + ststep];
            qgroup.qstate[j] = C00 * alpha + C01 * beta; /* in j,the goal qubit is in |0> */
            qgroup.qstate[j + ststep] = C10 * alpha + C11 * beta; /* in j+ststep,the goal qubit is in |1> */
        }
    }
    return qErrorNone;
}
/**
 * @brief Apply the U1 phase gate diag(1, e^{i*theta}) to qubit qn.
 *
 * Only amplitudes with qubit qn in |1> are scaled, by e^{i*theta}
 * (e^{-i*theta} when isConjugate is set); |0> amplitudes are untouched.
 *
 * Fix: removed the unused C00/C01/C10 locals whose initializers used the
 * comma operator — `qcomplex_t C00 = (1,0);` evaluates to 0, not 1+0i —
 * a latent trap for anyone starting to use them.
 *
 * @param qn          target qubit
 * @param theta       rotation angle in radians
 * @param isConjugate apply the inverse (negative angle) when true
 * @param error_rate  unused here (kept for interface uniformity)
 */
QError U1_GATE(size_t qn, double theta, bool isConjugate, double error_rate)
{
    QGateParam& qgroup = findgroup(qn);
    /* stride between basis states differing only in qubit qn */
    size_t ststep = 1ull << (find(qgroup.qVec.begin(), qgroup.qVec.end(), qn) - qgroup.qVec.begin());
    qcomplex_t C11 = isConjugate ? qcomplex_t(cos(-theta), sin(-theta)) : qcomplex_t(cos(theta), sin(theta));
    for (size_t i = 0; i < qgroup.qstate.size(); i += ststep * 2)
    {
        for (size_t j = i; j < i + ststep; ++j)
        {
            /* j+ststep indexes the states with qubit qn in |1> */
            qgroup.qstate[j + ststep] = C11 * qgroup.qstate[j + ststep];
        }
    }
    return qErrorNone;
}
/**
 * @brief Apply a rotation by angle theta about the axis (Nx,Ny,Nz) of the
 * Bloch sphere to qubit qn: U = cos(t/2)I - i sin(t/2)(Nx X + Ny Y + Nz Z).
 *
 * When isConjugate is set the adjoint is applied: every entry is
 * conjugated and the off-diagonals swapped (here the swap is performed
 * after conjugation, so it is correct, unlike the fixed-matrix variant).
 *
 * @param qn          target qubit
 * @param theta       rotation angle in radians
 * @param isConjugate apply the inverse rotation when true
 * @param error_rate  unused here (kept for interface uniformity)
 */
template<const double& Nx, const double& Ny, const double& Nz>
QError single_angle_gate(size_t qn, double theta, bool isConjugate, double error_rate)
{
    qcomplex_t alpha;
    qcomplex_t beta;
    /* axis-angle parameterization of the 2x2 unitary */
    qcomplex_t U00(cos(theta / 2), -sin(theta / 2)*Nz);
    qcomplex_t U01(-sin(theta / 2)*Ny, -sin(theta / 2)*Nx);
    qcomplex_t U10(sin(theta / 2)*Ny, -sin(theta / 2)*Nx);
    qcomplex_t U11(cos(theta / 2), sin(theta / 2)*Nz);
    if (isConjugate)
    {
        /* dagger: conjugate entries, then swap the off-diagonals */
        qcomplex_t temp;
        U00 = qcomplex_t(U00.real(), -U00.imag());
        U01 = qcomplex_t(U01.real(), -U01.imag());
        U10 = qcomplex_t(U10.real(), -U10.imag());
        U11 = qcomplex_t(U11.real(), -U11.imag());
        temp = U01;
        U01 = U10;
        U10 = temp;
    }
    QGateParam& qgroup = findgroup(qn);
    size_t j;
    /* stride between basis states differing only in qubit qn */
    size_t ststep = 1ull << find(qgroup.qVec.begin(), qgroup.qVec.end(), qn) - qgroup.qVec.begin();
    //#pragma omp parallel for private(j,alpha,beta)
    for (size_t i = 0; i < qgroup.qstate.size(); i += ststep * 2)
    {
        for (j = i; j<i + ststep; j++)
        {
            alpha = qgroup.qstate[j];
            beta = qgroup.qstate[j + ststep];
            qgroup.qstate[j] = U00 * alpha + U01 * beta; /* in j,the goal qubit is in |0> */
            qgroup.qstate[j + ststep] = U10 * alpha + U11 * beta; /* in j+ststep,the goal qubit is in |1> */
        }
    }
    return qErrorNone;
}
/**
 * @brief Apply an axis-angle rotation to qubit qn, conditioned on every
 * qubit in vControlBit being |1>.
 *
 * All involved groups are first merged via TensorProduct.  The loop then
 * enumerates only the basis-state pairs whose control bits are all set:
 * 'block' is the bitmask of the control-bit positions, and each i in
 * [0, M) is expanded to a full index by skipping those positions.
 *
 * @param qn          target qubit
 * @param theta       rotation angle in radians
 * @param vControlBit control qubits (passed by value; modified locally)
 * @param isConjugate apply the inverse rotation when true
 * @param error_rate  gate is applied only when RandomNumberGenerator()
 *                    exceeds this rate (noise model hook)
 */
template<const double& Nx, const double& Ny, const double& Nz>
QError control_single_angle_gate(size_t qn,
    double theta,
    Qnum vControlBit,
    bool isConjugate,
    double error_rate)
{
    if (QPanda::RandomNumberGenerator() > error_rate)
    {
        QGateParam& qgroup0 = findgroup(qn);
        /* merge target and all control groups into one state vector */
        for (auto iter = vControlBit.begin(); iter != vControlBit.end(); iter++)
        {
            TensorProduct(qgroup0, findgroup(*iter));
        }
        /* M = number of basis states with all control bits fixed to |1> */
        size_t M = 1ull << (qgroup0.qVec.size() - vControlBit.size());
        size_t x;
        size_t n = qgroup0.qVec.size();
        size_t ststep = 1ull << (find(qgroup0.qVec.begin(), qgroup0.qVec.end(), qn)
            - qgroup0.qVec.begin());
        size_t index = 0;
        size_t block = 0;
        qcomplex_t alpha, beta;
        /* axis-angle parameterization of the 2x2 unitary */
        qcomplex_t U00(cos(theta / 2), -sin(theta / 2)*Nz);
        qcomplex_t U01(-sin(theta / 2)*Ny, -sin(theta / 2)*Nx);
        qcomplex_t U10(sin(theta / 2)*Ny, -sin(theta / 2)*Nx);
        qcomplex_t U11(cos(theta / 2), sin(theta / 2)*Nz);
        if (isConjugate)
        {
            /* dagger: conjugate entries, then swap the off-diagonals */
            qcomplex_t temp;
            U00 = qcomplex_t(U00.real(), -U00.imag());
            U01 = qcomplex_t(U01.real(), -U01.imag());
            U10 = qcomplex_t(U10.real(), -U10.imag());
            U11 = qcomplex_t(U11.real(), -U11.imag());
            temp = U01;
            U01 = U10;
            U10 = temp;
        }
        /* positions of the control bits inside the merged group, and the
           mask with all of them set */
        Qnum qvtemp;
        for (auto iter = vControlBit.begin(); iter != vControlBit.end(); iter++)
        {
            size_t stemp = (find(qgroup0.qVec.begin(), qgroup0.qVec.end(), *iter)
                - qgroup0.qVec.begin());
            block += 1ull << stemp;
            qvtemp.push_back(stemp);
        }
        sort(qvtemp.begin(), qvtemp.end());
        Qnum::iterator qiter;
        size_t j;
        //#pragma omp parallel for private(j,alpha,beta,index,x,qiter)
        for (size_t i = 0; i < M; i++)
        {
            /* scatter the bits of i into the non-control positions */
            index = 0;
            x = i;
            qiter = qvtemp.begin();
            for (j = 0; j < n; j++)
            {
                while (qiter != qvtemp.end() && *qiter == j)
                {
                    qiter++;
                    j++;
                }
                //index += ((x % 2)*(1ull << j));
                index += ((x & 1) << j);
                x >>= 1;
            }
            /*
             * control qubits are 1,target qubit is 0
             */
            index = index + block - ststep;
            alpha = qgroup0.qstate[index];
            beta = qgroup0.qstate[index + ststep];
            qgroup0.qstate[index] = alpha * U00 + beta * U01;
            qgroup0.qstate[index + ststep] = alpha * U10 + beta * U11;
        }
    }
    return qErrorNone;
}
// Controlled single-qubit gate with a constant 2x2 matrix [U00 U01; U10 U11]
// supplied as template parameters, applied to target qubit qn when every
// qubit in vControlBit is |1>. isConjugate applies the dagger instead.
// The gate is skipped when RandomNumberGenerator() <= error_rate (noise model).
//
// BUG FIX: the dagger branch previously executed `C01 = U10;`, assigning the
// *unconjugated* template constant, so U-dagger had a wrong off-diagonal
// element whenever U10 has a nonzero imaginary part. The swap must exchange
// the already-conjugated locals C01 and C10 (compare the correct swap in
// control_single_angle_gate).
template<const qcomplex_t& U00,
    const qcomplex_t& U01,
    const qcomplex_t& U10,
    const qcomplex_t& U11>
QError control_single_gate(
    size_t qn,
    Qnum vControlBit,
    bool isConjugate,
    double error_rate)
{
    if (QPanda::RandomNumberGenerator() > error_rate)
    {
        // Merge the target's state group with every control's group.
        QGateParam& qgroup0 = findgroup(qn);
        for (auto iter = vControlBit.begin(); iter != vControlBit.end(); iter++)
        {
            TensorProduct(qgroup0, findgroup(*iter));
        }
        // M = number of basis states with the control bits factored out.
        size_t M = 1ull << (qgroup0.qVec.size() - vControlBit.size());
        size_t x;
        size_t n = qgroup0.qVec.size();
        // Stride between the |0> and |1> amplitudes of the target qubit.
        size_t ststep = 1ull << (find(qgroup0.qVec.begin(), qgroup0.qVec.end(), qn)
            - qgroup0.qVec.begin());
        size_t index = 0;
        size_t block = 0;
        qcomplex_t alpha, beta;
        // Local working copies of the matrix entries.
        qcomplex_t C00 = U00;
        qcomplex_t C01 = U01;
        qcomplex_t C10 = U10;
        qcomplex_t C11 = U11;
        if (isConjugate)
        {
            // Dagger = conjugate every entry, then transpose (swap C01/C10).
            qcomplex_t temp;
            C00 = qcomplex_t(C00.real(), -C00.imag());
            C01 = qcomplex_t(C01.real(), -C01.imag());
            C10 = qcomplex_t(C10.real(), -C10.imag());
            C11 = qcomplex_t(C11.real(), -C11.imag());
            temp = C01;
            C01 = C10; // was `C01 = U10;` — used the unconjugated constant
            C10 = temp;
        }
        // block = bitmask with a 1 at every control qubit's position.
        Qnum qvtemp;
        for (auto iter = vControlBit.begin(); iter != vControlBit.end(); iter++)
        {
            size_t stemp = (find(qgroup0.qVec.begin(), qgroup0.qVec.end(), *iter)
                - qgroup0.qVec.begin());
            block += 1ull << stemp;
            qvtemp.push_back(stemp);
        }
        sort(qvtemp.begin(), qvtemp.end());
        Qnum::iterator qiter;
        size_t j;
        //#pragma omp parallel for private(j,alpha,beta,index,x,qiter)
        for (size_t i = 0; i < M; i++)
        {
            // Expand counter i into a full basis index, skipping control bits.
            index = 0;
            x = i;
            qiter = qvtemp.begin();
            for (j = 0; j < n; j++)
            {
                while (qiter != qvtemp.end() && *qiter == j)
                {
                    qiter++;
                    j++;
                }
                //index += ((x % 2)*(1ull << j));
                index += ((x & 1) << j);
                x >>= 1;
            }
            /*
            * control qubits are 1,target qubit is 0
            */
            index = index + block - ststep;
            alpha = qgroup0.qstate[index];
            beta = qgroup0.qstate[index + ststep];
            qgroup0.qstate[index] = alpha * C00 + beta * C01;
            qgroup0.qstate[index + ststep] = alpha * C10 + beta * C11;
        }
    }
    return qErrorNone;
}
//single qubit gate and control-single qubit gate
CONST_GATE(P0);
CONST_GATE(P1);
CONST_GATE(X);
CONST_GATE(Y);
CONST_GATE(Z);
CONST_GATE(Hadamard);
CONST_GATE(T);
CONST_GATE(S);
SINGLE_ANGLE_GATE(RX_GATE);
SINGLE_ANGLE_GATE(RY_GATE);
SINGLE_ANGLE_GATE(RZ_GATE);
CONTROL_SINGLE_ANGLE_GATE(RX_GATE);
CONTROL_SINGLE_ANGLE_GATE(RY_GATE);
CONTROL_SINGLE_ANGLE_GATE(RZ_GATE);
CONTROL_CONST_GATE(Hadamard);
CONTROL_CONST_GATE(X); //CCCC-NOT
CONTROL_CONST_GATE(Y);
CONTROL_CONST_GATE(Z);
CONTROL_CONST_GATE(T);
CONTROL_CONST_GATE(S);
CONTROL_CONST_GATE(P0);
CONTROL_CONST_GATE(P1);
//define const CNOT,CZ,ISWAP,SQISWAP
// Convenience CNOT: control qubit qn_0, target qubit qn_1.
// Builds the qubit list expected by the controlled-X implementation
// (it contains both qubits; qn_1 is interpreted as the target).
inline QError CNOT(size_t qn_0, size_t qn_1,
    bool isConjugate, double error_rate)
{
    Qnum bits;
    bits.push_back(qn_0);
    bits.push_back(qn_1);
    X(qn_1, bits, isConjugate, error_rate); //qn_1 is target
    return qErrorNone;
}
// Multi-controlled NOT: applies X on target qn_1 conditioned on
// vControlBit. qn_0 is unused here beyond documenting the primary control
// (the full control set comes from vControlBit).
inline QError CNOT(size_t qn_0, size_t qn_1, Qnum& vControlBit,
    bool isConjugate, double error_rate)
{
    X(qn_1, vControlBit, isConjugate, error_rate); //qn_1 is target
    return qErrorNone;
}
QError iSWAP(size_t qn_0, size_t qn_1, double theta,
bool isConjugate, double);
QError iSWAP(size_t qn_0, size_t qn_1, Qnum& vControlBit,
double theta, bool isConjugate, double);
// Full iSWAP between qn_0 and qn_1: the parameterized iSWAP with
// theta = PI/2.
inline QError iSWAP(size_t qn_0, size_t qn_1,
    bool isConjugate, double error_rate)
{
    iSWAP(qn_0, qn_1, PI / 2, isConjugate, error_rate);
    return qErrorNone;
}
// Controlled full iSWAP (theta = PI/2) conditioned on vControlBit.
inline QError iSWAP(size_t qn_0, size_t qn_1, Qnum& vControlBit,
    bool isConjugate, double error_rate)
{
    iSWAP(qn_0, qn_1, vControlBit, PI / 2, isConjugate, error_rate);
    return qErrorNone;
}
// Square-root-of-iSWAP: the parameterized iSWAP with theta = PI/4.
inline QError SqiSWAP(size_t qn_0, size_t qn_1,
    bool isConjugate, double error_rate)
{
    iSWAP(qn_0, qn_1, PI / 4, isConjugate, error_rate);
    return qErrorNone;
}
// Controlled square-root-of-iSWAP (theta = PI/4) conditioned on vControlBit.
inline QError SqiSWAP(size_t qn_0, size_t qn_1, Qnum& vControlBit,
    bool isConjugate, double error_rate)
{
    iSWAP(qn_0, qn_1, vControlBit, PI / 4, isConjugate, error_rate);
    return qErrorNone;
}
QError CR(size_t qn_0, size_t qn_1,
double theta, bool isConjugate, double error_rate);
QError CR(size_t qn_0, size_t qn_1, Qnum& vControlBit,
double theta, bool isConjugate, double error_rate);
// Controlled-Z: a controlled phase (CR) with angle PI.
inline QError CZ(size_t qn_0, size_t qn_1, bool isConjugate, double error_rate)
{
    CR(qn_0, qn_1, PI, isConjugate, error_rate);
    return qErrorNone;
}
// Multi-controlled Z: controlled phase (CR) with angle PI, additionally
// conditioned on vControlBit.
inline QError CZ(size_t qn_0, size_t qn_1, Qnum& vControlBit, bool isConjugate, double error_rate)
{
    CR(qn_0, qn_1, vControlBit, PI, isConjugate, error_rate);
    return qErrorNone;
}
//define unitary single/double quantum gate
QError unitarySingleQubitGate(size_t qn,
QStat& matrix, bool isConjugate,
GateType);
QError controlunitarySingleQubitGate(size_t qn, Qnum& vControlBit,
QStat& matrix, bool isConjugate,
GateType);
QError unitaryDoubleQubitGate(size_t qn_0, size_t qn_1,
QStat& matrix, bool isConjugate,
GateType);
QError controlunitaryDoubleQubitGate(size_t qn_0, size_t qn_1, Qnum& vControlBit,
QStat& matrix, bool isConjugate,
GateType);
QError DiagonalGate(Qnum& vQubit, QStat & matrix,
bool isConjugate, double error_rate);
QError controlDiagonalGate(Qnum& vQubit, QStat & matrix, Qnum& vControlBit,
bool isConjugate, double error_rate);
QStat getQState();
QError Reset(size_t qn);
bool qubitMeasure(size_t qn);
QError pMeasure(Qnum& qnum, prob_tuple &mResult,
int select_max=-1);
QError pMeasure(Qnum& qnum, prob_vec &mResult);
QError initState(size_t head_rank, size_t rank_size, size_t qubit_num);
// Two-qubit P00 projection applied via the generic double-qubit gate path.
// The 4x4 matrix is taken verbatim from the upstream definition of P00_GATE.
// error_rate is accepted for signature symmetry with the other gates.
inline QError P00(size_t qn_0, size_t qn_1, bool isConjugate, double error_rate)
{
    QStat projector = { 1,0,0,0,
                        0,1,0,0,
                        0,0,1,0,
                        0,0,0,0 };
    return unitaryDoubleQubitGate(qn_0, qn_1, projector, isConjugate, GateType::P00_GATE);
}
// SWAP gate on (qn_0, qn_1), expressed as its 4x4 permutation matrix and
// dispatched through the generic double-qubit gate path.
// FIX: the local matrix was misnamed `P00_matrix` (copy-paste from P00),
// which misrepresented the gate; renamed to SWAP_matrix. Behavior unchanged.
inline QError SWAP(size_t qn_0, size_t qn_1, bool isConjugate, double error_rate)
{
    QStat SWAP_matrix = { 1,0,0,0,
                          0,0,1,0,
                          0,1,0,0,
                          0,0,0,1 };
    return unitaryDoubleQubitGate(qn_0, qn_1, SWAP_matrix, isConjugate, GateType::SWAP_GATE);
}
// Two-qubit projector onto |11><11|, dispatched through the generic
// double-qubit gate path. error_rate kept for signature symmetry.
inline QError P11(size_t qn_0, size_t qn_1, bool isConjugate, double error_rate)
{
    QStat projector = { 0,0,0,0,
                        0,0,0,0,
                        0,0,0,0,
                        0,0,0,1 };
    return unitaryDoubleQubitGate(qn_0, qn_1, projector, isConjugate, GateType::P11_GATE);
}
};
// CPU simulator variant that additionally supports oracle-style gates
// identified by name. Declaration only; the implementation lives elsewhere.
class CPUImplQPUWithOracle : public CPUImplQPU {
public:
    // Applies the oracle `name` to `bits`, conditioned on `controlbits`;
    // is_dagger selects the inverse.
    QError controlOracularGate(std::vector<size_t> bits,
        std::vector<size_t> controlbits,
        bool is_dagger,
        std::string name);
};
#endif
|
GraphMatRuntime.h | /******************************************************************************
** Copyright (c) 2015, Intel Corporation **
** All rights reserved. **
** **
** Redistribution and use in source and binary forms, with or without **
** modification, are permitted provided that the following conditions **
** are met: **
** 1. Redistributions of source code must retain the above copyright **
** notice, this list of conditions and the following disclaimer. **
** 2. Redistributions in binary form must reproduce the above copyright **
** notice, this list of conditions and the following disclaimer in the **
** documentation and/or other materials provided with the distribution. **
** 3. Neither the name of the copyright holder nor the names of its **
** contributors may be used to endorse or promote products derived **
** from this software without specific prior written permission. **
** **
** THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS **
** "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT **
** LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR **
** A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT **
** HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, **
** SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED **
** TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR **
** PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF **
** LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING **
** NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS **
** SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* ******************************************************************************/
/* Narayanan Sundaram (Intel Corp.)
* ******************************************************************************/
#include "GMDP/gmdp.h"
//#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <omp.h>
#include <vector>
#include <utility>
#include <sys/time.h>
#ifdef __ASSERT
#include <assert.h>
#endif
#include "Graph.h"
#include "GraphProgram.h"
#include "SPMV.h"
#define EASYPERF
#ifdef EASYPERF
#include <cstdint>
#include "easyperf.h"
#endif
namespace GraphMat {
const int UNTIL_CONVERGENCE = -1;
// Reusable scratch vectors for run_graph_program: px holds per-vertex
// messages (type T), py holds per-vertex reduction results (type U).
// Allocated by graph_program_init, released by graph_program_clear.
template<class T, class U, class V>
struct run_graph_program_temp_structure {
    GraphMat::SpVec<GraphMat::DenseSegment<T> >* px;
    GraphMat::SpVec<GraphMat::DenseSegment<U> >* py;
};
template<class T, class U, class V, class E>
struct run_graph_program_temp_structure<T,U,V> graph_program_init(const GraphProgram<T,U,V,E>& gp, const Graph<V, E>& g) {
struct run_graph_program_temp_structure<T,U,V> rgpts;
rgpts.px = new GraphMat::SpVec<GraphMat::DenseSegment<T> >(g.nvertices, GraphMat::get_global_nrank(), GraphMat::vector_partition_fn);
T _t;
rgpts.px->setAll(_t);
rgpts.py = new GraphMat::SpVec<GraphMat::DenseSegment<U> >(g.nvertices, GraphMat::get_global_nrank(), GraphMat::vector_partition_fn);
U _u;
rgpts.py->setAll(_u);
return rgpts;
}
// Releases the scratch vectors allocated by graph_program_init.
// The pointers are left dangling; the structure must not be reused
// without calling graph_program_init again.
template<class T, class U, class V>
void graph_program_clear(struct run_graph_program_temp_structure<T,U,V>& rgpts) {
    delete rgpts.py;
    delete rgpts.px;
}
template <class T,class U, class V, class E>
void send_message(bool a, V _v, T* b, void* gpv) {
GraphProgram<T,U,V,E>* gp = (GraphProgram<T,U,V,E>*) gpv;
if(a == true) {
gp->send_message(_v, *b);
}
}
// Callback for the "apply" phase: hands the reduced value y and the vertex
// property (by pointer, so it can be updated) to the graph program's apply.
template <class T, class U, class V, class E>
void apply_func(U y, V* b, void* gpv) {
    GraphProgram<T,U,V,E>* gp = (GraphProgram<T,U,V,E>*) gpv;
    gp->apply(y, *b);
}
// Main vertex-program driver: repeats the GraphMat iteration
// (send_message -> SpMV over the adjacency matrix -> apply/convergence
// check) for `iterations` rounds, or until global convergence when
// iterations <= 0 (see UNTIL_CONVERGENCE). Scratch vectors come from
// rgpts when supplied, otherwise they are allocated and freed locally.
template <class T, typename U, class V, class E>
void run_graph_program(GraphProgram<T,U,V,E>* gp, Graph<V,E>& g, int iterations=1, struct run_graph_program_temp_structure<T,U,V>* rgpts=NULL) { //iterations = -1 ==> until convergence
    int it = 0;
    int converged = 1;
    struct timeval start, end, init_start, init_end, iteration_start, iteration_end;
    double time;
    int global_myrank = GraphMat::get_global_myrank();
    gettimeofday(&init_start, 0);
    auto act = gp->getActivity();
    // Allocate scratch vectors only when the caller did not pass a
    // pre-initialized run_graph_program_temp_structure.
    GraphMat::SpVec<GraphMat::DenseSegment<T> >* px;
    GraphMat::SpVec<GraphMat::DenseSegment<U> >* py;
    if (rgpts == NULL) {
        px = new GraphMat::SpVec<GraphMat::DenseSegment<T> >(g.nvertices, GraphMat::get_global_nrank(), GraphMat::vector_partition_fn);
        T _t;
        px->setAll(_t);
        py = new GraphMat::SpVec<GraphMat::DenseSegment<U> >(g.nvertices, GraphMat::get_global_nrank(), GraphMat::vector_partition_fn);
        U _u;
        py->setAll(_u);
    }
    // x: message vector; y: reduction vector (aliases of whichever storage
    // is in use this call).
    GraphMat::SpVec<GraphMat::DenseSegment<T> >& x = (rgpts==NULL)?(*px):*(rgpts->px);//*px;
    GraphMat::SpVec<GraphMat::DenseSegment<U> >& y = (rgpts==NULL)?(*py):*(rgpts->py);//*py;
    if (act == ALL_VERTICES) {
        g.setAllActive();
    }
#ifdef __TIMING
    printf("Nvertices = %d \n", g.getNumberOfVertices());
#endif
#ifdef EASYPERF
    perf_init(4, EV_CYCLES, EV_INSTR, EV_BRANCH, EV_BRANCH_MISS);
    //perf_init(2, EV_CYCLES | PERFMON_EVENTSEL_OS | PERFMON_EVENTSEL_USR, EV_INSTR | PERFMON_EVENTSEL_OS | PERFMON_EVENTSEL_USR);
    //perf_init(2, EV_CYCLES | PERFMON_EVENTSEL_USR, EV_INSTR | PERFMON_EVENTSEL_USR);
    uint64_t ezStart[4], ezEnd[4];
    perf_read_all(ezStart);
#endif
    gettimeofday(&init_end, 0);
#ifdef __TIMING
    time = (init_end.tv_sec-init_start.tv_sec)*1e3+(init_end.tv_usec-init_start.tv_usec)*1e-3;
    printf("GraphMat init time = %f ms \n", time);
#endif
    while(1) {
        gettimeofday(&iteration_start, 0);
        GraphMat::Clear(&x);
        GraphMat::Clear(&y);
        converged = 1;
        // Phase 1: collect messages from active vertices into x.
        gettimeofday(&start, 0);
        GraphMat::IntersectReduce(g.active, g.vertexproperty, &x, send_message<T,U,V,E>, (void*)gp);
#ifdef __TIMING
        printf("x.length = %d \n", x.getNNZ());
#endif
        gettimeofday(&end, 0);
#ifdef __TIMING
        time = (end.tv_sec-start.tv_sec)*1e3+(end.tv_usec-start.tv_usec)*1e-3;
        printf("Send message time = %.3f ms \n", time);
#endif
        gettimeofday(&start, 0);
        //do SpMV
        // Phase 2: propagate messages along edges (direction chosen by the
        // program); ALL_EDGES runs both directions into the same y.
        if (gp->getOrder() == OUT_EDGES) {
            SpMTSpV(g, gp, &x, &y);
        } else if (gp->getOrder() == IN_EDGES) {
            SpMSpV(g, gp, &x, &y);
        } else if (gp->getOrder() == ALL_EDGES) {
            SpMTSpV(g, gp, &x, &y);
            SpMSpV(g, gp, &x, &y);
        } else {
            printf("Unrecognized option \n");
            exit(1);
        }
        gettimeofday(&end, 0);
#ifdef __TIMING
        time = (end.tv_sec-start.tv_sec)*1e3+(end.tv_usec-start.tv_usec)*1e-3;
        printf("SPMV time = %.3f ms \n", time);
#endif
        gettimeofday(&start, 0);
        g.setAllInactive();
        //update state and activity and check for convergence if needed
        // NOTE(review): nout and total_search are written nowhere below —
        // apparently leftovers.
        int nout = 0;
        int total_search = 0;
        int local_converged = 1;
        converged = 1;
        //GraphMat::IntersectReduce(g.active, y, &g.vertexproperty, set_y<U,V>);
        //auto apply_func = set_y_apply<U,V>;
        //GraphMat::Apply(y, &g.vertexproperty, apply_func<T,U,V>, (void*)gp);
        // Phase 3: for every locally-owned segment, walk the bit vector of
        // updated vertices, apply the program, and re-activate any vertex
        // whose property changed.
        for(int segmentId = 0 ; segmentId < y.nsegments ; segmentId++)
        {
            if(y.nodeIds[segmentId] == global_myrank)
            {
                auto segment = y.segments[segmentId]->properties;
                auto vpValueArray = g.vertexproperty->segments[segmentId]->properties->value;
#pragma omp parallel for reduction(&:local_converged)
                for (int i = 0; i < y.segments[segmentId]->num_ints; i++) {
                    unsigned int value = segment->bit_vector[i];
                    while (value != 0) {
                        // Extract the lowest set bit -> vertex index within segment.
                        int last_bit = _bit_scan_forward(value);
                        int idx = i*32 + last_bit;
                        V old_prop;
                        //old_prop = g.vertexproperty.segments[segmentId].properties->value[idx];
                        old_prop = vpValueArray[idx];
                        //gp->apply(segment->value[idx], g.vertexproperty.segments[segmentId].properties->value[idx]);
                        gp->apply(segment->value[idx], vpValueArray[idx]);
                        if (old_prop != vpValueArray[idx]) {
                            g.active->segments[segmentId]->properties->value[idx] = true;
                            GraphMat::set_bitvector(idx, g.active->segments[segmentId]->properties->bit_vector);
                            local_converged = 0;
                        }
                        // NOTE(review): 1<<last_bit uses a signed int shift;
                        // for last_bit == 31 this is UB — 1u<<last_bit would
                        // be safer. Left unchanged here.
                        value &= (~(1<<last_bit));
                    }
                }
            }
        }
        // Convergence is global: every rank must report no property changes.
        MPI_Allreduce(&local_converged, &converged, 1, MPI_INT, MPI_LAND, MPI_COMM_WORLD);
        gettimeofday(&end, 0);
#ifdef __TIMING
        time = (end.tv_sec-start.tv_sec)*1e3+(end.tv_usec-start.tv_usec)*1e-3;
        printf("Apply time = %.3f ms \n", time);
#endif
        gp->do_every_iteration(it);
        gettimeofday(&iteration_end, 0);
#ifdef __TIMING
        time = (iteration_end.tv_sec-iteration_start.tv_sec)*1e3+(iteration_end.tv_usec-iteration_start.tv_usec)*1e-3;
        printf("Iteration %d :: %f msec :: updated %d vertices :: changed %d vertices \n", it, time, y.getNNZ(), g.active->getNNZ());
#endif
        if (act == ALL_VERTICES) {
            g.setAllActive();
        }
        it++;
        if (it == iterations) {
            break;
        }
        if (iterations <= 0 && converged == 1) {
            break;
        }
    }
    struct timeval clear_start, clear_end;
    gettimeofday(&clear_start, 0);
    // Free the locally-allocated scratch; caller-supplied rgpts is kept.
    if (rgpts == NULL) {
        delete px;
        delete py;
    }
#ifdef EASYPERF
    perf_read_all(ezEnd);
    printf("\nEasyperf: Cycles: %10lu Instrs: %10lu\n\n", ezEnd[0]-ezStart[0], ezEnd[1]-ezStart[1]);
    printf("L2 refs: %lu, misses: %lu, miss ratio: %f\n", (ezEnd[2]-ezStart[2]), (ezEnd[3]-ezStart[3]), double(ezEnd[3]-ezStart[3])/double(ezEnd[2]-ezStart[2]));
    perf_close();
#endif
    gettimeofday(&clear_end, 0);
#ifdef __TIMING
    time = (clear_end.tv_sec-clear_start.tv_sec)*1e3+(clear_end.tv_usec-clear_start.tv_usec)*1e-3;
    printf("GraphMat clear time = %f msec \n", time);
#endif
    printf("Completed %d iterations \n", it);
}
} //namespace GraphMat
|
3d25pt.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-2, 3D 25 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
#ifndef min
#define min(x,y) ((x) < (y)? (x) : (y))
#endif
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Subtract the `struct timeval' values X and Y, storing X - Y in RESULT.
 * RESULT is normalized so that 0 <= tv_usec < 1000000 (tv_sec carries the
 * sign, so e.g. -0.5 s is {tv_sec = -1, tv_usec = 500000}).
 *
 * Returns 1 if the difference is negative, otherwise 0.
 *
 * FIX: the classic GNU-manual version of this routine silently mutated the
 * caller's *y while performing the carry. This version computes the same
 * normalized result and the same return value without modifying *x or *y.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Do the arithmetic in a single 64-bit microsecond domain; this removes
     * the carry juggling entirely. */
    long long diff = (long long)(x->tv_sec - y->tv_sec) * 1000000LL
                   + (long long)(x->tv_usec - y->tv_usec);

    result->tv_sec = (time_t)(diff / 1000000LL);
    result->tv_usec = (suseconds_t)(diff % 1000000LL);
    /* C division truncates toward zero; renormalize so tv_usec >= 0. */
    if (result->tv_usec < 0)
    {
        result->tv_usec += 1000000;
        result->tv_sec -= 1;
    }

    /* Return 1 if result is negative. */
    return diff < 0;
}
/* Driver for the time-tiled order-2 3D 25-point stencil: allocates the
 * double-buffered grid A and coefficient grid roc2, runs the PLUTO/CLooG
 * generated tiled loop nest TESTS times, and reports the best wall time. */
int main(int argc, char *argv[])
{
    int t, i, j, k, test;
    int Nx, Ny, Nz, Nt;
    /* NOTE(review): Nx/Ny/Nz are only set when argc > 3 and Nt only when
     * argc > 4 — with fewer arguments they are used uninitialized below. */
    if (argc > 3) {
        Nx = atoi(argv[1])+8; /* +8 = 4-cell halo on each side */
        Ny = atoi(argv[2])+8;
        Nz = atoi(argv[3])+8;
    }
    if (argc > 4)
        Nt = atoi(argv[4]);

    /* A[0]/A[1] are the two time buffers; roc2 holds per-cell coefficients. */
    double ****A = (double ****) malloc(sizeof(double***)*2);
    /* NOTE(review): this first allocation of roc2 is immediately overwritten
     * two lines below — a small one-off leak. */
    double ***roc2 = (double ***) malloc(sizeof(double**));
    A[0] = (double ***) malloc(sizeof(double**)*Nz);
    A[1] = (double ***) malloc(sizeof(double**)*Nz);
    roc2 = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
        A[0][i] = (double**) malloc(sizeof(double*)*Ny);
        A[1][i] = (double**) malloc(sizeof(double*)*Ny);
        roc2[i] = (double**) malloc(sizeof(double*)*Ny);
        for(j=0;j<Ny;j++){
            A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
            A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
            roc2[i][j] = (double*) malloc(sizeof(double)*Nx);
        }
    }

    // tile size information, including extra element to decide the list length
    int *tile_size = (int*) malloc(sizeof(int));
    tile_size[0] = -1;
    // The list is modified here before source-to-source transformations
    tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
    tile_size[0] = 16;
    tile_size[1] = 16;
    tile_size[2] = 8;
    tile_size[3] = 32;
    tile_size[4] = -1;

    // for timekeeping
    int ts_return = -1;
    struct timeval start, end, result;
    double tdiff = 0.0, min_tdiff=1.e100;

    const int BASE = 1024;

    // initialize variables
    //
    /* NOTE(review): initialization starts at index 1; plane/row/column 0 is
     * left uninitialized although the stencil can touch index 0 — TODO
     * confirm intended. */
    srand(42);
    for (i = 1; i < Nz; i++) {
        for (j = 1; j < Ny; j++) {
            for (k = 1; k < Nx; k++) {
                A[0][i][j][k] = 1.0 * (rand() % BASE);
                roc2[i][j][k] = 2.0 * (rand() % BASE);
            }
        }
    }

#ifdef LIKWID_PERFMON
    LIKWID_MARKER_INIT;
#pragma omp parallel
    {
        LIKWID_MARKER_THREADINIT;
#pragma omp barrier
        LIKWID_MARKER_START("calc");
    }
#endif

    int num_threads = 1;
#if defined(_OPENMP)
    num_threads = omp_get_max_threads();
#endif

    /* 25-point (4th-neighbor in each axis) stencil coefficients. */
    const double coef0 = -0.28472;
    const double coef1 = 0.16000;
    const double coef2 = -0.02000;
    const double coef3 = 0.00254;
    const double coef4 = -0.00018;

    for(test=0; test<TESTS; test++){
        gettimeofday(&start, 0);
        // serial execution - Addition: 6 && Multiplication: 2
        /* Copyright (C) 1991-2014 Free Software Foundation, Inc.
           This file is part of the GNU C Library.
           The GNU C Library is free software; you can redistribute it and/or
           modify it under the terms of the GNU Lesser General Public
           License as published by the Free Software Foundation; either
           version 2.1 of the License, or (at your option) any later version.
           The GNU C Library is distributed in the hope that it will be useful,
           but WITHOUT ANY WARRANTY; without even the implied warranty of
           MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
           Lesser General Public License for more details.
           You should have received a copy of the GNU Lesser General Public
           License along with the GNU C Library; if not, see
           <http://www.gnu.org/licenses/>. */
        /* This header is separate from features.h so that the compiler can
           include it implicitly at the start of every compilation. It must
           not itself include <features.h> or any other header that includes
           <features.h> because the implicit include comes before any feature
           test macros that may be defined in a source file before it first
           explicitly includes a system header. GCC knows the name of this
           header in order to preinclude it. */
        /* glibc's intent is to support the IEC 559 math functionality, real
           and complex. If the GCC (4.9 and later) predefined macros
           specifying compiler intent are available, use them to determine
           whether the overall intent is to support these features; otherwise,
           presume an older compiler has intent to support these features and
           define these macros by default. */
        /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) /
           Unicode 6.0. */
        /* We do not support C11 <threads.h>. */
        /* Auto-generated time-tiled loop nest (PLUTO/CLooG); t1..t8 are tile
         * and point coordinates, lbp/ubp bound the parallelized tile dim. */
        int t1, t2, t3, t4, t5, t6, t7, t8;
        int lb, ub, lbp, ubp, lb2, ub2;
        register int lbv, ubv;
        /* Start of CLooG code */
        if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) {
            for (t1=-1;t1<=floord(Nt-1,2);t1++) {
                lbp=max(ceild(t1,2),ceild(4*t1-Nt+2,4));
                ubp=min(floord(4*Nt+Nz-9,16),floord(8*t1+Nz+2,16));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
                for (t2=lbp;t2<=ubp;t2++) {
                    for (t3=max(max(max(0,ceild(16*t2-Nz+5,8)),t1),2*t1-2*t2+1);t3<=min(min(min(floord(4*Nt+Ny-9,8),floord(8*t1+Ny+7,8)),floord(16*t2+Ny+3,8)),floord(16*t1-16*t2+Nz+Ny+5,8));t3++) {
                        for (t4=max(max(max(0,ceild(t1-3,4)),ceild(16*t2-Nz-19,32)),ceild(8*t3-Ny-19,32));t4<=min(min(min(min(floord(4*Nt+Nx-9,32),floord(8*t1+Nx+7,32)),floord(16*t2+Nx+3,32)),floord(8*t3+Nx-5,32)),floord(16*t1-16*t2+Nz+Nx+5,32));t4++) {
                            for (t5=max(max(max(max(max(0,ceild(16*t2-Nz+5,4)),ceild(8*t3-Ny+5,4)),ceild(32*t4-Nx+5,4)),2*t1),4*t1-4*t2+1);t5<=min(min(min(min(min(floord(16*t1-16*t2+Nz+10,4),2*t3),Nt-1),2*t1+3),4*t2+2),8*t4+6);t5++) {
                                for (t6=max(max(16*t2,4*t5+4),-16*t1+16*t2+8*t5-15);t6<=min(min(16*t2+15,-16*t1+16*t2+8*t5),4*t5+Nz-5);t6++) {
                                    for (t7=max(8*t3,4*t5+4);t7<=min(8*t3+7,4*t5+Ny-5);t7++) {
                                        lbv=max(32*t4,4*t5+4);
                                        ubv=min(32*t4+31,4*t5+Nx-5);
#pragma ivdep
#pragma vector always
                                        for (t8=lbv;t8<=ubv;t8++) {
                                            A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((2.0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) - A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (roc2[ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (((((coef0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef1 * (((((A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef2 * (((((A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef3 * (((((A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef4 * (((((A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])))));;
                                        }
                                    }
                                }
                            }
                        }
                    }
                }
            }
        }
        /* End of CLooG code */
        gettimeofday(&end, 0);
        ts_return = timeval_subtract(&result, &end, &start);
        tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
        min_tdiff = MIN(min_tdiff, tdiff);
        printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
    }
    PRINT_RESULTS(4, "constant")

#ifdef LIKWID_PERFMON
#pragma omp parallel
    {
        LIKWID_MARKER_STOP("calc");
    }
    LIKWID_MARKER_CLOSE;
#endif

    // Free allocated arrays
    for(i=0; i<Nz; i++){
        for(j=0;j<Ny;j++){
            free(A[0][i][j]);
            free(A[1][i][j]);
            free(roc2[i][j]);
        }
        free(A[0][i]);
        free(A[1][i]);
        free(roc2[i]);
    }
    free(A[0]);
    free(A[1]);
    free(roc2);
    return 0;
}
|
parallelEnvironment.h | /*****************************************************************************
* *
* Mixed-mode OpenMP/MPI MicroBenchmark Suite - Version 1.0 *
* *
* produced by *
* *
* Mark Bull, Jim Enright and Fiona Reid *
* *
* at *
* *
* Edinburgh Parallel Computing Centre *
* *
* email: markb@epcc.ed.ac.uk, fiona@epcc.ed.ac.uk *
* *
* *
* Copyright 2012, The University of Edinburgh *
* *
* *
* Licensed under the Apache License, Version 2.0 (the "License"); *
* you may not use this file except in compliance with the License. *
* You may obtain a copy of the License at *
* *
* http://www.apache.org/licenses/LICENSE-2.0 *
* *
* Unless required by applicable law or agreed to in writing, software *
* distributed under the License is distributed on an "AS IS" BASIS, *
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. *
* See the License for the specific language governing permissions and *
* limitations under the License. *
* *
****************************************************************************/
/*-----------------------------------------------------------*/
/* Header file for parallelEnvironment.c. */
/* Contains variables declarations and function prototypes */
/* used to setup the MPI and OpenMP programming environment. */
/*-----------------------------------------------------------*/
#ifndef PARALLELENVIRONMENT_H_
#define PARALLELENVIRONMENT_H_
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include "benchmarkSetup.h"
/* function prototypes */
int initParallelEnv();
int finaliseParallelEnv();
int findRank(int rankIn);
int findNeighbours();
int compareProcNames(int rankA, int rankB);
int setupCommunicators();
int procNameToHash();
int exchangeWorldRanks(int nodeA, int nodeB, int *otherWorldRank);
int sendProcName(int destNode, int srcNode, char *destProcName);
int crossCommBalance(int nodeA, int nodeB);
/* variable declaration */
/* NOTE(review): these are tentative *definitions* in a header; if the
 * header is included by more than one translation unit, linking relies on
 * common-symbol merging (-fcommon, no longer the default in GCC 10+).
 * Declaring them `extern` here with one definition in a .c file would be
 * the portable fix. */
/*MPI variables */
#define TAG 1 /* set tag to match messages */
int myMPIRank, numMPIprocs;
MPI_Comm comm, commCart;
MPI_Comm crossComm, localComm;
int localCommRank, localCommSize, crossCommRank;
MPI_Status status;
char myProcName[MPI_MAX_PROCESSOR_NAME];
int procNameLen;
MPI_Request requestID;
MPI_Status statusArray[4]; /* for haloexchange */
MPI_Request requestArray[4]; /* for haloexchange */
int leftNeighbour, rightNeighbour;
int sizeInteger;
int PPRanks[2]; /* ranks for pingpong or pingping */
/* OpenMP variables */
int myThreadID, numThreads;
/* make myThreadID a thread private variable */
#pragma omp threadprivate(myThreadID)
/*Array to hold the global ID for each thread */
int *globalIDarray;
int threadSupport;
#endif /* PARALLELENVIRONMENT_H_ */
|
absval_hcl_arm.c | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* License); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (c) 2021, OPEN AI LAB
* Author: renzun@openailab.com
*/
#include "graph/tensor.h"
#include "graph/node.h"
#include "graph/graph.h"
#include "utility/sys_port.h"
#include "utility/log.h"
#include "device/cpu/cpu_node.h"
#include "device/cpu/cpu_graph.h"
#include "device/cpu/cpu_module.h"
#include <math.h>
#include <arm_neon.h>
/* Per-node initialization hook: AbsVal keeps no per-node state. */
static int init_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    return 0;
}
/* Per-node teardown hook: nothing to release for AbsVal. */
static int release_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    return 0;
}
/* Pre-run hook: AbsVal needs no buffer preparation before execution. */
static int prerun(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    return 0;
}
/* Executes AbsVal (element-wise |x|) on an FP32 NCHW tensor using NEON,
 * parallelized over the batch*channel planes.
 *
 * BUG FIX: the original advanced the shared pointers idata/odata inside the
 * `#pragma omp parallel for` loop body. With num_thread > 1 those unsynchronized
 * increments are a data race and the threads read/write wrong offsets.
 * Each iteration now derives its own base offset from the plane index c,
 * which is both race-free and identical to the sequential result. */
static int run(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    struct node* ir_node = exec_node->ir_node;
    struct graph* ir_graph = ir_node->graph;
    struct tensor* input_tensor;
    struct tensor* output_tensor;

    input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]);
    output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]);

    float* idata = (float*)input_tensor->data;
    float* odata = (float*)output_tensor->data;

    int channel_num = input_tensor->dims[1];
    int batch_number = input_tensor->dims[0];
    /* Elements per channel plane (H * W); layout is NCHW so planes are
     * contiguous. */
    int channel_size = (input_tensor->dims[2]) * (input_tensor->dims[3]);
    int num_thread = exec_graph->num_thread;

#pragma omp parallel for num_threads(num_thread)
    for (int c = 0; c < channel_num * batch_number; c++)
    {
        /* Per-iteration base pointers: no shared mutable state across threads. */
        const float* in = idata + (size_t)c * channel_size;
        float* out = odata + (size_t)c * channel_size;

        int i = 0;
        /* NEON main loop: 4 floats per iteration. */
        for (; i + 3 < channel_size; i += 4)
        {
            float32x4_t _p = vld1q_f32(in + i);
            _p = vabsq_f32(_p);
            vst1q_f32(out + i, _p);
        }
        /* Scalar tail for channel_size not divisible by 4. */
        for (; i < channel_size; i++)
        {
            out[i] = in[i] < 0 ? -in[i] : in[i];
        }
    }

    return 0;
}
/* Scheduler scoring hook: this NEON kernel only handles FP32 NCHW input;
 * return 0 (cannot run) otherwise, OPS_SCORE_BEST when it applies. */
static int score(struct node_ops* node_ops, struct exec_graph* exec_graph, struct node* exec_node)
{
    struct node* ir_node = exec_node;
    struct graph* ir_graph = ir_node->graph;
    struct tensor* input_tensor;

    input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]);
    if (input_tensor->data_type != TENGINE_DT_FP32 || input_tensor->layout != TENGINE_LAYOUT_NCHW)
        return 0;

    return OPS_SCORE_BEST;
}
/* Operator vtable registered for OP_ABSVAL; reshape/postrun are not needed. */
static struct node_ops hcl_node_ops = {.prerun = prerun,
                                       .run = run,
                                       .reshape = NULL,
                                       .postrun = NULL,
                                       .init_node = init_node,
                                       .release_node = release_node,
                                       .score = score};
/* Registers the NEON AbsVal kernel with the runtime's builtin op table. */
int register_absval_hcl_arm_op()
{
    return register_builtin_node_ops(OP_ABSVAL, &hcl_node_ops);
}
/* Removes the NEON AbsVal kernel from the runtime's builtin op table. */
int unregister_absval_hcl_arm_op()
{
    return unregister_builtin_node_ops(OP_ABSVAL, &hcl_node_ops);
}
aomp_mappings.c | #include <stdio.h>
#include <omp.h>
#include <string.h>
//Shared Variables
int THREAD_LIMIT = 4;
int MAX_TEAMS = 128;
int GENERIC = 0;
int SPMD = 1;
int MAX_THREADS_PER_TEAM = 256;
#ifdef WAVE_SIZE
int WARP_SIZE = WAVE_SIZE;
#else
int WARP_SIZE = 64;
#endif
/*
* Function: recordError
* Description: Updates error number and prints error messages
*/
/*
 * Function: recordError
 * Description: bumps the caller's error counter and reports the offending
 * iteration on stderr. When mask is non-NULL the value is printed from the
 * mask array (hex); otherwise from the int array.
 */
void recordError(int* error , char *message, int iteration, int * array, unsigned long long *mask ){
    ++(*error);
    if (mask != NULL) {
        fprintf(stderr,"%s IS INCORRECT! Iteration: %d Value: %llx\n", message, iteration, mask[iteration]);
    } else {
        fprintf(stderr,"%s IS INCORRECT! Iteration: %d Value: %d\n", message, iteration, array[iteration]);
    }
}
/*
 * Test driver: exercises three OpenMP offload patterns and checks the
 * values reported by the AOMP device-mapping extensions (omp_ext_*)
 * against host-side expectations.  Exits 0 on success, 1 on any mismatch.
 * NOTE(review): the omp_ext_get_* functions are AOMP-specific extensions;
 * their exact semantics are assumed from their names -- confirm against the
 * AOMP runtime headers.
 */
int main()
{
printf("warpsize %d\n", WARP_SIZE);
//Determine which GPU type (NVIDIA or AMD)
// AOMP_GPU is e.g. "gfx906" (AMD) or "sm_70" (NVIDIA); "sm" marks NVIDIA.
char* nvidia= "sm";
char* aomp_gpu= getenv("AOMP_GPU");
int isAMDGPU = 1;
if(aomp_gpu && strstr(aomp_gpu, nvidia) != NULL)
isAMDGPU = 0;
// a hacky way to know the default number of teams
#pragma omp target teams map(tofrom:MAX_TEAMS)
{
if (omp_get_team_num() == 0)
MAX_TEAMS = omp_get_num_teams();
}
fprintf(stderr, "MAX_TEAMS: %d\n", MAX_TEAMS);
//Logic for correct shared variables - AMD vs NVIDIA GPU
if(!isAMDGPU){
printf("%s\n", getenv("AOMP_GPU"));
MAX_THREADS_PER_TEAM = 128;
WARP_SIZE = 32;
}
int N = 128;
int NN = 1024;
// Per-iteration observations captured on the device, verified on the host.
int thread_num[NN];
int team_num[NN];
int default_dev[NN];
int warp_id[NN];
int lane_id[NN];
int smid[NN];
int is_spmd_mode[NN];
int master_thread_id[NN];
int num_teams[NN];
int num_threads[NN];
unsigned long long active_mask[NN];
unsigned long long mask = 0;
int i;
int correctTeamNum = -1;
int correctNumTeams = -1;
int correctWarpId = -1;
int remainder = 0;
int errors = 0;
//Initialize arrays
for (i=0; i<NN; i++)
active_mask[i] = 0;
for (i=0; i<NN; i++)
thread_num[i]=team_num[i]=default_dev[i]=warp_id[i]=lane_id[i]=master_thread_id[i]=smid[i]=is_spmd_mode[i]=num_threads[i]=num_teams[i] = -1;
// --- Pattern 1: combined construct with an explicit thread_limit of 4 ---
fprintf(stderr,"#pragma omp target teams distribute parallel for thread_limit(4)\n");
#pragma omp target teams distribute parallel for thread_limit(4)
{
for (int j = 0; j< N; j++) {
thread_num[j] = omp_get_thread_num();
num_threads[j] = omp_get_num_threads();
team_num[j] = omp_get_team_num();
num_teams[j] = omp_get_num_teams();
default_dev[j] = omp_get_default_device();
warp_id[j] = omp_ext_get_warp_id();
lane_id[j] = omp_ext_get_lane_id();
active_mask[j] = omp_ext_get_active_threads_mask();
smid[j] = omp_ext_get_smid();
master_thread_id[j] = omp_ext_get_master_thread_id();
is_spmd_mode[j] = omp_ext_is_spmd_mode();
}
}
fprintf(stderr," i thrd# team# dev# warp# lane# MastThrd smid SPMD num_threads num_teams ActiveMask\n");
for (i=0; i<N; i++)
fprintf(stderr," %4d %4d %4d %4d %4d %4d %4d %4d %4d %10d %10d %16llx\n",
i,thread_num[i],team_num[i],default_dev[i],warp_id[i],lane_id[i],master_thread_id[i],smid[i],is_spmd_mode[i],num_threads[i], num_teams[i],active_mask[i]);
//Verify Results - #pragma omp target teams distribute parallel for thread_limit(4)
for (i = 0; i < N; i++){
//check thread #
if (thread_num[i] != i % THREAD_LIMIT)
recordError(&errors, "THREAD NUMBER", i, thread_num, NULL);
//check team #
if (i % THREAD_LIMIT == 0){
correctTeamNum++;
if(isAMDGPU)
correctTeamNum = correctTeamNum % MAX_TEAMS;
}
if (team_num[i] != correctTeamNum)
recordError(&errors, "TEAM NUMBER", i, team_num, NULL);
//check device #, We use default device (0) for testing
if (default_dev[i] != 0)
recordError(&errors, "DEVICE NUMBER", i, default_dev, NULL);
//check warp #
if (warp_id[i] != 0)
recordError(&errors, "WARP NUMBER", i, warp_id, NULL);
//check lane #
if (lane_id[i] != i % THREAD_LIMIT)
recordError(&errors, "LANE NUMBER", i, lane_id, NULL);
//check master thread #
if (master_thread_id[i] != 0 )
recordError(&errors, "MASTER THREAD NUMBER", i, master_thread_id, NULL);
//check SPMD mode #
if (is_spmd_mode[i] != SPMD )
recordError(&errors, "SPMD NUMBER", i, is_spmd_mode, NULL);
//check num threads
if (num_threads[i] != THREAD_LIMIT )
recordError(&errors, "NUM THREADS", i, num_threads, NULL);
//check num teams
//If number of iterations is not divisible by THREAD_LIMIT get the ceiling
if(N % THREAD_LIMIT != 0)
correctNumTeams = ((N + num_threads[i]) / num_threads[i]);
else
correctNumTeams = N / THREAD_LIMIT;
if (correctNumTeams > MAX_TEAMS && isAMDGPU)
correctNumTeams = MAX_TEAMS;
if (num_teams[i] != correctNumTeams)
recordError(&errors, "NUM TEAMS", i, num_teams, NULL);
//check active mask
// With thread_limit(4), a fully populated team has mask 0xf; the final
// partial team (if any) has 'remainder' low bits set.
mask = 0;
if(N % THREAD_LIMIT != 0){
remainder = N % THREAD_LIMIT;
//set bit mask to proper value
for (int j = 0 ; j < remainder; j++){
mask = mask << 1;
mask = mask + 1;
}
}
//Mask for last evenly divided iteration
if (i < N - remainder){
mask = 0xf;
}
if (active_mask[i] != mask)
recordError(&errors, "ACTIVE MASK", i, NULL, active_mask);
}
//Reset Arrays
for (i=0; i<NN; i++)
active_mask[i] = 0;
for (i=0; i<NN; i++)
thread_num[i]=team_num[i]=default_dev[i]=warp_id[i]=lane_id[i]=master_thread_id[i]=smid[i]=is_spmd_mode[i]=num_threads[i]=num_teams[i] = -1;
// --- Pattern 2: combined construct with default (full) team size ---
fprintf(stderr,"#pragma omp target teams distribute parallel for\n");
#pragma omp target teams distribute parallel for
{
for (int j = 0; j< N; j++) {
thread_num[j] = omp_get_thread_num();
num_threads[j] = omp_get_num_threads();
team_num[j] = omp_get_team_num();
num_teams[j] = omp_get_num_teams();
default_dev[j] = omp_get_default_device();
warp_id[j] = omp_ext_get_warp_id();
lane_id[j] = omp_ext_get_lane_id();
active_mask[j] = omp_ext_get_active_threads_mask();
smid[j] = omp_ext_get_smid();
master_thread_id[j] = omp_ext_get_master_thread_id();
is_spmd_mode[j] = omp_ext_is_spmd_mode();
}
}
fprintf(stderr," i thrd# team# dev# warp# lane# MastThrd smid SPMD num_threads num_teams ActiveMask\n");
for (i=0; i<N; i++)
fprintf(stderr," %4d %4d %4d %4d %4d %4d %4d %4d %4d %10d %10d %16llx\n",
i,thread_num[i],team_num[i],default_dev[i],warp_id[i],lane_id[i],master_thread_id[i],smid[i],is_spmd_mode[i],num_threads[i], num_teams[i],active_mask[i]);
//Verify Results - #pragma omp target teams distribute parallel for
correctTeamNum = -1;
correctNumTeams = -1;
//int correctWarpId = -1;
//Verify Results
for (i = 0; i < N; i++){
//check thread #
if (thread_num[i] != i % MAX_THREADS_PER_TEAM)
recordError(&errors, "THREAD NUMBER", i, thread_num, NULL);
//check team #
if (i % MAX_THREADS_PER_TEAM == 0){
correctTeamNum++;
correctTeamNum = correctTeamNum % MAX_TEAMS;
}
if (team_num[i] != correctTeamNum)
recordError(&errors, "TEAM NUMBER", i, team_num, NULL);
//check device #, We use default device (0) for testing
if (default_dev[i] != 0)
recordError(&errors, "DEVICE NUMBER", i, default_dev, NULL);
//check warp #
if (i % WARP_SIZE == 0){
correctWarpId++;
correctWarpId = correctWarpId % (MAX_THREADS_PER_TEAM/WARP_SIZE);
}
if (warp_id[i] != correctWarpId)
recordError(&errors, "WARP NUMBER", i, warp_id, NULL);
//check lane #
if (lane_id[i] != i % WARP_SIZE)
recordError(&errors, "LANE NUMBER", i, lane_id, NULL);
//check master thread #
if (master_thread_id[i] != MAX_THREADS_PER_TEAM - WARP_SIZE)
recordError(&errors, "MASTER THREAD NUMBER", i, master_thread_id, NULL);
//check SPMD mode #
if (is_spmd_mode[i] != SPMD )
recordError(&errors, "SPMD NUMBER", i, is_spmd_mode, NULL);
//check num threads
if (num_threads[i] != MAX_THREADS_PER_TEAM )
recordError(&errors, "NUM THREADS", i, num_threads, NULL);
//check num teams
//If number of iterations is not divisible by MAX_THREADS_PER_TEAM get the ceiling
if(N % MAX_THREADS_PER_TEAM != 0)
correctNumTeams = ((N + num_threads[i]) / num_threads[i]);
else
correctNumTeams = N / MAX_THREADS_PER_TEAM;
if (num_teams[i] != correctNumTeams)
recordError(&errors, "NUM TEAMS", i, num_teams, NULL);
//check active mask
remainder = 0;
mask = 0;
//Set mask for 64 or fewer active threads in first warp
if (N < WARP_SIZE + 1){
remainder = N % WARP_SIZE;
}
else
remainder = (N % MAX_THREADS_PER_TEAM) % WARP_SIZE;
//Set mask for warps with full (64) active threads
if (i < N - remainder){
if(WARP_SIZE == 64)
mask = 0xffffffffffffffff;
else
mask = 0xffffffff;
}
else{ //set mask for iterations with non full warps
mask = 0;
for (int j = 0 ; j < remainder; j++){
mask = mask << 1;
mask = mask + 1;
}
}
if (active_mask[i] != mask){
recordError(&errors, "ACTIVE MASK", i, NULL, active_mask);
}
}
//Reset Arrays
for (i=0; i<NN; i++)
active_mask[i] = 0;
for (i=0; i<NN; i++)
thread_num[i]=team_num[i]=default_dev[i]=warp_id[i]=lane_id[i]=master_thread_id[i]=smid[i]=is_spmd_mode[i]=num_threads[i]=num_teams[i] = -1;
// --- Pattern 3: generic (non-SPMD) target teams, one record per team ---
fprintf(stderr,"#pragma omp target teams \n");
#pragma omp target teams
{
int j = omp_get_team_num();
thread_num[j] = omp_get_thread_num();
num_threads[j] = omp_get_num_threads();
team_num[j] = omp_get_team_num();
num_teams[j] = omp_get_num_teams();
default_dev[j] = omp_get_default_device();
warp_id[j] = omp_ext_get_warp_id();
lane_id[j] = omp_ext_get_lane_id();
active_mask[j] = omp_ext_get_active_threads_mask();
smid[j] = omp_ext_get_smid();
master_thread_id[j] = omp_ext_get_master_thread_id();
is_spmd_mode[j] = omp_ext_is_spmd_mode();
}
fprintf(stderr," i thrd# team# dev# warp# lane# MastThrd smid SPMD num_threads num_teams ActiveMask\n");
for (i=0; i<N; i++)
fprintf(stderr," %4d %4d %4d %4d %4d %4d %4d %4d %4d %10d %10d %16llx\n",
i,thread_num[i],team_num[i],default_dev[i],warp_id[i],lane_id[i],master_thread_id[i],smid[i],is_spmd_mode[i],num_threads[i],num_teams[i],active_mask[i]);
//Verify Results - #pragma omp target teams
correctTeamNum = -1;
correctNumTeams = -1;
//Verify Results
for (i = 0; i < N; i++){
//Only check iterations up to MAX_TEAMS
if(i < MAX_TEAMS){
//check thread #
if (thread_num[i] != 0)
recordError(&errors, "THREAD NUMBER", i, thread_num, NULL);
//check team #
if (team_num[i] != i)
recordError(&errors, "TEAM NUMBER", i, team_num, NULL);
//check device #, We use default device (0) for testing
if (default_dev[i] != 0)
recordError(&errors, "DEVICE NUMBER", i, default_dev, NULL);
//check warp #
// Generic mode: the initial thread runs in the last warp of the team;
// either of the two adjacent warp ids is accepted.
if ((warp_id[i] != (MAX_THREADS_PER_TEAM - WARP_SIZE) / WARP_SIZE) &&
(warp_id[i] != (MAX_THREADS_PER_TEAM - WARP_SIZE) / WARP_SIZE +1))
recordError(&errors, "WARP NUMBER", i, warp_id, NULL);
//check lane #
if (lane_id[i] != 0)
recordError(&errors, "LANE NUMBER", i, lane_id, NULL);
//check master thread #
if ((master_thread_id[i] != MAX_THREADS_PER_TEAM - WARP_SIZE) &&
(master_thread_id[i] != MAX_THREADS_PER_TEAM))
recordError(&errors, "MASTER THREAD NUMBER", i, master_thread_id, NULL);
//check SPMD mode #
if (is_spmd_mode[i] != GENERIC )
recordError(&errors, "SPMD NUMBER", i, is_spmd_mode, NULL);
//check num threads
if (num_threads[i] != 1 )
recordError(&errors, "NUM THREADS", i, num_threads, NULL);
//check num teams
//If number of iterations is not divisible by MAX_THREADS_PER_TEAM get the ceiling
if (num_teams[i] != MAX_TEAMS )
recordError(&errors, "NUM TEAMS", i, num_teams, NULL);
//check active mask
remainder = 0;
mask = 1;
if (active_mask[i] != mask){
recordError(&errors, "ACTIVE MASK", i, NULL, active_mask);
}
}
else{
if(thread_num[i] != -1 || team_num[i] != -1 || default_dev[i] != -1 || warp_id[i] != -1 || lane_id[i] != -1 || master_thread_id[i] != -1 || is_spmd_mode[i] != -1 || num_threads[i] != -1 || num_teams[i] != -1 || active_mask[i] != 0){
fprintf(stderr, "Data after iteration %d is changed and should be untouched!!\n", MAX_TEAMS - 1);
errors++;
}
}
}
//Print results and return total errors
if(!errors){
fprintf(stderr, "Success\n");
return 0;
}
else {
fprintf(stderr, "Fail\n");
fprintf(stderr, "Errors: %d\n", errors);
return 1;
}
}
|
shortcut_layer.c | #include "shortcut_layer.h"
#include "convolutional_layer.h"
#include "dark_cuda.h"
#include "blas.h"
#include "utils.h"
#include "gemm.h"
#include <stdio.h>
#include <assert.h>
/*
 * make_shortcut_layer: build a shortcut (residual) layer that adds the
 * outputs of 'n' earlier layers (indices in input_layers) to the layer
 * input, producing a w*h*c output per batch item.  With weights_type
 * PER_FEATURE/PER_CHANNEL the contributions are mixed with (n+1) learned
 * weights (per layer, or per layer per channel).
 * Ownership: input_layers/input_sizes/layers_* arrays are borrowed from the
 * caller; output/delta/weights buffers are allocated here.
 * Note: "normalizion" is the (misspelled) field name used project-wide.
 */
layer make_shortcut_layer(int batch, int n, int *input_layers, int* input_sizes, int w, int h, int c,
float **layers_output, float **layers_delta, float **layers_output_gpu, float **layers_delta_gpu, WEIGHTS_TYPE_T weights_type, WEIGHTS_NORMALIZATION_T weights_normalizion,
ACTIVATION activation, int train)
{
fprintf(stderr, "Shortcut Layer: ");
int i;
for(i = 0; i < n; ++i) fprintf(stderr, "%d, ", input_layers[i]);
layer l = { (LAYER_TYPE)0 };
l.train = train;
l.type = SHORTCUT;
l.batch = batch;
l.activation = activation;
l.n = n;
l.input_layers = input_layers;
l.input_sizes = input_sizes;
l.layers_output = layers_output;
l.layers_delta = layers_delta;
l.weights_type = weights_type;
l.weights_normalizion = weights_normalizion;
l.learning_rate_scale = 1; // not necessary
l.w = l.out_w = w;
l.h = l.out_h = h;
l.c = l.out_c = c;
l.outputs = w*h*c;
l.inputs = l.outputs;
l.index = l.input_layers[0];
if (train) l.delta = (float*)xcalloc(l.outputs * batch, sizeof(float));
l.output = (float*)xcalloc(l.outputs * batch, sizeof(float));
l.nweights = 0;
if (l.weights_type == PER_FEATURE) l.nweights = (l.n + 1);
else if (l.weights_type == PER_CHANNEL) l.nweights = (l.n + 1) * l.c;
if (l.nweights > 0) {
// xcalloc (not plain calloc) for consistency with l.output/l.delta above:
// plain calloc could return NULL on OOM and be dereferenced unchecked.
l.weights = (float*)xcalloc(l.nweights, sizeof(float));
// near-identity initialization so the shortcut starts as a plain sum
for (i = 0; i < l.nweights; ++i) l.weights[i] = 1 + 0.01*rand_uniform(-1, 1);
if (train) l.weight_updates = (float*)xcalloc(l.nweights, sizeof(float));
l.update = update_shortcut_layer;
}
l.forward = forward_shortcut_layer;
l.backward = backward_shortcut_layer;
#ifndef GPU
if (l.activation == SWISH || l.activation == MISH) l.activation_input = (float*)xcalloc(l.batch*l.outputs, sizeof(float));
#endif // GPU
#ifdef GPU
if (l.activation == SWISH || l.activation == MISH) l.activation_input_gpu = cuda_make_array(l.activation_input, l.batch*l.outputs);
l.forward_gpu = forward_shortcut_layer_gpu;
l.backward_gpu = backward_shortcut_layer_gpu;
if (l.nweights > 0) {
l.update_gpu = update_shortcut_layer_gpu;
l.weights_gpu = cuda_make_array(l.weights, l.nweights);
if (train) l.weight_updates_gpu = cuda_make_array(l.weight_updates, l.nweights);
}
if (train) l.delta_gpu = cuda_make_array(l.delta, l.outputs*batch);
l.output_gpu = cuda_make_array(l.output, l.outputs*batch);
l.input_sizes_gpu = cuda_make_int_array_new_api(input_sizes, l.n);
l.layers_output_gpu = (float**)cuda_make_array_pointers((void**)layers_output_gpu, l.n);
l.layers_delta_gpu = (float**)cuda_make_array_pointers((void**)layers_delta_gpu, l.n);
#endif // GPU
l.bflops = l.out_w * l.out_h * l.out_c * l.n / 1000000000.;
if (l.weights_type) l.bflops *= 2;
fprintf(stderr, " wt = %d, wn = %d, outputs:%4d x%4d x%4d %5.3f BF\n", l.weights_type, l.weights_normalizion, l.out_w, l.out_h, l.out_c, l.bflops);
return l;
}
/*
 * resize_shortcut_layer: re-size the layer's buffers after a network
 * resize to w x h (channel count is unchanged) and re-capture pointers to
 * the (re-allocated) outputs/deltas of the source layers.
 */
void resize_shortcut_layer(layer *l, int w, int h, network *net)
{
//assert(l->w == l->out_w);
//assert(l->h == l->out_h);
l->w = l->out_w = w;
l->h = l->out_h = h;
l->outputs = w*h*l->out_c;
l->inputs = l->outputs;
if (l->train) l->delta = (float*)xrealloc(l->delta, l->outputs * l->batch * sizeof(float));
l->output = (float*)xrealloc(l->output, l->outputs * l->batch * sizeof(float));
int i;
for (i = 0; i < l->n; ++i) {
int index = l->input_layers[i];
l->input_sizes[i] = net->layers[index].outputs;
l->layers_output[i] = net->layers[index].output;
l->layers_delta[i] = net->layers[index].delta;
// shortcut sources must match the new spatial size
assert(l->w == net->layers[index].out_w && l->h == net->layers[index].out_h);
}
// xrealloc (not plain realloc), consistent with delta/output above: plain
// realloc leaked the old buffer and left a NULL pointer on failure.
if (l->activation == SWISH || l->activation == MISH) l->activation_input = (float*)xrealloc(l->activation_input, l->batch*l->outputs * sizeof(float));
#ifdef GPU
cuda_free(l->output_gpu);
l->output_gpu = cuda_make_array(l->output, l->outputs*l->batch);
if (l->train) {
cuda_free(l->delta_gpu);
l->delta_gpu = cuda_make_array(l->delta, l->outputs*l->batch);
}
// temporary host-side pointer tables, copied to the device then freed
float **layers_output_gpu = (float **)xcalloc(l->n, sizeof(float *));
float **layers_delta_gpu = (float **)xcalloc(l->n, sizeof(float *));
for (i = 0; i < l->n; ++i) {
const int index = l->input_layers[i];
layers_output_gpu[i] = net->layers[index].output_gpu;
layers_delta_gpu[i] = net->layers[index].delta_gpu;
}
memcpy_ongpu(l->input_sizes_gpu, l->input_sizes, l->n * sizeof(int));
memcpy_ongpu(l->layers_output_gpu, layers_output_gpu, l->n * sizeof(float*));
memcpy_ongpu(l->layers_delta_gpu, layers_delta_gpu, l->n * sizeof(float*));
free(layers_output_gpu);
free(layers_delta_gpu);
if (l->activation == SWISH || l->activation == MISH) {
cuda_free(l->activation_input_gpu);
l->activation_input_gpu = cuda_make_array(l->activation_input, l->batch*l->outputs);
}
#endif
}
/*
 * forward_shortcut_layer: compute output = activation(input + sum of
 * source-layer outputs).  A fast elementwise path handles the common
 * unweighted single-source case with matching shape; everything else goes
 * through the generic multilayer kernel.
 */
void forward_shortcut_layer(const layer l, network_state state)
{
const layer *src = &state.net.layers[l.index];
const int plain_residual = (l.nweights == 0) && (l.n == 1)
&& (src->w == l.w) && (src->h == l.h) && (src->c == l.c);
if (plain_residual) {
int idx;
const int total = l.batch * l.w * l.h * l.c;
#pragma omp parallel for
for (idx = 0; idx < total; ++idx) {
l.output[idx] = state.input[idx] + src->output[idx];
}
}
else {
shortcut_multilayer_cpu(l.outputs * l.batch, l.outputs, l.batch, l.n, l.input_sizes, l.layers_output, l.output, state.input, l.weights, l.nweights, l.weights_normalizion);
}
// apply the activation in place
switch (l.activation) {
case SWISH: activate_array_swish(l.output, l.outputs*l.batch, l.activation_input, l.output); break;
case MISH: activate_array_mish(l.output, l.outputs*l.batch, l.activation_input, l.output); break;
default: activate_array_cpu_custom(l.output, l.outputs*l.batch, l.activation); break;
}
}
/*
 * backward_shortcut_layer: scale l.delta by the activation gradient, then
 * propagate it to the input delta and to every source layer's delta.
 */
void backward_shortcut_layer(const layer l, network_state state)
{
const int total = l.outputs * l.batch;
switch (l.activation) {
case SWISH: gradient_array_swish(l.output, total, l.activation_input, l.delta); break;
case MISH: gradient_array_mish(total, l.activation_input, l.delta); break;
default: gradient_array(l.output, total, l.activation, l.delta); break;
}
backward_shortcut_multilayer_cpu(total, l.outputs, l.batch, l.n, l.input_sizes,
l.layers_delta, state.delta, l.delta, l.weights, l.weight_updates, l.nweights, state.input, l.layers_output, l.weights_normalizion);
}
/*
 * update_shortcut_layer: SGD step for the mixing weights (weight decay,
 * gradient step, momentum).  No-op for unweighted shortcuts.
 */
void update_shortcut_layer(layer l, int batch, float learning_rate_init, float momentum, float decay)
{
if (l.nweights <= 0) return; // nothing to train
const float lr = learning_rate_init * l.learning_rate_scale;
axpy_cpu(l.nweights, -decay*batch, l.weights, 1, l.weight_updates, 1);
axpy_cpu(l.nweights, lr / batch, l.weight_updates, 1, l.weights, 1);
scal_cpu(l.nweights, momentum, l.weight_updates, 1);
}
#ifdef GPU
// GPU forward pass: always uses the generic multilayer kernel (the older
// fast-path calls are kept commented out below), then applies the
// activation in place on the device.
void forward_shortcut_layer_gpu(const layer l, network_state state)
{
//copy_ongpu(l.outputs*l.batch, state.input, 1, l.output_gpu, 1);
//simple_copy_ongpu(l.outputs*l.batch, state.input, l.output_gpu);
//shortcut_gpu(l.batch, l.w, l.h, l.c, state.net.layers[l.index].output_gpu, l.out_w, l.out_h, l.out_c, l.output_gpu);
//input_shortcut_gpu(state.input, l.batch, l.w, l.h, l.c, state.net.layers[l.index].output_gpu, l.out_w, l.out_h, l.out_c, l.output_gpu);
//-----------
//if (l.outputs == l.input_sizes[0])
//if(l.n == 1 && l.nweights == 0)
//{
// input_shortcut_gpu(state.input, l.batch, state.net.layers[l.index].w, state.net.layers[l.index].h, state.net.layers[l.index].c,
// state.net.layers[l.index].output_gpu, l.out_w, l.out_h, l.out_c, l.output_gpu);
//}
//else
{
shortcut_multilayer_gpu(l.outputs, l.batch, l.n, l.input_sizes_gpu, l.layers_output_gpu, l.output_gpu, state.input, l.weights_gpu, l.nweights, l.weights_normalizion);
}
if (l.activation == SWISH) activate_array_swish_ongpu(l.output_gpu, l.outputs*l.batch, l.activation_input_gpu, l.output_gpu);
else if (l.activation == MISH) activate_array_mish_ongpu(l.output_gpu, l.outputs*l.batch, l.activation_input_gpu, l.output_gpu);
else activate_array_ongpu(l.output_gpu, l.outputs*l.batch, l.activation);
}
// GPU backward pass: activation gradient into l.delta_gpu first, then the
// multilayer kernel scatters it to state.delta and each source layer.
void backward_shortcut_layer_gpu(const layer l, network_state state)
{
if (l.activation == SWISH) gradient_array_swish_ongpu(l.output_gpu, l.outputs*l.batch, l.activation_input_gpu, l.delta_gpu);
else if (l.activation == MISH) gradient_array_mish_ongpu(l.outputs*l.batch, l.activation_input_gpu, l.delta_gpu);
else gradient_array_ongpu(l.output_gpu, l.outputs*l.batch, l.activation, l.delta_gpu);
backward_shortcut_multilayer_gpu(l.outputs, l.batch, l.n, l.input_sizes_gpu, l.layers_delta_gpu, state.delta, l.delta_gpu,
l.weights_gpu, l.weight_updates_gpu, l.nweights, state.input, l.layers_output_gpu, l.weights_normalizion);
//axpy_ongpu(l.outputs*l.batch, 1, l.delta_gpu, 1, state.delta, 1);
//shortcut_gpu(l.batch, l.out_w, l.out_h, l.out_c, l.delta_gpu, l.w, l.h, l.c, state.net.layers[l.index].delta_gpu);
}
// GPU SGD step for the mixing weights; NaN/Inf are scrubbed first to keep
// training stable (reset for updates, clamp for weights).
void update_shortcut_layer_gpu(layer l, int batch, float learning_rate_init, float momentum, float decay)
{
if (l.nweights > 0) {
float learning_rate = learning_rate_init*l.learning_rate_scale;
//float momentum = a.momentum;
//float decay = a.decay;
//int batch = a.batch;
reset_nan_and_inf(l.weight_updates_gpu, l.nweights);
fix_nan_and_inf(l.weights_gpu, l.nweights);
axpy_ongpu(l.nweights, -decay*batch, l.weights_gpu, 1, l.weight_updates_gpu, 1);
axpy_ongpu(l.nweights, learning_rate / batch, l.weight_updates_gpu, 1, l.weights_gpu, 1);
scal_ongpu(l.nweights, momentum, l.weight_updates_gpu, 1);
//if (l.clip) {
// constrain_gpu(l.nweights, l.clip, l.weights_gpu, 1);
//}
}
}
// Copy weights device -> host; async copy followed by a stream sync so the
// host buffer is valid on return.
void pull_shortcut_layer(layer l)
{
cuda_pull_array_async(l.weights_gpu, l.weights, l.nweights);
CHECK_CUDA(cudaPeekAtLastError());
CHECK_CUDA(cudaStreamSynchronize(get_cuda_stream()));
}
// Copy weights host -> device.
void push_shortcut_layer(layer l)
{
cuda_push_array(l.weights_gpu, l.weights, l.nweights);
CHECK_CUDA(cudaPeekAtLastError());
}
#endif
|
GB_binop__lt_bool.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__lt_bool
// A.*B function (eWiseMult): GB_AemultB__lt_bool
// A*D function (colscale): GB_AxD__lt_bool
// D*A function (rowscale): GB_DxB__lt_bool
// C+=B function (dense accum): GB_Cdense_accumB__lt_bool
// C+=b function (dense accum): GB_Cdense_accumb__lt_bool
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__lt_bool
// C=scalar+B GB_bind1st__lt_bool
// C=scalar+B' GB_bind1st_tran__lt_bool
// C=A+scalar GB_bind2nd__lt_bool
// C=A'+scalar GB_bind2nd_tran__lt_bool
// C type: bool
// A type: bool
// B,b type: bool
// BinaryOp: cij = (aij < bij)
#define GB_ATYPE \
bool
#define GB_BTYPE \
bool
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
bool aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
bool bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = (x < y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LT || GxB_NO_BOOL || GxB_NO_LT_BOOL)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B with C, A, B all dense; the loop body comes from the included
// template, specialized by the GB_* macros above.  (Auto-generated file:
// edit the generator, not this code.)
GrB_Info GB_Cdense_ewise3_noaccum__lt_bool
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B where C is dense and B is sparse; B is pre-sliced into 'ntasks'
// task regions (kfirst/klast/pstart arrays) for the template loop.
GrB_Info GB_Cdense_accumB__lt_bool
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b for a scalar b (passed type-erased via p_bwork) and dense C.
// NOTE(review): the second "return (GrB_SUCCESS)" is unreachable (the
// template branch returns first) -- generator artifact, harmless.
GrB_Info GB_Cdense_accumb__lt_bool
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type bool
bool bwork = (*((bool *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale each column of A by the matching diagonal entry of D,
// via the colscale template; C->x is written directly.
GrB_Info GB_AxD__lt_bool
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *GB_RESTRICT Cx = (bool *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale each row of B by the matching diagonal entry of D,
// via the rowscale template.
GrB_Info GB_DxB__lt_bool
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *GB_RESTRICT Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
#undef GB_FREE_ALL
#define GB_FREE_ALL \
{ \
GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}
// eWiseAdd C=A+B (optionally masked); slice workspaces are allocated by
// the add template and released by GB_FREE_ALL (defined just above).
GrB_Info GB_AaddB__lt_bool
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_add_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult C=A.*B (optionally masked); same slice-workspace protocol as
// GB_AaddB above, with the emult template.
GrB_Info GB_AemultB__lt_bool
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_emult_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx[p] = (x < Bx[p]) for all present entries (Bb is the bitmap; GBB
// treats a NULL bitmap as all-present).  Cx and Bx may alias.
GrB_Info GB_bind1st__lt_bool
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *GB_RESTRICT Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *Cx = (bool *) Cx_output ;
bool x = (*((bool *) x_input)) ;
bool *Bx = (bool *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Bb, p)) continue ;
bool bij = Bx [p] ;
Cx [p] = (x < bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx[p] = (Ax[p] < y) for all present entries (Ab bitmap, as above).
// Cx and Ax may alias.
GrB_Info GB_bind2nd__lt_bool
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *GB_RESTRICT Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
bool *Cx = (bool *) Cx_output ;
bool *Ax = (bool *) Ax_input ;
bool y = (*((bool *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
bool aij = Ax [p] ;
Cx [p] = (aij < y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
bool aij = Ax [pA] ; \
Cx [pC] = (x < aij) ; \
}
// C = op (x, A'): transpose A while applying cij = (x < aij) via the
// GB_CAST_OP macro defined just above.  GB_ATYPE is temporarily redefined
// because the transpose template reads the *second* operand's type.
GrB_Info GB_bind1st_tran__lt_bool
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
bool
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool x = (*((const bool *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
bool
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
bool aij = Ax [pA] ; \
Cx [pC] = (aij < y) ; \
}
// C = op (A', y): transpose A while applying cij = (aij < y) via the
// GB_CAST_OP macro defined just above.
GrB_Info GB_bind2nd_tran__lt_bool
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool y = (*((const bool *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
saxpy_omp.c | #include <math.h>
#include <stdio.h>
#include <stdlib.h>
/*
 * SAXPY on the host with an OpenMP parallel for: y = 2*x + y for 2^20
 * elements, then report the maximum deviation from the expected 4.0.
 * Fixes: abs() took an int argument, truncating the float error to 0 and
 * masking any error below 1.0 -- use fabsf(); removed unused d_x/d_y
 * leftovers from the CUDA version; check allocations; return a status.
 */
int main(void)
{
    int N = 1 << 20;
    float *x = (float *)malloc(N * sizeof(float));
    float *y = (float *)malloc(N * sizeof(float));
    if (x == NULL || y == NULL)
    {
        fprintf(stderr, "allocation failed\n");
        free(x);
        free(y);
        return 1;
    }
    for (int i = 0; i < N; i++)
    {
        x[i] = 1.0f;
        y[i] = 2.0f;
    }
    #pragma omp parallel for
    for (int i = 0; i < N; i++)
    {
        y[i] = 2.0f * x[i] + y[i];
    }
    float maxError = 0.0f;
    for (int i = 0; i < N; i++)
        maxError = fmaxf(maxError, fabsf(y[i] - 4.0f)); // was abs(): int truncation bug
    printf("Max error: %f\n", maxError);
    free(x);
    free(y);
    return 0;
} |
mgm.c | /*
-----------------------------------------------------------------------
Copyright 2013 Pieter Ghysels, University of Antwerp
Contact: ghyselsp@gmail.com
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
-----------------------------------------------------------------------
*/
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <stdio.h>
#include <sys/time.h>
typedef enum {V,W,S} CYCLE;
typedef enum {JACOBI,CHEBYSHEV} METHOD;
typedef struct{ double **p; int n; double h; int lda; } GRID;
typedef GRID *GRIDLIST;
void jacobi( GRID* u, GRID* f, GRID* tmp, int nu );
#ifdef PLUTO
#include "jacobi.pluto.h"
#else
#include "jacobi.h"
#endif
/* Forward declarations for the multigrid solver below. */
double get_time( void );
void init_rhs( GRID u );
void init_border( GRID u );
void init_solution( GRID u );
void create_grid( GRID *grid, int n );
void free_grid( GRID *grid );
void create_grid_list( GRIDLIST *list, int n, int lmax );
void free_grid_list( GRIDLIST *list, int lmax );
void print_grid( GRID u, char *c );
void zero_grid( GRID u );
void init_grid( GRID u, double a );
void random_grid( GRID u );
double error( GRID u1, GRID u2 );
void correct( GRID u, GRID c );
/* BUG FIX: the prototype was missing the CYCLE *cycle parameter used by the
 * definition and by the call in main() -- a conflicting declaration. */
void get_options( int *iters, int *nc, int *lmax, int *nu1, int *nu2, int *nuc, CYCLE *cycle, int *print_res );
void print_errors( GRID u, GRID f, GRID uex, int it );
void print_header( int lmax, int nc, int nu1, int nu2, int nuc, CYCLE cycle );
double residual_norm( GRID u, GRID f );
void residual( GRID u, GRID f, GRID d );
void restriction( GRID uf, GRID uc ); /* added: defined below but never declared */
void interpolation( GRID uf, GRID uc );
void vcycle( GRIDLIST u, GRIDLIST f, GRIDLIST w, int l, int nu1, int nu2, int nuc );
void mgrid( GRIDLIST u, GRIDLIST f, GRIDLIST w, int l, int nu1, int nu2, int nuc, CYCLE cycle );
// Current wall-clock time in seconds (microsecond resolution).
double get_time() {
  struct timeval now;
  gettimeofday(&now, NULL);
  return (double)now.tv_sec + (double)now.tv_usec * 1.e-6;
}
// Fill the right-hand side with zeros over [0, n] x [0, n]; the sin()
// forcing term used in other experiments is kept as a comment.
void init_rhs( GRID u ) {
#pragma omp parallel for
  for (int col = 0; col <= u.n; col++) {
    for (int row = 0; row <= u.n; row++) {
      u.p[row][col] = 0.0;
      //u.p[row][col] = -2.0*sin(row*u.h + col*u.h);
    }
  }
}
// Impose homogeneous Dirichlet values on all four boundary edges
// (alternative boundary conditions are kept as comments).
void init_border( GRID u ) {
  int last = u.n + 1;
  for (int i = 0; i <= last; i++) {
    u.p[i][0] = 0.0;
    u.p[i][last] = 0.0;
    //u.p[i][0] = x*x; u.p[i][last] = 1.0+x*x;  with x = i*u.h
  }
  for (int j = 1; j <= u.n; j++) {
    u.p[0][j] = 0.0;
    u.p[last][j] = 0.0;
    //u.p[0][j] = y*y; u.p[last][j] = y*y+1.0;  with y = j*u.h
  }
}
// Initialize the reference-solution grid to zero everywhere, including the
// boundary rows/columns (analytic alternatives are kept as comments).
void init_solution( GRID u ) {
  int last = u.n + 1;
#pragma omp parallel for
  for (int col = 0; col <= last; col++) {
    for (int row = 0; row <= last; row++) {
      u.p[row][col] = 0.0;
      //u.p[row][col] = sin(row*u.h + col*u.h);
    }
  }
}
/* Allocate an (n+2)x(n+2) grid (n interior lines plus one boundary layer on
 * each side) as a row-pointer array over one contiguous buffer, and zero it.
 * Aborts with a message on allocation failure -- callers never check, so
 * continuing would dereference NULL. */
void create_grid( GRID *grid, int n ) {
grid->n = n;
grid->h = 1.0/(double)(n+1);
grid->lda = n+2;//+(n+2)|64;
#ifdef USE_MM_MALLOC
grid->p = (double **) _mm_malloc((n+2)*sizeof(double *),64);
if (grid->p != NULL)
grid->p[0] = (double *) _mm_malloc((n+2)*(grid->lda)*sizeof(double),64);
#else
grid->p = (double **) malloc((n+2)*sizeof(double *));
if (grid->p != NULL)
grid->p[0] = (double *) malloc((n+2)*(grid->lda)*sizeof(double));
#endif
if (grid->p == NULL || grid->p[0] == NULL) {
fprintf(stderr, "create_grid: out of memory for n=%d\n", n);
exit(EXIT_FAILURE);
}
for (int i=1; i<=n+1; i++)
grid->p[i] = grid->p[i-1] + grid->lda;
zero_grid(*grid);
}
// Release the storage allocated by create_grid: the contiguous data buffer
// first (p[0]), then the row-pointer array, using the matching allocator.
void free_grid( GRID *grid ) {
#ifdef USE_MM_MALLOC
_mm_free(grid->p[0]);
_mm_free(grid->p);
#else
free(grid->p[0]);
free(grid->p);
#endif
}
/* Allocate a hierarchy of lmax+1 grids; level i+1 refines level i via
 * n -> 2n+1 internal lines (coarsest grid first).
 * BUG FIX: on calloc failure the old code only printed a message and then
 * dereferenced the NULL list in create_grid; abort instead. */
void create_grid_list( GRIDLIST *list, int n, int lmax ) {
*list = (GRID *) calloc(lmax+1,sizeof(GRID));
if(*list == NULL) {
fprintf(stderr, "create_grid_list: not enough memory\n");
exit(EXIT_FAILURE);
}
for (int i=0; i<=lmax; i++) {
create_grid((*list)+i,n);
n = 2*n+1;
}
}
// Release every grid in the hierarchy, then the list array itself.
void free_grid_list( GRIDLIST *list, int lmax ) {
  for (int level = lmax; level >= 0; level--)
    free_grid(&(*list)[level]);
  free(*list);
}
// Debug helper: print a 5x5 sample from each corner of the grid (top rows
// and bottom rows, first and last five columns), with "..." separators.
// The full-grid dump is kept as a comment. The label parameter c is only
// used by the commented-out variant.
void print_grid( GRID u, char *c ) {
/*
printf("\n GRID %s\n",c);
for (int j=u.n+1; j>=0; j--) {
printf("\n row %d:\n ",j);
for (int i=0; i<=u.n +1; i++)
printf("%f ",u.p[i][j]);
}
printf("\n");
*/
printf("\n-------------------------\n");
for(int i = 0; i < 5; i++){
for(int j = 0; j < 5; j++)
printf("%6e\t", u.p[i][j]);
printf("\t...,\t");
for(int j = u.n+1-4; j <= u.n+1; j++)
printf("%6e\t", u.p[i][j]);
printf("\n");
}
printf("\t...,\n");
for(int i = u.n+1-4; i <= u.n+1; i++){
for(int j = 0; j < 5; j++)
printf("%6e\t", u.p[i][j]);
printf("\t...,\t");
for(int j = u.n+1-4; j <= u.n+1; j++)
printf("%6e\t", u.p[i][j]);
printf("\n");
}
}
// Set every entry of the grid, boundary included, to zero.
void zero_grid( GRID u ) {
  int last = u.n + 1;
#pragma omp parallel for
  for (int row = 0; row <= last; row++)
    for (int col = 0; col <= last; col++)
      u.p[row][col] = 0;
}
/* Fill the grid (boundary included) with pseudo-random values in [0, 1].
 * BUG FIX: rand() / RAND_MAX was an integer division that yields 0 almost
 * always; divide in double instead. The OpenMP pragma was dropped because
 * rand() uses hidden shared state and concurrent calls are a data race. */
void random_grid( GRID u ) {
for (int i=0; i<= u.n+1; i++)
for(int j=0; j<= u.n+1; j++)
u.p[i][j] = rand() / (double) RAND_MAX;
}
// Set every entry of the grid, boundary included, to the constant a.
void init_grid( GRID u, double a ) {
  int last = u.n + 1;
#pragma omp parallel for
  for (int row = 0; row <= last; row++)
    for (int col = 0; col <= last; col++)
      u.p[row][col] = a;
}
// Root of the sum of squared interior differences between u1 and u2,
// scaled by 1/n (discrete L2 error norm).
double error( GRID u1, GRID u2 ) {
  double sum = 0.0;
#pragma omp parallel for reduction(+:sum)
  for (int i=1; i<=u1.n; i++)
    for (int j=1; j<=u1.n; j++) {
      double diff = u1.p[i][j] - u2.p[i][j];
      sum += diff * diff;
    }
  return sqrt(sum) / u1.n;
}
// Apply the coarse-grid correction: u += c on all interior points.
void correct( GRID u, GRID c ) {
#pragma omp parallel for
  for (int row = 1; row <= u.n; row++)
    for (int col = 1; col <= u.n; col++)
      u.p[row][col] += c.p[row][col];
}
// Interactively read all solver parameters from stdin.
// NOTE(review): each scanf return value is captured in 'e' but never
// checked, so malformed input silently leaves the corresponding output
// variable unmodified; same for an unrecognized cycle letter.
void get_options( int *iters, int *nc, int *lmax, int *nu1, int *nu2, int *nuc, CYCLE *cycle, int *print_res ) {
char cyc[2];
char inp[2];
int e;
printf("# number of (internal) gridlines on coarsest level: ");
e = scanf("%d",nc);
printf("# additional finer levels: ");
e = scanf("%d",lmax);
printf("# multigrid cycle type (V), (W): " );
e = scanf("%1s",cyc);
switch(cyc[0]) {
case 'V': case 'v': *cycle=V; break;
case 'W': case 'w': *cycle=W; break;
case 'S': case 's': *cycle=S; break;
default : printf("wrong cycle type\n");
}
printf("# pre-smoothing steps: ");
e = scanf("%d",nu1);
printf("# post-smoothing steps: ");
e = scanf("%d",nu2);
printf("# coarse-grid relaxation steps: ");
e = scanf("%d",nuc);
printf("# max iterations on finest level: ");
e = scanf("%d",iters);
printf("# time ? (y/n) : ");
e = scanf("%1s", inp);
/* 'y' selects timing-only output, 'n' per-iteration residuals (note the
   inverted flag: print_res == 0 means "print timing"). */
switch(inp[0]) {
case 'y': case 'Y': *print_res = 0; break;
case 'n': case 'N': *print_res = 1; break;
default : printf("wrong input");
}
}
// Previous iteration's residual and error, kept across calls so the
// convergence factors (rho) can be reported.
double oldresid,olderr;
// Print the error and residual for iteration 'it'; from iteration 1 on,
// also print the per-iteration reduction factors vs. the previous call.
void print_errors( GRID u, GRID f, GRID uex, int it ) {
double rhoresid,rhoerr;
double resid = residual_norm(u,f);
double err = error(u,uex);
if (it==0) {
printf("\n# %3dx%-3d : %-3d : %7e %7e\n",u.n,u.n,it,err,resid);
} else {
/* guard against division by zero when fully converged */
rhoresid = (oldresid==0.0) ? 1.0 : resid/oldresid;
rhoerr = (olderr ==0.0) ? 1.0 : err/olderr;
printf("# %3dx%-3d : %-3d : %7e %7e : %6f %6f \n",u.n,u.n,it, err,resid,rhoerr,rhoresid);
}
oldresid = resid;
olderr = err;
}
// Print the run configuration followed by the table header for the
// per-iteration error/residual report.
void print_header( int lmax, int nc, int nu1, int nu2, int nuc, CYCLE cycle ) {
  static const char cycle_letter[] = {'V', 'W', 'S'};
  printf(": levels: 0..%-2d, coarse grid: %3dx%-3d, %c cycle \n",
         lmax, nc, nc, cycle_letter[(int)cycle]);
  printf("# pre-smoothing: %-2d, post-smoothing: %-2d, coarse relaxation: %-2d",
         nu1, nu2, nuc);
  printf("\n\n# discr iter error residual rho-error rho-residual\n" );
  printf( "# ------ ---- ----- -------- --------- ------------\n" );
}
/* Scaled L2 norm of the residual of the 5-point Laplacian: sums
 * (A u - f)^2 over interior points and returns sqrt(sum)/n.
 * BUG FIX: 'tmp' was declared outside the omp-parallel loop and therefore
 * shared between threads -- a data race that could corrupt the norm; it is
 * now private to each iteration. */
double residual_norm( GRID u, GRID f ) {
double res = 0.0;
double invhh = 1.0/(u.h*u.h);
#pragma omp parallel for reduction(+:res)
for (int i=1; i<=u.n; i++)
for(int j=1; j<=u.n; j++) {
double tmp = (u.p[i-1][j]+u.p[i][j-1]+u.p[i+1][j]+u.p[i][j+1]-4.0*u.p[i][j])*invhh-f.p[i][j];
res += tmp*tmp;
}
res = sqrt(res)/u.n;
return res;
}
// Compute the residual d = f - A u, where A is the 5-point Laplacian
// scaled by 1/h^2, on all interior points.
void residual( GRID u, GRID f, GRID d ) {
  double inv_h2 = 1.0 / (u.h * u.h);
#pragma omp parallel for collapse(2)
  for (int row = 1; row <= u.n; row++)
    for (int col = 1; col <= u.n; col++) {
      double lap = 4.0*u.p[row][col]-u.p[row-1][col]-u.p[row+1][col]-u.p[row][col-1]-u.p[row][col+1];
      d.p[row][col] = f.p[row][col] - lap * inv_h2;
    }
}
// Full-weighting restriction from fine grid uf to coarse grid uc: each
// coarse point (i/2, j/2) is the weighted average of the 3x3 fine-grid
// stencil around (i, j) with weights 1/16 (corners), 1/8 (edges) and
// 1/4 (center).
void restriction( GRID uf, GRID uc ) {
#pragma omp parallel for collapse(2)
for (int i=2; i<=uf.n; i+=2)
for (int j=2; j<=uf.n; j+=2)
uc.p[i/2][j/2] = 0.0625*(uf.p[i-1][j-1]+uf.p[i+1][j-1]+uf.p[i-1][j+1]+uf.p[i+1][j+1])
+0.125*(uf.p[i-1][j]+uf.p[i+1][j]+uf.p[i][j-1]+uf.p[i][j+1])
+0.25*uf.p[i][j];
}
// Bilinear interpolation (prolongation) from coarse grid uc to fine grid
// uf: even-even fine points copy the coarse value, odd points average 2
// or 4 neighbouring coarse values. The i>0 / j>0 guards skip writes on
// the fixed boundary lines. Reads of uc.p[i+1]/[j+1] reach at most index
// uc.n+1, which is the allocated boundary row/column.
void interpolation( GRID uf, GRID uc ) {
#pragma omp parallel for collapse(2)
for (int i=0; i<=uc.n; i++) {
for (int j=0; j<=uc.n; j++) {
/*
if (i>0 && j>0) uf.p[2*i][2*j] = uc.p[i][j];
if (j>0) uf.p[2*i+1][2*j] = uc.p[i][j];
if (i>0) uf.p[2*i][2*j+1] = uc.p[i][j];
uf.p[2*i+1][2*j+1] = uc.p[i][j];
*/
if (i>0 && j>0) uf.p[2*i][2*j] = uc.p[i][j];
if (j>0) uf.p[2*i+1][2*j] = (uc.p[i+1][j] + uc.p[i][j]) / 2.0;
if (i>0) uf.p[2*i][2*j+1] = (uc.p[i][j+1] + uc.p[i][j]) / 2.0;
uf.p[2*i+1][2*j+1] = (uc.p[i][j] + uc.p[i][j+1] + uc.p[i+1][j] + uc.p[i+1][j+1]) / 4.0;
}
}
}
/*
void vcycle( GRIDLIST u, GRIDLIST f, GRIDLIST w, int l, int nu1, int nu2, int nuc ) {
if (l==0) {
//u[0].p[1][1] = (u[0].h)*(u[0].h) / 4.0 * f[0].p[1][1];
jacobi(&u[0],&f[0],&w[0],nuc);
} else {
jacobi(&u[l],&f[l],&w[l],nu1);
residual(u[l],f[l],w[l]);
restriction(w[l],f[l-1]);
zero_grid(u[l-1]);
vcycle(u,f,w,l-1,nu1,nu2,nuc);
interpolation(w[l],u[l-1]);
correct(u[l],w[l]);
jacobi(&u[l],&f[l],&w[l],nu2);
}
}
*/
// One recursive multigrid cycle on level l.
// l == 0: just relax nuc times on the coarsest grid.
// Otherwise: pre-smooth (nu1 sweeps), compute the residual into w[l],
// restrict it to f[l-1], recurse according to the cycle type -- V: one
// recursive call, W: two, S: a variant that (for l > 1) restricts a second
// time and starts the recursion two levels down, then interpolates the
// level-(l-2) result up as the level-(l-1) initial guess -- and finally
// prolongate the coarse correction, apply it, and post-smooth (nu2 sweeps).
void mgrid( GRIDLIST u, GRIDLIST f, GRIDLIST w, int l, int nu1, int nu2, int nuc, CYCLE cycle) {
if (l==0) {
//u[0].p[1][1][1] = (u[0].h)*(u[0].h) / 6.0 * f[0].p[1][1][1];
jacobi(&u[0],&f[0],&w[0],nuc);
} else {
jacobi(&u[l],&f[l],&w[l],nu1);
residual(u[l],f[l],w[l]);
restriction(w[l],f[l-1]);
if (cycle == V) {
zero_grid(u[l-1]);
mgrid(u,f,w,l-1,nu1,nu2,nuc,cycle);
} else if (cycle == W) {
zero_grid(u[l-1]);
mgrid(u,f,w,l-1,nu1,nu2,nuc,cycle);
mgrid(u,f,w,l-1,nu1,nu2,nuc,cycle);
} else if (cycle == S) {
if (l==1) {
zero_grid(u[l-1]);
mgrid(u,f,w,l-1,nu1,nu2,nuc,cycle);
} else {
restriction(f[l-1],f[l-2]);
zero_grid(u[l-2]);
mgrid(u,f,w,l-2,nu1,nu2,nuc,cycle);
interpolation(u[l-1],u[l-2]);
}
}
interpolation(w[l],u[l-1]);
correct(u[l],w[l]);
jacobi(&u[l],&f[l],&w[l],nu2);
}
}
// Driver: read parameters from stdin, build the grid hierarchy, iterate
// multigrid cycles on the finest level until the error vs. the reference
// solution drops below 1e-12 or the iteration limit is hit, then print
// either per-iteration errors or the total time (depending on print_res).
int main( void ) {
int lmax,nc,nu1,nu2,nuc,iters,print_res;
double tstart, ttotal,tmin=10000000.0;
CYCLE cycle;
GRIDLIST u,f,w,uex;
get_options(&iters,&nc,&lmax,&nu1,&nu2,&nuc,&cycle,&print_res);
print_header(lmax,nc,nu1,nu2,nuc,cycle);
create_grid_list(&uex,nc,lmax);
create_grid_list(&u,nc,lmax);
create_grid_list(&f,nc,lmax);
create_grid_list(&w,nc,lmax);
ttotal = 0;
int s;
// NOTE(review): the repetition loop runs exactly once, so tmin == ttotal;
// presumably the bound was meant to be raised for timing runs.
for(s=0;s<1;s++){
init_grid(u[lmax], 1.0);
init_border(u[lmax]);
init_border(w[lmax]);
init_rhs(f[lmax]);
init_solution(uex[lmax]);
print_errors(u[lmax],f[lmax],uex[lmax],0);
tstart = get_time();
int i = 0;
char ch = 'u'; // kept for the commented-out print_grid calls below
//print_grid(u[lmax], &ch);
while (i < iters && error(u[lmax],uex[lmax]) > 1e-12) {
mgrid(u,f,w,lmax,nu1,nu2,nuc,cycle);
//vcycle(u,f,w,lmax,nu1,nu2,nuc);
//print_grid(u[lmax], &ch);
i++;
if (print_res)
print_errors(u[lmax],f[lmax],uex[lmax],i);
}
ttotal += (get_time()-tstart);
if(ttotal<tmin) tmin = ttotal;
}
//print_grid(u[lmax], &ch);
if (!print_res)
//printf("\n# total time: \n%7f\t%i #sec.\n", ttotal, i);
printf("\n%7f ms\n", tmin*1000);
free_grid_list(&uex,lmax);
free_grid_list(&u,lmax);
free_grid_list(&f,lmax);
free_grid_list(&w,lmax);
}
|
serialized.c | // RUN: %libomp-compile-and-run | FileCheck %s
// REQUIRES: ompt
#include "callback.h"
// OMPT unit test: even a serialized parallel region (num_threads(1)) must
// raise parallel-begin, implicit-task begin/end and parallel-end events
// with consistent IDs, and the task hierarchy must link the implicit task
// (level 0) to the initial task (level 1).
// The "// CHECK:" lines are FileCheck patterns consumed by the RUN line in
// the file header and must stay byte-identical.
int main()
{
#pragma omp parallel num_threads(1)
{
print_ids(0);
print_ids(1);
}
// CHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_parallel_begin: parent_task_id=[[PARENT_TASK_ID:[0-9]+]], parent_task_frame=0x{{[0-f]+}}, parallel_id=[[PARALLEL_ID:[0-9]+]], requested_team_size=1, parallel_function=0x{{[0-f]+}}, invoker=[[PARALLEL_INVOKER:.+]]
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
// CHECK: {{^}}[[MASTER_ID]]: level 0: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
// CHECK: {{^}}[[MASTER_ID]]: level 1: parallel_id=0, task_id=[[PARENT_TASK_ID]]
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_end: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_parallel_end: parallel_id=[[PARALLEL_ID]], task_id=[[PARENT_TASK_ID]], invoker=[[PARALLEL_INVOKER]]
return 0;
}
|
dct.h | /**
* @file dct.h
* @author Yibo Lin
* @date Sep 2018
*/
#ifndef DREAMPLACE_DCT_H
#define DREAMPLACE_DCT_H
#include "utility/src/torch.h"
#include "utility/src/Msg.h"
DREAMPLACE_BEGIN_NAMESPACE
// Input-validation helpers: AT_ASSERTM aborts with the message when the
// predicate is false; the stringized argument name prefixes the message.
#define CHECK_CPU(x) AT_ASSERTM(!x.is_cuda(), #x " must be a tensor on CPU")
// BUG FIX: the predicate requires a CPU tensor but the message said "GPU";
// also add the missing space after the stringized name in all messages.
#define CHECK_FLAT(x) AT_ASSERTM(!x.is_cuda() && x.ndimension() == 1, #x " must be a flat tensor on CPU")
#define CHECK_EVEN(x) AT_ASSERTM((x.numel()&1) == 0, #x " must have even number of elements")
#define CHECK_CONTIGUOUS(x) AT_ASSERTM(x.is_contiguous(), #x " must be contiguous")
at::Tensor dct_forward(
at::Tensor x,
at::Tensor expk,
int num_threads
);
at::Tensor idct_forward(
at::Tensor x,
at::Tensor expk,
int num_threads
);
at::Tensor dct2_forward(
at::Tensor x,
at::Tensor expk0,
at::Tensor expk1,
int num_threads
);
at::Tensor idct2_forward(
at::Tensor x,
at::Tensor expk0,
at::Tensor expk1,
int num_threads
);
at::Tensor dst_forward(
at::Tensor x,
at::Tensor expk,
int num_threads
);
at::Tensor idst_forward(
at::Tensor x,
at::Tensor expk,
int num_threads
);
at::Tensor idxct_forward(
at::Tensor x,
at::Tensor expk,
int num_threads
);
at::Tensor idxst_forward(
at::Tensor x,
at::Tensor expk,
int num_threads
);
at::Tensor idcct2_forward(
at::Tensor x,
at::Tensor expk0,
at::Tensor expk1,
int num_threads
);
at::Tensor idcst2_forward(
at::Tensor x,
at::Tensor expk0,
at::Tensor expk1,
int num_threads
);
at::Tensor idsct2_forward(
at::Tensor x,
at::Tensor expk0,
at::Tensor expk1,
int num_threads
);
at::Tensor idxst_idct_forward(
at::Tensor x,
at::Tensor expk0,
at::Tensor expk1,
int num_threads
);
at::Tensor idct_idxst_forward(
at::Tensor x,
at::Tensor expk0,
at::Tensor expk1,
int num_threads
);
/// Permute each length-N row of x (M rows) into pre-FFT order:
/// the even-indexed entries in ascending order followed by the
/// odd-indexed entries in descending order.
template <typename T>
void computeReorder(
        const T* x,
        const int M,
        const int N,
        T* y,
        int num_threads
        )
{
    const int half = N >> 1;
#pragma omp parallel for num_threads(num_threads)
    for (int idx = 0; idx < M*N; ++idx)
    {
        int col = idx % N;
        // col < N/2 -> x[row, 2*col]; otherwise -> x[row, (N-col)*2 - 1]
        y[idx] = (col < half) ? x[idx + col] : x[idx + N*2 - col*3 - 1];
    }
}
/// Combine one-sided complex FFT output with the factors in expk to form N
/// real values per row. x holds M rows of (N/2+1) interleaved (re, im)
/// pairs; expk holds interleaved (re, im) factors indexed by 2*col. For
/// col <= N/2 the bin is used directly; beyond that the mirrored bin
/// N - col is used with the sign of the imaginary product flipped
/// (conjugate symmetry of a real-input FFT).
template <typename T>
void computeMulExpk(
const T* x,
const T* expk,
const int M,
const int N,
T* z,
int num_threads
)
{
#pragma omp parallel for num_threads(num_threads)
for (int i = 0; i < M*N; ++i)
{
int row = i/N; // row
int col = i-row*N; // column
int col_2x = (col<<1);
int fft_onesided_size = (N>>1)+1;
int fft_onesided_size_2x = fft_onesided_size<<1;
if (col_2x <= N)
{
int j = row*fft_onesided_size_2x + col_2x;
//printf("x[%d]*expk[%d] + x[%d]*expk[%d] = z[%d]\n", j, col_2x, j+1, col_2x+1, i);
z[i] = x[j]*expk[col_2x] + x[j+1]*expk[col_2x+1];
}
else
{
// mirrored bin: (N<<1) - col_2x steps back from the row end
int j = row*fft_onesided_size_2x + (N<<1) - col_2x;
//printf("x[%d]*expk[%d] + x[%d]*expk[%d] = z[%d]\n", j, col_2x, j+1, col_2x+1, i);
z[i] = x[j]*expk[col_2x] - x[j+1]*expk[col_2x+1];
}
}
}
/// Build N/2+1 interleaved complex values per row:
/// v_k = (real + i*imag) * (expk_re + i*expk_im) expanded into the two
/// products below, where real = x[row, col] and imag = -x[row, N-col]
/// (zero for col == 0). Output v is interleaved (re, im).
template <typename T>
void computeVk(
const T* x,
const T* expk,
const int M,
const int N,
T* v,
int num_threads
)
{
#pragma omp parallel for num_threads(num_threads)
for (int i = 0; i < M*(N/2+1); ++i)
{
int ncol = N/2+1;
int row = i/ncol; // row
int col = i-row*ncol; // column
int col_2x = (col<<1);
// real
T real = x[row*N+col];
T imag = (col == 0)? 0 : -x[row*N+N-col];
v[2*i] = real*expk[col_2x] - imag*expk[col_2x+1];
// imag, x[N-i]
v[2*i+1] = real*expk[col_2x+1] + imag*expk[col_2x];
}
}
/// Inverse of computeReorder: scatter each permuted row y back to natural
/// order z. Even output columns come from the first half of y, odd output
/// columns from the reversed tail (see the in-code note about the paper's
/// indexing).
template <typename T>
void computeReorderReverse(
const T* y,
const int M,
const int N,
T* z,
int num_threads
)
{
#pragma omp parallel for num_threads(num_threads)
for (int i = 0; i < M*N; ++i)
{
int row = i/N; // row
int col = i-row*N; // column
//assert((i-col*2+N-1)*2 < M*N*2);
//printf("z[%d] = y[%d]\n", i, (col&1)? (i-col*3/2+N-1) : (i-col/2));
//z[i] = (col&1)? y[(i-col*3/2+N-1)] : y[(i-col/2)];
// according to the paper, it should be N - (col+1)/2 for col is odd
// but it seems previous implementation accidentally matches this as well
z[i] = (col&1)? y[(i-col) + N - (col+1)/2] : y[(i-col/2)];
}
}
/// For every row r: y[r, c] = (y[r, c] + x[r, 0]) / 2, i.e. add the row's
/// first x entry and halve (post-processing step of the inverse transform).
template <typename T>
void addX0AndScale(
        const T* x,
        const int M,
        const int N,
        T* y,
        int num_threads
        )
{
#pragma omp parallel for num_threads(num_threads)
    for (int idx = 0; idx < M*N; ++idx)
    {
        int row_start = (idx / N) * N;   // index of x[row, 0]
        y[idx] = (y[idx] + x[row_start]) * 0.5;
    }
}
/// Variant of addX0AndScale with the transform scaling folded in:
/// y[r, c] = y[r, c] * 0.25 * N + x[r, 0] * 0.5. Per the original note,
/// this matches the python reference implementation (the "normal" scaling
/// would multiply by 0.25*N only).
template <typename T>
void addX0AndScaleN(
        const T* x,
        const int M,
        const int N,
        T* y,
        int num_threads
        )
{
#pragma omp parallel for num_threads(num_threads)
    for (int idx = 0; idx < M*N; ++idx)
    {
        int row_start = (idx / N) * N;   // index of x[row, 0]
        y[idx] = y[idx]*0.25*N + x[row_start]*0.5;
    }
}
/// Per row, turn [x_0, x_1, ..., x_{N-1}] into [0, x_{N-1}, ..., x_2, x_1]:
/// x_0 is dropped, the rest is reversed behind a leading zero.
template <typename T>
void computeFlipAndShift(
        const T* x,
        const int M,
        const int N,
        T* y,
        int num_threads
        )
{
#pragma omp parallel for num_threads(num_threads)
    for (int idx = 0; idx < M*N; ++idx)
    {
        int col = idx % N;
        y[idx] = (col == 0) ? 0 : x[idx + N - col*2];
    }
}
/// Negate the entries at odd flat indices (index origin 0); processes
/// M*(N/2) positions, so N is expected to be even.
template <typename T>
void negateOddEntries(
        T* x,
        const int M,
        const int N,
        int num_threads
        )
{
    const int count = M * (N / 2);
#pragma omp parallel for num_threads(num_threads)
    for (int k = 0; k < count; ++k)
    {
        x[2*k + 1] = -x[2*k + 1];
    }
}
/// Reverse each length-N row: y = [x_{N-1}, ..., x_1, x_0].
template <typename T>
void computeFlip(
        const T* x,
        const int M,
        const int N,
        T* y,
        int num_threads
        )
{
#pragma omp parallel for num_threads(num_threads)
    for (int idx = 0; idx < M*N; ++idx)
    {
        int col = idx % N;
        y[idx] = x[idx + N - 2*col - 1];
    }
}
at::Tensor dct_2N_forward(
at::Tensor x,
at::Tensor expk,
int num_threads
);
at::Tensor idct_2N_forward(
at::Tensor x,
at::Tensor expk,
int num_threads
);
at::Tensor dct2_2N_forward(
at::Tensor x,
at::Tensor expk0,
at::Tensor expk1,
int num_threads
);
at::Tensor idct2_2N_forward(
at::Tensor x,
at::Tensor expk0,
at::Tensor expk1,
int num_threads
);
/// Copy the M x N matrix x into the left half of the M x 2N matrix z;
/// the right half of each row of z is left untouched.
template <typename T>
void computePad(
        const T* x, // M*N
        const int M,
        const int N,
        T* z, // M*2N
        int num_threads
        )
{
#pragma omp parallel for num_threads(num_threads)
    for (int idx = 0; idx < M*N; ++idx)
    {
        int row = idx / N;
        int col = idx % N;
        z[row*(N<<1) + col] = x[idx];
    }
}
/// 2N-padded variant of computeMulExpk: x holds M rows of (N+1)
/// interleaved (re, im) FFT bins; z[i] is the real part of bin 'col'
/// multiplied by the interleaved factor expk[2*col].
template <typename T>
void computeMulExpk_2N(
const T* x, // M*(N+1)*2
const T* expk,
const int M,
const int N,
T* z, // M*N
int num_threads
)
{
#pragma omp parallel for num_threads(num_threads)
for (int i = 0; i < M*N; ++i)
{
int row = i/N; // row
int col = i-row*N; // column
int col_2x = (col<<1);
int j = row*((N+1)<<1) + col_2x;
z[i] = x[j]*expk[col_2x] + x[j+1]*expk[col_2x+1];
}
}
/// Multiply each real input x[row, col] by the complex factor expk[2*col]
/// and store the (re, im) pair interleaved in the first N complex slots of
/// each 2N-wide complex row of z.
/// NOTE(review): only the first N complex slots per row are written; the
/// caller presumably pre-zeroes z -- confirm at the call sites.
template <typename T>
void computeMulExpkAndPad_2N(
const T* x, // M*N
const T* expk,
const int M,
const int N,
T* z, // M*2N*2
int num_threads
)
{
#pragma omp parallel for num_threads(num_threads)
for (int i = 0; i < M*N; ++i)
{
int row = i/N; // row
int col = i-row*N; // column
int col_2x = (col<<1);
int j = row*(N<<2) + col_2x;
z[j] = x[i]*expk[col_2x];
z[j+1] = x[i]*expk[col_2x+1];
}
}
/// Keep only the first N entries of each 2N-wide row of x, producing the
/// M x N matrix z (drops the padding added by computePad).
template <typename T>
void computeTruncation(
        const T* x, // M*2N
        const int M,
        const int N,
        T* z, // M*N
        int num_threads
        )
{
#pragma omp parallel for num_threads(num_threads)
    for (int idx = 0; idx < M*N; ++idx)
    {
        int row = idx / N;
        int col = idx % N;
        z[idx] = x[row*(N<<1) + col];
    }
}
DREAMPLACE_END_NAMESPACE
#endif
|
nlk_eval.c | /******************************************************************************
* NLK - Neural Language Kit
*
* Copyright (c) 2014 Luis Rei <me@luisrei.com> http://luisrei.com @lmrei
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*****************************************************************************/
/** @file nlk_eval.c
* Evaluation functions
*/
#include <errno.h>
#include <stdio.h>
#include <stdbool.h>
#include <string.h>
#include "nlk_err.h"
#include "nlk_tic.h"
#include "nlk_array.h"
#include "nlk_neuralnet.h"
#include "nlk_w2v.h"
#include "nlk_pv.h"
#include "nlk_text.h"
#include "nlk_vocabulary.h"
#include "nlk_window.h"
#include "nlk_corpus.h"
#include "nlk_pv.h"
#include "nlk_eval.h"
/**
* Parses a line of a word relation test set
*
* @param vocab the vocabulary
* @param lower_words convert words in test set to lower case
* @param line the line string to parse - MODIFIED IN PLACE by strtok
* @param test the parsed test from the line - result if return = true
*
* @return false on failure, true on success
*
* @note Uses strtok, so this function is not thread-safe and must not be
* interleaved with other strtok-based tokenization.
* @note NOTE(review): identifiers starting with a double underscore are
* reserved by the C standard; renaming would change the interface, so it
* is only flagged here.
*/
bool
__nlk_read_question_line(struct nlk_vocab_t **vocab, bool lower_words,
char *line, struct nlk_analogy_test_t *test)
{
char *word;
struct nlk_vocab_t *vi;
size_t ii = 0;
if(vocab == NULL) {
return false;
}
/* a test line is "a b c d": three question words and one answer word */
do {
/* tokenize string */
if(ii == 0) {
word = strtok(line, " ");
} else if(ii < 3) {
word = strtok(NULL, " ");
} else {
word = strtok(NULL, "\n");
}
if(word == NULL) {
return false; /* something is wrong, fail */
}
/* to lower case if necessary */
if(lower_words) {
nlk_text_ascii_lower(word);
}
/* find word in vocabulary */
vi = nlk_vocab_find(vocab, word);
if(vi == NULL) {
return false; /* if any word is not in vocabulary, fail */
}
/* assign value to test structure */
if(ii < 3) {
test->question[ii] = vi;
} else {
test->answer = vi;
}
ii++;
} while(ii < 4);
return true;
}
/**
 * Read/Parse a word analogy test file
 *
 * @param filepath file path of the test file
 * @param vocab the vocabulary
 * @param lower_words convert words in test set to lower case
 * @param total_tests will be overwritten with the total number of tests read
 *
 * @return an array of word analogy tests (caller frees), or NULL on error
 */
struct nlk_analogy_test_t *
nlk_read_analogy_test_file(const char *filepath, struct nlk_vocab_t **vocab,
                           const bool lower_words, size_t *total_tests)
{
    char line[NLK_MAX_LINE_SIZE];
    struct nlk_analogy_test_t *tests; /* array of all test cases */
    void *re_tests;
    size_t alloc_size = NLK_WORD_REL_DEFAULT_SIZE; /* allocated capacity */
    size_t test_number = 0; /* number of tests read into array */
    FILE *in = fopen(filepath, "rb");
    if (in == NULL) {
        NLK_ERROR_NULL(strerror(errno), errno);
        /* unreachable */
    }
    /* alloc */
    tests = (struct nlk_analogy_test_t *) calloc(alloc_size,
                sizeof(struct nlk_analogy_test_t));
    if(tests == NULL) {
        fclose(in);
        NLK_ERROR_NULL("failed to allocate memory for the test set",
                       NLK_ENOMEM);
        /* unreachable */
    }
    /*
     * read the entire file into memory
     */
    *total_tests = 0;
    while(fgets(line, NLK_MAX_LINE_SIZE, in) != NULL) {
        if(line[0] == ':') { /* 'header' (section) line, skip */
            continue;
        }
        /* parse a single test case, if successful add to tests array */
        if(__nlk_read_question_line(vocab, lower_words, line,
                                    &tests[test_number])) {
            test_number++;
        }
        /* grow the array when full.
         * BUG FIX: the old code passed the new size in ELEMENTS instead of
         * bytes to realloc, never doubled alloc_size, and never assigned
         * the realloc result back to 'tests' -- an out-of-bounds write and
         * potential use-after-free once the default size was exceeded. */
        if(test_number == alloc_size) {
            alloc_size *= 2;
            re_tests = realloc(tests,
                               alloc_size * sizeof(struct nlk_analogy_test_t));
            if(re_tests == NULL) {
                free(tests);
                fclose(in);
                NLK_ERROR_NULL("failed to allocate memory for the test set",
                               NLK_ENOMEM);
                /* unreachable */
            }
            tests = (struct nlk_analogy_test_t *) re_tests;
        }
    }
    fclose(in);
    *total_tests = test_number;
    return tests;
}
/**
* Uses a word relation test set to evaluate the quality of word vectors.
*
* The test is described in
*
* "Efficient Estimation of Word Representations in Vector Space.
* Tomas Mikolov, Kai Chen, Greg Corrado, and Jeffrey Dean.
* In Proceedings of Workshop at ICLR, 2013.
*
* And a test set is available at https://code.google.com/p/word2vec/
*
* @param filepath file path of the test file
* @param vocab the vocabulary
* @param weights the weights matrix containing the representation of the
* words in the vocabulary
* @param limit limit for the number of words in the vocabulary
* @param lower_words convert words in test set to lower case
* @param accuracy the total accuracy (return value)
*
* @return NLK_SUCCESS or NLK_FAILURE
*
* @note
* This function ignores tests with OOV words. It is mostly meant to be
* used to monitor the progress of training a word representation.
* @endnote
*/
int
nlk_eval_on_questions(const char *filepath, struct nlk_vocab_t **vocab,
const NLK_ARRAY *weights, const size_t limit,
const bool lower_words, nlk_real *accuracy)
{
struct nlk_analogy_test_t *tests; /* will contain all test cases */
size_t total_tests;
size_t _limit; /* effective vocabulary limit (0 means "all rows") */
NLK_ARRAY *weights_norm; /* for the normalized copy of the weights */
size_t correct = 0;
size_t executed = 0;
/* read file */
tests = nlk_read_analogy_test_file(filepath, vocab, lower_words,
&total_tests);
if(tests == NULL) {
return NLK_FAILURE;
}
/* normalize weights to make distance calculations easier */
weights_norm = nlk_array_create_copy(weights);
nlk_array_normalize_row_vectors(weights_norm);
*accuracy = 0;
if(limit == 0) {
_limit = weights_norm->rows;
} else {
_limit = limit;
}
/*
* perform the tests: each thread gets its own scratch vectors; the
* per-thread correct/executed counts are combined by the reductions
*/
#pragma omp parallel reduction(+ : correct) reduction(+ : executed)
{
NLK_ARRAY *predicted = nlk_array_create(1, weights->cols);
NLK_ARRAY *sub = nlk_array_create(1, weights->cols);
NLK_ARRAY *add = nlk_array_create(1, weights->cols);
NLK_ARRAY *word_vector = nlk_array_create(1, weights->cols);
struct nlk_analogy_test_t *test; /* iteration test case*/
#pragma omp for
for(size_t test_number = 0; test_number < total_tests; test_number++) {
nlk_real similarity = 0;
nlk_real best_similarity = 0;
size_t most_similar = 0;
test = &tests[test_number];
/* if any of the words is not in the limited vocab, skip */
/* NOTE(review): valid row indices are < _limit, so this arguably
* should be >= _limit; left unchanged pending confirmation of the
* index semantics. */
if(test->answer->index > _limit
|| test->question[0]->index > _limit
|| test->question[1]->index > _limit
|| test->question[2]->index > _limit) {
continue;
}
/* vector for the second word in test: word_vector2 */
nlk_array_copy_row(predicted, 0, weights_norm,
test->question[1]->index);
/* word_vector1 (vector for first word) */
nlk_array_copy_row(sub, 0, weights_norm, test->question[0]->index);
/* word_vector2 - word_vector1 */
nlk_array_scale(-1.0, sub);
nlk_array_add(sub, predicted);
/* word_vector3 (vector for the third word) */
nlk_array_copy_row(add, 0, weights_norm, test->question[2]->index);
/* word_vector2 - word_vector1 + word_vector3 */
nlk_array_add(add, predicted);
/* find the closest vector to *predicted* in the weights matrix */
for(size_t word_index = 0; word_index < _limit; word_index++) {
/* ignore words in the test */
if(word_index == test->question[0]->index
|| word_index == test->question[1]->index
|| word_index == test->question[2]->index) {
continue;
}
/* compute similarity (dot product of normalized rows) */
nlk_array_copy_row(word_vector, 0, weights_norm, word_index);
similarity = nlk_array_dot(word_vector, predicted, 1);
/* check if it is better than previous best */
if(similarity > best_similarity) {
best_similarity = similarity;
most_similar = word_index;
}
} /* end of find closes word */
/* check to see of the closest word found is the target word */
if(test->answer->index == most_similar) {
correct++;
}
executed++;
}
/* cleanup */
nlk_array_free(word_vector);
nlk_array_free(predicted);
nlk_array_free(sub);
nlk_array_free(add);
} /* END OF PARALLEL BLOCk */
free(tests);
nlk_array_free(weights_norm);
/* result */
/* NOTE(review): if every test was skipped, executed == 0 and this
* divides by zero (NaN/inf accuracy). */
*accuracy = correct / (nlk_real) executed;
return NLK_SUCCESS;
}
/**
 * Evaluates paragraph-vector quality on a paraphrase corpus. Documents are
 * assumed to come in consecutive pairs (2t, 2t+1); a document counts as
 * correct when its nearest neighbour (dot product of normalized paragraph
 * vectors) is its pair partner.
 *
 * @param nn the neural network used to generate paragraph vectors
 * @param corpus the paraphrase corpus
 * @param epochs number of epochs for paragraph-vector generation
 * @param verbose print progress information
 *
 * @return the accuracy in [0, 1]
 */
float
nlk_eval_on_paraphrases(struct nlk_neuralnet_t *nn,
const struct nlk_corpus_t *corpus,
unsigned int epochs,
const bool verbose)
{
/** @section Allocation and Initialization
*/
float accuracy = 0;
int correct = 0;
int total = 0;
/* generate paragraph vectors */
struct nlk_layer_lookup_t *par_table = nlk_pv_gen(nn, corpus,
epochs,
verbose);
NLK_ARRAY *par_vectors = par_table->weights;
/* normalize weights to make distance calculations easier */
if(verbose) {
nlk_tic("normalizing paragraph vectors", true);
}
nlk_array_normalize_row_vectors(par_vectors);
size_t limit = par_vectors->rows;
/** @section Eval Loop
*/
if(verbose) {
nlk_tic("evaluating", true);
}
/* NOTE(review): only corpus->len/2 documents are queried although the
* neighbour search runs over all rows -- presumably the pair layout;
* confirm against nlk_pv_gen's row ordering. */
#pragma omp parallel for reduction(+ : correct) reduction(+ : total)
for(size_t tt = 0; tt < corpus->len / 2; tt++) {
nlk_real best_similarity = 0;
size_t most_similar = 0;
nlk_real sim = 0;
for(size_t index = 0; index < limit; index++) {
if(index == tt) {
continue; /* ignore self */
}
sim = nlk_array_row_dot(par_vectors, tt, par_vectors, index);
if(sim > best_similarity) {
best_similarity = sim;
most_similar = index;
}
}
/*
nlk_debug("%zu -sim-> %zu (%f)\n", tt, most_similar, best_similarity);
*/
/* is result correct? the partner of an even index is index+1, of an
odd index it is index-1 */
if(tt % 2 == 0 && most_similar == tt + 1) {
correct++;
} else if(tt % 2 == 1 && most_similar == tt - 1) {
correct++;
}
total += 1;
} /* end of parallel for */
accuracy = (nlk_real) correct / (nlk_real) total;
if(verbose) {
printf("accuracy %f (%d/%d)\n", accuracy, correct, total);
}
nlk_layer_lookup_free(par_table);
return accuracy;
}
// Paraphrase evaluation over pre-generated paragraph vectors (pvs): same
// pairing scheme as nlk_eval_on_paraphrases, but only the even member of
// each pair is used as the query (tt += 2). Writes the accuracy through
// the out-parameter and returns NLK_SUCCESS.
int
nlk_eval_on_paraphrases_pre_gen(const NLK_ARRAY *pvs, size_t limit,
const int verbose, nlk_real *accuracy)
{
/** @section Allocation and Initialization
*/
*accuracy = 0;
int correct = 0;
int total = 0;
/* create resized copy (also protects the caller's array from the
normalization below) */
NLK_ARRAY *par_vectors = nlk_array_create_copy_limit(pvs, limit);
limit = par_vectors->rows;
if(verbose) {
printf("Dimensions: %zu %zu\n", par_vectors->rows, par_vectors->cols);
}
/* normalize weights to make distance calculations easier */
if(verbose) {
nlk_tic("normalizing paragraph vectors", true);
}
nlk_array_normalize_row_vectors(par_vectors);
/** @section Eval Loop
*/
if(verbose) {
nlk_tic("evaluating", true);
}
#pragma omp parallel reduction(+ : correct) reduction(+ : total)
{
#pragma omp for
for(size_t tt = 0; tt < limit; tt += 2) {
nlk_real best_similarity = 0;
size_t most_similar = 0;
nlk_real sim = 0;
for(size_t index = 0; index < limit; index++) {
if(index == tt) {
continue; /* ignore self */
}
sim = nlk_array_row_dot(par_vectors, tt, par_vectors, index);
if(sim > best_similarity) {
best_similarity = sim;
most_similar = index;
}
}
/* is result correct? */
#ifdef DEBUGPRINT
printf("%zu -sim-> %zu (sim = %f)\n", tt, most_similar,
best_similarity);
#endif
/* NOTE(review): tt is always even here, so the tt % 2 == 1 branch
* below can never fire. */
if(tt % 2 == 0 && most_similar == tt + 1) {
correct++;
} else if(tt % 2 == 1 && most_similar == tt - 1) {
correct++;
}
total += 1;
}
} /* end of parallel for */
*accuracy = (nlk_real) correct / (nlk_real) total;
if(verbose) {
printf("correct = %d/%zu\n", correct, limit / 2);
}
nlk_array_free(par_vectors);
return NLK_SUCCESS;
}
|
config.c |
#include "grid.h"
#include "config.h"
#include <hdf5.h>
#include <stdlib.h>
#include <assert.h>
#include <string.h>
#include <time.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <netdb.h>
#include <errno.h>
/* Threshold used when deciding whether to split work further
 * (usage is outside this excerpt -- TODO confirm exact semantics). */
const int WORK_SPLIT_THRESHOLD = 3;
// Current CLOCK_REALTIME time as seconds (nanosecond resolution).
// NOTE(review): despite the name, the returned unit is seconds, not ns.
double get_time_ns()
{
    struct timespec now;
    clock_gettime(CLOCK_REALTIME, &now);
    return now.tv_sec + (double)now.tv_nsec / 1000000000;
}
// Bounding box, in wavelengths, of the uvw track of baseline (a1, a2)
// between two time steps and two frequency steps: evaluate uvw at the two
// hour-angle endpoints, scale by the two frequency extremes, and take the
// component-wise min/max over the four corners. As the original comment
// notes, using only the endpoints is a simplification (assumes the track
// extrema lie at the endpoints).
void bl_bounding_box(struct vis_spec *spec,
int a1, int a2,
int tstep0, int tstep1,
int fstep0, int fstep1,
double *uvw_l_min, double *uvw_l_max)
{
struct ant_config *cfg = spec->cfg;
// Check time start and end (TODO - that's simplifying quite a bit)
double uvw0[3], uvw1[3];
ha_to_uvw_sc(cfg, a1, a2,
spec->ha_sin[tstep0], spec->ha_cos[tstep0],
spec->dec_sin, spec->dec_cos,
uvw0);
ha_to_uvw_sc(cfg, a1, a2,
spec->ha_sin[tstep1], spec->ha_cos[tstep1],
spec->dec_sin, spec->dec_cos,
uvw1);
// Conversion factor to uvw in lambda
double f0, f1;
f0 = spec->freq_start + spec->freq_step * fstep0;
f1 = spec->freq_start + spec->freq_step * fstep1;
double scale0 = uvw_m_to_l(1, f0),
scale1 = uvw_m_to_l(1, f1);
// Determine bounding box
int i = 0;
for (i = 0; i < 3; i++) {
uvw_l_min[i] = fmin(fmin(uvw0[i]*scale0, uvw0[i]*scale1),
fmin(uvw1[i]*scale0, uvw1[i]*scale1));
uvw_l_max[i] = fmax(fmax(uvw0[i]*scale0, uvw0[i]*scale1),
fmax(uvw1[i]*scale0, uvw1[i]*scale1));
}
}
// Compute the (inclusive) range of subgrid indices that baseline
// (a1,a2) can touch over the whole observation. sg_min/sg_max receive
// the u and v index bounds (2 ints each).
void bl_bounding_subgrids(struct vis_spec *spec,
                          double lam, double xA, int a1, int a2,
                          int *sg_min, int *sg_max)
{
    // Bounding box over the full time and frequency range
    double lmin[3], lmax[3];
    bl_bounding_box(spec, a1, a2,
                    0, spec->time_count - 1,
                    0, spec->freq_count - 1,
                    lmin, lmax);

    // Convert wavelength extent into subgrid indices
    sg_min[0] = (int)round(lmin[0] / lam / xA);
    sg_min[1] = (int)round(lmin[1] / lam / xA);
    sg_max[0] = (int)round(lmax[0] / lam / xA);
    sg_max[1] = (int)round(lmax[1] / lam / xA);
}
// Per-worker bookkeeping used while balancing subgrid work assignments.
struct worker_prio
{
    int worker; // worker index into cfg->subgrid_work
    int nbl;    // visibility chunks currently assigned to this worker
};
// qsort comparator: order struct worker_prio ascending by chunk count.
// FIX: the previous version returned only 0 or 1 (the result of '>'),
// which violates the qsort comparator contract (must return negative /
// zero / positive, and be antisymmetric) and yields an unspecified
// ordering. Use the canonical three-way comparison instead.
static int compare_prio_nbl(const void *_w1, const void *_w2)
{
    const struct worker_prio *w1 = (const struct worker_prio *)_w1;
    const struct worker_prio *w2 = (const struct worker_prio *)_w2;
    return (w1->nbl > w2->nbl) - (w1->nbl < w2->nbl);
}
// Count how many time/frequency chunks of baseline (a1,a2) overlap
// subgrid (iu,iv); if any do, accumulate the count into
// nbl[iv*nsubgrid+iu] and prepend a work entry to bls[iv*nsubgrid+iu].
// NOTE(review): called from inside an OpenMP loop in
// collect_baselines; this is only race-free because each (iu,iv) pair
// is processed by exactly one loop iteration.
static void bin_baseline(struct vis_spec *spec, double lam, double xA,
                         int *nbl, struct subgrid_work_bl **bls, int nsubgrid,
                         int a1, int a2, int iu, int iv)
{
    assert (iu >= 0 && iu < nsubgrid);
    assert (iv >= 0 && iv < nsubgrid);
    int chunks = 0, tchunk, fchunk;
    // Subgrid bounds in wavelengths; subgrid indices are centred on
    // nsubgrid/2
    double sg_min_u = lam * (xA*(iu-nsubgrid/2) - xA/2);
    double sg_min_v = lam * (xA*(iv-nsubgrid/2) - xA/2);
    double sg_max_u = lam * (xA*(iu-nsubgrid/2) + xA/2);
    double sg_max_v = lam * (xA*(iv-nsubgrid/2) + xA/2);
    // Number of time/frequency chunks (rounding up)
    int ntchunk = (spec->time_count + spec->time_chunk - 1) / spec->time_chunk;
    int nfchunk = (spec->freq_count + spec->freq_chunk - 1) / spec->freq_chunk;
    // Count number of overlapping chunks
    for (tchunk = 0; tchunk < ntchunk; tchunk++) {
        // Check frequencies. We adjust step length exponentially so
        // we can jump over non-matching space quicker, see
        // below. This bit of code is likely a bit too smart for its
        // own good!
        int fstep = 1;
        for (fchunk = 0; fchunk < nfchunk; fchunk+=fstep) {
            // Determine chunk bounding box (covering fstep chunks at once)
            double uvw_l_min[3], uvw_l_max[3];
            bl_bounding_box(spec, a1, a2,
                            tchunk * spec->time_chunk,
                            fmin(spec->time_count, (tchunk+1) * spec->time_chunk) - 1,
                            fchunk * spec->freq_chunk,
                            fmin(spec->freq_count, (fchunk+fstep) * spec->freq_chunk) - 1,
                            uvw_l_min, uvw_l_max);
            //printf("u: sg %g-%g chunk %g-%g\n", sg_min_u, sg_max_u, uvw_l_min[0], uvw_l_max[0]);
            //printf("v: sg %g-%g chunk %g-%g\n", sg_min_v, sg_max_v, uvw_l_min[1], uvw_l_max[1]);
            // Overlap test; the second clause checks the mirrored
            // (negated uvw) image of the baseline.
            if ((uvw_l_min[0] < sg_max_u && uvw_l_max[0] > sg_min_u &&
                 uvw_l_min[1] < sg_max_v && uvw_l_max[1] > sg_min_v) ||
                (-uvw_l_max[0] < sg_max_u && -uvw_l_min[0] > sg_min_u &&
                 -uvw_l_max[1] < sg_max_v && -uvw_l_min[1] > sg_min_v)) {
                if (fstep == 1) {
                    // Found a chunk
                    chunks++;
                } else {
                    // Went too fast. Decrease step length, recheck.
                    fstep /= 2;
                    fchunk -= fstep;
                }
            } else {
                // Speed up. Increase step length.
                fchunk -= fstep;
                fstep *= 2;
            }
        }
    }
    // No overlap at all - nothing to record
    if (!chunks)
        return;
    // Count
    nbl[iv*nsubgrid + iu]+=chunks;
    // Make sure we don't add a baseline twice
    // (the current list head must not already be this baseline)
    if (bls[iv * nsubgrid + iu]) {
        assert(bls[iv * nsubgrid + iu]->a1 != a1 ||
               bls[iv * nsubgrid + iu]->a2 != a2);
    }
    // Add work structure (prepend to the per-subgrid list)
    struct subgrid_work_bl *wbl = (struct subgrid_work_bl *)
        malloc(sizeof(struct subgrid_work_bl));
    wbl->a1 = a1; wbl->a2 = a2; wbl->chunks=chunks;
    wbl->next = bls[iv * nsubgrid + iu];
    bls[iv * nsubgrid + iu] = wbl;
}
// Bin baselines per overlapping subgrid (positive-u half only).
// Returns the subgrid grid dimension nsubgrid; *pnbl and *pbls receive
// newly-allocated per-subgrid chunk counts and baseline lists, whose
// ownership passes to the caller.
static int collect_baselines(struct vis_spec *spec,
                             double lam, double xA,
                             int **pnbl, struct subgrid_work_bl ***pbls,
                             bool dump_baselines)
{
    // Determine number of subgrid bins we need
    int nsubgrid = 2 * (int)ceil(1. / 2 / xA) + 1;
    int *nbl = (int *)calloc(sizeof(int), nsubgrid * nsubgrid);
    struct subgrid_work_bl **bls = (struct subgrid_work_bl **)
        calloc(sizeof(struct subgrid_work_bl *), nsubgrid * nsubgrid);
    // Determine baseline bounding boxes, flattened over a1 < a2
    int nbl_total = spec->cfg->ant_count * (spec->cfg->ant_count - 1) / 2;
    int *sg_mins = (int *)malloc(sizeof(int) * 2 * nbl_total),
        *sg_maxs = (int *)malloc(sizeof(int) * 2 * nbl_total);
    int a1, a2, bl = 0;
    for (a1 = 0; a1 < spec->cfg->ant_count; a1++) {
        for (a2 = a1+1; a2 < spec->cfg->ant_count; a2++, bl++) {
            bl_bounding_subgrids(spec, lam, xA, a1, a2, sg_mins + bl * 2, sg_maxs + bl * 2);
        }
    }
    // Bin every baseline into each subgrid it (or its mirrored image)
    // might touch. Parallel over subgrid positions: each (iu,iv) pair
    // is written by exactly one iteration, so bin_baseline is race-free.
    int iv, iu;
#pragma omp parallel for collapse(2) schedule(dynamic,8)
    for (iv = 0; iv < nsubgrid; iv++) {
        for (iu = nsubgrid/2; iu < nsubgrid; iu++) {
            int a1, a2, bl=0;
            for (a1 = 0; a1 < spec->cfg->ant_count; a1++) {
                for (a2 = a1+1; a2 < spec->cfg->ant_count; a2++, bl++) {
                    int *sg_min = sg_mins + bl * 2, *sg_max = sg_maxs + bl * 2;
                    // Baseline bounding box overlaps this subgrid...
                    if (iv >= nsubgrid/2+sg_min[1] && iv <= nsubgrid/2+sg_max[1] &&
                        iu >= nsubgrid/2+sg_min[0] && iu <= nsubgrid/2+sg_max[0]) {
                        bin_baseline(spec, lam, xA, nbl, bls, nsubgrid, a1, a2, iu, iv);
                    // ...or the mirrored (negated uvw) box does
                    } else if(iv >= nsubgrid/2-sg_max[1] && iv <= nsubgrid/2-sg_min[1] &&
                              iu >= nsubgrid/2-sg_max[0] && iu <= nsubgrid/2-sg_min[0]) {
                        bin_baseline(spec, lam, xA, nbl, bls, nsubgrid, a1, a2, iu, iv);
                    }
                }
            }
        }
    }
    free(sg_mins); free(sg_maxs);
    // Produce dump if requested
    if (dump_baselines) {
        printf("Baseline bins:\n---\niu,iv,chunks\n");
        for (iv = 0; iv < nsubgrid; iv++) {
            for (iu = nsubgrid/2; iu < nsubgrid; iu++) {
                int chunks = 0; struct subgrid_work_bl *bl;
                for (bl = bls[nsubgrid*iv+iu]; bl; bl=bl->next) {
                    chunks += bl->chunks;
                }
                if (chunks) {
                    printf("%d,%d,%d\n", iu, iv, chunks);
                }
            }
        }
        printf("---\n");
    }
    *pnbl = nbl;
    *pbls = bls;
    return nsubgrid;
}
// Detach baselines from the front of *bls until at least n chunks have
// been collected (always at least one baseline, if the list is
// non-empty). Returns the detached sub-list; *nchunks receives the
// number of chunks it contains.
static struct subgrid_work_bl *pop_chunks(struct subgrid_work_bl **bls, int n, int *nchunks)
{
    *nchunks = 0;
    assert(n >= 1);
    struct subgrid_work_bl *head = *bls;
    if (!head)
        return NULL;
    // Advance while more chunks are still needed and entries remain
    struct subgrid_work_bl *last = head;
    while (last->chunks < n && last->next) {
        *nchunks += last->chunks;
        n -= last->chunks;
        last = last->next;
    }
    *nchunks += last->chunks;
    // Split the list after 'last'
    *bls = last->next;
    last->next = NULL;
    return head;
}
static bool generate_subgrid_work_assignment(struct work_config *cfg)
{
struct vis_spec *spec = &cfg->spec;
// Count visibilities per sub-grid
double xA = (double)cfg->recombine.xA_size / cfg->recombine.image_size;
int *nbl; struct subgrid_work_bl **bls;
printf("Covering %d time steps, %d channels, %d baselines\n",
cfg->spec.time_count, cfg->spec.freq_count,
cfg->spec.cfg->ant_count * (cfg->spec.cfg->ant_count - 1) / 2);
printf("Binning chunks...\n");
double start = get_time_ns();
int nsubgrid = collect_baselines(spec, cfg->recombine.image_size / cfg->theta,
xA, &nbl, &bls, cfg->config_dump_baseline_bins);
printf(" %g s\n", get_time_ns() - start);
// Count how many sub-grids actually have visibilities
int npop = 0, nbl_total = 0, nbl_max = 0;
int iu, iv;
for (iv = 0; iv < nsubgrid; iv++) {
for (iu = nsubgrid/2; iu < nsubgrid; iu++) {
if (nbl[iv * nsubgrid + iu]) {
npop++;
nbl_total+=nbl[iv * nsubgrid + iu];
if (nbl[iv * nsubgrid + iu] > nbl_max)
nbl_max = nbl[iv * nsubgrid + iu];
}
}
}
double coverage = (double)npop * cfg->recombine.xA_size * cfg->recombine.xA_size
/ cfg->recombine.image_size / cfg->recombine.image_size;
// We don't want bins that are too full compared to the average -
// determine at what point we're going to split them.
int work_max_nbl = (int)fmax(WORK_SPLIT_THRESHOLD * nbl_total / npop,
(nbl_max + cfg->subgrid_workers - 1) / cfg->subgrid_workers);
printf("%d subgrid baseline bins (%.3g%% coverage), %.5g average chunks per subgrid, "
"splitting above %d\n",
npop, coverage*100, (double)nbl_total / npop, work_max_nbl);
// Now count again how much work we have total, and per
// column. Note that we ignore grid data at u < 0, as transferring
// half the grid is enough to reconstruct a real-valued image.
int nwork = 0, max_work_column = 0;
for (iu = nsubgrid/2; iu < nsubgrid; iu++) {
int nwork_start = nwork;
for (iv = 0; iv < nsubgrid; iv++) {
int nv = nbl[iv * nsubgrid + iu];
nwork += (nv + work_max_nbl - 1) / work_max_nbl;
}
// How much work in this column?
if (nwork - nwork_start > max_work_column)
max_work_column = nwork-nwork_start;
}
// Allocate work description
cfg->subgrid_max_work = (nwork + cfg->subgrid_workers - 1) / cfg->subgrid_workers;
cfg->subgrid_work = (struct subgrid_work *)
calloc(sizeof(struct subgrid_work), cfg->subgrid_workers * cfg->subgrid_max_work);
printf("%d split subgrid baseline bins, %d per worker\n", nwork, cfg->subgrid_max_work);
// Worker priority order for acquiring new work
struct worker_prio *worker_prio = malloc(sizeof(worker_prio) * cfg->subgrid_workers);
int i;
for (i = 0; i < cfg->subgrid_workers; i++) {
worker_prio[i].worker = i;
worker_prio[i].nbl = 0;
}
// Go through columns and assign work
int iworker = 0, iwork = 0;
for (iu = nsubgrid/2; iu < nsubgrid; iu++) {
// Generate column of work
int start_bl;
for (iv = 0; iv < nsubgrid; iv++) {
int nv = nbl[iv * nsubgrid + iu];
for (start_bl = 0; start_bl < nv; start_bl += work_max_nbl) {
// Assign work to next worker
struct subgrid_work *work =
cfg->subgrid_work + iworker * cfg->subgrid_max_work + iwork;
work->iu = iu - nsubgrid/2;
work->iv = iv - nsubgrid/2;
work->subgrid_off_u = cfg->recombine.xA_size * work->iu;
work->subgrid_off_v = cfg->recombine.xA_size * work->iv;
work->bls = pop_chunks(&bls[iv * nsubgrid + iu], work_max_nbl,
&work->nbl);
// Save back how many chunks were assigned
worker_prio[iworker].nbl += work->nbl;
iworker++;
if (iworker >= cfg->subgrid_workers) {
iworker = 0;
iwork++;
}
}
}
}
// Determine average
int64_t sum = 0;
for (i = 0; i < cfg->subgrid_workers; i++) {
sum += worker_prio[i].nbl;
}
int average = sum / cfg->subgrid_workers;
// Swap work to even out profile
bool improvement; int nswaps = 0;
do {
improvement = false;
// Sort worker priority
qsort(worker_prio, cfg->subgrid_workers, sizeof(void *), compare_prio_nbl);
// Walk through worker pairs
int prio1 = 0, prio2 = cfg->subgrid_workers - 1;
while(prio1 < prio2) {
int diff = worker_prio[prio2].nbl - worker_prio[prio1].nbl;
int worker1 = worker_prio[prio1].worker;
int worker2 = worker_prio[prio2].worker;
// Find a work item to switch
int iwork;
struct subgrid_work *work1 = cfg->subgrid_work + worker1 * cfg->subgrid_max_work;
struct subgrid_work *work2 = cfg->subgrid_work + worker2 * cfg->subgrid_max_work;
int best = -1, best_diff = diff;
for (iwork = 0; iwork < cfg->subgrid_max_work; iwork++) {
int wdiff = work2[iwork].nbl - work1[iwork].nbl;
if (abs(diff - 2*wdiff) < best_diff) {
best = iwork; best_diff = abs(diff - 2*wdiff);
}
}
// Found a swap?
if (best != -1) {
struct subgrid_work w = work1[best];
work1[best] = work2[best];
work2[best] = w;
worker_prio[prio1].nbl += work1[best].nbl - work2[best].nbl;
worker_prio[prio2].nbl += work2[best].nbl - work1[best].nbl;
improvement = true;
nswaps++;
break;
}
// Step workers. Keep the one that is further away from the
// average.
if (abs(worker_prio[prio2].nbl - average) >
abs(worker_prio[prio1].nbl - average)) {
prio1++;
} else {
prio2--;
}
}
} while(improvement);
// Statistics
int min_vis = INT_MAX, max_vis = 0;
cfg->iu_min = INT_MAX; cfg->iu_max = INT_MIN;
cfg->iv_min = INT_MAX; cfg->iv_max = INT_MIN;
for (i = 0; i < cfg->subgrid_workers; i++) {
int j; int vis = 0;
for (j = 0; j < cfg->subgrid_max_work; j++) {
struct subgrid_work *work = cfg->subgrid_work + i* cfg->subgrid_max_work+j;
if (work->iu < cfg->iu_min) cfg->iu_min = work->iu;
if (work->iu > cfg->iu_max) cfg->iu_max = work->iu;
if (work->iv < cfg->iv_min) cfg->iv_min = work->iv;
if (work->iv > cfg->iv_max) cfg->iv_max = work->iv;
vis += work->nbl;
//printf("%d ", work->nbl);
}
//printf(" -> %d %d\n", vis, worker_prio[i].nbl);
min_vis = fmin(vis, min_vis);
max_vis = fmax(vis, max_vis);
}
printf("Assigned workers %d chunks min, %d chunks max (after %d swaps)\n", min_vis, max_vis, nswaps);
if (cfg->config_dump_subgrid_work) {
printf("Subgrid work (after swaps):\n---\nworker,work,chunks\n");
for (i = 0; i < cfg->subgrid_workers; i++) {
int j;
for (j = 0; j < cfg->subgrid_max_work; j++) {
struct subgrid_work *work = cfg->subgrid_work + i* cfg->subgrid_max_work+j;
if (work->nbl > 0) {
printf("%d,%d,%d\n", i,j, work->nbl);
}
}
}
puts("---");
}
return true;
}
// Assign facet work: one item per facet position inside the field of
// view, distributed round-robin over the facet workers.
static bool generate_facet_work_assignment(struct work_config *cfg)
{
    if (cfg->facet_workers == 0) return true;

    // Assume every facet within the FoV is set. theta is generally
    // larger than the FoV, so this will not cover the entire image.
    double yB = (double)cfg->recombine.yB_size / cfg->recombine.image_size;
    int nfacet = 2 * ceil(cfg->spec.fov / cfg->theta / yB / 2 - 0.5) + 1;
    printf("%dx%d facets covering %g FoV (facet %g, grid theta %g)\n",
           nfacet, nfacet, cfg->spec.fov, cfg->theta * yB, cfg->theta);

    // Allocate (zero-initialised) work array
    cfg->facet_max_work = (nfacet * nfacet + cfg->facet_workers - 1) / cfg->facet_workers;
    cfg->facet_count = nfacet * nfacet;
    cfg->facet_work = (struct facet_work *)
        calloc(sizeof(struct facet_work), cfg->facet_workers * cfg->facet_max_work);

    // Round-robin facet -> worker assignment
    int i;
    for (i = 0; i < nfacet * nfacet; i++) {
        struct facet_work *work = cfg->facet_work
            + cfg->facet_max_work * (i % cfg->facet_workers)
            + (i / cfg->facet_workers);
        work->il = (i / nfacet) - nfacet / 2;
        work->im = (i % nfacet) - nfacet / 2;
        work->facet_off_l = work->il * cfg->recombine.yB_size;
        work->facet_off_m = work->im * cfg->recombine.yB_size;
        work->set = true;
    }
    return true;
}
// Pure redistribution setup (no visibilities): one dummy work item per
// subgrid and per facet, so all grid data is moved exactly once.
static bool generate_full_redistribute_assignment(struct work_config *cfg)
{
    assert(!cfg->spec.time_count);

    // Subgrid work: one item per subgrid position
    int nsubgrid = cfg->recombine.image_size / cfg->recombine.xA_size;
    int subgrid_work = nsubgrid * nsubgrid;
    cfg->subgrid_max_work = (subgrid_work + cfg->subgrid_workers - 1) / cfg->subgrid_workers;
    cfg->subgrid_work = (struct subgrid_work *)
        calloc(sizeof(struct subgrid_work), cfg->subgrid_max_work * cfg->subgrid_workers);
    int i;
    for (i = 0; i < subgrid_work; i++) {
        struct subgrid_work *work = cfg->subgrid_work + i;
        work->iu = i / nsubgrid;
        work->iv = i % nsubgrid;
        work->subgrid_off_u = work->iu * cfg->recombine.xA_size;
        work->subgrid_off_v = work->iv * cfg->recombine.xA_size;
        work->nbl = 1;
        // Dummy 0-0 baseline so downstream loops see one entry
        work->bls = (struct subgrid_work_bl *)calloc(sizeof(struct subgrid_work_bl), 1);
    }
    cfg->iu_min = cfg->iv_min = 0;
    cfg->iu_max = cfg->iv_max = nsubgrid - 1;

    if (cfg->facet_workers == 0) return true;

    // Facet work: one item per facet, round-robin over workers
    int nfacet = cfg->recombine.image_size / cfg->recombine.yB_size;
    cfg->facet_max_work = (nfacet * nfacet + cfg->facet_workers - 1) / cfg->facet_workers;
    cfg->facet_count = nfacet * nfacet;
    cfg->facet_work = (struct facet_work *)
        calloc(sizeof(struct facet_work), cfg->facet_max_work * cfg->facet_workers);
    for (i = 0; i < nfacet * nfacet; i++) {
        struct facet_work *work = cfg->facet_work
            + cfg->facet_max_work * (i % cfg->facet_workers)
            + (i / cfg->facet_workers);
        work->il = i / nfacet;
        work->im = i % nfacet;
        work->facet_off_l = work->il * cfg->recombine.yB_size;
        work->facet_off_m = work->im * cfg->recombine.yB_size;
        work->set = true;
    }
    return true;
}
// Fill *cfg with default values. Must be called before any other
// config_* function (in particular, it marks the statsd socket closed).
void config_init(struct work_config *cfg)
{
    // Initialise structure to all-zero, then set non-zero defaults
    memset(cfg, 0, sizeof(*cfg));
    // Gridder defaults
    cfg->gridder_x0 = 0.5;
    // Debug dump switches
    cfg->config_dump_baseline_bins = false;
    cfg->config_dump_subgrid_work = false;
    // Producer settings
    cfg->produce_parallel_cols = false;
    cfg->produce_retain_bf = true;
    cfg->produce_source_count = 0;
    cfg->produce_source_checks = 16384;
    cfg->produce_batch_rows = 16;
    cfg->produce_queue_length = 4;
    // Visibility handling
    cfg->vis_skip_metadata = true;
    cfg->vis_bls_per_task = 256;
    cfg->vis_subgrid_queue_length = 4;
    cfg->vis_task_queue_length = 96;
    cfg->vis_chunk_queue_length = 32768;
    // statsd reporting (disabled until config_set_statsd succeeds)
    cfg->statsd_socket = -1;
    cfg->statsd_rate = 1;
}
// Print integer x to stdout, abbreviating exact mebi/kibi multiples
// with "M"/"k" suffixes (e.g. 2097152 -> "2M", 2048 -> "2k", 1000 -> "1000").
static void print_power2(int x)
{
    const int mebi = 1024 * 1024;
    if (x >= mebi && x % mebi == 0)
        printf("%dM", x / mebi);
    else if (x >= 1024 && x % 1024 == 0)
        printf("%dk", x / 1024);
    else
        printf("%d", x);
}
// Set the recombination configuration. Prints a summary, then delegates
// to recombine2d_set_config; returns its success flag.
bool config_set(struct work_config *cfg,
                int image_size, int subgrid_spacing,
                char *pswf_file,
                int yB_size, int yN_size, int yP_size,
                int xA_size, int xM_size, int xMxN_yP_size)
{
    // Announce what we are configuring
    printf("\nInitialising recombination (image size "); print_power2(image_size);
    printf(", facet FFT "); print_power2(yP_size);
    printf(", subgrid FFT "); print_power2(xM_size);
    printf(")...\n");

    // Delegate to the recombination module
    return recombine2d_set_config(&cfg->recombine, image_size, subgrid_spacing, pswf_file,
                                  yB_size, yN_size, yP_size,
                                  xA_size, xM_size, xMxN_yP_size)
        ? true : false;
}
void config_free(struct work_config *cfg)
{
free(cfg->vis_path);
free(cfg->facet_work);
free(cfg->gridder_path);
free(cfg->grid_correction);
int i;
for (i = 0; i < cfg->subgrid_workers * cfg->subgrid_max_work; i++) {
while (cfg->subgrid_work[i].bls) {
struct subgrid_work_bl *bl = cfg->subgrid_work[i].bls;
cfg->subgrid_work[i].bls = cfg->subgrid_work[i].bls->next;
free(bl);
}
}
free(cfg->subgrid_work);
free(cfg->spec.ha_sin);
free(cfg->spec.ha_cos);
if (cfg->statsd_socket != -1) close(cfg->statsd_socket);
cfg->statsd_socket = -1;
}
// Store the visibility specification and grid theta, and pre-compute
// per-time-step hour-angle sin/cos tables plus declination sin/cos.
void config_set_visibilities(struct work_config *cfg,
                             struct vis_spec *spec, double theta,
                             const char *vis_path)
{
    // Copy specification and parameters
    cfg->spec = *spec;
    cfg->theta = theta;
    if (vis_path)
        cfg->vis_path = strdup(vis_path);

    // Cache sin/cos of the hour angle for every time step
    // (hour angle in hours -> radians via pi/12)
    cfg->spec.ha_sin = (double *)malloc(sizeof(double) * cfg->spec.time_count);
    cfg->spec.ha_cos = (double *)malloc(sizeof(double) * cfg->spec.time_count);
    int it;
    for (it = 0; it < cfg->spec.time_count; it++) {
        double t = spec->time_start + spec->time_step * it;
        cfg->spec.ha_sin[it] = sin(t * M_PI / 12);
        cfg->spec.ha_cos[it] = cos(t * M_PI / 12);
    }

    // Declination sin/cos
    cfg->spec.dec_sin = sin(cfg->spec.dec);
    cfg->spec.dec_cos = cos(cfg->spec.dec);
}
// Load the de-gridding kernel description from an HDF5 file: the
// gridder accuracy limit x0 ("sepkern/x0") and the grid correction
// function ("sepkern/corr"), linearly rescaled to image_size points if
// necessary. A NULL gridder_path leaves the configuration untouched.
// Returns false on read failure.
// FIX: px0 was leaked when the grid correction could not be read.
bool config_set_degrid(struct work_config *cfg, const char *gridder_path)
{
    if (!gridder_path)
        return true;

    // Clear existing data, if any
    cfg->gridder_x0 = 0.5;
    free(cfg->gridder_path); cfg->gridder_path = NULL;
    free(cfg->grid_correction); cfg->grid_correction = NULL;

    // Get gridder's accuracy limit
    double *px0 = (double *)read_hdf5(sizeof(double), gridder_path, "sepkern/x0");
    if (!px0) return false;
    printf("Gridder %s with x0=%g\n", gridder_path, *px0);

    // Get grid correction dimensions, then read grid correction
    int ncorr = get_npoints_hdf5(gridder_path, "sepkern/corr");
    double *grid_corr = read_hdf5(sizeof(double) * ncorr, gridder_path, "sepkern/corr");
    if (!grid_corr) {
        fprintf(stderr, "ERROR: Could not read grid correction from %s!\n", gridder_path);
        free(px0); // FIX: previously leaked on this error path
        return false;
    }

    // Need to rescale? This is linear, therefore worth a
    // warning. Might want to do a "sinc" interpolation instead at
    // some point? Could be more appropriate.
    if (ncorr != cfg->recombine.image_size) {
        if (ncorr % cfg->recombine.image_size != 0) {
            fprintf(stderr, "WARNING: Rescaling grid correction from %d to %d points!\n",
                    ncorr, cfg->recombine.image_size);
        }
        int i;
        cfg->grid_correction = (double *)malloc(sizeof(double) * cfg->recombine.image_size);
        for (i = 0; i < cfg->recombine.image_size; i++) {
            // Linear interpolation between neighbouring samples
            // (wrapping around at the end)
            double j = (double)i * ncorr / cfg->recombine.image_size;
            int j0 = (int)floor(j), j1 = (j0 + 1) % ncorr;
            double w = j - j0;
            cfg->grid_correction[i] = (1 - w) * grid_corr[j0] + w * grid_corr[j1];
        }
        free(grid_corr);
    } else {
        // Exact size - take ownership of the buffer directly
        cfg->grid_correction = grid_corr;
    }

    cfg->gridder_x0 = *px0;
    cfg->gridder_path = strdup(gridder_path);
    free(px0);
    return true;
}
// Open a UDP socket to a statsd server at node:service, replacing any
// previously-open one. On failure the socket stays closed (-1) and
// false is returned.
// FIX: the two stderr error messages were missing their trailing
// newlines.
bool config_set_statsd(struct work_config *cfg,
                       const char *node, const char *service)
{
    // Close any previously-open socket first
    if (cfg->statsd_socket != -1) close(cfg->statsd_socket);
    cfg->statsd_socket = -1;

    // Resolve statsd address
    struct addrinfo hints, *result;
    memset(&hints, 0, sizeof(hints));
    hints.ai_family = AF_UNSPEC;
    hints.ai_socktype = SOCK_DGRAM;
    int ret = getaddrinfo(node, service, &hints, &result);
    if (ret != 0) {
        fprintf(stderr, "ERROR: Could not resolve statsd address (%s)\n", gai_strerror(ret));
        return false;
    }

    // Try each candidate address until a socket connects
    struct addrinfo *addr = NULL;
    for (addr = result; addr; addr = addr->ai_next) {
        // Attempt to create socket
        cfg->statsd_socket = socket(addr->ai_family, addr->ai_socktype, addr->ai_protocol);
        if (cfg->statsd_socket == -1)
            continue;
        // And connect
        if (connect(cfg->statsd_socket, addr->ai_addr, addr->ai_addrlen) != -1)
            break;
        close(cfg->statsd_socket); cfg->statsd_socket = -1;
    }
    if (cfg->statsd_socket == -1) {
        fprintf(stderr, "ERROR: Could not create statsd socket (%s)\n", strerror(errno));
        freeaddrinfo(result);
        return false;
    }
    freeaddrinfo(result);

    // Initialise stats
    printf("Opened statsd connection to %s:%s\n", node, service);
    return true;
}
// Send one statsd line over the configured UDP socket. On short write
// or error the socket is closed and further sends become no-ops.
// FIX: strlen was evaluated twice, and the write() result (ssize_t)
// was compared against strlen (size_t) - a signed/unsigned mismatch.
void config_send_statsd(struct work_config *cfg, const char *stat)
{
    if (cfg->statsd_socket == -1)
        return;
    size_t len = strlen(stat);
    if (write(cfg->statsd_socket, stat, len) != (ssize_t)len) {
        fprintf(stderr, "ERROR: Failed to send to statsd (%s)\n", strerror(errno));
        close(cfg->statsd_socket);
        cfg->statsd_socket = -1;
    }
}
// Attach input file paths to every assigned facet work item. path_fmt
// is a printf-style template taking (im, il); hdf5 (optional) is the
// dataset name stored alongside each path.
void config_load_facets(struct work_config *cfg,
                        const char *path_fmt,
                        const char *hdf5)
{
    const int nwork = cfg->facet_workers * cfg->facet_max_work;
    int i;
    for (i = 0; i < nwork; i++) {
        struct facet_work *work = cfg->facet_work + i;
        if (!work->set)
            continue;
        char path[256];
        snprintf(path, 256, path_fmt, work->im, work->il);
        work->path = strdup(path);
        work->hdf5 = hdf5 ? strdup(hdf5) : NULL;
    }
}
// Configure optional consistency checks for all non-empty subgrid work
// items: file paths (printf-style templates taking iv, iu) and error
// thresholds for subgrid, facet-term and degrid checks.
void config_check_subgrids(struct work_config *cfg,
                           double threshold, double fct_threshold,
                           double degrid_threshold,
                           const char *check_fmt,
                           const char *check_fct_fmt,
                           const char *check_degrid_fmt,
                           const char *hdf5)
{
    const int nwork = cfg->subgrid_workers * cfg->subgrid_max_work;
    int i;
    for (i = 0; i < nwork; i++) {
        struct subgrid_work *work = cfg->subgrid_work + i;
        if (!work->nbl)
            continue;
        char path[256];
        if (check_fmt) {
            snprintf(path, 256, check_fmt, work->iv, work->iu);
            work->check_path = strdup(path);
        }
        if (check_fct_fmt) {
            snprintf(path, 256, check_fct_fmt, work->iv, work->iu);
            work->check_fct_path = strdup(path);
        }
        if (check_degrid_fmt) {
            snprintf(path, 256, check_degrid_fmt, work->iv, work->iu);
            work->check_degrid_path = strdup(path);
        }
        work->check_hdf5 = hdf5 ? strdup(hdf5) : NULL;
        work->check_threshold = threshold;
        work->check_fct_threshold = fct_threshold;
        work->check_degrid_threshold = degrid_threshold;
    }
}
// Record the worker counts and generate the facet/subgrid work
// assignments. With no visibility spec (time_count == 0) a pure
// redistribution assignment is generated instead.
bool config_assign_work(struct work_config *cfg,
                        int facet_workers, int subgrid_workers)
{
    cfg->facet_workers = facet_workers;
    cfg->subgrid_workers = subgrid_workers;

    // Generate work assignments
    if (cfg->spec.time_count == 0) {
        if (!generate_full_redistribute_assignment(cfg))
            return false;
    } else {
        printf("\nGenerating work assignments...\n");
        if (!generate_facet_work_assignment(cfg))
            return false;
        if (!generate_subgrid_work_assignment(cfg))
            return false;
    }

    // Warn if we have multiple facets per worker
    if (cfg->facet_max_work > 1) {
        printf("WARNING: %d facets, but only %d workers. Consider more MPI ranks.\n",
               cfg->facet_count, cfg->facet_workers);
    }
    return true;
}
// Make baseline specification. Right now this is the same for every
// baseline, but this will change for baseline dependent averaging.
// Fills *bl with time, uvw (metres) and frequency axes for baseline
// (a1,a2); the caller owns (and must free) the allocated arrays.
void vis_spec_to_bl_data(struct bl_data *bl, struct vis_spec *spec,
                         int a1, int a2)
{
    int t, f;
    bl->antenna1 = a1;
    bl->antenna2 = a2;

    // Time axis
    bl->time_count = spec->time_count;
    bl->time = (double *)malloc(sizeof(double) * spec->time_count);
    for (t = 0; t < spec->time_count; t++)
        bl->time[t] = spec->time_start + spec->time_step * t;

    // uvw in metres, one triple per time step, from cached
    // hour-angle/declination sin/cos tables
    bl->uvw_m = (double *)malloc(sizeof(double) * spec->time_count * 3);
    for (t = 0; t < spec->time_count; t++)
        ha_to_uvw_sc(spec->cfg, a1, a2,
                     spec->ha_sin[t], spec->ha_cos[t],
                     spec->dec_sin, spec->dec_cos,
                     bl->uvw_m + t * 3);

    // Frequency axis
    bl->freq_count = spec->freq_count;
    bl->freq = (double *)malloc(sizeof(double) * spec->freq_count);
    for (f = 0; f < spec->freq_count; f++)
        bl->freq[f] = spec->freq_start + spec->freq_step * f;
}
// Create the HDF5 group hierarchy (a1/a2) holding visibility chunks
// for every baseline this worker is responsible for (all baselines if
// worker < 0). Returns true on success.
// Fixes relative to the previous version:
//  - on create_vis_group failure the function returned 1 (== true),
//    wrongly signalling success to the caller;
//  - bl_work (and the per-baseline arrays on that failure path) were
//    leaked.
bool create_bl_groups(hid_t vis_group, struct work_config *work_cfg, int worker)
{
    struct vis_spec *spec = &work_cfg->spec;
    struct ant_config *cfg = spec->cfg;

    // Map baselines to work items (only needed when restricted to one worker)
    struct subgrid_work **bl_work = NULL;
    if (worker >= 0) {
        bl_work = (struct subgrid_work **)
            calloc(sizeof(struct subgrid_work *), cfg->ant_count * cfg->ant_count);
        struct subgrid_work *work = work_cfg->subgrid_work + worker * work_cfg->subgrid_max_work;
        int iwork;
        for (iwork = 0; iwork < work_cfg->subgrid_max_work; iwork++) {
            if (work[iwork].nbl == 0) continue;
            struct subgrid_work_bl *bl;
            for (bl = work[iwork].bls; bl; bl = bl->next) {
                // Note this might overlap (here: overwrite). We are just
                // interested in an example below.
                bl_work[bl->a1 * cfg->ant_count + bl->a2] = &work[iwork];
            }
        }
    }

    int a1, a2;
    int ncreated = 0;
    uint64_t nvis = 0;
    double create_start = get_time_ns();
    for (a1 = 0; a1 < cfg->ant_count; a1++) {
        // Progress message
        if (a1 % 32 == 0) { printf("%d ", a1); fflush(stdout); }
        hid_t a1_g = 0;
        for (a2 = a1+1; a2 < cfg->ant_count; a2++) {
            // Skip baselines this worker does not handle
            if (bl_work && !bl_work[a1 * cfg->ant_count + a2])
                continue;
            // Create outer antenna group lazily
            if (!a1_g) {
                char a1name[12];
                sprintf(a1name, "%d", a1);
                a1_g = H5Gcreate(vis_group, a1name, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
                if (a1_g < 0) {
                    fprintf(stderr, "Could not open '%s' antenna group!\n", a1name);
                    free(bl_work); // FIX: was leaked
                    return false;
                }
            }
            // Create inner antenna group
            char a2name[12];
            sprintf(a2name, "%d", a2);
            hid_t a2_g = H5Gcreate(a1_g, a2name, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
            if (a2_g < 0) {
                fprintf(stderr, "Could not open '%s' antenna group!\n", a2name);
                H5Gclose(a1_g);
                free(bl_work); // FIX: was leaked
                return false;
            }
            // Create baseline structure (TODO: baseline-dependent averaging...)
            struct bl_data bl;
            vis_spec_to_bl_data(&bl, spec, a1, a2);
            // Write to visibility group
            if (!create_vis_group(a2_g, spec->freq_chunk, spec->time_chunk,
                                  work_cfg->vis_skip_metadata, &bl)) {
                H5Gclose(a2_g); H5Gclose(a1_g);
                free(bl.time); free(bl.uvw_m); free(bl.freq); // FIX: was leaked
                free(bl_work); // FIX: was leaked
                return false;  // FIX: was "return 1" (== true) on failure
            }
            // Statistics & cleanups
            ncreated++;
            nvis += bl.time_count * bl.freq_count;
            free(bl.time); free(bl.uvw_m); free(bl.freq);
            H5Gclose(a2_g);
        }
        if (a1_g) H5Gclose(a1_g);
    }
    free(bl_work); // FIX: was leaked
    printf("\ndone in %.2fs, %d groups for up to %ld visibilities (~%.3f GB) created\n",
           get_time_ns() -create_start, ncreated, nvis, 16. * nvis / 1000000000);
    return true;
}
|
maxpool_with_mask.h | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
/*
* Highly specialized code, only works for TP3 L1
*/
#pragma once
#include "core/common/common.h"
#include "core/framework/op_kernel.h"
#include "core/framework/tensor.h"
#include "core/providers/cpu/nn/pool_base.h"
namespace onnxruntime {
namespace contrib {
// Max pooling with an additional int32 mask input: while scanning a
// pooling window, encountering a zero mask value stops the scan (see
// NOTE(review) remarks below on the exact semantics per rank).
// "Highly specialized code, only works for TP3 L1" per the file header.
class MaxpoolWithMask : public OpKernel, public PoolBase {
 public:
  MaxpoolWithMask(const OpKernelInfo& info) : OpKernel(info), PoolBase(info) {}

  // Inputs:  X (float, >= 3 dims: N x C x spatial...), M (int32 mask).
  // Output:  Y (float), shape computed by PoolBase::SetOutputSize.
  Status Compute(OpKernelContext* context) const override {
    const Tensor* X = context->Input<Tensor>(0);
    const Tensor* M = context->Input<Tensor>(1);
    const TensorShape& x_shape = X->Shape();
    const TensorShape& m_shape = M->Shape();
    ORT_RETURN_IF_NOT(x_shape.NumDimensions() >= 3, "Input dimension cannot be less than 3.");
    //TODO: fix this checker later
    //ONNXRUNTIME_RETURN_IF_NOT((x_shape[2] == m_shape[2]) && (x_shape[3] == m_shape[3]), " Input shape and mask shape mismatch: ", x_shape, " vs ", m_shape);
    std::vector<int64_t> pads = pads_;
    std::vector<int64_t> kernel_shape = kernel_shape_;
    std::vector<int64_t> output_dims = PoolBase::SetOutputSize(x_shape, x_shape[1], &pads, dilations_, ceil_mode_);
    Tensor* Y = context->Output(0, TensorShape(output_dims));
    const float* X_data = X->template Data<float>();
    const int32_t* M_data = M->template Data<int32_t>();
    float* Y_data = Y->template MutableData<float>();
    // The main loop: dispatch on spatial rank (1D / 2D / 3D pooling)
    int64_t channels = x_shape[1];
    int64_t height = x_shape[2];
    int64_t width = kernel_shape.size() > 1 ? x_shape[3] : 1;
    int64_t depth = kernel_shape.size() > 2 ? x_shape[4] : 1;
    int64_t pooled_height = output_dims[2];
    int64_t pooled_width = kernel_shape.size() > 1 ? output_dims[3] : 1;
    int64_t pooled_depth = kernel_shape.size() > 2 ? output_dims[4] : 1;
    switch (kernel_shape.size()) {
      case 1: {
        // 1D pooling: iterate over N*C planes, one window per output element
        int64_t x_step = height;
        int64_t y_step = pooled_height;
        const int64_t total_channels = x_shape[0] * channels;
        const int64_t total_mask_channels = m_shape[0] * m_shape[1];
#ifdef USE_OPENMP
#pragma omp parallel for
#endif
        for (int64_t c = 0; c < total_channels; ++c) {
          const float* x_d = X_data + c * x_step;
          // NOTE(review): the mask offset is an ELEMENT offset taken
          // modulo m_shape[0]*m_shape[1] (a channel count) - presumably
          // intended to broadcast the mask over batch/channels; confirm.
          const int32_t* m_d = M_data + (c * x_step) % total_mask_channels;
          float* y_d = Y_data + c * y_step;
          for (int64_t ph = 0; ph < pooled_height; ++ph) {
            int64_t hstart = ph * stride_h() - pads[0];
            int64_t hend = std::min(hstart + kernel_shape[0], height);
            hstart = std::max(hstart, static_cast<int64_t>(0));
            // If no unmasked element is seen, the output stays at lowest()
            float Yh = std::numeric_limits<float>::lowest();
            for (int64_t h = hstart; h < hend; ++h) {
              // NOTE(review): h >= 0 always holds here (hstart is
              // clamped to 0); contrast with "input_index > 0" in the
              // 2D/3D cases below - confirm which check is intended.
              if (h >= 0 && m_d[h] == 0) break;  // if mask == 0, stop
              if (x_d[h] > Yh) {
                Yh = x_d[h];
              }
            }
            y_d[ph] = Yh;
          }
        }
        break;
      }
      case 2: {
        // 2D pooling over height x width windows
        int64_t x_step = height * width;
        int64_t y_step = pooled_height * pooled_width;
        const int64_t total_channels = x_shape[0] * channels;
        const int64_t total_mask_channels = m_shape[0] * m_shape[1];
#ifdef USE_OPENMP
#pragma omp parallel for
#endif
        for (int64_t c = 0; c < total_channels; ++c) {
          const float* x_d = X_data + c * x_step;
          // See the NOTE(review) on mask indexing in the 1D case
          const int32_t* m_d = M_data + (c * x_step) % total_mask_channels;
          float* y_d = Y_data + c * y_step;
          for (int64_t ph = 0; ph < pooled_height; ++ph) {
            int64_t hstart = ph * stride_h() - pads[0];
            int64_t hend = std::min(hstart + kernel_shape[0], height);
            hstart = std::max(hstart, static_cast<int64_t>(0));
            for (int64_t pw = 0; pw < pooled_width; ++pw) {
              int64_t wstart = pw * stride_w() - pads[1];
              int64_t wend = std::min(wstart + kernel_shape[1], width);
              wstart = std::max(wstart, static_cast<int64_t>(0));
              const int64_t pool_index = ph * pooled_width + pw;
              float Yh = std::numeric_limits<float>::lowest();
              for (int64_t h = hstart; h < hend; ++h) {
                for (int64_t w = wstart; w < wend; ++w) {
                  const int64_t input_index = h * width + w;
                  // NOTE(review): "> 0" skips the check at index 0, and
                  // break only leaves the innermost (w) loop - confirm
                  // both are intended.
                  if (input_index > 0 && m_d[input_index] == 0) break;  // if mask == 0, break
                  if (x_d[input_index] > Yh) {
                    Yh = x_d[input_index];
                  }
                }
              }
              y_d[pool_index] = Yh;
            }
          }
        }
        break;
      }
      case 3: {
        // 3D pooling over height x width x depth windows
        int64_t x_step = height * width * depth;
        int64_t y_step = pooled_height * pooled_width * pooled_depth;
        const int64_t total_channels = x_shape[0] * channels;
        const int64_t total_mask_channels = m_shape[0] * m_shape[1];
#ifdef USE_OPENMP
#pragma omp parallel for
#endif
        for (int64_t c = 0; c < total_channels; ++c) {
          const float* x_d = X_data + c * x_step;
          // See the NOTE(review) on mask indexing in the 1D case
          const int32_t* m_d = M_data + (c * x_step) % total_mask_channels;
          float* y_d = Y_data + c * y_step;
          for (int64_t ph = 0; ph < pooled_height; ++ph) {
            int64_t hstart = ph * stride_h() - pads[0];
            int64_t hend = std::min(hstart + kernel_shape[0], height);
            hstart = std::max(hstart, static_cast<int64_t>(0));
            for (int64_t pw = 0; pw < pooled_width; ++pw) {
              int64_t wstart = pw * stride_w() - pads[1];
              int64_t wend = std::min(wstart + kernel_shape[1], width);
              wstart = std::max(wstart, static_cast<int64_t>(0));
              for (int64_t pd = 0; pd < pooled_depth; ++pd) {
                int64_t dstart = pd * stride_d() - pads[2];
                int64_t dend = std::min(dstart + kernel_shape[2], depth);
                dstart = std::max(dstart, static_cast<int64_t>(0));
                const int64_t pool_index =
                    ph * pooled_width * pooled_depth + pw * pooled_depth + pd;
                float Yh = std::numeric_limits<float>::lowest();
                for (int64_t h = hstart; h < hend; ++h) {
                  for (int64_t w = wstart; w < wend; ++w) {
                    for (int64_t d = dstart; d < dend; ++d) {
                      const int64_t input_index = h * width * depth + w * depth + d;
                      // See the NOTE(review) in the 2D case: "> 0" and
                      // innermost-only break.
                      if (input_index > 0 && m_d[input_index] == 0) break;  // if mask == 0, break
                      if (x_d[input_index] > Yh) {
                        Yh = x_d[input_index];
                      }
                    }
                  }
                }
                y_d[pool_index] = Yh;
              }
            }
          }
        }
        break;
      }
      default:
        return Status(common::ONNXRUNTIME, common::INVALID_ARGUMENT, "Unsupported pooling size : ");
    }
    return Status::OK();
  }
};
} // namespace contrib
} // namespace onnxruntime
|
morphology.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M M OOO RRRR PPPP H H OOO L OOO GGGG Y Y %
% MM MM O O R R P P H H O O L O O G Y Y %
% M M M O O RRRR PPPP HHHHH O O L O O G GGG Y %
% M M O O R R P H H O O L O O G G Y %
% M M OOO R R P H H OOO LLLLL OOO GGG Y %
% %
% %
% MagickCore Morphology Methods %
% %
% Software Design %
% Anthony Thyssen %
% January 2010 %
% %
% %
% Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Morphology is the application of various kernels, of any size or shape, to an
% image in various ways (typically binary, but not always).
%
% Convolution (weighted sum or average) is just one specific type of
% morphology.  Just one that is very common for image blurring and sharpening
% effects. Not only 2D Gaussian blurring, but also 2-pass 1D Blurring.
%
% This module provides not only a general morphology function, and the ability
% to apply more advanced or iterative morphologies, but also functions for the
% generation of many different types of kernel arrays from user supplied
% arguments.  Perhaps even the generation of a kernel from a small image.
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/color-private.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/gem-private.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/linked-list.h"
#include "MagickCore/list.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/morphology.h"
#include "MagickCore/morphology-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/prepress.h"
#include "MagickCore/quantize.h"
#include "MagickCore/resource_.h"
#include "MagickCore/registry.h"
#include "MagickCore/semaphore.h"
#include "MagickCore/splay-tree.h"
#include "MagickCore/statistic.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/token.h"
#include "MagickCore/utility.h"
#include "MagickCore/utility-private.h"
/*
Other global definitions used by module.
*/
/* In-place clamp helpers: replace 'assign' with the smaller (Minimize) or
   larger (Maximize) of itself and 'value'.  Both macro arguments are
   evaluated more than once by MagickMin/MagickMax -- do not pass
   expressions with side effects. */
#define Minimize(assign,value) assign=MagickMin(assign,value)
#define Maximize(assign,value) assign=MagickMax(assign,value)
/* Integer Factorial Function - for a Binomial kernel */
#if 1
/* Integer factorial: return n! by iterated multiplication (0! == 1).
   Note: the result silently wraps for n large enough to overflow size_t. */
static inline size_t fact(size_t n)
{
  size_t
    result,
    term;

  result=1;
  for (term=2; term <= n; term++)
    result*=term;
  return(result);
}
#elif 1 /* glibc floating point alternatives */
#define fact(n) ((size_t)tgamma((double)n+1))
#else
#define fact(n) ((size_t)lgamma((double)n+1))
#endif
/* Currently these are only internal to this module */
static void
CalcKernelMetaData(KernelInfo *),
ExpandMirrorKernelInfo(KernelInfo *),
ExpandRotateKernelInfo(KernelInfo *, const double),
RotateKernelInfo(KernelInfo *, double);
/* Quick function to find last kernel in a kernel list */
static inline KernelInfo *LastKernelInfo(KernelInfo *kernel)
{
  KernelInfo
    *last;

  /* follow the 'next' links until the final list entry is reached */
  for (last=kernel; last->next != (KernelInfo *) NULL; )
    last=last->next;
  return(last);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireKernelInfo() takes the given string (generally supplied by the
% user) and converts it into a Morphology/Convolution Kernel. This allows
% users to specify a kernel from a number of pre-defined kernels, or to fully
% specify their own kernel for a specific Convolution or Morphology
% Operation.
%
% The kernel so generated can be any rectangular array of floating point
% values (doubles) with the 'control point' or 'pixel being affected'
% anywhere within that array of values.
%
% Previously IM was restricted to a square of odd size using the exact
% center as origin, this is no longer the case, and any rectangular kernel
% with any value being declared the origin. This in turn allows the use of
% highly asymmetrical kernels.
%
% The floating point values in the kernel can also include a special value
% known as 'nan' or 'not a number' to indicate that this value is not part
% of the kernel array. This allows you to shaped the kernel within its
% rectangular area. That is 'nan' values provide a 'mask' for the kernel
% shape. However at least one non-nan value must be provided for correct
% working of a kernel.
%
% The returned kernel should be freed using the DestroyKernelInfo() when you
% are finished with it. Do not free this memory yourself.
%
% Input kernel definition strings can consist of any of three types.
%
% "name:args[[@><]"
% Select from one of the built in kernels, using the name and
% geometry arguments supplied. See AcquireKernelBuiltIn()
%
% "WxH[+X+Y][@><]:num, num, num ..."
% a kernel of size W by H, with W*H floating point numbers following.
% the 'center' can be optionally be defined at +X+Y (such that +0+0
% is top left corner). If not defined the pixel in the center, for
% odd sizes, or to the immediate top or left of center for even sizes
% is automatically selected.
%
% "num, num, num, num, ..."
% list of floating point numbers defining an 'old style' odd sized
% square kernel. At least 9 values should be provided for a 3x3
% square kernel, 25 for a 5x5 square kernel, 49 for 7x7, etc.
% Values can be space or comma separated. This is not recommended.
%
% You can define a 'list of kernels' which can be used by some morphology
% operators A list is defined as a semi-colon separated list kernels.
%
% " kernel ; kernel ; kernel ; "
%
% Any extra ';' characters, at start, end or between kernel defintions are
% simply ignored.
%
% The special flags will expand a single kernel, into a list of rotated
% kernels. A '@' flag will expand a 3x3 kernel into a list of 45-degree
% cyclic rotations, while a '>' will generate a list of 90-degree rotations.
% The '<' also expands using 90-degree rotates, but giving a 180-degree
% reflected kernel before the +/- 90-degree rotations, which can be important
% for Thinning operations.
%
% Note that 'name' kernels will start with an alphabetic character while the
% new kernel specification has a ':' character in its specification string.
% If neither is the case, it is assumed an old style of a simple list of
% numbers generating a odd-sized square kernel has been given.
%
% The format of the AcquireKernal method is:
%
% KernelInfo *AcquireKernelInfo(const char *kernel_string)
%
% A description of each parameter follows:
%
% o kernel_string: the Morphology/Convolution kernel wanted.
%
*/
/* This was separated so that it could be used as a separate
** array input handling function, such as for -color-matrix
*/
/*
  ParseKernelArray() converts a user supplied string of the form
  "WxH[+X+Y][@><]:num, num, ..." -- or a bare list of numbers forming an
  odd-sized square kernel -- into a freshly allocated KernelInfo.

  Returns NULL on any parse or allocation failure.  On success the caller
  owns the kernel and must release it with DestroyKernelInfo().
*/
static KernelInfo *ParseKernelArray(const char *kernel_string)
{
  KernelInfo
    *kernel;

  char
    token[MagickPathExtent];

  const char
    *p,
    *end;

  register ssize_t
    i;

  double
    nan = sqrt((double)-1.0);  /* Special Value : Not A Number */

  MagickStatusType
    flags;

  GeometryInfo
    args;

  kernel=(KernelInfo *) AcquireMagickMemory(sizeof(*kernel));
  if (kernel == (KernelInfo *) NULL)
    return(kernel);
  (void) memset(kernel,0,sizeof(*kernel));
  kernel->minimum = kernel->maximum = kernel->angle = 0.0;
  kernel->negative_range = kernel->positive_range = 0.0;
  kernel->type = UserDefinedKernel;
  kernel->next = (KernelInfo *) NULL;
  kernel->signature=MagickCoreSignature;
  if (kernel_string == (const char *) NULL)
    return(kernel);  /* NULL input yields a single empty kernel */

  /* find end of this specific kernel definition string */
  end = strchr(kernel_string, ';');
  if ( end == (char *) NULL )
    end = strchr(kernel_string, '\0');

  /* clear flags - for Expanding kernel lists through rotations */
  flags = NoValue;

  /* Has a ':' in argument - New user kernel specification
    FUTURE: this split on ':' could be done by StringToken()
  */
  p = strchr(kernel_string, ':');
  if ( p != (char *) NULL && p < end)
    {
      /* BUGFIX: reject a geometry prefix too long for the token buffer;
         the unchecked memcpy() below previously overflowed 'token' */
      if ( (size_t) (p-kernel_string) >= sizeof(token) )
        return(DestroyKernelInfo(kernel));
      /* ParseGeometry() needs the geometry separated! -- Arrgghh */
      memcpy(token, kernel_string, (size_t) (p-kernel_string));
      token[p-kernel_string] = '\0';
      SetGeometryInfo(&args);
      flags = ParseGeometry(token, &args);

      /* Size handling and checks of geometry settings */
      if ( (flags & WidthValue) == 0 ) /* if no width then */
        args.rho = args.sigma;         /* then width = height */
      if ( args.rho < 1.0 )            /* if width too small */
        args.rho = 1.0;                /* then width = 1 */
      if ( args.sigma < 1.0 )          /* if height too small */
        args.sigma = args.rho;         /* then height = width */
      kernel->width = (size_t)args.rho;
      kernel->height = (size_t)args.sigma;

      /* Offset Handling and Checks */
      if ( args.xi < 0.0 || args.psi < 0.0 )
        return(DestroyKernelInfo(kernel));
      kernel->x = ((flags & XValue)!=0) ? (ssize_t)args.xi
        : (ssize_t) (kernel->width-1)/2;
      kernel->y = ((flags & YValue)!=0) ? (ssize_t)args.psi
        : (ssize_t) (kernel->height-1)/2;
      if ( kernel->x >= (ssize_t) kernel->width ||
           kernel->y >= (ssize_t) kernel->height )
        return(DestroyKernelInfo(kernel));

      p++; /* advance beyond the ':' */
    }
  else
    { /* ELSE - Old old specification, forming odd-square kernel */
      /* count up number of values given */
      p=(const char *) kernel_string;
      while ((isspace((int) ((unsigned char) *p)) != 0) || (*p == '\''))
        p++;  /* ignore "'" chars for convolve filter usage - Cristy */
      for (i=0; p < end; i++)
      {
        (void) GetNextToken(p,&p,MagickPathExtent,token);
        if (*token == ',')
          (void) GetNextToken(p,&p,MagickPathExtent,token);
      }
      /* set the size of the kernel - old sized square */
      kernel->width = kernel->height= (size_t) sqrt((double) i+1.0);
      kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
      p=(const char *) kernel_string;
      while ((isspace((int) ((unsigned char) *p)) != 0) || (*p == '\''))
        p++;  /* ignore "'" chars for convolve filter usage - Cristy */
    }

  /* Read in the kernel values from rest of input string argument */
  kernel->values=(MagickRealType *) MagickAssumeAligned(AcquireAlignedMemory(
    kernel->width,kernel->height*sizeof(*kernel->values)));
  if (kernel->values == (MagickRealType *) NULL)
    return(DestroyKernelInfo(kernel));
  kernel->minimum=MagickMaximumValue;   /* sentinel: no real value seen yet */
  kernel->maximum=(-MagickMaximumValue);
  kernel->negative_range = kernel->positive_range = 0.0;

  for (i=0; (i < (ssize_t) (kernel->width*kernel->height)) && (p < end); i++)
  {
    (void) GetNextToken(p,&p,MagickPathExtent,token);
    if (*token == ',')
      (void) GetNextToken(p,&p,MagickPathExtent,token);
    if ( LocaleCompare("nan",token) == 0
        || LocaleCompare("-",token) == 0 ) {
      kernel->values[i] = nan; /* this value is not part of neighbourhood */
    }
    else {
      kernel->values[i] = StringToDouble(token,(char **) NULL);
      ( kernel->values[i] < 0)
          ?  ( kernel->negative_range += kernel->values[i] )
          :  ( kernel->positive_range += kernel->values[i] );
      Minimize(kernel->minimum, kernel->values[i]);
      Maximize(kernel->maximum, kernel->values[i]);
    }
  }

  /* sanity check -- no more values in kernel definition */
  (void) GetNextToken(p,&p,MagickPathExtent,token);
  if ( *token != '\0' && *token != ';' && *token != '\'' )
    return(DestroyKernelInfo(kernel));

#if 0
  /* this was the old method of handling a incomplete kernel */
  if ( i < (ssize_t) (kernel->width*kernel->height) ) {
    Minimize(kernel->minimum, kernel->values[i]);
    Maximize(kernel->maximum, kernel->values[i]);
    for ( ; i < (ssize_t) (kernel->width*kernel->height); i++)
      kernel->values[i]=0.0;
  }
#else
  /* Number of values for kernel was not enough - Report Error */
  if ( i < (ssize_t) (kernel->width*kernel->height) )
    return(DestroyKernelInfo(kernel));
#endif

  /* check that we received at least one real (non-nan) value! */
  if ( kernel->minimum == MagickMaximumValue )
    return(DestroyKernelInfo(kernel));

  if ( (flags & AreaValue) != 0 )         /* '@' symbol in kernel size */
    ExpandRotateKernelInfo(kernel, 45.0); /* cyclic rotate 3x3 kernels */
  else if ( (flags & GreaterValue) != 0 ) /* '>' symbol in kernel args */
    ExpandRotateKernelInfo(kernel, 90.0); /* 90 degree rotate of kernel */
  else if ( (flags & LessValue) != 0 )    /* '<' symbol in kernel args */
    ExpandMirrorKernelInfo(kernel);       /* 90 degree mirror rotate */

  return(kernel);
}
/*
  ParseKernelName() parses a 'named' built-in kernel specification of the
  form "name[:args]" (e.g. "Gaussian:0x2"), applies per-type defaults for
  any geometry arguments the user omitted, and builds the kernel via
  AcquireKernelBuiltIn().  Returns NULL if the leading token is not a valid
  kernel name.  The caller owns the result; free with DestroyKernelInfo().
*/
static KernelInfo *ParseKernelName(const char *kernel_string,
  ExceptionInfo *exception)
{
  char
    token[MagickPathExtent];

  const char
    *p,
    *end;

  GeometryInfo
    args;

  KernelInfo
    *kernel;

  MagickStatusType
    flags;

  ssize_t
    type;

  /* Parse special 'named' kernel */
  (void) GetNextToken(kernel_string,&p,MagickPathExtent,token);
  type=ParseCommandOption(MagickKernelOptions,MagickFalse,token);
  if ( type < 0 || type == UserDefinedKernel )
    return((KernelInfo *) NULL);  /* not a valid named kernel */

  /* skip the name/argument separator characters */
  while (((isspace((int) ((unsigned char) *p)) != 0) ||
          (*p == ',') || (*p == ':' )) && (*p != '\0') && (*p != ';'))
    p++;

  end = strchr(p, ';');  /* end of this kernel definition */
  if ( end == (char *) NULL )
    end = strchr(p, '\0');

  /* BUGFIX: reject an argument string too long for the token buffer;
     the unchecked memcpy() below previously overflowed 'token' */
  if ( (size_t) (end-p) >= sizeof(token) )
    return((KernelInfo *) NULL);

  /* ParseGeometry() needs the geometry separated! -- Arrgghh */
  memcpy(token, p, (size_t) (end-p));
  token[end-p] = '\0';
  SetGeometryInfo(&args);
  flags = ParseGeometry(token, &args);

#if 0
  /* For Debugging Geometry Input */
  (void) FormatLocaleFile(stderr, "Geometry = 0x%04X : %lg x %lg %+lg %+lg\n",
    flags, args.rho, args.sigma, args.xi, args.psi );
#endif

  /* special handling of missing values in input string */
  switch( type ) {
    /* Shape Kernel Defaults */
    case UnityKernel:
      if ( (flags & WidthValue) == 0 )
        args.rho = 1.0;    /* Default scale = 1.0, zero is valid */
      break;
    case SquareKernel:
    case DiamondKernel:
    case OctagonKernel:
    case DiskKernel:
    case PlusKernel:
    case CrossKernel:
      if ( (flags & HeightValue) == 0 )
        args.sigma = 1.0;  /* Default scale = 1.0, zero is valid */
      break;
    case RingKernel:
      if ( (flags & XValue) == 0 )
        args.xi = 1.0;     /* Default scale = 1.0, zero is valid */
      break;
    case RectangleKernel:  /* Rectangle - set size defaults */
      if ( (flags & WidthValue) == 0 )  /* if no width then */
        args.rho = args.sigma;          /* then width = height */
      if ( args.rho < 1.0 )             /* if width too small */
        args.rho = 3;                   /* then width = 3 */
      if ( args.sigma < 1.0 )           /* if height too small */
        args.sigma = args.rho;          /* then height = width */
      if ( (flags & XValue) == 0 )      /* center offset if not defined */
        args.xi = (double)(((ssize_t)args.rho-1)/2);
      if ( (flags & YValue) == 0 )
        args.psi = (double)(((ssize_t)args.sigma-1)/2);
      break;
    /* Distance Kernel Defaults */
    case ChebyshevKernel:
    case ManhattanKernel:
    case OctagonalKernel:
    case EuclideanKernel:
      if ( (flags & HeightValue) == 0 )            /* no distance scale */
        args.sigma = 100.0;                        /* default distance scaling */
      else if ( (flags & AspectValue ) != 0 )      /* '!' flag */
        args.sigma = QuantumRange/(args.sigma+1);  /* maximum pixel distance */
      else if ( (flags & PercentValue ) != 0 )     /* '%' flag */
        args.sigma *= QuantumRange/100.0;          /* percentage of color range */
      break;
    default:
      break;
  }

  kernel = AcquireKernelBuiltIn((KernelInfoType)type, &args, exception);
  if ( kernel == (KernelInfo *) NULL )
    return(kernel);

  /* global expand to rotated kernel list - only for single kernels */
  if ( kernel->next == (KernelInfo *) NULL ) {
    if ( (flags & AreaValue) != 0 )         /* '@' symbol in kernel args */
      ExpandRotateKernelInfo(kernel, 45.0);
    else if ( (flags & GreaterValue) != 0 ) /* '>' symbol in kernel args */
      ExpandRotateKernelInfo(kernel, 90.0);
    else if ( (flags & LessValue) != 0 )    /* '<' symbol in kernel args */
      ExpandMirrorKernelInfo(kernel);
  }

  return(kernel);
}
/*
  AcquireKernelInfo() converts a kernel specification string -- a
  semicolon-separated list of named and/or user-defined kernels, or
  "@filename" to read the specification from a file -- into a linked list
  of KernelInfo structures.  Returns NULL on failure.  The caller must
  free the result with DestroyKernelInfo().
*/
MagickExport KernelInfo *AcquireKernelInfo(const char *kernel_string,
  ExceptionInfo *exception)
{
  KernelInfo
    *kernel,
    *new_kernel;
  char
    *kernel_cache,
    token[MagickPathExtent];
  const char
    *p;
  /* a NULL string is handed to ParseKernelArray(), which returns a
     single empty user-defined kernel for it */
  if (kernel_string == (const char *) NULL)
    return(ParseKernelArray(kernel_string));
  p=kernel_string;
  kernel_cache=(char *) NULL;
  if (*kernel_string == '@')
    {
      /* '@file' : read the kernel specification from the named file;
         the cache string is freed before returning */
      kernel_cache=FileToString(kernel_string+1,~0UL,exception);
      if (kernel_cache == (char *) NULL)
        return((KernelInfo *) NULL);
      p=(const char *) kernel_cache;
    }
  kernel=NULL;
  /* peek at the next token (without advancing p) until input is exhausted */
  while (GetNextToken(p,(const char **) NULL,MagickPathExtent,token), *token != '\0')
  {
    /* ignore extra or multiple ';' kernel separators */
    if (*token != ';')
      {
        /* tokens starting with alpha is a Named kernel */
        if (isalpha((int) ((unsigned char) *token)) != 0)
          new_kernel=ParseKernelName(p,exception);
        else /* otherwise a user defined kernel array */
          new_kernel=ParseKernelArray(p);
        /* Error handling -- this is not proper error handling!
           (any parse failure discards the whole list built so far) */
        if (new_kernel == (KernelInfo *) NULL)
          {
            if (kernel != (KernelInfo *) NULL)
              kernel=DestroyKernelInfo(kernel);
            return((KernelInfo *) NULL);
          }
        /* initialise or append the kernel list */
        if (kernel == (KernelInfo *) NULL)
          kernel=new_kernel;
        else
          LastKernelInfo(kernel)->next=new_kernel;
      }
    /* look for the next kernel in list */
    p=strchr(p,';');
    if (p == (char *) NULL)
      break;
    p++;
  }
  if (kernel_cache != (char *) NULL)
    kernel_cache=DestroyString(kernel_cache);
  return(kernel);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e K e r n e l B u i l t I n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireKernelBuiltIn() returned one of the 'named' built-in types of
% kernels used for special purposes such as gaussian blurring, skeleton
% pruning, and edge distance determination.
%
% They take a KernelType, and a set of geometry style arguments, which were
% typically decoded from a user supplied string, or from a more complex
% Morphology Method that was requested.
%
% The format of the AcquireKernalBuiltIn method is:
%
% KernelInfo *AcquireKernelBuiltIn(const KernelInfoType type,
% const GeometryInfo args)
%
% A description of each parameter follows:
%
% o type: the pre-defined type of kernel wanted
%
% o args: arguments defining or modifying the kernel
%
% Convolution Kernels
%
% Unity
% The a No-Op or Scaling single element kernel.
%
% Gaussian:{radius},{sigma}
% Generate a two-dimensional gaussian kernel, as used by -gaussian.
% The sigma for the curve is required. The resulting kernel is
% normalized,
%
% If 'sigma' is zero, you get a single pixel on a field of zeros.
%
% NOTE: that the 'radius' is optional, but if provided can limit (clip)
% the final size of the resulting kernel to a square 2*radius+1 in size.
% The radius should be at least 2 times that of the sigma value, or
% sever clipping and aliasing may result. If not given or set to 0 the
% radius will be determined so as to produce the best minimal error
% result, which is usally much larger than is normally needed.
%
% LoG:{radius},{sigma}
%    "Laplacian of a Gaussian" or "Mexican Hat" Kernel.
% The supposed ideal edge detection, zero-summing kernel.
%
% An alturnative to this kernel is to use a "DoG" with a sigma ratio of
% approx 1.6 (according to wikipedia).
%
% DoG:{radius},{sigma1},{sigma2}
% "Difference of Gaussians" Kernel.
% As "Gaussian" but with a gaussian produced by 'sigma2' subtracted
% from the gaussian produced by 'sigma1'. Typically sigma2 > sigma1.
% The result is a zero-summing kernel.
%
% Blur:{radius},{sigma}[,{angle}]
% Generates a 1 dimensional or linear gaussian blur, at the angle given
% (current restricted to orthogonal angles). If a 'radius' is given the
% kernel is clipped to a width of 2*radius+1. Kernel can be rotated
% by a 90 degree angle.
%
% If 'sigma' is zero, you get a single pixel on a field of zeros.
%
% Note that two convolutions with two "Blur" kernels perpendicular to
% each other, is equivalent to a far larger "Gaussian" kernel with the
% same sigma value, However it is much faster to apply. This is how the
% "-blur" operator actually works.
%
% Comet:{width},{sigma},{angle}
% Blur in one direction only, much like how a bright object leaves
% a comet like trail. The Kernel is actually half a gaussian curve,
% Adding two such blurs in opposite directions produces a Blur Kernel.
% Angle can be rotated in multiples of 90 degrees.
%
% Note that the first argument is the width of the kernel and not the
% radius of the kernel.
%
% Binomial:[{radius}]
% Generate a discrete kernel using a 2 dimentional Pascel's Triangle
% of values. Used for special forma of image filters.
%
% # Still to be implemented...
% #
% # Filter2D
% # Filter1D
% # Set kernel values using a resize filter, and given scale (sigma)
% # Cylindrical or Linear. Is this possible with an image?
% #
%
% Named Constant Convolution Kernels
%
% All these are unscaled, zero-summing kernels by default. As such for
% non-HDRI version of ImageMagick some form of normalization, user scaling,
% and biasing the results is recommended, to prevent the resulting image
% being 'clipped'.
%
% The 3x3 kernels (most of these) can be circularly rotated in multiples of
% 45 degrees to generate the 8 angled varients of each of the kernels.
%
% Laplacian:{type}
% Discrete Lapacian Kernels, (without normalization)
% Type 0 : 3x3 with center:8 surounded by -1 (8 neighbourhood)
% Type 1 : 3x3 with center:4 edge:-1 corner:0 (4 neighbourhood)
% Type 2 : 3x3 with center:4 edge:1 corner:-2
% Type 3 : 3x3 with center:4 edge:-2 corner:1
% Type 5 : 5x5 laplacian
% Type 7 : 7x7 laplacian
% Type 15 : 5x5 LoG (sigma approx 1.4)
% Type 19 : 9x9 LoG (sigma approx 1.4)
%
% Sobel:{angle}
% Sobel 'Edge' convolution kernel (3x3)
% | -1, 0, 1 |
% | -2, 0,-2 |
% | -1, 0, 1 |
%
% Roberts:{angle}
% Roberts convolution kernel (3x3)
% | 0, 0, 0 |
% | -1, 1, 0 |
% | 0, 0, 0 |
%
% Prewitt:{angle}
% Prewitt Edge convolution kernel (3x3)
% | -1, 0, 1 |
% | -1, 0, 1 |
% | -1, 0, 1 |
%
% Compass:{angle}
% Prewitt's "Compass" convolution kernel (3x3)
% | -1, 1, 1 |
% | -1,-2, 1 |
% | -1, 1, 1 |
%
% Kirsch:{angle}
% Kirsch's "Compass" convolution kernel (3x3)
% | -3,-3, 5 |
% | -3, 0, 5 |
% | -3,-3, 5 |
%
% FreiChen:{angle}
% Frei-Chen Edge Detector is based on a kernel that is similar to
% the Sobel Kernel, but is designed to be isotropic. That is it takes
% into account the distance of the diagonal in the kernel.
%
% | 1, 0, -1 |
% | sqrt(2), 0, -sqrt(2) |
% | 1, 0, -1 |
%
% FreiChen:{type},{angle}
%
% Frei-Chen Pre-weighted kernels...
%
% Type 0: default un-nomalized version shown above.
%
% Type 1: Orthogonal Kernel (same as type 11 below)
% | 1, 0, -1 |
% | sqrt(2), 0, -sqrt(2) | / 2*sqrt(2)
% | 1, 0, -1 |
%
% Type 2: Diagonal form of Kernel...
% | 1, sqrt(2), 0 |
% | sqrt(2), 0, -sqrt(2) | / 2*sqrt(2)
% | 0, -sqrt(2) -1 |
%
% However this kernel is als at the heart of the FreiChen Edge Detection
% Process which uses a set of 9 specially weighted kernel. These 9
% kernels not be normalized, but directly applied to the image. The
% results is then added together, to produce the intensity of an edge in
% a specific direction. The square root of the pixel value can then be
% taken as the cosine of the edge, and at least 2 such runs at 90 degrees
% from each other, both the direction and the strength of the edge can be
% determined.
%
% Type 10: All 9 of the following pre-weighted kernels...
%
% Type 11: | 1, 0, -1 |
% | sqrt(2), 0, -sqrt(2) | / 2*sqrt(2)
% | 1, 0, -1 |
%
% Type 12: | 1, sqrt(2), 1 |
% | 0, 0, 0 | / 2*sqrt(2)
% | 1, sqrt(2), 1 |
%
% Type 13: | sqrt(2), -1, 0 |
% | -1, 0, 1 | / 2*sqrt(2)
% | 0, 1, -sqrt(2) |
%
% Type 14: | 0, 1, -sqrt(2) |
% | -1, 0, 1 | / 2*sqrt(2)
% | sqrt(2), -1, 0 |
%
% Type 15: | 0, -1, 0 |
% | 1, 0, 1 | / 2
% | 0, -1, 0 |
%
% Type 16: | 1, 0, -1 |
% | 0, 0, 0 | / 2
% | -1, 0, 1 |
%
% Type 17: | 1, -2, 1 |
% | -2, 4, -2 | / 6
% | -1, -2, 1 |
%
% Type 18: | -2, 1, -2 |
% | 1, 4, 1 | / 6
% | -2, 1, -2 |
%
% Type 19: | 1, 1, 1 |
% | 1, 1, 1 | / 3
% | 1, 1, 1 |
%
% The first 4 are for edge detection, the next 4 are for line detection
% and the last is to add a average component to the results.
%
% Using a special type of '-1' will return all 9 pre-weighted kernels
% as a multi-kernel list, so that you can use them directly (without
% normalization) with the special "-set option:morphology:compose Plus"
% setting to apply the full FreiChen Edge Detection Technique.
%
% If 'type' is large it will be taken to be an actual rotation angle for
% the default FreiChen (type 0) kernel. As such FreiChen:45 will look
% like a Sobel:45 but with 'sqrt(2)' instead of '2' values.
%
% WARNING: The above was layed out as per
% http://www.math.tau.ac.il/~turkel/notes/edge_detectors.pdf
% But rotated 90 degrees so direction is from left rather than the top.
% I have yet to find any secondary confirmation of the above. The only
% other source found was actual source code at
% http://ltswww.epfl.ch/~courstiv/exos_labos/sol3.pdf
% Neither paper defines the kernels in a way that looks logical or
% correct when taken as a whole.
%
% Boolean Kernels
%
% Diamond:[{radius}[,{scale}]]
% Generate a diamond shaped kernel with given radius to the points.
% Kernel size will again be radius*2+1 square and defaults to radius 1,
% generating a 3x3 kernel that is slightly larger than a square.
%
% Square:[{radius}[,{scale}]]
% Generate a square shaped kernel of size radius*2+1, and defaulting
% to a 3x3 (radius 1).
%
% Octagon:[{radius}[,{scale}]]
% Generate octagonal shaped kernel of given radius and constant scale.
% Default radius is 3 producing a 7x7 kernel. A radius of 1 will result
% in "Diamond" kernel.
%
% Disk:[{radius}[,{scale}]]
% Generate a binary disk, thresholded at the radius given, the radius
% may be a float-point value. Final Kernel size is floor(radius)*2+1
% square. A radius of 5.3 is the default.
%
% NOTE: That a low radii Disk kernels produce the same results as
% many of the previously defined kernels, but differ greatly at larger
% radii. Here is a table of equivalences...
% "Disk:1" => "Diamond", "Octagon:1", or "Cross:1"
% "Disk:1.5" => "Square"
% "Disk:2" => "Diamond:2"
% "Disk:2.5" => "Octagon"
% "Disk:2.9" => "Square:2"
% "Disk:3.5" => "Octagon:3"
% "Disk:4.5" => "Octagon:4"
% "Disk:5.4" => "Octagon:5"
% "Disk:6.4" => "Octagon:6"
% All other Disk shapes are unique to this kernel, but because a "Disk"
% is more circular when using a larger radius, using a larger radius is
% preferred over iterating the morphological operation.
%
% Rectangle:{geometry}
% Simply generate a rectangle of 1's with the size given. You can also
% specify the location of the 'control point', otherwise the closest
% pixel to the center of the rectangle is selected.
%
% Properly centered and odd sized rectangles work the best.
%
% Symbol Dilation Kernels
%
% These kernel is not a good general morphological kernel, but is used
% more for highlighting and marking any single pixels in an image using,
% a "Dilate" method as appropriate.
%
% For the same reasons iterating these kernels does not produce the
% same result as using a larger radius for the symbol.
%
% Plus:[{radius}[,{scale}]]
% Cross:[{radius}[,{scale}]]
% Generate a kernel in the shape of a 'plus' or a 'cross' with
% a each arm the length of the given radius (default 2).
%
% NOTE: "plus:1" is equivalent to a "Diamond" kernel.
%
% Ring:{radius1},{radius2}[,{scale}]
% A ring of the values given that falls between the two radii.
% Defaults to a ring of approximataly 3 radius in a 7x7 kernel.
% This is the 'edge' pixels of the default "Disk" kernel,
% More specifically, "Ring" -> "Ring:2.5,3.5,1.0"
%
% Hit and Miss Kernels
%
% Peak:radius1,radius2
% Find any peak larger than the pixels the fall between the two radii.
% The default ring of pixels is as per "Ring".
% Edges
% Find flat orthogonal edges of a binary shape
% Corners
% Find 90 degree corners of a binary shape
% Diagonals:type
% A special kernel to thin the 'outside' of diagonals
% LineEnds:type
% Find end points of lines (for pruning a skeletion)
% Two types of lines ends (default to both) can be searched for
% Type 0: All line ends
% Type 1: single kernel for 4-conneected line ends
% Type 2: single kernel for simple line ends
% LineJunctions
% Find three line junctions (within a skeletion)
% Type 0: all line junctions
% Type 1: Y Junction kernel
% Type 2: Diagonal T Junction kernel
% Type 3: Orthogonal T Junction kernel
% Type 4: Diagonal X Junction kernel
% Type 5: Orthogonal + Junction kernel
% Ridges:type
% Find single pixel ridges or thin lines
% Type 1: Fine single pixel thick lines and ridges
% Type 2: Find two pixel thick lines and ridges
% ConvexHull
% Octagonal Thickening Kernel, to generate convex hulls of 45 degrees
% Skeleton:type
% Traditional skeleton generating kernels.
% Type 1: Tradional Skeleton kernel (4 connected skeleton)
% Type 2: HIPR2 Skeleton kernel (8 connected skeleton)
% Type 3: Thinning skeleton based on a ressearch paper by
% Dan S. Bloomberg (Default Type)
% ThinSE:type
% A huge variety of Thinning Kernels designed to preserve conectivity.
% many other kernel sets use these kernels as source definitions.
% Type numbers are 41-49, 81-89, 481, and 482 which are based on
% the super and sub notations used in the source research paper.
%
% Distance Measuring Kernels
%
% Different types of distance measuring methods, which are used with the
% a 'Distance' morphology method for generating a gradient based on
% distance from an edge of a binary shape, though there is a technique
% for handling a anti-aliased shape.
%
% See the 'Distance' Morphological Method, for information of how it is
% applied.
%
% Chebyshev:[{radius}][x{scale}[%!]]
% Chebyshev Distance (also known as Tchebychev or Chessboard distance)
% is a value of one to any neighbour, orthogonal or diagonal. One why
% of thinking of it is the number of squares a 'King' or 'Queen' in
% chess needs to traverse reach any other position on a chess board.
% It results in a 'square' like distance function, but one where
% diagonals are given a value that is closer than expected.
%
% Manhattan:[{radius}][x{scale}[%!]]
% Manhattan Distance (also known as Rectilinear, City Block, or the Taxi
% Cab distance metric), it is the distance needed when you can only
% travel in horizontal or vertical directions only. It is the
% distance a 'Rook' in chess would have to travel, and results in a
% diamond like distances, where diagonals are further than expected.
%
% Octagonal:[{radius}][x{scale}[%!]]
% An interleving of Manhatten and Chebyshev metrics producing an
% increasing octagonally shaped distance. Distances matches those of
% the "Octagon" shaped kernel of the same radius. The minimum radius
% and default is 2, producing a 5x5 kernel.
%
% Euclidean:[{radius}][x{scale}[%!]]
% Euclidean distance is the 'direct' or 'as the crow flys' distance.
% However by default the kernel size only has a radius of 1, which
% limits the distance to 'Knight' like moves, with only orthogonal and
% diagonal measurements being correct. As such for the default kernel
% you will get octagonal like distance function.
%
% However using a larger radius such as "Euclidean:4" you will get a
% much smoother distance gradient from the edge of the shape. Especially
% if the image is pre-processed to include any anti-aliasing pixels.
% Of course a larger kernel is slower to use, and not always needed.
%
% The first three Distance Measuring Kernels will only generate distances
% of exact multiples of {scale} in binary images. As such you can use a
% scale of 1 without loosing any information. However you also need some
% scaling when handling non-binary anti-aliased shapes.
%
% The "Euclidean" Distance Kernel however does generate a non-integer
% fractional results, and as such scaling is vital even for binary shapes.
%
*/
MagickExport KernelInfo *AcquireKernelBuiltIn(const KernelInfoType type,
const GeometryInfo *args,ExceptionInfo *exception)
{
KernelInfo
*kernel;
register ssize_t
i;
register ssize_t
u,
v;
double
nan = sqrt((double)-1.0); /* Special Value : Not A Number */
/* Generate a new empty kernel if needed */
kernel=(KernelInfo *) NULL;
switch(type) {
case UndefinedKernel: /* These should not call this function */
case UserDefinedKernel:
assert("Should not call this function" != (char *) NULL);
break;
case LaplacianKernel: /* Named Discrete Convolution Kernels */
case SobelKernel: /* these are defined using other kernels */
case RobertsKernel:
case PrewittKernel:
case CompassKernel:
case KirschKernel:
case FreiChenKernel:
case EdgesKernel: /* Hit and Miss kernels */
case CornersKernel:
case DiagonalsKernel:
case LineEndsKernel:
case LineJunctionsKernel:
case RidgesKernel:
case ConvexHullKernel:
case SkeletonKernel:
case ThinSEKernel:
break; /* A pre-generated kernel is not needed */
#if 0
/* set to 1 to do a compile-time check that we haven't missed anything */
case UnityKernel:
case GaussianKernel:
case DoGKernel:
case LoGKernel:
case BlurKernel:
case CometKernel:
case BinomialKernel:
case DiamondKernel:
case SquareKernel:
case RectangleKernel:
case OctagonKernel:
case DiskKernel:
case PlusKernel:
case CrossKernel:
case RingKernel:
case PeaksKernel:
case ChebyshevKernel:
case ManhattanKernel:
case OctangonalKernel:
case EuclideanKernel:
#else
default:
#endif
/* Generate the base Kernel Structure */
kernel=(KernelInfo *) AcquireMagickMemory(sizeof(*kernel));
if (kernel == (KernelInfo *) NULL)
return(kernel);
(void) memset(kernel,0,sizeof(*kernel));
kernel->minimum = kernel->maximum = kernel->angle = 0.0;
kernel->negative_range = kernel->positive_range = 0.0;
kernel->type = type;
kernel->next = (KernelInfo *) NULL;
kernel->signature=MagickCoreSignature;
break;
}
switch(type) {
/*
Convolution Kernels
*/
case UnityKernel:
{
kernel->height = kernel->width = (size_t) 1;
kernel->x = kernel->y = (ssize_t) 0;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(1,sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
kernel->maximum = kernel->values[0] = args->rho;
break;
}
break;
case GaussianKernel:
case DoGKernel:
case LoGKernel:
{ double
sigma = fabs(args->sigma),
sigma2 = fabs(args->xi),
A, B, R;
if ( args->rho >= 1.0 )
kernel->width = (size_t)args->rho*2+1;
else if ( (type != DoGKernel) || (sigma >= sigma2) )
kernel->width = GetOptimalKernelWidth2D(args->rho,sigma);
else
kernel->width = GetOptimalKernelWidth2D(args->rho,sigma2);
kernel->height = kernel->width;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
/* WARNING: The following generates a 'sampled gaussian' kernel.
* What we really want is a 'discrete gaussian' kernel.
*
* How to do this I don't know, but it appears to be based on the
* Error Function 'erf()' (integral of a gaussian)
*/
if ( type == GaussianKernel || type == DoGKernel )
{ /* Calculate a Gaussian, OR positive half of a DoG */
if ( sigma > MagickEpsilon )
{ A = 1.0/(2.0*sigma*sigma); /* simplify loop expressions */
B = (double) (1.0/(Magick2PI*sigma*sigma));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->values[i] = exp(-((double)(u*u+v*v))*A)*B;
}
else /* limiting case - a unity (normalized Dirac) kernel */
{ (void) memset(kernel->values,0, (size_t)
kernel->width*kernel->height*sizeof(*kernel->values));
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
}
}
if ( type == DoGKernel )
{ /* Subtract a Negative Gaussian for "Difference of Gaussian" */
if ( sigma2 > MagickEpsilon )
{ sigma = sigma2; /* simplify loop expressions */
A = 1.0/(2.0*sigma*sigma);
B = (double) (1.0/(Magick2PI*sigma*sigma));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->values[i] -= exp(-((double)(u*u+v*v))*A)*B;
}
else /* limiting case - a unity (normalized Dirac) kernel */
kernel->values[kernel->x+kernel->y*kernel->width] -= 1.0;
}
if ( type == LoGKernel )
{ /* Calculate a Laplacian of a Gaussian - Or Mexican Hat */
if ( sigma > MagickEpsilon )
{ A = 1.0/(2.0*sigma*sigma); /* simplify loop expressions */
B = (double) (1.0/(MagickPI*sigma*sigma*sigma*sigma));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
{ R = ((double)(u*u+v*v))*A;
kernel->values[i] = (1-R)*exp(-R)*B;
}
}
else /* special case - generate a unity kernel */
{ (void) memset(kernel->values,0, (size_t)
kernel->width*kernel->height*sizeof(*kernel->values));
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
}
}
/* Note the above kernels may have been 'clipped' by a user defined
** radius, producing a smaller (darker) kernel. Also for very small
** sigma's (> 0.1) the central value becomes larger than one, and thus
** producing a very bright kernel.
**
** Normalization will still be needed.
*/
/* Normalize the 2D Gaussian Kernel
**
** NB: a CorrelateNormalize performs a normal Normalize if
** there are no negative values.
*/
CalcKernelMetaData(kernel); /* the other kernel meta-data */
ScaleKernelInfo(kernel, 1.0, CorrelateNormalizeValue);
break;
}
case BlurKernel:
{ double
sigma = fabs(args->sigma),
alpha, beta;
if ( args->rho >= 1.0 )
kernel->width = (size_t)args->rho*2+1;
else
kernel->width = GetOptimalKernelWidth1D(args->rho,sigma);
kernel->height = 1;
kernel->x = (ssize_t) (kernel->width-1)/2;
kernel->y = 0;
kernel->negative_range = kernel->positive_range = 0.0;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
#if 1
#define KernelRank 3
/* Formula derived from GetBlurKernel() in "effect.c" (plus bug fix).
** It generates a gaussian 3 times the width, and compresses it into
** the expected range. This produces a closer normalization of the
** resulting kernel, especially for very low sigma values.
** As such, while weird, it is preferred.
**
** I am told this method originally came from Photoshop.
**
** A properly normalized curve is generated (apart from edge clipping)
** even though we later normalize the result (for edge clipping)
** to allow the correct generation of a "Difference of Blurs".
*/
/* initialize */
v = (ssize_t) (kernel->width*KernelRank-1)/2; /* start/end points to fit range */
(void) memset(kernel->values,0, (size_t)
kernel->width*kernel->height*sizeof(*kernel->values));
/* Calculate a Positive 1D Gaussian */
if ( sigma > MagickEpsilon )
{ sigma *= KernelRank; /* simplify loop expressions */
alpha = 1.0/(2.0*sigma*sigma);
beta= (double) (1.0/(MagickSQ2PI*sigma ));
for ( u=-v; u <= v; u++) {
kernel->values[(u+v)/KernelRank] +=
exp(-((double)(u*u))*alpha)*beta;
}
}
else /* special case - generate a unity kernel */
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
#else
/* Direct calculation without curve averaging
This is equivalent to a KernelRank of 1 */
/* Calculate a Positive Gaussian */
if ( sigma > MagickEpsilon )
{ alpha = 1.0/(2.0*sigma*sigma); /* simplify loop expressions */
beta = 1.0/(MagickSQ2PI*sigma);
for ( i=0, u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->values[i] = exp(-((double)(u*u))*alpha)*beta;
}
else /* special case - generate a unity kernel */
{ (void) memset(kernel->values,0, (size_t)
kernel->width*kernel->height*sizeof(*kernel->values));
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
}
#endif
/* Note the above kernel may have been 'clipped' by a user defined
** radius, producing a smaller (darker) kernel. Also for very small
** sigma's (> 0.1) the central value becomes larger than one, as a
** result of not generating a actual 'discrete' kernel, and thus
** producing a very bright 'impulse'.
**
** Because of these two factors Normalization is required!
*/
/* Normalize the 1D Gaussian Kernel
**
** NB: a CorrelateNormalize performs a normal Normalize if
** there are no negative values.
*/
CalcKernelMetaData(kernel); /* the other kernel meta-data */
ScaleKernelInfo(kernel, 1.0, CorrelateNormalizeValue);
/* rotate the 1D kernel by given angle */
RotateKernelInfo(kernel, args->xi );
break;
}
case CometKernel:
{ double
sigma = fabs(args->sigma),
A;
if ( args->rho < 1.0 )
kernel->width = (GetOptimalKernelWidth1D(args->rho,sigma)-1)/2+1;
else
kernel->width = (size_t)args->rho;
kernel->x = kernel->y = 0;
kernel->height = 1;
kernel->negative_range = kernel->positive_range = 0.0;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
/* A comet blur is half a 1D gaussian curve, so that the object is
** blurred in one direction only. This may not be quite the right
** curve to use so may change in the future. The function must be
** normalised after generation, which also resolves any clipping.
**
** As we are normalizing and not subtracting gaussians,
** there is no need for a divisor in the gaussian formula
**
** It is less complex.
*/
if ( sigma > MagickEpsilon )
{
#if 1
#define KernelRank 3
v = (ssize_t) kernel->width*KernelRank; /* start/end points */
(void) memset(kernel->values,0, (size_t)
kernel->width*sizeof(*kernel->values));
sigma *= KernelRank; /* simplify the loop expression */
A = 1.0/(2.0*sigma*sigma);
/* B = 1.0/(MagickSQ2PI*sigma); */
for ( u=0; u < v; u++) {
kernel->values[u/KernelRank] +=
exp(-((double)(u*u))*A);
/* exp(-((double)(i*i))/2.0*sigma*sigma)/(MagickSQ2PI*sigma); */
}
for (i=0; i < (ssize_t) kernel->width; i++)
kernel->positive_range += kernel->values[i];
#else
A = 1.0/(2.0*sigma*sigma); /* simplify the loop expression */
/* B = 1.0/(MagickSQ2PI*sigma); */
for ( i=0; i < (ssize_t) kernel->width; i++)
kernel->positive_range +=
kernel->values[i] = exp(-((double)(i*i))*A);
/* exp(-((double)(i*i))/2.0*sigma*sigma)/(MagickSQ2PI*sigma); */
#endif
}
else /* special case - generate a unity kernel */
{ (void) memset(kernel->values,0, (size_t)
kernel->width*kernel->height*sizeof(*kernel->values));
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
kernel->positive_range = 1.0;
}
kernel->minimum = 0.0;
kernel->maximum = kernel->values[0];
kernel->negative_range = 0.0;
ScaleKernelInfo(kernel, 1.0, NormalizeValue); /* Normalize */
RotateKernelInfo(kernel, args->xi); /* Rotate by angle */
break;
}
case BinomialKernel:
{
size_t
order_f;
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
order_f = fact(kernel->width-1);
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
/* set all kernel values within diamond area to scale given */
for ( i=0, v=0; v < (ssize_t)kernel->height; v++)
{ size_t
alpha = order_f / ( fact((size_t) v) * fact(kernel->height-v-1) );
for ( u=0; u < (ssize_t)kernel->width; u++, i++)
kernel->positive_range += kernel->values[i] = (double)
(alpha * order_f / ( fact((size_t) u) * fact(kernel->height-u-1) ));
}
kernel->minimum = 1.0;
kernel->maximum = kernel->values[kernel->x+kernel->y*kernel->width];
kernel->negative_range = 0.0;
break;
}
/*
Convolution Kernels - Well Known Named Constant Kernels
*/
case LaplacianKernel:
{ switch ( (int) args->rho ) {
case 0:
default: /* laplacian square filter -- default */
kernel=ParseKernelArray("3: -1,-1,-1 -1,8,-1 -1,-1,-1");
break;
case 1: /* laplacian diamond filter */
kernel=ParseKernelArray("3: 0,-1,0 -1,4,-1 0,-1,0");
break;
case 2:
kernel=ParseKernelArray("3: -2,1,-2 1,4,1 -2,1,-2");
break;
case 3:
kernel=ParseKernelArray("3: 1,-2,1 -2,4,-2 1,-2,1");
break;
case 5: /* a 5x5 laplacian */
kernel=ParseKernelArray(
"5: -4,-1,0,-1,-4 -1,2,3,2,-1 0,3,4,3,0 -1,2,3,2,-1 -4,-1,0,-1,-4");
break;
case 7: /* a 7x7 laplacian */
kernel=ParseKernelArray(
"7:-10,-5,-2,-1,-2,-5,-10 -5,0,3,4,3,0,-5 -2,3,6,7,6,3,-2 -1,4,7,8,7,4,-1 -2,3,6,7,6,3,-2 -5,0,3,4,3,0,-5 -10,-5,-2,-1,-2,-5,-10" );
break;
case 15: /* a 5x5 LoG (sigma approx 1.4) */
kernel=ParseKernelArray(
"5: 0,0,-1,0,0 0,-1,-2,-1,0 -1,-2,16,-2,-1 0,-1,-2,-1,0 0,0,-1,0,0");
break;
case 19: /* a 9x9 LoG (sigma approx 1.4) */
/* http://www.cscjournals.org/csc/manuscript/Journals/IJIP/volume3/Issue1/IJIP-15.pdf */
kernel=ParseKernelArray(
"9: 0,-1,-1,-2,-2,-2,-1,-1,0 -1,-2,-4,-5,-5,-5,-4,-2,-1 -1,-4,-5,-3,-0,-3,-5,-4,-1 -2,-5,-3,12,24,12,-3,-5,-2 -2,-5,-0,24,40,24,-0,-5,-2 -2,-5,-3,12,24,12,-3,-5,-2 -1,-4,-5,-3,-0,-3,-5,-4,-1 -1,-2,-4,-5,-5,-5,-4,-2,-1 0,-1,-1,-2,-2,-2,-1,-1,0");
break;
}
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
break;
}
case SobelKernel:
{ /* Simple Sobel Kernel */
kernel=ParseKernelArray("3: 1,0,-1 2,0,-2 1,0,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->rho);
break;
}
case RobertsKernel:
{
kernel=ParseKernelArray("3: 0,0,0 1,-1,0 0,0,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->rho);
break;
}
case PrewittKernel:
{
kernel=ParseKernelArray("3: 1,0,-1 1,0,-1 1,0,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->rho);
break;
}
case CompassKernel:
{
kernel=ParseKernelArray("3: 1,1,-1 1,-2,-1 1,1,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->rho);
break;
}
case KirschKernel:
{
kernel=ParseKernelArray("3: 5,-3,-3 5,0,-3 5,-3,-3");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->rho);
break;
}
case FreiChenKernel:
/* Direction is set to be left to right positive */
/* http://www.math.tau.ac.il/~turkel/notes/edge_detectors.pdf -- RIGHT? */
/* http://ltswww.epfl.ch/~courstiv/exos_labos/sol3.pdf -- WRONG? */
{ switch ( (int) args->rho ) {
default:
case 0:
kernel=ParseKernelArray("3: 1,0,-1 2,0,-2 1,0,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[3] = +(MagickRealType) MagickSQ2;
kernel->values[5] = -(MagickRealType) MagickSQ2;
CalcKernelMetaData(kernel); /* recalculate meta-data */
break;
case 2:
kernel=ParseKernelArray("3: 1,2,0 2,0,-2 0,-2,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[1] = kernel->values[3]= +(MagickRealType) MagickSQ2;
kernel->values[5] = kernel->values[7]= -(MagickRealType) MagickSQ2;
CalcKernelMetaData(kernel); /* recalculate meta-data */
ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue);
break;
case 10:
{
kernel=AcquireKernelInfo("FreiChen:11;FreiChen:12;FreiChen:13;FreiChen:14;FreiChen:15;FreiChen:16;FreiChen:17;FreiChen:18;FreiChen:19",exception);
if (kernel == (KernelInfo *) NULL)
return(kernel);
break;
}
case 1:
case 11:
kernel=ParseKernelArray("3: 1,0,-1 2,0,-2 1,0,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[3] = +(MagickRealType) MagickSQ2;
kernel->values[5] = -(MagickRealType) MagickSQ2;
CalcKernelMetaData(kernel); /* recalculate meta-data */
ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue);
break;
case 12:
kernel=ParseKernelArray("3: 1,2,1 0,0,0 1,2,1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[1] = +(MagickRealType) MagickSQ2;
kernel->values[7] = +(MagickRealType) MagickSQ2;
CalcKernelMetaData(kernel);
ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue);
break;
case 13:
kernel=ParseKernelArray("3: 2,-1,0 -1,0,1 0,1,-2");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[0] = +(MagickRealType) MagickSQ2;
kernel->values[8] = -(MagickRealType) MagickSQ2;
CalcKernelMetaData(kernel);
ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue);
break;
case 14:
kernel=ParseKernelArray("3: 0,1,-2 -1,0,1 2,-1,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[2] = -(MagickRealType) MagickSQ2;
kernel->values[6] = +(MagickRealType) MagickSQ2;
CalcKernelMetaData(kernel);
ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue);
break;
case 15:
kernel=ParseKernelArray("3: 0,-1,0 1,0,1 0,-1,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ScaleKernelInfo(kernel, 1.0/2.0, NoValue);
break;
case 16:
kernel=ParseKernelArray("3: 1,0,-1 0,0,0 -1,0,1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ScaleKernelInfo(kernel, 1.0/2.0, NoValue);
break;
case 17:
kernel=ParseKernelArray("3: 1,-2,1 -2,4,-2 -1,-2,1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ScaleKernelInfo(kernel, 1.0/6.0, NoValue);
break;
case 18:
kernel=ParseKernelArray("3: -2,1,-2 1,4,1 -2,1,-2");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ScaleKernelInfo(kernel, 1.0/6.0, NoValue);
break;
case 19:
kernel=ParseKernelArray("3: 1,1,1 1,1,1 1,1,1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ScaleKernelInfo(kernel, 1.0/3.0, NoValue);
break;
}
if ( fabs(args->sigma) >= MagickEpsilon )
/* Rotate by correctly supplied 'angle' */
RotateKernelInfo(kernel, args->sigma);
else if ( args->rho > 30.0 || args->rho < -30.0 )
/* Rotate by out of bounds 'type' */
RotateKernelInfo(kernel, args->rho);
break;
}
/*
Boolean or Shaped Kernels
*/
case DiamondKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
/* set all kernel values within diamond area to scale given */
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
if ( (labs((long) u)+labs((long) v)) <= (long) kernel->x)
kernel->positive_range += kernel->values[i] = args->sigma;
else
kernel->values[i] = nan;
kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
break;
}
case SquareKernel:
case RectangleKernel:
{ double
scale;
if ( type == SquareKernel )
{
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = (size_t) (2*args->rho+1);
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
scale = args->sigma;
}
else {
/* NOTE: user defaults set in "AcquireKernelInfo()" */
if ( args->rho < 1.0 || args->sigma < 1.0 )
return(DestroyKernelInfo(kernel)); /* invalid args given */
kernel->width = (size_t)args->rho;
kernel->height = (size_t)args->sigma;
if ( args->xi < 0.0 || args->xi > (double)kernel->width ||
args->psi < 0.0 || args->psi > (double)kernel->height )
return(DestroyKernelInfo(kernel)); /* invalid args given */
kernel->x = (ssize_t) args->xi;
kernel->y = (ssize_t) args->psi;
scale = 1.0;
}
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
/* set all kernel values to scale given */
u=(ssize_t) (kernel->width*kernel->height);
for ( i=0; i < u; i++)
kernel->values[i] = scale;
kernel->minimum = kernel->maximum = scale; /* a flat shape */
kernel->positive_range = scale*u;
break;
}
case OctagonKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 5; /* default radius = 2 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
if ( (labs((long) u)+labs((long) v)) <=
((long)kernel->x + (long)(kernel->x/2)) )
kernel->positive_range += kernel->values[i] = args->sigma;
else
kernel->values[i] = nan;
kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
break;
}
case DiskKernel:
{
ssize_t
limit = (ssize_t)(args->rho*args->rho);
if (args->rho < 0.4) /* default radius approx 4.3 */
kernel->width = kernel->height = 9L, limit = 18L;
else
kernel->width = kernel->height = (size_t)fabs(args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
if ((u*u+v*v) <= limit)
kernel->positive_range += kernel->values[i] = args->sigma;
else
kernel->values[i] = nan;
kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
break;
}
case PlusKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 5; /* default radius 2 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
/* set all kernel values along axises to given scale */
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->values[i] = (u == 0 || v == 0) ? args->sigma : nan;
kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
kernel->positive_range = args->sigma*(kernel->width*2.0 - 1.0);
break;
}
case CrossKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 5; /* default radius 2 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
/* set all kernel values along axises to given scale */
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->values[i] = (u == v || u == -v) ? args->sigma : nan;
kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
kernel->positive_range = args->sigma*(kernel->width*2.0 - 1.0);
break;
}
/*
HitAndMiss Kernels
*/
case RingKernel:
case PeaksKernel:
{
ssize_t
limit1,
limit2,
scale;
if (args->rho < args->sigma)
{
kernel->width = ((size_t)args->sigma)*2+1;
limit1 = (ssize_t)(args->rho*args->rho);
limit2 = (ssize_t)(args->sigma*args->sigma);
}
else
{
kernel->width = ((size_t)args->rho)*2+1;
limit1 = (ssize_t)(args->sigma*args->sigma);
limit2 = (ssize_t)(args->rho*args->rho);
}
if ( limit2 <= 0 )
kernel->width = 7L, limit1 = 7L, limit2 = 11L;
kernel->height = kernel->width;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
/* set a ring of points of 'scale' ( 0.0 for PeaksKernel ) */
scale = (ssize_t) (( type == PeaksKernel) ? 0.0 : args->xi);
for ( i=0, v= -kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
{ ssize_t radius=u*u+v*v;
if (limit1 < radius && radius <= limit2)
kernel->positive_range += kernel->values[i] = (double) scale;
else
kernel->values[i] = nan;
}
kernel->minimum = kernel->maximum = (double) scale;
if ( type == PeaksKernel ) {
/* set the central point in the middle */
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
kernel->positive_range = 1.0;
kernel->maximum = 1.0;
}
break;
}
case EdgesKernel:
{
kernel=AcquireKernelInfo("ThinSE:482",exception);
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandMirrorKernelInfo(kernel); /* mirror expansion of kernels */
break;
}
case CornersKernel:
{
kernel=AcquireKernelInfo("ThinSE:87",exception);
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandRotateKernelInfo(kernel, 90.0); /* Expand 90 degree rotations */
break;
}
case DiagonalsKernel:
{
switch ( (int) args->rho ) {
case 0:
default:
{ KernelInfo
*new_kernel;
kernel=ParseKernelArray("3: 0,0,0 0,-,1 1,1,-");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
new_kernel=ParseKernelArray("3: 0,0,1 0,-,1 0,1,-");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
ExpandMirrorKernelInfo(kernel);
return(kernel);
}
case 1:
kernel=ParseKernelArray("3: 0,0,0 0,-,1 1,1,-");
break;
case 2:
kernel=ParseKernelArray("3: 0,0,1 0,-,1 0,1,-");
break;
}
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->sigma);
break;
}
case LineEndsKernel:
{ /* Kernels for finding the end of thin lines */
switch ( (int) args->rho ) {
case 0:
default:
/* set of kernels to find all end of lines */
return(AcquireKernelInfo("LineEnds:1>;LineEnds:2>",exception));
case 1:
/* kernel for 4-connected line ends - no rotation */
kernel=ParseKernelArray("3: 0,0,- 0,1,1 0,0,-");
break;
case 2:
/* kernel to add for 8-connected lines - no rotation */
kernel=ParseKernelArray("3: 0,0,0 0,1,0 0,0,1");
break;
case 3:
/* kernel to add for orthogonal line ends - does not find corners */
kernel=ParseKernelArray("3: 0,0,0 0,1,1 0,0,0");
break;
case 4:
/* traditional line end - fails on last T end */
kernel=ParseKernelArray("3: 0,0,0 0,1,- 0,0,-");
break;
}
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->sigma);
break;
}
case LineJunctionsKernel:
{ /* kernels for finding the junctions of multiple lines */
switch ( (int) args->rho ) {
case 0:
default:
/* set of kernels to find all line junctions */
return(AcquireKernelInfo("LineJunctions:1@;LineJunctions:2>",exception));
case 1:
/* Y Junction */
kernel=ParseKernelArray("3: 1,-,1 -,1,- -,1,-");
break;
case 2:
/* Diagonal T Junctions */
kernel=ParseKernelArray("3: 1,-,- -,1,- 1,-,1");
break;
case 3:
/* Orthogonal T Junctions */
kernel=ParseKernelArray("3: -,-,- 1,1,1 -,1,-");
break;
case 4:
/* Diagonal X Junctions */
kernel=ParseKernelArray("3: 1,-,1 -,1,- 1,-,1");
break;
case 5:
/* Orthogonal X Junctions - minimal diamond kernel */
kernel=ParseKernelArray("3: -,1,- 1,1,1 -,1,-");
break;
}
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->sigma);
break;
}
case RidgesKernel:
{ /* Ridges - Ridge finding kernels */
KernelInfo
*new_kernel;
switch ( (int) args->rho ) {
case 1:
default:
kernel=ParseKernelArray("3x1:0,1,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandRotateKernelInfo(kernel, 90.0); /* 2 rotated kernels (symmetrical) */
break;
case 2:
kernel=ParseKernelArray("4x1:0,1,1,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandRotateKernelInfo(kernel, 90.0); /* 4 rotated kernels */
/* Kernels to find a stepped 'thick' line, 4 rotates + mirrors */
/* Unfortunately we cannot yet rotate a non-square kernel */
/* But then we can't flip a non-symmetrical kernel either */
new_kernel=ParseKernelArray("4x3+1+1:0,1,1,- -,1,1,- -,1,1,0");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("4x3+2+1:0,1,1,- -,1,1,- -,1,1,0");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("4x3+1+1:-,1,1,0 -,1,1,- 0,1,1,-");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("4x3+2+1:-,1,1,0 -,1,1,- 0,1,1,-");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("3x4+1+1:0,-,- 1,1,1 1,1,1 -,-,0");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("3x4+1+2:0,-,- 1,1,1 1,1,1 -,-,0");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("3x4+1+1:-,-,0 1,1,1 1,1,1 0,-,-");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("3x4+1+2:-,-,0 1,1,1 1,1,1 0,-,-");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
break;
}
break;
}
case ConvexHullKernel:
{
KernelInfo
*new_kernel;
/* first set of 8 kernels */
kernel=ParseKernelArray("3: 1,1,- 1,0,- 1,-,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandRotateKernelInfo(kernel, 90.0);
/* append the mirror versions too - no flip function yet */
new_kernel=ParseKernelArray("3: 1,1,1 1,0,- -,-,0");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
ExpandRotateKernelInfo(new_kernel, 90.0);
LastKernelInfo(kernel)->next = new_kernel;
break;
}
case SkeletonKernel:
{
switch ( (int) args->rho ) {
case 1:
default:
/* Traditional Skeleton...
** A cyclically rotated single kernel
*/
kernel=AcquireKernelInfo("ThinSE:482",exception);
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandRotateKernelInfo(kernel, 45.0); /* 8 rotations */
break;
case 2:
/* HIPR Variation of the cyclic skeleton
** Corners of the traditional method made more forgiving,
** but the retain the same cyclic order.
*/
kernel=AcquireKernelInfo("ThinSE:482; ThinSE:87x90;",exception);
if (kernel == (KernelInfo *) NULL)
return(kernel);
if (kernel->next == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
kernel->type = type;
kernel->next->type = type;
ExpandRotateKernelInfo(kernel, 90.0); /* 4 rotations of the 2 kernels */
break;
case 3:
/* Dan Bloomberg Skeleton, from his paper on 3x3 thinning SE's
** "Connectivity-Preserving Morphological Image Transformations"
** by Dan S. Bloomberg, available on Leptonica, Selected Papers,
** http://www.leptonica.com/papers/conn.pdf
*/
kernel=AcquireKernelInfo("ThinSE:41; ThinSE:42; ThinSE:43",
exception);
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->next->type = type;
kernel->next->next->type = type;
ExpandMirrorKernelInfo(kernel); /* 12 kernels total */
break;
}
break;
}
case ThinSEKernel:
{ /* Special kernels for general thinning, while preserving connections
** "Connectivity-Preserving Morphological Image Transformations"
** by Dan S. Bloomberg, available on Leptonica, Selected Papers,
** http://www.leptonica.com/papers/conn.pdf
** And
** http://tpgit.github.com/Leptonica/ccthin_8c_source.html
**
** Note kernels do not specify the origin pixel, allowing them
** to be used for both thickening and thinning operations.
*/
switch ( (int) args->rho ) {
/* SE for 4-connected thinning */
case 41: /* SE_4_1 */
kernel=ParseKernelArray("3: -,-,1 0,-,1 -,-,1");
break;
case 42: /* SE_4_2 */
kernel=ParseKernelArray("3: -,-,1 0,-,1 -,0,-");
break;
case 43: /* SE_4_3 */
kernel=ParseKernelArray("3: -,0,- 0,-,1 -,-,1");
break;
case 44: /* SE_4_4 */
kernel=ParseKernelArray("3: -,0,- 0,-,1 -,0,-");
break;
case 45: /* SE_4_5 */
kernel=ParseKernelArray("3: -,0,1 0,-,1 -,0,-");
break;
case 46: /* SE_4_6 */
kernel=ParseKernelArray("3: -,0,- 0,-,1 -,0,1");
break;
case 47: /* SE_4_7 */
kernel=ParseKernelArray("3: -,1,1 0,-,1 -,0,-");
break;
case 48: /* SE_4_8 */
kernel=ParseKernelArray("3: -,-,1 0,-,1 0,-,1");
break;
case 49: /* SE_4_9 */
kernel=ParseKernelArray("3: 0,-,1 0,-,1 -,-,1");
break;
/* SE for 8-connected thinning - negatives of the above */
case 81: /* SE_8_0 */
kernel=ParseKernelArray("3: -,1,- 0,-,1 -,1,-");
break;
case 82: /* SE_8_2 */
kernel=ParseKernelArray("3: -,1,- 0,-,1 0,-,-");
break;
case 83: /* SE_8_3 */
kernel=ParseKernelArray("3: 0,-,- 0,-,1 -,1,-");
break;
case 84: /* SE_8_4 */
kernel=ParseKernelArray("3: 0,-,- 0,-,1 0,-,-");
break;
case 85: /* SE_8_5 */
kernel=ParseKernelArray("3: 0,-,1 0,-,1 0,-,-");
break;
case 86: /* SE_8_6 */
kernel=ParseKernelArray("3: 0,-,- 0,-,1 0,-,1");
break;
case 87: /* SE_8_7 */
kernel=ParseKernelArray("3: -,1,- 0,-,1 0,0,-");
break;
case 88: /* SE_8_8 */
kernel=ParseKernelArray("3: -,1,- 0,-,1 0,1,-");
break;
case 89: /* SE_8_9 */
kernel=ParseKernelArray("3: 0,1,- 0,-,1 -,1,-");
break;
/* Special combined SE kernels */
case 423: /* SE_4_2 , SE_4_3 Combined Kernel */
kernel=ParseKernelArray("3: -,-,1 0,-,- -,0,-");
break;
case 823: /* SE_8_2 , SE_8_3 Combined Kernel */
kernel=ParseKernelArray("3: -,1,- -,-,1 0,-,-");
break;
case 481: /* SE_48_1 - General Connected Corner Kernel */
kernel=ParseKernelArray("3: -,1,1 0,-,1 0,0,-");
break;
default:
case 482: /* SE_48_2 - General Edge Kernel */
kernel=ParseKernelArray("3: 0,-,1 0,-,1 0,-,1");
break;
}
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->sigma);
break;
}
/*
Distance Measuring Kernels
*/
case ChebyshevKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->positive_range += ( kernel->values[i] =
args->sigma*MagickMax(fabs((double)u),fabs((double)v)) );
kernel->maximum = kernel->values[0];
break;
}
case ManhattanKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->positive_range += ( kernel->values[i] =
args->sigma*(labs((long) u)+labs((long) v)) );
kernel->maximum = kernel->values[0];
break;
}
case OctagonalKernel:
{
if (args->rho < 2.0)
kernel->width = kernel->height = 5; /* default/minimum radius = 2 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
{
double
r1 = MagickMax(fabs((double)u),fabs((double)v)),
r2 = floor((double)(labs((long)u)+labs((long)v)+1)/1.5);
kernel->positive_range += kernel->values[i] =
args->sigma*MagickMax(r1,r2);
}
kernel->maximum = kernel->values[0];
break;
}
case EuclideanKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->positive_range += ( kernel->values[i] =
args->sigma*sqrt((double)(u*u+v*v)) );
kernel->maximum = kernel->values[0];
break;
}
default:
{
/* No-Op Kernel - Basically just a single pixel on its own */
kernel=ParseKernelArray("1:1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = UndefinedKernel;
break;
}
break;
}
return(kernel);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneKernelInfo() creates a new clone of the given Kernel List so that it
% can be modified without affecting the original. The cloned kernel should
% be destroyed using DestroyKernelInfo() when no longer needed.
%
% The format of the CloneKernelInfo method is:
%
% KernelInfo *CloneKernelInfo(const KernelInfo *kernel)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel to be cloned
%
*/
MagickExport KernelInfo *CloneKernelInfo(const KernelInfo *kernel)
{
  register ssize_t
    i;

  KernelInfo
    *new_kernel;

  /*
    Deep-copy a kernel node: the structure, its value array, and
    (recursively) the rest of the kernel list, so the clone can be modified
    without affecting the original.  Returns NULL on allocation failure.
  */
  assert(kernel != (KernelInfo *) NULL);
  new_kernel=(KernelInfo *) AcquireMagickMemory(sizeof(*kernel));
  if (new_kernel == (KernelInfo *) NULL)
    return(new_kernel);
  *new_kernel=(*kernel);  /* copy values in structure */
  /*
    Clear the copied 'next' pointer immediately: the struct copy above
    aliased the source list, and an allocation failure below would otherwise
    let DestroyKernelInfo() free the CALLER's remaining kernels.
  */
  new_kernel->next=(KernelInfo *) NULL;
  /* replace the values with a copy of the values */
  new_kernel->values=(MagickRealType *) MagickAssumeAligned(
    AcquireAlignedMemory(kernel->width,kernel->height*sizeof(*kernel->values)));
  if (new_kernel->values == (MagickRealType *) NULL)
    return(DestroyKernelInfo(new_kernel));
  for (i=0; i < (ssize_t) (kernel->width*kernel->height); i++)
    new_kernel->values[i]=kernel->values[i];
  /* Also clone the next kernel in the kernel list */
  if ( kernel->next != (KernelInfo *) NULL ) {
    new_kernel->next = CloneKernelInfo(kernel->next);
    if ( new_kernel->next == (KernelInfo *) NULL )
      return(DestroyKernelInfo(new_kernel));
  }
  return(new_kernel);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyKernelInfo() frees the memory used by a Convolution/Morphology
% kernel.
%
% The format of the DestroyKernelInfo method is:
%
% KernelInfo *DestroyKernelInfo(KernelInfo *kernel)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel to be destroyed
%
*/
MagickExport KernelInfo *DestroyKernelInfo(KernelInfo *kernel)
{
  assert(kernel != (KernelInfo *) NULL);
  /*
    Walk the kernel list iteratively, releasing each node's value array and
    then the node itself; equivalent to the tail-recursive form but immune
    to stack growth on very long kernel lists.  Always returns NULL.
  */
  while (kernel != (KernelInfo *) NULL)
  {
    KernelInfo
      *next;

    next=kernel->next;
    kernel->values=(MagickRealType *) RelinquishAlignedMemory(kernel->values);
    kernel=(KernelInfo *) RelinquishMagickMemory(kernel);
    kernel=next;
  }
  return((KernelInfo *) NULL);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ E x p a n d M i r r o r K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ExpandMirrorKernelInfo() takes a single kernel, and expands it into a
% sequence of 90-degree rotated kernels but providing a reflected 180
% rotation, before the -/+ 90-degree rotations.
%
% This special rotation order produces a better, more symmetrical thinning of
% objects.
%
% The format of the ExpandMirrorKernelInfo method is:
%
% void ExpandMirrorKernelInfo(KernelInfo *kernel)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
% This function is only internal to this module, as it is not finalized,
% especially with regard to non-orthogonal angles, and rotation of larger
% 2D kernels.
*/
#if 0
/*
  FlopKernelInfo() -- DISABLED, unfinished helper that would mirror a kernel
  horizontally by reversing each row of the value array in place, then
  adjusting the origin column to its mirrored position.
  NOTE(review): this block does not compile as written -- 'angle' is not
  declared anywhere in the function; confirm intent before ever enabling.
*/
static void FlopKernelInfo(KernelInfo *kernel)
{ /* Do a Flop by reversing each row. */
  size_t
    y;
  register ssize_t
    x,r;
  register double
    *k,t;

  /* swap mirrored element pairs within every row */
  for ( y=0, k=kernel->values; y < kernel->height; y++, k+=kernel->width)
    for ( x=0, r=kernel->width-1; x<kernel->width/2; x++, r--)
      t=k[x], k[x]=k[r], k[r]=t;

  /* mirror the origin column */
  kernel->x = kernel->width - kernel->x - 1;
  angle = fmod(angle+180.0, 360.0);
}
#endif
static void ExpandMirrorKernelInfo(KernelInfo *kernel)
{
  /*
    Expand a single kernel into a list of four: the original followed by a
    180-degree flip, a 90-degree transpose, and another 180-degree flop --
    the mirrored ordering used for symmetrical thinning.  A failed clone
    silently stops the expansion, leaving a shorter (still valid) list.
  */
  static const double
    angles[3] = { 180.0, 90.0, 180.0 };  /* flip, transpose, flop */

  KernelInfo
    *clone,
    *tail;

  register ssize_t
    n;

  tail=kernel;
  for (n=0; n < 3; n++)
  {
    clone=CloneKernelInfo(tail);
    if (clone == (KernelInfo *) NULL)
      return;
    RotateKernelInfo(clone,angles[n]);
    LastKernelInfo(tail)->next=clone;
    tail=clone;
  }
  return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ E x p a n d R o t a t e K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ExpandRotateKernelInfo() takes a kernel list, and expands it by rotating
% incrementally by the angle given, until the kernel repeats.
%
% WARNING: 45 degree rotations only work for 3x3 kernels.
% While 90 degree rotations only work for linear and square kernels
%
% The format of the ExpandRotateKernelInfo method is:
%
% void ExpandRotateKernelInfo(KernelInfo *kernel, double angle)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
% o angle: angle to rotate in degrees
%
% This function is only internel to this module, as it is not finalized,
% especially with regard to non-orthogonal angles, and rotation of larger
% 2D kernels.
*/
/* Internal Routine - Return true if two kernels are the same */
static MagickBooleanType SameKernelInfo(const KernelInfo *kernel1,
  const KernelInfo *kernel2)
{
  register size_t
    n;

  /* geometry and origin must agree before values are even considered */
  if ((kernel1->width != kernel2->width) ||
      (kernel1->height != kernel2->height) ||
      (kernel1->x != kernel2->x) ||
      (kernel1->y != kernel2->y))
    return MagickFalse;
  /* element-wise comparison; NaN ("don't care") entries must pair up */
  for (n=0; n < (kernel1->width*kernel1->height); n++)
  {
    if ( IsNaN(kernel1->values[n]) && !IsNaN(kernel2->values[n]) )
      return MagickFalse;
    if ( IsNaN(kernel2->values[n]) && !IsNaN(kernel1->values[n]) )
      return MagickFalse;
    /* when both entries are NaN the comparison below is false: they match */
    if ( fabs(kernel1->values[n] - kernel2->values[n]) >= MagickEpsilon )
      return MagickFalse;
  }
  return MagickTrue;
}
static void ExpandRotateKernelInfo(KernelInfo *kernel,const double angle)
{
  KernelInfo
    *rotated,
    *tail;

  /*
    Keep appending rotated clones of the list tail until the rotation cycles
    back to the original kernel (or a clone fails).  The loop condition is
    the clone result itself, so no constant-conditional warning suppression
    is needed.
  */
  tail=kernel;
  rotated=CloneKernelInfo(tail);
  while (rotated != (KernelInfo *) NULL)
  {
    RotateKernelInfo(rotated,angle);
    if (SameKernelInfo(kernel,rotated) != MagickFalse)
      break;  /* came full circle */
    LastKernelInfo(tail)->next=rotated;
    tail=rotated;
    rotated=CloneKernelInfo(tail);
  }
  if (rotated != (KernelInfo *) NULL)
    rotated=DestroyKernelInfo(rotated);  /* kernel repeated - junk */
  return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C a l c M e t a K e r n a l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CalcKernelMetaData() recalculates the KernelInfo meta-data of this kernel only,
% using the kernel values. This should only be used if it is not possible to
% calculate that meta-data in some easier way.
%
% It is important that the meta-data is correct before ScaleKernelInfo() is
% used to perform kernel normalization.
%
% The format of the CalcKernelMetaData method is:
%
% void CalcKernelMetaData(KernelInfo *kernel, const double scale )
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel to modify
%
% WARNING: Minimum and Maximum values are assumed to include zero, even if
% zero is not part of the kernel (as in Gaussian Derived kernels). This
% however is not true for flat-shaped morphological kernels.
%
% WARNING: Only the specific kernel pointed to is modified, not a list of
% multiple kernels.
%
% This is an internal function and not expected to be useful outside this
% module. This could change however.
*/
static void CalcKernelMetaData(KernelInfo *kernel)
{
  register size_t
    n;

  /*
    Recompute min/max and positive/negative range sums from the value array,
    snapping near-zero entries to exactly 0.0 first.  Min and max implicitly
    include zero, since both start at 0.0.
  */
  kernel->minimum = kernel->maximum = 0.0;
  kernel->negative_range = kernel->positive_range = 0.0;
  for (n=0; n < (kernel->width*kernel->height); n++)
  {
    double
      entry;

    entry=kernel->values[n];
    if ( fabs(entry) < MagickEpsilon )
      entry=kernel->values[n]=0.0;
    if ( entry < 0 )
      kernel->negative_range += entry;
    else
      kernel->positive_range += entry;
    Minimize(kernel->minimum, entry);
    Maximize(kernel->maximum, entry);
  }
  return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M o r p h o l o g y A p p l y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MorphologyApply() applies a morphological method, multiple times using
% a list of multiple kernels. This is the method that should be called by
% other 'operators' that internally use morphology operations as part of
% their processing.
%
% It is basically equivalent to as MorphologyImage() (see below) but without
% any user controls. This allows internel programs to use this method to
% perform a specific task without possible interference by any API user
% supplied settings.
%
% It is MorphologyImage() task to extract any such user controls, and
% pass them to this function for processing.
%
% More specifically all given kernels should already be scaled, normalised,
% and blended appropriately before being passed to this routine. The
% appropriate bias, and compose (typically 'UndefinedComposeOp') given.
%
% The format of the MorphologyApply method is:
%
% Image *MorphologyApply(const Image *image,MorphologyMethod method,
% const ssize_t iterations,const KernelInfo *kernel,
% const CompositeMethod compose,const double bias,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the source image
%
% o method: the morphology method to be applied.
%
% o iterations: apply the operation this many times (or no change).
% A value of -1 means loop until no change found.
% How this is applied may depend on the morphology method.
% Typically this is a value of 1.
%
% o channel: the channel type.
%
% o kernel: An array of double representing the morphology kernel.
%
% o compose: How to handle or merge multi-kernel results.
% If 'UndefinedCompositeOp' use default for the Morphology method.
% If 'NoCompositeOp' force image to be re-iterated by each kernel.
% Otherwise merge the results using the compose method given.
%
% o bias: Convolution Output Bias.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  MorphologyPrimitive() applies a single morphology primitive (convolve,
  erode, dilate, hit-and-miss, thinning, thicken, intensity variants, or
  iterative distance) of the given kernel to 'image', writing results into
  the pre-allocated same-sized 'morphology_image'.

  Returns the number of pixel-channel values that changed, or -1 on error.
  (Both the fast vertical-kernel path and the general path now signal
  failure with -1; the vertical path previously returned 0, hiding errors
  from callers that test for a negative result.)
*/
static ssize_t MorphologyPrimitive(const Image *image,Image *morphology_image,
  const MorphologyMethod method,const KernelInfo *kernel,const double bias,
  ExceptionInfo *exception)
{
#define MorphologyTag "Morphology/Image"

  CacheView
    *image_view,
    *morphology_view;

  OffsetInfo
    offset;

  register ssize_t
    j,
    y;

  size_t
    *changes,
    changed,
    width;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(morphology_image != (Image *) NULL);
  assert(morphology_image->signature == MagickCoreSignature);
  assert(kernel != (KernelInfo *) NULL);
  assert(kernel->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  morphology_view=AcquireAuthenticCacheView(morphology_image,exception);
  /* width of each virtual pixel row: image width plus kernel overlap */
  width=image->columns+kernel->width-1;
  offset.x=0;
  offset.y=0;
  switch (method)
  {
    case ConvolveMorphology:
    case DilateMorphology:
    case DilateIntensityMorphology:
    case IterativeDistanceMorphology:
    {
      /*
        Kernel needs to used with reflection about origin.
      */
      offset.x=(ssize_t) kernel->width-kernel->x-1;
      offset.y=(ssize_t) kernel->height-kernel->y-1;
      break;
    }
    case ErodeMorphology:
    case ErodeIntensityMorphology:
    case HitAndMissMorphology:
    case ThinningMorphology:
    case ThickenMorphology:
    {
      offset.x=kernel->x;
      offset.y=kernel->y;
      break;
    }
    default:
    {
      assert("Not a Primitive Morphology Method" != (char *) NULL);
      break;
    }
  }
  changed=0;
  /* per-thread change counters, merged after the parallel loop */
  changes=(size_t *) AcquireQuantumMemory(GetOpenMPMaximumThreads(),
    sizeof(*changes));
  if (changes == (size_t *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  for (j=0; j < (ssize_t) GetOpenMPMaximumThreads(); j++)
    changes[j]=0;
  if ((method == ConvolveMorphology) && (kernel->width == 1))
    {
      register ssize_t
        x;

      /*
        Special handling (for speed) of vertical (blur) kernels. This performs
        its handling in columns rather than in rows. This is only done
        for convolve as it is the only method that generates very large 1-D
        vertical kernels (such as a 'BlurKernel')
      */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static) shared(progress,status) \
        magick_number_threads(image,morphology_image,image->columns,1)
#endif
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        const int
          id = GetOpenMPThreadId();

        register const Quantum
          *magick_restrict p;

        register Quantum
          *magick_restrict q;

        register ssize_t
          r;

        ssize_t
          center;

        if (status == MagickFalse)
          continue;
        p=GetCacheViewVirtualPixels(image_view,x,-offset.y,1,image->rows+
          kernel->height-1,exception);
        q=GetCacheViewAuthenticPixels(morphology_view,x,0,1,
          morphology_image->rows,exception);
        if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
          {
            status=MagickFalse;
            continue;
          }
        /* offset (in quantum units) to the pixel under the kernel origin */
        center=(ssize_t) GetPixelChannels(image)*offset.y;
        for (r=0; r < (ssize_t) image->rows; r++)
        {
          register ssize_t
            i;

          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            double
              alpha,
              gamma,
              pixel;

            PixelChannel
              channel;

            PixelTrait
              morphology_traits,
              traits;

            register const MagickRealType
              *magick_restrict k;

            register const Quantum
              *magick_restrict pixels;

            register ssize_t
              v;

            size_t
              count;

            channel=GetPixelChannelChannel(image,i);
            traits=GetPixelChannelTraits(image,channel);
            morphology_traits=GetPixelChannelTraits(morphology_image,channel);
            if ((traits == UndefinedPixelTrait) ||
                (morphology_traits == UndefinedPixelTrait))
              continue;
            if ((traits & CopyPixelTrait) != 0)
              {
                /* channel is not processed: copy the source value through */
                SetPixelChannel(morphology_image,channel,p[center+i],q);
                continue;
              }
            /* convolve uses the reflected kernel: walk values backwards */
            k=(&kernel->values[kernel->height-1]);
            pixels=p;
            pixel=bias;
            gamma=1.0;
            count=0;
            if (((image->alpha_trait & BlendPixelTrait) == 0) ||
                ((morphology_traits & BlendPixelTrait) == 0))
              for (v=0; v < (ssize_t) kernel->height; v++)
              {
                if (!IsNaN(*k))
                  {
                    pixel+=(*k)*pixels[i];
                    count++;
                  }
                k--;
                pixels+=GetPixelChannels(image);
              }
            else
              {
                /* alpha-weighted convolution */
                gamma=0.0;
                for (v=0; v < (ssize_t) kernel->height; v++)
                {
                  if (!IsNaN(*k))
                    {
                      alpha=(double) (QuantumScale*GetPixelAlpha(image,pixels));
                      pixel+=alpha*(*k)*pixels[i];
                      gamma+=alpha*(*k);
                      count++;
                    }
                  k--;
                  pixels+=GetPixelChannels(image);
                }
              }
            if (fabs(pixel-p[center+i]) > MagickEpsilon)
              changes[id]++;
            gamma=PerceptibleReciprocal(gamma);
            if (count != 0)
              gamma*=(double) kernel->height/count;  /* renormalize for NaNs */
            SetPixelChannel(morphology_image,channel,ClampToQuantum(gamma*
              pixel),q);
          }
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(morphology_image);
        }
        if (SyncCacheViewAuthenticPixels(morphology_view,exception) == MagickFalse)
          status=MagickFalse;
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
            #pragma omp atomic
#endif
            progress++;
            /* NOTE(review): this path iterates columns but reports progress
               against image->rows -- presumably a historical carry-over;
               confirm before "fixing" the progress total. */
            proceed=SetImageProgress(image,MorphologyTag,progress,image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      morphology_image->type=image->type;
      morphology_view=DestroyCacheView(morphology_view);
      image_view=DestroyCacheView(image_view);
      for (j=0; j < (ssize_t) GetOpenMPMaximumThreads(); j++)
        changed+=changes[j];
      changes=(size_t *) RelinquishMagickMemory(changes);
      /* was 'status ? changed : 0': return -1 on error, matching the
         general path below so callers testing '< 0' see the failure */
      return(status ? (ssize_t) changed : -1);
    }
  /*
    Normal handling of horizontal or rectangular kernels (row by row).
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,morphology_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    ssize_t
      center;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,-offset.x,y-offset.y,width,
      kernel->height,exception);
    q=GetCacheViewAuthenticPixels(morphology_view,0,y,morphology_image->columns,
      1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /* offset (in quantum units) to the pixel under the kernel origin */
    center=(ssize_t) (GetPixelChannels(image)*width*offset.y+
      GetPixelChannels(image)*offset.x);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          alpha,
          gamma,
          intensity,
          maximum,
          minimum,
          pixel;

        PixelChannel
          channel;

        PixelTrait
          morphology_traits,
          traits;

        register const MagickRealType
          *magick_restrict k;

        register const Quantum
          *magick_restrict pixels,
          *magick_restrict quantum_pixels;

        register ssize_t
          u;

        size_t
          count;

        ssize_t
          v;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        morphology_traits=GetPixelChannelTraits(morphology_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (morphology_traits == UndefinedPixelTrait))
          continue;
        if ((traits & CopyPixelTrait) != 0)
          {
            /* channel is not processed: copy the source value through */
            SetPixelChannel(morphology_image,channel,p[center+i],q);
            continue;
          }
        pixels=p;
        quantum_pixels=(const Quantum *) NULL;
        maximum=0.0;
        minimum=(double) QuantumRange;
        /* seed the accumulator according to the primitive's identity value */
        switch (method)
        {
          case ConvolveMorphology:
          {
            pixel=bias;
            break;
          }
          case DilateMorphology:
          case ErodeIntensityMorphology:
          {
            pixel=0.0;
            break;
          }
          case HitAndMissMorphology:
          case ErodeMorphology:
          {
            pixel=QuantumRange;
            break;
          }
          default:
          {
            pixel=(double) p[center+i];
            break;
          }
        }
        count=0;
        gamma=1.0;
        switch (method)
        {
          case ConvolveMorphology:
          {
            /*
              Weighted Average of pixels using reflected kernel

              For correct working of this operation for asymetrical kernels,
              the kernel needs to be applied in its reflected form. That is
              its values needs to be reversed.

              Correlation is actually the same as this but without reflecting
              the kernel, and thus 'lower-level' that Convolution. However as
              Convolution is the more common method used, and it does not
              really cost us much in terms of processing to use a reflected
              kernel, so it is Convolution that is implemented.

              Correlation will have its kernel reflected before calling this
              function to do a Convolve.

              For more details of Correlation vs Convolution see
                http://www.cs.umd.edu/~djacobs/CMSC426/Convolution.pdf
            */
            k=(&kernel->values[kernel->width*kernel->height-1]);
            if (((image->alpha_trait & BlendPixelTrait) == 0) ||
                ((morphology_traits & BlendPixelTrait) == 0))
              {
                /*
                  No alpha blending.
                */
                for (v=0; v < (ssize_t) kernel->height; v++)
                {
                  for (u=0; u < (ssize_t) kernel->width; u++)
                  {
                    if (!IsNaN(*k))
                      {
                        pixel+=(*k)*pixels[i];
                        count++;
                      }
                    k--;
                    pixels+=GetPixelChannels(image);
                  }
                  pixels+=(image->columns-1)*GetPixelChannels(image);
                }
                break;
              }
            /*
              Alpha blending.
            */
            gamma=0.0;
            for (v=0; v < (ssize_t) kernel->height; v++)
            {
              for (u=0; u < (ssize_t) kernel->width; u++)
              {
                if (!IsNaN(*k))
                  {
                    alpha=(double) (QuantumScale*GetPixelAlpha(image,pixels));
                    pixel+=alpha*(*k)*pixels[i];
                    gamma+=alpha*(*k);
                    count++;
                  }
                k--;
                pixels+=GetPixelChannels(image);
              }
              pixels+=(image->columns-1)*GetPixelChannels(image);
            }
            break;
          }
          case ErodeMorphology:
          {
            /*
              Minimum value within kernel neighbourhood.

              The kernel is not reflected for this operation. In normal
              Greyscale Morphology, the kernel value should be added
              to the real value, this is currently not done, due to the
              nature of the boolean kernels being used.
            */
            k=kernel->values;
            for (v=0; v < (ssize_t) kernel->height; v++)
            {
              for (u=0; u < (ssize_t) kernel->width; u++)
              {
                if (!IsNaN(*k) && (*k >= 0.5))
                  {
                    if ((double) pixels[i] < pixel)
                      pixel=(double) pixels[i];
                  }
                k++;
                pixels+=GetPixelChannels(image);
              }
              pixels+=(image->columns-1)*GetPixelChannels(image);
            }
            break;
          }
          case DilateMorphology:
          {
            /*
              Maximum value within kernel neighbourhood.

              For correct working of this operation for asymetrical kernels,
              the kernel needs to be applied in its reflected form. That is
              its values needs to be reversed.

              In normal Greyscale Morphology, the kernel value should be
              added to the real value, this is currently not done, due to the
              nature of the boolean kernels being used.
            */
            k=(&kernel->values[kernel->width*kernel->height-1]);
            for (v=0; v < (ssize_t) kernel->height; v++)
            {
              for (u=0; u < (ssize_t) kernel->width; u++)
              {
                if (!IsNaN(*k) && (*k > 0.5))
                  {
                    if ((double) pixels[i] > pixel)
                      pixel=(double) pixels[i];
                  }
                k--;
                pixels+=GetPixelChannels(image);
              }
              pixels+=(image->columns-1)*GetPixelChannels(image);
            }
            break;
          }
          case HitAndMissMorphology:
          case ThinningMorphology:
          case ThickenMorphology:
          {
            /*
              Minimum of foreground pixel minus maxumum of background pixels.

              The kernel is not reflected for this operation, and consists
              of both foreground and background pixel neighbourhoods, 0.0 for
              background, and 1.0 for foreground with either Nan or 0.5 values
              for don't care.

              This never produces a meaningless negative result. Such results
              cause Thinning/Thicken to not work correctly when used against a
              greyscale image.
            */
            k=kernel->values;
            for (v=0; v < (ssize_t) kernel->height; v++)
            {
              for (u=0; u < (ssize_t) kernel->width; u++)
              {
                if (!IsNaN(*k))
                  {
                    if (*k > 0.7)
                      {
                        /* foreground: track the minimum */
                        if ((double) pixels[i] < pixel)
                          pixel=(double) pixels[i];
                      }
                    else
                      if (*k < 0.3)
                        {
                          /* background: track the maximum */
                          if ((double) pixels[i] > maximum)
                            maximum=(double) pixels[i];
                        }
                    count++;
                  }
                k++;
                pixels+=GetPixelChannels(image);
              }
              pixels+=(image->columns-1)*GetPixelChannels(image);
            }
            pixel-=maximum;
            if (pixel < 0.0)
              pixel=0.0;
            if (method == ThinningMorphology)
              pixel=(double) p[center+i]-pixel;
            else
              if (method == ThickenMorphology)
                /* NOTE(review): '+=' doubles the accumulated value here
                   (pixel = 2*pixel + p), asymmetric with the Thinning
                   branch above; result is clamped later -- confirm this is
                   intentional before changing. */
                pixel+=(double) p[center+i]+pixel;
            break;
          }
          case ErodeIntensityMorphology:
          {
            /*
              Select pixel with minimum intensity within kernel neighbourhood.

              The kernel is not reflected for this operation.
            */
            k=kernel->values;
            for (v=0; v < (ssize_t) kernel->height; v++)
            {
              for (u=0; u < (ssize_t) kernel->width; u++)
              {
                if (!IsNaN(*k) && (*k >= 0.5))
                  {
                    intensity=(double) GetPixelIntensity(image,pixels);
                    if (intensity < minimum)
                      {
                        quantum_pixels=pixels;
                        pixel=(double) pixels[i];
                        minimum=intensity;
                      }
                    count++;
                  }
                k++;
                pixels+=GetPixelChannels(image);
              }
              pixels+=(image->columns-1)*GetPixelChannels(image);
            }
            break;
          }
          case DilateIntensityMorphology:
          {
            /*
              Select pixel with maximum intensity within kernel neighbourhood.

              The kernel is not reflected for this operation.
            */
            k=(&kernel->values[kernel->width*kernel->height-1]);
            for (v=0; v < (ssize_t) kernel->height; v++)
            {
              for (u=0; u < (ssize_t) kernel->width; u++)
              {
                if (!IsNaN(*k) && (*k >= 0.5))
                  {
                    intensity=(double) GetPixelIntensity(image,pixels);
                    if (intensity > maximum)
                      {
                        pixel=(double) pixels[i];
                        quantum_pixels=pixels;
                        maximum=intensity;
                      }
                    count++;
                  }
                k--;
                pixels+=GetPixelChannels(image);
              }
              pixels+=(image->columns-1)*GetPixelChannels(image);
            }
            break;
          }
          case IterativeDistanceMorphology:
          {
            /*
              Compute th iterative distance from black edge of a white image
              shape. Essentially white values are decreased to the smallest
              'distance from edge' it can find.

              It works by adding kernel values to the neighbourhood, and
              select the minimum value found. The kernel is rotated before
              use, so kernel distances match resulting distances, when a user
              provided asymmetric kernel is applied.

              This code is nearly identical to True GrayScale Morphology but
              not quite.

              GreyDilate Kernel values added, maximum value found Kernel is
              rotated before use.

              GrayErode: Kernel values subtracted and minimum value found No
              kernel rotation used.

              Note the Iterative Distance method is essentially a
              GrayErode, but with negative kernel values, and kernel rotation
              applied.
            */
            k=(&kernel->values[kernel->width*kernel->height-1]);
            for (v=0; v < (ssize_t) kernel->height; v++)
            {
              for (u=0; u < (ssize_t) kernel->width; u++)
              {
                if (!IsNaN(*k))
                  {
                    if ((pixels[i]+(*k)) < pixel)
                      pixel=(double) pixels[i]+(*k);
                    count++;
                  }
                k--;
                pixels+=GetPixelChannels(image);
              }
              pixels+=(image->columns-1)*GetPixelChannels(image);
            }
            break;
          }
          case UndefinedMorphology:
          default:
            break;
        }
        if (fabs(pixel-p[center+i]) > MagickEpsilon)
          changes[id]++;
        if (quantum_pixels != (const Quantum *) NULL)
          {
            /* intensity methods copy the whole selected pixel's channel */
            SetPixelChannel(morphology_image,channel,quantum_pixels[i],q);
            continue;
          }
        gamma=PerceptibleReciprocal(gamma);
        if (count != 0)
          gamma*=(double) kernel->height*kernel->width/count;  /* NaN renorm */
        SetPixelChannel(morphology_image,channel,ClampToQuantum(gamma*pixel),q);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(morphology_image);
    }
    if (SyncCacheViewAuthenticPixels(morphology_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,MorphologyTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  morphology_view=DestroyCacheView(morphology_view);
  image_view=DestroyCacheView(image_view);
  for (j=0; j < (ssize_t) GetOpenMPMaximumThreads(); j++)
    changed+=changes[j];
  changes=(size_t *) RelinquishMagickMemory(changes);
  return(status ? (ssize_t) changed : -1);
}
/*
  This is almost identical to the MorphologyPrimitive() function above, but
  applies the primitive directly to the actual image using two passes, once in
  each direction, with the results of the previous (and current) row being
  re-used.

  That is after each row is 'Sync'ed' into the image, the next row makes use of
  those values as part of the calculation of the next row. It repeats, but
  going in the opposite (bottom-up) direction.

  Because of this 're-use of results' this function can not make use of multi-
  threaded, parallel processing.
*/
/*
Apply a morphology primitive directly to 'image' in two passes: once
top-down, then once bottom-up.  Each row read re-uses values already
written back to the image by earlier rows, which is why this function
cannot be parallelized (see the comment above).  Returns the number of
pixel channel values changed, or -1 on error.
*/
static ssize_t MorphologyPrimitiveDirect(Image *image,
const MorphologyMethod method,const KernelInfo *kernel,
ExceptionInfo *exception)
{
CacheView
*morphology_view,
*image_view;
MagickBooleanType
status;
MagickOffsetType
progress;
OffsetInfo
offset;  /* kernel origin, reflected for distance-type methods */
size_t
width,   /* padded row width read from the virtual view */
changed; /* running count of channel values modified */
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(kernel != (KernelInfo *) NULL);
assert(kernel->signature == MagickCoreSignature);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
status=MagickTrue;
changed=0;
progress=0;
switch(method)
{
case DistanceMorphology:
case VoronoiMorphology:
{
/*
Kernel reflected about origin.
*/
offset.x=(ssize_t) kernel->width-kernel->x-1;
offset.y=(ssize_t) kernel->height-kernel->y-1;
break;
}
default:
{
offset.x=kernel->x;
offset.y=kernel->y;
break;
}
}
/*
Two views into same image, do not thread.
*/
image_view=AcquireVirtualCacheView(image,exception);
morphology_view=AcquireAuthenticCacheView(image,exception);
width=image->columns+kernel->width-1;
for (y=0; y < (ssize_t) image->rows; y++)
{
register const Quantum
*magick_restrict p;
register Quantum
*magick_restrict q;
register ssize_t
x;
/*
Read virtual pixels, and authentic pixels, from the same image! We read
using virtual to get virtual pixel handling, but write back into the same
image.
Only top half of kernel is processed as we do a single pass downward
through the image iterating the distance function as we go.
*/
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,-offset.x,y-offset.y,width,(size_t)
offset.y+1,exception);
q=GetCacheViewAuthenticPixels(morphology_view,0,y,image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
register ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
double
pixel;
PixelChannel
channel;
PixelTrait
traits;
register const MagickRealType
*magick_restrict k;
register const Quantum
*magick_restrict pixels;
register ssize_t
u;
ssize_t
v;
channel=GetPixelChannelChannel(image,i);
traits=GetPixelChannelTraits(image,channel);
if (traits == UndefinedPixelTrait)
continue;
if ((traits & CopyPixelTrait) != 0)
continue;
pixels=p;
pixel=(double) QuantumRange;
switch (method)
{
case DistanceMorphology:
{
/* scan all kernel rows above and including the origin row,
** walking the (reflected) kernel values backward */
k=(&kernel->values[kernel->width*kernel->height-1]);
for (v=0; v <= offset.y; v++)
{
for (u=0; u < (ssize_t) kernel->width; u++)
{
if (!IsNaN(*k))
{
if ((pixels[i]+(*k)) < pixel)
pixel=(double) pixels[i]+(*k);
}
k--;
pixels+=GetPixelChannels(image);
}
pixels+=(image->columns-1)*GetPixelChannels(image);
}
/* then the already-updated pixels to the left on the current row */
k=(&kernel->values[kernel->width*(kernel->y+1)-1]);
pixels=q-offset.x*GetPixelChannels(image);
for (u=0; u < offset.x; u++)
{
if (!IsNaN(*k) && ((x+u-offset.x) >= 0))
{
if ((pixels[i]+(*k)) < pixel)
pixel=(double) pixels[i]+(*k);
}
k--;
pixels+=GetPixelChannels(image);
}
break;
}
case VoronoiMorphology:
{
/* scan the kernel rows strictly above the origin row */
k=(&kernel->values[kernel->width*kernel->height-1]);
for (v=0; v < offset.y; v++)
{
for (u=0; u < (ssize_t) kernel->width; u++)
{
if (!IsNaN(*k))
{
if ((pixels[i]+(*k)) < pixel)
pixel=(double) pixels[i]+(*k);
}
k--;
pixels+=GetPixelChannels(image);
}
pixels+=(image->columns-1)*GetPixelChannels(image);
}
/* then the already-updated pixels to the left on the current row */
k=(&kernel->values[kernel->width*(kernel->y+1)-1]);
pixels=q-offset.x*GetPixelChannels(image);
for (u=0; u < offset.x; u++)
{
if (!IsNaN(*k) && ((x+u-offset.x) >= 0))
{
if ((pixels[i]+(*k)) < pixel)
pixel=(double) pixels[i]+(*k);
}
k--;
pixels+=GetPixelChannels(image);
}
break;
}
default:
break;
}
if (fabs(pixel-q[i]) > MagickEpsilon)
changed++;
q[i]=ClampToQuantum(pixel);
}
p+=GetPixelChannels(image);
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(morphology_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
/* NOTE(review): this loop is single-threaded, so the atomic is
** harmless but unnecessary here */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,MorphologyTag,progress,2*image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
morphology_view=DestroyCacheView(morphology_view);
image_view=DestroyCacheView(image_view);
/*
Do the reverse pass through the image.
*/
image_view=AcquireVirtualCacheView(image,exception);
morphology_view=AcquireAuthenticCacheView(image,exception);
for (y=(ssize_t) image->rows-1; y >= 0; y--)
{
register const Quantum
*magick_restrict p;
register Quantum
*magick_restrict q;
register ssize_t
x;
/*
Read virtual pixels, and authentic pixels, from the same image. We
read using virtual to get virtual pixel handling, but write back
into the same image.
Only the bottom half of the kernel is processed as we up the image.
*/
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,-offset.x,y,width,(size_t)
kernel->y+1,exception);
q=GetCacheViewAuthenticPixels(morphology_view,0,y,image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
/* start at the right-most pixel and walk leftward */
p+=(image->columns-1)*GetPixelChannels(image);
q+=(image->columns-1)*GetPixelChannels(image);
for (x=(ssize_t) image->columns-1; x >= 0; x--)
{
register ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
double
pixel;
PixelChannel
channel;
PixelTrait
traits;
register const MagickRealType
*magick_restrict k;
register const Quantum
*magick_restrict pixels;
register ssize_t
u;
ssize_t
v;
channel=GetPixelChannelChannel(image,i);
traits=GetPixelChannelTraits(image,channel);
if (traits == UndefinedPixelTrait)
continue;
if ((traits & CopyPixelTrait) != 0)
continue;
pixels=p;
pixel=(double) QuantumRange;
switch (method)
{
case DistanceMorphology:
{
/* scan the kernel rows at and below the origin row */
k=(&kernel->values[kernel->width*(kernel->y+1)-1]);
for (v=offset.y; v < (ssize_t) kernel->height; v++)
{
for (u=0; u < (ssize_t) kernel->width; u++)
{
if (!IsNaN(*k))
{
if ((pixels[i]+(*k)) < pixel)
pixel=(double) pixels[i]+(*k);
}
k--;
pixels+=GetPixelChannels(image);
}
pixels+=(image->columns-1)*GetPixelChannels(image);
}
/* then the already-updated pixels to the right on the current row */
k=(&kernel->values[kernel->width*kernel->y+kernel->x-1]);
pixels=q;
for (u=offset.x+1; u < (ssize_t) kernel->width; u++)
{
pixels+=GetPixelChannels(image);
if (!IsNaN(*k) && ((x+u-offset.x) < (ssize_t) image->columns))
{
if ((pixels[i]+(*k)) < pixel)
pixel=(double) pixels[i]+(*k);
}
k--;
}
break;
}
case VoronoiMorphology:
{
/* scan the kernel rows at and below the origin row */
k=(&kernel->values[kernel->width*(kernel->y+1)-1]);
for (v=offset.y; v < (ssize_t) kernel->height; v++)
{
for (u=0; u < (ssize_t) kernel->width; u++)
{
if (!IsNaN(*k))
{
if ((pixels[i]+(*k)) < pixel)
pixel=(double) pixels[i]+(*k);
}
k--;
pixels+=GetPixelChannels(image);
}
pixels+=(image->columns-1)*GetPixelChannels(image);
}
/* then the already-updated pixels to the right on the current row */
k=(&kernel->values[kernel->width*(kernel->y+1)-1]);
pixels=q;
for (u=offset.x+1; u < (ssize_t) kernel->width; u++)
{
pixels+=GetPixelChannels(image);
if (!IsNaN(*k) && ((x+u-offset.x) < (ssize_t) image->columns))
{
if ((pixels[i]+(*k)) < pixel)
pixel=(double) pixels[i]+(*k);
}
k--;
}
break;
}
default:
break;
}
if (fabs(pixel-q[i]) > MagickEpsilon)
changed++;
q[i]=ClampToQuantum(pixel);
}
p-=GetPixelChannels(image);
q-=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(morphology_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,MorphologyTag,progress,2*image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
morphology_view=DestroyCacheView(morphology_view);
image_view=DestroyCacheView(image_view);
return(status ? (ssize_t) changed : -1);
}
/*
Apply a Morphology by calling one of the above low level primitive
application functions. This function handles any iteration loops,
composition or re-iteration of results, and compound morphology methods that
are based on multiple low-level (staged) morphology methods.
Basically this provides the complex glue between the requested morphology
method and raw low-level implementation (above).
*/
MagickPrivate Image *MorphologyApply(const Image *image,
const MorphologyMethod method, const ssize_t iterations,
const KernelInfo *kernel, const CompositeOperator compose,const double bias,
ExceptionInfo *exception)
{
CompositeOperator
curr_compose;
Image
*curr_image, /* Image we are working with or iterating */
*work_image, /* secondary image for primitive iteration */
*save_image, /* saved image - for 'edge' method only */
*rslt_image; /* resultant image - after multi-kernel handling */
KernelInfo
*reflected_kernel, /* A reflected copy of the kernel (if needed) */
*norm_kernel, /* the current normal un-reflected kernel */
*rflt_kernel, /* the current reflected kernel (if needed) */
*this_kernel; /* the kernel being applied */
MorphologyMethod
primitive; /* the current morphology primitive being applied */
CompositeOperator
rslt_compose; /* multi-kernel compose method for results to use */
MagickBooleanType
special, /* do we use a direct modify function? */
verbose; /* verbose output of results */
size_t
method_loop, /* Loop 1: number of compound method iterations (norm 1) */
method_limit, /* maximum number of compound method iterations */
kernel_number, /* Loop 2: the kernel number being applied */
stage_loop, /* Loop 3: primitive loop for compound morphology */
stage_limit, /* how many primitives are in this compound */
kernel_loop, /* Loop 4: iterate the kernel over image */
kernel_limit, /* number of times to iterate kernel */
count, /* total count of primitive steps applied */
kernel_changed, /* total count of changed using iterated kernel */
method_changed; /* total count of changed over method iteration */
ssize_t
changed; /* number pixels changed by last primitive operation */
char
v_info[MagickPathExtent];
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(kernel != (KernelInfo *) NULL);
assert(kernel->signature == MagickCoreSignature);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
count = 0; /* number of low-level morphology primitives performed */
if ( iterations == 0 )
return((Image *) NULL); /* null operation - nothing to do! */
kernel_limit = (size_t) iterations;
if ( iterations < 0 ) /* negative iterations = infinite (well almost) */
kernel_limit = image->columns>image->rows ? image->columns : image->rows;
verbose = IsStringTrue(GetImageArtifact(image,"debug"));
/* initialise for cleanup */
curr_image = (Image *) image;
curr_compose = image->compose;
(void) curr_compose; /* suppress unused-variable warning */
work_image = save_image = rslt_image = (Image *) NULL;
reflected_kernel = (KernelInfo *) NULL;
/* Initialize specific methods
* + which loop should use the given iterations
* + how many primitives make up the compound morphology
* + multi-kernel compose method to use (by default)
*/
method_limit = 1; /* just do method once, unless otherwise set */
stage_limit = 1; /* assume method is not a compound */
special = MagickFalse; /* assume it is NOT a direct modify primitive */
rslt_compose = compose; /* and we are composing multi-kernels as given */
switch( method ) {
case SmoothMorphology: /* 4 primitive compound morphology */
stage_limit = 4;
break;
case OpenMorphology: /* 2 primitive compound morphology */
case OpenIntensityMorphology:
case TopHatMorphology:
case CloseMorphology:
case CloseIntensityMorphology:
case BottomHatMorphology:
case EdgeMorphology:
stage_limit = 2;
break;
case HitAndMissMorphology:
rslt_compose = LightenCompositeOp; /* Union of multi-kernel results */
/* FALLTHROUGH */
case ThinningMorphology:
case ThickenMorphology:
method_limit = kernel_limit; /* iterate the whole method */
kernel_limit = 1; /* do not do kernel iteration */
break;
case DistanceMorphology:
case VoronoiMorphology:
special = MagickTrue; /* use special direct primitive */
break;
default:
break;
}
/* Apply special methods with special requirements
** For example, single run only, or post-processing requirements
*/
if ( special != MagickFalse )
{
rslt_image=CloneImage(image,0,0,MagickTrue,exception);
if (rslt_image == (Image *) NULL)
goto error_cleanup;
if (SetImageStorageClass(rslt_image,DirectClass,exception) == MagickFalse)
goto error_cleanup;
/* a negative return from the direct primitive signals an error */
changed=MorphologyPrimitiveDirect(rslt_image,method,kernel,exception);
if (verbose != MagickFalse)
(void) (void) FormatLocaleFile(stderr,
"%s:%.20g.%.20g #%.20g => Changed %.20g\n",
CommandOptionToMnemonic(MagickMorphologyOptions, method),
1.0,0.0,1.0, (double) changed);
if ( changed < 0 )
goto error_cleanup;
if ( method == VoronoiMorphology ) {
/* Preserve the alpha channel of input image - but turned it off */
(void) SetImageAlphaChannel(rslt_image, DeactivateAlphaChannel,
exception);
(void) CompositeImage(rslt_image,image,CopyAlphaCompositeOp,
MagickTrue,0,0,exception);
(void) SetImageAlphaChannel(rslt_image, DeactivateAlphaChannel,
exception);
}
goto exit_cleanup;
}
/* Handle user (caller) specified multi-kernel composition method */
if ( compose != UndefinedCompositeOp )
rslt_compose = compose; /* override default composition for method */
if ( rslt_compose == UndefinedCompositeOp )
rslt_compose = NoCompositeOp; /* still not defined! Then re-iterate */
/* Some methods require a reflected kernel to use with primitives.
* Create the reflected kernel for those methods. */
switch ( method ) {
case CorrelateMorphology:
case CloseMorphology:
case CloseIntensityMorphology:
case BottomHatMorphology:
case SmoothMorphology:
reflected_kernel = CloneKernelInfo(kernel);
if (reflected_kernel == (KernelInfo *) NULL)
goto error_cleanup;
RotateKernelInfo(reflected_kernel,180);
break;
default:
break;
}
/* Loops around more primitive morphology methods
** erode, dilate, open, close, smooth, edge, etc...
*/
/* Loop 1: iterate the compound method */
method_loop = 0;
method_changed = 1;
while ( method_loop < method_limit && method_changed > 0 ) {
method_loop++;
method_changed = 0;
/* Loop 2: iterate over each kernel in a multi-kernel list */
norm_kernel = (KernelInfo *) kernel;
this_kernel = (KernelInfo *) kernel;
rflt_kernel = reflected_kernel;
kernel_number = 0;
while ( norm_kernel != NULL ) {
/* Loop 3: Compound Morphology Staging - Select Primitive to apply */
stage_loop = 0; /* the compound morphology stage number */
while ( stage_loop < stage_limit ) {
stage_loop++; /* The stage of the compound morphology */
/* Select primitive morphology for this stage of compound method */
this_kernel = norm_kernel; /* default use unreflected kernel */
primitive = method; /* Assume method is a primitive */
switch( method ) {
case ErodeMorphology: /* just erode */
case EdgeInMorphology: /* erode and image difference */
primitive = ErodeMorphology;
break;
case DilateMorphology: /* just dilate */
case EdgeOutMorphology: /* dilate and image difference */
primitive = DilateMorphology;
break;
case OpenMorphology: /* erode then dilate */
case TopHatMorphology: /* open and image difference */
primitive = ErodeMorphology;
if ( stage_loop == 2 )
primitive = DilateMorphology;
break;
case OpenIntensityMorphology:
primitive = ErodeIntensityMorphology;
if ( stage_loop == 2 )
primitive = DilateIntensityMorphology;
break;
case CloseMorphology: /* dilate, then erode */
case BottomHatMorphology: /* close and image difference */
this_kernel = rflt_kernel; /* use the reflected kernel */
primitive = DilateMorphology;
if ( stage_loop == 2 )
primitive = ErodeMorphology;
break;
case CloseIntensityMorphology:
this_kernel = rflt_kernel; /* use the reflected kernel */
primitive = DilateIntensityMorphology;
if ( stage_loop == 2 )
primitive = ErodeIntensityMorphology;
break;
case SmoothMorphology: /* open, close */
switch ( stage_loop ) {
case 1: /* start an open method, which starts with Erode */
primitive = ErodeMorphology;
break;
case 2: /* now Dilate the Erode */
primitive = DilateMorphology;
break;
case 3: /* Reflect kernel a close */
this_kernel = rflt_kernel; /* use the reflected kernel */
primitive = DilateMorphology;
break;
case 4: /* Finish the Close */
this_kernel = rflt_kernel; /* use the reflected kernel */
primitive = ErodeMorphology;
break;
}
break;
case EdgeMorphology: /* dilate and erode difference */
primitive = DilateMorphology;
if ( stage_loop == 2 ) {
save_image = curr_image; /* save the image difference */
curr_image = (Image *) image;
primitive = ErodeMorphology;
}
break;
case CorrelateMorphology:
/* A Correlation is a Convolution with a reflected kernel.
** However a Convolution is a weighted sum using a reflected
** kernel. It may seem strange to convert a Correlation into a
** Convolution as the Correlation is the simpler method, but
** Convolution is much more commonly used, and it makes sense to
** implement it directly so as to avoid the need to duplicate the
** kernel when it is not required (which is typically the
** default).
*/
this_kernel = rflt_kernel; /* use the reflected kernel */
primitive = ConvolveMorphology;
break;
default:
break;
}
assert( this_kernel != (KernelInfo *) NULL );
/* Extra information for debugging compound operations */
if (verbose != MagickFalse) {
if ( stage_limit > 1 )
(void) FormatLocaleString(v_info,MagickPathExtent,"%s:%.20g.%.20g -> ",
CommandOptionToMnemonic(MagickMorphologyOptions,method),(double)
method_loop,(double) stage_loop);
else if ( primitive != method )
(void) FormatLocaleString(v_info, MagickPathExtent, "%s:%.20g -> ",
CommandOptionToMnemonic(MagickMorphologyOptions, method),(double)
method_loop);
else
v_info[0] = '\0';
}
/* Loop 4: Iterate the kernel with primitive */
kernel_loop = 0;
kernel_changed = 0;
changed = 1;
while ( kernel_loop < kernel_limit && changed > 0 ) {
kernel_loop++; /* the iteration of this kernel */
/* Create a clone as the destination image, if not yet defined */
if ( work_image == (Image *) NULL )
{
work_image=CloneImage(image,0,0,MagickTrue,exception);
if (work_image == (Image *) NULL)
goto error_cleanup;
if (SetImageStorageClass(work_image,DirectClass,exception) == MagickFalse)
goto error_cleanup;
}
/* APPLY THE MORPHOLOGICAL PRIMITIVE (curr -> work) */
count++;
changed = MorphologyPrimitive(curr_image, work_image, primitive,
this_kernel, bias, exception);
if (verbose != MagickFalse) {
if ( kernel_loop > 1 )
(void) FormatLocaleFile(stderr, "\n"); /* add end-of-line from previous */
(void) (void) FormatLocaleFile(stderr,
"%s%s%s:%.20g.%.20g #%.20g => Changed %.20g",
v_info,CommandOptionToMnemonic(MagickMorphologyOptions,
primitive),(this_kernel == rflt_kernel ) ? "*" : "",
(double) (method_loop+kernel_loop-1),(double) kernel_number,
(double) count,(double) changed);
}
if ( changed < 0 )
goto error_cleanup;
kernel_changed += changed;
method_changed += changed;
/* prepare next loop */
{ Image *tmp = work_image; /* swap images for iteration */
work_image = curr_image;
curr_image = tmp;
}
if ( work_image == image )
work_image = (Image *) NULL; /* replace input 'image' */
} /* End Loop 4: Iterate the kernel with primitive */
if (verbose != MagickFalse && kernel_changed != (size_t)changed)
(void) FormatLocaleFile(stderr, " Total %.20g",(double) kernel_changed);
if (verbose != MagickFalse && stage_loop < stage_limit)
(void) FormatLocaleFile(stderr, "\n"); /* add end-of-line before looping */
#if 0
(void) FormatLocaleFile(stderr, "--E-- image=0x%lx\n", (unsigned long)image);
(void) FormatLocaleFile(stderr, " curr =0x%lx\n", (unsigned long)curr_image);
(void) FormatLocaleFile(stderr, " work =0x%lx\n", (unsigned long)work_image);
(void) FormatLocaleFile(stderr, " save =0x%lx\n", (unsigned long)save_image);
(void) FormatLocaleFile(stderr, " union=0x%lx\n", (unsigned long)rslt_image);
#endif
} /* End Loop 3: Primitive (staging) Loop for Compound Methods */
/* Final Post-processing for some Compound Methods
**
** The removal of any 'Sync' channel flag in the Image Composition
** below ensures the mathematical compose method is applied in a
** purely mathematical way, and only to the selected channels.
** Turn off SVG composition 'alpha blending'.
*/
switch( method ) {
case EdgeOutMorphology:
case EdgeInMorphology:
case TopHatMorphology:
case BottomHatMorphology:
if (verbose != MagickFalse)
(void) FormatLocaleFile(stderr,
"\n%s: Difference with original image",CommandOptionToMnemonic(
MagickMorphologyOptions, method) );
(void) CompositeImage(curr_image,image,DifferenceCompositeOp,
MagickTrue,0,0,exception);
break;
case EdgeMorphology:
if (verbose != MagickFalse)
(void) FormatLocaleFile(stderr,
"\n%s: Difference of Dilate and Erode",CommandOptionToMnemonic(
MagickMorphologyOptions, method) );
(void) CompositeImage(curr_image,save_image,DifferenceCompositeOp,
MagickTrue,0,0,exception);
save_image = DestroyImage(save_image); /* finished with save image */
break;
default:
break;
}
/* multi-kernel handling: re-iterate, or compose results */
if ( kernel->next == (KernelInfo *) NULL )
rslt_image = curr_image; /* just return the resulting image */
else if ( rslt_compose == NoCompositeOp )
{ if (verbose != MagickFalse) {
if ( this_kernel->next != (KernelInfo *) NULL )
(void) FormatLocaleFile(stderr, " (re-iterate)");
else
(void) FormatLocaleFile(stderr, " (done)");
}
rslt_image = curr_image; /* return result, and re-iterate */
}
else if ( rslt_image == (Image *) NULL)
{ if (verbose != MagickFalse)
(void) FormatLocaleFile(stderr, " (save for compose)");
rslt_image = curr_image;
curr_image = (Image *) image; /* continue with original image */
}
else
{ /* Add the new 'current' result to the composition
**
** The removal of any 'Sync' channel flag in the Image Composition
** below ensures the mathematical compose method is applied in a
** purely mathematical way, and only to the selected channels.
** IE: Turn off SVG composition 'alpha blending'.
*/
if (verbose != MagickFalse)
(void) FormatLocaleFile(stderr, " (compose \"%s\")",
CommandOptionToMnemonic(MagickComposeOptions, rslt_compose) );
(void) CompositeImage(rslt_image,curr_image,rslt_compose,MagickTrue,
0,0,exception);
curr_image = DestroyImage(curr_image);
curr_image = (Image *) image; /* continue with original image */
}
if (verbose != MagickFalse)
(void) FormatLocaleFile(stderr, "\n");
/* loop to the next kernel in a multi-kernel list */
norm_kernel = norm_kernel->next;
if ( rflt_kernel != (KernelInfo *) NULL )
rflt_kernel = rflt_kernel->next;
kernel_number++;
} /* End Loop 2: Loop over each kernel */
} /* End Loop 1: compound method iteration */
goto exit_cleanup;
/* Yes goto's are bad, but it makes cleanup lot more efficient */
error_cleanup:
if ( curr_image == rslt_image )
curr_image = (Image *) NULL;
if ( rslt_image != (Image *) NULL )
rslt_image = DestroyImage(rslt_image);
exit_cleanup:
if ( curr_image == rslt_image || curr_image == image )
curr_image = (Image *) NULL;
if ( curr_image != (Image *) NULL )
curr_image = DestroyImage(curr_image);
if ( work_image != (Image *) NULL )
work_image = DestroyImage(work_image);
if ( save_image != (Image *) NULL )
save_image = DestroyImage(save_image);
if ( reflected_kernel != (KernelInfo *) NULL )
reflected_kernel = DestroyKernelInfo(reflected_kernel);
return(rslt_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M o r p h o l o g y I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MorphologyImage() applies a user supplied kernel to the image according to
% the given morphology method.
%
% This function applies any and all user defined settings before calling
% the above internal function MorphologyApply().
%
% User defined settings include...
% * Output Bias for Convolution and correlation ("-define convolve:bias=??")
% * Kernel Scale/normalize settings ("-define convolve:scale=??")
% This can also includes the addition of a scaled unity kernel.
% * Show Kernel being applied ("-define morphology:showKernel=1")
%
% Other operators that do not want user supplied options interfering,
% especially "convolve:bias" and "morphology:showKernel" should use
% MorphologyApply() directly.
%
% The format of the MorphologyImage method is:
%
% Image *MorphologyImage(const Image *image,MorphologyMethod method,
% const ssize_t iterations,KernelInfo *kernel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o method: the morphology method to be applied.
%
% o iterations: apply the operation this many times (or no change).
% A value of -1 means loop until no change found.
% How this is applied may depend on the morphology method.
% Typically this is a value of 1.
%
% o kernel: An array of double representing the morphology kernel.
% Warning: kernel may be normalized for the Convolve method.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *MorphologyImage(const Image *image,
const MorphologyMethod method,const ssize_t iterations,
const KernelInfo *kernel,ExceptionInfo *exception)
{
const char
*artifact;  /* current user-defined image setting being examined */
CompositeOperator
compose;    /* multi-kernel result composition, from artifact */
double
bias;       /* output bias for convolve/correlate, from artifact */
Image
*morphology_image;
KernelInfo
*curr_kernel;  /* kernel actually applied (cloned only if scaled) */
curr_kernel = (KernelInfo *) kernel;
bias=0.0;
compose = UndefinedCompositeOp; /* use default for method */
/* Apply Convolve/Correlate Normalization and Scaling Factors.
* This is done BEFORE the ShowKernelInfo() function is called so that
* users can see the results of the 'option:convolve:scale' option.
*/
if ( method == ConvolveMorphology || method == CorrelateMorphology ) {
/* Get the bias value as it will be needed */
artifact = GetImageArtifact(image,"convolve:bias");
if ( artifact != (const char *) NULL) {
if (IsGeometry(artifact) == MagickFalse)
(void) ThrowMagickException(exception,GetMagickModule(),
OptionWarning,"InvalidSetting","'%s' '%s'",
"convolve:bias",artifact);
else
bias=StringToDoubleInterval(artifact,(double) QuantumRange+1.0);
}
/* Scale kernel according to user wishes */
artifact = GetImageArtifact(image,"convolve:scale");
if ( artifact != (const char *) NULL ) {
if (IsGeometry(artifact) == MagickFalse)
(void) ThrowMagickException(exception,GetMagickModule(),
OptionWarning,"InvalidSetting","'%s' '%s'",
"convolve:scale",artifact);
else {
/* clone the caller's kernel before modifying it in place */
if ( curr_kernel == kernel )
curr_kernel = CloneKernelInfo(kernel);
if (curr_kernel == (KernelInfo *) NULL)
return((Image *) NULL);
ScaleGeometryKernelInfo(curr_kernel, artifact);
}
}
}
/* display the (normalized) kernel via stderr */
artifact=GetImageArtifact(image,"morphology:showKernel");
if (IsStringTrue(artifact) != MagickFalse)
ShowKernelInfo(curr_kernel);
/* Override the default handling of multi-kernel morphology results
* If 'Undefined' use the default method
* If 'None' (default for 'Convolve') re-iterate previous result
* Otherwise merge resulting images using compose method given.
* Default for 'HitAndMiss' is 'Lighten'.
*/
{
ssize_t
parse;  /* parsed compose-operator option value (< 0 on failure) */
artifact = GetImageArtifact(image,"morphology:compose");
if ( artifact != (const char *) NULL) {
parse=ParseCommandOption(MagickComposeOptions,
MagickFalse,artifact);
if ( parse < 0 )
(void) ThrowMagickException(exception,GetMagickModule(),
OptionWarning,"UnrecognizedComposeOperator","'%s' '%s'",
"morphology:compose",artifact);
else
compose=(CompositeOperator)parse;
}
}
/* Apply the Morphology */
morphology_image = MorphologyApply(image,method,iterations,
curr_kernel,compose,bias,exception);
/* Cleanup and Exit */
if ( curr_kernel != kernel )
curr_kernel=DestroyKernelInfo(curr_kernel);
return(morphology_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R o t a t e K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RotateKernelInfo() rotates the kernel by the angle given.
%
% Currently it is restricted to 90 degree angles, of either 1D kernels
% or square kernels. And 'circular' rotations of 45 degrees for 3x3 kernels.
% It will ignore useless rotations for specific 'named' built-in kernels.
%
% The format of the RotateKernelInfo method is:
%
% void RotateKernelInfo(KernelInfo *kernel, double angle)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
% o angle: angle to rotate in degrees
%
% This function is currently internal to this module only, but can be exported
% to other modules if needed.
*/
static void RotateKernelInfo(KernelInfo *kernel, double angle)
{
/* angle the lower kernels first */
if ( kernel->next != (KernelInfo *) NULL)
RotateKernelInfo(kernel->next, angle);
/* WARNING: Currently assumes the kernel (rightly) is horizontally symmetrical
**
** TODO: expand beyond simple 90 degree rotates, flips and flops
*/
/* Modulus the angle into the range [0, 360) */
angle = fmod(angle, 360.0);
if ( angle < 0 )
angle += 360.0;
if ( 337.5 < angle || angle <= 22.5 )
return; /* Near zero angle - no change! - At least not at this time */
/* Handle special cases */
switch (kernel->type) {
/* These built-in kernels are cylindrical kernels, rotating is useless */
case GaussianKernel:
case DoGKernel:
case LoGKernel:
case DiskKernel:
case PeaksKernel:
case LaplacianKernel:
case ChebyshevKernel:
case ManhattanKernel:
case EuclideanKernel:
return;
/* These may be rotatable at non-90 angles in the future */
/* but simply rotating them in multiples of 90 degrees is useless */
case SquareKernel:
case DiamondKernel:
case PlusKernel:
case CrossKernel:
return;
/* These only allows a +/-90 degree rotation (by transpose) */
/* A 180 degree rotation is useless */
case BlurKernel:
if ( 135.0 < angle && angle <= 225.0 )
return;
if ( 225.0 < angle && angle <= 315.0 )
angle -= 180;
break;
default:
break;
}
/* Attempt rotations by 45 degrees -- 3x3 kernels only */
if ( 22.5 < fmod(angle,90.0) && fmod(angle,90.0) <= 67.5 )
{
if ( kernel->width == 3 && kernel->height == 3 )
{ /* Rotate a 3x3 square by 45 degree angle
** by cycling the eight outer values one position clockwise */
double t = kernel->values[0];
kernel->values[0] = kernel->values[3];
kernel->values[3] = kernel->values[6];
kernel->values[6] = kernel->values[7];
kernel->values[7] = kernel->values[8];
kernel->values[8] = kernel->values[5];
kernel->values[5] = kernel->values[2];
kernel->values[2] = kernel->values[1];
kernel->values[1] = t;
/* rotate non-centered origin */
if ( kernel->x != 1 || kernel->y != 1 ) {
ssize_t x,y;
/* map origin into offsets relative to the center cell */
x = (ssize_t) kernel->x-1;
y = (ssize_t) kernel->y-1;
if ( x == y ) x = 0;
else if ( x == 0 ) x = -y;
else if ( x == -y ) y = 0;
else if ( y == 0 ) y = x;
kernel->x = (ssize_t) x+1;
kernel->y = (ssize_t) y+1;
}
angle = fmod(angle+315.0, 360.0); /* angle reduced 45 degrees */
kernel->angle = fmod(kernel->angle+45.0, 360.0);
}
else
/* NOTE(review): perror() appends strerror(errno), which is
** unrelated to this condition - message may be misleading */
perror("Unable to rotate non-3x3 kernel by 45 degrees");
}
if ( 45.0 < fmod(angle, 180.0) && fmod(angle,180.0) <= 135.0 )
{
if ( kernel->width == 1 || kernel->height == 1 )
{ /* Do a transpose of a 1 dimensional kernel,
** which results in a fast 90 degree rotation of some type.
*/
ssize_t
t;
t = (ssize_t) kernel->width;
kernel->width = kernel->height;
kernel->height = (size_t) t;
t = kernel->x;
kernel->x = kernel->y;
kernel->y = t;
if ( kernel->width == 1 ) {
angle = fmod(angle+270.0, 360.0); /* angle reduced 90 degrees */
kernel->angle = fmod(kernel->angle+90.0, 360.0);
} else {
angle = fmod(angle+90.0, 360.0); /* angle increased 90 degrees */
kernel->angle = fmod(kernel->angle+270.0, 360.0);
}
}
else if ( kernel->width == kernel->height )
{ /* Rotate a square array of values by 90 degrees
** using an in-place four-way cycle of the elements */
{ register ssize_t
i,j,x,y;
register MagickRealType
*k,t;
k=kernel->values;
for( i=0, x=(ssize_t) kernel->width-1; i<=x; i++, x--)
for( j=0, y=(ssize_t) kernel->height-1; j<y; j++, y--)
{ t = k[i+j*kernel->width];
k[i+j*kernel->width] = k[j+x*kernel->width];
k[j+x*kernel->width] = k[x+y*kernel->width];
k[x+y*kernel->width] = k[y+i*kernel->width];
k[y+i*kernel->width] = t;
}
}
/* rotate the origin - relative to center of array */
{ register ssize_t x,y;
x = (ssize_t) (kernel->x*2-kernel->width+1);
y = (ssize_t) (kernel->y*2-kernel->height+1);
kernel->x = (ssize_t) ( -y +(ssize_t) kernel->width-1)/2;
kernel->y = (ssize_t) ( +x +(ssize_t) kernel->height-1)/2;
}
angle = fmod(angle+270.0, 360.0); /* angle reduced 90 degrees */
kernel->angle = fmod(kernel->angle+90.0, 360.0);
}
else
/* NOTE(review): perror() appends strerror(errno), which is
** unrelated to this condition - message may be misleading */
perror("Unable to rotate a non-square, non-linear kernel 90 degrees");
}
if ( 135.0 < angle && angle <= 225.0 )
{
/* For a 180 degree rotation - also known as a reflection
* This is actually a very very common operation!
* Basically all that is needed is a reversal of the kernel data!
* And a reflection of the origin
*/
MagickRealType
t;
register MagickRealType
*k;
ssize_t
i,
j;
k=kernel->values;
j=(ssize_t) (kernel->width*kernel->height-1);
for (i=0; i < j; i++, j--)
t=k[i], k[i]=k[j], k[j]=t;
kernel->x = (ssize_t) kernel->width - kernel->x - 1;
kernel->y = (ssize_t) kernel->height - kernel->y - 1;
angle = fmod(angle-180.0, 360.0); /* angle+180 degrees */
kernel->angle = fmod(kernel->angle+180.0, 360.0);
}
/* At this point angle should at least between -45 (315) and +45 degrees
* In the future some form of non-orthogonal angled rotates could be
* performed here, possibly with a linear kernel restriction.
*/
return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S c a l e G e o m e t r y K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ScaleGeometryKernelInfo() takes a geometry argument string, typically
% provided as a "-set option:convolve:scale {geometry}" user setting,
% and modifies the kernel according to the parsed arguments of that setting.
%
% The first argument (and any normalization flags) are passed to
% ScaleKernelInfo() to scale/normalize the kernel. The second argument
% is then passed to UnityAddKernelInfo() to add a scaled unity kernel
% into the scaled/normalized kernel.
%
% The format of the ScaleGeometryKernelInfo method is:
%
% void ScaleGeometryKernelInfo(KernelInfo *kernel,
% const double scaling_factor,const MagickStatusType normalize_flags)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel to modify
%
% o geometry:
% The geometry string to parse, typically from the user provided
% "-set option:convolve:scale {geometry}" setting.
%
*/
MagickExport void ScaleGeometryKernelInfo (KernelInfo *kernel,
const char *geometry)
{
  GeometryInfo
    geometry_info;

  MagickStatusType
    parse_flags;

  /* Parse the user supplied geometry string, e.g. from the
  ** "-set option:convolve:scale {geometry}" setting. */
  SetGeometryInfo(&geometry_info);
  parse_flags = ParseGeometry(geometry, &geometry_info);
  if ( (parse_flags & PercentValue) != 0 )
    { /* a '%' flag scales both arguments as percentages */
      geometry_info.rho *= 0.01;
      geometry_info.sigma *= 0.01;
    }
  /* Fill in defaults for any arguments that were not supplied */
  if ( (parse_flags & RhoValue) == 0 )
    geometry_info.rho = 1.0;
  if ( (parse_flags & SigmaValue) == 0 )
    geometry_info.sigma = 0.0;
  /* First argument (plus normalization flags): scale/normalize kernel */
  ScaleKernelInfo(kernel, geometry_info.rho, (GeometryFlags) parse_flags);
  /* Second argument (if given): blend in a scaled unity kernel */
  if ( (parse_flags & SigmaValue) != 0 )
    UnityAddKernelInfo(kernel, geometry_info.sigma);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S c a l e K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ScaleKernelInfo() scales the given kernel list by the given amount, with or
% without normalization of the sum of the kernel values (as per given flags).
%
% By default (no flags given) the values within the kernel is scaled
% directly using given scaling factor without change.
%
% If either of the two 'normalize_flags' are given the kernel will first be
% normalized and then further scaled by the scaling factor value given.
%
% Kernel normalization ('normalize_flags' given) is designed to ensure that
% any use of the kernel scaling factor with 'Convolve' or 'Correlate'
% morphology methods will fall into -1.0 to +1.0 range. Note that for
% non-HDRI versions of IM this may cause images to have any negative results
% clipped, unless some 'bias' is used.
%
% More specifically. Kernels which only contain positive values (such as a
% 'Gaussian' kernel) will be scaled so that those values sum to +1.0,
% ensuring a 0.0 to +1.0 output range for non-HDRI images.
%
% For Kernels that contain some negative values, (such as 'Sharpen' kernels)
% the kernel will be scaled by the absolute of the sum of kernel values, so
% that it will generally fall within the +/- 1.0 range.
%
% For kernels whose values sum to zero, (such as 'Laplacian' kernels) kernel
% will be scaled by just the sum of the positive values, so that its output
% range will again fall into the +/- 1.0 range.
%
% For special kernels designed for locating shapes using 'Correlate', (often
% only containing +1 and -1 values, representing foreground/background
% matching) a special normalization method is provided to scale the positive
% values separately to those of the negative values, so the kernel will be
% forced to become a zero-sum kernel better suited to such searches.
%
% WARNING: Correct normalization of the kernel assumes that the '*_range'
% attributes within the kernel structure have been correctly set during the
% kernels creation.
%
% NOTE: The values used for 'normalize_flags' have been selected specifically
% to match the use of geometry options, so that '!' means NormalizeValue, '^'
% means CorrelateNormalizeValue. All other GeometryFlags values are ignored.
%
% The format of the ScaleKernelInfo method is:
%
% void ScaleKernelInfo(KernelInfo *kernel, const double scaling_factor,
% const MagickStatusType normalize_flags )
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
% o scaling_factor:
% multiply all values (after normalization) by this factor if not
% zero. If the kernel is normalized regardless of any flags.
%
% o normalize_flags:
% GeometryFlags defining normalization method to use.
% specifically: NormalizeValue, CorrelateNormalizeValue,
% and/or PercentValue
%
*/
/* Scale (and optionally normalize) every value of the given kernel list.
 * NormalizeValue scales by the absolute sum of the kernel values (or just
 * the positive sum for zero-summing kernels); CorrelateNormalizeValue
 * scales positive and negative values separately, forcing a zero-summing
 * kernel.  The '*_range', 'maximum' and 'minimum' meta-data are updated
 * to match.
 */
MagickExport void ScaleKernelInfo(KernelInfo *kernel,
  const double scaling_factor,const GeometryFlags normalize_flags)
{
  double
    pos_scale,
    neg_scale;

  ssize_t
    i;

  /* Recursively handle the rest of a multi-kernel list first. */
  if ( kernel->next != (KernelInfo *) NULL)
    ScaleKernelInfo(kernel->next, scaling_factor, normalize_flags);

  /* Normalization of Kernel */
  pos_scale = 1.0;
  if ( (normalize_flags&NormalizeValue) != 0 ) {
    if ( fabs(kernel->positive_range + kernel->negative_range) >= MagickEpsilon )
      /* non-zero-summing kernel (generally positive) */
      pos_scale = fabs(kernel->positive_range + kernel->negative_range);
    else
      /* zero-summing kernel: normalize by the positive part only */
      pos_scale = kernel->positive_range;
  }

  /* Force kernel into a normalized zero-summing kernel */
  if ( (normalize_flags&CorrelateNormalizeValue) != 0 ) {
    pos_scale = ( fabs(kernel->positive_range) >= MagickEpsilon )
                  ? kernel->positive_range : 1.0;
    neg_scale = ( fabs(kernel->negative_range) >= MagickEpsilon )
                  ? -kernel->negative_range : 1.0;
  }
  else
    neg_scale = pos_scale;

  /* finalize scaling_factor for positive and negative components */
  pos_scale = scaling_factor/pos_scale;
  neg_scale = scaling_factor/neg_scale;

  /* apply the appropriate factor to every (non-NaN) kernel value */
  for (i=0; i < (ssize_t) (kernel->width*kernel->height); i++)
    if (!IsNaN(kernel->values[i]))
      kernel->values[i] *= (kernel->values[i] >= 0) ? pos_scale : neg_scale;

  /* convolution output range */
  kernel->positive_range *= pos_scale;
  kernel->negative_range *= neg_scale;
  /* maximum and minimum values in kernel */
  kernel->maximum *= (kernel->maximum >= 0.0) ? pos_scale : neg_scale;
  kernel->minimum *= (kernel->minimum >= 0.0) ? pos_scale : neg_scale;

  /* Swap the range/extrema bookkeeping if the scaling factor is negative.
   * NOTE(review): the test also fires for 0 <= scaling_factor < epsilon;
   * presumably only negative factors are intended — confirm upstream.
   */
  if ( scaling_factor < MagickEpsilon ) {
    double t;
    t = kernel->positive_range;
    kernel->positive_range = kernel->negative_range;
    kernel->negative_range = t;
    t = kernel->maximum;
    kernel->maximum = kernel->minimum;
    kernel->minimum = t;  /* BUG FIX: was "= 1", discarding the saved maximum
                             and breaking the max/min swap */
  }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S h o w K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ShowKernelInfo() outputs the details of the given kernel defination to
% standard error, generally due to a users 'morphology:showKernel' option
% request.
%
% The format of the ShowKernel method is:
%
% void ShowKernelInfo(const KernelInfo *kernel)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
*/
/* Print a human-readable dump of every kernel in a multi-kernel list to
 * stderr: type, angle, geometry, value range, normalization status, and the
 * full value matrix (with "nan" shown for the special NaN entries).
 */
MagickPrivate void ShowKernelInfo(const KernelInfo *kernel)
{
  const KernelInfo
    *k;

  size_t
    c, i, u, v;

  /* walk the kernel list; c is the kernel's index within the list */
  for (c=0, k=kernel; k != (KernelInfo *) NULL; c++, k=k->next ) {
    (void) FormatLocaleFile(stderr, "Kernel");
    /* NOTE(review): the index "#n" is shown whenever the FIRST kernel has a
     * successor (kernel->next), not the current one (k->next) — presumably
     * intentional so every member of a multi-kernel list gets numbered,
     * including the last; confirm upstream. */
    if ( kernel->next != (KernelInfo *) NULL )
      (void) FormatLocaleFile(stderr, " #%lu", (unsigned long) c );
    (void) FormatLocaleFile(stderr, " \"%s",
          CommandOptionToMnemonic(MagickKernelOptions, k->type) );
    /* only print an angle suffix when the kernel was actually rotated */
    if ( fabs(k->angle) >= MagickEpsilon )
      (void) FormatLocaleFile(stderr, "@%lg", k->angle);
    (void) FormatLocaleFile(stderr, "\" of size %lux%lu%+ld%+ld",(unsigned long)
         k->width,(unsigned long) k->height,(long) k->x,(long) k->y);
    (void) FormatLocaleFile(stderr,
          " with values from %.*lg to %.*lg\n",
          GetMagickPrecision(), k->minimum,
          GetMagickPrecision(), k->maximum);
    (void) FormatLocaleFile(stderr, "Forming a output range from %.*lg to %.*lg",
          GetMagickPrecision(), k->negative_range,
          GetMagickPrecision(), k->positive_range);
    /* classify the kernel by the sum of its values */
    if ( fabs(k->positive_range+k->negative_range) < MagickEpsilon )
      (void) FormatLocaleFile(stderr, " (Zero-Summing)\n");
    else if ( fabs(k->positive_range+k->negative_range-1.0) < MagickEpsilon )
      (void) FormatLocaleFile(stderr, " (Normalized)\n");
    else
      (void) FormatLocaleFile(stderr, " (Sum %.*lg)\n",
          GetMagickPrecision(), k->positive_range+k->negative_range);
    /* dump the value matrix, one row per line; i is the flat value index */
    for (i=v=0; v < k->height; v++) {
      (void) FormatLocaleFile(stderr, "%2lu:", (unsigned long) v );
      for (u=0; u < k->width; u++, i++)
        if (IsNaN(k->values[i]))
          (void) FormatLocaleFile(stderr," %*s", GetMagickPrecision()+3, "nan");
        else
          (void) FormatLocaleFile(stderr," %*.*lg", GetMagickPrecision()+3,
              GetMagickPrecision(), (double) k->values[i]);
      (void) FormatLocaleFile(stderr,"\n");
    }
  }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
%     U n i t y   A d d   K e r n e l   I n f o                               %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UnityAddKernelInfo() Adds a given amount of the 'Unity' Convolution Kernel
% to the given pre-scaled and normalized Kernel. This in effect adds that
% amount of the original image into the resulting convolution kernel. This
% value is usually provided by the user as a percentage value in the
% 'convolve:scale' setting.
%
% The resulting effect is to convert the defined kernels into blended
% soft-blurs, unsharp kernels or into sharpening kernels.
%
% The format of the UnityAddKernelInfo method is:
%
%     void UnityAddKernelInfo(KernelInfo *kernel, const double scale )
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
% o scale:
% scaling factor for the unity kernel to be added to
% the given kernel.
%
*/
/* Add a scaled 'Unity' (identity) kernel to every kernel in the list: bump
 * the value at each kernel's origin by 'scale', then refresh its meta-data.
 * This blends the original image back into the convolution result.
 */
MagickExport void UnityAddKernelInfo(KernelInfo *kernel,
  const double scale)
{
  KernelInfo
    *k;

  /* The original used tail recursion over the list; a plain loop is
   * equivalent since each kernel is updated independently. */
  for (k = kernel; k != (KernelInfo *) NULL; k = k->next)
    {
      k->values[k->x+k->y*k->width] += scale;
      CalcKernelMetaData(k);  /* recalculate the meta-data */
    }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% Z e r o K e r n e l N a n s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ZeroKernelNans() replaces any special 'nan' value that may be present in
% the kernel with a zero value. This is typically done when the kernel will
% be used in special hardware (GPU) convolution processors, to simply
% matters.
%
% The format of the ZeroKernelNans method is:
%
% void ZeroKernelNans (KernelInfo *kernel)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
*/
/* Replace every special NaN entry in the kernel list with 0.0, e.g. before
 * handing the kernel to hardware (GPU) convolution code.
 */
MagickPrivate void ZeroKernelNans(KernelInfo *kernel)
{
  KernelInfo
    *k;

  size_t
    n;

  /* iterate the multi-kernel list (the original recursed) */
  for (k = kernel; k != (KernelInfo *) NULL; k = k->next)
    for (n = 0; n < (k->width*k->height); n++)
      if (IsNaN(k->values[n]))
        k->values[n] = 0.0;
}
|
cache_wkld.c | /*
* Code to simulate a cache-intensive workload for lab assignment in [A2] Task Mapping on Soft Heterogeneous Systems.
 * Workload consists of a simple parallel initialization routine.
* Implementation is not optimized! Only meant to be used in conjunction with lab assignment.
 * The performance of the workload is sensitive to the cache size. In a task mapping scenario,
* this workload should be mapped to the set of cores with largest cache.
*
* @author: Apan Qasem <apan@txstate.edu>
* @date: 04/02/20
*
* @update: 03/12/21
*/
#include<stdlib.h>
#include<stdio.h>
#include<sys/time.h>
#include <omp.h>
#define ELEMENTS_TO_VERIFY 1
#define REPS 50
/* timer function */
/* Return the current wall-clock time in seconds (microsecond resolution).
 * Returns 0.0 if gettimeofday() fails (the original ignored the return
 * value it stored into an unused variable). */
double get_time_in_seconds() {
  struct timeval tp;
  /* NULL for the obsolete struct-timezone argument (POSIX). */
  if (gettimeofday(&tp, NULL) != 0)
    return 0.0;
  return (double) tp.tv_sec + (double) tp.tv_usec * 1.e-6;
}
/* Driver: allocate an N x N matrix and repeatedly initialize it in parallel
 * to generate a cache-intensive workload.  Usage: ./cache_wkld N t */
int main(int argc, char *argv[]) {
  int **a;

  /* BUG FIX: the original checked argc < 2 but then read argv[2]. */
  if (argc < 3) {
    printf("usage: \n");
    printf("   ./cache_wkld N t\n");
    printf("      N = input size\n");
    printf("      t = number of OpenMP threads\n");
    exit(0);  /* keep the original exit status for the usage path */
  }

  long long N = atoll(argv[1]);  /* atoll: N is long long (atoi truncates) */
  unsigned threads = (unsigned) atoi(argv[2]);
  if (N <= 0 || threads == 0) {
    printf("N and t must be positive\n");
    exit(0);
  }

  omp_set_num_threads(threads);

  a = (int **) malloc(sizeof(int *) * N);
  if (a == NULL)
    exit(1);

  int i, j, k;
  for (i = 0; i < N; i++) {
    a[i] = (int *) malloc(sizeof(int) * N);
    if (a[i] == NULL)
      exit(1);
  }

  double start_time, end_time;
  start_time = get_time_in_seconds();

  /* Each thread handles some REPS iterations; all writes store the same
   * constant, so the concurrent stores to a[][] are benign by design. */
  #pragma omp parallel for private(j,i)
  for (k = 0; k < REPS; k++) {
    for (j = 1; j < N; j++)
      for (i = 1; i < N; i++)
        a[i][j] = 17;
  }

  end_time = get_time_in_seconds();
  fprintf(stdout, "\033[1;33m[wk2] compute time = %.3f s\n\033[0m", end_time - start_time);

#ifdef VERIFY
  /* NOTE(review): a[0][i] is never written by the loops above (they start
   * at index 1), so these values are whatever malloc returned — confirm
   * whether row/col 0 should be included in the workload. */
  fprintf(stdout, "Verification: ");
  for (int i = 0; i < ELEMENTS_TO_VERIFY; i++)
    fprintf(stdout, "%d\n", a[0][i]);
#endif

  for (i = 0; i < N; i++)
    free(a[i]);
  free(a);
  return 0;
}
|
kvstore_local.h | /**
* Copyright (c) 2015 by Contributors
* @file kvstore_local.h
* @brief local implementation
*/
#ifndef MXNET_KVSTORE_KVSTORE_LOCAL_H_
#define MXNET_KVSTORE_KVSTORE_LOCAL_H_
#include <mxnet/kvstore.h>
#include <unordered_map>
#include <bitset>
#include <vector>
#include <utility>
#include <algorithm>
namespace mxnet {
namespace kvstore {
/**
* \brief store data in local machine
*/
class KVStoreLocal : public KVStore {
 public:
  /// \brief Single-machine key-value store; master copies live in (pinned) CPU memory.
  KVStoreLocal() {
    pinned_ctx_ = (MXNET_USE_CUDA != 0) ?
                  Context::CPUPinned(0) : Context::CPU();
    // the server parameters
    nthread_reduction_ = dmlc::GetEnv("MXNET_KVSTORE_REDUCTION_NTHREADS", 4);
    bigarray_bound_ = dmlc::GetEnv("MXNET_KVSTORE_BIGARRAY_BOUND", 1000 * 1000);
  }

  /// \brief Create the master copy of each value; duplicate keys are an error.
  void Init(const std::vector<int>& keys,
            const std::vector<NDArray>& values) override {
    for (size_t i = 0; i < keys.size(); ++i) {
      CHECK(local_.find(keys[i]) == local_.end())
          << "duplicate init of key " << keys[i];
      local_[keys[i]] = values[i].Copy(pinned_ctx_);
    }
  }

  /// \brief Merge pushed values per key, then apply the updater (if any)
  /// to fold the merged value into the stored copy.
  void Push(const std::vector<int>& keys,
            const std::vector<NDArray>& values,
            int priority) override {
    std::vector<int> uniq_keys;
    std::vector<std::vector<NDArray> > grouped_vals;
    GroupKVPairs(keys, values, &uniq_keys, &grouped_vals);

    for (size_t i = 0; i < uniq_keys.size(); ++i) {
      int key = uniq_keys[i];
      const NDArray& merged = MergePushValue(key, grouped_vals[i], priority);
      if (updater_ != nullptr) {
        auto it = local_.find(key);
        CHECK(it != local_.end()) << "key " << key << " has not been inited";
        updater_(key, merged, &(it->second));
      }
    }
  }

  /// \brief Copy each key's value into every requested destination.  When no
  /// updater is set, a pending merged push value (if any) takes precedence
  /// over the stored copy.
  void Pull(const std::vector<int>& keys,
            const std::vector<NDArray*>& values,
            int priority) override {
    std::vector<int> uniq_keys;
    std::vector<std::vector<NDArray*> > grouped_vals;
    GroupKVPairs(keys, values, &uniq_keys, &grouped_vals);

    for (size_t i = 0; i < uniq_keys.size(); ++i) {
      int key = uniq_keys[i];
      auto it = merge_buf_.find(key);
      if (updater_ != nullptr || it == merge_buf_.end()) {
        auto it = local_.find(key);
        CHECK(it != local_.end()) << "key " << key << " has not been inited";
        const NDArray& src = it->second;
        for (auto* vptr : grouped_vals[i]) {
          CopyFromTo(src, vptr, priority);
        }
      } else {
        auto& src = it->second.merged;
        for (auto* vptr : grouped_vals[i]) {
          CopyFromTo(src, vptr, priority);
        }
      }
    }
  }

 protected:
  /// \brief temporary space for push and pull
  struct BufferEntry {
    // Context of merged
    Context ctx;
    // the merged value
    NDArray merged;
    /// \brief the cpu buffer for gpu data
    std::vector<NDArray> copy_buf;
    // allocate copy buffer, if it has not been allocated
    inline NDArray *AllocCopyBuf(size_t index, Context ctx, const TShape& shape) {
      if (index >= copy_buf.size()) copy_buf.resize(index + 1);
      if (copy_buf[index].is_none()) {
        copy_buf[index] = NDArray(shape, ctx);
      }
      // BUG FIX: was "return ©_buf[index];" — HTML-entity mangling of
      // "&copy_buf" ("&copy;" rendered as the copyright sign).
      return &copy_buf[index];
    }
  };

  /**
   * \brief group values on keys: sort (key, position) pairs so equal keys
   * become adjacent, then emit one entry per distinct key with its values.
   */
  template <typename V>
  void GroupKVPairs(const std::vector<int>& keys,
                    const std::vector<V>& values,
                    std::vector<int>* uniq_keys,
                    std::vector<std::vector<V> >* grouped_vals) {
    CHECK_EQ(keys.size(), values.size());
    // TODO(mli) check if already sorted as an optimization
    using Idx = std::pair<int, int>;
    std::vector<Idx> idx(keys.size());
    for (size_t i = 0; i < keys.size(); ++i) {
      idx[i].first = keys[i]; idx[i].second = i;
    }
    std::sort(idx.begin(), idx.end(), [](const Idx& a, const Idx& b) {
      return a.first < b.first;
    });

    // guaranteed to differ from the first key, so the first pair always
    // opens a new group
    int pre_key = idx[0].first - 1;
    for (auto i : idx) {
      if (i.first != pre_key) {
        uniq_keys->push_back(i.first);
        grouped_vals->push_back({values[i.second]});
        pre_key = i.first;
      } else {
        grouped_vals->back().push_back(values[i.second]);
      }
    }
  }

  /*!
   * \brief returns the aggregated push value: copies/stages every source into
   * CPU memory and schedules an async reduction into the merge buffer.
   */
  virtual const NDArray& MergePushValue(
      int key, const std::vector<NDArray>& val, int priority) {
    auto& buf = merge_buf_[key];
    // copy buffer
    std::vector<Engine::VarHandle> const_vars(val.size() - 1);
    std::vector<NDArray> reduce(val.size());

    if (buf.merged.is_none()) {
      buf.ctx = Context::CPUPinned(val[0].ctx().dev_id);
      if (MXNET_USE_CUDA == 0) buf.ctx = Context::CPU();
      buf.merged = NDArray(val[0].shape(), buf.ctx);
    }
    CopyFromTo(val[0], &(buf.merged), priority);
    reduce[0] = buf.merged;

    for (size_t i = 1; i < val.size(); ++i) {
      const NDArray& v = val[i];
      Context ctx = v.ctx();
      if (ctx.dev_mask() == cpu::kDevMask) {
        reduce[i] = val[i];
      } else {
        // stage GPU data through a pinned CPU buffer
        NDArray *copy_buf = buf.AllocCopyBuf(
            i, Context::CPUPinned(ctx.dev_id), val[0].shape());
        CopyFromTo(val[i], copy_buf, priority);
        reduce[i] = *copy_buf;
      }
      const_vars[i - 1] = reduce[i].var();
    }

    Engine::Get()->PushSync([reduce, this](RunContext rctx) {
        ReduceSumCPU(reduce);
      }, Context::CPU(), const_vars, {reduce[0].var()},
      FnProperty::kCPUPrioritized, priority);

    return buf.merged;
  }

  /// \brief buffer for merging push value
  std::unordered_map<int, BufferEntry> merge_buf_;
  // pinned context
  Context pinned_ctx_;
  // the lower bound of a big array
  size_t bigarray_bound_;

 private:
  /// \brief sum dptr[1..k] into dptr[0] over [offset, offset+size);
  /// unrolled for the common 2-4 array cases.
  inline static void ReduceSumCPU(const std::vector<real_t*> &dptr,
                                  size_t offset, index_t size) {
    using namespace mshadow;  // NOLINT(*)
    Tensor<cpu, 1> in_0(dptr[0] + offset, Shape1(size));
    switch (dptr.size()) {
      case 2: {
        Tensor<cpu, 1> in_1(dptr[1] + offset, Shape1(size));
        in_0 += in_1;
        break;
      }
      case 3: {
        Tensor<cpu, 1> in_1(dptr[1] + offset, Shape1(size));
        Tensor<cpu, 1> in_2(dptr[2] + offset, Shape1(size));
        in_0 += in_1 + in_2;
        break;
      }
      case 4: {
        Tensor<cpu, 1> in_1(dptr[1] + offset, Shape1(size));
        Tensor<cpu, 1> in_2(dptr[2] + offset, Shape1(size));
        Tensor<cpu, 1> in_3(dptr[3] + offset, Shape1(size));
        in_0 += in_1 + in_2 + in_3;
        break;
      }
      default: {
        for (size_t i = 1; i < dptr.size(); ++i) {
          Tensor<cpu, 1> in_k(dptr[i] + offset, Shape1(size));
          in_0 += in_k;
        }
      }
    }
  }

  // reduce sum into val[0]
  // this is performance critical
  inline void ReduceSumCPU(const std::vector<NDArray> &in_data) {
    // chunk size for per-thread work items
    const size_t step = std::min(bigarray_bound_, static_cast<size_t>(4 << 10));
    // gather the raw data pointers
    std::vector<real_t*> dptr(in_data.size());
    for (size_t i = 0; i < in_data.size(); ++i) {
      TBlob data = in_data[i].data();
      CHECK(data.CheckContiguous());
      dptr[i] = data.FlatTo2D<cpu, real_t>().dptr_;
    }
    size_t total = in_data[0].shape().Size();
    long ntask = (total + step - 1) / step;  // NOLINT(*)
    if (total < bigarray_bound_ || nthread_reduction_ <= 1) {
      // small array or reduction threading disabled: do it inline
      ReduceSumCPU(dptr, 0, total);
    } else {
      #pragma omp parallel for schedule(static) num_threads(nthread_reduction_)
      for (long j = 0; j < ntask; ++j) {  // NOLINT(*)
        size_t k = static_cast<size_t>(j);
        size_t begin = std::min(k * step, total);
        size_t end = std::min((k + 1) * step, total);
        if (j == ntask - 1) CHECK_EQ(end, total);
        ReduceSumCPU(dptr, begin, static_cast<index_t>(end - begin));
      }
    }
  }

  /// \brief buffer for storing local values
  std::unordered_map<int, NDArray> local_;
  // number of threads to do reduction
  int nthread_reduction_;
};
} // namespace kvstore
} // namespace mxnet
#endif // MXNET_KVSTORE_KVSTORE_LOCAL_H_
|
bench.c | #include <stdio.h>
#include <limits.h>
#include <omp.h>
#include <time.h>
#include <stdlib.h>
// #define V 30000
int num;
/* Return the index of the unvisited vertex with the smallest key, or -1 if
 * every vertex is already visited (the original returned an uninitialized
 * value in that case — undefined behavior). */
int minKeySeq(int key[], int visited[], int V)
{
    int min = INT_MAX, min_index = -1;  /* -1 guards the all-visited case */
    int v;

    for (v = 0; v < V; v++)
    {
        if (visited[v] == 0 && key[v] < min)
        {
            min = key[v];
            min_index = v;
        }
    }
    return min_index;
}
/* Sequential Prim's MST over a dense V x V adjacency matrix.
 * Prints the elapsed wall-clock time to stdout as "%f, ". */
void primMSTSeq(int **graph, int V)
{
    double t_begin = omp_get_wtime();

    int parent[V];  /* constructed MST: parent of each vertex */
    int best[V];    /* cheapest known edge weight into the tree */
    int inTree[V];  /* 1 once a vertex has been added to the MST */

    /* every vertex starts outside the tree at infinite cost */
    for (int x = 0; x < V; x++)
    {
        best[x] = INT_MAX;
        inTree[x] = 0;
    }

    best[0] = 0;    /* vertex 0 is picked first */
    parent[0] = -1; /* root of the MST has no parent */

    /* grow the tree one vertex at a time */
    for (int step = 0; step < V - 1; step++)
    {
        int u = minKeySeq(best, inTree, V);
        inTree[u] = 1;

        /* relax edges (u, w) toward vertices not yet in the tree */
        for (int w = 0; w < V; w++)
        {
            if (graph[u][w] && inTree[w] == 0 && graph[u][w] < best[w])
            {
                parent[w] = u;
                best[w] = graph[u][w];
            }
        }
    }

    double t_end = omp_get_wtime();
    printf("%f, ", t_end - t_begin);
    // printMST(parent, V, graph);
}
/* Parallel version of minKeySeq: each thread finds its local minimum over a
 * share of the vertices, then the winners are combined in a critical
 * section.  Returns -1 if every vertex is visited.
 * BUG FIX: the original read the uninitialized shared 'index' into each
 * thread's 'index_local' (undefined behavior) and could return it. */
int minKey(int key[], int visited[], int V)
{
    int min = INT_MAX, index = -1, i;
    omp_set_num_threads(4);
    #pragma omp parallel
    {
        num = omp_get_num_threads();
        int index_local = -1;       /* was: = index (uninitialized read) */
        int min_local = INT_MAX;    /* was: = min (same value, now explicit) */
        #pragma omp for nowait
        for (i = 0; i < V; i++)
        {
            if (visited[i] == 0 && key[i] < min_local)
            {
                min_local = key[i];
                index_local = i;
            }
        }
        /* combine the per-thread minima */
        #pragma omp critical
        {
            if (min_local < min)
            {
                min = min_local;
                index = index_local;
            }
        }
    }
    return index;
}
/* Print each MST edge "parent - child  weight", one per line.
 * NOTE(review): parameter 'n' is unused; V supplies the vertex count. */
void printMST(int from[], int n, int **graph, int V)
{
    int v;
    printf("Edge Weight\n");
    for (v = 1; v < V; v++)
    {
        printf("%d - %d %d \n", from[v], v, graph[v][from[v]]);
    }
}
/* OpenMP-parallel Prim's MST: the minimum-key search (minKey) and the edge
 * relaxation loop both run in parallel.  Prints elapsed time and the thread
 * count recorded in the global 'num' as "%f, %d\n". */
void primMST(int **graph, int V)
{
    double t0 = omp_get_wtime();

    int parent[V];  /* MST parent links */
    int best[V];    /* cheapest edge weight into the tree */
    int inTree[V];  /* membership flags */

    for (int x = 0; x < V; x++)
    {
        best[x] = INT_MAX;
        inTree[x] = 0;
    }
    best[0] = 0;
    parent[0] = -1;

    for (int step = 0; step < V - 1; step++)
    {
        int u = minKey(best, inTree, V);
        inTree[u] = 1;

        /* each thread relaxes a distinct slice of vertices, so the
           writes to parent[]/best[] never collide */
        #pragma omp parallel for schedule(static)
        for (int w = 0; w < V; w++)
        {
            if (graph[u][w] && inTree[w] == 0 && graph[u][w] < best[w])
            {
                parent[w] = u;
                best[w] = graph[u][w];
            }
        }
    }

    double t1 = omp_get_wtime();
    printf("%f, %d\n", t1 - t0, num);
    // printMST(parent, V, graph);
}
/* Driver: build a random symmetric V x V weight matrix and time both the
 * sequential and the parallel Prim implementations.  Usage: ./bench V */
int main(int argc, char *argv[])
{
    /* BUG FIX: the original read argv[1] without checking argc. */
    if (argc < 2)
    {
        printf("usage: ./bench V\n");
        return 1;
    }
    int V = atoi(argv[1]);
    if (V <= 0)
    {
        printf("V must be a positive integer\n");
        return 1;
    }

    int **graph = (int **)malloc(V * sizeof(int *));
    if (graph == NULL)
        return 1;
    for (int x = 0; x < V; x++)
    {
        graph[x] = (int *)malloc(V * sizeof(int));
        if (graph[x] == NULL)
            return 1;
    }

    int i, j;
    /* Generate a random adjacency matrix with weights in [0, 9] */
    srand(time(NULL));
    for (i = 0; i < V; i++)
        for (j = 0; j < V; j++)
            graph[i][j] = rand() % 10;
    for (i = 0; i < V; i++)
        graph[i][i] = 0;            /* no self loops */
    for (i = 0; i < V; i++)
        for (j = 0; j < V; j++)
            graph[j][i] = graph[i][j];  /* symmetric: undirected graph */

    printf("%d, ", V);
    primMSTSeq(graph, V);
    primMST(graph, V);

    for (int x = 0; x < V; x++)
        free(graph[x]);
    free(graph);
    return 0;
}
|
pageRank.c | // -----------------------------------------------------------------------------
//
// "00_AccelGraph"
//
// -----------------------------------------------------------------------------
// Copyright (c) 2014-2019 All rights reserved
// -----------------------------------------------------------------------------
// Author : Abdullah Mughrabi
// Email : atmughra@ncsu.edu||atmughrabi@gmail.com
// File : pageRank.c
// Create : 2019-09-28 14:41:30
// Revise : 2019-09-28 15:34:11
// Editor : Abdullah Mughrabi
// -----------------------------------------------------------------------------
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
#include <omp.h>
#include "timer.h"
#include "myMalloc.h"
#include "boolean.h"
#include "arrayQueue.h"
#include "bitmap.h"
#include "worklist.h"
#include "graphConfig.h"
#include "fixedPoint.h"
#include "quantization.h"
#include "reorder.h"
#include "graphCSR.h"
#include "graphGrid.h"
#include "graphAdjArrayList.h"
#include "graphAdjLinkedList.h"
#include "libcxl.h"
#include "capienv.h"
#include "pageRank.h"
// ********************************************************************************************
// *************** Stats DataStructure **************
// ********************************************************************************************
/* Allocate and initialize PageRank statistics for a CSR graph.  Every
 * vertex starts at the base rank (1 - damping factor) and its real-rank
 * slot holds its own id.  (Also removes stray ";;" empty statements.) */
struct PageRankStats *newPageRankStatsGraphCSR(struct GraphCSR *graph)
{
    uint32_t v;

    struct PageRankStats *stats = (struct PageRankStats *) my_malloc(sizeof(struct PageRankStats));

    stats->damp = Damp;
    stats->base_pr = (1.0f - stats->damp);
    stats->iterations = 0;
    stats->num_vertices = graph->num_vertices;
    stats->time_total = 0.0;
    stats->error_total = 0.0;

    stats->realRanks = (uint32_t *) my_malloc(graph->num_vertices * sizeof(uint32_t));
    stats->pageRanks = (float *) my_malloc(graph->num_vertices * sizeof(float));

    #pragma omp parallel for default(none) private(v) shared(stats)
    for(v = 0; v < stats->num_vertices; v++)
    {
        stats->pageRanks[v] = stats->base_pr;
        stats->realRanks[v] = v;
    }

    return stats;
}
/* Allocate and initialize PageRank statistics for a Grid graph; same layout
 * as the CSR variant.  (Also removes stray ";;" empty statements.) */
struct PageRankStats *newPageRankStatsGraphGrid(struct GraphGrid *graph)
{
    uint32_t v;

    struct PageRankStats *stats = (struct PageRankStats *) my_malloc(sizeof(struct PageRankStats));

    stats->damp = Damp;
    stats->base_pr = (1.0f - stats->damp);
    stats->iterations = 0;
    stats->num_vertices = graph->num_vertices;
    stats->time_total = 0.0;
    stats->error_total = 0.0;

    stats->realRanks = (uint32_t *) my_malloc(graph->num_vertices * sizeof(uint32_t));
    stats->pageRanks = (float *) my_malloc(graph->num_vertices * sizeof(float));

    #pragma omp parallel for default(none) private(v) shared(stats)
    for(v = 0; v < stats->num_vertices; v++)
    {
        stats->pageRanks[v] = stats->base_pr;
        stats->realRanks[v] = v;
    }

    return stats;
}
/* Allocate and initialize PageRank statistics for an adjacency-array-list
 * graph; same layout as the CSR variant.  (Removes stray ";;".) */
struct PageRankStats *newPageRankStatsGraphAdjArrayList(struct GraphAdjArrayList *graph)
{
    uint32_t v;

    struct PageRankStats *stats = (struct PageRankStats *) my_malloc(sizeof(struct PageRankStats));

    stats->damp = Damp;
    stats->base_pr = (1.0f - stats->damp);
    stats->iterations = 0;
    stats->num_vertices = graph->num_vertices;
    stats->time_total = 0.0;
    stats->error_total = 0.0;

    stats->realRanks = (uint32_t *) my_malloc(graph->num_vertices * sizeof(uint32_t));
    stats->pageRanks = (float *) my_malloc(graph->num_vertices * sizeof(float));

    #pragma omp parallel for default(none) private(v) shared(stats)
    for(v = 0; v < stats->num_vertices; v++)
    {
        stats->pageRanks[v] = stats->base_pr;
        stats->realRanks[v] = v;
    }

    return stats;
}
/* Allocate and initialize PageRank statistics for an adjacency-linked-list
 * graph; same layout as the CSR variant.  (Removes stray ";;".) */
struct PageRankStats *newPageRankStatsGraphAdjLinkedList(struct GraphAdjLinkedList *graph)
{
    uint32_t v;

    struct PageRankStats *stats = (struct PageRankStats *) my_malloc(sizeof(struct PageRankStats));

    stats->damp = Damp;
    stats->base_pr = (1.0f - stats->damp);
    stats->iterations = 0;
    stats->num_vertices = graph->num_vertices;
    stats->time_total = 0.0;
    stats->error_total = 0.0;

    stats->realRanks = (uint32_t *) my_malloc(graph->num_vertices * sizeof(uint32_t));
    stats->pageRanks = (float *) my_malloc(graph->num_vertices * sizeof(float));

    #pragma omp parallel for default(none) private(v) shared(stats)
    for(v = 0; v < stats->num_vertices; v++)
    {
        stats->pageRanks[v] = stats->base_pr;
        stats->realRanks[v] = v;
    }

    return stats;
}
/* Release a PageRankStats structure and its rank arrays.
 * Safe to call with NULL.  free(NULL) is a no-op, so the per-array
 * guards of the original were redundant and have been removed. */
void freePageRankStats(struct PageRankStats *stats)
{
    if(stats)
    {
        free(stats->realRanks);
        free(stats->pageRanks);
        free(stats);
    }
}
// ********************************************************************************************
// ***************            Auxiliary functions                **************
// ********************************************************************************************
/* Atomically add 'value' to the float at *num using a compare-and-swap
 * retry loop.  The float is reinterpreted as a 32-bit word for the CAS;
 * memcpy performs the reinterpretation (the original dereferenced a
 * uint32_t* aliasing a float — strict-aliasing undefined behavior).
 * The cast of 'num' itself is required by the CAS builtin's signature. */
void addAtomicFloat(float *num, float value)
{
    float newV, oldV;
    uint32_t lnewV, loldV;

    do
    {
        oldV = *num;            /* snapshot current value */
        newV = oldV + value;
        memcpy(&loldV, &oldV, sizeof loldV);
        memcpy(&lnewV, &newV, sizeof lnewV);
    }
    /* retry until no other thread changed *num between snapshot and swap */
    while(!__sync_bool_compare_and_swap((uint32_t *)num, loldV, lnewV));
}
/* Atomically add 'value' to the double at *num via a 64-bit CAS retry loop.
 * memcpy reinterprets the doubles as uint64_t words (the original's pointer
 * casts violated strict aliasing — undefined behavior). */
void addAtomicDouble(double *num, double value)
{
    double newV, oldV;
    uint64_t lnewV, loldV;

    do
    {
        oldV = *num;            /* snapshot current value */
        newV = oldV + value;
        memcpy(&loldV, &oldV, sizeof loldV);
        memcpy(&lnewV, &newV, sizeof lnewV);
    }
    /* retry until the swap succeeds against an unchanged *num */
    while(!__sync_bool_compare_and_swap((uint64_t *)num, loldV, lnewV));
}
/* Atomically store 'value' into *num using a CAS retry loop. */
void setAtomic(uint64_t *num, uint64_t value)
{
    uint64_t expected;

    do
    {
        expected = *num;  /* snapshot what we expect to replace */
    }
    while(!__sync_bool_compare_and_swap(num, expected, value));
}
/* Atomically add 'value' to the fixed-point number at *num (CAS loop). */
void addAtomicFixedPoint(uint64_t *num, uint64_t value)
{
    uint64_t seen;

    do
    {
        seen = *num;  /* snapshot; recompute the sum on every retry */
    }
    while(!__sync_bool_compare_and_swap(num, seen, seen + value));
}
/* Dump every vertex's rank to stdout, one "Rank[v]=r" line per vertex. */
void pageRankPrint(float *pageRankArray, uint32_t num_vertices)
{
    uint32_t idx;

    for(idx = 0; idx < num_vertices; idx++)
        printf("Rank[%d]=%f \n", idx, pageRankArray[idx]);
}
// ********************************************************************************************
// *************** GRID DataStructure **************
// ********************************************************************************************
// function STREAMVERTICES(Fv,F)
// Sum = 0
// for each vertex do
// if F(vertex) then
// Sum += Fv(edge)
// end if
// end for
// return Sum
// end function
// function STREAMEDGES(Fe,F)
// Sum = 0
// for each active block do >> block with active edges
// for each edge ∈ block do
// if F(edge.source) then
// Sum += Fe(edge)
// end if
// end for
// end for
// return Sum
// end function
//we assume that the edges are not sorted in each partition
/* Dispatch PageRank over a Grid graph according to arguments->pushpull.
 * NOTE(review): the original case comments did not match the functions
 * actually called (e.g. "push" on the PullRow case); the labels below
 * follow the callee names. */
struct PageRankStats *pageRankGraphGrid(struct Arguments *arguments, struct GraphGrid *graph)
{
    struct PageRankStats *stats = NULL;

    switch (arguments->pushpull)
    {
    case 0: // pull, row-wise, float
        stats = pageRankPullRowGraphGrid(arguments, graph);
        break;
    case 1: // push, column-wise, float
        stats = pageRankPushColumnGraphGrid(arguments, graph);
        break;
    case 2: // pull, row-wise, fixed-point
        stats = pageRankPullRowFixedPointGraphGrid(arguments, graph);
        break;
    case 3: // push, column-wise, fixed-point
        stats = pageRankPushColumnFixedPointGraphGrid(arguments, graph);
        break;
    default: // pull, row-wise, float
        stats = pageRankPullRowGraphGrid(arguments, graph);
        break;
    }

    return stats;
}
/* Pull-based PageRank over the Grid (partitioned edge-list) representation,
 * streaming partitions row-wise.  Iterates until no vertex's rank changes by
 * more than arguments->epsilon or arguments->iterations is reached, then
 * divides ranks by the vertex count and reports totals.  Progress and final
 * statistics are printed as formatted tables on stdout. */
struct PageRankStats *pageRankPullRowGraphGrid(struct Arguments *arguments, struct GraphGrid *graph)
{
    double error_total = 0.0;
    uint32_t v;
    uint32_t activeVertices = 0;
    // float init_pr = 1.0f / (float)graph->num_vertices;
    uint32_t totalPartitions = graph->grid->num_partitions;

    struct PageRankStats *stats = newPageRankStatsGraphGrid(graph);
    struct Timer *timer = (struct Timer *) malloc(sizeof(struct Timer));
    struct Timer *timer_inner = (struct Timer *) malloc(sizeof(struct Timer));

    /* pageRanksNext accumulates incoming contributions for the next
     * iteration; riDividedOnDiClause caches rank/out-degree per source */
    float *pageRanksNext = (float *) my_malloc(graph->num_vertices * sizeof(float));
    float *riDividedOnDiClause = (float *) my_malloc(graph->num_vertices * sizeof(float));

    printf(" -----------------------------------------------------\n");
    printf("| %-51s | \n", "Starting Page Rank Row (tolerance/epsilon)");
    printf(" -----------------------------------------------------\n");
    printf("| %-51.13lf | \n", arguments->epsilon);
    printf(" -----------------------------------------------------\n");
    printf("| %-10s | %-8s | %-15s | %-9s | \n", "Iteration", "Active", "Error", "Time (S)");
    printf(" -----------------------------------------------------\n");

    Start(timer);

    #pragma omp parallel for default(none) private(v) shared(graph,pageRanksNext)
    for(v = 0; v < graph->num_vertices; v++)
    {
        pageRanksNext[v] = 0.0f;
    }

    for(stats->iterations = 0; stats->iterations < arguments->iterations; stats->iterations++)
    {
        error_total = 0;
        activeVertices = 0;
        Start(timer_inner);

        /* precompute each source's contribution: rank / out-degree
         * (0 for dangling vertices with no outgoing edges) */
        #pragma omp parallel for
        for(v = 0; v < graph->num_vertices; v++)
        {
            if(graph->grid->out_degree[v])
                riDividedOnDiClause[v] = stats->pageRanks[v] / graph->grid->out_degree[v];
            else
                riDividedOnDiClause[v] = 0.0f;
        }

        // pageRankStreamEdgesGraphGridRowWise(graph, riDividedOnDiClause, pageRanksNext);
        uint32_t i;
        // #pragma omp parallel for private(i)
        for (i = 0; i < totalPartitions; ++i)  // iterate over partitions rowwise
        {
            uint32_t j;
            /* parallelize across the row's partitions: every partition in
             * row i has a distinct destination range, so the += below does
             * not race between j-iterations */
            #pragma omp parallel for private(j)
            for (j = 0; j < totalPartitions; ++j)
            {
                uint32_t k;
                uint32_t src;
                uint32_t dest;
                struct Partition *partition = &graph->grid->partitions[(i * totalPartitions) + j];
                for (k = 0; k < partition->num_edges; ++k)
                {
                    src  = partition->edgeList->edges_array_src[k];
                    dest = partition->edgeList->edges_array_dest[k];

                    // #pragma omp atomic update
                    // __sync_fetch_and_add(&pageRanksNext[dest],riDividedOnDiClause[src]);
                    // addAtomicFloat(float *num, float value)

                    // #pragma omp atomic update
                    pageRanksNext[dest] += riDividedOnDiClause[src];
                }
            }
        }

        /* apply damping, measure per-vertex change, and count vertices
         * still above the convergence tolerance */
        #pragma omp parallel for private(v) shared(arguments, pageRanksNext, stats) reduction(+ : error_total, activeVertices)
        for(v = 0; v < graph->num_vertices; v++)
        {
            float prevPageRank = stats->pageRanks[v];
            float nextPageRank = stats->base_pr + (stats->damp * pageRanksNext[v]);
            stats->pageRanks[v] = nextPageRank;
            pageRanksNext[v] = 0.0f;
            double error = fabs( nextPageRank - prevPageRank);
            error_total += (error / graph->num_vertices);

            if(error >= arguments->epsilon)
            {
                activeVertices++;
            }
        }

        Stop(timer_inner);
        printf("| %-10u | %-8u | %-15.13lf | %-9f | \n", stats->iterations, activeVertices, error_total, Seconds(timer_inner));
        if(activeVertices == 0)
            break;
    }// end iteration loop

    /* normalize the final ranks and compute their sum */
    double sum = 0.0f;
    #pragma omp parallel for reduction(+:sum)
    for(v = 0; v < graph->num_vertices; v++)
    {
        stats->pageRanks[v] = stats->pageRanks[v] / graph->num_vertices;
        sum += stats->pageRanks[v];
    }

    Stop(timer);
    stats->time_total = Seconds(timer);

    printf(" -----------------------------------------------------\n");
    printf("| %-10s | %-8s | %-15s | %-9s | \n", "Iterations", "PR Sum", "Error", "Time (S)");
    printf(" -----------------------------------------------------\n");
    printf("| %-10u | %-8lf | %-15.13lf | %-9f | \n", stats->iterations, sum, error_total, stats->time_total);
    printf(" -----------------------------------------------------\n");

    // printf(" -----------------------------------------------------\n");
    // printf("| %-10s | %-8lf | %-15s | %-9s | \n","PR Sum ",sum, stats->iterations, stats->time_total);
    // printf(" -----------------------------------------------------\n");
    // pageRankPrint(pageRanks, graph->num_vertices);

    free(timer);
    free(timer_inner);
    free(pageRanksNext);
    free(riDividedOnDiClause);

    stats->error_total = error_total;
    return stats;
}
struct PageRankStats *pageRankPullRowFixedPointGraphGrid(struct Arguments *arguments, struct GraphGrid *graph)
{
double error_total = 0.0;
uint32_t v;
uint32_t activeVertices = 0;
// float init_pr = 1.0f / (float)graph->num_vertices;
struct PageRankStats *stats = newPageRankStatsGraphGrid(graph);
uint32_t totalPartitions = graph->grid->num_partitions;
struct Timer *timer = (struct Timer *) malloc(sizeof(struct Timer));
struct Timer *timer_inner = (struct Timer *) malloc(sizeof(struct Timer));
uint64_t *pageRanksNext = (uint64_t *) my_malloc(graph->num_vertices * sizeof(uint64_t));
uint64_t *riDividedOnDiClause = (uint64_t *) my_malloc(graph->num_vertices * sizeof(uint64_t));
printf(" -----------------------------------------------------\n");
printf("| %-51s | \n", "Starting Page Rank Row FP (tolerance/epsilon)");
printf(" -----------------------------------------------------\n");
printf("| %-51.13lf | \n", arguments->epsilon);
printf(" -----------------------------------------------------\n");
printf("| %-10s | %-8s | %-15s | %-9s | \n", "Iteration", "Active", "Error", "Time (S)");
printf(" -----------------------------------------------------\n");
Start(timer);
#pragma omp parallel for default(none) private(v) shared(graph,pageRanksNext)
for(v = 0; v < graph->num_vertices; v++)
{
pageRanksNext[v] = 0;
}
for(stats->iterations = 0; stats->iterations < arguments->iterations; stats->iterations++)
{
error_total = 0;
activeVertices = 0;
Start(timer_inner);
#pragma omp parallel for
for(v = 0; v < graph->num_vertices; v++)
{
if(graph->grid->out_degree[v])
riDividedOnDiClause[v] = DoubleToFixed64(stats->pageRanks[v] / graph->grid->out_degree[v]);
else
riDividedOnDiClause[v] = 0.0f;
}
// pageRankStreamEdgesGraphGridRowWise(graph, riDividedOnDiClause, pageRanksNext);
uint32_t i;
// #pragma omp parallel for private(i)
for (i = 0; i < totalPartitions; ++i) // iterate over partitions rowwise
{
uint32_t j;
#pragma omp parallel for private(j)
for (j = 0; j < totalPartitions; ++j)
{
uint32_t k;
uint32_t src;
uint32_t dest;
struct Partition *partition = &graph->grid->partitions[(i * totalPartitions) + j];
for (k = 0; k < partition->num_edges; ++k)
{
src = partition->edgeList->edges_array_src[k];
dest = partition->edgeList->edges_array_dest[k];
// #pragma omp atomic update
pageRanksNext[dest] += riDividedOnDiClause[src];
}
}
}
#pragma omp parallel for private(v) shared(arguments, pageRanksNext,stats) reduction(+ : error_total, activeVertices)
for(v = 0; v < graph->num_vertices; v++)
{
float prevPageRank = stats->pageRanks[v];
float nextPageRank = stats->base_pr + (stats->damp * Fixed64ToDouble(pageRanksNext[v]));
stats->pageRanks[v] = nextPageRank;
pageRanksNext[v] = 0.0f;
double error = fabs( nextPageRank - prevPageRank);
error_total += (error / graph->num_vertices);
if(error >= arguments->epsilon)
{
activeVertices++;
}
}
Stop(timer_inner);
printf("| %-10u | %-8u | %-15.13lf | %-9f | \n", stats->iterations, activeVertices, error_total, Seconds(timer_inner));
if(activeVertices == 0)
break;
}// end iteration loop
double sum = 0.0f;
#pragma omp parallel for reduction(+:sum)
for(v = 0; v < graph->num_vertices; v++)
{
stats->pageRanks[v] = stats->pageRanks[v] / graph->num_vertices;
sum += stats->pageRanks[v];
}
Stop(timer);
stats->time_total = Seconds(timer);
printf(" -----------------------------------------------------\n");
printf("| %-10s | %-8s | %-15s | %-9s | \n", "Iterations", "PR Sum", "Error", "Time (S)");
printf(" -----------------------------------------------------\n");
printf("| %-10u | %-8lf | %-15.13lf | %-9f | \n", stats->iterations, sum, error_total, stats->time_total);
printf(" -----------------------------------------------------\n");
// printf(" -----------------------------------------------------\n");
// printf("| %-10s | %-8lf | %-15s | %-9s | \n","PR Sum ",sum, stats->iterations, stats->time_total);
// printf(" -----------------------------------------------------\n");
// pageRankPrint(pageRanks, graph->num_vertices);
free(timer);
free(timer_inner);
free(pageRanksNext);
free(riDividedOnDiClause);
stats->error_total = error_total;
return stats;
}
/******************************************************************/
/**
 * @brief Topology-driven PageRank on a Grid-partitioned graph, streaming
 *        partitions column by column (push style) with float accumulation.
 *
 * Per iteration: (1) compute rank(u)/out_degree(u) for every vertex (0 for
 * sinks), (2) stream all partitions accumulating contributions into
 * pageRanksNext[dest], (3) apply base_pr + damp * sum, accumulate the total
 * error, and count vertices whose change is still >= arguments->epsilon.
 * Exits the iteration loop early once no vertex is active; final ranks are
 * normalized by num_vertices.
 *
 * NOTE(review): the partition sweep is parallel over columns j with rows i
 * serial inside each thread; this is race-free only if partitions in
 * different columns cover disjoint destination ranges — presumably guaranteed
 * by the grid layout (the atomic/addAtomicFloat alternatives are commented
 * out). Confirm against the Grid partitioner.
 *
 * @param arguments run parameters (iterations, epsilon, ...).
 * @param graph     Grid-partitioned graph representation.
 * @return heap-allocated PageRankStats (caller frees).
 */
struct PageRankStats *pageRankPushColumnGraphGrid(struct Arguments *arguments, struct GraphGrid *graph)
{
    double error_total = 0.0;
    uint32_t v;
    uint32_t activeVertices = 0;
    // float init_pr = 1.0f / (float)graph->num_vertices;
    struct PageRankStats *stats = newPageRankStatsGraphGrid(graph);
    uint32_t totalPartitions = graph->grid->num_partitions;
    struct Timer *timer = (struct Timer *) malloc(sizeof(struct Timer));
    struct Timer *timer_inner = (struct Timer *) malloc(sizeof(struct Timer));
    // Scratch arrays: next-iteration accumulator and per-source rank/degree ratio.
    float *pageRanksNext = (float *) my_malloc(graph->num_vertices * sizeof(float));
    float *riDividedOnDiClause = (float *) my_malloc(graph->num_vertices * sizeof(float));
    printf(" -----------------------------------------------------\n");
    printf("| %-51s | \n", "Starting Page Rank Col (tolerance/epsilon)");
    printf(" -----------------------------------------------------\n");
    printf("| %-51.13lf | \n", arguments->epsilon);
    printf(" -----------------------------------------------------\n");
    printf("| %-10s | %-8s | %-15s | %-9s | \n", "Iteration", "Active", "Error", "Time (S)");
    printf(" -----------------------------------------------------\n");
    Start(timer);
    // Zero the accumulator once; it is re-zeroed inside the update loop below.
    #pragma omp parallel for default(none) private(v) shared(graph,pageRanksNext)
    for(v = 0; v < graph->num_vertices; v++)
    {
        pageRanksNext[v] = 0.0f;
    }
    for(stats->iterations = 0; stats->iterations < arguments->iterations; stats->iterations++)
    {
        error_total = 0;
        activeVertices = 0;
        Start(timer_inner);
        // Phase 1: per-source contribution = rank / out-degree (sinks contribute 0).
        #pragma omp parallel for
        for(v = 0; v < graph->num_vertices; v++)
        {
            if(graph->grid->out_degree[v])
                riDividedOnDiClause[v] = stats->pageRanks[v] / graph->grid->out_degree[v];
            else
                riDividedOnDiClause[v] = 0.0f;
        }
        // pageRankStreamEdgesGraphGridRowWise(graph, riDividedOnDiClause, pageRanksNext);
        // Phase 2: stream edges partition by partition, columns in parallel.
        uint32_t j;
        #pragma omp parallel for private(j)
        for (j = 0; j < totalPartitions; ++j)
        {
            uint32_t i;
            // #pragma omp parallel for private(i) // iterate over partitions columnwise
            for (i = 0; i < totalPartitions; ++i)
            {
                uint32_t k;
                uint32_t src;
                uint32_t dest;
                struct Partition *partition = &graph->grid->partitions[(i * totalPartitions) + j];
                for (k = 0; k < partition->num_edges; ++k)
                {
                    src = partition->edgeList->edges_array_src[k];
                    dest = partition->edgeList->edges_array_dest[k];
                    // #pragma omp atomic update
                    pageRanksNext[dest] += riDividedOnDiClause[src];
                    // addAtomicFloat(&pageRanksNext[dest] , riDividedOnDiClause[src]);
                }
            }
        }
        // Phase 3: damping update; error_total and activeVertices via reduction.
        #pragma omp parallel for private(v) shared(arguments, pageRanksNext,stats) reduction(+ : error_total, activeVertices)
        for(v = 0; v < graph->num_vertices; v++)
        {
            float prevPageRank = stats->pageRanks[v];
            float nextPageRank = stats->base_pr + (stats->damp * pageRanksNext[v]);
            stats->pageRanks[v] = nextPageRank;
            pageRanksNext[v] = 0.0f; // reset accumulator for the next iteration
            double error = fabs( nextPageRank - prevPageRank);
            error_total += (error / graph->num_vertices);
            if(error >= arguments->epsilon)
            {
                activeVertices++;
            }
        }
        Stop(timer_inner);
        printf("| %-10u | %-8u | %-15.13lf | %-9f | \n", stats->iterations, activeVertices, error_total, Seconds(timer_inner));
        if(activeVertices == 0)
            break;
    }// end iteration loop
    // Normalize ranks; the reported sum should approach 1.0 for a converged run.
    double sum = 0.0f;
    #pragma omp parallel for reduction(+:sum)
    for(v = 0; v < graph->num_vertices; v++)
    {
        stats->pageRanks[v] = stats->pageRanks[v] / graph->num_vertices;
        sum += stats->pageRanks[v];
    }
    Stop(timer);
    stats->time_total = Seconds(timer);
    printf(" -----------------------------------------------------\n");
    printf("| %-10s | %-8s | %-15s | %-9s | \n", "Iterations", "PR Sum", "Error", "Time (S)");
    printf(" -----------------------------------------------------\n");
    printf("| %-10u | %-8lf | %-15.13lf | %-9f | \n", stats->iterations, sum, error_total, stats->time_total);
    printf(" -----------------------------------------------------\n");
    // printf(" -----------------------------------------------------\n");
    // printf("| %-10s | %-8lf | %-15s | %-9s | \n","PR Sum ",sum, stats->iterations, stats->time_total);
    // printf(" -----------------------------------------------------\n");
    // pageRankPrint(pageRanks, graph->num_vertices);
    free(timer);
    free(timer_inner);
    free(pageRanksNext);
    free(riDividedOnDiClause);
    stats->error_total = error_total;
    return stats;
}
struct PageRankStats *pageRankPushColumnFixedPointGraphGrid(struct Arguments *arguments, struct GraphGrid *graph)
{
double error_total = 0.0;
uint32_t v;
uint32_t activeVertices = 0;
// float init_pr = 1.0f / (float)graph->num_vertices;
struct PageRankStats *stats = newPageRankStatsGraphGrid(graph);
uint32_t totalPartitions = graph->grid->num_partitions;
struct Timer *timer = (struct Timer *) malloc(sizeof(struct Timer));
struct Timer *timer_inner = (struct Timer *) malloc(sizeof(struct Timer));
uint64_t *pageRanksNext = (uint64_t *) my_malloc(graph->num_vertices * sizeof(uint64_t));
uint64_t *riDividedOnDiClause = (uint64_t *) my_malloc(graph->num_vertices * sizeof(uint64_t));
printf(" -----------------------------------------------------\n");
printf("| %-51s | \n", "Starting Page Rank Col FP (tolerance/epsilon)");
printf(" -----------------------------------------------------\n");
printf("| %-51.13lf | \n", arguments->epsilon);
printf(" -----------------------------------------------------\n");
printf("| %-10s | %-8s | %-15s | %-9s | \n", "Iteration", "Active", "Error", "Time (S)");
printf(" -----------------------------------------------------\n");
Start(timer);
#pragma omp parallel for default(none) private(v) shared(graph,pageRanksNext)
for(v = 0; v < graph->num_vertices; v++)
{
pageRanksNext[v] = 0.0f;
}
for(stats->iterations = 0; stats->iterations < arguments->iterations; stats->iterations++)
{
error_total = 0;
activeVertices = 0;
Start(timer_inner);
#pragma omp parallel for
for(v = 0; v < graph->num_vertices; v++)
{
if(graph->grid->out_degree[v])
riDividedOnDiClause[v] = DoubleToFixed64(stats->pageRanks[v] / graph->grid->out_degree[v]);
else
riDividedOnDiClause[v] = 0.0f;
}
// pageRankStreamEdgesGraphGridRowWise(graph, riDividedOnDiClause, pageRanksNext);
uint32_t j;
#pragma omp parallel for private(j)
for (j = 0; j < totalPartitions; ++j) // iterate over partitions columnwise
{
uint32_t i;
for (i = 0; i < totalPartitions; ++i)
{
uint32_t k;
uint32_t src;
uint32_t dest;
struct Partition *partition = &graph->grid->partitions[(i * totalPartitions) + j];
for (k = 0; k < partition->num_edges; ++k)
{
src = partition->edgeList->edges_array_src[k];
dest = partition->edgeList->edges_array_dest[k];
// #pragma omp atomic update
pageRanksNext[dest] += riDividedOnDiClause[src];
// addAtomicFloat(&pageRanksNext[dest] , riDividedOnDiClause[src]);
}
}
}
#pragma omp parallel for private(v) shared(arguments, pageRanksNext,stats) reduction(+ : error_total, activeVertices)
for(v = 0; v < graph->num_vertices; v++)
{
float prevPageRank = stats->pageRanks[v];
float nextPageRank = stats->base_pr + (stats->damp * Fixed64ToDouble(pageRanksNext[v]));
stats->pageRanks[v] = nextPageRank;
pageRanksNext[v] = 0.0f;
double error = fabs( nextPageRank - prevPageRank);
error_total += (error / graph->num_vertices);
if(error >= arguments->epsilon)
{
activeVertices++;
}
}
Stop(timer_inner);
printf("| %-10u | %-8u | %-15.13lf | %-9f | \n", stats->iterations, activeVertices, error_total, Seconds(timer_inner));
if(activeVertices == 0)
break;
}// end iteration loop
double sum = 0.0f;
#pragma omp parallel for reduction(+:sum)
for(v = 0; v < graph->num_vertices; v++)
{
stats->pageRanks[v] = stats->pageRanks[v] / graph->num_vertices;
sum += stats->pageRanks[v];
}
Stop(timer);
stats->time_total = Seconds(timer);
printf(" -----------------------------------------------------\n");
printf("| %-10s | %-8s | %-15s | %-9s | \n", "Iterations", "PR Sum", "Error", "Time (S)");
printf(" -----------------------------------------------------\n");
printf("| %-10u | %-8lf | %-15.13lf | %-9f | \n", stats->iterations, sum, error_total, stats->time_total);
printf(" -----------------------------------------------------\n");
// printf(" -----------------------------------------------------\n");
// printf("| %-10s | %-8lf | %-15s | %-9s | \n","PR Sum ",sum, stats->iterations, stats->time_total);
// printf(" -----------------------------------------------------\n");
// pageRankPrint(pageRanks, graph->num_vertices);
free(timer);
free(timer_inner);
free(pageRanksNext);
free(riDividedOnDiClause);
stats->error_total = error_total;
return stats;
}
// ********************************************************************************************
// *************** CSR DataStructure CAPI Supported **************
// ********************************************************************************************
struct PageRankStats *pageRankGraphCSR(struct Arguments *arguments, struct GraphCSR *graph)
{
struct PageRankStats *stats = NULL;
switch (arguments->pushpull)
{
case 0: // pull
stats = pageRankPullGraphCSR(arguments, graph);
break;
case 1: // push
stats = pageRankPushGraphCSR(arguments, graph);
break;
case 2: // pull 64bit FP
stats = pageRankPullFixedPoint64BitGraphCSR(arguments, graph);
break;
case 3: // push
stats = pageRankPushFixedPointGraphCSR(arguments, graph);
break;
case 4: // pull 32bit Quant
stats = pageRankPullQuant32BitGraphCSR(arguments, graph);
break;
case 5: // push
stats = pageRankPushQuantGraphCSR(arguments, graph);
break;
case 6: // pull
stats = pageRankDataDrivenPullGraphCSR(arguments, graph);
break;
case 7: // push
stats = pageRankDataDrivenPushGraphCSR(arguments, graph);
break;
case 8: // pullpush
stats = pageRankDataDrivenPullPushGraphCSR(arguments, graph);
break;
case 9: // pull 32bit FP
stats = pageRankPullFixedPoint32BitGraphCSR(arguments, graph);
break;
case 10: // pull 16bit FP
stats = pageRankPullFixedPoint16BitGraphCSR(arguments, graph);
break;
case 11: // pull 8bit FP
stats = pageRankPullFixedPoint8BitGraphCSR(arguments, graph);
break;
case 12: // pull 16bit Quant
stats = pageRankPullQuant16BitGraphCSR(arguments, graph);
break;
case 13: // pull 8bit Quant
stats = pageRankPullQuant8BitGraphCSR(arguments, graph);
break;
// case 9: // push
// pageRankDataDrivenPullFixedPointGraphCSR(arguments, graph);
// break;
// case 10: // pull
// pageRankDataDrivenPushFixedPointGraphCSR(arguments, graph);
// break;
default:// pull
stats = pageRankPullGraphCSR(arguments, graph);
break;
}
return stats;
}
// topolgy driven approach
/**
 * @brief Topology-driven PageRank pull on a CSR graph with the edge-gather
 *        phase offloaded to a CAPI AFU.
 *
 * The host maps the CSR graph into a WED descriptor (auxiliary1 = per-source
 * rank/degree ratios, auxiliary2 = the pageRanksNext accumulator), sets up
 * and starts the AFU once, then per iteration: (1) computes
 * rank(u)/out_degree(u) on the host, (2) kicks the CU (startCU) and blocks in
 * waitAFU while the accelerator accumulates contributions into pageRanksNext,
 * (3) applies the damping update and convergence check on the host. Exits
 * early when no vertex changed by >= arguments->epsilon; ranks are normalized
 * by num_vertices before returning.
 *
 * Cleanup: removed dead locals from the original (j, u, degree, edge_idx, and
 * the vertices/sorted_edges_array pair selected under #if DIRECTED) — none of
 * them were read; edge traversal happens inside the AFU, not on the host.
 *
 * @param arguments run parameters (iterations, epsilon, AFU/CU configs).
 * @param graph     CSR graph representation.
 * @return heap-allocated PageRankStats (caller frees).
 */
struct PageRankStats *pageRankPullGraphCSR(struct Arguments *arguments, struct GraphCSR *graph)
{
    double error_total = 0.0;
    uint32_t v;
    uint32_t activeVertices = 0;
    //CAPI variables
    struct cxl_afu_h *afu;
    struct WEDGraphCSR *wedGraphCSR;
    struct PageRankStats *stats = newPageRankStatsGraphCSR(graph);
    struct Timer *timer = (struct Timer *) malloc(sizeof(struct Timer));
    struct Timer *timer_inner = (struct Timer *) malloc(sizeof(struct Timer));
    // Host-side buffers shared with the AFU through the WED descriptor.
    float *pageRanksNext = (float *) my_malloc(graph->num_vertices * sizeof(float));
    float *riDividedOnDiClause = (float *) my_malloc(graph->num_vertices * sizeof(float));
    printf(" -----------------------------------------------------\n");
    printf("| %-51s | \n", " ---->>> CAPI <<<----");
    printf(" -----------------------------------------------------\n");
    printf("| %-51s | \n", "Starting Page Rank Pull (tolerance/epsilon)");
    printf(" -----------------------------------------------------\n");
    printf("| %-51.13lf | \n", arguments->epsilon);
    printf(" -----------------------------------------------------\n");
    printf("| %-10s | %-8s | %-15s | %-9s | \n", "Iteration", "Active", "Error", "Time (S)");
    printf(" -----------------------------------------------------\n");
    Start(timer);
    // ********************************************************************************************
    // *************** MAP CSR DataStructure            **************
    // ********************************************************************************************
    wedGraphCSR = mapGraphCSRToWED((struct GraphCSR *)graph);
    wedGraphCSR->auxiliary1 = riDividedOnDiClause;
    wedGraphCSR->auxiliary2 = pageRanksNext;
    // ********************************************************************************************
    // *************** Setup AFU                        **************
    // ********************************************************************************************
    setupAFUGraphCSR(&afu, wedGraphCSR);
    struct AFUStatus afu_status = {0};
    afu_status.afu_config = arguments->afu_config;
    afu_status.afu_config_2 = arguments->afu_config_2;
    // NOTE(review): the next assignment overwrites this one; kept to match the
    // sibling variants — candidate for removal once confirmed intentional.
    afu_status.cu_config = arguments->cu_config; // non zero CU triggers the AFU to work
    afu_status.cu_config = ((arguments->cu_config << 32) | (arguments->ker_numThreads));
    afu_status.cu_config_2 = arguments->cu_config_2; // non zero CU triggers the AFU to work
    afu_status.cu_stop = wedGraphCSR->num_vertices; // stop condition once all vertices processed
    startAFU(&afu, &afu_status);
    // ********************************************************************************************
    #pragma omp parallel for default(none) private(v) shared(graph,pageRanksNext)
    for(v = 0; v < graph->num_vertices; v++)
    {
        pageRanksNext[v] = 0;
    }
    for(stats->iterations = 0; stats->iterations < arguments->iterations; stats->iterations++)
    {
        error_total = 0;
        activeVertices = 0;
        Start(timer_inner);
        // Host phase: per-source contribution = rank / out-degree (sinks contribute 0).
        #pragma omp parallel for
        for(v = 0; v < graph->num_vertices; v++)
        {
            if(graph->vertices->out_degree[v])
                riDividedOnDiClause[v] = stats->pageRanks[v] / graph->vertices->out_degree[v];
            else
                riDividedOnDiClause[v] = 0.0f;
        }
        // ********************************************************************************************
        // *************** START CU / WAIT AFU: accelerator accumulates pageRanksNext ****************
        startCU(&afu, &afu_status);
        waitAFU(&afu, &afu_status);
        // ********************************************************************************************
        // Host phase: damping update; error_total and activeVertices via reduction.
        #pragma omp parallel for private(v) shared(arguments, pageRanksNext,stats) reduction(+ : error_total, activeVertices)
        for(v = 0; v < graph->num_vertices; v++)
        {
            float prevPageRank = stats->pageRanks[v];
            float nextPageRank = stats->base_pr + (stats->damp * pageRanksNext[v]);
            stats->pageRanks[v] = nextPageRank;
            pageRanksNext[v] = 0.0f;
            double error = fabs( nextPageRank - prevPageRank);
            error_total += (error / graph->num_vertices);
            if(error >= arguments->epsilon)
            {
                activeVertices++;
            }
        }
        Stop(timer_inner);
        printf("| %-10u | %-8u | %-15.13lf | %-9f | \n", stats->iterations, activeVertices, error_total, Seconds(timer_inner));
        if(activeVertices == 0)
            break;
    }// end iteration loop
    // Normalize ranks; the reported sum should approach 1.0 for a converged run.
    double sum = 0.0f;
    #pragma omp parallel for reduction(+:sum)
    for(v = 0; v < graph->num_vertices; v++)
    {
        stats->pageRanks[v] = stats->pageRanks[v] / graph->num_vertices;
        sum += stats->pageRanks[v];
    }
    Stop(timer);
    stats->time_total = Seconds(timer);
    printf(" -----------------------------------------------------\n");
    printf("| %-10s | %-8s | %-15s | %-9s | \n", "Iterations", "PR Sum", "Error", "Time (S)");
    printf(" -----------------------------------------------------\n");
    printf("| %-10u | %-8lf | %-15.13lf | %-9f | \n", stats->iterations, sum, error_total, stats->time_total);
    printf(" -----------------------------------------------------\n");
    // ********************************************************************************************
    // *************** Releasing AFU                    **************
    releaseAFU(&afu);
    // ********************************************************************************************
    free(timer);
    free(timer_inner);
    free(pageRanksNext);
    free(riDividedOnDiClause);
    free(wedGraphCSR);
    stats->error_total = error_total;
    return stats;
}
/**
 * @brief Topology-driven PageRank push on a CSR graph with the edge phase
 *        offloaded to a CAPI AFU.
 *
 * The host maps the CSR graph into a WED descriptor (auxiliary1 = per-source
 * rank/degree ratios, auxiliary2 = the pageRanksNext accumulator), sets up
 * and starts the AFU once, then per iteration: (1) computes
 * rank(u)/out_degree(u) on the host, (2) kicks the CU (startCU) and blocks in
 * waitAFU while the accelerator accumulates into pageRanksNext (no host edge
 * loop exists here), (3) applies the damping update and convergence check.
 * Exits early when no vertex changed by >= arguments->epsilon; ranks are
 * normalized by num_vertices before returning.
 *
 * @param arguments run parameters (iterations, epsilon, AFU/CU configs).
 * @param graph     CSR graph representation.
 * @return heap-allocated PageRankStats (caller frees).
 */
struct PageRankStats *pageRankPushGraphCSR(struct Arguments *arguments, struct GraphCSR *graph)
{
    double error_total = 0.0;
    // uint32_t i;
    uint32_t v;
    // double error = 0;
    uint32_t activeVertices = 0;
    //CAPI variables
    struct cxl_afu_h *afu;
    struct WEDGraphCSR *wedGraphCSR;
    // float init_pr = 1.0f / (float)graph->num_vertices;
    struct PageRankStats *stats = newPageRankStatsGraphCSR(graph);
    struct Timer *timer = (struct Timer *) malloc(sizeof(struct Timer));
    struct Timer *timer_inner = (struct Timer *) malloc(sizeof(struct Timer));
    // Host-side buffers shared with the AFU through the WED descriptor.
    float *pageRanksNext = (float *) my_malloc(graph->num_vertices * sizeof(float));
    float *riDividedOnDiClause = (float *) my_malloc(graph->num_vertices * sizeof(float));
    printf(" -----------------------------------------------------\n");
    printf("| %-51s | \n", " ---->>> CAPI <<<----");
    printf(" -----------------------------------------------------\n");
    printf("| %-51s | \n", "Starting Page Rank Push (tolerance/epsilon)");
    printf(" -----------------------------------------------------\n");
    printf("| %-51.13lf | \n", arguments->epsilon);
    printf(" -----------------------------------------------------\n");
    printf("| %-10s | %-8s | %-15s | %-9s | \n", "Iteration", "Active", "Error", "Time (S)");
    printf(" -----------------------------------------------------\n");
    Start(timer);
    // ********************************************************************************************
    // *************** MAP CSR DataStructure            **************
    // ********************************************************************************************
    wedGraphCSR = mapGraphCSRToWED((struct GraphCSR *)graph);
    wedGraphCSR->auxiliary1 = riDividedOnDiClause;
    wedGraphCSR->auxiliary2 = pageRanksNext;
    // ********************************************************************************************
    // ********************************************************************************************
    // *************** Setup AFU                        **************
    // ********************************************************************************************
    setupAFUGraphCSR(&afu, wedGraphCSR);
    struct AFUStatus afu_status = {0};
    afu_status.afu_config = arguments->afu_config;
    afu_status.afu_config_2 = arguments->afu_config_2;
    afu_status.cu_config = arguments->cu_config; // non zero CU triggers the AFU to work
    afu_status.cu_config = ((arguments->cu_config << 32) | (arguments->ker_numThreads));
    afu_status.cu_config_2 = arguments->cu_config_2; // non zero CU triggers the AFU to work
    afu_status.cu_stop = wedGraphCSR->num_vertices; // stop condition once all vertices processed
    startAFU(&afu, &afu_status);
    // ********************************************************************************************
    // Zero the accumulator once; it is re-zeroed inside the update loop below.
    #pragma omp parallel for default(none) private(v) shared(pageRanksNext,graph)
    for(v = 0; v < graph->num_vertices; v++)
    {
        pageRanksNext[v] = 0;
    }
    for(stats->iterations = 0; stats->iterations < arguments->iterations; stats->iterations++)
    {
        Start(timer_inner);
        error_total = 0;
        activeVertices = 0;
        // Host phase: per-source contribution = rank / out-degree (sinks contribute 0).
        #pragma omp parallel for private(v) shared(riDividedOnDiClause,stats,graph)
        for(v = 0; v < graph->num_vertices; v++)
        {
            if(graph->vertices->out_degree[v])
                riDividedOnDiClause[v] = stats->pageRanks[v] / graph->vertices->out_degree[v];
            else
                riDividedOnDiClause[v] = 0.0f;
        }
        // Stop(timer_inner);
        // printf("|A %-9u | %-8u | %-15.13lf | %-9f | \n",stats->iterations, activeVertices,error_total, Seconds(timer_inner));
        // Start(timer_inner);
        // ********************************************************************************************
        // *************** START CU                         **************
        startCU(&afu, &afu_status);
        // ********************************************************************************************
        // ********************************************************************************************
        // *************** WAIT AFU                         **************
        waitAFU(&afu, &afu_status);
        // ********************************************************************************************
        // Stop(timer_inner);
        // printf("|B %-9u | %-8u | %-15.13lf | %-9f | \n",stats->iterations, activeVertices,error_total, Seconds(timer_inner));
        // Start(timer_inner);
        // Host phase: damping update; error_total and activeVertices via reduction.
        #pragma omp parallel for private(v) shared(arguments, pageRanksNext,stats) reduction(+ : error_total, activeVertices)
        for(v = 0; v < graph->num_vertices; v++)
        {
            float prevPageRank = stats->pageRanks[v];
            float nextPageRank = stats->base_pr + (stats->damp * pageRanksNext[v]);
            stats->pageRanks[v] = nextPageRank;
            pageRanksNext[v] = 0; // reset accumulator for the next iteration
            double error = fabs( nextPageRank - prevPageRank);
            error_total += (error / graph->num_vertices);
            if(error >= arguments->epsilon)
            {
                activeVertices++;
            }
        }
        Stop(timer_inner);
        printf("| %-10u | %-8u | %-15.13lf | %-9f | \n", stats->iterations, activeVertices, error_total, Seconds(timer_inner));
        if(activeVertices == 0)
            break;
    }// end iteration loop
    // Normalize ranks; the reported sum should approach 1.0 for a converged run.
    double sum = 0.0f;
    #pragma omp parallel for reduction(+:sum)
    for(v = 0; v < graph->num_vertices; v++)
    {
        stats->pageRanks[v] = stats->pageRanks[v] / graph->num_vertices;
        sum += stats->pageRanks[v];
    }
    Stop(timer);
    stats->time_total = Seconds(timer);
    printf(" -----------------------------------------------------\n");
    printf("| %-10s | %-8s | %-15s | %-9s | \n", "Iterations", "PR Sum", "Error", "Time (S)");
    printf(" -----------------------------------------------------\n");
    printf("| %-10u | %-8lf | %-15.13lf | %-9f | \n", stats->iterations, sum, error_total, stats->time_total);
    printf(" -----------------------------------------------------\n");
    // pageRankPrint(pageRanks, graph->num_vertices);
    // ********************************************************************************************
    // *************** Releasing AFU                    **************
    releaseAFU(&afu);
    // ********************************************************************************************
    free(timer);
    free(timer_inner);
    free(pageRanksNext);
    free(riDividedOnDiClause);
    free(wedGraphCSR);
    stats->error_total = error_total;
    return stats;
}
// topoligy driven approach
struct PageRankStats *pageRankPullFixedPoint64BitGraphCSR(struct Arguments *arguments, struct GraphCSR *graph)
{
double error_total = 0.0;
uint32_t j;
uint32_t v;
uint32_t u;
uint32_t degree;
uint32_t edge_idx;
uint32_t activeVertices = 0;
//CAPI variables
struct cxl_afu_h *afu;
struct WEDGraphCSR *wedGraphCSR;
// float init_pr = 1.0f / (float)graph->num_vertices;
// uint64_t stats->base_pr_fp = FloatToFixed64(stats->base_pr);
// uint64_t epsilon_fp = DoubleToFixed64(arguments->epsilon);
// uint64_t num_vertices_fp = UInt32ToFixed64();
struct PageRankStats *stats = newPageRankStatsGraphCSR(graph);
struct Vertex *vertices = NULL;
uint32_t *sorted_edges_array = NULL;
struct Timer *timer = (struct Timer *) malloc(sizeof(struct Timer));
struct Timer *timer_inner = (struct Timer *) malloc(sizeof(struct Timer));
#if DIRECTED
vertices = graph->inverse_vertices;
sorted_edges_array = graph->inverse_sorted_edges_array->edges_array_dest;
#else
vertices = graph->vertices;
sorted_edges_array = graph->sorted_edges_array->edges_array_dest;
#endif
uint64_t *pageRanksNext = (uint64_t *) my_malloc(graph->num_vertices * sizeof(uint64_t));
uint64_t *riDividedOnDiClause = (uint64_t *) my_malloc(graph->num_vertices * sizeof(uint64_t));
// uint64_t* outDegreesFP = (uint64_t*) my_malloc(graph->num_vertices*sizeof(uint64_t));
// uint64_t* pageRanksFP = (uint64_t*) my_malloc(graph->num_vertices*sizeof(uint64_t));
printf(" -----------------------------------------------------\n");
printf("| %-51s | \n", " ---->>> CAPI <<<----");
printf(" -----------------------------------------------------\n");
printf("| %-51s | \n", "Starting Page Rank Pull FP_64 (tolerance/epsilon)");
printf(" -----------------------------------------------------\n");
printf("| %-51.13lf | \n", arguments->epsilon);
printf(" -----------------------------------------------------\n");
printf("| %-10s | %-8s | %-15s | %-9s | \n", "Iteration", "Active", "Error", "Time (S)");
printf(" -----------------------------------------------------\n");
Start(timer);
// ********************************************************************************************
// *************** MAP CSR DataStructure **************
// ********************************************************************************************
wedGraphCSR = mapGraphCSRToWED((struct GraphCSR *)graph);
wedGraphCSR->auxiliary1 = riDividedOnDiClause;
wedGraphCSR->auxiliary2 = pageRanksNext;
// ********************************************************************************************
// ********************************************************************************************
// *************** Setup AFU **************
// ********************************************************************************************
setupAFUGraphCSR(&afu, wedGraphCSR);
struct AFUStatus afu_status = {0};
afu_status.afu_config = arguments->afu_config;
afu_status.afu_config_2 = arguments->afu_config_2;
afu_status.cu_config = arguments->cu_config; // non zero CU triggers the AFU to work
afu_status.cu_config = ((arguments->cu_config << 32) | (arguments->ker_numThreads));
afu_status.cu_config_2 = arguments->cu_config_2; // non zero CU triggers the AFU to work
afu_status.cu_stop = wedGraphCSR->num_vertices; // stop condition once all vertices processed
startAFU(&afu, &afu_status);
// ********************************************************************************************
#pragma omp parallel for default(none) private(v) shared(graph,pageRanksNext)
for(v = 0; v < graph->num_vertices; v++)
{
pageRanksNext[v] = 0;
}
for(stats->iterations = 0; stats->iterations < arguments->iterations; stats->iterations++)
{
error_total = 0;
activeVertices = 0;
Start(timer_inner);
#pragma omp parallel for
for(v = 0; v < graph->num_vertices; v++)
{
if(graph->vertices->out_degree[v])
riDividedOnDiClause[v] = DoubleToFixed64(stats->pageRanks[v] / graph->vertices->out_degree[v]);
else
riDividedOnDiClause[v] = 0.0f;
}
// Stop(timer_inner);
// printf("|A %-9u | %-8u | %-15.13lf | %-9f | \n",stats->iterations, activeVertices,error_total, Seconds(timer_inner));
// Start(timer_inner);
// ********************************************************************************************
// *************** START CU **************
startCU(&afu, &afu_status);
// ********************************************************************************************
// ********************************************************************************************
// *************** WAIT AFU **************
waitAFU(&afu, &afu_status);
// ********************************************************************************************
// Stop(timer_inner);
// printf("|B %-9u | %-8u | %-15.13lf | %-9f | \n",stats->iterations, activeVertices,error_total, Seconds(timer_inner));
#pragma omp parallel for private(v) shared(arguments, pageRanksNext,stats) reduction(+ : error_total, activeVertices)
for(v = 0; v < graph->num_vertices; v++)
{
float prevPageRank = stats->pageRanks[v];
float nextPageRank = stats->base_pr + (stats->damp * Fixed64ToDouble(pageRanksNext[v]));
stats->pageRanks[v] = nextPageRank;
// pageRanksFP[v] = FloatToFixed(nextPageRank);
pageRanksNext[v] = 0;
double error = fabs( nextPageRank - prevPageRank);
error_total += (error / graph->num_vertices);
if(error >= arguments->epsilon)
{
activeVertices++;
}
}
Stop(timer_inner);
printf("| %-10u | %-8u | %-15.13lf | %-9f | \n", stats->iterations, activeVertices, error_total, Seconds(timer_inner));
if(activeVertices == 0)
break;
}// end iteration loop
double sum = 0.0f;
#pragma omp parallel for reduction(+:sum)
for(v = 0; v < graph->num_vertices; v++)
{
stats->pageRanks[v] = stats->pageRanks[v] / graph->num_vertices;
sum += stats->pageRanks[v];
}
Stop(timer);
stats->time_total = Seconds(timer);
printf(" -----------------------------------------------------\n");
printf("| %-10s | %-8s | %-15s | %-9s | \n", "Iterations", "PR Sum", "Error", "Time (S)");
printf(" -----------------------------------------------------\n");
printf("| %-10u | %-8lf | %-15.13lf | %-9f | \n", stats->iterations, sum, error_total, stats->time_total);
printf(" -----------------------------------------------------\n");
// pageRankPrint(pageRanks, graph->num_vertices);
// ********************************************************************************************
// *************** Releasing AFU **************
releaseAFU(&afu);
// ********************************************************************************************
free(timer);
free(timer_inner);
free(riDividedOnDiClause);
free(pageRanksNext);
free(wedGraphCSR);
stats->error_total = error_total;
return stats;
}
struct PageRankStats *pageRankPullFixedPoint32BitGraphCSR(struct Arguments *arguments, struct GraphCSR *graph)
{
    // Pull-direction PageRank on a CSR graph with the gather phase offloaded
    // to a CAPI AFU, using 32-bit fixed-point vertex contributions.
    //
    // Host work per iteration:
    //   1. quantize pageRanks[v] / out_degree[v] into riDividedOnDiClause
    //      via FloatToFixed32,
    //   2. startCU()/waitAFU() — the AFU presumably performs the neighbor
    //      gather into pageRanksNext (TODO confirm against the AFU kernel),
    //   3. apply the damped update, accumulate the mean absolute error and
    //      count vertices still above epsilon; stop early when none remain.
    //
    // Returns a heap-allocated PageRankStats (ownership passes to the
    // caller); pageRanks are normalized by num_vertices before returning.
    double error_total = 0.0;
    uint32_t v;
    uint32_t activeVertices = 0;
    //CAPI variables
    struct cxl_afu_h *afu;
    struct WEDGraphCSR *wedGraphCSR;
    struct PageRankStats *stats = newPageRankStatsGraphCSR(graph);
    struct Timer *timer = (struct Timer *) malloc(sizeof(struct Timer));
    struct Timer *timer_inner = (struct Timer *) malloc(sizeof(struct Timer));
    // NOTE(review): the #if DIRECTED inverse-edge selection formerly here
    // only initialized locals that were never read in this function; removed
    // as dead code (the AFU receives the whole graph through the WED below).
    // Accumulators written by the AFU, and the quantized contributions it reads.
    uint64_t *pageRanksNext = (uint64_t *) my_malloc(graph->num_vertices * sizeof(uint64_t));
    uint32_t *riDividedOnDiClause = (uint32_t *) my_malloc(graph->num_vertices * sizeof(uint32_t));
    printf(" -----------------------------------------------------\n");
    printf("| %-51s | \n", " ---->>> CAPI <<<----");
    printf(" -----------------------------------------------------\n");
    printf("| %-51s | \n", "Starting Page Rank Pull FP_32 (tolerance/epsilon)");
    printf(" -----------------------------------------------------\n");
    printf("| %-51.13lf | \n", arguments->epsilon);
    printf(" -----------------------------------------------------\n");
    printf("| %-10s | %-8s | %-15s | %-9s | \n", "Iteration", "Active", "Error", "Time (S)");
    printf(" -----------------------------------------------------\n");
    Start(timer);
    // ********************************************************************************************
    // *************** MAP CSR DataStructure **************
    // ********************************************************************************************
    wedGraphCSR = mapGraphCSRToWED((struct GraphCSR *)graph);
    wedGraphCSR->auxiliary1 = riDividedOnDiClause;
    wedGraphCSR->auxiliary2 = pageRanksNext;
    // ********************************************************************************************
    // *************** Setup AFU **************
    // ********************************************************************************************
    setupAFUGraphCSR(&afu, wedGraphCSR);
    struct AFUStatus afu_status = {0};
    afu_status.afu_config = arguments->afu_config;
    afu_status.afu_config_2 = arguments->afu_config_2;
    // Pack the CU selector (high word) with the kernel thread count (low
    // word). Cast before shifting: shifting a 32-bit operand by 32 is UB.
    // (A plain assignment of cu_config that was immediately overwritten
    // here has been removed as a dead store.)
    afu_status.cu_config = (((uint64_t)arguments->cu_config << 32) | (arguments->ker_numThreads));
    afu_status.cu_config_2 = arguments->cu_config_2; // non zero CU triggers the AFU to work
    afu_status.cu_stop = wedGraphCSR->num_vertices; // stop condition once all vertices processed
    startAFU(&afu, &afu_status);
    // ********************************************************************************************
    // Zero the accumulators once; they are re-zeroed per iteration below.
    #pragma omp parallel for default(none) private(v) shared(graph,pageRanksNext)
    for(v = 0; v < graph->num_vertices; v++)
    {
        pageRanksNext[v] = 0;
    }
    for(stats->iterations = 0; stats->iterations < arguments->iterations; stats->iterations++)
    {
        error_total = 0;
        activeVertices = 0;
        Start(timer_inner);
        // Quantize each vertex's contribution r_v / d_v to 32-bit fixed point.
        #pragma omp parallel for
        for(v = 0; v < graph->num_vertices; v++)
        {
            if(graph->vertices->out_degree[v])
                riDividedOnDiClause[v] = FloatToFixed32(stats->pageRanks[v] / graph->vertices->out_degree[v]);
            else
                riDividedOnDiClause[v] = 0; // integer slot: 0, not 0.0f
        }
        // ********************************************************************************************
        // *************** START CU / WAIT AFU **************
        startCU(&afu, &afu_status);
        waitAFU(&afu, &afu_status);
        // ********************************************************************************************
        // Damped update and convergence bookkeeping.
        #pragma omp parallel for private(v) shared(arguments, pageRanksNext,stats) reduction(+ : error_total, activeVertices)
        for(v = 0; v < graph->num_vertices; v++)
        {
            float prevPageRank = stats->pageRanks[v];
            float nextPageRank = stats->base_pr + (stats->damp * Fixed32ToFloat(pageRanksNext[v]));
            stats->pageRanks[v] = nextPageRank;
            pageRanksNext[v] = 0; // reset accumulator for the next iteration
            double error = fabs( nextPageRank - prevPageRank);
            error_total += (error / graph->num_vertices);
            if(error >= arguments->epsilon)
            {
                activeVertices++;
            }
        }
        Stop(timer_inner);
        printf("| %-10u | %-8u | %-15.13lf | %-9f | \n", stats->iterations, activeVertices, error_total, Seconds(timer_inner));
        if(activeVertices == 0)
            break;
    }// end iteration loop
    // Normalize ranks by the vertex count and accumulate the reported sum.
    double sum = 0.0;
    #pragma omp parallel for reduction(+:sum)
    for(v = 0; v < graph->num_vertices; v++)
    {
        stats->pageRanks[v] = stats->pageRanks[v] / graph->num_vertices;
        sum += stats->pageRanks[v];
    }
    Stop(timer);
    stats->time_total = Seconds(timer);
    printf(" -----------------------------------------------------\n");
    printf("| %-10s | %-8s | %-15s | %-9s | \n", "Iterations", "PR Sum", "Error", "Time (S)");
    printf(" -----------------------------------------------------\n");
    printf("| %-10u | %-8lf | %-15.13lf | %-9f | \n", stats->iterations, sum, error_total, stats->time_total);
    printf(" -----------------------------------------------------\n");
    // ********************************************************************************************
    // *************** Releasing AFU **************
    releaseAFU(&afu);
    // ********************************************************************************************
    free(timer);
    free(timer_inner);
    free(riDividedOnDiClause);
    free(pageRanksNext);
    free(wedGraphCSR);
    stats->error_total = error_total;
    return stats;
}
struct PageRankStats *pageRankPullFixedPoint16BitGraphCSR(struct Arguments *arguments, struct GraphCSR *graph)
{
    // Pull-direction PageRank on a CSR graph with the gather phase offloaded
    // to a CAPI AFU, using 16-bit fixed-point vertex contributions
    // (FloatToFixed16 on the way in, Fixed16ToFloat on the way back).
    // Structure is identical to the 32-bit variant; only the quantization
    // width differs. Returns a heap-allocated PageRankStats (caller frees);
    // pageRanks are normalized by num_vertices before returning.
    double error_total = 0.0;
    uint32_t v;
    uint32_t activeVertices = 0;
    //CAPI variables
    struct cxl_afu_h *afu;
    struct WEDGraphCSR *wedGraphCSR;
    struct PageRankStats *stats = newPageRankStatsGraphCSR(graph);
    struct Timer *timer = (struct Timer *) malloc(sizeof(struct Timer));
    struct Timer *timer_inner = (struct Timer *) malloc(sizeof(struct Timer));
    // NOTE(review): the #if DIRECTED inverse-edge selection formerly here
    // only initialized locals that were never read in this function; removed
    // as dead code (the AFU receives the whole graph through the WED below).
    uint64_t *pageRanksNext = (uint64_t *) my_malloc(graph->num_vertices * sizeof(uint64_t));
    uint16_t *riDividedOnDiClause = (uint16_t *) my_malloc(graph->num_vertices * sizeof(uint16_t));
    printf(" -----------------------------------------------------\n");
    printf("| %-51s | \n", " ---->>> CAPI <<<----");
    printf(" -----------------------------------------------------\n");
    printf("| %-51s | \n", "Starting Page Rank Pull FP_16 (tolerance/epsilon)");
    printf(" -----------------------------------------------------\n");
    printf("| %-51.13lf | \n", arguments->epsilon);
    printf(" -----------------------------------------------------\n");
    printf("| %-10s | %-8s | %-15s | %-9s | \n", "Iteration", "Active", "Error", "Time (S)");
    printf(" -----------------------------------------------------\n");
    Start(timer);
    // ********************************************************************************************
    // *************** MAP CSR DataStructure **************
    // ********************************************************************************************
    wedGraphCSR = mapGraphCSRToWED((struct GraphCSR *)graph);
    wedGraphCSR->auxiliary1 = riDividedOnDiClause;
    wedGraphCSR->auxiliary2 = pageRanksNext;
    // ********************************************************************************************
    // *************** Setup AFU **************
    // ********************************************************************************************
    setupAFUGraphCSR(&afu, wedGraphCSR);
    struct AFUStatus afu_status = {0};
    afu_status.afu_config = arguments->afu_config;
    afu_status.afu_config_2 = arguments->afu_config_2;
    // Pack the CU selector (high word) with the kernel thread count (low
    // word). Cast before shifting: shifting a 32-bit operand by 32 is UB.
    // (A plain assignment of cu_config that was immediately overwritten
    // here has been removed as a dead store.)
    afu_status.cu_config = (((uint64_t)arguments->cu_config << 32) | (arguments->ker_numThreads));
    afu_status.cu_config_2 = arguments->cu_config_2; // non zero CU triggers the AFU to work
    afu_status.cu_stop = wedGraphCSR->num_vertices; // stop condition once all vertices processed
    startAFU(&afu, &afu_status);
    // ********************************************************************************************
    // Zero the accumulators once; they are re-zeroed per iteration below.
    #pragma omp parallel for default(none) private(v) shared(graph,pageRanksNext)
    for(v = 0; v < graph->num_vertices; v++)
    {
        pageRanksNext[v] = 0;
    }
    for(stats->iterations = 0; stats->iterations < arguments->iterations; stats->iterations++)
    {
        error_total = 0;
        activeVertices = 0;
        Start(timer_inner);
        // Quantize each vertex's contribution r_v / d_v to 16-bit fixed point.
        #pragma omp parallel for
        for(v = 0; v < graph->num_vertices; v++)
        {
            if(graph->vertices->out_degree[v])
                riDividedOnDiClause[v] = FloatToFixed16(stats->pageRanks[v] / graph->vertices->out_degree[v]);
            else
                riDividedOnDiClause[v] = 0; // integer slot: 0, not 0.0f
        }
        // ********************************************************************************************
        // *************** START CU / WAIT AFU **************
        startCU(&afu, &afu_status);
        waitAFU(&afu, &afu_status);
        // ********************************************************************************************
        // Damped update and convergence bookkeeping.
        #pragma omp parallel for private(v) shared(arguments, pageRanksNext,stats) reduction(+ : error_total, activeVertices)
        for(v = 0; v < graph->num_vertices; v++)
        {
            float prevPageRank = stats->pageRanks[v];
            float nextPageRank = stats->base_pr + (stats->damp * Fixed16ToFloat(pageRanksNext[v]));
            stats->pageRanks[v] = nextPageRank;
            pageRanksNext[v] = 0; // reset accumulator for the next iteration
            double error = fabs( nextPageRank - prevPageRank);
            error_total += (error / graph->num_vertices);
            if(error >= arguments->epsilon)
            {
                activeVertices++;
            }
        }
        Stop(timer_inner);
        printf("| %-10u | %-8u | %-15.13lf | %-9f | \n", stats->iterations, activeVertices, error_total, Seconds(timer_inner));
        if(activeVertices == 0)
            break;
    }// end iteration loop
    // Normalize ranks by the vertex count and accumulate the reported sum.
    double sum = 0.0;
    #pragma omp parallel for reduction(+:sum)
    for(v = 0; v < graph->num_vertices; v++)
    {
        stats->pageRanks[v] = stats->pageRanks[v] / graph->num_vertices;
        sum += stats->pageRanks[v];
    }
    Stop(timer);
    stats->time_total = Seconds(timer);
    printf(" -----------------------------------------------------\n");
    printf("| %-10s | %-8s | %-15s | %-9s | \n", "Iterations", "PR Sum", "Error", "Time (S)");
    printf(" -----------------------------------------------------\n");
    printf("| %-10u | %-8lf | %-15.13lf | %-9f | \n", stats->iterations, sum, error_total, stats->time_total);
    printf(" -----------------------------------------------------\n");
    // ********************************************************************************************
    // *************** Releasing AFU **************
    releaseAFU(&afu);
    // ********************************************************************************************
    free(timer);
    free(timer_inner);
    free(riDividedOnDiClause);
    free(pageRanksNext);
    free(wedGraphCSR);
    stats->error_total = error_total;
    return stats;
}
struct PageRankStats *pageRankPullFixedPoint8BitGraphCSR(struct Arguments *arguments, struct GraphCSR *graph)
{
    // Pull-direction PageRank on a CSR graph with the gather phase offloaded
    // to a CAPI AFU, using 8-bit fixed-point vertex contributions
    // (FloatToFixed8 on the way in, Fixed8ToFloat on the way back).
    // Structure is identical to the 32-bit variant; only the quantization
    // width differs. Returns a heap-allocated PageRankStats (caller frees);
    // pageRanks are normalized by num_vertices before returning.
    double error_total = 0.0;
    uint32_t v;
    uint32_t activeVertices = 0;
    //CAPI variables
    struct cxl_afu_h *afu;
    struct WEDGraphCSR *wedGraphCSR;
    struct PageRankStats *stats = newPageRankStatsGraphCSR(graph);
    struct Timer *timer = (struct Timer *) malloc(sizeof(struct Timer));
    struct Timer *timer_inner = (struct Timer *) malloc(sizeof(struct Timer));
    // NOTE(review): the #if DIRECTED inverse-edge selection formerly here
    // only initialized locals that were never read in this function; removed
    // as dead code (the AFU receives the whole graph through the WED below).
    uint64_t *pageRanksNext = (uint64_t *) my_malloc(graph->num_vertices * sizeof(uint64_t));
    uint8_t *riDividedOnDiClause = (uint8_t *) my_malloc(graph->num_vertices * sizeof(uint8_t));
    printf(" -----------------------------------------------------\n");
    printf("| %-51s | \n", " ---->>> CAPI <<<----");
    printf(" -----------------------------------------------------\n");
    printf("| %-51s | \n", "Starting Page Rank Pull FP_8 (tolerance/epsilon)");
    printf(" -----------------------------------------------------\n");
    printf("| %-51.13lf | \n", arguments->epsilon);
    printf(" -----------------------------------------------------\n");
    printf("| %-10s | %-8s | %-15s | %-9s | \n", "Iteration", "Active", "Error", "Time (S)");
    printf(" -----------------------------------------------------\n");
    Start(timer);
    // ********************************************************************************************
    // *************** MAP CSR DataStructure **************
    // ********************************************************************************************
    wedGraphCSR = mapGraphCSRToWED((struct GraphCSR *)graph);
    wedGraphCSR->auxiliary1 = riDividedOnDiClause;
    wedGraphCSR->auxiliary2 = pageRanksNext;
    // ********************************************************************************************
    // *************** Setup AFU **************
    // ********************************************************************************************
    setupAFUGraphCSR(&afu, wedGraphCSR);
    struct AFUStatus afu_status = {0};
    afu_status.afu_config = arguments->afu_config;
    afu_status.afu_config_2 = arguments->afu_config_2;
    // Pack the CU selector (high word) with the kernel thread count (low
    // word). Cast before shifting: shifting a 32-bit operand by 32 is UB.
    // (A plain assignment of cu_config that was immediately overwritten
    // here has been removed as a dead store.)
    afu_status.cu_config = (((uint64_t)arguments->cu_config << 32) | (arguments->ker_numThreads));
    afu_status.cu_config_2 = arguments->cu_config_2; // non zero CU triggers the AFU to work
    afu_status.cu_stop = wedGraphCSR->num_vertices; // stop condition once all vertices processed
    startAFU(&afu, &afu_status);
    // ********************************************************************************************
    // Zero the accumulators once; they are re-zeroed per iteration below.
    #pragma omp parallel for default(none) private(v) shared(graph,pageRanksNext)
    for(v = 0; v < graph->num_vertices; v++)
    {
        pageRanksNext[v] = 0;
    }
    for(stats->iterations = 0; stats->iterations < arguments->iterations; stats->iterations++)
    {
        error_total = 0;
        activeVertices = 0;
        Start(timer_inner);
        // Quantize each vertex's contribution r_v / d_v to 8-bit fixed point.
        #pragma omp parallel for
        for(v = 0; v < graph->num_vertices; v++)
        {
            if(graph->vertices->out_degree[v])
                riDividedOnDiClause[v] = FloatToFixed8(stats->pageRanks[v] / graph->vertices->out_degree[v]);
            else
                riDividedOnDiClause[v] = 0; // integer slot: 0, not 0.0f
        }
        // ********************************************************************************************
        // *************** START CU / WAIT AFU **************
        startCU(&afu, &afu_status);
        waitAFU(&afu, &afu_status);
        // ********************************************************************************************
        // Damped update and convergence bookkeeping.
        #pragma omp parallel for private(v) shared(arguments, pageRanksNext,stats) reduction(+ : error_total, activeVertices)
        for(v = 0; v < graph->num_vertices; v++)
        {
            float prevPageRank = stats->pageRanks[v];
            float nextPageRank = stats->base_pr + (stats->damp * Fixed8ToFloat(pageRanksNext[v]));
            stats->pageRanks[v] = nextPageRank;
            pageRanksNext[v] = 0; // reset accumulator for the next iteration
            double error = fabs( nextPageRank - prevPageRank);
            error_total += (error / graph->num_vertices);
            if(error >= arguments->epsilon)
            {
                activeVertices++;
            }
        }
        Stop(timer_inner);
        printf("| %-10u | %-8u | %-15.13lf | %-9f | \n", stats->iterations, activeVertices, error_total, Seconds(timer_inner));
        if(activeVertices == 0)
            break;
    }// end iteration loop
    // Normalize ranks by the vertex count and accumulate the reported sum.
    double sum = 0.0;
    #pragma omp parallel for reduction(+:sum)
    for(v = 0; v < graph->num_vertices; v++)
    {
        stats->pageRanks[v] = stats->pageRanks[v] / graph->num_vertices;
        sum += stats->pageRanks[v];
    }
    Stop(timer);
    stats->time_total = Seconds(timer);
    printf(" -----------------------------------------------------\n");
    printf("| %-10s | %-8s | %-15s | %-9s | \n", "Iterations", "PR Sum", "Error", "Time (S)");
    printf(" -----------------------------------------------------\n");
    printf("| %-10u | %-8lf | %-15.13lf | %-9f | \n", stats->iterations, sum, error_total, stats->time_total);
    printf(" -----------------------------------------------------\n");
    // ********************************************************************************************
    // *************** Releasing AFU **************
    releaseAFU(&afu);
    // ********************************************************************************************
    free(timer);
    free(timer_inner);
    free(riDividedOnDiClause);
    free(pageRanksNext);
    free(wedGraphCSR);
    stats->error_total = error_total;
    return stats;
}
struct PageRankStats *pageRankPushFixedPointGraphCSR(struct Arguments *arguments, struct GraphCSR *graph)
{
double error_total = 0.0;
// uint32_t i;
uint32_t v;
// double error = 0;
uint32_t activeVertices = 0;
// float init_pr = 1.0f / (float)graph->num_vertices;
struct PageRankStats *stats = newPageRankStatsGraphCSR(graph);
// uint64_t stats->base_prFP = DoubleToFixed(stats->base_pr);
// uint64_t stats->dampFP = DoubleToFixed(stats->damp);
struct Timer *timer = (struct Timer *) malloc(sizeof(struct Timer));
struct Timer *timer_inner = (struct Timer *) malloc(sizeof(struct Timer));
// uint32_t* pageRanksFP = (uint32_t*) my_malloc(graph->num_vertices*sizeof(uint32_t));
uint64_t *pageRanksNext = (uint64_t *) my_malloc(graph->num_vertices * sizeof(uint64_t));
uint64_t *riDividedOnDiClause = (uint64_t *) my_malloc(graph->num_vertices * sizeof(uint64_t));
printf(" -----------------------------------------------------\n");
printf("| %-51s | \n", "Starting Page Rank Push FP (tolerance/epsilon)");
printf(" -----------------------------------------------------\n");
printf("| %-51.13lf | \n", arguments->epsilon);
printf(" -----------------------------------------------------\n");
printf("| %-10s | %-8s | %-15s | %-9s | \n", "Iteration", "Active", "Error", "Time (S)");
printf(" -----------------------------------------------------\n");
Start(timer);
#pragma omp parallel for default(none) private(v) shared(pageRanksNext,graph)
for(v = 0; v < graph->num_vertices; v++)
{
// pageRanksFP[v]=stats->base_prFP;
pageRanksNext[v] = 0;
}
for(stats->iterations = 0; stats->iterations < arguments->iterations; stats->iterations++)
{
Start(timer_inner);
error_total = 0;
activeVertices = 0;
#pragma omp parallel for private(v) shared(riDividedOnDiClause,stats,graph)
for(v = 0; v < graph->num_vertices; v++)
{
if(graph->vertices->out_degree[v])
{
riDividedOnDiClause[v] = DoubleToFixed64(stats->pageRanks[v] / graph->vertices->out_degree[v]);
// riDividedOnDiClause[v] = DIVFixed64V1(pageRanksFP[v],UInt64ToFixed(graph->vertices[v].out_degree));
}
else
riDividedOnDiClause[v] = 0.0f;
}
#pragma omp parallel for default(none) schedule(dynamic, 1024) private(v) shared(stats,graph,pageRanksNext,riDividedOnDiClause) num_threads(arguments->ker_numThreads)
for(v = 0; v < graph->num_vertices; v++)
{
uint32_t degree = graph->vertices->out_degree[v];
uint32_t edge_idx = graph->vertices->edges_idx[v];
// uint32_t tid = omp_get_thread_num();
uint32_t j;
for(j = edge_idx ; j < (edge_idx + degree) ; j++)
{
uint32_t u = EXTRACT_VALUE(graph->sorted_edges_array->edges_array_dest[j]);
#pragma omp atomic update
pageRanksNext[u] += riDividedOnDiClause[v];
}
}
#pragma omp parallel for private(v) shared(arguments, pageRanksNext,stats) reduction(+ : error_total, activeVertices)
for(v = 0; v < graph->num_vertices; v++)
{
float prevPageRank = stats->pageRanks[v];
float nextPageRank = stats->base_pr + (stats->damp * Fixed64ToDouble(pageRanksNext[v]));
stats->pageRanks[v] = nextPageRank;
// pageRanksFP[v] = FloatToFixed(nextPageRank);
pageRanksNext[v] = 0;
double error = fabs( nextPageRank - prevPageRank);
error_total += (error / graph->num_vertices);
if(error >= arguments->epsilon)
{
activeVertices++;
}
}
Stop(timer_inner);
printf("| %-10u | %-8u | %-15.13lf | %-9f | \n", stats->iterations, activeVertices, error_total, Seconds(timer_inner));
if(activeVertices == 0)
break;
}// end iteration loop
double sum = 0.0f;
#pragma omp parallel for reduction(+:sum)
for(v = 0; v < graph->num_vertices; v++)
{
stats->pageRanks[v] = stats->pageRanks[v] / graph->num_vertices;
sum += stats->pageRanks[v];
}
Stop(timer);
stats->time_total = Seconds(timer);
printf(" -----------------------------------------------------\n");
printf("| %-10s | %-8s | %-15s | %-9s | \n", "Iterations", "PR Sum", "Error", "Time (S)");
printf(" -----------------------------------------------------\n");
printf("| %-10u | %-8lf | %-15.13lf | %-9f | \n", stats->iterations, sum, error_total, stats->time_total);
printf(" -----------------------------------------------------\n");
free(timer);
free(timer_inner);
free(pageRanksNext);
free(riDividedOnDiClause);
stats->error_total = error_total;
return stats;
}
//done by mohannad Ibranim
//v_0: No need for next iteration's quantization parameters. (eqn 1)
struct PageRankStats *pageRankPullQuant32BitGraphCSR(struct Arguments *arguments, struct GraphCSR *graph)
{
//QUANT_SCALE = 32;
uint32_t j;
uint32_t v;
uint32_t u;
uint32_t degree;
uint32_t edge_idx;
uint32_t activeVertices = 0;
double error_total = 0.0;
//CAPI variables
struct cxl_afu_h *afu;
struct WEDGraphCSR *wedGraphCSR;
struct PageRankStats *stats = newPageRankStatsGraphCSR(graph);
struct Vertex *vertices = NULL;
uint32_t *sorted_edges_array = NULL;
struct Timer *timer = (struct Timer *) malloc(sizeof(struct Timer));
struct Timer *timer_inner = (struct Timer *) malloc(sizeof(struct Timer));
#if DIRECTED
vertices = graph->inverse_vertices;
sorted_edges_array = graph->inverse_sorted_edges_array->edges_array_dest;
#else
vertices = graph->vertices;
sorted_edges_array = graph->sorted_edges_array->edges_array_dest;
#endif
float *riDividedOnDiClause = (float *) my_malloc(graph->num_vertices * sizeof(float));
uint32_t *riDividedOnDiClause_quant = (uint32_t *)my_malloc(graph->num_vertices * sizeof(uint32_t));
uint64_t *pageRanksNext_quant = (uint64_t *)my_malloc(graph->num_vertices * sizeof(uint64_t));
printf(" -----------------------------------------------------\n");
printf("| %-51s | \n", " ---->>> CAPI <<<----");
printf(" -----------------------------------------------------\n");
printf("| %-30s %-19s| \n", "Starting Page Rank Pull Quant_32", "(tolerance/epsilon)");
printf(" -----------------------------------------------------\n");
printf("| %-51.13lf | \n", arguments->epsilon);
printf(" -----------------------------------------------------\n");
printf("| %-10s | %-8s | %-15s | %-9s | \n", "Iteration", "Active", "Error", "Time (S)");
printf(" -----------------------------------------------------\n");
Start(timer);
// ********************************************************************************************
// *************** MAP CSR DataStructure **************
// ********************************************************************************************
wedGraphCSR = mapGraphCSRToWED((struct GraphCSR *)graph);
wedGraphCSR->auxiliary1 = riDividedOnDiClause_quant;
wedGraphCSR->auxiliary2 = pageRanksNext_quant;
// ********************************************************************************************
// ********************************************************************************************
// *************** Setup AFU **************
// ********************************************************************************************
setupAFUGraphCSR(&afu, wedGraphCSR);
struct AFUStatus afu_status = {0};
afu_status.afu_config = arguments->afu_config;
afu_status.afu_config_2 = arguments->afu_config_2;
afu_status.cu_config = arguments->cu_config; // non zero CU triggers the AFU to work
afu_status.cu_config = ((arguments->cu_config << 32) | (arguments->ker_numThreads));
afu_status.cu_config_2 = arguments->cu_config_2; // non zero CU triggers the AFU to work
afu_status.cu_stop = wedGraphCSR->num_vertices; // stop condition once all vertices processed
startAFU(&afu, &afu_status);
// ********************************************************************************************
#pragma omp parallel for default(none) private(v) shared(graph,pageRanksNext_quant)
for(v = 0; v < graph->num_vertices; v++)
{
pageRanksNext_quant[v] = 0;
}
for(stats->iterations = 0; stats->iterations < arguments->iterations; stats->iterations++)
{
error_total = 0;
activeVertices = 0;
Start(timer_inner);
#pragma omp parallel for
for(v = 0; v < graph->num_vertices; v++)
{
if(graph->vertices->out_degree[v])
riDividedOnDiClause[v] = stats->pageRanks[v] / graph->vertices->out_degree[v];
else
riDividedOnDiClause[v] = 0.0f;
}
//1. Extract the quantization parameters from riDividedOnDiClause[]
struct quant_params_32 rDivD_params;
getMinMax_32(&rDivD_params, riDividedOnDiClause, graph->num_vertices);
rDivD_params.scale = GetScale_32(rDivD_params.min, rDivD_params.max);
rDivD_params.zero = 0;
// printf("Iter %d quant parameters:\nMin = %.16f,\tMax = %.16f\nScale = %.24f,\tZero = %u\n",
// stats->iterations,rDivD_params.min,rDivD_params.max,rDivD_params.scale,rDivD_params.zero);
// printf(".........................................................\n");
//2. Quantize riDividedOnDiClause[]
#pragma omp parallel for private(v) shared(riDividedOnDiClause_quant,riDividedOnDiClause,graph)
for(v = 0; v < graph->num_vertices; v++)
{
riDividedOnDiClause_quant[v] = quantize_32(riDividedOnDiClause[v], rDivD_params.scale, rDivD_params.zero);
}
// Stop(timer_inner);
// printf("|A %-9u | %-8u | %-15.13lf | %-9f | \n",stats->iterations, activeVertices,error_total, Seconds(timer_inner));
// Start(timer_inner);
// ********************************************************************************************
// *************** START CU **************
startCU(&afu, &afu_status);
// ********************************************************************************************
// ********************************************************************************************
// *************** WAIT AFU **************
waitAFU(&afu, &afu_status);
// ********************************************************************************************
// Stop(timer_inner);
// printf("|B %-9u | %-8u | %-15.13lf | %-9f | \n",stats->iterations, activeVertices,error_total, Seconds(timer_inner));
// Start(timer_inner);
//uint64_t temp_degree = 0;
#pragma omp parallel for private(v) shared(arguments,pageRanksNext_quant,stats) reduction(+ : error_total, activeVertices)
for(v = 0; v < graph->num_vertices; v++)
{
float prevPageRank = stats->pageRanks[v];
float nextPageRank = stats->base_pr + stats->damp * (rDivD_params.scale * pageRanksNext_quant[v]);
stats->pageRanks[v] = nextPageRank;
pageRanksNext_quant[v] = 0.0f;
double error = fabs(nextPageRank - prevPageRank);
error_total += (error / graph->num_vertices);
if(error >= arguments->epsilon)
{
activeVertices++;
//temp_degree += vertices[v].in_degree;
}
}
Stop(timer_inner);
printf("| %-10u | %-8u | %-15.13lf | %-9f | \n", stats->iterations, activeVertices, error_total, Seconds(timer_inner));
if(activeVertices == 0)
break;
}// end iteration loop
double sum = 0.0f;
#pragma omp parallel for reduction(+:sum)
for(v = 0; v < graph->num_vertices; v++)
{
stats->pageRanks[v] = stats->pageRanks[v] / graph->num_vertices;
sum += stats->pageRanks[v];
}
Stop(timer);
stats->time_total = Seconds(timer);
printf(" -----------------------------------------------------\n");
printf("| %-10s | %-8s | %-15s | %-9s | \n", "Iterations", "PR Sum", "Error", "Time (S)");
printf(" -----------------------------------------------------\n");
printf("| %-10u | %-8lf | %-15.13lf | %-9f | \n", stats->iterations, sum, error_total, stats->time_total);
printf(" -----------------------------------------------------\n");
// ********************************************************************************************
// *************** Releasing AFU **************
releaseAFU(&afu);
// ********************************************************************************************
free(timer);
free(timer_inner);
free(pageRanksNext_quant);
free(riDividedOnDiClause);
free(riDividedOnDiClause_quant);
free(wedGraphCSR);
stats->error_total = error_total;
return stats;
}
struct PageRankStats *pageRankPullQuant16BitGraphCSR(struct Arguments *arguments, struct GraphCSR *graph)
{
    // Pull-direction PageRank with 16-bit quantized contributions, where the
    // per-edge gather/accumulate phase is offloaded to a CAPI AFU.
    // Host work per iteration:
    //   1. compute riDividedOnDiClause[v] = PR(v) / outDegree(v)
    //   2. derive quantization params (min/max -> scale; zero fixed at 0) and
    //      quantize the contributions to uint16
    //   3. start the CU and wait for the AFU, which is expected to fill the
    //      integer accumulators pageRanksNext_quant[] (wired via the WED below)
    //   4. dequantize, apply damping, measure per-vertex error / convergence
    // Returns heap-allocated stats; ownership transfers to the caller.
    uint32_t v;
    uint32_t activeVertices = 0;
    double error_total = 0.0;
    // CAPI handles
    struct cxl_afu_h *afu;
    struct WEDGraphCSR *wedGraphCSR;
    struct PageRankStats *stats = newPageRankStatsGraphCSR(graph);
    struct Timer *timer = (struct Timer *) malloc(sizeof(struct Timer));
    struct Timer *timer_inner = (struct Timer *) malloc(sizeof(struct Timer));
    float *riDividedOnDiClause = (float *) my_malloc(graph->num_vertices * sizeof(float));
    uint16_t *riDividedOnDiClause_quant = (uint16_t *)my_malloc(graph->num_vertices * sizeof(uint16_t));
    uint64_t *pageRanksNext_quant = (uint64_t *)my_malloc(graph->num_vertices * sizeof(uint64_t));
    printf(" -----------------------------------------------------\n");
    printf("| %-51s | \n", " ---->>> CAPI <<<----");
    printf(" -----------------------------------------------------\n");
    printf("| %-30s %-19s| \n", "Starting Page Rank Pull Quant_16", "(tolerance/epsilon)");
    printf(" -----------------------------------------------------\n");
    printf("| %-51.13lf | \n", arguments->epsilon);
    printf(" -----------------------------------------------------\n");
    printf("| %-10s | %-8s | %-15s | %-9s | \n", "Iteration", "Active", "Error", "Time (S)");
    printf(" -----------------------------------------------------\n");
    Start(timer);
    // Map the CSR graph into the WED (work element descriptor) shared with the AFU.
    wedGraphCSR = mapGraphCSRToWED((struct GraphCSR *)graph);
    wedGraphCSR->auxiliary1 = riDividedOnDiClause_quant; // AFU input: quantized contributions
    wedGraphCSR->auxiliary2 = pageRanksNext_quant;       // AFU output: per-vertex accumulators
    setupAFUGraphCSR(&afu, wedGraphCSR);
    struct AFUStatus afu_status = {0};
    afu_status.afu_config = arguments->afu_config;
    afu_status.afu_config_2 = arguments->afu_config_2;
    // High 32 bits: CU selector (non zero CU triggers the AFU to work);
    // low 32 bits: kernel thread count. Cast before shifting so a 32-bit
    // cu_config does not hit undefined behavior on << 32.
    afu_status.cu_config = (((uint64_t)arguments->cu_config << 32) | (arguments->ker_numThreads));
    afu_status.cu_config_2 = arguments->cu_config_2; // non zero CU triggers the AFU to work
    afu_status.cu_stop = wedGraphCSR->num_vertices; // stop condition once all vertices processed
    startAFU(&afu, &afu_status);
    #pragma omp parallel for default(none) private(v) shared(graph,pageRanksNext_quant)
    for(v = 0; v < graph->num_vertices; v++)
    {
        pageRanksNext_quant[v] = 0;
    }
    for(stats->iterations = 0; stats->iterations < arguments->iterations; stats->iterations++)
    {
        error_total = 0;
        activeVertices = 0;
        Start(timer_inner);
        // Contribution of each vertex to its neighbors; dangling vertices contribute 0.
        #pragma omp parallel for
        for(v = 0; v < graph->num_vertices; v++)
        {
            if(graph->vertices->out_degree[v])
                riDividedOnDiClause[v] = stats->pageRanks[v] / graph->vertices->out_degree[v];
            else
                riDividedOnDiClause[v] = 0.0f;
        }
        //1. Extract the quantization parameters from riDividedOnDiClause[]
        struct quant_params_16 rDivD_params;
        getMinMax_16(&rDivD_params, riDividedOnDiClause, graph->num_vertices);
        rDivD_params.scale = GetScale_16(rDivD_params.min, rDivD_params.max);
        rDivD_params.zero = 0;
        //2. Quantize riDividedOnDiClause[]
        #pragma omp parallel for private(v) shared(riDividedOnDiClause_quant,riDividedOnDiClause,graph)
        for(v = 0; v < graph->num_vertices; v++)
        {
            riDividedOnDiClause_quant[v] = quantize_16(riDividedOnDiClause[v], rDivD_params.scale, rDivD_params.zero);
        }
        //3. Run the accelerator: kick the compute unit, then block until it finishes.
        startCU(&afu, &afu_status);
        waitAFU(&afu, &afu_status);
        //4. Dequantize the accumulated sums, apply damping, and track convergence.
        #pragma omp parallel for private(v) shared(arguments,pageRanksNext_quant,stats) reduction(+ : error_total, activeVertices)
        for(v = 0; v < graph->num_vertices; v++)
        {
            float prevPageRank = stats->pageRanks[v];
            float nextPageRank = stats->base_pr + stats->damp * (rDivD_params.scale * pageRanksNext_quant[v]);
            stats->pageRanks[v] = nextPageRank;
            pageRanksNext_quant[v] = 0; // reset integer accumulator for the next iteration
            double error = fabs(nextPageRank - prevPageRank);
            error_total += (error / graph->num_vertices);
            if(error >= arguments->epsilon)
            {
                activeVertices++;
            }
        }
        Stop(timer_inner);
        printf("| %-10u | %-8u | %-15.13lf | %-9f | \n", stats->iterations, activeVertices, error_total, Seconds(timer_inner));
        if(activeVertices == 0)
            break;
    }// end iteration loop
    // Normalize ranks by vertex count and report the resulting sum.
    double sum = 0.0f;
    #pragma omp parallel for reduction(+:sum)
    for(v = 0; v < graph->num_vertices; v++)
    {
        stats->pageRanks[v] = stats->pageRanks[v] / graph->num_vertices;
        sum += stats->pageRanks[v];
    }
    Stop(timer);
    stats->time_total = Seconds(timer);
    printf(" -----------------------------------------------------\n");
    printf("| %-10s | %-8s | %-15s | %-9s | \n", "Iterations", "PR Sum", "Error", "Time (S)");
    printf(" -----------------------------------------------------\n");
    printf("| %-10u | %-8lf | %-15.13lf | %-9f | \n", stats->iterations, sum, error_total, stats->time_total);
    printf(" -----------------------------------------------------\n");
    // Release the accelerator before freeing the buffers it was mapped to.
    releaseAFU(&afu);
    free(timer);
    free(timer_inner);
    free(pageRanksNext_quant);
    free(riDividedOnDiClause);
    free(riDividedOnDiClause_quant);
    free(wedGraphCSR);
    stats->error_total = error_total;
    return stats;
}
struct PageRankStats *pageRankPullQuant8BitGraphCSR(struct Arguments *arguments, struct GraphCSR *graph)
{
    // Pull-direction PageRank with 8-bit quantized contributions, where the
    // per-edge gather/accumulate phase is offloaded to a CAPI AFU.
    // Same pipeline as the 16-bit variant, using the _8 quantization helpers:
    //   1. compute riDividedOnDiClause[v] = PR(v) / outDegree(v)
    //   2. derive quantization params (min/max -> scale; zero fixed at 0) and
    //      quantize the contributions to uint8
    //   3. start the CU and wait for the AFU, which is expected to fill the
    //      integer accumulators pageRanksNext_quant[] (wired via the WED below)
    //   4. dequantize, apply damping, measure per-vertex error / convergence
    // Returns heap-allocated stats; ownership transfers to the caller.
    uint32_t v;
    uint32_t activeVertices = 0;
    double error_total = 0.0;
    // CAPI handles
    struct cxl_afu_h *afu;
    struct WEDGraphCSR *wedGraphCSR;
    struct PageRankStats *stats = newPageRankStatsGraphCSR(graph);
    struct Timer *timer = (struct Timer *) malloc(sizeof(struct Timer));
    struct Timer *timer_inner = (struct Timer *) malloc(sizeof(struct Timer));
    float *riDividedOnDiClause = (float *) my_malloc(graph->num_vertices * sizeof(float));
    uint8_t *riDividedOnDiClause_quant = (uint8_t *)my_malloc(graph->num_vertices * sizeof(uint8_t));
    uint64_t *pageRanksNext_quant = (uint64_t *)my_malloc(graph->num_vertices * sizeof(uint64_t));
    printf(" -----------------------------------------------------\n");
    printf("| %-51s | \n", " ---->>> CAPI <<<----");
    printf(" -----------------------------------------------------\n");
    printf("| %-30s %-19s| \n", "Starting Page Rank Pull Quant_8", "(tolerance/epsilon)");
    printf(" -----------------------------------------------------\n");
    printf("| %-51.13lf | \n", arguments->epsilon);
    printf(" -----------------------------------------------------\n");
    printf("| %-10s | %-8s | %-15s | %-9s | \n", "Iteration", "Active", "Error", "Time (S)");
    printf(" -----------------------------------------------------\n");
    Start(timer);
    // Map the CSR graph into the WED (work element descriptor) shared with the AFU.
    wedGraphCSR = mapGraphCSRToWED((struct GraphCSR *)graph);
    wedGraphCSR->auxiliary1 = riDividedOnDiClause_quant; // AFU input: quantized contributions
    wedGraphCSR->auxiliary2 = pageRanksNext_quant;       // AFU output: per-vertex accumulators
    setupAFUGraphCSR(&afu, wedGraphCSR);
    struct AFUStatus afu_status = {0};
    afu_status.afu_config = arguments->afu_config;
    afu_status.afu_config_2 = arguments->afu_config_2;
    // High 32 bits: CU selector (non zero CU triggers the AFU to work);
    // low 32 bits: kernel thread count. Cast before shifting so a 32-bit
    // cu_config does not hit undefined behavior on << 32.
    afu_status.cu_config = (((uint64_t)arguments->cu_config << 32) | (arguments->ker_numThreads));
    afu_status.cu_config_2 = arguments->cu_config_2; // non zero CU triggers the AFU to work
    afu_status.cu_stop = wedGraphCSR->num_vertices; // stop condition once all vertices processed
    startAFU(&afu, &afu_status);
    #pragma omp parallel for default(none) private(v) shared(graph,pageRanksNext_quant)
    for(v = 0; v < graph->num_vertices; v++)
    {
        pageRanksNext_quant[v] = 0;
    }
    for(stats->iterations = 0; stats->iterations < arguments->iterations; stats->iterations++)
    {
        error_total = 0;
        activeVertices = 0;
        Start(timer_inner);
        // Contribution of each vertex to its neighbors; dangling vertices contribute 0.
        #pragma omp parallel for
        for(v = 0; v < graph->num_vertices; v++)
        {
            if(graph->vertices->out_degree[v])
                riDividedOnDiClause[v] = stats->pageRanks[v] / graph->vertices->out_degree[v];
            else
                riDividedOnDiClause[v] = 0.0f;
        }
        //1. Extract the quantization parameters from riDividedOnDiClause[]
        struct quant_params_8 rDivD_params;
        getMinMax_8(&rDivD_params, riDividedOnDiClause, graph->num_vertices);
        rDivD_params.scale = GetScale_8(rDivD_params.min, rDivD_params.max);
        rDivD_params.zero = 0;
        //2. Quantize riDividedOnDiClause[]
        #pragma omp parallel for private(v) shared(riDividedOnDiClause_quant,riDividedOnDiClause,graph)
        for(v = 0; v < graph->num_vertices; v++)
        {
            riDividedOnDiClause_quant[v] = quantize_8(riDividedOnDiClause[v], rDivD_params.scale, rDivD_params.zero);
        }
        //3. Run the accelerator: kick the compute unit, then block until it finishes.
        startCU(&afu, &afu_status);
        waitAFU(&afu, &afu_status);
        //4. Dequantize the accumulated sums, apply damping, and track convergence.
        #pragma omp parallel for private(v) shared(arguments,pageRanksNext_quant,stats) reduction(+ : error_total, activeVertices)
        for(v = 0; v < graph->num_vertices; v++)
        {
            float prevPageRank = stats->pageRanks[v];
            float nextPageRank = stats->base_pr + stats->damp * (rDivD_params.scale * pageRanksNext_quant[v]);
            stats->pageRanks[v] = nextPageRank;
            pageRanksNext_quant[v] = 0; // reset integer accumulator for the next iteration
            double error = fabs(nextPageRank - prevPageRank);
            error_total += (error / graph->num_vertices);
            if(error >= arguments->epsilon)
            {
                activeVertices++;
            }
        }
        Stop(timer_inner);
        printf("| %-10u | %-8u | %-15.13lf | %-9f | \n", stats->iterations, activeVertices, error_total, Seconds(timer_inner));
        if(activeVertices == 0)
            break;
    }// end iteration loop
    // Normalize ranks by vertex count and report the resulting sum.
    double sum = 0.0f;
    #pragma omp parallel for reduction(+:sum)
    for(v = 0; v < graph->num_vertices; v++)
    {
        stats->pageRanks[v] = stats->pageRanks[v] / graph->num_vertices;
        sum += stats->pageRanks[v];
    }
    Stop(timer);
    stats->time_total = Seconds(timer);
    printf(" -----------------------------------------------------\n");
    printf("| %-10s | %-8s | %-15s | %-9s | \n", "Iterations", "PR Sum", "Error", "Time (S)");
    printf(" -----------------------------------------------------\n");
    printf("| %-10u | %-8lf | %-15.13lf | %-9f | \n", stats->iterations, sum, error_total, stats->time_total);
    printf(" -----------------------------------------------------\n");
    // Release the accelerator before freeing the buffers it was mapped to.
    releaseAFU(&afu);
    free(timer);
    free(timer_inner);
    free(pageRanksNext_quant);
    free(riDividedOnDiClause);
    free(riDividedOnDiClause_quant);
    free(wedGraphCSR);
    stats->error_total = error_total;
    return stats;
}
//done by mohannad Ibranim
//done by mohannad Ibranim
struct PageRankStats *pageRankPushQuantGraphCSR(struct Arguments *arguments, struct GraphCSR *graph)
{
    // Push-direction PageRank with 32-bit quantized contributions (CPU only,
    // no AFU). Each vertex scatters its dequantized contribution to its
    // out-neighbors with an atomic add; new ranks are then formed from the
    // damped sums and convergence is checked against epsilon.
    // Returns heap-allocated stats; ownership transfers to the caller.
    uint32_t v;
    uint32_t activeVertices = 0;
    double error_total = 0.0;
    struct PageRankStats *stats = newPageRankStatsGraphCSR(graph);
    struct Timer *timer = (struct Timer *) malloc(sizeof(struct Timer));
    struct Timer *timer_inner = (struct Timer *) malloc(sizeof(struct Timer));
    float *pageRanksNext = (float *) my_malloc(graph->num_vertices * sizeof(float));
    float *riDividedOnDiClause = (float *) my_malloc(graph->num_vertices * sizeof(float));
    uint32_t *riDividedOnDiClause_quant = (uint32_t *)my_malloc(graph->num_vertices * sizeof(uint32_t));
    printf(" -----------------------------------------------------\n");
    printf("| %-30s %-19s| \n", "Starting Page Rank Push Quant_32", "(tolerance/epsilon)");
    printf(" -----------------------------------------------------\n");
    printf("| %-51.13lf | \n", arguments->epsilon);
    printf(" -----------------------------------------------------\n");
    printf("| %-10s | %-8s | %-15s | %-9s | \n", "Iteration", "Active", "Error", "Time (S)");
    printf(" -----------------------------------------------------\n");
    Start(timer);
    #pragma omp parallel for default(none) private(v) shared(pageRanksNext,graph)
    for(v = 0; v < graph->num_vertices; v++)
    {
        pageRanksNext[v] = 0;
    }
    for(stats->iterations = 0; stats->iterations < arguments->iterations; stats->iterations++)
    {
        Start(timer_inner);
        error_total = 0;
        activeVertices = 0;
        // Contribution of each vertex to its neighbors; dangling vertices contribute 0.
        #pragma omp parallel for private(v) shared(riDividedOnDiClause,stats,graph)
        for(v = 0; v < graph->num_vertices; v++)
        {
            if(graph->vertices->out_degree[v])
                riDividedOnDiClause[v] = stats->pageRanks[v] / graph->vertices->out_degree[v];
            else
                riDividedOnDiClause[v] = 0.0f;
        }
        //1. Extract the quantization parameters from riDividedOnDiClause[]
        // Unlike the pull variants, this scheme uses an asymmetric zero point.
        struct quant_params rDivD_params;
        getMinMax(&rDivD_params, riDividedOnDiClause, graph->num_vertices);
        rDivD_params.scale = GetScale(rDivD_params.min, rDivD_params.max);
        rDivD_params.zero = GetZeroPoint(rDivD_params.max, rDivD_params.scale);
        //2. Quantize riDividedOnDiClause[]
        #pragma omp parallel for private(v) shared(riDividedOnDiClause_quant,riDividedOnDiClause,stats,graph)
        for(v = 0; v < graph->num_vertices; v++)
        {
            riDividedOnDiClause_quant[v] = quantize(riDividedOnDiClause[v], rDivD_params.scale, rDivD_params.zero);
        }
        //3. Scatter: each vertex pushes its dequantized contribution to its out-neighbors.
        #pragma omp parallel for default(none) private(v) shared(stats,rDivD_params,riDividedOnDiClause_quant,graph,pageRanksNext,riDividedOnDiClause) schedule(dynamic, 1024) num_threads(arguments->ker_numThreads)
        for(v = 0; v < graph->num_vertices; v++)
        {
            uint32_t degree = graph->vertices->out_degree[v];
            uint32_t edge_idx = graph->vertices->edges_idx[v];
            uint32_t j;
            for(j = edge_idx ; j < (edge_idx + degree) ; j++)
            {
                uint32_t u = EXTRACT_VALUE(graph->sorted_edges_array->edges_array_dest[j]);
                // Neighbor accumulators are shared across threads; serialize the add.
                #pragma omp atomic update
                pageRanksNext[u] += rDivD_params.scale * (riDividedOnDiClause_quant[v] - rDivD_params.zero);
            }
        }
        //4. Apply damping, measure per-vertex error, and count active vertices.
        #pragma omp parallel for private(v) shared(arguments, stats,pageRanksNext) reduction(+ : error_total, activeVertices)
        for(v = 0; v < graph->num_vertices; v++)
        {
            float prevPageRank = stats->pageRanks[v];
            float nextPageRank = stats->base_pr + stats->damp * pageRanksNext[v];
            stats->pageRanks[v] = nextPageRank;
            pageRanksNext[v] = 0; // reset accumulator for the next iteration
            double error = fabs(nextPageRank - prevPageRank);
            error_total += (error / graph->num_vertices);
            if(error >= arguments->epsilon)
            {
                activeVertices++;
            }
        }
        Stop(timer_inner);
        printf("| %-10u | %-8u | %-15.13lf | %-9f | \n", stats->iterations, activeVertices, error_total, Seconds(timer_inner));
        if(activeVertices == 0)
            break;
    }// end iteration loop
    // Normalize ranks by vertex count and report the resulting sum.
    double sum = 0.0f;
    #pragma omp parallel for reduction(+:sum)
    for(v = 0; v < graph->num_vertices; v++)
    {
        stats->pageRanks[v] = stats->pageRanks[v] / graph->num_vertices;
        sum += stats->pageRanks[v];
    }
    Stop(timer);
    stats->time_total = Seconds(timer);
    printf(" -----------------------------------------------------\n");
    printf("| %-10s | %-8s | %-15s | %-9s | \n", "Iterations", "PR Sum", "Error", "Time (S)");
    printf(" -----------------------------------------------------\n");
    printf("| %-10u | %-8lf | %-15.13lf | %-9f | \n", stats->iterations, sum, error_total, stats->time_total);
    printf(" -----------------------------------------------------\n");
    // pageRankPrint(pageRanks, graph->num_vertices);
    free(timer);
    free(timer_inner);
    free(pageRanksNext);
    free(riDividedOnDiClause);
    free(riDividedOnDiClause_quant); // fix: was leaked in the original
    stats->error_total = error_total;
    return stats;
}
// Data-driven (worklist-based) pull PageRank: only vertices on the current
// worklist recompute their rank; a vertex whose rank moved by >= epsilon
// re-activates all of its out-neighbors for the next iteration.
// Returns heap-allocated stats; ownership transfers to the caller.
struct PageRankStats *pageRankDataDrivenPullGraphCSR(struct Arguments *arguments, struct GraphCSR *graph)
{
double error_total = 0.0;
uint32_t i;
uint32_t v;
// float init_pr = 1.0f / (float)graph->num_vertices;
struct PageRankStats *stats = newPageRankStatsGraphCSR(graph);
struct Vertex *vertices = NULL;
uint32_t *sorted_edges_array = NULL;
struct Timer *timer = (struct Timer *) malloc(sizeof(struct Timer));
struct Timer *timer_inner = (struct Timer *) malloc(sizeof(struct Timer));
// Byte-per-vertex membership flags for the current and next worklists.
uint8_t *workListCurr = NULL;
uint8_t *workListNext = NULL;
int activeVertices = 0;
workListCurr = (uint8_t *) my_malloc(graph->num_vertices * sizeof(uint8_t));
workListNext = (uint8_t *) my_malloc(graph->num_vertices * sizeof(uint8_t));
resetWorkList(workListNext, graph->num_vertices);
resetWorkList(workListCurr, graph->num_vertices);
// For directed graphs, pull iterates over incoming edges via the inverse CSR.
#if DIRECTED
vertices = graph->inverse_vertices;
sorted_edges_array = graph->inverse_sorted_edges_array->edges_array_dest;
#else
vertices = graph->vertices;
sorted_edges_array = graph->sorted_edges_array->edges_array_dest;
#endif
float *riDividedOnDiClause = (float *) my_malloc(graph->num_vertices * sizeof(float));
printf(" -----------------------------------------------------\n");
printf("| %-51s | \n", "Starting Page Rank Pull DD (tolerance/epsilon)");
printf(" -----------------------------------------------------\n");
printf("| %-51.13lf | \n", arguments->epsilon);
printf(" -----------------------------------------------------\n");
printf("| %-10s | %-8s | %-15s | %-9s | \n", "Iteration", "Active", "Error", "Time (S)");
printf(" -----------------------------------------------------\n");
Start(timer);
Start(timer_inner);
// Seed the worklist with every vertex so the first iteration is a full sweep.
#pragma omp parallel for reduction(+:activeVertices)
for(i = 0; i < graph->num_vertices; i++)
{
workListNext[i] = 1;
activeVertices++;
}
swapWorkLists(&workListNext, &workListCurr);
resetWorkList(workListNext, graph->num_vertices);
Stop(timer_inner);
printf("| %-10s | %-8u | %-15.13lf | %-9f | \n", "Init", activeVertices, error_total, Seconds(timer_inner));
for(stats->iterations = 0; stats->iterations < arguments->iterations; stats->iterations++)
{
Start(timer_inner);
error_total = 0;
activeVertices = 0;
// Contribution of each vertex to its neighbors; dangling vertices contribute 0.
#pragma omp parallel for
for(v = 0; v < graph->num_vertices; v++)
{
if(graph->vertices->out_degree[v])
riDividedOnDiClause[v] = stats->pageRanks[v] / graph->vertices->out_degree[v];
else
riDividedOnDiClause[v] = 0.0f;
}
#pragma omp parallel for default(none) shared(arguments,riDividedOnDiClause,sorted_edges_array,vertices,workListCurr,workListNext,stats,graph) private(v) reduction(+:activeVertices,error_total) schedule(dynamic, 1024) num_threads(arguments->ker_numThreads)
for(v = 0; v < graph->num_vertices; v++)
{
// Only recompute vertices that were activated in the previous iteration.
if(workListCurr[v])
{
uint32_t edge_idx;
uint32_t degree;
uint32_t j;
uint32_t u;
double error = 0;
float nodeIncomingPR = 0;
degree = vertices->out_degree[v]; // when directed we use inverse graph out degree means in degree
edge_idx = vertices->edges_idx[v];
for(j = edge_idx ; j < (edge_idx + degree) ; j++)
{
u = EXTRACT_VALUE(sorted_edges_array[j]);
nodeIncomingPR += riDividedOnDiClause[u]; // sum (PRi/outDegree(i))
}
float oldPageRank = stats->pageRanks[v];
float newPageRank = stats->base_pr + (stats->damp * nodeIncomingPR);
error = fabs(newPageRank - oldPageRank);
error_total += error / graph->num_vertices;
if(error >= arguments->epsilon)
{
// Rank moved significantly: commit it and wake all out-neighbors
// (forward CSR here, even in the directed case) for the next round.
stats->pageRanks[v] = newPageRank;
degree = graph->vertices->out_degree[v];
edge_idx = graph->vertices->edges_idx[v];
for(j = edge_idx ; j < (edge_idx + degree) ; j++)
{
u = EXTRACT_VALUE(graph->sorted_edges_array->edges_array_dest[j]);
// Several producers may set the same flag; atomic write keeps it race-free.
#pragma omp atomic write
workListNext[u] = 1;
// uint8_t old_val = workListNext[u];
// if(!old_val){
// __sync_bool_compare_and_swap(&workListNext[u], 0, 1);
// }
}
activeVertices++;
}
}
}
// activeVertices = getNumOfSetBits(workListNext);
swapWorkLists(&workListNext, &workListCurr);
resetWorkList(workListNext, graph->num_vertices);
Stop(timer_inner);
printf("| %-10u | %-8u | %-15.13lf | %-9f | \n", stats->iterations, activeVertices, error_total, Seconds(timer_inner));
if(activeVertices == 0)
break;
}// end iteration loop
// Normalize ranks by vertex count and report the resulting sum.
double sum = 0.0f;
#pragma omp parallel for reduction(+:sum)
for(v = 0; v < graph->num_vertices; v++)
{
stats->pageRanks[v] = stats->pageRanks[v] / graph->num_vertices;
sum += stats->pageRanks[v];
}
Stop(timer);
stats->time_total = Seconds(timer);
printf(" -----------------------------------------------------\n");
printf("| %-10s | %-8s | %-15s | %-9s | \n", "Iterations", "PR Sum", "Error", "Time (S)");
printf(" -----------------------------------------------------\n");
printf("| %-10u | %-8lf | %-15.13lf | %-9f | \n", stats->iterations, sum, error_total, stats->time_total);
printf(" -----------------------------------------------------\n");
// pageRankPrint(pageRanks, graph->num_vertices);
free(workListCurr);
free(workListNext);
free(timer);
free(timer_inner);
free(riDividedOnDiClause);
stats->error_total = error_total;
return stats;
}
struct PageRankStats *pageRankDataDrivenPushGraphCSR(struct Arguments *arguments, struct GraphCSR *graph)
{
    // Data-driven push PageRank using residual propagation: each active vertex
    // folds its residual into its rank, then pushes a damped share of that
    // residual (delta) to every out-neighbor with an atomic add. A neighbor is
    // activated when its residual crosses epsilon for the first time.
    // Returns heap-allocated stats; ownership transfers to the caller.
    double error_total = 0.0;
    uint32_t v;
    uint32_t edge_idx;
    uint32_t degree;
    uint32_t j;
    uint32_t u;
    struct PageRankStats *stats = newPageRankStatsGraphCSR(graph);
    struct Vertex *vertices = NULL;
    uint32_t *sorted_edges_array = NULL;
    struct Timer *timer = (struct Timer *) malloc(sizeof(struct Timer));
    struct Timer *timer_inner = (struct Timer *) malloc(sizeof(struct Timer));
    // Byte-per-vertex membership flags for the current and next worklists.
    uint8_t *workListCurr = NULL;
    uint8_t *workListNext = NULL;
    int activeVertices = 0;
    workListCurr = (uint8_t *) my_malloc(graph->num_vertices * sizeof(uint8_t));
    workListNext = (uint8_t *) my_malloc(graph->num_vertices * sizeof(uint8_t));
    resetWorkList(workListNext, graph->num_vertices);
    resetWorkList(workListCurr, graph->num_vertices);
    // Incoming-edge view for the residual seeding below.
    #if DIRECTED
    vertices = graph->inverse_vertices;
    sorted_edges_array = graph->inverse_sorted_edges_array->edges_array_dest;
    #else
    vertices = graph->vertices;
    sorted_edges_array = graph->sorted_edges_array->edges_array_dest;
    #endif
    // NOTE(review): the original also allocated a riDividedOnDiClause[] buffer
    // here that was never read or written; the unused allocation was removed.
    float *aResiduals = (float *) my_malloc(graph->num_vertices * sizeof(float));
    printf(" -----------------------------------------------------\n");
    printf("| %-51s | \n", "Starting Page Rank Push DD (tolerance/epsilon)");
    printf(" -----------------------------------------------------\n");
    printf("| %-51.13lf | \n", arguments->epsilon);
    printf(" -----------------------------------------------------\n");
    printf("| %-10s | %-8s | %-15s | %-9s | \n", "Iteration", "Active", "Error", "Time (S)");
    printf(" -----------------------------------------------------\n");
    Start(timer);
    Start(timer_inner);
    // Seed residuals from incoming neighbors and activate every vertex.
    #pragma omp parallel for private(edge_idx,degree,v,j,u) shared(workListCurr,workListNext,aResiduals) reduction(+:activeVertices)
    for(v = 0; v < graph->num_vertices; v++)
    {
        aResiduals[v] = 0.0;
        workListCurr[v] = 1;
        workListNext[v] = 0;
        activeVertices++;
        degree = vertices->out_degree[v]; // when directed we use inverse graph out degree means in degree
        edge_idx = vertices->edges_idx[v];
        for(j = edge_idx ; j < (edge_idx + degree) ; j++)
        {
            u = EXTRACT_VALUE(sorted_edges_array[j]);
            if(graph->vertices->out_degree[u])
                aResiduals[v] += 1.0f / graph->vertices->out_degree[u]; // sum (PRi/outDegree(i))
        }
        aResiduals[v] = (1.0f - stats->damp) * stats->damp * aResiduals[v];
    }
    Stop(timer_inner);
    printf("| %-10s | %-8u | %-15.13lf | %-9f | \n", "Init", activeVertices, error_total, Seconds(timer_inner));
    for(stats->iterations = 0; stats->iterations < arguments->iterations; stats->iterations++)
    {
        Start(timer_inner);
        error_total = 0;
        activeVertices = 0;
        #pragma omp parallel for default(none) private(edge_idx,degree,v,j,u) shared(stats,arguments,graph,workListCurr,workListNext,aResiduals) reduction(+:error_total,activeVertices) schedule(dynamic,1024) num_threads(arguments->ker_numThreads)
        for(v = 0; v < graph->num_vertices; v++)
        {
            if(workListCurr[v])
            {
                // Fold the accumulated residual into this vertex's rank.
                float oldPageRank = stats->pageRanks[v];
                float newPageRank = aResiduals[v] + stats->pageRanks[v];
                error_total += fabs(newPageRank / graph->num_vertices - oldPageRank / graph->num_vertices);
                stats->pageRanks[v] = newPageRank;
                // Push a damped, degree-normalized share of the residual to
                // each out-neighbor (forward CSR).
                degree = graph->vertices->out_degree[v];
                float delta = stats->damp * (aResiduals[v] / degree);
                edge_idx = graph->vertices->edges_idx[v];
                for(j = edge_idx ; j < (edge_idx + degree) ; j++)
                {
                    u = EXTRACT_VALUE(graph->sorted_edges_array->edges_array_dest[j]);
                    float prevResidual = 0.0f;
                    prevResidual = aResiduals[u];
                    // Neighbor residuals are shared across threads; serialize the add.
                    #pragma omp atomic update
                    aResiduals[u] += delta;
                    // Activate u only on the epsilon crossing, so each vertex is
                    // enqueued (and counted) once per iteration.
                    if ((fabs(prevResidual + delta) >= arguments->epsilon) && (prevResidual <= arguments->epsilon))
                    {
                        activeVertices++;
                        if(!workListNext[u])
                        {
                            workListNext[u] = 1;
                        }
                    }
                }
                aResiduals[v] = 0.0f; // residual consumed
            }
        }
        swapWorkLists(&workListNext, &workListCurr);
        resetWorkList(workListNext, graph->num_vertices);
        Stop(timer_inner);
        printf("| %-10u | %-8u | %-15.13lf | %-9f | \n", stats->iterations, activeVertices, error_total, Seconds(timer_inner));
        if(activeVertices == 0)
            break;
    }// end iteration loop
    // Normalize ranks by vertex count and report the resulting sum.
    double sum = 0.0f;
    #pragma omp parallel for reduction(+:sum)
    for(v = 0; v < graph->num_vertices; v++)
    {
        stats->pageRanks[v] = stats->pageRanks[v] / graph->num_vertices;
        sum += stats->pageRanks[v];
    }
    Stop(timer);
    stats->time_total = Seconds(timer);
    printf(" -----------------------------------------------------\n");
    printf("| %-10s | %-8s | %-15s | %-9s | \n", "Iterations", "PR Sum", "Error", "Time (S)");
    printf(" -----------------------------------------------------\n");
    printf("| %-10u | %-8lf | %-15.13lf | %-9f | \n", stats->iterations, sum, error_total, stats->time_total);
    printf(" -----------------------------------------------------\n");
    free(workListCurr);
    free(workListNext);
    free(timer);
    free(timer_inner);
    free(aResiduals);
    stats->error_total = error_total;
    return stats;
}
struct PageRankStats *pageRankDataDrivenPullPushGraphCSR(struct Arguments *arguments, struct GraphCSR *graph)
{
double error_total = 0.0;
uint32_t v;
uint32_t edge_idx;
uint32_t degree;
uint32_t j;
uint32_t u;
// float init_pr = 1.0f / (float)graph->num_vertices;
struct PageRankStats *stats = newPageRankStatsGraphCSR(graph);
struct Vertex *vertices = NULL;
uint32_t *sorted_edges_array = NULL;
struct Timer *timer = (struct Timer *) malloc(sizeof(struct Timer));
struct Timer *timer_inner = (struct Timer *) malloc(sizeof(struct Timer));
uint8_t *workListCurr = NULL;
uint8_t *workListNext = NULL;
int activeVertices = 0;
workListCurr = (uint8_t *) my_malloc(graph->num_vertices * sizeof(uint8_t));
workListNext = (uint8_t *) my_malloc(graph->num_vertices * sizeof(uint8_t));
resetWorkList(workListNext, graph->num_vertices);
resetWorkList(workListCurr, graph->num_vertices);
#if DIRECTED
vertices = graph->inverse_vertices;
sorted_edges_array = graph->inverse_sorted_edges_array->edges_array_dest;
#else
vertices = graph->vertices;
sorted_edges_array = graph->sorted_edges_array->edges_array_dest;
#endif
float *riDividedOnDiClause = (float *) my_malloc(graph->num_vertices * sizeof(float));
float *aResiduals = (float *) my_malloc(graph->num_vertices * sizeof(float));
printf(" -----------------------------------------------------\n");
printf("| %-51s | \n", "Starting Page Rank Pull-Push DD (tolerance/epsilon)");
printf(" -----------------------------------------------------\n");
printf("| %-51.13lf | \n", arguments->epsilon);
printf(" -----------------------------------------------------\n");
printf("| %-10s | %-8s | %-15s | %-9s | \n", "Iteration", "Active", "Error", "Time (S)");
printf(" -----------------------------------------------------\n");
Start(timer);
Start(timer_inner);
#pragma omp parallel for private(edge_idx,degree,v,j,u) shared(workListCurr,workListNext,aResiduals) reduction(+:activeVertices)
for(v = 0; v < graph->num_vertices; v++)
{
aResiduals[v] = 0.0f;
workListCurr[v] = 1;
workListNext[v] = 0;
activeVertices++;
degree = vertices->out_degree[v]; // when directed we use inverse graph out degree means in degree
edge_idx = vertices->edges_idx[v];
for(j = edge_idx ; j < (edge_idx + degree) ; j++)
{
u = EXTRACT_VALUE(sorted_edges_array[j]);
if(graph->vertices->out_degree[u])
aResiduals[v] += 1.0f / graph->vertices->out_degree[u]; // sum (PRi/outDegree(i))
}
aResiduals[v] = (1.0f - stats->damp) * stats->damp * aResiduals[v];
}
Stop(timer_inner);
printf("| %-10s | %-8u | %-15.13lf | %-9f | \n", "Init", activeVertices, error_total, Seconds(timer_inner));
for(stats->iterations = 0; stats->iterations < arguments->iterations; stats->iterations++)
{
Start(timer_inner);
error_total = 0;
activeVertices = 0;
#pragma omp parallel for default(none) private(edge_idx,degree,v,j,u) shared(stats,vertices,sorted_edges_array,arguments,graph,workListCurr,workListNext,aResiduals) reduction(+:error_total,activeVertices) schedule(dynamic,1024) num_threads(arguments->ker_numThreads)
for(v = 0; v < graph->num_vertices; v++)
{
if(workListCurr[v])
{
float nodeIncomingPR = 0.0f;
degree = vertices->out_degree[v];
edge_idx = vertices->edges_idx[v];
for(j = edge_idx ; j < (edge_idx + degree) ; j++)
{
u = EXTRACT_VALUE(sorted_edges_array[j]);
nodeIncomingPR += stats->pageRanks[u] / graph->vertices->out_degree[u];
}
float newPageRank = stats->base_pr + (stats->damp * nodeIncomingPR);
float oldPageRank = stats->pageRanks[v];
// float newPageRank = aResiduals[v]+pageRanks[v];
error_total += fabs(newPageRank / graph->num_vertices - oldPageRank / graph->num_vertices);
#pragma omp atomic write
stats->pageRanks[v] = newPageRank;
degree = graph->vertices->out_degree[v];
float delta = stats->damp * (aResiduals[v] / degree);
edge_idx = graph->vertices->edges_idx[v];
for(j = edge_idx ; j < (edge_idx + degree) ; j++)
{
u = EXTRACT_VALUE(graph->sorted_edges_array->edges_array_dest[j]);
float prevResidual = 0.0f;
prevResidual = aResiduals[u];
#pragma omp atomic update
aResiduals[u] += delta;
if ((fabs(prevResidual + delta) >= arguments->epsilon) && (prevResidual <= arguments->epsilon))
{
activeVertices++;
aResiduals[u] += delta;
if(!workListNext[u])
{
workListNext[u] = 1;
}
}
}
aResiduals[v] = 0.0f;
}
}
// activeVertices = getNumOfSetBits(workListNext);
swapWorkLists(&workListNext, &workListCurr);
resetWorkList(workListNext, graph->num_vertices);
Stop(timer_inner);
printf("| %-10u | %-8u | %-15.13lf | %-9f | \n", stats->iterations, activeVertices, error_total, Seconds(timer_inner));
if(activeVertices == 0)
break;
}// end iteration loop
double sum = 0.0f;
#pragma omp parallel for reduction(+:sum)
for(v = 0; v < graph->num_vertices; v++)
{
stats->pageRanks[v] = stats->pageRanks[v] / graph->num_vertices;
sum += stats->pageRanks[v];
}
Stop(timer);
stats->time_total = Seconds(timer);
printf(" -----------------------------------------------------\n");
printf("| %-10s | %-8s | %-15s | %-9s | \n", "Iterations", "PR Sum", "Error", "Time (S)");
printf(" -----------------------------------------------------\n");
printf("| %-10u | %-8lf | %-15.13lf | %-9f | \n", stats->iterations, sum, error_total, stats->time_total);
printf(" -----------------------------------------------------\n");
// pageRankPrint(pageRanks, graph->num_vertices);
free(workListCurr);
free(workListNext);
free(timer);
free(timer_inner);
free(aResiduals);
free(riDividedOnDiClause);
stats->error_total = error_total;
return stats;
}
// float* pageRankDataDrivenPullFixedPointGraphCSR(struct Arguments *arguments, struct GraphCSR* graph){
// }
// float* pageRankDataDrivenPushFixedPointGraphCSR(struct Arguments *arguments, struct GraphCSR* graph){
// }
// float* pageRankDataDrivenPullPushFixedPointGraphCSR(struct Arguments *arguments, struct GraphCSR* graph){
// }
// ********************************************************************************************
// *************** ArrayList DataStructure **************
// ********************************************************************************************
// Dispatch to the PageRank variant selected by arguments->pushpull for the
// adjacency-array-list graph representation. Returns the stats object
// produced by the chosen variant (caller owns it).
struct PageRankStats *pageRankGraphAdjArrayList(struct Arguments *arguments, struct GraphAdjArrayList *graph)
{
struct PageRankStats *stats = NULL;
switch (arguments->pushpull)
{
case 0: // pull
stats = pageRankPullGraphAdjArrayList(arguments, graph);
break;
case 1: // push
stats = pageRankPushGraphAdjArrayList(arguments, graph);
break;
case 2: // pull fixed-point
stats = pageRankPullFixedPointGraphAdjArrayList(arguments, graph);
break;
case 3: // push fixed-point
stats = pageRankPushFixedPointGraphAdjArrayList(arguments, graph);
break;
case 4: // data-driven pull
stats = pageRankDataDrivenPullGraphAdjArrayList(arguments, graph);
break;
case 5: // data-driven push
stats = pageRankDataDrivenPushGraphAdjArrayList(arguments, graph);
break;
case 6: // data-driven pull-push
stats = pageRankDataDrivenPullPushGraphAdjArrayList(arguments, graph);
break;
default: // unknown mode: fall back to plain pull
stats = pageRankPullGraphAdjArrayList(arguments, graph);
break;
}
return stats;
}
// Topology-driven PageRank, pull direction, adjacency-array-list graph.
// Every iteration each vertex gathers PR(u)/outDegree(u) from its
// (in-)neighbours, applies damping, and the loop stops early once no
// vertex moved by >= arguments->epsilon. Returns a freshly allocated
// stats object (caller frees); stats->pageRanks ends up normalized by
// num_vertices.
struct PageRankStats *pageRankPullGraphAdjArrayList(struct Arguments *arguments, struct GraphAdjArrayList *graph)
{
double error_total = 0.0;
uint32_t j;
uint32_t v;
uint32_t u;
uint32_t degree;
uint32_t activeVertices = 0;
struct EdgeList *Nodes;
// float init_pr = 1.0f / (float)graph->num_vertices;
struct PageRankStats *stats = newPageRankStatsGraphAdjArrayList(graph);
struct Timer *timer = (struct Timer *) malloc(sizeof(struct Timer));
struct Timer *timer_inner = (struct Timer *) malloc(sizeof(struct Timer));
// pageRanksNext: per-vertex accumulated incoming contributions.
float *pageRanksNext = (float *) my_malloc(graph->num_vertices * sizeof(float));
// riDividedOnDiClause[v] = PR(v)/outDegree(v), rebuilt each iteration.
float *riDividedOnDiClause = (float *) my_malloc(graph->num_vertices * sizeof(float));
printf(" -----------------------------------------------------\n");
printf("| %-51s | \n", "Starting Page Rank Pull (tolerance/epsilon)");
printf(" -----------------------------------------------------\n");
printf("| %-51.13lf | \n", arguments->epsilon);
printf(" -----------------------------------------------------\n");
printf("| %-10s | %-8s | %-15s | %-9s | \n", "Iteration", "Active", "Error", "Time (S)");
printf(" -----------------------------------------------------\n");
Start(timer);
#pragma omp parallel for default(none) private(v) shared(graph,pageRanksNext)
for(v = 0; v < graph->num_vertices; v++)
{
pageRanksNext[v] = 0;
}
for(stats->iterations = 0; stats->iterations < arguments->iterations; stats->iterations++)
{
error_total = 0;
activeVertices = 0;
Start(timer_inner);
// Phase 1: precompute each vertex's outgoing contribution.
#pragma omp parallel for
for(v = 0; v < graph->num_vertices; v++)
{
if(graph->vertices[v].out_degree)
riDividedOnDiClause[v] = stats->pageRanks[v] / graph->vertices[v].out_degree;
else
riDividedOnDiClause[v] = 0.0f;
}
// Phase 2: each vertex pulls its neighbours' contributions.
#pragma omp parallel for reduction(+ : error_total,activeVertices) private(v,j,u,degree,Nodes) schedule(dynamic, 1024)
for(v = 0; v < graph->num_vertices; v++)
{
float nodeIncomingPR = 0.0f;
#if DIRECTED // will look at the other neighbours if directed by using inverese edge list
Nodes = graph->vertices[v].inNodes;
degree = graph->vertices[v].in_degree;
#else
Nodes = graph->vertices[v].outNodes;
degree = graph->vertices[v].out_degree;
#endif
for(j = 0 ; j < (degree) ; j++)
{
u = Nodes->edges_array_dest[j];
nodeIncomingPR += riDividedOnDiClause[u]; // stats->pageRanks[v]/graph->vertices[v].out_degree;
}
pageRanksNext[v] = nodeIncomingPR;
}
// Phase 3: apply damping, accumulate the error, count active vertices.
#pragma omp parallel for private(v) shared(arguments, pageRanksNext,stats) reduction(+ : error_total, activeVertices)
for(v = 0; v < graph->num_vertices; v++)
{
float prevPageRank = stats->pageRanks[v];
float nextPageRank = stats->base_pr + (stats->damp * pageRanksNext[v]);
stats->pageRanks[v] = nextPageRank;
pageRanksNext[v] = 0.0f;
double error = fabs( nextPageRank - prevPageRank);
error_total += (error / graph->num_vertices);
if(error >= arguments->epsilon)
{
activeVertices++;
}
}
Stop(timer_inner);
printf("| %-10u | %-8u | %-15.13lf | %-9f | \n", stats->iterations, activeVertices, error_total, Seconds(timer_inner));
if(activeVertices == 0)
break;
}// end iteration loop
// Normalize ranks by vertex count and report the final sum.
double sum = 0.0f;
#pragma omp parallel for reduction(+:sum)
for(v = 0; v < graph->num_vertices; v++)
{
stats->pageRanks[v] = stats->pageRanks[v] / graph->num_vertices;
sum += stats->pageRanks[v];
}
Stop(timer);
stats->time_total = Seconds(timer);
printf(" -----------------------------------------------------\n");
printf("| %-10s | %-8s | %-15s | %-9s | \n", "Iterations", "PR Sum", "Error", "Time (S)");
printf(" -----------------------------------------------------\n");
printf("| %-10u | %-8lf | %-15.13lf | %-9f | \n", stats->iterations, sum, error_total, stats->time_total);
printf(" -----------------------------------------------------\n");
// printf(" -----------------------------------------------------\n");
// printf("| %-10s | %-8lf | %-15s | %-9s | \n","PR Sum ",sum, stats->iterations, stats->time_total);
// printf(" -----------------------------------------------------\n");
// pageRankPrint(pageRanks, graph->num_vertices);
free(timer);
free(timer_inner);
free(pageRanksNext);
free(riDividedOnDiClause);
stats->error_total = error_total;
return stats;
}
// Topology-driven PageRank, push direction, adjacency-array-list graph.
// Every iteration each vertex scatters PR(v)/outDegree(v) to its
// out-neighbours' pageRanksNext slots via "#pragma omp atomic update".
// Converges when no vertex moves by >= arguments->epsilon. Caller owns
// the returned stats object.
struct PageRankStats *pageRankPushGraphAdjArrayList(struct Arguments *arguments, struct GraphAdjArrayList *graph)
{
double error_total = 0.0;
uint32_t i;
uint32_t v;
// double error = 0;
uint32_t activeVertices = 0;
// float init_pr = 1.0f / (float)graph->num_vertices;
struct PageRankStats *stats = newPageRankStatsGraphAdjArrayList(graph);
struct Timer *timer = (struct Timer *) malloc(sizeof(struct Timer));
struct Timer *timer_inner = (struct Timer *) malloc(sizeof(struct Timer));
struct EdgeList *Nodes;
// NOTE(review): vertex_lock is initialized and destroyed but never
// locked — the lock-based scatter is commented out below in favor of
// the atomic update; the array is kept (presumably) for experimentation.
omp_lock_t *vertex_lock = (omp_lock_t *) my_malloc( graph->num_vertices * sizeof(omp_lock_t));
#pragma omp parallel for default(none) private(i) shared(graph,vertex_lock)
for (i = 0; i < graph->num_vertices; i++)
{
omp_init_lock(&(vertex_lock[i]));
}
float *pageRanksNext = (float *) my_malloc(graph->num_vertices * sizeof(float));
float *riDividedOnDiClause = (float *) my_malloc(graph->num_vertices * sizeof(float));
printf(" -----------------------------------------------------\n");
printf("| %-51s | \n", "Starting Page Rank Push (tolerance/epsilon)");
printf(" -----------------------------------------------------\n");
printf("| %-51.13lf | \n", arguments->epsilon);
printf(" -----------------------------------------------------\n");
printf("| %-10s | %-8s | %-15s | %-9s | \n", "Iteration", "Active", "Error", "Time (S)");
printf(" -----------------------------------------------------\n");
Start(timer);
#pragma omp parallel for default(none) private(v) shared(pageRanksNext,graph)
for(v = 0; v < graph->num_vertices; v++)
{
pageRanksNext[v] = 0.0f;
}
for(stats->iterations = 0; stats->iterations < arguments->iterations; stats->iterations++)
{
Start(timer_inner);
error_total = 0;
activeVertices = 0;
// Phase 1: precompute each vertex's outgoing contribution.
#pragma omp parallel for private(v) shared(riDividedOnDiClause,stats,graph)
for(v = 0; v < graph->num_vertices; v++)
{
if(graph->vertices[v].out_degree)
riDividedOnDiClause[v] = stats->pageRanks[v] / graph->vertices[v].out_degree;
else
riDividedOnDiClause[v] = 0.0f;
}
// Phase 2: scatter contributions along out-edges (atomic accumulation).
#pragma omp parallel for default(none) private(v,Nodes) shared(graph,pageRanksNext,riDividedOnDiClause) schedule(dynamic, 1024)
for(v = 0; v < graph->num_vertices; v++)
{
Nodes = graph->vertices[v].outNodes;
uint32_t degree = graph->vertices[v].out_degree;
// uint32_t tid = omp_get_thread_num();
uint32_t j;
for(j = 0 ; j < (degree) ; j++)
{
uint32_t u = Nodes->edges_array_dest[j];
// omp_set_lock(&(vertex_lock[u]));
// pageRanksNext[u] += riDividedOnDiClause[v];
// omp_unset_lock((&vertex_lock[u]));
#pragma omp atomic update
pageRanksNext[u] += riDividedOnDiClause[v];
// __atomic_fetch_add(&pageRanksNext[u], riDividedOnDiClause[v], __ATOMIC_RELAXED);
// printf("tid %u degree %u edge_idx %u v %u u %u \n",tid,degree,edge_idx,v,u );
// addAtomicFloat(&pageRanksNext[u] , riDividedOnDiClause[v]);
}
}
// Phase 3: apply damping, accumulate error, count active vertices.
#pragma omp parallel for private(v) shared(arguments, pageRanksNext,stats) reduction(+ : error_total, activeVertices)
for(v = 0; v < graph->num_vertices; v++)
{
float prevPageRank = stats->pageRanks[v];
float nextPageRank = stats->base_pr + (stats->damp * pageRanksNext[v]);
stats->pageRanks[v] = nextPageRank;
pageRanksNext[v] = 0.0f;
double error = fabs( nextPageRank - prevPageRank);
error_total += (error / graph->num_vertices);
if(error >= arguments->epsilon)
{
activeVertices++;
}
}
Stop(timer_inner);
printf("| %-10u | %-8u | %-15.13lf | %-9f | \n", stats->iterations, activeVertices, error_total, Seconds(timer_inner));
if(activeVertices == 0)
break;
}// end iteration loop
// Normalize ranks by vertex count and report the final sum.
double sum = 0.0f;
#pragma omp parallel for reduction(+:sum)
for(v = 0; v < graph->num_vertices; v++)
{
stats->pageRanks[v] = stats->pageRanks[v] / graph->num_vertices;
sum += stats->pageRanks[v];
}
Stop(timer);
stats->time_total = Seconds(timer);
printf(" -----------------------------------------------------\n");
printf("| %-10s | %-8s | %-15s | %-9s | \n", "Iterations", "PR Sum", "Error", "Time (S)");
printf(" -----------------------------------------------------\n");
printf("| %-10u | %-8lf | %-15.13lf | %-9f | \n", stats->iterations, sum, error_total, stats->time_total);
printf(" -----------------------------------------------------\n");
// pageRankPrint(pageRanks, graph->num_vertices);
#pragma omp parallel for
for (i = 0; i < graph->num_vertices; i++)
{
omp_destroy_lock(&(vertex_lock[i]));
}
free(timer);
free(timer_inner);
free(vertex_lock);
free(pageRanksNext);
free(riDividedOnDiClause);
stats->error_total = error_total;
return stats;
}
/**
 * Fixed-point PageRank, pull direction, adjacency-array-list graph.
 *
 * Same structure as pageRankPullGraphAdjArrayList, but per-vertex
 * contributions are carried as 64-bit fixed-point integers
 * (DoubleToFixed64 / Fixed64ToDouble) so the gather is an exact integer
 * sum. Converges when no vertex moves by >= arguments->epsilon. Caller
 * owns the returned stats object; stats->pageRanks ends up normalized
 * by num_vertices.
 *
 * Fix vs. previous revision: the gather accumulated the uint64_t
 * fixed-point contributions in a *float* before storing the sum back
 * into the uint64_t pageRanksNext slot. A float carries only 24 mantissa
 * bits, so the fixed-point fraction was discarded, defeating the point
 * of the fixed-point variant. The accumulator is now uint64_t, matching
 * the push fixed-point implementation. Float literals assigned to
 * uint64_t cells (0.0f) are also normalized to plain 0.
 */
struct PageRankStats *pageRankPullFixedPointGraphAdjArrayList(struct Arguments *arguments, struct GraphAdjArrayList *graph)
{
    double error_total = 0.0;
    uint32_t j;
    uint32_t v;
    uint32_t u;
    uint32_t degree;
    uint32_t activeVertices = 0;
    struct EdgeList *Nodes;
    struct PageRankStats *stats = newPageRankStatsGraphAdjArrayList(graph);
    struct Timer *timer = (struct Timer *) malloc(sizeof(struct Timer));
    struct Timer *timer_inner = (struct Timer *) malloc(sizeof(struct Timer));
    // Both working buffers hold 64-bit fixed-point values.
    uint64_t *pageRanksNext = (uint64_t *) my_malloc(graph->num_vertices * sizeof(uint64_t));
    uint64_t *riDividedOnDiClause = (uint64_t *) my_malloc(graph->num_vertices * sizeof(uint64_t));
    printf(" -----------------------------------------------------\n");
    printf("| %-51s | \n", "Starting Page Rank Pull FP (tolerance/epsilon)");
    printf(" -----------------------------------------------------\n");
    printf("| %-51.13lf | \n", arguments->epsilon);
    printf(" -----------------------------------------------------\n");
    printf("| %-10s | %-8s | %-15s | %-9s | \n", "Iteration", "Active", "Error", "Time (S)");
    printf(" -----------------------------------------------------\n");
    Start(timer);
    #pragma omp parallel for default(none) private(v) shared(graph,pageRanksNext)
    for(v = 0; v < graph->num_vertices; v++)
    {
        pageRanksNext[v] = 0;
    }
    for(stats->iterations = 0; stats->iterations < arguments->iterations; stats->iterations++)
    {
        error_total = 0;
        activeVertices = 0;
        Start(timer_inner);
        // Phase 1: precompute PR(v)/outDegree(v) in fixed-point form.
        #pragma omp parallel for
        for(v = 0; v < graph->num_vertices; v++)
        {
            if(graph->vertices[v].out_degree)
                riDividedOnDiClause[v] = DoubleToFixed64(stats->pageRanks[v] / graph->vertices[v].out_degree);
            else
                riDividedOnDiClause[v] = 0;
        }
        // Phase 2: exact integer gather of neighbour contributions.
        #pragma omp parallel for reduction(+ : error_total,activeVertices) private(v,j,u,degree,Nodes) schedule(dynamic, 1024)
        for(v = 0; v < graph->num_vertices; v++)
        {
            uint64_t nodeIncomingPR = 0;
#if DIRECTED // will look at the other neighbours if directed by using inverese edge list
            Nodes = graph->vertices[v].inNodes;
            degree = graph->vertices[v].in_degree;
#else
            Nodes = graph->vertices[v].outNodes;
            degree = graph->vertices[v].out_degree;
#endif
            for(j = 0 ; j < (degree) ; j++)
            {
                u = Nodes->edges_array_dest[j];
                nodeIncomingPR += riDividedOnDiClause[u];
            }
            pageRanksNext[v] = nodeIncomingPR;
        }
        // Phase 3: convert back to floating point, apply damping, track error.
        #pragma omp parallel for private(v) shared(arguments, pageRanksNext,stats) reduction(+ : error_total, activeVertices)
        for(v = 0; v < graph->num_vertices; v++)
        {
            float prevPageRank = stats->pageRanks[v];
            float nextPageRank = stats->base_pr + (stats->damp * Fixed64ToDouble(pageRanksNext[v]));
            stats->pageRanks[v] = nextPageRank;
            pageRanksNext[v] = 0;
            double error = fabs( nextPageRank - prevPageRank);
            error_total += (error / graph->num_vertices);
            if(error >= arguments->epsilon)
            {
                activeVertices++;
            }
        }
        Stop(timer_inner);
        printf("| %-10u | %-8u | %-15.13lf | %-9f | \n", stats->iterations, activeVertices, error_total, Seconds(timer_inner));
        if(activeVertices == 0)
            break;
    }// end iteration loop
    // Normalize ranks by vertex count and report the final sum.
    double sum = 0.0;
    #pragma omp parallel for reduction(+:sum)
    for(v = 0; v < graph->num_vertices; v++)
    {
        stats->pageRanks[v] = stats->pageRanks[v] / graph->num_vertices;
        sum += stats->pageRanks[v];
    }
    Stop(timer);
    stats->time_total = Seconds(timer);
    printf(" -----------------------------------------------------\n");
    printf("| %-10s | %-8s | %-15s | %-9s | \n", "Iterations", "PR Sum", "Error", "Time (S)");
    printf(" -----------------------------------------------------\n");
    printf("| %-10u | %-8lf | %-15.13lf | %-9f | \n", stats->iterations, sum, error_total, stats->time_total);
    printf(" -----------------------------------------------------\n");
    free(timer);
    free(timer_inner);
    free(pageRanksNext);
    free(riDividedOnDiClause);
    stats->error_total = error_total;
    return stats;
}
// Fixed-point PageRank, push direction, adjacency-array-list graph.
// Contributions are carried as 64-bit fixed-point integers and scattered
// to out-neighbours with "#pragma omp atomic update", so the accumulation
// is an exact integer sum. Caller owns the returned stats object.
struct PageRankStats *pageRankPushFixedPointGraphAdjArrayList(struct Arguments *arguments, struct GraphAdjArrayList *graph)
{
double error_total = 0.0;
uint32_t i;
uint32_t v;
// double error = 0;
uint32_t activeVertices = 0;
// float init_pr = 1.0f / (float)graph->num_vertices;
struct PageRankStats *stats = newPageRankStatsGraphAdjArrayList(graph);
struct Timer *timer = (struct Timer *) malloc(sizeof(struct Timer));
struct Timer *timer_inner = (struct Timer *) malloc(sizeof(struct Timer));
struct EdgeList *Nodes;
// NOTE(review): vertex_lock is initialized and destroyed but never
// locked — the lock-based scatter is commented out below in favor of
// the atomic update.
omp_lock_t *vertex_lock = (omp_lock_t *) my_malloc( graph->num_vertices * sizeof(omp_lock_t));
#pragma omp parallel for default(none) private(i) shared(graph,vertex_lock)
for (i = 0; i < graph->num_vertices; i++)
{
omp_init_lock(&(vertex_lock[i]));
}
// Both working buffers hold 64-bit fixed-point values.
uint64_t *pageRanksNext = (uint64_t *) my_malloc(graph->num_vertices * sizeof(uint64_t));
uint64_t *riDividedOnDiClause = (uint64_t *) my_malloc(graph->num_vertices * sizeof(uint64_t));
printf(" -----------------------------------------------------\n");
printf("| %-51s | \n", "Starting Page Rank Push FP (tolerance/epsilon)");
printf(" -----------------------------------------------------\n");
printf("| %-51.13lf | \n", arguments->epsilon);
printf(" -----------------------------------------------------\n");
printf("| %-10s | %-8s | %-15s | %-9s | \n", "Iteration", "Active", "Error", "Time (S)");
printf(" -----------------------------------------------------\n");
Start(timer);
#pragma omp parallel for default(none) private(v) shared(pageRanksNext,graph)
for(v = 0; v < graph->num_vertices; v++)
{
pageRanksNext[v] = 0.0f; // float literal converts to uint64_t 0
}
for(stats->iterations = 0; stats->iterations < arguments->iterations; stats->iterations++)
{
Start(timer_inner);
error_total = 0;
activeVertices = 0;
// Phase 1: precompute PR(v)/outDegree(v) in fixed-point form.
#pragma omp parallel for private(v) shared(riDividedOnDiClause,stats,graph)
for(v = 0; v < graph->num_vertices; v++)
{
if(graph->vertices[v].out_degree)
riDividedOnDiClause[v] = DoubleToFixed64(stats->pageRanks[v] / graph->vertices[v].out_degree);
else
riDividedOnDiClause[v] = 0.0f; // converts to uint64_t 0
}
// Phase 2: atomic integer scatter along out-edges.
#pragma omp parallel for default(none) private(v,Nodes) shared(graph,pageRanksNext,riDividedOnDiClause) schedule(dynamic, 1024)
for(v = 0; v < graph->num_vertices; v++)
{
Nodes = graph->vertices[v].outNodes;
uint32_t degree = graph->vertices[v].out_degree;
// uint32_t tid = omp_get_thread_num();
uint32_t j;
for(j = 0 ; j < (degree) ; j++)
{
uint32_t u = Nodes->edges_array_dest[j];
// omp_set_lock(&(vertex_lock[u]));
// pageRanksNext[u] += riDividedOnDiClause[v];
// omp_unset_lock((&vertex_lock[u]));
#pragma omp atomic update
pageRanksNext[u] += riDividedOnDiClause[v];
// __atomic_fetch_add(&pageRanksNext[u], riDividedOnDiClause[v], __ATOMIC_RELAXED);
// printf("tid %u degree %u edge_idx %u v %u u %u \n",tid,degree,edge_idx,v,u );
// addAtomicFloat(&pageRanksNext[u] , riDividedOnDiClause[v]);
}
}
// Phase 3: convert back to floating point, apply damping, track error.
#pragma omp parallel for private(v) shared(arguments, pageRanksNext,stats) reduction(+ : error_total, activeVertices)
for(v = 0; v < graph->num_vertices; v++)
{
float prevPageRank = stats->pageRanks[v];
float nextPageRank = stats->base_pr + (stats->damp * Fixed64ToDouble(pageRanksNext[v]));
stats->pageRanks[v] = nextPageRank;
pageRanksNext[v] = 0.0f;
double error = fabs( nextPageRank - prevPageRank);
error_total += (error / graph->num_vertices);
if(error >= arguments->epsilon)
{
activeVertices++;
}
}
Stop(timer_inner);
printf("| %-10u | %-8u | %-15.13lf | %-9f | \n", stats->iterations, activeVertices, error_total, Seconds(timer_inner));
if(activeVertices == 0)
break;
}// end iteration loop
// Normalize ranks by vertex count and report the final sum.
double sum = 0.0f;
#pragma omp parallel for reduction(+:sum)
for(v = 0; v < graph->num_vertices; v++)
{
stats->pageRanks[v] = stats->pageRanks[v] / graph->num_vertices;
sum += stats->pageRanks[v];
}
Stop(timer);
stats->time_total = Seconds(timer);
printf(" -----------------------------------------------------\n");
printf("| %-10s | %-8s | %-15s | %-9s | \n", "Iterations", "PR Sum", "Error", "Time (S)");
printf(" -----------------------------------------------------\n");
printf("| %-10u | %-8lf | %-15.13lf | %-9f | \n", stats->iterations, sum, error_total, stats->time_total);
printf(" -----------------------------------------------------\n");
// pageRankPrint(pageRanks, graph->num_vertices);
#pragma omp parallel for
for (i = 0; i < graph->num_vertices; i++)
{
omp_destroy_lock(&(vertex_lock[i]));
}
free(timer);
free(timer_inner);
free(vertex_lock);
free(pageRanksNext);
free(riDividedOnDiClause);
stats->error_total = error_total;
return stats;
}
// Data-driven PageRank, pull direction, adjacency-array-list graph.
// Only vertices on the current worklist recompute their rank; a vertex
// that changes by >= arguments->epsilon schedules all of its
// out-neighbours on the next worklist. Terminates when the worklist
// empties or arguments->iterations is exhausted. Caller owns the
// returned stats object.
struct PageRankStats *pageRankDataDrivenPullGraphAdjArrayList(struct Arguments *arguments, struct GraphAdjArrayList *graph)
{
double error_total = 0.0;
uint32_t i;
uint32_t v;
// float init_pr = 1.0f / (float)graph->num_vertices;
struct PageRankStats *stats = newPageRankStatsGraphAdjArrayList(graph);
struct Timer *timer = (struct Timer *) malloc(sizeof(struct Timer));
struct Timer *timer_inner = (struct Timer *) malloc(sizeof(struct Timer));
uint8_t *workListCurr = NULL;
uint8_t *workListNext = NULL;
int activeVertices = 0;
struct EdgeList *Nodes;
workListCurr = (uint8_t *) my_malloc(graph->num_vertices * sizeof(uint8_t));
workListNext = (uint8_t *) my_malloc(graph->num_vertices * sizeof(uint8_t));
resetWorkList(workListNext, graph->num_vertices);
resetWorkList(workListCurr, graph->num_vertices);
float *riDividedOnDiClause = (float *) my_malloc(graph->num_vertices * sizeof(float));
printf(" -----------------------------------------------------\n");
printf("| %-51s | \n", "Starting Page Rank Pull DD (tolerance/epsilon)");
printf(" -----------------------------------------------------\n");
printf("| %-51.13lf | \n", arguments->epsilon);
printf(" -----------------------------------------------------\n");
printf("| %-10s | %-8s | %-15s | %-9s | \n", "Iteration", "Active", "Error", "Time (S)");
printf(" -----------------------------------------------------\n");
Start(timer);
Start(timer_inner);
// Initialization: put every vertex on the first worklist.
#pragma omp parallel for reduction(+:activeVertices)
for(i = 0; i < graph->num_vertices; i++)
{
workListNext[i] = 1;
activeVertices++;
}
swapWorkLists(&workListNext, &workListCurr);
resetWorkList(workListNext, graph->num_vertices);
Stop(timer_inner);
printf("| %-10s | %-8u | %-15.13lf | %-9f | \n", "Init", activeVertices, error_total, Seconds(timer_inner));
for(stats->iterations = 0; stats->iterations < arguments->iterations; stats->iterations++)
{
Start(timer_inner);
error_total = 0;
activeVertices = 0;
// Phase 1: precompute each vertex's outgoing contribution.
#pragma omp parallel for
for(v = 0; v < graph->num_vertices; v++)
{
if(graph->vertices[v].out_degree)
riDividedOnDiClause[v] = stats->pageRanks[v] / graph->vertices[v].out_degree;
else
riDividedOnDiClause[v] = 0.0f;
}
// Phase 2: pull for worklisted vertices; activate out-neighbours of
// any vertex whose rank moved by >= epsilon.
#pragma omp parallel for default(none) shared(arguments,riDividedOnDiClause,workListCurr,workListNext,stats,graph) private(v,Nodes) reduction(+:activeVertices,error_total) schedule(dynamic, 1024)
for(v = 0; v < graph->num_vertices; v++)
{
if(workListCurr[v])
{
uint32_t degree;
uint32_t j;
uint32_t u;
double error = 0;
float nodeIncomingPR = 0;
#if DIRECTED // will look at the other neighbours if directed by using inverese edge list
Nodes = graph->vertices[v].inNodes;
degree = graph->vertices[v].in_degree;
#else
Nodes = graph->vertices[v].outNodes;
degree = graph->vertices[v].out_degree;
#endif
for(j = 0 ; j < (degree) ; j++)
{
u = Nodes->edges_array_dest[j];
nodeIncomingPR += riDividedOnDiClause[u]; // sum (PRi/outDegree(i))
}
float oldPageRank = stats->pageRanks[v];
float newPageRank = stats->base_pr + (stats->damp * nodeIncomingPR);
error = fabs(newPageRank - oldPageRank);
error_total += error / graph->num_vertices;
if(error >= arguments->epsilon)
{
stats->pageRanks[v] = newPageRank;
// Wake this vertex's out-neighbours for the next iteration.
Nodes = graph->vertices[v].outNodes;
degree = graph->vertices[v].out_degree;
for(j = 0 ; j < (degree) ; j++)
{
u = Nodes->edges_array_dest[j];
#pragma omp atomic write
workListNext[u] = 1;
// uint8_t old_val = workListNext[u];
// if(!old_val){
// __sync_bool_compare_and_swap(&workListNext[u], 0, 1);
// }
}
activeVertices++;
}
}
}
// activeVertices = getNumOfSetBits(workListNext);
swapWorkLists(&workListNext, &workListCurr);
resetWorkList(workListNext, graph->num_vertices);
Stop(timer_inner);
printf("| %-10u | %-8u | %-15.13lf | %-9f | \n", stats->iterations, activeVertices, error_total, Seconds(timer_inner));
if(activeVertices == 0)
break;
}// end iteration loop
// Normalize ranks by vertex count and report the final sum.
double sum = 0.0f;
#pragma omp parallel for reduction(+:sum)
for(v = 0; v < graph->num_vertices; v++)
{
stats->pageRanks[v] = stats->pageRanks[v] / graph->num_vertices;
sum += stats->pageRanks[v];
}
Stop(timer);
stats->time_total = Seconds(timer);
printf(" -----------------------------------------------------\n");
printf("| %-10s | %-8s | %-15s | %-9s | \n", "Iterations", "PR Sum", "Error", "Time (S)");
printf(" -----------------------------------------------------\n");
printf("| %-10u | %-8lf | %-15.13lf | %-9f | \n", stats->iterations, sum, error_total, stats->time_total);
printf(" -----------------------------------------------------\n");
// pageRankPrint(pageRanks, graph->num_vertices);
free(workListCurr);
free(workListNext);
free(timer);
free(timer_inner);
free(riDividedOnDiClause);
stats->error_total = error_total;
return stats;
}
// Data-driven PageRank, push direction, adjacency-array-list graph.
// Worklisted vertices absorb their accumulated residual into their rank,
// then push damp*residual/outDegree to each out-neighbour; a neighbour
// whose residual crosses arguments->epsilon joins the next worklist.
// Terminates when the worklist empties or arguments->iterations is
// exhausted. Caller owns the returned stats object.
struct PageRankStats *pageRankDataDrivenPushGraphAdjArrayList(struct Arguments *arguments, struct GraphAdjArrayList *graph)
{
double error_total = 0.0;
uint32_t v;
uint32_t degree;
uint32_t j;
uint32_t u;
// float init_pr = 1.0f / (float)graph->num_vertices;
struct PageRankStats *stats = newPageRankStatsGraphAdjArrayList(graph);
struct Timer *timer = (struct Timer *) malloc(sizeof(struct Timer));
struct Timer *timer_inner = (struct Timer *) malloc(sizeof(struct Timer));
uint8_t *workListCurr = NULL;
uint8_t *workListNext = NULL;
int activeVertices = 0;
struct EdgeList *Nodes;
workListCurr = (uint8_t *) my_malloc(graph->num_vertices * sizeof(uint8_t));
workListNext = (uint8_t *) my_malloc(graph->num_vertices * sizeof(uint8_t));
resetWorkList(workListNext, graph->num_vertices);
resetWorkList(workListCurr, graph->num_vertices);
// NOTE(review): riDividedOnDiClause is allocated and freed but not used
// in this variant.
float *riDividedOnDiClause = (float *) my_malloc(graph->num_vertices * sizeof(float));
float *aResiduals = (float *) my_malloc(graph->num_vertices * sizeof(float));
printf(" -----------------------------------------------------\n");
printf("| %-51s | \n", "Starting Page Rank Push DD (tolerance/epsilon)");
printf(" -----------------------------------------------------\n");
printf("| %-51.13lf | \n", arguments->epsilon);
printf(" -----------------------------------------------------\n");
printf("| %-10s | %-8s | %-15s | %-9s | \n", "Iteration", "Active", "Error", "Time (S)");
printf(" -----------------------------------------------------\n");
Start(timer);
Start(timer_inner);
// Initialization: activate every vertex and seed its residual with
// (1-d)*d * sum(1/outDegree(u)) over its (in-)neighbours u.
#pragma omp parallel for private(Nodes,degree,v,j,u) shared(workListCurr,workListNext,aResiduals) reduction(+:activeVertices)
for(v = 0; v < graph->num_vertices; v++)
{
aResiduals[v] = 0.0;
workListCurr[v] = 1;
workListNext[v] = 0;
activeVertices++;
#if DIRECTED // will look at the other neighbours if directed by using inverese edge list
Nodes = graph->vertices[v].inNodes;
degree = graph->vertices[v].in_degree;
#else
Nodes = graph->vertices[v].outNodes;
degree = graph->vertices[v].out_degree;
#endif
for(j = 0 ; j < (degree) ; j++)
{
u = Nodes->edges_array_dest[j];
if(graph->vertices[u].out_degree)
aResiduals[v] += 1.0f / graph->vertices[u].out_degree; // sum (PRi/outDegree(i))
}
aResiduals[v] = (1.0f - stats->damp) * stats->damp * aResiduals[v];
}
Stop(timer_inner);
printf("| %-10s | %-8u | %-15.13lf | %-9f | \n", "Init", activeVertices, error_total, Seconds(timer_inner));
for(stats->iterations = 0; stats->iterations < arguments->iterations; stats->iterations++)
{
Start(timer_inner);
error_total = 0;
activeVertices = 0;
#pragma omp parallel for default(none) private(Nodes,degree,v,j,u) shared(stats,arguments,graph,workListCurr,workListNext,aResiduals) reduction(+:error_total,activeVertices) schedule(dynamic,1024)
for(v = 0; v < graph->num_vertices; v++)
{
if(workListCurr[v])
{
// Absorb the residual into v's rank.
float oldPageRank = stats->pageRanks[v];
float newPageRank = aResiduals[v] + stats->pageRanks[v];
error_total += fabs(newPageRank / graph->num_vertices - oldPageRank / graph->num_vertices);
// #pragma omp atomic write
stats->pageRanks[v] = newPageRank;
// Push damp*residual/outDegree to each out-neighbour and activate
// those whose residual crosses epsilon.
Nodes = graph->vertices[v].outNodes;
degree = graph->vertices[v].out_degree;
float delta = stats->damp * (aResiduals[v] / degree);
for(j = 0 ; j < (degree) ; j++)
{
u = Nodes->edges_array_dest[j];
float prevResidual = 0.0f;
prevResidual = aResiduals[u];
#pragma omp atomic update
aResiduals[u] += delta;
if ((fabs(prevResidual + delta) >= arguments->epsilon) && (prevResidual <= arguments->epsilon))
{
activeVertices++;
if(!workListNext[u])
{
// #pragma omp atomic write
workListNext[u] = 1;
}
}
}
aResiduals[v] = 0.0f; // residual fully pushed out
}
}
// activeVertices = getNumOfSetBits(workListNext);
swapWorkLists(&workListNext, &workListCurr);
resetWorkList(workListNext, graph->num_vertices);
Stop(timer_inner);
printf("| %-10u | %-8u | %-15.13lf | %-9f | \n", stats->iterations, activeVertices, error_total, Seconds(timer_inner));
if(activeVertices == 0)
break;
}// end iteration loop
// Normalize ranks by vertex count and report the final sum.
double sum = 0.0f;
#pragma omp parallel for reduction(+:sum)
for(v = 0; v < graph->num_vertices; v++)
{
stats->pageRanks[v] = stats->pageRanks[v] / graph->num_vertices;
sum += stats->pageRanks[v];
}
Stop(timer);
stats->time_total = Seconds(timer);
printf(" -----------------------------------------------------\n");
printf("| %-10s | %-8s | %-15s | %-9s | \n", "Iterations", "PR Sum", "Error", "Time (S)");
printf(" -----------------------------------------------------\n");
printf("| %-10u | %-8lf | %-15.13lf | %-9f | \n", stats->iterations, sum, error_total, stats->time_total);
printf(" -----------------------------------------------------\n");
// pageRankPrint(pageRanks, graph->num_vertices);
free(workListCurr);
free(workListNext);
free(timer);
free(timer_inner);
free(aResiduals);
free(riDividedOnDiClause);
stats->error_total = error_total;
return stats;
}
// Data-driven pull-push PageRank over an adjacency-array-list graph.
// Hybrid scheme: each active vertex PULLs a fresh rank from its (in-)neighbours,
// then PUSHes a residual delta to its out-neighbours; a neighbour is
// (re)activated when its residual crosses arguments->epsilon.
// Worklists are dense uint8_t arrays swapped every iteration.
// Returns a heap-allocated PageRankStats (caller owns it); ranks are
// normalised by num_vertices before returning.
struct PageRankStats *pageRankDataDrivenPullPushGraphAdjArrayList(struct Arguments *arguments, struct GraphAdjArrayList *graph)
{
double error_total = 0.0;
uint32_t v;
uint32_t degree;
uint32_t j;
uint32_t u;
struct EdgeList *Nodes;
// float init_pr = 1.0f / (float)graph->num_vertices;
struct PageRankStats *stats = newPageRankStatsGraphAdjArrayList(graph);
struct Timer *timer = (struct Timer *) malloc(sizeof(struct Timer));
struct Timer *timer_inner = (struct Timer *) malloc(sizeof(struct Timer));
uint8_t *workListCurr = NULL;
uint8_t *workListNext = NULL;
int activeVertices = 0;
// Dense worklists: workListCurr[v] != 0 means v is processed this sweep.
workListCurr = (uint8_t *) my_malloc(graph->num_vertices * sizeof(uint8_t));
workListNext = (uint8_t *) my_malloc(graph->num_vertices * sizeof(uint8_t));
resetWorkList(workListNext, graph->num_vertices);
resetWorkList(workListCurr, graph->num_vertices);
// NOTE(review): riDividedOnDiClause is allocated and freed but never read or
// written in this function — dead allocation.
float *riDividedOnDiClause = (float *) my_malloc(graph->num_vertices * sizeof(float));
// aResiduals[v]: rank mass accumulated for v since it last ran.
float *aResiduals = (float *) my_malloc(graph->num_vertices * sizeof(float));
printf(" -----------------------------------------------------\n");
printf("| %-51s | \n", "Starting Page Rank Pull-Push DD (tolerance/epsilon)");
printf(" -----------------------------------------------------\n");
printf("| %-51.13lf | \n", arguments->epsilon);
printf(" -----------------------------------------------------\n");
printf("| %-10s | %-8s | %-15s | %-9s | \n", "Iteration", "Active", "Error", "Time (S)");
printf(" -----------------------------------------------------\n");
Start(timer);
Start(timer_inner);
// Init: activate every vertex and seed its residual from the neighbours'
// 1/out_degree contributions, scaled by damp*(1 - damp).
#pragma omp parallel for private(Nodes,degree,v,j,u) shared(workListCurr,workListNext,aResiduals) reduction(+:activeVertices)
for(v = 0; v < graph->num_vertices; v++)
{
aResiduals[v] = 0.0f;
workListCurr[v] = 1;
workListNext[v] = 0;
activeVertices++;
#if DIRECTED // will look at the other neighbours if directed by using inverese edge list
Nodes = graph->vertices[v].inNodes;
degree = graph->vertices[v].in_degree;
#else
Nodes = graph->vertices[v].outNodes;
degree = graph->vertices[v].out_degree;
#endif
for(j = 0 ; j < (degree) ; j++)
{
u = Nodes->edges_array_dest[j];
if(graph->vertices[u].out_degree)
aResiduals[v] += 1.0f / graph->vertices[u].out_degree; // sum (PRi/outDegree(i))
}
aResiduals[v] = (1.0f - stats->damp) * stats->damp * aResiduals[v];
}
Stop(timer_inner);
printf("| %-10s | %-8u | %-15.13lf | %-9f | \n", "Init", activeVertices, error_total, Seconds(timer_inner));
for(stats->iterations = 0; stats->iterations < arguments->iterations; stats->iterations++)
{
Start(timer_inner);
error_total = 0;
activeVertices = 0;
#pragma omp parallel for default(none) private(Nodes,degree,v,j,u) shared(stats,arguments,graph,workListCurr,workListNext,aResiduals) reduction(+:error_total,activeVertices) schedule(dynamic,1024)
for(v = 0; v < graph->num_vertices; v++)
{
if(workListCurr[v])
{
// Pull phase: recompute v's rank from its neighbours' current ranks.
// NOTE(review): no zero guard on the neighbour's out_degree here —
// confirm vertices with out_degree == 0 cannot appear as sources.
float nodeIncomingPR = 0.0f;
#if DIRECTED // will look at the other neighbours if directed by using inverese edge list
Nodes = graph->vertices[v].inNodes;
degree = graph->vertices[v].in_degree;
#else
Nodes = graph->vertices[v].outNodes;
degree = graph->vertices[v].out_degree;
#endif
for(j = 0 ; j < (degree) ; j++)
{
u = Nodes->edges_array_dest[j];
nodeIncomingPR += stats->pageRanks[u] / graph->vertices[u].out_degree;
}
float newPageRank = stats->base_pr + (stats->damp * nodeIncomingPR);
float oldPageRank = stats->pageRanks[v];
// float newPageRank = aResiduals[v]+pageRanks[v];
error_total += fabs(newPageRank / graph->num_vertices - oldPageRank / graph->num_vertices);
#pragma omp atomic write
stats->pageRanks[v] = newPageRank;
// Push phase: spread v's residual to out-neighbours; wake a neighbour
// whose residual just crossed epsilon.
Nodes = graph->vertices[v].outNodes;
degree = graph->vertices[v].out_degree;
// NOTE(review): divides by degree with no zero guard; an active vertex
// with out_degree == 0 would yield inf/NaN delta — TODO confirm.
float delta = stats->damp * (aResiduals[v] / degree);
for(j = 0 ; j < (degree) ; j++)
{
// This local u deliberately shadows the function-scope u above.
uint32_t u = Nodes->edges_array_dest[j];
float prevResidual = 0.0f;
prevResidual = aResiduals[u];
#pragma omp atomic update
aResiduals[u] += delta;
if ((fabs(prevResidual + delta) >= arguments->epsilon) && (prevResidual <= arguments->epsilon))
{
activeVertices++;
if(!workListNext[u])
{
workListNext[u] = 1;
}
}
}
aResiduals[v] = 0.0f;
}
}
// activeVertices = getNumOfSetBits(workListNext);
swapWorkLists(&workListNext, &workListCurr);
resetWorkList(workListNext, graph->num_vertices);
Stop(timer_inner);
printf("| %-10u | %-8u | %-15.13lf | %-9f | \n", stats->iterations, activeVertices, error_total, Seconds(timer_inner));
if(activeVertices == 0)
break;
}// end iteration loop
// Normalise ranks by vertex count and report their sum.
double sum = 0.0f;
#pragma omp parallel for reduction(+:sum)
for(v = 0; v < graph->num_vertices; v++)
{
stats->pageRanks[v] = stats->pageRanks[v] / graph->num_vertices;
sum += stats->pageRanks[v];
}
Stop(timer);
stats->time_total = Seconds(timer);
printf(" -----------------------------------------------------\n");
printf("| %-10s | %-8s | %-15s | %-9s | \n", "Iterations", "PR Sum", "Error", "Time (S)");
printf(" -----------------------------------------------------\n");
printf("| %-10u | %-8lf | %-15.13lf | %-9f | \n", stats->iterations, sum, error_total, stats->time_total);
printf(" -----------------------------------------------------\n");
// pageRankPrint(pageRanks, graph->num_vertices);
free(workListCurr);
free(workListNext);
free(timer);
free(timer_inner);
free(aResiduals);
free(riDividedOnDiClause);
stats->error_total = error_total;
return stats;
}
// ********************************************************************************************
// *************** LinkedList DataStructure **************
// ********************************************************************************************
struct PageRankStats *pageRankGraphAdjLinkedList(struct Arguments *arguments, struct GraphAdjLinkedList *graph)
{
struct PageRankStats *stats = NULL;
switch (arguments->pushpull)
{
case 0: // pull
stats = pageRankPullGraphAdjLinkedList(arguments, graph);
break;
case 1: // push
stats = pageRankPushGraphAdjLinkedList(arguments, graph);
break;
case 2: // pull
stats = pageRankPullFixedPointGraphAdjLinkedList(arguments, graph);
break;
case 3: // push
stats = pageRankPushFixedPointGraphAdjLinkedList(arguments, graph);
break;
case 4: // pull
stats = pageRankDataDrivenPullGraphAdjLinkedList(arguments, graph);
break;
case 5: // push
stats = pageRankDataDrivenPushGraphAdjLinkedList(arguments, graph);
break;
case 6: // pullpush
stats = pageRankDataDrivenPullPushGraphAdjLinkedList(arguments, graph);
break;
default:// push
stats = pageRankPullGraphAdjLinkedList(arguments, graph);
break;
}
return stats;
}
// Topology-driven pull PageRank over an adjacency-linked-list graph.
// Per iteration: (1) cache each vertex's rank/out_degree, (2) every vertex
// gathers (pulls) those cached contributions from its (in-)neighbours,
// (3) apply damping, measure per-vertex error, count "active" vertices whose
// change is still >= arguments->epsilon. Stops early when none remain.
// Returns heap-allocated PageRankStats (caller owns it); ranks are
// normalised by num_vertices before returning.
struct PageRankStats *pageRankPullGraphAdjLinkedList(struct Arguments *arguments, struct GraphAdjLinkedList *graph)
{
double error_total = 0.0;
uint32_t j;
uint32_t v;
uint32_t u;
uint32_t degree;
uint32_t activeVertices = 0;
struct AdjLinkedListNode *Nodes;
// float init_pr = 1.0f / (float)graph->num_vertices;
struct PageRankStats *stats = newPageRankStatsGraphAdjLinkedList(graph);
struct Timer *timer = (struct Timer *) malloc(sizeof(struct Timer));
struct Timer *timer_inner = (struct Timer *) malloc(sizeof(struct Timer));
// pageRanksNext[v]: gathered (pre-damping) contribution sum for v.
// riDividedOnDiClause[v]: pageRanks[v]/out_degree(v), or 0 for sinks.
float *pageRanksNext = (float *) my_malloc(graph->num_vertices * sizeof(float));
float *riDividedOnDiClause = (float *) my_malloc(graph->num_vertices * sizeof(float));
printf(" -----------------------------------------------------\n");
printf("| %-51s | \n", "Starting Page Rank Pull (tolerance/epsilon)");
printf(" -----------------------------------------------------\n");
printf("| %-51.13lf | \n", arguments->epsilon);
printf(" -----------------------------------------------------\n");
printf("| %-10s | %-8s | %-15s | %-9s | \n", "Iteration", "Active", "Error", "Time (S)");
printf(" -----------------------------------------------------\n");
Start(timer);
#pragma omp parallel for default(none) private(v) shared(graph,pageRanksNext)
for(v = 0; v < graph->num_vertices; v++)
{
pageRanksNext[v] = 0;
}
for(stats->iterations = 0; stats->iterations < arguments->iterations; stats->iterations++)
{
error_total = 0;
activeVertices = 0;
Start(timer_inner);
// Phase 1: cache per-vertex contribution; sinks contribute 0.
#pragma omp parallel for
for(v = 0; v < graph->num_vertices; v++)
{
if(graph->vertices[v].out_degree)
riDividedOnDiClause[v] = stats->pageRanks[v] / graph->vertices[v].out_degree;
else
riDividedOnDiClause[v] = 0.0f;
}
// Phase 2: gather. Each v walks its own neighbour list, so writes to
// pageRanksNext[v] need no atomics. (error_total/activeVertices are in the
// reduction clause but are not modified in this loop.)
#pragma omp parallel for reduction(+ : error_total,activeVertices) private(v,j,u,degree,Nodes) schedule(dynamic, 1024)
for(v = 0; v < graph->num_vertices; v++)
{
float nodeIncomingPR = 0.0f;
#if DIRECTED // will look at the other neighbours if directed by using inverese edge list
Nodes = graph->vertices[v].inNodes;
degree = graph->vertices[v].in_degree;
#else
Nodes = graph->vertices[v].outNodes;
degree = graph->vertices[v].out_degree;
#endif
for(j = 0 ; j < (degree) ; j++)
{
u = Nodes->dest;
Nodes = Nodes->next;
nodeIncomingPR += riDividedOnDiClause[u]; // stats->pageRanks[v]/graph->vertices[v].out_degree;
}
pageRanksNext[v] = nodeIncomingPR;
}
// Phase 3: apply damping, accumulate normalised error, count vertices
// whose rank still moved by >= epsilon.
#pragma omp parallel for private(v) shared(arguments, pageRanksNext,stats) reduction(+ : error_total, activeVertices)
for(v = 0; v < graph->num_vertices; v++)
{
float prevPageRank = stats->pageRanks[v];
float nextPageRank = stats->base_pr + (stats->damp * pageRanksNext[v]);
stats->pageRanks[v] = nextPageRank;
pageRanksNext[v] = 0.0f;
double error = fabs( nextPageRank - prevPageRank);
error_total += (error / graph->num_vertices);
if(error >= arguments->epsilon)
{
activeVertices++;
}
}
Stop(timer_inner);
printf("| %-10u | %-8u | %-15.13lf | %-9f | \n", stats->iterations, activeVertices, error_total, Seconds(timer_inner));
if(activeVertices == 0)
break;
}// end iteration loop
// Normalise ranks by vertex count and report their sum.
double sum = 0.0f;
#pragma omp parallel for reduction(+:sum)
for(v = 0; v < graph->num_vertices; v++)
{
stats->pageRanks[v] = stats->pageRanks[v] / graph->num_vertices;
sum += stats->pageRanks[v];
}
Stop(timer);
stats->time_total = Seconds(timer);
printf(" -----------------------------------------------------\n");
printf("| %-10s | %-8s | %-15s | %-9s | \n", "Iterations", "PR Sum", "Error", "Time (S)");
printf(" -----------------------------------------------------\n");
printf("| %-10u | %-8lf | %-15.13lf | %-9f | \n", stats->iterations, sum, error_total, stats->time_total);
printf(" -----------------------------------------------------\n");
// printf(" -----------------------------------------------------\n");
// printf("| %-10s | %-8lf | %-15s | %-9s | \n","PR Sum ",sum, stats->iterations, stats->time_total);
// printf(" -----------------------------------------------------\n");
// pageRankPrint(pageRanks, graph->num_vertices);
free(timer);
free(timer_inner);
free(pageRanksNext);
free(riDividedOnDiClause);
stats->error_total = error_total;
return stats;
}
struct PageRankStats *pageRankPushGraphAdjLinkedList(struct Arguments *arguments, struct GraphAdjLinkedList *graph)
{
double error_total = 0.0;
uint32_t i;
uint32_t v;
// double error = 0;
uint32_t activeVertices = 0;
// float init_pr = 1.0f / (float)graph->num_vertices;
struct PageRankStats *stats = newPageRankStatsGraphAdjLinkedList(graph);
struct Timer *timer = (struct Timer *) malloc(sizeof(struct Timer));
struct Timer *timer_inner = (struct Timer *) malloc(sizeof(struct Timer));
struct AdjLinkedListNode *Nodes;
omp_lock_t *vertex_lock = (omp_lock_t *) my_malloc( graph->num_vertices * sizeof(omp_lock_t));
#pragma omp parallel for default(none) private(i) shared(graph,vertex_lock)
for (i = 0; i < graph->num_vertices; i++)
{
omp_init_lock(&(vertex_lock[i]));
}
float *pageRanksNext = (float *) my_malloc(graph->num_vertices * sizeof(float));
float *riDividedOnDiClause = (float *) my_malloc(graph->num_vertices * sizeof(float));
printf(" -----------------------------------------------------\n");
printf("| %-51s | \n", "Starting Page Rank Push (tolerance/epsilon)");
printf(" -----------------------------------------------------\n");
printf("| %-51.13lf | \n", arguments->epsilon);
printf(" -----------------------------------------------------\n");
printf("| %-10s | %-8s | %-15s | %-9s | \n", "Iteration", "Active", "Error", "Time (S)");
printf(" -----------------------------------------------------\n");
Start(timer);
#pragma omp parallel for default(none) private(v) shared(pageRanksNext,graph)
for(v = 0; v < graph->num_vertices; v++)
{
pageRanksNext[v] = 0.0f;
}
for(stats->iterations = 0; stats->iterations < arguments->iterations; stats->iterations++)
{
Start(timer_inner);
error_total = 0;
activeVertices = 0;
#pragma omp parallel for private(v) shared(riDividedOnDiClause,stats,graph)
for(v = 0; v < graph->num_vertices; v++)
{
if(graph->vertices[v].out_degree)
riDividedOnDiClause[v] = stats->pageRanks[v] / graph->vertices[v].out_degree;
else
riDividedOnDiClause[v] = 0.0f;
}
#pragma omp parallel for default(none) private(v,Nodes) shared(graph,pageRanksNext,riDividedOnDiClause) schedule(dynamic, 1024)
for(v = 0; v < graph->num_vertices; v++)
{
Nodes = graph->vertices[v].outNodes;
uint32_t degree = graph->vertices[v].out_degree;
// uint32_t tid = omp_get_thread_num();
uint32_t j;
for(j = 0 ; j < (degree) ; j++)
{
uint32_t u = Nodes->dest;
Nodes = Nodes->next;
// omp_set_lock(&(vertex_lock[u]));
// pageRanksNext[u] += riDividedOnDiClause[v];
// omp_unset_lock((&vertex_lock[u]));
#pragma omp atomic update
pageRanksNext[u] += riDividedOnDiClause[v];
// __atomic_fetch_add(&pageRanksNext[u], riDividedOnDiClause[v], __ATOMIC_RELAXED);
// printf("tid %u degree %u edge_idx %u v %u u %u \n",tid,degree,edge_idx,v,u );
// addAtomicFloat(&pageRanksNext[u] , riDividedOnDiClause[v]);
}
}
#pragma omp parallel for private(v) shared(arguments, pageRanksNext,stats) reduction(+ : error_total, activeVertices)
for(v = 0; v < graph->num_vertices; v++)
{
float prevPageRank = stats->pageRanks[v];
float nextPageRank = stats->base_pr + (stats->damp * pageRanksNext[v]);
stats->pageRanks[v] = nextPageRank;
pageRanksNext[v] = 0.0f;
double error = fabs( nextPageRank - prevPageRank);
error_total += (error / graph->num_vertices);
if(error >= arguments->epsilon)
{
activeVertices++;
}
}
Stop(timer_inner);
printf("| %-10u | %-8u | %-15.13lf | %-9f | \n", stats->iterations, activeVertices, error_total, Seconds(timer_inner));
if(activeVertices == 0)
break;
}// end iteration loop
double sum = 0.0f;
#pragma omp parallel for reduction(+:sum)
for(v = 0; v < graph->num_vertices; v++)
{
stats->pageRanks[v] = stats->pageRanks[v] / graph->num_vertices;
sum += stats->pageRanks[v];
}
Stop(timer);
stats->time_total = Seconds(timer);
printf(" -----------------------------------------------------\n");
printf("| %-10s | %-8s | %-15s | %-9s | \n", "Iterations", "PR Sum", "Error", "Time (S)");
printf(" -----------------------------------------------------\n");
printf("| %-10u | %-8lf | %-15.13lf | %-9f | \n", stats->iterations, sum, error_total, stats->time_total);
printf(" -----------------------------------------------------\n");
// pageRankPrint(pageRanks, graph->num_vertices);
#pragma omp parallel for
for (i = 0; i < graph->num_vertices; i++)
{
omp_destroy_lock(&(vertex_lock[i]));
}
free(timer);
free(timer_inner);
free(vertex_lock);
free(pageRanksNext);
free(riDividedOnDiClause);
stats->error_total = error_total;
return stats;
}
struct PageRankStats *pageRankPullFixedPointGraphAdjLinkedList(struct Arguments *arguments, struct GraphAdjLinkedList *graph)
{
double error_total = 0.0;
uint32_t j;
uint32_t v;
uint32_t u;
uint32_t degree;
uint32_t activeVertices = 0;
struct AdjLinkedListNode *Nodes;
// float init_pr = 1.0f / (float)graph->num_vertices;
struct PageRankStats *stats = newPageRankStatsGraphAdjLinkedList(graph);
struct Timer *timer = (struct Timer *) malloc(sizeof(struct Timer));
struct Timer *timer_inner = (struct Timer *) malloc(sizeof(struct Timer));
uint64_t *pageRanksNext = (uint64_t *) my_malloc(graph->num_vertices * sizeof(uint64_t));
uint64_t *riDividedOnDiClause = (uint64_t *) my_malloc(graph->num_vertices * sizeof(uint64_t));
printf(" -----------------------------------------------------\n");
printf("| %-51s | \n", "Starting Page Rank Pull FP (tolerance/epsilon)");
printf(" -----------------------------------------------------\n");
printf("| %-51.13lf | \n", arguments->epsilon);
printf(" -----------------------------------------------------\n");
printf("| %-10s | %-8s | %-15s | %-9s | \n", "Iteration", "Active", "Error", "Time (S)");
printf(" -----------------------------------------------------\n");
Start(timer);
#pragma omp parallel for default(none) private(v) shared(graph,pageRanksNext)
for(v = 0; v < graph->num_vertices; v++)
{
pageRanksNext[v] = 0;
}
for(stats->iterations = 0; stats->iterations < arguments->iterations; stats->iterations++)
{
error_total = 0;
activeVertices = 0;
Start(timer_inner);
#pragma omp parallel for
for(v = 0; v < graph->num_vertices; v++)
{
if(graph->vertices[v].out_degree)
riDividedOnDiClause[v] = DoubleToFixed64(stats->pageRanks[v] / graph->vertices[v].out_degree);
else
riDividedOnDiClause[v] = 0.0f;
}
#pragma omp parallel for reduction(+ : error_total,activeVertices) private(v,j,u,degree,Nodes) schedule(dynamic, 1024)
for(v = 0; v < graph->num_vertices; v++)
{
float nodeIncomingPR = 0.0f;
#if DIRECTED // will look at the other neighbours if directed by using inverese edge list
Nodes = graph->vertices[v].inNodes;
degree = graph->vertices[v].in_degree;
#else
Nodes = graph->vertices[v].outNodes;
degree = graph->vertices[v].out_degree;
#endif
for(j = 0 ; j < (degree) ; j++)
{
u = Nodes->dest;
Nodes = Nodes->next;
nodeIncomingPR += riDividedOnDiClause[u]; // stats->pageRanks[v]/graph->vertices[v].out_degree;
}
pageRanksNext[v] = nodeIncomingPR;
}
#pragma omp parallel for private(v) shared(arguments, pageRanksNext,stats) reduction(+ : error_total, activeVertices)
for(v = 0; v < graph->num_vertices; v++)
{
float prevPageRank = stats->pageRanks[v];
float nextPageRank = stats->base_pr + (stats->damp * Fixed64ToDouble(pageRanksNext[v]));
stats->pageRanks[v] = nextPageRank;
pageRanksNext[v] = 0.0f;
double error = fabs( nextPageRank - prevPageRank);
error_total += (error / graph->num_vertices);
if(error >= arguments->epsilon)
{
activeVertices++;
}
}
Stop(timer_inner);
printf("| %-10u | %-8u | %-15.13lf | %-9f | \n", stats->iterations, activeVertices, error_total, Seconds(timer_inner));
if(activeVertices == 0)
break;
}// end iteration loop
double sum = 0.0f;
#pragma omp parallel for reduction(+:sum)
for(v = 0; v < graph->num_vertices; v++)
{
stats->pageRanks[v] = stats->pageRanks[v] / graph->num_vertices;
sum += stats->pageRanks[v];
}
Stop(timer);
stats->time_total = Seconds(timer);
printf(" -----------------------------------------------------\n");
printf("| %-10s | %-8s | %-15s | %-9s | \n", "Iterations", "PR Sum", "Error", "Time (S)");
printf(" -----------------------------------------------------\n");
printf("| %-10u | %-8lf | %-15.13lf | %-9f | \n", stats->iterations, sum, error_total, stats->time_total);
printf(" -----------------------------------------------------\n");
// printf(" -----------------------------------------------------\n");
// printf("| %-10s | %-8lf | %-15s | %-9s | \n","PR Sum ",sum, stats->iterations, stats->time_total);
// printf(" -----------------------------------------------------\n");
// pageRankPrint(pageRanks, graph->num_vertices);
free(timer);
free(timer_inner);
free(pageRanksNext);
free(riDividedOnDiClause);
stats->error_total = error_total;
return stats;
}
// Topology-driven push PageRank, fixed-point (Fixed64) arithmetic variant,
// over an adjacency-linked-list graph. Contributions are converted to 64-bit
// fixed point once per iteration, scattered to out-neighbours with
// "#pragma omp atomic update" on the uint64_t accumulator, and converted back
// (Fixed64ToDouble) when damping is applied.
//
// Fix vs. the previous revision: an omp_lock_t array (one lock per vertex)
// was allocated, initialised, destroyed and freed, yet every lock call was
// commented out — the scatter already uses an atomic update. That O(V)
// overhead (and the dead commented-out code) has been removed. "0.0f" stores
// into the uint64_t arrays were also replaced with plain 0. Numerical
// behaviour is unchanged.
//
// Returns heap-allocated PageRankStats (caller owns it); ranks are
// normalised by num_vertices before returning.
struct PageRankStats *pageRankPushFixedPointGraphAdjLinkedList(struct Arguments *arguments, struct GraphAdjLinkedList *graph)
{
    double error_total = 0.0;
    uint32_t v;
    uint32_t activeVertices = 0;
    struct PageRankStats *stats = newPageRankStatsGraphAdjLinkedList(graph);
    struct Timer *timer = (struct Timer *) malloc(sizeof(struct Timer));
    struct Timer *timer_inner = (struct Timer *) malloc(sizeof(struct Timer));
    struct AdjLinkedListNode *Nodes;
    // Both buffers hold Fixed64 values, not floats.
    uint64_t *pageRanksNext = (uint64_t *) my_malloc(graph->num_vertices * sizeof(uint64_t));
    uint64_t *riDividedOnDiClause = (uint64_t *) my_malloc(graph->num_vertices * sizeof(uint64_t));
    printf(" -----------------------------------------------------\n");
    printf("| %-51s | \n", "Starting Page Rank Push FP (tolerance/epsilon)");
    printf(" -----------------------------------------------------\n");
    printf("| %-51.13lf | \n", arguments->epsilon);
    printf(" -----------------------------------------------------\n");
    printf("| %-10s | %-8s | %-15s | %-9s | \n", "Iteration", "Active", "Error", "Time (S)");
    printf(" -----------------------------------------------------\n");
    Start(timer);
    #pragma omp parallel for default(none) private(v) shared(pageRanksNext,graph)
    for(v = 0; v < graph->num_vertices; v++)
    {
        pageRanksNext[v] = 0;
    }
    for(stats->iterations = 0; stats->iterations < arguments->iterations; stats->iterations++)
    {
        Start(timer_inner);
        error_total = 0;
        activeVertices = 0;
        // Phase 1: cache per-vertex contribution in fixed point; sinks get 0.
        #pragma omp parallel for private(v) shared(riDividedOnDiClause,stats,graph)
        for(v = 0; v < graph->num_vertices; v++)
        {
            if(graph->vertices[v].out_degree)
                riDividedOnDiClause[v] = DoubleToFixed64(stats->pageRanks[v] / graph->vertices[v].out_degree);
            else
                riDividedOnDiClause[v] = 0;
        }
        // Phase 2: scatter. Concurrent writers may target the same u, hence
        // the atomic update; integer addition makes the sum order-independent.
        #pragma omp parallel for default(none) private(v,Nodes) shared(graph,pageRanksNext,riDividedOnDiClause) schedule(dynamic, 1024)
        for(v = 0; v < graph->num_vertices; v++)
        {
            Nodes = graph->vertices[v].outNodes;
            uint32_t degree = graph->vertices[v].out_degree;
            uint32_t j;
            for(j = 0 ; j < (degree) ; j++)
            {
                uint32_t u = Nodes->dest;
                Nodes = Nodes->next;
                #pragma omp atomic update
                pageRanksNext[u] += riDividedOnDiClause[v];
            }
        }
        // Phase 3: convert back to floating point, apply damping, accumulate
        // normalised error, count vertices still moving by >= epsilon.
        #pragma omp parallel for private(v) shared(arguments, pageRanksNext,stats) reduction(+ : error_total, activeVertices)
        for(v = 0; v < graph->num_vertices; v++)
        {
            float prevPageRank = stats->pageRanks[v];
            float nextPageRank = stats->base_pr + (stats->damp * Fixed64ToDouble(pageRanksNext[v]));
            stats->pageRanks[v] = nextPageRank;
            pageRanksNext[v] = 0;
            double error = fabs( nextPageRank - prevPageRank);
            error_total += (error / graph->num_vertices);
            if(error >= arguments->epsilon)
            {
                activeVertices++;
            }
        }
        Stop(timer_inner);
        printf("| %-10u | %-8u | %-15.13lf | %-9f | \n", stats->iterations, activeVertices, error_total, Seconds(timer_inner));
        if(activeVertices == 0)
            break;
    }// end iteration loop
    // Normalise ranks by vertex count and report their sum.
    double sum = 0.0f;
    #pragma omp parallel for reduction(+:sum)
    for(v = 0; v < graph->num_vertices; v++)
    {
        stats->pageRanks[v] = stats->pageRanks[v] / graph->num_vertices;
        sum += stats->pageRanks[v];
    }
    Stop(timer);
    stats->time_total = Seconds(timer);
    printf(" -----------------------------------------------------\n");
    printf("| %-10s | %-8s | %-15s | %-9s | \n", "Iterations", "PR Sum", "Error", "Time (S)");
    printf(" -----------------------------------------------------\n");
    printf("| %-10u | %-8lf | %-15.13lf | %-9f | \n", stats->iterations, sum, error_total, stats->time_total);
    printf(" -----------------------------------------------------\n");
    // pageRankPrint(pageRanks, graph->num_vertices);
    free(timer);
    free(timer_inner);
    free(pageRanksNext);
    free(riDividedOnDiClause);
    stats->error_total = error_total;
    return stats;
}
// Data-driven pull PageRank over an adjacency-linked-list graph.
// Only vertices on the current worklist recompute their rank; when a vertex's
// rank changes by >= arguments->epsilon, all of its out-neighbours are placed
// on the next worklist (their pulled value became stale).
// Returns heap-allocated PageRankStats (caller owns it); ranks are
// normalised by num_vertices before returning.
struct PageRankStats *pageRankDataDrivenPullGraphAdjLinkedList(struct Arguments *arguments, struct GraphAdjLinkedList *graph)
{
double error_total = 0.0;
uint32_t i;
uint32_t v;
// float init_pr = 1.0f / (float)graph->num_vertices;
struct PageRankStats *stats = newPageRankStatsGraphAdjLinkedList(graph);
struct Timer *timer = (struct Timer *) malloc(sizeof(struct Timer));
struct Timer *timer_inner = (struct Timer *) malloc(sizeof(struct Timer));
uint8_t *workListCurr = NULL;
uint8_t *workListNext = NULL;
int activeVertices = 0;
struct AdjLinkedListNode *Nodes;
// Dense worklists: workListCurr[v] != 0 means v is processed this sweep.
workListCurr = (uint8_t *) my_malloc(graph->num_vertices * sizeof(uint8_t));
workListNext = (uint8_t *) my_malloc(graph->num_vertices * sizeof(uint8_t));
resetWorkList(workListNext, graph->num_vertices);
resetWorkList(workListCurr, graph->num_vertices);
// riDividedOnDiClause[v]: pageRanks[v]/out_degree(v), or 0 for sinks.
float *riDividedOnDiClause = (float *) my_malloc(graph->num_vertices * sizeof(float));
printf(" -----------------------------------------------------\n");
printf("| %-51s | \n", "Starting Page Rank Pull DD (tolerance/epsilon)");
printf(" -----------------------------------------------------\n");
printf("| %-51.13lf | \n", arguments->epsilon);
printf(" -----------------------------------------------------\n");
printf("| %-10s | %-8s | %-15s | %-9s | \n", "Iteration", "Active", "Error", "Time (S)");
printf(" -----------------------------------------------------\n");
Start(timer);
Start(timer_inner);
// Init: every vertex starts active.
#pragma omp parallel for reduction(+:activeVertices)
for(i = 0; i < graph->num_vertices; i++)
{
workListNext[i] = 1;
activeVertices++;
}
swapWorkLists(&workListNext, &workListCurr);
resetWorkList(workListNext, graph->num_vertices);
Stop(timer_inner);
printf("| %-10s | %-8u | %-15.13lf | %-9f | \n", "Init", activeVertices, error_total, Seconds(timer_inner));
for(stats->iterations = 0; stats->iterations < arguments->iterations; stats->iterations++)
{
Start(timer_inner);
error_total = 0;
activeVertices = 0;
// Phase 1: cache per-vertex contribution for ALL vertices (not only the
// active ones); sinks contribute 0.
#pragma omp parallel for
for(v = 0; v < graph->num_vertices; v++)
{
if(graph->vertices[v].out_degree)
riDividedOnDiClause[v] = stats->pageRanks[v] / graph->vertices[v].out_degree;
else
riDividedOnDiClause[v] = 0.0f;
}
// Phase 2: active vertices pull a fresh rank; if it moved by >= epsilon,
// commit it and mark all out-neighbours for the next sweep.
#pragma omp parallel for default(none) shared(arguments, riDividedOnDiClause, workListCurr, workListNext, stats, graph) private(v,Nodes) reduction(+:activeVertices,error_total) schedule(dynamic, 1024)
for(v = 0; v < graph->num_vertices; v++)
{
if(workListCurr[v])
{
uint32_t degree;
uint32_t j;
uint32_t u;
double error = 0;
float nodeIncomingPR = 0;
#if DIRECTED // will look at the other neighbours if directed by using inverese edge list
Nodes = graph->vertices[v].inNodes;
degree = graph->vertices[v].in_degree;
#else
Nodes = graph->vertices[v].outNodes;
degree = graph->vertices[v].out_degree;
#endif
for(j = 0 ; j < (degree) ; j++)
{
u = Nodes->dest;
Nodes = Nodes->next;
nodeIncomingPR += riDividedOnDiClause[u]; // sum (PRi/outDegree(i))
}
float oldPageRank = stats->pageRanks[v];
float newPageRank = stats->base_pr + (stats->damp * nodeIncomingPR);
error = fabs(newPageRank - oldPageRank);
error_total += error / graph->num_vertices;
if(error >= arguments->epsilon)
{
stats->pageRanks[v] = newPageRank;
Nodes = graph->vertices[v].outNodes;
degree = graph->vertices[v].out_degree;
for(j = 0 ; j < (degree) ; j++)
{
u = Nodes->dest;
Nodes = Nodes->next;
// Flag is only ever set here (idempotent), so a plain atomic
// write suffices — no compare-and-swap needed.
#pragma omp atomic write
workListNext[u] = 1;
// uint8_t old_val = workListNext[u];
// if(!old_val){
// __sync_bool_compare_and_swap(&workListNext[u], 0, 1);
// }
}
activeVertices++;
}
}
}
// activeVertices = getNumOfSetBits(workListNext);
swapWorkLists(&workListNext, &workListCurr);
resetWorkList(workListNext, graph->num_vertices);
Stop(timer_inner);
printf("| %-10u | %-8u | %-15.13lf | %-9f | \n", stats->iterations, activeVertices, error_total, Seconds(timer_inner));
if(activeVertices == 0)
break;
}// end iteration loop
// Normalise ranks by vertex count and report their sum.
double sum = 0.0f;
#pragma omp parallel for reduction(+:sum)
for(v = 0; v < graph->num_vertices; v++)
{
stats->pageRanks[v] = stats->pageRanks[v] / graph->num_vertices;
sum += stats->pageRanks[v];
}
Stop(timer);
stats->time_total = Seconds(timer);
printf(" -----------------------------------------------------\n");
printf("| %-10s | %-8s | %-15s | %-9s | \n", "Iterations", "PR Sum", "Error", "Time (S)");
printf(" -----------------------------------------------------\n");
printf("| %-10u | %-8lf | %-15.13lf | %-9f | \n", stats->iterations, sum, error_total, stats->time_total);
printf(" -----------------------------------------------------\n");
// pageRankPrint(pageRanks, graph->num_vertices);
free(workListCurr);
free(workListNext);
free(timer);
free(timer_inner);
free(riDividedOnDiClause);
stats->error_total = error_total;
return stats;
}
// Data-driven push (residual-based) PageRank over an adjacency-linked-list
// graph. Each active vertex folds its accumulated residual into its rank,
// then pushes damp*residual/out_degree to each out-neighbour; a neighbour is
// activated when its residual crosses arguments->epsilon.
// Returns heap-allocated PageRankStats (caller owns it); ranks are
// normalised by num_vertices before returning.
struct PageRankStats *pageRankDataDrivenPushGraphAdjLinkedList(struct Arguments *arguments, struct GraphAdjLinkedList *graph)
{
double error_total = 0.0;
uint32_t v;
uint32_t degree;
uint32_t j;
uint32_t u;
// float init_pr = 1.0f / (float)graph->num_vertices;
struct PageRankStats *stats = newPageRankStatsGraphAdjLinkedList(graph);
struct Timer *timer = (struct Timer *) malloc(sizeof(struct Timer));
struct Timer *timer_inner = (struct Timer *) malloc(sizeof(struct Timer));
uint8_t *workListCurr = NULL;
uint8_t *workListNext = NULL;
int activeVertices = 0;
struct AdjLinkedListNode *Nodes;
// Dense worklists: workListCurr[v] != 0 means v is processed this sweep.
workListCurr = (uint8_t *) my_malloc(graph->num_vertices * sizeof(uint8_t));
workListNext = (uint8_t *) my_malloc(graph->num_vertices * sizeof(uint8_t));
resetWorkList(workListNext, graph->num_vertices);
resetWorkList(workListCurr, graph->num_vertices);
// NOTE(review): riDividedOnDiClause is allocated and freed but never read or
// written in this function — dead allocation.
float *riDividedOnDiClause = (float *) my_malloc(graph->num_vertices * sizeof(float));
// aResiduals[v]: rank mass accumulated for v since it last ran.
float *aResiduals = (float *) my_malloc(graph->num_vertices * sizeof(float));
printf(" -----------------------------------------------------\n");
printf("| %-51s | \n", "Starting Page Rank Push DD (tolerance/epsilon)");
printf(" -----------------------------------------------------\n");
printf("| %-51.13lf | \n", arguments->epsilon);
printf(" -----------------------------------------------------\n");
printf("| %-10s | %-8s | %-15s | %-9s | \n", "Iteration", "Active", "Error", "Time (S)");
printf(" -----------------------------------------------------\n");
Start(timer);
Start(timer_inner);
// Init: activate every vertex and seed its residual from the neighbours'
// 1/out_degree contributions, scaled by damp*(1 - damp).
#pragma omp parallel for private(Nodes,degree,v,j,u) shared(workListCurr,workListNext,aResiduals) reduction(+:activeVertices)
for(v = 0; v < graph->num_vertices; v++)
{
aResiduals[v] = 0.0;
workListCurr[v] = 1;
workListNext[v] = 0;
activeVertices++;
#if DIRECTED // will look at the other neighbours if directed by using inverese edge list
Nodes = graph->vertices[v].inNodes;
degree = graph->vertices[v].in_degree;
#else
Nodes = graph->vertices[v].outNodes;
degree = graph->vertices[v].out_degree;
#endif
for(j = 0 ; j < (degree) ; j++)
{
u = Nodes->dest;
Nodes = Nodes->next;
if(graph->vertices[u].out_degree)
aResiduals[v] += 1.0f / graph->vertices[u].out_degree; // sum (PRi/outDegree(i))
}
aResiduals[v] = (1.0f - stats->damp) * stats->damp * aResiduals[v];
}
Stop(timer_inner);
printf("| %-10s | %-8u | %-15.13lf | %-9f | \n", "Init", activeVertices, error_total, Seconds(timer_inner));
for(stats->iterations = 0; stats->iterations < arguments->iterations; stats->iterations++)
{
Start(timer_inner);
error_total = 0;
activeVertices = 0;
#pragma omp parallel for default(none) private(Nodes,degree,v,j,u) shared(stats,arguments,graph,workListCurr,workListNext,aResiduals) reduction(+:error_total,activeVertices) schedule(dynamic,1024)
for(v = 0; v < graph->num_vertices; v++)
{
if(workListCurr[v])
{
// Fold v's residual into its rank and record the normalised change.
float oldPageRank = stats->pageRanks[v];
float newPageRank = aResiduals[v] + stats->pageRanks[v];
error_total += fabs(newPageRank / graph->num_vertices - oldPageRank / graph->num_vertices);
// #pragma omp atomic write
stats->pageRanks[v] = newPageRank;
// Push phase: spread the damped residual over v's out-neighbours.
Nodes = graph->vertices[v].outNodes;
degree = graph->vertices[v].out_degree;
// NOTE(review): divides by degree with no zero guard; an active vertex
// with out_degree == 0 would yield inf/NaN delta — TODO confirm.
float delta = stats->damp * (aResiduals[v] / degree);
for(j = 0 ; j < (degree) ; j++)
{
u = Nodes->dest;
Nodes = Nodes->next;
float prevResidual = 0.0f;
prevResidual = aResiduals[u];
#pragma omp atomic update
aResiduals[u] += delta;
// Activate u only on the epsilon crossing, so each vertex is
// counted at most once per sweep.
if ((fabs(prevResidual + delta) >= arguments->epsilon) && (prevResidual <= arguments->epsilon))
{
activeVertices++;
if(!workListNext[u])
{
// #pragma omp atomic write
workListNext[u] = 1;
}
}
}
aResiduals[v] = 0.0f;
}
}
// activeVertices = getNumOfSetBits(workListNext);
swapWorkLists(&workListNext, &workListCurr);
resetWorkList(workListNext, graph->num_vertices);
Stop(timer_inner);
printf("| %-10u | %-8u | %-15.13lf | %-9f | \n", stats->iterations, activeVertices, error_total, Seconds(timer_inner));
if(activeVertices == 0)
break;
}// end iteration loop
// Normalise ranks by vertex count and report their sum.
double sum = 0.0f;
#pragma omp parallel for reduction(+:sum)
for(v = 0; v < graph->num_vertices; v++)
{
stats->pageRanks[v] = stats->pageRanks[v] / graph->num_vertices;
sum += stats->pageRanks[v];
}
Stop(timer);
stats->time_total = Seconds(timer);
printf(" -----------------------------------------------------\n");
printf("| %-10s | %-8s | %-15s | %-9s | \n", "Iterations", "PR Sum", "Error", "Time (S)");
printf(" -----------------------------------------------------\n");
printf("| %-10u | %-8lf | %-15.13lf | %-9f | \n", stats->iterations, sum, error_total, stats->time_total);
printf(" -----------------------------------------------------\n");
// pageRankPrint(pageRanks, graph->num_vertices);
free(workListCurr);
free(workListNext);
free(timer);
free(timer_inner);
free(aResiduals);
free(riDividedOnDiClause);
stats->error_total = error_total;
return stats;
}
/*
 * Data-driven PageRank on an adjacency-linked-list graph, using a pull step
 * for the rank update and a push step for the residual propagation.
 *
 * Each iteration processes only vertices flagged in workListCurr:
 *   1. pull: recompute the vertex's rank from its (in-)neighbours' ranks;
 *   2. push: scatter damp * residual / out_degree to out-neighbours and
 *      activate any neighbour whose residual crosses arguments->epsilon.
 * The loop stops when no vertex is active or arguments->iterations is hit.
 *
 * Returns a freshly allocated stats struct (caller owns it); final page
 * ranks are normalized by num_vertices before return.
 *
 * Fix: removed the riDividedOnDiClause buffer, which was allocated and
 * freed but never read or written (wasted num_vertices * sizeof(float)).
 */
struct PageRankStats *pageRankDataDrivenPullPushGraphAdjLinkedList(struct Arguments *arguments, struct GraphAdjLinkedList *graph)
{
    double error_total = 0.0;
    uint32_t v;
    uint32_t degree;
    uint32_t j;
    uint32_t u;
    struct AdjLinkedListNode *Nodes;
    // float init_pr = 1.0f / (float)graph->num_vertices;
    struct PageRankStats *stats = newPageRankStatsGraphAdjLinkedList(graph);
    struct Timer *timer = (struct Timer *) malloc(sizeof(struct Timer));
    struct Timer *timer_inner = (struct Timer *) malloc(sizeof(struct Timer));
    uint8_t *workListCurr = NULL;
    uint8_t *workListNext = NULL;
    int activeVertices = 0;

    workListCurr = (uint8_t *) my_malloc(graph->num_vertices * sizeof(uint8_t));
    workListNext = (uint8_t *) my_malloc(graph->num_vertices * sizeof(uint8_t));
    resetWorkList(workListNext, graph->num_vertices);
    resetWorkList(workListCurr, graph->num_vertices);

    // Per-vertex residual mass not yet folded into the rank.
    float *aResiduals = (float *) my_malloc(graph->num_vertices * sizeof(float));

    printf(" -----------------------------------------------------\n");
    printf("| %-51s | \n", "Starting Page Rank Pull-Push DD (tolerance/epsilon)");
    printf(" -----------------------------------------------------\n");
    printf("| %-51.13lf | \n", arguments->epsilon);
    printf(" -----------------------------------------------------\n");
    printf("| %-10s | %-8s | %-15s | %-9s | \n", "Iteration", "Active", "Error", "Time (S)");
    printf(" -----------------------------------------------------\n");

    Start(timer);
    Start(timer_inner);
    // Init: every vertex starts active with residual (1-d)*d * sum(1/out_degree(u)).
    #pragma omp parallel for private(Nodes,degree,v,j,u) shared(stats,workListCurr,workListNext,aResiduals) reduction(+:activeVertices)
    for(v = 0; v < graph->num_vertices; v++)
    {
        aResiduals[v] = 0.0f;
        workListCurr[v] = 1;
        workListNext[v] = 0;
        activeVertices++;
#if DIRECTED // will look at the other neighbours if directed by using inverese edge list
        Nodes = graph->vertices[v].inNodes;
        degree = graph->vertices[v].in_degree;
#else
        Nodes = graph->vertices[v].outNodes;
        degree = graph->vertices[v].out_degree;
#endif
        for(j = 0 ; j < (degree) ; j++)
        {
            u = Nodes->dest;
            Nodes = Nodes->next;
            if(graph->vertices[u].out_degree)
                aResiduals[v] += 1.0f / graph->vertices[u].out_degree; // sum (PRi/outDegree(i))
        }
        aResiduals[v] = (1.0f - stats->damp) * stats->damp * aResiduals[v];
    }
    Stop(timer_inner);
    printf("| %-10s | %-8u | %-15.13lf | %-9f | \n", "Init", activeVertices, error_total, Seconds(timer_inner));

    for(stats->iterations = 0; stats->iterations < arguments->iterations; stats->iterations++)
    {
        Start(timer_inner);
        error_total = 0;
        activeVertices = 0;
        #pragma omp parallel for default(none) private(Nodes,degree,v,j,u) shared(stats,arguments,graph,workListCurr,workListNext,aResiduals) reduction(+:error_total,activeVertices) schedule(dynamic,1024)
        for(v = 0; v < graph->num_vertices; v++)
        {
            if(workListCurr[v])
            {
                // Pull phase: rebuild this vertex's rank from its neighbours' ranks.
                float nodeIncomingPR = 0.0f;
#if DIRECTED // will look at the other neighbours if directed by using inverese edge list
                Nodes = graph->vertices[v].inNodes;
                degree = graph->vertices[v].in_degree;
#else
                Nodes = graph->vertices[v].outNodes;
                degree = graph->vertices[v].out_degree;
#endif
                for(j = 0 ; j < (degree) ; j++)
                {
                    u = Nodes->dest;
                    Nodes = Nodes->next;
                    nodeIncomingPR += stats->pageRanks[u] / graph->vertices[u].out_degree;
                }
                float newPageRank = stats->base_pr + (stats->damp * nodeIncomingPR);
                float oldPageRank = stats->pageRanks[v];
                // float newPageRank = aResiduals[v]+pageRanks[v];
                error_total += fabs(newPageRank / graph->num_vertices - oldPageRank / graph->num_vertices);
                #pragma omp atomic write
                stats->pageRanks[v] = newPageRank;
                // Push phase: distribute this vertex's residual to its out-neighbours.
                Nodes = graph->vertices[v].outNodes;
                degree = graph->vertices[v].out_degree;
                // NOTE(review): when out_degree is 0 this divides by zero; delta is then
                // never used because the loop below does not execute — confirm intended.
                float delta = stats->damp * (aResiduals[v] / degree);
                for(j = 0 ; j < (degree) ; j++)
                {
                    u = Nodes->dest;
                    Nodes = Nodes->next;
                    float prevResidual = 0.0f;
                    // Unsynchronized read; the update below is atomic, so the activation
                    // test may see a slightly stale value — presumably tolerated by the
                    // data-driven scheme (TODO confirm).
                    prevResidual = aResiduals[u];
                    #pragma omp atomic update
                    aResiduals[u] += delta;
                    // Activate u when its residual first crosses the tolerance.
                    if ((fabs(prevResidual + delta) >= arguments->epsilon) && (prevResidual <= arguments->epsilon))
                    {
                        activeVertices++;
                        if(!workListNext[u])
                        {
                            workListNext[u] = 1;
                        }
                    }
                }
                aResiduals[v] = 0.0f;
            }
        }
        // activeVertices = getNumOfSetBits(workListNext);
        swapWorkLists(&workListNext, &workListCurr);
        resetWorkList(workListNext, graph->num_vertices);
        Stop(timer_inner);
        printf("| %-10u | %-8u | %-15.13lf | %-9f | \n", stats->iterations, activeVertices, error_total, Seconds(timer_inner));
        if(activeVertices == 0)
            break;
    }// end iteration loop

    // Normalize ranks and accumulate the total for the summary line.
    double sum = 0.0f;
    #pragma omp parallel for reduction(+:sum)
    for(v = 0; v < graph->num_vertices; v++)
    {
        stats->pageRanks[v] = stats->pageRanks[v] / graph->num_vertices;
        sum += stats->pageRanks[v];
    }
    Stop(timer);
    stats->time_total = Seconds(timer);
    printf(" -----------------------------------------------------\n");
    printf("| %-10s | %-8s | %-15s | %-9s | \n", "Iterations", "PR Sum", "Error", "Time (S)");
    printf(" -----------------------------------------------------\n");
    printf("| %-10u | %-8lf | %-15.13lf | %-9f | \n", stats->iterations, sum, error_total, stats->time_total);
    printf(" -----------------------------------------------------\n");
    // pageRankPrint(pageRanks, graph->num_vertices);
    free(workListCurr);
    free(workListNext);
    free(timer);
    free(timer_inner);
    free(aResiduals);
    stats->error_total = error_total;
    return stats;
}
pst_fmt_plug.c | /* PST cracker patch for JtR. Hacked together during July of 2012 by
* Dhiru Kholia <dhiru.kholia at gmail.com>
*
* Optimizations and shift to pkzip CRC32 code done by JimF
*
* This software is Copyright (c) 2012, Dhiru Kholia <dhiru.kholia at gmail.com>
* and it is hereby released to the general public under the following terms:
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*
* Uses code from crc32_fmt_plug.c written by JimF */
#if FMT_EXTERNS_H
extern struct fmt_main fmt_pst;
#elif FMT_REGISTERS_H
john_register_one(&fmt_pst);
#else
#include <string.h>
#include "arch.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "crc32.h"
#if !FAST_FORMATS_OMP
#undef _OPENMP
#endif
#ifdef _OPENMP
#include <omp.h>
#ifdef __MIC__
#ifndef OMP_SCALE
#define OMP_SCALE 1024
#endif
#else
#ifndef OMP_SCALE
#define OMP_SCALE 16384 // core i7 no HT
#endif
#endif
static int omp_t = 1;
#endif
#include "memdbg.h"
#define FORMAT_LABEL "PST"
#define FORMAT_NAME "custom CRC-32"
#define FORMAT_TAG "$pst$"
#define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1)
#define ALGORITHM_NAME "32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define PLAINTEXT_LENGTH 8
#define BINARY_SIZE 4
#define SALT_SIZE 0
#define BINARY_ALIGN sizeof(ARCH_WORD_32)
#define SALT_ALIGN 1
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 256
/* Known plaintext / CRC self-test vectors (CRC collisions are expected for
 * this format — any 8-char-truncated string with the same CRC matches). */
static struct fmt_tests tests[] = {
	{"$pst$a9290513", "openwall"}, /* "jfuck jw" works too ;) */
	{"$pst$50e099bc", "password"},
	{"$pst$00000000", ""},
	{"$pst$e3da3318", "xxx"},
	{"$pst$a655dd18", "XYz123"},
	{"$pst$29b14070", "thisisalongstring"},
	{"$pst$25b44615", "string with space"},
	{NULL}
};
/* Per-candidate plaintext buffers and computed CRC values; sized in init(). */
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static ARCH_WORD_32 (*crypt_out);
/* Allocate the per-candidate buffers; under OpenMP, scale the keys-per-crypt
 * limits by thread count (and OMP_SCALE) so each thread gets enough work. */
static void init(struct fmt_main *self)
{
#if defined (_OPENMP)
	omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	saved_key = mem_calloc(self->params.max_keys_per_crypt,
			sizeof(*saved_key));
	crypt_out = mem_calloc(self->params.max_keys_per_crypt,
			sizeof(*crypt_out));
}
/* Release the buffers allocated in init(). */
static void done(void)
{
	MEM_FREE(crypt_out);
	MEM_FREE(saved_key);
}
/* Accept only hashes of the form "$pst$" followed by exactly 8 hex digits
 * (BINARY_SIZE * 2). Returns 1 if valid, 0 otherwise. */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *p;
	int extra;

	if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN))
		return 0;
	p = ciphertext + FORMAT_TAG_LEN;
	/* hexlenl() counts leading hex digits; 'extra' presumably flags
	 * trailing non-hex characters — confirm against misc.h. */
	if (hexlenl(p, &extra) != BINARY_SIZE * 2 || extra)
		return 0;
	return 1;
}
/* Store one candidate password, truncated to PLAINTEXT_LENGTH and
 * always NUL-terminated by strnzcpy(). */
static void set_key(char *key, int index)
{
	strnzcpy(saved_key[index], key, sizeof(saved_key[index]));
}
/* Return 1 iff any of the first 'count' computed CRCs equals the target. */
static int cmp_all(void *binary, int count)
{
	ARCH_WORD_32 target = *(ARCH_WORD_32 *)binary;
	int i;

	for (i = 0; i < count; i++) {
		if (crypt_out[i] == target)
			return 1;
	}
	return 0;
}
static int cmp_one(void *binary, int index)
{
return *((ARCH_WORD_32*)binary) == crypt_out[index];
}
/* The full CRC was already compared in cmp_one(); nothing left to check. */
static int cmp_exact(char *source, int index)
{
	return 1;
}
/* Compute the PST CRC-32 of every queued candidate (salt is unused). */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int i;

#ifdef _OPENMP
#pragma omp parallel for private(i)
#endif
	for (i = 0; i < count; ++i) {
		const unsigned char *p;
		CRC32_t crc = 0;

		for (p = (const unsigned char *)saved_key[i]; *p; p++)
			crc = jtr_crc32(crc, *p);
		crypt_out[i] = crc;
	}
	return count;
}
static void *get_binary(char *ciphertext)
{
static ARCH_WORD_32 *out;
if (!out)
out = mem_alloc_tiny(sizeof(ARCH_WORD_32), MEM_ALIGN_WORD);
sscanf(&ciphertext[5], "%x", out);
return out;
}
/* Return the stored candidate plaintext at position 'index'. */
static char *get_key(int index)
{
	return saved_key[index];
}
/* Hash-table helpers: low bits of the computed CRC at each PH_MASK level. */
static int get_hash_0(int index) { return crypt_out[index] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_out[index] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_out[index] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_out[index] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_out[index] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_out[index] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_out[index] & PH_MASK_6; }
/*
 * Format descriptor registered with the John the Ripper core.  The first
 * brace group is the params block (labels, sizes, flags, test vectors);
 * the second is the method table.  Slot order is fixed by formats.h —
 * do not reorder.
 */
struct fmt_main fmt_pst = {
	{ /* params */
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
#ifdef _OPENMP
		FMT_OMP | FMT_OMP_BAD |
#endif
		/* FMT_NOT_EXACT: a CRC-32 match does not uniquely identify the
		 * password (see the collision note on the test vectors). */
		FMT_CASE | FMT_TRUNC | FMT_8_BIT | FMT_NOT_EXACT,
		{ NULL },
		{ FORMAT_TAG },
		tests
	}, { /* methods */
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		get_binary,
		fmt_default_salt,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		fmt_default_salt_hash,
		NULL,
		fmt_default_set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif /* plugin stanza */
|
GB_unop__atan_fp64_fp64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__atan_fp64_fp64)
// op(A') function: GB (_unop_tran__atan_fp64_fp64)
// C type: double
// A type: double
// cast: double cij = aij
// unaryop: cij = atan (aij)
#define GB_ATYPE \
double
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = atan (x) ;
// casting
#define GB_CAST(z, aij) \
double z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
double aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
double z = aij ; \
Cx [pC] = atan (z) ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ATAN || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply Cx[p] = atan(Ax[p]) over anz entries.  Handles both the full case
// (Ab == NULL) and the bitmap case (skip entries with Ab[p] == 0).
// Returns GrB_NO_VALUE when this kernel is compiled out via GB_DISABLE.
GrB_Info GB (_unop_apply__atan_fp64_fp64)
(
    double *Cx,         // Cx and Ax may be aliased
    const double *Ax,
    const int8_t *restrict Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // TODO: if OP is ONE and uniform-valued matrices are exploited, then
    // do this in O(1) time
    if (Ab == NULL)
    {
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        // identity op, no typecast: a straight parallel memcpy suffices
        GB_memcpy (Cx, Ax, anz * sizeof (double), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            double aij = Ax [p] ;
            double z = aij ;
            Cx [p] = atan (z) ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            double aij = Ax [p] ;
            double z = aij ;
            Cx [p] = atan (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = atan(cast(A')): transpose + apply.  The actual loop is textually
// included from GB_unop_transpose.c, specialized by the GB_* macros above.
GrB_Info GB (_unop_tran__atan_fp64_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
nmt_covar.c | #include "utils.h"
/* Deep-copy a binning scheme: the band count, per-band multipole lists and
 * per-band weight lists are all duplicated.  Caller frees with nmt_bins_free. */
static nmt_binning_scheme *nmt_bins_copy(nmt_binning_scheme *b_or)
{
  nmt_binning_scheme *copy = my_malloc(sizeof(nmt_binning_scheme));
  int ib;

  copy->n_bands = b_or->n_bands;
  copy->nell_list = my_malloc(copy->n_bands * sizeof(int));
  memcpy(copy->nell_list, b_or->nell_list, copy->n_bands * sizeof(int));
  copy->ell_list = my_malloc(copy->n_bands * sizeof(int *));
  copy->w_list = my_malloc(copy->n_bands * sizeof(flouble *));
  for (ib = 0; ib < copy->n_bands; ib++) {
    int nell = copy->nell_list[ib];

    copy->ell_list[ib] = my_malloc(nell * sizeof(int));
    copy->w_list[ib] = my_malloc(nell * sizeof(flouble));
    memcpy(copy->ell_list[ib], b_or->ell_list[ib], nell * sizeof(int));
    memcpy(copy->w_list[ib], b_or->w_list[ib], nell * sizeof(flouble));
  }
  return copy;
}
/*
 * Build a Gaussian-covariance workspace from two mode-coupling workspaces.
 * Requires matching resolutions (nside/lmax) and spin-0 fields (ncls == 1).
 * Precomputes the xi_1122/xi_1221 coupling sums from the pseudo-C_l's of the
 * four mask products, and copies the binned coupling matrices (and their LU
 * permutations) needed to decouple the covariance later.
 * Caller frees the result with nmt_covar_workspace_free().
 *
 * Fix: removed a duplicated "cw->ncls_a=wa->ncls;" assignment.
 */
nmt_covar_workspace *nmt_covar_workspace_init(nmt_workspace *wa,nmt_workspace *wb)
{
  if((wa->nside!=wb->nside) || (wa->lmax!=wb->lmax))
    report_error(NMT_ERROR_COVAR,"Can't compute covariance for fields with different resolutions\n");
  if((wa->ncls!=1) || (wb->ncls!=1))
    report_error(NMT_ERROR_COVAR,"Gaussian covariance only implemented for spin-0 fields\n");

  nmt_covar_workspace *cw=my_malloc(sizeof(nmt_covar_workspace));
  int ii;
  int nside=wa->nside;
  int npix=he_nside2npix(nside);

  /* Pixel-wise products of the four mask combinations. */
  flouble *mask_a1b1=my_malloc(npix*sizeof(flouble));
  flouble *mask_a1b2=my_malloc(npix*sizeof(flouble));
  flouble *mask_a2b1=my_malloc(npix*sizeof(flouble));
  flouble *mask_a2b2=my_malloc(npix*sizeof(flouble));

  cw->nside=wa->nside;
  cw->lmax_a=wa->lmax;
  cw->lmax_b=wb->lmax;
  cw->ncls_a=wa->ncls;
  cw->ncls_b=wb->ncls;
  cw->bin_a=nmt_bins_copy(wa->bin);
  cw->bin_b=nmt_bins_copy(wb->bin);

  flouble *cl_mask_1122=my_malloc((cw->lmax_a+1)*sizeof(flouble));
  flouble *cl_mask_1221=my_malloc((cw->lmax_a+1)*sizeof(flouble));
  cw->xi_1122=my_malloc(cw->ncls_a*(cw->lmax_a+1)*sizeof(flouble *));
  cw->xi_1221=my_malloc(cw->ncls_a*(cw->lmax_a+1)*sizeof(flouble *));
  for(ii=0;ii<cw->ncls_a*(cw->lmax_a+1);ii++) {
    cw->xi_1122[ii]=my_malloc(cw->ncls_b*(wb->lmax+1)*sizeof(flouble));
    cw->xi_1221[ii]=my_malloc(cw->ncls_b*(wb->lmax+1)*sizeof(flouble));
  }

  /* Copies of the binned coupling matrices and their LU permutations. */
  cw->coupling_binned_a=gsl_matrix_alloc(cw->ncls_a*cw->bin_a->n_bands,cw->ncls_a*cw->bin_a->n_bands);
  gsl_matrix_memcpy(cw->coupling_binned_a,wa->coupling_matrix_binned);
  cw->coupling_binned_b=gsl_matrix_alloc(cw->ncls_b*cw->bin_b->n_bands,cw->ncls_b*cw->bin_b->n_bands);
  gsl_matrix_memcpy(cw->coupling_binned_b,wb->coupling_matrix_binned);
  cw->coupling_binned_perm_a=gsl_permutation_alloc(cw->ncls_a*cw->bin_a->n_bands);
  gsl_permutation_memcpy(cw->coupling_binned_perm_a,wa->coupling_matrix_perm);
  cw->coupling_binned_perm_b=gsl_permutation_alloc(cw->ncls_b*cw->bin_b->n_bands);
  gsl_permutation_memcpy(cw->coupling_binned_perm_b,wb->coupling_matrix_perm);

  /* Pseudo-C_l of the mask products, rescaled by (2l+1)/(4pi). */
  he_map_product(nside,wa->mask1,wb->mask1,mask_a1b1);
  he_map_product(nside,wa->mask1,wb->mask2,mask_a1b2);
  he_map_product(nside,wa->mask2,wb->mask1,mask_a2b1);
  he_map_product(nside,wa->mask2,wb->mask2,mask_a2b2);
  he_anafast(&mask_a1b1,&mask_a2b2,0,0,&cl_mask_1122,wa->nside,cw->lmax_a,HE_NITER_DEFAULT);
  he_anafast(&mask_a1b2,&mask_a2b1,0,0,&cl_mask_1221,wa->nside,cw->lmax_a,HE_NITER_DEFAULT);
  free(mask_a1b1); free(mask_a1b2); free(mask_a2b1); free(mask_a2b2);
  for(ii=0;ii<=cw->lmax_a;ii++) {
    cl_mask_1122[ii]*=(ii+0.5)/(2*M_PI);
    cl_mask_1221[ii]*=(ii+0.5)/(2*M_PI);
  }

  /* xi(l2,l3) = sum_l1 cl_mask(l1) * wigner3j(l2,l3,l1;0,0,0)^2 */
#pragma omp parallel default(none)		\
  shared(cw,cl_mask_1122,cl_mask_1221)
  {
    int ll2,ll3;
    double *wigner_00=NULL;
    int lstart=0;
    wigner_00=my_malloc(2*(cw->lmax_a+1)*sizeof(double));
    if(cw->ncls_a>1)
      lstart=2;
#pragma omp for schedule(dynamic)
    for(ll2=lstart;ll2<=cw->lmax_a;ll2++) {
      for(ll3=lstart;ll3<=cw->lmax_a;ll3++) {
	int jj,l1,lmin_here,lmax_here;
	int lmin_here_00=0,lmax_here_00=2*(cw->lmax_a+1)+1;
	flouble xi_1122=0,xi_1221=0;
	drc3jj(ll2,ll3,0,0,&lmin_here_00,&lmax_here_00,wigner_00,2*(cw->lmax_a+1));
	lmin_here=lmin_here_00;
	lmax_here=lmax_here_00;
	for(l1=lmin_here;l1<=lmax_here;l1++) {
	  if(l1<=cw->lmax_a) {
	    flouble wfac_1122,wfac_1221;
	    int j00=l1-lmin_here_00;
	    wfac_1122=cl_mask_1122[l1]*wigner_00[j00]*wigner_00[j00];
	    wfac_1221=cl_mask_1221[l1]*wigner_00[j00]*wigner_00[j00];
	    xi_1122+=wfac_1122;
	    xi_1221+=wfac_1221;
	  }
	}
	/* +0 offsets are the (icl_a, icl_b) indices, trivial since ncls==1. */
	cw->xi_1122[ll2+0][ll3+0]=xi_1122;
	cw->xi_1221[ll2+0][ll3+0]=xi_1221;
      }
    } //end omp for
    free(wigner_00);
  } //end omp parallel
  free(cl_mask_1122);
  free(cl_mask_1221);

  return cw;
}
/*
 * Free a covariance workspace and everything it owns.
 *
 * Fix: the xi_1122/xi_1221 top-level pointer arrays were previously leaked —
 * only the per-row buffers were freed.
 */
void nmt_covar_workspace_free(nmt_covar_workspace *cw)
{
  int ii;
  gsl_permutation_free(cw->coupling_binned_perm_b);
  gsl_permutation_free(cw->coupling_binned_perm_a);
  gsl_matrix_free(cw->coupling_binned_b);
  gsl_matrix_free(cw->coupling_binned_a);
  for(ii=0;ii<cw->ncls_a*(cw->lmax_a+1);ii++) {
    free(cw->xi_1122[ii]);
    free(cw->xi_1221[ii]);
  }
  free(cw->xi_1122);
  free(cw->xi_1221);
  nmt_bins_free(cw->bin_a);
  nmt_bins_free(cw->bin_b);
  free(cw);
}
/*
 * Assemble the Gaussian covariance of two binned power spectra.
 * Inputs cla1b1..cla2b2 are the per-multipole cross-spectra of the four
 * field combinations; the result is written row-major into covar_out,
 * which must hold (ncls_a*n_bands_a) x (ncls_b*n_bands_b) entries.
 * Step 1 bins the mode-coupled covariance using the precomputed xi sums;
 * step 2 decouples it as C' = M_a^-1 * C * M_b^-T via the stored LU
 * factorizations.
 */
void nmt_compute_gaussian_covariance(nmt_covar_workspace *cw,
				     flouble *cla1b1,flouble *cla1b2,flouble *cla2b1,flouble *cla2b2,
				     flouble *covar_out)
{
  int icl_a;
  gsl_matrix *covar_binned=gsl_matrix_alloc(cw->ncls_a*cw->bin_a->n_bands,cw->ncls_b*cw->bin_b->n_bands);
  for(icl_a=0;icl_a<cw->ncls_a;icl_a++) {
    int icl_b;
    for(icl_b=0;icl_b<cw->ncls_b;icl_b++) {
      int iba;
      for(iba=0;iba<cw->bin_a->n_bands;iba++) {
	int ibb;
	for(ibb=0;ibb<cw->bin_b->n_bands;ibb++) {
	  double cbinned=0;
	  int ila;
	  /* Sum over all (la,lb) pairs falling in bands (iba,ibb),
	     weighting by the band weights. */
	  for(ila=0;ila<cw->bin_a->nell_list[iba];ila++) {
	    int ilb;
	    int la=cw->bin_a->ell_list[iba][ila];
	    for(ilb=0;ilb<cw->bin_b->nell_list[ibb];ilb++) {
	      int lb=cw->bin_b->ell_list[ibb][ilb];
	      double xi_1122=cw->xi_1122[cw->ncls_a*la+icl_a][cw->ncls_b*lb+icl_b];
	      double xi_1221=cw->xi_1221[cw->ncls_a*la+icl_a][cw->ncls_b*lb+icl_b];
	      /* Symmetrized Wick contractions: (a1b1,a2b2) and (a1b2,a2b1). */
	      double fac_1122=0.5*(cla1b1[la]*cla2b2[lb]+cla1b1[lb]*cla2b2[la]);
	      double fac_1221=0.5*(cla1b2[la]*cla2b1[lb]+cla1b2[lb]*cla2b1[la]);
	      cbinned+=(xi_1122*fac_1122+xi_1221*fac_1221)*
		cw->bin_a->w_list[iba][ila]*cw->bin_b->w_list[ibb][ilb];
	    }
	  }
	  gsl_matrix_set(covar_binned,cw->ncls_a*iba+icl_a,cw->ncls_b*ibb+icl_b,cbinned);
	}
      }
    }
  }

  /* Decouple: C' = M_a^-1 * C * M_b^-1^T */
  gsl_matrix *covar_out_g =gsl_matrix_alloc(cw->ncls_a*cw->bin_a->n_bands,cw->ncls_b*cw->bin_b->n_bands);
  gsl_matrix *mat_tmp     =gsl_matrix_alloc(cw->ncls_a*cw->bin_a->n_bands,cw->ncls_b*cw->bin_b->n_bands);
  gsl_matrix *inverse_a   =gsl_matrix_alloc(cw->ncls_a*cw->bin_a->n_bands,cw->ncls_a*cw->bin_a->n_bands);
  gsl_matrix *inverse_b   =gsl_matrix_alloc(cw->ncls_b*cw->bin_b->n_bands,cw->ncls_b*cw->bin_b->n_bands);
  gsl_linalg_LU_invert(cw->coupling_binned_b,cw->coupling_binned_perm_b,inverse_b); //M_b^-1
  gsl_linalg_LU_invert(cw->coupling_binned_a,cw->coupling_binned_perm_a,inverse_a); //M_a^-1
  gsl_blas_dgemm(CblasNoTrans,CblasTrans  ,1,covar_binned,inverse_b,0,mat_tmp    ); //tmp = C * M_b^-1^T
  gsl_blas_dgemm(CblasNoTrans,CblasNoTrans,1,inverse_a   ,mat_tmp  ,0,covar_out_g); //C' = M_a^-1 * C * M_b^-1^T

  /* Flatten into the caller's row-major output buffer. */
  int ii;
  long elem=0;
  for(ii=0;ii<cw->ncls_a*cw->bin_a->n_bands;ii++) {
    int jj;
    for(jj=0;jj<cw->ncls_b*cw->bin_b->n_bands;jj++) {
      covar_out[elem]=gsl_matrix_get(covar_out_g,ii,jj);
      elem++;
    }
  }

  gsl_matrix_free(covar_binned);
  gsl_matrix_free(mat_tmp);
  gsl_matrix_free(inverse_a);
  gsl_matrix_free(inverse_b);
  gsl_matrix_free(covar_out_g);
}
/*
 * Serialize a covariance workspace to a binary file.
 * Layout: lmax_a, lmax_b, nside, ncls_a, ncls_b; xi_1122 rows; xi_1221 rows;
 * binning scheme a; binning scheme b; then the GSL matrices/permutations.
 * Must stay in lockstep with nmt_covar_workspace_read().
 */
void nmt_covar_workspace_write(nmt_covar_workspace *cw,char *fname)
{
  int ii;
  FILE *fo=my_fopen(fname,"wb");
  my_fwrite(&(cw->lmax_a),sizeof(int),1,fo);
  my_fwrite(&(cw->lmax_b),sizeof(int),1,fo);
  my_fwrite(&(cw->nside),sizeof(int),1,fo);
  my_fwrite(&(cw->ncls_a),sizeof(int),1,fo);
  my_fwrite(&(cw->ncls_b),sizeof(int),1,fo);
  for(ii=0;ii<cw->ncls_a*(cw->lmax_a+1);ii++)
    my_fwrite(cw->xi_1122[ii],sizeof(flouble),cw->ncls_b*(cw->lmax_b+1),fo);
  for(ii=0;ii<cw->ncls_a*(cw->lmax_a+1);ii++)
    my_fwrite(cw->xi_1221[ii],sizeof(flouble),cw->ncls_b*(cw->lmax_b+1),fo);
  /* Binning scheme a: band count, per-band lengths, then ell/weight lists. */
  my_fwrite(&(cw->bin_a->n_bands),sizeof(int),1,fo);
  my_fwrite(cw->bin_a->nell_list,sizeof(int),cw->bin_a->n_bands,fo);
  for(ii=0;ii<cw->bin_a->n_bands;ii++) {
    my_fwrite(cw->bin_a->ell_list[ii],sizeof(int),cw->bin_a->nell_list[ii],fo);
    my_fwrite(cw->bin_a->w_list[ii],sizeof(flouble),cw->bin_a->nell_list[ii],fo);
  }
  /* Binning scheme b, same layout. */
  my_fwrite(&(cw->bin_b->n_bands),sizeof(int),1,fo);
  my_fwrite(cw->bin_b->nell_list,sizeof(int),cw->bin_b->n_bands,fo);
  for(ii=0;ii<cw->bin_b->n_bands;ii++) {
    my_fwrite(cw->bin_b->ell_list[ii],sizeof(int),cw->bin_b->nell_list[ii],fo);
    my_fwrite(cw->bin_b->w_list[ii],sizeof(flouble),cw->bin_b->nell_list[ii],fo);
  }
  gsl_matrix_fwrite(fo,cw->coupling_binned_a);
  gsl_permutation_fwrite(fo,cw->coupling_binned_perm_a);
  gsl_matrix_fwrite(fo,cw->coupling_binned_b);
  gsl_permutation_fwrite(fo,cw->coupling_binned_perm_b);
  fclose(fo);
}
/*
 * Deserialize a covariance workspace written by nmt_covar_workspace_write();
 * field order must mirror that function exactly.  Returns a newly allocated
 * workspace; caller frees it with nmt_covar_workspace_free().
 */
nmt_covar_workspace *nmt_covar_workspace_read(char *fname)
{
  int ii;
  nmt_covar_workspace *cw=my_malloc(sizeof(nmt_covar_workspace));
  FILE *fi=my_fopen(fname,"rb");
  my_fread(&(cw->lmax_a),sizeof(int),1,fi);
  my_fread(&(cw->lmax_b),sizeof(int),1,fi);
  my_fread(&(cw->nside),sizeof(int),1,fi);
  my_fread(&(cw->ncls_a),sizeof(int),1,fi);
  my_fread(&(cw->ncls_b),sizeof(int),1,fi);
  cw->xi_1122=my_malloc(cw->ncls_a*(cw->lmax_a+1)*sizeof(flouble *));
  for(ii=0;ii<cw->ncls_a*(cw->lmax_a+1);ii++) {
    cw->xi_1122[ii]=my_malloc(cw->ncls_b*(cw->lmax_b+1)*sizeof(flouble));
    my_fread(cw->xi_1122[ii],sizeof(flouble),cw->ncls_b*(cw->lmax_b+1),fi);
  }
  cw->xi_1221=my_malloc(cw->ncls_a*(cw->lmax_a+1)*sizeof(flouble *));
  for(ii=0;ii<cw->ncls_a*(cw->lmax_a+1);ii++) {
    cw->xi_1221[ii]=my_malloc(cw->ncls_b*(cw->lmax_b+1)*sizeof(flouble));
    my_fread(cw->xi_1221[ii],sizeof(flouble),cw->ncls_b*(cw->lmax_b+1),fi);
  }
  /* Binning scheme a: band count, per-band lengths, then ell/weight lists. */
  cw->bin_a=my_malloc(sizeof(nmt_binning_scheme));
  my_fread(&(cw->bin_a->n_bands),sizeof(int),1,fi);
  cw->bin_a->nell_list=my_malloc(cw->bin_a->n_bands*sizeof(int));
  cw->bin_a->ell_list=my_malloc(cw->bin_a->n_bands*sizeof(int *));
  cw->bin_a->w_list=my_malloc(cw->bin_a->n_bands*sizeof(flouble *));
  my_fread(cw->bin_a->nell_list,sizeof(int),cw->bin_a->n_bands,fi);
  for(ii=0;ii<cw->bin_a->n_bands;ii++) {
    cw->bin_a->ell_list[ii]=my_malloc(cw->bin_a->nell_list[ii]*sizeof(int));
    cw->bin_a->w_list[ii]=my_malloc(cw->bin_a->nell_list[ii]*sizeof(flouble));
    my_fread(cw->bin_a->ell_list[ii],sizeof(int),cw->bin_a->nell_list[ii],fi);
    my_fread(cw->bin_a->w_list[ii],sizeof(flouble),cw->bin_a->nell_list[ii],fi);
  }
  /* Binning scheme b, same layout. */
  cw->bin_b=my_malloc(sizeof(nmt_binning_scheme));
  my_fread(&(cw->bin_b->n_bands),sizeof(int),1,fi);
  cw->bin_b->nell_list=my_malloc(cw->bin_b->n_bands*sizeof(int));
  cw->bin_b->ell_list=my_malloc(cw->bin_b->n_bands*sizeof(int *));
  cw->bin_b->w_list=my_malloc(cw->bin_b->n_bands*sizeof(flouble *));
  my_fread(cw->bin_b->nell_list,sizeof(int),cw->bin_b->n_bands,fi);
  for(ii=0;ii<cw->bin_b->n_bands;ii++) {
    cw->bin_b->ell_list[ii]=my_malloc(cw->bin_b->nell_list[ii]*sizeof(int));
    cw->bin_b->w_list[ii]=my_malloc(cw->bin_b->nell_list[ii]*sizeof(flouble));
    my_fread(cw->bin_b->ell_list[ii],sizeof(int),cw->bin_b->nell_list[ii],fi);
    my_fread(cw->bin_b->w_list[ii],sizeof(flouble),cw->bin_b->nell_list[ii],fi);
  }
  cw->coupling_binned_a=gsl_matrix_alloc(cw->ncls_a*cw->bin_a->n_bands,cw->ncls_a*cw->bin_a->n_bands);
  cw->coupling_binned_perm_a=gsl_permutation_alloc(cw->ncls_a*cw->bin_a->n_bands);
  gsl_matrix_fread(fi,cw->coupling_binned_a);
  gsl_permutation_fread(fi,cw->coupling_binned_perm_a);
  cw->coupling_binned_b=gsl_matrix_alloc(cw->ncls_b*cw->bin_b->n_bands,cw->ncls_b*cw->bin_b->n_bands);
  cw->coupling_binned_perm_b=gsl_permutation_alloc(cw->ncls_b*cw->bin_b->n_bands);
  gsl_matrix_fread(fi,cw->coupling_binned_b);
  gsl_permutation_fread(fi,cw->coupling_binned_perm_b);
  fclose(fi);
  return cw;
}
|
omp_lock.c | #include <stdio.h>
#include <omp.h>
#include "omp_testsuite.h"
/*
 * Verify that omp_set_lock/omp_unset_lock provide mutual exclusion.
 * Each iteration increments then decrements a shared counter inside the
 * lock; every thread adds the counter's value it observed to 'result', so
 * result stays 0 iff no two threads were ever inside the region at once.
 * Returns 1 on success, 0 on failure.  logFile is accepted but unused.
 */
int
check_omp_lock (FILE * logFile)
{
  omp_lock_t lck;
  int nr_threads_in_single = 0; /* threads currently inside the region */
  int result = 0;               /* accumulates violations; 0 == exclusive */
  int nr_iterations = 0;        /* should end up equal to LOOPCOUNT */
  int i;

  omp_init_lock (&lck);
#pragma omp parallel shared(lck)
  {
#pragma omp for
    for (i = 0; i < LOOPCOUNT; i++)
      {
	omp_set_lock (&lck);
	/* flushes force the shared counters to memory around the updates */
#pragma omp flush
	nr_threads_in_single++;
#pragma omp flush
	nr_iterations++;
	nr_threads_in_single--;
	result = result + nr_threads_in_single;
	omp_unset_lock (&lck);
      }
  }
  omp_destroy_lock (&lck);
  return ((result == 0) && (nr_iterations == LOOPCOUNT));
}
/*
 * Cross-check for check_omp_lock(): identical code but with the lock
 * acquire/release deliberately commented out, so the shared counters race.
 * With more than one thread this is expected to (usually) return 0,
 * demonstrating that the positive test actually depends on the lock.
 * logFile is accepted but unused.
 */
int
crosscheck_omp_lock (FILE * logFile)
{
  omp_lock_t lck;
  int nr_threads_in_single = 0;
  int result = 0;
  int nr_iterations = 0;
  int i;

  omp_init_lock (&lck);
#pragma omp parallel shared(lck)
  {
#pragma omp for
    for (i = 0; i < LOOPCOUNT; i++)
      {
	/*omp_set_lock(&lck); */
#pragma omp flush
	nr_threads_in_single++;
#pragma omp flush
	nr_iterations++;
	nr_threads_in_single--;
	result = result + nr_threads_in_single;
	/*omp_unset_lock(&lck); */
      }
  }
  omp_destroy_lock (&lck);
  return ((result == 0) && (nr_iterations == LOOPCOUNT));
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.